Initial render of threejs scene doesn't add texture - three.js

I originally had an animate function for my three.js scene, which is loaded inside an AngularJS modal, but found that the animation kept running after the modal was closed. Constant animation is unnecessary here, since I don't need the continuous updates a video game would.
So I switched to rendering only when someone uses the OrbitControls to move the simple box in my example, plus one initial call to render the scene so users see the box instead of a big blacked-out square.
However, on that initial render the texture is not applied; it only appears once I move the box with the orbit controls. This is odd, since both my initial call and the listener tied to the OrbitControls invoke the same render function. How do I get the initial load to show the texture?
$scope.generate3D = function () {
    // 3D OBJECT - Variables
    var texture0 = baseBlobURL + 'Texture_0.png';
    var boxDAE = baseBlobURL + 'Box.dae';
    var scene;
    var camera;
    var renderer;
    var box;
    var controls;
    var newtexture;

    // Update texture
    newtexture = THREE.ImageUtils.loadTexture(texture0);

    // Instantiate a Collada loader
    var loader = new THREE.ColladaLoader();
    loader.options.convertUpAxis = true;

    loader.load(boxDAE, function (collada) {
        box = collada.scene;
        box.traverse(function (child) {
            if (child instanceof THREE.SkinnedMesh) {
                var animation = new THREE.Animation(child, child.geometry.animation);
                animation.play();
            }
        });
        box.scale.x = box.scale.y = box.scale.z = .2;
        box.updateMatrix();
        init();
        // Initial call to render scene; from this point, Orbit Controls render the scene per the event listener
        render();
    });

    function init() {
        scene = new THREE.Scene();
        camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);
        renderer = new THREE.WebGLRenderer();
        renderer.setClearColor(0xdddddd);
        //renderer.setSize(window.innerWidth, window.innerHeight);
        renderer.setSize(500, 500);

        // Load the box file
        scene.add(box);

        // Lighting
        var light = new THREE.AmbientLight();
        scene.add(light);

        // Camera
        camera.position.x = 40;
        camera.position.y = 40;
        camera.position.z = 40;
        camera.lookAt(scene.position);

        // Rotation Controls
        controls = new THREE.OrbitControls(camera, renderer.domElement);
        controls.addEventListener('change', render);
        controls.rotateSpeed = 5.0;
        controls.zoomSpeed = 5;
        controls.noZoom = false;
        controls.noPan = false;

        var myEl = angular.element(document.querySelector('#webGL-container'));
        myEl.append(renderer.domElement);
    }

    function render() {
        renderer.render(scene, camera);
        console.log('loaded');
    }
}

You are using ColladaLoader and you want to force a call to render() when the model and all the textures are loaded.
If you add the model to the scene in the loader callback, there is still a chance that even though the model has loaded, the textures may not have.
One thing you can do is add the following before instantiating the loader:
THREE.DefaultLoadingManager.onLoad = function () {
    // console.log( 'everything loaded' ); // debug
    render();
};
Or alternatively,
THREE.DefaultLoadingManager.onProgress = function ( item, loaded, total ) {
    // console.log( item, loaded, total ); // debug
    if ( loaded === total ) render();
};
three.js r.72

Related

Auto rotate animation on Three.js

I'm using Three.js to render a 3D element.
I use mousemove to rotate the scene with the movement of the mouse.
I'd like to add an animation that slightly rotates the scene automatically.
It would essentially emulate the rotation done via mouse movement, but automatically, so the object doesn't appear static on load.
Does anyone know a simple way to achieve that?
Thanks in advance,
$(function() {
    var logoSrc = './assets/vector_gltf/scene.gltf';
    var renderer,
        scene,
        camera,
        holder = document.getElementById('holder');
    canvas = document.getElementById('canvasLogo');

    renderer = new THREE.WebGLRenderer({
        canvas: canvas,
        antialias: true,
        alpha: true,
    });
    renderer.setSize($(holder).width(), $(holder).height());
    holder.appendChild(renderer.domElement);
    renderer.setClearColor( 0x000000, 0 );
    renderer.setPixelRatio(window.devicePixelRatio);
    // renderer.setSize(window.innerWidth, window.innerHeight);

    // on resize
    window.addEventListener('resize', () => {
        // Update camera
        camera.aspect = window.innerWidth / window.innerHeight;
        camera.updateProjectionMatrix();
        // Update renderer
        // renderer.setSize(window.innerWidth, window.innerHeight);
        renderer.setSize($(holder).width(), $(holder).height());
        holder.appendChild(renderer.domElement);
        renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2));
    });

    camera = new THREE.PerspectiveCamera(55, window.innerWidth / window.innerHeight, 0.1, 1000 );
    scene = new THREE.Scene();

    var light = new THREE.AmbientLight(0x0000ff, 1);
    scene.add(light);
    var light2 = new THREE.PointLight(0xff0000, 1);
    scene.add(light2);

    var loader = new THREE.GLTFLoader();
    loader.load(logoSrc, handle_load);

    var s;
    var mesh;

    function handle_load(gltf) {
        s = gltf.scene;
        mesh = s.children[0].children[0].children[0];
        // s.children[0].material = new THREE.MeshStandardMaterial({
        //     normalMap : logoSrc + 'textures/StingrayPBS1SG_normal.png',
        //     emissiveMap : logoSrc + 'textures/StingrayPBS1SG_emissive.png',
        //     metalnessMap : logoSrc + 'textures/StingrayPBS1SG_metallicRoughness.png',
        //     map : logoSrc + 'textures/StingrayPBS1SG_baseColor.png'
        // });
        scene.add(s);
        s.position.y = -0.2;
        s.position.z = -2; //15
    }

    $(".intro").on("mousemove", function(e){
        mesh.rotation.set(-0.003 * (e.pageY - window.innerHeight / 20), 0, -0.02 * (e.pageX - (window.innerWidth / 2) - 3.14));
        //.set(0.0018 * (e.pageY - 291.45) - 1.45,0, 0.02 * (e.pageX - (window.innerWidth / 2) - 3.14))
    });

    // anim
    const clock = new THREE.Clock();
    const loop = () => {
        const elapsedTime = clock.getElapsedTime(); // delta time
        // Render
        renderer.render(scene, camera);
        // Call tick again on the next frame
        window.requestAnimationFrame(loop);
    }
    loop();
});
This is what you want: http://threejs.org/examples/misc_controls_orbit.html
Include the orbit controls (after you have downloaded them):
<script src="js/controls/OrbitControls.js"></script>
Set up the variable:
var controls;
Attach the controls to the camera, add a listener, and turn on auto-rotation (note that autoRotate is a boolean property, not a method):
controls = new THREE.OrbitControls( camera );
controls.addEventListener( 'change', render );
controls.autoRotate = true;
Then, in your animate function, update the controls every frame; auto-rotation only advances inside update():
controls.update();
The autoRotate flag is really what you want for the rotation; everything else is setting up your controls.
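Putting it together, here is a minimal sketch of the whole setup; it assumes camera, renderer, and scene already exist, and the speed value is just an illustrative choice, not something from your code:

// Auto-rotate via OrbitControls (sketch): update() must run every frame for autoRotate to advance
var controls = new THREE.OrbitControls( camera, renderer.domElement );
controls.autoRotate = true;        // orbit the camera around its target automatically
controls.autoRotateSpeed = 1.0;    // default is 2.0; lower values rotate more slowly

function animate() {
    requestAnimationFrame( animate );
    controls.update();             // advances the auto-rotation (and any damping)
    renderer.render( scene, camera );
}
animate();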
You can also use an animate() function that rotates your 3D element automatically and combine it with a render() method. Note that rotation is an Euler, so you increment one of its axes rather than the rotation object itself:
function animate() {
    requestAnimationFrame( animate );
    group.rotation.y += 0.005;
    render();
}

function render() {
    renderer.render( scene, camera );
}

Three.js LegacyGLTFLoader.js shadows missing

I have a GLTF version 1.0 model that I am importing into Three.js using LegacyGLTFLoader.js. When I do so, everything looks good, except that the model does not receive shadows. I am guessing that this is because the imported model's material is THREE.RawShaderMaterial, which does not support receiving shadows (I think). How can I fix this so that my imported model can receive shadows?
Here is sample code:
// Construct scene.
var scene = new THREE.Scene();
// Get window dimensions.
var width = window.innerWidth;
var height = window.innerHeight;
// Construct camera.
var camera = new THREE.PerspectiveCamera(75, width/height);
camera.position.set(20, 20, 20);
camera.lookAt(scene.position);
// Construct renderer.
var renderer = new THREE.WebGLRenderer();
renderer.setSize(width, height);
renderer.shadowMapEnabled = true;
document.body.appendChild(renderer.domElement);
// Construct cube.
var cubeGeometry = new THREE.BoxGeometry(10, 1, 10);
var cubeMaterial = new THREE.MeshPhongMaterial({color: 0x00ff00});
var cube = new THREE.Mesh(cubeGeometry, cubeMaterial);
cube.castShadow = true;
cube.translateY(15);
scene.add(cube);
// Construct floor.
var floorGeometry = new THREE.BoxGeometry(20, 1, 20);
var floorMaterial = new THREE.MeshPhongMaterial({color: 0x00ffff});
var floor = new THREE.Mesh(floorGeometry, floorMaterial);
floor.receiveShadow = true;
scene.add(floor);
// Construct light.
var light = new THREE.DirectionalLight(0xffffff);
light.position.set(0, 20, 0);
light.castShadow = true;
scene.add(light);
// Construct light helper.
var lightHelper = new THREE.DirectionalLightHelper(light);
scene.add(lightHelper);
// Construct orbit controls.
new THREE.OrbitControls(camera, renderer.domElement);
// Construct GLTF loader.
var loader = new THREE.LegacyGLTFLoader();
// Load GLTF model.
loader.load(
    "https://dl.dropboxusercontent.com/s/5piiujui3sdiaj3/1.glb",
    function(event) {
        var model = event.scene.children[0];
        var mesh = model.children[0];
        mesh.receiveShadow = true;
        scene.add(model);
    },
    null,
    function(event) {
        alert("Loading model failed.");
    }
);
// Animates the scene.
var animate = function () {
    requestAnimationFrame(animate);
    renderer.render(scene, camera);
};
// Animate the scene.
animate();
Here are my resources:
https://dl.dropboxusercontent.com/s/y2r8bsrppv0oqp4/three.js
https://dl.dropboxusercontent.com/s/5wh92lnsxz2ge1e/LegacyGLTFLoader.js
https://dl.dropboxusercontent.com/s/1jygy1eavetnp0d/OrbitControls.js
https://dl.dropboxusercontent.com/s/5piiujui3sdiaj3/1.glb
Here is a JSFiddle:
https://jsfiddle.net/rmilbert/8tqc3yx4/26/
One way to fix the problem is to replace the instance of RawShaderMaterial with MeshStandardMaterial. To get the intended effect, you have to apply the existing texture to the new material like so:
var newMaterial = new THREE.MeshStandardMaterial( { roughness: 1, metalness: 0 } );
newMaterial.map = child.material.uniforms.u_tex.value;
You also have to compute normal data for the respective geometry so lighting can be evaluated correctly. If you don't need shadows, the unlit MeshBasicMaterial is actually the better choice.
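For illustration, here is a hedged sketch of how that replacement might look inside the loader callback; it assumes, as in the snippet above, that the loaded RawShaderMaterial keeps its base color texture in a u_tex uniform, and the traversal itself is mine, not part of the original answer:

loader.load("1.glb", function(event) {            // your model URL
    var model = event.scene.children[0];
    model.traverse(function(child) {
        if (child instanceof THREE.Mesh && child.material instanceof THREE.RawShaderMaterial) {
            // Swap the raw shader material for a lit standard material
            var newMaterial = new THREE.MeshStandardMaterial( { roughness: 1, metalness: 0 } );
            newMaterial.map = child.material.uniforms.u_tex.value; // keep the original texture
            child.material = newMaterial;
            // Standard materials need normals for lighting and shadows
            child.geometry.computeVertexNormals();
            child.receiveShadow = true;
        }
    });
    scene.add(model);
});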
Updated fiddle: https://jsfiddle.net/e67hbj1q/2/

Using OutlinePass (THREE.js r102) with skinned mesh

/examples/js/postprocessing/OutlinePass.js from THREE.js r102 does not appear to work with skinned meshes. Specifically, the rendered outline always stays in the mesh's rest position.
Is there some way to get this working (that is, to update the outline to reflect the current pose of an animated mesh)? OutlinePass does not appear to be documented (apart from the comments in the code itself).
Is there some other accepted method of outlining animated meshes? I'm in the process of migrating some code from r7x, where I ended up accomplishing this by manually creating a copy of the mesh and applying a shader material that scales along the normals. I can do that again, but if there's a simpler/better supported method to accomplish the same effect I'd rather use it instead of reproducing a method that breaks every new major release.
A simple jsfiddle illustrating the issue:
https://jsfiddle.net/L69pe5q2/3/
This is the code from the jsfiddle. The mesh I use is the SimpleSkinning.gltf example from the three.js distribution. In the jsfiddle I load it from a dataURI so it doesn't complain about XSS loading, and I've edited the base64-encoded data out (and replaced it with [FOO]) in the code below, purely for readability.
The OutlinePass is created and added to the composer in initComposer().
var camera, light, renderer, composer, mixer, loader, clock;
var scene, mesh, outlinePass;
var height = 480,
    width = 640;
var clearColor = '#666666';

load();

function load() {
    loader = new THREE.GLTFLoader();
    clock = new THREE.Clock();
    scene = new THREE.Scene();
    loader.load('data:text/plain;base64,[FOO]', function(obj) {
        scene.add(obj.scene);
        mixer = new THREE.AnimationMixer(obj.scene);
        var clip = THREE.AnimationClip.findByName(obj.animations, 'Take 01');
        var a = mixer.clipAction(clip);
        a.reset();
        a.play();
        mesh = obj.scene;
        mesh.position.set(-7, 2.5, -7);
        init();
        animate();
    });
}

function init() {
    initCamera();
    initScene();
    initRenderer();
    initComposer();
    outlinePass.selectedObjects = [mesh];
}

function initCamera() {
    camera = new THREE.PerspectiveCamera(30, width / height, 1, 10000);
    camera.position.set(7, 0, 7);
    camera.lookAt(0, 0, 0);
}

function initScene() {
    light = new THREE.AmbientLight(0xffffff);
    scene.add(light);
}

function initRenderer() {
    renderer = new THREE.WebGLRenderer({
        width: width,
        height: height,
        antialias: false,
    });
    renderer.setSize(width, height);
    renderer.setClearColor(clearColor);
    document.body.appendChild(renderer.domElement);
}

function initComposer() {
    var renderPass, copyPass;
    composer = new THREE.EffectComposer(renderer);
    renderPass = new THREE.RenderPass(scene, camera);
    composer.addPass(renderPass);
    outlinePass = new THREE.OutlinePass(new THREE.Vector2(width, height), scene, camera);
    composer.addPass(outlinePass);
    outlinePass.edgeStrength = 10;
    outlinePass.edgeThickness = 4;
    outlinePass.visibleEdgeColor.set('#ff0000');
    copyPass = new THREE.ShaderPass(THREE.CopyShader);
    copyPass.renderToScreen = true;
    composer.addPass(copyPass);
}

function animate() {
    var delta = clock.getDelta();
    requestAnimationFrame(animate);
    update(delta);
    render(delta);
}

function update(delta) {
    if (mixer) mixer.update(delta);
}

function render(delta) {
    composer.render();
}
According to Mugen87 (January 2019):
With this small patch, it's now possible to use the outline pass with animated meshes. The only thing users have to do at app level is to set morphTargets or skinning to true for OutlinePass.depthMaterial and OutlinePass.prepareMaskMaterial. That's of course still a manual effort but at least the more complicated shader enhancement is already done.
Take this example:
https://jsfiddle.net/2ybks7rd/
reference link on github
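Based on that description, a minimal sketch of the app-level change (assuming a three.js build that already contains the patch, and using the outlinePass created in initComposer() above):

// Enable skinning on the OutlinePass's internal materials so the
// depth/mask passes follow the animated pose (per the patch notes above).
outlinePass.depthMaterial.skinning = true;
outlinePass.prepareMaskMaterial.skinning = true;
// For morph-target animation, set morphTargets instead:
// outlinePass.depthMaterial.morphTargets = true;
// outlinePass.prepareMaskMaterial.morphTargets = true;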

touch controls: repeat action until touchend

I am trying to add touch controls to a three.js scene. I want to move the camera in whatever direction the user touches. It works great using the keyboard because you can press and hold the button and the camera moves continuously. But when I try the same thing using touchstart, you have to keep tapping the screen over and over to move, you can't just hold your finger down like on a keyboard or mouse.
I looked at touchmove, but if you just tap and hold without moving, there are no new touches.
Is there something similar to holding down a keyboard key or mouse button using touch events?
There is no built-in callback for a touch event that fires repeatedly like the keyboard does. You can, however, simply track the start and end of the touch and then apply the movement repeatedly while the touch is active.
First, subscribe to the correct events and set a bool to track the state:
var isTouching = false;
window.addEventListener("touchstart", () => isTouching = true);
window.addEventListener("touchend", () => isTouching = false);
In Three.js you will most likely already have a render loop (e.g. a function called "animate"). Check the state variable on every iteration and apply the movement each time. You may also need to factor in deltaTime (the duration of the last frame) to make the movement framerate-independent.
function animate() {
    requestAnimationFrame(animate);
    mesh.rotation.x += 0.005;
    mesh.rotation.y += 0.01;
    if (isTouching) {
        console.log("move camera");
    }
    renderer.render(scene, camera);
}
Here is a snippet which shows the basic approach. Click and hold in the left or right half of the output window to move the camera.
var camera, scene, renderer, mesh, material, clock;
init();
animate();
var isTouching = false;
var mousePositionX;
window.addEventListener("mousedown", (e) => {
isTouching = true;
mousePositionX = e.clientX;
});
window.addEventListener("mouseup", (e) => isTouching = false);
function init() {
renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
clock = new THREE.Clock();
camera = new THREE.PerspectiveCamera(70, window.innerWidth / window.innerHeight, 1, 1000);
camera.position.z = 400;
scene = new THREE.Scene();
material = new THREE.MeshPhongMaterial();
var geometry = new THREE.BoxGeometry(200, 200, 200);
mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
var light = new THREE.AmbientLight(0x404040);
scene.add(light);
var directionalLight = new THREE.DirectionalLight(0xffffff);
directionalLight.position.set(1, 1, 1).normalize();
scene.add(directionalLight);
window.addEventListener('resize', onWindowResize, false);
}
function animate() {
requestAnimationFrame(animate);
mesh.rotation.x += 0.005;
mesh.rotation.y += 0.01;
let deltaTime = clock.getDelta();
if (isTouching) {
let speed = 200; // px per second
let movement = speed * deltaTime;
if (mousePositionX > window.innerWidth / 2) {
camera.translateX(-movement);
} else {
camera.translateX(movement);
}
}
renderer.render(scene, camera);
}
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
body {
padding: 0;
margin: 0;
}
canvas {
display: block;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/93/three.min.js"></script>

Need to render textures uploaded by user in input files with Canvas renderer three.js

I've loaded a Blender model using the three.js library and want to allow users to change the texture of some faces through a file input in a form. There's no problem when I use the WebGLRenderer, and it works fine in Chrome, but it doesn't work with the CanvasRenderer when the texture coming from the input is in data:image... format. If I load an image from a full path on the server instead, it works fine. Does anybody know a way to load textures this way and render them with the CanvasRenderer?
Thank you.
Below is my code, after setting up the camera and lights and detecting whether the browser supports WebGL, in order to use either the WebGLRenderer or the CanvasRenderer.
First I load the model from blender:
var loader = new THREE.JSONLoader();
loader.load('assets/models/mimaquina9.js', function (geometry, mat) {
    //I set the overdraw property to 1 for each material like i show here for 16
    mat[16].overdraw = 1;
    mesh = new THREE.Mesh(geometry, new THREE.MeshFaceMaterial(mat) );
    mesh.scale.x = 5;
    mesh.scale.y = 5;
    mesh.scale.z = 5;
    scene.add(mesh);
}, 'assets/images');

render();
// To render, I run an animation loop when WebGL is available, and just render one frame when using the canvas renderer.
function render() {
    if (webgl) {
        if (mesh) {
            mesh.rotation.y += 0.02;
        }
        // render using requestAnimationFrame
        requestAnimationFrame(render);
        webGLRenderer.render(scene, camera);
    }
    else if (canvas) {
        camera.position.x = 30;
        camera.position.y = 20;
        camera.position.z = 40;
        camera.lookAt(new THREE.Vector3(0, 10, 0));
        setTimeout(function () {
            //something you want delayed
            webGLRenderer.render(scene, camera);
        }, 1000);
    }
}
$('#datafile').change(function(e) {
    e.preventDefault();
    var f = e.target.files[0];
    if (f && window.FileReader) {
        var reader = new FileReader();
        reader.onload = function(evt) {
            console.log(evt);
            mesh.material.materials[16].map = THREE.ImageUtils.loadTexture(evt.target.result);
            if (canvas && !webgl) {
                //I read that might be a problem of using Lambert materials, so I tried this commented line without success
                //mesh.material.materials[16] = new THREE.MeshBasicMaterial( { map: THREE.ImageUtils.loadTexture(evt.target.result)});
                //If I uncomment the next line, it displays the texture fine when rendering after.
                //mesh.material.materials[16].map = THREE.ImageUtils.loadTexture("assets/images/foto.jpg");
                render();
            }
        }
        reader.readAsDataURL(f);
    }
});
Thanks once more.
evt.target.result is a data URL, so you should assign it to an image's src. Something like this should work:
var image = document.createElement( 'img' );
image.src = evt.target.result;
mesh.material.materials[16].map = new THREE.Texture( image );
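Note that a THREE.Texture built from an image element is only uploaded once its needsUpdate flag is set, and the data-URL image still loads asynchronously. A small, hedged extension of the snippet above (the render() call assumes the render-on-demand function from the question):

var image = document.createElement( 'img' );
image.onload = function () {
    var texture = new THREE.Texture( image );
    texture.needsUpdate = true; // tell three.js to upload the freshly loaded image data
    mesh.material.materials[16].map = texture;
    render();                   // re-draw once the new texture is ready
};
image.src = evt.target.result;  // the data URL produced by the FileReader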
