Cannot get bottom view in three.js scene

In this scene, when the Orthographic camera is selected, all of the views work except "Bottom": it produces a different view depending on which view was active before it.
Here is the view function:
function fView(name) {
    let p = mset.view[name];
    let cam = "Orthographic";
    mset.camera.position[cam] = p;
    camera.position.set(p[0], p[1], p[2]);
    camera.updateProjectionMatrix();
    camera.lookAt(scene.position);
}
Here is the relevant part of the mset JSON:
"view":{
"Top":[0,1,0],
"Bottom":[0,-1,0],
"Front":[0,0,1],
"Back":[0,0,-1],
"Left":[-1,0,0],
"Right":[1,0,0]
}
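One likely explanation (an assumption, not stated in the question): for "Top" and "Bottom" the view direction is parallel to the camera's default up vector (0, 1, 0), so lookAt() cannot derive a unique roll and the resulting orientation depends on whatever the camera was doing beforehand. A minimal sketch of one possible adjustment, choosing an explicit up vector for the vertical views (the particular up vectors used here are arbitrary):
function fView(name) {
    let p = mset.view[name];
    mset.camera.position["Orthographic"] = p;

    // For the vertical views the default up vector (0, 1, 0) is parallel
    // to the view direction, so give lookAt() an explicit up vector.
    if (name === "Top" || name === "Bottom") {
        camera.up.set(0, 0, name === "Top" ? -1 : 1);
    } else {
        camera.up.set(0, 1, 0);
    }

    camera.position.set(p[0], p[1], p[2]);
    camera.lookAt(scene.position);
    camera.updateProjectionMatrix();
}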

Related

Applying user selected texture to cube dynamically?

I’m creating a cube using BoxGeometry and trying to apply the texture that the user has selected.
I created a raycaster and use it inside the mousedown event.
If an object is under the cursor, the user-selected texture gets applied.
So far everything works fine. Then I added an if-else condition: if the user selects a single image for the texture, that single image is applied to the whole cube, so every side of the cube gets that image as its material.
But if the user selects more than one image, say three images, then only three sides of the cube get a material. This also works very well.
Now the real problem: when the user selects only three images, the remaining sides of the cube stay black, and that is okay; only the sides with a material show up.
But if the user then selects a single image again and clicks on the cube to apply that single texture to the whole cube, the raycaster throws an error:
"can't access property "side", the material is undefined"
This is the callback function I added to handle the selected image files:
function onUserMultiImageSelect(event) {
    materialArray.length = 0;
    length = multiFilesId.files.length;
    let multiImage = [];
    let userMultiImageUrl = [];
    // Build one material per selected image file.
    for (let i = 0; i < length; i++) {
        multiImage[i] = multiFilesId.files.item(i);
        userMultiImageUrl[i] = URL.createObjectURL(multiImage[i]);
        materialArray.push(new THREE.MeshBasicMaterial({
            map: new THREE.TextureLoader().load(userMultiImageUrl[i]),
            side: THREE.DoubleSide
        }));
    }
    isTextureSelect = true;
}
This is the callback function which gets called when I click on the object to apply the texture:
function onMouseButtonDown(event) {
    var rect = event.target.getBoundingClientRect();
    var mouseX = event.clientX - rect.left;
    var mouseY = event.clientY - rect.top;
    let raycaster = new THREE.Raycaster();
    let pointer = new THREE.Vector2();
    pointer.set((mouseX / window.innerWidth) * 2 - 1, -(mouseY / window.innerHeight) * 2 + 1);
    raycaster.setFromCamera(pointer, camera);
    const intersects = raycaster.intersectObjects(scene.children);
    if (intersects.length > 0) {
        var curentIntersectedObject = intersects[0];
        if (isTextureSelect) {
            if (length == 1) {
                var singleImage = multiFilesId.files.item(0);
                var singleImageUrl = URL.createObjectURL(singleImage);
                var texture = new THREE.TextureLoader().load(singleImageUrl);
                curentIntersectedObject.object.material = new THREE.MeshBasicMaterial({ map: texture });
            } else {
                curentIntersectedObject.object.material = materialArray;
            }
        }
    }
}
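A likely cause, though this is an assumption rather than something confirmed above: a BoxGeometry mesh has six material groups, so while materialArray holds only three materials the remaining groups have an undefined material, and the raycast fails as soon as it tries to read material.side for one of those groups. A minimal sketch of one workaround, reusing the question's globals (materialArray, multiFilesId, length, isTextureSelect) and padding the array to six entries with a plain placeholder material:
function onUserMultiImageSelect(event) {
    materialArray.length = 0;
    length = multiFilesId.files.length;
    for (let i = 0; i < length; i++) {
        const file = multiFilesId.files.item(i);
        const url = URL.createObjectURL(file);
        materialArray.push(new THREE.MeshBasicMaterial({
            map: new THREE.TextureLoader().load(url),
            side: THREE.DoubleSide
        }));
    }
    // Pad to six materials so every face group of the BoxGeometry
    // has a defined material (black placeholder for the unused faces).
    while (materialArray.length < 6) {
        materialArray.push(new THREE.MeshBasicMaterial({ color: 0x000000 }));
    }
    isTextureSelect = true;
}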

Using OutlinePass (THREE.js r102) with skinned mesh

/examples/js/postprocessing/OutlinePass.js from THREE.js r102 does not appear to work with skinned meshes. Specifically, the rendered outline always stays in the mesh's rest position.
Is there some way to get this working (that is, to update the outline to reflect the current pose of an animated mesh)? OutlinePass does not appear to be documented (apart from the comments in the code itself).
Is there some other accepted method of outlining animated meshes? I'm in the process of migrating some code from r7x, where I ended up accomplishing this by manually creating a copy of the mesh and applying a shader material that scales along the normals. I can do that again, but if there's a simpler/better supported method to accomplish the same effect I'd rather use it instead of reproducing a method that breaks every new major release.
A simple jsfiddle illustrating the issue:
https://jsfiddle.net/L69pe5q2/3/
This is the code from the jsfiddle. The mesh I use is the SimpleSkinning.gltf example from the three.js distribution. In the jsfiddle I load it from a dataURI so it doesn't complain about XSS loading, and I've edited the base64-encoded data out (and replaced it with [FOO]) in the code below, purely for readability.
The OutlinePass is created and added to the composer in initComposer().
var camera, light, renderer, composer, mixer, loader, clock;
var scene, mesh, outlinePass;
var height = 480, width = 640;
var clearColor = '#666666';

load();

function load() {
    loader = new THREE.GLTFLoader();
    clock = new THREE.Clock();
    scene = new THREE.Scene();
    loader.load('data:text/plain;base64,[FOO]', function(obj) {
        scene.add(obj.scene);
        mixer = new THREE.AnimationMixer(obj.scene);
        var clip = THREE.AnimationClip.findByName(obj.animations, 'Take 01');
        var a = mixer.clipAction(clip);
        a.reset();
        a.play();
        mesh = obj.scene;
        mesh.position.set(-7, 2.5, -7);
        init();
        animate();
    });
}

function init() {
    initCamera();
    initScene();
    initRenderer();
    initComposer();
    outlinePass.selectedObjects = [mesh];
}

function initCamera() {
    camera = new THREE.PerspectiveCamera(30, width / height, 1, 10000);
    camera.position.set(7, 0, 7);
    camera.lookAt(0, 0, 0);
}

function initScene() {
    light = new THREE.AmbientLight(0xffffff);
    scene.add(light);
}

function initRenderer() {
    renderer = new THREE.WebGLRenderer({
        width: width,
        height: height,
        antialias: false,
    });
    renderer.setSize(width, height);
    renderer.setClearColor(clearColor);
    document.body.appendChild(renderer.domElement);
}

function initComposer() {
    var renderPass, copyPass;
    composer = new THREE.EffectComposer(renderer);
    renderPass = new THREE.RenderPass(scene, camera);
    composer.addPass(renderPass);
    outlinePass = new THREE.OutlinePass(new THREE.Vector2(width, height), scene, camera);
    composer.addPass(outlinePass);
    outlinePass.edgeStrength = 10;
    outlinePass.edgeThickness = 4;
    outlinePass.visibleEdgeColor.set('#ff0000');
    copyPass = new THREE.ShaderPass(THREE.CopyShader);
    copyPass.renderToScreen = true;
    composer.addPass(copyPass);
}

function animate() {
    var delta = clock.getDelta();
    requestAnimationFrame(animate);
    update(delta);
    render(delta);
}

function update(delta) {
    if (mixer) mixer.update(delta);
}

function render(delta) {
    composer.render();
}
According to Mugen87 (January 2019):
With this small patch, it's now possible to use the outline pass with animated meshes. The only thing users have to do at app level is to set morphTargets or skinning to true for OutlinePass.depthMaterial and OutlinePass.prepareMaskMaterial. That's of course still a manual effort but at least the more complicated shader enhancement is already done.
Take this example:
https://jsfiddle.net/2ybks7rd/
Reference link on GitHub.
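Based on that comment, the app-level change would look roughly like the sketch below (this assumes a build around r102 that includes the referenced patch; in later three.js releases the skinning flag on materials was removed and this manual step should no longer be needed):
// Inside initComposer(), after creating the outline pass:
outlinePass = new THREE.OutlinePass(new THREE.Vector2(width, height), scene, camera);

// Flags described in the quoted comment, so the pass's internal depth/mask
// renders use the skinned vertex positions instead of the rest pose.
outlinePass.depthMaterial.skinning = true;
outlinePass.prepareMaskMaterial.skinning = true;

composer.addPass(outlinePass);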

Three.js multiple Canvases and Shader

1.
I am trying to set up multiple canvases on a page like the given examples on threejs.org.
My basic code is like this:
var scene, camera, controls, renderer, pointLight, geometry, material;
var container, position, dimensions, apps = [];
var windowWidth = window.innerWidth;
var windowHeight = window.innerHeight;

function preView( id ) {
    apps.push( new App( id ) );
    //animate(); // if i call this here, all canvases renders once

    function App( id ) {
        container = $('#preView_' + id);
        dimensions = { width: container.width(), height: container.height() };
        camera = new THREE.PerspectiveCamera(45, dimensions.width / dimensions.height, 1, 5 * radius);
        camera.position.x = 0;
        camera.position.y = 0;
        camera.position.z = 100;
        scene = new THREE.Scene();

        /* add meshes */
        /* ======================= */

        renderer = new THREE.WebGLRenderer();
        renderer.setSize(dimensions.width, dimensions.height);
        container.append(renderer.domElement);

        this.animate = function() {
            if ( camera.position.z > -(1/3) * 100 ) {
                /* simple fly through the scene */
                camera.position.x += 0.05;
                camera.position.y += 0.05;
                camera.position.z -= 0.1;
            }
            camera.lookAt(new THREE.Vector3(0, 0, 0));
            render();
        };
    }
}

function animate() {
    for ( var i = 0; i < apps.length; ++i ) {
        apps[ i ].animate();
    }
    requestAnimationFrame(animate);
}

function render() {
    renderer.render(scene, camera);
}
The strange thing is that if I call animate() after all the canvases are drawn, only the last canvas renders at all. And if I call animate() in the preView() function, all scenes are rendered once, but only the last canvas renders the 'camera fly through'. A console.log(apps[i]) in the animate() function does loop over all the apps, but their scenes don't render.
What am I doing wrong here?
2.
I am also trying to achieve this shader effect for every object that I declare as a 'light', no matter which position it has in the scene.
I tried playing a little with all the position values in the shaders, with absolutely no effect.
The only effect was in the VolumetericLightShader on line 333.
I hope for any hints here.
Put all the variables, except apps = [], inside the App( id ) function; that makes them local to App. As your code stands now, every time you call
new App( id )
you put information into global variables that were created once, so those variables only hold whatever data was stored by the last call of App( id ). In other words, each call re-writes the data in the global variables. The same applies to the render() method: put it inside the App() function too. Since you mentioned the example from threejs.org, notice where that method is defined there; it is inside the App() function. Sample jsfiddle
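A minimal sketch of that restructuring, keeping the question's preView()/App() names and jQuery containers (the camera's far plane is a placeholder here, since the original 5 * radius depends on a value not shown):
var apps = [];

function preView( id ) {
    apps.push( new App( id ) );
}

function App( id ) {
    // Everything below is local to this App instance.
    var container = $('#preView_' + id);
    var dimensions = { width: container.width(), height: container.height() };

    var camera = new THREE.PerspectiveCamera(45, dimensions.width / dimensions.height, 1, 1000);
    camera.position.set(0, 0, 100);

    var scene = new THREE.Scene();
    /* add meshes */

    var renderer = new THREE.WebGLRenderer();
    renderer.setSize(dimensions.width, dimensions.height);
    container.append(renderer.domElement);

    function render() {
        renderer.render(scene, camera);
    }

    this.animate = function() {
        if (camera.position.z > -(1/3) * 100) {
            /* simple fly through the scene */
            camera.position.x += 0.05;
            camera.position.y += 0.05;
            camera.position.z -= 0.1;
        }
        camera.lookAt(new THREE.Vector3(0, 0, 0));
        render(); // each App renders with its own renderer/scene/camera
    };
}

function animate() {
    for (var i = 0; i < apps.length; ++i) {
        apps[i].animate();
    }
    requestAnimationFrame(animate);
}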
Maybe it would be easier to use the technique of lens flares. https://threejs.org/examples/webgl_lensflares.html
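For the lens flare route, a rough sketch of the helper from the three.js examples (this assumes a build where examples/js/objects/Lensflare.js is included, which exposes THREE.Lensflare; the texture path is a placeholder):
var flareTexture = new THREE.TextureLoader().load('textures/lensflare/lensflare0.png'); // placeholder path

var pointLight = new THREE.PointLight(0xffffff, 1.5, 2000);
scene.add(pointLight);

// One large element at the light's position plus a smaller ghost further along the flare axis.
var lensflare = new THREE.Lensflare();
lensflare.addElement(new THREE.LensflareElement(flareTexture, 512, 0));
lensflare.addElement(new THREE.LensflareElement(flareTexture, 60, 0.6));
pointLight.add(lensflare);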

Does THREE.Raycaster perform intersection checks on CSS3DObject objects?

I need to run a raycast from mouse coordinates and check for intersections on a group of three.js CSS3DObject objects.
Here is the function:
RayCastCheck = function(event, objects) {
    var vector = new THREE.Vector3((event.clientX / window.innerWidth) * 2 - 1, -(event.clientX / window.innerHeight) * 2 + 1, 0.5);
    new THREE.Projector().unprojectVector( vector, camera );
    var raycaster = new THREE.Raycaster(camera.position, vector.sub(camera.position).normalize());
    var intersects = raycaster.intersectObjects(objects);
    console.log(intersects.length);
};
The objects argument is an array of CSS3DObjects. I am able to use a similar function to target drops on the scene to the correct mouse location, so I believe my calculation of the mouse point in world space is correct. This led me to believe that the Raycaster does not check intersections on CSS3DObjects.
My CSS3DObjects are typically constructed with a div as their element:
createObject = function() {
    var element = document.createElement("div");
    var obj = new THREE.CSS3DObject(element);
    scene.add(obj);
}
My scene is created via this function:
// global
var scene;
var camera;
var renderer;

createScene = function() {
    camera = new THREE.PerspectiveCamera( 75, 400 / 600, 1, 1000 );
    camera.position.z = 500;
    scene = new THREE.Scene();
    renderer = new THREE.CSS3DRenderer();
    renderer.setSize(400, 600);
    $(#body).appendChild(renderer.domElement);
}
Do I have all the required elements in the scene to enable raycasting?
Is it possible to perform raycasting on CSS3DObjects with the CSS3DRenderer?
Thank you for your help.
You can just use the usual DOM events on the elements themselves. You can even get the relative coordinates:
var x = e.offsetX == undefined ? e.layerX : e.offsetX;
var y = e.offsetY == undefined ? e.layerY : e.offsetY;
Using the Raycaster on CSS3DObjects won't work; at least, that is what I figured out.
Take a look at three.js r76, line 8933: that is the definition of the raycast() function of CSS3DObject.
It is empty, so it isn't implemented, and raycasting won't work because of that. It will probably come in a later version; I would need this function too.
It still isn't implemented in r78.
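A minimal sketch of the DOM-event approach, hooking an ordinary listener onto the div used in createObject() above (the handler body is just for illustration):
createObject = function() {
    var element = document.createElement("div");

    // Handle picking with ordinary DOM events instead of the Raycaster.
    element.addEventListener('mousedown', function(e) {
        // Relative coordinates inside the element, as in the snippet above.
        var x = e.offsetX == undefined ? e.layerX : e.offsetX;
        var y = e.offsetY == undefined ? e.layerY : e.offsetY;
        console.log('clicked CSS3D element at', x, y);
    });

    var obj = new THREE.CSS3DObject(element);
    scene.add(obj);
}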

Collada Model Renders on Mouse Movement

I can't get my Collada model to render with three.js until I interact with the browser window. In Chrome, it can be as simple as moving my mouse anywhere on the page (the actual canvas is smaller than the entire window), and in Safari it requires a click and drag for the model to render. What's odd is that the rest of the scene loads properly (I have a grid that I render to the scene and a couple of lights), but the model won't show until I do something. Any thoughts on how I can trick the scene into knowing it should show the model?
var loader = new THREE.ColladaLoader();
loader.options.convertUpAxis = true;
loader.load('../MODELS/teapot.dae', function colladaReady( collada ) {
    dae = collada.scene;
    skin = collada.skins[ 0 ];
    dae.scale.x = dae.scale.y = dae.scale.z = 10;
    dae.position.y = 75;
    dae.updateMatrix();
    scene.add(dae);
});
document.getElementById('myCanvas').appendChild(renderer.domElement);
render();
The loading is occurring asynchronously. You are only showing a code fragment, but it looks like you can move the render() call to the last line of the colladaReady() callback function.
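A minimal sketch of that rearrangement, keeping the question's variable names and assuming render() draws the whole scene:
var loader = new THREE.ColladaLoader();
loader.options.convertUpAxis = true;

loader.load('../MODELS/teapot.dae', function colladaReady( collada ) {
    dae = collada.scene;
    skin = collada.skins[ 0 ];
    dae.scale.x = dae.scale.y = dae.scale.z = 10;
    dae.position.y = 75;
    dae.updateMatrix();
    scene.add(dae);

    // Render only after the asynchronously loaded model is in the scene.
    render();
});

document.getElementById('myCanvas').appendChild(renderer.domElement);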
