Fit to screen from different Orthographic camera positions - three.js

I made a simple jsFiddle example to illustrate the problem.
I'm trying to fit an object's bounding box to the screen from different camera positions. In the example you can change the camera position in the dat.GUI panel and then click the "fit to screen" button.
When changing the y and z (positive) camera positions, the code below is used to find the camera's top and bottom properties:
var top = boundingBox.max.y * Math.cos(angleToZAxis) + boundingBox.max.z * Math.sin(angleToZAxis); // line 68
var bottom = boundingBox.min.y * Math.cos(angleToZAxis) + boundingBox.min.z * Math.sin(angleToZAxis);
I would like to know how I can include the camera's x position and negative positions in this calculation, and what the math behind this is. Should I use a rotation matrix, and if so, how?
Or maybe it can be achieved in some simple way with three.js methods; I can't figure it out. I tried the code below, but something is wrong:
var matrix = new THREE.Matrix4();
matrix.lookAt ( this.camera.position, new THREE.Vector3(0, 0, 0), new THREE.Vector3(0, 1, 0) );
var bbMax = boundingBox.max.clone().applyMatrix4(matrix);
var bbMin = boundingBox.min.clone().applyMatrix4(matrix);

To fit an orthographic camera you simply have to change its zoom and position.
You can calculate the zoom from the bounding box of your object.
(I used the boxes from the geometries, but you will have to take into account the matrices of the objects in the group; I used them because .setFromObject was not returning a consistent value.)
Canvas3D.prototype.fitToScreen = function() {
    this.group.children[0].geometry.computeBoundingBox();
    var boundingBox = this.group.children[0].geometry.boundingBox.clone();
    this.group.children[1].geometry.computeBoundingBox();
    boundingBox.union(this.group.children[1].geometry.boundingBox);

    var rotation = new THREE.Matrix4().extractRotation(this.camera.matrix);
    boundingBox.applyMatrix4(rotation);

    this.camera.zoom = Math.min(this.winWidth / (boundingBox.max.x - boundingBox.min.x),
        this.winHeight / (boundingBox.max.y - boundingBox.min.y)) * 0.95;
    this.camera.position.copy(boundingBox.center());
    this.camera.updateProjectionMatrix();
    this.camera.updateMatrix();
};
Using this will not work in your fiddle, because you are using OrbitControls and they rotate the camera on update based on their own state - so either update that state or create your own controls.
Also, either move the camera back after
this.camera.position.copy(boundingBox.center());
or set the near plane to a negative value (as in the -10000 below) to avoid the object being clipped:
this.camera = new THREE.OrthographicCamera(this.winWidth / -2,
this.winWidth / 2 , this.winHeight / 2, this.winHeight / -2, -10000, 10000);
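If you prefer to keep a positive near plane instead, a rough sketch of the "move the camera back" option (the pull-back distance and variable names are my own assumptions, not part of the original answer):
// center on the box, then pull the camera back along its own view direction
this.camera.position.copy(boundingBox.center());
var backwards = new THREE.Vector3(0, 0, 1).applyQuaternion(this.camera.quaternion);
this.camera.position.add(backwards.multiplyScalar(100)); // 100 is an arbitrary distance
this.camera.updateProjectionMatrix();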
EDIT
Now I see that you don't want to fit just the object but the whole box...
To do so, an easy way is to project the points of the box and get the distances of the extremes in pixels; then you can set the ortho camera directly.
boundingBox = new THREE.Box3().setFromObject(this.group);
//take all 8 vertices of the box and project them
var p1 = new THREE.Vector3(boundingBox.min.x,boundingBox.min.y,boundingBox.min.z).project(this.camera);
var p2 = new THREE.Vector3(boundingBox.min.x,boundingBox.min.y,boundingBox.max.z).project(this.camera);
var p3 = new THREE.Vector3(boundingBox.min.x,boundingBox.max.y,boundingBox.min.z).project(this.camera);
var p4 = new THREE.Vector3(boundingBox.min.x,boundingBox.max.y,boundingBox.max.z).project(this.camera);
var p5 = new THREE.Vector3(boundingBox.max.x,boundingBox.min.y,boundingBox.min.z).project(this.camera);
var p6 = new THREE.Vector3(boundingBox.max.x,boundingBox.min.y,boundingBox.max.z).project(this.camera);
var p7 = new THREE.Vector3(boundingBox.max.x,boundingBox.max.y,boundingBox.min.z).project(this.camera);
var p8 = new THREE.Vector3(boundingBox.max.x,boundingBox.max.y,boundingBox.max.z).project(this.camera);
//fill a box to get the extremes of the 8 points
var box = new THREE.Box3();
box.expandByPoint(p1);
box.expandByPoint(p2);
box.expandByPoint(p3);
box.expandByPoint(p4);
box.expandByPoint(p5);
box.expandByPoint(p6);
box.expandByPoint(p7);
box.expandByPoint(p8);
//multiply by the absolute frustum extents - the projected points already carry the correct sign
var top = box.max.y * Math.abs(this.camera.top);
var bottom = box.min.y * Math.abs(this.camera.bottom);
var right = box.max.x * Math.abs(this.camera.right);
var left = box.min.x * Math.abs(this.camera.left);
this.updateCamera(left, right, top, bottom);
This code also stretches the view to fit exactly into the window, so you will have to check the aspect ratio and change one size accordingly - but that should be trivial; a possible sketch follows.
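A minimal sketch of that aspect-ratio correction (my own addition, assuming the left/right/top/bottom values computed above and the same winWidth/winHeight fields; it would replace the direct this.updateCamera call):
var aspect = this.winWidth / this.winHeight;
var frustumWidth = right - left;
var frustumHeight = top - bottom;
var extra;
if (frustumWidth / frustumHeight < aspect) {
    // frustum too narrow for the window: widen left/right
    extra = (frustumHeight * aspect - frustumWidth) / 2;
    left -= extra;
    right += extra;
} else {
    // frustum too flat for the window: grow top/bottom
    extra = (frustumWidth / aspect - frustumHeight) / 2;
    top += extra;
    bottom -= extra;
}
this.updateCamera(left, right, top, bottom);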

Related

three js LoadObject pivot [duplicate]

What I'm trying to achieve is a rotation of the geometry around a pivot point, and to make that the new definition of the geometry. I do not want to keep editing rotationZ; I want the current rotationZ to become the new rotationZ 0.
This way, when I create a new rotation task, it will start from the newly given pivot point and the newly given rad.
What I've tried, but then the rotation point moves:
// Add cube to do calculations
var box = new THREE.Box3().setFromObject( o );
var size = box.getSize();
var offsetZ = size.z / 2;
o.geometry.translate(0, -offsetZ, 0);
// Do rotation
o.rotateZ(CalcUtils.degreeToRad(degree));
o.geometry.translate(0, offsetZ, 0);
I also tried to add a Group, rotate that group, and then remove the group. But I need to keep the rotation without all the extra objects. The code I created:
var box = new THREE.Box3().setFromObject( o );
var size = box.size();
var geometry = new THREE.BoxGeometry( 20, 20, 20 );
var material = new THREE.MeshBasicMaterial( { color: 0xcc0000 } );
var cube = new THREE.Mesh( geometry, material );
cube.position.x = o.position.x;
cube.position.y = 0; // Height / 2
cube.position.z = -size.z / 2;
o.position.x = 0;
o.position.y = 0;
o.position.z = size.z / 2;
cube.add(o);
scene.add(cube);
// Do rotation
cube.rotateY(CalcUtils.degreeToRad(degree));
// Remove cube, and go back to single object
var position = o.getWorldPosition();
scene.add(o)
scene.remove(cube);
console.log(o);
o.position.x = position.x;
o.position.y = position.y;
o.position.z = position.z;
So my question: how do I save the current rotation as the new 0 rotation point, i.e. make the rotation final?
EDIT
I added an image of what I want to do. The object is green. I have a 0 point of the world (black), a 0 point of the object (red), and a rotation point (blue).
How can I rotate the object around the blue point?
I wouldn't recommend updating the vertices, because you'll run into trouble with the normals (unless you keep them up-to-date, too). Basically, it's a lot of hassle to perform an action for which the transformation matrices were intended.
You came pretty close by translating, rotating, and un-translating, so you were on the right track. There are some built-in methods which can help make this super easy.
// obj - your object (THREE.Object3D or derived)
// point - the point of rotation (THREE.Vector3)
// axis - the axis of rotation (normalized THREE.Vector3)
// theta - radian value of rotation
// pointIsWorld - boolean indicating the point is in world coordinates (default = false)
function rotateAboutPoint(obj, point, axis, theta, pointIsWorld){
    pointIsWorld = (pointIsWorld === undefined)? false : pointIsWorld;

    if(pointIsWorld){
        obj.parent.localToWorld(obj.position); // compensate for world coordinate
    }

    obj.position.sub(point);                  // remove the offset
    obj.position.applyAxisAngle(axis, theta); // rotate the POSITION
    obj.position.add(point);                  // re-add the offset

    if(pointIsWorld){
        obj.parent.worldToLocal(obj.position); // undo world coordinates compensation
    }

    obj.rotateOnAxis(axis, theta); // rotate the OBJECT
}
After this method completes, the rotation/position IS persisted. The next time you call the method, it will transform the object from its current state to wherever your inputs define next.
Also note the compensation for using world coordinates. This allows you to use a point in either world coordinates or local space by converting the object's position vector into the correct coordinate system. It's probably best to use it this way any time your point and object are in different coordinate systems, though your observations may differ.
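A short usage sketch (the mesh and pivot values here are hypothetical, and the mesh must already be added to the scene so it has a parent for the world-space case):
// spin `mesh` a quarter turn around the world-space point (1, 0, 0), about the vertical axis
var pivot = new THREE.Vector3(1, 0, 0);
var axis = new THREE.Vector3(0, 1, 0); // must be normalized
rotateAboutPoint(mesh, pivot, axis, Math.PI / 2, true);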
As a simple solution for anyone trying to quickly change the pivot point of an object, I would recommend creating a group and adding the mesh to the group, and rotating around that.
Full example
const geometry = new THREE.BoxGeometry();
const material = new THREE.MeshBasicMaterial({ color: 0xff0000 });
const cube = new THREE.Mesh(geometry, material);
scene.add(cube)
Right now, this will just rotate around its center
cube.rotation.z = Math.PI / 4
Create a new group and add the cube
const group = new THREE.Group();
group.add(cube)
scene.add(group)
At this point we are back where we started. Now move the mesh:
cube.position.set(0.5,0.5,0)
Then move the group:
group.position.set(-0.5, -0.5, 0)
Now use your group to rotate the object:
group.rotation.z = Math.PI / 4

Three js RayCast between two objects in scene

I know how to raycast an object in the scene when clicking the mouse, but now I need to know if two objects in the scene can raycast each other.
That is, I load a 3D object into the scene, for example two rooms as an OBJ object, then I add three box meshes at some points: for example, two points in the first room and one point in the second room.
The two points in the first room can raycast each other (they have direct vision), but the points in the first room can't raycast the point in the second room (they don't have vision through the room wall).
I attached the code used to load the scene and the points. Any suggestion how to do this?
//LOAD MAIN 3D OBJECT
var objLoader = new THREE.OBJLoader();
objLoader.setMaterials(materials);
objLoader.setPath('./asset/3d/');
objLoader.load("model01.obj", function(object){
    var mesh = object.children[0];
    mesh.castShadow = true;
    mesh.receiveShadow = true;
    mesh.rotation.x = Math.PI / 2;

    var box = new THREE.Box3().setFromObject( object );
    var ox = -(box.max.x + box.min.x) / 2;
    var oy = -(box.max.y + box.min.y) / 2;
    var oz = -(box.max.z + box.min.z) / 2;
    mesh.position.set(ox, oy, oz);

    _scene.add(mesh);
    render();
    setTimeout(render, 1000);
});
//LOAD count_points inside scene
for (var i = 0; i < cta_points; i++) {
    var c_r = 2;
    var c_geometry = new THREE.BoxBufferGeometry( c_r, c_r, c_r );
    var c_material = new THREE.MeshLambertMaterial({
        color: new THREE.Color("rgb(" + (40 + 30) + ", 0, 0)"),
        opacity: 0.0,
        transparent: true
    });
    var c_mesh = new THREE.Mesh( c_geometry, c_material );
    var position = get_positions(i);
    c_mesh.position.copy(position);
    c_mesh.name = "BOX";
    scene.add( c_mesh );
}
Possibly take a look at:
How to detect collision in three.js?
Usually, to solve this problem, you would use a collision mask together with a collision group.
The collision group is assigned per object and is represented by a "bit" in a bitmask.
The wall could be in a separate collision group, like 4 (binary 100),
and the objects could be in another group, say 2 (binary 10).
Then you just check collisions of the object against the mask,
i.e. check whether the object's collision group matches the bitmask (the masks above would be 10 and 100).
That way you can call THREE.Raycaster().intersectObjects(args), where the arguments are the objects that pass the bitmask test (mask == object.collision_group).
So you won't need to include the wall in every collision detection test, since it uses a separate bitmask.
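Independent of the bitmask bookkeeping, here is a minimal sketch (my own, not from the linked answer) of testing direct vision between two of the box meshes: raycast from one towards the other and check whether anything in an obstacle list (e.g. the room mesh) blocks the ray before it reaches the target.
function canSeeEachOther(meshA, meshB, obstacles) {
    var origin = meshA.position.clone();
    var direction = meshB.position.clone().sub(origin);
    var distance = direction.length();
    direction.normalize();

    // only accept hits between the two points (near = 0, far = distance)
    var raycaster = new THREE.Raycaster(origin, direction, 0, distance);
    var hits = raycaster.intersectObjects(obstacles, true); // e.g. [roomMesh]

    // if nothing blocks the ray before it reaches meshB, they have direct vision
    return hits.length === 0;
}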

what's wrong with it - or how to find correct THREE.PerspectiveCamera settings

I have a simple THREE.Scene where the main content is a THREE.Line mesh that visualizes the keyframe based path that the camera will follow for some scripted animation. There is then one THREE.SphereGeometry based mesh that is always repositioned to the current camera location.
The currently WRONG result looks like this (the fractal background is rendered independently but using the same keyframe input - and ultimately the idea is that the "camera path" visualization ends up in the same scale/projection as the respective fractal background...):
The base is an array of keyframes, each of which represents the modelViewMatrix for a specific camera position/orientation and is directly used to drive the vertex shader for the background, e.g.:
varying vec3 eye, dir;

void main() {
    gl_Position = vec4(position, 1.0);
    eye = vec3(modelViewMatrix[3]);
    dir = vec3(modelViewMatrix * vec4(position.x, position.y, 1, 0));
}
(It is my understanding that "eye" is basically the camera position, while "dir" reflects the orientation of the camera and, by the way it is used during the ray marching, implicitly leads to a perspective projection.)
The respective mesh objects are created like this:
visualizeCameraPath: function(scene) {
    // debug: visualize the camera path
    var n = this.getNumberOfKeyFrames();

    var material = new THREE.LineBasicMaterial({
        color: 0xffffff
    });
    var geometry = new THREE.Geometry();

    for (var i = 0; i < n; i++) {
        var m = this.getKeyFrameMatrix(true, i);

        var pos = new THREE.Vector3();
        var q = new THREE.Quaternion();
        var scale = new THREE.Vector3();
        m.decompose(pos, q, scale);

        geometry.vertices.push( new THREE.Vector3( pos.x, pos.y, pos.z ) );
    }
    this.camPath = new THREE.Line( geometry, material );
    this.camPath.frustumCulled = false; // Avoid getting clipped - does not seem to help one little bit
    scene.add( this.camPath );

    var radius = 0.04;
    var g = new THREE.SphereGeometry(radius, 10, 10, 0, Math.PI * 2, 0, Math.PI * 2);
    this.marker = new THREE.Mesh(g, new THREE.MeshNormalMaterial());
    scene.add(this.marker);
}
In order to play the animation I update the camera and the marker position like this (I guess it is already wrong how I use the input matrix "m" directly on the "shadowCamera" - even though I think that it contains the correct position):
syncShadowCamera(m) {
    var pos = new THREE.Vector3();
    var q = new THREE.Quaternion();
    var scale = new THREE.Vector3();
    m.decompose(pos, q, scale);

    this.applyMatrix(m, this.shadowCamera); // also sets camera position to "pos"

    // highlight current camera-position on the camera-path-line
    if (this.marker != null) this.marker.position.set(pos.x, pos.y, pos.z);
},

applyMatrix: function(m, targetObj3d) {
    var pos = new THREE.Vector3();
    var q = new THREE.Quaternion();
    var scale = new THREE.Vector3();
    m.decompose(pos, q, scale);

    targetObj3d.position.set(pos.x, pos.y, pos.z);
    targetObj3d.quaternion.set(q.x, q.y, q.z, q.w);
    targetObj3d.scale = scale;

    targetObj3d.updateMatrix(); // this.matrix.compose( this.position, this.quaternion, this.scale );
    targetObj3d.updateMatrixWorld(true);
},
I've tried multiple things with regard to the camera and the screenshot reflects the output with disabled "this.projectionMatrix" (see below code).
createShadowCamera: function() {
    var speed = 0.00039507;
    var z_near = Math.abs(speed);
    var z_far = speed * 65535.0;
    var fH = Math.tan( this.FOV_Y * Math.PI / 360.0 ) * z_near;
    var fW = Math.tan( this.FOV_X * Math.PI / 360.0 ) * z_near;
    // orig opengl used: glFrustum(-fW, fW, -fH, fH, z_near, z_far);

    var camera = new THREE.PerspectiveCamera();
    camera.updateProjectionMatrix = function() {
        // this.projectionMatrix.makePerspective( -fW, fW, fH, -fH, z_near, z_far );
        this.projectionMatrix = new THREE.Matrix4(); // hack: fallback to no projection
    };
    camera.updateProjectionMatrix();
    return camera;
},
My initial attempt had been to use the same kind of settings that the OpenGL shader for the fractal background had been using (see glFrustum above). Unfortunately I have not yet managed to correctly map the input "modelViewMatrix" (and the projection implicitly performed by the ray marching in the shader) to equivalent THREE.PerspectiveCamera settings (orientation/projectionMatrix).
Is there any matrix calculation expert here who knows how to obtain the correct transformations?
Finally I have found one hack that works.
Actually the problem was made up of two parts:
1) Row- vs column-major order of the modelViewMatrix: the order expected by the vertex shader is the opposite of what the rest of THREE.js expects.
2) Object3D hierarchy, i.e. Scene, Mesh, Geometry, Line vertices + Camera: where to put the modelViewMatrix data so that it creates the desired result (i.e. the same result that the old bloody OpenGL application produced). I am not happy with the hack that I found here - but so far it is the only one that seems to work:
I DO NOT touch the Camera. It stays at 0/0/0.
I directly move all the vertices of my "line" Geometry relative to the real camera position (see "position" from the modelViewMatrix).
I then disable "matrixAutoUpdate" on the Mesh that contains my "line" Geometry and copy the modelViewMatrix (in which I first zeroed out the "position") into the "matrix" field.
BINGO... then it works. (All of my attempts to achieve the same result by rotating/displacing the Camera or by displacing/rotating any of the Object3Ds involved have failed miserably.)
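In code, that last step looks roughly like this (a sketch based on the description above, not the author's exact code; "m" is the keyframe modelViewMatrix and this.camPath is the line mesh created earlier):
var mNoPos = m.clone();
mNoPos.setPosition(new THREE.Vector3(0, 0, 0)); // zero out the translation part
this.camPath.matrixAutoUpdate = false;
this.camPath.matrix.copy(mNoPos);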
EDIT: I found a better way than updating the vertices, one that at least keeps all the manipulations on the Mesh level (I am still moving the world around - like the old OpenGL app would have done). To get the right sequence of translation/rotation one can also use the following ("m" is still the original OpenGL modelViewMatrix - with 0/0/0 position info):
var t= new THREE.Matrix4().makeTranslation(-cameraPos.x, -cameraPos.y, -cameraPos.z);
t.premultiply ( m );
obj3d.matrixAutoUpdate=false;
obj3d.matrix.copy(t);
If somebody knows a better way that also works (one where the Camera is updated without having to directly manipulate object matrices) I'd certainly be interested to hear it.

Drawing lines between the Icosahedron vertices without wireframe material and with some line width using WEBGLRenderer

I'm new to three.js.
I need to draw a sphere connected with triangles. I use an icosahedron to construct the sphere in the following way:
var material = new THREE.MeshPhongMaterial({
    emissive : 0xffffff,
    transparent: true,
    opacity : 0.5,
    wireframe : true
});
var icogeo = new THREE.IcosahedronGeometry(80, 2);
var mesh = new THREE.Mesh(icogeo, material);
scene.add(mesh);
But I need the line width to be greater, and line width won't show up on Windows, so I thought of looping through the vertices and drawing a cylinder/tube between them. (I can't draw lines because LineBasicMaterial was not responding to light.)
for (i = 0; i < icogeo.faces.length; i++) {
    var face = icogeo.faces[i];
    // get vertices from face and draw cylinder/tube between the three vertices
}
Can someone please help with drawing a tube/cylinder between two Vector3 vertices?
(The problem I'm facing with wireframe is that it is not smooth and I can't increase its width on Windows.)
If you really want to create a cylinder between two points, one way to do it is to create it in a unit space and then transform it to your line. But that is very mathy.
An intuitive way to create it is to think about how you would do it in a unit space: a circle around the z axis (in x,y) and another one a bit down z.
Creating a circle in 2d is easy: for ( angle(0,360,360/numsteps) ) (x,y) = (sin(angle),cos(angle)) * radius (see for example Calculating the position of points in a circle).
Now the two butt ends of your cylinder are not in x,y! But if you have two vectors dx,dy you can just multiply your x,y with them and get a 3d position!
So how to get dx, dy? One way is the Gram-Schmidt process (http://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process),
which reads way more scary than it is. You start with your forward direction, which is your line: forward = normalize(end-start). Then you just pick a direction "up", usually (0,1,0) - unless forward is already close to up, then pick another one like (1,0,0). Take their cross product. This gives you "left". Then take the cross product between "left" and "forward" to get "right". Now "left" and "right" are your dx and dy!
That way you can make two circles at the two ends of your line. Add triangles in between and you have a cylinder! A small sketch of this construction follows.
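A minimal sketch of the basis construction described above (the function name and parameters are my own): build forward/left/right vectors from the two end points and place the vertices of one circular cross-section.
function circleAround(start, end, radius, numSteps) {
    var forward = new THREE.Vector3().subVectors(end, start).normalize();
    // pick an "up" that is not parallel to forward
    var up = Math.abs(forward.y) < 0.99 ? new THREE.Vector3(0, 1, 0) : new THREE.Vector3(1, 0, 0);
    var left = new THREE.Vector3().crossVectors(forward, up).normalize();
    var right = new THREE.Vector3().crossVectors(left, forward).normalize();

    var points = [];
    for (var i = 0; i < numSteps; i++) {
        var angle = (i / numSteps) * Math.PI * 2;
        var p = start.clone()
            .add(left.clone().multiplyScalar(Math.cos(angle) * radius))
            .add(right.clone().multiplyScalar(Math.sin(angle) * radius));
        points.push(p);
    }
    return points; // repeat at `end` and connect the two rings with triangles
}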
Even though I do believe it is an overkill for what you are trying to achieve, here is code that draws a capsule (cylinder with spheres at the end) between two endpoints.
/**
 * Returns a THREE.Object3D cylinder and spheres going from top to bottom positions
 * @param radius - the radius of the capsule's cylinder
 * @param top, bottom - THREE.Vector3, top and bottom positions of cone
 * @param radiusSegments - tessellation around equator
 * @param openTop, openBottom - whether the end is given a sphere; true means they are not
 * @param material - THREE.Material
 */
function createCapsule (radius, top, bottom, radiusSegments, openTop, openBottom, material)
{
    radiusSegments = (radiusSegments === undefined) ? 32 : radiusSegments;
    openTop = (openTop === undefined) ? false : openTop;
    openBottom = (openBottom === undefined) ? false : openBottom;

    var capsule = new THREE.Object3D();

    var cylinderAxis = new THREE.Vector3();
    cylinderAxis.subVectors (top, bottom); // get cylinder height

    var cylinderGeom = new THREE.CylinderGeometry (radius, radius, cylinderAxis.length(), radiusSegments, 1, true); // open-ended
    var cylinderMesh = new THREE.Mesh (cylinderGeom, material);

    // get cylinder center for translation
    var center = new THREE.Vector3();
    center.addVectors (top, bottom);
    center.divideScalar (2.0);

    // pass in the cylinder itself, its desired axis, and the place to move the center.
    makeLengthAngleAxisTransform (cylinderMesh, cylinderAxis, center);
    capsule.add (cylinderMesh);

    if (! openTop || ! openBottom)
    {
        // instance geometry
        var hemisphGeom = new THREE.SphereGeometry (radius, radiusSegments, radiusSegments/2, 0, 2*Math.PI, 0, Math.PI/2);

        // make a cap instance of hemisphGeom around 'center', looking into some 'direction'
        var makeHemiCapMesh = function (direction, center)
        {
            var cap = new THREE.Mesh (hemisphGeom, material);
            makeLengthAngleAxisTransform (cap, direction, center);
            return cap;
        };

        // ================================================================================
        if (! openTop)
            capsule.add (makeHemiCapMesh (cylinderAxis, top));

        // reverse the axis so that the hemiCaps would look the other way
        cylinderAxis.negate();

        if (! openBottom)
            capsule.add (makeHemiCapMesh (cylinderAxis, bottom));
    }

    return capsule;
}
// Transform object to align with given axis and then move to center
function makeLengthAngleAxisTransform (obj, align_axis, center)
{
    obj.matrixAutoUpdate = false;

    // From left to right using frames: translate, then rotate; TR.
    // So translate is first.
    obj.matrix.makeTranslation (center.x, center.y, center.z);

    // take cross product of axis and up vector to get axis of rotation
    var yAxis = new THREE.Vector3 (0, 1, 0);

    // Needed later for dot product, just do it now;
    var axis = new THREE.Vector3();
    axis.copy (align_axis);
    axis.normalize();

    var rotationAxis = new THREE.Vector3();
    rotationAxis.crossVectors (axis, yAxis);
    if (rotationAxis.length() < 0.000001)
    {
        // Special case: if rotationAxis is just about zero, set to X axis,
        // so that the angle can be given as 0 or PI. This works ONLY
        // because we know one of the two axes is +Y.
        rotationAxis.set (1, 0, 0);
    }
    rotationAxis.normalize();

    // take dot product of axis and up vector to get cosine of angle of rotation
    var theta = -Math.acos (axis.dot (yAxis));

    // obj.matrix.makeRotationAxis (rotationAxis, theta);
    var rotMatrix = new THREE.Matrix4();
    rotMatrix.makeRotationAxis (rotationAxis, theta);
    obj.matrix.multiply (rotMatrix);
}
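A short usage sketch (the material, endpoints, and scene variable here are assumptions, not from the original answer):
// a capsule of radius 2 between two points in world space
var capMaterial = new THREE.MeshPhongMaterial({ color: 0xcc0000 });
var capsule = createCapsule(2,
    new THREE.Vector3(0, 50, 0),   // top
    new THREE.Vector3(0, -50, 0),  // bottom
    32, false, false, capMaterial);
scene.add(capsule);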

three.js - Set the rotation of an object in relation to its own axes

I'm trying to make a static 3D prism out of point clouds with specific numbers of particles in each. I've got the corner coordinates of each side of the prism based on the angle of turn, and tried spawning the particles in the area bound by these coordinates. Instead, the resulting point clouds have kept only the bottom left coordinate.
Screenshot: http://i.stack.imgur.com/uQ7Q8.png
I've tried to set the rotation of each cloud object such that their edges meet, but they will rotate only around the world centre. I gather this is something to do with rotation matrices and Euler angles, but, having been trying to work them out for 3 solid days, I've despaired. (I'm a sociologist, not a dev, and haven't touched graphics before this project.)
Please help? How do I set the rotation on each face of the prism? Or maybe there is a more sensible way to get the particles to spawn in the correct area in the first place?
The code:
// draw the particles
var n = 0;
do {
    var geom = new THREE.Geometry();
    var material = new THREE.PointCloudMaterial({size: 1, vertexColors: true, color: 0xffffff});

    for (i = 0; i < group[n]; i++) {
        if (geom.vertices.length < group[n]){
            var particle = new THREE.Vector3(
                Math.random() * screens[n].bottomrightback.x + screens[n].bottomleftfront.x,
                Math.random() * screens[n].toprightback.y + screens[n].bottomleftfront.y,
                Math.random() * screens[n].bottomrightfront.z + screens[n].bottomleftfront.z);
            geom.vertices.push(particle);
            geom.colors.push(new THREE.Color(Math.random() * 0x00ffff));
        }
    }

    var system = new THREE.PointCloud(geom, material);
    scene.add(system);
    // something something matrix Euler something?
    n++;
} while (n < numGroups);
I've tried to set the rotation of each cloud object such that their edges meet, but they will rotate only around the world centre.
It is true they only rotate around 0,0,0. The simple solution then is to move the object to the center, rotate it, and then move it back to its original position.
For example (code not tested, so it might take a bit of tweaking):
var m = new THREE.Matrix4();
var movetocenter = new THREE.Matrix4();
movetocenter.makeTranslation(-x, -y, -z);
var rotate = new THREE.Matrix4();
rotate.makeRotationFromEuler(); //Build your rotation here
var moveback = new THREE.Matrix4();
moveback.makeTranslation(x, y, z);
m.multiply(movetocenter);
m.multiply(rotate);
m.multiply(moveback);
//Now you can use geometry.applyMatrix(m)
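As a follow-up, a hedged sketch of that last step using the geometry variable from the question's loop, so the rotation becomes the cloud's new "zero" state:
geom.applyMatrix(m);            // bake the pivot rotation into the vertices
geom.verticesNeedUpdate = true;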
