Compute 3D point from mouse-position and depth-map - three.js

I need to compute 3D coordinates from a screen-space position using a rendered depth-map. Unfortunately, regular raycasting is not an option for me, because I am dealing with a single geometry containing something on the order of 5M faces.
So I figured I would do the following:
render a depth-map with RGBADepthPacking into a renderTarget
use a regular unproject-call to compute a ray from the mouse-position (exactly as I would do when using raycasting)
lookup the depth from the depth-map at the mouse-coordinates and compute a point along the ray using that distance.
This kind of works, but somehow the located point is always slightly behind the object, so there is probably something wrong with my depth-calculations.
Now for some details about the steps above.
Rendering the depth-map is pretty much straightforward:
const depthTarget = new THREE.WebGLRenderTarget(w, h);
const depthMaterial = new THREE.MeshDepthMaterial({
  depthPacking: THREE.RGBADepthPacking
});
// in renderloop
renderer.setClearColor(0xffffff, 1);
renderer.clear();
scene.overrideMaterial = depthMaterial;
renderer.render(scene, camera, depthTarget);
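Presumably the override material is then reset so the regular pass still renders correctly; a minimal sketch of that step, using the same old-style render-to-target signature as above:

scene.overrideMaterial = null;
renderer.render(scene, camera); // no target passed, so this renders to the canvas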
Lookup the stored color-value at the mouse-position with:
renderer.readRenderTargetPixels(
  depthTarget, x, h - y, 1, 1, rgbaBuffer
);
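Here rgbaBuffer is assumed to be a one-pixel readback buffer, allocated once outside the render loop:

const rgbaBuffer = new Uint8Array(4); // 4 bytes, one RGBA pixel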
And convert it back to a float (adapted from the GLSL version in packing.glsl):
const v4 = new THREE.Vector4();
const unpackDownscale = 255 / 256;
const unpackFactors = new THREE.Vector4(
  unpackDownscale / (256 * 256 * 256),
  unpackDownscale / (256 * 256),
  unpackDownscale / 256,
  unpackDownscale
);

function unpackRGBAToDepth(rgbaBuffer) {
  return v4.fromArray(rgbaBuffer)
    .multiplyScalar(1 / 255)
    .dot(unpackFactors);
}
and finally compute the depth-value (I found corresponding code in readDepth() in examples/js/shaders/SSAOShader.js, which I ported to JS):
function computeDepth() {
  const cameraFarPlusNear = cameraFar + cameraNear;
  const cameraFarMinusNear = cameraFar - cameraNear;
  const cameraCoef = 2.0 * cameraNear;

  let z = unpackRGBAToDepth(rgbaBuffer);
  return cameraCoef / (cameraFarPlusNear - z * cameraFarMinusNear);
}
Now, as this function returns values in the range 0..1, I think it is the depth in clip-space coordinates, so I convert it into "real" units using:
const realDepth = camera.near + depth * (camera.far - camera.near);
There is obviously something slightly off in these calculations; I haven't yet figured out the math and details of how the depth is stored.
Can someone please point me to the mistake I made?
Addition: other things I tried
First I thought it should be possible to just use the unpacked depth-value as value for z in my unproject-call like this:
const x = mouseX / w * 2 - 1;
const y = -mouseY / h * 2 + 1;
const v = new THREE.Vector3(x, y, depth).unproject(camera);
However, this also doesn't get the coordinates right. (One likely culprit: unproject() expects NDC coordinates, where z runs from -1 to 1, so the packed depth in 0..1 would at least need to be remapped as depth * 2 - 1 first.)
[EDIT 1 2017-05-23 11:00CEST]
As per @WestLangley's comment, I found the perspectiveDepthToViewZ() function, which sounds like it should help. Written in JS, that function is:
function perspectiveDepthToViewZ(invClipZ, near, far) {
  return (near * far) / ((far - near) * invClipZ - far);
}
However, when called with unpacked values from the depth-map, the results are several orders of magnitude off.

OK, finally solved it! For everyone having trouble with similar issues, here's the solution:
The last line of the computeDepth function was just wrong. There is a function perspectiveDepthToViewZ in packing.glsl that is pretty easy to convert to JS:
function perspectiveDepthToViewZ(invClipZ, near, far) {
  return (near * far) / ((far - near) * invClipZ - far);
}
(I believe this is effectively a part of the inverse projection matrix.)
function computeDepth() {
  const z = unpackRGBAToDepth(rgbaBuffer);
  return perspectiveDepthToViewZ(z, camera.near, camera.far);
}
Now this will return the z-axis value in view-space for the point. What's left is converting that back into world-space coordinates:
const setPositionFromViewZ = (function() {
  const projInv = new THREE.Matrix4();

  return function(position, viewZ) {
    projInv.getInverse(camera.projectionMatrix);

    // unproject the mouse-position into view-space; any z-value works here,
    // the point just has to lie somewhere on the view-ray
    position
      .set(
        mousePosition.x / windowWidth * 2 - 1,
        -(mousePosition.y / windowHeight) * 2 + 1,
        0.5
      )
      .applyMatrix4(projInv);

    // scale the point along the view-ray so its z matches the measured viewZ
    position.multiplyScalar(viewZ / position.z);

    // and transform from view-space back into world-space
    position.applyMatrix4(camera.matrixWorld);
  };
})();
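A hypothetical usage, assuming the mouse position and camera from above are in scope:

const pickedPoint = new THREE.Vector3();
setPositionFromViewZ(pickedPoint, computeDepth());
// pickedPoint now holds the world-space position under the mouse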

Related

I have a question about using turtle graphic functions and looping methods on p5.js

I have to create the two included images using the turtle function and the loop method in p5.js, and I am struggling. I was given this code on p5.js as a start: https://editor.p5js.org/dpapanik/sketches/_lbGWWH6N. Please help, thanks!
So I've played around with this for a while and created two functions: one that makes a single quadrant of the first problem, and one that creates a single wiggly line for the second problem. This is just a base for you to work off of. Here's each of the functions; note that each of them takes the turtle as a parameter:
function makeLineQuadrant(turtle) {
  // this currently makes the top left corner:
  let yVal = windowWidth * 0.5;
  let xVal = windowWidth * 0.5;
  for (let i = 0; i < 13; i++) {
    // loop through the 13 lines in one quadrant
    turtle.face(0); // reset for the new round
    turtle.penUp();
    let startLeft = i * ((windowWidth * 0.5) / 12); // decide where on the bottom edge we should start
    let endTop = (12 - i) * ((windowWidth * 0.5) / 12); // how far down the y-axis should we go? You should write this out on paper to see how it works
    turtle.goto(startLeft, yVal);
    turtle.penDown();
    let deg = turtle.angleTo(xVal, endTop); // what direction do I need to turn?
    turtle.face(deg);
    let distance = turtle.distanceTo(xVal, endTop); // how far away is it?
    turtle.forward(distance);
  }
}
I tried to add a few comments throughout, but if there is any step that is confusing, please add a comment.
function makeSquiggle(turtle) {
  turtle.setColor(color(random(0, 255), random(0, 255), random(0, 255)));
  let middleX = windowWidth * 0.5, middleY = windowHeight * 0.5;
  turtle.goto(middleX, middleY);
  // let's start moving in a random direction UNTIL our distance from the center is greater than some number X
  let X = 300; // arbitrary distance from center
  // some variables that can help us get some random movement for our turtle:
  let turtleXvel = random(-3, 3), turtleYvel = random(-3, 3);
  while (turtle.distanceTo(middleX, middleY) < X) {
    turtle.face(0);
    // calculate movement:
    let newXmove = turtle.x + turtleXvel, newYmove = turtle.y + turtleYvel;
    // direct our turtle:
    turtle.face(turtle.angleTo(newXmove, newYmove));
    let distance = turtle.distanceTo(newXmove, newYmove); // how far away is it?
    // move our turtle
    turtle.penDown();
    turtle.forward(distance);
    // change the velocity a little bit for a smooth curving:
    turtleXvel += random(-1, 1);
    turtleYvel += random(-1, 1);
  }
}
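For completeness, a minimal way these could be wired up; this assumes the Turtle class from the linked starter sketch is in scope and that its constructor takes no arguments (both assumptions):

function setup() {
  createCanvas(windowWidth, windowHeight);
  background(255);
  const turtle = new Turtle(); // hypothetical constructor from the starter code
  makeLineQuadrant(turtle); // first image: one quadrant of straight lines
  makeSquiggle(turtle); // second image: one random squiggle from the center
}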
Note that I'm changing the velocities instead of the position directly. This is the classic calculus/physics idea that the derivative changes over a smaller range, so adjusting turtleXvel and turtleYvel changes the position in much less drastic ways than:
turtle.x += random(-1, 1);
turtle.y += random(-1, 1);
You should look at the difference as well to visualize this. Beyond this, it's a matter of combining these structural components to finish things up!

ThreeJS - THREE.BufferGeometry.computeBoundingSphere() Gives Error: NaN Position Values

I am creating a simple THREE.PlaneBufferGeometry using three.js. The surface is a geologic surface in the earth.
This surface has local gaps or 'holes' in it, represented by NaNs. I have read another similar, but older, post where the suggestion was to fill the position Z component with 'undefined' rather than NaN. I tried that, but get this error:
THREE.BufferGeometry.computeBoundingSphere(): Computed radius is NaN. The "position" attribute is likely to have NaN values.
PlaneBufferGeometry {uuid: "8D8EFFBF-7F10-4ED5-956D-5AE1EAD4DD41", name: "", type: "PlaneBufferGeometry", index: Uint16BufferAttribute, attributes: Object, …}
Here is the TypeScript function that builds the surface:
AddSurfaces(result) {
  let surfaces: Surface[] = result;
  if (this.surfaceGroup == null) {
    this.surfaceGroup = new THREE.Group();
    this.globalGroup.add(this.surfaceGroup);
  }
  surfaces.forEach(surface => {
    var material = new THREE.MeshPhongMaterial({ color: 'blue', side: THREE.DoubleSide });
    let mesh: Mesh2D = surface.arealMesh;
    let values: number[][] = surface.values;
    let geometry: PlaneBufferGeometry = new THREE.PlaneBufferGeometry(mesh.width, mesh.height, mesh.nx - 1, mesh.ny - 1);
    var positions = geometry.getAttribute('position');
    let node: number = 0;
    // Surfaces in Three JS are ordered from the top left corner with x going fastest
    // left to right and then Y ('j') going from top to bottom. This is backwards in Y
    // from how we do the modelling in the backend.
    for (let j = mesh.ny - 1; j >= 0; j--) {
      for (let i = 0; i < mesh.nx; i++) {
        let value: number = values[i][j];
        if (!isNaN(value)) {
          positions.setZ(node, -value);
        }
        else {
          positions.setZ(node, undefined); /// This does not work? Any ideas?
        }
        node++;
      }
    }
    geometry.computeVertexNormals();
    var plane = new THREE.Mesh(geometry, material);
    plane.receiveShadow = true;
    plane.castShadow = true;
    let xOrigin: number = mesh.xOrigin;
    let yOrigin: number = mesh.yOrigin;
    let cx: number = xOrigin + (mesh.width / 2.0);
    let cy: number = yOrigin + (mesh.height / 2.0);
    // translate point to origin
    let tempX: number = xOrigin - cx;
    let tempY: number = yOrigin - cy;
    let azi: number = mesh.azimuth;
    let aziRad = azi * Math.PI / 180.0;
    // now apply rotation
    let rotatedX: number = tempX * Math.cos(aziRad) - tempY * Math.sin(aziRad);
    let rotatedY: number = tempX * Math.sin(aziRad) + tempY * Math.cos(aziRad);
    cx += (tempX - rotatedX);
    cy += (tempY - rotatedY);
    plane.position.set(cx, cy, 0.0);
    plane.rotateZ(aziRad);
    this.surfaceGroup.add(plane);
  });
  this.UpdateCamera();
  this.animate();
}
Thanks!
I have read another similar, but older, post where the suggestion was to fill the position Z component with 'undefined' rather than NaN.
Using undefined will fail in the same way as using NaN: BufferGeometry.computeBoundingSphere() computes the radius based on Vector3.distanceToSquared(), and if you call that with a vector containing non-numeric data, it will return NaN.
Hence, you can't represent the gaps in a geometry with NaN or undefined position data. The better way is to generate a geometry that actually represents the shape of your geologic surface. ShapeBufferGeometry might be a good candidate, since shapes support the concept of holes.
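For illustration, a minimal sketch of the holes concept; the rectangular outline and the hole's position and radius are placeholder assumptions, not taken from the question:

const outline = new THREE.Shape();
outline.moveTo(0, 0);
outline.lineTo(mesh.width, 0);
outline.lineTo(mesh.width, mesh.height);
outline.lineTo(0, mesh.height);

// each gap becomes a THREE.Path pushed into the shape's holes array
const hole = new THREE.Path();
hole.absarc(mesh.width / 2, mesh.height / 2, 10, 0, Math.PI * 2, false);
outline.holes.push(hole);

const geometry = new THREE.ShapeBufferGeometry(outline);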
three.js r117
THREE.PlaneBufferGeometry parameters: {
  width: number;
  height: number;
  widthSegments: number;
  heightSegments: number;
};
widthSegments and heightSegments should be at least 1; if you pass a smaller value, the segment count may end up as 0 or NaN, which in turn produces NaN position values.
In my case, it was happening when I tried to create a beveled shape based on a single vector or a bunch of identical vectors - so there was only a single point. Filtering out such shapes solved the issue.
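A hypothetical guard for that case, assuming each shape is built from an array of THREE.Vector2 points:

const isDegenerate = points.every(p => p.equals(points[0]));
if (!isDegenerate) {
  // only now build the beveled shape / extruded geometry from these points
}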

Vertex color interpolation artifacts

I display a "curved tube" and color its vertices based on their distance to the plane the curve lays on.
It works mostly fine, however, when I reduce the resolution of the tube, artifacts starts to appear in the tube colors.
Those artifacts seem to depend on the camera position. If I move the camera around, sometimes the artifacts disappear. Not sure it makes sense.
Live demo: http://jsfiddle.net/gz1wu369/15/
I do not know if there is actually a problem in the interpolation or if it is just a "screen" artifact.
Afterwards I render the scene to a texture, looking at it from the "top". It then looks like a "deformation" field that I use in another shader, hence the need for continuous color.
I do not know if it is the expected behavior or if there is a problem in my code while setting the vertices color.
Would using the THREEJS Extrusion tools instead of the tube geometry solve my issue?
const tubeGeo = new THREE.TubeBufferGeometry(closedSpline, steps, radius, curveSegments, false);
const count = tubeGeo.attributes.position.count;
tubeGeo.addAttribute('color', new THREE.BufferAttribute(new Float32Array(count * 3), 3));
const colors = tubeGeo.attributes.color;
const color = new THREE.Color();
for (let i = 0; i < count; i++) {
  const pp = new THREE.Vector3(
    tubeGeo.attributes.position.array[3 * i],
    tubeGeo.attributes.position.array[3 * i + 1],
    tubeGeo.attributes.position.array[3 * i + 2]);
  const distance = plane.distanceToPoint(pp);
  const normalizedDist = Math.abs(distance) / radius;
  const t2 = Math.floor(i / (curveSegments + 1));
  color.setHSL(0.5 * t2 / steps, .8, .5);
  const green = 1 - Math.cos(Math.asin(Math.abs(normalizedDist)));
  colors.setXYZ(i, color.r, green, 0);
}
Low-res tubes rendered with the "Normals" material show a different artifact; a high-resolution tube hides the artifacts.

Ray tracing to a Point Cloud with a custom vertex shader in Three.js

How can you ray trace to a Point Cloud with a custom vertex shader in three.js?
This is my vertex shader
void main() {
  vUvP = vec2( position.x / (width*2.0), position.y / (height*2.0)+0.5 );
  colorP = vec2( position.x / (width*2.0)+0.5 , position.y / (height*2.0) );

  vec4 pos = vec4(0.0,0.0,0.0,0.0);
  depthVariance = 0.0;

  if ( (vUvP.x<0.0)|| (vUvP.x>0.5) || (vUvP.y<0.5) || (vUvP.y>0.0)) {
    vec2 smp = decodeDepth(vec2(position.x, position.y));
    float depth = smp.x;
    depthVariance = smp.y;

    float z = -depth;

    pos = vec4(
      ( position.x / width - 0.5 ) * z * (1000.0/focallength) * -1.0,
      ( position.y / height - 0.5 ) * z * (1000.0/focallength),
      (- z + zOffset / 1000.0) * 2.0,
      1.0);

    vec2 maskP = vec2( position.x / (width*2.0), position.y / (height*2.0) );
    vec4 maskColor = texture2D( map, maskP );
    maskVal = ( maskColor.r + maskColor.g + maskColor.b ) / 3.0 ;
  }

  gl_PointSize = pointSize;
  gl_Position = projectionMatrix * modelViewMatrix * pos;
}
In the Points class, ray tracing is implemented as follows:
function testPoint( point, index ) {
  var rayPointDistanceSq = ray.distanceSqToPoint( point );
  if ( rayPointDistanceSq < localThresholdSq ) {
    var intersectPoint = ray.closestPointToPoint( point );
    intersectPoint.applyMatrix4( matrixWorld );
    var distance = raycaster.ray.origin.distanceTo( intersectPoint );
    if ( distance < raycaster.near || distance > raycaster.far ) return;
    intersects.push( {
      distance: distance,
      distanceToRay: Math.sqrt( rayPointDistanceSq ),
      point: intersectPoint.clone(),
      index: index,
      face: null,
      object: object
    } );
  }
}

var vertices = geometry.vertices;
for ( var i = 0, l = vertices.length; i < l; i ++ ) {
  testPoint( vertices[ i ], i );
}
However, since I'm using a vertex shader, geometry.vertices doesn't match up with the vertices on screen, which prevents the ray trace from working.
Can we get the points back from the vertex shader?
I didn't dive into what your vertex-shader actually does, and I assume there are good reasons for you to do it in the shader, so it's likely not feasible to redo the calculations in JavaScript when doing the raycasting.
One approach could be to have some sort of estimate for where the points are, use those for a preselection, and do a more involved calculation for only the points closest to the ray.
If that won't work, your best bet would be to render a lookup-map of your scene, in which the color-values encode the id of the point rendered at those coordinates. This is also referred to as GPU picking; there are various examples and even libraries for it, although none of them does exactly what you will need here.
To do that, you need to render your scene twice: create a lookup-map in the first pass and render it regularly in the second pass. The lookup-map will store for every pixel which particle was rendered there.
To get that information you need to set up a THREE.WebGLRenderTarget (this might be downscaled to half the width/height for better performance) and a different material. The vertex-shader stays as it is, but the fragment-shader will just output a single, unique color-value for every particle (or anything else you can use to identify them). Then render the scene (or better: only the parts that should be raycast-targets) into the renderTarget:
var size = renderer.getSize();
var renderTarget = new THREE.WebGLRenderTarget(size.width / 2, size.height / 2);
renderer.render(pickingScene, camera, renderTarget);
After rendering, you can obtain the content of this lookup-texture using the renderer.readRenderTargetPixels-method:
var pixelData = new Uint8Array(width * height * 4);
renderer.readRenderTargetPixels(renderTarget, 0, 0, width, height, pixelData);
(the layout of pixelData here is the same as for a regular canvas imageData.data)
Once you have that, the raycaster will only need to lookup a single coordinate, read and interpret the color-value as object-id and do something with it.
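A hypothetical sketch of that final lookup, assuming the picking fragment-shader encoded each point's index into the rgb channels as index = (r << 16) | (g << 8) | b, and that the lookup-map was rendered at half resolution as above:

const pixel = new Uint8Array(4);
renderer.readRenderTargetPixels(
  renderTarget,
  Math.floor(mouseX / 2), Math.floor((size.height - mouseY) / 2),
  1, 1, pixel
);
const pointIndex = (pixel[0] << 16) | (pixel[1] << 8) | pixel[2];
// pointIndex identifies the particle under the cursor, much like the
// index property in regular raycaster results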

Eliminating off-of-ball roll in Trackball controls (with code/fix)

Is the intent of TrackballControls to have a "border" outside the trackball that induces roll? I personally dislike it; it is a bit discontinuous and doesn't really have a lot of purpose (imho).
If not, the function getMouseProjectionOnBall can be changed similar to the following, which does two things (not necessarily "correctly"):
Normalize the radius to fill both axes
Map z values for positions outside of the ball (i.e. where z was previously 0)
I find this a lot more natural, personally.
Thoughts?
this.getMouseProjectionOnBall = function(clientX, clientY) {
  var xnormalized = (clientX - _this.screen.width * 0.5 - _this.screen.offsetLeft) / (_this.screen.width / 2.0);
  var ynormalized = (_this.screen.height * 0.5 + _this.screen.offsetTop - clientY) / (_this.screen.height / 2.0);

  var mouseOnBall = new THREE.Vector3(
    xnormalized,
    ynormalized,
    0.0
  );

  var length = mouseOnBall.length();
  var ballRadius = 1.0; // As a fraction of the screen
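  // (0.70710678... is sqrt(2)/2 and 1.41421356... is sqrt(2): within the radius
  // r/sqrt(2) the cursor is mapped onto the sphere, outside of it onto the
  // hyperbolic sheet z = (r/sqrt(2))^2 / length, so z stays continuous and > 0)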
  if (length > ballRadius * 0.70710678118654752440) {
    var temp = ballRadius / 1.41421356237309504880;
    mouseOnBall.z = temp * temp / length;

    // Remove old method.
    // This left z = 0, which meant the rotation axis
    // becomes z, which is a roll
    //mouseOnBall.normalize();
  } else {
    mouseOnBall.z = Math.sqrt(1.0 - length * length);
  }

  _eye.copy(_this.object.position).sub(_this.target);

  var projection = _this.object.up.clone().setLength(mouseOnBall.y);
  projection.add(_this.object.up.clone().cross(_eye).setLength(mouseOnBall.x));
  projection.add(_eye.setLength(mouseOnBall.z));

  return projection;
};
