Three.js: Apply SSAO (Screen Space Ambient Occlusion) to Displacement map - three.js

I've implemented Screen Space Ambient Occlusion in my Three.js project, and it runs perfectly, like this:
//Setup SSAO pass: a depth pre-pass material, a render target to hold depth,
//and the SSAO shader pass wired to that depth texture.
depthMaterial = new THREE.MeshDepthMaterial();
depthMaterial.depthPacking = THREE.RGBADepthPacking; //pack depth into RGBA so it survives an 8-bit render target
depthMaterial.blending = THREE.NoBlending;
var pars = { minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBAFormat, stencilBuffer: true }; //stencilBuffer true so transparent objects are not affected
depthRenderTarget = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight, pars);
depthRenderTarget.texture.name = "SSAOShader.rt";
ssaoPass = new THREE.ShaderPass(THREE.SSAOShader);
//ssaoPass.uniforms["tDiffuse"].value is filled in automatically by ShaderPass
ssaoPass.uniforms["tDepth"].value = depthRenderTarget.texture; //feed the depth pre-pass result to the SSAO shader
ssaoPass.uniforms['size'].value.set(window.innerWidth, window.innerHeight);
ssaoPass.uniforms['cameraNear'].value = camera.near; //near/far are needed to un-project depth values
ssaoPass.uniforms['cameraFar'].value = camera.far;
ssaoPass.uniforms['radius'].value = radius; //radius, aoClamp, lumInfluence are tuning values defined elsewhere
ssaoPass.uniforms['aoClamp'].value = aoClamp;
ssaoPass.uniforms['lumInfluence'].value = lumInfluence;
But when I set a material with a displacementMap (which runs correctly without SSAO enabled), this is the result. Notice that the SSAO is applied "correctly" to the original sphere (with a strange transparent artifact), but I need it applied to the "displaced vertices" of the sphere.
This is my composer passes:
//Main render scene pass: draws the scene first, then SSAO runs on top of it
postprocessingComposer.addPass(renderScene);
//Post processing pass (only added when SSAO was successfully set up)
if (ssaoPass) {
postprocessingComposer.addPass(ssaoPass);
}
And this is the rendering loop with composer
if (postprocessingComposer) {
if (ssaoPass) {
//Render depth into depthRenderTarget: override every material with the
//depth material so the SSAO shader gets a full-screen depth buffer
scene.overrideMaterial = depthMaterial;
renderer.render(scene, camera, depthRenderTarget, true);
//Render composer with normal materials restored
scene.overrideMaterial = null;
postprocessingComposer.render();
//Draw the orthographic overlay (HUD) on top without depth interference
renderer.clearDepth();
renderer.render(sceneOrtho, cameraOrtho);
}
else {
//Render loop with post processing (no SSAO, because it needs more checks, see above)
renderer.clear();
postprocessingComposer.render();
renderer.clearDepth();
renderer.render(sceneOrtho, cameraOrtho);
}
}
else {
//Simple render loop (no post-processing)
renderer.clear();
renderer.render(scene, camera);
renderer.clearDepth();
renderer.render(sceneOrtho, cameraOrtho);
}
How can I achieve a correct Screen Space Ambient Occlusion applied to a mesh with a displacement map? Thanks.
[UPDATE]:
After some work, I tried the following procedure for every child in the scene with a displacement map: define a new overrideMaterial for the scene equal to a depthMaterial with the displacement map parameters of the child's material.
//Per-child depth material that copies the child's displacement settings, so the
//depth pre-pass displaces vertices the same way the visible material does
var myDepthMaterial = new THREE.MeshDepthMaterial({
depthPacking: THREE.RGBADepthPacking,
displacementMap: child.material.displacementMap,
displacementScale: child.material.displacementScale,
displacementBias: child.material.displacementBias
});
//NOTE(review): setting scene.overrideMaterial from onBeforeRender affects the
//whole scene, not just this child — likely why this approach failed
child.onBeforeRender = function (renderer, scene, camera, geometry, material, group) {
scene.overrideMaterial = myDepthMaterial;
};
This solution sounds good, but it doesn't work.

You are using SSAO with a displacement map. You need to specify the displacement map when you instantiate the depth material.
//Pass the displacement parameters directly when creating the depth material,
//so the depth pre-pass displaces vertices exactly like the visible material
depthMaterial = new THREE.MeshDepthMaterial( {
depthPacking: THREE.RGBADepthPacking,
displacementMap: displacementMap,
displacementScale: displacementScale,
displacementBias: displacementBias
} );
three.js r.87

Related

Using OutlinePass (THREE.js r102) with skinned mesh

/examples/js/postprocessing/OutlinePass.js from THREE.js r102 does not appear to work with skinned meshes. Specifically, the rendered outline always stays in the mesh's rest position.
Is there some way to get this working (that is, to update the outline to reflect the current pose of an animated mesh)? OutlinePass does not appear to be documented (mod the comments in the code itself).
Is there some other accepted method of outlining animated meshes? I'm in the process of migrating some code from r7x, where I ended up accomplishing this by manually creating a copy of the mesh and applying a shader material that scales along the normals. I can do that again, but if there's a simpler/better supported method to accomplish the same effect I'd rather use it instead of reproducing a method that breaks every new major release.
A simple jsfiddle illustrating the issue:
https://jsfiddle.net/L69pe5q2/3/
This is the code from the jsfiddle. The mesh I use is the SimpleSkinning.gltf example from the three.js distribution. In the jsfiddle I load it from a dataURI so it doesn't complain about XSS loading, and I've edited the base64-encoded data out (and replaced it with [FOO]) in the code below, purely for readability.
The OutlinePass is created and added to the composer in initComposer().
//Shared state for the demo: renderer/composer, animation mixer, and the mesh
//that will receive the outline
var camera, light, renderer, composer, mixer, loader, clock;
var scene, mesh, outlinePass;
var height = 480,
width = 640;
var clearColor = '#666666';
load();
//Loads the glTF model (embedded as a data URI), starts its animation, then
//boots the rest of the app once the asset is available
function load() {
loader = new THREE.GLTFLoader();
clock = new THREE.Clock();
scene = new THREE.Scene();
loader.load('data:text/plain;base64,[FOO]', function(obj) {
scene.add(obj.scene);
mixer = new THREE.AnimationMixer(obj.scene);
var clip = THREE.AnimationClip.findByName(obj.animations,
'Take 01');
var a = mixer.clipAction(clip);
a.reset();
a.play();
mesh = obj.scene;
mesh.position.set(-7, 2.5, -7);
init();
animate();
});
}
//Wires up camera, scene contents, renderer, and the post-processing chain,
//then registers the loaded mesh with the outline pass
function init() {
initCamera();
initScene();
initRenderer();
initComposer();
outlinePass.selectedObjects = [mesh]; //outline only the animated mesh
}
//Perspective camera looking at the origin from (7, 0, 7)
function initCamera() {
camera = new THREE.PerspectiveCamera(30, width / height, 1, 10000);
camera.position.set(7, 0, 7);
camera.lookAt(0, 0, 0);
}
//Flat ambient lighting only — enough to see the unshaded model
function initScene() {
light = new THREE.AmbientLight(0xffffff)
scene.add(light);
}
//WebGL renderer attached to the document body at the fixed demo size
function initRenderer() {
renderer = new THREE.WebGLRenderer({
width: width,
height: height,
antialias: false,
});
renderer.setSize(width, height);
renderer.setClearColor(clearColor);
document.body.appendChild(renderer.domElement);
}
//Builds the post-processing chain: render pass -> outline pass -> copy-to-screen
function initComposer() {
var renderPass, copyPass;
composer = new THREE.EffectComposer(renderer);
renderPass = new THREE.RenderPass(scene, camera);
composer.addPass(renderPass);
outlinePass = new THREE.OutlinePass(new THREE.Vector2(width, height),
scene, camera);
composer.addPass(outlinePass);
outlinePass.edgeStrength = 10;
outlinePass.edgeThickness = 4;
outlinePass.visibleEdgeColor.set('#ff0000'); //red outline for visibility
//final pass copies the composed result to the default framebuffer
copyPass = new THREE.ShaderPass(THREE.CopyShader);
copyPass.renderToScreen = true;
composer.addPass(copyPass);
}
//Main loop: advance the animation mixer each frame, then render via the composer
function animate() {
var delta = clock.getDelta();
requestAnimationFrame(animate);
update(delta);
render(delta);
}
function update(delta) {
if (mixer) mixer.update(delta); //guard: mixer exists only after the glTF loads
}
function render(delta) {
composer.render();
}
According to Mugen87, in January 2019 he said:
With this small patch, it's now possible to use the outline pass with animated meshes. The only thing users have to do at app level is to set morphTargets or skinning to true for OutlinePass.depthMaterial and OutlinePass.prepareMaskMaterial. That's of course still a manual effort but at least the more complicated shader enhancement is already done.
take this example:
https://jsfiddle.net/2ybks7rd/
reference link on github

Three.js. How do I use a custom material for a scene background rather than a color or texture?

The docs for scene say a color or texture can be used for scene.background. I would like to use a ShaderMaterial with my own custom shaders. How can I do this?
Specifically, I want to paint a color ramp behind the foreground elements. Here is the fragment shader:
// Canvas size in pixels; used to normalize gl_FragCoord into [0, 1]
uniform vec2 uXYPixel;
void main() {
// Normalized screen position of this fragment
vec2 xy = vec2(gl_FragCoord.x/uXYPixel.x, gl_FragCoord.y/uXYPixel.y);
// Horizontal red ramp, vertical green ramp, no blue — a simple color gradient
gl_FragColor.rgb = vec3(xy.x, xy.y, 0);
gl_FragColor.a = 1.0;
}
uXYPixel is a uniform vec2 with the values window.innerWidth, window.innerHeight
You'd need to manually create two render passes. One that renders the background plane with a simple Camera, and the second one that renders the rest of the scene. You can use the most basic Camera class since you won't be using transformation or projection matrices:
// Set renderer with no autoclear, so the two render() calls below accumulate
// into the same frame instead of erasing each other
var renderer = new THREE.WebGLRenderer();
renderer.autoClear = false;
document.body.append(renderer.domElement);
renderer.setSize(window.innerWidth, window.innerHeight);
// Set up background scene: a full-screen 2x2 plane drawn with a bare Camera
// (no projection), so the plane exactly covers clip space
var bgScene = new THREE.Scene();
var bgCam = new THREE.Camera();
var bgGeom = new THREE.PlaneBufferGeometry(2, 2);
var bgMat = new THREE.ShaderMaterial({
// Add shader stuff in here
// ..
// Disable depth so it doesn't interfere with foreground scene
depthTest: false,
depthWrite: false
});
var bgMesh = new THREE.Mesh(bgGeom, bgMat);
bgScene.add(bgMesh);
// Set up regular scene with a normal perspective camera
var scene = new THREE.Scene();
var cam = new THREE.PerspectiveCamera(45, w/h, 1, 100);
function update() {
// Clear previous frame manually (autoClear is off)
renderer.clear();
// Background render pass
renderer.render(bgScene, bgCam);
// Foreground render pass draws on top of the background
renderer.render(scene, cam);
requestAnimationFrame(update);
}
update();
Here you can see a working example.
Notice that the renderer's autoClear = false attribute makes sure it doesn't clear the buffer in between each render() call; you'll have to clear it manually at the beginning of each frame.

Three.js - Faces missing after Blender import

I tried to import a JSON exported file of my 3D model and import it in Three.js but it seems some faces are missing.
I am not sure if it was an export problem because when I rotate it, the left face exists but the right face does not, vice versa.
Here is my original model in Blender:
//Globals for the viewer; SPEED is the per-frame rotation in radians
var scene, camera, renderer;
var WIDTH = window.innerWidth;
var HEIGHT = window.innerHeight;
var SPEED = 0.01;
//Builds the scene, loads the model, and attaches the renderer canvas
function init() {
scene = new THREE.Scene();
initMesh();
initCamera();
initLights();
initRenderer();
document.body.appendChild(renderer.domElement);
}
//Camera slightly above and in front of the origin, aimed at the scene center
function initCamera() {
camera = new THREE.PerspectiveCamera(70, WIDTH / HEIGHT, 1, 10);
camera.position.set(0, 3.5, 5);
camera.lookAt(scene.position);
}
function initRenderer() {
renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setSize(WIDTH, HEIGHT);
}
//Single directional light pointing straight down from above
function initLights() {
var directionalLight = new THREE.DirectionalLight( 0xffffff, 0.8 );
directionalLight.position.set( 0, 1, 0 );
scene.add( directionalLight );
}
//mesh stays null until the async load completes; rotateMesh checks for this
var mesh = null;
//Loads the Blender-exported JSON model and adds it to the scene once ready
function initMesh() {
var loader = new THREE.JSONLoader();
loader.load('./model.json', function(geometry, materials) {
mesh = new THREE.Mesh(geometry, new THREE.MeshFaceMaterial(materials));
mesh.scale.x = mesh.scale.y = mesh.scale.z = 0.75;
mesh.translation = THREE.GeometryUtils.center(geometry); //recenters geometry on its bounding box
mesh.position.x = -5;
scene.add(mesh);
});
}
//Spins the model around Y each frame; no-op until the model has loaded
function rotateMesh() {
if (!mesh) {
return;
}
mesh.rotation.y -= SPEED;
}
//Render loop: rotate, then draw
function render() {
requestAnimationFrame(render);
rotateMesh();
renderer.render(scene, camera);
}
init();
render();
Hope you can help me with this problem.
Thanks in advance!
I would suspect your problem has to do with the face-normals pointing in the wrong direction. To check if this is the case you could try to set all materials to double-sided:
//Force every material to render both faces, ignoring normal direction
materials.forEach(function(mat) {
mat.side = THREE.DoubleSide;
});
With Double-Sided mode, the faces are drawn regardless of the normals direction, so you should see all faces if enabled.
Or you could use the THREE.FaceNormalsHelper to have a look at the normals yourself.
scene.add(new THREE.FaceNormalsHelper(mesh, 2, 0x00ff00, 1));
This will render arrows for all faces indicating the normal-direction.
If the normals are wrong you can fix this in blender by selecting all affected faces and using the command Mesh>Faces>Flip Normals from the menu or in the Tools-Panel on the right-hand side in the "Shading/UV"-Tab. Sometimes just selecting all faces and running "Recalculate Normals" from the Tools will work as well.
Blender also has a display-mode for face-normals in the right hand menu in the "Mesh Display"-Section.

Texture applied only on canvas click with Three.js

I am using WebGLRenderer from Three.js to render an object reconstructed from an IndexedFaceStructure that has a texture. My problem is that when the page loads, the object shows up with no texture — only a black colored mesh displays. However, when I click on the canvas where I render the object, the texture shows up.
I have been looking around and tried the texture.needsUpdate = true; trick, but this one also removes the black meshed object on page load, so I am at a loss here.
These are the main bits of my code:
//Sets up renderer, camera, controls, and the textured mesh, then renders once
function webGLStart() {
container = document.getElementById("webgl-canvas");
renderer = new THREE.WebGLRenderer({canvas:container, alpha:true, antialias: true});
renderer.setClearColor(0x696969,1);
renderer.setSize(container.width, container.height) ;
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(45, container.width / container.height, 1, 100000);
camera.position.set(60, 120,2000) ;
//computing the geometry2
controls = new THREE.OrbitControls( camera );
controls.addEventListener( 'change', render ); //re-render whenever the camera moves
//NOTE(review): loadTexture is async — the first render below happens before
//the image arrives, which is why the mesh appears black until interaction
texture = new THREE.ImageUtils.loadTexture(texFile);
//texture.needsUpdate = true;
material = new THREE.MeshBasicMaterial( {wireframe:false, map: texture, vertexColors: THREE.VertexColors} );
mesh = new THREE.Mesh(geometry2, material);
scene.add(mesh);
render();
animate();
}
function render()
{
renderer.render(scene, camera);
}
function animate()
{
controls.update();
}
And the html part: canvas id="webgl-canvas" style="border: none;" width="900" height="900" (i could not add it properly).
Do you happen to have a clue why is this happening?
If you have a static scene, you do not need an animation loop, and you only need to render the scene when OrbitControls modifies the camera position/orientation.
Consequently, you can use this pattern -- without an animation loop:
controls.addEventListener( 'change', render );
However, you also need to force a render when the texture loads. You do that by specifying a callback to render in the ImageUtils.loadTexture() method:
var texture = THREE.ImageUtils.loadTexture( "textureFile", undefined, render );
Alternatively, you could add the mesh to the scene and call render in the callback.
three.js r.70

Specifying the range for Depth of Field (MeshDepthMaterial range?)

I have adapted this post processing example http://mrdoob.github.com/three.js/examples/webgl_postprocessing_dof.html to apply a Depth of Field / Bokeh effect. How can I specify the focus range (or whatever it could be called)?
If the camera far plane is at 10000 and the model size is 10, it was impossible to focus on individual parts of the model — because it tries to focus from 1-10000 (camera-near to camera-far) instead of 1-10 (between the camera and the back of my model), the actual area of interest.
It did work fine after I realised to set the camera far plane as low as possible (to about same as scene size), so the focus can adjusted where the actual model is.
Now I can't do the camera far plane trick anymore, because I added a skybox, so the camera needs to have it's far plane quite far related to the model size. That messes up the Depth of Field; I can focus very close or very far, but the whole model is either completely blurred or not blurred at all as the adjustable distance is way too big (all the way to the skybox).
If I know the area I want to be able to focus at, how can I specify it in my code?
Here is my setup code:
//Depth-of-field setup: a depth override material, an orthographic scene that
//holds a full-screen quad, two render targets (color + depth), and the Bokeh
//shader that combines them
dof_material_depth = new THREE.MeshDepthMaterial();
dof_scene = new THREE.Scene();
dof_camera = new THREE.OrthographicCamera(SCREEN_WIDTH / - 2, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2, SCREEN_HEIGHT / - 2, -10000, 10000 );
dof_camera.position.z = 100;
dof_scene.add( dof_camera );
var pars = { minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBAFormat };
dof_rtTextureDepth = new THREE.WebGLRenderTarget(SCREEN_WIDTH, SCREEN_HEIGHT, pars );
dof_rtTextureColor = new THREE.WebGLRenderTarget(SCREEN_WIDTH, SCREEN_HEIGHT, pars );
dof_bokeh_shader = THREE.BokehShader;
dof_bokeh_uniforms = THREE.UniformsUtils.clone(dof_bokeh_shader.uniforms ); //clone so shared shader defaults are not mutated
dof_bokeh_uniforms[ "tColor" ].value = dof_rtTextureColor;
dof_bokeh_uniforms[ "tDepth" ].value = dof_rtTextureDepth;
dof_bokeh_uniforms[ "focus" ].value = 1.1;
dof_bokeh_uniforms[ "aspect" ].value = SCREEN_WIDTH / SCREEN_HEIGHT;
dof_materialBokeh = new THREE.ShaderMaterial( {
uniforms: dof_bokeh_uniforms,
vertexShader: dof_bokeh_shader.vertexShader,
fragmentShader: dof_bokeh_shader.fragmentShader
});
//Full-screen quad that the Bokeh shader draws onto
dof_quad = new THREE.Mesh( new THREE.PlaneGeometry(SCREEN_WIDTH, SCREEN_HEIGHT), dof_materialBokeh );
dof_quad.position.z = -500;
dof_scene.add(dof_quad );
And here the rendering part:
//Pass 1: normal scene into the color target
renderer.render(scene, camera, dof_rtTextureColor, true );
//Pass 2: same scene with the depth override material into the depth target
scene.overrideMaterial = dof_material_depth;
renderer.render(scene, camera, dof_rtTextureDepth, true );
dof_scene.overrideMaterial = null;
//Pass 3: full-screen Bokeh quad combines color + depth
render(dof_scene, dof_camera );
var delta = 0.01;
composerScene.render( delta);
EDIT:
I did manage to get desired results by setting a low far plane for the camera just before rendering the depth material, then reverting back to normal before rendering the composite:
renderer.render(scene, camera, dof_rtTextureColor, true );
//Temporarily pull the far plane in for the depth pass only, so the usable
//depth range covers just the model instead of stretching out to the skybox
var oldfar = camera.far; // this goes to skybox
camera.far = scenesize; // this goes to just behind the model
scene.overrideMaterial = dof_material_depth;
renderer.render(scene, camera, dof_rtTextureDepth, true );
camera.far = oldfar; //restore before the final composite
dof_scene.overrideMaterial = null;
render(dof_scene, dof_camera );
var delta = 0.01;
composerScene.render( delta);
This works perfectly. I will leave the question open though, as I'm quite new to WebGL / 3D programming in general, want to learn, and would like to know if it's possible to do this in the shaders/materials setup phase.

Resources