I am looking for a solution to take a snapshot from a "hidden" camera that is not used in the main canvas/viewport.
I am not looking for multiple viewports like this:
https://threejs.org/examples/webgl_multiple_views.html
I would like to have a main canvas that is visible to the user, showing the view from the main camera. But when the user clicks the "download" button, it should generate not the current view but a static view defined in the script, without changing anything the user sees.
I tried this:
var camera2 = new THREE.PerspectiveCamera( view1.fov, window.innerWidth / window.innerHeight, 1, 10000 );
view1.camera = camera2;
var canvasPdfScene = document.createElement('canvas');
canvasPdfScene.id = 'canvasPdfScene';
canvasPdfScene.width = 2048;
canvasPdfScene.height = 512;
canvasPdfScene.style.borderBottom = '1px solid black';
//window.context = temCanvas.getContext('2d');
var pdfContext = canvasPdfScene.getContext('2d', {preserveDrawingBuffer: true});
var actualCanvas = renderer.domElement;
//document.body.appendChild(canvasPdfScene);
renderer.domElement = canvasPdfScene;
renderer.render( scene, camera2 );
renderer.domElement = actualCanvas;
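One approach that avoids a second canvas entirely (a minimal sketch, untested against your setup): render one off-screen frame with the hidden camera on the existing renderer, read the pixels back immediately with toDataURL(), then restore the user's view inside the same click handler. A 2D context can never receive WebGL output, and reassigning renderer.domElement does not rebind the GL context, which is why the attempt above stays blank. The 2048x512 snapshot size is taken from your canvas; camera here stands for your main camera, and the function assumes the visible renderer is sized to the window, as in your snippet:
function downloadSnapshot() {
    // render the hidden camera at the snapshot resolution (2048x512)
    camera2.aspect = 2048 / 512;
    camera2.updateProjectionMatrix();
    renderer.setSize(2048, 512, false);
    renderer.render(scene, camera2);

    // toDataURL() must be called right after render(), before the drawing
    // buffer is cleared, so preserveDrawingBuffer is not required here
    var dataURL = renderer.domElement.toDataURL('image/png');

    // restore the visible view before the browser repaints
    renderer.setSize(window.innerWidth, window.innerHeight, false);
    renderer.render(scene, camera);

    // trigger the download
    var link = document.createElement('a');
    link.href = dataURL;
    link.download = 'snapshot.png';
    link.click();
}
Because everything happens synchronously in one handler, the user never sees the temporary resize.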
On my p5 canvas I set up a custom control like this:
this.mousePressed = function(){
    // check if the playback button has been clicked
    var isButtonClicked = this.playbackButton.hitCheck();
    var isMouseInBlockGUI = blockMidHighLow.isMouseInGUI();
    // if not, make the visualisation fullscreen
    if(isButtonClicked == false && isMouseInBlockGUI == false){
        let fs = fullscreen();
        fullscreen(!fs);
    }
};
On the three.js side, I have some controls on the 3D animation, but they are not working:
const controls = new THREE.OrbitControls(camera, render.domElement); // note: this is probably meant to be renderer.domElement
controls.enabled = true;
controls.target.set(1000, 0, 1000);
controls.update();
You can try this. The steps are: set up a camera, set up a renderer, and set up OrbitControls.
Set up a camera
// Object to store the size of the viewport
const size = {
    width: window.innerWidth,
    height: window.innerHeight,
};
// Creates the camera (point of view of the user)
const aspect = size.width / size.height;
const camera = new THREE.PerspectiveCamera(75, aspect);
camera.position.z = 15;
camera.position.y = 13;
camera.position.x = 8;
Set up a renderer
// Sets up the renderer, fetching the canvas from the HTML
const threeCanvas = document.getElementById("three-canvas");
const renderer = new THREE.WebGLRenderer({
    canvas: threeCanvas,
    alpha: true
});
renderer.setSize(size.width, size.height);
renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2));
Set up OrbitControls
// Creates the orbit controls (to navigate the scene)
const controls = new OrbitControls(camera, threeCanvas);
controls.enableDamping = true;
controls.target.set(-2, 0, 0);
// Animation loop
const animate = () => {
    controls.update();
    renderer.render(scene, camera);
    requestAnimationFrame(animate);
};
animate();
Hope this helps. Thanks
Learning material
I personally recommend this blog; it's very useful for learning:
https://sbcode.net/threejs/orbit-controls/
import * as THREE from 'three'
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls'
import Stats from 'three/examples/jsm/libs/stats.module'
const scene = new THREE.Scene()
scene.add(new THREE.AxesHelper(5))
const camera = new THREE.PerspectiveCamera(
    75,
    window.innerWidth / window.innerHeight,
    0.1,
    1000
)
camera.position.z = 2
const renderer = new THREE.WebGLRenderer()
renderer.setSize(window.innerWidth, window.innerHeight)
document.body.appendChild(renderer.domElement)
const controls = new OrbitControls(camera, renderer.domElement)
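The blog post continues with a full example; as a quick way to see the controls working, a minimal render loop in the same style might look like this (a sketch, with a placeholder cube added so there is something to orbit around):
// placeholder object so the controls have something visible to orbit
scene.add(new THREE.Mesh(new THREE.BoxGeometry(), new THREE.MeshNormalMaterial()))

function animate() {
    requestAnimationFrame(animate)
    controls.update() // only needed when enableDamping or autoRotate is used
    renderer.render(scene, camera)
}
animate()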
I'm trying to implement something very similar to this...
https://stemkoski.github.io/Three.js/Camera-Texture.html
... as an A-Frame component. However, I'm getting only blank white on the quad that is supposed to be "monitoring" this extra camera. It changes to black when entering VR mode.
Here is my code:
AFRAME.registerComponent('viewfinder', {
    schema:{
        //
    },
    init:function(){
        this.renderer = new THREE.WebGLRenderer({antialias:true});
        this.renderer.setSize( window.innerWidth, window.innerHeight );
        document.body.appendChild(this.renderer.domElement);
        this.dronecamera = new THREE.PerspectiveCamera( 70, window.innerWidth / window.innerHeight, 1, 1000 );
        this.dronecamera.position.z=5;
        this.monitorbuffercam = new THREE.OrthographicCamera(window.innerWidth/-2, window.innerWidth/2, window.innerHeight/2, window.innerHeight/-2, -10000, 10000);
        this.monitorbuffercam.position.z=1;
        this.buffertexture = new THREE.WebGLRenderTarget( window.innerWidth, window.innerHeight, { format: THREE.RGBFormat });
        this.monitortexture = new THREE.WebGLRenderTarget( window.innerWidth, window.innerHeight, { format: THREE.RGBFormat });
        this.bufferscene = new THREE.Scene();
        var ambientlight = new THREE.AmbientLight(0xffffff);
        this.bufferscene.add(ambientlight);
        var bufferplaneG = new THREE.PlaneGeometry( window.innerWidth, window.innerHeight);
        var bufferplaneM = new THREE.MeshBasicMaterial({map:this.buffertexture});
        this.bufferplane = new THREE.Mesh(bufferplaneG, bufferplaneM);
        this.bufferscene.add(this.bufferplane);
        this.bufferscene.add(this.monitorbuffercam);
        this.el.sceneEl.object3D.add(this.dronecamera);
        this.monitorplaneG = new THREE.PlaneGeometry(10, 10);
        this.monitorplaneM = new THREE.MeshBasicMaterial({map:this.monitortexture});
        this.monitor = new THREE.Mesh(this.monitorplaneG, this.monitorplaneM);
        this.el.setObject3D('monitor', this.monitor);
    },
    tick:function(){
        var r = this.renderer;
        var s = this.el.sceneEl.object3D;
        var bs = this.bufferscene;
        var dc = this.dronecamera;
        var bt = this.buffertexturee;
        var mbc = this.monitorbuffercam;
        var mt = this.monitortexture;
        requestAnimationFrame(draw);
        function draw(){
            r.render(s, dc, bt, true);
            r.render(bs, mbc, mt, true);
        }
    }
});
Any help is greatly appreciated. I'm attempting to follow the model from the three.js example: render from a camera in the main scene onto a quad's texture map in a separate off-screen scene, point an orthographic camera at that quad, and render that camera's view to a texture map back in the main scene. I have a hunch that there is some boilerplate code I'm forgetting or getting wrong.
Thanks in advance!
I'm not sure if this is what you're looking for, but this answer to AFrame: How to render a camera to a texture could be of some help. In short, it shows how to use a component that creates a renderer for a secondary camera, which can then be referenced as the material for an object.
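For comparison, here is a rough sketch (untested) that skips the second WebGLRenderer entirely and reuses the scene's own renderer with a WebGLRenderTarget in tick. It assumes a three.js build that has renderer.setRenderTarget() and an A-Frame version that exposes sceneEl.renderer; the sizes and names are placeholders:
AFRAME.registerComponent('viewfinder', {
    init: function () {
        // secondary camera placed in the main scene
        this.droneCamera = new THREE.PerspectiveCamera(70, 4 / 3, 1, 1000);
        this.droneCamera.position.z = 5;
        this.el.sceneEl.object3D.add(this.droneCamera);

        // off-screen target the secondary camera renders into
        this.renderTarget = new THREE.WebGLRenderTarget(512, 512);

        // quad in the main scene that displays the target's texture
        var monitor = new THREE.Mesh(
            new THREE.PlaneGeometry(10, 10),
            new THREE.MeshBasicMaterial({ map: this.renderTarget.texture })
        );
        this.el.setObject3D('monitor', monitor);
    },
    tick: function () {
        var renderer = this.el.sceneEl.renderer;
        if (!renderer) { return; }
        var monitor = this.el.getObject3D('monitor');
        monitor.visible = false; // hide the quad while rendering into its own texture
        renderer.setRenderTarget(this.renderTarget);
        renderer.render(this.el.sceneEl.object3D, this.droneCamera);
        renderer.setRenderTarget(null); // hand the default framebuffer back to A-Frame
        monitor.visible = true;
    }
});
The intermediate orthographic-camera pass from the Stemkoski example isn't strictly needed here, since the render target's texture can be mapped onto the quad directly.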
I have some code at https://jsfiddle.net/72mnd2yt/1/ that doesn't display the sprite I'm trying to draw. I tried to follow the code over at https://stemkoski.github.io/Three.js/Sprite-Text-Labels.html and read it line by line, but I'm not sure where I went wrong. Would someone mind taking a look at it?
Here is some relevant code:
// picture
var getPicture = function(message){
    var canvas = document.createElement('canvas');
    canvas.width = "100%";
    canvas.height = "100%";
    var context = canvas.getContext('2d');
    context.font = "10px";
    context.fillText(message, 0, 10);
    var picture = canvas.toDataURL();
    // return picture;
    return canvas;
};
// let there be light and so forth...
var getScene = function(){
    var scene = new THREE.Scene();
    var camera = new THREE.PerspectiveCamera(
        70,
        $('body').width(),
        $('body').height(),
        1,
        1000
    );
    var light = new THREE.PointLight(0xeeeeee);
    scene.add(camera);
    scene.add(light);
    var renderer = new THREE.WebGLRenderer();
    renderer.setClearColor(0xefefef);
    renderer.setSize($('body').width(), $('body').height());
    camera.position.z = -10;
    camera.lookAt(new THREE.Vector3(0, 0, 0));
    return [scene, renderer, camera];
};
// now for the meat
var getLabel = function(message){
    var texture = new THREE.Texture(getPicture(message));
    var spriteMaterial = new THREE.SpriteMaterial(
        {map: texture }
    );
    var sprite = new THREE.Sprite(spriteMaterial);
    //sprite.scale.set(100, 50, 1.0);
    return sprite
};
var setup = function(){
    var scene;
    var renderer;
    var camera;
    [scene, renderer, camera] = getScene();
    $('body').append(renderer.domElement);
    var label = getLabel("Hello, World!");
    scene.add(label);
    var animate = function(){
        requestAnimationFrame(animate);
        renderer.render(scene, camera);
    };
    animate();
};
setup();
A few points:
1) canvas.width = "100%" should be a pixel count, e.g. canvas.width = 100 (canvas sizes are numbers of pixels, not CSS strings). Same with canvas.height.
2) $('body').height() is 0, so the renderer canvas is not visible (you can check this in dev tools: it's in the element tree but squashed to 0px high). I know nothing about jQuery, so I'm not sure why this is, but I would recommend using window.innerHeight and window.innerWidth instead anyway, i.e. renderer.setSize(window.innerWidth, window.innerHeight). You'll also want to make this change in the camera initialization.
3) Speaking of the camera initialization, you are passing in width and height as separate arguments, when there should only be an aspect ratio argument. See the docs. So
var camera = new THREE.PerspectiveCamera(70, window.innerWidth, window.innerHeight, 1, 1000)
becomes
var camera = new THREE.PerspectiveCamera(70, window.innerWidth/window.innerHeight, 1, 1000)
4) Because textures are assumed to be static, you need to add something to this part:
var texture = new THREE.Texture(getPicture(message))
texture.needsUpdate = true // this signals that the texture has changed
That's a one-time flag, so you need to set it every time the canvas changes if you want a dynamic texture. You don't have to make a new THREE.Texture each time; just set texture.needsUpdate in the render loop (or in an event that only fires when you want the texture to change, if you're going for efficiency). See the docs, under .needsUpdate. A short sketch of this appears after the fiddle link at the end.
At this point, it should work. Here are some further things to consider:
5) Instead of using Texture you could use CanvasTexture, which sets .needsUpdate for you. The fiddle you posted is using Three.js r71, which doesn't have it, but newer versions do. That would look like this:
var texture = new THREE.CanvasTexture(getPicture(message));
// no needsUpdate necessary
6) It looks like you were on this path already based on the commented-out return picture: you can use either the canvas element itself, or a data URL generated from the canvas, as the texture source. If getPicture now returns a data URL, try this:
var texture = new THREE.TextureLoader().load(getPicture(message))
You can also indirectly use a data url with Texture:
var img = new Image()
img.src = getPicture(message)
var texture = new THREE.Texture(img);
texture.needsUpdate = true
If not, just stick with Texture or CanvasTexture; both will take a canvas element directly (and, as shown above, Texture can take a data URL indirectly via an image element whose src is set to it).
Fiddle with the outlined fixes:
https://jsfiddle.net/jbjw/x0uL1kbh/
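To illustrate point 4, a dynamic canvas texture might be updated like this (a sketch, assuming you keep references to the 2D context and the texture rather than recreating them each frame):
var updateLabel = function(context, texture, message){
    // redraw the canvas, then flag the texture so three.js re-uploads it
    context.clearRect(0, 0, context.canvas.width, context.canvas.height);
    context.fillText(message, 0, 10);
    texture.needsUpdate = true; // set this after every redraw
};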
I have an issue with a shadow map showing on the back side of an object. I have been fighting with it for almost two days now, thinking I was surely doing something wrong.
Then I wrote this jsFiddle : http://jsfiddle.net/gui2one/ec1snfee/
code below :
// shadow map issue
var light, mesh, renderer, scene, camera, controls;
var clock = new THREE.Clock({autoStart:true});
init();
animate();
function init() {
    // renderer
    renderer = new THREE.WebGLRenderer();
    renderer.shadowMapEnabled = true;
    renderer.setSize( window.innerWidth, window.innerHeight );
    document.body.appendChild( renderer.domElement );
    // scene
    scene = new THREE.Scene();
    // camera
    camera = new THREE.PerspectiveCamera( 40, window.innerWidth / window.innerHeight, 1, 10000 );
    camera.position.set( -58, 10, 29 );
    // controls
    controls = new THREE.OrbitControls( camera );
    // ambient
    scene.add( new THREE.AmbientLight( 0x222222 ) );
    // light
    light = new THREE.SpotLight();
    light.position.set( 20, 0, 0 );
    //light.target.position.set(1000,0,0);
    light.castShadow = true;
    light.shadowBias = -0.001;
    light.shadowCameraNear = 1;
    light.shadowCameraFar = 100;
    light.shadowMapWidth = 2048;
    light.shadowMapHeight = 2048;
    light.shadowCameraVisible = true;
    scene.add( light );
    // axes
    scene.add( new THREE.AxisHelper( 5 ) );
    var materialTest = new THREE.MeshPhongMaterial();
    var planetTestGeo = new THREE.SphereGeometry(5);
    var planetTest = new THREE.Mesh(planetTestGeo, materialTest);
    planetTest.castShadow = true;
    planetTest.receiveShadow = true;
    scene.add(planetTest);
    planetTest.position.set(-30, 0, 0);
    var moonTestGeo = new THREE.SphereGeometry(1);
    var moonTest = new THREE.Mesh(moonTestGeo, materialTest);
    moonTest.castShadow = true;
    moonTest.receiveShadow = true;
    scene.add(moonTest);
    moonTest.position.set(planetTest.position.x + 10, planetTest.position.y - 0.5, planetTest.position.z);
    camera.lookAt(moonTest.position);
}
function animate() {
    requestAnimationFrame( animate );
    //controls.update();
    renderer.render( scene, camera );
}
Then I noticed this question (from more than 2 years ago):
THREE.JS Shadow on opposite side of light
Is there a workaround for that now?
I am trying to make an animated solar system, and I can't use different objects or different materials to work around the problem, because the planets and moons obviously always rotate on themselves and around each other.
Easy fix:
light.shadowBias = 0.001;
instead of
light.shadowBias = -0.001;
I've seen examples on the web using negative bias. I'm not sure if that is correct behaviour.
Good luck!
UPDATE
You will see some shadow acne at the light terminator on the sphere. This appears to be an issue with three.js's shadow mapping methods and is due for an update AFAIK.
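For reference, in current three.js releases those legacy properties have moved onto light.shadow; a sketch of the equivalent setup (assuming a recent release) looks like this:
renderer.shadowMap.enabled = true;        // replaces renderer.shadowMapEnabled

var light = new THREE.SpotLight(0xffffff);
light.castShadow = true;
light.shadow.bias = 0.001;                // small positive bias, as in the fix above
light.shadow.camera.near = 1;             // replaces shadowCameraNear
light.shadow.camera.far = 100;            // replaces shadowCameraFar
light.shadow.mapSize.set(2048, 2048);     // replaces shadowMapWidth/Height
scene.add(light);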
I've spent the better part of the night trying to make an export tool that quickly lets me texture cubes and export them to a game I'm making, but for some reason I can't make my cube cover the entire 128x128 width and height that I want it to have.
I have the following code:
function init(){
    if( Detector.webgl ){
        renderer = new THREE.WebGLRenderer({
            antialias : false, // to get smoother output
            preserveDrawingBuffer : true // to allow screenshot
        });
        renderer.setClearColorHex( 0xBBBBBB, 1 );
    // uncomment if webgl is required
    //}else{
    //    Detector.addGetWebGLMessage();
    //    return true;
    }else{
        renderer = new THREE.CanvasRenderer();
    }
    renderer.setSize(128, 128);
    document.getElementById('container').appendChild(renderer.domElement);
    // add Stats.js - https://github.com/mrdoob/stats.js
    stats = new Stats();
    stats.domElement.style.position = 'absolute';
    stats.domElement.style.bottom = '0px';
    document.body.appendChild( stats.domElement );
    var zoom = 1.0;
    // create a scene
    scene = new THREE.Scene();
    // put a camera in the scene
    camera = new THREE.OrthographicCamera(WIDTH / -zoom, HEIGHT / zoom, WIDTH / zoom, HEIGHT / -zoom, -2000, 1000);
    //camera = new THREE.PerspectiveCamera(35, window.innerWidth / window.innerHeight, 1, 10000 );
    camera.position.set(0.45, 0.45, 0.45);
    camera.lookAt(scene.position);
    //camera.position.set(0, 0, 5);
    scene.add(camera);
    // create a camera control
    //cameraControls = new THREEx.DragPanControls(camera)
    // transparently support window resize
    THREEx.WindowResize.bind(renderer, camera);
    // allow 'p' to make screenshot
    THREEx.Screenshot.bindKey(renderer);
    // allow 'f' to go fullscreen where this feature is supported
    //if( THREEx.FullScreen.available() ){
    //    THREEx.FullScreen.bindKey();
    //    document.getElementById('inlineDoc').innerHTML += "- <i>f</i> for fullscreen";
    //}
    // here you add your objects
    // - you will most likely replace this part by your own
    //var geometry = new THREE.TorusGeometry( 1, 0.42 );
    var cubeSize = 128;
    var geometry = new THREE.CubeGeometry( cubeSize, cubeSize, cubeSize );
    var material = new THREE.MeshNormalMaterial();
    mesh = new THREE.Mesh( geometry, material );
    mesh.rotation.x = 0;
    mesh.rotation.y = 0;
    mesh.rotation.z = 0;
    scene.add( mesh );
}
I've been trying out different "zooms" but it still ends up either too big or too small.
The point with all this is to end up with a code that can generate something like this:
https://dl.dropboxusercontent.com/u/5256694/cube_ex.png
What am I doing wrong?
Kind Regards
Hiam
Instead of thinking about the parameters of THREE.OrthographicCamera as "zoom" levels, you should think of them as coordinates of boundary planes for what the camera is able to see.
Also see the answer to Three.js - Orthographic camera for more details about using orthographic cameras in Three.js.
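A sketch of that idea, using the numbers from your question (the exact fit also depends on the camera angle; for the tilted view in your screenshot the cube's projection is wider than its edge length, so an extra scale factor of roughly sqrt(2), or a matching camera.zoom, may be needed for the cube to fill the frame exactly):
// The arguments are world-space boundary planes, not zoom factors.
var viewSize = 128; // world units that should be visible across the 128px canvas
camera = new THREE.OrthographicCamera(
    -viewSize / 2, viewSize / 2,  // left, right
     viewSize / 2, -viewSize / 2, // top, bottom
    -2000, 1000                   // near, far (as in your code)
);
camera.position.set(0.45, 0.45, 0.45);
camera.lookAt(scene.position);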