Three.js canvas with multiple scissored scenes; how do I print each scene to a PDF?

I have successfully copied this example:
https://threejs.org/examples/webgl_multiple_elements.html
My single-page web app is working well. I have three scenes, each displaying and behaving correctly. Now I want the user to be able to print a PDF document containing each of the three scenes (along with other text).
I have tried to print using jsPDF, with this:
scenes.forEach( function ( scene ) {
  // get the element that is a placeholder for where we want to
  // draw the scene
  const element = scene.userData.element; // this gets the DOM element
  // get its position relative to the page's viewport
  const rect = element.getBoundingClientRect();
  // check if it's offscreen. If so, skip it
  if ( rect.bottom < 0 || rect.top > renderer.domElement.clientHeight ||
       rect.right < 0 || rect.left > renderer.domElement.clientWidth ) {
    console.log( 'off screen' );
    return; // it's off screen
  }
  // set the viewport and scissor to the element's rectangle
  const width = rect.right - rect.left;
  const height = rect.bottom - rect.top;
  const left = rect.left;
  const bottom = renderer.domElement.clientHeight - rect.bottom;
  renderer.setViewport( left, bottom, width, height );
  renderer.setScissor( left, bottom, width, height );
  const camera = scene.userData.camera;
  renderer.render( scene, camera );
} );

// PRINTING FUNCTIONALITY
var button = document.getElementById( 'download' );
button.addEventListener( 'click', function () {
  const results = document.getElementById( 'c' ); // the WebGL canvas
  var imgData = results.toDataURL( 'image/jpeg', 1.0 );
  var pdf = new jsPDF();
  pdf.addImage( imgData, 'JPEG', 0, 0 );
  pdf.save( 'download.pdf' );
} );
All I get is a black background on the PDF.
I know that setScissor is probably to blame here, but I don't know what the workaround is.
Any help would be very much appreciated.
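For what it's worth, a common cause of a black toDataURL() capture (an assumption here, since the renderer setup isn't shown): by default the WebGL drawing buffer is cleared once a frame has been composited to the page, so reading the canvas back later returns empty pixels. Two hedged workarounds: create the renderer with preserveDrawingBuffer: true, or re-render synchronously inside the click handler immediately before reading the canvas. A minimal sketch of the latter, assuming the scenes.forEach(...) loop above is wrapped in a hypothetical renderAllScenes() function:

var button = document.getElementById( 'download' );
button.addEventListener( 'click', function () {
  // re-render synchronously so the drawing buffer still holds the pixels
  // when toDataURL() reads it (nothing has been composited yet)
  renderAllScenes(); // hypothetical: wraps the scenes.forEach(...) loop above
  var imgData = renderer.domElement.toDataURL( 'image/jpeg', 1.0 );
  var pdf = new jsPDF();
  // x, y, width, height are in mm on jsPDF's default A4 portrait page
  pdf.addImage( imgData, 'JPEG', 10, 10, 190, 107 );
  pdf.save( 'download.pdf' );
} );

// alternatively, when creating the renderer:
// renderer = new THREE.WebGLRenderer( { antialias: true, preserveDrawingBuffer: true } );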

Related

Applying user-selected texture to cube dynamically?

I'm creating a cube using BoxGeometry and trying to apply the texture that the user has selected.
I created a raycaster and use it inside the mousedown event.
If an object is under the cursor position, the user-selected texture gets applied.
So far everything works fine. Then I added an if-else condition: if the user selects a single image for the texture, that image is applied to the whole body of the cube, i.e. every side gets that image as its material.
But if the user selects more than one image, say three images, then only three sides of the cube get a material. This also works very well.
The real problem: when the user selects only three images, the remaining sides of the cube stay black, which is okay; only the sides with a material applied show up.
But if the user then selects a single image again and clicks on the cube to apply that single texture to the whole body, the raycaster throws:
"can't access property "side", the material is undefined"
This is the callback function I added to select image files:
function onUserMultiImageSelect(event) {
  materialArray.length = 0;
  length = multiFilesId.files.length;
  let multiImage = [];
  let userMultiImageUrl = [];
  for (let i = 0; i < length; i++) {
    multiImage[i] = multiFilesId.files.item(i);
    userMultiImageUrl[i] = URL.createObjectURL(multiImage[i]);
    materialArray.push(new THREE.MeshBasicMaterial({ map: new THREE.TextureLoader().load(userMultiImageUrl[i]), side: THREE.DoubleSide }));
  }
  isTextureSelect = true;
}
This is the callback function that gets called when I click on the object to apply the texture:
function onMouseButtonDown(event) {
  var rect = event.target.getBoundingClientRect();
  var mouseX = event.clientX - rect.left;
  var mouseY = event.clientY - rect.top;
  let raycaster = new THREE.Raycaster();
  let pointer = new THREE.Vector2();
  pointer.set((mouseX / window.innerWidth) * 2 - 1, -(mouseY / window.innerHeight) * 2 + 1);
  raycaster.setFromCamera(pointer, camera);
  const intersects = raycaster.intersectObjects(scene.children);
  if (intersects.length > 0) {
    var currentIntersectedObject = intersects[0];
    if (isTextureSelect) {
      if (length == 1) {
        var singleImage = multiFilesId.files.item(0);
        var singleImageUrl = URL.createObjectURL(singleImage);
        var texture = new THREE.TextureLoader().load(singleImageUrl);
        currentIntersectedObject.object.material = new THREE.MeshBasicMaterial({ map: texture });
      } else {
        currentIntersectedObject.object.material = materialArray;
      }
    }
  }
}
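A hedged aside on the error above: a BoxGeometry mesh has six material groups, so if materialArray holds fewer than six entries, the remaining faces point at an undefined material, which is what the raycaster trips over when it reads material.side. One possible fix (a sketch, not a tested solution) is to pad the array to six entries before assigning it:

// hypothetical helper: make sure a box always gets six materials
function padMaterials(materials) {
  const padded = materials.slice(0, 6);
  // unselected faces get a plain black material instead of undefined
  while (padded.length < 6) {
    padded.push(new THREE.MeshBasicMaterial({ color: 0x000000 }));
  }
  return padded;
}

// usage inside onMouseButtonDown:
// currentIntersectedObject.object.material = padMaterials(materialArray);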

Three.js / React-three-fiber | How to spread PointsMaterial evenly on a BufferGeometry (custom SphereGeometry) and compare vertices with image data

I'm trying to recreate the GitHub landing-page globe (visible at https://github.com/ when you're not logged in) with three.js and react-three-fiber, following the write-up GitHub published (https://github.blog/2020-12-21-how-we-built-the-github-globe/). My goal is to achieve the same effect, with the only difference being that I use three.js PointsMaterial and hide those points whose coordinates correspond to water on an image of the earth.
Sorry for any bad English or misspellings (English is not my native language), and this is my first Stack Overflow question too. If something is unclear or I wasn't specific enough, let me know and I'll try my best to correct it. Thanks in advance for any help!
My questions are:
how do you spread those three.js points along different latitudes, in this case from the south to the north pole?
how would you compare, for example, image color / alpha values with the points and decide whether each point should be visible (land) or not (water)?
I played around with the code I found in the GitHub write-up above for a few days, but can't figure out how to translate it so I could use it:
for (let lat = -90; lat <= 90; lat += 180 / rows) {
  const radius = Math.cos(Math.abs(lat) * DEG2RAD) * GLOBE_RADIUS; // especially this part: what would DEG2RAD and GLOBE_RADIUS mean?
  const circumference = radius * Math.PI * 2;
  const dotsForLat = circumference * dotDensity;
  for (let x = 0; x < dotsForLat; x++) {
    const long = -180 + x * 360 / dotsForLat;
    if (!this.visibilityForCoordinate(long, lat)) continue;
    // Setup and save circle matrix data
  }
}
Currently I manage to get some data from an image of our planet by creating a non-rendered canvas, creating a context for it, painting the image into it, and reading the values back with .getImageData(). I was able to create a halo by using a for loop for the vertex positions of the BufferGeometry, but I guess you'll see that in the image / code provided below.
Current progress Image
import { useEffect } from "react";
import * as THREE from "three";

// Had some trouble setting up loaders in Next.js, so I used three.js / react-three-fiber
const Box = () => {
  // Load the image and paint it to a non-rendered canvas to read its pixel values
  useEffect(() => {
    const textureLoader = new THREE.TextureLoader();
    textureLoader.load("/globe/earth.png", (texture) => {
      const width = texture.image.width;
      const height = texture.image.height;
      const img = texture.image;
      const canvas = document.createElement("canvas");
      const ctx = canvas.getContext("2d");
      canvas.width = width;
      canvas.height = height;
      ctx.scale(1, -1);
      ctx.drawImage(img, 0, 0, width, height * -1);
      const imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
      console.log(imgData);
    });
  }, []);

  // Creating geometry / material
  const count = 500;
  const vertices = new Float32Array(count * 3);

  // Looping to get values for the vertices
  for (let x = 0; x < count * 3; x++) {
    const value = Math.cos(Math.abs(x));
    vertices[x] = value;
  }

  const material = new THREE.PointsMaterial({ color: "white", size: 0.005 });
  const geometry = new THREE.BufferGeometry();
  geometry.setAttribute("position", new THREE.BufferAttribute(vertices, 3));

  // Returning to render on the canvas in the index file
  return <points material={material} geometry={geometry}></points>;
};

export default Box;
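For reference, here is a minimal sketch (under stated assumptions, not the actual GitHub implementation) of how that dot-distribution loop could feed a BufferGeometry position attribute. It assumes DEG2RAD = Math.PI / 180, GLOBE_RADIUS is the sphere radius, imgData is the ImageData captured in the useEffect above, and a visibilityForCoordinate() that treats opaque pixels as land:

const DEG2RAD = Math.PI / 180;  // degrees-to-radians factor from the GitHub snippet
const GLOBE_RADIUS = 1;         // assumed sphere radius
const rows = 180;               // number of latitude rings
const dotDensity = 12;          // dots per unit of circumference (tune to taste)

// hypothetical: sample the hidden canvas's ImageData; opaque pixels count as land
function visibilityForCoordinate(long, lat, imgData) {
  const px = Math.min(imgData.width - 1, Math.floor(((long + 180) / 360) * imgData.width));
  const py = Math.min(imgData.height - 1, Math.floor(((90 - lat) / 180) * imgData.height));
  const alpha = imgData.data[(py * imgData.width + px) * 4 + 3];
  return alpha > 0;
}

const positions = [];
for (let lat = -90; lat <= 90; lat += 180 / rows) {
  const radius = Math.cos(Math.abs(lat) * DEG2RAD) * GLOBE_RADIUS; // ring radius at this latitude
  const circumference = radius * Math.PI * 2;
  const dotsForLat = circumference * dotDensity;
  for (let x = 0; x < dotsForLat; x++) {
    const long = -180 + (x * 360) / dotsForLat;
    if (!visibilityForCoordinate(long, lat, imgData)) continue;
    // convert lat/long on the sphere surface to cartesian coordinates
    const phi = (90 - lat) * DEG2RAD;   // polar angle, 0 at the north pole
    const theta = (long + 180) * DEG2RAD;
    positions.push(
      -GLOBE_RADIUS * Math.sin(phi) * Math.cos(theta),
      GLOBE_RADIUS * Math.cos(phi),
      GLOBE_RADIUS * Math.sin(phi) * Math.sin(theta)
    );
  }
}

const geometry = new THREE.BufferGeometry();
geometry.setAttribute("position", new THREE.Float32BufferAttribute(positions, 3));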

Rotate webGL canvas to appear landscape-oriented on a portrait-oriented mobile phone

I'm using A-Frame and trying to accomplish this task: force the canvas to be rendered as "landscape" when a mobile device is in portrait orientation (i.e. device-width = 414px and device-height = 736px).
I have successfully accomplished this with the following steps:
camera.aspect = 736 / 414;
camera.updateProjectionMatrix();
renderer.setSize(736, 414);
In CSS:
.a-canvas {
  transform: rotate(90deg) translate(161px, 161px);
  height: 414px !important;
  width: 736px !important;
}
This all works great except for one major thing: I have 3D buttons in my scene, and when I go to click them they don't line up with the rotated canvas; their clickable positions remain where they were before the canvas was rotated.
I've tried setting matrixWorldNeedsUpdate = true on the scene's object3D along with updateWorldMatrix(), with no luck. I tried calling refreshObjects on the raycaster, with no luck. I tried rotating the scene and the camera, with no luck.
I'm not sure what else to do. Any help would be greatly appreciated!
ANSWER:
Thanks to Marquizzo and gman for the help. Here's the updated A-Frame source code (v1.0.4) that makes the raycaster handle this forced-landscape canvas properly:
// line: 66884
onMouseMove: (function () {
  var direction = new THREE.Vector3();
  var mouse = new THREE.Vector2();
  var origin = new THREE.Vector3();
  var rayCasterConfig = { origin: origin, direction: direction };

  return function (evt) {
    var bounds = this.canvasBounds;
    var camera = this.el.sceneEl.camera;
    var left;
    var point;
    var top;

    camera.parent.updateMatrixWorld();

    // Calculate mouse position based on the canvas element
    if (evt.type === 'touchmove' || evt.type === 'touchstart') {
      // Track the first touch for simplicity.
      point = evt.touches.item(0);
    } else {
      point = evt;
    }
    left = point.clientX - bounds.left;
    top = point.clientY - bounds.top;

    // mouse.x = (left / bounds.width) * 2 - 1;
    // mouse.y = -(top / bounds.height) * 2 + 1;

    // HAYDEN's CODE: flipping x and y coordinates to force landscape
    // --------------------------------------------------------------
    let clickX = (left / bounds.width) * 2 - 1;
    let clickY = -(top / bounds.height) * 2 + 1;
    mouse.x = -clickY;
    mouse.y = clickX;
    // --------------------------------------------------------------

    origin.setFromMatrixPosition(camera.matrixWorld);
    direction.set(mouse.x, mouse.y, 0.5).unproject(camera).sub(origin).normalize();
    this.el.setAttribute('raycaster', rayCasterConfig);
    if (evt.type === 'touchmove') { evt.preventDefault(); }
  };
})(),
A-Frame uses a Raycaster internally to determine if the spot you clicked hit an object. You can see in the three.js documentation that the raycaster needs the mouse x & y coordinates to determine where you clicked. Here's a working demo of that concept. However, with your setup, x, y turns into -y, x.
I think you'll have to write your own raycaster function that triggers on the click event instead of relying on the built-in A-Frame functionality, and then swap the x & y values:
function onClick(event) {
  let clickX = (event.clientX / window.innerWidth) * 2 - 1;
  let clickY = -(event.clientY / window.innerHeight) * 2 + 1;
  mouse.x = -clickY;
  mouse.y = clickX;
  // Then continue with raycaster.setFromCamera(), etc...
}
window.addEventListener('click', onClick, false);

GLTF model and interaction in Three.js

My JS skills could be improved, to say the least, but I'm struggling with this:
I can get my model to load OK into the scene but cannot seem to get the interaction working.
It's like I need to tie the glTF file into the raycaster. The code below is part of it; the full CodePen link is below the code.
class PickHelper {
  constructor() {
    this.raycaster = new THREE.Raycaster();
    this.pickedObject = null;
    this.pickedObjectSavedColor = 0;
  }
  pick(normalizedPosition, scene, camera, time) {
    // restore the color if there is a previously picked object
    if (this.pickedObject) {
      this.pickedObject.material.emissive.setHex(this.pickedObjectSavedColor);
      this.pickedObject = undefined;
    }
    this.raycaster.setFromCamera(normalizedPosition, camera);
    const intersectedObjects = this.raycaster.intersectObjects(scene.children);
    if (intersectedObjects.length) {
      this.pickedObject = intersectedObjects[0].object;
      this.pickedObjectSavedColor = this.pickedObject.material.emissive.getHex();
      this.pickedObject.material.emissive.setHex((time * 8) % 2 > 1 ? 0xFFFF00 : 0xFF0000);
      this.pickedObject.rotation.y += 0.1;
    }
  }
}
https://codepen.io/johneemac/pen/abzqdye << FULL code
Sorry: there's a cross-origin issue with the glTF file on CodePen, so it won't load, but hopefully you get the idea.
I'd super appreciate any help, thanks!
You have to perform the intersection test like so:
const intersectedObjects = this.raycaster.intersectObjects(scene.children, true);
Notice the second argument of intersectObjects(). It indicates that the raycaster should process the entire hierarchy of objects, which is necessary in the context of a loaded glTF asset.
three.js R112
It's not clear what you're trying to do. glTF files are collections of materials, animations, geometries, meshes, etc., so you can't "pick" a glTF file; you can only pick individual elements inside it. You could write some code so that when something is picked, it checks whether the picked thing is one of the meshes loaded from the glTF scene, and then picks every other thing loaded from that scene.
In any case,
You need to give the Raycaster a list of objects to select from. In the original example that was scene.children, which is just the list of Boxes added to the root of the scene. But when loading a glTF, unless you already know the structure of the file because you created the scene yourself, you'll need to find the things you want to be selectable and add them to a list you can pass to Raycaster.intersectObjects.
This code gets all the Mesh objects from the loaded glTF file:
let pickableMeshes = [];

// this is run after loading the glTF
// get a list of all the meshes in the scene
root.traverse((node) => {
  if (node instanceof THREE.Mesh) {
    pickableMeshes.push(node);
  }
});
Note that you could also pass true as the second argument to Raycaster.intersectObjects, as in raycaster.intersectObjects(scene.children, true). That's rarely what you want, though, as you likely have things in the scene you don't want the user to be able to select. For example, if you only wanted the user to be able to select the cars, then something like:
// get a list of all the meshes in the scene whose names start with "car"
root.traverse((node) => {
  if (node instanceof THREE.Mesh && (/^car/i).test(node.name)) {
    pickableMeshes.push(node);
  }
});
The PickHelper class you used changed the color of the material on each Box, but that only works because each Box has its own material; if the Boxes shared materials, then changing the material color would change all the Boxes.
When loading a different glTF, most of the objects share the same material, so to highlight just one you need to swap the material used by that object, or choose some other method to highlight the selected thing.
function main() {
  const canvas = document.querySelector('#c');
  const renderer = new THREE.WebGLRenderer({canvas});

  const fov = 60;
  const aspect = 2; // the canvas default
  const near = 0.1;
  const far = 200;
  const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
  camera.position.z = 30;

  const scene = new THREE.Scene();
  scene.background = new THREE.Color('white');

  // put the camera on a pole (parent it to an object)
  // so we can spin the pole to move the camera around the scene
  const cameraPole = new THREE.Object3D();
  scene.add(cameraPole);
  cameraPole.add(camera);

  {
    const color = 0xFFFFFF;
    const intensity = 1;
    const light = new THREE.DirectionalLight(color, intensity);
    light.position.set(-1, 2, 4);
    camera.add(light);
  }

  function frameArea(sizeToFitOnScreen, boxSize, boxCenter, camera) {
    const halfSizeToFitOnScreen = sizeToFitOnScreen * 0.5;
    const halfFovY = THREE.Math.degToRad(camera.fov * .5);
    const distance = halfSizeToFitOnScreen / Math.tan(halfFovY);
    // compute a unit vector that points in the direction the camera is now
    // in the xz plane from the center of the box
    const direction = (new THREE.Vector3())
        .subVectors(camera.position, boxCenter)
        .multiply(new THREE.Vector3(1, 0, 1))
        .normalize();

    // move the camera to a position distance units away from the center
    // in whatever direction the camera was from the center already
    camera.position.copy(direction.multiplyScalar(distance).add(boxCenter));

    // pick some near and far values for the frustum that
    // will contain the box.
    camera.near = boxSize / 100;
    camera.far = boxSize * 100;

    camera.updateProjectionMatrix();

    // point the camera to look at the center of the box
    camera.lookAt(boxCenter.x, boxCenter.y, boxCenter.z);
  }

  let pickableMeshes = [];

  {
    const gltfLoader = new THREE.GLTFLoader();
    gltfLoader.load('https://threejsfundamentals.org/threejs/resources/models/cartoon_lowpoly_small_city_free_pack/scene.gltf', (gltf) => {
      const root = gltf.scene;
      scene.add(root);

      // compute the box that contains all the stuff
      // from root and below
      const box = new THREE.Box3().setFromObject(root);
      const boxSize = box.getSize(new THREE.Vector3()).length();
      const boxCenter = box.getCenter(new THREE.Vector3());

      // set the camera to frame the box
      frameArea(boxSize * 0.7, boxSize, boxCenter, camera);

      // get a list of all the meshes in the scene
      root.traverse((node) => {
        if (node instanceof THREE.Mesh) {
          pickableMeshes.push(node);
        }
      });
    });
  }

  function resizeRendererToDisplaySize(renderer) {
    const canvas = renderer.domElement;
    const width = canvas.clientWidth;
    const height = canvas.clientHeight;
    const needResize = canvas.width !== width || canvas.height !== height;
    if (needResize) {
      renderer.setSize(width, height, false);
    }
    return needResize;
  }

  class PickHelper {
    constructor() {
      this.raycaster = new THREE.Raycaster();
      this.pickedObject = null;
      this.pickedObjectSavedMaterial = null;
      this.selectMaterial = new THREE.MeshBasicMaterial();
      this.infoElem = document.querySelector('#info');
    }
    pick(normalizedPosition, scene, camera, time) {
      // restore the material if there is a picked object
      if (this.pickedObject) {
        this.pickedObject.material = this.pickedObjectSavedMaterial;
        this.pickedObject = undefined;
        this.infoElem.textContent = '';
      }

      // cast a ray through the frustum
      this.raycaster.setFromCamera(normalizedPosition, camera);
      // get the list of objects the ray intersected
      const intersectedObjects = this.raycaster.intersectObjects(pickableMeshes);
      if (intersectedObjects.length) {
        // pick the first object. It's the closest one
        this.pickedObject = intersectedObjects[0].object;
        // save its material
        this.pickedObjectSavedMaterial = this.pickedObject.material;
        this.pickedObject.material = this.selectMaterial;
        // flash the select material between red and yellow
        this.selectMaterial.color.setHex((time * 8) % 2 > 1 ? 0xFFFF00 : 0xFF0000);
        this.infoElem.textContent = this.pickedObject.name;
      }
    }
  }

  const pickPosition = {x: 0, y: 0};
  const pickHelper = new PickHelper();
  clearPickPosition();

  function render(time) {
    time *= 0.001; // convert to seconds

    if (resizeRendererToDisplaySize(renderer)) {
      const canvas = renderer.domElement;
      camera.aspect = canvas.clientWidth / canvas.clientHeight;
      camera.updateProjectionMatrix();
    }

    cameraPole.rotation.y = time * .1;

    pickHelper.pick(pickPosition, scene, camera, time);

    renderer.render(scene, camera);

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);

  function getCanvasRelativePosition(event) {
    const rect = canvas.getBoundingClientRect();
    return {
      x: event.clientX - rect.left,
      y: event.clientY - rect.top,
    };
  }

  function setPickPosition(event) {
    const pos = getCanvasRelativePosition(event);
    pickPosition.x = (pos.x / canvas.clientWidth) * 2 - 1;
    pickPosition.y = (pos.y / canvas.clientHeight) * -2 + 1; // note we flip Y
  }

  function clearPickPosition() {
    // unlike the mouse which always has a position
    // if the user stops touching the screen we want
    // to stop picking. For now we just pick a value
    // unlikely to pick something
    pickPosition.x = -100000;
    pickPosition.y = -100000;
  }

  window.addEventListener('mousemove', setPickPosition);
  window.addEventListener('mouseout', clearPickPosition);
  window.addEventListener('mouseleave', clearPickPosition);

  window.addEventListener('touchstart', (event) => {
    // prevent the window from scrolling
    event.preventDefault();
    setPickPosition(event.touches[0]);
  }, {passive: false});

  window.addEventListener('touchmove', (event) => {
    setPickPosition(event.touches[0]);
  });

  window.addEventListener('touchend', clearPickPosition);
}
main();
body { margin: 0; }
#c { width: 100vw; height: 100vh; display: block; }
#info { position: absolute; left: 0; top: 0; background: black; color: white; padding: 0.5em; font-family: monospace; }
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r112/build/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r112/examples/js/loaders/GLTFLoader.js"></script>
<canvas id="c"></canvas>
<div id="info"></div>

WebGL single frame "screenshot" of webGL

I tried searching for something like this, but I've had no luck. I'm trying to open a new tab with a screenshot of the current state of my WebGL image. Basically, it's a 3D model with the ability to change which objects are displayed, the color of those objects, and the background color. Currently, I am using the following:
var screenShot = window.open(renderer.domElement.toDataURL("image/png"), 'DNA_Screen');
This line succeeds in opening a new tab with a current image of my model, but it does not display the current background color. It also does not properly display the tab name; instead, the tab name is always "PNG 1024x768".
Is there a way to change my window.open such that the background color is shown? The proper tab name would be great as well, but the background color is my biggest concern.
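A hedged guess at why the background color is missing (the renderer setup isn't shown in the question): toDataURL() only captures what is actually drawn into the WebGL buffer, so a background applied via CSS, or a transparent clear color, won't appear in the capture. Making the renderer draw the background itself might look like this:
// draw the background into the WebGL buffer so toDataURL() captures it
renderer.setClearColor(0x336699, 1); // opaque clear color instead of a CSS background
// or, in more recent three.js versions:
// scene.background = new THREE.Color(0x336699);
renderer.render(scene, camera);
var screenShot = window.open(renderer.domElement.toDataURL("image/png"), 'DNA_Screen');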
If you open the window with no URL you can access its entire DOM directly from the JavaScript that opened the window.
var w = window.open('', '');
You can then set or add anything you want
w.document.title = "DNA_screen";
w.document.body.style.backgroundColor = "red";
And add the screenshot
var img = new Image();
img.src = someCanvas.toDataURL();
w.document.body.appendChild(img);
It is much longer than your one-liner, but you can set the background color of the rectangle through the 2D context:
printCanvas(renderer.domElement.toDataURL("image/png"), width, height,
    function (url) { window.open(url, '_blank'); });

// from THREEx.screenshot.js
function printCanvas(srcUrl, dstW, dstH, callback) {
  // to compute the width/height while keeping aspect
  var cpuScaleAspect = function (maxW, maxH, curW, curH) {
    var ratio = curH / curW;
    if (curW >= maxW && ratio <= 1) {
      curW = maxW;
      curH = maxW * ratio;
    } else if (curH >= maxH) {
      curH = maxH;
      curW = maxH / ratio;
    }
    return { width: curW, height: curH };
  }
  // callback once the image is loaded
  var onLoad = function () {
    // init the canvas
    var canvas = document.createElement('canvas');
    canvas.width = dstW;
    canvas.height = dstH;
    var context = canvas.getContext('2d');
    context.fillStyle = "black";
    context.fillRect(0, 0, canvas.width, canvas.height);
    // scale the image while preserving the aspect
    var scaled = cpuScaleAspect(canvas.width, canvas.height, image.width, image.height);
    // actually draw the image on the canvas
    var offsetX = (canvas.width - scaled.width) / 2;
    var offsetY = (canvas.height - scaled.height) / 2;
    context.drawImage(image, offsetX, offsetY, scaled.width, scaled.height);
    // notify the caller of the url
    callback && callback(canvas.toDataURL("image/png")); // dump the canvas to a URL
  }
  // Create new Image object
  var image = new Image();
  image.onload = onLoad;
  image.src = srcUrl;
}
