html5 canvas large sprite sheet image animation performance optimization - animation

I am using the method from
http://www.williammalone.com/articles/create-html5-canvas-javascript-sprite-animation/
to render sprite sheet animation on canvas
and it works fine mostly
But when the sprite sheet image is large, (30000*500, 60 frames png file in my case) the performance is so bad especially on Android webview.
I think the image resource takes too much memory, but how do I optimize its memory usage? I've tried compressing the image with "ImageOptim", but it didn't make any noticeable difference.
Can someone give me some advice about performance optimization when rendering a large sprite sheet on canvas?

Use ImageBitmaps.
Your image is probably too big to be held in the graphics card's memory; this means that every time it is drawn it has to be moved there again, which may make things really slow indeed.
ImageBitmap objects let you keep the raw bitmap data ready to be used by the GPU with no extra processing.
So at init, you'd prepare your ImageBitmaps and throw away the full image.
Now only small bitmaps ready to be put would get passed to the GPU.
// Cut `img` into a grid of fixed-size sprites and return them as an array
// of ImageBitmaps (row-major order, top-left first).
// The sprite factory logic here is a simple fixed-size grid; adapt it to
// your actual spritesheet layout.
async function initSprites( img, sprite_width = img.width, sprite_height = img.height ) {
  // Collect all the createImageBitmap promises first so the slicing can run
  // in parallel; the original awaited each slice sequentially, which
  // serialized the decoding work. Promise.all preserves the push order.
  const jobs = [];
  for (let y = 0; y < img.height; y += sprite_height) {
    for (let x = 0; x < img.width; x += sprite_width) {
      jobs.push(createImageBitmap(img, x, y, sprite_width, sprite_height));
    }
  }
  return Promise.all(jobs);
}
// Load `url` into an HTMLImageElement and resolve with the element once it
// has finished decoding; rejects on load error.
async function loadImage( url ) {
  const img = new Image();
  img.crossOrigin = 'anonymous'; // allow canvas/bitmap reads cross-origin
  // Attach the handlers before setting src, then hand back the promise.
  return new Promise((resolve, reject) => {
    img.onload = () => resolve(img);
    img.onerror = reject;
    img.src = url;
  });
}
// Boot the demo: slice the spritesheet into bitmaps, then animate a 4x3
// grid of sprites on the #canvas element at a throttled frame rate.
async function init() {
  const url = "https://upload.wikimedia.org/wikipedia/commons/b/be/SpriteSheet.png";
  const FPS = 10;
  const sprite_width = 132;
  const sprite_height = 97;
  // The decoded HTMLImageElement is passed straight through and never
  // stored, so only the small ImageBitmaps stay alive afterwards.
  const sprites = await initSprites( await loadImage( url ), sprite_width, sprite_height );
  const canvas = document.getElementById( 'canvas' );
  const ctx = canvas.getContext( '2d' );
  const frame_time = 1000 / FPS;
  let sprite_index = 0;
  let lastTime = performance.now();

  // rAF callback: advance the animation at most once every frame_time ms,
  // then re-arm itself.
  const update = (t) => {
    if (t - lastTime > frame_time) {
      lastTime = t;
      sprite_index = (sprite_index + 1) % sprites.length;
      draw();
    }
    requestAnimationFrame(update);
  };

  // Redraw the whole grid; each cell shows a frame offset from its
  // neighbor, reproducing the original spritesheet layout — but animated.
  function draw() {
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    let inner_index = 0;
    for (let row = 0; row < 3; row++) {
      for (let col = 0; col < 4; col++) {
        inner_index += 1;
        const sprite = sprites[(sprite_index + inner_index) % sprites.length];
        ctx.drawImage(sprite, col * sprite_width, row * sprite_height);
      }
    }
  }

  requestAnimationFrame(update);
}
// Install the createImageBitmap fallback first (for Safari and IE, which
// lack the native API), then start the demo; any async failure is logged
// instead of becoming an unhandled rejection.
monkeyPatchCreateImageBitmap();
init().catch( console.error );
// a very poor monkey patch, only allowing HTMLImageElement as source
// returns an HTMLCanvasElement
function monkeyPatchCreateImageBitmap() {
  // Native support present? Nothing to do.
  if (typeof window.createImageBitmap !== 'undefined') {
    return;
  }
  window.createImageBitmap = (img, sx = 0, sy = 0, sw = img.width, sh = img.height) => {
    // Copy the requested region onto a canvas of exactly that size.
    const cnv = document.createElement('canvas');
    cnv.width = sw;
    cnv.height = sh;
    cnv.getContext('2d').drawImage(img, sx, sy, sw, sh, 0, 0, sw, sh);
    // Mimic ImageBitmap.close() by releasing the canvas backing store.
    cnv.close = () => (cnv.width = 0);
    return Promise.resolve(cnv);
  };
}
<canvas id="canvas" width="528" height="291"></canvas>

Related

Three.js Move an object to front of camera

Hello I'm trying to move an object to front of camera, and when it reached to target position, I want it to stop. but it doesn't work.
// Step `object` toward the point that screen coords (mX, mY) unproject to,
// re-running itself on a 10ms timer until the camera-object distance is
// <= 200 (or `keepOut` is cleared), then fires objectClickHandler.
// NOTE(review): depends on globals defined elsewhere (camera, controls,
// lastDistance, keepOut, indexForZoom, objectClickHandler) — behavior
// depends on their prior state.
function objectToCamera(mX, mY, object)
{
var vector = new THREE.Vector3(mX, mY, 1);
// Unproject the point into world space, then turn it into the direction
// from the object toward that point.
vector.unproject(camera);
vector.sub(object.position);
// Straight-line distance from the object to the camera.
var dx = object.position.x - camera.position.x;
var dy = object.position.y - camera.position.y;
var dz = object.position.z - camera.position.z;
var distance = Math.sqrt(dx*dx + dy*dy + dz*dz);
// If the distance started growing, abort the approach on the next tick.
if(lastDistance < distance && lastDistance != -1)
keepOut = -1;
lastDistance = distance;
setTimeout(function(){
if( distance > 200 && keepOut == 1)
{
// Step size ramps up with each iteration, capped at 15 units, and is
// clamped so the final step lands just inside the 200 threshold.
var amount = (1)*(indexForZoom/3);
amount = (amount>15) ? 15 : (1)*(indexForZoom/3);
if(distance - amount < 200)
amount = (distance-200)+1;
indexForZoom++;
// NOTE(review): translateZ moves along the object's LOCAL Z axis, not
// necessarily toward the camera — a likely reason the distance grows
// instead of shrinking on one side of the scene.
object.translateZ(amount);
controls.target.addVectors(controls.target,vector.setLength(amount));
objectToCamera(mX, mY, object)
}
else
{
// stopForZoom = 1;
keepOut = -1;
objectClickHandler(object.name, object);
}
}, 10);
}
I'm checking the distance between the camera and the object, and when the target distance has been reached I'm letting it stop, but it doesn't work.
In coordinates, if i'm in positive X coordinates, distance is decreasing, and otherwise, distance is increasing.
I think, in my codes, distance should be decreasing always, but it is not.
Please help. Thanks.
you can use object.position.lerp(target, amount) to move an object toward target. Amount is a value from 0 to 1 with 1 = 100% all the way to target and 0.5 = 50% way to target.
If you want to move at a fixed speed then you can get the distance to the target
distance = object.position.distanceTo(target);
Say you want a max of 0.1 units per interation. then
moveSpeed = 0.1;
distance = object.position.distanceTo(target);
amount = Math.min(moveSpeed, distance) / distance;
object.position.lerp(target, amount)
All that's left is for you to choose a target.
The position in front of the camera is
const distanceFromCamera = 3; // 3 units
const target = new THREE.Vector3(0, 0, -distanceFromCamera);
target.applyMatrix4(camera.matrixWorld);
So for example if you move the camera (drag with mouse, use scrollwheel). Note: in the code the speed is adjusted to be frame rate independent.
// Demo: a cube continuously lerps toward a point 3 units in front of the
// camera; orbit the camera and the cube follows at a capped speed.
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const fov = 45;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 1000;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.set(0, 10, 20);
// Orbit controls let the user move the camera and watch the cube follow.
const controls = new THREE.OrbitControls(camera, canvas);
controls.target.set(0, 0, 0);
controls.update();
const scene = new THREE.Scene();
scene.background = new THREE.Color('lightblue');
// Single directional light; block scope keeps the setup vars local.
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(0, 10, 0);
light.target.position.set(-5, 0, 0);
scene.add(light);
scene.add(light.target);
}
// Reference grid, dropped below the origin.
const gridHelper = new THREE.GridHelper(100, 10);
scene.add(gridHelper);
gridHelper.position.set(0, -5, 0);
// The cube that will be kept in front of the camera.
const cube = new THREE.Mesh(
new THREE.BoxBufferGeometry(1, 1, 1),
new THREE.MeshPhongMaterial({color: 'red'}),
);
scene.add(cube);
// Resize the drawing buffer to match the displayed CSS size; returns
// whether a resize happened so the caller can refresh the camera aspect.
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
let then = 0;
function render(now) {
now *= 0.001; // convert to seconds
// Per-frame delta keeps movement speed frame-rate independent.
const deltaTime = now - then;
then = now;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
cube.rotation.x = now;
cube.rotation.y = now * 1.1;
// move cube in front of camera
{
const distanceFromCamera = 3; // 3 units
// A point on the camera's local -Z axis, transformed into world space.
const target = new THREE.Vector3(0, 0, -distanceFromCamera);
target.applyMatrix4(camera.matrixWorld);
const moveSpeed = 15; // units per second
const distance = cube.position.distanceTo(target);
if (distance > 0) {
// lerp by min(speed*dt, distance)/distance so we never overshoot.
const amount = Math.min(moveSpeed * deltaTime, distance) / distance;
cube.position.lerp(target, amount);
cube.material.color.set('green');
} else {
cube.material.color.set('red');
}
}
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
#c { width: 100vw; height: 100vh; display: block; }
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r112/build/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r112/examples/js/controls/OrbitControls.js"></script>
<canvas id="c"></canvas>
Note, you might want to call camera.updateMatrixWorld() before all that math to make sure the target isn't one frame late.
If the object is in a hierarchy then there's more to do. You can do the math, or you can just attach the object to the scene and then attach it back to its place in the hierarchy
const parent = object.parent;
// move object to scene without changing its world orientation
scene.attach(object);
// do stuff above
// move object to parent without changing its world orientation
parent.attach(object);

How to use compressedTexSubImage2D with threejs Texture

I try to make a TextureAtlas for threejs using compressedTexture but I cant find a way to get the correct format/internal format
The error I get is : glCompressedTexSubImage2D: format does not match internalformat.
I load the compressed texture from a THREE.KTXLoader and feed the update function of TextureAtlas with the mipmaps[0].data parsed from KTXLoader
The class is composed in 3 part :
constructor : setup the basic info of the texture
setRenderer( renderer ) : sets up the renderer and utils for future use
setSize : setup the size of my texture
update : update a part of the texture with a compressed texture
Here the full class :
// A THREE.Texture subclass whose GL storage is allocated once (setSize)
// and then updated region-by-region with pre-compressed (S3TC/DXT5) data
// via raw WebGL calls, bypassing three.js's normal upload path.
class TextureAtlas extends THREE.Texture{
constructor() {
super()
this.magFilter = THREE.LinearFilter;
this.minFilter = THREE.LinearMipMapLinearFilter;
// Seed with a 1x1 placeholder so three.js creates a backing GL texture.
const canvas = document.createElement('canvas');
canvas.width = 1;
canvas.height = 1;
const ctx = canvas.getContext('2d');
const imageData = ctx.createImageData(1, 1);
this.image = imageData
this.generateMipmaps = false
this.flipY = false
// this.unpackAlignment = 1
this.format = THREE.RGBAFormat
this.needsUpdate = true
this.isUpdatableTexture = true
}
// Grab the raw WebGL context plus the s3tc extension from the renderer.
// Must be called before setSize()/update().
setRenderer( renderer ) {
this.renderer = renderer;
this.gl = this.renderer.getContext()
this.ct = this.gl.getExtension("WEBGL_compressed_texture_s3tc");
this.utils = THREE.WebGLUtils(this.gl, this.renderer.extensions)
}
// (Re)allocate the GL texture storage at width x height.
// NOTE(review): storage is allocated with an UNCOMPRESSED internalformat
// (this.format = RGBA) while update() uploads DXT5-compressed blocks —
// that mismatch is the likely source of the reported
// "format does not match internalformat" error.
setSize( width, height ) {
if( width === this.width && height === this.height ) return;
if(!this.renderer){ return }
var textureProperties = this.renderer.properties.get( this );
if( !textureProperties.__webglTexture ) return;
this.width = width;
this.height = height;
// Save/restore the currently bound texture so three.js state tracking
// is not disturbed.
var activeTexture = this.gl.getParameter( this.gl.TEXTURE_BINDING_2D );
this.gl.bindTexture( this.gl.TEXTURE_2D, textureProperties.__webglTexture );
if( !textureProperties.__webglTexture ) this.width = null;
this.gl.texImage2D(
this.gl.TEXTURE_2D,
0,
this.utils.convert( this.format ),
width,
height,
0,
this.utils.convert( this.format ),
this.utils.convert( this.type ),
null
);
this.gl.bindTexture( this.gl.TEXTURE_2D, activeTexture );
this.needsUpdate = true
}
// Upload one compressed tile into the atlas (class-field arrow so `this`
// stays bound when passed as a callback).
update = ( pixels, x, y, width, height,format )=> {
// return
var textureProperties = this.renderer.properties.get( this );
if( !textureProperties.__webglTexture ) { return}
var activeTexture = this.gl.getParameter( this.gl.TEXTURE_BINDING_2D );
this.gl.bindTexture( this.gl.TEXTURE_2D, textureProperties.__webglTexture );
// WebGL 1 & 2:
// https://developer.mozilla.org/en-US/docs/Web/API/WebGLRenderingContext/compressedTexSubImage2D
// void gl.compressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, ArrayBufferView? pixels);
// NOTE(review): the x/y offsets are hard-coded to 0 and the `format`
// parameter is ignored in favor of COMPRESSED_RGBA_S3TC_DXT5_EXT.
this.gl.compressedTexSubImage2D(
this.gl.TEXTURE_2D, // texture 2d 3d or cubemap
0, // level of details
0,//x, // left offset
0,//y, // top offset
width, // texture width
height, // texture height
this.utils.convert(this.ct.COMPRESSED_RGBA_S3TC_DXT5_EXT), // format of the compressed texture
pixels // ArrayBufferView
);
this.gl.generateMipmap( this.gl.TEXTURE_2D );
this.gl.bindTexture( this.gl.TEXTURE_2D, activeTexture );
this.needsUpdate = true
}
}

Can I create point cloud from depth and rgb image?

I am new to 3js, I have 2 images ie RGB image and Depth image. Can I create a point cloud combining these two using 3js?
if yes then how?
To solve this problem I went the three.js examples and searched for "point". I checked each matching sample for one that had different colors for each particle. Then I clicked the "view source" button to checkout the code. I ended up starting with this example and looked at the source. It made it pretty clear how to make a set of points of different colors.
So after that I just needed to load the 2 images, RGB and Depth, make a grid of points, for each point set the Z position to the depth and the color to the color of the image.
I used my phone to take these RGB and Depth images using this app
To get the data I draw the image into a canvas and then call getImageData. That gives me the data in values from 0 to 255 for each channel, red, green, blue, alpha.
I then wrote a function to get a single pixel out and return the colors in the 0 to 1 range. Just to be safe it checks the boundaries.
// return the pixel at UV coordinates (0 to 1) in 0 to 1 values
function getPixel(imageData, u, v) {
  // Map UV space to integer pixel coordinates (|0 truncates toward zero).
  const px = (u * (imageData.width - 1)) | 0;
  const py = (v * (imageData.height - 1)) | 0;
  const outOfBounds =
    px < 0 || px >= imageData.width || py < 0 || py >= imageData.height;
  if (outOfBounds) {
    // Bad UVs read as transparent black.
    return [0, 0, 0, 0];
  }
  // Four bytes (RGBA) per pixel; normalize 0-255 down to 0-1.
  const base = (py * imageData.width + px) * 4;
  return Array.from(imageData.data.slice(base, base + 4)).map((c) => c / 255);
}
result
'use strict';
/* global THREE */
// Load `url` into an HTMLImageElement; resolves with the element once the
// image has loaded, rejects on error.
function loadImage(url) {
  const img = new Image();
  img.crossOrigin = "anonymous"; // needed so canvas reads are not tainted
  return new Promise((resolve, reject) => {
    img.onload = () => resolve(img);
    img.onerror = reject;
    img.src = url; // start the request after handlers are attached
  });
}
// Rasterize an image into an offscreen canvas and return its raw RGBA
// pixel data (an ImageData object).
function getImageData(img) {
  const canvas = document.createElement("canvas");
  canvas.width = img.width;
  canvas.height = img.height;
  const ctx = canvas.getContext("2d");
  ctx.drawImage(img, 0, 0);
  return ctx.getImageData(0, 0, canvas.width, canvas.height);
}
// return the pixel at UV coordinates (0 to 1) in 0 to 1 values
function getPixel(imageData, u, v) {
  const { width, height, data } = imageData;
  // Truncate (toward zero) into integer pixel coordinates.
  const col = (u * (width - 1)) | 0;
  const row = (v * (height - 1)) | 0;
  // Anything outside the bitmap reads as transparent black.
  if (col < 0 || col >= width || row < 0 || row >= height) {
    return [0, 0, 0, 0];
  }
  const start = (row * width + col) * 4;
  // Normalize the four RGBA bytes from 0-255 down to 0-1.
  return Array.from(data.slice(start, start + 4), (byte) => byte / 255);
}
// Build a colored point cloud from an RGB image plus a matching depth
// image: XY comes from the pixel grid, Z from the depth value, color from
// the RGB image. Rendered with THREE.Points and orbit controls.
async function main() {
// Load both images in parallel; index 0 = color, index 1 = depth.
const images = await Promise.all([
loadImage("https://i.imgur.com/UKBsvV0.jpg"), // RGB
loadImage("https://i.imgur.com/arPMCZl.jpg"), // Depth
]);
const data = images.map(getImageData);
const canvas = document.querySelector('canvas');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
const fov = 75;
const aspect = 2; // the canvas default
const near = 1;
const far = 4000;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 2000;
const controls = new THREE.OrbitControls(camera, canvas);
controls.target.set(0, 0, 0);
controls.update();
const scene = new THREE.Scene();
const rgbData = data[0];
const depthData = data[1];
// Sample every `skip`-th pixel so the cloud stays a manageable size.
const skip = 20;
const across = Math.ceil(rgbData.width / skip);
const down = Math.ceil(rgbData.height / skip);
const positions = [];
const colors = [];
const color = new THREE.Color();
const spread = 1000;      // world-space half-extent of the XY plane
const depthSpread = 1000; // world-space extent of the Z axis
const imageAspect = rgbData.width / rgbData.height;
// Walk a grid in UV space and emit one point per sample.
for (let y = 0; y < down; ++y) {
const v = y / (down - 1);
for (let x = 0; x < across; ++x) {
const u = x / (across - 1);
const rgb = getPixel(rgbData, u, v);
// The depth image's red channel is inverted: bright = near.
const depth = 1 - getPixel(depthData, u, v)[0];
// Map UV [0,1] to [-spread, +spread], flipping Y (image Y goes down).
positions.push(
(u * 2 - 1) * spread * imageAspect,
(v * -2 + 1) * spread,
depth * depthSpread,
);
colors.push( ...rgb.slice(0,3) );
}
}
const geometry = new THREE.BufferGeometry();
geometry.addAttribute( 'position', new THREE.Float32BufferAttribute( positions, 3 ) );
geometry.addAttribute( 'color', new THREE.Float32BufferAttribute( colors, 3 ) );
geometry.computeBoundingSphere();
// vertexColors makes each point use the per-vertex color attribute.
const material = new THREE.PointsMaterial( { size: 15, vertexColors: THREE.VertexColors } );
const points = new THREE.Points( geometry, material );
scene.add( points );
// Keep the drawing buffer in sync with the element's displayed size.
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body {
margin: 0;
}
canvas {
width: 100vw;
height: 100vh;
display: block;
}
<canvas></canvas>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r94/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r94/js/controls/OrbitControls.js"></script>

Dynamic videos from user inputs - How does it work?

Is there a way to generate a video frame's content by code?
For example :
I want to make a program that takes some string variable as input and then gives an output video of same text but now with special effects on the text itself.
It came to my mind the idea after seeing some projects that Facebook has done in their website. They have a database of pictures, comments, friends, events, likes and so on, that are related to a User. With all this information they do videos like for example Friend's day, a video that is completely related to the user that wants to post it.
How does these things work?
Is there some sort of software I can use? Can someone give me a place to start with?
I want to make a program that takes some string variable as input and then gives an output video of same text but now with special effects on the text itself.
You can render the text using canvas and record the result using the new Media Recorder API.
The effects can be made various ways of course, and the text content itself can be used against a database, or in conjunction with machine learning to structure/obtain some data relative to the text. This has a wide scope though, wider than what fits in here.
In any case, the generation of the video itself is shown below. It is a simple example where you can enter some text, it gets animated and recorded as video. When you click stop the actual video will show instead (which you can allow user to download). You can extend with other effects, storyboard features so you can record scenes and develop it to make an entire movie.
Do notice that all animation happens on the canvas in real-time. Typical video is recorded at 30 frames per second and not 60 frames per second, which is the typical frame rate for canvas. This is also something to consider in regard to smoothness. However, you can use this to your advantage by targeting 30 FPS to give your code a little more time to process. The API is supposed to support step-recording by not giving a frame rate as argument, but I have not been able to make this work as of yet.
There are currently limits with support of the API as not all browsers supports it yet.
Example
// un-refined code for demo use only
// set up globals, get key elements
var div = document.querySelector("div");    // status-text container
var btn = document.querySelector("button"); // start/stop toggle button
var txt = document.querySelector("input");  // text the user wants animated
var c = document.querySelector("canvas");   // visible canvas being recorded
// make 2d context without alpha channel
var ctx = c.getContext("2d", {alpha: false});
var isRecording = false;
// stream vars
var rec, stream, tref; // MediaRecorder, captured stream, rAF handle
// if clicked, lets go
btn.onclick = setup;
// Start (or stop) the animation + recording session. First click begins
// animating and recording the canvas; second click stops and builds the
// video via stop() -> done().
function setup() {
// toggle button text/status for demo
if (isRecording) {
this.disabled = true;
this.innerHTML = "Making video...";
stop();
return
}
else {
isRecording = true;
this.innerHTML = "Click to stop & show video";
}
// Setup canvas for text rendering
// Two offscreen canvases hold the same text with swapped colors; each
// grid tile flips between them during the animation.
var ct1 = document.createElement("canvas");
var ct2 = document.createElement("canvas");
var ctxText1 = ct1.getContext("2d");
var ctxText2 = ct2.getContext("2d");
var w, h;
w = ct1.width = ct2.width = c.width;
h = ct1.height = ct2.height = c.height;
setupCtx(ctxText1, "#333", "#FF9F05");
setupCtx(ctxText2, "#FF9F05", "#000");
// Fill the canvas with `bg`, then draw the rotated input text in `fg`.
function setupCtx(ctxText, bg, fg) {
ctxText.textAlign = "center";
ctxText.textBaseline = "middle";
ctxText.font = "92px sans-serif";
ctxText.fillStyle = bg;
ctxText.fillRect(0, 0, w, h);
ctxText.fillStyle = fg;
ctxText.translate(w/2, h/2);
ctxText.rotate(-0.5);
ctxText.fillText(txt.value.toUpperCase(), 0, 0);
}
// populate grid (see Square object below which handles the tiles)
var cols = 18,rows = 11,
cellWidth = (c.width / cols)|0,
cellHeight = (c.height / rows)|0,
grid = [],
len = cols * rows, y = 0, x,
index, hasActive = true;
for (; y < rows; y++) {
for (x = 0; x < cols; x++) {
grid.push(new Square(ctx, x * cellWidth, y * cellHeight, cellWidth, cellHeight, ct1, ct2, 0.01));
}
}
x = 0;
// start recording canvas to video
record();
//animation loop (refactor at will)
function loop() {
// Reset transform/alpha each frame — Square.update leaves both dirty.
ctx.setTransform(1, 0, 0, 1, 0, 0);
ctx.globalAlpha = 1;
ctx.clearRect(0, 0, w, h);
// trigger cells
// Sweep a diagonal wavefront across the grid, starting each tile's
// animation as it passes (x advances 0.25 column per frame).
for (y = 0; y < rows; y++) {
var gx = (x | 0) - y;
if (gx >= 0 && gx < cols) {
index = y * cols + gx;
grid[index].trigger();
}
}
x += 0.25;
// update all
for (var i = 0; i < grid.length; i++) grid[i].update();
tref = requestAnimationFrame(loop);
}
// setup media recorder to record canvas # 30 FPS (note: canvas = 60 FPS by def.)
function record() {
stream = c.captureStream(30);
rec = new MediaRecorder(stream);
rec.addEventListener('dataavailable', done);
rec.start();
requestAnimationFrame(loop);
}
}
// stop recorder and trigger dataavailable event
// Stopping the MediaRecorder flushes its buffered data, which fires
// 'dataavailable' and hands the recording to done().
function stop() {
rec.stop();
cancelAnimationFrame(tref); // stop loop as well
}
// finish up, show new shiny video instead of canvas
// Receives the recorded chunk from the 'dataavailable' event, wraps it in
// a webm Blob, and swaps the canvas out for an auto-playing <video>.
function done(e) {
var blob = new Blob([e.data], {"type": "video/webm"});
var video = document.createElement("video");
document.body.removeChild(c);
document.body.appendChild(video);
// play, Don
video.autoplay = video.loop = video.controls = true;
video.src = URL.createObjectURL(blob);
video.onplay = function() {
div.innerHTML = "Playing resulting video below:";
};
}
// stolen from a previous example I made (CC3.0-attr btw as always)
// this is included for the sole purpose of some animation.
// One tile of the grid: flips/fades between two source images at (x, y)
// with the given cell size and per-frame fade speed.
function Square(ctx, x, y, w, h, image, image2, speed) {
  Object.assign(this, {
    ctx,                // 2D context to draw into
    x,                  // top-left corner
    y,
    height: h,
    width: w,
    image,              // front face
    image2,             // back face
    first: true,        // which face is currently shown
    alpha: 0,           // current alpha for this instance
    speed,              // increment for alpha per frame
    triggered: false,   // is running
    done: false,        // has finished
  });
}
Square.prototype = {
trigger: function () { // start this rectangle
this.triggered = true
},
// Advance the fade and redraw this tile with a scale/rotation derived
// from the fade position. Leaves the context transform and globalAlpha
// modified — the caller (loop) resets them each frame.
update: function () {
if (this.triggered && !this.done) { // only if active
this.alpha += this.speed; // update alpha
// Bounce the fade direction at both ends of the [0, 1] range.
if (this.alpha <= 0 || this.alpha >= 1)
this.speed = -this.speed;
}
// t follows a half-sine of alpha: 0 -> 1 -> 0.
var t = Math.sin(Math.PI * this.alpha);
// When fully faded out, swap which source image is shown.
if (t <= 0) this.first = !this.first;
// NOTE(review): `this.color` is never assigned anywhere in this file,
// so this fillStyle assignment appears to be a leftover no-op.
this.ctx.fillStyle = this.color; // render this instance
this.ctx.globalAlpha = Math.max(0, Math.min(1, t));
// NOTE(review): cy uses this.width — presumably this.height was meant
// for a true vertical center; confirm before changing.
var cx = this.x + this.width * 0.5, // center position
cy = this.y + this.width * 0.5;
this.ctx.setTransform(t*0.5+0.5, 0, 0, t*0.5+0.5, cx, cy); // scale and translate
this.ctx.rotate(-Math.PI * (1 - t)); // rotate, 90° <- alpha
this.ctx.translate(-cx, -cy); // translate back
this.ctx.drawImage(this.first ? this.image : this.image2, this.x, this.y, this.width, this.height, this.x, this.y, this.width, this.height);
}
};
body {background: #555;color:#ddd;font:16px sans-serif}
<div>
<label>Enter text to animate: <input value="U R AWESOME"></label>
<button>Click to start animation + recording</button>
</div>
<canvas width=640 height=400></canvas>

Three.js geometry shape stretched?

I have a strange problem in Three.js where a geometry is very stretched, vertically.
I'm dynamically loading a picture, and creating a planeGeometry to place it in a scene.
The picture comes in at 196 x 190, and I use a power of 2 function we used before in Away3d to scale the image to the power of 2. Previously this was coming out at 512 x 256.
At this size the image doesn't appear at all, so it has to be scaled up to even show. It's possible this is way too big, and part of the image is sticking out the side of the cube that holds the panoramic image.
This picture is stretched out vertically, and not showing the full width or height image - It should be around the same size as the signboard that's partly covering it (that's what it does in Away3d)
Here is the code I'm using:
// Create a textured plane from `data`'s photo and place it in the panorama
// using pan/tilt/distance plus optional per-axis rotations.
// NOTE(review): depends on globals defined elsewhere: texture_placeholder,
// scene, panTiltToVector, powerOf2Down/powerOf2Up.
function photo(data){
console.log(data);
var texture = new THREE.Texture(texture_placeholder);
var photoMaterial = new THREE.MeshBasicMaterial({
map: texture,
overdraw : 0.5
});
var image = new Image();
image.crossOrigin = '';
image.onload = function() {
texture.image = this;
texture.needsUpdate = true;
texture.repeat.set( 1, 1 );
texture.mipmaps[ 0 ] = texture.image;
texture.generateMipmaps = true;
// NOTE(review): THREE.Texture exposes no width/height of its own —
// these reads are likely undefined; texture.image.width/height was
// probably intended, which would explain the stretched plane.
var w = powerOf2Down(texture.width);
var scale = w/texture.width;
var scaledHeight = texture.height * scale;
var h = powerOf2Up(scaledHeight);
console.log(w + '/' + h);
var photoGeom = new THREE.PlaneGeometry( w , h );
//photoGeom.scale.normalize().multiplyScalar(0.3);
var photoMesh = new THREE.Mesh( photoGeom, photoMaterial );
photo.add( photoMesh );
scene.add( photo );
};
image.src = 'http://cdn.beek.co/' + data.file.realName;
// NOTE(review): `photo` is reassigned here, shadowing the function's own
// name with a global Object3D that the onload handler above relies on.
photo = new THREE.Object3D();
photo.hotspotData = data;
photo.position.copy( panTiltToVector( data.pan, data.tilt, data.distance ) );
photo.rotation.x = data.rotationX != null ? -data.rotationX : 0;
photo.rotation.y = data.rotationY != null ? -data.rotationY : 0;
photo.rotation.z = data.rotationZ != null ? -data.rotationZ : 0;
photo.rotation.y += Math.PI;
//targetList.push( photo );
}
// Map a dimension to a power-of-two texture size using fixed thresholds;
// anything >= 800 is clamped to 1024.
function powerOf2Down(value)
{
  // First row whose limit exceeds `value` wins.
  const ladder = [
    [80, 64],
    [150, 128],
    [400, 256],
    [800, 512],
  ];
  for (const [limit, size] of ladder) {
    if (value < limit) return size;
  }
  return 1024;
}
// Round a dimension up to the next power-of-two texture size, clamped to
// a maximum of 1024.
function powerOf2Up(value)
{
  // Smallest ladder entry that is >= value; beyond 512 return 1024.
  for (const size of [64, 128, 256, 512]) {
    if (value <= size) return size;
  }
  return 1024;
}
Any clues on how to get the full image at a normal size gratefully received!
Was scaling down the wrong thing
photo.scale.normalize().multiplyScalar(0.1);

Resources