I use an animation to move a camera view around my object and clamp the animation at its final position. I then try to duplicate the animated camera's state back into my default camera so the cameraControls can pick up where the animation ended. But the default camera is not positioned or oriented where the animation left off. Both cameras are THREE.OrthographicCamera.
AnimationMixer.addEventListener('finished', function(e){
// activeCamera is what is used in render( scene, activeCamera )
// activeCamera currently hold the camera used to animate the movement
// of the camera around to the back of my scene
console.log('anime fini callback');
let p = new THREE.Vector3(0,0,0);
activeCamera.getWorldPosition(p);
let q = new THREE.Quaternion();
activeCamera.getWorldQuaternion(q);
let s = new THREE.Vector3(0,0,0);
activeCamera.getWorldScale(s);
let d = new THREE.Vector3(0,0,0);
activeCamera.getWorldDirection(d);
let z = activeCamera.zoom;
let r = activeCamera.rotation;
console.log('anicam position: ' + p.x + ', ' + p.y + ', ' + p.z);
console.log(' quaternion: ' + q.x + ', ' + q.y + ', ' + q.z + ', ' + q.w);
console.log(' rotation: ' + r.x + ', ' + r.y + ', ' + r.z);
console.log(' scale: ' + s.x + ', ' + s.y + ', ' + s.z);
console.log(' zoom: ' + z);
console.log(' direction: ' + d.x + ', ' + d.y + ', ' + d.z);
// defaultCamera is camera tied to cameraControls used for standard
// rotating, zooming, panning, etc.
console.log('switching to camera: [default]');
defaultCamera.copy(activeCamera);
activeCamera = defaultCamera;
// you would think the above code would be enough, but no
// so, I tried...
//activeCamera.position.set(p.x, p.y, p.z);
//let t = new THREE.Vector3(that.controls._target.x, that.controls._target.y, that.controls._target.z);
//that.controls.setLookAt(p.x, p.y, p.z, t.x, t.y, t.z, true);
//that.controls.setPosition(p.x, p.y, p.z, true);
//activeCamera.setRotationFromQuaternion(q);
//that.controls.updateCameraUp();
});
The camera/animation I am using to rotate the camera view comes from a glTF file. Could this be a problem with a +Z difference between the cameras? Both cameras show a rotation order of "XYZ".
Any suggestions, please.
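For reference, Object3D.copy() copies the local transform, so if the glTF camera is parented under other nodes, its local and world transforms differ. A minimal sketch of copying the world transform instead (variable names follow the snippet above; the controls re-sync line is an assumption about whatever controls library is in use):
// sketch: copy the animated camera's WORLD transform into the default camera
activeCamera.updateMatrixWorld(true);   // make sure matrixWorld is current (the render loop usually does this already)
activeCamera.matrixWorld.decompose(
    defaultCamera.position,
    defaultCamera.quaternion,
    defaultCamera.scale
);
// orthographic projection settings are separate from the transform
defaultCamera.zoom = activeCamera.zoom;
defaultCamera.left = activeCamera.left;
defaultCamera.right = activeCamera.right;
defaultCamera.top = activeCamera.top;
defaultCamera.bottom = activeCamera.bottom;
defaultCamera.near = activeCamera.near;
defaultCamera.far = activeCamera.far;
defaultCamera.updateProjectionMatrix();
defaultCamera.updateMatrixWorld(true);
// the controls keep their own internal state and usually need a re-sync afterwards,
// e.g. controls.update();   // hypothetical call, depends on the controls library
activeCamera = defaultCamera;   // hand rendering back to the default camera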
The problem is as follows:
When the object is at a positive angle and a spot that resolves to a negative angle is clicked, it follows the numbers and makes a counterclockwise rotation instead of the clockwise rotation it should make, and vice versa. It doesn't look like the rotation of a unit in an RTS game. How can I fix it?
var object = document.createElement("div");
object.style.backgroundColor = "red";
object.style.width = "50px";
object.style.height = "50px";
object.style.position = "absolute";
object.style.transition = "transform 3s";
document.body.appendChild(object);
function action(event){
let mousePosition = {
x : event.clientX - (object.offsetWidth/2),
y : event.clientY - (object.offsetHeight/2)
};
let unitBoundingRect = object.getBoundingClientRect();
let unitCenter = {
x: unitBoundingRect.left + unitBoundingRect.width/2,
y: unitBoundingRect.top + unitBoundingRect.height/2
}
let angle = Math.atan2(event.pageX - unitCenter.x, - (event.pageY - unitCenter.y))*(180 / Math.PI);
object.style.transform = "translate(" + mousePosition.x + "px," + mousePosition.y + "px) rotate(" + angle + "deg)";
object.innerText = Math.floor(angle);
}
window.addEventListener("click",action,false);
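One way to get the RTS-style turn is to keep a running rotation and only ever add the shortest signed angle difference to it, so the CSS transition never swings the long way around. A minimal sketch of that idea, reusing the names from the snippet above (currentRotation and shortestRotation are new helpers introduced here):
var currentRotation = 0; // the angle actually written into the transform so far

function shortestRotation(targetAngle) {
    // signed difference from the current angle, normalized into (-180, 180]
    var delta = (targetAngle - currentRotation) % 360;
    if (delta > 180) delta -= 360;
    if (delta <= -180) delta += 360;
    currentRotation += delta; // may grow past +/-180, which is fine for rotate()
    return currentRotation;
}

// inside action(event), use the unwound angle instead of the raw atan2 result:
// object.style.transform = "translate(" + mousePosition.x + "px," + mousePosition.y +
//     "px) rotate(" + shortestRotation(angle) + "deg)";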
I'm working on a project using D3.js v5 that takes a CSV file as input and returns a workflow diagram; each part works fine on its own.
My problem is that when I fit the workflow with a Fit button and then use the mouse wheel to zoom in or out, the view jumps back to the previous zoom and position.
After some research I've found that since D3 v4+, "Zoom behaviours no longer store the active zoom transform (i.e., the visible region; the scale and translate) internally. The zoom transform is now stored on any elements to which the zoom behaviour has been applied." (link)
That being said, I cannot make it work, and that's why I'm asking for your help.
The code is as follows:
document.getElementById('fitBTN').addEventListener('click', zoomFit);
function zoomFit() {
let bounds = document.getElementById('svgContent').getBBox();
let parent = document.getElementById('svgContent').parentElement;
let fullWidth = parent.clientWidth,
fullHeight = parent.clientHeight;
let width = bounds.width,
height = bounds.height;
let midX = bounds.x + width / 2,
midY = bounds.y + height / 2;
if (width == 0 || height == 0) return; // nothing to fit
let scale = 0.95 / Math.max(width / fullWidth, height / fullHeight);
let translate = [
fullWidth / 2 - scale * midX,
fullHeight / 2 - scale * midY
];
document
  .getElementById('svgContent')
  .setAttribute('transform', 'translate(' + translate[0] + ',' + translate[1] + ') scale(' + scale + ')');
}
svg
.call(
zoom()
.on('zoom', () => {
document
  .getElementById('svgContent')
  .setAttribute('transform', 'translate(' + currentEvent.transform.x * 0.125 + ',' + currentEvent.transform.y * 0.125 + ') scale(' + currentEvent.transform.k + ')');
})
.scaleExtent([0.1, 1])
)
.on('dblclick.zoom', false)
.on('wheel', function() {
currentEvent.preventDefault();
})
.on('wheel', false);
I believe I need some variable that can read the state of the transform/translate, but how?
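For reference, since v4 the active transform is stored on the zoomed element itself, and it can be read back with d3.zoomTransform and written back through the behaviour. A minimal sketch, assuming the same svg selection and a zoom behaviour instance (here called zoomBehaviour, a hypothetical name) bound via svg.call():
// read the transform the zoom behaviour last stored on the element
var current = zoomTransform(svg.node());
console.log(current.k, current.x, current.y);

// write a new transform back through the behaviour so its bookkeeping,
// the stored transform and any 'zoom' listeners all stay in sync
svg.call(zoomBehaviour.transform, zoomIdentity.translate(100, 50).scale(0.5));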
The answer provided by @AndrewReid helped me resolve my issue, and the corrected version of the code is here:
function zoomFit() {
let bounds = document.getElementById('svgContent').getBBox();
let parent = document.getElementById('svgContent').parentElement;
let fullWidth = parent.clientWidth,
fullHeight = parent.clientHeight;
let width = bounds.width,
height = bounds.height;
let midX = bounds.x + width / 2,
midY = bounds.y + height / 2;
if (width == 0 || height == 0) return; // nothing to fit
let scale = 0.95 / Math.max(width / fullWidth, height / fullHeight);
let translate = [
fullWidth / 2 - scale * midX,
fullHeight / 2 - scale * midY
];
document
  .getElementById('svgContent')
  .setAttribute('transform', 'translate(' + translate[0] + ',' + translate[1] + ') scale(' + scale + ')');
// Create a matching zoom transform from d3.zoomIdentity
// (translate first, then scale, so it mirrors the attribute set above)
var transform = zoomIdentity
  .translate(translate[0], translate[1])
  .scale(scale);
// Apply it through the bound zoom behaviour and trigger a zoom event:
svg.call(zooming.transform, transform);
}
var zooming = zoom()
.scaleExtent([0.1, 1])
.translateExtent([[0, 0], [840, 740]])
.on('zoom', zoomed);
svg.call(zooming);
function zoomed() {
svg
.attr('transform', currentEvent.transform)
.on('dblclick.zoom', false)
.on('wheel', function() {
currentEvent.preventDefault();
});
}
But another issue got in the way: the panning function was lost.
I'll have to figure that one out, and I'll update this answer as soon as the solution is found.
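One direction that might restore the panning (a sketch, not a verified fix): apply the zoom transform to the inner #svgContent group instead of to the svg element itself, so the svg keeps receiving wheel and drag events over its whole area while only the content moves:
var content = select('#svgContent');   // assumes `select` is imported from d3-selection

var zooming = zoom()
    .scaleExtent([0.1, 1])
    .translateExtent([[0, 0], [840, 740]])
    .on('zoom', zoomed);

svg.call(zooming)
    .on('dblclick.zoom', null);   // remove the double-click behaviour once, outside the handler

function zoomed() {
    // currentEvent.transform already combines pan (x, y) and zoom (k)
    content.attr('transform', currentEvent.transform);
}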
Best regards,
I am using SVG.js and jQuery to develop a simple yacht-racing SVG animation. I can easily animate an image [yacht icon] along the path, but I also want the yacht to rotate, i.e. its heading to change, as it travels along the path/route. I have tried a few things but cannot get it working. Please help!
I am using this code:
function move_yacht_new(route,yacht,score){
var route1 = draw.path('M 535.07931,510.0164 C 594.9314,509.34072 667.14382,469.54596 691.80372,382.84271');
route1.fill('none');
var length = route1.length();
var y_pos = score;
var sailAngle = 90;
var currentRotation;
route1.hide();
var text = draw.text(function(add) {
add.tspan('We go ')
add.tspan('up').fill('#f09').dy(-40)
add.tspan(', then we go down, then up again').dy(40)
})
var image = draw.image('../globals/game/images/' + yacht + '-yacht.png',20, 20).id('yacht_' + yacht);
var scorePerc = (length/100) * (score);
//vdiscountPerc = calcPerc.toFixed();
var text = draw.text(yacht + ' (' + Math.round(score) + ')');
image.animate(5000, '<>').during(function(pos, morph, eased){
var p = route1.pointAt(eased * scorePerc);
image.move(p.x, p.y);
text.move(p.x, p.y).font({ fill: '#fff', size: 5, family: 'Helvetica', leading: '1.5em' });
var coord = image.bbox();
var center = (coord.x + coord.width/2) + ' ' + (coord.y + coord.height/2);
console.log('center ' + center);
var x_coord = image.attr('x');
var y_coord = image.attr('y');
//console.log(x_coord + ',' + y_coord);
image.rotate(-45, coord.x + (coord.width/2), coord.y + (coord.height/2));
var angle = Math.atan2(p.y, p.x) * 180 / Math.PI;//angle for tangent
//Shifting center to center of rocket
var centerX = p.x - 24,
centerY = p.y - 12;
console.log('angle > ' + angle);
//}).loop(true, true)
})
}
But this rotates the icon away from the path, in the wrong direction.
I managed to resolve this using the animation.js script.
Add the yacht icon into a group, translate the group along the path and rotate the yacht icon in the group around the group origin:
var g = draw.group()
var yacht = g.image(yachtUrl, width, height).center(0,0)
// in the animate function:
g.transform({ x: x, y: y })
yacht.transform({ rotation: degrees, cx: 0, cy: 0 })
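For the heading itself, one approach that fits the during() callback above (a sketch, assuming the group/icon setup just shown and the route1, scorePerc and length variables from the question) is to sample the path at the current distance and at a point slightly ahead, then rotate to the angle of the difference vector, which is the path tangent:
// inside .during(function (pos, morph, eased) { ... }):
var dist = eased * scorePerc;
var p1 = route1.pointAt(dist);
var p2 = route1.pointAt(Math.min(dist + 1, length));   // a point just ahead on the path

// tangent direction of travel, in degrees
var heading = Math.atan2(p2.y - p1.y, p2.x - p1.x) * 180 / Math.PI;

g.transform({ x: p1.x, y: p1.y });
// offset by 90 if the yacht artwork points "up" rather than "right" by default
yacht.transform({ rotation: heading + 90, cx: 0, cy: 0 });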
I load a model with vertices and vertex normals:
var x, y, z, nx, ny, nz, nx1, ny1, nz1, nx2, ny2, nz2;
for (var i = 0, vindex = 0; i < triangle.length; i++, vindex += 3) {
x = parseFloat(triangle[i].attributes.getNamedItem('x1').value);
y = parseFloat(triangle[i].attributes.getNamedItem('y1').value);
z = parseFloat(triangle[i].attributes.getNamedItem('z1').value);
this.geometry.vertices.push(new THREE.Vector3(x * scale + this.translateVector.x, y * scale + this.translateVector.y, z * scale + this.translateVector.z));
x = parseFloat(triangle[i].attributes.getNamedItem('x2').value);
y = parseFloat(triangle[i].attributes.getNamedItem('y2').value);
z = parseFloat(triangle[i].attributes.getNamedItem('z2').value);
this.geometry.vertices.push(new THREE.Vector3(x * scale + this.translateVector.x, y * scale + this.translateVector.y, z * scale + this.translateVector.z));
x = parseFloat(triangle[i].attributes.getNamedItem('x3').value);
y = parseFloat(triangle[i].attributes.getNamedItem('y3').value);
z = parseFloat(triangle[i].attributes.getNamedItem('z3').value);
this.geometry.vertices.push(new THREE.Vector3(x * scale + this.translateVector.x, y * scale + this.translateVector.y, z * scale + this.translateVector.z));
var face = new THREE.Face3(vindex, vindex + 1, vindex + 2);
face.color.setHex(this.faceColor || this.defaultcolor);
face.vertexNormals = [];
nx = parseFloat(triangle[i].attributes.getNamedItem('nx1').value);
ny = parseFloat(triangle[i].attributes.getNamedItem('ny1').value);
nz = parseFloat(triangle[i].attributes.getNamedItem('nz1').value);
face.vertexNormals.push(new THREE.Vector3(-nx, -ny, -nz));
nx1 = parseFloat(triangle[i].attributes.getNamedItem('nx2').value);
ny1 = parseFloat(triangle[i].attributes.getNamedItem('ny2').value);
nz1 = parseFloat(triangle[i].attributes.getNamedItem('nz2').value);
face.vertexNormals.push(new THREE.Vector3(-nx1, -ny1, -nz1));
nx2 = parseFloat(triangle[i].attributes.getNamedItem('nx3').value);
ny2 = parseFloat(triangle[i].attributes.getNamedItem('ny3').value);
nz2 = parseFloat(triangle[i].attributes.getNamedItem('nz3').value);
face.vertexNormals.push(new THREE.Vector3(-nx2, -ny2, -nz2));
face.normal.set((nx + nx1 + nx2) / 3, (ny + ny1 + ny2) / 3,(nz + nz1 + nz2) / 3);
this.geometry.faces.push(face);
}
this.material = new THREE.MeshBasicMaterial({ vertexColors: THREE.FaceColors, overdraw: true, opacity: 1, transparent: false });
this.mesh = new THREE.Mesh(this.geometry, this.material);
this.mesh.name = this.id;
this.mesh.updateMatrix();
this.mesh.matrixAutoUpdate = false;
scene.add(this.mesh);
In the house below, the front face is invisible, so the front wall and left wall are invisible and we can see into the inside of the house. I want it to show all walls and not be see-through. Could anyone help me?
After I change to a Lambert material it still shows the inside of the house. I've tried CW and CCW winding, inverting the vertex indices, and inverting the normals. Could anybody help?
It is possible there is something wrong with the face UVs. Try making the applied material double-sided.
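A minimal sketch of that suggestion, based on the material line from the question (whether it fixes this particular model is untested):
// render both front and back faces so reversed winding or negated normals
// don't cause walls to be culled away
this.material = new THREE.MeshBasicMaterial({
    vertexColors: THREE.FaceColors,
    side: THREE.DoubleSide,
    opacity: 1,
    transparent: false
});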
It seems I found the answer.
Part of the house model has position.z < 0 and the camera's near plane was < 0; the depth buffer apparently cannot handle a negative near value (it may be clamped to 0), and the z-buffer is what determines which surfaces occlude which.
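A small sketch of that near-plane fix (the camera variable name is an assumption; for a perspective camera the near plane must be a positive, non-zero distance, and the projection matrix has to be refreshed after changing it):
// keep the near plane strictly positive so depth values stay well defined
camera.near = 0.1;
camera.far = 10000;
camera.updateProjectionMatrix();   // required after changing near/far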
I'm probably doing something boneheaded here but I'm having difficulties getting alpha values to cooperate in Canvas. I'm trying to sample an opaque color from a spot on the canvas, make it more transparent, and lay it down in another spot -- but the alpha part doesn't seem to be working. Stripped down it goes sort of like this (condensed from functions strewn across the script):
p = ground.ctx.getImageData(loc.x, loc.y, 1, 1).data;
col = {R: p[0], G: p[1], B: p[2], a: p[3]};
col.a = col.a - 0.1;
ground.ctx.fillStyle = 'rgba(' + col.R + ', ' + col.G + ', ' + col.B + ', ' + col.a + ')';
ground.ctx.fillRect(nuLoc.x, nuLoc.y, sqrSize, sqrSize);
It all runs, but when I test the value of fillStyle I get just a standard RGB "#fa674c" or whatever -- no mention of any alpha -- and when I getImageData() from the newly drawn Rect the value is fully opaque again.
Another thing I haven't been able to figure out either empirically or by reading every tutorial (and the spec) is whether alpha wants to be 0-1.0 or 0-255. Most sources talk about 0-1.0 -- but getImageData() returns 0-255... and I can't make it work either way.
Use context.globalAlpha instead of using rgba fill:
p = ground.ctx.getImageData(loc.x, loc.y, 1, 1).data;
col = {R: p[0], G: p[1], B: p[2], a: p[3]};
// note: globalAlpha uses a scale of 0-1
// and getImageData uses a scale of 0-255
ground.ctx.globalAlpha = col.a / 255 - 0.1;
ground.ctx.fillStyle = 'rgb(' + col.R + ', ' + col.G + ', ' + col.B + ')';
ground.ctx.fillRect(nuLoc.x, nuLoc.y, sqrSize, sqrSize);
// reset globalAlpha when you're done
ground.ctx.globalAlpha = 1;
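For completeness, the original rgba() string approach can also work once the alpha channel is normalized: CSS rgba() expects alpha in the 0-1 range, while getImageData() returns 0-255. A sketch along those lines, with the same ground/loc/nuLoc/sqrSize names as above:
var p = ground.ctx.getImageData(loc.x, loc.y, 1, 1).data;
// convert the 0-255 alpha byte to 0-1 before building the CSS color
var alpha = Math.max(0, p[3] / 255 - 0.1);
ground.ctx.fillStyle = 'rgba(' + p[0] + ', ' + p[1] + ', ' + p[2] + ', ' + alpha + ')';
ground.ctx.fillRect(nuLoc.x, nuLoc.y, sqrSize, sqrSize);
Note that if the destination pixels are already opaque, default source-over compositing will still produce fully opaque pixels there; the transparency shows up in the blended color rather than in the alpha that getImageData() reads back.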