Rotating around origin - Famo.us

How can I rotate an ImageSurface around its origin if I have applied a translate to it?
It does not rotate around the origin. Can someone explain whether it is using the align point as the center of rotation?
EDIT
My ImageSurface rotates as if it had a distant pivot point, and it scales up.
function _createFb() {
    this.fbLogo = new ImageSurface({
        size: [true, true],
        content: 'images/fb.png',
        properties: {
            zIndex: 10
        }
    });
    var fbModifier = new StateModifier({
        origin: [0.5, 0.5],
        align: [0.5, 0.5],
        transform: Transform.scale(0.4, 0.4, 1)
    });
    var fbPosModifier = new StateModifier({
        transform: Transform.translate(-250, 520, 0)
    });
    this.fbLogo.on("mouseleave", function() {
        fbModifier.setTransform(Transform.rotateZ(Math.PI / 4), { duration: 1000 });
    });
    this.layout.content.add(fbModifier).add(fbPosModifier).add(this.fbLogo);
}
MY SOLUTION
function _createFb() {
    this.fbLogo = new ImageSurface({
        size: [true, true],
        content: 'images/fb.png',
        properties: {
            zIndex: 10
        }
    });
    var fbModifier = new StateModifier({
        origin: [0.5, 0.5],
        align: [0.5, 0.5],
        transform: Transform.scale(0.4, 0.4, 1)
    });
    var fbPosModifier = new StateModifier({
        transform: Transform.translate(-250, 520, 0)
    });
    // A dedicated modifier drives the rotation from a Transitionable,
    // so the pivot stays at the centered origin set above.
    var fbRotateModifier = new Modifier();
    var transitionable = new Transitionable(0);
    this.fbLogo.on("mouseleave", function() {
        transitionable.reset(0); // restart the spin from zero on every mouseleave
        fbRotateModifier.transformFrom(function() {
            return Transform.rotateZ(transitionable.get());
        });
        transitionable.set(2 * Math.PI, { curve: "inOutQuad", duration: 500 });
    });
    this.layout.content.add(fbModifier).add(fbPosModifier).add(fbRotateModifier).add(this.fbLogo);
}
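Because transformFrom receives a function, Famo.us re-evaluates it on every render tick, so the rotation tracks the Transitionable while it animates. Resetting the Transitionable to 0 before each animation keeps successive spins from accumulating, and because the rotation lives in its own Modifier below the origin-centering fbModifier, the pivot stays at the image's center.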

This can be done in straight Famo.us; there is no need to modify the CSS. Here's an example. Some of these modifiers could be combined, but I'm breaking them up for clarity. Centering the origin is applied to the Surface first, so rotations pivot about the newly defined origin; the rotated Surface is then translated.
var surface = new Surface({
    size: [100, 100],
    properties: { background: 'red' }
});
var translateModifier = new Modifier({
    transform: Transform.translate(100, 0, 0)
});
// rotates around and around based on your current system time
var rotateModifier = new Modifier({
    transform: function() { return Transform.rotateZ(Date.now() * 0.001); }
});
var centerModifier = new Modifier({
    origin: [0.5, 0.5]
});
context
    .add(translateModifier)
    .add(rotateModifier)
    .add(centerModifier)
    .add(surface)
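Note the order of the chain: the modifier closest to the surface takes effect first, so centerModifier fixes the pivot at the surface's center, rotateModifier spins about that pivot, and translateModifier moves the already-spinning surface as a whole.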

I had a similar problem spinning an element. The transform origin needs to be set to the center (50% 50%); I used a CSS class for this.
.myClass {
    -webkit-transform-origin: 50% 50% !important;
}
var myElem = new Surface({
    size: [40, 40],
    classes: ['myClass']
});
this.myElemModifier = new StateModifier();
// called from a user action
this.myElemModifier.setTransform(
    Transform.rotateZ(Math.PI), { duration: 5000 }
);

Related

How do you correctly include three.js and gltf.js

I'm rewriting my question because Stack Overflow thought my post was spam (because I included 6000+ lines of code). I'm trying to make a web app that tracks the user's face and puts a 3D object over it, like a "filter". The thing is, I don't want this app to have any external dependencies except one (at least for scripts/packages/modules). I therefore copied the minified three.js (from https://cdn.jsdelivr.net/npm/three@0.120.1/build/three.min.js) directly into the HTML file, as well as the GLTFLoader.js script from examples/jsm/loaders/ in the GitHub repository.
I started with Jeeliz's face filter repository, and I'm trying to integrate GLTFLoader.js, but when I use
const loader = new GLTFLoader();
it gives me a GLTFLoader is not defined error, as well as console messages saying the ES6 module was not imported.
When I use
const loader = new THREE.GLTFLoader();
it says it's not a constructor, so I lean towards the former being the correct way to construct the loader.
I appreciate any help in advance! I mostly code in Python or C++ and I'm still a beginner, but I've tinkered with JavaScript a few times, so I thought I could handle this. I posted this question once before, but Stack Overflow crashed after flagging the post as spam, so I'm including a link to the HTML file:
https://www.dropbox.com/s/p86xchlldr7j2vf/index-mine.html?dl=0
Code:
<!DOCTYPE html>
<html>
<head>
  <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
  <meta http-equiv="content-language" content="en-EN" />
  <title>Filter Web App</title>
  <!-- INCLUDE FACEFILTER SCRIPT -->
  <script src='https://raw.githubusercontent.com/jeeliz/jeelizFaceFilter/master/dist/jeelizFaceFilter.js'></script>
  <!-- INCLUDE THREE.JS -->
  <script src='https://cdn.jsdelivr.net/npm/three@0.120.1/build/three.min.js'></script>
  <!-- INCLUDE RESIZER -->
  <script src='https://raw.githubusercontent.com/jeeliz/jeelizFaceFilter/master/helpers/JeelizResizer.js'></script>
  <!-- INCLUDE GLTFLOADER.JS -->
  <script src='https://raw.githubusercontent.com/mrdoob/three.js/dev/examples/jsm/loaders/GLTFLoader.js'></script>
<!-- INCLUDE JEELIZ THREE.JS HELPER -->
<script>
/*
Helper for Three.js
*/
const punkThreeHelper = (function(){
// internal settings:
const _settings = {
rotationOffsetX: 0.0, // negative -> look upper. in radians
pivotOffsetYZ: [0.2, 0.6],// YZ of the distance between the center of the cube and the pivot
detectionThreshold: 0.8, // sensitivity, between 0 and 1. Lower -> more sensitive
detectionHysteresis: 0.02,
//tweakMoveYRotateX: 0,//0.5, // tweak value: move detection window along Y axis when rotate the face around X (look up <-> down)
cameraMinVideoDimFov: 35 // Field of View for the smallest dimension of the video in degrees
};
// private vars:
let _threeRenderer = null,
_threeScene = null,
_threeVideoMesh = null,
_threeVideoTexture = null,
_threeTranslation = null;
let _maxFaces = -1,
_isMultiFaces = false,
_detectCallback = null,
_isVideoTextureReady = false,
_isSeparateThreeCanvas = false,
_faceFilterCv = null,
_videoElement = null,
_isDetected = false,
_scaleW = 1,
_canvasAspectRatio = -1;
const _threeCompositeObjects = [];
let _gl = null,
_glVideoTexture = null,
_glShpCopyCut = null,
_glShpCopyCutVideoMatUniformPointer = null;
let _videoTransformMat2 = null;
// private funcs:
function destroy(){
_isVideoTextureReady = false;
_threeCompositeObjects.splice(0);
if (_threeVideoTexture){
_threeVideoTexture.dispose();
_threeVideoTexture = null;
}
}
function create_threeCompositeObjects(){
for (let i=0; i<_maxFaces; ++i){
// COMPOSITE OBJECT WHICH WILL TRACK A DETECTED FACE
const threeCompositeObject = new THREE.Object3D();
threeCompositeObject.frustumCulled = false;
threeCompositeObject.visible = false;
_threeCompositeObjects.push(threeCompositeObject);
_threeScene.add(threeCompositeObject);
}
}
function create_videoScreen(){
const videoScreenVertexShaderSource = "attribute vec2 position;\n\
uniform mat2 videoTransformMat2;\n\
varying vec2 vUV;\n\
void main(void){\n\
gl_Position = vec4(position, 0., 1.);\n\
vUV = 0.5 + videoTransformMat2 * position;\n\
}";
const videoScreenFragmentShaderSource = "precision lowp float;\n\
uniform sampler2D samplerVideo;\n\
varying vec2 vUV;\n\
void main(void){\n\
gl_FragColor = texture2D(samplerVideo, vUV);\n\
}";
if (_isSeparateThreeCanvas){
const compile_shader = function(source, type, typeString) {
const glShader = _gl.createShader(type);
_gl.shaderSource(glShader, source);
_gl.compileShader(glShader);
if (!_gl.getShaderParameter(glShader, _gl.COMPILE_STATUS)) {
alert("ERROR IN " + typeString + " SHADER: " + _gl.getShaderInfoLog(glShader));
return null;
}
return glShader;
};
const glShaderVertex = compile_shader(videoScreenVertexShaderSource, _gl.VERTEX_SHADER, 'VERTEX');
const glShaderFragment = compile_shader(videoScreenFragmentShaderSource, _gl.FRAGMENT_SHADER, 'FRAGMENT');
_glShpCopyCut = _gl.createProgram();
_gl.attachShader(_glShpCopyCut, glShaderVertex);
_gl.attachShader(_glShpCopyCut, glShaderFragment);
_gl.linkProgram(_glShpCopyCut);
const samplerVideo = _gl.getUniformLocation(_glShpCopyCut, 'samplerVideo');
_glShpCopyCutVideoMatUniformPointer = _gl.getUniformLocation(_glShpCopyCut, 'videoTransformMat2');
return;
}
// init video texture with red:
_threeVideoTexture = new THREE.DataTexture( new Uint8Array([255,0,0]), 1, 1, THREE.RGBFormat);
_threeVideoTexture.needsUpdate = true;
// CREATE THE VIDEO BACKGROUND:
const videoMaterial = new THREE.RawShaderMaterial({
depthWrite: false,
depthTest: false,
vertexShader: videoScreenVertexShaderSource,
fragmentShader: videoScreenFragmentShaderSource,
uniforms:{
samplerVideo: {value: _threeVideoTexture},
videoTransformMat2: {
value: _videoTransformMat2
}
}
});
const videoGeometry = new THREE.BufferGeometry()
const videoScreenCorners = new Float32Array([-1,-1, 1,-1, 1,1, -1,1]);
// handle both new and old THREE.js versions:
const setVideoGeomAttribute = (videoGeometry.setAttribute || videoGeometry.addAttribute).bind(videoGeometry);
setVideoGeomAttribute( 'position', new THREE.BufferAttribute( videoScreenCorners, 2 ) );
videoGeometry.setIndex(new THREE.BufferAttribute(new Uint16Array([0,1,2, 0,2,3]), 1));
_threeVideoMesh = new THREE.Mesh(videoGeometry, videoMaterial);
that.apply_videoTexture(_threeVideoMesh);
_threeVideoMesh.renderOrder = -1000; // render first
_threeVideoMesh.frustumCulled = false;
_threeScene.add(_threeVideoMesh);
} //end create_videoScreen()
function detect(detectState){
_threeCompositeObjects.forEach(function(threeCompositeObject, i){
_isDetected = threeCompositeObject.visible;
const ds = detectState[i];
if (_isDetected && ds.detected < _settings.detectionThreshold-_settings.detectionHysteresis){
// DETECTION LOST
if (_detectCallback) _detectCallback(i, false);
threeCompositeObject.visible = false;
} else if (!_isDetected && ds.detected > _settings.detectionThreshold+_settings.detectionHysteresis){
// FACE DETECTED
if (_detectCallback) _detectCallback(i, true);
threeCompositeObject.visible = true;
}
}); //end loop on all detection slots
}
function update_poses(ds, threeCamera){
// tan( <horizontal FoV> / 2 ):
const halfTanFOVX = Math.tan(threeCamera.aspect * threeCamera.fov * Math.PI/360); //tan(<horizontal FoV>/2), in radians (threeCamera.fov is vertical FoV)
_threeCompositeObjects.forEach(function(threeCompositeObject, i){
if (!threeCompositeObject.visible) return;
const detectState = ds[i];
// tweak Y position depending on rx:
//const tweak = _settings.tweakMoveYRotateX * Math.tan(detectState.rx);
const cz = Math.cos(detectState.rz), sz = Math.sin(detectState.rz);
// relative width of the detection window (1-> whole width of the detection window):
const W = detectState.s * _scaleW;
// distance between the front face of the cube and the camera:
const DFront = 1 / ( 2 * W * halfTanFOVX );
// D is the distance between the center of the unit cube and the camera:
const D = DFront + 0.5;
// coords in 2D of the center of the detection window in the viewport:
const xv = detectState.x * _scaleW;
const yv = detectState.y * _scaleW;
// coords in 3D of the center of the cube (in the view coordinates system):
const z = -D; // minus because view coordinate system Z goes backward
const x = xv * D * halfTanFOVX;
const y = yv * D * halfTanFOVX / _canvasAspectRatio;
// set position before pivot:
threeCompositeObject.position.set(-sz*_settings.pivotOffsetYZ[0], -cz*_settings.pivotOffsetYZ[0], -_settings.pivotOffsetYZ[1]);
// set rotation and apply it to position:
threeCompositeObject.rotation.set(detectState.rx+_settings.rotationOffsetX, detectState.ry, detectState.rz, "ZYX");
threeCompositeObject.position.applyEuler(threeCompositeObject.rotation);
// add translation part:
_threeTranslation.set(x, y+_settings.pivotOffsetYZ[0], z+_settings.pivotOffsetYZ[1]);
threeCompositeObject.position.add(_threeTranslation);
}); //end loop on composite objects
}
//public methods:
const that = {
// launched with the same spec object as callbackReady. Set spec.threeCanvasId to the ID of the three.js canvas to run in 2-canvas mode:
init: function(spec, detectCallback){
destroy();
_maxFaces = spec.maxFacesDetected;
_glVideoTexture = spec.videoTexture;
_videoTransformMat2 = spec.videoTransformMat2;
_gl = spec.GL;
_faceFilterCv = spec.canvasElement;
_isMultiFaces = (_maxFaces>1);
_videoElement = spec.videoElement;
// enable 2 canvas mode if necessary:
let threeCanvas = null;
if (spec.threeCanvasId){
_isSeparateThreeCanvas = true;
// adjust the three.js canvas size to the face filter canvas:
threeCanvas = document.getElementById(spec.threeCanvasId);
threeCanvas.setAttribute('width', _faceFilterCv.width);
threeCanvas.setAttribute('height', _faceFilterCv.height);
} else {
threeCanvas = _faceFilterCv;
}
if (typeof(detectCallback) !== 'undefined'){
_detectCallback = detectCallback;
}
// init THREE.JS context:
_threeRenderer = new THREE.WebGLRenderer({
context: (_isSeparateThreeCanvas) ? null : _gl,
canvas: threeCanvas,
alpha: (_isSeparateThreeCanvas || spec.alpha) ? true : false,
preserveDrawingBuffer: true // to make image capture possible
});
_threeScene = new THREE.Scene();
_threeTranslation = new THREE.Vector3();
create_threeCompositeObjects();
create_videoScreen();
// handle device orientation change:
window.addEventListener('orientationchange', function(){
setTimeout(punkfacefilter.resize, 1000);
}, false);
const returnedDict = {
videoMesh: _threeVideoMesh,
renderer: _threeRenderer,
scene: _threeScene
};
if (_isMultiFaces){
returnedDict.faceObjects = _threeCompositeObjects
} else {
returnedDict.faceObject = _threeCompositeObjects[0];
}
return returnedDict;
}, //end that.init()
detect: function(detectState){
const ds = (_isMultiFaces) ? detectState : [detectState];
// update detection states:
detect(ds);
},
get_isDetected: function() {
return _isDetected;
},
render: function(detectState, threeCamera){
const ds = (_isMultiFaces) ? detectState : [detectState];
// update detection states then poses:
detect(ds);
update_poses(ds, threeCamera);
if (_isSeparateThreeCanvas){
// render the video texture on the faceFilter canvas:
_gl.viewport(0, 0, _faceFilterCv.width, _faceFilterCv.height);
_gl.useProgram(_glShpCopyCut);
_gl.uniformMatrix2fv(_glShpCopyCutVideoMatUniformPointer, false, _videoTransformMat2);
_gl.activeTexture(_gl.TEXTURE0);
_gl.bindTexture(_gl.TEXTURE_2D, _glVideoTexture);
_gl.drawElements(_gl.TRIANGLES, 3, _gl.UNSIGNED_SHORT, 0);
} else {
// reinitialize the state of THREE.JS because JEEFACEFILTER has changed things:
// -> can be VERY costly!
_threeRenderer.state.reset();
}
// trigger the render of the THREE.JS SCENE:
_threeRenderer.render(_threeScene, threeCamera);
},
sortFaces: function(bufferGeometry, axis, isInv){ // sort faces long an axis
// Useful when a bufferGeometry has alpha: we should render the last faces first
const axisOffset = {X:0, Y:1, Z:2}[axis.toUpperCase()];
const sortWay = (isInv) ? -1 : 1;
// fill the faces array:
const nFaces = bufferGeometry.index.count/3;
const faces = new Array(nFaces);
for (let i=0; i<nFaces; ++i){
faces[i] = [bufferGeometry.index.array[3*i], bufferGeometry.index.array[3*i+1], bufferGeometry.index.array[3*i+2]];
}
// compute centroids:
const aPos = bufferGeometry.attributes.position.array;
const centroids = faces.map(function(face, faceIndex){
return [
(aPos[3*face[0]]+aPos[3*face[1]]+aPos[3*face[2]])/3, // X
(aPos[3*face[0]+1]+aPos[3*face[1]+1]+aPos[3*face[2]+1])/3, // Y
(aPos[3*face[0]+2]+aPos[3*face[1]+2]+aPos[3*face[2]+2])/3, // Z
face
];
});
// sort centroids:
centroids.sort(function(ca, cb){
return (ca[axisOffset]-cb[axisOffset]) * sortWay;
});
// reorder bufferGeometry faces:
centroids.forEach(function(centroid, centroidIndex){
const face = centroid[3];
bufferGeometry.index.array[3*centroidIndex] = face[0];
bufferGeometry.index.array[3*centroidIndex+1] = face[1];
bufferGeometry.index.array[3*centroidIndex+2] = face[2];
});
}, //end sortFaces
get_threeVideoTexture: function(){
return _threeVideoTexture;
},
apply_videoTexture: function(threeMesh){
if (_isVideoTextureReady){
return;
}
threeMesh.onAfterRender = function(){
// Replace _threeVideoTexture.__webglTexture by the real video texture:
try {
_threeRenderer.properties.update(_threeVideoTexture, '__webglTexture', _glVideoTexture);
_threeVideoTexture.magFilter = THREE.LinearFilter;
_threeVideoTexture.minFilter = THREE.LinearFilter;
_isVideoTextureReady = true;
} catch(e){
console.log('WARNING in punkThreeHelper: the glVideoTexture is not fully initialized');
}
delete(threeMesh.onAfterRender);
};
},
// create an occluder, IE a transparent object which writes on the depth buffer:
create_threejsOccluder: function(occluderURL, callback){
const occluderMesh = new THREE.Mesh();
new THREE.BufferGeometryLoader().load(occluderURL, function(occluderGeometry){
const mat = new THREE.ShaderMaterial({
vertexShader: THREE.ShaderLib.basic.vertexShader,
fragmentShader: "precision lowp float;\n void main(void){\n gl_FragColor=vec4(1.,0.,0.,1.);\n }",
uniforms: THREE.ShaderLib.basic.uniforms,
colorWrite: false
});
occluderMesh.renderOrder = -1; //render first
occluderMesh.material = mat;
occluderMesh.geometry = occluderGeometry;
if (typeof(callback)!=='undefined' && callback) callback(occluderMesh);
});
return occluderMesh;
},
set_pivotOffsetYZ: function(pivotOffset) {
_settings.pivotOffsetYZ = pivotOffset;
},
create_camera: function(zNear, zFar){
const threeCamera = new THREE.PerspectiveCamera(1, 1, (zNear) ? zNear : 0.1, (zFar) ? zFar : 100);
that.update_camera(threeCamera);
return threeCamera;
},
update_camera: function(threeCamera){
// compute aspectRatio:
const canvasElement = _threeRenderer.domElement;
const cvw = canvasElement.width;
const cvh = canvasElement.height;
_canvasAspectRatio = cvw / cvh;
// compute vertical field of view:
const vw = _videoElement.videoWidth;
const vh = _videoElement.videoHeight;
const videoAspectRatio = vw / vh;
const fovFactor = (vh > vw) ? (1.0 / videoAspectRatio) : 1.0;
const fov = _settings.cameraMinVideoDimFov * fovFactor;
console.log('INFO in punkThreeHelper - update_camera(): Estimated vertical video FoV is', fov);
// compute X and Y offsets in pixels:
let scale = 1.0;
if (_canvasAspectRatio > videoAspectRatio) {
// the canvas is more in landscape format than the video, so we crop top and bottom margins:
scale = cvw / vw;
} else {
// the canvas is more in portrait format than the video, so we crop right and left margins:
scale = cvh / vh;
}
const cvws = vw * scale, cvhs = vh * scale;
const offsetX = (cvws - cvw) / 2.0;
const offsetY = (cvhs - cvh) / 2.0;
_scaleW = cvw / cvws;
// apply parameters:
threeCamera.aspect = _canvasAspectRatio;
threeCamera.fov = fov;
console.log('INFO in punkThreeHelper.update_camera(): camera vertical estimated FoV is', fov, 'deg');
threeCamera.setViewOffset(cvws, cvhs, offsetX, offsetY, cvw, cvh);
threeCamera.updateProjectionMatrix();
// update drawing area:
_threeRenderer.setSize(cvw, cvh, false);
_threeRenderer.setViewport(0, 0, cvw, cvh);
}, //end update_camera()
resize: function(w, h, threeCamera){
_threeRenderer.domElement.width = w;
_threeRenderer.domElement.height = h;
punkfacefilter.resize();
if (threeCamera){
that.update_camera(threeCamera);
}
}
}
return that;
})();
// Export ES6 module:
try {
module.exports = punkThreeHelper;
} catch(e){
console.log('punkThreeHelper ES6 Module not exported');
window.punkThreeHelper = punkThreeHelper;
}
</script>
<!-- INCLUDE DEMO SCRIPT -->
<script>
let THREECAMERA = null;

// callback: launched when a face is detected or lost
function detect_callback(faceIndex, isDetected) {
  if (isDetected) {
    console.log('INFO in detect_callback(): DETECTED');
  } else {
    console.log('INFO in detect_callback(): LOST');
  }
}

// build the 3D scene. Called once when the punk face filter is ready
function init_threeScene(spec) {
  const threeStuffs = punkThreeHelper.init(spec, detect_callback);
  // CREATE A CUBE
  const loader = new GLTFLoader();
  loader.load('/your-glb-file2.glb', function (gltf) {
    threeStuffs.faceObject.add(gltf.scene);
  });
  // CREATE THE CAMERA
  THREECAMERA = punkThreeHelper.create_camera();
}

// entry point:
function main() {
  punkResizer.size_canvas({
    canvasId: 'jeeFaceFilterCanvas',
    callback: function (isError, bestVideoSettings) {
      init_faceFilter(bestVideoSettings);
    }
  });
}

function init_faceFilter(videoSettings) {
  punkfacefilter.init({
    followZRot: true,
    canvasId: 'jeeFaceFilterCanvas',
    NNCPath: '/', // root of the NN_DEFAULT.json file
    maxFacesDetected: 1,
    callbackReady: function (errCode, spec) {
      if (errCode) {
        console.log('AN ERROR HAPPENS. ERR =', errCode);
        return;
      }
      console.log('INFO: punkfacefilter IS READY');
      init_threeScene(spec);
    },
    // called at each render iteration (drawing loop):
    callbackTrack: function (detectState) {
      punkThreeHelper.render(detectState, THREECAMERA);
    }
  }); // end punkfacefilter.init call
}

window.addEventListener('load', main);
</script>
<style>
a { color: #eee; text-decoration: none }
a:hover { color: blue; }
body {
  overflow: auto; overflow-y: auto;
  background-color: white;
  background-attachment: fixed;
  background-position: center;
  background-size: contain;
  margin: 0px;
}
#jeeFaceFilterCanvas {
  z-index: 10;
  position: absolute;
  max-height: 100%;
  max-width: 100%;
  left: 50%;
  top: 50%;
  width: 100vmin;
  transform: translate(-50%, -50%) rotateY(180deg);
}
#threeCanvas {
  z-index: 11;
  position: absolute;
  max-height: 100%;
  max-width: 100%;
  left: 50%;
  top: 50%;
  width: 100vmin;
  transform: translate(-50%, -50%) rotateY(180deg);
}
@media (max-width: 787px) {
  #jeeFaceFilterCanvas {
    right: 0px;
    top: 60px;
    transform: rotateY(180deg);
  }
}
</style>
</head>
<body>
<canvas width="600" height="600" id='jeeFaceFilterCanvas'></canvas>
</body>
</html>
/your-glb-file2.glb is a valid 3D file in the same directory as this file. I made this code from the HTML file I linked, but src'd the scripts from URLs, unlike the real HTML file I linked.
The
// CREATE A CUBE
const loader = new GLTFLoader();
loader.load('/your-glb-file2.glb', function (gltf) {
    threeStuffs.faceObject.add(gltf.scene);
});
near the end is the problem. The app correctly puts a cube over my face like I want it to when it's
// CREATE A CUBE
const cubeGeometry = new THREE.BoxGeometry(1, 1, 1);
const cubeMaterial = new THREE.MeshNormalMaterial();
const threeCube = new THREE.Mesh(cubeGeometry, cubeMaterial);
threeCube.frustumCulled = false;
threeStuffs.faceObject.add(threeCube);
I tried constructing the loader both as a "sub"-function of three.js (THREE.GLTFLoader) and as a standalone GLTFLoader, but it doesn't work either way, even though the cube built with three.js's native constructors renders over my face correctly.
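A likely culprit, for what it's worth: files under examples/jsm/ are ES modules, so a plain script tag cannot load them (and raw.githubusercontent.com serves files as text/plain, which browsers refuse to execute as scripts anyway). A minimal sketch of pulling the loader in as a module instead; the jsdelivr URL is an assumption based on the r120-era package layout:
<script type="module">
  // ES module builds must be imported, not just src'd:
  import { GLTFLoader } from 'https://cdn.jsdelivr.net/npm/three@0.120.1/examples/jsm/loaders/GLTFLoader.js';

  const loader = new GLTFLoader();
  loader.load('/your-glb-file2.glb', function (gltf) {
    // gltf.scene could then be added to threeStuffs.faceObject as in the question
    console.log('glb loaded', gltf.scene);
  });
</script>
Alternatively, r120 still shipped a non-module build at examples/js/loaders/GLTFLoader.js, which attaches itself to the THREE namespace; loaded with a regular script tag after three.min.js, the THREE.GLTFLoader constructor form would then be the right one.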

MapBox Markers Filter - Markers from a WordPress loop

I'm using Mapbox to display 50-70 markers. There are five different categories,
and each marker has a category.
I would like to click on each category name (ul li a) to filter the markers on the map.
To build a marker (with an infobox), I'm using .addTo(map);
To filter the markers: map.setFilter('all', ['==', ['get', 'category'], 'categoryslug']);
But nothing gets filtered, and I get this warning: Error: The layer 'all' does not exist in the map's style and cannot be filtered.
I think it's because my markers are not inside a layer? Or which is the default layer?
$allGeaoSon .= '
{
    type: "MyMarkers",
    geometry: {
        type: "Point",
        coordinates: ['.$theCurrentLng.', '.$theCurrentLat.']
    },
    properties: {
        title: "'.get_the_title( get_the_ID() ).'",
        description: "'.$termsNames.'",
        category: "'.$theCurrentCatsList.'",
        infoboxLink: "'.$markerUrl.'",
        infoboxImgUrl: "'.$featured_img.'",
    }
},
';
<script>
mapboxgl.accessToken = "'. esc_attr( $a['accesstoken'] ).'";
var map = new mapboxgl.Map({
    container: "map", // container ID
    style: "'. esc_attr( $a['style-url'] ).'", // style URL
    center: ['.esc_attr( $a['center-lng'] ).', '.esc_attr( $a['center-lat'] ).'], // starting position [centerLng, centerLat]
    zoom: '.esc_attr( $a['zoom'] ).', // starting zoom
    pitch: 60, // pitch in degrees
    bearing: -60 // bearing in degrees
});
// Add zoom and rotation controls to the map.
map.addControl(new mapboxgl.NavigationControl(), "bottom-right");
// disable map zoom when using scroll
map.scrollZoom.disable();
var geojson = {
    type: "FontaineCollection",
    features: [
        '.$allGeaoSon.'
    ]
};
// add markers to the map
geojson.features.forEach(function(marker) {
    // create an HTML element for each feature
    var el = document.createElement("div");
    el.className = "marker" + " " + marker.properties.category;
    // make a marker for each feature and add it to the map
    new mapboxgl.Marker(el)
        .setLngLat(marker.geometry.coordinates)
        .setPopup(new mapboxgl.Popup({ offset: 15 }) // add popups
            .setHTML("<div class=\"mapboxgl-popup-content-categories " + marker.properties.category + "\"><div class=\"infobox-columns\"><div class=\"infobox-column-1\"><img src=\"" + marker.properties.infoboxImgUrl + "\"></div><div class=\"infobox-column-2\"><h3>" + marker.properties.title + "</h3><p>" + marker.properties.description + "</p></div></div>"))
        //.setHTML("<div class=\"infobox-columns\"><div class=\"infobox-column-1\"></div><div class=\"infobox-column-1\"><h3>" + marker.properties.title + "</h3><p>" + marker.properties.description + "</p></div></div></div>")
        .addTo(map);
});
var coordinates = ['. $boundselement.'];
var bounds = coordinates.reduce(function(bounds, coord) {
    return bounds.extend(coord);
}, new mapboxgl.LngLatBounds(coordinates[0], coordinates[0]));
map.fitBounds(bounds, {
    padding: 100
});
</script>
For the filter:
$(".map-filter a").on("click", function() {
    // For each filter link, get the "data-filter" attribute value.
    var filter = $(this).data("filter");
    $(this).addClass("active").siblings().removeClass("active");
    map.setFilter('all', ['==', ['get', 'category'], filter]);
    return false;
});
Any ideas to help me?
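Since mapboxgl.Marker creates plain DOM elements rather than features in a style layer, map.setFilter has no layer named 'all' to operate on, which is exactly what the warning says. A minimal sketch of filtering on the DOM side instead, reusing the category class each marker element receives above (the show/hide approach and the "all" value are assumptions, not part of the Mapbox API):
$(".map-filter a").on("click", function () {
    var filter = $(this).data("filter");
    $(this).addClass("active").siblings().removeClass("active");
    // Markers are ordinary DOM nodes, so show or hide them directly:
    $(".marker").each(function () {
        var matches = (filter === "all") || $(this).hasClass(filter);
        $(this).toggle(matches);
    });
    return false;
});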

Updating a three.js texture through the dat.gui dropdown

I have loaded different textures using TextureLoader and I am trying to switch between them using dat.gui controls.
Why is the code below not working?
gui.add(mesh.position, "y", -1, 1, 0.1);
gui.add(mesh.material, "map", { alpha: alphaTexture, color: colorTexture, normal: normalTexture })
    .onChange(() => {
        mesh.material.needsUpdate = true;
        console.log("updated");
    });
It gives this error:
"Uncaught TypeError: m is undefined"
After some tweaking, I found that the values of the object (or array) passed as the third argument only support primitive types such as strings, so passing an object as a value will not work.
This is the closest workaround that I could come up with:
/* GUI options */
const guiOptions = {
    mesh_material_map: "color",
};
/* Textures */
const textureLoader = new THREE.TextureLoader(loadingManager);
const colorTexture = textureLoader.load("/textures/door/color.jpg");
const alphaTexture = textureLoader.load("/textures/door/alpha.jpg");
const normalTexture = textureLoader.load("/textures/door/normal.jpg");
const guiTextureHash = {
    color: colorTexture,
    alpha: alphaTexture,
    normal: normalTexture,
};
/* Add to gui */
gui.add(guiOptions, "mesh_material_map", Object.keys(guiTextureHash)).onChange((value) => {
    mesh.material.map = guiTextureHash[value];
    mesh.material.needsUpdate = true; // flag the material (not the mesh) so the new map is picked up
    console.log("updated", value);
});
I found your topic while looking for a texture picker. It's probably a little way off from your starting point, but it could help someone else. I finally made a simple texture picker with a dropdown selection key in dat.gui. The goal is to be able to change my matcap texture on the fly, stepping through an array of loaded textures.
const gui = new dat.GUI()
const textureLoader = new THREE.TextureLoader()

const myMatCap = [
    textureLoader.load('./textures/matcaps/1.png'),
    textureLoader.load('./textures/matcaps/2.png'),
    textureLoader.load('./textures/matcaps/3.png')
]

const parameters = {
    color: 0xff0000,
    matCapTexture: 0
}

const updateAllMaterials = () => {
    scene.traverse((child) => {
        if (child instanceof THREE.Mesh && child.material instanceof THREE.MeshMatcapMaterial) {
            child.material.matcap = myMatCap[parameters.matCapTexture]
            child.material.needsUpdate = true
        }
    })
}

gui.add(parameters, 'matCapTexture', {
    terracotta: 0,
    grey: 1,
    chrome: 2,
}).onFinishChange(() => {
    updateAllMaterials()
})

let mesh = new THREE.Mesh(
    geometry,
    new THREE.MeshMatcapMaterial({
        side: THREE.DoubleSide,
        matcap: myMatCap[parameters.matCapTexture]
    })
);
scene.add(mesh)

NativeScript plugin "nativescript-camera-plus": Can't set picture size (width and height)

Hello, I am developing a NativeScript app and I'm using the "nativescript-camera-plus" plugin (plugin on GitHub: https://github.com/nstudio/nativescript-camera-plus). Even when I set the parameters (width and height) exactly as described in the Option Interfaces section:
cam.takePicture({ saveToGallery: true, confirm: false, keepAspectRatio: true, width: 1920, height: 1440 });
...the plugin takes the picture and saves it using the screen resolution of the device divided by 2 as its reference. Example: if the screen resolution is 1920, the plugin divides it by 2 and saves the photo at 960. I need it to save the photo with the width and height I set in the options (parameters) of the code. Can someone help me?
MY CODE:
exports.takePicFromCam = function (args) {
    cam.requestCameraPermissions().then(function () {
        if (!cam) {
            cam = new CameraPlus();
        }
        cam.takePicture({ saveToGallery: true, confirm: false, keepAspectRatio: true, width: 1920, height: 1440 });
    });
}

cam.on(CameraPlus.photoCapturedEvent, function (args) {
    fromAsset(args.data)
        .then(function (res) {
            var preview = topmost().getViewById('testImagePickResult');
            var picture = res;
            var matrix = new android.graphics.Matrix();
            matrix.postRotate(180);
            var imagemfinal = android.graphics.Bitmap.createBitmap(picture.android, 0, 0, picture.android.getWidth(), picture.android.getHeight(), matrix, true);
            preview.src = imagemfinal;
            var novonomedafoto = new Date().getTime();
        });
    imagemsource = page.getViewById("testImagePickResult").imageSource;
    const folderDest = fileSystemModule.knownFolders.currentApp();
    const pathDest = fileSystemModule.path.join(folderDest.path, "photos/" + novonomedafoto + ".jpg");
    const saved = imagemsource.saveToFile(pathDest, "jpg");
    if (saved) {
        console.log("Image saved successfully!");
    }
});
exports.onNavigatingTo = onNavigatingTo;
The plugin does not write the file with the width & height you requested; instead it just returns the resized image asset (data) that holds the Bitmap with your width & height spec.
data.getImageAsync((bitmap, error) => {
    // use `bitmap` (android.graphics.Bitmap), already at the requested size
});
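If the goal is a file on disk at the requested size, a minimal sketch of writing that bitmap out yourself through the native Android API (the quality value and the target path under currentApp are assumptions):
const fs = require("tns-core-modules/file-system");

cam.on(CameraPlus.photoCapturedEvent, function (args) {
    args.data.getImageAsync(function (bitmap, error) {
        if (error || !bitmap) {
            console.log("Could not read the captured image", error);
            return;
        }
        // Write the 1920x1440 bitmap to disk via android.graphics.Bitmap.compress:
        const dest = fs.path.join(fs.knownFolders.currentApp().path, "photos/" + Date.now() + ".jpg");
        const out = new java.io.FileOutputStream(dest);
        bitmap.compress(android.graphics.Bitmap.CompressFormat.JPEG, 90, out);
        out.flush();
        out.close();
        console.log("Image saved at", dest);
    });
});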

How to select all features in a cluster layer in OpenLayers 3

I've got simple code and a simple map that adds features and clusters them all together, straight from the example:
var vectorSource = new ol.source.Vector({
    projection: 'EPSG:4326'
});
var clusterSource = new ol.source.Cluster({
    distance: 30,
    source: vectorSource
});
var styleCache = {};
var clusters = new ol.layer.Vector({
    source: clusterSource,
    style: function(feature, resolution) {
        var size = feature.get('features').length;
        var style = styleCache[size];
        var src;
        if (!style) {
            if (size == 1) {
                src = 'images/location-single.png';
            } else {
                src = 'images/location-multi.png';
            }
            style = [
                new ol.style.Style({
                    image: new ol.style.Circle({
                        radius: 5,
                        fill: new ol.style.Fill({
                            color: '#5bc0de'
                        })
                    })
                }),
                new ol.style.Style({
                    image: new ol.style.Icon({
                        // scale: 1 + rnd,
                        // rotateWithView: (rnd < 0.9) ? true : false,
                        // rotation: 360 * rnd * Math.PI / 180,
                        anchor: [0.45, 1],
                        anchorXUnits: 'fraction',
                        anchorYUnits: 'fraction',
                        // opacity: rnd,
                        src: src
                    }),
                    text: new ol.style.Text({
                        text: size.toString(),
                        fill: new ol.style.Fill({
                            color: '#000'
                        })
                    })
                })
            ];
            styleCache[size] = style;
        }
        return style;
    }
});
var map = new ol.Map({
    target: 'map', // the DOM element that will contain the map
    renderer: 'canvas', // force the renderer to be used
    layers: [
        // Add a new Tile layer getting tiles from the OpenStreetMap source
        new ol.layer.Tile({
            source: new ol.source.OSM()
        }),
        clusters
    ],
    // Create a view centered on the specified location and zoom level
    view: new ol.View({
        center: ol.proj.transform([2.1833, 41.3833], 'EPSG:4326', 'EPSG:3857'),
        zoom: 6
    })
});
Now the cluster function is working fine. But I need to show the coordinates of every point in the cluster; I've tried map.forEachFeatureAtPixel, but it doesn't work for ALL the features in the cluster. How do I select them all?
Oh, I think I got it! A cluster is a feature and has its own properties, so we can GET all the features in a cluster by using .getProperties(),
as in:
map.on('singleclick', function(event) {
    map.forEachFeatureAtPixel(event.pixel, function(feature) {
        var featuresInCluster = feature.getProperties().features;
    });
});
But I would really like to know if there is another way.
// First, create a select interaction pointing at the cluster layer created above:
var select = new ol.interaction.Select({
    layers: [clusters]
});
// Then add the select interaction to the map:
map.addInteraction(select);
var selectedFeatures = select.getFeatures();
// Then listen for additions to the selection:
selectedFeatures.on('add', function (event) {
    // event.target is the selected collection; item(0) is the clicked cluster feature
    var feature = event.target.item(0);
    console.log(feature);
});
// Hope it will work!
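Either way, what gets selected is still the cluster feature itself, so its member points have to be unpacked. A small sketch combining the two approaches, assuming the 'features' property that ol.source.Cluster sets on each cluster feature:
selectedFeatures.on('add', function (event) {
    var cluster = event.element; // the cluster feature just added to the selection
    var members = cluster.get('features') || []; // the original, un-clustered features
    members.forEach(function (f) {
        console.log(f.getGeometry().getCoordinates());
    });
});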
