I'm new to Fine Uploader and am using this great uploader together with Pica for image scaling. Pica works well for scaling and thumbnails. My config:
scaling: {
    sendOriginal: false,
    hideScaled: false,
    sizes: [
        {name: "a", maxSize: 200},
        {name: "b", maxSize: 600},
        {name: "c", maxSize: 1000}
    ],
    customResizer: !qq.ios() && function(resizeInfo) {
        return new Promise(function(resolve, reject) {
            pica.debug = console.log.bind(console);
            pica.resizeCanvas(resizeInfo.sourceCanvas, resizeInfo.targetCanvas, {}, resolve);
        });
    }
},
thumbnails: {
    customResizer: !qq.ios() && function(resizeInfo) {
        return new Promise(function(resolve, reject) {
            pica.resizeCanvas(resizeInfo.sourceCanvas, resizeInfo.targetCanvas, {}, resolve);
        });
    }
}
However, I need to use drawThumbnail in other places as well. I've tried googling all around and going through Fine Uploader's documentation, but still no luck making Pica run with drawThumbnail.
From Fine Uploader's documentation:
drawThumbnail (id, targetContainer[, maxSize[, fromServer[, customResizer]]])
My current code
var canvas = document.createElement('canvas');
canvas.width = 300;
canvas.height = 200;
$(".frame").html(canvas);
uploader.drawThumbnail(id, canvas, 300, false);
It would be great if someone could guide me on modifying this code to make drawThumbnail work with Pica. Thanks.
As you pointed out in your question, the documentation shows how to use an alternate resizer function when using drawThumbnail:
drawThumbnail(id, targetContainer[, maxSize[, fromServer[, customResizer]]])
...so, your code would look like this:
var canvas = document.createElement('canvas');
var customResizer = function(resizeInfo) {
    return new Promise(function(resolve, reject) {
        pica.resizeCanvas(
            resizeInfo.sourceCanvas,
            resizeInfo.targetCanvas,
            {},
            resolve
        );
    });
};
canvas.width = 300;
canvas.height = 200;
$(".frame").html(canvas);
uploader.drawThumbnail(id, canvas, 300, false, !qq.ios() && customResizer);
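Since the same Pica-backed resizer is used for scaling, thumbnails, and drawThumbnail, you could also define it once and reuse it everywhere. A minimal sketch based on the config above (picaResizer is a name introduced here):

// Shared Pica-backed resizer; the whole expression evaluates to false on
// iOS, so Fine Uploader falls back to its built-in resizer there.
var picaResizer = !qq.ios() && function(resizeInfo) {
    return new Promise(function(resolve, reject) {
        pica.resizeCanvas(resizeInfo.sourceCanvas, resizeInfo.targetCanvas, {}, resolve);
    });
};

// Reuse it in the scaling/thumbnails config and for ad-hoc thumbnails:
uploader.drawThumbnail(id, canvas, 300, false, picaResizer);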
I am using Mapbox GL JS to capture frame-by-frame video of the animation of a GeoJSON line (similar to what is described here: https://docs.mapbox.com/mapbox-gl-js/example/animate-a-line/).
The strategies for encoding Mapbox animations into MP4 are described here:
https://github.com/mapbox/mapbox-gl-js/issues/5297 and https://github.com/mapbox/mapbox-gl-js/pull/10172.
I would like to show the distance the polyline has covered as I draw each frame. I need the distance to be in the GL canvas itself (as opposed to, for example, an HTML element on top of the canvas), since that's where I'm capturing the video from.
Can someone describe a performant strategy for doing this?
Aside from tearing apart Mapbox GL JS, why not simply try drawing directly onto the map's canvas?
In this example, there are two canvases, created identically.
On the second, another requestAnimationFrame loop adds an overlay.
I've also shown how it can be recorded with MediaRecorder.
const canvas1 = document.getElementById('canvas1')
const canvas2 = document.getElementById('canvas2')
const video = document.getElementById('output')
const status = document.getElementById('status')

let x = 0 // Value to be displayed

// Draw the same moving-dot animation on a canvas.
const setupCanvas = (canvas) => {
    canvas.height = 300
    canvas.width = 300
    const ctx = canvas.getContext('2d')

    const animateCanvas = () => {
        x += 2
        if (x > 300) x = 10
        // Clear to a dark background, then draw the moving dot.
        ctx.fillStyle = 'rgba(20,20,20,1)'
        ctx.fillRect(0, 0, canvas.width, canvas.height)
        ctx.beginPath()
        ctx.arc(x, 100, 20, 0, 2 * Math.PI)
        ctx.fillStyle = 'rgba(250,0,0,1)'
        ctx.fill()
        requestAnimationFrame(animateCanvas)
    }
    animateCanvas()
}

// A second loop draws the current value of `x` on top of whatever the
// first loop painted; this stands in for your distance read-out.
const addOverlay = (canvas) => {
    const ctx = canvas.getContext('2d')
    function animateOverlay() {
        ctx.font = '48px serif'
        ctx.fillStyle = 'white'
        ctx.fillText(`X: ${x}`, 10, 50)
        requestAnimationFrame(animateOverlay)
    }
    animateOverlay()
}

// Record the composited canvas with MediaRecorder for five seconds.
const initVideoCapture = () => {
    const videoStream = canvas2.captureStream(30)
    const mediaRecorder = new MediaRecorder(videoStream)
    let chunks = []
    mediaRecorder.ondataavailable = function(e) {
        chunks.push(e.data)
    }
    mediaRecorder.onstop = function(e) {
        // MediaRecorder typically encodes WebM; re-encode afterwards
        // if you really need MP4.
        const blob = new Blob(chunks, { type: 'video/webm' })
        chunks = []
        video.src = URL.createObjectURL(blob)
    }
    mediaRecorder.start()
    status.textContent = 'Recording...'
    setTimeout(function() {
        mediaRecorder.stop()
        status.textContent = 'Complete'
    }, 5000)
}

setupCanvas(canvas1)
setupCanvas(canvas2)
addOverlay(canvas2)
initVideoCapture()
<canvas id="canvas1"></canvas>
<canvas id="canvas2"></canvas>
<video id="output" autoplay controls></video>
<p>Status: <span id="status">Loading</span></p>
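Applied to the Mapbox case specifically, a sketch of the same idea: composite the map's WebGL canvas plus the distance text into one capture canvas each frame, and record that canvas instead. This assumes the map was created with preserveDrawingBuffer: true (which Mapbox GL JS requires before its canvas can be read back), and distanceKm is whatever your animation loop computes:

// Composite the map frame and the distance label into one recordable canvas.
const captureCanvas = document.createElement('canvas')
const captureCtx = captureCanvas.getContext('2d')
let distanceKm = 0 // update this from your animation loop

function drawCaptureFrame(map) {
    const mapCanvas = map.getCanvas() // readable only with preserveDrawingBuffer: true
    captureCanvas.width = mapCanvas.width
    captureCanvas.height = mapCanvas.height
    captureCtx.drawImage(mapCanvas, 0, 0)
    captureCtx.font = '48px serif'
    captureCtx.fillStyle = 'white'
    captureCtx.fillText(`${distanceKm.toFixed(2)} km`, 10, 50)
    requestAnimationFrame(() => drawCaptureFrame(map))
}

// Then record captureCanvas.captureStream(30) with MediaRecorder exactly as above.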
Try this:
// Show the distance a polyline has covered while animating it in Mapbox GL JS.
var map = new mapboxgl.Map({
    container: 'map',
    style: 'mapbox://styles/mapbox/streets-v9',
    center: [-74.5, 40],
    zoom: 9
});

var geojson = {
    type: 'Feature',
    properties: {},
    geometry: {
        type: 'LineString',
        coordinates: [
            [-74.5, 40],
            [-74.45, 40.7],
            [-74.36, 40.8],
            [-74.36, 41.2]
        ]
    }
};

map.on('load', function () {
    map.addLayer({
        id: 'route',
        type: 'line',
        source: {
            type: 'geojson',
            data: geojson
        },
        layout: {
            'line-join': 'round',
            'line-cap': 'round'
        },
        paint: {
            'line-color': '#888',
            'line-width': 8
        }
    });

    // Total line length, plus 200 evenly spaced coordinates along it.
    // (Newer Turf versions call this turf.length(geojson, {units: 'kilometers'}).)
    var lineDistance = turf.lineDistance(geojson, 'kilometers');
    var arc = [];
    for (var i = 0; i < lineDistance; i += lineDistance / 200) {
        var segment = turf.along(geojson, i, 'kilometers');
        arc.push(segment.geometry.coordinates);
    }

    // A point that travels along the line; its `distance` property is
    // rendered by the symbol layer, so the read-out lives in the GL canvas.
    var point = {
        type: 'FeatureCollection',
        features: [{
            type: 'Feature',
            properties: { distance: '0.00 km' },
            geometry: {
                type: 'Point',
                coordinates: arc[0]
            }
        }]
    };

    map.addLayer({
        id: 'point',
        type: 'symbol',
        source: {
            type: 'geojson',
            data: point
        },
        layout: {
            // 'car' must exist in the style's sprite; use any icon you have.
            'icon-image': 'car',
            'icon-rotation-alignment': 'map',
            'icon-allow-overlap': true,
            'icon-ignore-placement': true,
            'text-field': '{distance}',
            'text-offset': [0, 1.5]
        }
    });

    var counter = 0;
    function animate() {
        // Move the point to the next position and update the distance label.
        point.features[0].geometry.coordinates = arc[counter];
        point.features[0].properties.distance =
            (counter * lineDistance / 200).toFixed(2) + ' km';
        map.getSource('point').setData(point);
        if (counter < arc.length - 1) {
            counter++;
            requestAnimationFrame(animate);
        }
    }
    animate();
});
No collisions happen between the map layer and the player sprite, but collisions with the world bounds work. What is wrong?
I tried the different workarounds I could find online, and none of them worked.
Game config
const config = {
    type: Phaser.AUTO,
    width: 800,
    height: 600,
    backgroundColor: "#fff",
    physics: {
        default: 'arcade',
        arcade: {
            gravity: { y: gameState.gravity },
            debug: true
        }
    },
    scene: {
        preload,
        create,
        update
    }
};
Code in create() regarding the tilemap, its layers, and the character:
gameState.map = this.add.tilemap('map');
gameState.dungeonTileset = gameState.map.addTilesetImage('dungeon', 'dungeonTiles');
gameState.backgroundLayer = gameState.map.createStaticLayer('Background', gameState.dungeonTileset);
gameState.mapLayer = gameState.map.createStaticLayer('Map', gameState.dungeonTileset);
gameState.miscLayer = gameState.map.createStaticLayer('Misc', gameState.dungeonTileset);
gameState.mapLayer.setCollisionByExclusion([-1]);
this.physics.world.bounds.width = gameState.mapLayer.width;
this.physics.world.bounds.height = gameState.mapLayer.height;
gameState.player = this.physics.add.sprite(73, 398, 'player', 0);
gameState.player.setCollideWorldBounds(true);
this.physics.add.collider(gameState.player, gameState.mapLayer);
No warnings and no errors come up in the console. I don't know what to do anymore.
Thanks in advance!
I have edited the original example a little so you can see how it looks. I think the problem in your code is the line gameState.mapLayer.setCollisionByExclusion([-1]);, but I am not sure because I can't see how the tilemap is created.
var config = {
    type: Phaser.WEBGL,
    width: 800,
    height: 576,
    backgroundColor: '#2d2d2d',
    parent: 'phaser-example',
    loader: {
        baseURL: 'https://labs.phaser.io',
        crossOrigin: 'anonymous'
    },
    pixelArt: true,
    physics: {
        default: 'arcade',
        arcade: { gravity: { y: 300 } }
    },
    scene: {
        preload: preload,
        create: create,
        update: update
    }
};

var game = new Phaser.Game(config);
var map;
var cursors;
var player;
var groundLayer;

function preload ()
{
    this.load.image('ground_1x1', 'assets/tilemaps/tiles/ground_1x1.png');
    this.load.tilemapTiledJSON('map', 'assets/tilemaps/maps/tile-collision-test.json');
    this.load.image('player', 'assets/sprites/phaser-dude.png');
}

function create ()
{
    map = this.make.tilemap({ key: 'map' });
    var groundTiles = map.addTilesetImage('ground_1x1');
    map.createDynamicLayer('Background Layer', groundTiles, 0, 0);
    groundLayer = map.createDynamicLayer('Ground Layer', groundTiles, 0, 0);
    // Here collision is enabled for the tile index range the map actually uses.
    groundLayer.setCollisionBetween(1, 25);
    player = this.physics.add.sprite(80, 70, 'player')
        .setBounce(0.1);
    this.physics.add.collider(player, groundLayer);
    cursors = this.input.keyboard.createCursorKeys();
    this.cameras.main.setBounds(0, 0, map.widthInPixels, map.heightInPixels);
    this.cameras.main.startFollow(player);
}

function update ()
{
    player.body.setVelocityX(0);
    if (cursors.left.isDown)
    {
        player.body.setVelocityX(-200);
    }
    else if (cursors.right.isDown)
    {
        player.body.setVelocityX(200);
    }
    if (cursors.up.isDown && player.body.onFloor())
    {
        player.body.setVelocityY(-300);
    }
}
<script src="//cdn.jsdelivr.net/npm/phaser@3.17.0/dist/phaser.min.js"></script>
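To check whether setCollisionByExclusion([-1]) actually flagged any tiles in your map, Phaser's tile debug rendering can help. A minimal sketch to drop into your create() after the layers are built (renderDebug is part of the Phaser 3 tilemap API):

// Tint tiles that were marked as colliding; if nothing is tinted,
// setCollisionByExclusion([-1]) did not flag any tiles on the layer.
const debugGraphics = this.add.graphics().setAlpha(0.7);
gameState.mapLayer.renderDebug(debugGraphics, {
    tileColor: null, // non-colliding tiles are not drawn
    collidingTileColor: new Phaser.Display.Color(243, 134, 48, 255),
    faceColor: new Phaser.Display.Color(40, 39, 37, 255)
});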
Hello, I am developing a NativeScript app and I'm using the "nativescript-camera-plus" plugin (plugin on GitHub: https://github.com/nstudio/nativescript-camera-plus). Even when setting the parameters (width and height) exactly as described in the Option Interfaces section:
cam.takePicture({ saveToGallery: true, confirm: false, keepAspectRatio: true, width: 1920, height: 1440 });
...the plugin takes the picture and saves it using the screen resolution of the device divided by 2 as its reference. Example: if the screen resolution is 1920, the plugin divides by 2 and saves the photo at 960. I need it to save the photo with the width and height I set in the options (parameters) in the code. Can someone help me?
MY CODE:
exports.takePicFromCam = function (args) {
    // Create the camera before calling methods on it.
    if (!cam) {
        cam = new CameraPlus();
    }
    cam.requestCameraPermissions().then(function () {
        cam.takePicture({ saveToGallery: true, confirm: false, keepAspectRatio: true, width: 1920, height: 1440 });
    });
};

cam.on(CameraPlus.photoCapturedEvent, function (args) {
    fromAsset(args.data).then(function (res) {
        var preview = topmost().getViewById('testImagePickResult');
        var picture = res;

        // Rotate the captured bitmap 180 degrees (Android).
        var matrix = new android.graphics.Matrix();
        matrix.postRotate(180);
        var imagemfinal = android.graphics.Bitmap.createBitmap(
            picture.android, 0, 0,
            picture.android.getWidth(), picture.android.getHeight(),
            matrix, true);
        preview.src = imagemfinal;

        // Save the previewed image under a timestamp-based name.
        var novonomedafoto = new Date().getTime();
        var imagemsource = preview.imageSource;
        const folderDest = fileSystemModule.knownFolders.currentApp();
        const pathDest = fileSystemModule.path.join(folderDest.path, "photos/" + novonomedafoto + ".jpg");
        const saved = imagemsource.saveToFile(pathDest, "jpg");
        if (saved) {
            console.log("Image saved successfully!");
        }
    });
});
exports.onNavigatingTo = onNavigatingTo;
The plugin does not write the file with the width & height you asked for; instead it just returns the resized image asset (data), which holds the Bitmap with your width & height spec.
data.getImageAsync((bitmap, error) => {
    // Use `bitmap` (android.graphics.Bitmap)
});
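So one way to get a file at the requested size is to save that bitmap yourself instead of reading the screen-sized preview. A hedged sketch, reusing fileSystemModule and the "photos" folder from the question; ImageSource.setNativeSource wraps a native android.graphics.Bitmap:

const imageSourceModule = require("tns-core-modules/image-source");

args.data.getImageAsync(function (bitmap, error) {
    if (error) {
        console.log(error);
        return;
    }
    // Wrap the full-size native bitmap and write it to disk.
    const imgSource = new imageSourceModule.ImageSource();
    imgSource.setNativeSource(bitmap);
    const pathDest = fileSystemModule.path.join(
        fileSystemModule.knownFolders.currentApp().path,
        "photos/" + new Date().getTime() + ".jpg");
    const saved = imgSource.saveToFile(pathDest, "jpg");
    console.log(saved ? "Full-size image saved!" : "Save failed");
});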
I have a dataset of 100,000 records and I am sending them in patches of 10,000 each. While fetching the data I add the markers and redraw the cluster, so in the end I get a single cluster of 100,000. But once I zoom in and try to zoom out again, they overlap in patches of 10,000 each instead of reforming the single cluster of 100,000.
var mapDiv = document.getElementById('newmap');
map = new google.maps.Map(mapDiv, {
    center: new google.maps.LatLng(latitude, longitude),
    zoom: 3,
    panControl: true,
    mapTypeControl: true,
    mapTypeControlOptions: {
        style: google.maps.MapTypeControlStyle.DROPDOWN_MENU
    },
    zoomControl: true,
    zoomControlOptions: {
        position: google.maps.ControlPosition.LEFT_TOP
    },
    streetViewControl: true,
    mapTypeId: google.maps.MapTypeId.ROADMAP
});

function addMarker1(locations, outletname, outletData) {
    var infoWindow = new google.maps.InfoWindow();
    markers = locations.map(function(location, i) {
        return new google.maps.Marker({
            position: location
        });
    });
    // A new MarkerClusterer is created on every call.
    markerCluster = new MarkerClusterer(map, markers, {imagePath: 'https://developers.google.com/maps/documentation/javascript/examples/markerclusterer/m'});
    new_arr = new_arr.concat(markers);
    markerCluster.clearMarkers();
    markerCluster.addMarkers(new_arr);
    markerCluster.redraw();
}

// This runs once per batch of 10,000 records.
for (var i = 0; i < outletDataLen; i++) {
    outletArray.push(outletData[i]['Outletview']['name']);
    j.push({
        lat: parseFloat(outletData[i]['Outletview']['latitude']),
        lng: parseFloat(outletData[i]['Outletview']['longitude'])
    });
    outletname.push(outletData[i]['Outletview']['name']);
}
addMarker1(j, outletname, outletData);
The solution is to clear the previous clusterer's state before looping through the markers again:
if (markerCluster) {
    markerCluster.clearMarkers();
    markerCluster.resetViewport();
    markers = [];
    // Removing specific markers would be markerCluster.removeMarkers(new_arr)
    // (note the plural form takes an array); it is redundant here because
    // clearMarkers() has already detached them all.
}
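Alternatively, a sketch of the pattern that avoids the overlap entirely: create the MarkerClusterer once and add each batch to that same instance (names follow the question's code):

// Create the clusterer a single time, before any batches arrive...
var markerCluster = new MarkerClusterer(map, [], {
    imagePath: 'https://developers.google.com/maps/documentation/javascript/examples/markerclusterer/m'
});

function addBatch(locations) {
    var batch = locations.map(function (location) {
        return new google.maps.Marker({ position: location });
    });
    // ...and let it merge every batch into one cluster index.
    markerCluster.addMarkers(batch);
}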
I've got simple code and a simple map that adds features and clusters them all together, straight from the example:
var vectorSource = new ol.source.Vector({
    projection: 'EPSG:4326'
});

var clusterSource = new ol.source.Cluster({
    distance: 30,
    source: vectorSource
});

var styleCache = {};
var clusters = new ol.layer.Vector({
    source: clusterSource,
    style: function(feature, resolution) {
        var size = feature.get('features').length;
        var style = styleCache[size];
        var src;
        if (!style) {
            if (size == 1) {
                src = 'images/location-single.png';
            } else {
                src = 'images/location-multi.png';
            }
            style = [
                new ol.style.Style({
                    image: new ol.style.Circle({
                        radius: 5,
                        fill: new ol.style.Fill({
                            color: '#5bc0de'
                        })
                    })
                }),
                new ol.style.Style({
                    image: new ol.style.Icon({
                        anchor: [0.45, 1],
                        anchorXUnits: 'fraction',
                        anchorYUnits: 'fraction',
                        src: src
                    }),
                    text: new ol.style.Text({
                        text: size.toString(),
                        fill: new ol.style.Fill({
                            color: '#000'
                        })
                    })
                })
            ];
            styleCache[size] = style;
        }
        return style;
    }
});

var map = new ol.Map({
    target: 'map', // The DOM element that will contain the map
    renderer: 'canvas', // Force the renderer to be used
    layers: [
        // Add a new Tile layer getting tiles from the OpenStreetMap source
        new ol.layer.Tile({
            source: new ol.source.OSM()
        }),
        clusters
    ],
    // Create a view centered on the specified location and zoom level
    view: new ol.View({
        center: ol.proj.transform([2.1833, 41.3833], 'EPSG:4326', 'EPSG:3857'),
        zoom: 6
    })
});
Now I've got the cluster function working fine. But I need to show the coordinates for every point in a cluster. I've tried to use map.forEachFeatureAtPixel, but it doesn't work for ALL the features in the cluster. How do I select them all?
Oh, I think I got it! A cluster is itself a feature with its own properties, so we can get all the features in a cluster by using .getProperties(), as in:
map.on('singleclick', function(event) {
    map.forEachFeatureAtPixel(event.pixel, function(feature) {
        // The member points of the cluster live in the 'features' property.
        var featuresInCluster = feature.getProperties().features;
    });
});
But I would really like to know if there is another way.
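Building on that, a sketch that logs the coordinates of every point inside a clicked cluster (transforming back to EPSG:4326 is an assumption here, since the view above is in EPSG:3857):

map.on('singleclick', function(event) {
    map.forEachFeatureAtPixel(event.pixel, function(feature) {
        // Each cluster feature carries its member points in 'features'.
        feature.get('features').forEach(function(point) {
            var lonLat = ol.proj.transform(
                point.getGeometry().getCoordinates(), 'EPSG:3857', 'EPSG:4326');
            console.log(lonLat);
        });
    });
});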
/** First create a select interaction object, passing it the cluster layer you created. */
var select = new ol.interaction.Select({
    layers: [clusters]
});

/** Then add the select interaction to the map. */
map.addInteraction(select);
var selectedFeatures = select.getFeatures();

/** Then listen for selections. */
selectedFeatures.on('add', function (event) {
    // event.target only contains the clustered point;
    // its member features are available via feature.get('features').
    var feature = event.target.item(0);
    console.log(feature);
});
/** Hope it will work! */