Total noob question. I've been searching Google and, surprisingly, haven't been able to find anything about this. I feel like I'm missing something here. :P
I currently have a resize() function that modifies the canvas dimensions to the size of the window. In a minimal example (which also uses jQuery), I have a variable that references my Shape object. According to the docs, the Shape object does not have width and height properties. What is the most efficient way of resizing a Shape object? Removing and redrawing it dynamically?
This is what I have:
var stage;
var bgColor;
$(document).ready(function(){
    init();
});

function init()
{
    stage = new createjs.Stage("canvasStage");
    bgColor = new createjs.Shape();
    bgColor.graphics.beginFill("#000000").drawRect(0, 0, stage.canvas.width, stage.canvas.height);
    stage.addChild(bgColor);
    $(window).resize(function(){ windowResize(); });
    windowResize();
}

function windowResize()
{
    stage.canvas.width = $(window).width();
    stage.canvas.height = $(window).height();
    //bgColor.width = $(window).width();   // No width property
    //bgColor.height = $(window).height(); // No height property
    stage.update();
}
You can use the shape's scaleX and scaleY to scale the shape.
Note: The Shape Object extends the DisplayObject so you might also want to look at the DisplayObject docs for many more useful properties/methods.
myShape.scaleX=1.2;
myShape.scaleY=1.2;
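For example, in your windowResize() you could rescale the background shape to the new window size. A minimal, untested sketch, assuming the rect was originally drawn at some known base size (origW/origH below are made-up values for illustration):
function windowResize()
{
    var origW = 800, origH = 600; // assumed size the rect was originally drawn at
    stage.canvas.width = $(window).width();
    stage.canvas.height = $(window).height();
    // Scale the shape relative to the size it was drawn at
    bgColor.scaleX = stage.canvas.width / origW;
    bgColor.scaleY = stage.canvas.height / origH;
    stage.update();
}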
As mentioned by @markE, only scaleX and scaleY are available.
A way to work around the problem is to instantiate shapes with 1px width and 1px height:
mc_bg = new createjs.Shape();
mc_bg.graphics.beginFill("#CCCCCC").drawRect(0,0,1,1);
stage.addChild(mc_bg);
mc_left = new createjs.Shape();
mc_left.graphics.beginFill("#333333").drawRect(0,0,1,1);
stage.addChild(mc_left);
mc_circle = new createjs.Shape();
mc_circle.graphics.beginFill("#888888").drawCircle(0, 0, 1);
stage.addChild(mc_circle);
This lets you set your dimensions directly in pixel units, with no conversion, by using scaleX and scaleY before rendering:
mc_bg.scaleX = stage_width;
mc_bg.scaleY = stage_height;
mc_left.scaleX = stage_width/2;
mc_left.scaleY = stage_height;
mc_circle.x = stage_width/4;
mc_circle.y = stage_width/4;
mc_circle.scaleX = stage_width/4;
mc_circle.scaleY = stage_width/4;
stage.update();
See fiddle example with live resizing on window resize: https://jsfiddle.net/jckleinbourg/go3fs1zt/
Helper functions to convert dimensions to scale and vice versa:
function dimToScale(origDim, desiredDim)
{
    return desiredDim / origDim;
}

function scaleToDim(origDim, scale)
{
    return scale * origDim;
}
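For example, for a shape originally drawn 100px wide that should end up 250px wide (the numbers are just for illustration):
myShape.scaleX = dimToScale(100, 250); // 2.5
scaleToDim(100, myShape.scaleX);       // 250, back to pixels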
Still very much a newbie to coding, so please be gentle :)
I'm hoping someone can help me figure out how to use Paper.js on a second canvas after the first one has been used.
I'm trying to use two canvas elements:
Canvas 1 - capture an HTML5 video still and convert it to base64 (tick :-) = done)
Canvas 2 - use the base64 image, perform the 'Working with Rasters to find the colors of pixels' technique, and convert it to circle paths (boo = fail :-( )
Something like this:
The code:
<script src="https://cdn.jsdelivr.net/npm/hls.js@latest"></script>
<video id="video" preload="auto" muted="" playsinline="" width="580" src="blob:https://www.georgefisher.co.uk/78e3a45c-ae07-4ea5-af56-45a5ed9cf1b0"></video>
<script>
    var video = document.getElementById('video');
    var videoSrc = 'https://camsecure.co/HLS/georgefisher.m3u8';
    if (Hls.isSupported()) {
        var hls = new Hls();
        hls.loadSource(videoSrc);
        hls.attachMedia(video);
    } else if (video.canPlayType('application/vnd.apple.mpegurl')) {
        video.src = videoSrc;
    }
    video.play();
</script>
<br>
<button onclick="capture()">Capture</button>
<br>
<canvas id="canvas" style="overflow:auto">
</canvas>
<canvas id="canvas2" resize>
<img src="" id="myImg"/></canvas>
var resultb64 = "";

function capture() {
    var canvas = document.getElementById('canvas');
    var video = document.getElementById('video');
    canvas.width = video.videoWidth / 4;
    canvas.height = video.videoHeight / 4;
    canvas.getContext('2d').drawImage(video, 0, 0, video.videoWidth / 4, video.videoHeight / 4);
    resultb64 = canvas.toDataURL();
    document.querySelector("#myImg").src = canvas.toDataURL();
}
/* Paper JS Setup for working in CodePen */
/* ====================== *
 * 0. Initiate Canvas     *
 * ====================== */
// expose paperjs classes into global scope
paper.install(window);

// Only execute our code once the DOM is ready.
window.onload = function() {
    // bind paper to the canvas
    paper.setup('canvas2');
    // paper.activate();

    // Get a reference to the canvas object
    var canvas = document.getElementById('canvas2');
    var ctx = canvas.getContext('2d');
    // console.log(ctx, image);
    // ctx.drawImage(image, 0, 0);
    // return;
    // }

    // Create a raster item using the image id='' tag
    var image = document.querySelector('img');
    var raster = new Raster(image);

    // Hide the raster:
    raster.visible = false;

    // The size of our grid cells:
    var gridSize = 15;

    // Space the cells by 120%:
    var spacing = 1;

    // As the web is asynchronous, we need to wait for the raster to load
    // before we can perform any operation on its pixels.
    raster.onLoad = function() {
        // Since the example image we're using is much too large, and therefore has
        // way too many pixels, let's downsize it to 40 pixels wide and 30 pixels high:
        raster.size = new Size(40, 30);

        for (var y = 0; y < raster.height; y++) {
            for (var x = 0; x < raster.width; x++) {
                // Get the color of the pixel:
                var color = raster.getPixel(x, y);

                // Create a circle shaped path:
                var path = new Path.Circle({
                    center: new Point(x, y).multiply(gridSize),
                    radius: gridSize / 2 / spacing
                });

                // Set the fill color of the path to the color of the pixel:
                path.fillColor = color;
            }
        }

        // Move the active layer to the center of the view,
        // so all the created paths in it appear centered.
        project.activeLayer.position = view.center;
    }
}
I've tried giving the second canvas a different id="canvas2" and referencing that, which I can see in the console. However, nothing appears in the second canvas and the Paper.js script doesn't seem to execute. Can someone help me understand why?
Please also see the link to the fiddle below:
https://jsfiddle.net/jmnes/o4Lpkfs6/1/
Alternative method.
You don't need to capture the video, you don't need to capture the pixels using Paper.js and a raster, and you don't need to find the color of each circle and draw it.
All these methods are slow, complex, and power hungry.
You can create a mask and mask out the circles, with the colors drawn from a smaller canvas whose resolution matches the number of circles.
How to
Add one canvas (the main canvas) to the DOM. This will display the result.
Create two offscreen canvases.
One (the color canvas) has the same resolution as the grid of circles you want to display, e.g. if you have 30 by 40 circles the canvas resolution should be 30 by 40.
One (the mask canvas) is the circle mask. It has the same resolution as the main canvas. Draw all the circles in one color on this canvas.
Then render once per frame:
Draw the video on the color canvas to fit.
Turn off smoothing on the main canvas, e.g. ctxMain.imageSmoothingEnabled = false.
Draw the color canvas onto the main canvas to fit.
This will draw a color square at each circle position: ctxMain.drawImage(colorCanvas, 0, 0, mainCanvas.width, mainCanvas.height).
Set the composite operation to "destination-in", e.g. ctxMain.globalCompositeOperation = "destination-in".
Draw the mask canvas (canvas with circles on it) onto the main canvas. This will remove pixels outside each circle.
Restore the default composite operation on the main canvas: ctxMain.globalCompositeOperation = "source-over".
All done, for a real-time FX on almost any device.
The above method is the fastest way to render the effect you are after using the 2D API. A minimal sketch of the idea follows.
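The sketch below is untested and only illustrative; it assumes the video element from your question and a main canvas with id="mainCanvas", and all other names are made up:
// One-time setup. mainCanvas is in the DOM; the other two canvases stay offscreen.
var mainCanvas = document.getElementById("mainCanvas");
var ctxMain = mainCanvas.getContext("2d");

var GRID_W = 40, GRID_H = 30; // number of circles across / down

var colorCanvas = document.createElement("canvas");
colorCanvas.width = GRID_W;
colorCanvas.height = GRID_H;
var ctxColor = colorCanvas.getContext("2d");

var maskCanvas = document.createElement("canvas");
maskCanvas.width = mainCanvas.width;
maskCanvas.height = mainCanvas.height;
var ctxMask = maskCanvas.getContext("2d");

// Draw the circle mask once: one circle per grid cell, all in a single color.
var cellW = mainCanvas.width / GRID_W;
var cellH = mainCanvas.height / GRID_H;
ctxMask.fillStyle = "#000";
for (var gy = 0; gy < GRID_H; gy++) {
    for (var gx = 0; gx < GRID_W; gx++) {
        ctxMask.beginPath();
        ctxMask.arc((gx + 0.5) * cellW, (gy + 0.5) * cellH, Math.min(cellW, cellH) / 2, 0, Math.PI * 2);
        ctxMask.fill();
    }
}

// Render once per frame.
function render() {
    // One pixel per circle on the color canvas.
    ctxColor.drawImage(video, 0, 0, GRID_W, GRID_H);
    // Scale it up without smoothing so each circle gets one flat color.
    ctxMain.imageSmoothingEnabled = false;
    ctxMain.clearRect(0, 0, mainCanvas.width, mainCanvas.height);
    ctxMain.drawImage(colorCanvas, 0, 0, mainCanvas.width, mainCanvas.height);
    // Keep only the pixels inside the circles.
    ctxMain.globalCompositeOperation = "destination-in";
    ctxMain.drawImage(maskCanvas, 0, 0);
    ctxMain.globalCompositeOperation = "source-over";
    requestAnimationFrame(render);
}
requestAnimationFrame(render);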
I retrieve a rect from openSeadragonSelection:
viewer:
this.viewer = OpenSeadragon(this.config);
this.selection = this.viewer.selection({
    showConfirmDenyButtons: true,
    styleConfirmDenyButtons: true,
    returnPixelCoordinates: true,
    onSelection: rect => console.log(rect)
});
this.selection.enable();
rect by onSelection:
t.SelectionRect {x: 3502, y: 2265, width: 1122, height: 887, rotation:0, degrees: 0, …}
I have no idea how to get the canvas content for this rect from my viewer instance.
this.viewer.open(new OpenSeadragon.ImageTileSource(this.getTile(this.src)));
A self-implemented imageViewer returned the canvas of the selected area, so I could get the blob and post it to the server:
onSave(canvas){
    let source = canvas.toDataURL();
    this.setState({source: source, crop: false, angle: 0});
    save(this.dataURItoBlob(source), source.match(new RegExp("\/(.*);"))[1]);
}
dataURItoBlob(dataURI) {
    // convert base64/URLEncoded data component to raw binary data held in a string
    var byteString;
    if (dataURI.split(',')[0].indexOf('base64') >= 0)
        byteString = atob(dataURI.split(',')[1]);
    else
        byteString = unescape(dataURI.split(',')[1]);
    // separate out the mime component
    var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
    // write the bytes of the string to a typed array
    var ia = new Uint8Array(byteString.length);
    for (var i = 0; i < byteString.length; i++) {
        ia[i] = byteString.charCodeAt(i);
    }
    return new Blob([ia], {type: mimeString});
}
How can I get the image of the viewer for the given rect? Rotation should be considered as well.
@iangilman:
Thanks a lot for your advice. I created another canvas, cropped it, and then put it back into the viewer. I was not sure if something similar was already supported by your library:
const viewportRect = self.viewer.viewport.imageToViewportRectangle(rect);
const webRect = self.viewer.viewport.viewportToViewerElementRectangle(viewportRect);
const { x, y, width, height } = webRect || {};
const { canvas } = self.viewer.drawer;
let source = canvas.toDataURL();
const img = new Image();
img.onload = function () {
    let croppedCanvas = document.createElement('canvas');
    let ctx = croppedCanvas.getContext('2d');
    croppedCanvas.width = width;
    croppedCanvas.height = height;
    ctx.drawImage(img, x, y, width, height, 0, 0, width, height);
    let croppedSrc = croppedCanvas.toDataURL();
    // update viewer with cropped image
    self.tile = self.getTile(croppedSrc);
    self.ImageTileSource = new OpenSeadragon.ImageTileSource(self.tile);
    self.viewer.open(self.ImageTileSource);
};
img.src = source;
Rotation hasn't been considered yet.
I imagine you'll need to convert the rectangle into the proper coordinates, then create a second canvas and copy the appropriate bit out of the OSD canvas into the second one.
Looks like maybe the selection rectangle is in image coordinates? The OSD canvas will be in web coordinates, or maybe double that on an HDPI display. OSD has a number of conversion functions, for instance:
var viewportRect = viewer.viewport.imageToViewportRectangle(imageRect);
var webRect = viewer.viewport.viewportToViewerElementRectangle(viewportRect);
You can find out the pixel density via OpenSeadragon.pixelDensityRatio.
Once you have the appropriate rectangle it should be easy to copy out of the one canvas into another. I'm not sure how you incorporate rotation, but it might be as simple as adding a rotation call to one of the canvas contexts.
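For instance, a rough, untested sketch of the copy step, assuming the selection rect is in image coordinates and viewer is your OSD instance:
var viewportRect = viewer.viewport.imageToViewportRectangle(imageRect);
var webRect = viewer.viewport.viewportToViewerElementRectangle(viewportRect);
var ratio = OpenSeadragon.pixelDensityRatio;

var cropped = document.createElement('canvas');
cropped.width = webRect.width * ratio;
cropped.height = webRect.height * ratio;

var ctx = cropped.getContext('2d');
// A ctx.rotate(...) call around the selection center could go here for rotated selections.
ctx.drawImage(
    viewer.drawer.canvas,
    webRect.x * ratio, webRect.y * ratio,           // source position in canvas pixels
    webRect.width * ratio, webRect.height * ratio,  // source size
    0, 0, cropped.width, cropped.height             // destination
);
var croppedDataURL = cropped.toDataURL();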
Sorry this is kind of vague, but I hope it helps!
Firstly, I'd like to make it clear that I am an absolute beginner when it comes to canvas & three.js
I've taken an existing project and am trying to make some minor modifications, namely drawing an image onto a texture instead of just writing text. I would like the texture to have a set background color, and I will be attempting to draw a transparent PNG image on top of it.
The method that I am attempting to modify returns a texture for use in another function which is currently doing a lot more stuff that I probably won't be able to explain here.
I've tried a few approaches and I can't seem to get the behaviour that I want.
Approach #1
In this approach, I am getting the background color but I am not getting the image displayed
var ts = calc_texture_size(size + size * 2 * margin) * 2;
canvas.width = canvas.height = ts;
context.fillStyle = back_color;
context.fillRect(0, 0, canvas.width, canvas.height);
var img = new Image();
function drawIcon() {
    context.drawImage(img, 0, 0);
}
img.onload = drawIcon();
img.src = "img/test.png";
var texture = new THREE.Texture(canvas);
texture.needsUpdate = true;
return texture;
Approach #2
After doing some research, it seemed I was supposed to use a TextureLoader but I couldn't figure out how to draw the color/shape on the canvas. In this case, the image appears on its own without any background color.
var imgTex = new THREE.TextureLoader().load( "img/test.png", function(t){
    texture.map = imgTex;
});
texture.needsUpdate = true;
return texture;
How can I successfully draw an image on top of a coloured background and return it as a texture for use by other functions?
Your problem is closely related to JavaScript event handling, like a question I answered yesterday: How to make a Json model auto-rotates in the scene?. You can never set an "onload" handler and then expect it to have returned the variable for you - the variable is just not going to be defined until the callback is executed. Instead you need to defer the work to the callback, or else keep polling the variable until it's defined.
More specific to your question though:
With approach #1, you're sending the backfilled texture to the GPU, then rendering additional content to it, but not updating the texture that was sent to the GPU after the image was loaded. Moving the texture.needsUpdate = true; to your drawIcon() would probably solve it here. Untested example:
var ts = calc_texture_size(size + size * 2 * margin) * 2;
var img = new Image();
var texture = new THREE.Texture(canvas);
canvas.width = canvas.height = ts;
context.fillStyle = back_color;
context.fillRect(0, 0, canvas.width, canvas.height);
img.onload = function() {
    context.drawImage(img, 0, 0);
    texture.needsUpdate = true;
};
img.src = "img/test.png";
return texture;
NB: You should always define your var variables at the start of a scope in JavaScript to make things clearer. drawIcon() has access to that texture variable, even though it doesn't look like it from your example code because texture appears to be defined after it; that's because var declarations are hoisted to the top of the function scope they're defined in.
With approach #2, you need a little extra work to paint on the texture: drawing the loaded texture's image onto a canvas and sending the resulting canvas to the GPU. The ImageLoader and Texture documentation combined hint at how you need to paint with the image you get from the TextureLoader.
Combining the two approaches, it should be something like this (also utterly untested):
var ts = calc_texture_size(size + size * 2 * margin) * 2;
canvas.width = canvas.height = ts;
context.fillStyle = back_color;
context.fillRect(0, 0, canvas.width, canvas.height);

new THREE.TextureLoader().load(
    "img/test.png",
    function( loadedTexture ) {
        var image = loadedTexture.image;
        context.drawImage( image, 0, 0 );
        var texture = new THREE.Texture(canvas);
        texture.needsUpdate = true;
        // Now do something with texture, like assigning it
        // to your material
    }
);
NB: Don't forget about data URLs: if the image you need to paint with is small, it can be included in your code as a "data:" URL, which avoids the need to go to the server and wait for it to load in an event handler. There's an example at MDN.
I am working on an application where the user sees a blank area (a div or canvas or whatever, let's call it mycanvas hereafter). He then drags some images from outside (a div) and drops them on mycanvas. He can also resize them. And he can also draw something in mycanvas with pencils and colors, with an erasing feature. As per my research so far, I've figured out that the drawing part is pure HTML5 canvas stuff, so no problem with that. But I'm not sure whether he can drop images from an outside div/canvas onto mycanvas. Please tell me how to achieve all three features (drag-drop from outside, draw with pencil, resize images) in a single area.
I have created an online drag-and-drop editor with HTML5 canvas.
First, I create a loop:
var loop = function(){
    // Operation Here
}
self.setInterval(loop, 1000/60);
Create the data model, for example an image:
var DndImage = function(x, y, width, height, image){
    this.type = "image";
    this.image = image;
    this.x = x;
    this.y = y;
    this.width = width;
    this.height = height;
}
Then we draw the image in the loop:
var ObjectArray = new Array();
var WIDTH = 800;
var HEIGHT = 600;

var loop = function(){
    var canvas = document.getElementById("canvas");
    var context = canvas.getContext("2d");
    context.clearRect(0, 0, WIDTH, HEIGHT);
    for(var x = 0; x < ObjectArray.length; x++){
        if(ObjectArray[x].type == "image")
            context.drawImage(ObjectArray[x].image, ObjectArray[x].x, ObjectArray[x].y, ObjectArray[x].width, ObjectArray[x].height);
    }
}
Function to add a new image object:
function addImage(src, x, y, width, height){
    var img = new Image();
    img.src = src;
    img.onload = function(){
        ObjectArray.push(new DndImage(x, y, width, height, img));
    }
}
And now, if you want to do drag and drop, what you need to do is set up listeners for the mouse events, and set the DndImage object's x and y to follow the mouse position on the canvas. You can scale the image or change its size too. A rough sketch of the dragging logic follows the listeners below.
document.addEventListener("mousedown", function(){ });
document.addEventListener("mouseup", function(){ });
document.addEventListener("mousemove", function(){ });
document.addEventListener("click", function(){ });
Hope I can help you :D
You can achieve all the required features using KineticJS.
To drag, drop and resize:
http://www.html5canvastutorials.com/labs/html5-canvas-drag-and-drop-resize-and-invert-images/
To paint using different shapes, say a line:
http://www.html5canvastutorials.com/kineticjs/html5-canvas-kineticjs-line-tutorial/
And dropping from outside the canvas is probably the simplest thing (a rough sketch follows the link below):
http://www.w3schools.com/html/html5_draganddrop.asp
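For instance, an untested sketch of dropping an image from an outside div onto a plain canvas using the native HTML5 drag-and-drop events (the ids here are only illustrative):
// The source image lives in a div outside the canvas; <img> elements are draggable by default
var sourceImg = document.getElementById("sourceImg");
sourceImg.addEventListener("dragstart", function(e){
    e.dataTransfer.setData("text/plain", sourceImg.src);
});

var canvas = document.getElementById("canvas");
canvas.addEventListener("dragover", function(e){
    e.preventDefault(); // allow the drop
});
canvas.addEventListener("drop", function(e){
    e.preventDefault();
    var src = e.dataTransfer.getData("text/plain");
    var rect = canvas.getBoundingClientRect();
    var dropX = e.clientX - rect.left;
    var dropY = e.clientY - rect.top;
    var img = new Image();
    img.onload = function(){
        // Draw the dropped image where it was released
        canvas.getContext("2d").drawImage(img, dropX, dropY);
    };
    img.src = src;
});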
Just check these and let me know if there is any problem in integration.
I am completely new to HTML5 and have been reading about it for the past few days, mainly because I wanted to create a rotating image to put in a <div>. I found code that does exactly what I want, but it throws the canvas onto the bottom left corner of my page (I'm not sure why, but I think it has something to do with the very first line of the code below). I'm not sure how to adapt the code to an element so that I can put it where I want. From looking at other people's scripts and trying to emulate them, I know you're supposed to do something like this to hold the canvas: <canvas width="100" height="100" id="pageCanvas"></canvas>, but I don't know how to name the below code in order to do that. I greatly appreciate any help anyone can offer me - thank you so much for reading! :)
<script>
window.addEventListener("load", init);
var counter = 0,
logoImage = new Image(),
TO_RADIANS = Math.PI/180;
logoImage.src = 'IMG URL';
var canvas = document.createElement('canvas');
canvas.width = 100;
canvas.height = 100;
var context = canvas.getContext('2d');
document.body.appendChild(canvas);
function init(){
    setInterval(loop, 1000/30);
}
function loop() {
    context.clearRect(0, 0, canvas.width, canvas.height);
    drawRotatedImage(logoImage, 100, 100, counter);
    drawRotatedImage(logoImage, 300, 100, counter + 90);
    drawRotatedImage(logoImage, 500, 100, counter + 180);
    counter += 2;
}
function drawRotatedImage(image, x, y, angle) {
    // save the current co-ordinate system
    // before we screw with it
    context.save();
    // move to the middle of where we want to draw our image
    context.translate(x, y);
    // rotate around that point, converting our
    // angle from degrees to radians
    context.rotate(angle * TO_RADIANS);
    // draw it up and to the left by half the width
    // and height of the image
    context.drawImage(image, -(image.width/2), -(image.height/2));
    // and restore the co-ords to how they were when we began
    context.restore();
}
</script>
Create a canvas element in your HTML code so you can place it exactly where you want (with HTML + CSS):
<canvas id='canvas' height='100' width='100'> Your browser does not support HTML5 canvas </canvas>
And replace this JavaScript code:
var canvas = document.createElement('canvas');
canvas.width = 100;
canvas.height = 100;
var context = canvas.getContext('2d');
document.body.appendChild(canvas);
with this one:
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
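For example, you could put the canvas inside the <div> you mentioned; the wrapper id here is just an example:
<div id="logoContainer">
    <canvas id="canvas" width="100" height="100">Your browser does not support HTML5 canvas</canvas>
</div>
The canvas can then be positioned like any other element by styling #logoContainer (or the canvas itself) with CSS.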