I want to stream an image to a webpage via WebSocket. The data is in RGBA. How do I change the blob into image data?
This is my current code; it doesn't work, and it will be slow. Is there a direct way of assigning event.data to the canvas' image data?
void onMessage(MessageEvent event)
{
    print("received!");
    var imgData = canvas.getImageData(0, 0, 100, 100);
    var j = 0;
    for (var i = 0; i < imgData.data.length; i += 4)
    {
        imgData.data[i + 0] = event.data[j];
        imgData.data[i + 1] = event.data[j + 1];
        imgData.data[i + 2] = event.data[j + 2];
        imgData.data[i + 3] = 255;
        j += 3;
    }
    canvas.putImageData(imgData, 0, 0);
}
On Firefox you can use the toBlob method. Put the image data on a temporary canvas and call its toBlob method. Proof-of-concept example:
var canvas = document.createElement('canvas');
canvas.width = imageData.width;
canvas.height = imageData.height;
canvas.getContext('2d').putImageData(imageData, 0, 0);
canvas.toBlob(function(blob) {
    blob; // this is your file
}, 'image/png', 1);
For more, have a look at the Mozilla docs:
https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/toBlob
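For the direction the question actually asks about (socket bytes → canvas pixels), a more direct route is to receive binary frames as an ArrayBuffer and wrap them in an ImageData, skipping the per-byte copy loop. A minimal sketch, assuming the server sends raw 100×100 RGBA bytes and ctx is the target canvas' 2D context:
// Minimal sketch: assumes raw RGBA bytes for a 100x100 frame and that
// ctx is the target canvas' 2D rendering context.
ws.binaryType = 'arraybuffer'; // deliver frames as ArrayBuffer instead of Blob

ws.onmessage = function(event) {
    // Byte length must equal width * height * 4 for this to be valid.
    var pixels = new Uint8ClampedArray(event.data);
    ctx.putImageData(new ImageData(pixels, 100, 100), 0, 0);
};
If the frames do arrive as Blobs instead, event.data.arrayBuffer().then(...) yields the same bytes.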
I am following the blog post at https://antmedia.io/how-to-merge-live-stream-and-canvas-in-webrtc-easily/ that explains how to embed a logo in an Ant Media live stream. However, I couldn't quite figure out how to initialise a localStream with the JavaScript SDK as illustrated in the blog. Specifically, where is the implementation of initWebRTCAdaptor():
//initialize the webRTCAdaptor with the localStream created.
//initWebRTCAdaptor method is implemented below
initWebRTCAdaptor(localStream);
A complete working sample would be very helpful.
It seems that blog post is not fully up to date.
Let me share what to do to get this feature working.
First, add a localStream parameter to the WebRTCAdaptor constructor.
Secondly, use the code below in place of initWebRTCAdaptor.
For the full code, please take a look at this gist.
https://gist.github.com/mekya/d7d21f78e7ecb2c34d89bd6ec5bf5799
Make sure that you use your own image in image.src (use local images).
var canvas = document.getElementById('canvas');
var vid = document.getElementById('localVideo');
var image = new Image();
image.src = "images/play.png";

function draw() {
    if (canvas.getContext) {
        var ctx = canvas.getContext('2d');
        ctx.drawImage(vid, 0, 0, 200, 150);
        ctx.drawImage(image, 50, 10, 100, 30);
    }
}
setInterval(function() { draw(); }, 50);

//capture stream from canvas
var localStream = canvas.captureStream(25);

navigator.mediaDevices.getUserMedia({ video: true, audio: true }).then(function(stream) {
    var video = document.querySelector('video#localVideo');
    video.srcObject = stream;
    video.onloadedmetadata = function(e) {
        video.play();
    };
    // add the microphone track to the canvas stream, then initialize
    // the webRTCAdaptor with the localStream created (see the gist)
    localStream.addTrack(stream.getAudioTracks()[0]);
    initWebRTCAdaptor(false, autoRepublishEnabled);
});
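To make "add a localStream parameter to the WebRTCAdaptor constructor" concrete, here is a hedged sketch; apart from localStream, the option names and values below are placeholders in the style of the SDK's usual constructor options, so treat the gist above as authoritative:
// Hedged sketch only: the websocket URL and most option values are
// placeholders; localStream is the canvas stream captured above.
var webRTCAdaptor = null;

function initWebRTCAdaptor(publishImmediately, autoRepublishEnabled) {
    webRTCAdaptor = new WebRTCAdaptor({
        websocket_url: "wss://your-ant-media-server:5443/WebRTCAppEE/websocket",
        localStream: localStream,                    // publish the merged canvas stream
        mediaConstraints: { video: true, audio: true },
        localVideoId: "localVideo",
        callback: function(info) { console.log("event:", info); },
        callbackError: function(error) { console.error(error); }
    });
}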
I would like to use the MediaStream.captureStream() method, but it is either rendered useless by the specification and bugs, or I am using it totally wrong.
I know that captureStream takes a maximal framerate as its parameter, not a constant one, and does not even guarantee that. It is also possible to change the MediaStream's currentTime (currently in Chrome; in Firefox it has no effect, but Firefox offers requestFrame instead, which Chrome lacks). The idea is that manual frame requests, or setting each frame's placement in the MediaStream, should override the wall-clock timing. It doesn't.
In Firefox it smoothly renders the video, frame by frame, but the resulting video is as long as the wall-clock time spent processing.
In Chrome there are some dubious black or reordered frames (I do not care about that for now, until the FPS matches), and manually setting currentTime achieves nothing; the result is the same as in Firefox.
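To make the mechanism concrete, this is the manual-frame pattern I am describing (a sketch; note that newer implementations expose requestFrame on the CanvasCaptureMediaStreamTrack rather than on the stream itself):
// Sketch of the manual-frame idea: capture with frameRate 0 so that frames
// are only pushed when requestFrame() is called, one per processed frame.
var stream = canvas.captureStream(0);
var track = stream.getVideoTracks()[0];

function pushProcessedFrame() {
    ctx.drawImage(vid, 0, 0);      // the (slow) per-frame processing goes here
    if (stream.requestFrame) {
        stream.requestFrame();     // Firefox exposes it on the stream
    } else if (track.requestFrame) {
        track.requestFrame();      // the spec puts it on the capture track
    }
}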
I use modified code from the MediaStream Capture Canvas and Audio Simultaneously answer.
const FPS = 30;
var cStream, vid, recorder, chunks = [], go = true,
    Q = 61, rec = document.getElementById('rec'),
    canvas = document.getElementById('canvas'),
    ctx = canvas.getContext('2d');
ctx.strokeStyle = 'rgb(255, 0, 0)';

function clickHandler() {
    this.textContent = 'stop recording';
    // it has no effect no matter if it is empty or set to 30
    cStream = canvas.captureStream(FPS);
    recorder = new MediaRecorder(cStream);
    recorder.ondataavailable = saveChunks;
    recorder.onstop = exportStream;
    this.onclick = stopRecording;
    recorder.start();
    draw();
}
function exportStream(e) {
    if (chunks.length) {
        var blob = new Blob(chunks);
        var vidURL = URL.createObjectURL(blob);
        var vid2 = document.createElement('video');
        vid2.controls = true;
        vid2.src = vidURL;
        vid2.onended = function() {
            URL.revokeObjectURL(vidURL);
        };
        document.body.insertBefore(vid2, vid);
    } else {
        document.body.insertBefore(document.createTextNode('no data saved'), canvas);
    }
}

function saveChunks(e) {
    e.data.size && chunks.push(e.data);
}
function stopRecording() {
    go = false;
    this.parentNode.removeChild(this);
    recorder.stop();
}

var loadVideo = function() {
    vid = document.createElement('video');
    document.body.insertBefore(vid, canvas);
    vid.oncanplay = function() {
        rec.onclick = clickHandler;
        rec.disabled = false;
        canvas.width = vid.videoWidth;
        canvas.height = vid.videoHeight;
        vid.oncanplay = null;
        ctx.drawImage(vid, 0, 0);
    };
    vid.onseeked = function() {
        ctx.drawImage(vid, 0, 0);
        /*
          Here I want to include additional drawing per each frame,
          for sure taking more than 180ms
        */
        if (cStream && cStream.requestFrame) cStream.requestFrame();
        draw();
    };
    vid.crossOrigin = 'anonymous';
    vid.src = 'https://dl.dropboxusercontent.com/s/bch2j17v6ny4ako/movie720p.mp4';
    vid.currentTime = 0;
};
function draw() {
    if (go && cStream) {
        ++Q;
        cStream.currentTime = Q / FPS;
        vid.currentTime = Q / FPS;
    }
}

loadVideo();
<button id="rec" disabled>record</button><br>
<canvas id="canvas" width="500" height="500"></canvas>
Is there a way to make it operational?
The goal is to load a video, process every frame (which is time consuming in my case), and return the processed result.
Footnote: I do not want to use ffmpeg.js, an external server, or other technologies. I could process it with classic ffmpeg without using JavaScript at all, but that is not the point of this question; it is more about MediaStream usability and maturity. The context is Firefox/Chrome here, but it could be node.js or nw.js as well. If this is possible at all, or is awaiting bug fixes, the next question would be feeding audio into it, but I think that would be better as a separate question.
I am working on a project for my programming class. I'd like to essentially have a canvas with a background element (say a room.jpg) and then maybe three interactive objects in the room (lamp.jpg, couch.jpg, desk.jpg). I'd like for it to be that if you hover over the lamp a small box or text pops out, giving you some information. Or maybe have it so if you click an image, the same concept happens. You know, something interactive with the objects in the canvas. Again, I'm new to canvas but we have to use it in our assignment. My current code is:
function loadImages(sources, callback) {
    var images = {};
    var loadedImages = 0;
    var numImages = 0;
    // get num of sources
    for (var src in sources) {
        numImages++;
    }
    for (var src in sources) {
        images[src] = new Image();
        images[src].onload = function() {
            if (++loadedImages >= numImages) {
                callback(images);
            }
        };
        images[src].src = sources[src];
    }
}

var sources = {
    room: 'room.jpg',
    title: 'title.jpg'
};

loadImages(sources, function(images) {
    context.drawImage(images.room, 0, 0);
    context.drawImage(images.title, 0, 0);
});
But from what I understand, that makes the two JPEGs "permanent" parts of the canvas (unable to be messed with). I had been trying to get it so that when I clicked I'd go from title.jpg to room.jpg, but I've since given up. Essentially, all I want now is to have room.jpg appear when the page first loads, with three other PNG objects on top (as objects in the room). Can these be interacted with, or do I have to put the images into the canvas in another way? Thanks for all your help and your patience!
// --- Image Loader ----
var images = {};
var loadedImages = 0;
var pictures = {
    room: 'room.jpg',
    title: 'title.jpg',
    lamp1: 'lampoff.jpg',
    lamp2: 'lampon.jpg'
};

function loadImages(sources, callback) {
    var numImages = 0;
    for (var src in sources) numImages++;
    for (var src in sources) {
        images[src] = new Image();
        images[src].onload = function() {
            if (++loadedImages >= numImages) {
                callback(images);
            }
        };
        images[src].src = sources[src];
    }
}
// --- Mouse Down Functionality ----
document.getElementById('canvas').addEventListener('mousedown', function(e) {
    if (e.clientX) {
        var rect = this.getBoundingClientRect();
        if (rect) clickCanvas(e.clientX - rect.left, e.clientY - rect.top);
        else clickCanvas(e.clientX - this.offsetLeft, e.clientY - this.offsetTop);
    } else if (e.offsetX) clickCanvas(e.offsetX, e.offsetY);
    else if (e.layerX) clickCanvas(e.layerX, e.layerY);
    else console.warn("Couldn't determine mouse coordinates");
});
var lampOn;

function drawCanvas(showLamp) {
    lampOn = showLamp;
    canvas.width = canvas.width; // clears the canvas
    context.drawImage(images.room, 0, 0);
    context.drawImage(images.title, 0, 0);
    if (lampOn) {
        context.drawImage(images.lamp2, 100, 100);
    } else {
        context.drawImage(images.lamp1, 100, 100);
    }
}

function clickCanvas(x, y) {
    console.log('clicked canvas at:', x, y);
    if (clickedLamp) {
        drawCanvas(!lampOn);
    }
}

loadImages(pictures, function(images) {
    drawCanvas(false);
});
Make sure to replace "clickedLamp" and the "canvas" element id! The idea here is that you redraw the same canvas using the same function: every time you modify any of it, you rerender ALL of it. See if you can get this example working; it will help clarify a lot. If you don't understand something, comment.
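As a starting point for replacing clickedLamp, here is a hypothetical rectangle hit test; the (100, 100) origin matches where drawCanvas draws the lamp, and the width/height are read from the loaded image:
// Hypothetical hit test for the lamp drawn at (100, 100) in drawCanvas.
function clickedLamp(x, y) {
    var lampX = 100, lampY = 100;
    var lampW = images.lamp1.width, lampH = images.lamp1.height;
    return x >= lampX && x <= lampX + lampW &&
           y >= lampY && y <= lampY + lampH;
}
With a function like this, the check in clickCanvas becomes if (clickedLamp(x, y)).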
ctx.drawImage() is not working when I use a transparent PNG, but does work when I use a regular PNG.
var context = document.getElementById("canvas").getContext('2d');
....
function draw(img, x, y) {
    context.drawImage(img, x, y);
}

// actually there is a loop here, but for simplicity I put only this
var img = new Image();
img.src = "images/a.png";
img.onload = draw(img, 10, 10);
If I use a regular PNG image it works, but with a transparent PNG whose background has been deleted, it does not.
Do you have any idea why? Thank you.
img.onload takes a function reference rather than a function call.
So do this:
img.onload = function() { draw(img, 10, 10); };
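The original line called draw immediately and assigned its return value (undefined) to onload, so nothing ran when the image actually finished loading. An equivalent fix with addEventListener:
// Pass a function reference; it closes over img and runs after the load.
img.addEventListener('load', function() {
    draw(img, 10, 10);
});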
If you need to load many images, here is an image preloader that fully loads all images before calling the start() function:
// image loader
// put the paths to your images in imageURLs[]
var imageURLs = [];
// push all your image urls!
imageURLs.push("");
imageURLs.push("");
// the loaded images will be placed in imgs[]
var imgs = [];
var imagesOK = 0;
loadAllImages(start);

function loadAllImages(callback) {
    for (var i = 0; i < imageURLs.length; i++) {
        var img = new Image();
        imgs.push(img);
        img.onload = function() {
            imagesOK++;
            if (imagesOK >= imageURLs.length) {
                callback();
            }
        };
        img.onerror = function() { alert("image load failed"); };
        img.crossOrigin = "anonymous";
        img.src = imageURLs[i];
    }
}

function start() {
    // the imgs[] array now holds fully loaded images
    // the imgs[] are in the same order as imageURLs[]
}
I'm using an imported PNG with an alpha gradient as a mask that reveals the bitmap it is assigned to. The mask object is draggable (kind of like a flashlight). I know I'm supposed to use an AlphaMaskFilter as one of the filters, and I know I'm supposed to use .updateCache()... I'm just not sure whether I'm using them correctly.
var stage;
var assetQueue;
var bg;
var bgMask;
var container;
var amf;

$(document).ready(function() {
    loadImages();
});

function loadImages()
{
    // Set up preload queue
    assetQueue = new createjs.LoadQueue();
    assetQueue.addEventListener("complete", preloadComplete);
    assetQueue.loadManifest([
        { id: "img_bg", src: "images/Nintendo-logo-red.jpg" },
        { id: "img_bg_mask", src: "images/background_mask.png" }
    ]);
}

function preloadComplete()
{
    assetQueue.removeEventListener("complete", preloadComplete);
    init();
}

function init()
{
    stage = new createjs.Stage("stage_canvas");
    setBackgrounds();
    sizeStage();
    $(document).mousemove(function(evt) {
        trackMouse(evt);
    });
}

function trackMouse(evt)
{
    var mouseX = evt.pageX;
    var mouseY = evt.pageY;
    // Move the containing clip around
    container.x = mouseX - (bgMask.image.width / 2);
    container.y = mouseY - (bgMask.image.height / 2);
    // Offset the position of the masked image.
    bg.x = -container.x;
    bg.y = -container.y;
    container.updateCache();
    stage.update();
}

function setBackgrounds()
{
    bg = new createjs.Bitmap(assetQueue.getResult("img_bg"));
    bgMask = new createjs.Bitmap(assetQueue.getResult("img_bg_mask"));
    container = new createjs.Container();
    container.addChild(bg);
    amf = new createjs.AlphaMaskFilter(bgMask.image);
    container.filters = [amf];
    container.cache(0, 0, bg.image.width, bg.image.height);
    stage.addChild(container);
    stage.update();
}

function sizeStage()
{
    var windowW = 600;
    var windowH = 600;
    stage.canvas.width = windowW;
    stage.canvas.height = windowH;
    stage.update();
}
Solution found (for anyone interested). The key is to add the image you want to mask to a container. Move the container to any position you want, then offset the contained image within the container. The code has been updated to reflect this.