I have code that works on my computer, but when I moved it to a new one it doesn't find the ffmpeg dependency.
It happens on these lines of code:
var videoshow = require('videoshow')
var image = [{path: './screenshot.jpg'}]
var videoOption = {
    loop: 10,
    fps: 25,
    transition: false,
    transitionDuration: 0, // seconds
    videoBitrate: 1024,
    videoCodec: 'libx264',
    size: '640x?',
    audioBitrate: '128k',
    audioChannels: 2,
    format: 'mp4',
    pixelFormat: 'yuv420p'
}
// call the videoshow library
videoshow(image, videoOption)
    .save(filename + "_movie.mp4")
    .on('start', function (command) {
        console.log("conversion started: " + command)
    })
    .on('error', function (err, stdout, stderr) {
        console.log("some error occurred: " + err)
    })
    .on('end', function (output) {
        console.log("conversion complete: " + output)
    })
It throws an error "Cannot find ffmpeg".
I tried running npm install and npm install ffmpeg, but it didn't help.
I think this happens because I don't know how to make dependencies work on a different computer.
Any help would be appreciated!
Apparently there was a problem with videoshow not finding ffmpeg (although it was installed), so I added these dependencies:
"dependencies": {
"#ffmpeg-installer/ffmpeg": "^1.1.0",
"#ffprobe-installer/ffprobe": "^1.2.0",
"ffprobe": "^1.1.2"
}
And I added these lines of code to the project:
const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
const ffprobePath = require('@ffprobe-installer/ffprobe').path;
const ffmpeg = require('fluent-ffmpeg');
ffmpeg.setFfmpegPath(ffmpegPath);
ffmpeg.setFfprobePath(ffprobePath);
And it fixed the problem.
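For what it's worth, this works because videoshow delegates to fluent-ffmpeg under the hood. A minimal alternative sketch, assuming the same installer packages: fluent-ffmpeg also reads the FFMPEG_PATH and FFPROBE_PATH environment variables, so setting them before requiring videoshow should have the same effect.
// Alternative sketch (assumption: videoshow delegates to fluent-ffmpeg,
// which honors these environment variables).
process.env.FFMPEG_PATH = require('@ffmpeg-installer/ffmpeg').path;
process.env.FFPROBE_PATH = require('@ffprobe-installer/ffprobe').path;

var videoshow = require('videoshow'); // must come after the env vars are set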
Related
I am trying to concatenate 4 videos with different codecs, sizes and FPS using the command below in Node.js, run via child_process spawn.
const { spawn } = require('child_process'); // needed for spawn
const cmd = 'ffmpeg';                       // assumes ffmpeg is on the PATH
const start = new Date().getTime();

const mergeAllVideo = async () => {
    try {
        const all = [
            '-y',
            '-i', './gifs_0.mp4',
            '-i', './gifs_1.mp4',
            '-i', './gifs_2.mp4',
            '-i', './gifs_3.mp4',
            '-filter_complex', "[0:v][0:a][1:v][1:a][2:v][2:a][3:v][3:a] concat=n=4:v=1:a=1 [vv] [aa]",
            '-map', '[vv]',
            '-map', '[aa]',
            './allMerged.mp4'
        ];
        const proc = spawn(cmd, all);
        proc.stdout.on('end', function () {
            console.log("Added mergeAllVideo !!! \n");
            const end = new Date().getTime();
            const diffInSec = (end - start) / 1000;
            console.log("Execution time : ", diffInSec, 's');
        });
        proc.stdout.on('error', function (err) {
            console.log(" ::::: Error at all : ", err);
        });
    } catch (err) {
        console.log(":::::::: getting error at mergeAllVideo() ", err);
    }
}
The command runs without throwing any error, but no video file is generated in the given directory. Could someone please help me?
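One likely explanation, offered as an assumption rather than a verified diagnosis: ffmpeg's concat filter requires all video inputs to have the same frame size (the documentation says resolution "must be converted explicitly by the user"), so with four differently sized clips ffmpeg exits with a filter error, and that error goes unnoticed because only stdout is watched while ffmpeg logs to stderr. A minimal sketch that scales every input to an arbitrary common 640x480 first and surfaces the real error:

const { spawn } = require('child_process');

// Sketch only: normalize each input's size before concat, and print stderr
// so a filter error is actually visible.
const args = [
    '-y',
    '-i', './gifs_0.mp4',
    '-i', './gifs_1.mp4',
    '-i', './gifs_2.mp4',
    '-i', './gifs_3.mp4',
    '-filter_complex',
    '[0:v]scale=640:480,setsar=1[v0];[1:v]scale=640:480,setsar=1[v1];' +
    '[2:v]scale=640:480,setsar=1[v2];[3:v]scale=640:480,setsar=1[v3];' +
    '[v0][0:a][v1][1:a][v2][2:a][v3][3:a]concat=n=4:v=1:a=1[vv][aa]',
    '-map', '[vv]',
    '-map', '[aa]',
    './allMerged.mp4'
];
const proc = spawn('ffmpeg', args);
proc.stderr.on('data', chunk => process.stderr.write(chunk)); // ffmpeg logs here
proc.on('close', code => console.log('ffmpeg exited with code', code));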
I too am working on this. My approach is a "for file in" queue-building one (a rough Node translation follows the steps):
pick the head of the queue (the first clip) and format it correctly
start a "for file in" loop
format $file correctly
concatenate the resulting file onto the end of the queue file
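Here is that rough Node translation, under stated assumptions: the file names are hypothetical, ffmpeg is on the PATH, and 640x480 at 25 fps is an arbitrary common format. It normalizes every clip, then joins the normalized copies with ffmpeg's concat demuxer.

const fs = require('fs');
const { execFileSync } = require('child_process');

const clips = ['gifs_0.mp4', 'gifs_1.mp4', 'gifs_2.mp4', 'gifs_3.mp4'];

// "format $file correctly": re-encode each clip to one common size/fps/codec
const normalized = clips.map((clip, i) => {
    const out = 'norm_' + i + '.mp4';
    execFileSync('ffmpeg', [
        '-y', '-i', clip,
        '-vf', 'scale=640:480,fps=25',
        '-c:v', 'libx264', '-c:a', 'aac',
        out
    ]);
    return out;
});

// "concatenate onto the end of the queue file": the concat demuxer reads a
// list file with one "file 'name'" line per clip
fs.writeFileSync('list.txt', normalized.map(f => "file '" + f + "'").join('\n'));
execFileSync('ffmpeg', ['-y', '-f', 'concat', '-safe', '0',
    '-i', 'list.txt', '-c', 'copy', 'merged.mp4']);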
I'm trying to stream video from a webcam to the local computer. The stream has a resolution of 3840x2160 at 30 fps. The computer I'm using is a Mac Pro. However, when I run it with the following command:
ffmpeg -f avfoundation -framerate 30 -video_size 3840x2160 -pix_fmt nv12 -probesize "50M" -i "0" -pix_fmt nv12 -preset ultrafast -vcodec libx264 -tune zerolatency -f mpegts udp://192.168.1.5:5100/mystream
it has a latency of 3-4 seconds. This problem is not present in Chromium: when using the MediaStream API, the stream is displayed in real time.
I believe that's because Chromium supports the "dmb1" four-character code:
+ (media::VideoPixelFormat)FourCCToChromiumPixelFormat:(FourCharCode)code {
switch (code) {
case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
return media::PIXEL_FORMAT_NV12; // Mac fourcc: "420v".
case kCVPixelFormatType_422YpCbCr8:
return media::PIXEL_FORMAT_UYVY; // Mac fourcc: "2vuy".
case kCMPixelFormat_422YpCbCr8_yuvs:
return media::PIXEL_FORMAT_YUY2;
case kCMVideoCodecType_JPEG_OpenDML:
return media::PIXEL_FORMAT_MJPEG; // Mac fourcc: "dmb1".
default:
return media::PIXEL_FORMAT_UNKNOWN;
}
}
To set the pixel format, Chromium uses the following piece of code:
NSDictionary* videoSettingsDictionary = @{
    (id)kCVPixelBufferWidthKey : @(width),
    (id)kCVPixelBufferHeightKey : @(height),
    (id)kCVPixelBufferPixelFormatTypeKey : @(best_fourcc),
    AVVideoScalingModeKey : AVVideoScalingModeResizeAspectFill
};
[_captureVideoDataOutput setVideoSettings:videoSettingsDictionary];
I tried doing the same thing in ffmpeg by changing the avfoundation.m file. First I added a new pixel format, AV_PIX_FMT_MJPEG:
static const struct AVFPixelFormatSpec avf_pixel_formats[] = {
{ AV_PIX_FMT_MONOBLACK, kCVPixelFormatType_1Monochrome },
{ AV_PIX_FMT_RGB555BE, kCVPixelFormatType_16BE555 },
{ AV_PIX_FMT_RGB555LE, kCVPixelFormatType_16LE555 },
{ AV_PIX_FMT_RGB565BE, kCVPixelFormatType_16BE565 },
{ AV_PIX_FMT_RGB565LE, kCVPixelFormatType_16LE565 },
{ AV_PIX_FMT_RGB24, kCVPixelFormatType_24RGB },
{ AV_PIX_FMT_BGR24, kCVPixelFormatType_24BGR },
{ AV_PIX_FMT_0RGB, kCVPixelFormatType_32ARGB },
{ AV_PIX_FMT_BGR0, kCVPixelFormatType_32BGRA },
{ AV_PIX_FMT_0BGR, kCVPixelFormatType_32ABGR },
{ AV_PIX_FMT_RGB0, kCVPixelFormatType_32RGBA },
{ AV_PIX_FMT_BGR48BE, kCVPixelFormatType_48RGB },
{ AV_PIX_FMT_UYVY422, kCVPixelFormatType_422YpCbCr8 },
{ AV_PIX_FMT_YUVA444P, kCVPixelFormatType_4444YpCbCrA8R },
{ AV_PIX_FMT_YUVA444P16LE, kCVPixelFormatType_4444AYpCbCr16 },
{ AV_PIX_FMT_YUV444P, kCVPixelFormatType_444YpCbCr8 },
{ AV_PIX_FMT_YUV422P16, kCVPixelFormatType_422YpCbCr16 },
{ AV_PIX_FMT_YUV422P10, kCVPixelFormatType_422YpCbCr10 },
{ AV_PIX_FMT_YUV444P10, kCVPixelFormatType_444YpCbCr10 },
{ AV_PIX_FMT_YUV420P, kCVPixelFormatType_420YpCbCr8Planar },
{ AV_PIX_FMT_NV12, kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange },
{ AV_PIX_FMT_YUYV422, kCVPixelFormatType_422YpCbCr8_yuvs },
{ AV_PIX_FMT_MJPEG, kCMVideoCodecType_JPEG_OpenDML }, //dmb1
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
{ AV_PIX_FMT_GRAY8, kCVPixelFormatType_OneComponent8 },
#endif
{ AV_PIX_FMT_NONE, 0 }
};
After that I tried to hardcode it:
pxl_fmt_spec = avf_pixel_formats[22];
ctx->pixel_format = pxl_fmt_spec.ff_id;
pixel_format = [NSNumber numberWithUnsignedInt:pxl_fmt_spec.avf_id];
capture_dict = [NSDictionary dictionaryWithObject:pixel_format
forKey:(id)kCVPixelBufferPixelFormatTypeKey];
[ctx->video_output setVideoSettings:capture_dict];
The code compiles and builds successfully, but when I run it with the above command, without -pix_fmt specified, the program enters an infinite loop in the get_video_config function:
while (ctx->frames_captured < 1) {
CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
}
It looks obvious that ffmpeg is not able to load the first frame. My camera is more than capable of supporting this pixel format and stream format. I proved it with this piece of code, which runs after ffmpeg selects which format to use for the specified width, height and fps:
FourCharCode fcc = CMFormatDescriptionGetMediaSubType([selected_format formatDescription]);
char fcc_string[5] = { 0, 0, 0, 0, '\0'};
fcc_string[0] = (char) (fcc >> 24);
fcc_string[1] = (char) (fcc >> 16);
fcc_string[2] = (char) (fcc >> 8);
fcc_string[3] = (char) fcc;
av_log(s, AV_LOG_ERROR, "Selected format: %s\n", fcc_string);
The above code prints "Selected format: dmb1".
Can someone tell me why ffmpeg can't load the first frame, and how to add a new pixel format to this library?
Also, any suggestion on how to resolve the input latency of 3 seconds in some other way is more than welcome.
EDIT:
If you try setting any pixel format in Chromium other than MJPEG, there is a latency of 2 seconds. (By "setting" I mean changing the Chromium source code and recompiling it.) I am pretty sure the problem is the pixel format, because the camera is sending dmb1 and ffmpeg doesn't know about that format.
Also, the latency is only present on macOS.
Hey everyone, so I have a canvas that I draw a rather complex animation to. Let's say I want to take screenshots of the canvas at 60 frames per second. The canvas doesn't have to play in real time; I just need it to capture 60 frames per second so I can send the screenshots to FFmpeg and make a video. I know I can use canvas.toDataURL, but how do I capture the frames smoothly?
Use this code to pause the video and Lottie animations if you are using lottie-web for After Effects content in the browser. Then take screenshots, and use Whammy to compile a webm file which you can then run through ffmpeg to get your desired output.
generateVideo(){
    const vid = new Whammy.fromImageArray(this.captures, 30);
    vid.name = "project_id_238.webm";
    vid.lastModifiedDate = new Date();
    this.file = URL.createObjectURL(vid);
},
async pauseAll(){
    this.pauseVideo();
    if(this.animations.length){
        this.pauseLotties()
    }
    this.captures.push(this.canvas.toDataURL('image/webp'));
    if(!this.ended){
        setTimeout(()=>{
            this.pauseAll();
        }, 500);
    }
},
async pauseVideo(){
    console.log("currentTime", this.video.currentTime);
    console.log("duration", this.video.duration);
    this.video.pause();
    const oneFrame = 1/30;
    this.video.currentTime += oneFrame;
},
async pauseLotties(){
    lottie.freeze();
    for(let i = 0; i < this.animations.length; i++){
        let step = 0;
        let animation = this.animations[i].lottie;
        if(animation.currentFrame <= animation.totalFrames){
            step = animation.currentFrame + animation.totalFrames/30;
        }
        lottie.goToAndStop(step, true, animation.name);
    }
}
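As a follow-up, an alternative sketch that skips Whammy entirely: if the captured data URLs are sent to a server, Node can decode them into numbered image files and let ffmpeg assemble the video directly. All names here are hypothetical, and note that canvas.toDataURL('image/webp') only yields WebP in Chromium-based browsers.

const fs = require('fs');
const { execFileSync } = require('child_process');

// captures: the array of data URLs produced by canvas.toDataURL('image/webp')
function framesToVideo(captures) {
    captures.forEach((dataUrl, i) => {
        const base64 = dataUrl.split(',')[1]; // strip the "data:image/webp;base64," prefix
        const name = 'frame_' + String(i).padStart(5, '0') + '.webp';
        fs.writeFileSync(name, Buffer.from(base64, 'base64'));
    });
    // assemble the numbered frames at 30 fps
    execFileSync('ffmpeg', ['-y', '-framerate', '30', '-i', 'frame_%05d.webp',
        '-c:v', 'libx264', '-pix_fmt', 'yuv420p', 'out.mp4']);
}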
I have a problem making a video from website screenshots taken with PhantomJS.
PhantomJS does not take screenshots for every frame within the same second, and some seconds are skipped entirely; there are many missing frames.
The result is a video that plays at high speed with many jumps.
test.js:
var page = require('webpage').create(),
    address = 'http://raphaeljs.com/polar-clock.html',
    duration = 5,   // duration of the video, in seconds
    framerate = 24, // number of frames per second. 24 is a good value.
    counter = 0,
    width = 1024,
    height = 786,
    frame = 10001;

page.viewportSize = { width: width, height: height };
page.open(address, function(status) {
    if (status !== 'success') {
        console.log('Unable to load the address!');
        phantom.exit(1);
    } else {
        window.setTimeout(function () {
            page.clipRect = { top: 0, left: 0, width: width, height: height };
            window.setInterval(function () {
                counter++;
                page.render('newtest/image' + (frame++) + '.png', { format: 'png' });
                if (counter > duration * framerate) {
                    phantom.exit();
                }
            }, 1/framerate);
        }, 200);
    }
});
This creates 120 images, which is the correct count, but when you view the images one by one you will see many duplicates of the same content and many missing frames.
ffmpeg :
ffmpeg -start_number 10001 -i newtest/image%05d.png -c:v libx264 -r 24 -pix_fmt yuv420p out.mp4
I know this script and ffmpeg command are not perfect, because I made hundreds of changes without luck and lost track of the correct settings.
Can anyone guide me to fix this?
Thank you all.
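One likely culprit, offered as an observation rather than a tested fix: setInterval expects the delay in milliseconds, so 1/framerate (about 0.04) asks for a new frame as fast as the engine allows, while page.render() at 1024x786 can easily take longer than a real frame interval, producing both duplicate and missing frames. A minimal sketch of the corrected loop, reusing the question's variables:

// setInterval takes milliseconds: 1000/24 ≈ 41.7 ms between frames.
// page.render() is synchronous in PhantomJS, so renders cannot overlap.
window.setInterval(function () {
    counter++;
    page.render('newtest/image' + (frame++) + '.png', { format: 'png' });
    if (counter > duration * framerate) {
        phantom.exit();
    }
}, 1000 / framerate);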
How to do 'gm composite -gravity center change_image_url base_image_url' with GM Node.js?
How can I call gm().command() and gm().in() or gm().out() to achieve the above?
After struggling for an hour, here is my solution for your question:
gm composite -gravity center change_image_url base_image_url
gm()
    .command("composite")
    .in("-gravity", "center")
    .in(change_image_url)
    .in(base_image_url)
    .write(output_file, function (err) {
        if (!err)
            console.log(' hooray! ');
        else
            console.log(err);
    });
Good luck! Hope it will be helpful to others as well :)
Install gm (make sure you have already installed GraphicsMagick):
npm install gm
The following is my example code to merge two images together (using gm().in()):
var gm = require('gm');
gm()
    .in('-page', '+0+0')
    .in('bg.jpg')
    .in('-page', '+10+20') // location of smallIcon.jpg is x,y -> 10, 20
    .in('smallIcon.jpg')
    .mosaic()
    .write('tesOutput.jpg', function (err) {
        if (err) console.log(err);
    });
I am doing it this way:
var exec = require('child_process').exec;
var command = [
    'gm',                    // the GraphicsMagick executable
    'composite',
    '-watermark', '20x50',
    '-gravity', 'center',
    '-quality', '100',
    'images/watermark.png',
    'images/input.jpg',      // input
    'images/watermarked.png' // output
];
// making the watermark through exec - child_process
exec(command.join(' '), function(err, stdout, stderr) {
    if (err) console.log(err);
});
Why does nobody use the composite() method? (https://github.com/aheckmann/gm)
var gm = require('gm');
var bgImage = 'bg.jpg',
    frontImage = 'front.jpg',
    resultImage = 'result.jpg',
    xy = '+100+150';
gm(bgImage)
    .composite(frontImage)
    .geometry(xy)
    .write(resultImage, function (err) {
        if (!err) console.log('All done');
    });
UPDATE: I looked through the source history of this method; it only became available in 2014.
If you want to resize and merge, you can use this:
gm()
    .in('-geometry', '+0+0')
    .in('./img/img1.png')
    .in('-geometry', '300x300+100+200')
    .in('./img/img2.png')
    .flatten()
    .write('resultGM.png', function (err) {
        if (err) console.log(err);
    });
Having the pleasure of being confined to a Windows machine for the moment, I eventually solved this problem by not using the "gm" module at all. For some reason, even though I installed GraphicsMagick via its installer, the Node module refused to find it through my environment variables. That could have something to do with the fact that I am making an application with Electron.js (which is similar to Node.js but has its own gotchas).
var exec = require("child_process").execSync;
var commandArray = [
    __dirname + "/bin/graphicsMagick/gm.exe", // the relative path to the GraphicsMagick executable
    "composite",
    "-gravity",
    "center",
    __dirname + "/data/images/qr/logo-small.png", // relative paths to the images you want to composite
    __dirname + "/data/images/qr/qr-webpage.png",
    __dirname + "/data/images/qr/qr-webpage-logo.png" // relative path to the result
];
var returnValue = exec(commandArray.join(" "));
For Windows, I think this is the correct portable way to do it.
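A small design note on this approach, offered as an untested suggestion: joining the array with spaces breaks as soon as __dirname contains a space (common under C:\Users\... on Windows). child_process.execFileSync takes the arguments as an array and sidesteps the quoting problem entirely:

// Same command, but without shell quoting issues (untested sketch).
var execFile = require("child_process").execFileSync;
execFile(__dirname + "/bin/graphicsMagick/gm.exe", [
    "composite",
    "-gravity", "center",
    __dirname + "/data/images/qr/logo-small.png",
    __dirname + "/data/images/qr/qr-webpage.png",
    __dirname + "/data/images/qr/qr-webpage-logo.png"
]);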