Merge/Concatenate two videos with different frame rates using FFmpegFrameGrabber & FFmpegFrameRecorder javacv in Spring Boot - spring-boot

I am trying to merge/concatenate 2 videos. One of the videos has a frame rate of 0.25fps and the other has a frame rate of 30fps. When I merge the two videos using FFmpegFrameGrabber & FFmpegFrameRecorder javacv libraries, the second video becomes slowed down and looks like a slow motion video and the length of the second video increases drastically. I was hoping to know what I need to do in order to keep the videos in their respective frame rates and merge them both without altering none of them. I have attached the code below for reference:
FFmpegFrameGrabber video1Grabber = new FFmpegFrameGrabber("video-test1.mp4");
video1Grabber.start();
FFmpegFrameGrabber video2Grabber = new FFmpegFrameGrabber("video-test2.mp4");
video2Grabber.start();
URL url = new URL(S3Urls.AUDIO_DREAMY);
BufferedInputStream inputStream = new BufferedInputStream(url.openStream());
FileOutputStream fileOS = new FileOutputStream("dreamy-audioFile.mp3");
byte data[] = new byte[1024];
int byteContent;
while ((byteContent = inputStream.read(data, 0, 1024)) != -1) {
fileOS.write(data, 0, byteContent);
}
FFmpegFrameGrabber audioGrabber = new FFmpegFrameGrabber("dreamy-audioFile.mp3");
audioGrabber.start();
FFmpegFrameRecorder audioRecorder = new FFmpegFrameRecorder("audio-output.mp3", audioGrabber.getAudioChannels());
audioRecorder.setSampleRate(audioGrabber.getSampleRate());
audioRecorder.start();
Frame audioFrame;
while ((audioFrame = audioGrabber.grab()) != null) {
audioRecorder.record(audioFrame);
}
audioRecorder.stop();
FFmpegFrameGrabber finalAudio = new FFmpegFrameGrabber("audio-output.mp3");
finalAudio.start();
FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("video-output.mp4", 1280, 720);
recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
recorder.setAudioCodec(avcodec.AV_CODEC_ID_AAC);
recorder.setSampleRate(finalAudio.getSampleRate());
recorder.setAudioChannels(finalAudio.getAudioChannels());
recorder.setFormat("mp4");
recorder.start();
Frame video1Frame;
recorder.setFrameRate(video1Grabber.getFrameRate());
while((video1Frame = video1Grabber.grabFrame())!= null) {
recorder.record(video1Frame);
}
video1Grabber.stop();
Frame video2Frame;
recorder.setFrameRate(video2Grabber.getFrameRate());
while((video2Frame = video2Grabber.grabFrame())!= null) {
recorder.record(video2Frame);
}
video2Grabber.stop();
finalAudio.stop();
recorder.stop();
fileOS.close();
fileOS.flush();

Related

Maintain orientation when resizing image with opencv

FYI, I'm new to OpenCV. I'm creating an AWS Lambda function that will utilize OpenCV 2.4 to resize images that I have in S3. S3 generates an event that launches my Lambda function when an image is uploaded to my source bucket. The lambda function successfully resizes the images and stores the resized image to the target bucket with the same object key as the original image. I have test images that where taken with a digital camera and some from an Android phone. What I've noticed is if I have an image from the phone that was taken in portrait the image in the source bucket is oriented correctly but the resized version in the target bucket is orientated incorrectly. Images taken in portrait orientation are converted to landscape. How do I maintain the orientation when resizing images. Below is the code I'm using.
public Object handleRequest(S3Event input, Context context) {
OpenCV.loadLibrary();
for (S3EventNotificationRecord record : input.getRecords()) {
String bucketName = record.getS3().getBucket().getName();
S3ObjectEntity entity = record.getS3().getObject();
String objectKey = entity.getUrlDecodedKey();
context.getLogger().log("ObjectKey: " + objectKey);
String fileExtension = objectKey.substring(objectKey.lastIndexOf("."));
context.getLogger().log("file extension: " + fileExtension);
S3Object object = s3Client.getObject(new GetObjectRequest(bucketName, objectKey));
InputStream objectData = object.getObjectContent();
ObjectMetadata objectMetadata = object.getObjectMetadata();
// Process the objectData stream.
int nRead;
byte[] data = new byte[16 * 1024];
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try {
while ((nRead = objectData.read(data, 0, data.length)) != -1) {
buffer.write(data, 0, nRead);
}
byte[] bytes = buffer.toByteArray();
Mat srcImage = Highgui.imdecode(new MatOfByte(bytes), Highgui.CV_LOAD_IMAGE_UNCHANGED);
Mat resizedImage = new Mat();
Imgproc.resize(srcImage, resizedImage, new Size(0,0), 0.1, 0.1, Imgproc.INTER_AREA);
MatOfByte resizedMatOfByte = new MatOfByte();
Highgui.imencode(fileExtension, resizedImage, resizedMatOfByte);
byte[] bytesToWrite = resizedMatOfByte.toArray();
ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(bytesToWrite.length);
meta.setContentType(objectMetadata.getContentType());
PutObjectRequest putRequest = new PutObjectRequest(Configuration.MINIMIZED_ARTIFACT_BUCKET, objectKey, new ByteArrayInputStream(bytesToWrite), meta);
s3Client.putObject(putRequest);
objectData.close();
} catch (IOException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
}
return null;
}

Save image from Elgato Game Capture using DirectShow

Im using a Elgato Game Capture HD60 to live prewview a GoPro Hero 5 in my application. Now i want to save the stream as a JPG in my folder. But i cant find out how to.
To bind the pin
DsROTEntry rot; //Used for remotely connecting to graph
IFilterGraph2 graph;
ICaptureGraphBuilder2 captureGraph;
IBaseFilter elgatoFilter;
IBaseFilter smartTeeFilter;
IBaseFilter videoRendererFilter;
Size videoSize;
private IPin GetPin(PinDirection pinDir, IBaseFilter filter)
{
IEnumPins epins;
int hr = filter.EnumPins(out epins);
if (hr < 0)
return null;
IntPtr fetched = Marshal.AllocCoTaskMem(4);
IPin[] pins = new IPin[1];
epins.Reset();
while (epins.Next(1, pins, fetched) == 0)
{
PinInfo pinfo;
pins[0].QueryPinInfo(out pinfo);
bool found = (pinfo.dir == pinDir);
DsUtils.FreePinInfo(pinfo);
if (found)
return pins[0];
}
return null;
}
private IPin GetPin(PinDirection pinDir, string name, IBaseFilter filter)
{
IEnumPins epins;
int hr = filter.EnumPins(out epins);
if (hr < 0)
return null;
IntPtr fetched = Marshal.AllocCoTaskMem(4);
IPin[] pins = new IPin[1];
epins.Reset();
while (epins.Next(1, pins, fetched) == 0)
{
PinInfo pinfo;
pins[0].QueryPinInfo(out pinfo);
bool found = (pinfo.dir == pinDir && pinfo.name == name);
DsUtils.FreePinInfo(pinfo);
if (found)
return pins[0];
}
return null;
}
And to start the stream
private void button1_Click(object sender, EventArgs e)
{
//Set the video size to use for capture and recording
videoSize = new Size(1280, 720);
//Initialize filter graph and capture graph
graph = (IFilterGraph2)new FilterGraph();
captureGraph = (ICaptureGraphBuilder2)new CaptureGraphBuilder2();
captureGraph.SetFiltergraph(graph);
rot = new DsROTEntry(graph);
//Create filter for Elgato
Guid elgatoGuid = new Guid("39F50F4C-99E1-464A-B6F9-D605B4FB5918");
Type comType = Type.GetTypeFromCLSID(elgatoGuid);
elgatoFilter = (IBaseFilter)Activator.CreateInstance(comType);
graph.AddFilter(elgatoFilter, "Elgato Video Capture Filter");
//Create smart tee filter, add to graph, connect Elgato's video out to smart tee in
smartTeeFilter = (IBaseFilter)new SmartTee();
graph.AddFilter(smartTeeFilter, "Smart Tee");
IPin outPin = GetPin(PinDirection.Output, "Video", elgatoFilter);
IPin inPin = GetPin(PinDirection.Input, smartTeeFilter);
graph.Connect(outPin, inPin);
//Create video renderer filter, add it to graph, connect smartTee Preview pin to video renderer's input pin
videoRendererFilter = (IBaseFilter)new VideoRenderer();
graph.AddFilter(videoRendererFilter, "Video Renderer");
outPin = GetPin(PinDirection.Output, "Preview", smartTeeFilter);
inPin = GetPin(PinDirection.Input, videoRendererFilter);
graph.Connect(outPin, inPin);
//Render stream from video renderer
captureGraph.RenderStream(PinCategory.Preview, MediaType.Video, videoRendererFilter, null, null);
//Set the video preview to be the videoFeed panel
IVideoWindow vw = (IVideoWindow)graph;
vw.put_Owner(pictureBox1.Handle);
vw.put_MessageDrain(this.Handle);
vw.put_WindowStyle(WindowStyle.Child | WindowStyle.ClipSiblings | WindowStyle.ClipChildren);
vw.SetWindowPosition(0, 0, 1280, 720);
//Start the preview
IMediaControl mediaControl = graph as IMediaControl;
mediaControl.Run();
}
Can you run the filter graph successfully, and which step did you get error info?
You can get sample code \Samples\Capture\PlayCap to see how to build video capture filter graph.
If you want to get a video snapshot, you can get the sample code at \Samples\Capture\DxSnap.
You can modify the video source index and video snapshot size to get what you want.
const int VIDEODEVICE = 0; // zero based index of video capture device to use
const int VIDEOWIDTH = 2048; // Depends on video device caps
const int VIDEOHEIGHT = 1536; // Depends on video device caps
const int VIDEOBITSPERPIXEL = 24; // BitsPerPixel values determined by device

Insert an image into an existing pdf document using iText Sharp

I need to insert an image into an existing pdf at a specific location. I tried the answer at this question. But whatever different ways I do the image is being inserted at (0,0) position (bottom left corner). I tried another approach where instead of using stream I used Document class in iTextSharp as shown here. Now I am able to place the image at the desired position but this method is creating a new document with just this image. Most of the articles I searched are using PdfReader and PdfStamper so I think this is the recommended way. Any help is appreciated. Posting below code for both the methods I tried.
PdfStamper method
private void AddImage(string filePath)
{
string imageURL = #"ImagePath\Image.jpg";
using (Stream inputPdfStream = new FileStream(filePath, FileMode.Open, FileAccess.Read))
using (Stream inputImageStream = new FileStream(imageURL, FileMode.Open, FileAccess.Read))
using (Stream outputPdfStream = new FileStream(#"ResultingPdfPath\Abcd.pdf", FileMode.Create, FileAccess.ReadWrite))
{
Image image = Image.GetInstance(inputImageStream);
image.ScaleToFit(100, 100);
var reader = new PdfReader(inputPdfStream);
var stamper = new PdfStamper(reader, outputPdfStream);
PdfContentByte content = stamper.GetUnderContent(1);
image.SetAbsolutePosition(100f, 150f);
content.AddImage(image);
stamper.Close();
reader.Close();
}
}
Document class method
private void TestMessage(string filePath)
{
string imageURL = #"ImagePath\Image.jpg";
Document doc = new Document(PageSize.A4);
PdfWriter writer = PdfWriter.GetInstance(doc, new FileStream(filePath, FileMode.Open));
doc.Open();
iTextSharp.text.Image jpg = iTextSharp.text.Image.GetInstance(imageURL);
jpg.ScaleToFit(140f, 120f);
jpg.SetAbsolutePosition(100, 100);
jpg.SpacingBefore = 10f;
jpg.SpacingAfter = 1f;
jpg.Alignment = Element.ALIGN_LEFT;
doc.Add(jpg);
doc.Close();
}
Let me know if you need further information.
I adapted your method to accept variable out paths and positions and tested it with iTextSharp 5.5.7 like this:
[TestFixture]
class TestInsertImage
{
/// iText stamp image on top not always working
/// http://stackoverflow.com/questions/33898280/itext-stamp-image-on-top-not-always-working
///
[Test]
public void AddStampToTestPdf()
{
Directory.CreateDirectory(#"C:\Temp\test-results\content\");
AddImage(#"d:\Issues\stackoverflow\iText stamp image on top not always working\Multipage.pdf", #"C:\Temp\test-results\content\Multipage-stamp-Image-100-150.pdf", 100f, 150f);
AddImage(#"d:\Issues\stackoverflow\iText stamp image on top not always working\Multipage.pdf", #"C:\Temp\test-results\content\Multipage-stamp-Image-150-100.pdf", 150f, 100f);
}
private void AddImage(string filePath, string outPath, float x, float y)
{
string imageURL = #"c:\Repo\GitHub\testarea\itext5\src\test\resources\mkl\testarea\itext5\layer\Willi-1.jpg";
using (Stream inputPdfStream = new FileStream(filePath, FileMode.Open, FileAccess.Read))
using (Stream inputImageStream = new FileStream(imageURL, FileMode.Open, FileAccess.Read))
using (Stream outputPdfStream = new FileStream(outPath, FileMode.Create, FileAccess.ReadWrite))
{
Image image = Image.GetInstance(inputImageStream);
image.ScaleToFit(100, 100);
var reader = new PdfReader(inputPdfStream);
var stamper = new PdfStamper(reader, outputPdfStream);
PdfContentByte content = stamper.GetUnderContent(1);
image.SetAbsolutePosition(x, y);
content.AddImage(image);
stamper.Close();
reader.Close();
}
}
}
The results are included below.
As you see, the positioning information clearly are respected, and the image is definitely not always at the bottom left corner.
If this indeed does not work for the OP, he is keeping information from us required to help him.
Multipage-stamp-Image-100-150.pdf
Created using
AddImage(#"d:\Issues\stackoverflow\iText stamp image on top not always working\Multipage.pdf", #"C:\Temp\test-results\content\Multipage-stamp-Image-100-150.pdf", 100f, 150f);
Multipage-stamp-Image-150-100.pdf
Created using:
AddImage(#"d:\Issues\stackoverflow\iText stamp image on top not always working\Multipage.pdf", #"C:\Temp\test-results\content\Multipage-stamp-Image-150-100.pdf", 150f, 100f);

Windows Phone - update live tile from background agent with custom image

I am trying to add cloud image to album cover if the cover is loaded from internet. I am trying to do this in Background Audio agent and I think I almost got it. The problem is that I have black image in tile. Few times when testing I got cover image with my cloud image in it but mostly I get black image (and sometimes black image with cloud in it).
Can anyone help me find the problem? Thanks
private void UpdateAppTile()
{
var apptile = ShellTile.ActiveTiles.First();
if (apptile != null && _playList != null && _playList.Any())
{
var track = _playList[currentTrackNumber];
var size = 360;
Uri coverUrl;
if (track.AlbumArt.OriginalString.StartsWith("http"))
{
BitmapImage img = null;
using (AutoResetEvent are = new AutoResetEvent(false))
{
string filename = Path.GetFileNameWithoutExtension(track.AlbumArt.OriginalString);
var urlToNewCover = String.Format("http://.../{0}/{1}", filename, size);
coverUrl = new Uri(urlToNewCover, UriKind.Absolute);
Deployment.Current.Dispatcher.BeginInvoke(() =>
{
img = new BitmapImage(coverUrl);
are.Set();
});
are.WaitOne();
var wbmp = CreateTileImageWithCloud(img);
SaveTileImage(wbmp, "/shared/shellcontent/test.jpg");
coverUrl = new Uri("isostore:/shared/shellcontent/test.jpg", UriKind.RelativeOrAbsolute);
}
}
else
{
var coverId = track.Tag.Split(',')[1];
var urlToNewCover = String.Format("http://.../{0}/{1}", coverId, size);
coverUrl = new Uri(urlToNewCover, UriKind.Absolute);
}
var appTileData = new FlipTileData
{
BackgroundImage = coverUrl,
WideBackgroundImage = coverUrl,
...
}
apptile.Update(appTileData);
}
}
public static BitmapImage LoadBitmap(string iFilename)
{
Uri imgUri = new Uri(iFilename, UriKind.Relative);
StreamResourceInfo imageResource = Application.GetResourceStream(imgUri);
BitmapImage image = new BitmapImage();
image.SetSource(imageResource.Stream);
return image;
}
private void SaveTileImage(WriteableBitmap wbmp, string filename)
{
using (var store = IsolatedStorageFile.GetUserStoreForApplication())
{
if (store.FileExists(filename))
store.DeleteFile(filename);
var stream = store.OpenFile(filename, FileMode.OpenOrCreate);
wbmp.SaveJpeg(stream, wbmp.PixelWidth, wbmp.PixelHeight, 100, 100);
stream.Close();
}
}
private WriteableBitmap CreateTileImageWithCloud(BitmapImage img)
{
Image image = null;
WriteableBitmap wbmp = null;
using (AutoResetEvent are = new AutoResetEvent(false))
{
Deployment.Current.Dispatcher.BeginInvoke(() =>
{
image = new Image { Source = img };
Canvas.SetLeft(image, 0);
Canvas.SetTop(image, 0);
var cloud = new BitmapImage(new Uri("Assets/Images/Other/Cloud_no.png", UriKind.Relative));
var cloudImg = new Image { Source = cloud };
Canvas.SetLeft(cloudImg, 125);
Canvas.SetTop(cloudImg, 10);
var canvas = new Canvas
{
Height = 176,
Width = 176
};
canvas.Children.Add(image);
canvas.Children.Add(cloudImg);
wbmp = new WriteableBitmap(176, 176);
wbmp.Render(canvas, null);
wbmp.Invalidate();
are.Set();
});
are.WaitOne();
}
return wbmp;
}
Edit
I found little pattern in which this is working and in which not. When application is running and I called this twice (in TrackReady and SkipNext) then I very often get cover image with cloud. When I am running just background agent (without running app) I get always black image. And often first UpdateAppTile call is just black image and second it's black image with cloud. That black color is default canvas background so I guess I have problems with delay when loading cover image from url. But I am not sure how in my case use ImageOpened event and if it help.
I think that you should call Measure and Arrange after adding elements to canvas and before rendering canvas (as with other UIElements):
canvas.Measure( new Size( Width, Height ) );
canvas.Arrange( new Rect( 0, 0, Width, Height ) );

Is it possible to record sound played on the sound card?

Is it even remotely possible to record sound that is being played on the sound card?
Supposedly, I am playing an audio file, what I need is to redirect the output to the disk. DirectShow or might be feasible.
Any help is greatly appreciated, thanks.
You need to enable audio loopback device, and you will be able to record from in a stadnard way with all the well-known APIs (including DirectShow).
Loopback Recording
Enabling "Stereo Mix" in Vista
Capturing Window's audio in C#
Once enabled, you will see the device on DirectShow apps:
Check out NAudio and this thread for a WasapiLoopbackCapture class that takes the output from your soundcard and turns it into a wavein device you can record or whatever...
https://naudio.codeplex.com/discussions/203605/
My Solution C# NAUDIO Library v1.9.0
waveInStream = new WasapiLoopbackCapture(); //record sound card.
waveInStream.DataAvailable += new EventHandler<WaveInEventArgs>(this.OnDataAvailable); // sound data event
waveInStream.RecordingStopped += new EventHandler<StoppedEventArgs>(this.OnDataStopped); // record stopped event
MessageBox.Show(waveInStream.WaveFormat.Encoding + " - " + waveInStream.WaveFormat.SampleRate +" - " + waveInStream.WaveFormat.BitsPerSample + " - " + waveInStream.WaveFormat.Channels);
//IEEEFLOAT - 48000 - 32 - 2
//Explanation: IEEEFLOAT = Encoding | 48000 Hz | 32 bit | 2 = STEREO and 1 = mono
writer = new WaveFileWriter("out.wav", new WaveFormat(48000, 24, 2));
waveInStream.StartRecording(); // record start
Events
WaveFormat myOutFormat = new WaveFormat(48000, 24, 2); // --> Encoding PCM standard.
private void OnDataAvailable(object sender, WaveInEventArgs e)
{
//standard e.Buffer encoding = IEEEFLOAT
//MessageBox.Show(e.Buffer + " - " + e.BytesRecorded);
//if you needed change for encoding. FOLLOW WaveFormatConvert ->
byte[] output = WaveFormatConvert(e.Buffer, e.BytesRecorded, waveInStream.WaveFormat, myOutFormat);
writer.Write(output, 0, output.Length);
}
private void OnDataStopped(object sender, StoppedEventArgs e)
{
if (writer != null)
{
writer.Close();
}
if (waveInStream != null)
{
waveInStream.Dispose();
}
}
WaveFormatConvert -> Optional
public byte[] WaveFormatConvert(byte[] input, int length, WaveFormat inFormat, WaveFormat outFormat)
{
if (length == 0)
return new byte[0];
using (var memStream = new MemoryStream(input, 0, length))
{
using (var inputStream = new RawSourceWaveStream(memStream, inFormat))
{
using (var resampler = new MediaFoundationResampler(inputStream, outFormat)) {
resampler.ResamplerQuality = 60; // 1 low - 60 max high
//CONVERTED READ STREAM
byte[] buffer = new byte[length];
using (var stream = new MemoryStream())
{
int read;
while ((read = resampler.Read(buffer, 0, length)) > 0)
{
stream.Write(buffer, 0, read);
}
return stream.ToArray();
}
}
}
}
}

Resources