Save image from Elgato Game Capture using DirectShow

I'm using an Elgato Game Capture HD60 to live preview a GoPro Hero 5 in my application. Now I want to save a frame of the stream as a JPG in my folder, but I can't figure out how.
To bind the pins:
DsROTEntry rot; // Registers the graph in the Running Object Table so tools can connect to it
IFilterGraph2 graph;
ICaptureGraphBuilder2 captureGraph;
IBaseFilter elgatoFilter;
IBaseFilter smartTeeFilter;
IBaseFilter videoRendererFilter;
Size videoSize;

private IPin GetPin(PinDirection pinDir, IBaseFilter filter)
{
    IEnumPins epins;
    int hr = filter.EnumPins(out epins);
    if (hr < 0)
        return null;
    IntPtr fetched = Marshal.AllocCoTaskMem(4);
    IPin[] pins = new IPin[1];
    epins.Reset();
    while (epins.Next(1, pins, fetched) == 0)
    {
        PinInfo pinfo;
        pins[0].QueryPinInfo(out pinfo);
        bool found = (pinfo.dir == pinDir);
        DsUtils.FreePinInfo(pinfo);
        if (found)
        {
            Marshal.FreeCoTaskMem(fetched);
            return pins[0];
        }
        // Release pins that don't match so they aren't leaked
        Marshal.ReleaseComObject(pins[0]);
    }
    Marshal.FreeCoTaskMem(fetched);
    return null;
}

private IPin GetPin(PinDirection pinDir, string name, IBaseFilter filter)
{
    IEnumPins epins;
    int hr = filter.EnumPins(out epins);
    if (hr < 0)
        return null;
    IntPtr fetched = Marshal.AllocCoTaskMem(4);
    IPin[] pins = new IPin[1];
    epins.Reset();
    while (epins.Next(1, pins, fetched) == 0)
    {
        PinInfo pinfo;
        pins[0].QueryPinInfo(out pinfo);
        bool found = (pinfo.dir == pinDir && pinfo.name == name);
        DsUtils.FreePinInfo(pinfo);
        if (found)
        {
            Marshal.FreeCoTaskMem(fetched);
            return pins[0];
        }
        // Release pins that don't match so they aren't leaked
        Marshal.ReleaseComObject(pins[0]);
    }
    Marshal.FreeCoTaskMem(fetched);
    return null;
}
And to start the stream:
private void button1_Click(object sender, EventArgs e)
{
    // Set the video size to use for capture and recording
    videoSize = new Size(1280, 720);

    // Initialize filter graph and capture graph
    graph = (IFilterGraph2)new FilterGraph();
    captureGraph = (ICaptureGraphBuilder2)new CaptureGraphBuilder2();
    captureGraph.SetFiltergraph(graph);
    rot = new DsROTEntry(graph);

    // Create the filter for the Elgato and add it to the graph
    Guid elgatoGuid = new Guid("39F50F4C-99E1-464A-B6F9-D605B4FB5918");
    Type comType = Type.GetTypeFromCLSID(elgatoGuid);
    elgatoFilter = (IBaseFilter)Activator.CreateInstance(comType);
    graph.AddFilter(elgatoFilter, "Elgato Video Capture Filter");

    // Create smart tee filter, add to graph, connect Elgato's video out to smart tee in
    smartTeeFilter = (IBaseFilter)new SmartTee();
    graph.AddFilter(smartTeeFilter, "Smart Tee");
    IPin outPin = GetPin(PinDirection.Output, "Video", elgatoFilter);
    IPin inPin = GetPin(PinDirection.Input, smartTeeFilter);
    graph.Connect(outPin, inPin);

    // Create video renderer filter, add it to the graph, connect the smart tee's
    // Preview pin to the video renderer's input pin
    videoRendererFilter = (IBaseFilter)new VideoRenderer();
    graph.AddFilter(videoRendererFilter, "Video Renderer");
    outPin = GetPin(PinDirection.Output, "Preview", smartTeeFilter);
    inPin = GetPin(PinDirection.Input, videoRendererFilter);
    graph.Connect(outPin, inPin);

    // Render the stream from the video renderer
    captureGraph.RenderStream(PinCategory.Preview, MediaType.Video, videoRendererFilter, null, null);

    // Set the video preview to be the videoFeed panel
    IVideoWindow vw = (IVideoWindow)graph;
    vw.put_Owner(pictureBox1.Handle);
    vw.put_MessageDrain(this.Handle);
    vw.put_WindowStyle(WindowStyle.Child | WindowStyle.ClipSiblings | WindowStyle.ClipChildren);
    vw.SetWindowPosition(0, 0, 1280, 720);

    // Start the preview
    IMediaControl mediaControl = graph as IMediaControl;
    mediaControl.Run();
}

Can you run the filter graph successfully, and at which step do you get an error?
You can look at the sample code in \Samples\Capture\PlayCap to see how to build a video capture filter graph.
If you want to grab a video snapshot, see the sample code in \Samples\Capture\DxSnap.
You can modify the video source index and the snapshot size to get what you want:
const int VIDEODEVICE = 0; // zero based index of video capture device to use
const int VIDEOWIDTH = 2048; // Depends on video device caps
const int VIDEOHEIGHT = 1536; // Depends on video device caps
const int VIDEOBITSPERPIXEL = 24; // BitsPerPixel values determined by device
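In the graph above, the same idea would mean inserting a SampleGrabber (the filter the DxSnap sample is built around) between the smart tee's Capture pin and a null renderer. Below is a minimal sketch of just the grab-and-save step, not the DxSnap sample verbatim; it assumes a sampleGrabber that was connected with a 24-bit RGB media type, had SetBufferSamples(true) called during setup, and lives in a running graph:

private void SaveSnapshot(ISampleGrabber sampleGrabber, string path)
{
    // Read the negotiated media type to get width, height, and stride
    AMMediaType mediaType = new AMMediaType();
    sampleGrabber.GetConnectedMediaType(mediaType);
    VideoInfoHeader header =
        (VideoInfoHeader)Marshal.PtrToStructure(mediaType.formatPtr, typeof(VideoInfoHeader));
    int width = header.BmiHeader.Width;
    int height = header.BmiHeader.Height;
    int stride = width * (header.BmiHeader.BitCount / 8);
    DsUtils.FreeAMMediaType(mediaType);

    // First call asks for the buffer size; second call copies the current frame
    int bufferSize = 0;
    sampleGrabber.GetCurrentBuffer(ref bufferSize, IntPtr.Zero);
    IntPtr buffer = Marshal.AllocCoTaskMem(bufferSize);
    try
    {
        sampleGrabber.GetCurrentBuffer(ref bufferSize, buffer);
        using (Bitmap bitmap = new Bitmap(width, height, stride,
                   System.Drawing.Imaging.PixelFormat.Format24bppRgb, buffer))
        {
            // DIB rows are stored bottom-up, so flip before saving
            bitmap.RotateFlip(RotateFlipType.RotateNoneFlipY);
            bitmap.Save(path, System.Drawing.Imaging.ImageFormat.Jpeg);
        }
    }
    finally
    {
        Marshal.FreeCoTaskMem(buffer);
    }
}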

Related

Merge/Concatenate two videos with different frame rates using FFmpegFrameGrabber & FFmpegFrameRecorder javacv in Spring Boot

I am trying to merge/concatenate two videos. One has a frame rate of 0.25 fps and the other 30 fps. When I merge the two using the javacv FFmpegFrameGrabber and FFmpegFrameRecorder classes, the second video is slowed down, looks like a slow-motion video, and its length increases drastically. I would like to know what I need to do to keep the videos at their respective frame rates and merge them without altering either of them. I have attached the code below for reference:
FFmpegFrameGrabber video1Grabber = new FFmpegFrameGrabber("video-test1.mp4");
video1Grabber.start();
FFmpegFrameGrabber video2Grabber = new FFmpegFrameGrabber("video-test2.mp4");
video2Grabber.start();

// Download the background audio track
URL url = new URL(S3Urls.AUDIO_DREAMY);
BufferedInputStream inputStream = new BufferedInputStream(url.openStream());
FileOutputStream fileOS = new FileOutputStream("dreamy-audioFile.mp3");
byte[] data = new byte[1024];
int byteContent;
while ((byteContent = inputStream.read(data, 0, 1024)) != -1) {
    fileOS.write(data, 0, byteContent);
}

FFmpegFrameGrabber audioGrabber = new FFmpegFrameGrabber("dreamy-audioFile.mp3");
audioGrabber.start();
FFmpegFrameRecorder audioRecorder = new FFmpegFrameRecorder("audio-output.mp3", audioGrabber.getAudioChannels());
audioRecorder.setSampleRate(audioGrabber.getSampleRate());
audioRecorder.start();
Frame audioFrame;
while ((audioFrame = audioGrabber.grab()) != null) {
    audioRecorder.record(audioFrame);
}
audioRecorder.stop();

FFmpegFrameGrabber finalAudio = new FFmpegFrameGrabber("audio-output.mp3");
finalAudio.start();

FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("video-output.mp4", 1280, 720);
recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
recorder.setAudioCodec(avcodec.AV_CODEC_ID_AAC);
recorder.setSampleRate(finalAudio.getSampleRate());
recorder.setAudioChannels(finalAudio.getAudioChannels());
recorder.setFormat("mp4");
recorder.start();

Frame video1Frame;
recorder.setFrameRate(video1Grabber.getFrameRate());
while ((video1Frame = video1Grabber.grabFrame()) != null) {
    recorder.record(video1Frame);
}
video1Grabber.stop();

Frame video2Frame;
recorder.setFrameRate(video2Grabber.getFrameRate());
while ((video2Frame = video2Grabber.grabFrame()) != null) {
    recorder.record(video2Frame);
}
video2Grabber.stop();

finalAudio.stop();
recorder.stop();
fileOS.flush();
fileOS.close();

BufferedSoundStream can't play wav files

I'm making an app for my drum classes, and to make it cross-platform I've chosen Urho.Sharp, because it has a low-level sound API as well as rich graphics capabilities.
As a first step I'm making a metronome app, and for that I'm working with BufferedSoundStream, adding audio and then the needed silence, as described here: https://github.com/xamarin/urho-samples/blob/master/FeatureSamples/Core/29_SoundSynthesis/SoundSynthesis.cs
But the resulting sound is not a sound at all, as if random bits got into the buffered stream.
This is my code:
/// <summary>
/// This code initializes the sound subsystem.
/// </summary>
void CreateSound()
{
    // Sound source needs a node so that it is considered enabled
    node = new Node();
    SoundSource source = node.CreateComponent<SoundSource>();
    soundStream = new BufferedSoundStream();
    // Set format: 44100 Hz, sixteen bit, stereo
    soundStream.SetFormat(44100, true, true);
    // Start playback. We don't have data in the stream yet, but the
    // SoundSource will wait until there is data, as the stream is by
    // default in the "don't stop at end" mode.
    source.Play(soundStream);
}
/// <summary>
/// This code preloads all sound resources.
/// </summary>
readonly Dictionary<PointSoundType, string> SoundsMapping = new Dictionary<PointSoundType, string>
{
    {PointSoundType.beat, "wav/beat.wav"},
    {PointSoundType.click, "wav/click.wav"},
    {PointSoundType.click_accent, "wav/click_accent.wav"},
    {PointSoundType.crash, "wav/crash.wav"},
    {PointSoundType.foot_hh, "wav/foot_hh.wav"},
    {PointSoundType.hh, "wav/hh.wav"},
    {PointSoundType.open_hh, "wav/open_hh.wav"},
    {PointSoundType.ride, "wav/ride.wav"},
    {PointSoundType.snare, "wav/snare.wav"},
    {PointSoundType.tom_1, "wav/tom_1.wav"},
    {PointSoundType.tom_2, "wav/tom_2.wav"},
};

Dictionary<PointSoundType, Sound> SoundCache = new Dictionary<PointSoundType, Sound>();

private void LoadSoundResources()
{
    // Preload all sounds
    foreach (var s in SoundsMapping)
    {
        SoundCache[s.Key] = ResourceCache.GetSound(s.Value);
        Debug.WriteLine("resource loaded: " + s.Value + ", length = " + SoundCache[s.Key].Length);
    }
}
/// <summary>
/// This code fills up the stream with audio.
/// </summary>
private void UpdateSound()
{
    // Try to keep 1/10 second of sound in the buffer, to avoid both dropouts and unnecessary latency
    //float targetLength = 1.0f / 10.0f;
    // Temporarily increase the buffer to 1 s
    float targetLength = 1.0f;
    float requiredLength = targetLength - soundStream.BufferLength;
    if (requiredLength < 0.0f)
        return;
    uint numSamples = (uint)(soundStream.Frequency * requiredLength);
    // Check whether the stream is still full
    if (numSamples == 0)
        return;

    var silencePause = new short[44100];
    // Iterate and play all sounds
    SoundCache.All(s =>
    {
        soundStream.AddData(s.Value.Handle, s.Value.DataSize);
        // Add silence between the sounds
        soundStream.AddData(silencePause, 0, silencePause.Length);
        return true;
    });
}
Make sure your wav files are in the resource cache. Then don't play the BufferedSoundStream, but the Urho.Audio.Sound itself. This is just a different overload of the same method Urho.Audio.SoundSource.Play(), but it works.
int PlaySound(string sSound)
{
    var cache = Application.Current.ResourceCache;
    Urho.Audio.Sound sound = cache.GetSound(sSound);
    if (sound != null)
    {
        Node soundNode = scene.CreateChild("Sound");
        Urho.Audio.SoundSource soundSource = soundNode.CreateComponent<Urho.Audio.SoundSource>();
        soundSource.Play(sound);
        soundSource.Gain = 0.99f;
        return 1;
    }
    return 0;
}
Since you're using urho-samples, you can start each drum sample from the OnUpdate override, something like this:
public float fRun = 0.0f;
public int iRet = 0; // keep counting the played sounds

public override void OnUpdate(float timeStep)
{
    fRun = fRun + timeStep;
    int iMS = (int)(10f * fRun); // tenths of a second
    if (iMS == 100) iRet = iRet + PlaySound("wav/hh.wav");
    if (iMS == 120) iRet = iRet + PlaySound("wav/hh.wav");
    if (iMS == 140) iRet = iRet + PlaySound("wav/hh.wav");
    if (iMS == 160) iRet = iRet + PlaySound("wav/open_hh.wav");
    if (iMS >= 160) fRun = 0.8f;
}
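For what it's worth, the noise in the original approach is consistent with feeding the stream something other than raw PCM: Sound.Handle/DataSize point at the loaded file, headers included, while BufferedSoundStream expects bare samples in the format passed to SetFormat(). A minimal sketch of feeding the stream data it can actually play, following the SoundSynthesis sample linked above but generating a sine "click" instead of reading a wav file (the 440 Hz tone, mono format, and durations are assumptions):

const int sampleRate = 44100;
soundStream.SetFormat(sampleRate, true, false); // 44100 Hz, 16-bit, mono

// 100 ms of a 440 Hz sine tone as raw 16-bit PCM
short[] click = new short[sampleRate / 10];
for (int i = 0; i < click.Length; i++)
    click[i] = (short)(Math.Sin(2 * Math.PI * 440 * i / sampleRate) * short.MaxValue * 0.5);
// 500 ms of silence
short[] silence = new short[sampleRate / 2];

soundStream.AddData(click, 0, click.Length);
soundStream.AddData(silence, 0, silence.Length);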

Windows 10 Store App - save URI as file

I am writing a Windows 10 Store app. In the app the user can input text and then press "Read Text", and Cortana reads the text aloud. That works fine.
Now I want to add a feature: press a button called "Save" or something like that, and save Cortana's output as an mp3 file. This should work via a normal save-file dialog.
This is what I have so far.
private static MediaElement mediaplayer = new MediaElement();
// ... mediaplayer element gets content ...
Uri file = mediaplayer.Source;
Instead of a Uri element I could also get a SpeechSynthesisStream with this information.
How can I save this Uri / stream to a file?
EDIT:
this is the final code:
var stream2 = stream.CloneStream();
// ... use stream2 as the MediaElement source ...
if (stream != null)
{
    using (var reader = new DataReader(stream))
    {
        FileSavePicker savePicker = new FileSavePicker();
        savePicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
        savePicker.FileTypeChoices.Add("WAV", new List<string>() { ".wav" });
        savePicker.SuggestedFileName = "sound.wav";
        StorageFile file = await savePicker.PickSaveFileAsync();
        if (file != null)
        {
            using (var outputStream = await file.OpenAsync(FileAccessMode.ReadWrite))
            {
                using (var writer = new DataWriter(outputStream.GetOutputStreamAt(0)))
                {
                    long writtenBytes = 0;
                    const int bufferSize = 8192;
                    uint loadedBytes = 0;
                    // Read the stream in chunks and write each chunk out
                    while ((loadedBytes = await reader.LoadAsync(bufferSize)) > 0) //!!!
                    {
                        IBuffer buffer = reader.ReadBuffer(loadedBytes);
                        writer.WriteBuffer(buffer);
                        uint tmpWritten = await writer.StoreAsync(); //!!!
                        writtenBytes += tmpWritten;
                    }
                }
            }
        }
    }
}
If you're trying to write the output to a file instead of (or as well as) rendering it audibly to a MediaElement, you probably want something like this as well.
SpeechSynthesisStream synthesisStream = await synthesizer.SynthesizeTextToStreamAsync(text);
var stream2 = synthesisStream.CloneStream();

FileSavePicker savePicker = new FileSavePicker();
savePicker.SuggestedStartLocation = PickerLocationId.MusicLibrary;
savePicker.FileTypeChoices.Add("WAV", new List<string>() { ".wav" });
savePicker.SuggestedFileName = "sound.wav";
StorageFile file = await savePicker.PickSaveFileAsync();

using (var reader = new DataReader(synthesisStream))
{
    using (var outputStream = await file.OpenAsync(FileAccessMode.ReadWrite))
    {
        using (var writer = new DataWriter(outputStream.GetOutputStreamAt(0)))
        {
            long writtenBytes = 0;
            const int bufferSize = 8192;
            uint loadedBytes = 0;
            // Read the synthesis stream in chunks and write each chunk out
            while ((loadedBytes = await reader.LoadAsync(bufferSize)) > 0) //!!!
            {
                IBuffer buffer = reader.ReadBuffer(loadedBytes);
                writer.WriteBuffer(buffer);
                uint tmpWritten = await writer.StoreAsync(); //!!!
                writtenBytes += tmpWritten;
            }
        }
    }
}

// Set the source and start playing the synthesized audio stream.
media.AutoPlay = true;
media.SetSource(stream2, synthesisStream.ContentType);
media.Play();
The one problem is that the synthesisStream isn't rewindable (as far as I can tell), so you might have to synthesize it twice, or make a second (in-memory) copy of the stream if you want to make it audible at the same time.
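If the synthesized stream comfortably fits in memory, a shorter variant is possible. A minimal sketch, not part of the original answer, assuming the same synthesizer, text, and picked file as above:

// Load the whole SpeechSynthesisStream into one buffer and write it in a single call
SpeechSynthesisStream stream = await synthesizer.SynthesizeTextToStreamAsync(text);
using (var reader = new DataReader(stream))
{
    await reader.LoadAsync((uint)stream.Size);
    IBuffer buffer = reader.ReadBuffer((uint)stream.Size);
    await FileIO.WriteBufferAsync(file, buffer);
}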

Show image in picture gallery in Windows Phone 7

I have an image app in WP7.
class Images
{
    public string Title { get; set; }
    public string Path { get; set; }
}
At page level, I bind Title and Path (relative to my app) to a list.
What I need is: when the user clicks a list item, the respective image opens in the Windows Phone 7 picture gallery.
You should clarify your question, but I suppose Path is the location of your image in isolated storage. Provided that img is the name of your Image control in XAML:
img.Source = GetImage(LoadIfExists(image.Path));
LoadIfExists returns the binary data for a file in isolated storage, and GetImage turns it into a WriteableBitmap:
public static WriteableBitmap GetImage(byte[] buffer)
{
    // The first four bytes hold the image size: two bytes each for width and height
    int width = buffer[0] * 256 + buffer[1];
    int height = buffer[2] * 256 + buffer[3];
    long matrixSize = width * height;
    WriteableBitmap retVal = new WriteableBitmap(width, height);
    int bufferPos = 4;
    for (int matrixPos = 0; matrixPos < matrixSize; matrixPos++)
    {
        // Assemble one 32-bit pixel from four consecutive bytes
        int pixel = buffer[bufferPos++];
        pixel = pixel << 8 | buffer[bufferPos++];
        pixel = pixel << 8 | buffer[bufferPos++];
        pixel = pixel << 8 | buffer[bufferPos++];
        retVal.Pixels[matrixPos] = pixel;
    }
    return retVal;
}
public static byte[] LoadIfExists(string fileName)
{
    byte[] retVal;
    using (IsolatedStorageFile iso = IsolatedStorageFile.GetUserStoreForApplication())
    {
        if (iso.FileExists(fileName))
        {
            using (IsolatedStorageFileStream stream = iso.OpenFile(fileName, FileMode.Open))
            {
                retVal = new byte[stream.Length];
                stream.Read(retVal, 0, retVal.Length);
            }
        }
        else
        {
            retVal = new byte[0];
        }
    }
    return retVal;
}
If you want to write the image into the picture library, it's basically the same process, ending with a call to MediaLibrary.SavePictureToCameraRoll(), as explained in this MSDN article.
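A minimal sketch of that last step, assuming the bytes in isolated storage are an actual JPEG file; the helper name is made up, and SavePictureToCameraRoll requires Windows Phone OS 7.1+ and the ID_CAP_MEDIALIB capability:

using Microsoft.Xna.Framework.Media;
using System.IO;
using System.IO.IsolatedStorage;

// Hypothetical helper: copies a JPEG from isolated storage into the camera roll
public static void SaveToCameraRoll(string fileName)
{
    using (IsolatedStorageFile iso = IsolatedStorageFile.GetUserStoreForApplication())
    using (IsolatedStorageFileStream stream = iso.OpenFile(fileName, FileMode.Open))
    {
        MediaLibrary library = new MediaLibrary();
        library.SavePictureToCameraRoll(fileName, stream);
    }
}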

Is it possible to record sound played on the sound card?

Is it even remotely possible to record sound that is being played on the sound card?
Suppose I am playing an audio file; what I need is to redirect the output to disk. DirectShow might be feasible.
Any help is greatly appreciated, thanks.
You need to enable an audio loopback device, and then you will be able to record from it in a standard way with all the well-known APIs (including DirectShow):
Loopback Recording
Enabling "Stereo Mix" in Vista
Capturing Window's audio in C#
Once enabled, you will see the device in DirectShow apps.
Check out NAudio and this thread for a WasapiLoopbackCapture class that takes the output from your sound card and turns it into a wave-in device you can record from:
https://naudio.codeplex.com/discussions/203605/
My solution, using the C# NAudio library v1.9.0:
waveInStream = new WasapiLoopbackCapture(); // record the sound card
waveInStream.DataAvailable += new EventHandler<WaveInEventArgs>(this.OnDataAvailable); // sound data event
waveInStream.RecordingStopped += new EventHandler<StoppedEventArgs>(this.OnDataStopped); // recording stopped event
MessageBox.Show(waveInStream.WaveFormat.Encoding + " - " + waveInStream.WaveFormat.SampleRate + " - " + waveInStream.WaveFormat.BitsPerSample + " - " + waveInStream.WaveFormat.Channels);
// IEEEFLOAT - 48000 - 32 - 2
// Explanation: IEEEFLOAT = encoding | 48000 Hz | 32-bit | 2 = stereo, 1 = mono
writer = new WaveFileWriter("out.wav", new WaveFormat(48000, 24, 2));
waveInStream.StartRecording(); // start recording
Events
WaveFormat myOutFormat = new WaveFormat(48000, 24, 2); // standard PCM encoding

private void OnDataAvailable(object sender, WaveInEventArgs e)
{
    // The standard e.Buffer encoding is IEEEFLOAT
    //MessageBox.Show(e.Buffer + " - " + e.BytesRecorded);
    // If you need to change the encoding, use WaveFormatConvert below
    byte[] output = WaveFormatConvert(e.Buffer, e.BytesRecorded, waveInStream.WaveFormat, myOutFormat);
    writer.Write(output, 0, output.Length);
}

private void OnDataStopped(object sender, StoppedEventArgs e)
{
    if (writer != null)
    {
        writer.Close();
    }
    if (waveInStream != null)
    {
        waveInStream.Dispose();
    }
}
WaveFormatConvert (optional):
public byte[] WaveFormatConvert(byte[] input, int length, WaveFormat inFormat, WaveFormat outFormat)
{
    if (length == 0)
        return new byte[0];

    using (var memStream = new MemoryStream(input, 0, length))
    using (var inputStream = new RawSourceWaveStream(memStream, inFormat))
    using (var resampler = new MediaFoundationResampler(inputStream, outFormat))
    {
        resampler.ResamplerQuality = 60; // 1 = lowest, 60 = highest quality
        // Read back the converted stream
        byte[] buffer = new byte[length];
        using (var stream = new MemoryStream())
        {
            int read;
            while ((read = resampler.Read(buffer, 0, length)) > 0)
            {
                stream.Write(buffer, 0, read);
            }
            return stream.ToArray();
        }
    }
}
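If you don't need the PCM conversion, a simpler variant (a minimal sketch, not from the answer above) is to write the capture's native format straight to disk, since WaveFileWriter accepts the capture's own WaveFormat:

// Record loopback audio to a wav file in the device's native format
// (usually IEEE float), with no per-buffer resampling.
var capture = new WasapiLoopbackCapture();
var writer = new WaveFileWriter("loopback.wav", capture.WaveFormat);
capture.DataAvailable += (s, e) => writer.Write(e.Buffer, 0, e.BytesRecorded);
capture.RecordingStopped += (s, e) =>
{
    writer.Dispose();  // finalize the wav header
    capture.Dispose();
};
capture.StartRecording();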
