Getting the amplitude of the microphone in Windows Phone - windows-phone-7

I want to get the real-time amplitude of the microphone input in Windows Phone. What is the simplest and most efficient way to achieve this?

To get the amplitude, you will have to handle the BufferReady event of the Microphone class:
http://msdn.microsoft.com/en-us/library/windowsphone/develop/gg442302(v=vs.105).aspx
Setup code
// microphone and buffer should be class-level fields so the event handler can access them
Microphone microphone = Microphone.Default;
microphone.BufferReady += new EventHandler<EventArgs>(microphone_BufferReady);
microphone.BufferDuration = TimeSpan.FromMilliseconds(1000);
byte[] buffer = new byte[microphone.GetSampleSizeInBytes(microphone.BufferDuration)];
microphone.Start();
Event handler block
void microphone_BufferReady(object sender, EventArgs e)
{
    microphone.GetData(buffer);
    for (int i = 0; i < buffer.Length; i += 2)
    {
        // each 16-bit sample is the instantaneous amplitude of the signal
        short sample = BitConverter.ToInt16(buffer, i);
    }
}
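If what you want is a single level figure per buffer (for a volume meter, say), you can track the peak magnitude across the samples. A minimal sketch of such a handler, reusing the microphone and buffer fields from the setup code above; the 32768 divisor is simply 16-bit full scale:
void microphone_BufferReady(object sender, EventArgs e)
{
    microphone.GetData(buffer);
    int peak = 0;
    for (int i = 0; i < buffer.Length; i += 2)
    {
        // samples are 16-bit signed PCM; keep the largest magnitude seen
        short sample = BitConverter.ToInt16(buffer, i);
        int magnitude = Math.Abs((int)sample);
        if (magnitude > peak)
            peak = magnitude;
    }
    // normalized 0.0 - 1.0 level, e.g. for a level meter
    float level = peak / 32768f;
}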

Related

Save image from Elgato Game Capture using DirectShow

I'm using an Elgato Game Capture HD60 to live-preview a GoPro Hero 5 in my application. Now I want to save the stream as a JPG in a folder, but I can't find out how.
To bind the pin
DsROTEntry rot; // registers the graph in the Running Object Table (for debugging with GraphEdit)
IFilterGraph2 graph;
ICaptureGraphBuilder2 captureGraph;
IBaseFilter elgatoFilter;
IBaseFilter smartTeeFilter;
IBaseFilter videoRendererFilter;
Size videoSize;
private IPin GetPin(PinDirection pinDir, IBaseFilter filter)
{
    IEnumPins epins;
    int hr = filter.EnumPins(out epins);
    if (hr < 0)
        return null;
    IntPtr fetched = Marshal.AllocCoTaskMem(4);
    IPin[] pins = new IPin[1];
    epins.Reset();
    while (epins.Next(1, pins, fetched) == 0)
    {
        PinInfo pinfo;
        pins[0].QueryPinInfo(out pinfo);
        bool found = (pinfo.dir == pinDir);
        DsUtils.FreePinInfo(pinfo);
        if (found)
        {
            Marshal.FreeCoTaskMem(fetched);
            return pins[0];
        }
        Marshal.ReleaseComObject(pins[0]); // release pins we don't return
    }
    Marshal.FreeCoTaskMem(fetched);
    return null;
}
private IPin GetPin(PinDirection pinDir, string name, IBaseFilter filter)
{
    IEnumPins epins;
    int hr = filter.EnumPins(out epins);
    if (hr < 0)
        return null;
    IntPtr fetched = Marshal.AllocCoTaskMem(4);
    IPin[] pins = new IPin[1];
    epins.Reset();
    while (epins.Next(1, pins, fetched) == 0)
    {
        PinInfo pinfo;
        pins[0].QueryPinInfo(out pinfo);
        bool found = (pinfo.dir == pinDir && pinfo.name == name);
        DsUtils.FreePinInfo(pinfo);
        if (found)
        {
            Marshal.FreeCoTaskMem(fetched);
            return pins[0];
        }
        Marshal.ReleaseComObject(pins[0]); // release pins we don't return
    }
    Marshal.FreeCoTaskMem(fetched);
    return null;
}
And to start the stream
private void button1_Click(object sender, EventArgs e)
{
    //Set the video size to use for capture and recording
    videoSize = new Size(1280, 720);

    //Initialize filter graph and capture graph
    graph = (IFilterGraph2)new FilterGraph();
    captureGraph = (ICaptureGraphBuilder2)new CaptureGraphBuilder2();
    captureGraph.SetFiltergraph(graph);
    rot = new DsROTEntry(graph);

    //Create filter for Elgato
    Guid elgatoGuid = new Guid("39F50F4C-99E1-464A-B6F9-D605B4FB5918");
    Type comType = Type.GetTypeFromCLSID(elgatoGuid);
    elgatoFilter = (IBaseFilter)Activator.CreateInstance(comType);
    graph.AddFilter(elgatoFilter, "Elgato Video Capture Filter");

    //Create smart tee filter, add to graph, connect Elgato's video out to smart tee in
    smartTeeFilter = (IBaseFilter)new SmartTee();
    graph.AddFilter(smartTeeFilter, "Smart Tee");
    IPin outPin = GetPin(PinDirection.Output, "Video", elgatoFilter);
    IPin inPin = GetPin(PinDirection.Input, smartTeeFilter);
    graph.Connect(outPin, inPin);

    //Create video renderer filter, add it to graph, connect smart tee's Preview pin to video renderer's input pin
    videoRendererFilter = (IBaseFilter)new VideoRenderer();
    graph.AddFilter(videoRendererFilter, "Video Renderer");
    outPin = GetPin(PinDirection.Output, "Preview", smartTeeFilter);
    inPin = GetPin(PinDirection.Input, videoRendererFilter);
    graph.Connect(outPin, inPin);

    //Render stream from video renderer
    captureGraph.RenderStream(PinCategory.Preview, MediaType.Video, videoRendererFilter, null, null);

    //Set the video preview to be the videoFeed panel
    IVideoWindow vw = (IVideoWindow)graph;
    vw.put_Owner(pictureBox1.Handle);
    vw.put_MessageDrain(this.Handle);
    vw.put_WindowStyle(WindowStyle.Child | WindowStyle.ClipSiblings | WindowStyle.ClipChildren);
    vw.SetWindowPosition(0, 0, 1280, 720);

    //Start the preview
    IMediaControl mediaControl = graph as IMediaControl;
    mediaControl.Run();
}
Can you run the filter graph successfully, and at which step do you get the error?
You can look at the DirectShow.NET sample code under \Samples\Capture\PlayCap to see how to build a video capture filter graph.
If you want to grab a video snapshot, see the sample code under \Samples\Capture\DxSnap.
You can modify the video source index and the snapshot size to get what you want.
const int VIDEODEVICE = 0; // zero based index of video capture device to use
const int VIDEOWIDTH = 2048; // Depends on video device caps
const int VIDEOHEIGHT = 1536; // Depends on video device caps
const int VIDEOBITSPERPIXEL = 24; // BitsPerPixel values determined by device
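If you want the snapshot taken from the graph you already built rather than via DxSnap's device index, a rough sketch along the same lines as the DxSnap sample: insert a SampleGrabber after the smart tee's Capture pin, keep the last frame buffered, and copy it into a Bitmap on demand. The pin wiring, the videoSize field, and the output path are assumptions based on the question's code, not code from the sample:
// uses System.Drawing and System.Drawing.Imaging in addition to DirectShowLib
IBaseFilter grabberFilter = (IBaseFilter)new SampleGrabber();
ISampleGrabber grabber = (ISampleGrabber)grabberFilter;
AMMediaType mt = new AMMediaType();
mt.majorType = MediaType.Video;
mt.subType = MediaSubType.RGB24; // force RGB24 so the buffer maps onto a 24bpp Bitmap
grabber.SetMediaType(mt);
DsUtils.FreeAMMediaType(mt);
grabber.SetBufferSamples(true); // keep a copy of the most recent frame
graph.AddFilter(grabberFilter, "Sample Grabber");
// ...connect the smart tee's "Capture" pin -> sample grabber -> null renderer...

// later, e.g. in a snapshot button handler:
int bufSize = 0;
grabber.GetCurrentBuffer(ref bufSize, IntPtr.Zero); // first call queries the size
IntPtr frame = Marshal.AllocCoTaskMem(bufSize);
grabber.GetCurrentBuffer(ref bufSize, frame);
using (Bitmap bmp = new Bitmap(videoSize.Width, videoSize.Height,
    videoSize.Width * 3, PixelFormat.Format24bppRgb, frame))
{
    bmp.RotateFlip(RotateFlipType.RotateNoneFlipY); // DIB rows are stored bottom-up
    bmp.Save("snapshot.jpg", ImageFormat.Jpeg);
}
Marshal.FreeCoTaskMem(frame);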

BufferedSoundStream can't play wav files

I'm making an app for my drum classes, and to make it cross-platform I've chosen Urho.Sharp because it has a low-level sound API as well as rich graphics capabilities.
As a first step I'm making a metronome app, and for that I'm working with BufferedSoundStream, adding audio and then the needed silence, as described here: https://github.com/xamarin/urho-samples/blob/master/FeatureSamples/Core/29_SoundSynthesis/SoundSynthesis.cs
But the resulting sound is not a sound at all; it's as if random bits got into the buffered stream.
This is my code:
///
/// this code initializes the sound subsystem
///
void CreateSound()
{
    // Sound source needs a node so that it is considered enabled
    node = new Node();
    SoundSource source = node.CreateComponent<SoundSource>();
    soundStream = new BufferedSoundStream();
    // Set format: 44100 Hz, sixteen bit, stereo
    soundStream.SetFormat(44100, true, true);
    // Start playback. We don't have data in the stream yet, but the
    // SoundSource will wait until there is data,
    // as the stream is by default in the "don't stop at end" mode
    source.Play(soundStream);
}
///
/// this code preload all sound resources
///
readonly Dictionary<PointSoundType, string> SoundsMapping = new Dictionary<PointSoundType, string>
{
    { PointSoundType.beat, "wav/beat.wav" },
    { PointSoundType.click, "wav/click.wav" },
    { PointSoundType.click_accent, "wav/click_accent.wav" },
    { PointSoundType.crash, "wav/crash.wav" },
    { PointSoundType.foot_hh, "wav/foot_hh.wav" },
    { PointSoundType.hh, "wav/hh.wav" },
    { PointSoundType.open_hh, "wav/open_hh.wav" },
    { PointSoundType.ride, "wav/ride.wav" },
    { PointSoundType.snare, "wav/snare.wav" },
    { PointSoundType.tom_1, "wav/tom_1.wav" },
    { PointSoundType.tom_2, "wav/tom_2.wav" },
};
Dictionary<PointSoundType, Sound> SoundCache = new Dictionary<PointSoundType, Sound>();
private void LoadSoundResources()
{
    // preload all sounds
    foreach (var s in SoundsMapping)
    {
        SoundCache[s.Key] = ResourceCache.GetSound(s.Value);
        Debug.WriteLine("resource loaded: " + s.Value + ", length = " + SoundCache[s.Key].Length);
    }
}
///
/// this code fills up the stream with audio
///
private void UpdateSound()
{
    // Try to keep 1/10 second of sound in the buffer, to avoid both dropouts and unnecessary latency
    //float targetLength = 1.0f / 10.0f;
    // temporarily increase the buffer to 1s
    float targetLength = 1.0f;
    float requiredLength = targetLength - soundStream.BufferLength;
    if (requiredLength < 0.0f)
        return;
    uint numSamples = (uint)(soundStream.Frequency * requiredLength);
    // check if stream is still full
    if (numSamples == 0)
        return;
    var silencePause = new short[44100];
    // iterate and play all sounds
    SoundCache.All(s =>
    {
        soundStream.AddData(s.Value.Handle, s.Value.DataSize);
        // add silence
        soundStream.AddData(silencePause, 0, silencePause.Length);
        return true;
    });
}
Make sure your wav files are in the resource cache. Then don't play the BufferedSoundStream but the Urho.Audio.Sound itself. This is just a different overload of the same method, Urho.Audio.SoundSource.Play(), but it works.
int PlaySound(string sSound)
{
    var cache = Application.Current.ResourceCache;
    Urho.Audio.Sound sound = cache.GetSound(sSound);
    if (sound != null)
    {
        Node soundNode = scene.CreateChild("Sound");
        Urho.Audio.SoundSource soundSource = soundNode.CreateComponent<Urho.Audio.SoundSource>();
        soundSource.Play(sound);
        soundSource.Gain = 0.99f;
        return 1;
    }
    return 0;
}
Since you're using urho-samples, you can start each drum sample from the overridden OnUpdate method, something like this:
public float fRun = 0.0f;
public int iRet = 0; // keep counting the played sounds
public override void OnUpdate(float timeStep)
{
    fRun = fRun + timeStep;
    int iMS = (int)(10f * fRun); // tenths of a second
    if (iMS == 100) iRet = iRet + PlaySound("wav/hh.wav");
    if (iMS == 120) iRet = iRet + PlaySound("wav/hh.wav");
    if (iMS == 140) iRet = iRet + PlaySound("wav/hh.wav");
    if (iMS == 160) iRet = iRet + PlaySound("wav/open_hh.wav");
    if (iMS >= 160) fRun = 0.8f; // jump back so the pattern loops
}

C# Windows Forms: how to use the button_click method?

I am using a for loop to keep adding items to an array by repeatedly pressing a button (I call it btnEnter) after entering some data, something like:
double[] inputarr = new double[10];
for (int i = 0; i < inputarr.Length; i++)
{
    inputarr[i] = Double.Parse(txtAmount.Text);
}
I want to jump out of the loop and perform something by clicking another button. Can button_click() do the job for me? Something like:
for (int i = 0; i < inputarr.Length; i++)
{
    inputarr[i] = Double.Parse(txtAmount.Text);
    if (btnStop_Click() == true)
    {
        break;
    }
}
How do I make this work? Can anyone help me with this?
If I understand you correctly, you want to prompt for input 10 times in a row. Your thought process is a bit inverted. I think all you need is a prompt dialog; see Prompt Dialog in Windows Forms for an example.
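Put differently: instead of a loop, let each click of btnEnter store one value and let btnStop end the input phase. A minimal sketch of that event-driven inversion, assuming the control names from the question (btnEnter, btnStop, txtAmount):
double[] inputarr = new double[10];
int count = 0;
bool stopped = false;

void btnEnter_Click(object sender, EventArgs e)
{
    if (stopped || count >= inputarr.Length)
        return;
    double value;
    // TryParse avoids an exception on bad input
    if (double.TryParse(txtAmount.Text, out value))
        inputarr[count++] = value;
}

void btnStop_Click(object sender, EventArgs e)
{
    stopped = true;
    // process the count values entered so far
}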
You can call another button's click handler directly, like this:
for (int i = 0; i < inputarr.Length; i++)
{
    inputarr[i] = Double.Parse(txtAmount.Text);
    btnStop_Click(null, null);
    break;
}
Or you can use a timer:
int i = 0;
Timer t = new Timer();
void button_Click(object sender, EventArgs e)
{
    t.Interval = 4000;
    t.Tick += t_Tick;
    t.Start();
}
void t_Tick(object sender, EventArgs e)
{
    if (i <= 9) { inputarr[i] = Double.Parse(txtAmount.Text); }
    else { t.Stop(); /* do other stuff */ }
    i++;
}

WriteableBitmap not invalidating and rendering in Windows Phone but working in Silverlight 5?

I have a property that returns a WriteableBitmap. When I bind that property to a Silverlight 5 Image, it shows the image, but when I do the same in WP it does not show the image. While googling this issue I saw that in WP the raw pixel values do not have the alpha bits set, yet the same code works in Silverlight. I don't know what is happening. Does anybody have a similar issue or a workaround?
(Imageproperty as WriteableBitmap).Invalidate();
<Image Source="{Binding Imageproperty}"/> (this works in Silverlight but not in WP 7.1)
I had a similar sort of issue when trying to port some of the FaceLight code from Silverlight to Windows Phone. The easiest way around this is to manually set the alpha value to 255 (opaque). So say you had a WriteableBitmap, result, that you wanted to display:
//convert to byte array (ToByteArray/FromByteArray come from the WriteableBitmapEx library)
int stride = result.PixelWidth * 4; // BGRA32 is 4 bytes per pixel
int bytes = Math.Abs(stride) * result.PixelHeight;
byte[] data = result.ToByteArray();
int dataIndex = 0;
int nOffset = stride - result.PixelWidth * 4;
for (int y = 0; y < result.PixelHeight; ++y)
{
    for (int x = 0; x < result.PixelWidth; ++x)
    {
        data[dataIndex + 3] = 0xFF; // set alpha to 255
        dataIndex += 4; // skip to next pixel
    }
    dataIndex += nOffset;
}
WriteableBitmap finalImg = new WriteableBitmap(result.PixelWidth, result.PixelHeight);
return finalImg.FromByteArray(data);
Displaying the result of this method (the finalImg.FromByteArray(data) call) should work properly on the phone.
An alternative to the method above is writing the WriteableBitmap to a .jpeg and then displaying the .jpeg instead - that seemed to work for me as well, but I didn't investigate it too thoroughly.
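If you want to try that alternative, here is a minimal sketch of the round-trip using WP7's SaveJpeg extension; the method name ToOpaqueImage and the quality value 90 are my own choices, not part of the original answer:
using System.IO;
using System.Windows.Media.Imaging;

BitmapImage ToOpaqueImage(WriteableBitmap source)
{
    using (var ms = new MemoryStream())
    {
        // JPEG has no alpha channel, so the unset-alpha problem disappears
        source.SaveJpeg(ms, source.PixelWidth, source.PixelHeight, 0, 90);
        ms.Seek(0, SeekOrigin.Begin);
        var bmp = new BitmapImage();
        bmp.SetSource(ms);
        return bmp;
    }
}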
If the WriteableBitmap has its pixel data set or changed after the binding is established, you need to call Invalidate to cause an update of the screen.
This goes for both Silverlight and Phone, but you might have a race condition here that runs differently on the two platforms.
I know it's an old post, but today I encountered the same problem on Windows Phone 8.1 Silverlight and found a nice solution, so I've decided to leave it here for people with a similar problem. It was posted by Charles Petzold on MSDN as Video Feeds on Windows Phone 7 (it is shown on a VideoSink class example, but it shouldn't be a problem to adapt it to other cases). He created a simple class that derives from VideoSink:
SimpleVideoSink C# code:
public class SimpleVideoSink : VideoSink
{
    VideoFormat videoFormat;
    WriteableBitmap writeableBitmap;
    Action<WriteableBitmap> action;

    public SimpleVideoSink(Action<WriteableBitmap> action)
    {
        this.action = action;
    }

    protected override void OnCaptureStarted() { }

    protected override void OnCaptureStopped() { }

    protected override void OnFormatChange(VideoFormat videoFormat)
    {
        this.videoFormat = videoFormat;
        System.Windows.Deployment.Current.Dispatcher.BeginInvoke(() =>
        {
            writeableBitmap = new WriteableBitmap(videoFormat.PixelWidth,
                                                  videoFormat.PixelHeight);
            action(writeableBitmap);
        });
    }

    protected override void OnSample(long sampleTimeInHundredNanoseconds,
        long frameDurationInHundredNanoseconds, byte[] sampleData)
    {
        if (writeableBitmap == null)
            return;

        int baseIndex = 0;
        for (int row = 0; row < writeableBitmap.PixelHeight; row++)
        {
            for (int col = 0; col < writeableBitmap.PixelWidth; col++)
            {
                int pixel = 0;
                if (videoFormat.PixelFormat == PixelFormatType.Format8bppGrayscale)
                {
                    byte grayShade = sampleData[baseIndex + col];
                    pixel = (int)grayShade | (grayShade << 8) |
                            (grayShade << 16) | (0xFF << 24);
                }
                else
                {
                    int index = baseIndex + 4 * col;
                    pixel = (int)sampleData[index + 0] | (sampleData[index + 1] << 8) |
                            (sampleData[index + 2] << 16) | (sampleData[index + 3] << 24);
                }
                writeableBitmap.Pixels[row * writeableBitmap.PixelWidth + col] = pixel;
            }
            baseIndex += videoFormat.Stride;
        }

        writeableBitmap.Dispatcher.BeginInvoke(() =>
        {
            writeableBitmap.Invalidate();
        });
    }
}
However, this code needs some modification: VideoSink.CaptureSource must be provided with our CaptureSource (I just passed it into the constructor):
public SimpleVideoSink(CaptureSource captureSource, Action<WriteableBitmap> action)
{
    base.CaptureSource = captureSource;
    this.action = action;
}
When we initialize the SimpleVideoSink class in a ViewModel, we have to provide the Action parameter. In my case it was enough to hand the initialized WriteableBitmap to the ViewModel:
ViewModel C# code:
private WriteableBitmap videoWriteableBitmap;
public WriteableBitmap VideoWriteableBitmap
{
    get
    {
        return videoWriteableBitmap;
    }
    set
    {
        videoWriteableBitmap = value;
        RaisePropertyChanged("VideoWriteableBitmap");
    }
}

private void OnWriteableBitmapChange(WriteableBitmap writeableBitmap)
{
    VideoWriteableBitmap = writeableBitmap;
}

//this part goes into a constructor/method
SimpleVideoSink videoFrameHandler = new SimpleVideoSink(captureSource, OnWriteableBitmapChange);
Then all we have to do is bind it in the View:
View XAML code:
<Image Source="{Binding VideoWriteableBitmap}" />
In this example the Image source is refreshed on every OnSample invocation and is dispatched to the UI thread through WriteableBitmap.Dispatcher.
This solution generates a proper image with no blank pixels (the alpha channel is filled in as well), and the Invalidate() method works as it should.

Is it possible to record sound played on the sound card?

Is it even remotely possible to record sound that is being played on the sound card?
Suppose I am playing an audio file; what I need is to redirect the output to disk. DirectShow or the like might be feasible.
Any help is greatly appreciated, thanks.
You need to enable the audio loopback device, and then you will be able to record from it in a standard way with all the well-known APIs (including DirectShow):
Loopback Recording
Enabling "Stereo Mix" in Vista
Capturing Window's audio in C#
Once enabled, you will see the device in DirectShow apps.
Check out NAudio and this thread for a WasapiLoopbackCapture class that takes the output from your sound card and turns it into a wave-in device you can record from:
https://naudio.codeplex.com/discussions/203605/
My solution, using the C# NAudio library v1.9.0:
// fields (used by the event handlers below)
WasapiLoopbackCapture waveInStream;
WaveFileWriter writer;

waveInStream = new WasapiLoopbackCapture(); // record the sound card
waveInStream.DataAvailable += new EventHandler<WaveInEventArgs>(this.OnDataAvailable); // sound data event
waveInStream.RecordingStopped += new EventHandler<StoppedEventArgs>(this.OnDataStopped); // recording stopped event
MessageBox.Show(waveInStream.WaveFormat.Encoding + " - " + waveInStream.WaveFormat.SampleRate + " - " + waveInStream.WaveFormat.BitsPerSample + " - " + waveInStream.WaveFormat.Channels);
// IEEEFLOAT - 48000 - 32 - 2
// Explanation: IEEEFLOAT = encoding | 48000 Hz | 32 bit | 2 = stereo, 1 = mono
writer = new WaveFileWriter("out.wav", new WaveFormat(48000, 24, 2));
waveInStream.StartRecording(); // start recording
Events
WaveFormat myOutFormat = new WaveFormat(48000, 24, 2); // standard PCM encoding
private void OnDataAvailable(object sender, WaveInEventArgs e)
{
    // standard e.Buffer encoding = IEEEFLOAT
    //MessageBox.Show(e.Buffer + " - " + e.BytesRecorded);
    // if you need to change the encoding, see WaveFormatConvert below
    byte[] output = WaveFormatConvert(e.Buffer, e.BytesRecorded, waveInStream.WaveFormat, myOutFormat);
    writer.Write(output, 0, output.Length);
}
private void OnDataStopped(object sender, StoppedEventArgs e)
{
    if (writer != null)
    {
        writer.Close();
    }
    if (waveInStream != null)
    {
        waveInStream.Dispose();
    }
}
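The answer never shows how the capture ends; calling StopRecording should be enough, since it fires RecordingStopped and runs OnDataStopped above. A minimal sketch with a hypothetical stop button handler:
private void btnStop_Click(object sender, EventArgs e)
{
    waveInStream.StopRecording(); // triggers OnDataStopped above
}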
WaveFormatConvert (optional)
public byte[] WaveFormatConvert(byte[] input, int length, WaveFormat inFormat, WaveFormat outFormat)
{
    if (length == 0)
        return new byte[0];
    using (var memStream = new MemoryStream(input, 0, length))
    using (var inputStream = new RawSourceWaveStream(memStream, inFormat))
    using (var resampler = new MediaFoundationResampler(inputStream, outFormat))
    {
        resampler.ResamplerQuality = 60; // 1 = low ... 60 = max/high
        // read the converted stream
        byte[] buffer = new byte[length];
        using (var stream = new MemoryStream())
        {
            int read;
            while ((read = resampler.Read(buffer, 0, length)) > 0)
            {
                stream.Write(buffer, 0, read);
            }
            return stream.ToArray();
        }
    }
}
