iOS Core Audio Recording Buffers Issue - core-audio

I'm trying to write a bunch of samples to a TPCircularBuffer, provided by Michael Tyson at http://atastypixel.com/blog/a-simple-fast-circular-buffer-implementation-for-audio-processing/comment-page-1/#comment-4988
I can successfully play back these recorded samples in real time, like a monitor.
However, I wish to keep the samples in the TPCircularBuffer for later playback, so I implemented two flags, rio->recording and rio->playing.
My idea was to set rio->recording to YES with a button, record for a while, and then stop recording by setting the flag back to NO. Theoretically, the TPCircularBuffer would then hold my audio.
However, when I set rio->playing to YES, the playback callback produces only jittery sound that bears no resemblance to what I recorded.
Am I using the buffer correctly? Or is this usually done another way?
Thanks.
Pier.
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
RIO *rio = (RIO*)inRefCon;
AudioUnit rioUnit = rio->theAudioUnit;
//ExtAudioFileRef eaf = rio->outEAF;
AudioBufferList abl = rio->audioBufferList;
SInt32 samples[NUMBER_OF_SAMPLES]; // A large enough size to not have to worry about buffer overrun
abl.mNumberBuffers = 1;
abl.mBuffers[0].mData = &samples;
abl.mBuffers[0].mNumberChannels = 1;
abl.mBuffers[0].mDataByteSize = inNumberFrames * sizeof(SInt16);
OSStatus result;
result = AudioUnitRender(rioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&abl);
if (noErr != result) { NSLog(@"Obtain recorded samples error"); }
// React to a recording flag, if recording, save the abl into own buffer, else ignore
if (rio->recording)
{
TPCircularBufferProduceBytes(&rio->buffer, abl.mBuffers[0].mData, inNumberFrames * sizeof(SInt16));
NSLog(#"Recording!");
}
else
{
NSLog(#"Not Recording!");
}
// once stop recording save the circular buffer to a temp circular buffer
return noErr;
}
static OSStatus playbackCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
RIO *rio = (RIO*)inRefCon;
int bytesToCopy = ioData->mBuffers[0].mDataByteSize;
SInt16 *targetBuffer = (SInt16*)ioData->mBuffers[0].mData;
// Pull audio from playthrough buffer
int32_t availableBytes;
if (rio->playing)
{
SInt16 * tempbuffer = TPCircularBufferTail(&rio->buffer, &availableBytes);
memcpy(targetBuffer, tempbuffer, MIN(bytesToCopy, availableBytes));
TPCircularBufferConsume(&rio->buffer, MIN(bytesToCopy, availableBytes));
NSLog(#"Playing!");
}
else
{
NSLog(#"Playing silence!");
for (int i = 0 ; i < ioData->mNumberBuffers; i++){
//get the buffer to be filled
AudioBuffer buffer = ioData->mBuffers[i];
SInt16 *frameBuffer = (SInt16 *)buffer.mData; // 16-bit samples, so zero SInt16s, not UInt32s
//loop through the buffer and fill the frames
for (int j = 0; j < inNumberFrames; j++){
frameBuffer[j] = 0;
}
}
}
return noErr;
}

I will answer this question myself.
Basically, the rubbish sound was due to the TPCircularBuffer not being large enough to hold the recording. The playback callback was simply playing rubbish because the buffer no longer contained valid audio data.
Basically, making the TPCircularBuffer larger solved my problem. (duh!)
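For reference, a rough sketch of the kind of sizing I mean (the numbers and the init call below are illustrative, assuming 16-bit mono at 44.1 kHz, not my actual code):
#include "TPCircularBuffer.h"
// Sketch only: size the circular buffer up front for the longest take you
// expect it to hold. 16-bit mono at 44.1 kHz for ten seconds needs
// 44100 * 10 * sizeof(SInt16) bytes.
enum { kSampleRate = 44100, kMaxSeconds = 10 };
if (!TPCircularBufferInit(&rio->buffer,
kSampleRate * kMaxSeconds * sizeof(SInt16))) {
// handle allocation failure
}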
Pier.

Related

Cannot render audio with VoiceProcessingIO AudioUnit on Mac

I'm currently attempting to use the VoiceProcessingIO AudioUnit to pass audio through from the mic to another input device. I am using a userspace audio driver that proxies audio from output to input. I'm attempting to render the audio from the AudioUnit and save it into a ring buffer, then play it into that audio driver so it can be used as an input.
Here's a snippet of the code:
#define kNumber_Of_Channels 2
#define kLatency_Frame_Size 0
#define kRing_Buffer_Frame_Size ((65536 + kLatency_Frame_Size))
static Float32 *gMicrophoneRingBuffer;
static bool gMicrophoneRingBufferAllocated = false;
static AudioComponentInstance gAECAudioUnitInstance;
static AudioBufferList *gMicrophoneABL;
static bool gMicrophoneABLAllocated = false;
static OSStatus MicrophoneAECInputCallback(
void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
UInt32 inNumberFrames, AudioBufferList *ioData) {
#pragma unused(inRefCon, ioData)
OSStatus theError;
// Render audio into audio buffer list
theError = AudioUnitRender(gAECAudioUnitInstance, ioActionFlags, inTimeStamp,
inBusNumber, inNumberFrames, gMicrophoneABL);
if (theError) {
NSLog(#"! Failed to render AEC audio unit\t\t%d", theError);
return theError;
}
// Copy audio buffer list into ring buffer
AudioBuffer mBuffer = gMicrophoneABL->mBuffers[0];
void *ioMainBuffer = mBuffer.mData;
UInt32 inIOBufferFrameSize = mBuffer.mDataByteSize / (sizeof(Float32) * 1);
UInt64 mSampleTime = inTimeStamp->mSampleTime;
UInt32 ringBufferFrameLocationStart = mSampleTime % kRing_Buffer_Frame_Size;
UInt32 firstPartFrameSize =
kRing_Buffer_Frame_Size - ringBufferFrameLocationStart;
UInt32 secondPartFrameSize = 0;
if (firstPartFrameSize >= inIOBufferFrameSize) {
firstPartFrameSize = inIOBufferFrameSize;
} else {
secondPartFrameSize = inIOBufferFrameSize - firstPartFrameSize;
}
memcpy(gMicrophoneRingBuffer + ringBufferFrameLocationStart * 1, ioMainBuffer,
firstPartFrameSize * 1 * sizeof(Float32));
memcpy(gMicrophoneRingBuffer,
(Float32 *)ioMainBuffer + firstPartFrameSize * 1,
secondPartFrameSize * 1 * sizeof(Float32));
return noErr;
}
static OSStatus MicrophoneOutputDeviceIOProc(AudioObjectID inDevice,
const AudioTimeStamp *inNow,
const AudioBufferList *inInputData,
const AudioTimeStamp *inInputTime,
AudioBufferList *outOutputData,
const AudioTimeStamp *inOutputTime,
void *__nullable inClientData) {
#pragma unused(inDevice, inNow, inInputData, inInputTime, inClientData)
AudioBuffer mBuffer = outOutputData->mBuffers[0];
UInt32 inIOBufferFrameSize = mBuffer.mDataByteSize / (sizeof(Float32) * 1);
UInt64 mSampleTime = inOutputTime->mSampleTime;
UInt32 ringBufferFrameLocationStart = mSampleTime % kRing_Buffer_Frame_Size;
UInt32 firstPartFrameSize =
kRing_Buffer_Frame_Size - ringBufferFrameLocationStart;
UInt32 secondPartFrameSize = 0;
if (firstPartFrameSize >= inIOBufferFrameSize) {
firstPartFrameSize = inIOBufferFrameSize;
} else {
secondPartFrameSize = inIOBufferFrameSize - firstPartFrameSize;
}
memcpy(mBuffer.mData,
gMicrophoneRingBuffer + ringBufferFrameLocationStart * 1,
firstPartFrameSize * 1 * sizeof(Float32));
memcpy((Float32 *)mBuffer.mData + firstPartFrameSize * 1,
gMicrophoneRingBuffer, secondPartFrameSize * 1 * sizeof(Float32));
return noErr;
}
static OSStatus getAudioObjectID(CFStringRef uid, AudioObjectID *result) {
AudioObjectPropertyAddress inAddress;
inAddress.mSelector = kAudioHardwarePropertyDeviceForUID;
UInt32 theSize = sizeof(AudioValueTranslation);
AudioValueTranslation theValue = {&uid, sizeof(CFStringRef), result,
sizeof(AudioObjectID)};
OSStatus theError = AudioObjectGetPropertyData(
kAudioObjectSystemObject, &inAddress, 0, NULL, &theSize, &theValue);
return theError;
}
static OSStatus
createAndStartAECAudioUnit(AudioObjectID microphoneId,
AudioDeviceIOProcID *outputIOProcId) {
gMicrophoneRingBuffer =
calloc(kRing_Buffer_Frame_Size * kNumber_Of_Channels, sizeof(Float32));
gMicrophoneRingBufferAllocated = true;
OSStatus theError;
// Find audio unit component
AudioComponentDescription auDesc;
auDesc.componentType = kAudioUnitType_Output;
auDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
auDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
auDesc.componentFlags = 0;
auDesc.componentFlagsMask = 0;
AudioComponent component = AudioComponentFindNext(NULL, &auDesc);
// Create audio unit using component
theError = AudioComponentInstanceNew(component, &gAECAudioUnitInstance);
if (theError != noErr) {
NSLog(#"! Failed to create audio unit \t\t%d", theError);
return theError;
}
UInt32 one;
one = 1;
theError = AudioUnitSetProperty(gAECAudioUnitInstance,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input, 1, &one, sizeof(one));
if (theError != noErr) {
NSLog(#"! Failed to enable IO for input \t\t%d", theError);
return theError;
}
UInt32 zero;
zero = 0;
theError = AudioUnitSetProperty(
gAECAudioUnitInstance, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output, 0, &zero, sizeof(zero));
if (theError != noErr) {
NSLog(#"! Failed to disable IO for output \t\t%d", theError);
return theError;
}
// Set current output device
theError = AudioUnitSetProperty(
gAECAudioUnitInstance, kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global, 0, &microphoneId, sizeof(AudioObjectID));
if (theError != noErr) {
NSLog(#"! Failed to set audio unit microphone device \t\t%d", theError);
return theError;
}
// Set input callback to pull the microphone data into ring buffer
AURenderCallbackStruct inputProc;
inputProc.inputProc = MicrophoneAECInputCallback;
theError = AudioUnitSetProperty(
gAECAudioUnitInstance, kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global, 0, &inputProc, sizeof(inputProc));
if (theError != noErr) {
NSLog(#"! Failed to set audio unit input callback \t\t%d", theError);
return theError;
}
// Get the size of the IO buffer(s)
UInt32 bufferSizeFrames;
UInt32 propertySize = sizeof(bufferSizeFrames);
theError = AudioUnitGetProperty(
gAECAudioUnitInstance, kAudioDevicePropertyBufferFrameSize,
kAudioUnitScope_Global, 0, &bufferSizeFrames, &propertySize);
if (theError != noErr) {
NSLog(#"! Failed to get buffer size frames \t\t%d", theError);
return theError;
}
// Initialize AudioBufferList
UInt32 bufferSizeBytes = bufferSizeFrames * sizeof(Float32);
UInt32 propsize =
offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * 1);
gMicrophoneABL = (AudioBufferList *)malloc(propsize);
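// Note: streamDesc is not defined in this snippet; it is assumed to be the
// microphone's AudioStreamBasicDescription, obtained elsewhere.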
gMicrophoneABL->mNumberBuffers = streamDesc.mChannelsPerFrame;
for (UInt32 i = 0; i < gMicrophoneABL->mNumberBuffers; i++) {
gMicrophoneABL->mBuffers[i].mNumberChannels = streamDesc.mChannelsPerFrame;
gMicrophoneABL->mBuffers[i].mDataByteSize = bufferSizeBytes;
gMicrophoneABL->mBuffers[i].mData = malloc(bufferSizeBytes);
}
gMicrophoneABLAllocated = true;
// Initialize audio unit
theError = AudioUnitInitialize(gAECAudioUnitInstance);
if (theError != noErr) {
NSLog(#"! Failed to initialize audio unit \t\t%d", theError);
return theError;
}
// Start audio unit
theError = AudioOutputUnitStart(gAECAudioUnitInstance);
if (theError != noErr) {
NSLog(#"! Failed to start output of audio unit \t\t%d", theError);
return theError;
}
// Get microphone driver device id
AudioObjectID microphoneDriverDeviceId = kAudioObjectUnknown;
theError = getAudioObjectID(CFSTR(kMicrophone_Device_UID),
&microphoneDriverDeviceId);
if (theError != noErr) {
NSLog(#"! Failed to get microphone driver device UID\t\t%d", theError);
return theError;
}
// Create output proc
theError = AudioDeviceCreateIOProcID(microphoneDriverDeviceId,
MicrophoneOutputDeviceIOProc, NULL,
outputIOProcId);
if (theError != noErr) {
NSLog(#"! Failed to create output io proc\t\t%d", theError);
return theError;
}
// Starts the proxy process
theError = AudioDeviceStart(microphoneDriverDeviceId, *outputIOProcId);
if (theError != noErr) {
NSLog(#"! Failed to create output io proc\t\t%d", theError);
return theError;
}
return noErr;
}
Currently this code errors when it attempts to render the audio in MicrophoneAECInputCallback. This only happens when I change the AudioUnit component subtype to kAudioUnitSubType_VoiceProcessingIO rather than kAudioUnitSubType_HALOutput, which leads me to believe that I'm just using the VoiceProcessingIO AudioUnit incorrectly.
I'm doing all this to perform echo cancellation on the microphone input and ultimately feed it into an AVCaptureSession. Are there any alternative approaches you would suggest, or how should I go about fixing my usage of the VoiceProcessingIO AudioUnit on Mac?
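One generic debugging aid (a sketch of mine, not from this question): decode the OSStatus that AudioUnitRender returns into its four-character code so it can be looked up against the error constants in the Core Audio headers.
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <CoreFoundation/CoreFoundation.h>
// Print an OSStatus both as a number and, when all four bytes are printable,
// as its four-character code.
static void LogOSStatus(const char *context, OSStatus err) {
UInt32 bigEndian = CFSwapInt32HostToBig((UInt32)err);
char code[5] = {0};
memcpy(code, &bigEndian, 4);
if (isprint((unsigned char)code[0]) && isprint((unsigned char)code[1]) &&
isprint((unsigned char)code[2]) && isprint((unsigned char)code[3]))
fprintf(stderr, "%s: error '%s' (%d)\n", context, code, (int)err);
else
fprintf(stderr, "%s: error %d\n", context, (int)err);
}
Calling this with the result of AudioUnitRender in MicrophoneAECInputCallback makes it easier to see which specific error the VoiceProcessingIO unit is returning.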

Processed audio very noisy when Superpowered Reverb used with Audio Graph

I'm using Superpowered Reverb effect with Audio Graph on OS X.
I'm doing that by calling reverb->process in the render callback of an output audio unit (tested on kAudioUnitSubType_SystemOutput and kAudioUnitSubType_DefaultOutput).
The reverb effect works, but the resulting audio is very noisy. I've tried different things (adjusting the sample rate, using extra zeroed buffers, etc.) but it doesn't seem to help. Is there any way to solve this? Thanks.
Simplified code:
SuperpoweredReverb* reverb;
OSStatus callback(void * inComponentStorage,
AudioUnitRenderActionFlags * __nullable flags,
const AudioTimeStamp * inTimeStamp,
UInt32 busNumber,
UInt32 framesCount,
AudioBufferList * ioData)
{
for (int i = 0; i < ioData->mNumberBuffers; ++i)
{
if (ioData->mBuffers[i].mData)
reverb->process(static_cast<float*>(ioData->mBuffers[i].mData),
static_cast<float*>(ioData->mBuffers[i].mData),
framesCount);
}
return noErr;
}
void setupReverb(unsigned int sampleRate, AudioUnit unit)
{
reverb = new SuperpoweredReverb(sampleRate);
reverb->enable(true);
reverb->setMix(0.5);
AudioUnitAddRenderNotify(unit, callback, nullptr);
}
It turns out that in the audio graph the callback gets called multiple times, even for the same channel. I made the following changes (using an integer to track the current channel) and it works well now. (Below is again simplified code.)
SuperpoweredReverb* reverbUnit;
int spliter = 0;
OSStatus callback(void * inComponentStorage,
AudioUnitRenderActionFlags * __nullable flags,
const AudioTimeStamp * inTimeStamp,
UInt32 busNumber,
UInt32 framesCount,
AudioBufferList * ioData)
{
spliter++;
for (int i = 0; i < ioData->mNumberBuffers; ++i)
{
if (ioData->mBuffers[i].mData) {
if (!(spliter % ioData->mBuffers[i].mNumberChannels))
reverbUnit->process(static_cast<float*>(ioData->mBuffers[i].mData),
static_cast<float*>(ioData->mBuffers[i].mData),
framesCount);
}
}
return noErr;
}
void setupReverb(unsigned int sampleRate, AudioUnit unit)
{
reverbUnit = new SuperpoweredReverb(sampleRate);
reverbUnit->enable(true);
reverbUnit->setWet(0.7);
AudioUnitAddRenderNotify(unit, callback, nullptr);
}
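As an aside, a sketch of my own (not part of the answer above): a render-notify callback installed with AudioUnitAddRenderNotify fires both before and after the unit renders, so another way to make sure the effect touches the buffers only once per render cycle is to act only on the post-render pass.
// Sketch: process only on the post-render pass of the render notify.
static OSStatus reverbNotify(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
if (!(*ioActionFlags & kAudioUnitRenderAction_PostRender))
return noErr; // skip the pre-render call
for (UInt32 i = 0; i < ioData->mNumberBuffers; ++i)
{
float *samples = static_cast<float*>(ioData->mBuffers[i].mData);
if (samples)
reverbUnit->process(samples, samples, inNumberFrames);
}
return noErr;
}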

Image saved from Fingerprint sensor seems corrupted

I have been trying to put together code to save an image from a fingerprint sensor. I have already searched the forums, and this is my current code, which saves a file with the correct size, but when I open the image it is not a fingerprint at all; it looks corrupted. Here is what it looks like.
My code is given below. Any help will be appreciated. I am new to Windows development.
bool SaveBMP(BYTE* Buffer, int width, int height, long paddedsize, LPCTSTR bmpfile)
{
BITMAPFILEHEADER bmfh;
BITMAPINFOHEADER info;
memset(&bmfh, 0, sizeof(BITMAPFILEHEADER));
memset(&info, 0, sizeof(BITMAPINFOHEADER));
//Next we fill the file header with data:
bmfh.bfType = 0x4d42; // 0x4d42 = 'BM'
bmfh.bfReserved1 = 0;
bmfh.bfReserved2 = 0;
bmfh.bfSize = sizeof(BITMAPFILEHEADER) +
sizeof(BITMAPINFOHEADER) + paddedsize;
bmfh.bfOffBits = 0x36;
//and the info header:
info.biSize = sizeof(BITMAPINFOHEADER);
info.biWidth = width;
info.biHeight = height;
info.biPlanes = 1;
info.biBitCount = 8;
info.biCompression = BI_RGB;
info.biSizeImage = 0;
info.biXPelsPerMeter = 0x0ec4;
info.biYPelsPerMeter = 0x0ec4;
info.biClrUsed = 0;
info.biClrImportant = 0;
HANDLE file = CreateFile(bmpfile, GENERIC_WRITE, FILE_SHARE_READ,
NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
//Now we write the file header and info header:
unsigned long bwritten;
if (WriteFile(file, &bmfh, sizeof(BITMAPFILEHEADER),
&bwritten, NULL) == false)
{
CloseHandle(file);
return false;
}
if (WriteFile(file, &info, sizeof(BITMAPINFOHEADER),
&bwritten, NULL) == false)
{
CloseHandle(file);
return false;
}
//and finally the image data:
if (WriteFile(file, Buffer, paddedsize, &bwritten, NULL) == false)
{
CloseHandle(file);
return false;
}
//Now we can close our function with
CloseHandle(file);
return true;
}
HRESULT CaptureSample()
{
HRESULT hr = S_OK;
WINBIO_SESSION_HANDLE sessionHandle = NULL;
WINBIO_UNIT_ID unitId = 0;
WINBIO_REJECT_DETAIL rejectDetail = 0;
PWINBIO_BIR sample = NULL;
SIZE_T sampleSize = 0;
// Connect to the system pool.
hr = WinBioOpenSession(
WINBIO_TYPE_FINGERPRINT, // Service provider
WINBIO_POOL_SYSTEM, // Pool type
WINBIO_FLAG_RAW, // Access: Capture raw data
NULL, // Array of biometric unit IDs
0, // Count of biometric unit IDs
WINBIO_DB_DEFAULT, // Default database
&sessionHandle // [out] Session handle
);
// Capture a biometric sample.
wprintf_s(L"\n Calling WinBioCaptureSample - Swipe sensor...\n");
hr = WinBioCaptureSample(
sessionHandle,
WINBIO_NO_PURPOSE_AVAILABLE,
WINBIO_DATA_FLAG_RAW,
&unitId,
&sample,
&sampleSize,
&rejectDetail
);
wprintf_s(L"\n Swipe processed - Unit ID: %d\n", unitId);
wprintf_s(L"\n Captured %d bytes.\n", sampleSize);
PWINBIO_BIR_HEADER BirHeader = (PWINBIO_BIR_HEADER)(((PBYTE)sample) + sample->HeaderBlock.Offset);
PWINBIO_BDB_ANSI_381_HEADER AnsiBdbHeader = (PWINBIO_BDB_ANSI_381_HEADER)(((PBYTE)sample) + sample->StandardDataBlock.Offset);
PWINBIO_BDB_ANSI_381_RECORD AnsiBdbRecord = (PWINBIO_BDB_ANSI_381_RECORD)(((PBYTE)AnsiBdbHeader) + sizeof(WINBIO_BDB_ANSI_381_HEADER));
PBYTE firstPixel = (PBYTE)((PBYTE)AnsiBdbRecord) + sizeof(WINBIO_BDB_ANSI_381_RECORD);
SaveBMP(firstPixel, AnsiBdbRecord->HorizontalLineLength, AnsiBdbRecord->VerticalLineLength, AnsiBdbRecord->BlockLength, "D://test.bmp");
wprintf_s(L"\n Press any key to exit.");
_getch();
}
IInspectable is correct: the corruption looks like it's coming from your implicit use of color tables:
info.biBitCount = 8;
info.biCompression = BI_RGB;
If your data is actually just 24-bit RGB, you can do info.biBitCount = 24; to render a valid bitmap. If it's lower (or higher) than that, then you'll need to do some conversion work. You can check AnsiBdbHeader->PixelDepth to confirm that it's the 8 bits per pixel that you expect.
It also looks like your passing AnsiBdbRecord->BlockLength to SaveBMP isn't quite right. The docs for this field say:
WINBIO_BDB_ANSI_381_RECORD structure
BlockLength
Contains the number of bytes in this structure plus the number of bytes of sample image data.
So you'll want to make sure to subtract sizeof(WINBIO_BDB_ANSI_381_RECORD) before passing it as your bitmap buffer size.
Side note, make sure you free the memory involved after the capture.
WinBioFree(sample);
WinBioCloseSession(sessionHandle);
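If the sample data does turn out to be 8 bits per pixel grayscale (an assumption; check AnsiBdbHeader->PixelDepth first), note that an 8-bpp BMP also needs an explicit 256-entry grayscale palette between the headers and the pixel data, with bfOffBits and bfSize adjusted to match. A rough sketch of the extra pieces inside SaveBMP, where imageDataSize is BlockLength minus sizeof(WINBIO_BDB_ANSI_381_RECORD):
// Sketch only: grayscale palette and adjusted offsets for an 8-bpp BMP.
RGBQUAD palette[256];
for (int i = 0; i < 256; i++) {
palette[i].rgbBlue = palette[i].rgbGreen = palette[i].rgbRed = (BYTE)i;
palette[i].rgbReserved = 0;
}
bmfh.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + sizeof(palette);
bmfh.bfSize = bmfh.bfOffBits + imageDataSize;
info.biClrUsed = 256;
// ...after writing bmfh and info, write the palette, then the pixel data:
WriteFile(file, palette, sizeof(palette), &bwritten, NULL);
WriteFile(file, Buffer, imageDataSize, &bwritten, NULL);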

Muxing with libav

I have a program which is supposed to demux input MPEG-TS, transcode the MPEG-2 into H.264, and then mux the audio alongside the transcoded video. When I open the resulting muxed file with VLC I get neither audio nor video. Here is the relevant code.
My main worker loop is as follows:
void
*writer_thread(void *thread_ctx) {
struct transcoder_ctx_t *ctx = (struct transcoder_ctx_t *) thread_ctx;
AVStream *video_stream = NULL, *audio_stream = NULL;
AVFormatContext *output_context = init_output_context(ctx, &video_stream, &audio_stream);
struct mux_state_t mux_state = {0};
//from omxtx
mux_state.pts_offset = av_rescale_q(ctx->input_context->start_time, AV_TIME_BASE_Q, output_context->streams[ctx->video_stream_index]->time_base);
//write stream header if any
avformat_write_header(output_context, NULL);
//do not start doing anything until we get an encoded packet
pthread_mutex_lock(&ctx->pipeline.video_encode.is_running_mutex);
while (!ctx->pipeline.video_encode.is_running) {
pthread_cond_wait(&ctx->pipeline.video_encode.is_running_cv, &ctx->pipeline.video_encode.is_running_mutex);
}
while (!ctx->pipeline.video_encode.eos || !ctx->processed_audio_queue->queue_finished) {
//FIXME a memory barrier is required here so that we don't race
//on above variables
//fill a buffer with video data
OERR(OMX_FillThisBuffer(ctx->pipeline.video_encode.h, omx_get_next_output_buffer(&ctx->pipeline.video_encode)));
write_audio_frame(output_context, audio_stream, ctx); //write full audio frame
//FIXME no guarantee that we have a full frame per packet?
write_video_frame(output_context, video_stream, ctx, &mux_state); //write full video frame
//encoded_video_queue is being filled by the previous command
}
av_write_trailer(output_context);
//free all the resources
avcodec_close(video_stream->codec);
avcodec_close(audio_stream->codec);
/* Free the streams. */
for (int i = 0; i < output_context->nb_streams; i++) {
av_freep(&output_context->streams[i]->codec);
av_freep(&output_context->streams[i]);
}
if (!(output_context->oformat->flags & AVFMT_NOFILE)) {
/* Close the output file. */
avio_close(output_context->pb);
}
/* free the stream */
av_free(output_context);
free(mux_state.pps);
free(mux_state.sps);
}
The code for initialising libav output context is this:
static
AVFormatContext *
init_output_context(const struct transcoder_ctx_t *ctx, AVStream **video_stream, AVStream **audio_stream) {
AVFormatContext *oc;
AVOutputFormat *fmt;
AVStream *input_stream, *output_stream;
AVCodec *c;
AVCodecContext *cc;
int audio_copied = 0; //copy just 1 stream
fmt = av_guess_format("mpegts", NULL, NULL);
if (!fmt) {
fprintf(stderr, "[DEBUG] Error guessing format, dying\n");
exit(199);
}
oc = avformat_alloc_context();
if (!oc) {
fprintf(stderr, "[DEBUG] Error allocating context, dying\n");
exit(200);
}
oc->oformat = fmt;
snprintf(oc->filename, sizeof(oc->filename), "%s", ctx->output_filename);
oc->debug = 1;
oc->start_time_realtime = ctx->input_context->start_time;
oc->start_time = ctx->input_context->start_time;
oc->duration = 0;
oc->bit_rate = 0;
for (int i = 0; i < ctx->input_context->nb_streams; i++) {
input_stream = ctx->input_context->streams[i];
output_stream = NULL;
if (input_stream->index == ctx->video_stream_index) {
//copy stuff from input video index
c = avcodec_find_encoder(CODEC_ID_H264);
output_stream = avformat_new_stream(oc, c);
*video_stream = output_stream;
cc = output_stream->codec;
cc->width = input_stream->codec->width;
cc->height = input_stream->codec->height;
cc->codec_id = CODEC_ID_H264;
cc->codec_type = AVMEDIA_TYPE_VIDEO;
cc->bit_rate = ENCODED_BITRATE;
cc->time_base = input_stream->codec->time_base;
output_stream->avg_frame_rate = input_stream->avg_frame_rate;
output_stream->r_frame_rate = input_stream->r_frame_rate;
output_stream->start_time = AV_NOPTS_VALUE;
} else if ((input_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) && !audio_copied) {
/* i care only about audio */
c = avcodec_find_encoder(input_stream->codec->codec_id);
output_stream = avformat_new_stream(oc, c);
*audio_stream = output_stream;
avcodec_copy_context(output_stream->codec, input_stream->codec);
/* Apparently fixes a crash on .mkvs with attachments: */
av_dict_copy(&output_stream->metadata, input_stream->metadata, 0);
/* Reset the codec tag so as not to cause problems with output format */
output_stream->codec->codec_tag = 0;
audio_copied = 1;
}
}
for (int i = 0; i < oc->nb_streams; i++) {
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
oc->streams[i]->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (oc->streams[i]->codec->sample_rate == 0)
oc->streams[i]->codec->sample_rate = 48000; /* ish */
}
if (!(fmt->flags & AVFMT_NOFILE)) {
fprintf(stderr, "[DEBUG] AVFMT_NOFILE set, allocating output container\n");
if (avio_open(&oc->pb, ctx->output_filename, AVIO_FLAG_WRITE) < 0) {
fprintf(stderr, "[DEBUG] error creating the output context\n");
exit(1);
}
}
return oc;
}
Finally this is the code for writing audio:
static
void
write_audio_frame(AVFormatContext *oc, AVStream *st, struct transcoder_ctx_t *ctx) {
AVPacket pkt = {0}; // data and size must be 0;
struct packet_t *source_audio;
av_init_packet(&pkt);
if (!(source_audio = packet_queue_get_next_item_asynch(ctx->processed_audio_queue))) {
return;
}
pkt.stream_index = st->index;
pkt.size = source_audio->data_length;
pkt.data = source_audio->data;
pkt.pts = source_audio->PTS;
pkt.dts = source_audio->DTS;
pkt.duration = source_audio->duration;
pkt.destruct = avpacket_destruct;
/* Write the compressed frame to the media file. */
if (av_interleaved_write_frame(oc, &pkt) != 0) {
fprintf(stderr, "[DEBUG] Error while writing audio frame\n");
}
packet_queue_free_packet(source_audio, 0);
}
A resulting mpeg4 file can be obtained from here: http://87.120.131.41/dl/mpeg4.h264
I have omitted the write_video_frame code since it is a lot more complicated and I might be doing something wrong there, as I'm doing timebase conversion etc. For audio, however, I'm doing a 1:1 copy. Each packet_t packet contains data from av_read_frame from the input MPEG-TS container. In the worst case I'd expect my audio to work and not my video, but I cannot get either of them to work. The documentation seems rather vague on this - I've tried both the libav and ffmpeg IRC channels to no avail. Any information on how I can debug the issue will be greatly appreciated.
When different containers yield different results in libav it is almost always a timebase issue. All containers have a time_base that they like, and some will accept custom values... sometimes.
You must rescale the time base before putting it in the container. Generally tinkering with the mux state struct isn't something you want to do and I think what you did there doesn't do what you think. Try printing out all of the timebases to find out what they are.
For each frame you must recalculate at least the PTS. If you do it before you call the encoder, the encoder will produce the proper DTS. Do the same for the audio, but generally set the DTS to AV_NOPTS_VALUE, and sometimes you can get away with setting the audio PTS to that as well. To rescale easily, use the av_rescale(...) functions.
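Roughly, for a stream-copied audio packet whose timestamps are still in the input stream's time_base, the rescale before muxing looks something like this (an illustration using the names from your snippets loosely, not your exact code):
// Sketch: rescale packet timestamps from the input stream's time_base to the
// output stream's time_base before handing the packet to the muxer.
AVRational in_tb = input_stream->time_base;
AVRational out_tb = st->time_base;
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, in_tb, out_tb);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, in_tb, out_tb);
if (pkt.duration > 0)
pkt.duration = av_rescale_q(pkt.duration, in_tb, out_tb);
av_interleaved_write_frame(oc, &pkt);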
Be careful assuming that you have MPEG-2 data in a MPEG-TS container, that is not always true.

iOS RemoteIO - AudioUnitAddRenderNotify Callback

I'm trying to do a recording from RemoteIO using AudioUnitAddRenderNotify like this.
Basically, I'm not able to get the samples from bus 1, which is my input bus. The recordingCallback does not go past this:
if (*ioActionFlags & kAudioUnitRenderAction_PostRender || inBusNumber != 1) {
return noErr;
}
But I was told that the recordingCallback should be called for each bus every round, i.e. called with inBusNumber == 0, then inBusNumber == 1, which are the output (RemoteIO out) and input (recording) busses respectively.
What can I do to get recordingCallback to be called on my input bus so that I can record?
Thanks.
Pier.
Here's the callback.
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
NSLog(#"Entered recording callback");
// Only do pre render on bus 1
if (*ioActionFlags & kAudioUnitRenderAction_PostRender || inBusNumber != 1) {
return noErr;
}
RIO *rio = (RIO*)inRefCon;
AudioUnit rioUnit = rio->theAudioUnit;
//ExtAudioFileRef eaf = rio->outEAF;
AudioBufferList abl = rio->audioBufferList;
SInt32 samples[NUMBER_OF_SAMPLES]; // A large enough size to not have to worry about buffer overrun
abl.mNumberBuffers = 1;
abl.mBuffers[0].mData = &samples;
abl.mBuffers[0].mNumberChannels = 1;
abl.mBuffers[0].mDataByteSize = inNumberFrames * sizeof(SInt16);
OSStatus result;
result = AudioUnitRender(rioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&abl);
if (noErr != result) { NSLog(#"Obtain recorded samples error! Error : %ld", result); }
NSLog(#"Bus %ld", inBusNumber);
// React to a recording flag, if recording, save the abl into own buffer, else ignore
if (rio->recording)
{
TPCircularBufferProduceBytes(&rio->buffer, abl.mBuffers[0].mData, inNumberFrames * sizeof(SInt16));
//rio->timeIncurred += ((float)inNumberFrames) / 44100.0;
//NSLog(@"Self-calculated time incurred: %f", rio->timeIncurred);
}
return noErr;
}
Here's the code which calls the callback.
- (void)setupAudioUnitRemoteIO {
UInt32 framesPerSlice = 0;
UInt32 framesPerSlicePropertySize = sizeof (framesPerSlice);
UInt32 sampleRatePropertySize = sizeof (_graphSampleRate);
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &_remoteIOUnit);
if (noErr != status) { NSLog(#"Get audio units error"); return; }
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
if (noErr != status) { NSLog(#"Enable IO for recording error"); return; }
// Enable IO for playback
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
if (noErr != status) { NSLog(#"Enable IO for playback error"); return; }
// Obtain the value of the maximum-frames-per-slice from the I/O unit.
status = AudioUnitGetProperty (
_remoteIOUnit,
kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global,
0,
&framesPerSlice,
&framesPerSlicePropertySize
);
// Describe format
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
if (noErr != status) { NSLog(#"Apply format to input bus error"); return; }
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
if (noErr != status) { NSLog(#"Apply format to output bus error"); return; }
rio.theAudioUnit = _remoteIOUnit; // Need this, as used in callbacks to refer to remoteIO
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = &rio;
status = AudioUnitAddRenderNotify(_remoteIOUnit, callbackStruct.inputProc, callbackStruct.inputProcRefCon);
NSAssert (status == noErr, @"Problem adding recordingCallback to RemoteIO. Error code: %d '%.4s'", (int) status, (const char *)&status);
I managed to resolve this by not using AudioUnitAddRenderNotify, and by using the following code instead.
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = &rio;
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
if (noErr != status) { NSLog(#"Set input callback error"); return; }
This sets the callback on the input bus directly, instead of adding a render notify to the whole unit.
