Cannot render audio with VoiceProcessingIO AudioUnit on Mac

I'm currently attempting to use the VoiceProcessingIO AudioUnit to pass audio through from the mic to another input device. I'm using a userspace audio driver that proxies audio from its output side to its input side. The plan is to render the audio from the AudioUnit into a ring buffer and then play that buffer into the driver so it can be consumed as an input.
Here's a snippet of the code:
#define kNumber_Of_Channels 2
#define kLatency_Frame_Size 0
#define kRing_Buffer_Frame_Size ((65536 + kLatency_Frame_Size))
static Float32 *gMicrophoneRingBuffer;
static bool gMicrophoneRingBufferAllocated = false;
static AudioComponentInstance gAECAudioUnitInstance;
static AudioBufferList *gMicrophoneABL;
static bool gMicrophoneABLAllocated = false;
static OSStatus MicrophoneAECInputCallback(
void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
UInt32 inNumberFrames, AudioBufferList *ioData) {
#pragma unused(inRefCon, ioData)
OSStatus theError;
// Render audio into audio buffer list
theError = AudioUnitRender(gAECAudioUnitInstance, ioActionFlags, inTimeStamp,
inBusNumber, inNumberFrames, gMicrophoneABL);
if (theError) {
NSLog(@"! Failed to render AEC audio unit\t\t%d", theError);
return theError;
}
// Copy audio buffer list into ring buffer
AudioBuffer mBuffer = gMicrophoneABL->mBuffers[0];
void *ioMainBuffer = mBuffer.mData;
UInt32 inIOBufferFrameSize = mBuffer.mDataByteSize / (sizeof(Float32) * 1);
UInt64 mSampleTime = inTimeStamp->mSampleTime;
UInt32 ringBufferFrameLocationStart = mSampleTime % kRing_Buffer_Frame_Size;
UInt32 firstPartFrameSize =
kRing_Buffer_Frame_Size - ringBufferFrameLocationStart;
UInt32 secondPartFrameSize = 0;
if (firstPartFrameSize >= inIOBufferFrameSize) {
firstPartFrameSize = inIOBufferFrameSize;
} else {
secondPartFrameSize = inIOBufferFrameSize - firstPartFrameSize;
}
memcpy(gMicrophoneRingBuffer + ringBufferFrameLocationStart * 1, ioMainBuffer,
firstPartFrameSize * 1 * sizeof(Float32));
memcpy(gMicrophoneRingBuffer,
(Float32 *)ioMainBuffer + firstPartFrameSize * 1,
secondPartFrameSize * 1 * sizeof(Float32));
return noErr;
}
static OSStatus MicrophoneOutputDeviceIOProc(AudioObjectID inDevice,
const AudioTimeStamp *inNow,
const AudioBufferList *inInputData,
const AudioTimeStamp *inInputTime,
AudioBufferList *outOutputData,
const AudioTimeStamp *inOutputTime,
void *__nullable inClientData) {
#pragma unused(inDevice, inNow, inInputData, inInputTime, inClientData)
AudioBuffer mBuffer = outOutputData->mBuffers[0];
UInt32 inIOBufferFrameSize = mBuffer.mDataByteSize / (sizeof(Float32) * 1);
UInt64 mSampleTime = inOutputTime->mSampleTime;
UInt32 ringBufferFrameLocationStart = mSampleTime % kRing_Buffer_Frame_Size;
UInt32 firstPartFrameSize =
kRing_Buffer_Frame_Size - ringBufferFrameLocationStart;
UInt32 secondPartFrameSize = 0;
if (firstPartFrameSize >= inIOBufferFrameSize) {
firstPartFrameSize = inIOBufferFrameSize;
} else {
secondPartFrameSize = inIOBufferFrameSize - firstPartFrameSize;
}
memcpy(mBuffer.mData,
gMicrophoneRingBuffer + ringBufferFrameLocationStart * 1,
firstPartFrameSize * 1 * sizeof(Float32));
memcpy((Float32 *)mBuffer.mData + firstPartFrameSize * 1,
gMicrophoneRingBuffer, secondPartFrameSize * 1 * sizeof(Float32));
return noErr;
}
static OSStatus getAudioObjectID(CFStringRef uid, AudioObjectID *result) {
AudioObjectPropertyAddress inAddress;
inAddress.mSelector = kAudioHardwarePropertyDeviceForUID;
inAddress.mScope = kAudioObjectPropertyScopeGlobal;
inAddress.mElement = kAudioObjectPropertyElementMaster;
UInt32 theSize = sizeof(AudioValueTranslation);
AudioValueTranslation theValue = {&uid, sizeof(CFStringRef), result,
sizeof(AudioObjectID)};
OSStatus theError = AudioObjectGetPropertyData(
kAudioObjectSystemObject, &inAddress, 0, NULL, &theSize, &theValue);
return theError;
}
static OSStatus
createAndStartAECAudioUnit(AudioObjectID microphoneId,
AudioDeviceIOProcID *outputIOProcId) {
gMicrophoneRingBuffer =
calloc(kRing_Buffer_Frame_Size * kNumber_Of_Channels, sizeof(Float32));
gMicrophoneRingBufferAllocated = true;
OSStatus theError;
// Find audio unit component
AudioComponentDescription auDesc;
auDesc.componentType = kAudioUnitType_Output;
auDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
auDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
auDesc.componentFlags = 0;
auDesc.componentFlagsMask = 0;
AudioComponent component = AudioComponentFindNext(NULL, &auDesc);
// Create audio unit using component
theError = AudioComponentInstanceNew(component, &gAECAudioUnitInstance);
if (theError != noErr) {
NSLog(@"! Failed to create audio unit \t\t%d", theError);
return theError;
}
UInt32 one;
one = 1;
theError = AudioUnitSetProperty(gAECAudioUnitInstance,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input, 1, &one, sizeof(one));
if (theError != noErr) {
NSLog(@"! Failed to enable IO for input \t\t%d", theError);
return theError;
}
UInt32 zero;
zero = 0;
theError = AudioUnitSetProperty(
gAECAudioUnitInstance, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output, 0, &zero, sizeof(zero));
if (theError != noErr) {
NSLog(@"! Failed to disable IO for output \t\t%d", theError);
return theError;
}
// Set current output device
theError = AudioUnitSetProperty(
gAECAudioUnitInstance, kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global, 0, &microphoneId, sizeof(AudioObjectID));
if (theError != noErr) {
NSLog(@"! Failed to set audio unit microphone device \t\t%d", theError);
return theError;
}
// Set input callback to pull the microphone data into ring buffer
AURenderCallbackStruct inputProc;
inputProc.inputProc = MicrophoneAECInputCallback;
inputProc.inputProcRefCon = NULL;
theError = AudioUnitSetProperty(
gAECAudioUnitInstance, kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global, 0, &inputProc, sizeof(inputProc));
if (theError != noErr) {
NSLog(@"! Failed to set audio unit input callback \t\t%d", theError);
return theError;
}
// Get the size of the IO buffer(s)
UInt32 bufferSizeFrames;
UInt32 propertySize = sizeof(bufferSizeFrames);
theError = AudioUnitGetProperty(
gAECAudioUnitInstance, kAudioDevicePropertyBufferFrameSize,
kAudioUnitScope_Global, 0, &bufferSizeFrames, &propertySize);
if (theError != noErr) {
NSLog(@"! Failed to get buffer size frames \t\t%d", theError);
return theError;
}
// Initialize AudioBufferList
UInt32 bufferSizeBytes = bufferSizeFrames * sizeof(Float32);
UInt32 propsize =
offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * 1);
gMicrophoneABL = (AudioBufferList *)malloc(propsize);
// streamDesc (the input stream's AudioStreamBasicDescription) is set up elsewhere in the surrounding code
gMicrophoneABL->mNumberBuffers = streamDesc.mChannelsPerFrame;
for (UInt32 i = 0; i < gMicrophoneABL->mNumberBuffers; i++) {
gMicrophoneABL->mBuffers[i].mNumberChannels = streamDesc.mChannelsPerFrame;
gMicrophoneABL->mBuffers[i].mDataByteSize = bufferSizeBytes;
gMicrophoneABL->mBuffers[i].mData = malloc(bufferSizeBytes);
}
gMicrophoneABLAllocated = true;
// Initialize audio unit
theError = AudioUnitInitialize(gAECAudioUnitInstance);
if (theError != noErr) {
NSLog(@"! Failed to initialize audio unit \t\t%d", theError);
return theError;
}
// Start audio unit
theError = AudioOutputUnitStart(gAECAudioUnitInstance);
if (theError != noErr) {
NSLog(@"! Failed to start output of audio unit \t\t%d", theError);
return theError;
}
// Get microphone driver device id
AudioObjectID microphoneDriverDeviceId = kAudioObjectUnknown;
theError = getAudioObjectID(CFSTR(kMicrophone_Device_UID),
&microphoneDriverDeviceId);
if (theError != noErr) {
NSLog(@"! Failed to get microphone driver device UID\t\t%d", theError);
return theError;
}
// Create output proc
theError = AudioDeviceCreateIOProcID(microphoneDriverDeviceId,
MicrophoneOutputDeviceIOProc, NULL,
outputIOProcId);
if (theError != noErr) {
NSLog(@"! Failed to create output io proc\t\t%d", theError);
return theError;
}
// Starts the proxy process
theError = AudioDeviceStart(microphoneDriverDeviceId, *outputIOProcId);
if (theError != noErr) {
NSLog(@"! Failed to start output io proc\t\t%d", theError);
return theError;
}
return noErr;
}
So currently this code errors when it attempts to render the audio in MicrophoneAECInputCallback. This only happens when I change the AudioUnit component subtype to kAudioUnitSubType_VoiceProcessingIO instead of kAudioUnitSubType_HALOutput, which leads me to believe that I'm simply using the VoiceProcessingIO AudioUnit incorrectly.
I'm doing all this to perform echo cancellation on the microphone input and ultimately feed it into AVCaptureSession. Are there any alternative approaches you would suggest, or how should I go about fixing my usage of the VoiceProcessingIO AudioUnit on Mac?
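For reference, one thing I'm experimenting with (untested, and the sample rate and channel count below are assumptions on my part) is setting an explicit client stream format on the input element (bus 1, output scope) before initializing the unit, since the voice-processing unit seems pickier about formats than the plain HAL unit:
// Hedged sketch: describe the client-side format to pull from bus 1 (the input element).
AudioStreamBasicDescription clientFormat = {0};
clientFormat.mSampleRate = 48000.0; // assumed rate
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
clientFormat.mChannelsPerFrame = 1; // the voice-processing unit is typically mono
clientFormat.mBitsPerChannel = 32;
clientFormat.mBytesPerFrame = sizeof(Float32) * clientFormat.mChannelsPerFrame;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame;
OSStatus formatError = AudioUnitSetProperty(gAECAudioUnitInstance,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output, 1,
&clientFormat, sizeof(clientFormat));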

Related

How to enable/disable input or output channels from an aggregate CoreAudio device?

I have thoroughly read the question and answer in this thread:
How to exclude input or output channels from an aggregate CoreAudio device?
And it appears to be missing information about the solution:
I have created an aggregate device containing multiple audio devices. When calling Core Audio to retrieve the number of streams (using kAudioDevicePropertyStreams), the return value is always 1. I have also tried the implementation in the Core Audio Utility Classes, CAHALAudioDevice::GetIOProcStreamUsage. Still, I could not see how to access the sub-streams and enable/disable them as mentioned there.
What needs to be done to enable/disable sub-streams?
EDIT
Here is CAHALAudioDevice::GetIOProcStreamUsage for reference:
void CAHALAudioDevice::GetIOProcStreamUsage(AudioDeviceIOProcID
inIOProcID, bool inIsInput, bool* outStreamUsage) const
{
// make an AudioHardwareIOProcStreamUsage the right size
UInt32 theNumberStreams = GetNumberStreams(inIsInput);
UInt32 theSize = SizeOf32(void*) + SizeOf32(UInt32) + (theNumberStreams * SizeOf32(UInt32));
CAAutoFree<AudioHardwareIOProcStreamUsage> theStreamUsage(theSize);
// set it up
theStreamUsage->mIOProc = reinterpret_cast<void*>(inIOProcID);
theStreamUsage->mNumberStreams = theNumberStreams;
// get the property
CAPropertyAddress theAddress(kAudioDevicePropertyIOProcStreamUsage, inIsInput ? kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput);
GetPropertyData(theAddress, 0, NULL, theSize, theStreamUsage);
// fill out the return value
for(UInt32 theIndex = 0; theIndex < theNumberStreams; ++theIndex)
{
outStreamUsage[theIndex] = (theStreamUsage->mStreamIsOn[theIndex] != 0);
}
}
For reference, here is the function my program uses to accomplish the results described in the linked question:
// Tell CoreAudio which input (or output) streams we actually want to use in our device
// @param devID the CoreAudio audio device ID of the aggregate device to modify
// @param ioProc the rendering callback-function (as was passed to AudioDeviceCreateIOProcID()'s second argument)
// @param scope either kAudioObjectPropertyScopeInput or kAudioObjectPropertyScopeOutput depending on which type of channels we want to modify
// @param numValidChannels how many audio channels in the aggregate device we want to actually use
// @param rightJustify if true, we want to use the last (numValidChannels) in the device; if false we want to use the first (numValidChannels) in the device
// @returns 0 on success or -1 on error
// @note this function doesn't change the layout of the audio-sample data in the audio-render callback; rather it causes some channels of audio in the callback to become zero'd out/unused.
int SetProcStreamUsage(AudioDeviceID devID, void * ioProc, AudioObjectPropertyScope scope, int numValidChannels, bool rightJustify)
{
const AudioObjectPropertyAddress sizesAddress =
{
kAudioDevicePropertyStreamConfiguration,
scope,
kAudioObjectPropertyElementMaster
};
UInt32 streamSizesDataSize = 0;
OSStatus err = AudioObjectGetPropertyDataSize(devID, &sizesAddress, 0, NULL, &streamSizesDataSize);
if (err != noErr)
{
printf("SetProcStreamUsage(%u,%i,%i): AudioObjectGetPropertyDataSize(kAudioDevicePropertyStreamConfiguration) failed!\n", (unsigned int) devID, scope, rightJustify);
return -1; // ("AudioObjectGetPropertyDataSize(kAudioDevicePropertyStreamConfiguration) failed");
}
const AudioObjectPropertyAddress usageAddress =
{
kAudioDevicePropertyIOProcStreamUsage,
scope,
kAudioObjectPropertyElementMaster
};
UInt32 streamUsageDataSize = 0;
err = AudioObjectGetPropertyDataSize(devID, &usageAddress, 0, NULL, &streamUsageDataSize);
if (err != noErr)
{
printf("SetProcStreamUsage(%u,%i,%i): AudioObjectGetPropertyDataSize(kAudioDevicePropertyIOProcStreamUsage) failed!\n", (unsigned int) devID, scope, rightJustify);
return -1; // ("AudioObjectGetPropertyDataSize(kAudioDevicePropertyIOProcStreamUsage) failed");
}
AudioBufferList * bufList = (AudioBufferList*) malloc(streamSizesDataSize); // using malloc() because the object-size is variable
if (bufList)
{
int ret = 0;
err = AudioObjectGetPropertyData(devID, &sizesAddress, 0, NULL, &streamSizesDataSize, bufList);
if (err == noErr)
{
AudioHardwareIOProcStreamUsage * streamUsage = (AudioHardwareIOProcStreamUsage *) malloc(streamUsageDataSize); // using malloc() because the object-size is variable
if (streamUsage)
{
streamUsage->mIOProc = ioProc;
err = AudioObjectGetPropertyData(devID, &usageAddress, 0, NULL, &streamUsageDataSize, streamUsage);
if (err == noErr)
{
if (bufList->mNumberBuffers == streamUsage->mNumberStreams)
{
Int32 numChannelsLeft = numValidChannels;
if (rightJustify)
{
// We only want streams corresponding to the last (N) channels to be enabled
for (Int32 i=streamUsage->mNumberStreams-1; i>=0; i--)
{
streamUsage->mStreamIsOn[i] = (numChannelsLeft > 0);
numChannelsLeft -= bufList->mBuffers[i].mNumberChannels;
}
}
else
{
// We only want streams corresponding to the first (N) channels to be enabled
for (UInt32 i=0; i<streamUsage->mNumberStreams; i++)
{
streamUsage->mStreamIsOn[i] = (numChannelsLeft > 0);
numChannelsLeft -= bufList->mBuffers[i].mNumberChannels;
}
}
// Now set the stream-usage per our update, above
err = AudioObjectSetPropertyData(devID, &usageAddress, 0, NULL, streamUsageDataSize, streamUsage);
if (err != noErr)
{
printf("SetProcStreamUsage(%u,%i,%i): AudioObjectSetPropertyData(kAudioDevicePropertyIOProcStreamUsage) failed!\n", (unsigned int) devID, scope, rightJustify);
ret = -1; // ("AudioObjectSetPropertyData(kAudioDevicePropertyIOProcStreamUsage) failed");
}
}
else
{
printf("SetProcStreamUsage(%u,%i,%i): #Buffers (%u) doesn't match #Streams (%u)!\n", (unsigned int) devID, scope, rightJustify, bufList->mNumberBuffers, streamUsage->mNumberStreams);
ret = -1;
}
}
else
{
printf("SetProcStreamUsage(%u,%i,%i): AudioObjectGetPropertyData(kAudioDevicePropertyIOProcStreamUsage) failed!\n", (unsigned int) devID, scope, rightJustify);
ret = -1; // ("AudioObjectGetPropertyData(kAudioDevicePropertyIOProcStreamUsage) failed");
}
free(streamUsage);
}
else ret = -1; // out of memory?
}
else
{
printf("SetProcStreamUsage(%u,%i,%i): AudioObjectGetPropertyData(kAudioDevicePropertyStreamConfiguration) failed!\n", (unsigned int) devID, scope, rightJustify);
ret = -1; // ("AudioObjectGetPropertyData(kAudioDevicePropertyStreamConfiguration) failed");
}
free(bufList);
return ret;
}
else return -1; // out of memory?
}
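For completeness, here is a hypothetical call site (aggregateDeviceID and myIOProcID are placeholder names, not part of the original code) that enables only the first two input channels of the aggregate device for a previously created IOProc:
// Hypothetical usage sketch: use only the first 2 input channels for this IOProc.
if (SetProcStreamUsage(aggregateDeviceID, (void *) myIOProcID, kAudioObjectPropertyScopeInput, 2, false) != 0)
{
printf("Couldn't restrict the IOProc's stream usage\n");
}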

How to record and play back audio in real time on OS X

I'm trying to record sound from the microphone and play it back in real time on OS X. Eventually it will be streamed over the network, but for now I'm just trying to achieve local recording/playback.
I'm able to record sound and write to a file, which I could do with both AVCaptureSession and AVAudioRecorder. However, I'm not sure how to play back the audio as I record it. Using AVCaptureAudioDataOutput works:
self.captureSession = [[AVCaptureSession alloc] init];
AVCaptureDevice *audioCaptureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
NSError *error = nil;
AVCaptureDeviceInput *audioInput = [AVCaptureDeviceInput deviceInputWithDevice:audioCaptureDevice error:&error];
AVCaptureAudioDataOutput *audioDataOutput = [[AVCaptureAudioDataOutput alloc] init];
self.serialQueue = dispatch_queue_create("audioQueue", NULL);
[audioDataOutput setSampleBufferDelegate:self queue:self.serialQueue];
if (audioInput && [self.captureSession canAddInput:audioInput] && [self.captureSession canAddOutput:audioDataOutput]) {
[self.captureSession addInput:audioInput];
[self.captureSession addOutput:audioDataOutput];
[self.captureSession startRunning];
// Stop after arbitrary time
double delayInSeconds = 4.0;
dispatch_time_t popTime = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(delayInSeconds * NSEC_PER_SEC));
dispatch_after(popTime, dispatch_get_main_queue(), ^(void){
[self.captureSession stopRunning];
});
} else {
NSLog(@"Couldn't add them; error = %@",error);
}
...but I'm not sure how to implement the callback:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
?
}
I've tried getting the data out of the sampleBuffer and playing it using AVAudioPlayer by copying the code from this SO answer, but that code crashes on the appendBytes:length: method.
AudioBufferList audioBufferList;
NSMutableData *data= [NSMutableData data];
CMBlockBufferRef blockBuffer;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
for( int y=0; y< audioBufferList.mNumberBuffers; y++ ){
AudioBuffer audioBuffer = audioBufferList.mBuffers[y];
Float32 *frame = (Float32*)audioBuffer.mData;
NSLog(@"Length = %i",audioBuffer.mDataByteSize);
[data appendBytes:frame length:audioBuffer.mDataByteSize]; // Crashes here
}
CFRelease(blockBuffer);
NSError *playerError;
AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithData:data error:&playerError];
if(player && !playerError) {
NSLog(@"Player was valid");
[player play];
} else {
NSLog(@"Error = %@",playerError);
}
Edit The CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer method returns an OSStatus code of -12737, which according to the documentation is kCMSampleBufferError_ArrayTooSmall
Edit2: Based on this mailing list response, I passed a size_t out parameter as the second parameter to ...GetAudioBufferList.... This returned 40. Right now I'm just passing in 40 as a hard-coded value, which seems to work (the OSStatus return value is 0, at least).
Now the player initWithData:error: method gives the error:
Error Domain=NSOSStatusErrorDomain Code=1954115647 "The operation couldn’t be completed. (OSStatus error 1954115647.)" which I'm looking into.
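Rather than hard-coding 40, the required size can be queried on a first call and the AudioBufferList allocated to match. Here is a sketch of that two-call pattern (assuming the function accepts NULL for the buffer list and block buffer when only the size is requested):
// Sketch: first call asks only for the required AudioBufferList size,
// second call fills a heap-allocated list of exactly that size.
size_t ablSizeNeeded = 0;
OSStatus ablStatus = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, &ablSizeNeeded, NULL, 0, NULL, NULL, 0, NULL);
if (ablStatus == noErr) {
AudioBufferList *abl = malloc(ablSizeNeeded);
CMBlockBufferRef retainedBlockBuffer = NULL;
ablStatus = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, abl, ablSizeNeeded, NULL, NULL, 0, &retainedBlockBuffer);
if (ablStatus == noErr) {
// ... read abl->mBuffers[i].mData here ...
CFRelease(retainedBlockBuffer);
}
free(abl);
}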
I've done iOS programming for a long time, but I haven't used AVFoundation, CoreAudio, etc until now. It looks like there are a dozen ways to accomplish the same thing, depending on how low or high level you want to be, so any high level overviews or framework recommendations are appreciated.
Appendix
Recording to a file
Recording to a file using AVCaptureSession:
- (void)applicationDidFinishLaunching:(NSNotification *)aNotification
{
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(captureSessionStartedNotification:) name:AVCaptureSessionDidStartRunningNotification object:nil];
self.captureSession = [[AVCaptureSession alloc] init];
AVCaptureDevice *audioCaptureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
NSError *error = nil;
AVCaptureDeviceInput *audioInput = [AVCaptureDeviceInput deviceInputWithDevice:audioCaptureDevice error:&error];
AVCaptureAudioFileOutput *audioOutput = [[AVCaptureAudioFileOutput alloc] init];
if (audioInput && [self.captureSession canAddInput:audioInput] && [self.captureSession canAddOutput:audioOutput]) {
NSLog(@"Can add the inputs and outputs");
[self.captureSession addInput:audioInput];
[self.captureSession addOutput:audioOutput];
[self.captureSession startRunning];
double delayInSeconds = 5.0;
dispatch_time_t popTime = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(delayInSeconds * NSEC_PER_SEC));
dispatch_after(popTime, dispatch_get_main_queue(), ^(void){
[self.captureSession stopRunning];
});
}
else {
NSLog(@"Error was = %@",error);
}
}
- (void)captureSessionStartedNotification:(NSNotification *)notification
{
AVCaptureSession *session = notification.object;
id audioOutput = session.outputs[0];
NSLog(@"Capture session started; notification = %@",notification);
NSLog(@"Notification audio output = %@",audioOutput);
[audioOutput startRecordingToOutputFileURL:[[self class] outputURL] outputFileType:AVFileTypeAppleM4A recordingDelegate:self];
}
+ (NSURL *)outputURL
{
NSArray *searchPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentPath = [searchPaths objectAtIndex:0];
NSString *filePath = [documentPath stringByAppendingPathComponent:@"z1.alac"];
return [NSURL fileURLWithPath:filePath];
}
Recording to a file using AVAudioRecorder:
NSDictionary *recordSettings = [NSDictionary
dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:AVAudioQualityMin],
AVEncoderAudioQualityKey,
[NSNumber numberWithInt:16],
AVEncoderBitRateKey,
[NSNumber numberWithInt: 2],
AVNumberOfChannelsKey,
[NSNumber numberWithFloat:44100.0],
AVSampleRateKey,
@(kAudioFormatAppleLossless),
AVFormatIDKey,
nil];
NSError *recorderError;
self.recorder = [[AVAudioRecorder alloc] initWithURL:[[self class] outputURL] settings:recordSettings error:&recorderError];
self.recorder.delegate = self;
if (self.recorder && !recorderError) {
NSLog(@"Success!");
[self.recorder recordForDuration:10];
} else {
NSLog(@"Failure, recorder = %@",self.recorder);
NSLog(@"Error = %@",recorderError);
}
OK, I ended up working at a lower level than AVFoundation -- not sure if that was necessary. I read up to Chapter 5 of Learning Core Audio and went with an implementation using Audio Queues. This code was adapted from code that records to a file and plays a file back, so there are surely some unnecessary bits I've accidentally left in. Additionally, I'm not actually re-enqueuing buffers onto the output queue (I should be; see the sketch after the listing), but just as a proof of concept this works. The only file is listed here, and is also on GitHub.
//
// main.m
// Recorder
//
// Created by Maximilian Tagher on 8/7/13.
// Copyright (c) 2013 Tagher. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#define kNumberRecordBuffers 3
//#define kNumberPlaybackBuffers 3
#define kPlaybackFileLocation CFSTR("/Users/Max/Music/iTunes/iTunes Media/Music/Taylor Swift/Red/02 Red.m4a")
#pragma mark - User Data Struct
// listing 4.3
struct MyRecorder;
typedef struct MyPlayer {
AudioQueueRef playerQueue;
SInt64 packetPosition;
UInt32 numPacketsToRead;
AudioStreamPacketDescription *packetDescs;
Boolean isDone;
struct MyRecorder *recorder;
} MyPlayer;
typedef struct MyRecorder {
AudioQueueRef recordQueue;
SInt64 recordPacket;
Boolean running;
MyPlayer *player;
} MyRecorder;
#pragma mark - Utility functions
// Listing 4.2
static void CheckError(OSStatus error, const char *operation) {
if (error == noErr) return;
char errorString[20];
// See if it appears to be a 4-char-code
*(UInt32 *)(errorString + 1) = CFSwapInt32HostToBig(error);
if (isprint(errorString[1]) && isprint(errorString[2])
&& isprint(errorString[3]) && isprint(errorString[4])) {
errorString[0] = errorString[5] = '\'';
errorString[6] = '\0';
} else {
// No, format it as an integer
NSLog(@"Was integer");
sprintf(errorString, "%d",(int)error);
}
fprintf(stderr, "Error: %s (%s)\n",operation,errorString);
exit(1);
}
OSStatus MyGetDefaultInputDeviceSampleRate(Float64 *outSampleRate)
{
OSStatus error;
AudioDeviceID deviceID = 0;
AudioObjectPropertyAddress propertyAddress;
UInt32 propertySize;
propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
propertyAddress.mElement = 0;
propertySize = sizeof(AudioDeviceID);
error = AudioHardwareServiceGetPropertyData(kAudioObjectSystemObject,
&propertyAddress, 0, NULL,
&propertySize,
&deviceID);
if (error) return error;
propertyAddress.mSelector = kAudioDevicePropertyNominalSampleRate;
propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
propertyAddress.mElement = 0;
propertySize = sizeof(Float64);
error = AudioHardwareServiceGetPropertyData(deviceID,
&propertyAddress, 0, NULL,
&propertySize,
outSampleRate);
return error;
}
// Recorder
static void MyCopyEncoderCookieToFile(AudioQueueRef queue, AudioFileID theFile)
{
OSStatus error;
UInt32 propertySize;
error = AudioQueueGetPropertySize(queue, kAudioConverterCompressionMagicCookie, &propertySize);
if (error == noErr && propertySize > 0) {
Byte *magicCookie = (Byte *)malloc(propertySize);
CheckError(AudioQueueGetProperty(queue, kAudioQueueProperty_MagicCookie, magicCookie, &propertySize), "Couldn't get audio queue's magic cookie");
CheckError(AudioFileSetProperty(theFile, kAudioFilePropertyMagicCookieData, propertySize, magicCookie), "Couldn't set audio file's magic cookie");
free(magicCookie);
}
}
// Player
static void MyCopyEncoderCookieToQueue(AudioFileID theFile, AudioQueueRef queue)
{
UInt32 propertySize;
// Just check for presence of cookie
OSStatus result = AudioFileGetProperty(theFile, kAudioFilePropertyMagicCookieData, &propertySize, NULL);
if (result == noErr && propertySize != 0) {
Byte *magicCookie = (UInt8*)malloc(sizeof(UInt8) * propertySize);
CheckError(AudioFileGetProperty(theFile, kAudioFilePropertyMagicCookieData, &propertySize, magicCookie), "Get cookie from file failed");
CheckError(AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie, magicCookie, propertySize), "Set cookie on file failed");
free(magicCookie);
}
}
static int MyComputeRecordBufferSize(const AudioStreamBasicDescription *format, AudioQueueRef queue, float seconds)
{
int packets, frames, bytes;
frames = (int)ceil(seconds * format->mSampleRate);
if (format->mBytesPerFrame > 0) { // Not variable
bytes = frames * format->mBytesPerFrame;
} else { // variable bytes per frame
UInt32 maxPacketSize;
if (format->mBytesPerPacket > 0) {
// Constant packet size
maxPacketSize = format->mBytesPerPacket;
} else {
// Get the largest single packet size possible
UInt32 propertySize = sizeof(maxPacketSize);
CheckError(AudioQueueGetProperty(queue, kAudioConverterPropertyMaximumOutputPacketSize, &maxPacketSize, &propertySize), "Couldn't get queue's maximum output packet size");
}
if (format->mFramesPerPacket > 0) {
packets = frames / format->mFramesPerPacket;
} else {
// Worst case scenario: 1 frame in a packet
packets = frames;
}
// Sanity check
if (packets == 0) {
packets = 1;
}
bytes = packets * maxPacketSize;
}
return bytes;
}
void CalculateBytesForPlaythrough(AudioQueueRef queue,
AudioStreamBasicDescription inDesc,
Float64 inSeconds,
UInt32 *outBufferSize,
UInt32 *outNumPackets)
{
UInt32 maxPacketSize;
UInt32 propSize = sizeof(maxPacketSize);
CheckError(AudioQueueGetProperty(queue,
kAudioQueueProperty_MaximumOutputPacketSize,
&maxPacketSize, &propSize), "Couldn't get file's max packet size");
static const int maxBufferSize = 0x10000;
static const int minBufferSize = 0x4000;
if (inDesc.mFramesPerPacket) {
Float64 numPacketsForTime = inDesc.mSampleRate / inDesc.mFramesPerPacket * inSeconds;
*outBufferSize = numPacketsForTime * maxPacketSize;
} else {
*outBufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
}
if (*outBufferSize > maxBufferSize &&
*outBufferSize > maxPacketSize) {
*outBufferSize = maxBufferSize;
} else {
if (*outBufferSize < minBufferSize) {
*outBufferSize = minBufferSize;
}
}
*outNumPackets = *outBufferSize / maxPacketSize;
}
#pragma mark - Record callback function
static void MyAQInputCallback(void *inUserData,
AudioQueueRef inQueue,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp *inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription *inPacketDesc)
{
// NSLog(@"Input callback");
// NSLog(@"Input thread = %@",[NSThread currentThread]);
MyRecorder *recorder = (MyRecorder *)inUserData;
MyPlayer *player = recorder->player;
if (inNumPackets > 0) {
// Enqueue on the output Queue!
AudioQueueBufferRef outputBuffer;
CheckError(AudioQueueAllocateBuffer(player->playerQueue, inBuffer->mAudioDataBytesCapacity, &outputBuffer), "Input callback failed to allocate new output buffer");
memcpy(outputBuffer->mAudioData, inBuffer->mAudioData, inBuffer->mAudioDataByteSize);
outputBuffer->mAudioDataByteSize = inBuffer->mAudioDataByteSize;
// [NSData dataWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize];
// Assuming LPCM so no packet descriptions
CheckError(AudioQueueEnqueueBuffer(player->playerQueue, outputBuffer, 0, NULL), "Enqueing the buffer in input callback failed");
recorder->recordPacket += inNumPackets;
}
if (recorder->running) {
CheckError(AudioQueueEnqueueBuffer(inQueue, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
}
}
static void MyAQOutputCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inCompleteAQBuffer)
{
// NSLog(@"Output thread = %@",[NSThread currentThread]);
// NSLog(@"Output callback");
MyPlayer *aqp = (MyPlayer *)inUserData;
MyRecorder *recorder = aqp->recorder;
if (aqp->isDone) return;
}
int main(int argc, const char * argv[])
{
@autoreleasepool {
MyRecorder recorder = {0};
MyPlayer player = {0};
recorder.player = &player;
player.recorder = &recorder;
AudioStreamBasicDescription recordFormat;
memset(&recordFormat, 0, sizeof(recordFormat));
recordFormat.mFormatID = kAudioFormatLinearPCM;
recordFormat.mChannelsPerFrame = 2; //stereo
// Begin my changes to make LPCM work
recordFormat.mBitsPerChannel = 16;
// Haven't checked if each of these flags is necessary, this is just what Chapter 2 used for LPCM.
recordFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
// end my changes
MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);
UInt32 propSize = sizeof(recordFormat);
CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
0,
NULL,
&propSize,
&recordFormat), "AudioFormatGetProperty failed");
AudioQueueRef queue = {0};
CheckError(AudioQueueNewInput(&recordFormat, MyAQInputCallback, &recorder, NULL, NULL, 0, &queue), "AudioQueueNewInput failed");
recorder.recordQueue = queue;
// Fills in ABSD a little more
UInt32 size = sizeof(recordFormat);
CheckError(AudioQueueGetProperty(queue,
kAudioConverterCurrentOutputStreamDescription,
&recordFormat,
&size), "Couldn't get queue's format");
// MyCopyEncoderCookieToFile(queue, recorder.recordFile);
int bufferByteSize = MyComputeRecordBufferSize(&recordFormat,queue,0.5);
NSLog(@"%d",__LINE__);
// Create and Enqueue buffers
int bufferIndex;
for (bufferIndex = 0;
bufferIndex < kNumberRecordBuffers;
++bufferIndex) {
AudioQueueBufferRef buffer;
CheckError(AudioQueueAllocateBuffer(queue,
bufferByteSize,
&buffer), "AudioQueueAllocateBuffer failed");
CheckError(AudioQueueEnqueueBuffer(queue, buffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
}
// PLAYBACK SETUP
AudioQueueRef playbackQueue;
CheckError(AudioQueueNewOutput(&recordFormat,
MyAQOutputCallback,
&player, NULL, NULL, 0,
&playbackQueue), "AudioQueueNewOutput failed");
player.playerQueue = playbackQueue;
UInt32 playBufferByteSize;
CalculateBytesForPlaythrough(queue, recordFormat, 0.1, &playBufferByteSize, &player.numPacketsToRead);
bool isFormatVBR = (recordFormat.mBytesPerPacket == 0
|| recordFormat.mFramesPerPacket == 0);
if (isFormatVBR) {
NSLog(@"Not supporting VBR");
player.packetDescs = (AudioStreamPacketDescription*) malloc(sizeof(AudioStreamPacketDescription) * player.numPacketsToRead);
} else {
player.packetDescs = NULL;
}
// END PLAYBACK
recorder.running = TRUE;
player.isDone = false;
CheckError(AudioQueueStart(playbackQueue, NULL), "AudioQueueStart failed");
CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
CFRunLoopRunInMode(kCFRunLoopDefaultMode, 10, TRUE);
printf("Playing through, press <return> to stop:\n");
getchar();
printf("* done *\n");
recorder.running = FALSE;
player.isDone = true;
CheckError(AudioQueueStop(playbackQueue, false), "Failed to stop playback queue");
CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");
AudioQueueDispose(playbackQueue, FALSE);
AudioQueueDispose(queue, TRUE);
}
return 0;
}
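As noted above, the output callback is currently a no-op. Since MyAQInputCallback allocates a fresh output buffer for every chunk, one small improvement (my own untested addition, not from the book) would be to release each buffer once it has finished playing:
// Untested sketch: free each completed playthrough buffer so the
// AudioQueueAllocateBuffer calls in the input callback don't leak.
static void MyAQOutputCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inCompleteAQBuffer)
{
MyPlayer *aqp = (MyPlayer *)inUserData;
if (aqp->isDone) return;
CheckError(AudioQueueFreeBuffer(inAQ, inCompleteAQBuffer), "AudioQueueFreeBuffer failed");
}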

Remote IO Play with constant noise

I'm having trouble using Remote IO to play back streamed audio. I verified the PCM frame data before feeding it in, and it's correct, so I'm confused. Could you help me? Thanks a lot!
Below is my code.
- (void)initializeAudioPlay
{
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &audioPlayUnit);
[self checkStatus:status];
// Enable IO for playback
UInt32 flag = 1;
//kAUVoiceIOProperty_VoiceProcessingEnableAGC
status = AudioUnitSetProperty(audioPlayUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input, kOutputBus, &flag, sizeof(flag));
[self checkStatus:status];
// Describe format
AudioStreamBasicDescription audioFormat;
memset(&audioFormat, 0, sizeof(audioFormat));
audioFormat.mSampleRate = 8000;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagsCanonical;//kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsSignedInteger;
/*kAudioFormatFlagsCanonical
| (kAudioUnitSampleFractionBits << kLinearPCMFormatFlagsSampleFractionShift)*/
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerFrame = (audioFormat.mBitsPerChannel/8) * audioFormat.mChannelsPerFrame;
audioFormat.mBytesPerPacket = audioFormat.mBytesPerFrame;
// Apply format
status = AudioUnitSetProperty(audioPlayUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
[self checkStatus:status];
float value = (float)10 / 255.0;
AudioUnitSetParameter(audioPlayUnit, kAudioUnitParameterUnit_LinearGain, kAudioUnitScope_Input, 0, value, 0);
AudioChannelLayout new_layout;
new_layout.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
AudioUnitSetProperty( audioPlayUnit,
kAudioUnitProperty_AudioChannelLayout,
kAudioUnitScope_Global,
0, &new_layout, sizeof(new_layout) );
UInt32 bypassEffect = kAudioUnitProperty_RenderQuality;
status = AudioUnitSetProperty(audioPlayUnit,
kAudioUnitProperty_RenderQuality,
kAudioUnitScope_Global,
0,
&bypassEffect,
sizeof(bypassEffect));
[self checkStatus:status];
// Set output callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = playCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioPlayUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
[self checkStatus:status];
flag = 0;
// Initialize
status = AudioUnitInitialize(audioPlayUnit);
[self checkStatus:status];
DGLog(@"audio play unit initialize = %d", status);
circularBuf = [[CircularBuf alloc] initWithBufLen:kBufferLength];
/*
AudioSessionInitialize(NULL, NULL, NULL, NULL);
Float64 rate =32000.0;
AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareSampleRate, sizeof(rate), &rate);
Float32 volume=20.0;
UInt32 size = sizeof(Float32);
AudioSessionSetProperty(
kAudioSessionProperty_PreferredHardwareIOBufferDuration,
&size, &volume);
//float aBufferLength = 0.185759637188209;
//AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(aBufferLength), &aBufferLength);
AudioSessionSetActive(YES);
*/
AudioSessionInitialize(NULL, NULL, NULL, nil);
AudioSessionSetActive(true);
UInt32 sessionCategory = kAudioSessionCategory_MediaPlayback ;
/* for Iphone we need to do this to route the audio to speaker */
status= AudioSessionSetProperty (
kAudioSessionProperty_AudioCategory,
sizeof (sessionCategory),
&sessionCategory
);
//NSLog(@"Error: %d", status);
//
// UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_Speaker;
// status = AudioSessionSetProperty (
// kAudioSessionProperty_OverrideAudioRoute,
// sizeof (audioRouteOverride),
// &audioRouteOverride);
UInt32 audioMixed = 1;
status = AudioSessionSetProperty (
kAudioSessionProperty_OverrideCategoryMixWithOthers,
sizeof (audioMixed),
&audioMixed);
}
- (void)processAudio:(AudioBuffer *)buffer
{
short pcmTemp[160];
unsigned char * amrBuffer=NULL;
AudioUnitSampleType sample;
int i = 0;
int j = 0;
if ([circularBuf isReadTwoRegion]) {
amrBuffer = [circularBuf ReadData];
} else {
amrBuffer = [circularBuf ReadData];
i = [circularBuf ReadPos];
}
j = i + circularBuf.Length;
if (j - i >= 320) {
memcpy((void*)pcmTemp, (void*)amrBuffer, 320);
for(i=0; i<160; i++)
{
sample = 3.162277*pcmTemp[i];//10db
if(sample > 32767)sample = 32767;
else if(sample < -32768)sample = -32768;
buffData[i] = sample;
}
memcpy(buffer->mData, buffData, buffer->mDataByteSize);
[circularBuf AdvanceReadPos:320];
}
else
{
memset(buffer->mData, 0, buffer->mDataByteSize);
}
}
/**
This callback is called when the audioUnit needs new data to play through the
speakers. If you don't have any, just don't write anything in the buffers
*/
static OSStatus playCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Notes: ioData contains buffers (may be more than one!)
// Fill them up as much as you can. Remember to set the size value in each buffer to match how
// much data is in the buffer.
AudioPlay *audioPlay = (AudioPlay *)inRefCon;
for ( int i=0; i < ioData->mNumberBuffers; i++ ) {
memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
}
ioData->mBuffers[0].mNumberChannels = 1;
[audioPlay processAudio:&ioData->mBuffers[0]];
return noErr;
}

iOS RemoteIO - AudioUnitAddRenderNotify Callback

I'm trying to do a recording from RemoteIO using AudioUnitAddRenderNotify like this.
Basically, I'm not able to get the samples from bus 1, which is my input bus. The recordingCallback does not go past this:
if (*ioActionFlags & kAudioUnitRenderAction_PostRender || inBusNumber != 1) {
return noErr;
}
But I was told that the recordingCallback should be called for each bus every round, i.e. called with inBusNumber == 0, then inBusNumber == 1, which are the output (RemoteIO out) and input (recording) buses respectively.
What can I do to get recordingCallback to be called on my input bus so that I can record?
Thanks.
Pier.
Here's the callback.
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
NSLog(@"Entered recording callback");
// Only do pre render on bus 1
if (*ioActionFlags & kAudioUnitRenderAction_PostRender || inBusNumber != 1) {
return noErr;
}
RIO *rio = (RIO*)inRefCon;
AudioUnit rioUnit = rio->theAudioUnit;
//ExtAudioFileRef eaf = rio->outEAF;
AudioBufferList abl = rio->audioBufferList;
SInt32 samples[NUMBER_OF_SAMPLES]; // A large enough size to not have to worry about buffer overrun
abl.mNumberBuffers = 1;
abl.mBuffers[0].mData = &samples;
abl.mBuffers[0].mNumberChannels = 1;
abl.mBuffers[0].mDataByteSize = inNumberFrames * sizeof(SInt16);
OSStatus result;
result = AudioUnitRender(rioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&abl);
if (noErr != result) { NSLog(@"Obtain recorded samples error! Error : %ld", result); }
NSLog(@"Bus %ld", inBusNumber);
// React to a recording flag, if recording, save the abl into own buffer, else ignore
if (rio->recording)
{
TPCircularBufferProduceBytes(&rio->buffer, abl.mBuffers[0].mData, inNumberFrames * sizeof(SInt16));
//rio->timeIncurred += ((float)inNumberFrames) / 44100.0;
//NSLog(@"Self-calculated time incurred: %f", rio->timeIncurred);
}
return noErr;
}
Here's the code which calls the callback.
- (void)setupAudioUnitRemoteIO {
UInt32 framesPerSlice = 0;
UInt32 framesPerSlicePropertySize = sizeof (framesPerSlice);
UInt32 sampleRatePropertySize = sizeof (_graphSampleRate);
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &_remoteIOUnit);
if (noErr != status) { NSLog(@"Get audio units error"); return; }
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
if (noErr != status) { NSLog(@"Enable IO for recording error"); return; }
// Enable IO for playback
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
if (noErr != status) { NSLog(@"Enable IO for playback error"); return; }
// Obtain the value of the maximum-frames-per-slice from the I/O unit.
status = AudioUnitGetProperty (
_remoteIOUnit,
kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global,
0,
&framesPerSlice,
&framesPerSlicePropertySize
);
// Describe format
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
if (noErr != status) { NSLog(@"Apply format to input bus error"); return; }
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
if (noErr != status) { NSLog(@"Apply format to output bus error"); return; }
rio.theAudioUnit = _remoteIOUnit; // Need this, as used in callbacks to refer to remoteIO
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = &rio;
status = AudioUnitAddRenderNotify(_remoteIOUnit, callbackStruct.inputProc, callbackStruct.inputProcRefCon);
NSAssert (status == noErr, @"Problem adding recordingCallback to RemoteIO. Error code: %d '%.4s'", (int) status, (const char *)&status);
I managed to resolve this by not using AudioUnitAddRenderNotify, and by using the following code.
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = &rio;
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
if (noErr != status) { NSLog(@"Set input callback error"); return; }
This sets an input callback on the input bus instead of a render notify on the whole unit.

how to get Audio Device UID to pass into NSSound's setPlaybackDeviceIdentifier:

How can I get an audio device UID (USB speaker) to pass into NSSound's setPlaybackDeviceIdentifier: method?
Thanks
To avoid the deprecated AudioHardwareGetProperty and AudioDeviceGetProperty calls, replace them with something like this:
AudioObjectPropertyAddress propertyAddress;
AudioObjectID *deviceIDs;
UInt32 propertySize;
NSInteger numDevices;
propertyAddress.mSelector = kAudioHardwarePropertyDevices;
propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
propertyAddress.mElement = kAudioObjectPropertyElementMaster;
if (AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propertySize) == noErr) {
numDevices = propertySize / sizeof(AudioDeviceID);
deviceIDs = (AudioDeviceID *)calloc(numDevices, sizeof(AudioDeviceID));
if (AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propertySize, deviceIDs) == noErr) {
AudioObjectPropertyAddress deviceAddress;
char deviceName[64];
char manufacturerName[64];
for (NSInteger idx=0; idx<numDevices; idx++) {
propertySize = sizeof(deviceName);
deviceAddress.mSelector = kAudioDevicePropertyDeviceName;
deviceAddress.mScope = kAudioObjectPropertyScopeGlobal;
deviceAddress.mElement = kAudioObjectPropertyElementMaster;
if (AudioObjectGetPropertyData(deviceIDs[idx], &deviceAddress, 0, NULL, &propertySize, deviceName) == noErr) {
propertySize = sizeof(manufacturerName);
deviceAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
deviceAddress.mScope = kAudioObjectPropertyScopeGlobal;
deviceAddress.mElement = kAudioObjectPropertyElementMaster;
if (AudioObjectGetPropertyData(deviceIDs[idx], &deviceAddress, 0, NULL, &propertySize, manufacturerName) == noErr) {
CFStringRef uidString;
propertySize = sizeof(uidString);
deviceAddress.mSelector = kAudioDevicePropertyDeviceUID;
deviceAddress.mScope = kAudioObjectPropertyScopeGlobal;
deviceAddress.mElement = kAudioObjectPropertyElementMaster;
if (AudioObjectGetPropertyData(deviceIDs[idx], &deviceAddress, 0, NULL, &propertySize, &uidString) == noErr) {
NSLog(@"device %s by %s id %@", deviceName, manufacturerName, uidString);
CFRelease(uidString);
}
}
}
}
}
free(deviceIDs);
}
OK, I got it myself...
theCFString will contain the device UID:
UInt32 theSize;
char theString[kMaxStringSize];
UInt32 theNumberDevices;
AudioDeviceID *theDeviceList = NULL;
UInt32 theDeviceIndex;
CFStringRef theCFString = NULL;
OSStatus theStatus = noErr;
// this is our driver
const char *nameString = "Burr-Brown Japan PCM2702";
const char *manufacturerString = "Burr-Brown Japan";
// device list size
theSize = 0;
theStatus = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDevices, &theSize, NULL);
theNumberDevices = theSize / sizeof(AudioDeviceID);
// allocate the device list
theDeviceList = (AudioDeviceID*)malloc(theNumberDevices * sizeof(AudioDeviceID));
// get the device list
theSize = theNumberDevices * sizeof(AudioDeviceID);
theStatus = AudioHardwareGetProperty(kAudioHardwarePropertyDevices, &theSize, theDeviceList);
// iterate through the device list, find our device and return the UID
for(theDeviceIndex = 0; theDeviceIndex < theNumberDevices; ++theDeviceIndex)
{
// get name
theSize = kMaxStringSize;
theStatus = AudioDeviceGetProperty(theDeviceList[theDeviceIndex],
0, 0, kAudioDevicePropertyDeviceName, &theSize, theString);
NSLog(@"%s",theString);
// is it me?
if (strncmp(theString, nameString, strlen(nameString)) == 0) {
// get manufacturer
theSize = kMaxStringSize;
theStatus = AudioDeviceGetProperty(theDeviceList[theDeviceIndex], 0, 0,
kAudioDevicePropertyDeviceManufacturer, &theSize, theString);
NSLog(@"%s",theString);
// is it really me?
if (strncmp(theString, manufacturerString, strlen(manufacturerString)) == 0) {
// get device UID
theSize = sizeof(CFStringRef);
theStatus = AudioDeviceGetProperty(theDeviceList[theDeviceIndex],
0, 0, kAudioDevicePropertyDeviceUID, &theSize, &theCFString);
NSLog(@"%@",theCFString);
break;
}
}
}
AudioHardwareGetProperty is deprecated in Snow Leopard.
