I have an audio analysis app using Audio Units that works perfectly when the app is run in isolation. However, if other audio apps are running in the background, AudioUnitRender returns a -50 error.
Does anyone know a way to resolve this, so that AudioUnitRender works even when other audio apps are running?
Thanks in advance.
Audio session initialization:
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setPreferredHardwareSampleRate:sampleRate error:&err];
[session setCategory:AVAudioSessionCategoryRecord error:&err];
[session setActive:YES error:&err];
[session setMode:AVAudioSessionModeMeasurement error:&err];
[session setDelegate:listener];
UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_None;
AudioSessionSetProperty (kAudioSessionProperty_OverrideAudioRoute,sizeof (audioRouteOverride),&audioRouteOverride);
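None of the err results above are actually inspected; a minimal sketch of checking them (each of these AVAudioSession setters returns a BOOL):
NSError *err = nil;
if (![session setCategory:AVAudioSessionCategoryRecord error:&err]) {
    NSLog(@"setCategory failed: %@", err);
}
if (![session setActive:YES error:&err]) {
    NSLog(@"setActive failed: %@", err);
}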
I/O unit description:
OSStatus err;
AudioComponentDescription ioUnitDescription;
ioUnitDescription.componentType = kAudioUnitType_Output;
ioUnitDescription.componentSubType = kAudioUnitSubType_RemoteIO;
ioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
ioUnitDescription.componentFlags = 0;
ioUnitDescription.componentFlagsMask = 0;
// Declare and instantiate an audio processing graph
NewAUGraph(&processingGraph);
// Add an audio unit node to the graph, then instantiate the audio unit.
/*
An AUNode is an opaque type that represents an audio unit in the context
of an audio processing graph. You receive a reference to the new audio unit
instance, in the ioUnit parameter, on output of the AUGraphNodeInfo
function call.
*/
AUNode ioNode;
AUGraphAddNode(processingGraph, &ioUnitDescription, &ioNode);
AUGraphOpen(processingGraph); // indirectly performs audio unit instantiation
// Obtain a reference to the newly-instantiated I/O unit. Each Audio Unit
// requires its own configuration.
AUGraphNodeInfo(processingGraph, ioNode, NULL, &ioUnit);
// Initialize below.
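// Not shown in the original excerpt: kInputBus/kOutputBus. On the RemoteIO
// unit, input is element 1 and output is element 0 by convention:
#define kInputBus  1
#define kOutputBus 0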
AURenderCallbackStruct callbackStruct = {0};
UInt32 enableInput;
UInt32 enableOutput;
// Enable input and disable output.
enableInput = 1; enableOutput = 0;
callbackStruct.inputProc = RenderFFTCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus, &enableInput, sizeof(enableInput));
err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus, &enableOutput, sizeof(enableOutput));
err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Input,
kOutputBus, &callbackStruct, sizeof(callbackStruct));
// Set the stream format.
size_t bytesPerSample = [self ASBDForSoundMode];
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus, &streamFormat, sizeof(streamFormat));
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus, &streamFormat, sizeof(streamFormat));
// Disable system buffer allocation; we allocate bufferList ourselves,
// so the flag must be 0 (1 would enable allocation).
UInt32 flag = 0;
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus, &flag, sizeof(flag));
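For reference, streamFormat above is assumed to be filled in by the ASBDForSoundMode method, which isn't shown. A plausible 16-bit mono PCM description at the preferred rate, purely as a sketch (not necessarily the actual format used here):
AudioStreamBasicDescription streamFormat = {0};
streamFormat.mSampleRate       = sampleRate;   // e.g. 22050.0
streamFormat.mFormatID         = kAudioFormatLinearPCM;
streamFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
streamFormat.mChannelsPerFrame = 1;
streamFormat.mBitsPerChannel   = 16;
streamFormat.mBytesPerFrame    = 2;            // 16-bit mono
streamFormat.mBytesPerPacket   = 2;
streamFormat.mFramesPerPacket  = 1;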
Render callback:
RIOInterface* THIS = (__bridge RIOInterface *)inRefCon; // __bridge, not __bridge_transfer: the callback runs every render cycle and must not release inRefCon
COMPLEX_SPLIT A = THIS->A;
void *dataBuffer = THIS->dataBuffer;
float *outputBuffer = THIS->outputBuffer;
FFTSetup fftSetup = THIS->fftSetup;
float *hammingWeights = THIS->hammingWeights;
uint32_t log2n = THIS->log2n;
uint32_t n = THIS->n;
uint32_t nOver2 = THIS->nOver2;
uint32_t stride = 1;
int bufferCapacity = THIS->bufferCapacity;
SInt16 index = THIS->index;
AudioUnit rioUnit = THIS->ioUnit;
OSStatus renderErr;
UInt32 bus1 = 1;
renderErr = AudioUnitRender(rioUnit, ioActionFlags,
inTimeStamp, bus1, inNumberFrames, THIS->bufferList);
if (renderErr < 0) {
return renderErr;
}
I discovered that this issue occurred when another app's AVAudioSession was active, in which case the settings of the session that was activated first took priority over mine. I was trying to set the sample rate to 22050 Hz, but if the other audio session had already set it to 44100 Hz, it stayed at 44100 Hz.
I resolved the issue by making my code adaptive to the prevailing settings, e.g. with respect to the buffer size, so that it still works correctly (if not optimally) with audio settings that differ from my preference.
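Concretely, that means reading back the values the session actually ended up with after activation instead of assuming the preferred values were honored; a minimal sketch using the iOS 5-era API from the question:
[session setActive:YES error:&err];
// Another app's session may have pinned the hardware to a different rate,
// so query what it is actually running at and adapt.
double actualRate = [session currentHardwareSampleRate];
if (actualRate != sampleRate) {
    sampleRate = actualRate;
    // ...recompute n, log2n, bufferCapacity, etc. from the actual rate...
}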
Related
We are trying to play a 5.1 (six-channel) AAC audio file using an AUGraph; we have also tried AVAudioEngine. We are connected to a 5.1 surround-sound output device (Sony speakers), but the file plays back as stereo: all the channels come out of the front left and right speakers.
We are setting the file's AudioStreamBasicDescription (which has 6 channels) on the file player unit as both the input and output format.
//creating file Unit.
AudioComponentDescription fileDesc;
fileDesc.componentType = kAudioUnitType_Generator;
fileDesc.componentSubType = kAudioUnitSubType_AudioFilePlayer;
fileDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
fileDesc.componentFlags = 0;
fileDesc.componentFlagsMask = 0;
status = AUGraphAddNode(_audioGraph,&fileDesc,&filePlayerNode);
status = AUGraphNodeInfo(_audioGraph,filePlayerNode,NULL,&filePlayerUnit);
//creating output Unit.
AudioComponentDescription outputDesc;
outputDesc.componentType = kAudioUnitType_Output;
outputDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
outputDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
outputDesc.componentFlags = 0;
outputDesc.componentFlagsMask = 0;
status = AUGraphAddNode(_audioGraph,&outputDesc,&outputNode);
status = AUGraphNodeInfo(_audioGraph,outputNode,NULL,&outputUnit);
OSStatus error = AudioFileOpenURL(audioFileURL, kAudioFileReadPermission, 0, &_audioInputFileId);
//get file's streamFormat.
UInt32 dataSize = sizeof(AudioStreamBasicDescription);
AudioFileGetProperty(_audioInputFileId, kAudioFilePropertyDataFormat, &dataSize, &_fileStreamDescription);
UInt32 propertySize = sizeof(_fileStreamDescription);
//set input and output format to fileUnit.
OSStatus err = AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &_fileStreamDescription, propertySize); // err declared once; redeclaring it on each line would not compile
err = AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &_fileStreamDescription, propertySize);
//set input and output format to outputUnit.
err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &_fileStreamDescription, propertySize);
err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &_fileStreamDescription, propertySize);
Similarly, for the outputUnit we set both the input and output format to the _fileStreamDescription.
We registered a render-notify callback on the file unit. It reports 2 buffers with 1 channel each, which indicates (non-interleaved) stereo data.
AudioUnitAddRenderNotify(filePlayerUnit, fileUnitRenderCallback, (__bridge void*)self);
static OSStatus fileUnitRenderCallback(void * inRefCon,AudioUnitRenderActionFlags * ioActionFlags, const AudioTimeStamp *inTimeStamp,UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData){}
We registered a render-notify callback on the output unit. It reports 1 buffer with 2 channels, which also indicates stereo data.
AudioUnitAddRenderNotify(outputUnit, outputUnitRenderCallback, (__bridge void*)self);
static OSStatus outputUnitRenderCallback(void * inRefCon,AudioUnitRenderActionFlags * ioActionFlags, const AudioTimeStamp *inTimeStamp,UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData){}
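The buffer counts above come from logging inside the notify callbacks; the inspection looks roughly like this (a sketch using only the callback arguments):
static OSStatus fileUnitRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
                                       const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
                                       UInt32 inNumberFrames, AudioBufferList *ioData) {
    if (*ioActionFlags & kAudioUnitRenderAction_PostRender) {
        // For the file unit this prints 2 buffers x 1 channel (non-interleaved stereo).
        NSLog(@"buffers: %u, channels in buffer 0: %u",
              (unsigned)ioData->mNumberBuffers,
              (unsigned)ioData->mBuffers[0].mNumberChannels);
    }
    return noErr;
}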
We cannot use Apple's built-in players such as AVPlayer, because we need to connect additional audio units to apply audio effects.
Could you please suggest how we can achieve 5.1 playback?
I am hearing a very loud and harsh distortion when I run this simple application. I am simply instantiating a default output unit, assigning a render callback, and letting the program run in the run loop. Core Audio reports no errors, and everything works as usual except for this distortion.
#import <AudioToolbox/AudioToolbox.h>
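#include <stdio.h>
#include <stdlib.h>
// CheckError is not defined in the original post; this is the usual helper
// seen in Core Audio examples: print the operation and bail on a bad status.
static void CheckError(OSStatus error, const char *operation) {
    if (error == noErr) return;
    fprintf(stderr, "Error: %s (%d)\n", operation, (int)error);
    exit(1);
}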
OSStatus render1(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
return noErr;
}
int main(int argc, const char * argv[]) {
AudioUnit timerAU;
UInt32 propsize = 0;
AudioComponentDescription outputUnitDesc;
outputUnitDesc.componentType = kAudioUnitType_Output;
outputUnitDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
outputUnitDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
outputUnitDesc.componentFlags = 0;
outputUnitDesc.componentFlagsMask = 0;
//Get RemoteIO AU from Audio Unit Component Manager
AudioComponent outputComp = AudioComponentFindNext(NULL, &outputUnitDesc);
if (outputComp == NULL) exit (-1);
CheckError(AudioComponentInstanceNew(outputComp, &timerAU), "comp");
//Set up render callback function for the RemoteIO AU.
AURenderCallbackStruct renderCallbackStruct;
renderCallbackStruct.inputProc = render1;
renderCallbackStruct.inputProcRefCon = nil;//(__bridge void *)(self);
propsize = sizeof(renderCallbackStruct);
CheckError(AudioUnitSetProperty(timerAU,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
0,
&renderCallbackStruct,
propsize), "set render");
CheckError(AudioUnitInitialize(timerAU), "init");
// tickMethod = completion;
CheckError(AudioOutputUnitStart(timerAU), "start");
CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1000, false);
return 0;
}
Your question does not seem complete. I don't know the side effects of silencing the output noise, which is probably just undefined behavior, and I don't know what your code is meant to accomplish as such. You have an unfinished render callback on the kAudioUnitSubType_DefaultOutput unit which does nothing (it is not generating silence!). I know of two ways of silencing it.
In the callback, the ioData buffers have to be explicitly filled with zeroes, because there is no guarantee they arrive zero-initialized:
Float32 * lBuffer0;
Float32 * lBuffer1;
lBuffer0 = (Float32 *)ioData->mBuffers[0].mData;
lBuffer1 = (Float32 *)ioData->mBuffers[1].mData;
memset(lBuffer0, 0, inNumberFrames*sizeof(Float32));
memset(lBuffer1, 0, inNumberFrames*sizeof(Float32));
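If you'd rather not hard-code two buffers, the same zeroing can be written generically over however many buffers the unit hands you (a sketch; using mDataByteSize avoids assuming the channel count or sample size):
for (UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
    memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
}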
The other possibility is to leave the unfinished callback as it is, but declare timerAU with outputUnitDesc.componentSubType = kAudioUnitSubType_HALOutput; instead of outputUnitDesc.componentSubType = kAudioUnitSubType_DefaultOutput; and explicitly disable I/O before setting the render callback, by means of the following code:
UInt32 lEnableIO = 0;
CheckError(AudioUnitSetProperty(timerAU,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
0, //output element
&lEnableIO,
sizeof(lEnableIO)),
"couldn't disable output");
I would strongly encourage you to study the Core Audio API thoroughly and to understand how to set up an audio unit; this is crucial to understanding the matter. I noticed a comment in your code mentioning a RemoteIO AU. There is no such thing as a RemoteIO AU on OS X. If you are porting iOS code, please learn the differences; they are well documented.
I'm developing a music application for iOS using AVAudioPlayer, in which I want to implement an equalizer.
I searched the internet for a good solution and ended up with an AUGraph configuration like this:
// multichannel mixer unit
AudioComponentDescription mixer_desc;
mixer_desc.componentType = kAudioUnitType_Mixer;
mixer_desc.componentSubType = kAudioUnitSubType_MultiChannelMixer;
mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
mixer_desc.componentFlags = 0;
mixer_desc.componentFlagsMask = 0;
// iPodEQ unit
AudioComponentDescription eq_desc;
eq_desc.componentType = kAudioUnitType_Effect;
eq_desc.componentSubType = kAudioUnitSubType_AUiPodEQ;
eq_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
eq_desc.componentFlags = 0;
eq_desc.componentFlagsMask = 0;
// output unit
AudioComponentDescription output_desc;
output_desc.componentType = kAudioUnitType_Output;
output_desc.componentSubType = kAudioUnitSubType_GenericOutput;
output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
output_desc.componentFlags = 0;
output_desc.componentFlagsMask = 0;
// create a new AUGraph
OSStatus result = NewAUGraph(&mGraph);
// Add Audio Nodes to graph
AUNode outputNode;
AUNode eqNode;
AUNode mixerNode;
AUGraphAddNode(mGraph, &mixer_desc, &mixerNode);
AUGraphAddNode(mGraph, &eq_desc, &eqNode);
AUGraphAddNode(mGraph, &output_desc, &outputNode);
// open the graph AudioUnits (but not initialized)
result = AUGraphOpen(mGraph);
// grab the audio unit instances from the nodes
AudioUnit mEQ;
AudioUnit mMixer;
result = AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
result = AUGraphNodeInfo(mGraph, eqNode, NULL, &mEQ);
// set number of input buses for the mixer Audio Unit
UInt32 numbuses = 0;
AudioUnitSetProperty ( mMixer, kAudioUnitProperty_ElementCount,
kAudioUnitScope_Input, 0, &numbuses, sizeof(numbuses));
// get the equalizer factory presets list
CFArrayRef mEQPresetsArray;
UInt32 sizeof1 = sizeof(mEQPresetsArray);
AudioUnitGetProperty(mEQ, kAudioUnitProperty_FactoryPresets,
kAudioUnitScope_Global, 0, &mEQPresetsArray, &sizeof1);
result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, eqNode, 0);
result = AUGraphConnectNodeInput(mGraph, eqNode, 0, outputNode, 0);
AudioUnitSetParameter(mMixer, kMultiChannelMixerParam_Enable, kAudioUnitScope_Input, 0, 1, 0);
AUPreset *aPreset = (AUPreset*)CFArrayGetValueAtIndex(mEQPresetsArray, 7);
AudioUnitSetProperty (mEQ, kAudioUnitProperty_PresentPreset,
kAudioUnitScope_Global, 0, aPreset, sizeof(AUPreset));
AUGraphInitialize(mGraph);
AUGraphStart(mGraph);
The AUGraph is running, but the EQ isn't applied. The argument 7 in AUPreset *aPreset = (AUPreset*)CFArrayGetValueAtIndex(mEQPresetsArray, 7); is the index of the equalizer preset that should be applied (Electronic).
I got that index from logging the values of the mEQPresetsArray-Array:
for (int i = 0; i < CFArrayGetCount(mEQPresetsArray); i++) {
AUPreset *aPreset = (AUPreset*)CFArrayGetValueAtIndex(mEQPresetsArray, i);
NSLog(@"%d: %@", (int)aPreset->presetNumber, aPreset->presetName);
}
How can I solve my problem? I've already tried NVDSP, but it didn't seem to work either, and I haven't found any other solution.
Thanks in advance, Fabian.
If this is for iOS then you need to use kAudioUnitSubType_RemoteIO instead of kAudioUnitSubType_GenericOutput.
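Concretely, only the subtype in the output description from your question needs to change (sketch):
output_desc.componentType = kAudioUnitType_Output;
output_desc.componentSubType = kAudioUnitSubType_RemoteIO; // was kAudioUnitSubType_GenericOutput
output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;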
You cannot use AVAudioPlayer to do your EQ, you need AVPlayer.
See here for a sample project using the audio tap:
https://developer.apple.com/library/ios/samplecode/AudioTapProcessor/Introduction/Intro.html
In this sample app, I was able to load a file of stereo data and play it using the simulator, but it doesn't work on the device. If I use a sound editor to convert the stereo clip to mono and change the descriptor settings accordingly, it works, but in mono only. I've had a hard time finding out why; I'm guessing it has to do with my descriptor configuration.
This sample app is at https://github.com/peter7777usa/TestAudio
The playback function
static OSStatus playbackCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Notes: ioData contains buffers (may be more than one!)
// Fill them up as much as you can. Remember to set the size value in each buffer to match how
// much data is in the buffer.
NSLog(@"mNumberBuffers: %u", (unsigned)ioData->mNumberBuffers);
UInt32 size = 2048;
if (iosAudio->incomingCircularBuffer.fillCount>size){
NSLog(#"Playing %d", iosAudio->incomingCircularBuffer.fillCount);
iosAudio.pkgtotal -=2;
int32_t availableBytes;
SInt16 *databuffer = TPCircularBufferTail(&iosAudio->incomingCircularBuffer, &availableBytes);
memcpy(ioData->mBuffers[0].mData, databuffer, size);
ioData->mBuffers[0].mDataByteSize = size; // indicate how much data we wrote in the buffer
TPCircularBufferConsume(&iosAudio->incomingCircularBuffer, size);
}else{
}
return noErr;
}
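Note that for the stereo format described below, one frame is 4 bytes (2 channels × 2-byte samples), so a render request for inNumberFrames frames needs inNumberFrames * 4 bytes; the fixed size of 2048 bytes covers only 512 stereo frames. A sketch of deriving the size from the callback arguments instead:
// Bytes this render cycle actually expects for interleaved stereo SInt16:
UInt32 bytesPerFrame = 2 /* channels */ * sizeof(SInt16);  // = 4
UInt32 bytesNeeded   = inNumberFrames * bytesPerFrame;     // use instead of the fixed 2048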
The AudioStreamBasicDescription
// Describe format
AudioStreamBasicDescription audioFormat;
bzero(&audioFormat, sizeof(AudioStreamBasicDescription));
UInt32 channelCount = 2;
UInt32 sampleSize = sizeof(UInt16);
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagsCanonical;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = channelCount;
audioFormat.mBitsPerChannel = sampleSize * 8;
audioFormat.mBytesPerPacket = sampleSize * channelCount;
audioFormat.mBytesPerFrame = sampleSize * channelCount;
// Apply format
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
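One way to sanity-check the descriptor on the device is to read the format back after setting it; AudioUnitGetProperty with the same scope and element returns what the unit actually accepted (a diagnostic sketch):
AudioStreamBasicDescription actualFormat = {0};
UInt32 size = sizeof(actualFormat);
status = AudioUnitGetProperty(audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &actualFormat,
                              &size);
NSLog(@"channels: %u, sample rate: %.0f",
      (unsigned)actualFormat.mChannelsPerFrame, actualFormat.mSampleRate);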
I successfully managed to build a complex AUGraph that I'm able to reconfigure on the fly, and all is working well.
I'm now facing a wall with what seems a very simple task: selecting a specific output device.
I'm able to get the device UID and ID thanks to this post: AudioObjectGetPropertyData to get a list of input devices (which I modified to get output devices), and to the code below (I can't remember where I found it, unfortunately):
- (AudioDeviceID) deviceIDWithUID:(NSString *)uid
{
AudioDeviceID myDevice;
AudioValueTranslation trans;
CFStringRef myKnownUID = (__bridge CFStringRef)uid; // bridge the NSString itself; mInputData must point to a CFStringRef
trans.mInputData = &myKnownUID;
trans.mInputDataSize = sizeof (CFStringRef);
trans.mOutputData = &myDevice;
trans.mOutputDataSize = sizeof(AudioDeviceID);
UInt32 size = sizeof (AudioValueTranslation);
AudioHardwareGetProperty (kAudioHardwarePropertyDeviceForUID,
&size,
&trans);
return myDevice;
}
I get the AudioDeviceID from this method and store it in an NSDictionary. I can NSLog it, and converted to hexadecimal it matches the ID shown in HALLab.
But when I configure my unit (see the code below), the graph only plays on the default device (the one selected in Sound Preferences).
AudioComponent comp = AudioComponentFindNext(NULL, &_componentDescription);
if (comp == NULL) {
printf ("Can't get output unit");
exit (-1);
}
CheckError(AudioComponentInstanceNew(comp, &_auUnit),
"Couldn't open component for output Unit");
UInt32 disableFlag = 0;
UInt32 enableFlag = 1;
AudioUnitScope outputBus = 0;
AudioUnitScope inputBus = 1;
CheckError (AudioUnitSetProperty(_auUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
outputBus,
&enableFlag,
sizeof(enableFlag)), "AudioUnitSetProperty[kAudioOutputUnitProperty_EnableIO] failed - enable Output");
CheckError (AudioUnitSetProperty(_auUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
inputBus,
&disableFlag,
sizeof(disableFlag)), "AudioUnitSetProperty[kAudioOutputUnitProperty_EnableIO] failed - disable Input");
AudioDeviceID devID = (AudioDeviceID)[[[_devices objectAtIndex:0] objectForKey:@"deviceID"] unsignedIntValue];
CheckError(AudioUnitSetProperty(_auUnit,
kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Output,
0,
&devID,
sizeof(AudioDeviceID)), "AudioUnitSetProperty[kAudioOutputUnitProperty_CurrentDevice] failed");
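To see whether the device property actually stuck, it can be read back immediately after being set (a diagnostic sketch):
AudioDeviceID confirmedID = kAudioObjectUnknown;
UInt32 idSize = sizeof(confirmedID);
CheckError(AudioUnitGetProperty(_auUnit,
                                kAudioOutputUnitProperty_CurrentDevice,
                                kAudioUnitScope_Global,
                                0,
                                &confirmedID,
                                &idSize), "couldn't read back CurrentDevice");
printf("unit is now on device 0x%x\n", (unsigned int)confirmedID);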
The AUGraph is already configured with all its units, the nodes are connected, and the graph is open. What am I doing wrong?
I would be very grateful for any clue to resolve this problem.