EXC_BAD_ACCESS in Core Audio - writing mic data to file w/ Extended Audio File Services - core-audio

I am attempting to write incoming mic audio to a file. Because the audio samples are delivered 4096 frames at a time (the frame count configured for my project) in a time-critical callback, I cannot simply write the bytes to a file with AudioFileWriteBytes. I also did not want the effort and complexity of setting up my own ring buffer to hand samples off for writing elsewhere, so I am using the Extended Audio File API for its ExtAudioFileWriteAsync function.
As instructed by the documentation, I create the ExtAudioFileRef with a CFURL and then prime it once in main with a null buffer and 0 frames. After that I start my AUHAL unit and the input callback begins to fire.
ExtAudioFileWriteAsync(player.recordFile, 0, NULL);
In that callback I write to the file asynchronously. The call is nested in a dispatch queue so that it runs after the callback function goes out of scope (though I'm not sure that is necessary; I get this error both with and without the enclosing dispatch block). This is the callback as it stands:
OSStatus InputRenderProc(void *inRefCon,
                         AudioUnitRenderActionFlags *ioActionFlags,
                         const AudioTimeStamp *inTimeStamp,
                         UInt32 inBusNumber,
                         UInt32 inNumberFrames,
                         AudioBufferList *ioData)
{
    MyAUGraphPlayer *player = (MyAUGraphPlayer *)inRefCon;
    // render incoming mic samples into player->inputBuffer
    OSStatus inputProcErr = noErr;
    inputProcErr = AudioUnitRender(player->inputUnit,
                                   ioActionFlags,
                                   inTimeStamp,
                                   inBusNumber,
                                   inNumberFrames,
                                   player->inputBuffer);
    printf("%i", inNumberFrames);
    dispatch_async(player->fileWritingQueue, ^{
        ExtAudioFileWriteAsync(player->recordFile, 4096, player->inputBuffer);
    });
    return inputProcErr;
}
It immediately bails out with the bad access exception on the first callback invocation. For clarity, these are the settings I use to create the file in the first place:
// describe a PCM format for audio file
AudioStreamBasicDescription format = { 0 };
format.mBytesPerFrame = 4;
format.mBytesPerPacket = 4;
format.mChannelsPerFrame = 2;
format.mBitsPerChannel = 16;
format.mFramesPerPacket = 1;
format.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsFloat;
format.mFormatID = kAudioFormatLinearPCM;
CFURLRef myFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("./test2.wav"), kCFURLPOSIXPathStyle, false);
ExtAudioFileCreateWithURL(myFileURL,
kAudioFileWAVEType,
&format,
NULL,
kAudioFileFlags_EraseFile,
&player.recordFile);
player.fileWritingQueue = dispatch_queue_create("myQueue", NULL);
ExtAudioFileWriteAsync(player.recordFile, 0, NULL);
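The format above combines kAudioFormatFlagIsFloat with 16 bits per channel and never sets mSampleRate. For comparison only, an internally consistent 16-bit signed-integer description would look like the sketch below (the 44.1 kHz rate is an assumption, not taken from the question, and this is not necessarily the cause of the crash):
// Sketch only: a packed 16-bit signed-integer stereo PCM description.
// 44100.0 Hz is assumed; substitute the AUHAL's actual sample rate.
AudioStreamBasicDescription intFormat = { 0 };
intFormat.mSampleRate       = 44100.0;
intFormat.mFormatID         = kAudioFormatLinearPCM;
intFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
intFormat.mBitsPerChannel   = 16;
intFormat.mChannelsPerFrame = 2;
intFormat.mFramesPerPacket  = 1;
intFormat.mBytesPerFrame    = 4;   // 2 channels * 2 bytes
intFormat.mBytesPerPacket   = 4;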

Related

file unit callback shows stereo channels in buffer, but the file loaded was 6-channel

We are trying to play a 5.1 (6-channel) AAC audio file using AUGraph (we also tried AVAudioEngine). The machine is connected to a 5.1 surround output device (Sony speakers), but the file plays as stereo: all channels come out of the left and right front speakers.
We are setting the file's AudioStreamBasicDescription (which is 6 channels) on the file unit as both its input and output format.
//creating file Unit.
AudioComponentDescription fileDesc;
fileDesc.componentType = kAudioUnitType_Generator;
fileDesc.componentSubType = kAudioUnitSubType_AudioFilePlayer;
fileDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
fileDesc.componentFlags = 0;
fileDesc.componentFlagsMask = 0;
status = AUGraphAddNode(_audioGraph,&fileDesc,&filePlayerNode);
status = AUGraphNodeInfo(_audioGraph,filePlayerNode,NULL,&filePlayerUnit);
//creating output Unit.
AudioComponentDescription outputDesc;
outputDesc.componentType = kAudioUnitType_Output;
outputDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
outputDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
outputDesc.componentFlags = 0;
outputDesc.componentFlagsMask = 0;
status = AUGraphAddNode(_audioGraph,&outputDesc,&outputNode);
status = AUGraphNodeInfo(_audioGraph,outputNode,NULL,&outputUnit);
OSStatus error = AudioFileOpenURL(audioFileURL, kAudioFileReadPermission, 0, &_audioInputFileId);
//get file's streamFormat.
UInt32 dataSize = sizeof(AudioStreamBasicDescription);
AudioFileGetProperty(_audioInputFileId, kAudioFilePropertyDataFormat, &dataSize, &_fileStreamDescription);
UInt32 propertySize = sizeof(_fileStreamDescription);
//set input and output format on the fileUnit.
OSStatus err = noErr;
err = AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &_fileStreamDescription, propertySize);
err = AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &_fileStreamDescription, propertySize);
//set input and output format on the outputUnit.
err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &_fileStreamDescription, propertySize);
err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &_fileStreamDescription, propertySize);
Similarly for the outputUnit we set the input and output format to that of the _fileStreamDescription.
We registered a render notify callback on the file unit. It reports 2 buffers with 1 channel each, which indicates stereo data.
AudioUnitAddRenderNotify(filePlayerUnit, fileUnitRenderCallback, (__bridge void*)self);
static OSStatus fileUnitRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData) { /* inspect ioData->mNumberBuffers here */ return noErr; }
We registered a render notify callback on the output unit as well. It reports 1 buffer with 2 channels, again indicating stereo data.
AudioUnitAddRenderNotify(outputUnit, outputUnitRenderCallback, (__bridge void*)self);
static OSStatus outputUnitRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData) { /* inspect ioData->mNumberBuffers here */ return noErr; }
We cannot use Apple's built-in players such as AVPlayer, because we need to connect additional audio units to apply audio effects.
Could you please suggest how we can achieve 5.1 playback?
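One avenue worth checking, sketched below with standard Core Audio properties (whether this alone is enough for this particular device is an assumption): confirm how many channels the output unit's device side actually exposes, and describe the intended 5.1 layout explicitly.
// Sketch: query the device-side format of the default output unit
// (bus 0, output scope) to see how many channels were negotiated ...
AudioStreamBasicDescription deviceFormat = { 0 };
UInt32 size = sizeof(deviceFormat);
AudioUnitGetProperty(outputUnit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Output, 0, &deviceFormat, &size);
printf("device channels: %u\n", (unsigned)deviceFormat.mChannelsPerFrame);
// ... and describe the intended 5.1 layout on the unit's input side.
AudioChannelLayout layout = { 0 };
layout.mChannelLayoutTag = kAudioChannelLayoutTag_MPEG_5_1_A; // L R C LFE Ls Rs
AudioUnitSetProperty(outputUnit, kAudioUnitProperty_AudioChannelLayout,
                     kAudioUnitScope_Input, 0, &layout, sizeof(layout));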

Distortion from output Audio Unit

I hear very loud, harsh distortion when I run this simple application. I simply instantiate a default output unit, assign a render callback, and let the program run in the run loop. Core Audio reports no errors, and everything works as expected apart from the distortion.
#import <AudioToolbox/AudioToolbox.h>
OSStatus render1(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
return noErr;
}
int main(int argc, const char * argv[]) {
AudioUnit timerAU;
UInt32 propsize = 0;
AudioComponentDescription outputUnitDesc;
outputUnitDesc.componentType = kAudioUnitType_Output;
outputUnitDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
outputUnitDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
outputUnitDesc.componentFlags = 0;
outputUnitDesc.componentFlagsMask = 0;
//Get RemoteIO AU from Audio Unit Component Manager
AudioComponent outputComp = AudioComponentFindNext(NULL, &outputUnitDesc);
if (outputComp == NULL) exit (-1);
CheckError(AudioComponentInstanceNew(outputComp, &timerAU), "comp");
//Set up render callback function for the RemoteIO AU.
AURenderCallbackStruct renderCallbackStruct;
renderCallbackStruct.inputProc = render1;
renderCallbackStruct.inputProcRefCon = nil;//(__bridge void *)(self);
propsize = sizeof(renderCallbackStruct);
CheckError(AudioUnitSetProperty(timerAU,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
0,
&renderCallbackStruct,
propsize), "set render");
CheckError(AudioUnitInitialize(timerAU), "init");
// tickMethod = completion;
CheckError(AudioOutputUnitStart(timerAU), "start");
CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1000, false);
}
Your question does not seem complete: I don't know what side effects silencing the output noise has (the noise is probably just undefined behavior), nor what your code is ultimately meant to do. What you have is an unfinished render callback on the kAudioUnitSubType_DefaultOutput unit which does nothing (it is not generating silence!). I know of two ways of silencing it.
In the callback, the ioData buffers have to be filled with zeroes explicitly, because there is no guarantee they arrive zeroed:
// Zero both (non-interleaved) channel buffers for this render slice.
Float32 *lBuffer0 = (Float32 *)ioData->mBuffers[0].mData;
Float32 *lBuffer1 = (Float32 *)ioData->mBuffers[1].mData;
memset(lBuffer0, 0, inNumberFrames * sizeof(Float32));
memset(lBuffer1, 0, inNumberFrames * sizeof(Float32));
The other possibility is to leave the unfinished callback as it is, but declare timerAU with outputUnitDesc.componentSubType = kAudioUnitSubType_HALOutput; instead of
outputUnitDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
and explicitly disable I/O before setting the render callback by means of the following code:
UInt32 lEnableIO = 0;
CheckError(AudioUnitSetProperty(timerAU,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
0, //output element
&lEnableIO,
sizeof(lEnableIO)),
"couldn't disable output");
I would strongly encourage studying the Core Audio API thoroughly and understanding how to set up an audio unit; this is crucial to understanding the matter. I see a comment in your code mentioning a RemoteIO AU, but there is no such thing as a RemoteIO AU on OS X. If you are porting iOS code, please learn the differences; they are well documented.

Able to play Stereo Type Audio on simulator but not on device using sample RemoteIO/AudioUnit app

In this sample app, I can load a file of stereo data and play it on the simulator, but it doesn't work on the device. If I use a sound editor to convert the stereo clip to mono and change the descriptor settings, it works, but only in mono. I'm having a hard time finding out why; I'm guessing it has to do with my descriptor configuration.
This sample app is at https://github.com/peter7777usa/TestAudio
The Playback Function
static OSStatus playbackCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Notes: ioData contains buffers (may be more than one!)
// Fill them up as much as you can. Remember to set the size value in each buffer to match how
// much data is in the buffer.
NSLog(#"muffers %d", ioData->mNumberBuffers);
UInt32 size = 2048;
if (iosAudio->incomingCircularBuffer.fillCount>size){
NSLog(#"Playing %d", iosAudio->incomingCircularBuffer.fillCount);
iosAudio.pkgtotal -=2;
int32_t availableBytes;
SInt16 *databuffer = TPCircularBufferTail(&iosAudio->incomingCircularBuffer, &availableBytes);
memcpy(ioData->mBuffers[0].mData, databuffer, size);
ioData->mBuffers[0].mDataByteSize = size; // indicate how much data we wrote in the buffer
TPCircularBufferConsume(&iosAudio->incomingCircularBuffer, size);
}else{
}
return noErr;
}
The AudioStreamBasicDescription
// Describe format
AudioStreamBasicDescription audioFormat;
bzero(&audioFormat, sizeof(AudioStreamBasicDescription));
UInt32 channelCount = 2;
UInt32 sampleSize = sizeof(UInt16);
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagsCanonical;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = channelCount;
audioFormat.mBitsPerChannel = sampleSize * 8;
audioFormat.mBytesPerPacket = sampleSize * channelCount;
audioFormat.mBytesPerFrame = sampleSize * channelCount;
// Apply format
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
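One variable worth eliminating (a sketch, not a confirmed fix): spell the flags out instead of relying on kAudioFormatFlagsCanonical, so that the description is identical on simulator and device:
// Sketch: explicit interleaved 16-bit signed-integer stereo at 44100 Hz,
// with nothing left to a canonical-flags macro.
AudioStreamBasicDescription audioFormat;
bzero(&audioFormat, sizeof(AudioStreamBasicDescription));
audioFormat.mSampleRate       = 44100.00;
audioFormat.mFormatID         = kAudioFormatLinearPCM;
audioFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket  = 1;
audioFormat.mChannelsPerFrame = 2;
audioFormat.mBitsPerChannel   = 16;
audioFormat.mBytesPerFrame    = 2 * sizeof(SInt16);   // interleaved: both channels in every frame
audioFormat.mBytesPerPacket   = 2 * sizeof(SInt16);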

From Xcode 5 to Xcode 6 beta 6, sounds are now distorted on iOS 7

I have a RemoteIO application that loads and plays samples on iOS. It works fine when built with Xcode 5, with iOS 7 as the deployment target.
My application was originally built using the AudioUnitSampleType audio format and the kAudioFormatFlagsCanonical format flags. My sample files are 16-bit/44100 Hz/mono CAF files.
Now I want to run it on iOS 8.
Building my app with its original code in Xcode 6, it runs fine on an iOS 7 device but produces no sound on an iOS 8 device.
Since AudioUnitSampleType and kAudioFormatFlagsCanonical are deprecated in iOS 8, I replaced them, after some research, with float and kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved.
Now my app runs fine on iOS 8, but the sound is saturated on iOS 7.
Has anyone experienced this? Any help? Thanks, I am stuck here.
Pascal
ps : here is my sample loading method
#define AUDIO_SAMPLE_TYPE float
#define AUDIO_FORMAT_FLAGS kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved
-(void) load:(NSURL*)fileNameURL{
if (frameCount>0){
if (leftChannel!= NULL){
free (leftChannel);
leftChannel = 0;
}
if (rightChannel != NULL){
free (rightChannel);
rightChannel = 0;
}
}
soundFileURLRef=(CFURLRef)fileNameURL;
//----------------------------------------------
// 1.[OPEN AUDIO FILE] and associate it with the extended audio file object.
//----------------------------------------------
ExtAudioFileRef audioFileExtendedObject = 0;
log_if_err(ExtAudioFileOpenURL((CFURLRef)soundFileURLRef,
&audioFileExtendedObject),
#"ExtAudioFileOpenURL failed");
//----------------------------------------------
// 2.[AUDIO FILE LENGTH] Get the audio file's length in frames.
//----------------------------------------------
UInt64 totalFramesInFile = 0;
UInt32 frameLengthPropertySize = sizeof (totalFramesInFile);
log_if_err(ExtAudioFileGetProperty(audioFileExtendedObject,
kExtAudioFileProperty_FileLengthFrames,
&frameLengthPropertySize,
&totalFramesInFile),
#"ExtAudioFileGetProperty (audio file length in frames) failed");
frameCount = totalFramesInFile;
//----------------------------------------------
// 3.[AUDIO FILE FORMAT] Get the audio file's number of channels. Normally CAF.
//----------------------------------------------
AudioStreamBasicDescription fileAudioFormat = {0};
UInt32 formatPropertySize = sizeof (fileAudioFormat);
log_if_err(ExtAudioFileGetProperty(audioFileExtendedObject,
kExtAudioFileProperty_FileDataFormat,
&formatPropertySize,
&fileAudioFormat),
#"ExtAudioFileGetProperty (file audio format) failed");
//----------------------------------------------
// 4.[ALLOCATE AUDIO FILE MEMORY] Allocate memory in the soundFiles instance
// variable to hold the left channel, or mono, audio data
//----------------------------------------------
UInt32 channelCount = fileAudioFormat.mChannelsPerFrame;
// DLog(#"fileNameURL=%# | channelCount=%d",fileNameURL,(int)channelCount);
if (leftChannel != NULL){
free (leftChannel);
leftChannel = 0;
}
leftChannel = (AUDIO_SAMPLE_TYPE *) calloc (totalFramesInFile, sizeof(AUDIO_SAMPLE_TYPE));
AudioStreamBasicDescription importFormat = {0};
if (2==channelCount) {
isStereo = YES;
if (rightChannel != NULL){
free (rightChannel);
rightChannel = 0;
}
rightChannel = (AUDIO_SAMPLE_TYPE *) calloc (totalFramesInFile, sizeof (AUDIO_SAMPLE_TYPE));
importFormat = stereoStreamFormat;
} else if (1==channelCount) {
isStereo = NO;
importFormat = monoStreamFormat;
} else {
ExtAudioFileDispose (audioFileExtendedObject);
return;
}
//----------------------------------------------
// 5.[ASSIGN THE MIXER INPUT BUS STREAM DATA FORMAT TO THE AUDIO FILE]
// Assign the appropriate mixer input bus stream data format to the extended audio
// file object. This is the format used for the audio data placed into the audio
// buffer in the SoundStruct data structure, which is in turn used in the
// inputRenderCallback callback function.
//----------------------------------------------
UInt32 importFormatPropertySize = (UInt32) sizeof (importFormat);
log_if_err(ExtAudioFileSetProperty(audioFileExtendedObject,
kExtAudioFileProperty_ClientDataFormat,
importFormatPropertySize,
&importFormat),
#"ExtAudioFileSetProperty (client data format) failed");
//----------------------------------------------
// 6.[SET THE AUDIBUFFER LIST STRUCT] which has two roles:
//
// 1. It gives the ExtAudioFileRead function the configuration it
// needs to correctly provide the data to the buffer.
//
// 2. It points to the soundFiles[soundFile].leftChannel buffer, so
// that audio data obtained from disk using the ExtAudioFileRead function
// goes to that buffer
//
// Allocate memory for the buffer list struct according to the number of
// channels it represents.
//----------------------------------------------
AudioBufferList *bufferList;
bufferList = (AudioBufferList *) malloc(sizeof(AudioBufferList)+sizeof(AudioBuffer)*(channelCount-1));
if (NULL==bufferList){
NSLog(#"*** malloc failure for allocating bufferList memory");
return;
}
//----------------------------------------------
// 7.initialize the mNumberBuffers member
//----------------------------------------------
bufferList->mNumberBuffers = channelCount;
//----------------------------------------------
// 8.initialize the mBuffers member to 0
//----------------------------------------------
AudioBuffer emptyBuffer = {0};
size_t arrayIndex;
for (arrayIndex = 0; arrayIndex < channelCount; arrayIndex++) {
bufferList->mBuffers[arrayIndex] = emptyBuffer;
}
//----------------------------------------------
// 9.set up the AudioBuffer structs in the buffer list
//----------------------------------------------
bufferList->mBuffers[0].mNumberChannels = 1;
bufferList->mBuffers[0].mDataByteSize = totalFramesInFile * sizeof (AUDIO_SAMPLE_TYPE);
bufferList->mBuffers[0].mData = leftChannel;
if (channelCount==2){
bufferList->mBuffers[1].mNumberChannels = 1;
bufferList->mBuffers[1].mDataByteSize = totalFramesInFile * sizeof (AUDIO_SAMPLE_TYPE);
bufferList->mBuffers[1].mData = rightChannel;
}
//----------------------------------------------
// 10.Perform a synchronous, sequential read of the audio data out of the file and
// into the "soundFiles[soundFile].leftChannel" and (if stereo) ".rightChannel" members.
//----------------------------------------------
UInt32 numberOfPacketsToRead = (UInt32) totalFramesInFile;
OSStatus result = ExtAudioFileRead (audioFileExtendedObject,
&numberOfPacketsToRead,
bufferList);
free (bufferList);
if (noErr != result) {
log_if_err(result,#"ExtAudioFileRead failure");
//
// If reading from the file failed, then free the memory for the sound buffer.
//
free (leftChannel);
leftChannel = 0;
if (2==channelCount) {
free (rightChannel);
rightChannel = 0;
}
frameCount = 0;
}
//----------------------------------------------
// Dispose of the extended audio file object, which also
// closes the associated file.
//----------------------------------------------
ExtAudioFileDispose (audioFileExtendedObject);
return;
}
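The stereoStreamFormat and monoStreamFormat client formats are not shown in the listing above; for reference, a non-interleaved 32-bit float client format matching the AUDIO_FORMAT_FLAGS define would look roughly like this (a sketch; the 44.1 kHz rate is an assumption):
// Sketch of a non-interleaved float client format. With the non-interleaved
// flag set, each AudioBuffer carries one channel, so bytes-per-frame covers
// a single channel only.
AudioStreamBasicDescription stereoStreamFormat = {0};
stereoStreamFormat.mSampleRate       = 44100.0;                    // assumption
stereoStreamFormat.mFormatID         = kAudioFormatLinearPCM;
stereoStreamFormat.mFormatFlags      = AUDIO_FORMAT_FLAGS;         // float | packed | non-interleaved
stereoStreamFormat.mBitsPerChannel   = 8 * sizeof(AUDIO_SAMPLE_TYPE);
stereoStreamFormat.mChannelsPerFrame = 2;
stereoStreamFormat.mFramesPerPacket  = 1;
stereoStreamFormat.mBytesPerFrame    = sizeof(AUDIO_SAMPLE_TYPE);  // per channel when non-interleaved
stereoStreamFormat.mBytesPerPacket   = sizeof(AUDIO_SAMPLE_TYPE);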

Audio Unit background conflict

I have an audio analysis app using Audio Units that works perfectly when the app runs in isolation. However, if other audio apps are running in the background, AudioUnitRender returns a -50 error.
Does anyone know a way to resolve this, so that AudioUnitRender works even when other audio apps are running?
Thanks in advance.
Audio session initiation
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setPreferredHardwareSampleRate:sampleRate error:&err];
[session setCategory:AVAudioSessionCategoryRecord error:&err];
[session setActive:YES error:&err];
[session setMode:AVAudioSessionModeMeasurement error:&err];
[session setDelegate:listener];
UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_None;
AudioSessionSetProperty (kAudioSessionProperty_OverrideAudioRoute,sizeof (audioRouteOverride),&audioRouteOverride);
I/O unit description:
OSStatus err;
AudioComponentDescription ioUnitDescription;
ioUnitDescription.componentType = kAudioUnitType_Output;
ioUnitDescription.componentSubType = kAudioUnitSubType_RemoteIO;
ioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
ioUnitDescription.componentFlags = 0;
ioUnitDescription.componentFlagsMask = 0;
// Declare and instantiate an audio processing graph
NewAUGraph(&processingGraph);
// Add an audio unit node to the graph, then instantiate the audio unit.
/*
An AUNode is an opaque type that represents an audio unit in the context
of an audio processing graph. You receive a reference to the new audio unit
instance, in the ioUnit parameter, on output of the AUGraphNodeInfo
function call.
*/
AUNode ioNode;
AUGraphAddNode(processingGraph, &ioUnitDescription, &ioNode);
AUGraphOpen(processingGraph); // indirectly performs audio unit instantiation
// Obtain a reference to the newly-instantiated I/O unit. Each Audio Unit
// requires its own configuration.
AUGraphNodeInfo(processingGraph, ioNode, NULL, &ioUnit);
// Initialize below.
AURenderCallbackStruct callbackStruct = {0};
UInt32 enableInput;
UInt32 enableOutput;
// Enable input and disable output.
enableInput = 1; enableOutput = 0;
callbackStruct.inputProc = RenderFFTCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus, &enableInput, sizeof(enableInput));
err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus, &enableOutput, sizeof(enableOutput));
err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Input,
kOutputBus, &callbackStruct, sizeof(callbackStruct));
// Set the stream format.
size_t bytesPerSample = [self ASBDForSoundMode];
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus, &streamFormat, sizeof(streamFormat));
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus, &streamFormat, sizeof(streamFormat));
// Disable system buffer allocation. We'll do it ourselves.
UInt32 flag = 1;
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus, &flag, sizeof(flag));}
Render callback:
RIOInterface* THIS = (__bridge_transfer RIOInterface *)inRefCon;
COMPLEX_SPLIT A = THIS->A;
void *dataBuffer = THIS->dataBuffer;
float *outputBuffer = THIS->outputBuffer;
FFTSetup fftSetup = THIS->fftSetup;
float *hammingWeights = THIS->hammingWeights;
uint32_t log2n = THIS->log2n;
uint32_t n = THIS->n;
uint32_t nOver2 = THIS->nOver2;
uint32_t stride = 1;
int bufferCapacity = THIS->bufferCapacity;
SInt16 index = THIS->index;
AudioUnit rioUnit = THIS->ioUnit;
OSStatus renderErr;
UInt32 bus1 = 1;
renderErr = AudioUnitRender(rioUnit, ioActionFlags,
inTimeStamp, bus1, inNumberFrames, THIS->bufferList);
if (renderErr < 0) {
return renderErr;
}
I discovered that this issue occurred when another app had an active AVAudioSession, in which case the first-initiated AVAudioSession's settings took priority over mine. I was trying to set the sampling frequency to 22050 Hz, but if the other audio session had it set at 44100 Hz, it stayed at 44100 Hz.
I resolved the issue by making my code 'adaptive' to whatever settings were in force, e.g. with respect to the buffer size, so it still works effectively (if not optimally) with audio settings that differ from my preference.
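As a sketch of that adaptive approach, using the standard AVAudioSession properties (how the FFT length is derived from the slice size is an assumption about this particular app):
// Request the preferred rate, then read back what the session actually granted
// and size buffers/FFT windows from the real values rather than the request.
AVAudioSession *session = [AVAudioSession sharedInstance];
NSError *err = nil;
[session setPreferredSampleRate:22050.0 error:&err];
[session setActive:YES error:&err];

double actualRate = session.sampleRate;                // e.g. 44100 if another app got there first
NSTimeInterval ioDuration = session.IOBufferDuration;  // actual hardware I/O buffer duration
UInt32 framesPerSlice = (UInt32)lround(actualRate * ioDuration);
// derive log2n / the FFT length from framesPerSlice instead of assuming 22050-based sizes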
