CoreAudio Output Sample Rate Discrepancy - ffmpeg

I am using ffmpeg to acquire audio from .mov files. Looking over my settings, I am not sample-rate converting the audio buffers I am generating, so that is unlikely to account for the issue I am having. Regardless of the sample rate I set on my Built-in Output, files at 44.1 kHz play back at the correct rate. If I play back a 48 kHz file, it plays back slower (at roughly 91% of the normal rate), which indicates that the true output rate is 44.1 kHz. I can change my Built-in Output to 44.1, 48, or 96 kHz and the same phenomenon exists. I change my default output rate using the Audio MIDI Setup app, and then verify the sample rate using AudioUnitGetProperty on my outputAudioUnit. This matches the sample rate shown in Audio MIDI Setup.
Thoughts? I am including my audio graph code below:
CheckError(NewAUGraph(&fp.graph), "Couldn't create a new AUGraph");

// The varispeed node has an input callback.
// The varispeed node feeds an output node, which runs
// at the frequency of the system default output.
AUNode outputNode;
AudioComponentDescription outputcd = [self defaultOutputComponent];
CheckError(AUGraphAddNode(fp.graph, &outputcd, &outputNode),
           "AUGraphAddNode[kAudioUnitSubType_DefaultOutput] failed");

AUNode varispeedNode;
AudioComponentDescription varispeedcd = [self variSpeedComponent];
CheckError(AUGraphAddNode(fp.graph, &varispeedcd, &varispeedNode),
           "AUGraphAddNode[kAudioUnitSubType_Varispeed] failed");

CheckError(AUGraphOpen(fp.graph),
           "Couldn't open AUGraph");
CheckError(AUGraphNodeInfo(fp.graph, outputNode, NULL, &fp.outputAudioUnit),
           "Couldn't retrieve output audio unit");
CheckError(AUGraphNodeInfo(fp.graph, varispeedNode, NULL, &fp.variSpeedAudioUnit),
           "Couldn't retrieve varispeed audio unit");

AURenderCallbackStruct input;
input.inputProc = CBufferProviderCallback;
input.inputProcRefCon = &playerStruct;
CheckError(AudioUnitSetProperty(fp.variSpeedAudioUnit,
                                kAudioUnitProperty_SetRenderCallback,
                                kAudioUnitScope_Input,
                                0,
                                &input,
                                sizeof(input)),
           "AudioUnitSetProperty failed");

CheckError(AUGraphConnectNodeInput(fp.graph, varispeedNode, 0, outputNode, 0),
           "Couldn't connect varispeed to output");
CheckError(AUGraphInitialize(fp.graph),
           "Couldn't initialize AUGraph");

// Check the output unit's sample rate.
Float64 outputSampleRate = 0.0;
UInt32 sizeOfFloat64 = sizeof(Float64);
CheckError(AudioUnitGetProperty(fp.outputAudioUnit,
                                kAudioUnitProperty_SampleRate,
                                kAudioUnitScope_Global,
                                0,
                                &outputSampleRate,
                                &sizeOfFloat64),
           "Couldn't get output sample rate");

I solved the issue. When building the audio graph, you need to specify the input sample rate of the Varispeed audio unit before you connect it to the output node inside the AUGraph. See the example code at
https://developer.apple.com/library/content/samplecode/CAPlayThrough/Listings/ReadMe_txt.html
CheckError(NewAUGraph(&fp.graph), "BuildGraphError");
AUNode outputNode;
AudioComponentDescription outputcd = [self defaultOutputComponent];
CheckError(AUGraphAddNode(fp.graph, &outputcd, &outputNode),
"AUGraphAddNode[kAudioUnitSubType_DefaultOutput] failed");
AUNode varispeedNode;
AudioComponentDescription varispeedcd = [self variSpeedComponent];
CheckError(AUGraphAddNode(fp.graph, &varispeedcd, &varispeedNode),
"AUGraphAddNode[kAudioUnitSubType_Varispeed] failed");
CheckError(AUGraphOpen(fp.graph),
"Couldn't Open AudioGraph");
CheckError(AUGraphNodeInfo(fp.graph, outputNode, NULL, &fp.outputAudioUnit),
"Couldn't Retrieve File Audio Unit");
CheckError(AUGraphNodeInfo(fp.graph, varispeedNode, NULL, &fp.variSpeedAudioUnit),
"Couldn't Retrieve Varispeed Audio Unit");
AURenderCallbackStruct input;
input.inputProc = CBufferProviderCallback;
input.inputProcRefCon = &playerStruct;
CheckError(AudioUnitSetProperty(fp.variSpeedAudioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input)),
"AudioUnitSetProperty failed");
//you have to set the varispeed rate before you connect it
//see CAPlayThrough
AudioStreamBasicDescription asbd = {0};
UInt32 size;
Boolean outWritable;
//Gets the size of the Stream Format Property and if it is writable
OSStatus result = AudioUnitGetPropertyInfo(fp.variSpeedAudioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
0,
&size,
&outWritable);
//Get the current stream format of the output
result = AudioUnitGetProperty (fp.variSpeedAudioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
0,
&asbd,
&size);
asbd.mSampleRate = targetSampleRate;
//Set the stream format of the output to match the input
result = AudioUnitSetProperty (fp.variSpeedAudioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&asbd,
size);
printf("AudioUnitSetProperty result %d %d\n", result, noErr);
CheckError(AUGraphConnectNodeInput(fp.graph, varispeedNode, 0, outputNode, 0),
"Couldn't Connect varispeed to output");
CheckError(AUGraphInitialize(fp.graph),
"Couldn't Initialize AUGraph");
Float64 outputSampleRate = 48000.0;
UInt32 sizeOfFloat64 = sizeof(Float64);
outputSampleRate = 0.0;
CheckError(AudioUnitGetProperty(fp.outputAudioUnit,
kAudioUnitProperty_SampleRate,
kAudioUnitScope_Global,
0,
&outputSampleRate,
&sizeOfFloat64),
"Couldn't get output sampleRate");
NSLog(#"Output Sample Rate of the ->%f", outputSampleRate);

Related

file unit callback shows stereo channels in buffer, but the file loaded was 6 channel

We are trying to play a 5.1 (6-channel) AAC audio file using AUGraph. We also tried using AVAudioEngine. We are connected to a 5.1 surround-sound output device (Sony speakers), but the file is played back as stereo: all the channels come out of the left and right front speakers.
We are setting the file's AudioStreamBasicDescription (which is 6 channels) on the file unit as both the input and output format.
//creating file Unit.
AudioComponentDescription fileDesc;
fileDesc.componentType = kAudioUnitType_Generator;
fileDesc.componentSubType = kAudioUnitSubType_AudioFilePlayer;
fileDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
fileDesc.componentFlags = 0;
fileDesc.componentFlagsMask = 0;
status = AUGraphAddNode(_audioGraph,&fileDesc,&filePlayerNode);
status = AUGraphNodeInfo(_audioGraph,filePlayerNode,NULL,&filePlayerUnit);
//creating output Unit.
AudioComponentDescription outputDesc;
outputDesc.componentType = kAudioUnitType_Output;
outputDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
outputDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
outputDesc.componentFlags = 0;
outputDesc.componentFlagsMask = 0;
status = AUGraphAddNode(_audioGraph,&outputDesc,&outputNode);
status = AUGraphNodeInfo(_audioGraph,outputNode,NULL,&outputUnit);
OSStatus error = AudioFileOpenURL(audioFileURL, kAudioFileReadPermission, 0, &_audioInputFileId);
//get file's streamFormat.
UInt32 dataSize = sizeof(AudioStreamBasicDescription);
AudioFileGetProperty(_audioInputFileId, kAudioFilePropertyDataFormat, &dataSize, &_fileStreamDescription);
UInt32 propertySize = sizeof(_fileStreamDescription);
//set input and output format to fileUnit.
OSStatus err = AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &_fileStreamDescription, propertySize);
err = AudioUnitSetProperty(filePlayerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &_fileStreamDescription, propertySize);
//set input and output format to outputUnit.
err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &_fileStreamDescription, propertySize);
err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &_fileStreamDescription, propertySize);
Similarly, for the outputUnit we set the input and output formats to _fileStreamDescription.
We registered a render callback on the file unit. It shows the number of buffers as 2, with 1 channel per buffer, which indicates stereo data.
AudioUnitAddRenderNotify(filePlayerUnit, fileUnitRenderCallback, (__bridge void*)self);
static OSStatus fileUnitRenderCallback(void * inRefCon,AudioUnitRenderActionFlags * ioActionFlags, const AudioTimeStamp *inTimeStamp,UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData){}
We registered a render callback on the output unit. It shows the number of buffers as 1, with 2 channels per buffer, which also indicates stereo data.
AudioUnitAddRenderNotify(outputUnit, outputUnitRenderCallback, (__bridge void*)self);
static OSStatus outputUnitRenderCallback(void * inRefCon,AudioUnitRenderActionFlags * ioActionFlags, const AudioTimeStamp *inTimeStamp,UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData){}
We will not be able to use Apple's built-in players such as AVPlayer, because we need to connect additional audio units to apply audio effects.
Could you please suggest how we can achieve 5.1 playback?

Setting sampling rate of default audio output device programmatically

I'm working on an application that plays sounds through the default audio device on a Mac. I want to change the output sampling rate and bit depth of the default output device but it always gives me a kAudioUnitErr_PropertyNotWritable error code.
Here is my test code:
AudioStreamBasicDescription streamFormat;
AudioStreamBasicDescription newStreamFormat;
newStreamFormat.mSampleRate = 96000; // the sample rate of the audio stream
newStreamFormat.mFormatID = kAudioFormatLinearPCM; // the specific encoding type of audio stream
newStreamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger;//kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsNonMixable;
newStreamFormat.mFramesPerPacket = 1;
newStreamFormat.mChannelsPerFrame = 1;
newStreamFormat.mBitsPerChannel = 24;
newStreamFormat.mBytesPerPacket = 2;
newStreamFormat.mBytesPerFrame = 2;
UInt32 size = sizeof(AudioStreamBasicDescription);
result = AudioUnitGetProperty(myUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamFormat, &size);
result = AudioOutputUnitStop(myUnit);
result = AudioUnitUninitialize(myUnit);
result = AudioUnitSetProperty(myUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &newStreamFormat, size);
result = AudioUnitInitialize(myUnit);
result = AudioOutputUnitStart(myUnit);
result = AudioUnitGetProperty(myUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamFormat, &size);
result = AudioUnitGetProperty(myUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, &size);
When I make the call to set the stream format on kAudioUnitScope_Input I don't get any error, but when I set it on kAudioUnitScope_Output it fails with the property-not-writable error.
It must be possible to do this programmatically (Audio MIDI Setup does it), but I have searched and searched and haven't been able to find any solution.
I did find this post that implies that setting the input sampling rate of the device will update the output as well. I tried this but when I read back the property the output doesn't match what I set on the input.
I'm pretty sure it's not the output AudioUnit's job to configure devices. It's more of an intermediary between clients and audio devices. Which means AudioUnitSetProperty() is the wrong API for the job.
So if you want to configure the device, try setting kAudioDevicePropertyNominalSampleRate on it using the AudioObjectSetPropertyData() function.
Then, unless you want a gratuitous rate conversion, you probably want to make sure your audio unit input format matches the new device sample rate by doing what you're already doing: calling AudioUnitSetProperty() on the input (data going into the audio unit) scope.
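A minimal sketch of that approach, assuming you are targeting the current default output device (error handling omitted; the requested rate must be one the device actually supports):

AudioObjectPropertyAddress addr = {
    kAudioHardwarePropertyDefaultOutputDevice,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
};
AudioDeviceID device = kAudioObjectUnknown;
UInt32 size = sizeof(device);
// Find the current default output device.
AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr, 0, NULL, &size, &device);

// Ask the device itself (not the audio unit) to change its nominal rate.
Float64 desiredRate = 96000.0;
addr.mSelector = kAudioDevicePropertyNominalSampleRate;
AudioObjectSetPropertyData(device, &addr, 0, NULL, sizeof(desiredRate), &desiredRate);

Note that the rate change is applied asynchronously; if you need to know when it has actually taken effect, install a listener for kAudioDevicePropertyNominalSampleRate with AudioObjectAddPropertyListener.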

iOS 10.2 callkit error -> [aurioc] 892

I use CallKit with iOS 10.0.1 and it works perfectly (outbound and inbound calls).
After updating my iPhone 7 to iOS 10.2, I hear nothing when I receive an inbound call.
For the AudioController:
try {
// Configure the audio session
AVAudioSession *sessionInstance = [AVAudioSession sharedInstance];
// we are going to play and record so we pick that category
NSError *error = nil;
[sessionInstance setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
XThrowIfError((OSStatus)error.code, "couldn't set session's audio category");
// set the mode to voice chat
[sessionInstance setMode:AVAudioSessionModeVoiceChat error:&error];
XThrowIfError((OSStatus)error.code, "couldn't set session's audio mode");
// set the buffer duration to 5 ms
NSTimeInterval bufferDuration = .005;
[sessionInstance setPreferredIOBufferDuration:bufferDuration error:&error];
XThrowIfError((OSStatus)error.code, "couldn't set session's I/O buffer duration");
// set the session's sample rate
[sessionInstance setPreferredSampleRate:44100 error:&error];
XThrowIfError((OSStatus)error.code, "couldn't set session's preferred sample rate");
// add interruption handler
[[NSNotificationCenter defaultCenter] addObserver:self
                                          selector:@selector(handleInterruption:)
                                              name:AVAudioSessionInterruptionNotification
                                            object:sessionInstance];
// we don't do anything special in the route change notification
[[NSNotificationCenter defaultCenter] addObserver:self
                                          selector:@selector(handleRouteChange:)
                                              name:AVAudioSessionRouteChangeNotification
                                            object:sessionInstance];
// if media services are reset, we need to rebuild our audio chain
[[NSNotificationCenter defaultCenter] addObserver:self
                                          selector:@selector(handleMediaServerReset:)
                                              name:AVAudioSessionMediaServicesWereResetNotification
                                            object:sessionInstance];
}
catch (CAXException &e) {
NSLog(#"Error returned from setupAudioSession: %d: %s", (int)e.mError, e.mOperation);
}
catch (...) {
NSLog(#"Unknown error returned from setupAudioSession");
}
and
try {
// Create a new instance of Apple Voice Processing IO
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
XThrowIfError(AudioComponentInstanceNew(comp, &_rioUnit), "couldn't create a new instance of Apple Voice Processing IO");
// Enable input and output on Apple Voice Processing IO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element
UInt32 one = 1;
XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one)), "could not enable input on Apple Voice Processing IO");
XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &one, sizeof(one)), "could not enable output on Apple Voice Processing IO");
// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 32 bit floating point
CAStreamBasicDescription ioFormat = CAStreamBasicDescription(44100, 1, CAStreamBasicDescription::kPCMFormatFloat32, false);
XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &ioFormat, sizeof(ioFormat)), "couldn't set the input client format on Apple Voice Processing IO");
XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ioFormat, sizeof(ioFormat)), "couldn't set the output client format on Apple Voice Processing IO");
// Set the MaximumFramesPerSlice property. This property is used to describe to an audio unit the maximum number
// of samples it will be asked to produce on any single given call to AudioUnitRender
UInt32 maxFramesPerSlice = 4096;
XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32)), "couldn't set max frames per slice on Apple Voice Processing IO");
// Get the property value back from Apple Voice Processing IO. We are going to use this value to allocate buffers accordingly
UInt32 propSize = sizeof(UInt32);
XThrowIfError(AudioUnitGetProperty(_rioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, &propSize), "couldn't get max frames per slice on Apple Voice Processing IO");
// We need references to certain data in the render callback
// This simple struct is used to hold that information
cd.rioUnit = _rioUnit;
cd.muteAudio = &_muteAudio;
cd.audioChainIsBeingReconstructed = &_audioChainIsBeingReconstructed;
// Set the render callback on Apple Voice Processing IO
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = performRender;
renderCallback.inputProcRefCon = NULL;
XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &renderCallback, sizeof(renderCallback)), "couldn't set render callback on Apple Voice Processing IO");
// Initialize the Apple Voice Processing IO instance
XThrowIfError(AudioUnitInitialize(_rioUnit), "couldn't initialize Apple Voice Processing IO instance");
}
catch (CAXException &e) {
NSLog(#"Error returned from setupIOUnit: %d: %s", (int)e.mError, e.mOperation);
}
catch (...) {
NSLog(#"Unknown error returned from setupIOUnit");
}
and i have this in my log :
[aurioc] 892: failed: '!pri' (enable 3, outf< 1 ch, 44100 Hz, Float32> inf< 1 ch, 44100 Hz, Float32>)
Error returned from setupIOUnit: 561017449: couldn't initialize Apple Voice Processing IO instance
Do you have any ideas?
Enable your sound devices after the audio session has been activated: put your sound-enable call in the audio session activation callback. You can also use a block invoked from the audio session activation for answer / unhold, etc.
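For example (a sketch, not taken from your project): with CallKit the session is activated by the system, so the I/O unit should only be started from the provider delegate's activation callback. startAudio/stopAudio here are hypothetical helpers that wrap AudioOutputUnitStart/AudioOutputUnitStop on _rioUnit:

// CXProviderDelegate: CallKit activates the AVAudioSession for you,
// so defer starting the Voice Processing IO unit until this point.
- (void)provider:(CXProvider *)provider didActivateAudioSession:(AVAudioSession *)audioSession {
    [self.audioController startAudio];   // hypothetical wrapper around AudioOutputUnitStart(_rioUnit)
}

- (void)provider:(CXProvider *)provider didDeactivateAudioSession:(AVAudioSession *)audioSession {
    [self.audioController stopAudio];    // hypothetical wrapper around AudioOutputUnitStop(_rioUnit)
}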

How to set up Splitter Unit in Core Audio

From the book "Learning Core Audio",I have learned how to mix the stream 1(voice from mic) and stream 2(voice synthesizer) then connect its output
to outputUnit(speaker).
Now I tried to record its output while monitoring its output from speaker.
So I set up a Splitter unit, connect the output of mixerUnit to its input,
connect its outputScope 0 to outputUnit, set a renderCallback(for recording) to its outputScope 1.(I thought Splitter Unit will split the input stream to bus 0 and bus 1)
The result was that I can hear it from speaker, but the renderCallback never get called due to its "Pull Model", since it didn't connect to any output unit.
But if I set up another Generic Output Unit then connect to it, AUGraph showed me an Error message.(No 2 Output Unit allowed)
Anybody can help me out here ?
CheckError(AUGraphNodeInfo(player->graph,
outputNode,
NULL,
&player->outputUnit),
"AUGraphNodeInfo failed");
CheckError(AUGraphNodeInfo(player->graph,
speechNode,
NULL,
&player->speechUnit),
"AUGraphInfo failed");
AudioUnit mixerUnit;
CheckError(AUGraphNodeInfo(player->graph,
mixerNode,
NULL,
&mixerUnit),
"AUGraphNOdeInfo failed");
AudioUnit splitterUnit;
CheckError(AUGraphNodeInfo(player->graph,
splitterNode,
NULL,
&splitterUnit),
"AUGraphInfo failed");
CheckError(AudioUnitSetProperty(mixerUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&player->streamFormat,
propertySize),
"Couldn't set stream format on mixer unit bus 0");
CheckError(AudioUnitSetProperty(mixerUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
1,
&player->streamFormat,
propertySize),
"Couldn't set stream format on mixer unit bus");
CheckError(AudioUnitSetProperty(splitterUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&player->streamFormat,
propertySize),
"Couldn't set stream format on Splitter unit");
CheckError(AUGraphConnectNodeInput(player->graph,
splitterNode,
0,
outputNode,
0),
"Couldn't connect splitter 0 to outputNode");
CheckError(AUGraphConnectNodeInput(player->graph,
speechNode,
0,
mixerNode,
1),
"Couldn't connect speech speechNode to mixer input(1)");
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = GraphRenderProc;
callbackStruct.inputProcRefCon = player;
CheckError(AudioUnitSetProperty(mixerUnit,//was outputUnit
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&callbackStruct,
sizeof(callbackStruct)),
"Couldn't set render callback on mixer unit");
AURenderCallbackStruct recorderCallback;
recorderCallback.inputProc = recordRenderProc;
recorderCallback.inputProcRefCon = player;
CheckError(AudioUnitSetProperty(splitterUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Output,
0,
&recorderCallback,
sizeof(recorderCallback)),
"Couldn't set render callback on splitter output");
Are you just trying to record your final mixed output to a file?
If so, you can write to the file in your render callback. However, you can't block for very long: you have to finish your writing completely before the next callback. The easiest approach would probably be to use Grand Central Dispatch to write the file asynchronously.
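A rough sketch of that idea, using ExtAudioFileWriteAsync (which buffers internally and writes off the render thread) instead of hand-rolled GCD, and tapping the data that is already being pulled via a render-notify on the output unit. player is the same struct used in the question (called MyAUGraphPlayer here, as in the book's example), and recordFile is a hypothetical added ExtAudioFileRef created with ExtAudioFileCreateWithURL and primed once during setup with ExtAudioFileWriteAsync(player->recordFile, 0, NULL):

static OSStatus RecordTapProc(void *inRefCon,
                              AudioUnitRenderActionFlags *ioActionFlags,
                              const AudioTimeStamp *inTimeStamp,
                              UInt32 inBusNumber,
                              UInt32 inNumberFrames,
                              AudioBufferList *ioData)
{
    MyAUGraphPlayer *player = (MyAUGraphPlayer *)inRefCon;
    // Only capture after the unit has produced its output for this slice.
    if (*ioActionFlags & kAudioUnitRenderAction_PostRender) {
        ExtAudioFileWriteAsync(player->recordFile, inNumberFrames, ioData);
    }
    return noErr;
}

// Registered once, e.g. after AUGraphInitialize:
CheckError(AudioUnitAddRenderNotify(player->outputUnit, RecordTapProc, player),
           "Couldn't add render notify for recording");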

Audio Unit background conflict

I have an audio analysis app using Audio Units that works perfectly when the app is run in isolation. However, if there are other audio apps running in the background AudioUnitRender returns a -50 error.
Does anyone know a way to resolve this, so that AudioUnitRender works even when other audio apps are running?
Thanks in advance.
Audio session initiation
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setPreferredHardwareSampleRate:sampleRate error:&err];
[session setCategory:AVAudioSessionCategoryRecord error:&err];
[session setActive:YES error:&err];
[session setMode:AVAudioSessionModeMeasurement error:&err];
[session setDelegate:listener];
UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_None;
AudioSessionSetProperty (kAudioSessionProperty_OverrideAudioRoute,sizeof (audioRouteOverride),&audioRouteOverride);
I/O unit description:
OSStatus err;
AudioComponentDescription ioUnitDescription;
ioUnitDescription.componentType = kAudioUnitType_Output;
ioUnitDescription.componentSubType = kAudioUnitSubType_RemoteIO;
ioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
ioUnitDescription.componentFlags = 0;
ioUnitDescription.componentFlagsMask = 0;
// Declare and instantiate an audio processing graph
NewAUGraph(&processingGraph);
// Add an audio unit node to the graph, then instantiate the audio unit.
/*
An AUNode is an opaque type that represents an audio unit in the context
of an audio processing graph. You receive a reference to the new audio unit
instance, in the ioUnit parameter, on output of the AUGraphNodeInfo
function call.
*/
AUNode ioNode;
AUGraphAddNode(processingGraph, &ioUnitDescription, &ioNode);
AUGraphOpen(processingGraph); // indirectly performs audio unit instantiation
// Obtain a reference to the newly-instantiated I/O unit. Each Audio Unit
// requires its own configuration.
AUGraphNodeInfo(processingGraph, ioNode, NULL, &ioUnit);
// Initialize below.
AURenderCallbackStruct callbackStruct = {0};
UInt32 enableInput;
UInt32 enableOutput;
// Enable input and disable output.
enableInput = 1; enableOutput = 0;
callbackStruct.inputProc = RenderFFTCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus, &enableInput, sizeof(enableInput));
err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus, &enableOutput, sizeof(enableOutput));
err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Input,
kOutputBus, &callbackStruct, sizeof(callbackStruct));
// Set the stream format.
size_t bytesPerSample = [self ASBDForSoundMode];
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus, &streamFormat, sizeof(streamFormat));
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus, &streamFormat, sizeof(streamFormat));
// Disable system buffer allocation. We'll do it ourselves.
UInt32 flag = 1;
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus, &flag, sizeof(flag));}
Render callback:
RIOInterface* THIS = (__bridge_transfer RIOInterface *)inRefCon;
COMPLEX_SPLIT A = THIS->A;
void *dataBuffer = THIS->dataBuffer;
float *outputBuffer = THIS->outputBuffer;
FFTSetup fftSetup = THIS->fftSetup;
float *hammingWeights = THIS->hammingWeights;
uint32_t log2n = THIS->log2n;
uint32_t n = THIS->n;
uint32_t nOver2 = THIS->nOver2;
uint32_t stride = 1;
int bufferCapacity = THIS->bufferCapacity;
SInt16 index = THIS->index;
AudioUnit rioUnit = THIS->ioUnit;
OSStatus renderErr;
UInt32 bus1 = 1;
renderErr = AudioUnitRender(rioUnit, ioActionFlags,
inTimeStamp, bus1, inNumberFrames, THIS->bufferList);
if (renderErr < 0) {
return renderErr;
}
I discovered that this issue was occurring when another AVAudioSession was active in another app, in which case the first-initiated AVAudioSession's settings took priority over mine. I was trying to set the sampling frequency to 22050 Hz, but if the other audio session had it set to 44100 Hz then it remained at 44100.
I resolved the issue by making my code 'adaptive' to the other settings, e.g. with respect to the buffer size, so it would still work effectively (if not optimally) with audio settings that differed from my preference.
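Roughly, the adaptation looks like this (a sketch assuming an SDK with AVAudioSession's sampleRate and IOBufferDuration properties; streamFormat is the ASBD used to configure the I/O unit above):

[session setActive:YES error:&err];

// Read back what the hardware is actually running at; another app's session
// may have pinned these to values other than the ones we preferred.
double actualSampleRate = [AVAudioSession sharedInstance].sampleRate;
NSTimeInterval ioDuration = [AVAudioSession sharedInstance].IOBufferDuration;
UInt32 maxFrames = (UInt32)ceil(actualSampleRate * ioDuration);

// Build the stream format and size the FFT/analysis buffers from the actual
// values rather than the preferred ones.
streamFormat.mSampleRate = actualSampleRate;
// ... fill in the rest of streamFormat, allocate bufferList for maxFrames, etc.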
