Replace Bento4 with libav / ffmpeg

We use Bento4 - a really well designed SDK - to demux mp4 files in .mov containers. Decoding is done by our own codec, so only the raw (intraframe) samples are needed. So far this works quite straightforwardly:
AP4_Track* test_videoTrack = nullptr;
AP4_ByteStream* input = nullptr;
AP4_Result result = AP4_FileByteStream::Create(filename, AP4_FileByteStream::STREAM_MODE_READ, input);
AP4_File m_file(*input, true);
//
// Read movie tracks and metadata, find the video track
uint32_t m_width = 0, m_height = 0;
for (auto item = m_file.GetMovie()->GetTracks().FirstItem(); item != nullptr; item = item->GetNext())
{
    if (item->GetData()->GetType() == AP4_Track::TYPE_VIDEO)
    {
        test_videoTrack = item->GetData();
        break;
    }
}
if (test_videoTrack != nullptr)
{
    // track dimensions are stored as 16.16 fixed point
    m_width = (uint32_t)((double)test_videoTrack->GetWidth() / double(1 << 16));
    m_height = (uint32_t)((double)test_videoTrack->GetHeight() / double(1 << 16));
    std::string codec("unknown");
    auto sd = test_videoTrack->GetSampleDescription(0);
    AP4_String c;
    if (AP4_SUCCEEDED(sd->GetCodecString(c)))
    {
        codec = c.GetChars();
    }
    // Read the first raw sample for our own decoder
    AP4_Sample sample;
    AP4_DataBuffer sampleData;
    test_videoTrack->ReadSample(0, sample, sampleData);
}
For several reasons we would prefer replacing Bento4 with libav/ffmpeg (mainly because we already have it in the project and want to reduce dependencies).
How would we (preferably in pseudo-code) replace the Bento4 tasks done above with libav? Please remember that the codec we use is not in the ffmpeg library, so we cannot use the standard ffmpeg decoding examples: opening the media file that way simply fails, and without a decoder we get no size or any other info so far. What we need is to:
open the media file
get contained tracks (possibly also audio)
get track size / length info
get track samples by index

It turned out to be very easy:
AVFormatContext* inputFile = avformat_alloc_context();
avformat_open_input(&inputFile, filename, nullptr, nullptr);
avformat_find_stream_info(inputFile, nullptr);
// Get just two streams... first video & first audio
int videoStreamIndex = -1, audioStreamIndex = -1;
for (unsigned int i = 0; i < inputFile->nb_streams; i++)
{
    if (inputFile->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && videoStreamIndex == -1)
    {
        videoStreamIndex = i;
    }
    else if (inputFile->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audioStreamIndex == -1)
    {
        audioStreamIndex = i;
    }
}
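As an aside, av_find_best_stream can replace the manual loop; a minimal sketch (passing nullptr for the decoder argument, since our codec lives outside ffmpeg):
// Alternative: let libavformat pick the streams (returns the index, or a negative AVERROR)
int videoStreamIndex = av_find_best_stream(inputFile, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
int audioStreamIndex = av_find_best_stream(inputFile, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);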
Now test for the correct codec tag:
// get the codec fourcc as a string
// (av_fourcc_make_string replaced the older av_get_codec_tag_string)
char ct[AV_FOURCC_MAX_STRING_SIZE] = {0};
static const char* codec_id = "MPAK";
av_fourcc_make_string(ct, inputFile->streams[videoStreamIndex]->codecpar->codec_tag);
assert(strncmp(ct, codec_id, strlen(codec_id)) == 0);
I did not know that the sizes are set even before a codec is chosen (or even available).
// look up the size (already set on the stream parameters, no decoder needed)
Size2D mediasize(inputFile->streams[videoStreamIndex]->codecpar->width,
                 inputFile->streams[videoStreamIndex]->codecpar->height);
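Track length info (also on the list above) seems to be available the same way, straight from the stream, though some containers leave these fields unset; a hedged sketch:
// Length info from the stream header (may be 0 / AV_NOPTS_VALUE for some containers)
AVStream* vs = inputFile->streams[videoStreamIndex];
int64_t sampleCount = vs->nb_frames;
double durationSeconds = (vs->duration != AV_NOPTS_VALUE) ? vs->duration * av_q2d(vs->time_base) : 0.0;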
Seeking by frame and unpacking (video) is done like this:
AVStream* s = inputFile->streams[videoStreamIndex];
int64_t seek_ts = (int64_t(frame_index) * s->r_frame_rate.den * s->time_base.den)
                / (int64_t(s->r_frame_rate.num) * s->time_base.num);
// AVSEEK_FLAG_ANY is safe here: the codec is intraframe-only, so every sample is a sync point
av_seek_frame(inputFile, videoStreamIndex, seek_ts, AVSEEK_FLAG_ANY);
AVPacket pkt;
av_read_frame(inputFile, &pkt);
Now the packet contains a frame ready to unpack with our own decoder.
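For completeness, a teardown sketch so nothing leaks (names as above):
// release the payload of the last packet read, then close the demuxer
av_packet_unref(&pkt);
avformat_close_input(&inputFile);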

Related

Encoding of raw frames (D3D11Texture2D) to an rtsp stream using libav*

I have managed to create an rtsp stream using libav* and a DirectX texture (which I am obtaining from the GDI API using the BitBlt method). Here's my approach for creating a live rtsp stream:
Create output context and stream (skipping the checks here)
avformat_alloc_output_context2(&ofmt_ctx, NULL, "rtsp", rtsp_url); //RTSP
vid_codec = avcodec_find_encoder(ofmt_ctx->oformat->video_codec);
vid_stream = avformat_new_stream(ofmt_ctx,vid_codec);
vid_codec_ctx = avcodec_alloc_context3(vid_codec);
Set codec params
codec_ctx->codec_tag = 0;
codec_ctx->codec_id = ofmt_ctx->oformat->video_codec;
//codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
codec_ctx->width = width; codec_ctx->height = height;
codec_ctx->gop_size = 12;
//codec_ctx->gop_size = 40;
//codec_ctx->max_b_frames = 3;
codec_ctx->pix_fmt = target_pix_fmt; // AV_PIX_FMT_YUV420P
codec_ctx->framerate = { stream_fps, 1 };
codec_ctx->time_base = { 1, stream_fps};
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
Initialize video stream
if (avcodec_parameters_from_context(stream->codecpar, codec_ctx) < 0)
{
Debug::Error("Could not initialize stream codec parameters!");
return false;
}
AVDictionary* codec_options = nullptr;
if (codec->id == AV_CODEC_ID_H264) {
av_dict_set(&codec_options, "profile", "high", 0);
av_dict_set(&codec_options, "preset", "fast", 0);
av_dict_set(&codec_options, "tune", "zerolatency", 0);
}
// open video encoder
int ret = avcodec_open2(codec_ctx, codec, &codec_options);
if (ret<0) {
Debug::Error("Could not open video encoder: ", avcodec_get_name(codec->id), " error ret: ", AVERROR(ret));
return false;
}
stream->codecpar->extradata = codec_ctx->extradata;
stream->codecpar->extradata_size = codec_ctx->extradata_size;
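// Caution: this shares one extradata buffer between codec_ctx and codecpar,
// so both owners may try to free it. Copying the buffer (av_memdup) or
// calling avcodec_parameters_from_context() after avcodec_open2() avoids
// the potential double free.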
Start streaming
// Create new frame and allocate buffer
AVFrame* AllocateFrameBuffer(AVCodecContext* codec_ctx, double width, double height)
{
AVFrame* frame = av_frame_alloc();
std::vector<uint8_t> framebuf(av_image_get_buffer_size(codec_ctx->pix_fmt, width, height, 1));
av_image_fill_arrays(frame->data, frame->linesize, framebuf.data(), codec_ctx->pix_fmt, width, height, 1);
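// Caution: framebuf is a local vector, so the memory wired into frame->data
// here is released when this function returns; the later av_frame_get_buffer()
// call is what actually gives the frame safely owned storage.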
frame->width = width;
frame->height = height;
frame->format = static_cast<int>(codec_ctx->pix_fmt);
//Debug::Log("framebuf size: ", framebuf.size(), " frame format: ", frame->format);
return frame;
}
void RtspStream(AVFormatContext* ofmt_ctx, AVStream* vid_stream, AVCodecContext* vid_codec_ctx, char* rtsp_url)
{
printf("Output stream info:\n");
av_dump_format(ofmt_ctx, 0, rtsp_url, 1);
const int width = WindowManager::Get().GetWindow(RtspStreaming::WindowId())->GetTextureWidth();
const int height = WindowManager::Get().GetWindow(RtspStreaming::WindowId())->GetTextureHeight();
//DirectX BGRA to h264 YUV420p
SwsContext* conversion_ctx = sws_getContext(width, height, src_pix_fmt,
vid_stream->codecpar->width, vid_stream->codecpar->height, target_pix_fmt,
SWS_BICUBIC | SWS_BITEXACT, nullptr, nullptr, nullptr);
if (!conversion_ctx)
{
Debug::Error("Could not initialize sample scaler!");
return;
}
AVFrame* frame = AllocateFrameBuffer(vid_codec_ctx,vid_codec_ctx->width,vid_codec_ctx->height);
if (!frame) {
Debug::Error("Could not allocate video frame\n");
return;
}
if (avformat_write_header(ofmt_ctx, NULL) < 0) {
Debug::Error("Error occurred when writing header");
return;
}
if (av_frame_get_buffer(frame, 0) < 0) {
Debug::Error("Could not allocate the video frame data\n");
return;
}
int frame_cnt = 0;
//av start time in microseconds
int64_t start_time_av = av_gettime();
AVRational time_base = vid_stream->time_base;
AVRational time_base_q = { 1, AV_TIME_BASE };
// frame pixel data info
int data_size = width * height * 4;
uint8_t* data = new uint8_t[data_size];
// AVPacket* pkt = av_packet_alloc();
while (RtspStreaming::IsStreaming())
{
/* make sure the frame data is writable */
if (av_frame_make_writable(frame) < 0)
{
Debug::Error("Can't make frame writable");
break;
}
//get copy/ref of the texture
//uint8_t* data = WindowManager::Get().GetWindow(RtspStreaming::WindowId())->GetBuffer();
if (!WindowManager::Get().GetWindow(RtspStreaming::WindowId())->GetPixels(data, 0, 0, width, height))
{
Debug::Error("Failed to get frame buffer. ID: ", RtspStreaming::WindowId());
std::this_thread::sleep_for (std::chrono::seconds(2));
continue;
}
//printf("got pixels data\n");
// convert BGRA to yuv420 pixel format
int srcStrides[1] = { 4 * width };
if (sws_scale(conversion_ctx, &data, srcStrides, 0, height, frame->data, frame->linesize) < 0)
{
Debug::Error("Unable to scale d3d11 texture to frame. ", frame_cnt);
break;
}
//Debug::Log("frame pts: ", frame->pts, " time_base:", av_rescale_q(1, vid_codec_ctx->time_base, vid_stream->time_base));
frame->pts = frame_cnt++;
//frame_cnt++;
//printf("scale conversion done\n");
//encode to the video stream
int ret = avcodec_send_frame(vid_codec_ctx, frame);
if (ret < 0)
{
Debug::Error("Error sending frame to codec context! ",frame_cnt);
break;
}
AVPacket* pkt = av_packet_alloc();
//av_init_packet(pkt);
ret = avcodec_receive_packet(vid_codec_ctx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
//av_packet_unref(pkt);
av_packet_free(&pkt);
continue;
}
else if (ret < 0)
{
Debug::Error("Error during receiving packet: ",AVERROR(ret));
//av_packet_unref(pkt);
av_packet_free(&pkt);
break;
}
if (pkt->pts == AV_NOPTS_VALUE)
{
//Write PTS
//Duration between 2 frames (us)
int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(vid_stream->r_frame_rate);
//Parameters
pkt->pts = (double)(frame_cnt * calc_duration) / (double)(av_q2d(time_base) * AV_TIME_BASE);
pkt->dts = pkt->pts;
pkt->duration = (double)calc_duration / (double)(av_q2d(time_base) * AV_TIME_BASE);
}
int64_t pts_time = av_rescale_q(pkt->dts, time_base, time_base_q);
int64_t now_time = av_gettime() - start_time_av;
if (pts_time > now_time)
av_usleep(pts_time - now_time);
//pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
//pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
//pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
//pkt->pos = -1;
//write frame and send
if (av_interleaved_write_frame(ofmt_ctx, pkt)<0)
{
Debug::Error("Error muxing packet, frame number:",frame_cnt);
break;
}
//Debug::Log("RTSP streaming...");
//sstd::this_thread::sleep_for(std::chrono::milliseconds(1000/20));
//av_packet_unref(pkt);
av_packet_free(&pkt);
}
//av_free_packet(pkt);
delete[] data;
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
av_write_trailer(ofmt_ctx);
av_frame_unref(frame);
av_frame_free(&frame);
printf("streaming thread CLOSED!\n");
}
Now, this allows me to connect to my rtsp server and maintain the connection. However, on the rtsp client side I am getting either a gray frame or a single static frame (screenshot omitted).
Would appreciate it if you could help with the following questions:
Firstly, why is the stream not working in spite of the continued connection to the server and the frames being updated?
Video codec: by default the rtsp format uses the Mpeg4 codec; is it possible to use h264? When I manually set it to AV_CODEC_ID_H264, the program fails at avcodec_open2 with a return value of -22.
Do I need to create and allocate a new "AVFrame" and "AVPacket" for every frame? Or can I just reuse a global variable for this?
Do I need to explicitly add some code for real-time streaming? (Like the "-re" flag we use in the ffmpeg CLI.)
It would be great if you could point out some example code for creating a livestream. I have checked the following resources:
https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/encode_video.c
streaming FLV to RTMP with FFMpeg using H264 codec and C++ API to flv.js
https://medium.com/swlh/streaming-video-with-ffmpeg-and-directx-11-7395fcb372c4
Update
While testing I found that I am able to play the stream using ffplay, while it gets stuck on the VLC player. (The ffplay log screenshot is not included here.)
The basic construct and initialization seem to be okay. Find below my responses to your questions.
why is the stream not working in spite of the continued connection to the server and the frames being updated?
If you're getting an error or a broken stream, you may want to check the presentation and decoding timestamps (pts/dts) of your packets.
In your code, I notice that you're taking time_base from the video stream object, which is not guaranteed to be the same as the codec->time_base value and usually varies depending upon the active stream.
AVRational time_base = vid_stream->time_base;
AVRational time_base_q = { 1, AV_TIME_BASE };
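One way to keep those consistent is to stamp each frame in the encoder's time_base and rescale the packet into the stream's time_base before muxing; a minimal sketch against the question's variables (pkt, vid_codec_ctx, vid_stream):
// After avcodec_receive_packet() succeeds, convert the timestamps from the
// encoder time base ({1, stream_fps}) to the muxer's stream time base:
av_packet_rescale_ts(pkt, vid_codec_ctx->time_base, vid_stream->time_base);
pkt->stream_index = vid_stream->index;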
Video codec. By default the rtsp format uses the Mpeg4 codec; is it possible to use h264?
I don't see why not... RTSP is just a protocol for carrying your packets over the network, so you should be able to use AV_CODEC_ID_H264 for encoding the stream.
Do I need to create and allocate a new "AVFrame" and "AVPacket" for every frame? Or can I just reuse a global variable for this?
In libav, during the encoding process a single packet is used for encoding a video frame, while there can be multiple audio frames in a single packet. I should reference this, but can't seem to find any source at the moment. But anyway, the point is that you would need a fresh packet every time (allocating a new one, or reusing one after av_packet_unref).
Do I need to explicitly add some code for real-time streaming? (Like the "-re" flag in ffmpeg.)
You don't need to add anything else for real-time streaming, although you might want to limit the number of frame updates that you pass to the encoder to save some performance.
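A simple frame-rate cap could look like this (illustrative sketch; stream_fps and the loop condition are taken from the question's code):
#include <chrono>
#include <thread>
// ...
const auto frame_interval = std::chrono::microseconds(1000000 / stream_fps);
auto next_tick = std::chrono::steady_clock::now();
while (RtspStreaming::IsStreaming())
{
    next_tick += frame_interval;
    // ... capture, convert, encode and mux one frame ...
    std::this_thread::sleep_until(next_tick); // wake up once per frame period
}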
For me, the difference between a good capture in ffplay and a bad capture in VLC (for UDP packets) was the pkt_size=xxx attribute (ffmpeg -re -i test.mp4 -f mpegts udp://127.0.0.1:23000?pkt_size=1316) (VLC: open media, network tab, udp://@:23000:pkt_size=1316). Only when pkt_size is defined (and equal on both sides) is VLC able to capture.

FFmpeg muxing to avi

I have a program that successfully shows an h264 stream using SDL: I get an h264 frame, decode it using ffmpeg, and draw it using SDL.
I can also write frames to a file (using fwrite) and play this file through ffplay.
But I want to mux the data into an avi container, and I'm facing problems with av_write_frame.
Here is my code:
...
/*Initializing format context - outFormatContext is the member of my class*/
AVOutputFormat *outFormat;
outFormat = av_guess_format(NULL,"out.avi",NULL);
outFormat->video_codec = AV_CODEC_ID_H264;
outFormat->audio_codec = AV_CODEC_ID_NONE;
avformat_alloc_output_context2(&outFormatContext, outFormat, NULL, "out.avi");
AVCodec *outCodec;
AVStream *outStream = add_stream(outFormatContext, &outCodec, outFormatContext->oformat->video_codec);
avcodec_open2(outStream->codec, outCodec, NULL);
av_dump_format(outFormatContext, 0, "out.avi", 1);
if (avio_open(&outFormatContext->pb, "out.avi", AVIO_FLAG_WRITE) < 0)
throw Exception("Couldn't open file");
if (avformat_write_header(outFormatContext, NULL) < 0)
throw Exception("Couldn't write to file");
//No exceptions are thrown here - so a 6 KB header ends up in out.avi.
...
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec))
throw("Could not find encoder");
st = avformat_new_stream(oc, *codec);
if (!st)
throw ("Could not allocate stream");
st->id = oc->nb_streams-1;
c = st->codec;
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
c->width = 1920;
c->height = 1080;
c->pix_fmt = PIX_FMT_YUV420P;
c->flags = 0;
c->time_base.num = 1;
c->time_base.den = 25;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
return st;
}
...
/* Part of the decoding loop. There is an AVPacket packet - an h264 packet */
int ret = av_write_frame(outFormatContext, &packet); //it returns code -22 - Invalid argument
if (avcodec_decode_video2(pCodecCtx, pFrame, &frameDecoded, &packet) < 0)
return;
if (frameDecoded)
{
//SDL stuff
}
Also I tried to use avcodec_encode_video2 (to encode pFrame back to H264) next to the SDL stuff, but the encoding is not working - I get empty packets. That is the second problem.
Using av_interleaved_write_frame causes an access violation.
I copied the code of the muxing part from the ffmpeg muxing example (https://www.ffmpeg.org/doxygen/2.1/doc_2examples_2muxing_8c-example.html)
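For what it's worth, a frequent cause of -22 (AVERROR(EINVAL)) from av_write_frame is packets reaching the muxer without valid, monotonically increasing timestamps; a hedged sketch of stamping each packet before writing (frameIndex is a hypothetical counter; 1/25 matches the time base set in add_stream):
// Hypothetical fix sketch: give every packet timestamps in the stream time base
packet.stream_index = outStream->index;
packet.pts = av_rescale_q(frameIndex, AVRational{1, 25}, outStream->time_base);
packet.dts = packet.pts; // assuming no B-frame reordering
packet.duration = av_rescale_q(1, AVRational{1, 25}, outStream->time_base);
++frameIndex;
int ret = av_write_frame(outFormatContext, &packet);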

How to fill audio AVFrame (ffmpeg) with the data obtained from CMSampleBufferRef (AVFoundation)?

I am writing a program for streaming live audio and video from a webcam to an rtmp-server. I work on Mac OS X 10.8, so I use the AVFoundation framework for obtaining audio and video frames from the input devices. These frames arrive in the delegate:
-(void) captureOutput:(AVCaptureOutput*)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection*)connection,
where sampleBuffer contains audio or video data.
When I receive audio data in the sampleBuffer, I'm trying to convert this data into an AVFrame and encode the AVFrame with libavcodec:
aframe = avcodec_alloc_frame(); //AVFrame *aframe;
int got_packet, ret;
CMItemCount numSamples = CMSampleBufferGetNumSamples(sampleBuffer); //CMSampleBufferRef
NSUInteger channelIndex = 0;
CMBlockBufferRef audioBlockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
size_t audioBlockBufferOffset = (channelIndex * numSamples * sizeof(SInt16));
size_t lengthAtOffset = 0;
size_t totalLength = 0;
SInt16 *samples = NULL;
CMBlockBufferGetDataPointer(audioBlockBuffer, audioBlockBufferOffset, &lengthAtOffset, &totalLength, (char **)(&samples));
const AudioStreamBasicDescription *audioDescription = CMAudioFormatDescriptionGetStreamBasicDescription(CMSampleBufferGetFormatDescription(sampleBuffer));
aframe->nb_samples =(int) numSamples;
aframe->channels=audioDescription->mChannelsPerFrame;
aframe->sample_rate=(int)audioDescription->mSampleRate;
//my webCamera configured to produce 16bit 16kHz LPCM mono, so sample format hardcoded here, and seems to be correct
avcodec_fill_audio_frame(aframe, aframe->channels, AV_SAMPLE_FMT_S16,
(uint8_t *)samples,
aframe->nb_samples *
av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) *
aframe->channels, 0);
//encoding audio
ret = avcodec_encode_audio2(c, &pkt, aframe, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
exit(1);
}
The problem is that when I get the frames formed this way, I can hear the wanted sound, but it is slowed down and discontinuous (as if after each data frame the same frame of silence follows). It seems that something is wrong in the transformation from CMSampleBuffer to AVFrame, because a preview from the microphone created with AVFoundation from the same sample buffers plays normally.
I would be grateful for your help.
UPD: Creating and initializing the AVCodecContext structure
audio_codec= avcodec_find_encoder(AV_CODEC_ID_AAC);
if (!(audio_codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(AV_CODEC_ID_AAC));
exit(1);
}
audio_st = avformat_new_stream(oc, audio_codec); //AVFormatContext *oc;
if (!audio_st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
audio_st->id=1;
audio_st->codec->sample_fmt= AV_SAMPLE_FMT_S16;
audio_st->codec->bit_rate = 64000;
audio_st->codec->sample_rate= 16000;
audio_st->codec->channels=1;
audio_st->codec->codec_type= AVMEDIA_TYPE_AUDIO;
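One possible culprit (an assumption, not a confirmed diagnosis): AAC encoders consume fixed-size frames of c->frame_size samples (typically 1024), while CMSampleBuffer delivers whatever count the capture session produces, so feeding the buffers through unchanged can distort timing. libavutil's audio FIFO can re-chunk the input; a sketch reusing the question's variables (c, samples, numSamples, pkt, got_packet, ret), mono S16 assumed:
// Sketch: buffer incoming samples, then encode in frame_size chunks
static AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 1, c->frame_size);
void *indata[1] = { samples };
av_audio_fifo_write(fifo, indata, (int)numSamples);
while (av_audio_fifo_size(fifo) >= c->frame_size) {
    AVFrame *f = av_frame_alloc();
    f->nb_samples = c->frame_size;
    f->format = AV_SAMPLE_FMT_S16;
    f->channel_layout = AV_CH_LAYOUT_MONO;
    av_frame_get_buffer(f, 0); // allocate the frame's own storage
    av_audio_fifo_read(fifo, (void **)f->data, c->frame_size);
    // pts handling omitted here for brevity
    ret = avcodec_encode_audio2(c, &pkt, f, &got_packet);
    av_frame_free(&f);
}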

FFmpeg remux without decode/encode

I want to use the ffmpeg libraries to save an rtsp stream to a local mp4 file without decoding.
Both the input stream and the output file use the H264+AAC codecs.
For now I use the following code to read packets from the input stream and write them to the output file.
...
av_write_header(oFmtCtx);
av_init_packet(&packet);
int j = 0;
while (av_read_frame(pIFmtCtx, &packet) >= 0 && j < 140/*temp use to get a period of the stream*/)
{
//now I only output the audio stream
if (packet.stream_index == audioStream)
{
AVPacket pkt;
av_init_packet(&pkt);
pkt.size = packet.size;
pkt.data = packet.data;
pkt.dts = AV_NOPTS_VALUE;
pkt.pts = AV_NOPTS_VALUE;
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index = oStream->index;
if (av_interleaved_write_frame(oFmtCtx, &pkt) != 0)
{
LOGI("Error while writing audio frame\n");
break;
}
}
j++;
}
av_write_trailer(oFmtCtx);
The file is generated, and when I open it using VLC, the playback lasts for the right time but no audio is output.
Any help will be greatly appreciated. Thanks in advance.
Bolton
Finally I made it. Below is my code:
while (j < 140)
{
av_read_frame(pIFmtCtx, &packet);
pkt_stream_index = packet.stream_index;
pkt_pts = packet.pts;
pkt_dts = packet.dts;
if (packet.stream_index == audioStream)
{
packet.stream_index = oStream->index;
av_write_frame(oFmtCtx, &packet);
packet.stream_index = pkt_stream_index;
packet.pts = pkt_pts;
packet.dts = pkt_dts;
}
av_free_packet(&packet);
j++;
}
I don't know the reason but it works!
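A plausible explanation (hedged): the first version overwrote pts/dts with AV_NOPTS_VALUE, so the muxer had no way to place the audio on the timeline, while this version keeps the demuxer's timestamps intact. When input and output time bases differ, the timestamps also need rescaling; a sketch of the same loop in those terms:
// Remux sketch: keep timestamps, rescale between time bases, unref each packet
AVStream *inStream = pIFmtCtx->streams[audioStream];
while (av_read_frame(pIFmtCtx, &packet) >= 0) {
    if (packet.stream_index == audioStream) {
        packet.stream_index = oStream->index;
        av_packet_rescale_ts(&packet, inStream->time_base, oStream->time_base);
        av_interleaved_write_frame(oFmtCtx, &packet);
    }
    av_free_packet(&packet); // era-appropriate; av_packet_unref in current FFmpeg
}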

Audio Unit and Writing to file

I'm creating a real-time audio sequencer app on OS X.
The real-time synth part is implemented using AURenderCallback.
Now I'm writing a function to save the rendered result to a wave file (44100 Hz 16 bit stereo).
The format for the render-callback function is 44100 Hz 32 bit float stereo interleaved.
I'm using ExtAudioFileWrite to write to the file.
But the ExtAudioFileWrite function returns the error code 1768846202.
I searched for 1768846202 but couldn't find any information.
Would you give me some hints?
Thank you.
Here is the code.
outFileFormat.mSampleRate = 44100;
outFileFormat.mFormatID = kAudioFormatLinearPCM;
outFileFormat.mFormatFlags =
kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
outFileFormat.mBitsPerChannel = 16;
outFileFormat.mChannelsPerFrame = 2;
outFileFormat.mFramesPerPacket = 1;
outFileFormat.mBytesPerFrame =
outFileFormat.mBitsPerChannel / 8 * outFileFormat.mChannelsPerFrame;
outFileFormat.mBytesPerPacket =
outFileFormat.mBytesPerFrame * outFileFormat.mFramesPerPacket;
AudioBufferList *ioList;
ioList = (AudioBufferList*)calloc(1, sizeof(AudioBufferList)
+ 2 * sizeof(AudioBuffer));
ioList->mNumberBuffers = 2;
ioList->mBuffers[0].mNumberChannels = 1;
ioList->mBuffers[0].mDataByteSize = allocByteSize / 2;
ioList->mBuffers[0].mData = ioDataL;
ioList->mBuffers[1].mNumberChannels = 1;
ioList->mBuffers[1].mDataByteSize = allocByteSize / 2;
ioList->mBuffers[1].mData = ioDataR;
...
while (1) {
//Fill buffer by using render callback func.
RenderCallback(self, nil, nil, 0, frames, ioList);
//I want to create a one-second file.
if (renderedFrames >= 44100) break;
err = ExtAudioFileWrite(outAudioFileRef, frames, ioList);
if (err != noErr){
NSLog(@"ERROR AT WRITING TO FILE");
goto errorExit;
}
}
Some of the error codes are actually four-character codes. The Core Audio book provides a nice function to handle errors.
static void CheckError(OSStatus error, const char *operation)
{
if (error == noErr) return;
char str[20];
// see if it appears to be a 4-char-code
*(UInt32 *)(str + 1) = CFSwapInt32HostToBig(error);
if (isprint(str[1]) && isprint(str[2]) && isprint(str[3]) && isprint(str[4])) {
str[0] = str[5] = '\'';
str[6] = '\0';
} else
// no, format it as an integer
sprintf(str, "%d", (int)error);
fprintf(stderr, "Error: %s (%s)\n", operation, str);
exit(1);
}
Use it like this:
CheckError(ExtAudioFileSetProperty(outputFile,
kExtAudioFileProperty_CodecManufacturer,
sizeof(codec),
&codec), "Setting codec.");
Before you can do any sort of debugging, you probably need to figure out what that error message actually means. Have you tried passing that status code to GetMacOSStatusErrorString() or GetMacOSStatusCommentString()? They aren't documented very well, but they are declared in CoreServices/CarbonCore/Debugging.h.
