I changed in my code avcodec_decode_audio3 to avcodec_decode_audio4 and added the frame handling. But now I cannot decode AAC frames anymore.
Why does avcodec_decode_audio4 return -22 (invalid argument)?
Following the answer below, does this have something to do with parameters in the AVCodecContext that need to be set?
I had to use avcodec_decode_audio4 because I updated my ffmpeg and then got the following error:
[NULL # 0xb14f020] Custom get_buffer() for use withavcodec_decode_audio3() detected.
Overriding with avcodec_default_get_buffer
[NULL # 0xb14f020] Please port your application to avcodec_decode_audio4()
According to Buffer error in avcodec_decode_audio4() this is a regression, is there any other solution for this than going back to ffmpeg < 0.8 ?
The decoder using avcodec_decode_audio4:
// --- Globals shared with my_frame_decoder() below ---
AVCodec *codec;
AVCodecContext *avCtx;
AVFrame * decoded_frame = NULL;
// Scratch output buffer; NOTE(review): allocated here but never freed, and
// apparently unused by the decode loop below.
uint8_t *outbuf = static_cast<uint8_t *>(malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE));
AVPacket avPacket;
// Entry point: set up an AAC decoder, then loop forever decoding frames.
main(){
av_register_all();
codec = avcodec_find_decoder(CODEC_ID_AAC);
//set parameters
avCtx = avcodec_alloc_context3(codec);
avCtx->channels = 1;
avCtx->sample_rate = 44100;
// NOTE(review): bit_rate of 16 (bits per second) looks wrong -- presumably
// 16000 was meant; verify.
avCtx->bit_rate=16;
// NOTE(review): the result of avcodec_find_decoder() is never checked before
// use, and raw AAC typically needs extradata (AudioSpecificConfig) or ADTS
// framing -- a likely cause of AVERROR(EINVAL) (-22) from
// avcodec_decode_audio4(); confirm against the input's framing.
if (avcodec_open2(avCtx, codec, NULL) < 0) printf("Could not open codec\n");
av_init_packet(&avPacket);
//Main decoder loop
while(1)
my_frame_decoder();
return 0;
}
// Decode one chunk of input: feed avPacket repeatedly into
// avcodec_decode_audio4() until the packet is fully consumed.
void my_frame_decoder() {
//get data
...
avPacket.size = numBytes;
avPacket.data = inputBytes;
int len;
while (avPacket.size > 0) {
int got_frame = 0;
// Lazily allocate the (global) frame on first use, reset it on reuse.
if (!decoded_frame) {
if (!(decoded_frame = avcodec_alloc_frame())) {
printf("out of memory");
return;
}
} else {
avcodec_get_frame_defaults(decoded_frame);
}
//-------------------->> returns always -22
len = avcodec_decode_audio4(avCtx, decoded_frame, &got_frame, &avPacket);
//do something with the decoded frame
...
// NOTE(review): a negative len (decode error) is not handled -- subtracting
// a negative value GROWS avPacket.size and walks data backwards, producing
// an infinite loop on error.  Should break/return when len < 0 and only
// consume bytes when len > 0.
avPacket.size -= len;
avPacket.data += len;
}
return;
}
After hours of searching, I found out that the dec_ctx passed to avcodec_decode_audio4() must be opened with a dec_codec initialised by av_find_best_stream():
1° av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1,
&dec_codec, 0);
2° dec_ctx = m_in_aud_strm->codec;
3° av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
4° avcodec_open2(dec_ctx, dec_codec, NULL)
.
.
.
5° avcodec_decode_audio4(dec_ctx, pFrame, &got_frame, &pkt);
I think the problem is the parameters set in your codec's context. Please refer to https://www.ffmpeg.org/doxygen/trunk/structAVCodecContext.html for setting the parameters, which has changed from avcodec_decode_audio3 to avcodec_decode_audio4.
No solution but a workaround is to go back to older builds. After testing various builds that work with avcodec_decode_audio3, I thought it might be useful for others to know that ffmpeg-0.10.14.tar.bz2 from https://ffmpeg.org/releases/ works.
<!-- language: c++ -->
#include <windows.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <errno.h>
#include <fcntl.h>
#include <ctype.h>
#include <math.h>
#include <wctype.h>
#include <wchar.h>
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <locale.h>
#include <signal.h>
#include <limits.h>
#include <float.h>
#include <iso646.h>
#undef NDEBUG
#include <assert.h>
// Use avcodec_send_packet() and avcodec_receive_frame().
//sample code
// Demux/decode loop using the modern send/receive API
// (avcodec_send_packet() / avcodec_receive_frame()), converting decoded
// planar-float (FLTP) AAC audio to interleaved 16-bit PCM.
// NOTE(review): relies on FormatContext, packet_ptr, pCodecCtx, audiostream,
// decoded_frame, ret and got_frame being declared/initialised by enclosing code.
while (av_read_frame (FormatContext, packet_ptr) >= 0)
{
/* some code */
if (packet_ptr->stream_index == audiostream)
{
assert(NULL == decoded_frame);
decoded_frame = av_frame_alloc();
// Feed the compressed packet to the decoder.
ret = avcodec_send_packet(pCodecCtx, packet_ptr);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
{
// Hard decode error: drop this packet and move on.
av_packet_unref (packet_ptr);
if (decoded_frame)
{
av_frame_unref(decoded_frame);
decoded_frame = NULL;
}
continue;
}
else
{
if (0 <= ret)
packet_ptr->size = 0; // packet fully consumed by the decoder
// Pull one decoded frame back out (EAGAIN here is normal: decoder needs more input).
ret = avcodec_receive_frame(pCodecCtx, decoded_frame);
if (ret >= 0)
got_frame = 1;
else
{
got_frame = 0;
if (decoded_frame)
{
av_frame_unref(decoded_frame);
decoded_frame = NULL;
}
av_packet_unref (packet_ptr);
continue;
}
}
if(AV_SAMPLE_FMT_FLTP == pCodecCtx->sample_fmt)//AAC sample format for Libav released 10-October-2020 (ffmpeg 4.3.1)
{
//now get the PCM data ready to play or save
int nb_samples = decoded_frame->nb_samples;
int channels = pCodecCtx->channels;
if(channels > 2) //for this small sample only 2 channels...
{
channels = 2;//it will convert multichannel media files to 2 channels, remember this...more code need to be modified
}
// 2 bytes per int16_t output sample.
int outputBufferLen = nb_samples * channels * 2;
int size_out //the size of the PCM data as sizeof(char)
=outputBufferLen;
// NOTE(review): malloc result is not checked before use.
char * buf = malloc(size_out);
short *outputBuffer=(short *)buf;
int in_samples = decoded_frame->nb_samples;
int i=0;
// FLTP is planar: extended_data[c] holds channel c's float samples.
float * inputChannel0 = (float *)decoded_frame->extended_data[0];
// Mono
if (pCodecCtx->channels==1)
{
for (i=0; i<in_samples; i++)
{
// Clamp to [-1, 1] then scale to the int16_t range.
float sample = *inputChannel0++;
if (sample<-1.0f) sample=-1.0f;
else if (sample>1.0f) sample=1.0f;
outputBuffer[i] = (int16_t) (sample * 32767.0f);//largest positive int16_t
}
}
// Stereo
else
{
float * inputChannel1 = (float *)decoded_frame->extended_data[1];
for (i=0; i < in_samples; i++)
{
// Interleave L/R while clamping and scaling each channel.
float sample = *inputChannel0++;
if (sample<-1.0f) sample=-1.0f;
else if (sample>1.0f) sample=1.0f;
float sample2 = *inputChannel1++;
if (sample2<-1.0f) sample2=-1.0f;
else if (sample2>1.0f) sample2=1.0f;
outputBuffer[i*2] = (int16_t) ((sample) * 32767.0f);
outputBuffer[i*2+1] = (int16_t) ((sample2) * 32767.0f);
}
}
//use buf and size_out here then free the buf
free(buf);
}
}
// Per-iteration cleanup: release the packet and the frame.
av_packet_unref (packet_ptr);
if (decoded_frame)
{
av_frame_unref(decoded_frame);
decoded_frame = NULL;
}
}
Hope it helps...
Related
There is an android_camera.c file in the ffmpeg project. I want to call NDK camera through ffmpeg, take out YUV data, and write it to the file. Currently, the code has been blocking the function wait_for_image_format.
android_camera.c
#include <stdio.h>
#include <math.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavutil/dict.h>
/*
 * Grab one frame from the Android camera via ffmpeg's "android_camera" input
 * device and append its raw (YUV) payload to out.yuv.
 * Returns 0 on success, -1 on setup failure.
 */
int main(int argc, char **argv) {
    int ret = -1;  /* fix: was read below without being set on the #else path */
    AVFormatContext *fmtCtx = NULL;
    AVPacket pkt1, *pcaket = &pkt1;

    /* 1. Register codecs and capture devices. */
    avcodec_register_all();
    avdevice_register_all();

    /* 2. Locate the android_camera input device. */
    AVInputFormat *inputFmt = av_find_input_format("android_camera");
    if (NULL != inputFmt) {
        printf("input device name:%s\n", inputFmt->name);
    } else {
        printf("Null point!\n");
        return -1;  /* fix: inputFmt is passed to avformat_open_input below */
    }
#if 1
    AVDictionary *avdict = NULL;
    av_dict_set(&avdict, "video_size", "hd720", 0);
    av_dict_set_int(&avdict, "camera_index", 1, 0);
    av_dict_set_int(&avdict, "input_queue_size", 2, 0);

    /* 3. Open the capture device. */
    fmtCtx = avformat_alloc_context();
    if (NULL == fmtCtx) {
        /* fix: the original printed a "success" message on this FAILURE path */
        printf("Could not allocate format context!\n");
        av_dict_free(&avdict);
        return -1;
    }
    ret = avformat_open_input(&fmtCtx, NULL, inputFmt, &avdict);
    av_dict_free(&avdict);
#else
    fmtCtx = avformat_alloc_context();
    if (!fmtCtx)
    {
        printf("avformat_alloc_contest error\n");
        exit(1);
    }
#endif
    if (ret < 0) {
        /* avformat_open_input frees the context on failure */
        printf("avformat_open_input error\n");
        return -1;
    }

    /* 4. Read one frame.  NOTE(review): the payload format depends on the
     * camera (the original author's produced YUV422) -- confirm for yours. */
    fmtCtx->flags &= (~0x4);  /* clears flag bit 0x4 -- presumably AVFMT_FLAG_NONBLOCK; TODO confirm intent */
    ret = av_read_frame(fmtCtx, pcaket);
    if (ret >= 0) {
        /* 5. Append the raw frame bytes to the output file. */
        FILE *fp = fopen("out.yuv", "a+");
        if (NULL != fp) {
            fwrite(pcaket->data, 1, pcaket->size, fp);
            fclose(fp);  /* fix: fclose was called unconditionally, crashing when fopen failed */
        }
        /* 6. Release the packet (only valid after a successful read). */
        av_free_packet(pcaket);
    }
    /* 7. Close the input device. */
    avformat_close_input(&fmtCtx);
    return 0;
}
I would like to be able to display a DICOM image in a Qt project with the same rendering quality a dedicated DICOM viewer program would give.
I was able to display it, but with very bad contrast. I heard you need to operate on the pixels, but I'm not sure. Do you have a working example?
EDIT: I add my code in case it helps you, I commented a lot of things because I noticed the result was exactly the same
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include <iostream>
#undef UNICODE
#undef _UNICODE
#include <dcmtk/config/osconfig.h>
#include <dcmtk/dcmdata/dctk.h>
#include <dcmtk/dcmimgle/dcmimage.h>
#include <QPixmap>
#include <QLabel>
#include <QImageReader>
using namespace std;
// Construct the main window, load a DICOM file with DCMTK, and render its
// first frame into the graphics view.
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    const char *file = "/home/x4rkz/project/Laura/QTImage/IMG00000";
    DicomImage *image = new DicomImage(file);
    if (image != NULL)
    {
        if (image->getStatus() == EIS_Normal)
        {
            // FIX: apply a min-max VOI window before rendering; without it the
            // 8-bit output can have very poor contrast (see DCMTK DicomImage docs).
            image->setMinMaxWindow();
            // Render at 8 bits per sample; the returned pointer refers to the
            // DicomImage's internal buffer, valid while `image` is alive.
            Uint8 *pixelData = (Uint8 *)(image->getOutputData(8));
            if (pixelData != NULL)
            {
                // NOTE(review): Format_Indexed8 without an explicit 256-entry
                // grayscale palette relies on Qt defaults -- consider setting a
                // gray ramp via setColorCount()/setColor() for correct display.
                QImage img(pixelData, image->getWidth(), image->getHeight(), QImage::Format_Indexed8);
                QGraphicsScene *graphic = new QGraphicsScene(this);
                // QPixmap::fromImage() deep-copies the pixels, so the pixmap
                // remains valid after `image` is deleted below.
                graphic->addPixmap(QPixmap::fromImage(img));
                ui->graphicsView->setScene(graphic);
            }
        } else
            cerr << "Error: cannot load DICOM image (" << DicomImage::getString(image->getStatus()) << ")" << endl;
        delete image;  // FIX: DicomImage was leaked in the original
    }
}
// Destructor: release the UI object created in the constructor.
MainWindow::~MainWindow()
{
delete ui;
}
#include "mainwindow.h"
#include <QApplication>
#include <iostream>
#undef UNICODE
#undef _UNICODE
#include <dcmtk/config/osconfig.h>
#include <dcmtk/dcmdata/dctk.h>
#include <dcmtk/dcmimgle/dcmimage.h>
#include <QPixmap>
#include <QLabel>
#include <QImageReader>
using namespace std;
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
MainWindow w;
w.show();
return a.exec();
}
As you can see, the result has no contrast.
If the rendered image has such a low contrast, you should try to set an appropriate VOI (Value of Interest) window, e.g. using image->setMinMaxWindow(). See the API documentation for details.
I have RTP packets with VP8 encoded data. I want to write it to a mkv file or webm file. I tried a bit, but I have not been successful yet. My code is as below
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
bool mfmedia_init_ffmpeg();
void mfmedia_ffprint(void *handle, int cnt, const char *format, va_list valist);
/* Entry point: run the ffmpeg muxer initialisation.  Its boolean result is
 * deliberately ignored, matching the original behaviour. */
int main()
{
    (void)mfmedia_init_ffmpeg();
    return 0;
}
bool mfmedia_init_ffmpeg()
{
bool ret = false;
AVCodecContext* context;
AVCodec* codec;
AVFormatContext* format;
AVStream* stream;
unsigned fps = 24;
unsigned width = 768;
unsigned height = 608;
av_register_all();
int err = 0;
char errorLog[128] = { 0 };
av_log_set_level(AV_LOG_TRACE);
av_log_set_callback(mfmedia_ffprint);
err = avformat_alloc_output_context2(&format, NULL, NULL, "o.webm");
if (err < 0)
{
printf("Cannot allocate output context: %s\n", av_make_error_string(errorLog, 128, err));
goto last;
}
codec = avcodec_find_encoder(AV_CODEC_ID_VP8);
if (!codec)
{
printf("Cannot find an encoder\n");
goto last;
}
context = avcodec_alloc_context3(codec);
if (!context)
{
printf("Cannot allocate a codec context\n");
goto last;
}
context->pix_fmt = AV_PIX_FMT_YUV420P;
context->width = width;
context->height = height;
context->time_base = (AVRational){1, fps};
err = avcodec_open2(context, codec, NULL);
if(err < 0)
{
printf("Cannot open codec: %s\n", av_make_error_string(errorLog, 128, err));
goto last;
}
stream = avformat_new_stream(format, codec);
if (!stream)
{
printf("Cannot create a new stream\n");
goto last;
}
//av_dump_format(format, 0, "o.webm", 1);
err = avio_open(&format->pb, "o.webm", AVIO_FLAG_WRITE);
if(err < 0)
{
printf("Cannot open output: %s\n", av_make_error_string(errorLog, 128, err));
goto last;
}
err = avformat_write_header(format, NULL);
if(err < 0)
{
printf("Cannot write header to stream: %s\n", av_make_error_string(errorLog, 128, err));
goto last;
}
ret = true;
last:
return ret;
}
/*
 * av_log callback: format the ffmpeg log message, append a timestamp, and
 * print it to stdout.  `handle` and `cnt` (the log level) are unused.
 */
void mfmedia_ffprint(void *handle, int cnt, const char *format, va_list valist)
{
    (void)handle;
    (void)cnt;
    enum { LOG_BUF_SZ = 38192, TS_RESERVE = 64 };  /* room kept for the timestamp suffix */
    char *log_buf = (char *)malloc(LOG_BUF_SZ);
    int length;
    if (log_buf)
    {
        time_t rawtime;
        time(&rawtime);
        /* FIX: vsprintf/sprintf had no bound -- a long log line overflowed the
         * heap buffer.  vsnprintf/snprintf bound every write and always
         * NUL-terminate, so the explicit terminator is no longer needed. */
        length = vsnprintf(log_buf, LOG_BUF_SZ - TS_RESERVE, format, valist);
        if (length < 0)
            length = 0;                              /* encoding error: print timestamp only */
        else if (length >= LOG_BUF_SZ - TS_RESERVE)
            length = LOG_BUF_SZ - TS_RESERVE - 1;    /* message was truncated */
        /* ctime() already ends with '\n'. */
        snprintf(log_buf + length, LOG_BUF_SZ - length, " : %s ", ctime(&rawtime));
        printf("%s", log_buf);
        fflush(stdout);
        free(log_buf);
    }
}
It is failing when I call avformat_write_header.
From trace log (towards end) I see
Setting default whitelist 'file,crypto'
: Fri Jan 19 16:58:57 2018
Using AVStream.codec to pass codec parameters to muxers is deprecated, use AVStream.codecpar instead.
: Fri Jan 19 16:58:57 2018
dimensions not set
: Fri Jan 19 16:58:57 2018
Cannot write header to stream: Invalid argument
Please let me know why avformat_write_header is failing.
This should work, enter this code snipped right above of: //av_dump_format(format, 0, "o.webm", 1);
/* copy the stream parameters to the muxer */
// Without this, avformat_write_header() fails with "dimensions not set"
// (EINVAL): muxers read the stream's codecpar, which avformat_new_stream()
// leaves unpopulated.
err = avcodec_parameters_from_context(stream->codecpar, context);
if (err < 0) {
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}
env: ubuntu 16.04 64 bit; ffmpeg 3.3.2 built with CUDA, cuvid, libnpp...
use ffmpeg cmd: ffmpeg -vsync 0 -c:v h264_cuvid -i test.264 -f rawvideo test.yuv works fine, the generated yuv file is ok.
BUT When I decode this 264 file by my code use 'h264_cuvid' decoder, something problem happens, this is my code:
#include <stdio.h>
#define __STDC_CONSTANT_MACROS
#ifdef _WIN32
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#ifdef __cplusplus
};
#endif
#endif
//test different codec
#define TEST_H264 1
#define TEST_HEVC 0
int main(int argc, char* argv[])
{
AVCodec *pCodec;
AVCodecContext *pCodecCtx= NULL;
AVCodecParserContext *pCodecParserCtx=NULL;
FILE *fp_in;
FILE *fp_out;
AVFrame *pFrame;
const int in_buffer_size=4096;
unsigned char in_buffer[in_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE]= {0};
unsigned char *cur_ptr;
int cur_size;
AVPacket packet;
int ret, got_picture;
#if TEST_HEVC
enum AVCodecID codec_id=AV_CODEC_ID_HEVC;
char filepath_in[]="bigbuckbunny_480x272.hevc";
#elif TEST_H264
AVCodecID codec_id=AV_CODEC_ID_H264;
char filepath_in[]="2_60_265to264.264";
#else
AVCodecID codec_id=AV_CODEC_ID_MPEG2VIDEO;
char filepath_in[]="bigbuckbunny_480x272.m2v";
#endif
char filepath_out[]="mainSend.yuv";
int first_time=1;
//av_log_set_level(AV_LOG_DEBUG);
avcodec_register_all();
// pCodec = avcodec_find_decoder(codec_id);
pCodec = avcodec_find_decoder_by_name("h264_cuvid");
if (!pCodec)
{
printf("Codec not found\n");
return -1;
}
pCodecCtx = avcodec_alloc_context3(pCodec);
if (!pCodecCtx)
{
printf("Could not allocate video codec context\n");
return -1;
}
pCodecParserCtx=av_parser_init(pCodec->id);
if (!pCodecParserCtx)
{
printf("Could not allocate video parser context\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
printf("Could not open codec\n");
return -1;
}
//Input File
fp_in = fopen(filepath_in, "rb");
if (!fp_in)
{
printf("Could not open input stream\n");
return -1;
}
//Output File
fp_out = fopen(filepath_out, "wb");
if (!fp_out)
{
printf("Could not open output YUV file\n");
return -1;
}
pFrame = av_frame_alloc();
av_init_packet(&packet);
while (1)
{
cur_size = fread(in_buffer, 1, in_buffer_size, fp_in);
if (cur_size == 0)
break;
cur_ptr=in_buffer;
while (cur_size>0)
{
int len = av_parser_parse2(
pCodecParserCtx, pCodecCtx,
&packet.data, &packet.size,
cur_ptr, cur_size,
AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
cur_ptr += len;
cur_size -= len;
if(packet.size==0)
continue;
//Some Info from AVCodecParserContext
printf("[Packet]Size:%6d\t",packet.size);
switch(pCodecParserCtx->pict_type)
{
case AV_PICTURE_TYPE_I:
printf("Type:I\tNumber:%4d\n",pCodecParserCtx->output_picture_number);
break;
case AV_PICTURE_TYPE_P:
printf("Type:P\t");
break;
case AV_PICTURE_TYPE_B:
printf("Type:B\t");
break;
default:
printf("Type:Other\t");
break;
}
printf("Number:%4d\n",pCodecParserCtx->output_picture_number);
AVFrame* myFrame = av_frame_alloc();
ret = avcodec_decode_video2(pCodecCtx, myFrame, &got_picture, &packet);
if (ret < 0)
{
printf("Decode Error.\n");
return ret;
}
if (got_picture)
{
if(first_time)
{
printf("\nCodec Full Name:%s\n",pCodecCtx->codec->long_name);
printf("width:%d\nheight:%d\n\n",pCodecCtx->width,pCodecCtx->height);
first_time=0;
}
//Y, U, V
for(int i=0; i<myFrame->height; i++)
{
fwrite(myFrame->data[0]+myFrame->linesize[0]*i,1,myFrame->width,fp_out);
}
for(int i=0; i<myFrame->height/2; i++)
{
fwrite(myFrame->data[1]+myFrame->linesize[1]*i,1,myFrame->width/2,fp_out);
}
for(int i=0; i<myFrame->height/2; i++)
{
fwrite(myFrame->data[2]+myFrame->linesize[2]*i,1,myFrame->width/2,fp_out);
}
// printf("pframe's width height %d %d\t key frame %d\n",myFrame->width,myFrame->height,myFrame->key_frame);
printf("Succeed to decode 1 frame!\n");
av_frame_free(&myFrame);
}
}
}
fclose(fp_in);
fclose(fp_out);
av_parser_close(pCodecParserCtx);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
av_free(pCodecCtx);
return 0;
}
In this demo code, I select h264_cuvid via avcodec_find_decoder_by_name("h264_cuvid");
BUT the code crash at fwrite(myFrame->data[2]+myFrame->linesize[2]*i,1,myFrame->width/2,fp_out);
So after debug with codeblocks, I found that there is no data in myFrame->data[2] codeblocks watching window
Any suggestion? thanks!
"h264" decoder's frame pix format is AV_PIX_FMT_YUV420P
BUT "h264_cuvid" decoder's frame pix format is AV_PIX_FMT_NV12
so, edit code to
// Write the luma (Y) plane, one row at a time, honouring the stride.
for(int i=0; i<myFrame->height; i++)
{
fwrite(myFrame->data[0]+myFrame->linesize[0]*i,1,myFrame->width,fp_out);
}
// NV12: data[1] is a single interleaved UV plane, `width` bytes per row
// (U and V alternate), half the height of the Y plane; there is no data[2].
for(int i=0; i<myFrame->height/2; i++)
{
fwrite(myFrame->data[1]+myFrame->linesize[1]*i,1,myFrame->width,fp_out);
}
Works fine
I'm new to the OpenAL library. I'm following the OpenAL programming guide, but I can't get playback to work.
I have this code extracted from page 10 of the OpenAL programming guide but still get no sound. I use OSX Snow Leopard; I know OSX doesn't have ALUT defined.
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
#include <OpenAL/al.h>
#include <OpenAL/alc.h>
using namespace std;
#define NUM_BUFFERS 3
#define BUFFER_SIZE 4096
int main(int argc, char **argv)
{
ALCdevice *dev;
ALCcontext *ctx;
struct stat statbuf;
Aluint buffer[NUM_BUFFERS];
Aluint source[NUM_SOURCES];
ALsizei size, freq;
ALenum format;
ALvoid *data;
// Initialization
dev = alcOpenDevice(NULL); // select the "preferred dev"
if (dev)
{
ctx = alcCreateContext(dev,NULL);
alcMakeContextCurrent(ctx);
}
// Check for EAX 2.0 support
// g_bEAX = alIsExtensionPresent("EAX2.0");
// Generate Buffers
alGetError(); // clear error code
alGenBuffers(NUM_BUFFERS, buffer);
if ((error = alGetError()) != AL_NO_ERROR)
{
DisplayALError("alGenBuffers :", error);
return 1;
}
// Load test.wav
loadWAVFile("sample.wav", &format, &data, &size, &freq, &loop);
if ((error = alGetError()) != AL_NO_ERROR)
{
DisplayALError("LoadWAVFile sample.wav : ", error);
alDeleteBuffers(NUM_BUFFERS, buffer);
return 1;
}
// Copy test.wav data into AL Buffer 0
alBufferData(buffer[0], format, data, size, freq);
if ((error = alGetError()) != AL_NO_ERROR)
{
DisplayALError("alBufferData buffer 0 : ", error);
alDeleteBuffers(NUM_BUFFERS, buffer);
return 1;
}
// Unload test.wav
unloadWAV(format, data, size, freq);
if ((error = alGetError()) != AL_NO_ERROR)
{
DisplayALError("UnloadWAV : ", error);
alDeleteBuffers(NUM_BUFFERS, buffer);
return 1;
}
// Generate Sources
alGenSources(1, source);
if ((error = alGetError()) != AL_NO_ERROR)
{
DisplayALError("alGenSources 1 : ", error);
return 1;
}
// Attach buffer 0 to source
alSourcei(source[0], AL_BUFFER, buffer[0]);
if ((error = alGetError()) != AL_NO_ERROR)
{
DisplayALError("alSourcei AL_BUFFER 0 : ", error);
}
// Exit
ctx = alcGetCurrentContext();
dev = alcGetContextsDevice(ctx);
alcMakeContextCurrent(NULL);
alcDestroyContext(ctx);
alcCloseDevice(dev);
return 0;
}
What am I missing to make this code work?
What am I doing wrong?
Any advice could help, thanks.
You are not calling alSourcePlay(source[0]) to start the playback.