libx264 Input picture width (640) is greater than stride (0) - ffmpeg

I'm trying to encode a series of Cairo surfaces using libav. Here is where I initialize the AV stuff:
AVStream* video_stream;
AVCodec* vcodec;
gint ret;
/* Setup video container */
avformat_alloc_output_context2(&img->video_format_context, NULL, NULL, filename);
if (img->video_format_context == NULL)
{
img_message(img, TRUE, _("Failed to find a suitable container for %s\n"),filename);
return FALSE;
}
ret = avio_open(&img->video_format_context->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0)
{
img_message(img, TRUE, _("Couldn't write output file %s\n"),filename);
return FALSE;
}
/* Setup video codec */
vcodec = avcodec_find_encoder(codec_id);
if (!vcodec)
{
img_message(img, TRUE, _("Couldn't find any encoder for %s\n"),filename);
return FALSE;
}
/* Create video stream */
video_stream = avformat_new_stream(img->video_format_context, vcodec);
if (! video_stream)
{
img_message(img, TRUE, _("Couldn't allocate video stream\n"));
return FALSE;
}
video_stream->id = 0;
/* Allocate video encoding context */
img->codec_context = avcodec_alloc_context3(vcodec);
if (! img->codec_context)
{
img_message(img, TRUE, _("Couldn't allocate video enconding context\n"));
return FALSE;
}
/* Setup video encoding context parameters */
img->codec_context->codec_id = codec_id;
img->codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
img->codec_context->width = img->video_size[0];
img->codec_context->height = img->video_size[1];
img->codec_context->sample_aspect_ratio = (struct AVRational) {1, 1};
img->codec_context->pix_fmt = vcodec->pix_fmts[0];
img->codec_context->framerate = av_d2q(frame_rate, INT_MAX);
if (codec_id == AV_CODEC_ID_VP8 || codec_id == AV_CODEC_ID_VP9 || codec_id == AV_CODEC_ID_THEORA || codec_id == AV_CODEC_ID_FLV1 ||
codec_id == AV_CODEC_ID_MPEG1VIDEO || codec_id == AV_CODEC_ID_MPEG2VIDEO)
img->codec_context->bit_rate = round(bitrate_crf * 1000000);
img->codec_context->time_base = av_inv_q(img->codec_context->framerate);
video_stream->time_base = img->codec_context->time_base;
if (img->video_format_context->oformat->flags & AVFMT_GLOBALHEADER)
img->codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
/* Some codecs require the CRF value */
if (codec_id == AV_CODEC_ID_H264 || codec_id == AV_CODEC_ID_H265)
{
gchar *crf = g_strdup_printf("%i", bitrate_crf);
av_opt_set(img->codec_context->priv_data, "crf", crf, AV_OPT_SEARCH_CHILDREN);
g_free(crf);
}
/* Set exporting stage to be multithreaded */
AVDictionary* opts = NULL;
av_dict_set(&opts, "threads", "auto", 0);
/* Open video encoder */
ret = avcodec_open2(img->codec_context, vcodec, &opts);
if (ret < 0)
{
img_message(img, TRUE, _("Failed to open the video encoder\n"));
return FALSE;
}
/* Copy video encoder parameters to output stream */
ret = avcodec_parameters_from_context(video_stream->codecpar, img->codec_context);
if (ret < 0)
{
img_message(img, TRUE, _("Failed to copy video encoder parameters to output stream\n"));
return FALSE;
}
/* AVFRAME stuff */
img->video_frame = av_frame_alloc();
img->video_frame->format = AV_PIX_FMT_RGBA;
img->video_frame->width = img->video_size[0];
img->video_frame->height = img->video_size[1];
av_frame_make_writable(img->video_frame);
ret = av_frame_get_buffer(img->video_frame, 1);
if (ret < 0)
img_message(img,TRUE, _("Could not allocate the video frame data\n"));
img->video_packet = av_packet_alloc();
And here is the function I call repeatedly (it is invoked from somewhere else); avcodec_send_frame() throws the error in the subject:
gint width, height, stride, row, col, offset;
uint8_t *pix;
/* Image info and pixel data */
width = cairo_image_surface_get_width( surface );
height = cairo_image_surface_get_height( surface );
stride = cairo_image_surface_get_stride( surface );
pix = cairo_image_surface_get_data( surface );
for( row = 0; row < height; row++ )
{
for( col = 0; col < width; col++ )
{
offset = 3 * col + row * img->video_frame->linesize[0];
img->video_frame->data[0][offset + 0] = pix[0];
img->video_frame->data[0][offset + 1] = pix[1];
img->video_frame->data[0][offset + 2] = pix[2];
}
}
img_export_encode_av_frame(img->video_frame, img->video_format_context, img->codec_context, img->video_packet);
return TRUE;
}
void img_export_encode_av_frame(AVFrame *frame, AVFormatContext *fmt, AVCodecContext *ctx, AVPacket *pkt)
{
gint ret;
/* send the frame to the encoder */
ret = avcodec_send_frame(ctx, frame);
if (ret < 0)
g_print("Error sending a frame for encoding\n");
while (ret >= 0)
{
ret = avcodec_receive_packet(ctx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return;
else if (ret < 0)
{
g_print("Error during encoding\n");
return;
}
av_interleaved_write_frame(fmt, pkt);
av_packet_unref(pkt);
}
}
I've googled as well, with no luck; it seems I'm the only one trying to encode a cairo surface. Grepping for the error message in the ffmpeg sources didn't help either. How do I set the stride? I read that ffmpeg sets it for me once I allocate the frame's buffer, but in my case it seems it doesn't. Where am I wrong?
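For reference, below is a minimal stride-aware sketch of that copy (my own code, under assumptions: the surface is CAIRO_FORMAT_ARGB32, which is 4 bytes per pixel and BGRA in memory on little-endian machines; rgb_frame was created with format AV_PIX_FMT_BGRA and the surface's dimensions; enc_frame is already allocated with the codec's width/height/pix_fmt). Two things it does differently from the loop above: av_frame_get_buffer() runs before any pixel is written, since that call is what fills in linesize[], and the source pointer advances row by row, which the loop above never does:

#include <string.h>
#include <cairo.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libswscale/swscale.h>

/* Copy a Cairo surface into a BGRA staging frame, honouring both
 * strides, then convert to the encoder's native pixel format
 * (libx264 consumes yuv420p, not RGBA). */
static int surface_to_frame(cairo_surface_t *surface, AVFrame *rgb_frame,
                            AVFrame *enc_frame, struct SwsContext **sws)
{
    int width  = cairo_image_surface_get_width(surface);
    int height = cairo_image_surface_get_height(surface);
    int stride = cairo_image_surface_get_stride(surface); /* bytes per row */
    uint8_t *pix = cairo_image_surface_get_data(surface);
    int row, ret;

    /* Allocate first: av_frame_get_buffer() sets linesize[];
     * av_frame_make_writable() only makes sense on an allocated frame. */
    if (!rgb_frame->data[0] && (ret = av_frame_get_buffer(rgb_frame, 0)) < 0)
        return ret;
    if ((ret = av_frame_make_writable(rgb_frame)) < 0)
        return ret;

    /* The surface stride and the frame linesize may differ (padding),
     * so copy one row at a time, 4 bytes per pixel. */
    for (row = 0; row < height; row++)
        memcpy(rgb_frame->data[0] + row * rgb_frame->linesize[0],
               pix + row * stride, (size_t)width * 4);

    /* BGRA -> encoder format */
    *sws = sws_getCachedContext(*sws, width, height, AV_PIX_FMT_BGRA,
                                width, height,
                                (enum AVPixelFormat)enc_frame->format,
                                SWS_BILINEAR, NULL, NULL, NULL);
    if (!*sws)
        return AVERROR(EINVAL);
    sws_scale(*sws, (const uint8_t * const *)rgb_frame->data,
              rgb_frame->linesize, 0, height,
              enc_frame->data, enc_frame->linesize);
    return 0;
}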

Related

Request for ffmpeg raw data to mp4 container example

I have a binary file with raw h264 data which is arranged like this:
NAL(SPS), NAL(PPS), NAL(Frame), NAL(SPS), NAL(PPS)...
and I want to mux it (without encoding) into an mp4 container.
The muxing.c example in ffmpeg encodes yuv data, which is different from my case, and I have no idea how to change the example to do what I want...
I know the command ffmpeg -i h264file -c copy h264.mp4 does what I want, but I have to do it inside my program, so I need to know how to use the ffmpeg API to do the same thing; so far I cannot find any simple example of it. Does anyone have a hint on how to do it? Thanks
Update: I have written the code below from the references. It seems to create the mp4, but the timing is not correct: the frame rate and time information are lost and it plays very fast.
av_register_all();
int ret;
AVDictionary *opt = NULL;
//bool is264 = true;
const char * inputFileName = "input.264";
const char * outputFileName = "output.mp4";
AVFormatContext *ic = avformat_alloc_context();
if((ret = avformat_open_input(&ic, inputFileName, NULL, NULL)) < 0)
return -1;
// Get format info (retrieve stream information)
if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
return ret; // Couldn't find stream information
for (int i = 0; i < ic->nb_streams; i++)
{
AVStream *stream;
AVCodecContext *codec_ctx;
stream = ic->streams[i];
codec_ctx = stream->codec;
/* Reencode video & audio and remux subtitles etc. */
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* Open decoder */
ret = avcodec_open2(codec_ctx, avcodec_find_decoder(codec_ctx->codec_id), NULL);
if (ret < 0) {
//av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
}
}
// Dump information about file onto standard error
av_dump_format(ic, 0, inputFileName, 0);
AVFormatContext *oc;
avformat_alloc_output_context2(&oc, NULL, NULL, outputFileName);
if (!oc) {
//printf("Could not deduce output format from file extension: using MPEG.\n");
//avformat_alloc_output_context2(&oc, NULL, "mpeg", outputFileName);
return -1;
}
AVStream *ist = ic->streams[0];
AVCodec *out_vid_codec = avcodec_find_encoder(oc->oformat->video_codec);
if (NULL == out_vid_codec)
return -1; // Couldn't find video encoder
AVStream *out_vid_strm = avformat_new_stream(oc, out_vid_codec);
if (NULL == out_vid_strm)
return -1; // Couldn't output video stream
ret = avcodec_copy_context(out_vid_strm->codec, ist->codec);
if (ret < 0)
return ret; // Failed to copy context
ret = avio_open(&oc->pb, outputFileName, AVIO_FLAG_WRITE);
ret = avformat_write_header(oc, NULL);
AVPacket pkt;
while(1)
{
AVStream *in_stream, *out_stream;
ret = av_read_frame(ic, &pkt);
if (ret < 0)
break;
pkt.stream_index = 0;
in_stream = ic->streams[pkt.stream_index];
out_stream = oc->streams[pkt.stream_index];
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
//log_packet(ofmt_ctx, &pkt, "out");
ret = av_interleaved_write_frame(oc, &pkt);
if (ret < 0) {
fprintf(stderr, "Error muxing packet\n");
break;
}
av_packet_unref(&pkt);
}
av_write_trailer(oc);
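One thing worth ruling out for the timing problem: a raw .264 elementary stream carries no timestamps of its own, so the demuxer can only synthesize them from a frame rate it is told (it defaults to 25 fps, as far as I recall). That is what -framerate does on the command line; through the API the raw H.264 demuxer exposes, to my knowledge, the same knob as a private "framerate" option. A hedged sketch of the open call, with 30 fps as an assumed value:

AVDictionary *in_opts = NULL;
av_dict_set(&in_opts, "framerate", "30", 0);   /* must match the real stream */
ret = avformat_open_input(&ic, inputFileName, NULL, &in_opts);
av_dict_free(&in_opts);   /* options the demuxer didn't consume remain here */

With a correct input rate, the pts/dts rescaling already present in the loop above should carry sensible timing into the mp4.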

Output black when I decode h264 720p with ffmpeg

First, sorry for my English. When I decode h264 720p on the ardrone 2.0, my output is black and I can't see anything.
I have tried changing the value of pCodecCtx->pix_fmt = AV_PIX_FMT_BGR24; to pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;, and the value of pCodecCtxH264->pix_fmt = AV_PIX_FMT_BGR24; to pCodecCtxH264->pix_fmt = AV_PIX_FMT_YUV420P;, but then my program crashes. What am I doing wrong? Thank you; see part of my code:
av_register_all();
avcodec_register_all();
avformat_network_init();
// 1.2. Open video file
if(avformat_open_input(&pFormatCtx, drone_addr, NULL, NULL) != 0) {
mexPrintf("No conecct with Drone");
EndVideo();
return;
}
pCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
pCodecCtx = avcodec_alloc_context3(pCodec);
pCodecCtx->pix_fmt = AV_PIX_FMT_BGR24;
pCodecCtx->skip_frame = AVDISCARD_DEFAULT;
pCodecCtx->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
pCodecCtx->err_recognition = AV_EF_CAREFUL;
pCodecCtx->skip_loop_filter = AVDISCARD_DEFAULT;
pCodecCtx->workaround_bugs = FF_BUG_AUTODETECT;
pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx->codec_id = AV_CODEC_ID_H264;
pCodecCtx->skip_idct = AVDISCARD_DEFAULT;
pCodecCtx->width = 1280;
pCodecCtx->height = 720;
pCodecH264 = avcodec_find_decoder(AV_CODEC_ID_H264);
pCodecCtxH264 = avcodec_alloc_context3(pCodecH264);
pCodecCtxH264->pix_fmt = AV_PIX_FMT_BGR24;
pCodecCtxH264->skip_frame = AVDISCARD_DEFAULT;
pCodecCtxH264->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
pCodecCtxH264->err_recognition = AV_EF_CAREFUL;
pCodecCtxH264->skip_loop_filter = AVDISCARD_DEFAULT;
pCodecCtxH264->workaround_bugs = FF_BUG_AUTODETECT;
pCodecCtxH264->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtxH264->codec_id = AV_CODEC_ID_H264;
pCodecCtxH264->skip_idct = AVDISCARD_DEFAULT;
if(avcodec_open2(pCodecCtxH264, pCodecH264, &optionsDict) < 0)
{
mexPrintf("Error opening H264 codec");
return ;
}
pFrame_BGR24 = av_frame_alloc();
if(pFrame_BGR24 == NULL) {
mexPrintf("Could not allocate pFrame_BGR24\n");
return ;
}
// Determine required buffer size and allocate buffer
buffer_BGR24 =
(uint8_t *)av_mallocz(av_image_get_buffer_size(AV_PIX_FMT_BGR24,
pCodecCtx->width, ((pCodecCtx->height == 720) ? 720 : pCodecCtx->height) *
sizeof(uint8_t)*3,1));
// Assign buffer to image planes
av_image_fill_arrays(pFrame_BGR24->data, pFrame_BGR24->linesize,
buffer_BGR24,AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height,1);
// format conversion context
pConvertCtx_BGR24 = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_BGR24,
SWS_BILINEAR | SWS_ACCURATE_RND, 0, 0, 0);
// 1.6. get video frames
pFrame = av_frame_alloc();
av_init_packet(&packet);
packet.data = NULL;
packet.size = 0;
}
//Capture a frame
void video::capture(mxArray *plhs[]) {
if(av_read_frame(pFormatCtx, &packet) < 0){
mexPrintf("Error al leer frame");
return;
}
do {
do {
rest = avcodec_send_packet(pCodecCtxH264, &packet);
} while(rest == AVERROR(EAGAIN));
if(rest == AVERROR_EOF || rest == AVERROR(EINVAL)) {
printf("AVERROR(EAGAIN): %d, AVERROR_EOF: %d,
AVERROR(EINVAL): %d\n", AVERROR(EAGAIN), AVERROR_EOF,
AVERROR(EINVAL));
printf("fe_read_frame: Frame getting error (%d)!\n", rest);
return;
}
rest = avcodec_receive_frame(pCodecCtxH264, pFrame);
} while(rest == AVERROR(EAGAIN));
if(rest == AVERROR_EOF || rest == AVERROR(EINVAL)) {
// An error or EOF occurred; break out and return what
// we have so far.
printf("AVERROR(EAGAIN): %d, AVERROR_EOF: %d, AVERROR(EINVAL): %d\n",
AVERROR(EAGAIN), AVERROR_EOF, AVERROR(EINVAL));
printf("fe_read_frame: EOF or some othere decoding error (%d)!\n",
rest);
return;
}
// 2.1.1. convert frame to GRAYSCALE [or BGR] for OpenCV
sws_scale(pConvertCtx_BGR24, (const uint8_t* const*)pFrame->data,
pFrame->linesize, 0,pCodecCtx->height, pFrame_BGR24->data,
pFrame_BGR24->linesize);
//}
av_packet_unref(&packet);
av_init_packet(&packet);
mwSize dims[] = {(pCodecCtx->width) * ((pCodecCtx->height == 720) ? 720 : pCodecCtx->height) * sizeof(uint8_t) * 3};
plhs[0] = mxCreateNumericArray(1, dims, mxUINT8_CLASS, mxREAL);
//plhs[0] = mxCreateDoubleMatrix(pCodecCtx->height, pCodecCtx->width, mxREAL);
point = mxGetPr(plhs[0]);
memcpy(point, pFrame_BGR24->data[0], (pCodecCtx->width) * (pCodecCtx->height) * sizeof(uint8_t) * 3);
}
Go to the debugger and look at your memcpy. I am not sure it works for all the dimensions you want. There may also be more memory problems: for example, check what values buffer_BGR24 and pFrame hold. I bet that sometimes they do not hold valid values. Check them in the code.
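To make that concrete, here is a small sketch of the check the comment suggests (my names, not the poster's): av_image_get_buffer_size() does the size arithmetic, and the copy walks rows via linesize[0], so any padding sws_scale leaves between rows is never stepped over:

#include <string.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

int w = pCodecCtx->width, h = pCodecCtx->height;
/* with align=1 this is exactly w*h*3 for packed BGR24 */
int size = av_image_get_buffer_size(AV_PIX_FMT_BGR24, w, h, 1);
uint8_t *dst = av_malloc(size);
if (dst) {
    int y;
    for (y = 0; y < h; y++)
        memcpy(dst + (size_t)y * w * 3,   /* tightly packed destination */
               pFrame_BGR24->data[0] + (size_t)y * pFrame_BGR24->linesize[0],
               (size_t)w * 3);
    /* ... hand dst to the mxArray here ... */
    av_free(dst);
}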

RTSP relay using ffmpeg library

I am implementing an RTSP relay using libavcodec (ffmpeg). Basically, it connects to an IP camera, which is an RTSP server, pulls the stream, and then pushes it to Wowza. I found it very difficult to locate any sample implementation of this on the internet, so I started implementing it using bits and pieces from here and there. I took code from samples that a) read an RTSP stream and write it to a file, and b) read from a file and serve it over RTSP.
This is the current state of the code:
/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2014 Andrey Utkin
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* API example for demuxing, decoding, filtering, encoding and muxing
* @example transcoding.c
*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
static AVFormatContext *ifmt_ctx,*temp;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;
static int open_input_file(const char *filename)
{
int ret;
unsigned int i;
ifmt_ctx = NULL;
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *stream;
AVCodecContext *codec_ctx;
stream = ifmt_ctx->streams[i];
codec_ctx = stream->codec;
/* Reencode video & audio and remux subtitles etc. */
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* Open decoder */
ret = avcodec_open2(codec_ctx,
avcodec_find_decoder(codec_ctx->codec_id), NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
}
}
av_dump_format(ifmt_ctx, 0, filename, 0);
return 0;
}
static int open_output_file(const char *filename)
{
AVStream *out_stream;
AVStream *in_stream;
AVCodecContext *dec_ctx, *enc_ctx;
AVCodec *encoder,*codec;
int ret;
unsigned int i;
ofmt_ctx = NULL;
avformat_open_input(&temp, "rtsp://localhost:8554/live", NULL, NULL);
avformat_alloc_output_context2(&ofmt_ctx, temp->oformat,"rtsp", filename);
if (!ofmt_ctx) {
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
return AVERROR_UNKNOWN;
}
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream) {
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
return AVERROR_UNKNOWN;
}
in_stream = ifmt_ctx->streams[i];
dec_ctx = in_stream->codec;
enc_ctx = out_stream->codec;
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* in this example, we choose transcoding to same codec */
encoder = avcodec_find_encoder(dec_ctx->codec_id);
printf("codec : %d",dec_ctx->codec_id);
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
return AVERROR_INVALIDDATA;
}
/* In this example, we transcode to same properties (picture size,
* sample rate etc.). These properties can be changed for output
* streams easily using filters */
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
enc_ctx->height = dec_ctx->height;
enc_ctx->width = dec_ctx->width;
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
/* take first format from list of supported formats */
enc_ctx->pix_fmt = encoder->pix_fmts[0];
//enc_ctx->codec_id = AV_CODEC_ID_H264;
/* video time_base can be set to whatever is handy and supported by encoder */
enc_ctx->time_base = dec_ctx->time_base;
} else {
enc_ctx->sample_rate = dec_ctx->sample_rate;
enc_ctx->channel_layout = dec_ctx->channel_layout;
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
/* take first format from list of supported formats */
enc_ctx->sample_fmt = encoder->sample_fmts[0];
enc_ctx->pix_fmt =AV_PIX_FMT_RGB24 ;
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
}
/* Third parameter can be used to pass settings to encoder */
//codec = avcodec_find_encoder(AV_CODEC_ID_H264);
ret = avcodec_open2(enc_ctx, encoder, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
return ret;
}
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
return AVERROR_INVALIDDATA;
} else {
/* if this stream must be remuxed */
ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
ifmt_ctx->streams[i]->codec);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
return ret;
}
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
av_dump_format(ofmt_ctx, 0, filename, 1);
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
return ret;
}
}
/* init muxer, write output file header */
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
return ret;
}
return 0;
}
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
AVCodecContext *enc_ctx, const char *filter_spec)
{
char args[512];
int ret = 0;
AVFilter *buffersrc = NULL;
AVFilter *buffersink = NULL;
AVFilterContext *buffersrc_ctx = NULL;
AVFilterContext *buffersink_ctx = NULL;
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
AVFilterGraph *filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
buffersrc = avfilter_get_by_name("buffer");
buffersink = avfilter_get_by_name("buffersink");
if (!buffersrc || !buffersink) {
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
ret = AVERROR_UNKNOWN;
goto end;
}
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
dec_ctx->time_base.num, dec_ctx->time_base.den,
dec_ctx->sample_aspect_ratio.num,
dec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
goto end;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
(uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end;
}
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
buffersrc = avfilter_get_by_name("abuffer");
buffersink = avfilter_get_by_name("abuffersink");
if (!buffersrc || !buffersink) {
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
ret = AVERROR_UNKNOWN;
goto end;
}
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout =
av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt),
dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
goto end;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
(uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
(uint8_t*)&enc_ctx->channel_layout,
sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
(uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
goto end;
}
} else {
ret = AVERROR_UNKNOWN;
goto end;
}
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if (!outputs->name || !inputs->name) {
ret = AVERROR(ENOMEM);
goto end;
}
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
&inputs, &outputs, NULL)) < 0)
goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
/* Fill FilteringContext */
fctx->buffersrc_ctx = buffersrc_ctx;
fctx->buffersink_ctx = buffersink_ctx;
fctx->filter_graph = filter_graph;
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
static int init_filters(void)
{
const char *filter_spec;
unsigned int i;
int ret;
filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
if (!filter_ctx)
return AVERROR(ENOMEM);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
filter_ctx[i].buffersrc_ctx = NULL;
filter_ctx[i].buffersink_ctx = NULL;
filter_ctx[i].filter_graph = NULL;
if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
|| ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
continue;
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
filter_spec = "null"; /* passthrough (dummy) filter for video */
else
filter_spec = "anull"; /* passthrough (dummy) filter for audio */
ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
ofmt_ctx->streams[i]->codec, filter_spec);
if (ret)
return ret;
}
return 0;
}
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
int ret;
int got_frame_local;
AVPacket enc_pkt;
int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
(ifmt_ctx->streams[stream_index]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
if (!got_frame)
got_frame = &got_frame_local;
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
/* encode filtered frame */
enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
filt_frame, got_frame);
av_frame_free(&filt_frame);
if (ret < 0)
return ret;
if (!(*got_frame))
return 0;
/* prepare packet for muxing */
enc_pkt.stream_index = stream_index;
av_packet_rescale_ts(&enc_pkt,
ofmt_ctx->streams[stream_index]->codec->time_base,
ofmt_ctx->streams[stream_index]->time_base);
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
return ret;
}
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
int ret;
AVFrame *filt_frame;
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
/* push the decoded frame into the filtergraph */
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
frame, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
return ret;
}
/* pull filtered frames from the filtergraph */
while (1) {
filt_frame = av_frame_alloc();
if (!filt_frame) {
ret = AVERROR(ENOMEM);
break;
}
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
filt_frame);
if (ret < 0) {
/* if no more frames for output - returns AVERROR(EAGAIN)
* if flushed and no more frames for output - returns AVERROR_EOF
* rewrite retcode to 0 to show it as normal procedure completion
*/
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
ret = 0;
av_frame_free(&filt_frame);
break;
}
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
ret = encode_write_frame(filt_frame, stream_index, NULL);
if (ret < 0)
break;
}
return ret;
}
static int flush_encoder(unsigned int stream_index)
{
int ret;
int got_frame;
if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
AV_CODEC_CAP_DELAY))
return 0;
while (1) {
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
ret = encode_write_frame(NULL, stream_index, &got_frame);
if (ret < 0)
break;
if (!got_frame)
return 0;
}
return ret;
}
int main(int argc, char **argv)
{
int ret;
AVPacket packet = { .data = NULL, .size = 0 };
AVFrame *frame = NULL;
enum AVMediaType type;
unsigned int stream_index;
unsigned int i;
int got_frame;
int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
if (argc != 3) {
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
return 1;
}
av_register_all();
avfilter_register_all();
avformat_network_init();
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = open_output_file(argv[2])) < 0)
goto end;
if ((ret = init_filters()) < 0)
goto end;
/* read all packets */
while (1) {
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
break;
stream_index = packet.stream_index;
type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
stream_index);
if (filter_ctx[stream_index].filter_graph) {
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
break;
}
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base);
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
&got_frame, &packet);
if (ret < 0) {
av_frame_free(&frame);
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
break;
}
if (got_frame) {
frame->pts = av_frame_get_best_effort_timestamp(frame);
ret = filter_encode_write_frame(frame, stream_index);
av_frame_free(&frame);
if (ret < 0)
goto end;
} else {
av_frame_free(&frame);
}
} else {
/* remux this frame without reencoding */
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base);
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
if (ret < 0)
goto end;
}
av_packet_unref(&packet);
}
/* flush filters and encoders */
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
/* flush filter */
if (!filter_ctx[i].filter_graph)
continue;
ret = filter_encode_write_frame(NULL, i);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
goto end;
}
/* flush encoder */
ret = flush_encoder(i);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
goto end;
}
}
av_write_trailer(ofmt_ctx);
end:
av_packet_unref(&packet);
av_frame_free(&frame);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
avcodec_close(ifmt_ctx->streams[i]->codec);
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
avcodec_close(ofmt_ctx->streams[i]->codec);
if (filter_ctx && filter_ctx[i].filter_graph)
avfilter_graph_free(&filter_ctx[i].filter_graph);
}
av_free(filter_ctx);
avformat_close_input(&ifmt_ctx);
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
if (ret < 0)
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
return ret ? 1 : 0;
}
When we run this we are getting this error:
Input #0, rtsp, from 'rtsp://localhost:8554/live':
Metadata:
title : Unnamed
comment : N/A
Duration: N/A, start: 5294.642467, bitrate: N/A
Stream #0:0: Video: h264 (Constrained Baseline), yuv420p, 640x480 [SAR 1:1 DAR 4:3], 30 fps, 30 tbr, 90k tbn, 60 tbc
[libx264 @ 0xe87c80] Specified pixel format -1 is invalid or not supported
Cannot open video encoder for stream #0
codec : 28 and pix :12
Error occurred: Invalid argument
But I could see that the correct pixel format macro "AV_PIX_FMT_YUV420P" is provided.
UPDATE 1:
This is the section in ffmpeg where the error is thrown from (utils.c line 1547):
if (avctx->codec->pix_fmts) {
for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
break;
if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE
&& !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG)
&& avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) {
char buf[128];
snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
(char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
ret = AVERROR(EINVAL);
goto free_and_end;
}
Can someone suggest a solution for this?
Thanks
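Two hedged observations, since I cannot run this setup: first, the audio branch above sets enc_ctx->pix_fmt = AV_PIX_FMT_RGB24, which is meaningless for an audio encoder and worth deleting. Second, the quoted utils.c check fires when avctx->pix_fmt is still AV_PIX_FMT_NONE, whose value is exactly the -1 in the log, so the assignment in the video branch apparently never reaches the context that gets opened. A defensive pick with a log line just before avcodec_open2() would narrow it down:

if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
    if (encoder->pix_fmts && encoder->pix_fmts[0] != AV_PIX_FMT_NONE)
        enc_ctx->pix_fmt = encoder->pix_fmts[0];
    else
        enc_ctx->pix_fmt = dec_ctx->pix_fmt; /* fall back to the input format */
    av_log(NULL, AV_LOG_INFO, "encoder pix_fmt: %s\n",
           av_get_pix_fmt_name(enc_ctx->pix_fmt));
}

If the log prints a valid name while avcodec_open2() keeps failing, the context being opened is not the one being configured, which is where I would look next.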

record and play file using wrapper ffmpeg

I am using the FFmpegInvoke C# wrapper for ffmpeg.
I want to record an RTSP stream and play it back, but I cannot decode frames from the file.
Using this code I write the file test.mkv:
unsafe
{
AVFormatContext* context = FFmpegInvoke.avformat_alloc_context();
int video_stream_index=0;
FFmpegInvoke.av_register_all();
FFmpegInvoke.avcodec_register_all();
FFmpegInvoke.avformat_network_init();
//open rtsp
if (FFmpegInvoke.avformat_open_input(&context, "rtsp://admin:admin#192.168.0.71:554", null, null) != 0)
{
return ;
}
if (FFmpegInvoke.avformat_find_stream_info(context, null) < 0)
{
return ;
}
//search video stream
for (int i = 0; i < context->nb_streams; i++)
{
if (context->streams[i]->codec->codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO)
video_stream_index = i;
}
AVPacket packet;
FFmpegInvoke.av_init_packet(&packet);
//open output file
AVOutputFormat* fmt = FFmpegInvoke.av_guess_format("h264", null, null);
AVFormatContext* oc = FFmpegInvoke.avformat_alloc_context();
oc->oformat = fmt;
FFmpegInvoke.avio_open2(&oc->pb, "test.mkv", FFmpegInvoke.AVIO_FLAG_WRITE, null, null);
AVStream* stream = null;
int cnt = 0;
//start reading packets from stream and write them to file
/// FFmpegInvoke.av_read_play(context);//play RTSP
while (FFmpegInvoke.av_read_frame(context, &packet) >= 0 && cnt < 1000)
{ //read up to 1000 frames
if (packet.stream_index == video_stream_index)
{//packet is video
if (stream == null)
{//create stream in file
stream = FFmpegInvoke.avformat_new_stream(oc, context->streams[video_stream_index]->codec->codec);
FFmpegInvoke.avcodec_copy_context(stream->codec, context->streams[video_stream_index]->codec);
stream->sample_aspect_ratio = context->streams[video_stream_index]->codec->sample_aspect_ratio;
FFmpegInvoke.avformat_write_header(oc, null);
}
packet.stream_index = stream->id;
var p1 = new FileInfo("test.mkv").Length;
FFmpegInvoke.av_write_frame(oc, &packet);
cnt++;
}
FFmpegInvoke.av_free_packet(&packet);
FFmpegInvoke.av_init_packet(&packet);
}
FFmpegInvoke.av_read_pause(context);
FFmpegInvoke.av_write_trailer(oc);
FFmpegInvoke.avio_close(oc->pb);
FFmpegInvoke.avformat_free_context(oc);
}
And using this code I try to play my file:
unsafe{
string url = "test.mkv";
FFmpegInvoke.av_register_all();
FFmpegInvoke.avcodec_register_all();
FFmpegInvoke.avformat_network_init();
AVFormatContext* pFormatContext = FFmpegInvoke.avformat_alloc_context();
if (FFmpegInvoke.avformat_open_input(&pFormatContext, url, null, null) != 0)
throw new Exception("Could not open file");
if (FFmpegInvoke.avformat_find_stream_info(pFormatContext, null) != 0)
throw new Exception("Could not find stream info");
AVStream* pStream = null;
for (int i = 0; i < pFormatContext->nb_streams; i++)
{
if (pFormatContext->streams[i]->codec->codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO)
{
pStream = pFormatContext->streams[i];
break;
}
}
var packet = new AVPacket();
AVPacket* pPacket = &packet;
FFmpegInvoke.av_init_packet(pPacket);
AVCodecContext codecContext = *(pStream->codec);
int width = codecContext.width;
int height = codecContext.height;
AVPixelFormat sourcePixFmt = codecContext.pix_fmt;
AVCodecID codecId = codecContext.codec_id;
var convertToPixFmt = AVPixelFormat.PIX_FMT_BGR24;
SwsContext* pConvertContext = FFmpegInvoke.sws_getContext(width, height, sourcePixFmt,
width, height, convertToPixFmt,
FFmpegInvoke.SWS_FAST_BILINEAR, null, null, null);
if (pConvertContext == null)
throw new Exception("Could not initialize the conversion context");
var pConvertedFrame = (AVPicture*)FFmpegInvoke.avcodec_alloc_frame();
int convertedFrameBufferSize = FFmpegInvoke.avpicture_get_size(convertToPixFmt, width, height);
var pConvertedFrameBuffer = (byte*)FFmpegInvoke.av_malloc((uint)convertedFrameBufferSize);
FFmpegInvoke.avpicture_fill(pConvertedFrame, pConvertedFrameBuffer, convertToPixFmt, width, height);
AVCodec* pCodec = FFmpegInvoke.avcodec_find_decoder(codecId);
if (pCodec == null)
throw new Exception("Unsupported codec");
// Reusing codec context from stream info,
// as an alternative way it could look like this: (but it works not for all kind of codecs)
// AVCodecContext* pCodecContext = FFmpegInvoke.avcodec_alloc_context3(pCodec);
AVCodecContext* pCodecContext = &codecContext;
if ((pCodec->capabilities & FFmpegInvoke.CODEC_CAP_TRUNCATED) == FFmpegInvoke.CODEC_CAP_TRUNCATED)
pCodecContext->flags |= FFmpegInvoke.CODEC_FLAG_TRUNCATED;
AVFrame* pDecodedFrame = FFmpegInvoke.avcodec_alloc_frame();
if (FFmpegInvoke.av_read_frame(pFormatContext, pPacket) < 0)
throw new System.IO.EndOfStreamException();
int gotPicture = 0;
int size = FFmpegInvoke.avcodec_decode_video2(pCodecContext, pDecodedFrame, &gotPicture, pPacket);
if (size < 0)
throw new Exception(string.Format("Error while decoding frame "));
if (gotPicture == 1)
{
}
size is -22. Why? What is wrong? How do I play my file using ffmpeg?
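-22 is AVERROR(EINVAL). Two things stand out in the snippet, hedged since I haven't run it: codecContext is a by-value copy of *pStream->codec, and AVCodecContext holds internal pointers, so copying the struct is not safe; and no avcodec_open2() is ever called on the context handed to avcodec_decode_video2(), which is a classic source of EINVAL here. For readable error messages, the usual pattern looks like this in C (the wrapper should expose the same av_strerror call):

#include <stdio.h>
#include <libavutil/error.h>

/* 'size' is the negative return code from the decode call */
char errbuf[AV_ERROR_MAX_STRING_SIZE];
if (av_strerror(size, errbuf, sizeof(errbuf)) == 0)
    fprintf(stderr, "avcodec_decode_video2 failed: %s\n", errbuf);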

Audio encoding using avcodec_fill_audio_frame() and memory leaks

As part of encoding decoded audio packets, I'm using avcodec_fill_audio_frame(). I'm passing an allocated AVFrame pointer along with a buffer containing the decoded samples, plus the other parameters (number of channels, sample format, buffer size). Though the encoding is working fine, I'm not able to completely eliminate the memory leaks. I've taken care of most things, but I still can't detect the leak.
Below is the function which I'm using for encoding. Please suggest something.
AudioSample contains the decoded data and is completely managed in a different class (freed in the class destructor). I'm freeing the AVFrame in the FFmpegEncoder destructor, and the AVPacket is freed every time using av_free_packet(), with its destruct callback set to av_destruct_packet. What more do I need to free?
void FfmpegEncoder::WriteAudioSample(AudioSample *audS)
{
int num_audio_frame = 0;
AVCodecContext *c = NULL;
// AVFrame *frame;
AVPacket pkt;
av_init_packet(&pkt);
pkt.destruct = av_destruct_packet;
pkt.data = NULL;
pkt.size = 0;
int ret = 0, got_packet = 0;
c = m_out_aud_strm->codec;
static int64_t aud_pts_in = -1;
if((audS != NULL) && (audS->GetSampleLength() > 0) )
{
int byte_per_sample = av_get_bytes_per_sample(c->sample_fmt);
PRINT_VAL("Byte Per Sample ", byte_per_sample)
m_frame->nb_samples = (audS->GetSampleLength())/(c->channels*av_get_bytes_per_sample(c->sample_fmt));
if(m_frame->nb_samples == c->frame_size)
{
#if 1
if(m_need_resample && (c->channels >= 2))
{
uint8_t * t_buff1 = new uint8_t[audS->GetSampleLength()];
if(t_buff1 != NULL)
{
for(int64_t i = 0; i< m_frame->nb_samples; i++)
{
memcpy(t_buff1 + i*byte_per_sample, (uint8_t*)((uint8_t*)audS->GetAudioSampleData() + i*byte_per_sample*c->channels), byte_per_sample);
memcpy(t_buff1 + (audS->GetSampleLength())/2 + i*byte_per_sample, (uint8_t*)((uint8_t*)audS->GetAudioSampleData() + i*byte_per_sample*c->channels+ byte_per_sample), byte_per_sample);
}
audS->FillAudioSample(t_buff1, audS->GetSampleLength());
delete[] t_buff1;
}
}
#endif
ret = avcodec_fill_audio_frame(m_frame, c->channels, c->sample_fmt, (uint8_t*)audS->GetAudioSampleData(),m_frame->nb_samples*byte_per_sample*c->channels, 0);
//ret = avcodec_fill_audio_frame(&frame, c->channels, c->sample_fmt, t_buff,frame.nb_samples*byte_per_sample*c->channels, 0);
if(ret != 0)
{
PRINT_MSG("Avcodec Fill Audio Failed ")
}
else
{
got_packet = 0;
ret = avcodec_encode_audio2(c, &pkt, m_frame, &got_packet);
if(ret < 0 || got_packet == 0)
{
PRINT_MSG("failed to encode audio ")
}
else
{
PRINT_MSG("Audio Packet Encoded ");
aud_pts_in++;
pkt.pts = aud_pts_in;
pkt.dts = pkt.pts;
pkt.stream_index = m_out_aud_strm->index;
ret = av_interleaved_write_frame(oc, &pkt);
if(ret != 0)
{
PRINT_MSG("Error Write Audio PKT ")
}
else
{
PRINT_MSG("Audio PKT Writen ")
}
}
}
}
avcodec_flush_buffers(c);
// avcodec_free_frame(&frame);
}
av_free_packet(&pkt);
}
Thanks,
Pradeep
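One detail that matters for the leak hunt, as far as the documentation goes: avcodec_fill_audio_frame() copies nothing; it only points frame->data[] into the buffer you pass, so the frame never owns the sample data, and freeing the AVFrame neither frees nor leaks those bytes. Whoever allocated the buffer frees it. A minimal sketch of that ownership pattern, with assumed names (c, m_frame, pkt as in the code above; samples/bufsize are illustrative):

uint8_t *samples = av_malloc(bufsize);           /* owner: this code */
if (samples) {
    ret = avcodec_fill_audio_frame(m_frame, c->channels, c->sample_fmt,
                                   samples, bufsize, 0);
    if (ret >= 0)
        ret = avcodec_encode_audio2(c, &pkt, m_frame, &got_packet);
    av_free(samples);   /* the frame never owned this buffer */
}
av_free_packet(&pkt);   /* releases what the encoder allocated */

If the leak persists after checking that pairing, valgrind --leak-check=full on a short run usually names the allocation site directly.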
//================== SEND AUDIO OUTPUT =======================
void AVOutputStream::sendAudioOutput (AVFrame* inputFrame)
{
AVCodecContext *codecCtx = pOutputAudioStream->codec;
// set source data variables
sourceNumberOfChannels = inputFrame->channels;
sourceChannelLayout = inputFrame->channel_layout;
sourceSampleRate = inputFrame->sample_rate;
_sourceSampleFormat = (AVSampleFormat)inputFrame->format;
sourceNumberOfSamples = inputFrame->nb_samples;
// set destination data variables
destinationNumberOfChannels = codecCtx->channels;
destinationChannelLayout = codecCtx->channel_layout;
destinationSampleRate = codecCtx->sample_rate;
destinationSampleFormat = codecCtx->sample_fmt;//AV_SAMPLE_FMT_FLTP;//EncodecCtx->sample_fmt;
destinationLineSize = 0;
destinationData = NULL;
int returnVal = 0;
if (startDecode == false)
{
startDecode = true;
resamplerCtx = swr_alloc_set_opts(NULL,
destinationChannelLayout,
destinationSampleFormat,
destinationSampleRate,
sourceChannelLayout,
_sourceSampleFormat,
sourceSampleRate,
0,
NULL);
if (resamplerCtx == NULL)
{
std::cout << "Unable to create the resampler context for the audio frame";
isConnected = false;
}
// initialize the resampling context
returnVal = swr_init(resamplerCtx);
if (returnVal < 0)
{
std::cout << "Unable to init the resampler context, error:";
isConnected = false;
}
} //if (startDecode == false)
if (sourceSampleRate != 0)
destinationNumberOfSamples = destinationSampleRate/sourceSampleRate * sourceNumberOfSamples;
// allocate the destination samples buffer
returnVal = av_samples_alloc_array_and_samples(&destinationData,
&destinationLineSize,
destinationNumberOfChannels,
destinationNumberOfSamples,
destinationSampleFormat,
0);
if (returnVal < 0)
{
std::cout << "Unable to allocate destination samples, error";
isConnected = false;
}
// convert to destination format
returnVal = swr_convert(resamplerCtx,
destinationData,
destinationNumberOfSamples,
(const uint8_t **)inputFrame->data, //sourceData,
sourceNumberOfSamples);
if (returnVal < 0)
{
std::cout << "Resampling failed, error \n";
isConnected = false;
}
int bufferSize = av_samples_get_buffer_size(&destinationLineSize,
destinationNumberOfChannels,
destinationNumberOfSamples,
destinationSampleFormat,
0);
//whithout fifo
pOutputAudioFrame = av_frame_alloc();
pOutputAudioFrame->nb_samples = codecCtx->frame_size;//frameNumberOfSamples;
pOutputAudioFrame->format = codecCtx->sample_fmt;
pOutputAudioFrame->channel_layout = codecCtx->channel_layout;
pOutputAudioFrame->channels = codecCtx->channels;
pOutputAudioFrame->sample_rate = codecCtx->sample_rate;
returnVal = avcodec_fill_audio_frame(pOutputAudioFrame,
pOutputAudioFrame->channels,
(AVSampleFormat)pOutputAudioFrame->format,
(const uint8_t *)destinationData[0],
bufferSize,0);
pOutputAudioFrame->pts = inputFrame->pts;
if (returnVal < 0)
{
std::cout << "Unable to fill the audio frame wsampleIndexith captured audio data,error";
isConnected = false;
}
// encode the audio frame, fill a packet for streaming
av_init_packet(&outAudioPacket);
outAudioPacket.data = NULL;
outAudioPacket.size = 0;
outAudioPacket.dts = outAudioPacket.pts = 0;
int gotPacket;
// encoding
returnVal = avcodec_encode_audio2(codecCtx, &outAudioPacket, pOutputAudioFrame, &gotPacket);
// free buffers
av_freep(&destinationData[0]);
av_freep(&destinationData);
av_frame_free(&pOutputAudioFrame);
if (gotPacket)
{
outAudioPacket.stream_index = pOutputAudioStream->index;
outAudioPacket.flags |= AV_PKT_FLAG_KEY;
returnVal = av_interleaved_write_frame(pOutputFormatCtx, &outAudioPacket);
//returnVal = av_write_frame(pOutputFormatCtx, &outAudioPacket);
if (returnVal != 0)
{
std::cout << "Cannot write audio packet \n";
isConnected = false;
}
av_free_packet(&outAudioPacket);
} // if (gotPacket)
}
You can see that after resampling I free the used buffers.
// free buffers
av_freep(&destinationData[0]);
av_freep(&destinationData);
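For completeness, one allocation in this answer with no visible matching free: resamplerCtx is created once under the startDecode guard but never released. At stream teardown it needs a swr_free(), which frees the context and NULLs the pointer:

#include <libswresample/swresample.h>

/* in the stream's close/teardown path */
swr_free(&resamplerCtx);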
