Request for ffmpeg raw data to mp4 container example - ffmpeg

I have a binary file with raw H.264 data, arranged like this:
NAL(SPS), NAL(PPS), NAL(Frame), NAL(SPS), NAL(PPS)...
and I want to mux it (without encoding) into an MP4 container.
The muxing.c example in ffmpeg encodes YUV data, which is different from my case, and I have no idea how to change the example to do what I want.
I know the command ffmpeg -i h264file -c copy h264.mp4 does what I want, but I have to do it inside my program, so I need to know how to use the ffmpeg API for the same thing. So far I cannot find any simple example of it. Does anyone have a hint on how to do it? Thanks.
Update: I have written the code below from the references. It seems to create the MP4, but the timing is not correct: the frame rate and timing information are lost and it plays back very fast.
av_register_all();
int ret;
AVDictionary *opt = NULL;
//bool is264 = true;
const char *inputFileName = "input.264";
const char *outputFileName = "output.mp4";
AVFormatContext *ic = avformat_alloc_context();
if ((ret = avformat_open_input(&ic, inputFileName, NULL, NULL)) < 0)
    return -1;
// Get format info (retrieve stream information)
if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
    return ret; // Couldn't find stream information
for (int i = 0; i < ic->nb_streams; i++) {
    AVStream *stream = ic->streams[i];
    AVCodecContext *codec_ctx = stream->codec;
    /* Reencode video & audio and remux subtitles etc. */
    if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
        || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* Open decoder */
        ret = avcodec_open2(codec_ctx, avcodec_find_decoder(codec_ctx->codec_id), NULL);
        if (ret < 0) {
            //av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
            return ret;
        }
    }
}
// Dump information about file onto standard error
av_dump_format(ic, 0, inputFileName, 0);
AVFormatContext *oc;
avformat_alloc_output_context2(&oc, NULL, NULL, outputFileName);
if (!oc) {
    //printf("Could not deduce output format from file extension: using MPEG.\n");
    //avformat_alloc_output_context2(&oc, NULL, "mpeg", outputFileName);
    return -1;
}
AVStream *ist = ic->streams[0];
AVCodec *out_vid_codec = avcodec_find_encoder(oc->oformat->video_codec);
if (NULL == out_vid_codec)
    return -1; // Couldn't find video encoder
AVStream *out_vid_strm = avformat_new_stream(oc, out_vid_codec);
if (NULL == out_vid_strm)
    return -1; // Couldn't create output video stream
ret = avcodec_copy_context(out_vid_strm->codec, ist->codec);
if (ret < 0)
    return ret; // Failed to copy context
ret = avio_open(&oc->pb, outputFileName, AVIO_FLAG_WRITE);
ret = avformat_write_header(oc, NULL);
AVPacket pkt;
while (1) {
    AVStream *in_stream, *out_stream;
    ret = av_read_frame(ic, &pkt);
    if (ret < 0)
        break;
    pkt.stream_index = 0;
    in_stream = ic->streams[pkt.stream_index];
    out_stream = oc->streams[pkt.stream_index];
    pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,
                               (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,
                               (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
    pkt.pos = -1;
    //log_packet(ofmt_ctx, &pkt, "out");
    ret = av_interleaved_write_frame(oc, &pkt);
    if (ret < 0) {
        fprintf(stderr, "Error muxing packet\n");
        break;
    }
    av_packet_unref(&pkt);
}
av_write_trailer(oc);
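A possible direction for the timing problem (a sketch under assumptions, not a verified fix): a raw Annex-B H.264 file carries no timestamps of its own, so the demuxer has to be told the frame rate explicitly, and that timing has to reach the output stream before avformat_write_header(). Recent FFmpeg versions let the raw h264 demuxer take a framerate option, so the open call above could become something like this (30 fps is an assumed value):
AVDictionary *in_opts = NULL;
av_dict_set(&in_opts, "framerate", "30", 0);   /* assumption: the source is 30 fps */
if ((ret = avformat_open_input(&ic, inputFileName, NULL, &in_opts)) < 0)
    return -1;
av_dict_free(&in_opts);

/* later, after avformat_new_stream()/avcodec_copy_context() and before
 * avformat_write_header(), keep the input timing on the output stream: */
out_vid_strm->time_base = ist->time_base;
out_vid_strm->avg_frame_rate = ist->avg_frame_rate;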

Related

Decoding of 16bit gray image encoded with FFV1

I have a problem decoding gray images encoded with the FFV1 codec.
I successfully encode a 16-bit gray image (with the avcodec_receive_packet(...) function) and save the AVPacket data to a file. Then I read this data back from the file and try to decode it (with avcodec_decode_video2 or avcodec_send_packet/avcodec_receive_frame), with no success:
when I try to decode the packet with avcodec_decode_video2 I get the error "Access violation occurred, unable to write location 0x0000000000000000";
when I try to decode the packet with avcodec_send_packet/avcodec_receive_frame I get the error "chroma shift parameters 7 0 are invalid".
I compared the packets after encoding and before decoding and all fields and values seem to be the same. I even tried to decode the packet right after avcodec_receive_packet (the encoding function), but I get the same error.
I use ffmpeg 4.0 and the program is based on the decode_video.c and encode_video.c examples.
When I use a container (e.g. AVI) to read/write the encoded images from a file (based on the demuxing_decoding.c and muxing.c examples), I successfully encode and decode frames with FFV1. However, I cannot use a container, because I want to encode frames with different resolutions and mix several video sources together. Additionally, the compression level is significantly lower (it falls from 2.9 to 2.2) for a few hundred images, which is also very surprising.
So my question is: how do I correctly save/read the encoded data to/from a binary file (not a container) and prepare the AVPacket for decoding with FFV1?
Any help is greatly appreciated.
The decoding code:
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}
#pragma warning(disable: 4996)
#define INBUF_SIZE 4096
#define FF_INPUT_BUFFER_PADDING_SIZE 64
uint8_t endcode[4];
AVCodecContext *c, c2;
AVCodec *codec;
int i, ret, x, y;
AVFrame *frame;
AVPacket *pkt, *pkt_temp;
FILE *encodedVideoFile;
AVDictionary *opts = NULL;
uint8_t *video_dst_data[4] = { NULL };
int video_dst_linesize[4];
int imageSize;
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
/* flush the encoder */
frame = NULL;
encode();
/* add sequence end code to have a real MPEG file */
//fwrite(endcode, 1, sizeof(endcode), encodedVideoFile);
fclose(encodedVideoFile);
avcodec_free_context(&c);
av_frame_free(&frame);
av_packet_free(&pkt);
}
void initDecoding(const char *filename)
{
/* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
/* find the MPEG-1 video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_FFV1);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* resolution must be a multiple of two */
c->width = 1280;
c->height = 484;
/* frames per second */
c->time_base.den = 1;
c->time_base.num = 10;
c->bits_per_raw_sample = 16;
c->framerate.den = 10;
c->framerate.num = 1;
c->pix_fmt = AV_PIX_FMT_GRAY16;
//Version of FFV1 codec
c->level = 3;
/* Init the decoders, with or without reference counting */
av_dict_set(&opts, "refcounted_frames", 0 ? "1" : "0", 0);
if ((ret = avcodec_open2(c, codec, &opts)) < 0) {
return;
}
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
ret = av_image_alloc(video_dst_data, video_dst_linesize,
c->width, c->height, c->pix_fmt, 4);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
}
encodedVideoFile = fopen(filename, "rb");
if (!encodedVideoFile) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = av_frame_get_buffer(frame, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate the video frame data\n");
exit(1);
}
/* make sure the frame data is writable */
ret = av_frame_make_writable(frame);
if (ret < 0)
exit(1);
}
void closeDecoding()
{
fclose(encodedVideoFile);
av_parser_close(parser);
avcodec_free_context(&c);
av_frame_free(&frame);
av_packet_free(&pkt);
}
void decodePacket()
{
size_t data_size;
int *got_frame = 0;
read_packt_from_file(pkt, encodedVideoFile);
ret = av_frame_is_writable(frame);
//First decoding function
/*ret = avcodec_decode_video2(c, frame, got_frame, pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding video frame (%s)\n");
}*/
ret = avcodec_send_packet(c, pkt);
if (ret < 0) {
fprintf(stderr, "Error sending a packet for decoding\n");
exit(1);
}
while (ret >= 0) {
ret = avcodec_receive_frame(c, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return;
else if (ret < 0) {
fprintf(stderr, "Error during decoding\n");
exit(1);
}
printf("saving frame %3d\n", c->frame_number);
fflush(stdout);
}
}
size_t read_packt_from_file(AVPacket *packet, FILE *file)
{
size_t ret = 0;
int size;
uint8_t * data;
//av_packet_from_data
ret = fread(packet, sizeof(AVPacket), 1, file);
size = packet->size;
data = new uint8_t[size];
ret = fread(data, size, 1, file);
av_new_packet(packet, size);
av_packet_from_data(packet, data, size);
return ret;
}
//To write encoded AVPacket
size_t write_packt_to_file(AVPacket *packet, FILE *file)
{
size_t ret = 0;
ret = fwrite(packet, sizeof(AVPacket), 1, file);
ret = fwrite(packet->data, packet->size, 1, file);
if (packet->buf) {
fwrite(packet->buf->data, packet->buf->size, 1, file);
}
fflush(file);
return ret;
}
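One possible direction, sketched under assumptions rather than verified against this code: an AVPacket holds pointers, so fwrite/fread of the struct itself cannot survive a round trip through a file; only the size and the payload bytes can. A size-prefixed framing like the one below avoids that. Also, FFV1 at level 3 keeps its stream configuration in the encoder's extradata, so the decoder context would need that extradata before avcodec_open2() (which may be what the "chroma shift parameters" error is complaining about). The saved_extradata/saved_extradata_size names are placeholders for however that blob is stored in the file.
/* write: size prefix followed by the payload bytes */
size_t write_packet_to_file(const AVPacket *packet, FILE *file)
{
    int32_t size = packet->size;
    if (fwrite(&size, sizeof(size), 1, file) != 1)
        return 0;
    if (fwrite(packet->data, 1, (size_t)size, file) != (size_t)size)
        return 0;
    fflush(file);
    return (size_t)size;
}
/* read: allocate a packet of the stored size and fill its data */
int read_packet_from_file(AVPacket *packet, FILE *file)
{
    int32_t size = 0;
    if (fread(&size, sizeof(size), 1, file) != 1 || size <= 0)
        return -1;
    if (av_new_packet(packet, size) < 0)   /* allocates size + padding and sets packet->size */
        return -1;
    if (fread(packet->data, 1, (size_t)size, file) != (size_t)size) {
        av_packet_unref(packet);
        return -1;
    }
    return 0;
}
/* decoder side, before avcodec_open2(): hand the encoder's extradata to the decoder.
 * saved_extradata / saved_extradata_size are placeholders for wherever that blob
 * was stored (e.g. as the first size-prefixed record in the file). */
c->extradata = (uint8_t *)av_mallocz(saved_extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
memcpy(c->extradata, saved_extradata, saved_extradata_size);
c->extradata_size = saved_extradata_size;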

ffmpeg 4.0 decoded h264 slower than old version one(n2.8.14)

I use ffmpeg to decode H.264 and transcode it to JPEG.
I used to use version 2.8 and have now switched to 4.0.
In the new version avcodec_decode_video2 is deprecated, so I use the new API (avcodec_send_packet/avcodec_receive_frame).
I found that the new ffmpeg version is much slower than the old one when decoding H.264.
Why is that? How can I reach the original speed?
Here is the decoder init code:
version 2.8
//2.8
if (avformat_open_input(&format_, path, NULL, &options) != 0) {
printf("FileMediaBase: open input failed\n");
return -1;
}
if (avformat_find_stream_info(format_, NULL) < 0) {
printf("FileMediaBase: avformat_find_stream_info failed\n");
avformat_close_input(&format_);
return -1;
}
AVCodecContext *code_ctx = 0;
AVCodec *codec = 0;
// Find the first audio and video stream
for (int i = 0; i < format_->nb_streams; i++) {
code_ctx = format_->streams[i]->codec;
codec = avcodec_find_decoder(code_ctx->codec_id);
if (code_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
vcode_ctx_ = code_ctx;
vstream_ = format_->streams[i];
}else if (code_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
acode_ctx_ = code_ctx;
astream_ = format_->streams[i];
}
// Open the codec
if (!codec || (avcodec_open2(code_ctx, codec, NULL) < 0)) {
printf("avcodec_open2() failed\n");
avformat_close_input(&format_);
return -1;
}
version 4.0
if (avformat_open_input(&format_, path, NULL, &options) != 0) {
printf("FileMediaBase: open input failed\n");
return -1;
}
if (avformat_find_stream_info(format_, NULL) < 0) {
printf("FileMediaBase: avformat_find_stream_info failed\n");
avformat_close_input(&format_);
return -1;
}
AVStream *st;
AVCodec *dec = NULL;
AVDictionary *opts = NULL;
ret = av_find_best_stream(format_, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (ret < 0) {
return ret;
}
else {
stream_index = ret;
st = format_->streams[stream_index];
/* find decoder for the stream */
dec = avcodec_find_decoder(st->codecpar->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
}
/* Allocate a codec context for the decoder */
*dec_ctx = avcodec_alloc_context3(dec);
if (!*dec_ctx) {
fprintf(stderr, "Failed to allocate the %s codec context\n",
av_get_media_type_string(type));
return AVERROR(ENOMEM);
}
/* Copy codec parameters from input stream to output codec context */
if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
av_get_media_type_string(type));
avcodec_free_context(dec_ctx);
return ret;
}
/*if (dec->id == AV_CODEC_ID_H264) {
(*dec_ctx)->flags |= AV_CODEC_FLAG_TRUNCATED;
}*/
/* Init the decoders, with or without reference counting */
//av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
avcodec_free_context(dec_ctx);
return ret;
}
I printed out the time spent in some API calls.
I've solved this problem: I changed the number of decoding threads (default is 1 in version 4.0) to 6 (the default in version 2.8).
(*dec_ctx)->thread_count = 6;
I have faced the same issue when decoding an H.264 video stream with ffmpeg versions 2.8.14 and 4.0.1.
In both cases, when accessing the codec context after avformat_find_stream_info(),
AVFormatContext *pFormatCtx;
AVCodecContext* pCodecCtxOrig;
...
avformat_find_stream_info(pFormatCtx, NULL)
...
for (uint i = 0; i < pFormatCtx->nb_streams; i++)
{
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
pCodecCtxOrig = pFormatCtx->streams[i]->codec;
...
}
}
then pCodecCtxOrig->thread_count is actually set to 0, which means that the ffmpeg library picks a suitable thread count by itself, based on the CPU cores, when the codec context is opened with avcodec_open2(). In my case the thread count was set to 5.
In the case of 2.8.14, pCodecCtxOrig->thread_count is set to 0, and when doing a copy from it with
AVCodecContext* pCodecCtxOrig;
AVCodecContext* pCodecCtx;
....
avcodec_copy_context(pCodecCtx, pCodecCtxOrig)
the thread_count is copied as well.
In the case of 4.0.1, avcodec_copy_context() is deprecated and avcodec_parameters_to_context() should be used instead. The AVCodecParameters struct being copied does not contain thread_count, so that information gets lost: the thread count stays at the default value of 1 set by avcodec_alloc_context3(). To overcome the problem, set thread_count to 0 before opening the codec context.
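For reference, a minimal sketch of that fix applied to the 4.0-style setup shown earlier (variable names follow that snippet; 0 asks libavcodec to pick a thread count based on the CPU):
/* after avcodec_parameters_to_context() and before avcodec_open2() */
(*dec_ctx)->thread_count = 0;   /* 0 = let the library auto-detect a suitable thread count */
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0)
    return ret;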

libavformat/libavcodec providing invalid container header

I'm using libavcodec to encode a stream to H.264 and libavformat to store it in an MP4. The resulting container has an invalid header: it can be played in VLC, but not in any other player.
I've found that using the mp4 container and the "mpeg4" codec produces a valid mp4 file, but using libx265 (HEVC) or the libx264 codec produces invalid mp4s.
I can use ffmpeg -i invalid.mp4 -vcodec copy valid.mp4 and I get a file of almost the exact same size, but in a valid container.
Examples of these files are here: Broken file and Repaired file [use the download links in the upper right to examine].
I used a hex editor to see the differences in the headers of the two files and the invalid one is 1 byte smaller than the valid one.
The code I'm using to open the container and codec and to write the header is here:
AVOutputFormat *container_format;
AVFormatContext *container_format_context;
AVStream *video_stream;
int ret;
/* allocate the output media context */
avformat_alloc_output_context2(&container_format_context, NULL, NULL, out_file);
if (!container_format_context) {
log(INFO, "Unable to determine container format from filename, exiting\n");
exit(1);
}
else {
log(INFO, "Using container %s\n", container_format_context->oformat->name);
}
if (!container_format_context) {
log(ERROR, "Could not build container format context. Encoding failed.");
exit(1);
}
container_format = container_format_context->oformat;
/* Pull codec based on name */
AVCodec* codec = avcodec_find_encoder_by_name(codec_name);
if (codec == NULL) {
log(ERROR, "Failed to locate codec \"%s\".",
codec_name);
exit(1);
}
/* create stream */
video_stream = NULL;
video_stream = avformat_new_stream(container_format_context, codec);
if (!video_stream) {
log(ERROR, "Could not allocate encoder stream. Cannot continue.\n");
exit(1);
}
video_stream->id = container_format_context->nb_streams - 1;
video_stream->time_base = video_stream->codec->time_base = (AVRational) { 1, 25};
av_dump_format(container_format_context, 0, out_file, 1);
/* Retrieve encoding context */
AVCodecContext* avcodec_context = video_stream->codec;
if (avcodec_context == NULL) {
log(ERROR, "Failed to allocate context for "
"codec \"%s\".", codec_name);
exit(1);
}
/* Init context with encoding parameters */
avcodec_context->bit_rate = bitrate;
avcodec_context->width = width;
avcodec_context->height = height;
avcodec_context->gop_size = 10;
avcodec_context->max_b_frames = 1;
avcodec_context->qmax = 31;
avcodec_context->qmin = 2;
avcodec_context->pix_fmt = AV_PIX_FMT_YUV420P;
av_dump_format(container_format_context, 0, out_file, 1);
/* Open codec for use */
if (avcodec_open2(avcodec_context, codec, NULL) < 0) {
log(ERROR, "Failed to open codec \"%s\".", codec_name);
exit(1);
}
/* Allocate corresponding frame */
AVFrame* frame = av_frame_alloc();
if (frame == NULL) {
exit(1);
}
/* Copy necessary data for frame from avcodec_context */
frame->format = avcodec_context->pix_fmt;
frame->width = avcodec_context->width;
frame->height = avcodec_context->height;
/* Allocate actual backing data for frame */
if (av_image_alloc(frame->data, frame->linesize, frame->width,
frame->height, frame->format, 32) < 0) {
exit(1);
}
/* open the output file, if the container needs it */
if (!(container_format->flags & AVFMT_NOFILE)) {
ret = avio_open(&container_format_context->pb, out_file, AVIO_FLAG_WRITE);
if (ret < 0) {
log(ERROR, "Error occurred while opening output file: %s\n",
av_err2str(ret));
exit(1);
}
}
/* write the stream header, if needed */
ret = avformat_write_header(container_format_context, NULL);
if (ret < 0) {
log(ERROR, "Error occurred while writing output file header: %s\n",
av_err2str(ret));
}
The code to encode a frame is here:
/* Init video packet */
AVPacket packet;
av_init_packet(&packet);
/* Request that encoder allocate data for packet */
packet.data = NULL;
packet.size = 0;
/* Write frame to video */
int got_data;
if (avcodec_encode_video2(avcontext, &packet, frame, &got_data) < 0) {
log(WARNING, "Error encoding frame #%" PRId64,
video_struct->next_pts);
return -1;
}
/* Write corresponding data to file */
if (got_data) {
if (packet.pts != AV_NOPTS_VALUE) {
packet.pts = av_rescale_q(packet.pts, video_struct->output_stream->codec->time_base, video_struct->output_stream->time_base);
}
if (packet.dts != AV_NOPTS_VALUE) {
packet.dts = av_rescale_q(packet.dts, video_struct->output_stream->codec->time_base, video_struct->output_stream->time_base);
}
write_packet(video_struct, &packet, packet.size);
av_packet_unref(&packet);
}
And the code to write the packet to the video stream:
static int write_packet(video_struct* video, void* data, int size) {
int ret;
/* use AVStream is not null, otherwise write to output fd */
AVPacket *pkt = (AVPacket*) data;
pkt->stream_index = video->output_stream->index;
ret = av_interleaved_write_frame(video->container_format_context, pkt);
if (ret != 0) {
return -1;
}
/* Data was written successfully */
return ret;
}
Solved this issue. The problem was that I wasn't assigning global headers to the codec context when the container format required it. While assigning properties like height, width, bit rate and so forth to the avcodec_context, I added
if (container_format_context->oformat->flags & AVFMT_GLOBALHEADER) {
avcodec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
which seems to have fixed the issue.
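A small note on that flag: in newer libavcodec releases the unprefixed CODEC_FLAG_GLOBAL_HEADER name is deprecated in favour of the AV_-prefixed one, so on a recent build the same check would typically read:
if (container_format_context->oformat->flags & AVFMT_GLOBALHEADER)
    avcodec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;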

After ffmpeg encode, AVPacket pts and dts is AV_NOPTS_VALUE

I would like to ask a question about ffmpeg when I use an encoder (x264).
This is my code:
int
FFVideoEncoder::init(AVCodecID codecId, int bitrate, int fps, int gopSize,
int width, int height, AVPixelFormat format) {
release();
const AVCodec *codec = avcodec_find_encoder(codecId);
m_pCodecCtx = avcodec_alloc_context3(codec);
m_pCodecCtx->width = width;
m_pCodecCtx->height = height;
m_pCodecCtx->pix_fmt = format;
m_pCodecCtx->bit_rate = bitrate;
m_pCodecCtx->thread_count = 5;
m_pCodecCtx->max_b_frames = 0;
m_pCodecCtx->gop_size = gopSize;
m_pCodecCtx->time_base.num = 1;
m_pCodecCtx->time_base.den = fps;
//H.264
if (m_pCodecCtx->codec_id == AV_CODEC_ID_H264) {
// av_dict_set(&opts, "preset", "slow", 0);
av_dict_set(&m_pEncoderOpts, "preset", "superfast", 0);
av_dict_set(&m_pEncoderOpts, "tune", "zerolatency", 0);
m_pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
m_pCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
int ret = avcodec_open2(m_pCodecCtx, m_pCodecCtx->codec, &m_pEncoderOpts);
if (ret == 0) {
LOGI("open avcodec success!");
} else {
LOGE("open avcodec error!");
return -1;
}
return ret;
}
int FFVideoEncoder::encode(const Frame &inFrame, AVPacket *outPacket) {
AVFrame *frame = av_frame_alloc();
// avpicture_fill((AVPicture *) frame, inFrame.getData(), AV_PIX_FMT_YUV420P, inFrame.getWidth(),
// inFrame.getHeight());
av_image_fill_arrays(frame->data, frame->linesize, inFrame.getData(), m_pCodecCtx->pix_fmt,
inFrame.getWidth(), inFrame.getHeight(), 1);
int ret = 0;
ret = avcodec_send_frame(m_pCodecCtx, frame);
if (ret != 0) {
LOGE("send frame error! %s", av_err2str(ret));
} else {
ret = avcodec_receive_packet(m_pCodecCtx, outPacket);
LOGI("extract data size = %d", m_pCodecCtx->extradata_size);
if (ret != 0) {
LOGE("receive packet error! %s", av_err2str(ret));
}
};
av_frame_free(&frame);
return ret;
}
I expect the AVPacket to carry the pts and dts for this frame,
but in fact I only get the encoded frame data and size.
//====================================
Besides this question, I have another one:
the x264 docs say that the "tune" option can be set to values like film, animation and others, but I only get a normal video when I set the "zerolatency" parameter. When I set the other options, the video's bitrate is very low.
Thanks for your answers.
This is just a simple example to see if it works:
I believe you should set frame->pts beforehand.
Try this:
Set frame->pts = framecount before calling ret = avcodec_send_frame(m_pCodecCtx, frame).
Add framecount as a simple counter of the frames you send for encoding; increase it each time.
Hope that helps.
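For illustration, a minimal sketch of that suggestion applied to the encode() function above; m_frameCount is a hypothetical counter member, not part of the original class:
// in the class definition (hypothetical member): int64_t m_frameCount = 0;
AVFrame *frame = av_frame_alloc();
frame->width = m_pCodecCtx->width;
frame->height = m_pCodecCtx->height;
frame->format = m_pCodecCtx->pix_fmt;
av_image_fill_arrays(frame->data, frame->linesize, inFrame.getData(), m_pCodecCtx->pix_fmt,
                     inFrame.getWidth(), inFrame.getHeight(), 1);
frame->pts = m_frameCount++;   // monotonically increasing, in time_base units (1/fps here)
ret = avcodec_send_frame(m_pCodecCtx, frame);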

RTSP relay using ffmpeg library

I am implementing an RTSP relay using libavcodec/libavformat (ffmpeg). Basically, it connects to an IP camera, which is an RTSP server, pulls the stream and then pushes it to Wowza. I am finding it very difficult to find any sample implementation of this on the internet, so I started implementing it from bits and pieces found here and there. I took code from samples that a) read an RTSP stream and write it to a file, and b) read from a file and serve it over RTSP.
This is the current state of the code:
/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2014 Andrey Utkin
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* API example for demuxing, decoding, filtering, encoding and muxing
* @example transcoding.c
*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
static AVFormatContext *ifmt_ctx,*temp;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;
static int open_input_file(const char *filename)
{
int ret;
unsigned int i;
ifmt_ctx = NULL;
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *stream;
AVCodecContext *codec_ctx;
stream = ifmt_ctx->streams[i];
codec_ctx = stream->codec;
/* Reencode video & audio and remux subtitles etc. */
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* Open decoder */
ret = avcodec_open2(codec_ctx,
avcodec_find_decoder(codec_ctx->codec_id), NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
}
}
av_dump_format(ifmt_ctx, 0, filename, 0);
return 0;
}
static int open_output_file(const char *filename)
{
AVStream *out_stream;
AVStream *in_stream;
AVCodecContext *dec_ctx, *enc_ctx;
AVCodec *encoder,*codec;
int ret;
unsigned int i;
ofmt_ctx = NULL;
avformat_open_input(&temp, "rtsp://localhost:8554/live", NULL, NULL);
avformat_alloc_output_context2(&ofmt_ctx, temp->oformat,"rtsp", filename);
if (!ofmt_ctx) {
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
return AVERROR_UNKNOWN;
}
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream) {
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
return AVERROR_UNKNOWN;
}
in_stream = ifmt_ctx->streams[i];
dec_ctx = in_stream->codec;
enc_ctx = out_stream->codec;
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* in this example, we choose transcoding to same codec */
encoder = avcodec_find_encoder(dec_ctx->codec_id);
printf("codec : %d",dec_ctx->codec_id);
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
return AVERROR_INVALIDDATA;
}
/* In this example, we transcode to same properties (picture size,
* sample rate etc.). These properties can be changed for output
* streams easily using filters */
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
enc_ctx->height = dec_ctx->height;
enc_ctx->width = dec_ctx->width;
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
/* take first format from list of supported formats */
enc_ctx->pix_fmt = encoder->pix_fmts[0];
//enc_ctx->codec_id = AV_CODEC_ID_H264;
/* video time_base can be set to whatever is handy and supported by encoder */
enc_ctx->time_base = dec_ctx->time_base;
} else {
enc_ctx->sample_rate = dec_ctx->sample_rate;
enc_ctx->channel_layout = dec_ctx->channel_layout;
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
/* take first format from list of supported formats */
enc_ctx->sample_fmt = encoder->sample_fmts[0];
enc_ctx->pix_fmt =AV_PIX_FMT_RGB24 ;
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
}
/* Third parameter can be used to pass settings to encoder */
//codec = avcodec_find_encoder(AV_CODEC_ID_H264);
ret = avcodec_open2(enc_ctx, encoder, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
return ret;
}
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
return AVERROR_INVALIDDATA;
} else {
/* if this stream must be remuxed */
ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
ifmt_ctx->streams[i]->codec);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
return ret;
}
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
av_dump_format(ofmt_ctx, 0, filename, 1);
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
return ret;
}
}
/* init muxer, write output file header */
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
return ret;
}
return 0;
}
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
AVCodecContext *enc_ctx, const char *filter_spec)
{
char args[512];
int ret = 0;
AVFilter *buffersrc = NULL;
AVFilter *buffersink = NULL;
AVFilterContext *buffersrc_ctx = NULL;
AVFilterContext *buffersink_ctx = NULL;
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
AVFilterGraph *filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
buffersrc = avfilter_get_by_name("buffer");
buffersink = avfilter_get_by_name("buffersink");
if (!buffersrc || !buffersink) {
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
ret = AVERROR_UNKNOWN;
goto end;
}
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
dec_ctx->time_base.num, dec_ctx->time_base.den,
dec_ctx->sample_aspect_ratio.num,
dec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
goto end;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
(uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end;
}
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
buffersrc = avfilter_get_by_name("abuffer");
buffersink = avfilter_get_by_name("abuffersink");
if (!buffersrc || !buffersink) {
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
ret = AVERROR_UNKNOWN;
goto end;
}
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout =
av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt),
dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
goto end;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
(uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
(uint8_t*)&enc_ctx->channel_layout,
sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
(uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
goto end;
}
} else {
ret = AVERROR_UNKNOWN;
goto end;
}
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if (!outputs->name || !inputs->name) {
ret = AVERROR(ENOMEM);
goto end;
}
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
&inputs, &outputs, NULL)) < 0)
goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
/* Fill FilteringContext */
fctx->buffersrc_ctx = buffersrc_ctx;
fctx->buffersink_ctx = buffersink_ctx;
fctx->filter_graph = filter_graph;
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
static int init_filters(void)
{
const char *filter_spec;
unsigned int i;
int ret;
filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
if (!filter_ctx)
return AVERROR(ENOMEM);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
filter_ctx[i].buffersrc_ctx = NULL;
filter_ctx[i].buffersink_ctx = NULL;
filter_ctx[i].filter_graph = NULL;
if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
|| ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
continue;
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
filter_spec = "null"; /* passthrough (dummy) filter for video */
else
filter_spec = "anull"; /* passthrough (dummy) filter for audio */
ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
ofmt_ctx->streams[i]->codec, filter_spec);
if (ret)
return ret;
}
return 0;
}
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
int ret;
int got_frame_local;
AVPacket enc_pkt;
int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
(ifmt_ctx->streams[stream_index]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
if (!got_frame)
got_frame = &got_frame_local;
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
/* encode filtered frame */
enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
filt_frame, got_frame);
av_frame_free(&filt_frame);
if (ret < 0)
return ret;
if (!(*got_frame))
return 0;
/* prepare packet for muxing */
enc_pkt.stream_index = stream_index;
av_packet_rescale_ts(&enc_pkt,
ofmt_ctx->streams[stream_index]->codec->time_base,
ofmt_ctx->streams[stream_index]->time_base);
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
return ret;
}
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
int ret;
AVFrame *filt_frame;
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
/* push the decoded frame into the filtergraph */
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
frame, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
return ret;
}
/* pull filtered frames from the filtergraph */
while (1) {
filt_frame = av_frame_alloc();
if (!filt_frame) {
ret = AVERROR(ENOMEM);
break;
}
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
filt_frame);
if (ret < 0) {
/* if no more frames for output - returns AVERROR(EAGAIN)
* if flushed and no more frames for output - returns AVERROR_EOF
* rewrite retcode to 0 to show it as normal procedure completion
*/
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
ret = 0;
av_frame_free(&filt_frame);
break;
}
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
ret = encode_write_frame(filt_frame, stream_index, NULL);
if (ret < 0)
break;
}
return ret;
}
static int flush_encoder(unsigned int stream_index)
{
int ret;
int got_frame;
if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
AV_CODEC_CAP_DELAY))
return 0;
while (1) {
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
ret = encode_write_frame(NULL, stream_index, &got_frame);
if (ret < 0)
break;
if (!got_frame)
return 0;
}
return ret;
}
int main(int argc, char **argv)
{
int ret;
AVPacket packet = { .data = NULL, .size = 0 };
AVFrame *frame = NULL;
enum AVMediaType type;
unsigned int stream_index;
unsigned int i;
int got_frame;
int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
if (argc != 3) {
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
return 1;
}
av_register_all();
avfilter_register_all();
avformat_network_init();
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = open_output_file(argv[2])) < 0)
goto end;
if ((ret = init_filters()) < 0)
goto end;
/* read all packets */
while (1) {
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
break;
stream_index = packet.stream_index;
type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
stream_index);
if (filter_ctx[stream_index].filter_graph) {
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
break;
}
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base);
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
&got_frame, &packet);
if (ret < 0) {
av_frame_free(&frame);
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
break;
}
if (got_frame) {
frame->pts = av_frame_get_best_effort_timestamp(frame);
ret = filter_encode_write_frame(frame, stream_index);
av_frame_free(&frame);
if (ret < 0)
goto end;
} else {
av_frame_free(&frame);
}
} else {
/* remux this frame without reencoding */
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base);
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
if (ret < 0)
goto end;
}
av_packet_unref(&packet);
}
/* flush filters and encoders */
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
/* flush filter */
if (!filter_ctx[i].filter_graph)
continue;
ret = filter_encode_write_frame(NULL, i);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
goto end;
}
/* flush encoder */
ret = flush_encoder(i);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
goto end;
}
}
av_write_trailer(ofmt_ctx);
end:
av_packet_unref(&packet);
av_frame_free(&frame);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
avcodec_close(ifmt_ctx->streams[i]->codec);
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
avcodec_close(ofmt_ctx->streams[i]->codec);
if (filter_ctx && filter_ctx[i].filter_graph)
avfilter_graph_free(&filter_ctx[i].filter_graph);
}
av_free(filter_ctx);
avformat_close_input(&ifmt_ctx);
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
if (ret < 0)
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
return ret ? 1 : 0;
}
When we run this, we get this error:
Input #0, rtsp, from 'rtsp://localhost:8554/live':
Metadata:
title : Unnamed
comment : N/A
Duration: N/A, start: 5294.642467, bitrate: N/A
Stream #0:0: Video: h264 (Constrained Baseline), yuv420p, 640x480 [SAR 1:1 DAR 4:3], 30 fps, 30 tbr, 90k tbn, 60 tbc
[libx264 @ 0xe87c80] Specified pixel format -1 is invalid or not supported
Cannot open video encoder for stream #0
codec : 28 and pix :12
Error occurred: Invalid argument
But I can see that the correct pixel format macro, AV_PIX_FMT_YUV420P, is provided in the code.
UPDATE 1:
This is the section in ffmpeg where the error is thrown (utils.c, line 1547):
if (avctx->codec->pix_fmts) {
for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
break;
if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE
&& !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG)
&& avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) {
char buf[128];
snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
(char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
ret = AVERROR(EINVAL);
goto free_and_end;
}
Can someone suggest a solution for this?
Thanks
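One observation that may help narrow this down (a diagnostic hint, not a fix): in the check quoted above, -1 is AV_PIX_FMT_NONE, so the codec context that actually reaches avcodec_open2() for stream #0 still has no pixel format set at that point, even though enc_ctx->pix_fmt is assigned earlier in open_output_file(). Logging the value right before the open call would confirm whether the assignment survives, e.g.:
/* just before ret = avcodec_open2(enc_ctx, encoder, NULL); in open_output_file() */
const char *name = av_get_pix_fmt_name(enc_ctx->pix_fmt);
av_log(NULL, AV_LOG_INFO, "about to open encoder with pix_fmt=%d (%s)\n",
       enc_ctx->pix_fmt, name ? name : "none");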
