After FFmpeg encode, AVPacket pts and dts are AV_NOPTS_VALUE - ffmpeg

I would like to ask a question about FFmpeg when I use the x264 encoder.
This is my code:
int
FFVideoEncoder::init(AVCodecID codecId, int bitrate, int fps, int gopSize,
int width, int height, AVPixelFormat format) {
release();
const AVCodec *codec = avcodec_find_encoder(codecId);
m_pCodecCtx = avcodec_alloc_context3(codec);
m_pCodecCtx->width = width;
m_pCodecCtx->height = height;
m_pCodecCtx->pix_fmt = format;
m_pCodecCtx->bit_rate = bitrate;
m_pCodecCtx->thread_count = 5;
m_pCodecCtx->max_b_frames = 0;
m_pCodecCtx->gop_size = gopSize;
m_pCodecCtx->time_base.num = 1;
m_pCodecCtx->time_base.den = fps;
//H.264
if (m_pCodecCtx->codec_id == AV_CODEC_ID_H264) {
// av_dict_set(&opts, "preset", "slow", 0);
av_dict_set(&m_pEncoderOpts, "preset", "superfast", 0);
av_dict_set(&m_pEncoderOpts, "tune", "zerolatency", 0);
m_pCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
int ret = avcodec_open2(m_pCodecCtx, m_pCodecCtx->codec, &m_pEncoderOpts);
if (ret == 0) {
LOGI("open avcodec success!");
} else {
LOGE("open avcodec error!");
return -1;
}
return ret;
}
int FFVideoEncoder::encode(const Frame &inFrame, AVPacket *outPacket) {
AVFrame *frame = av_frame_alloc();
// avpicture_fill((AVPicture *) frame, inFrame.getData(), AV_PIX_FMT_YUV420P, inFrame.getWidth(),
// inFrame.getHeight());
av_image_fill_arrays(frame->data, frame->linesize, inFrame.getData(), m_pCodecCtx->pix_fmt,
inFrame.getWidth(), inFrame.getHeight(), 1);
int ret = 0;
ret = avcodec_send_frame(m_pCodecCtx, frame);
if (ret != 0) {
LOGE("send frame error! %s", av_err2str(ret));
} else {
ret = avcodec_receive_packet(m_pCodecCtx, outPacket);
LOGI("extract data size = %d", m_pCodecCtx->extradata_size);
if (ret != 0) {
LOGE("receive packet error! %s", av_err2str(ret));
}
};
av_frame_free(&frame);
return ret;
}
I expect the AVPacket to carry the pts and dts for this frame, but in fact I only get the encoded frame data and size.
//====================================
Besides this question, I have another one:
The x264 docs say the "tune" option can be set to values such as film, animation and others, but I only get a normal video when I set the "zerolatency" param. When I set the other options, the video's bitrate is very low.
Thanks for your answer.

This is a simple example to see if it works:
I believe you should set frame->pts beforehand.
Try this:
Set frame->pts = framecount before calling ret = avcodec_send_frame(m_pCodecCtx, frame).
Keep framecount as a simple counter of the frames you send for encoding, and increase it each time.
Hope that helps.
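A minimal sketch of that idea, assuming a hypothetical int64_t member m_frameCount (not in the original class) that starts at 0 and relying on the 1/fps time_base set in init():

// m_frameCount is an assumed counter added to FFVideoEncoder.
// With time_base = 1/fps, stamping frames with a running index is a valid pts.
frame->pts = m_frameCount++;
int ret = avcodec_send_frame(m_pCodecCtx, frame);
if (ret == 0) {
    ret = avcodec_receive_packet(m_pCodecCtx, outPacket);
    // On success the encoder fills outPacket->pts and outPacket->dts
    // (with max_b_frames = 0 they should be equal).
}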

Related

libx264 Input picture width (640) is greater than stride (0)

I'm trying to encode a series of Cairo surfaces by using libav. Here I initialize AV stuff:
AVStream* video_stream;
AVCodec* vcodec;
gint ret;
/* Setup video container */
avformat_alloc_output_context2(&img->video_format_context, NULL, NULL, filename);
if (img->video_format_context == NULL)
{
img_message(img, TRUE, _("Failed to find a suitable container for %s\n"),filename);
return FALSE;
}
ret = avio_open(&img->video_format_context->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0)
{
img_message(img, TRUE, _("Couldn't write output file %s\n"),filename);
return FALSE;
}
/* Setup video codec */
vcodec = avcodec_find_encoder(codec_id);
if (!vcodec)
{
img_message(img, TRUE, _("Couldn't find any encoder for %s\n"),filename);
return FALSE;
}
/* Create video stream */
video_stream = avformat_new_stream(img->video_format_context, vcodec);
video_stream->id = 0;
if (! video_stream)
{
img_message(img, TRUE, _("Couldn't not allocate video stream\n"));
return FALSE;
}
/* Allocate video encoding context */
img->codec_context = avcodec_alloc_context3(vcodec);
if (! img->codec_context)
{
img_message(img, TRUE, _("Couldn't allocate video enconding context\n"));
return FALSE;
}
/* Setup video enconding context parameters */
img->codec_context->codec_id = codec_id;
img->codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
img->codec_context->width = img->video_size[0];
img->codec_context->height = img->video_size[1];
img->codec_context->sample_aspect_ratio = (struct AVRational) {1, 1};
img->codec_context->pix_fmt = vcodec->pix_fmts[0];
img->codec_context->framerate = av_d2q(frame_rate, INT_MAX);
if (codec_id == AV_CODEC_ID_VP8 || codec_id == AV_CODEC_ID_VP9 || codec_id == AV_CODEC_ID_THEORA || codec_id == AV_CODEC_ID_FLV1 ||
AV_CODEC_ID_MPEG1VIDEO || codec_id == AV_CODEC_ID_MPEG2VIDEO)
img->codec_context->bit_rate = round(bitrate_crf * 1000000);
img->codec_context->time_base = av_inv_q(img->codec_context->framerate);
video_stream->time_base = img->codec_context->time_base;
if (img->video_format_context->oformat->flags & AVFMT_GLOBALHEADER)
img->codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
/* Some codecs require the CRF value */
if (codec_id == AV_CODEC_ID_H264 || codec_id == AV_CODEC_ID_H265)
{
gchar *crf = g_strdup_printf("%i", bitrate_crf);
av_opt_set(img->codec_context->priv_data, "crf", crf, AV_OPT_SEARCH_CHILDREN);
g_free(crf);
}
/* Set exporting stage to be multithreaded */
AVDictionary* opts = NULL;
av_dict_set(&opts, "threads", "auto", 0);
/* Open video encoder */
ret = avcodec_open2(img->codec_context, vcodec, &opts);
if (ret < 0)
{
img_message(img, TRUE, _("Failed to open the video encoder\n"));
return FALSE;
}
/* Copy video encoder parameters to output stream */
ret = avcodec_parameters_from_context(video_stream->codecpar, img->codec_context);
if (ret < 0)
{
img_message(img, TRUE, _("Failed to copy video encoder parameters to output stream\n"));
return FALSE;
}
/* AVFRAME stuff */
img->video_frame = av_frame_alloc();
img->video_frame->format = AV_PIX_FMT_RGBA;
img->video_frame->width = img->video_size[0];
img->video_frame->height = img->video_size[1];
av_frame_make_writable(img->video_frame);
ret = av_frame_get_buffer(img->video_frame, 1);
if (ret < 0)
img_message(img,TRUE, _("Could not allocate the video frame data\n"));
img->video_packet = av_packet_alloc();
And here, called repeatedly (the function is invoked somewhere else), I call avcodec_send_frame(), but it throws the error in the subject:
gint width, height, stride, row, col, offset;
uint8_t *pix;
/* Image info and pixel data */
width = cairo_image_surface_get_width( surface );
height = cairo_image_surface_get_height( surface );
stride = cairo_image_surface_get_stride( surface );
pix = cairo_image_surface_get_data( surface );
for( row = 0; row < height; row++ )
{
for( col = 0; col < width; col++ )
{
offset = 3 * col + row * img->video_frame->linesize[0];
img->video_frame->data[0][offset + 0] = pix[0];
img->video_frame->data[0][offset + 1] = pix[1];
img->video_frame->data[0][offset + 2] = pix[2];
}
}
img_export_encode_av_frame(img->video_frame, img->video_format_context, img->codec_context, img->video_packet);
return TRUE;
}
void img_export_encode_av_frame(AVFrame *frame, AVFormatContext *fmt, AVCodecContext *ctx, AVPacket *pkt)
{
gint ret;
/* send the frame to the encoder */
ret = avcodec_send_frame(ctx, frame);
if (ret < 0)
g_print("Error sending a frame for encoding\n");
while (ret >= 0)
{
ret = avcodec_receive_packet(ctx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return;
else if (ret < 0)
g_print("Error during encoding\n");
av_interleaved_write_frame(fmt, pkt);
av_packet_unref(pkt);
}
}
I googled here too, but with no luck; it seems I'm the only one encoding a Cairo surface. Grepping for the error message in the ffmpeg sources didn't help either. How do I set the stride? I read that ffmpeg sets it for me once I allocate the buffer for the frame, but in my case it seems it doesn't. Where am I wrong?
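For reference, a minimal sketch of how the stride normally gets set: give the frame its format, width and height before av_frame_get_buffer(), and allocate it in the pixel format the encoder expects (assumed here to be img->codec_context->pix_fmt rather than AV_PIX_FMT_RGBA); the Cairo pixels then go through sws_scale(), which handles both strides. Variable names follow the question, but this is a generic sketch, not a drop-in fix:

#include <libswscale/swscale.h>

/* Allocate the frame in the encoder's pixel format; av_frame_get_buffer()
 * computes frame->linesize (the stride) for every plane. */
img->video_frame = av_frame_alloc();
img->video_frame->format = img->codec_context->pix_fmt;
img->video_frame->width  = img->codec_context->width;
img->video_frame->height = img->codec_context->height;
ret = av_frame_get_buffer(img->video_frame, 0);   /* 0 = default alignment */

/* Per frame: convert the Cairo surface instead of writing data[0] by hand.
 * (Cairo's CAIRO_FORMAT_ARGB32 is BGRA on little-endian machines.) */
struct SwsContext *sws = sws_getContext(width, height, AV_PIX_FMT_BGRA,
                                        img->codec_context->width,
                                        img->codec_context->height,
                                        img->codec_context->pix_fmt,
                                        SWS_BILINEAR, NULL, NULL, NULL);
const uint8_t *src[4] = { pix, NULL, NULL, NULL };
int src_stride[4]     = { stride, 0, 0, 0 };       /* Cairo's own stride */
sws_scale(sws, src, src_stride, 0, height,
          img->video_frame->data, img->video_frame->linesize);
sws_freeContext(sws);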

SIGABRT: abort attempting encoding PCM to AAC

I am trying to encode incoming raw PCM audio data into an AAC-encoded audio file. The following crashes with SIGABRT when it hits the avcodec_encode_audio2 call:
aac_encoding.c
#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
typedef struct AACEncoder {
AVFormatContext* pFormatCtx;
AVStream* audio_st;
AVCodecContext* pCodecCtx;
AVFrame* pFrame;
AVPacket* pkt;
uint8_t* frame_buf;
} AACEncoder;
AACEncoder *openEncoder(char* out_file) {
AACEncoder* encoder = (AACEncoder*)malloc(sizeof(AACEncoder*));
av_register_all();
AVFormatContext* pFormatCtx = avformat_alloc_context();
encoder->pFormatCtx = pFormatCtx;
AVOutputFormat* outFormat = av_guess_format(NULL, out_file, NULL);
pFormatCtx->oformat = outFormat;
if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
printf("Failed to open output file!\n");
return NULL;
}
AVStream* audio_st = avformat_new_stream(pFormatCtx, 0);
if (audio_st==NULL){
return NULL;
}
encoder->audio_st;
AVCodecContext* pCodecCtx = audio_st->codec;
encoder->pCodecCtx = pCodecCtx;
pCodecCtx->codec_id = outFormat->audio_codec;
pCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
pCodecCtx->sample_fmt = AV_SAMPLE_FMT_FLTP;
pCodecCtx->sample_rate= 48000;
pCodecCtx->channel_layout = AV_CH_LAYOUT_MONO;
pCodecCtx->channels = av_get_channel_layout_nb_channels(pCodecCtx->channel_layout);
pCodecCtx->bit_rate = 64000;
av_dump_format(pFormatCtx, 0, out_file, 1);
AVCodec* pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
if (!pCodec){
printf("Can not find encoder!\n");
return NULL;
}
if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0){
printf("Failed to open encoder!\n");
return NULL;
}
AVFrame* pFrame = av_frame_alloc();
encoder->pFrame = pFrame;
pFrame->nb_samples= pCodecCtx->frame_size;
pFrame->format= pCodecCtx->sample_fmt;
int size = av_samples_get_buffer_size(NULL, pCodecCtx->channels,pCodecCtx->frame_size,pCodecCtx->sample_fmt, 1);
uint8_t* frame_buf = (uint8_t *)av_malloc(size);
encoder->frame_buf = frame_buf;
avcodec_fill_audio_frame(pFrame, pCodecCtx->channels, pCodecCtx->sample_fmt,(const uint8_t*)frame_buf, size, 1);
//Write Header
avformat_write_header(pFormatCtx,NULL);
AVPacket pkt;
encoder->pkt = &pkt;
av_new_packet(&pkt,size);
return encoder;
}
int writePCM(AACEncoder* encoder, int16_t* pcmData, size_t pcmSize) {
SwrContext* swr = swr_alloc();
av_opt_set_int(swr, "in_channel_layout", encoder->pCodecCtx->channel_layout, 0);
av_opt_set_int(swr, "out_channel_layout", encoder->pCodecCtx->channel_layout, 0);
av_opt_set_int(swr, "in_sample_rate", encoder->pCodecCtx->sample_rate, 0);
av_opt_set_int(swr, "out_sample_rate", encoder->pCodecCtx->sample_rate, 0);
av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
swr_init(swr);
printf("Initialized SwrContext\n");
fflush(stdout);
int ret = swr_convert(swr, encoder->pFrame->extended_data, pcmSize, &pcmData, pcmSize);
int got_frame=0;
if(ret < 0){
printf("Failed to resample!\n");
return -1;
}
//Encode
ret = avcodec_encode_audio2(encoder->pCodecCtx, encoder->pkt, encoder->pFrame, &got_frame);
printf("Encoded audio using AAC\n");
fflush(stdout);
swr_free(&swr);
if(ret < 0){
printf("Failed to encode!\n");
return -1;
}
if (got_frame==1){
printf("Succeed to encode 1 frame! \tsize:%5d\n", encoder->pkt->size);
encoder->pkt->stream_index = encoder->audio_st->index;
ret = av_write_frame(encoder->pFormatCtx, encoder->pkt);
av_free_packet(encoder->pkt);
}
}
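For comparison, a minimal sketch of the avcodec_send_frame()/avcodec_receive_packet() pair that replaced avcodec_encode_audio2() in current FFmpeg. It assumes an already opened AAC codec context and a properly allocated FLTP frame; it only illustrates the call pattern, not a fix for the crash above:

/* enc: an opened AVCodecContext for AAC; frame: an AVFrame with nb_samples,
 * format and channel layout set and its buffers filled (NULL to flush). */
int encode_audio_frame(AVCodecContext *enc, AVFrame *frame, AVPacket *pkt, FILE *out)
{
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        fwrite(pkt->data, 1, pkt->size, out);   /* or hand the packet to a muxer */
        av_packet_unref(pkt);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}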

Request for ffmpeg raw data to mp4 container example

I have a binary file with raw H.264 data arranged like this:
NAL(SPS), NAL(PPS), NAL(Frame), NAL(SPS), NAL(PPS)....
and I want to mux it (without encoding) into an MP4 container.
The muxing.c example in ffmpeg encodes YUV data, which is different from my case, and I have no idea how to change the example to do what I want.
I know the command ffmpeg -i h264file -c copy h264.mp4 does what I want, but I have to do it in my program, so I need to know how to use the ffmpeg API to do the same thing; so far I cannot find any simple example. Does anyone have a hint on how to do it? Thanks.
Update: I have written the code below from the reference. It seems to create the MP4, but the timing is not correct: it loses the frame rate and time information and the file plays very fast.
av_register_all();
int ret;
AVDictionary *opt = NULL;
//bool is264 = true;
const char * inputFileName = "input.264";
const char * outputFileName = "output.mp4";
AVFormatContext *ic = avformat_alloc_context();
if((ret = avformat_open_input(&ic, inputFileName, NULL, NULL)) < 0)
return -1;//
// Get format info (retrieve stream information)
if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
return ret; // Couldn't find stream information
for (int i = 0; i < ic->nb_streams; i++)
{
AVStream *stream;
AVCodecContext *codec_ctx;
stream = ic->streams[i];
codec_ctx = stream->codec;
/* Reencode video & audio and remux subtitles etc. */
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* Open decoder */
ret = avcodec_open2(codec_ctx, avcodec_find_decoder(codec_ctx->codec_id), NULL);
if (ret < 0) {
//av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
}
}
// Dump information about file onto standard error
av_dump_format(ic, 0, inputFileName, 0);
AVFormatContext *oc;
avformat_alloc_output_context2(&oc, NULL, NULL, outputFileName);
if (!oc) {
//printf("Could not deduce output format from file extension: using MPEG.\n");
//avformat_alloc_output_context2(&oc, NULL, "mpeg", outputFileName);
return -1;
}
AVStream *ist = ic->streams[0];
AVCodec *out_vid_codec = avcodec_find_encoder(oc->oformat->video_codec);
if (NULL == out_vid_codec)
return -1; // Couldn't find video encoder
AVStream *out_vid_strm = avformat_new_stream(oc, out_vid_codec);
if (NULL == out_vid_strm)
return -1; // Couldn't output video stream
ret = avcodec_copy_context(out_vid_strm->codec, ist->codec);
if (ret < 0)
return ret; // Failed to copy context
ret = avio_open(&oc->pb, outputFileName, AVIO_FLAG_WRITE);
ret = avformat_write_header(oc, NULL);
AVPacket pkt;
while(1)
{
AVStream *in_stream, *out_stream;
ret = av_read_frame(ic, &pkt);
if (ret < 0)
break;
pkt.stream_index = 0;
in_stream = ic->streams[pkt.stream_index];
out_stream = oc->streams[pkt.stream_index];
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
//log_packet(ofmt_ctx, &pkt, "out");
ret = av_interleaved_write_frame(oc, &pkt);
if (ret < 0) {
fprintf(stderr, "Error muxing packet\n");
break;
}
av_packet_unref(&pkt);
}
av_write_trailer(oc);
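One likely cause, offered as an assumption: a raw .264 elementary stream carries no timestamps, so av_read_frame() returns packets whose pts/dts are AV_NOPTS_VALUE and the rescaling above produces garbage. A minimal sketch that stamps such packets from a frame counter and an assumed constant frame rate (25 fps here), to be applied right after av_read_frame() and before the av_rescale_q_rnd() calls in the loop:

#include <libavformat/avformat.h>

/* Give timestamps to packets read from a raw Annex-B stream, which has none.
 * fps is an assumed, constant input frame rate, e.g. (AVRational){25, 1}. */
static void stamp_raw_packet(AVPacket *pkt, int64_t frame_index,
                             AVRational fps, AVRational stream_tb)
{
    if (pkt->pts == AV_NOPTS_VALUE) {
        /* duration of one frame, expressed in the input stream's time base */
        int64_t step = av_rescale_q(1, av_inv_q(fps), stream_tb);
        pkt->pts = pkt->dts = frame_index * step;
        pkt->duration = step;
    }
}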

Decoding of 16bit gray image encoded with FFV1

I have a problem with decoding gray images encoded with the FFV1 codec.
I successfully encode a 16-bit gray image (with the avcodec_receive_packet(...) function) and save the AVPacket data to a file. Then I read this data back from the file and try to decode it (with avcodec_decode_video2 or avcodec_send_packet/avcodec_receive_frame) with no success:
when I try to decode the packet with the avcodec_decode_video2 function I get the error "Access violation occurred, unable to write location 0x0000000000000000".
when I try to decode the packet with the avcodec_send_packet/avcodec_receive_frame functions I get the error "chroma shift parameters 7 0 are invalid".
I compared the packets after encoding and before decoding and all fields and values seem to be the same. I even tried to decode the packet right after avcodec_receive_packet (the encoding function), but with the same error.
I use version 4.0 of ffmpeg and the program is based on the decode_video.c and encode_video.c examples.
When I use containers (e.g. AVI) to read/write the encoded images from file (based on the demuxing_decoding.c and muxing.c examples), I successfully encode and decode frames with FFV1. However, I cannot use containers, because I want to encode frames with different resolutions and mix several video sources together. Additionally, the compression ratio is significantly lower (it falls from 2.9 to 2.2) over a few hundred images, which is also very surprising.
So my question is how to correctly save/read (to and from a binary file, not a container) and prepare the AVPacket for decoding with FFV1.
Any help is greatly appreciated.
The decoding code:
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}
#pragma warning(disable: 4996)
#define INBUF_SIZE 4096
#define FF_INPUT_BUFFER_PADDING_SIZE 64
uint8_t endcode[4];
AVCodecContext *c, c2;
AVCodec *codec;
int i, ret, x, y;
AVFrame *frame;
AVPacket *pkt, *pkt_temp;
FILE *encodedVideoFile;
AVDictionary *opts = NULL;
uint8_t *video_dst_data[4] = { NULL };
int video_dst_linesize[4];
int imageSize;
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
/* flush the encoder */
frame = NULL;
encode();
/* add sequence end code to have a real MPEG file */
//fwrite(endcode, 1, sizeof(endcode), encodedVideoFile);
fclose(encodedVideoFile);
avcodec_free_context(&c);
av_frame_free(&frame);
av_packet_free(&pkt);
}
void initDecoding(const char *filename)
{
/* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
/* find the MPEG-1 video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_FFV1);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* resolution must be a multiple of two */
c->width = 1280;
c->height = 484;
/* frames per second */
c->time_base.den = 1;
c->time_base.num = 10;
c->bits_per_raw_sample = 16;
c->framerate.den = 10;
c->framerate.num = 1;
c->pix_fmt = AV_PIX_FMT_GRAY16;
//Version of FFV1 codec
c->level = 3;
/* Init the decoders, with or without reference counting */
av_dict_set(&opts, "refcounted_frames", 0 ? "1" : "0", 0);
if ((ret = avcodec_open2(c, codec, &opts)) < 0) {
return;
}
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
ret = av_image_alloc(video_dst_data, video_dst_linesize,
c->width, c->height, c->pix_fmt, 4);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
}
encodedVideoFile = fopen(filename, "rb");
if (!encodedVideoFile) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = av_frame_get_buffer(frame, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate the video frame data\n");
exit(1);
}
/* make sure the frame data is writable */
ret = av_frame_make_writable(frame);
if (ret < 0)
exit(1);
}
void closeDecoding()
{
fclose(encodedVideoFile);
av_parser_close(parser);
avcodec_free_context(&c);
av_frame_free(&frame);
av_packet_free(&pkt);
}
void decodePacket()
{
size_t data_size;
int *got_frame = 0;
read_packt_from_file(pkt, encodedVideoFile);
ret = av_frame_is_writable(frame);
//First decoding function
/*ret = avcodec_decode_video2(c, frame, got_frame, pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding video frame (%s)\n");
}*/
ret = avcodec_send_packet(c, pkt);
if (ret < 0) {
fprintf(stderr, "Error sending a packet for decoding\n");
exit(1);
}
while (ret >= 0) {
ret = avcodec_receive_frame(c, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return;
else if (ret < 0) {
fprintf(stderr, "Error during decoding\n");
exit(1);
}
printf("saving frame %3d\n", c->frame_number);
fflush(stdout);
}
}
size_t read_packt_from_file(AVPacket *packet, FILE *file)
{
size_t ret = 0;
int size;
uint8_t * data;
//av_packet_from_data
ret = fread(packet, sizeof(AVPacket), 1, file);
size = packet->size;
data = new uint8_t[size];
ret = fread(data, size, 1, file);
av_new_packet(packet, size);
av_packet_from_data(packet, data, size);
return ret;
}
//To write encoded AVPacket
size_t write_packt_to_file(AVPacket *packet, FILE *file)
{
size_t ret = 0;
ret = fwrite(packet, sizeof(AVPacket), 1, file);
ret = fwrite(packet->data, packet->size, 1, file);
if (packet->buf) {
fwrite(packet->buf->data, packet->buf->size, 1, file);
}
fflush(file);
return ret;
}
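Two observations, offered as assumptions rather than a verified fix: FFV1 stores its configuration in the codec context's extradata, which the code above never transfers from the encoder to the decoder (consistent with the "chroma shift parameters are invalid" error), and write_packt_to_file()/read_packt_from_file() serialize the AVPacket struct itself, whose pointer fields are meaningless after a round trip to disk. A minimal sketch of a payload-only save/load, with extradata copied separately (enc_ctx/dec_ctx are placeholder names):

/* Save only the payload size and bytes; never the AVPacket struct itself. */
static size_t save_packet(const AVPacket *pkt, FILE *f)
{
    uint32_t size = (uint32_t)pkt->size;
    fwrite(&size, sizeof(size), 1, f);
    return fwrite(pkt->data, 1, pkt->size, f);
}

static int load_packet(AVPacket *pkt, FILE *f)
{
    uint32_t size;
    if (fread(&size, sizeof(size), 1, f) != 1)
        return AVERROR_EOF;
    if (av_new_packet(pkt, size) < 0)          /* allocates data + padding */
        return AVERROR(ENOMEM);
    if (fread(pkt->data, 1, size, f) != size)
        return AVERROR_INVALIDDATA;
    return 0;
}

/* After avcodec_open2() on the encoder, copy its extradata into the decoder
 * context before opening it (sketch, error handling omitted): */
dec_ctx->extradata = av_mallocz(enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
memcpy(dec_ctx->extradata, enc_ctx->extradata, enc_ctx->extradata_size);
dec_ctx->extradata_size = enc_ctx->extradata_size;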

AV_PIX_FMT_YUVJ422P to jpeg conversion

I am able to convert an image from AV_PIX_FMT_YUVJ422P to JPEG format (code below), but the resulting image has a green shade over the complete bottom half. Please suggest where I am going wrong.
These are the steps I have taken:
Initially I have an AV_PIX_FMT_UYVY422 image from the camera. I convert it to AV_PIX_FMT_YUVJ422P format and can view this image on http://rawpixels.net/; the parameters shown by the website are size 2448x2050, Bpp1 = 8, Bpp2 = 8, Bpp3 = 8, alignment 1, SubSampling H = 2, SubSampling V = 1, format: YUV422P.
So the input image is in the correct AV_PIX_FMT_YUVJ422P format, and I can also view it in a "YUV image viewer" application using the YUV422 format.
Now I am trying to convert it to JPEG format using the code below, and the resulting image has a green shade over the complete bottom half.
AVFormatContext* pFormatCtx;
AVOutputFormat* fmt;
AVStream* video_st;
AVCodecContext* pCodecCtx;
AVCodec* pCodec;
uint8_t* picture_buf;
AVFrame* picture;
AVPacket pkt;
int y_size;
int size;
int got_picture=0;
int ret=0;
int main( int argc, char* argv[] )
{
FILE *in_file = NULL;
unsigned int in_width = 2448;
unsigned int in_height = 2050;
const char* out_file = "encoded_pic.jpg";
in_file = fopen("c:\\test_Planar.yuv","rb");
if(in_file == NULL) { printf("\n\tFile Opening error...!!"); exit(1); }
else printf("\n\tYUV File Open Sucessfully...!!\n\n");
av_register_all(); // Loads the whole database of available codecs and formats.
pFormatCtx = avformat_alloc_context();
fmt = NULL;
fmt = av_guess_format("mjpeg",NULL,NULL);
pFormatCtx->oformat = fmt;
//------Output URL-------------------------
if (avio_open(&pFormatCtx->pb,out_file, AVIO_FLAG_READ_WRITE) < 0)
{
printf("Couldn't open output file.");
return -1;
}
video_st = avformat_new_stream(pFormatCtx, 0);
if (video_st==NULL) return -1;
pCodecCtx = video_st->codec;
pCodecCtx->codec_id = fmt->video_codec;
pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx->pix_fmt = AV_PIX_FMT_YUVJ422P;
//--------------------------MY SOURCE PIXEL FORMAT--------------
pCodecCtx->width = in_width;
pCodecCtx->height = in_height;
pCodecCtx->time_base.num = 1;
pCodecCtx->time_base.den = 1;//25;
//Output some information
av_dump_format(pFormatCtx, 0, out_file, 1);
// Determine if desired video encoder is installed
pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
if (!pCodec)
{
printf("Codec not found.");
return -1;
}
printf("\nCodec Identified done\n");
if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0){
printf("Could not open codec.\n");
return -1;
}
picture = av_frame_alloc();
size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
picture_buf = (uint8_t *)av_malloc(size);
if (!picture_buf) return -1;
avpicture_fill((AVPicture *)picture, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
printf("\t\nWrite Header..");
avformat_write_header(pFormatCtx,NULL);
y_size = pCodecCtx->width * pCodecCtx->height;
av_new_packet(&pkt,y_size*3);
//Read YUV
if (fread(picture_buf, 1, y_size*3/2, in_file) <=0)
{
printf("Could not read input file.");
return -1;
}
//--------------------------------------------input image format UYVY
picture->data[0] = picture_buf; // Y
picture->data[1] = picture_buf+ y_size; // U
picture->data[2] = picture_buf+ y_size*5/4; // V
//-----------------------------------------------
printf("\t\n Encode the image..\n");
ret = avcodec_encode_video2(pCodecCtx, &pkt,picture, &got_picture);
if(ret < 0)
{
printf("Encode Error.\n");
return -1;
}
if (got_picture==1)
{
pkt.stream_index = video_st->index;
ret = av_write_frame(pFormatCtx, &pkt);
}
av_free_packet(&pkt);
//Write Trailer
av_write_trailer(pFormatCtx);
printf("Encode Successful.\n");
if (video_st)
{
avcodec_close(video_st->codec);
av_free(picture);
av_free(picture_buf);
}
avio_close(pFormatCtx->pb);
avformat_free_context(pFormatCtx);
fclose(in_file);
printf("\n\tYUV File Close Sucessfully...!!");
}
Resulting output JPEG image encoded from the yuvj422p input, showing the green shade.
Changing the input pixel format from AV_PIX_FMT_YUVJ422P to AV_PIX_FMT_YUVJ420P resolves the issue.
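An alternative reading, offered as an assumption: the read size and plane offsets in the code above follow the 4:2:0 layout (y_size*3/2 bytes read, V plane at y_size*5/4), so switching the declared pixel format to YUVJ420P simply makes the format consistent with those offsets. If the source really is planar 4:2:2, a sketch of the offsets matching that layout would be:

/* Planar 4:2:2: U and V are half width but full height,
 * so the whole frame occupies y_size * 2 bytes. */
if (fread(picture_buf, 1, y_size * 2, in_file) <= 0)
    return -1;
picture->data[0] = picture_buf;                  /* Y : y_size bytes   */
picture->data[1] = picture_buf + y_size;         /* U : y_size/2 bytes */
picture->data[2] = picture_buf + y_size * 3 / 2; /* V : y_size/2 bytes */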

Resources