I'm using ffmpeg to do some video work, and now I've run into some trouble.
I don't know how to get the progress of the transcode.
I check ffmpeg.c and found that most time cost is 'transcode',
here is the source code of ffmpeg.c#transcode:
/* Main transcode loop quoted from ffmpeg.c (abridged: the '...' line marks
 * elided code, and the av_log string below was split across two lines by the
 * paste, so this snippet does not compile verbatim). Loops one
 * demux/decode/filter/encode/mux step at a time until input is exhausted,
 * an error occurs, or a termination signal arrives. */
static int transcode(void)
{
XLOGD("==========transcode==========");
...
XLOGD("start transcode");
/* received_sigterm is a file-scope flag set by the signal handler. */
while (!received_sigterm) {
int64_t cur_time= av_gettime_relative();
/* if 'q' pressed, exits */
if (stdin_interaction)
if (check_keyboard_interaction(cur_time) < 0)
break;
/* check if there's any stream where output is still needed */
if (!need_output()) {
av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write
to, finishing.\n");
break;
}
/* One step of the pipeline; AVERROR_EOF is a normal end, not an error. */
ret = transcode_step();
if (ret < 0 && ret != AVERROR_EOF) {
av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n",
av_err2str(ret));
break;
}
/* dump report by using the output first video and audio streams */
/* NOTE(review): print_report() is the natural hook for progress reporting —
 * it already computes the transcoded output timestamp each iteration. */
print_report(0, timer_start, cur_time);
}
/* NOTE(review): 'ret' is declared outside this snippet (file scope in ffmpeg.c). */
return ret;
}
I called ffmpeg like this:
/* Entry-point wrapper around ffmpeg's option parsing and transcode().
 * NOTE(review): 'if CONFIG_AVDEVICE' below is missing the leading '#'
 * (and a matching '#endif') — in ffmpeg.c this is a preprocessor
 * conditional, not a C statement; as written it will not compile.
 * NOTE(review): 'ret', 'i', 'want_sdp', 'current_time', 'ti' and
 * getutime() are declared elsewhere in the file (not visible here). */
int execute(int argc, char **argv)
{
if CONFIG_AVDEVICE
/* parse options and open all input/output files */
ret = ffmpeg_parse_options(argc, argv);
if (ret < 0){
return exit_program(1);
}
/* No inputs and no outputs at all: print usage and bail out. */
if (nb_output_files <= 0 && nb_input_files == 0) {
show_usage();
av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
return exit_program(1);
}
/* file converter / grab */
if (nb_output_files <= 0) {
av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
return exit_program(1);
}
if (nb_input_files == 0) {
av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
return exit_program(1);
}
/* An SDP description is only wanted when every output muxer is "rtp". */
for (i = 0; i < nb_output_files; i++) {
if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
want_sdp = 0;
}
/* Record the start time, then run the whole conversion. */
current_time = ti = getutime();
if (transcode() < 0){
return exit_program(1);
}
return main_return_code;
}
Any ideas anyone?
Many thanks in advance.
#
many thanks, now I figure it out.
in ffmpeg.c, function
print_report(int is_last_report, int64_t timer_start, int64_t cur_time),
there I got some code block:
secs = FFABS(pts) / AV_TIME_BASE;
us = FFABS(pts) % AV_TIME_BASE;
mins = secs / 60;
secs %= 60;
hours = mins / 60;
mins %= 60;
from this I can know the duration that had been transcode.
First get duration of the movie :
int64_t duration = output_files[0]->ctx->duration;
Second in while loop :
You can calculate the percent with this formula, using the pts value from the print_report code above (the output timestamp already transcoded) — not elapsed wall-clock time, which has nothing to do with the media duration:
int percent = (int)(((double)pts / (double)duration) * 100);
Related
We need to read ADC module over I2C continuously 20 times to get stable values of ADC.
We have created a task for it, but code stop working in couple of min showing below error.
E (1925655) task_wdt: Task watchdog got triggered. The following tasks did not r
eset the watchdog in time:
E (1925655) task_wdt: - IDLE (CPU 0)
E (1925655) task_wdt: Tasks currently running:
E (1925655) task_wdt: CPU 0: esp_timer
We are not getting any exact solution for our error. Below is the task source for reference.
/* FreeRTOS task: roughly every 300 ms, re-initialize the TLA202x, take 20
 * ADC readings over I2C, and log the highest one.
 * NOTE(review): the braces after Hal_TLA2024_Re_Initialize() form a plain
 * scoping block — there is no 'if' attached; presumably intentional, verify.
 * NOTE(review): 'read_value += bigger_value' accumulates, but read_value is
 * reset right after logging, so it always holds just the last peak — the
 * name "Average" does not match what is logged (a maximum). */
void Hal_Read_Average_Voltage_For_TLA202x (void *pvParameters)
{
float read_value = 0, bigger_value = 0, voltage = 0;
while(1)
{
Hal_TLA2024_Re_Initialize ();
{
/* Sample 20 times, keeping the largest reading. */
for (int count1 = 0; count1 < 20; count1++)
{
voltage = readVoltage (); //Performs I2C register read
if (voltage > bigger_value)
{
bigger_value = voltage;
}
/* One-tick delay: yields the CPU between samples so lower-priority
 * tasks (incl. IDLE, which feeds the task watchdog) can run. */
vTaskDelay (1);
}
read_value += bigger_value;
bigger_value = 0;
}
/* NOTE(review): ESP_LOGE is the *error* level; this is an info message. */
ESP_LOGE(TAG, "ADC Highest Value = %f\n", (read_value));
read_value = 0;
bigger_value = 0;
voltage = 0;
vTaskDelay (300 / portTICK_PERIOD_MS);
}
}
/*
 * Read one conversion result from the TLA202x ADC and return it in volts.
 *
 * Reads the 16-bit data register (12-bit result, left-justified,
 * two's complement), extracts the signed 12-bit sample, scales it by the
 * LSB size (in mV) of the currently selected full-scale range, and
 * converts mV to V.
 *
 * NOTE(review): 'voltage' and 'current_range' are file-scope variables
 * declared elsewhere in this file.
 *
 * Fixes vs. previous version:
 *  - Sign handling: the old code right-shifted the (possibly negative)
 *    int16_t first (implementation-defined for negative values) and then
 *    OR-ed in 0x8000, which only sets bit 15 and produces the wrong
 *    magnitude for negative readings. We now shift the raw unsigned word
 *    and sign-extend from bit 11 explicitly, which is portable.
 *  - Fractional ranges: 'raw_voltage *= 0.5' stored the product back into
 *    the int16_t, truncating the fraction before it reached 'voltage'.
 *    Scaling is now done in floating point.
 */
float readVoltage (void)
{
    int16_t raw_voltage;
    uint16_t u16TempRead = 0;
    uint8_t u8TempRead[2] = { 0, 0 };
    uint8_t u8TempAddress = TLA202x_DATA_REG;

    Hal_I2C_Read_Register (TLA202x_I2CADDR_DEFAULT, u8TempAddress, u8TempRead, 2, 1);
    u16TempRead = u8TempRead[1] | (u8TempRead[0] << 8);

    /* Result is left-justified: drop the 4 low bits, then sign-extend bit 11. */
    raw_voltage = (int16_t)(u16TempRead >> 4);
    if (raw_voltage & 0x0800)
        raw_voltage -= 0x1000;

    /* Scale by the LSB size (mV per count) of the active full-scale range. */
    switch (current_range)
    {
        case TLA202x_RANGE_6_144_V:
            voltage = raw_voltage * 3.0f;
            break;
        case TLA202x_RANGE_4_096_V:
            voltage = raw_voltage * 2.0f;
            break;
        case TLA202x_RANGE_2_048_V:
            voltage = raw_voltage * 1.0f;
            break;
        case TLA202x_RANGE_1_024_V:
            voltage = raw_voltage * 0.5f;
            break;
        case TLA202x_RANGE_0_512_V:
            voltage = raw_voltage * 0.25f;
            break;
        case TLA202x_RANGE_0_256_V:
            voltage = raw_voltage * 0.125f;
            break;
        default:
            /* Unknown range: report 0 V rather than a stale value. */
            voltage = 0.0f;
            break;
    }
    voltage /= 1000.0f; /* mV => V */
    return voltage;
}
/*
 * Read 'read_buffer_length' bytes from an I2C slave. If register_address
 * is not -1, the register is selected with a write phase followed by a
 * repeated START before the read phase.
 *
 * NOTE(review): 'write_buffer_length' is unused; kept for API compatibility.
 *
 * Fix: the previous version retried forever via 'goto DATA_READ' on any
 * non-timeout error. A wedged bus therefore spun this task indefinitely,
 * starving the IDLE task and triggering the task watchdog (the exact
 * "task_wdt: IDLE (CPU 0)" failure reported). Retries are now bounded and
 * yield one tick to the scheduler between attempts.
 */
void Hal_I2C_Read_Register (uint32_t slave_address, int register_address, uint8_t read_data_buffer[],
                            uint8_t read_buffer_length, uint8_t write_buffer_length)
{
    const int max_attempts = 3;

    for (int attempt = 0; attempt < max_attempts; attempt++)
    {
        i2c_cmd_handle_t cmd = i2c_cmd_link_create ();
        i2c_master_start (cmd);
        if (register_address != -1)
        {
            /* Write phase: select the register, then repeated START. */
            i2c_master_write_byte (cmd, slave_address << 1 | I2C_MASTER_WRITE,
                                   ACK_CHECK_EN);
            i2c_master_write_byte (cmd, register_address, ACK_CHECK_EN);
            i2c_master_start (cmd);
        }
        i2c_master_write_byte (cmd, slave_address << 1 | I2C_MASTER_READ,
                               ACK_CHECK_EN);
        if (read_buffer_length > 1)
        {
            /* ACK every byte except the last one. */
            i2c_master_read (cmd, read_data_buffer, read_buffer_length - 1, ACK_VAL);
        }
        /* NACK the final byte to end the transfer. */
        i2c_master_read_byte (cmd, read_data_buffer + read_buffer_length - 1,
                              NACK_VAL);
        i2c_master_stop (cmd);
        esp_err_t ret = i2c_master_cmd_begin (I2C_NUM_0, cmd, 1000 / portTICK_RATE_MS);
        i2c_cmd_link_delete (cmd);

        if (ret == ESP_OK)
        {
            return;
        }
        else if (ret == ESP_ERR_TIMEOUT)
        {
            /* Bus busy: the previous version did not retry timeouts either. */
            return;
        }
        ESP_LOGW(TAG, "Read failed %d", ret);
        vTaskDelay (1); /* yield so IDLE can feed the task watchdog */
    }
}
We are having 2 more thread running on same priority.
If we remove the for loop in the above thread then there is no WDT error.
Updated ESP-IDF version to 4.4 and it solved this issue.
Considering the following snippet: ( from https://ffmpeg.org/doxygen/trunk/encode_audio_8c-example.html )
/* Quoted fragment from FFmpeg's encode_audio example: generate and encode
 * 200 frames of a sine tone. Variables (i, j, k, t, tincr, c, frame, pkt,
 * f, samples) are declared outside this fragment.
 * NOTE(review): the samples[2*j + k] indexing writes with a stride of 2,
 * which assumes interleaved S16 with exactly 2 channels — confirm against
 * c->channels before reusing. */
for (i = 0; i < 200; i++) {
/* make sure the frame is writable -- makes a copy if the encoder
* kept a reference internally */
ret = av_frame_make_writable(frame);
if (ret < 0)
exit(1);
samples = (uint16_t*)frame->data[0];
/* Fill one frame's worth of samples, duplicating channel 0 into the rest. */
for (j = 0; j < c->frame_size; j++) {
samples[2*j] = (int)(sin(t) * 10000);
for (k = 1; k < c->channels; k++)
samples[2*j + k] = samples[2*j];
t += tincr;
}
encode(c, frame, pkt, f);
}
If I understand the example correctly, the generated audio stream consists exactly of 200 frames of size c->frame_size which are encoded and saved to disk.
However, if I want to encode a generic stream of data of size soundsize, I will have a certain number of frames of fixed size c->frame_size, i.e. size_t nframes = soundsize / c->frame_size; plus one last frame of size: size_t rem_lastframe = soundsize % c->frame_size;
Can you explain me how to process this last frame? The frame_size seems to be fixed and chosen by the codec.
This is what ffmpeg does.
if (src->nb_samples < avctx->frame_size) {
ret = pad_last_frame(avctx, dst, src);
...
You can use apad filter or mimic what libavcodec does
/**
* Pad last frame with silence.
*/
/* Quoted from libavcodec: build a full-size frame from a short final frame
 * by copying its samples and silencing the remainder. 'frame' (dst) is
 * allocated here with the codec's fixed frame_size; on any failure it is
 * unreferenced and the error code is returned. */
static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
{
int ret;
/* Describe dst with src's layout but the codec's full nb_samples. */
frame->format = src->format;
frame->channel_layout = src->channel_layout;
frame->channels = src->channels;
frame->nb_samples = s->frame_size;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0)
goto fail;
/* Carry over metadata (pts etc.) from the short source frame. */
ret = av_frame_copy_props(frame, src);
if (ret < 0)
goto fail;
/* Copy the real samples into the head of the padded frame... */
if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
src->nb_samples, s->channels, s->sample_fmt)) < 0)
goto fail;
/* ...and fill the tail (frame_size - nb_samples) with silence. */
if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
frame->nb_samples - src->nb_samples,
s->channels, s->sample_fmt)) < 0)
goto fail;
return 0;
fail:
av_frame_unref(frame);
return ret;
}
Opening a HLS stream using avformat_open_input retrieves data from all streams and I would like to only retrieve data from some of them. Is that possible?
Consider the following MWE:
#include <libavformat/avformat.h>
/* Minimal working example: open the URL given as argv[1], then read packets
 * in a loop and print each packet's stream index. Demonstrates that every
 * stream of an HLS master playlist is demuxed by default. */
int main(int argc, char **argv)
{
AVFormatContext *inFmtCtx = NULL;
AVPacket packet;
const char *inUrl;
int ret;
if (argc < 2) { return -1; }
inUrl = argv[1];
/* Open the input and probe its streams. */
if ((ret = avformat_open_input(&inFmtCtx, inUrl, NULL, NULL)) < 0)
goto end;
if ((ret = avformat_find_stream_info(inFmtCtx, NULL)) < 0)
goto end;
/* Demux until EOF or error, reporting which stream each packet belongs to. */
while (1) {
ret = av_read_frame(inFmtCtx, &packet);
if (ret < 0) break;
// # Placeholder: Do Something # //
printf("%i, ", packet.stream_index);
av_packet_unref(&packet);
}
end:
avformat_close_input(&inFmtCtx);
/* AVERROR_EOF is the normal way out of the read loop, not a failure. */
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
return 1;
}
return 0;
}
Using the example HLS url "http://mcdn.daserste.de/daserste/de/master.m3u8" (might be geolocked), the printf returns values between 0 and 9, indicating that all 10 streams (5 video, 5 audio) are retrieved.
Of course, one could discard all but the selected ones, after they have been read, e.g. using
if(packet.stream_index != selectedVideoStreamId && packet.stream_index != selectedAudioStreamId) {
av_packet_unref(&packet);
continue;
}
But can the input context / ffmpeg be configured to only retrieve the selected streams, i.e. not downloading all the data that is not needed (the unselected streams)?
You can disable a HLS variant by discarding all streams that belong to it:
if ((ret = avformat_open_input(&inFmtCtx, inUrl, NULL, NULL)) < 0)
goto end;
// disable all but the last stream
for (i = 0; i < inFmtCtx->nb_streams - 1; ++i) {
AVStream *st = inFmtCtx->streams[i];
st->discard = AVDISCARD_ALL;
}
if ((ret = avformat_find_stream_info(inFmtCtx, NULL)) < 0)
goto end;
Reading your stream for a few seconds yields:
stream=0 pkt_count=0
stream=1 pkt_count=0
stream=2 pkt_count=0
stream=3 pkt_count=0
stream=4 pkt_count=0
stream=5 pkt_count=0
stream=6 pkt_count=0
stream=7 pkt_count=0
stream=8 pkt_count=998
stream=9 pkt_count=937
As you can see it reads two streams corresponding to the multiplexed audio/video streams in the last playlist, even if a single stream was enabled. If you need better granularity than that you'll have to modify the HLS demuxer.
EDIT: I can't get some of the indents to work correctly, but the code is complete and blocked correctly. Sorry.
For a class assignment I've had to implement part of a simple UNIX shell. It must support redirection, piping, and backgrounding. I was provided with a parser that populates a struct called Command_line (I'll include the struct prototype below). My job is to write a function that processes these Command_lines (handles redirection, backgrounding, piping, and executes programs).
I've almost got it working but for some reason it doesn't properly handle commands of the form program1 | program2 - file. For example, cat < file1.in | cat - file2.in. The problem doesn't seem to be in the redirection as I've written test programs to put in front of the pipe that do not require redirection but still cause the same problem. The pipelining does work in most cases; it's just these programs with "-" as an argument that cause problems.
When I run one of these problematic command lines, the output from the first program is printed and the process hangs up (I have to manually suspend and kill it). It does not give the user a prompt afterwards or react to input (aside from ctrl + z which I use to suspend the process).
Any advice on how to get this working would be much appreciated.
Here's the struct:
/* This is the structure that holds the information about a parsed
* command line. The argvs array is an array of string vectors; in
* other words, for some int i, argvs[i] is an array of strings.
* You should be able to use argvs[i] in calls to one of the execv*()
* functions.
*/
/* Parsed command line: one argv vector per piped program (see the comment
 * above). MAX_PROGS and MAX_ARGS are defined elsewhere in the project. */
typedef struct {
char *argvs[MAX_PROGS + 1][MAX_ARGS + 1];
int num_progs; /* Number of argument vectors; if > 1, piping is requested */
char *infile; /* Name of stdin redirect file; NULL if no redirection */
char *outfile; /* Name of stdout redirect file; NULL if no redirection */
int append; /* Is output redirection appending? */
int bg; /* Put command into background? */
} Command_line;
And my code, that processes one of these structs (I've left out the #includes).
pid_t runproc(int fd[][2], int num, Command_line *cmd);
/*
 * Execute a parsed command line: create the pipes between the programs,
 * apply stdin/stdout redirection, launch every program with runproc(),
 * then close all descriptors in the parent and reap the children.
 *
 * Fixes:
 *  - fd[] was declared [MAX_PROGS-1][2] but is indexed up to
 *    num_progs-1 (e.g. fd[cmd->num_progs - 1][1] for outfile), which
 *    overruns the array when num_progs == MAX_PROGS. It needs MAX_PROGS
 *    slots: one read end and one write end per program.
 *  - Output redirection opened without O_CREAT, so redirecting to a file
 *    that does not exist yet always failed. Non-append redirection now
 *    also truncates, matching shell '>' semantics.
 */
void execute_command_line(Command_line *cmd) {
    int n;
    int temp_pipe[2];
    int fd[MAX_PROGS][2];       /* fix: one slot per program, not MAX_PROGS-1 */
    pid_t pids[MAX_PROGS];

    /* Mark every pipe end as unused (-1). */
    for(n = 0; n < cmd->num_progs; n++){
        fd[n][0] = -1;
        fd[n][1] = -1;
    }

    /* Chain the programs: write end of slot n feeds read end of slot n+1. */
    for(n = 0; n < cmd->num_progs - 1; n++){
        pipe(temp_pipe);
        fd[n][1] = temp_pipe[1];
        fd[n+1][0] = temp_pipe[0];
    }

    /* stdin redirection: the first program reads from the named file. */
    if(cmd->infile){
        fd[0][0] = open(cmd->infile, O_RDONLY);
        if(fd[0][0] < 0){
            printf("Error executing command\n");
            exit(1);
        }
    }

    /* stdout redirection: the last program writes to the named file.
       O_CREAT (mode 0644) so a missing target is created; '>' truncates,
       '>>' appends. */
    if(cmd->outfile){
        int flags = cmd->append ? (O_WRONLY | O_APPEND | O_CREAT)
                                : (O_WRONLY | O_CREAT | O_TRUNC);
        fd[cmd->num_progs - 1][1] = open(cmd->outfile, flags, 0644);
        if(fd[cmd->num_progs - 1][1] < 0){
            printf("Error executing command\n");
            exit(1);
        }
    }

    /* Launch each program of the pipeline; remember the child pids. */
    for(n = 0; n < cmd->num_progs; n++){
        pids[n] = runproc(fd, n, cmd);
    }

    /* Parent must close every pipe end, or readers never see EOF. */
    for(n = 0; n < cmd->num_progs; n++){
        if(fd[n][0] >= 0) close(fd[n][0]);
        if(fd[n][1] >= 0) close(fd[n][1]);
    }

    /* Reap all children. */
    for(n = 0; n < cmd->num_progs; n++){
        wait(NULL);
    }
}
/*
 * Fork and exec program number 'num' of the parsed pipeline.
 * Returns the child's pid to the parent (used by the reaping loop).
 *
 * Fixes:
 *  - The child's "close pipe ends" loop closed fd[num] over and over
 *    instead of walking every slot fd[n]. Inherited write ends therefore
 *    stayed open in sibling children, downstream readers (e.g. 'cat -')
 *    never saw EOF, and pipelines like 'prog1 | prog2 - file' hung.
 *  - Removed the redundant dup2(fd[0][1], STDOUT_FILENO) in the
 *    non-background path: fd[num][1] is already dup'ed below when it is a
 *    real descriptor, and it is -1 for a single-program command.
 */
pid_t runproc(int fd[][2], int num, Command_line *cmd){
    pid_t pid;
    int n;
    int frk_chk;

    pid = fork();
    if(pid < 0){
        printf("Error executing command\n");
        exit(1);
    }else if (!pid){ /* Child */
        /* Wire this program's stdin/stdout to its pipe ends (if any). */
        if(fd[num][0] >= 0) dup2(fd[num][0], STDIN_FILENO);
        if(fd[num][1] >= 0) dup2(fd[num][1], STDOUT_FILENO);
        /* Close ALL inherited pipe ends (fix: iterate fd[n], not fd[num]). */
        for(n = 0; n < cmd->num_progs; n++){
            if(fd[n][0] >= 0) close(fd[n][0]);
            if(fd[n][1] >= 0) close(fd[n][1]);
        }
        if(cmd->bg){
            /* Background: double-fork so the shell does not wait on it. */
            if((frk_chk = fork()) < 0){
                printf("Error executing command\n");
                exit(1);
            }else if(frk_chk){
                exit(0);        /* intermediate parent exits immediately */
            }else{
                /* Detach stdin unless it was redirected from a file. */
                if(!(cmd->infile) && num == 0) close(STDIN_FILENO);
                execvp(cmd->argvs[num][0], cmd->argvs[num]);
            }
        }else{
            execvp(cmd->argvs[num][0], cmd->argvs[num]);
        }
        /* Only reached if execvp failed. */
        printf("Error executing command\n");
        exit(1);
    }else{ /* Parent */
        return pid;
    }
}
Within the run_proc(), in the /*close pipe ends*/ loop,
it should be
for(n=0; n < cmd->num_progs - 1; n++)
{
if(fd[n][0] >= 0) close(fd[n][0]);
if(fd[n][1] >= 0) close(fd[n][1]);
}
I want to write an encoder with ffmpeg which can put iFrames (keyframes) at positions I want. Where can I found tutorials or reference material for it?
P.S
Is it possible to do this with mencoder or any open-source encoder? I want to encode an H.263 file. I am writing under and for Linux.
You'll need to look at the libavcodec documentation - specifically, at avcodec_encode_video(). I found that the best available documentation is in the ffmpeg header files and the API sample source code that's provided with the ffmpeg source. Specifically, look at libavcodec/api-example.c or even ffmpeg.c.
To force an I frame, you'll need to set the pict_type member of the picture you're encoding to 1: 1 is an I frame, 2 is a P frame, and I don't remember what's the code for a B frame off the top of my head... Also, the key_frame member needs to be set to 1.
Some introductory material is available here and here, but I don't really know how good it is.
You'll need to be careful how you allocate the frame objects that the API calls require. api-example.c is your best bet as far as that goes, in my opinion. Look for the function video_encode_example() - it's concise and illustrates all the important things you need to worry about - pay special attention to the second call to avcodec_encode_video() that passes a NULL picture argument - it's required to get the last frames of video since MPEG video is encoded out of sequence and you may end up with a delay of a few frames.
An up-to-date version of api-example.c can be found at http://ffmpeg.org/doxygen/trunk/doc_2examples_2decoding_encoding_8c-example.html
It does the entire video encoding in a single and relatively short function. So this is probably a good place to start. Compile and run it. And then start modifying it until it does what you want.
It also has audio encoding and audio & video decoding examples.
GStreamer has decent documentation, has bindings for a number of languages (although the native API is C), and supports any video format you can find plugins for, including H.263 via gstreamer-ffmpeg.
you will need libavcodec library, For the first step I think you can learn about its use in ffplay.c file inside ffmpeg source code. It would tell you a lot. You can check my project also about video at rtstegvideo.sourceforge.net.
Hope this help.
If you're Java programmer then use Xuggler.
Minimal runnable example on FFmpeg 2.7
Based on Ori Pessach's answer, below is a minimal example that generates frames of the form:
I
P
B
P
...
The key parts of the code that control frame type are:
c = avcodec_alloc_context3(codec);
/* Minimal distance of I-frames. This is the maximum value allowed,
or else we get a warning at runtime. */
c->keyint_min = 600;
/* Otherwise it defaults to 0 and B-frames are not allowed. */
c->max_b_frames = 1;
and:
frame->key_frame = 0;
switch (frame->pts % 4) {
case 0:
frame->key_frame = 1;
frame->pict_type = AV_PICTURE_TYPE_I;
break;
case 1:
case 3:
frame->pict_type = AV_PICTURE_TYPE_P;
break;
case 2:
frame->pict_type = AV_PICTURE_TYPE_B;
break;
}
We can then verify the frame type with:
ffprobe -select_streams v \
-show_frames \
-show_entries frame=pict_type \
-of csv \
tmp.h264
as mentioned at: https://superuser.com/questions/885452/extracting-the-index-of-key-frames-from-a-video-using-ffmpeg
Some rules were enforced by FFmpeg even if I try to overcome them:
the first frame is an I-frame
cannot place a B-frame before an I-frame (TODO why?)
Preview of generated output.
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
static AVCodecContext *c = NULL;
static AVFrame *frame;
static AVPacket pkt;
static FILE *file;
struct SwsContext *sws_context = NULL;
/*
Convert RGB24 array to YUV. Save directly to the `frame`,
modifying its `data` and `linesize` fields
*/
/* Convert one packed RGB24 image (c->width x c->height) into the global
 * YUV420P 'frame' using swscale. The cached context is (re)created on first
 * use or when parameters change; 'sws_context', 'c' and 'frame' are
 * file-scope globals. */
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
/* RGB24 is a single plane: 3 bytes per pixel per row. */
const int in_linesize[1] = { 3 * c->width };
sws_context = sws_getCachedContext(sws_context,
c->width, c->height, AV_PIX_FMT_RGB24,
c->width, c->height, AV_PIX_FMT_YUV420P,
0, 0, 0, 0);
sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
c->height, frame->data, frame->linesize);
}
/*
Generate 2 different images with four colored rectangles, each 25 frames long:
Image 1:
black | red
------+-----
green | blue
Image 2:
yellow | red
-------+-----
green | white
*/
/*
 * Fill (and if needed (re)allocate) a width*height RGB24 buffer with one of
 * two four-rectangle test images, switching image every 25 frames:
 *   pts/25 even:  black  | red      pts/25 odd:  yellow  | red+blue
 *                 green  | blue                  green+blue | white
 * Returns the (possibly moved) buffer; caller owns it. Pass NULL on the
 * first call.
 *
 * Fixes:
 *  - Use the 'pts' parameter instead of reaching into the global
 *    'frame->pts' (the caller passes frame->pts anyway), making the
 *    function self-contained.
 *  - Abort cleanly if realloc() fails instead of dereferencing NULL.
 */
uint8_t* generate_rgb(int width, int height, int pts, uint8_t *rgb) {
    int x, y, cur;
    rgb = realloc(rgb, 3 * sizeof(uint8_t) * height * width);
    if (!rgb) {
        fprintf(stderr, "Could not allocate RGB buffer\n");
        exit(1);
    }
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            cur = 3 * (y * width + x);
            /* Start from black, then set channels per quadrant. */
            rgb[cur + 0] = 0;
            rgb[cur + 1] = 0;
            rgb[cur + 2] = 0;
            if ((pts / 25) % 2 == 0) {
                if (y < height / 2) {
                    if (x < width / 2) {
                        /* Black. */
                    } else {
                        rgb[cur + 0] = 255; /* red */
                    }
                } else {
                    if (x < width / 2) {
                        rgb[cur + 1] = 255; /* green */
                    } else {
                        rgb[cur + 2] = 255; /* blue */
                    }
                }
            } else {
                if (y < height / 2) {
                    rgb[cur + 0] = 255;
                    if (x < width / 2) {
                        rgb[cur + 1] = 255; /* yellow */
                    } else {
                        rgb[cur + 2] = 255; /* red+blue */
                    }
                } else {
                    if (x < width / 2) {
                        rgb[cur + 1] = 255;
                        rgb[cur + 2] = 255; /* green+blue */
                    } else {
                        rgb[cur + 0] = 255;
                        rgb[cur + 1] = 255;
                        rgb[cur + 2] = 255; /* white */
                    }
                }
            }
        }
    }
    return rgb;
}
/* Allocate resources and write header data to the output file. */
/* Allocate resources and write header data to the output file. */
/* Initializes the global codec context 'c', output 'file' and 'frame' for
 * a width x height, fps frames/sec encode with the given codec. Exits the
 * process on any failure (consistent with the rest of this example). */
void ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height) {
AVCodec *codec;
int ret;
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
c->bit_rate = 400000;
c->width = width;
c->height = height;
c->time_base.num = 1;
c->time_base.den = fps;
/* I, P, B frame placement parameters. */
/* gop_size == keyint_min == 600: leave key-frame placement to the manual
 * pict_type selection done per-frame in ffmpeg_encoder_encode_frame(). */
c->gop_size = 600;
c->max_b_frames = 1;
c->keyint_min = 600;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
file = fopen(filename, "wb");
if (!file) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* 32-byte alignment for the picture buffer planes. */
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
}
/*
Write trailing data to the output file
and free resources allocated by ffmpeg_encoder_start.
*/
void ffmpeg_encoder_finish(void) {
/* MPEG sequence end code, appended after the last packet. */
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
int got_output, ret;
/* Flush: passing a NULL frame drains the encoder's delayed packets
 * (needed because B-frames are emitted out of order). */
do {
fflush(stdout);
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, file);
av_packet_unref(&pkt);
}
} while (got_output);
fwrite(endcode, 1, sizeof(endcode), file);
fclose(file);
/* Release everything allocated by ffmpeg_encoder_start. */
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
av_frame_free(&frame);
}
/*
Encode one frame from an RGB24 input and save it to the output file.
Must be called after ffmpeg_encoder_start, and ffmpeg_encoder_finish
must be called after the last call to this function.
*/
void ffmpeg_encoder_encode_frame(uint8_t *rgb) {
int ret, got_output;
ffmpeg_encoder_set_frame_yuv_from_rgb(rgb);
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
/* Force the frame type from pts: I, P, B, P, I, ... Note the encoder may
 * still override this (e.g. the first frame is always an I-frame). */
switch (frame->pts % 4) {
case 0:
frame->key_frame = 1;
frame->pict_type = AV_PICTURE_TYPE_I;
break;
case 1:
case 3:
frame->key_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_P;
break;
case 2:
frame->key_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_B;
break;
}
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
/* got_output may be 0 while the encoder buffers delayed frames. */
if (got_output) {
fwrite(pkt.data, 1, pkt.size, file);
av_packet_unref(&pkt);
}
}
/* Represents the main loop of an application which generates one frame per loop. */
/*
 * Drive one complete encode: start the encoder, feed it 100 generated
 * RGB test frames at 25 fps, then flush and close the output file.
 * Mirrors the main loop of an application producing one frame per tick.
 */
static void encode_example(const char *filename, int codec_id) {
    const int frame_width = 320;
    const int frame_height = 240;
    const int total_frames = 100;
    uint8_t *rgb_buf = NULL;
    int frame_idx;

    ffmpeg_encoder_start(filename, codec_id, 25, frame_width, frame_height);
    frame_idx = 0;
    while (frame_idx < total_frames) {
        frame->pts = frame_idx;
        rgb_buf = generate_rgb(frame_width, frame_height, frame_idx, rgb_buf);
        ffmpeg_encoder_encode_frame(rgb_buf);
        frame_idx++;
    }
    ffmpeg_encoder_finish();
}
/* Encode the same generated clip twice: raw H.264 and MPEG-1 elementary
 * streams (viewable without a container, e.g. with ffplay). */
int main(void) {
/* Required by the libavcodec version this example targets. */
avcodec_register_all();
encode_example("tmp.h264", AV_CODEC_ID_H264);
encode_example("tmp.mpg", AV_CODEC_ID_MPEG1VIDEO);
/* TODO: is this encoded correctly? Possible to view it without container? */
/*encode_example("tmp.vp8", AV_CODEC_ID_VP8);*/
return 0;
}
Tested on Ubuntu 15.10. GitHub upstream.
Do you really want to do this?
In most cases, you are better off just controlling the global parameters of AVCodecContext.
FFmpeg does smart things like using a keyframe if the new frame is completely different from the previous one, and not much would be gained from differential encoding.
For example, if we set just:
c->keyint_min = 600;
then we get exactly 4 key-frames on the above example, which is logical since there are 4 abrupt frame changes on the generated video.