GPS file properties (FMX) - FireMonkey

I can find basic properties of a file with System.IOUtils.TFile, like size, date, etc. But I can't figure out how to get the GPS coordinates (latitude and longitude) from a JPEG in my C++Builder FMX app for Win32.
I can do it with a console application based on this GDI+ example from Microsoft. I just can't figure out how to do this at the System.IOUtils.TFile level. I don't want to run a console app to get the GPS data if I don't have to.

You can parse the EXIF data on your own ... This is my ancient C++/VCL code doing just that:
AnsiString exif_datetime(AnsiString file)
{
    AnsiString t = "";
    int hnd, adr, siz;
    BYTE *dat;
    hnd = FileOpen(file, fmOpenRead);
    if (hnd < 0) return t;
    siz = FileSeek(hnd, 0, 2); // file size
    FileSeek(hnd, 0, 0);
    dat = new BYTE[siz];
    if (dat == NULL) { FileClose(hnd); return t; }
    siz = FileRead(hnd, dat, siz);
    FileClose(hnd);
    for (adr = 0; adr < siz - 4; adr++)
    {
        if (dat[adr + 0] == 'E')
        if (dat[adr + 1] == 'x')
        if (dat[adr + 2] == 'i')
        if (dat[adr + 3] == 'f')
        if (dat[adr + 4] ==  0 ) // Exif header found
        {
            for (; adr < siz - 18; adr++)
            {
                int e = 1;
                char a; // "2008:07:17 19:19:10"
                a = dat[adr +  0]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr +  1]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr +  2]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr +  3]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr +  4]; if (a != ':') e = 0;
                a = dat[adr +  5]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr +  6]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr +  7]; if (a != ':') e = 0;
                a = dat[adr +  8]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr +  9]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr + 10]; if (a != ' ') e = 0;
                a = dat[adr + 11]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr + 12]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr + 13]; if (a != ':') e = 0;
                a = dat[adr + 14]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr + 15]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr + 16]; if (a != ':') e = 0;
                a = dat[adr + 17]; if ((a < '0') || (a > '9')) e = 0;
                a = dat[adr + 18]; if ((a < '0') || (a > '9')) e = 0;
                if (e) // "YYYY:MM:DD hh:mm:ss" pattern matched
                {
                    for (e = 0; e < 19; e++) t += char(dat[adr + e]);
                    break;
                }
            }
            break;
        }
    }
    delete[] dat;
    return t;
}
It opens the JPG, loads it into memory, scans for the EXIF structure, and if found returns the date/time from it ...
So just extract the info you want instead of the datetime ... for how to do that, see:
Exif standard version 2.31
It's the first file-format spec I found (from the wiki).
In case you have big images: the EXIF data in a JPG is usually placed at the start of the file, so you do not need to load the whole image into memory, just the first few KBytes ...
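Alternatively, since your FMX app targets Win32 anyway, the same GDI+ calls from the Microsoft console example work inside a GUI process too; there is no console dependency. A minimal, untested sketch of reading the latitude (error handling simplified; PropertyTagGpsLatitudeRef holds 'N'/'S' for the sign, and PropertyTagGpsLongitude/...Ref work the same way):
#include <windows.h>
#include <stdlib.h>
#include <gdiplus.h>
// link against gdiplus.lib

double gps_latitude(const wchar_t *path)
{
    using namespace Gdiplus;
    GdiplusStartupInput gsi;
    ULONG_PTR token;
    GdiplusStartup(&token, &gsi, NULL);
    double lat = 0.0;
    {
        Image img(path);
        UINT size = img.GetPropertyItemSize(PropertyTagGpsLatitude); // 0 if tag absent
        if (size > 0)
        {
            PropertyItem *item = (PropertyItem*)malloc(size);
            if ((item != NULL) && (img.GetPropertyItem(PropertyTagGpsLatitude, size, item) == Ok))
            {
                // EXIF stores latitude as three unsigned rationals:
                // degrees, minutes, seconds (numerator/denominator pairs)
                ULONG *v = (ULONG*)item->value;
                lat = v[0] / (double)v[1]
                    + v[2] / (double)v[3] / 60.0
                    + v[4] / (double)v[5] / 3600.0;
            }
            free(item);
        }
    } // Image must be destroyed before GdiplusShutdown
    GdiplusShutdown(token);
    return lat; // negate for 'S' per the Ref tag
}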

Related

Processes read data from the same file

I have a mesh file and I partitioned it using METIS (into 4 parts/processes). METIS provided me with the partitioning file of the mesh (a file with the number of the process each element of the mesh belongs to). My job now is to feed this information to my parallel code. I tried to do it by letting each process access the same mesh file and read the data it wants, based on the partitioning file.
#include <iostream>
#include <fstream>
#include <sstream>
#include "mpi.h"
using namespace std;

//each process stores the partitioning
int* PartitionFile(){
    ifstream file ("epart.txt");
    int NE=14;
    int part,i=0;
    int *partition=new int[14];
    while(file>>part){
        partition[i]=part;
        i++;
    }
    file.close();
    return partition;
}

int FindSizeOfLocalElements(int *epart,int rank){
    int size=0;
    for (int i=0;i<14;i++){
        if(epart[i]==rank){
            size+=1;
        }
    }
    return size;
}

//stores the elements of each process
int * LocalPartition(int* epart,int size,int rank){
    int *localPart=new int[size];
    int j=0;
    for(int i=0;i<14;i++){
        if (epart[i]==rank){
            localPart[j]=i+1; //+1 because elements start from 1 (global numbering)
            j+=1;
        }
    }
    return localPart;
}

int **ElementConnectivityMeshFile(int* localPart,int size){
    ifstream file ("mesh.txt");
    int node1,node2,node3;
    int elem=1;
    int i=0;
    int **elemConn=new int*[size];
    for(int j=0;j<size;j++){
        elemConn[j]=new int[3]; //each element has 3 nodes. Elements have local numbering here; global numbering is stored in localPart
    }
    while(file>>node1>>node2>>node3){
        if(i==size){break;} //all local elements found; avoids reading localPart out of bounds
        if (elem==localPart[i]){
            elemConn[i][0]=node1;
            elemConn[i][1]=node2;
            elemConn[i][2]=node3;
            i+=1;
        }
        elem+=1;
        if(elem>14){break;}
    }
    file.close();
    return elemConn;
}

int main(){
    MPI_Init(NULL, NULL);
    int numProc;
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int *epart=PartitionFile();
    int size=FindSizeOfLocalElements(epart,rank);
    int *elem=LocalPartition(epart,size,rank);
    int **elemConn=ElementConnectivityMeshFile(elem,size);
    MPI_Finalize();
    return 0;
}
This part of the code gives me the desired results; however, I want to know how efficient it is to let MPI processes read the same file using C++ standard functions, and whether that can affect scalability and performance. For this demonstration I used a mesh of 14 elements and 4 processes.
mesh file
1 3 2
2 3 4
3 5 4
4 5 6
5 7 6
8 7 5
3 8 5
9 7 8
9 8 3
1 9 3
10 9 1
11 10 1
11 1 12
12 1 2
epart file
2
2
0
0
0
1
0
1
1
3
3
3
2
2
I think this program illustrates the basic approach using MPI-IO with binary files:
#include <stdio.h>
#include <mpi.h>

#define NELEM 14
#define NVERT 3

int meshfile[NELEM][NVERT] =
    { { 1,  3,  2},
      { 2,  3,  4},
      { 3,  5,  4},
      { 4,  5,  6},
      { 5,  7,  6},
      { 8,  7,  5},
      { 3,  8,  5},
      { 9,  7,  8},
      { 9,  8,  3},
      { 1,  9,  3},
      {10,  9,  1},
      {11, 10,  1},
      {11,  1, 12},
      {12,  1,  2} };

int partfile[NELEM] = {2, 2, 0, 0, 0, 1, 0, 1, 1, 3, 3, 3, 2, 2};

int main(void)
{
    int i;
    int part[NELEM];
    int mesh[NELEM][NVERT];
    /* Should really malloc smaller mesh based on local size */
    FILE *fp;
    int rank, size;
    MPI_Comm comm;
    MPI_Status status;
    MPI_File fh;
    MPI_Datatype filetype;
    int disp[NELEM];
    int nelemlocal;
    /* Should really malloc smaller disp based on nelemlocal */
    comm = MPI_COMM_WORLD;
    MPI_Init(NULL, NULL);
    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);
    if (rank == 0)
    {
        printf("Running on %d processes\n", size);
        // data files should already exist but create them here so we
        // have a self-contained program
        fp = fopen("mesh.dat", "w");
        fwrite(meshfile, sizeof(int), NELEM*NVERT, fp);
        fclose(fp);
        fp = fopen("part.dat", "w");
        fwrite(partfile, sizeof(int), NELEM, fp);
        fclose(fp);
    }
    // could read on rank 0 and broadcast, but using MPI-IO then
    // "readall" should take an efficient collective approach
    // every rank reads the whole partition file
    MPI_File_open(comm, "part.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
    MPI_File_read_all(fh, part, NELEM, MPI_INT, &status);
    MPI_File_close(&fh);
    nelemlocal = 0;
    // pick out local elements and record displacements
    for (i = 0; i < NELEM; i++)
    {
        if (part[i] == rank)
        {
            disp[nelemlocal] = i*NVERT;
            nelemlocal += 1;
        }
    }
    printf("on rank %d, nelemlocal = %d\n", rank, nelemlocal);
    // create the MPI datatype to use as the filetype, which is
    // effectively a mask that selects only the elements for this rank
    MPI_Type_create_indexed_block(nelemlocal, NVERT, disp, MPI_INT, &filetype);
    MPI_Type_commit(&filetype);
    MPI_File_open(comm, "mesh.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    // set the file view appropriate to this rank
    MPI_File_set_view(fh, 0, MPI_INT, filetype, "native", MPI_INFO_NULL);
    // each rank only reads its own set of elements based on the file view
    MPI_File_read_all(fh, mesh, nelemlocal*NVERT, MPI_INT, &status);
    MPI_File_close(&fh);
    // check we got the correct data
    for (i = 0; i < nelemlocal; i++)
    {
        printf("on rank %d, mesh[%d] = %d, %d, %d\n",
               rank, i, mesh[i][0], mesh[i][1], mesh[i][2]);
    }
    MPI_Finalize();
}
and it seems to give the right answer:
dsh#laptop$ mpicc -o metisio metisio.c
dsh#laptop$ mpirun -n 4 ./metisio | sort
on rank 0, mesh[0] = 3, 5, 4
on rank 0, mesh[1] = 4, 5, 6
on rank 0, mesh[2] = 5, 7, 6
on rank 0, mesh[3] = 3, 8, 5
on rank 0, nelemlocal = 4
on rank 1, mesh[0] = 8, 7, 5
on rank 1, mesh[1] = 9, 7, 8
on rank 1, mesh[2] = 9, 8, 3
on rank 1, nelemlocal = 3
on rank 2, mesh[0] = 1, 3, 2
on rank 2, mesh[1] = 2, 3, 4
on rank 2, mesh[2] = 11, 1, 12
on rank 2, mesh[3] = 12, 1, 2
on rank 2, nelemlocal = 4
on rank 3, mesh[0] = 1, 9, 3
on rank 3, mesh[1] = 10, 9, 1
on rank 3, mesh[2] = 11, 10, 1
on rank 3, nelemlocal = 3
Running on 4 processes
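For comparison, the "read on rank 0 and broadcast" alternative mentioned in the comments above would look roughly like this sketch (reusing NELEM, rank, and comm from the program):
/* Sketch only: one rank touches the file, then the data is
   replicated to every process with a single collective call. */
int part[NELEM];
if (rank == 0)
{
    FILE *fp = fopen("part.dat", "r");
    fread(part, sizeof(int), NELEM, fp);
    fclose(fp);
}
MPI_Bcast(part, NELEM, MPI_INT, 0, comm);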

Why does an atomic operation only work within a thread group in an HLSL compute shader?

I have two buffers, src and target. src contains random values with duplicates (maybe something like {1,3,1,3,2,3,...}). target is initially all zeros.
What I want to do is this (in C++ syntax):
for (int i : src) {
target[i]++;
}
But I want to do it in an HLSL compute shader. Due to the duplicates in src, there may be data races, so I implemented an AtomicAdd operation:
#define AtomicAdd(BUFFER, IDX, VALUE) \
    keepWaiting = true;\
    do {\
        uint ov;\
        InterlockedCompareExchange(BUFFER##_Mutex[IDX], 0, 1, ov);\
        if (ov == 0) {\
            BUFFER[IDX] += VALUE;\
            BUFFER##_Mutex[IDX] = 0;\
            keepWaiting = false;\
        }\
    } while (keepWaiting)
In a compute shader with only one group, like this:
// called by group number (1, 1, 1)
[numthreads(512, 1, 1)]
void kernel(uint3 id : SV_DispatchThreadID) {
    for (uint i = 0; i <= SrcSize / 512; i++) {
        uint idx = i * 512 + id.x;
        if (idx >= SrcSize) {
            return;
        }
        bool keepWaiting;
        uint srcValue = src[idx];
        AtomicAdd(Target, srcValue, 1);
    }
}
It works well (at least on my device). But in a compute shader with many groups, like this:
// called by group numbers (X, 1, 1), X is a large number and X * 512 > SrcSize.
[numthreads(512, 1, 1)]
void kernel(uint3 id : SV_DispatchThreadID) {
    if (id.x >= SrcSize) {
        return;
    }
    bool keepWaiting;
    uint srcValue = src[id.x];
    AtomicAdd(Target, srcValue, 1);
}
it does not produce correct results.
So my question is: why does this happen? What is the difference between the two kernels?
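For what it's worth, part of the answer is likely that the plain read and store on BUFFER##_Mutex are not guaranteed to be visible across thread groups unless the UAV is declared globallycoherent, and GPUs make no forward-progress guarantee for spin loops. For this particular pattern (a scatter histogram) no mutex is needed at all: the InterlockedAdd intrinsic performs the read-modify-write atomically on UAV memory and is safe across groups. A sketch, assuming Target is a RWBuffer<uint> or RWStructuredBuffer<uint>:
// one hardware-atomic increment per input element; no spin lock required
[numthreads(512, 1, 1)]
void kernel(uint3 id : SV_DispatchThreadID) {
    if (id.x >= SrcSize) {
        return;
    }
    InterlockedAdd(Target[src[id.x]], 1);
}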

Replace pointer to pointer by initializer_list

#include <initializer_list>
#include <iostream>
#include <vector>

//this api is anti intuition
void original(int const **data)
{
    for(size_t i = 0; i != 3; ++i){
        int const *ptr = *data;
        //std::cout<<*ptr++<<", "<<*ptr<<std::endl; //this line may cause undefined behavior
        std::cout<<ptr[0]<<", "<<ptr[1]<<std::endl;
        ++data;
    }
}

//my eyes prefer this api to the original-like api
void replace_original(std::initializer_list<std::initializer_list<int>> list)
{
    std::vector<int const*> results(list.size());
    for(auto data : list){
        results.push_back(std::begin(data)); //#1
    }
    original(&results[0]);
}

int main()
{
    int first[] = {0, 1};
    int second[] = {2, 3};
    int third[] = {4, 5};
    int const *array[] = {first, second, third};
    original(array);
    replace_original({ {0, 1}, {2, 3}, {4, 5} });
    return 0;
}
The actual results are
0, 1
2, 3
4, 5
The expected results are
0, 1
2, 3
4, 5
0, 1
2, 3
4, 5
I want to wrap the original (old, C-style) API with an API like replace_original,
but I can't figure out why #1 doesn't work.
Ah, stupid mistake. I should change the loop to:
size_t const size = list.size();
std::vector<int const*> results(size);
for(size_t i = 0; i != size; ++i){
    results[i] = std::begin( *(std::begin(list) + i) );
}
(The constructor call results(size) already creates size null pointers, so the push_back in #1 was appending after them rather than overwriting them.)
Do you have a better solution to encapsulate this kind of API?
After googling, I found out that in C++14 the size() of initializer_list
becomes constexpr, so we should be able to use std::array to replace std::vector.
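For what it's worth, a simpler fix along the same lines is to start with an empty vector and reserve, so push_back fills it from the front; a sketch:
void replace_original(std::initializer_list<std::initializer_list<int>> list)
{
    std::vector<int const*> results;
    results.reserve(list.size()); // size zero, capacity list.size()
    for(auto &data : list){
        results.push_back(std::begin(data)); // now lands at the front
    }
    original(results.data());
}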

How to create a new QImage from an array of floats

I have an array of floats that represents an image (column-first).
I want to show the image on a QGraphicsScene as a QPixmap. In order to do that I tried to create a new image from my array with the QImage constructor QImage(const uchar *data, int width, int height, Format format).
I first created a new unsigned char array, cast every value from my original array to unsigned char, and then tried to create a new image with the following code:
unsigned char *data = new unsigned char[fres.length()];
for (int i = 0; i < fres.length(); i++)
    data[i] = char(fres.dataPtr()[i]);
bcg = new QImage(data, fres.cols(), fres.rows(), 1, QImage::Format_Mono);
The problem is that when I try to access the information in the following way:
bcg->pixel(i,j);
I only get the value 12345.
How can I create a viewable image from my array?
Thanks
There are two problems here.
One, casting a float to a char simply truncates the float, so 0.3 becomes 0 and 0.9 also becomes 0; for a range of 0..1, the resulting char will only ever be 0 or 1.
To give the char the full range, use a multiply:
data[i] = (unsigned char)(fres.dataPtr()[i] * 255);
(Also, your cast was incorrect.)
The other problem is that your QImage::Format is incorrect; Format_Mono expects 1BPP bit-packed data, not the 8BPP you are supplying. There are two ways to fix this issue:
// Build a colour table of grayscale
QByteArray data(fres.length(), 0);
for (int i = 0; i < fres.length(); ++i) {
    data[i] = (unsigned char)(fres.dataPtr()[i] * 255);
}
QVector<QRgb> grayscale;
for (int i = 0; i < 256; ++i) {
    grayscale.append(qRgb(i, i, i));
}
QImage image((const uchar*)data.constData(), fres.cols(), fres.rows(),
             QImage::Format_Indexed8);
image.setColorTable(grayscale);
// Use RGBA directly
QByteArray data(fres.length() * 4, 0);
for (int i = 0, j = 0; i < fres.length(); ++i, j += 4) {
    data[j] = data[j + 1] = data[j + 2] =      // R, G, B
        (unsigned char)(fres.dataPtr()[i] * 255);
    data[j + 3] = ~0;                          // Alpha
}
QImage image((const uchar*)data.constData(), fres.cols(), fres.rows(),
             QImage::Format_ARGB32_Premultiplied);
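As an aside, if your Qt is 5.5 or newer, QImage::Format_Grayscale8 removes the need for a colour table; a sketch (note that this QImage constructor does not copy the buffer, so either keep data alive or take a copy()):
QByteArray data(fres.length(), 0);
for (int i = 0; i < fres.length(); ++i)
    data[i] = (unsigned char)(fres.dataPtr()[i] * 255);
// bytesPerLine is fres.cols() because the rows are tightly packed
QImage image((const uchar*)data.constData(), fres.cols(), fres.rows(),
             fres.cols(), QImage::Format_Grayscale8);
QImage owned = image.copy(); // detaches from the QByteArray buffer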

How to write a video encoder with ffmpeg that explicitly controls the position of keyframes?

I want to write an encoder with ffmpeg which can put I-frames (keyframes) at positions I want. Where can I find tutorials or reference material for this?
P.S.
Is it possible to do this with mencoder or any open-source encoder? I want to encode an H.263 file. I am writing under and for Linux.
You'll need to look at the libavcodec documentation - specifically, at avcodec_encode_video(). I found that the best available documentation is in the ffmpeg header files and the API sample source code that's provided with the ffmpeg source. Specifically, look at libavcodec/api-example.c or even ffmpeg.c.
To force an I frame, you'll need to set the pict_type member of the picture you're encoding to 1 (1 is an I frame, 2 is a P frame; I don't remember the code for a B frame off the top of my head). Also, the key_frame member needs to be set to 1.
Some introductory material is available here and here, but I don't really know how good it is.
You'll need to be careful how you allocate the frame objects that the API calls require. api-example.c is your best bet as far as that goes, in my opinion. Look for the function video_encode_example() - it's concise and illustrates all the important things you need to worry about - pay special attention to the second call to avcodec_encode_video() that passes a NULL picture argument - it's required to get the last frames of video since MPEG video is encoded out of sequence and you may end up with a delay of a few frames.
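Putting that together, forcing a keyframe is just a couple of assignments before the encode call. A sketch against the old API, with c, picture, outbuf, outbuf_size, and f set up as in api-example.c (older ffmpeg versions spell the picture type FF_I_TYPE rather than AV_PICTURE_TYPE_I):
/* Force the next encoded frame to be an I frame. */
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
fwrite(outbuf, 1, out_size, f);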
An up-to-date version of api-example.c can be found at http://ffmpeg.org/doxygen/trunk/doc_2examples_2decoding_encoding_8c-example.html
It does the entire video encoding in a single and relatively short function. So this is probably a good place to start. Compile and run it. And then start modifying it until it does what you want.
It also has audio encoding and audio & video decoding examples.
GStreamer has decent documentation, has bindings for a number of languages (although the native API is C), and supports any video format you can find plugins for, including H.263 via gstreamer-ffmpeg.
You will need the libavcodec library. As a first step, I think you can learn about its use from the ffplay.c file inside the ffmpeg source code; it would tell you a lot. You can also check my video project at rtstegvideo.sourceforge.net.
Hope this helps.
If you're a Java programmer, then use Xuggler.
Minimal runnable example on FFmpeg 2.7
Based on Ori Pessach's answer, below is a minimal example that generates frames of the form:
I
P
B
P
...
The key parts of the code that control frame type are:
c = avcodec_alloc_context3(codec);
/* Minimal distance of I-frames. This is the maximum value allowed,
   or else we get a warning at runtime. */
c->keyint_min = 600;
/* Or else it defaults to 0 and B-frames are not allowed. */
c->max_b_frames = 1;
and:
frame->key_frame = 0;
switch (frame->pts % 4) {
    case 0:
        frame->key_frame = 1;
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case 1:
    case 3:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case 2:
        frame->pict_type = AV_PICTURE_TYPE_B;
        break;
}
We can then verify the frame type with:
ffprobe -select_streams v \
-show_frames \
-show_entries frame=pict_type \
-of csv \
tmp.h264
as mentioned at: https://superuser.com/questions/885452/extracting-the-index-of-key-frames-from-a-video-using-ffmpeg
Some rules were enforced by FFmpeg even when I tried to overcome them:
the first frame is an I-frame
a B-frame cannot be placed before an I-frame (TODO why?)
Preview of generated output.
#include <stdio.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>

static AVCodecContext *c = NULL;
static AVFrame *frame;
static AVPacket pkt;
static FILE *file;
struct SwsContext *sws_context = NULL;

/*
Convert RGB24 array to YUV. Save directly to the `frame`,
modifying its `data` and `linesize` fields
*/
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
    const int in_linesize[1] = { 3 * c->width };
    sws_context = sws_getCachedContext(sws_context,
            c->width, c->height, AV_PIX_FMT_RGB24,
            c->width, c->height, AV_PIX_FMT_YUV420P,
            0, 0, 0, 0);
    sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
            c->height, frame->data, frame->linesize);
}

/*
Generate 2 different images with four colored rectangles, each 25 frames long:

Image 1:
    black | red
    ------+-----
    green | blue

Image 2:
    yellow | red
    -------+------
    green  | white
*/
uint8_t* generate_rgb(int width, int height, int pts, uint8_t *rgb) {
    int x, y, cur;
    rgb = realloc(rgb, 3 * sizeof(uint8_t) * height * width);
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            cur = 3 * (y * width + x);
            rgb[cur + 0] = 0;
            rgb[cur + 1] = 0;
            rgb[cur + 2] = 0;
            if ((frame->pts / 25) % 2 == 0) {
                if (y < height / 2) {
                    if (x < width / 2) {
                        /* Black. */
                    } else {
                        rgb[cur + 0] = 255;
                    }
                } else {
                    if (x < width / 2) {
                        rgb[cur + 1] = 255;
                    } else {
                        rgb[cur + 2] = 255;
                    }
                }
            } else {
                if (y < height / 2) {
                    rgb[cur + 0] = 255;
                    if (x < width / 2) {
                        rgb[cur + 1] = 255;
                    } else {
                        rgb[cur + 2] = 255;
                    }
                } else {
                    if (x < width / 2) {
                        rgb[cur + 1] = 255;
                        rgb[cur + 2] = 255;
                    } else {
                        rgb[cur + 0] = 255;
                        rgb[cur + 1] = 255;
                        rgb[cur + 2] = 255;
                    }
                }
            }
        }
    }
    return rgb;
}

/* Allocate resources and write header data to the output file. */
void ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height) {
    AVCodec *codec;
    int ret;
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
    c->bit_rate = 400000;
    c->width = width;
    c->height = height;
    c->time_base.num = 1;
    c->time_base.den = fps;
    /* I, P, B frame placement parameters. */
    c->gop_size = 600;
    c->max_b_frames = 1;
    c->keyint_min = 600;
    c->pix_fmt = AV_PIX_FMT_YUV420P;
    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }
    file = fopen(filename, "wb");
    if (!file) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }
}

/*
Write trailing data to the output file
and free resources allocated by ffmpeg_encoder_start.
*/
void ffmpeg_encoder_finish(void) {
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };
    int got_output, ret;
    do {
        fflush(stdout);
        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }
        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, file);
            av_packet_unref(&pkt);
        }
    } while (got_output);
    fwrite(endcode, 1, sizeof(endcode), file);
    fclose(file);
    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    av_frame_free(&frame);
}

/*
Encode one frame from an RGB24 input and save it to the output file.
Must be called after ffmpeg_encoder_start, and ffmpeg_encoder_finish
must be called after the last call to this function.
*/
void ffmpeg_encoder_encode_frame(uint8_t *rgb) {
    int ret, got_output;
    ffmpeg_encoder_set_frame_yuv_from_rgb(rgb);
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    switch (frame->pts % 4) {
        case 0:
            frame->key_frame = 1;
            frame->pict_type = AV_PICTURE_TYPE_I;
            break;
        case 1:
        case 3:
            frame->key_frame = 0;
            frame->pict_type = AV_PICTURE_TYPE_P;
            break;
        case 2:
            frame->key_frame = 0;
            frame->pict_type = AV_PICTURE_TYPE_B;
            break;
    }
    ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
    if (ret < 0) {
        fprintf(stderr, "Error encoding frame\n");
        exit(1);
    }
    if (got_output) {
        fwrite(pkt.data, 1, pkt.size, file);
        av_packet_unref(&pkt);
    }
}

/* Represents the main loop of an application which generates one frame per loop. */
static void encode_example(const char *filename, int codec_id) {
    int pts;
    int width = 320;
    int height = 240;
    uint8_t *rgb = NULL;
    ffmpeg_encoder_start(filename, codec_id, 25, width, height);
    for (pts = 0; pts < 100; pts++) {
        frame->pts = pts;
        rgb = generate_rgb(width, height, pts, rgb);
        ffmpeg_encoder_encode_frame(rgb);
    }
    ffmpeg_encoder_finish();
}

int main(void) {
    avcodec_register_all();
    encode_example("tmp.h264", AV_CODEC_ID_H264);
    encode_example("tmp.mpg", AV_CODEC_ID_MPEG1VIDEO);
    /* TODO: is this encoded correctly? Possible to view it without container? */
    /*encode_example("tmp.vp8", AV_CODEC_ID_VP8);*/
    return 0;
}
Tested on Ubuntu 15.10. GitHub upstream.
Do you really want to do this?
In most cases, you are better off just controlling the global parameters of AVCodecContext.
FFmpeg does smart things like using a keyframe if the new frame is completely different from the previous one, and not much would be gained from differential encoding.
For example, if we set just:
c->keyint_min = 600;
then we get exactly 4 keyframes in the above example, which is logical since there are 4 abrupt scene changes in the generated video.
