FFMPEG RGB to YUV420P: Warning: data is not aligned! This can lead to a speedloss [duplicate]

This question already has answers here:
Pixel format conversion issue [FFMPEG]
(2 answers)
Closed 4 years ago.
I am trying to convert a standard RGB color space to YUV420P. I am struggling to figure out why I keep getting 'Warning: data is not aligned! This can lead to a speedloss' when executing the code. I have looked at a multitude of examples.
int ImageDecoder::rgb2yuv(uint8_t *src,
                          uint8_t *dest,
                          uint32_t width,
                          uint32_t height)
{
    struct SwsContext *imgCtx = NULL;
    AVFrame *pFrameYUV;
    enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_RGB24;
    enum AVPixelFormat dst_pix_fmt = AV_PIX_FMT_YUV420P;
    int ret;
    int size;
    const int RGBLinesize[1] = { 3 * (int)width };

    pFrameYUV = av_frame_alloc();
    pFrameYUV->width = width;
    pFrameYUV->height = height;
    pFrameYUV->format = dst_pix_fmt;

    // Initialize pFrameYUV linesize
    ret = av_image_alloc(pFrameYUV->data, pFrameYUV->linesize, pFrameYUV->width, pFrameYUV->height, AV_PIX_FMT_YUV420P, 1);

    getLogger()->info("ImageDecoder:{} width={} height={} linesize[0]={} linesize[1]={} linesize[2]={}",
                      __func__, pFrameYUV->width, pFrameYUV->height, pFrameYUV->linesize[0], pFrameYUV->linesize[1], pFrameYUV->linesize[2]);

    size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pFrameYUV->width, pFrameYUV->height, 1);

    imgCtx = sws_getCachedContext(imgCtx,
                                  width,
                                  height,
                                  AV_PIX_FMT_RGB24,
                                  pFrameYUV->width,
                                  pFrameYUV->height,
                                  AV_PIX_FMT_YUV420P,
                                  SWS_BICUBIC, 0, 0, 0);
    if (imgCtx == NULL)
    {
        getLogger()->error("ERROR: ImageDecoder: {} Cannot initialize the conversion context", __func__);
    }

    sws_scale(imgCtx,
              (const uint8_t* const*)&src,
              RGBLinesize,
              0,
              height,
              pFrameYUV->data,
              pFrameYUV->linesize);

    memcpy(dest, &pFrameYUV->data[0], size);

    sws_freeContext(imgCtx);
    av_free(pFrameYUV);
}
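For reference, this warning is printed by sws_scale() when the source or destination pointers/linesizes are not 16-byte aligned, which is what av_image_alloc() with an alignment of 1 tends to produce. Below is a minimal sketch of the destination side with a larger alignment; the alignment value and the packed copy are illustrative assumptions, and note that with padded linesizes the planes must be packed with av_image_copy_to_buffer() rather than a single memcpy() (which should in any case read from pFrameYUV->data[0], not &pFrameYUV->data[0]):

    // Allocate the YUV planes with 32-byte alignment so sws_scale() sees aligned pointers/strides.
    ret = av_image_alloc(pFrameYUV->data, pFrameYUV->linesize,
                         width, height, AV_PIX_FMT_YUV420P, 32);

    // ... run sws_scale() into pFrameYUV as before ...

    // Pack the (possibly padded) planes into the caller's contiguous buffer.
    av_image_copy_to_buffer(dest, size,
                            (const uint8_t * const *)pFrameYUV->data, pFrameYUV->linesize,
                            AV_PIX_FMT_YUV420P, width, height, 1);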

Here is how I am converting YUV444 decoded frames to RGBA format. I hope it helps you.
AVFrame* RGBFrame = av_frame_alloc();
if (RGBFrame == NULL)
    return false;
RGBFrame->width = YUV_frame->width;
RGBFrame->height = YUV_frame->height;
RGBFrame->format = AV_PIX_FMT_RGBA;
// The last argument of av_image_alloc() is the buffer alignment in bytes, not a picture type.
int ret = av_image_alloc(RGBFrame->data, RGBFrame->linesize, RGBFrame->width, RGBFrame->height, AV_PIX_FMT_RGBA, 1);
if (ret < 0)
    return false;
SwsContext* sws_Context = NULL;
sws_Context = sws_getCachedContext(sws_Context, YUV_frame->width, YUV_frame->height, pVideoCodecCtx->pix_fmt,
                                   YUV_frame->width, YUV_frame->height, AV_PIX_FMT_RGBA, SWS_BILINEAR, NULL, NULL, NULL);
if (sws_Context == NULL)
    return false;
int result = sws_scale(sws_Context, YUV_frame->data, YUV_frame->linesize, 0, (int)YUV_frame->height, RGBFrame->data, RGBFrame->linesize);
if (result < 0)
    return false;
sws_freeContext(sws_Context);
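The RGBA planes allocated by av_image_alloc() and the frame itself also need to be released once the converted picture has been consumed; a minimal sketch of that cleanup (the release point is assumed, it is not shown in the answer):

// after the RGBA data has been used (e.g. uploaded or copied out)
av_freep(&RGBFrame->data[0]);   // frees the buffer from av_image_alloc()
av_frame_free(&RGBFrame);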

int ImageDecoder::rgb2yuv(uint8_t *src,
                          uint8_t *dest,
                          uint32_t *outBufferSize,
                          uint32_t width,
                          uint32_t height)
{
    struct SwsContext *imgCtx = NULL;
    uint8_t *RGBData[1] = { src };
    const int RGBLinesize[1] = { 3 * (int)width };
    // Y, U and V planes laid out one after the other in the caller's buffer
    uint8_t *YUVData[3];
    YUVData[0] = dest;
    YUVData[1] = YUVData[0] + ((int)width * (int)height);
    YUVData[2] = YUVData[1] + (((int)width * (int)height) / 4);
    const int YUVLinesize[] = { (int)width, (int)width / 2, (int)width / 2 };
    int size;

    size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1);
    *outBufferSize = size;

    imgCtx = sws_getCachedContext(imgCtx,
                                  width,
                                  height,
                                  AV_PIX_FMT_RGB24,
                                  width,
                                  height,
                                  AV_PIX_FMT_YUV420P,
                                  SWS_BICUBIC, 0, 0, 0);
    if (imgCtx == NULL)
    {
        getLogger()->error("ERROR: ImageDecoder: {} Cannot initialize the conversion context", __func__);
        return -1;
    }

    sws_scale(imgCtx,
              RGBData,
              RGBLinesize,
              0,
              height,
              YUVData,
              YUVLinesize);

    sws_freeContext(imgCtx);
    return 0;
}
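A possible call site for this second helper (the surrounding buffer handling and the ImageDecoder instance name are assumptions for illustration, not part of the answer):

uint32_t yuvSize = 0;
int maxSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1);
uint8_t *yuv = (uint8_t *)av_malloc(maxSize);   // holds the packed Y, U and V planes
if (decoder.rgb2yuv(rgbPixels, yuv, &yuvSize, width, height) == 0) {
    // yuvSize now reports how many bytes of yuv are valid
}
av_free(yuv);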

Related

direct3d 11 and 2D: pass coordinates of a vertex as int and not float

My purpose is to write a backend of a toolkit using only Direct3D 11 for 2D (no additional library like Direct2D, SpriteBatch, or anything else).
Note that this is the first time I have used Direct3D, and I'm currently learning D3D11.
So for now, I can display a triangle or rectangle of the color I want.
The vertex structure of my C code contains 2 float for the position and 4 unsigned char for the color. In my vertex shader, the vertex structure has 2 floats for the position of the vertex, and 4 floats for the color.
I have noticed that if I use DXGI_FORMAT_R8G8B8A8_UNORM for the color in my D3D11_INPUT_ELEMENT_DESC array, then the color is mapped automatically from the values 0 to 255 to the values 0.0f to 1.0f. That seems reasonable when I read the documentation (the DXGI format enumeration, the description of _UNORM):
"Unsigned normalized integer; which is interpreted in a resource as an unsigned integer, and is interpreted in a shader as an unsigned normalized floating-point value in the range [0, 1]. All 0's maps to 0.0f, and all 1's maps to 1.0f. A sequence of evenly spaced floating-point values from 0.0f to 1.0f are represented. For instance, a 2-bit UNORM represents 0.0f, 1/3, 2/3, and 1.0f."
Or at least that is how I interpret this doc (I may be wrong). And the color of the triangle is correct.
What I would like to do is the same for pixel coordinates: if I pass an integer for the coordinates (x between 0 and the width of the window - 1, and y between 0 and the height of the window - 1), is it interpreted as the corresponding signed normalized floating-point value by the vertex shader (-1.0f to 1.0f for x, and 1.0f to -1.0f for y)? I tried several values in my Vertex C struct and D3D11_INPUT_ELEMENT_DESC array, without luck. So I have 2 questions:
Is it possible?
If it is not possible, is it faster to convert the coordinates in the C code, or in the shader code (with the viewport passed as a constant buffer)? See the macros XF and YF in the code below for the conversion from int to float.
Below is my complete code that displays a simple triangle, followed by the HLSL code for the vertex and pixel shaders. I use the C API of Direct3D. I support Win 7 and Win 10.
Source code:
/* Windows 10 */
#define _WIN32_WINNT 0x0A00
#if defined _WIN32_WINNT && _WIN32_WINNT >= 0x0A00
# define HAVE_WIN10
#endif
#include <stdio.h>
#ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
/* C API for d3d11 */
#define COBJMACROS
#include <guiddef.h>
#ifdef HAVE_WIN10
# include <dxgi1_3.h>
#else
# include <dxgi.h>
#endif
#include <d3d11.h>
#include "d3d11_vs.h"
#include "d3d11_ps.h"
/* comment this out for no debug information */
#define _DEBUG
#ifdef _DEBUG
# define FCT \
do { printf(" * %s\n", __FUNCTION__); fflush(stdout); } while (0)
#else
# define FCT \
do { } while (0)
#endif
#define XF(w,x) ((float)(2 * (x) - (w)) / (float)(w))
#define YF(h,y) ((float)((h) - 2 * (y)) / (float)(h))
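/* Worked example (illustrative values): for an 800x480 client area,
 * XF(800, 0) = -1.0f, XF(800, 400) = 0.0f, XF(800, 800) = 1.0f,
 * while YF(480, 0) = 1.0f and YF(480, 480) = -1.0f. */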
typedef struct Window Window;
typedef struct D3d D3d;
struct Window
{
HINSTANCE instance;
RECT rect;
HWND win;
D3d *d3d;
};
struct D3d
{
#ifdef HAVE_WIN10
IDXGIFactory2 *dxgi_factory;
IDXGISwapChain1 *dxgi_swapchain;
#else
IDXGIFactory *dxgi_factory;
IDXGISwapChain *dxgi_swapchain;
#endif
ID3D11Device *d3d_device;
ID3D11DeviceContext *d3d_device_ctx;
ID3D11RenderTargetView *d3d_render_target_view;
ID3D11InputLayout *d3d_input_layout;
ID3D11VertexShader *d3d_vertex_shader;
ID3D11PixelShader *d3d_pixel_shader;
D3D11_VIEWPORT viewport;
Window *win;
unsigned int vsync : 1;
};
typedef struct
{
FLOAT x;
FLOAT y;
BYTE r;
BYTE g;
BYTE b;
BYTE a;
} Vertex;
void d3d_resize(D3d *d3d, UINT width, UINT height);
void d3d_render(D3d *d3d);
/************************* Window *************************/
LRESULT CALLBACK
_window_procedure(HWND window,
UINT message,
WPARAM window_param,
LPARAM data_param)
{
switch (message)
{
case WM_CLOSE:
PostQuitMessage(0);
return 0;
case WM_KEYUP:
if (window_param == 'Q')
{
PostQuitMessage(0);
}
return 0;
case WM_ERASEBKGND:
/* no need to erase back */
return 1;
/* GDI notifications */
case WM_CREATE:
#ifdef _DEBUG
printf(" * WM_CREATE\n");
fflush(stdout);
#endif
return 0;
case WM_SIZE:
{
Window *win;
#ifdef _DEBUG
printf(" * WM_SIZE\n");
fflush(stdout);
#endif
win = (Window *)GetWindowLongPtr(window, GWLP_USERDATA);
d3d_resize(win->d3d,
(UINT)LOWORD(data_param), (UINT)HIWORD(data_param));
return 0;
}
case WM_PAINT:
{
#ifdef _DEBUG
printf(" * WM_PAINT\n");
fflush(stdout);
#endif
if (GetUpdateRect(window, NULL, FALSE))
{
PAINTSTRUCT ps;
Window *win;
BeginPaint(window, &ps);
win = (Window *)GetWindowLongPtr(window, GWLP_USERDATA);
d3d_render(win->d3d);
EndPaint(window, &ps);
}
return 0;
}
default:
return DefWindowProc(window, message, window_param, data_param);
}
}
Window *window_new(int x, int y, int w, int h)
{
WNDCLASS wc;
RECT r;
Window *win;
win = (Window *)calloc(1, sizeof(Window));
if (!win)
return NULL;
win->instance = GetModuleHandle(NULL);
if (!win->instance)
goto free_win;
memset(&wc, 0, sizeof(WNDCLASS));
wc.style = CS_HREDRAW | CS_VREDRAW;
wc.lpfnWndProc = _window_procedure;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.hInstance = win->instance;
wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.hbrBackground = NULL;
wc.lpszMenuName = NULL;
wc.lpszClassName = "D3D";
if (!RegisterClass(&wc))
goto free_library;
r.left = 0;
r.top = 0;
r.right = w;
r.bottom = h;
if (!AdjustWindowRectEx(&r,
WS_OVERLAPPEDWINDOW | WS_SIZEBOX,
FALSE,
0U))
goto unregister_class;
win->win = CreateWindowEx(0U,
"D3D", "Test",
WS_OVERLAPPEDWINDOW | WS_SIZEBOX,
x, y,
r.right - r.left,
r.bottom - r.top,
NULL,
NULL, win->instance, NULL);
if (!win->win)
goto unregister_class;
return win;
unregister_class:
UnregisterClass("D2D", win->instance);
free_library:
FreeLibrary(win->instance);
free_win:
free(win);
return NULL;
}
void window_del(Window *win)
{
if (!win)
return;
DestroyWindow(win->win);
UnregisterClass("D2D", win->instance);
FreeLibrary(win->instance);
free(win);
}
void window_show(Window *win)
{
ShowWindow(win->win, SW_SHOWNORMAL);
}
/************************** D3D11 **************************/
static void d3d_refresh_rate_get(D3d *d3d, UINT *num, UINT *den)
{
DXGI_MODE_DESC *display_mode_list = NULL; /* 28 bytes */
IDXGIAdapter *dxgi_adapter;
IDXGIOutput *dxgi_output;
UINT nbr_modes;
UINT i;
HRESULT res;
*num = 0U;
*den = 1U;
if (!d3d->vsync)
return;
/* adapter of primary desktop : pass 0U */
res = IDXGIFactory_EnumAdapters(d3d->dxgi_factory, 0U, &dxgi_adapter);
if (FAILED(res))
return;
/* output of primary desktop : pass 0U */
res = IDXGIAdapter_EnumOutputs(dxgi_adapter, 0U, &dxgi_output);
if (FAILED(res))
goto release_dxgi_adapter;
/* number of mode that fit the format */
res = IDXGIOutput_GetDisplayModeList(dxgi_output,
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_ENUM_MODES_INTERLACED,
&nbr_modes, NULL);
if (FAILED(res))
goto release_dxgi_output;
printf("display mode list : %d\n", nbr_modes);
fflush(stdout);
display_mode_list = (DXGI_MODE_DESC *)malloc(nbr_modes * sizeof(DXGI_MODE_DESC));
if (!display_mode_list)
goto release_dxgi_output;
/* fill the mode list */
res = IDXGIOutput_GetDisplayModeList(dxgi_output,
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_ENUM_MODES_INTERLACED,
&nbr_modes, display_mode_list);
if (FAILED(res))
goto free_mode_list;
for (i = 0; i < nbr_modes; i++)
{
if ((display_mode_list[i].Width == (UINT)GetSystemMetrics(SM_CXSCREEN)) &&
(display_mode_list[i].Height == (UINT)GetSystemMetrics(SM_CYSCREEN)))
{
*num = display_mode_list[i].RefreshRate.Numerator;
*den = display_mode_list[i].RefreshRate.Denominator;
break;
}
}
#ifdef _DEBUG
{
DXGI_ADAPTER_DESC adapter_desc;
IDXGIAdapter_GetDesc(dxgi_adapter, &adapter_desc);
printf(" * video mem: %llu B, %llu MB\n",
adapter_desc.DedicatedVideoMemory,
adapter_desc.DedicatedVideoMemory / 1024 / 1024);
fflush(stdout);
wprintf(L" * description: %ls\n", adapter_desc.Description);
fflush(stdout);
}
#endif
free_mode_list:
free(display_mode_list);
release_dxgi_output:
IDXGIOutput_Release(dxgi_output);
release_dxgi_adapter:
IDXGIFactory_Release(dxgi_adapter);
}
D3d *d3d_init(Window *win, int vsync)
{
D3D11_INPUT_ELEMENT_DESC desc_ie[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R8G8B8A8_UNORM, 0, 2 * sizeof(float), D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
#ifdef HAVE_WIN10
DXGI_SWAP_CHAIN_DESC1 desc;
DXGI_SWAP_CHAIN_FULLSCREEN_DESC desc_fs;
#else
DXGI_SWAP_CHAIN_DESC desc;
#endif
D3d *d3d;
RECT r;
HRESULT res;
UINT flags;
UINT num;
UINT den;
D3D_FEATURE_LEVEL feature_level[4];
d3d = (D3d *)calloc(1, sizeof(D3d));
if (!d3d)
return NULL;
d3d->vsync = vsync;
win->d3d = d3d;
d3d->win = win;
/* create the DXGI factory */
flags = 0;
#ifdef HAVE_WIN10
# ifdef _DEBUG
flags = DXGI_CREATE_FACTORY_DEBUG;
# endif
res = CreateDXGIFactory2(flags, &IID_IDXGIFactory2, (void **)&d3d->dxgi_factory);
#else
res = CreateDXGIFactory(&IID_IDXGIFactory, (void **)&d3d->dxgi_factory);
#endif
if (FAILED(res))
goto free_d3d;
/* single threaded for now */
flags = D3D11_CREATE_DEVICE_SINGLETHREADED |
D3D11_CREATE_DEVICE_BGRA_SUPPORT;
#ifdef HAVE_WIN10
# ifdef _DEBUG
flags |= D3D11_CREATE_DEVICE_DEBUG;
# endif
#endif
feature_level[0] = D3D_FEATURE_LEVEL_11_1;
feature_level[1] = D3D_FEATURE_LEVEL_11_0;
feature_level[2] = D3D_FEATURE_LEVEL_10_1;
feature_level[3] = D3D_FEATURE_LEVEL_10_0;
/* create device and device context with hardware support */
res = D3D11CreateDevice(NULL,
D3D_DRIVER_TYPE_HARDWARE,
NULL,
flags,
feature_level,
3U,
D3D11_SDK_VERSION,
&d3d->d3d_device,
NULL,
&d3d->d3d_device_ctx);
if (FAILED(res))
goto release_dxgi_factory2;
if (!GetClientRect(win->win, &r))
goto release_d3d_device;
/*
* create the swap chain. It needs some settings...
* the size of the internal buffers
* the image format
* the number of back buffers (>= 2 for flip model, see SwapEffect field)
*
* Settings are different in win 7 and win10
*/
d3d_refresh_rate_get(d3d, &num, &den);
#ifdef HAVE_WIN10
desc.Width = r.right - r.left;
desc.Height = r.bottom - r.top;
desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.Stereo = FALSE;
#else
desc.BufferDesc.Width = r.right - r.left;
desc.BufferDesc.Height = r.bottom - r.top;
desc.BufferDesc.RefreshRate.Numerator = num;
desc.BufferDesc.RefreshRate.Denominator = den;
desc.BufferDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
desc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
#endif
desc.SampleDesc.Count = 1U;
desc.SampleDesc.Quality = 0U;
desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
desc.BufferCount = 2U;
#ifdef HAVE_WIN10
desc.Scaling = DXGI_SCALING_NONE;
#else
desc.OutputWindow = win->win;
desc.Windowed = TRUE;
#endif
desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
#ifdef HAVE_WIN10
desc.AlphaMode = DXGI_ALPHA_MODE_UNSPECIFIED;
#endif
desc.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
#ifdef HAVE_WIN10
desc_fs.RefreshRate.Numerator = num;
desc_fs.RefreshRate.Denominator = den;
desc_fs.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
desc_fs.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
desc_fs.Windowed = TRUE;
#endif
#ifdef HAVE_WIN10
res = IDXGIFactory2_CreateSwapChainForHwnd(d3d->dxgi_factory,
(IUnknown *)d3d->d3d_device,
win->win,
&desc,
&desc_fs,
NULL,
&d3d->dxgi_swapchain);
#else
res = IDXGIFactory_CreateSwapChain(d3d->dxgi_factory,
(IUnknown *)d3d->d3d_device,
&desc,
&d3d->dxgi_swapchain);
#endif
if (FAILED(res))
goto release_d3d_device;
/* Vertex shader */
res = ID3D11Device_CreateVertexShader(d3d->d3d_device,
d3d_vertex_shader,
sizeof(d3d_vertex_shader),
NULL,
&d3d->d3d_vertex_shader);
if (FAILED(res))
{
printf(" * CreateVertexShader() failed\n");
goto release_dxgi_swapchain;
}
/* Pixel shader */
res = ID3D11Device_CreatePixelShader(d3d->d3d_device,
d3d_pixel_shader,
sizeof(d3d_pixel_shader),
NULL,
&d3d->d3d_pixel_shader);
if (FAILED(res))
{
printf(" * CreatePixelShader() failed\n");
goto release_vertex_shader;
}
/* create the input layout */
res = ID3D11Device_CreateInputLayout(d3d->d3d_device,
desc_ie,
sizeof(desc_ie) / sizeof(D3D11_INPUT_ELEMENT_DESC),
d3d_vertex_shader,
sizeof(d3d_vertex_shader),
&d3d->d3d_input_layout);
if (FAILED(res))
{
printf(" * CreateInputLayout() failed\n");
goto release_pixel_shader;
}
return d3d;
release_pixel_shader:
ID3D11PixelShader_Release(d3d->d3d_pixel_shader);
release_vertex_shader:
ID3D11VertexShader_Release(d3d->d3d_vertex_shader);
release_dxgi_swapchain:
#ifdef HAVE_WIN10
IDXGISwapChain1_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain1_Release(d3d->dxgi_swapchain);
#else
IDXGISwapChain_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain_Release(d3d->dxgi_swapchain);
#endif
release_d3d_device:
ID3D11DeviceContext_Release(d3d->d3d_device_ctx);
ID3D11Device_Release(d3d->d3d_device);
release_dxgi_factory2:
#ifdef HAVE_WIN10
IDXGIFactory2_Release(d3d->dxgi_factory);
#else
IDXGIFactory_Release(d3d->dxgi_factory);
#endif
free_d3d:
free(d3d);
return NULL;
}
void d3d_shutdown(D3d *d3d)
{
#ifdef _DEBUG
ID3D11Debug *d3d_debug;
HRESULT res;
#endif
if (!d3d)
return;
#ifdef _DEBUG
res = ID3D11Debug_QueryInterface(d3d->d3d_device, &IID_ID3D11Debug,
(void **)&d3d_debug);
#endif
ID3D11PixelShader_Release(d3d->d3d_pixel_shader);
ID3D11VertexShader_Release(d3d->d3d_vertex_shader);
ID3D11InputLayout_Release(d3d->d3d_input_layout);
ID3D11RenderTargetView_Release(d3d->d3d_render_target_view);
#ifdef HAVE_WIN10
IDXGISwapChain1_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain1_Release(d3d->dxgi_swapchain);
#else
IDXGISwapChain_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain_Release(d3d->dxgi_swapchain);
#endif
ID3D11DeviceContext_Release(d3d->d3d_device_ctx);
ID3D11Device_Release(d3d->d3d_device);
#ifdef HAVE_WIN10
IDXGIFactory2_Release(d3d->dxgi_factory);
#else
IDXGIFactory_Release(d3d->dxgi_factory);
#endif
free(d3d);
#ifdef _DEBUG
if (SUCCEEDED(res))
{
ID3D11Debug_ReportLiveDeviceObjects(d3d_debug, D3D11_RLDO_DETAIL);
ID3D11Debug_Release(d3d_debug);
}
#endif
}
void d3d_resize(D3d *d3d, UINT width, UINT height)
{
D3D11_RENDER_TARGET_VIEW_DESC desc_rtv;
ID3D11Texture2D *back_buffer;
HRESULT res;
FCT;
/* set viewport, depends on size of the window */
d3d->viewport.TopLeftX = 0.0f;
d3d->viewport.TopLeftY = 0.0f;
d3d->viewport.Width = (float)width;
d3d->viewport.Height = (float)height;
d3d->viewport.MinDepth = 0.0f;
d3d->viewport.MaxDepth = 1.0f;
/* release the render target view */
if (d3d->d3d_render_target_view)
ID3D11RenderTargetView_Release(d3d->d3d_render_target_view);
/* unset the render target view in the output merger */
ID3D11DeviceContext_OMSetRenderTargets(d3d->d3d_device_ctx,
0U, NULL, NULL);
/* resize the internal buffers of the swap chain to the new size */
#ifdef HAVE_WIN10
res = IDXGISwapChain1_ResizeBuffers(d3d->dxgi_swapchain,
0U, /* preserve buffer count */
width, height,
DXGI_FORMAT_UNKNOWN, /* preserve format */
0U);
#else
res = IDXGISwapChain_ResizeBuffers(d3d->dxgi_swapchain,
0U, /* preserve buffer count */
width, height,
DXGI_FORMAT_UNKNOWN, /* preserve format */
0U);
#endif
if ((res == DXGI_ERROR_DEVICE_REMOVED) ||
(res == DXGI_ERROR_DEVICE_RESET) ||
(res == DXGI_ERROR_DRIVER_INTERNAL_ERROR))
{
return;
}
if (FAILED(res))
{
printf("ResizeBuffers() failed\n");
fflush(stdout);
return;
}
/* get the internal buffer of the swap chain */
#ifdef HAVE_WIN10
res = IDXGISwapChain1_GetBuffer(d3d->dxgi_swapchain, 0,
&IID_ID3D11Texture2D,
(void **)&back_buffer);
#else
res = IDXGISwapChain_GetBuffer(d3d->dxgi_swapchain, 0,
&IID_ID3D11Texture2D,
(void **)&back_buffer);
#endif
if (FAILED(res))
{
printf("swapchain GetBuffer() failed\n");
fflush(stdout);
return;
}
ZeroMemory(&desc_rtv, sizeof(D3D11_RENDER_TARGET_VIEW_DESC));
desc_rtv.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc_rtv.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
/* create the new render target view from this internal buffer */
res = ID3D11Device_CreateRenderTargetView(d3d->d3d_device,
(ID3D11Resource *)back_buffer,
&desc_rtv,
&d3d->d3d_render_target_view);
ID3D11Texture2D_Release(back_buffer);
}
/*** triangle ***/
typedef struct
{
Vertex vertices[3];
unsigned int indices[3];
ID3D11Buffer *vertex_buffer;
ID3D11Buffer *index_buffer; /* not useful for a single triangle */
UINT stride;
UINT offset;
UINT count;
UINT index_count;
} Triangle;
Triangle *triangle_new(D3d *d3d,
int w, int h,
int x1, int y1,
int x2, int y2,
int x3, int y3,
unsigned char r,
unsigned char g,
unsigned char b,
unsigned char a)
{
D3D11_BUFFER_DESC desc;
D3D11_SUBRESOURCE_DATA sr_data;
Triangle *t;
HRESULT res;
t = (Triangle *)malloc(sizeof(Triangle));
if (!t)
return NULL;
t->vertices[0].x = XF(w, x1);
t->vertices[0].y = YF(h, y1);
t->vertices[0].r = r;
t->vertices[0].g = g;
t->vertices[0].b = b;
t->vertices[0].a = a;
t->vertices[1].x = XF(w, x2);
t->vertices[1].y = YF(h, y2);
t->vertices[1].r = r;
t->vertices[1].g = g;
t->vertices[1].b = b;
t->vertices[1].a = a;
t->vertices[2].x = XF(w, x3);
t->vertices[2].y = YF(h, y3);
t->vertices[2].r = r;
t->vertices[2].g = g;
t->vertices[2].b = b;
t->vertices[2].a = a;
/* useful only for the rectangle later */
t->indices[0] = 0;
t->indices[1] = 1;
t->indices[2] = 2;
t->stride = sizeof(Vertex);
t->offset = 0U;
t->index_count = 3U;
desc.ByteWidth = sizeof(t->vertices);
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc.MiscFlags = 0U;
desc.StructureByteStride = 0U;
sr_data.pSysMem = t->vertices;
sr_data.SysMemPitch = 0U;
sr_data.SysMemSlicePitch = 0U;
res = ID3D11Device_CreateBuffer(d3d->d3d_device,
&desc,
&sr_data,
&t->vertex_buffer);
if (FAILED(res))
{
free(t);
return NULL;
}
desc.ByteWidth = sizeof(t->indices);
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc.MiscFlags = 0U;
desc.StructureByteStride = 0U;
sr_data.pSysMem = t->indices;
sr_data.SysMemPitch = 0U;
sr_data.SysMemSlicePitch = 0U;
res = ID3D11Device_CreateBuffer(d3d->d3d_device,
&desc,
&sr_data,
&t->index_buffer);
if (FAILED(res))
{
free(t);
return NULL;
}
return t;
}
void triangle_free(Triangle *t)
{
if (!t)
return;
ID3D11Buffer_Release(t->index_buffer);
ID3D11Buffer_Release(t->vertex_buffer);
free(t);
}
void d3d_render(D3d *d3d)
{
#ifdef HAVE_WIN10
DXGI_PRESENT_PARAMETERS pp;
#endif
const FLOAT color[4] = { 0.10f, 0.18f, 0.24f, 1.0f };
RECT rect;
HRESULT res;
FCT;
if (!GetClientRect(d3d->win->win, &rect))
{
return;
}
/* scene */
Triangle *t;
t = triangle_new(d3d,
rect.right - rect.left,
rect.bottom - rect.top,
320, 120,
480, 360,
160, 360,
255, 255, 0, 255); /* r, g, b, a */
/* clear render target */
ID3D11DeviceContext_ClearRenderTargetView(d3d->d3d_device_ctx,
d3d->d3d_render_target_view,
color);
/* Input Assembler (IA) */
/* TRIANGLESTRIP only useful for the rectangle later */
ID3D11DeviceContext_IASetPrimitiveTopology(d3d->d3d_device_ctx,
D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
ID3D11DeviceContext_IASetInputLayout(d3d->d3d_device_ctx,
d3d->d3d_input_layout);
ID3D11DeviceContext_IASetVertexBuffers(d3d->d3d_device_ctx,
0,
1,
&t->vertex_buffer,
&t->stride,
&t->offset);
ID3D11DeviceContext_IASetIndexBuffer(d3d->d3d_device_ctx,
t->index_buffer,
DXGI_FORMAT_R32_UINT,
0);
/* vertex shader */
ID3D11DeviceContext_VSSetShader(d3d->d3d_device_ctx,
d3d->d3d_vertex_shader,
NULL,
0);
/* pixel shader */
ID3D11DeviceContext_PSSetShader(d3d->d3d_device_ctx,
d3d->d3d_pixel_shader,
NULL,
0);
/* set viewport in the Rasterizer Stage */
ID3D11DeviceContext_RSSetViewports(d3d->d3d_device_ctx, 1U, &d3d->viewport);
/* Output merger */
ID3D11DeviceContext_OMSetRenderTargets(d3d->d3d_device_ctx,
1U, &d3d->d3d_render_target_view,
NULL);
/* draw */
ID3D11DeviceContext_DrawIndexed(d3d->d3d_device_ctx,
t->index_count,
0, 0);
triangle_free(t);
/*
* present frame, that is flip the back buffer and the front buffer
* if no vsync, we present immediately
*/
#ifdef HAVE_WIN10
pp.DirtyRectsCount = 0;
pp.pDirtyRects = NULL;
pp.pScrollRect = NULL;
pp.pScrollOffset = NULL;
res = IDXGISwapChain1_Present1(d3d->dxgi_swapchain,
d3d->vsync ? 1 : 0, 0, &pp);
#else
res = IDXGISwapChain_Present(d3d->dxgi_swapchain,
d3d->vsync ? 1 : 0, 0);
#endif
if (res == DXGI_ERROR_DEVICE_RESET || res == DXGI_ERROR_DEVICE_REMOVED)
{
printf("device removed or lost, need to recreate everything\n");
fflush(stdout);
}
else if (res == DXGI_STATUS_OCCLUDED)
{
printf("window is not visible, so vsync won't work. Let's sleep a bit to reduce CPU usage\n");
fflush(stdout);
}
}
int main()
{
Window *win;
D3d *d3d;
/* remove scaling on HiDPI */
#ifdef HAVE_WIN10
SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_SYSTEM_AWARE);
#endif
win = window_new(100, 100, 800, 480);
if (!win)
return 1;
d3d = d3d_init(win, 0);
if (!d3d)
goto del_window;
SetWindowLongPtr(win->win, GWLP_USERDATA, (LONG_PTR)win);
window_show(win);
/* message loop */
while (1)
{
MSG msg;
BOOL ret;
ret = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE);
if (ret)
{
do
{
if (msg.message == WM_QUIT)
goto beach;
TranslateMessage(&msg);
DispatchMessageW(&msg);
} while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE));
}
}
beach:
d3d_shutdown(d3d);
window_del(win);
return 0;
del_window:
window_del(win);
printf(" error\n");
fflush(stdout);
return 1;
}
Vertex shader:
struct vs_input
{
float2 position : POSITION;
float4 color : COLOR;
};
struct ps_input
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
ps_input main(vs_input input )
{
ps_input output;
output.position = float4(input.position, 0.0f, 1.0f);
output.color = input.color;
return output;
}
Pixel shader:
struct ps_input
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
float4 main(ps_input input) : SV_TARGET
{
return input.color;
}
Thank you.
If you want to use pixel coordinates for your vertices, you can use one of these two formats:
DXGI_FORMAT_R32G32_FLOAT (same as you use right now, pixel coordinates in floating point)
DXGI_FORMAT_R32G32_UINT (pixel coordinates as integers; the vertex shader position input becomes uint2 position : POSITION)
If you use FLOAT, the conversion is done on the C side; if you use UINT, the conversion is done on the vertex shader side. The speed difference would need profiling, but if the number of vertices is low I'd expect it to be negligible.
You can then easily remap those values into the -1 to 1 range in the vertex shader (which is quite efficient); you only need to pass the inverse viewport size in a constant buffer.
So your vertex shader becomes:
struct vs_input
{
float2 position : POSITION;
//uint2 position : POSITION; If you use UINT
float4 color : COLOR;
};
struct ps_input
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
cbuffer cbViewport : register(b0)
{
float2 inverseViewportSize;
}
ps_input main(vs_input input )
{
ps_input output;
float2 p = input.position; //if you use UINT, conversion is done here
p *= inverseViewportSize;
p *= 2.0f;
p -= 1.0f;
p.y *= -1.0f; // clip space is bottom to top, pixel coordinates are top to bottom
output.position = float4(p, 0.0f, 1.0f);
output.color = input.color;
return output;
}
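On the C side, the inverse viewport size can be fed to register(b0) with a small constant buffer; here is a minimal sketch using the same C COM macros as the code above (the struct name, the viewport_width/viewport_height variables and the update strategy are assumptions, not taken from the answer):

typedef struct
{
    float inverse_viewport[2];
    float padding[2]; /* constant buffers must be a multiple of 16 bytes */
} ViewportCB;

ViewportCB cb_data = { { 1.0f / viewport_width, 1.0f / viewport_height }, { 0.0f, 0.0f } };
D3D11_BUFFER_DESC cb_desc;
D3D11_SUBRESOURCE_DATA cb_init;
ID3D11Buffer *cb = NULL;

ZeroMemory(&cb_desc, sizeof(D3D11_BUFFER_DESC));
cb_desc.ByteWidth = sizeof(ViewportCB);
cb_desc.Usage = D3D11_USAGE_DEFAULT;
cb_desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cb_init.pSysMem = &cb_data;
cb_init.SysMemPitch = 0U;
cb_init.SysMemSlicePitch = 0U;
ID3D11Device_CreateBuffer(d3d->d3d_device, &cb_desc, &cb_init, &cb);

/* bind to slot b0 of the vertex shader; on resize, refresh the contents with UpdateSubresource */
ID3D11DeviceContext_VSSetConstantBuffers(d3d->d3d_device_ctx, 0U, 1U, &cb);

Releasing the buffer at shutdown (ID3D11Buffer_Release(cb)) follows the same pattern as the other resources in the question's code.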

How to cache an AVI file using FFmpeg

I am reading an AVI file using FFmpeg.
I want to cache the file into a vector and reuse it later.
This is my code.
typedef struct {
AVFormatContext *fmt_ctx;
int stream_idx;
AVStream *video_stream;
AVCodecContext *codec_ctx;
AVCodec *decoder;
AVPacket *packet;
AVFrame *av_frame;
AVFrame *gl_frame;
struct SwsContext *conv_ctx;
unsigned int frame_tex;
}AppData;
AppData data;
Here I am caching the file to a std::vector:
std::vector< AVFrame* > cache;
bool initReadFrame()
{
do {
glBindTexture(GL_TEXTURE_2D, data.frame_tex);
int error = av_read_frame(data.fmt_ctx, data.packet);
if (error)
{
av_free_packet(data.packet);
return false;
}
if (data.packet->stream_index == data.stream_idx)
{
int frame_finished = 0;
if (avcodec_decode_video2(data.codec_ctx, data.av_frame, &frame_finished,
data.packet) < 0) {
av_free_packet(data.packet);
return false;
}
if (frame_finished)
{
if (!data.conv_ctx)
{
data.conv_ctx = sws_getContext(data.codec_ctx->width,
data.codec_ctx->height, data.codec_ctx->pix_fmt,
data.codec_ctx->width, data.codec_ctx->height, AV_PIX_FMT_RGBA,
SWS_BICUBIC, NULL, NULL, NULL);
}
sws_scale(data.conv_ctx, data.av_frame->data, data.av_frame->linesize, 0,
data.codec_ctx->height, data.gl_frame->data, data.gl_frame->linesize);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, data.codec_ctx->width,
data.codec_ctx->height, GL_RGBA, GL_UNSIGNED_BYTE,
data.gl_frame->data[0]);
cache.push_back(av_frame_clone(data.gl_frame)); // Pushing AVFrame* to vector
}
}
av_free_packet(data.packet);
} while (data.packet->stream_index != data.stream_idx);
return true;
}
Here I am trying to read the buffer and update GL_TEXTURE_2D:
void playCache()
{
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, data.codec_ctx->width,
data.codec_ctx->height, GL_RGBA, GL_UNSIGNED_BYTE,
cache[temp]->data[0]);
temp++;
}
The issue I am facing is that when I try to read the cached data, the application crashes.
You are storing a dead reference in your cache.
cache.push_back(av_frame_clone(data.gl_frame));
The documentation says:
av_frame_clone: Create a new frame that references the same data as src.
When you destroy src, you lose its content and you can't access it in your cache.
You can either move the reference to your new frame, or copy its value.
Move:
AVFrame* cachedValue = av_frame_alloc(); // the destination frame must be allocated (and clean) before moving the ref
av_frame_move_ref(cachedValue, data.gl_frame);
cache.push_back(cachedValue);
Copy
AVFrame *cachedValue= av_frame_alloc();
cachedValue->format = data.gl_frame->format;
cachedValue->width = data.gl_frame->width;
cachedValue->height = data.gl_frame->height;
cachedValue->channels = data.gl_frame->channels;
cachedValue->channel_layout = data.gl_frame->channel_layout;
cachedValue->nb_samples = data.gl_frame->nb_samples;
av_frame_get_buffer(cachedValue, 32);
av_frame_copy(cachedValue, data.gl_frame);
av_frame_copy_props(cachedValue, data.gl_frame);
cache.push_back(cachedValue);
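Whichever variant is used, every frame kept in the cache still has to be released once it is no longer needed; a minimal sketch (the cleanup point is an assumption, not part of the answer):

for (AVFrame *f : cache)
    av_frame_free(&f);   // frees the AVFrame and unreferences its (moved or copied) buffers
cache.clear();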
/////////////////////////////////////////////////
avformat_network_init();
initializeAppData();
// open video
if (avformat_open_input(&data.fmt_ctx, stdstrPathOfVideo.c_str(), NULL, NULL) < 0) {
clearAppData();
return;
}
// find stream info
if (avformat_find_stream_info(data.fmt_ctx, NULL) < 0) {
clearAppData();
return;
}
// dump debug info
// av_dump_format(data.fmt_ctx, 0, "D:\\E\\Event\\2019\\AVI_Badges\\Generic\\Generic.avi", 0);
// find the video stream
for (unsigned int i = 0; i < data.fmt_ctx->nb_streams; ++i)
{
if (data.fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
data.stream_idx = i;
break;
}
}
if (data.stream_idx == -1)
{
clearAppData();
return;
}
data.video_stream = data.fmt_ctx->streams[data.stream_idx];
data.codec_ctx = data.video_stream->codec;
// find the decoder
data.decoder = avcodec_find_decoder(data.codec_ctx->codec_id);
if (data.decoder == NULL)
{
clearAppData();
return;
}
// open the decoder
if (avcodec_open2(data.codec_ctx, data.decoder, NULL) < 0)
{
clearAppData();
return;
}
// allocate the video frames
data.av_frame = av_frame_alloc();
data.gl_frame = av_frame_alloc();
int size = avpicture_get_size(AV_PIX_FMT_RGBA, data.codec_ctx->width,
data.codec_ctx->height);
uint8_t *internal_buffer = (uint8_t *)av_malloc(size * sizeof(uint8_t));
avpicture_fill((AVPicture *)data.gl_frame, internal_buffer, AV_PIX_FMT_RGBA,
data.codec_ctx->width, data.codec_ctx->height);
data.packet = (AVPacket *)av_malloc(sizeof(AVPacket));

What is wrong while providing arguments for sws_scale?

In the following code, I can't figure out what's wrong:
uint8_t *dstData[4];
int dstLinesize[4];
AVPixelFormat convertToPixFmt = AV_PIX_FMT_RGBA;
int ret;
// ...
printf("tmp_frame format: %d (%s) %dx%d\n", tmp_frame->format, av_get_pix_fmt_name((AVPixelFormat)tmp_frame->format), tmp_frame->width, tmp_frame->height);
// The above line prints: tmp_frame format: 23 (nv12) 480x480
int size = av_image_get_buffer_size(convertToPixFmt, tmp_frame->width, tmp_frame->height, 1);
uint8_t *buffer = (uint8_t *) av_malloc(size);
ret = av_image_copy_to_buffer(buffer, size,
(const uint8_t * const *)&tmp_frame->data[i],
(const int *)&tmp_frame->linesize[i], (AVPixelFormat)tmp_frame->format,
tmp_frame->width, tmp_frame->height, 1);
ASSERT(ret >= 0);
ret = av_image_fill_arrays(dstData, dstLinesize, buffer, convertToPixFmt, dest_width, dest_height, 1);
ASSERT(ret >= 0);
ret = sws_scale(
convertContext,
dstData,
dstLinesize,
0,
dest_width,
convertedFrame->data,
convertedFrame->linesize);
printf("sws_scale returns %d\n", ret); // prints: sws_scale returns 0
ASSERT(ret == tmp_frame->height);
// ...
It's part of code which uses dxva2 to obtain tmp_frame. I based the code on hw_decode.c and am sure that there's no mistake in it. The tmp_frame is properly produced in NV12 format. The error occurs just when I call sws_scale, and it is:
bad src image pointers
So I don't know how to provide the pointers so that I don't get this error and sws_scale works properly.
Any idea?
I have updated the question to include my whole code:
static AVBufferRef *hw_device_ctx = NULL;
static enum AVPixelFormat hw_pix_fmt;
static FILE *output_file = NULL;
int main(int argc, char *argv[])
{
AVFormatContext *input_ctx = NULL;
int video_stream, ret;
AVStream *video = NULL;
AVCodecContext *decoder_ctx = NULL;
AVCodec *decoder = NULL;
AVPacket packet;
enum AVHWDeviceType type;
int i;
if (argc < 2)
{
fprintf(stderr, "Usage: %s <input file>\n", argv[0]);
return -1;
}
type = av_hwdevice_find_type_by_name("dxva2");
ASSERT(type != AV_HWDEVICE_TYPE_NONE);
ASSERT(avformat_open_input(&input_ctx, argv[1], NULL, NULL) == 0);
ASSERT(avformat_find_stream_info(input_ctx, NULL) >= 0);
video_stream = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
ASSERT(video_stream >= 0);
decoder_ctx = avcodec_alloc_context3(decoder);
ASSERT(decoder_ctx);
video = input_ctx->streams[video_stream];
ASSERT(avcodec_parameters_to_context(decoder_ctx, video->codecpar) >= 0);
ASSERT(av_hwdevice_ctx_create(&hw_device_ctx, type, NULL, NULL, 0) >= 0);
decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
ASSERT(avcodec_open2(decoder_ctx, decoder, NULL) >= 0);
printf("video info: %dx%d\n", decoder_ctx->width, decoder_ctx->height);
AVFrame *frame = av_frame_alloc();
ASSERT(frame);
AVFrame *sw_frame = av_frame_alloc();
ASSERT(sw_frame);
AVFrame* convertedFrame = av_frame_alloc();
ASSERT(convertedFrame);
AVPixelFormat convertToPixFmt = AV_PIX_FMT_RGBA;
//int dest_width = 320, dest_height = 200;
int dest_width = decoder_ctx->width, dest_height = decoder_ctx->height;
SwsContext* convertContext = sws_getContext(decoder_ctx->width, decoder_ctx->height, AV_PIX_FMT_YUV420P,
dest_width, dest_height, convertToPixFmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
ASSERT(convertContext);
int convertedFrameAspectBufferSize = avpicture_get_size(convertToPixFmt, dest_width, dest_height);
void *convertedFrameBuffer = av_malloc(convertedFrameAspectBufferSize);
avpicture_fill((AVPicture*)convertedFrame, (uint8_t *)convertedFrameBuffer, convertToPixFmt, dest_width, dest_height);
output_file = fopen("1.out", "w+");
for (int i = 0; /*i < 20*/; i++)
{
ret = av_read_frame(input_ctx, &packet);
if (ret == AVERROR_EOF)
break;
ASSERT(ret >= 0);
if (video_stream != packet.stream_index)
continue;
int ret = avcodec_send_packet(decoder_ctx, &packet);
ASSERT(ret >= 0);
//printf("%p", decoder->hw_configs->hwaccel);
ret = avcodec_receive_frame(decoder_ctx, frame);
if (ret < 0)
printf("%d\t%d\n", i, ret);
AVFrame *tmp_frame;
if (frame->format > 0) // hw enabled
{
ASSERT(av_hwframe_transfer_data(sw_frame, frame, 0) >= 0);
tmp_frame = sw_frame;
}
else
{
tmp_frame = frame;
}
printf("frame format: %d (%s) %dx%d\n", frame->format, av_get_pix_fmt_name((AVPixelFormat)frame->format), frame->width, frame->height);
printf("sw_frame format: %d (%s) %dx%d\n", sw_frame->format, av_get_pix_fmt_name((AVPixelFormat)sw_frame->format), sw_frame->width, sw_frame->height);
printf("tmp_frame format: %d (%s) %dx%d\n", tmp_frame->format, av_get_pix_fmt_name((AVPixelFormat)tmp_frame->format), tmp_frame->width, tmp_frame->height);
/*
video info: 480x480
frame format: 53 (dxva2_vld) 480x480
sw_frame format: 23 (nv12) 480x480
[swscaler # 004cb2c0] bad src image pointers
*/
int size = av_image_get_buffer_size(convertToPixFmt, tmp_frame->width, tmp_frame->height, 1);
uint8_t *buffer = (uint8_t *) av_malloc(size);
ret = av_image_copy_to_buffer(buffer, size,
(const uint8_t * const *)&tmp_frame->data[i],
(const int *)&tmp_frame->linesize[i], (AVPixelFormat)tmp_frame->format,
tmp_frame->width, tmp_frame->height, 1);
ASSERT(ret > 0);
ret = av_image_fill_arrays(dstData, dstLinesize, buffer, convertToPixFmt, dest_width, dest_height, 1);
ASSERT(ret > 0);
ret = sws_scale(
convertContext,
tmp_frame->data,
tmp_frame->linesize,
0,
dest_width,
convertedFrame->data,
convertedFrame->linesize);
printf("sws_scale returns %d\n", ret);
ASSERT(ret == tmp_frame->height);
ret = fwrite(convertedFrame->data, tmp_frame->height * tmp_frame->width, 1, output_file);
ASSERT(ret == 1);
break;
}
av_frame_free(&frame);
av_packet_unref(&packet);
avcodec_free_context(&decoder_ctx);
avformat_close_input(&input_ctx);
av_buffer_unref(&hw_device_ctx);
return 0;
}
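As a point of reference, sws_scale() expects the source frame's own data[] and linesize[] arrays (all planes, starting at index 0), and the SwsContext must be created with the frame's actual source format. A minimal sketch for the NV12 sw_frame above, using dest_width/dest_height and convertedFrame from the question (this is an illustration, not an answer taken from the thread):

// The context's source format has to match the frame (NV12 here), not AV_PIX_FMT_YUV420P.
SwsContext *nv12ToRgba = sws_getContext(sw_frame->width, sw_frame->height, (AVPixelFormat)sw_frame->format,
                                        dest_width, dest_height, AV_PIX_FMT_RGBA,
                                        SWS_FAST_BILINEAR, NULL, NULL, NULL);

// Pass the whole data[]/linesize[] arrays and the source height as srcSliceH.
sws_scale(nv12ToRgba,
          (const uint8_t * const *)sw_frame->data, sw_frame->linesize,
          0, sw_frame->height,
          convertedFrame->data, convertedFrame->linesize);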

How to get picture buffer data in ffmpeg?

I'm trying to pass a bitmap from ffmpeg to Android.
It already works, but it displays the picture directly on a surface passed from Java to native code.
How can I get the frame buffer bitmap data to pass it to Java?
I've tried to save out_frame buffer data:
unsigned char bmpFileHeader[14] = {'B', 'M', 0,0,0,0, 0,0, 0,0, 54, 0,0,0};
unsigned char bmpInfoHeader[40] = {40,0,0,0, 0,0,0,0, 0,0,0,0, 1,0, 24,0};
unsigned char bmpPad[3] = {0, 0, 0};
void saveBuffer(int fileIndex, int width, int height, unsigned char *buffer, int buffer_size) {
unsigned char filename[1024];
sprintf(filename, "/storage/sdcard0/3d_player_%d.bmp", fileIndex);
LOGI(10, "saving ffmpeg bitmap file: %d to %s", fileIndex, filename);
FILE *bitmapFile = fopen(filename, "wb");
if (!bitmapFile) {
LOGE(10, "failed to create ffmpeg bitmap file");
return;
}
unsigned int filesize = 54 + 3 * width * height; // 3 = (r,g,b)
bmpFileHeader[2] = (unsigned char)(filesize);
bmpFileHeader[3] = (unsigned char)(filesize >> 8);
bmpFileHeader[4] = (unsigned char)(filesize >> 16);
bmpFileHeader[5] = (unsigned char)(filesize >> 24);
bmpInfoHeader[4] = (unsigned char)(width);
bmpInfoHeader[5] = (unsigned char)(width >> 8);
bmpInfoHeader[6] = (unsigned char)(width >> 16);
bmpInfoHeader[7] = (unsigned char)(width >> 24);
bmpInfoHeader[8] = (unsigned char)(height);
bmpInfoHeader[9] = (unsigned char)(height >> 8);
bmpInfoHeader[10] = (unsigned char)(height >> 16);
bmpInfoHeader[11] = (unsigned char)(height >> 24);
fwrite(bmpFileHeader, 1, 14, bitmapFile);
fwrite(bmpInfoHeader, 1, 40, bitmapFile);
int i;
for (i=0; i<height; i++) {
fwrite(buffer + width * (height - 1 - i) * 3, 3, width, bitmapFile); // BMP rows are stored bottom-up
fwrite(bmpPad, 1, (4-(width * 3) % 4) % 4, bitmapFile);
}
fflush(bitmapFile);
fclose(bitmapFile);
}
int player_decode_video(struct DecoderData * decoder_data, JNIEnv * env,
struct PacketData *packet_data) {
int got_frame_ptr;
struct Player *player = decoder_data->player;
int stream_no = decoder_data->stream_no;
AVCodecContext * ctx = player->input_codec_ctxs[stream_no];
AVFrame * frame = player->input_frames[stream_no];
AVStream * stream = player->input_streams[stream_no];
int interrupt_ret;
int to_write;
int err = 0;
AVFrame *rgb_frame = player->rgb_frame;
ANativeWindow_Buffer buffer;
ANativeWindow * window;
#ifdef MEASURE_TIME
struct timespec timespec1, timespec2, diff;
#endif // MEASURE_TIME
LOGI(10, "player_decode_video decoding");
int frameFinished;
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
int ret = avcodec_decode_video2(ctx, frame, &frameFinished,
packet_data->packet);
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
diff = timespec_diff(timespec1, timespec2);
LOGI(3, "decode_video timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME
if (ret < 0) {
LOGE(1, "player_decode_video Fail decoding video %d\n", ret);
return -ERROR_WHILE_DECODING_VIDEO;
}
if (!frameFinished) {
LOGI(10, "player_decode_video Video frame not finished\n");
return 0;
}
// saving in buffer converted video frame
LOGI(7, "player_decode_video copy wait");
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
pthread_mutex_lock(&player->mutex_queue);
window = player->window;
if (window == NULL) {
pthread_mutex_unlock(&player->mutex_queue);
goto skip_frame;
}
ANativeWindow_setBuffersGeometry(window, ctx->width, ctx->height,
WINDOW_FORMAT_RGBA_8888);
if (ANativeWindow_lock(window, &buffer, NULL) != 0) {
pthread_mutex_unlock(&player->mutex_queue);
goto skip_frame;
}
pthread_mutex_unlock(&player->mutex_queue);
int format = buffer.format;
if (format < 0) {
LOGE(1, "Could not get window format")
}
enum PixelFormat out_format;
if (format == WINDOW_FORMAT_RGBA_8888) {
out_format = PIX_FMT_RGBA;
LOGI(6, "Format: WINDOW_FORMAT_RGBA_8888");
} else if (format == WINDOW_FORMAT_RGBX_8888) {
out_format = PIX_FMT_RGB0;
LOGE(1, "Format: WINDOW_FORMAT_RGBX_8888 (not supported)");
} else if (format == WINDOW_FORMAT_RGB_565) {
out_format = PIX_FMT_RGB565;
LOGE(1, "Format: WINDOW_FORMAT_RGB_565 (not supported)");
} else {
LOGE(1, "Unknown window format");
}
avpicture_fill((AVPicture *) rgb_frame, buffer.bits, out_format,
buffer.width, buffer.height);
rgb_frame->data[0] = buffer.bits;
if (format == WINDOW_FORMAT_RGBA_8888) {
rgb_frame->linesize[0] = buffer.stride * 4;
} else {
LOGE(1, "Unknown window format");
}
LOGI(6,
"Buffer: width: %d, height: %d, stride: %d",
buffer.width, buffer.height, buffer.stride);
int i = 0;
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
diff = timespec_diff(timespec1, timespec2);
LOGI(1,
"lockPixels and fillimage timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
LOGI(7, "player_decode_video copying...");
AVFrame * out_frame;
int rescale;
if (ctx->width == buffer.width && ctx->height == buffer.height) {
// This always should be true
out_frame = rgb_frame;
rescale = FALSE;
} else {
out_frame = player->tmp_frame2;
rescale = TRUE;
}
if (ctx->pix_fmt == PIX_FMT_YUV420P) {
__I420ToARGB(frame->data[0], frame->linesize[0], frame->data[2],
frame->linesize[2], frame->data[1], frame->linesize[1],
out_frame->data[0], out_frame->linesize[0], ctx->width,
ctx->height);
} else if (ctx->pix_fmt == PIX_FMT_NV12) {
__NV21ToARGB(frame->data[0], frame->linesize[0], frame->data[1],
frame->linesize[1], out_frame->data[0], out_frame->linesize[0],
ctx->width, ctx->height);
} else {
LOGI(3, "Using slow conversion: %d ", ctx->pix_fmt);
struct SwsContext *sws_context = player->sws_context;
sws_context = sws_getCachedContext(sws_context, ctx->width, ctx->height,
ctx->pix_fmt, ctx->width, ctx->height, out_format,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
player->sws_context = sws_context;
if (sws_context == NULL) {
LOGE(1, "could not initialize conversion context from: %d"
", to :%d\n", ctx->pix_fmt, out_format);
// TODO some error
}
sws_scale(sws_context, (const uint8_t * const *) frame->data,
frame->linesize, 0, ctx->height, out_frame->data,
out_frame->linesize);
}
if (rescale) {
// Never occurs
__ARGBScale(out_frame->data[0], out_frame->linesize[0], ctx->width,
ctx->height, rgb_frame->data[0], rgb_frame->linesize[0],
buffer.width, buffer.height, __kFilterNone);
out_frame = rgb_frame;
}
// TODO: (4ntoine) frame decoded and rescaled, ready to call callback with frame picture from buffer
int bufferSize = buffer.width * buffer.height * 3; // 3 = (r,g,b);
static int bitmapCounter = 0;
if (bitmapCounter < 10) {
saveBuffer(bitmapCounter++, buffer.width, buffer.height, (unsigned char *)out_frame->data, bufferSize);
}
But out_frame is empty and the file has a header and a body of 0x00 bytes.
How can I get the picture buffer data in ffmpeg?
Solved. In short: you should take the buffer from ANativeWindow_Buffer, i.e. buffer.bits. Pay attention: the buffer is RGBA (4 bytes per pixel) but BMP is usually RGB (3 bytes per pixel). To save it as BMP, one needs to add a BMP header and write the lines with padding.
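A minimal sketch of that repacking before calling the saveBuffer() helper from the question (the temporary rgb buffer and the loop are an illustration, not code from the thread):

uint8_t *rgba = (uint8_t *)buffer.bits;
uint8_t *rgb = (uint8_t *)malloc(3 * buffer.width * buffer.height);
for (int y = 0; y < buffer.height; y++) {
    for (int x = 0; x < buffer.width; x++) {
        uint8_t *src = rgba + (y * buffer.stride + x) * 4;  // stride is given in pixels
        uint8_t *dst = rgb + (y * buffer.width + x) * 3;
        dst[0] = src[2];  // BMP pixel order is B, G, R
        dst[1] = src[1];
        dst[2] = src[0];
    }
}
saveBuffer(bitmapCounter++, buffer.width, buffer.height, rgb, 3 * buffer.width * buffer.height);
free(rgb);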

How to resize a picture using ffmpeg's sws_scale()?

I want to resize a picture using ffmpeg's function sws_scale().
Does anyone know how to do it?
Do you have example source code for this function?
First you need to create an SwsContext (you only need to do this once):
struct SwsContext *resize;
resize = sws_getContext(width1, height1, AV_PIX_FMT_YUV420P, width2, height2, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
You need two frames for the conversion; frame1 is the original frame, and you need to explicitly allocate frame2:
AVFrame* frame1 = avcodec_alloc_frame(); // this is your original frame
AVFrame* frame2 = avcodec_alloc_frame();
int num_bytes = avpicture_get_size(AV_PIX_FMT_RGB24, width2, height2);
uint8_t* frame2_buffer = (uint8_t *)av_malloc(num_bytes*sizeof(uint8_t));
avpicture_fill((AVPicture*)frame2, frame2_buffer, AV_PIX_FMT_RGB24, width2, height2);
You may use this part inside a loop if you need to resize each frame you receive:
// frame1 should be filled by now (eg using avcodec_decode_video)
sws_scale(resize, frame1->data, frame1->linesize, 0, height1, frame2->data, frame2->linesize);
Note that I also changed the pixel format, but you can use the same pixel format for both frames.
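Once you are done, the scaler context and the explicitly allocated frame should be released as well; a minimal sketch (these cleanup calls are implied rather than shown above, and with the deprecated avcodec_alloc_frame()/avpicture_fill() pairing the frames are freed with av_free()):

sws_freeContext(resize);   // release the scaler
av_free(frame2_buffer);    // buffer attached with avpicture_fill()
av_free(frame2);
av_free(frame1);           // with current FFmpeg, av_frame_alloc()/av_frame_free() replace these calls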
Runnable example in FFmpeg 2.8
Basically using arash's method, but runnable so you can try it out.
Generate one short video procedurally, and then convert it to 3 different sizes.
ffmpeg_encoder_init_frame and ffmpeg_encoder_scale are the key methods.
Source:
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
static AVCodecContext *c = NULL;
static AVFrame *frame;
static AVFrame *frame2;
static AVPacket pkt;
static FILE *file;
static struct SwsContext *sws_context = NULL;
static void ffmpeg_encoder_init_frame(AVFrame **framep, int width, int height) {
int ret;
AVFrame *frame;
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = width;
frame->height = height;
ret = av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, frame->format, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
*framep = frame;
}
static void ffmpeg_encoder_scale(uint8_t *rgb) {
sws_context = sws_getCachedContext(sws_context,
frame->width, frame->height, AV_PIX_FMT_YUV420P,
frame2->width, frame2->height, AV_PIX_FMT_YUV420P,
SWS_BICUBIC, NULL, NULL, NULL);
sws_scale(sws_context, (const uint8_t * const *)frame->data, frame->linesize, 0,
frame->height, frame2->data, frame2->linesize);
}
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
const int in_linesize[1] = { 3 * frame->width };
sws_context = sws_getCachedContext(sws_context,
frame->width, frame->height, AV_PIX_FMT_RGB24,
frame->width, frame->height, AV_PIX_FMT_YUV420P,
0, NULL, NULL, NULL);
sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
frame->height, frame->data, frame->linesize);
}
void generate_rgb(int width, int height, int pts, uint8_t **rgbp) {
int x, y, cur;
uint8_t *rgb = *rgbp;
rgb = realloc(rgb, 3 * sizeof(uint8_t) * height * width);
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
cur = 3 * (y * width + x);
rgb[cur + 0] = 0;
rgb[cur + 1] = 0;
rgb[cur + 2] = 0;
if ((frame->pts / 25) % 2 == 0) {
if (y < height / 2) {
if (x < width / 2) {
/* Black. */
} else {
rgb[cur + 0] = 255;
}
} else {
if (x < width / 2) {
rgb[cur + 1] = 255;
} else {
rgb[cur + 2] = 255;
}
}
} else {
if (y < height / 2) {
rgb[cur + 0] = 255;
if (x < width / 2) {
rgb[cur + 1] = 255;
} else {
rgb[cur + 2] = 255;
}
} else {
if (x < width / 2) {
rgb[cur + 1] = 255;
rgb[cur + 2] = 255;
} else {
rgb[cur + 0] = 255;
rgb[cur + 1] = 255;
rgb[cur + 2] = 255;
}
}
}
}
}
*rgbp = rgb;
}
void ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height, float factor) {
AVCodec *codec;
int ret;
int width2 = width * factor;
int height2 = height * factor;
avcodec_register_all();
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
c->bit_rate = 400000;
c->width = width2;
c->height = height2;
c->time_base.num = 1;
c->time_base.den = fps;
c->gop_size = 10;
c->max_b_frames = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
file = fopen(filename, "wb");
if (!file) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
ffmpeg_encoder_init_frame(&frame, width, height);
ffmpeg_encoder_init_frame(&frame2, width2, height2);
}
void ffmpeg_encoder_finish(void) {
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
int got_output, ret;
do {
fflush(stdout);
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, file);
av_packet_unref(&pkt);
}
} while (got_output);
fwrite(endcode, 1, sizeof(endcode), file);
fclose(file);
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
av_frame_free(&frame);
av_freep(&frame2->data[0]);
av_frame_free(&frame2);
}
void ffmpeg_encoder_encode_frame(uint8_t *rgb) {
int ret, got_output;
ffmpeg_encoder_set_frame_yuv_from_rgb(rgb);
ffmpeg_encoder_scale(rgb);
frame2->pts = frame->pts;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
ret = avcodec_encode_video2(c, &pkt, frame2, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, file);
av_packet_unref(&pkt);
}
}
static void encode_example(float factor) {
char filename[255];
int pts;
int width = 320;
int height = 240;
uint8_t *rgb = NULL;
snprintf(filename, 255, "tmp." __FILE__ ".%.2f.h264", factor);
ffmpeg_encoder_start(filename, AV_CODEC_ID_H264, 25, width, height, factor);
for (pts = 0; pts < 100; pts++) {
frame->pts = pts;
generate_rgb(width, height, pts, &rgb);
ffmpeg_encoder_encode_frame(rgb);
}
ffmpeg_encoder_finish();
free(rgb);
}
int main(void) {
encode_example(0.5);
encode_example(1.0);
encode_example(2.0);
return EXIT_SUCCESS;
}
Run with:
gcc main.c -lavformat -lavcodec -lswresample -lswscale -lavutil -lx264
./a.out
ffplay tmp.main.c.0.50.h264
ffplay tmp.main.c.1.00.h264
ffplay tmp.main.c.2.00.h264
Tested on ffmpeg 3.4.11, Ubuntu 16.04. Source on GitHub. Adapted from doc/examples/decoding_encoding.c.
