Programmatically take screenshots and save as PNG - winapi

I've been using the following code for quite a while:
#include <fstream>
#include <gdiplus.h>
#include <windows.h>
#include <iostream>
using namespace std;
void CaptureScreen(const char* filename)
{
HDC hScreenDC = GetDC(0);
HDC hMemoryDC = CreateCompatibleDC(hScreenDC);
int upper_left_x = GetSystemMetrics(SM_XVIRTUALSCREEN);
int upper_left_y = GetSystemMetrics(SM_YVIRTUALSCREEN);
int bitmap_dx = GetSystemMetrics(SM_CXVIRTUALSCREEN ) * 1.25f;
int bitmap_dy = GetSystemMetrics(SM_CYVIRTUALSCREEN ) * 1.25f;
// create file
ofstream file(filename, ios::binary);
if(!file) return;
// save bitmap file headers
BITMAPFILEHEADER fileHeader;
BITMAPINFOHEADER infoHeader;
fileHeader.bfType = 0x4d42;
fileHeader.bfSize = 0;
fileHeader.bfReserved1 = 0;
fileHeader.bfReserved2 = 0;
fileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
infoHeader.biSize = sizeof(infoHeader);
infoHeader.biWidth = bitmap_dx;
infoHeader.biHeight = -bitmap_dy;
infoHeader.biPlanes = 1;
infoHeader.biBitCount = 16;
infoHeader.biCompression = BI_RGB;
infoHeader.biSizeImage = 0;
infoHeader.biXPelsPerMeter = 0;
infoHeader.biYPelsPerMeter = 0;
infoHeader.biClrUsed = 0;
infoHeader.biClrImportant = 0;
file.write((char*)&fileHeader, sizeof(fileHeader));
file.write((char*)&infoHeader, sizeof(infoHeader));
// dibsection information
BITMAPINFO info;
info.bmiHeader = infoHeader;
// ------------------
// THE IMPORTANT CODE
// ------------------
// create a dibsection and blit the window contents to the bitmap
BYTE* memory = 0;
HBITMAP bitmap = CreateDIBSection(hScreenDC, &info, DIB_RGB_COLORS, (void**)&memory, 0, 0);
SelectObject(hMemoryDC, bitmap);
BitBlt(hMemoryDC, 0, 0, bitmap_dx, bitmap_dy, hScreenDC, upper_left_x, upper_left_y, SRCCOPY);
DeleteDC(hMemoryDC);
ReleaseDC(NULL, hScreenDC);
// save dibsection data
int bytes = (((16*bitmap_dx + 31) & (~31))/8)*bitmap_dy;
file.write((const char *)memory, bytes);
DeleteObject(bitmap);
}
int main()
{
CaptureScreen("ok.jpg");
return 0;
}
But it generates a very large BMP file because the bitmap is saved uncompressed.
I'm looking for a way to capture a screenshot, save it to a buffer in PNG format, send it over a TCP connection, and save it as a PNG file on the other end.
I believe it has something to do with assigning BI_PNG to infoHeader.biCompression and a different calculation of bytes, but I can't figure out exactly what.

http://lodev.org/lodepng/
#include <fstream>
#include <gdiplus.h>
#include <windows.h>
#include <iostream>
#include "lodepng.h"
const int bits_per_pixel = 24;
using namespace std;
void CaptureScreen(const char* filename)
{
HDC hScreenDC = GetDC(0);
HDC hMemoryDC = CreateCompatibleDC(hScreenDC);
int upper_left_x = GetSystemMetrics(SM_XVIRTUALSCREEN);
int upper_left_y = GetSystemMetrics(SM_YVIRTUALSCREEN);
int bitmap_dx = GetSystemMetrics(SM_CXVIRTUALSCREEN ) * 1.25f;
int bitmap_dy = GetSystemMetrics(SM_CYVIRTUALSCREEN ) * 1.25f;
// create file
ofstream file(filename, ios::binary);
if(!file) return;
// save bitmap file headers
BITMAPFILEHEADER fileHeader;
BITMAPINFOHEADER infoHeader;
fileHeader.bfType = 0x4d42;
fileHeader.bfSize = 0;
fileHeader.bfReserved1 = 0;
fileHeader.bfReserved2 = 0;
fileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
infoHeader.biSize = sizeof(infoHeader);
infoHeader.biWidth = bitmap_dx;
infoHeader.biHeight = -bitmap_dy;
infoHeader.biPlanes = 1;
infoHeader.biBitCount = bits_per_pixel;
infoHeader.biCompression = BI_RGB;
infoHeader.biSizeImage = 0;
infoHeader.biXPelsPerMeter = 0;
infoHeader.biYPelsPerMeter = 0;
infoHeader.biClrUsed = 0;
infoHeader.biClrImportant = 0;
file.write((char*)&fileHeader, sizeof(fileHeader));
file.write((char*)&infoHeader, sizeof(infoHeader));
// dibsection information
BITMAPINFO info;
info.bmiHeader = infoHeader;
// ------------------
// THE IMPORTANT CODE
// ------------------
// create a dibsection and blit the window contents to the bitmap
BYTE* memory = 0;
HBITMAP bitmap = CreateDIBSection(hScreenDC, &info, DIB_RGB_COLORS, (void**)&memory, 0, 0);
SelectObject(hMemoryDC, bitmap);
BitBlt(hMemoryDC, 0, 0, bitmap_dx, bitmap_dy, hScreenDC, upper_left_x, upper_left_y, SRCCOPY);
DeleteDC(hMemoryDC);
ReleaseDC(NULL, hScreenDC);
// save dibsection data
int bytes = (((bits_per_pixel*bitmap_dx + 31) & (~31))/8)*bitmap_dy;
file.write((const char *)memory, bytes);
unsigned char *out_buffer;
size_t out_buffer_len;
unsigned error;
if ( bits_per_pixel == 24 )
{
// convert memory from bgr format to rgb
for ( unsigned i = 0; i< bytes-2; i+=3)
{
int tmp = memory[i+2];
memory[i+2] = memory[i];
memory[i] = tmp;
}
error = lodepng_encode24(&out_buffer,
&out_buffer_len,
memory,
bitmap_dx,
bitmap_dy);
}
if ( bits_per_pixel == 32 )
{
// convert memory from bgra format to rgba
for ( unsigned i = 0; i< bytes-3; i+=4)
{
int tmp = memory[i+2];
memory[i+2] = memory[i];
memory[i] = tmp;
}
error = lodepng_encode32(&out_buffer,
&out_buffer_len,
memory,
bitmap_dx,
bitmap_dy);
}
if ( error )
{
std::cout << "error: " << error << '\n';
return;
}
lodepng_save_file(out_buffer, out_buffer_len, "stam.png");
free(out_buffer);
DeleteObject(bitmap);
}
int main()
{
CaptureScreen("ok.jpg");
return 0;
}
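The question also asks to send the PNG over a TCP connection. Since lodepng_encode24() already leaves the encoded image in out_buffer/out_buffer_len, that buffer can be sent directly instead of (or in addition to) calling lodepng_save_file(). Below is a minimal sketch, assuming an already-connected Winsock SOCKET (WSAStartup done, ws2_32.lib linked); the 4-byte length prefix is just one possible framing, use whatever your receiver expects.
#include <winsock2.h>
static int send_png_buffer(SOCKET sock, const unsigned char *buf, size_t len)
{
    /* 4-byte big-endian length prefix so the receiver knows how much to read */
    unsigned long net_len = htonl((unsigned long)len);
    size_t sent = 0;
    if (send(sock, (const char *)&net_len, sizeof(net_len), 0) == SOCKET_ERROR)
        return -1;
    while (sent < len)
    {
        int n = send(sock, (const char *)buf + sent, (int)(len - sent), 0);
        if (n == SOCKET_ERROR)
            return -1;
        sent += (size_t)n;
    }
    return 0;
}
On the receiving side, read the 4-byte length, read that many bytes, and write them to a .png file as-is; the PNG data needs no further processing.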

Related

File Name Path Bug

I'm starting to program and I'm using Visual Studio. I have this simple program that worked a few days ago, but after working on another project it now gives me an error:
// Include file header
#include <Windows.h>
#include <iostream>
#include <sstream>
#include <string>
#include <string.h>
#include <gif_lib.h>
void SaveDialogCreation(HWND hWnd)
{
OPENFILENAMEA ofn;
char szFileName[MAX_PATH] = "";
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFilter = "Text Files (*.txt)\0*.txt\0All Files (*.*)\0*.*\0";
ofn.lpstrFile = szFileName;
ofn.nMaxFile = MAX_PATH;
ofn.lpstrDefExt = "txt";
ofn.Flags = OFN_EXPLORER | OFN_PATHMUSTEXIST | OFN_HIDEREADONLY | OFN_OVERWRITEPROMPT;
// open save dialog
if (GetSaveFileNameA(ofn))
{
gif_write(szFileName);
}
}
//gif_write code function
bool gif_write(const char *fileName)
{
// it using giflib_win library
int error;
// Open gif file
GifFileType* gifFile = EGifOpenFileName(fileName, false, &error);
// error while open gif file
if (!gifFile) {
std::cout << "EGifOpenFileName() failed - " << error << std::endl;
return false;
}
GifColorType colors[64];
GifColorType* c = colors;
int level[4] = { 0, 85, 170, 255 };
for (int r = 0; r < 4; ++r) {
for (int g = 0; g < 4; ++g) {
for (int b = 0; b < 4; ++b, ++c) {
c->Red = level[r];
c->Green = level[g];
c->Blue = level[b];
}
}
}
// Write pixel to gif
GifByteType pix[16] = {
0, 1, 2, 3, // B
0, 4, 8, 12, // G
0, 16, 32, 48, // R
0, 21, 42, 63, // BK
};
gifFile->SWidth = 4;
gifFile->SHeight = 4;
gifFile->SColorResolution = 8;
gifFile->SBackGroundColor = 0;
gifFile->SColorMap = GifMakeMapObject(64, colors);
SavedImage gifImage;
gifImage.ImageDesc.Left = 0;
gifImage.ImageDesc.Top = 0;
gifImage.ImageDesc.Width = 4;
gifImage.ImageDesc.Height = 4;
gifImage.ImageDesc.Interlace = false;
gifImage.ImageDesc.ColorMap = nullptr;
gifImage.RasterBits = (GifByteType*)malloc(16);
gifImage.ExtensionBlockCount = 0;
gifImage.ExtensionBlocks = nullptr;
memcpy(gifImage.RasterBits, pix, 16);
GifMakeSavedImage(gifFile, &gifImage);
if (EGifSpew(gifFile) == GIF_ERROR) {
std::cout << "EGifSpew() failed - " << gifFile->Error << std::endl;
EGifCloseFile(gifFile, &error);
return false;
}
// Close gif file
EGifCloseFile(gifFile, &error);
return true;
}
I can compile it, but when I use the save dialog to call gif_write(), the GIF file is saved with only the first character of the filename.
How can I solve this?
It compiles, but it crashed.
There is definitely a semantic problem with the code. A possible culprit is OFN_EXPLORER, which the documentation describes as follows:
"Indicates that any customizations made to the Open or Save As dialog box use the new Explorer-style customization methods. By default, the Open and Save As dialog boxes use the Explorer-style user interface regardless of whether this flag is set. This flag is necessary only if you provide a hook procedure or custom template, or set the OFN_ALLOWMULTISELECT flag."
You may not need it in this example.
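A separate observation about the posted code (hedged, since the real project may differ from what is shown): GetSaveFileNameA takes a pointer to the OPENFILENAMEA structure, so the call should be GetSaveFileNameA(&ofn). Also, a filename that comes out as only its first character is the classic symptom of an ANSI/Unicode mix-up, e.g. calling the wide GetSaveFileName/OPENFILENAME with a char buffer in a UNICODE build, because the second byte of a wide string terminates the narrow string. A consistently ANSI version of the call would look like this:
// GetSaveFileNameA expects LPOPENFILENAMEA, i.e. a pointer to the
// structure filled in above; szFileName is the char buffer it fills.
if (GetSaveFileNameA(&ofn))
{
    gif_write(szFileName);
}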

direct3d 11 and 2D: pass coordinates of a vertex as int and not float

My purpose is to write a 2D backend for a toolkit using only Direct3D 11 (no additional library like Direct2D, SpriteBatch, or anything else).
Note that this is the first time I am using Direct3D, and I'm currently learning D3D 11.
So for now, I can display a triangle or rectangle of the color I want.
The vertex structure in my C code contains 2 floats for the position and 4 unsigned chars for the color. In my vertex shader, the vertex structure has 2 floats for the position of the vertex and 4 floats for the color.
I have noticed that if I use DXGI_FORMAT_R8G8B8A8_UNORM for the color in my D3D11_INPUT_ELEMENT_DESC array, then the color is automatically converted from the 0-255 range to the 0.0f-1.0f range. This seems reasonable when I read the documentation (the DXGI_FORMAT enumeration, description of _UNORM):
"Unsigned normalized integer; which is interpreted in a resource as an unsigned integer, and is interpreted in a shader as an unsigned normalized floating-point value in the range [0, 1]. All 0's maps to 0.0f, and all 1's maps to 1.0f. A sequence of evenly spaced floating-point values from 0.0f to 1.0f are represented. For instance, a 2-bit UNORM represents 0.0f, 1/3, 2/3, and 1.0f."
Or at least that is how I interpret this doc (I may be wrong). And the color of the triangle is correct.
What I would like to do is the same for pixel coordinates: if I pass integers for the coordinates (x between 0 and the window width - 1, and y between 0 and the window height - 1), can they be interpreted as the correct signed normalized floating-point values by the vertex shader (-1.0f to 1.0f for x, and 1.0f to -1.0f for y)? I tried several values in my Vertex C struct and D3D11_INPUT_ELEMENT_DESC array, without luck. So I have two questions:
Is it possible?
If it is not possible, is it faster to convert the coordinates in the C code or in the shader code (with the viewport as a constant buffer)? See the macros XF and YF in the code below for the conversion from int to float.
Below is my complete code that displays a simple triangle, followed by the HLSL code for the vertex and pixel shaders. I use the C API of Direct3D. I support Windows 7 and Windows 10.
Source code:
/* Windows 10 */
#define _WIN32_WINNT 0x0A00
#if defined _WIN32_WINNT && _WIN32_WINNT >= 0x0A00
# define HAVE_WIN10
#endif
#include <stdio.h>
#ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
/* C API for d3d11 */
#define COBJMACROS
#include <guiddef.h>
#ifdef HAVE_WIN10
# include <dxgi1_3.h>
#else
# include <dxgi.h>
#endif
#include <d3d11.h>
#include "d3d11_vs.h"
#include "d3d11_ps.h"
/* comment for no debug informations */
#define _DEBUG
#ifdef _DEBUG
# define FCT \
do { printf(" * %s\n", __FUNCTION__); fflush(stdout); } while (0)
#else
# define FCT \
do { } while (0)
#endif
#define XF(w,x) ((float)(2 * (x) - (w)) / (float)(w))
#define YF(h,y) ((float)((h) - 2 * (y)) / (float)(h))
typedef struct Window Window;
typedef struct D3d D3d;
struct Window
{
HINSTANCE instance;
RECT rect;
HWND win;
D3d *d3d;
};
struct D3d
{
#ifdef HAVE_WIN10
IDXGIFactory2 *dxgi_factory;
IDXGISwapChain1 *dxgi_swapchain;
#else
IDXGIFactory *dxgi_factory;
IDXGISwapChain *dxgi_swapchain;
#endif
ID3D11Device *d3d_device;
ID3D11DeviceContext *d3d_device_ctx;
ID3D11RenderTargetView *d3d_render_target_view;
ID3D11InputLayout *d3d_input_layout;
ID3D11VertexShader *d3d_vertex_shader;
ID3D11PixelShader *d3d_pixel_shader;
D3D11_VIEWPORT viewport;
Window *win;
unsigned int vsync : 1;
};
typedef struct
{
FLOAT x;
FLOAT y;
BYTE r;
BYTE g;
BYTE b;
BYTE a;
} Vertex;
void d3d_resize(D3d *d3d, UINT width, UINT height);
void d3d_render(D3d *d3d);
/************************* Window *************************/
LRESULT CALLBACK
_window_procedure(HWND window,
UINT message,
WPARAM window_param,
LPARAM data_param)
{
switch (message)
{
case WM_CLOSE:
PostQuitMessage(0);
return 0;
case WM_KEYUP:
if (window_param == 'Q')
{
PostQuitMessage(0);
}
return 0;
case WM_ERASEBKGND:
/* no need to erase back */
return 1;
/* GDI notifications */
case WM_CREATE:
#ifdef _DEBUG
printf(" * WM_CREATE\n");
fflush(stdout);
#endif
return 0;
case WM_SIZE:
{
Window *win;
#ifdef _DEBUG
printf(" * WM_SIZE\n");
fflush(stdout);
#endif
win = (Window *)GetWindowLongPtr(window, GWLP_USERDATA);
d3d_resize(win->d3d,
(UINT)LOWORD(data_param), (UINT)HIWORD(data_param));
return 0;
}
case WM_PAINT:
{
#ifdef _DEBUG
printf(" * WM_PAINT\n");
fflush(stdout);
#endif
if (GetUpdateRect(window, NULL, FALSE))
{
PAINTSTRUCT ps;
Window *win;
BeginPaint(window, &ps);
win = (Window *)GetWindowLongPtr(window, GWLP_USERDATA);
d3d_render(win->d3d);
EndPaint(window, &ps);
}
return 0;
}
default:
return DefWindowProc(window, message, window_param, data_param);
}
}
Window *window_new(int x, int y, int w, int h)
{
WNDCLASS wc;
RECT r;
Window *win;
win = (Window *)calloc(1, sizeof(Window));
if (!win)
return NULL;
win->instance = GetModuleHandle(NULL);
if (!win->instance)
goto free_win;
memset(&wc, 0, sizeof(WNDCLASS));
wc.style = CS_HREDRAW | CS_VREDRAW;
wc.lpfnWndProc = _window_procedure;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.hInstance = win->instance;
wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.hbrBackground = NULL;
wc.lpszMenuName = NULL;
wc.lpszClassName = "D3D";
if (!RegisterClass(&wc))
goto free_library;
r.left = 0;
r.top = 0;
r.right = w;
r.bottom = h;
if (!AdjustWindowRectEx(&r,
WS_OVERLAPPEDWINDOW | WS_SIZEBOX,
FALSE,
0U))
goto unregister_class;
win->win = CreateWindowEx(0U,
"D3D", "Test",
WS_OVERLAPPEDWINDOW | WS_SIZEBOX,
x, y,
r.right - r.left,
r.bottom - r.top,
NULL,
NULL, win->instance, NULL);
if (!win->win)
goto unregister_class;
return win;
unregister_class:
UnregisterClass("D2D", win->instance);
free_library:
FreeLibrary(win->instance);
free_win:
free(win);
return NULL;
}
void window_del(Window *win)
{
if (!win)
return;
DestroyWindow(win->win);
UnregisterClass("D2D", win->instance);
FreeLibrary(win->instance);
free(win);
}
void window_show(Window *win)
{
ShowWindow(win->win, SW_SHOWNORMAL);
}
/************************** D3D11 **************************/
static void d3d_refresh_rate_get(D3d *d3d, UINT *num, UINT *den)
{
DXGI_MODE_DESC *display_mode_list = NULL; /* 28 bytes */
IDXGIAdapter *dxgi_adapter;
IDXGIOutput *dxgi_output;
UINT nbr_modes;
UINT i;
HRESULT res;
*num = 0U;
*den = 1U;
if (!d3d->vsync)
return;
/* adapter of primary desktop : pass 0U */
res = IDXGIFactory_EnumAdapters(d3d->dxgi_factory, 0U, &dxgi_adapter);
if (FAILED(res))
return;
/* output of primary desktop : pass 0U */
res = IDXGIAdapter_EnumOutputs(dxgi_adapter, 0U, &dxgi_output);
if (FAILED(res))
goto release_dxgi_adapter;
/* number of mode that fit the format */
res = IDXGIOutput_GetDisplayModeList(dxgi_output,
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_ENUM_MODES_INTERLACED,
&nbr_modes, NULL);
if (FAILED(res))
goto release_dxgi_output;
printf("display mode list : %d\n", nbr_modes);
fflush(stdout);
display_mode_list = (DXGI_MODE_DESC *)malloc(nbr_modes * sizeof(DXGI_MODE_DESC));
if (!display_mode_list)
goto release_dxgi_output;
/* fill the mode list */
res = IDXGIOutput_GetDisplayModeList(dxgi_output,
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_ENUM_MODES_INTERLACED,
&nbr_modes, display_mode_list);
if (FAILED(res))
goto free_mode_list;
for (i = 0; i < nbr_modes; i++)
{
if ((display_mode_list[i].Width == (UINT)GetSystemMetrics(SM_CXSCREEN)) &&
(display_mode_list[i].Height == (UINT)GetSystemMetrics(SM_CYSCREEN)))
{
*num = display_mode_list[i].RefreshRate.Numerator;
*den = display_mode_list[i].RefreshRate.Denominator;
break;
}
}
#ifdef _DEBUG
{
DXGI_ADAPTER_DESC adapter_desc;
IDXGIAdapter_GetDesc(dxgi_adapter, &adapter_desc);
printf(" * video mem: %llu B, %llu MB\n",
adapter_desc.DedicatedVideoMemory,
adapter_desc.DedicatedVideoMemory / 1024 / 1024);
fflush(stdout);
wprintf(L" * description: %ls\n", adapter_desc.Description);
fflush(stdout);
}
#endif
free_mode_list:
free(display_mode_list);
release_dxgi_output:
IDXGIOutput_Release(dxgi_output);
release_dxgi_adapter:
IDXGIFactory_Release(dxgi_adapter);
}
D3d *d3d_init(Window *win, int vsync)
{
D3D11_INPUT_ELEMENT_DESC desc_ie[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R8G8B8A8_UNORM, 0, 2 * sizeof(float), D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
#ifdef HAVE_WIN10
DXGI_SWAP_CHAIN_DESC1 desc;
DXGI_SWAP_CHAIN_FULLSCREEN_DESC desc_fs;
#else
DXGI_SWAP_CHAIN_DESC desc;
#endif
D3d *d3d;
RECT r;
HRESULT res;
UINT flags;
UINT num;
UINT den;
D3D_FEATURE_LEVEL feature_level[4];
d3d = (D3d *)calloc(1, sizeof(D3d));
if (!d3d)
return NULL;
d3d->vsync = vsync;
win->d3d = d3d;
d3d->win = win;
/* create the DXGI factory */
flags = 0;
#ifdef HAVE_WIN10
# ifdef _DEBUG
flags = DXGI_CREATE_FACTORY_DEBUG;
# endif
res = CreateDXGIFactory2(flags, &IID_IDXGIFactory2, (void **)&d3d->dxgi_factory);
#else
res = CreateDXGIFactory(&IID_IDXGIFactory, (void **)&d3d->dxgi_factory);
#endif
if (FAILED(res))
goto free_d3d;
/* single threaded for now */
flags = D3D11_CREATE_DEVICE_SINGLETHREADED |
D3D11_CREATE_DEVICE_BGRA_SUPPORT;
#ifdef HAVE_WIN10
# ifdef _DEBUG
flags |= D3D11_CREATE_DEVICE_DEBUG;
# endif
#endif
feature_level[0] = D3D_FEATURE_LEVEL_11_1;
feature_level[1] = D3D_FEATURE_LEVEL_11_0;
feature_level[2] = D3D_FEATURE_LEVEL_10_1;
feature_level[3] = D3D_FEATURE_LEVEL_10_0;
/* create device and device context with hardware support */
res = D3D11CreateDevice(NULL,
D3D_DRIVER_TYPE_HARDWARE,
NULL,
flags,
feature_level,
3U,
D3D11_SDK_VERSION,
&d3d->d3d_device,
NULL,
&d3d->d3d_device_ctx);
if (FAILED(res))
goto release_dxgi_factory2;
if (!GetClientRect(win->win, &r))
goto release_d3d_device;
/*
* create the swap chain. It needs some settings...
* the size of the internal buffers
* the image format
* the number of back buffers (>= 2 for flip model, see SwapEffect field)
*
* Settings are different in win 7 and win10
*/
d3d_refresh_rate_get(d3d, &num, &den);
#ifdef HAVE_WIN10
desc.Width = r.right - r.left;
desc.Height = r.bottom - r.top;
desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.Stereo = FALSE;
#else
desc.BufferDesc.Width = r.right - r.left;
desc.BufferDesc.Height = r.bottom - r.top;
desc.BufferDesc.RefreshRate.Numerator = num;
desc.BufferDesc.RefreshRate.Denominator = den;
desc.BufferDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
desc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
#endif
desc.SampleDesc.Count = 1U;
desc.SampleDesc.Quality = 0U;
desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
desc.BufferCount = 2U;
#ifdef HAVE_WIN10
desc.Scaling = DXGI_SCALING_NONE;
#else
desc.OutputWindow = win->win;
desc.Windowed = TRUE;
#endif
desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
#ifdef HAVE_WIN10
desc.AlphaMode = DXGI_ALPHA_MODE_UNSPECIFIED;
#endif
desc.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
#ifdef HAVE_WIN10
desc_fs.RefreshRate.Numerator = num;
desc_fs.RefreshRate.Denominator = den;
desc_fs.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
desc_fs.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
desc_fs.Windowed = TRUE;
#endif
#ifdef HAVE_WIN10
res = IDXGIFactory2_CreateSwapChainForHwnd(d3d->dxgi_factory,
(IUnknown *)d3d->d3d_device,
win->win,
&desc,
&desc_fs,
NULL,
&d3d->dxgi_swapchain);
#else
res = IDXGIFactory_CreateSwapChain(d3d->dxgi_factory,
(IUnknown *)d3d->d3d_device,
&desc,
&d3d->dxgi_swapchain);
#endif
if (FAILED(res))
goto release_d3d_device;
/* Vertex shader */
res = ID3D11Device_CreateVertexShader(d3d->d3d_device,
d3d_vertex_shader,
sizeof(d3d_vertex_shader),
NULL,
&d3d->d3d_vertex_shader);
if (FAILED(res))
{
printf(" * CreateVertexShader() failed\n");
goto release_dxgi_swapchain;
}
/* Pixel shader */
res = ID3D11Device_CreatePixelShader(d3d->d3d_device,
d3d_pixel_shader,
sizeof(d3d_pixel_shader),
NULL,
&d3d->d3d_pixel_shader);
if (FAILED(res))
{
printf(" * CreatePixelShader() failed\n");
goto release_vertex_shader;
}
/* create the input layout */
res = ID3D11Device_CreateInputLayout(d3d->d3d_device,
desc_ie,
sizeof(desc_ie) / sizeof(D3D11_INPUT_ELEMENT_DESC),
d3d_vertex_shader,
sizeof(d3d_vertex_shader),
&d3d->d3d_input_layout);
if (FAILED(res))
{
printf(" * CreateInputLayout() failed\n");
goto release_pixel_shader;
}
return d3d;
release_pixel_shader:
ID3D11PixelShader_Release(d3d->d3d_pixel_shader);
release_vertex_shader:
ID3D11VertexShader_Release(d3d->d3d_vertex_shader);
release_dxgi_swapchain:
#ifdef HAVE_WIN10
IDXGISwapChain1_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain1_Release(d3d->dxgi_swapchain);
#else
IDXGISwapChain_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain_Release(d3d->dxgi_swapchain);
#endif
release_d3d_device:
ID3D11DeviceContext_Release(d3d->d3d_device_ctx);
ID3D11Device_Release(d3d->d3d_device);
release_dxgi_factory2:
#ifdef HAVE_WIN10
IDXGIFactory2_Release(d3d->dxgi_factory);
#else
IDXGIFactory_Release(d3d->dxgi_factory);
#endif
free_d3d:
free(d3d);
return NULL;
}
void d3d_shutdown(D3d *d3d)
{
#ifdef _DEBUG
ID3D11Debug *d3d_debug;
HRESULT res;
#endif
if (!d3d)
return;
#ifdef _DEBUG
res = ID3D11Debug_QueryInterface(d3d->d3d_device, &IID_ID3D11Debug,
(void **)&d3d_debug);
#endif
ID3D11PixelShader_Release(d3d->d3d_pixel_shader);
ID3D11VertexShader_Release(d3d->d3d_vertex_shader);
ID3D11InputLayout_Release(d3d->d3d_input_layout);
ID3D11RenderTargetView_Release(d3d->d3d_render_target_view);
#ifdef HAVE_WIN10
IDXGISwapChain1_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain1_Release(d3d->dxgi_swapchain);
#else
IDXGISwapChain_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain_Release(d3d->dxgi_swapchain);
#endif
ID3D11DeviceContext_Release(d3d->d3d_device_ctx);
ID3D11Device_Release(d3d->d3d_device);
#ifdef HAVE_WIN10
IDXGIFactory2_Release(d3d->dxgi_factory);
#else
IDXGIFactory_Release(d3d->dxgi_factory);
#endif
free(d3d);
#ifdef _DEBUG
if (SUCCEEDED(res))
{
ID3D11Debug_ReportLiveDeviceObjects(d3d_debug, D3D11_RLDO_DETAIL);
ID3D11Debug_Release(d3d_debug);
}
#endif
}
void d3d_resize(D3d *d3d, UINT width, UINT height)
{
D3D11_RENDER_TARGET_VIEW_DESC desc_rtv;
ID3D11Texture2D *back_buffer;
HRESULT res;
FCT;
/* set viewport, depends on size of the window */
d3d->viewport.TopLeftX = 0.0f;
d3d->viewport.TopLeftY = 0.0f;
d3d->viewport.Width = (float)width;
d3d->viewport.Height = (float)height;
d3d->viewport.MinDepth = 0.0f;
d3d->viewport.MaxDepth = 1.0f;
/* release the render target view */
if (d3d->d3d_render_target_view)
ID3D11RenderTargetView_Release(d3d->d3d_render_target_view);
/* unset the render target view in the output merger */
ID3D11DeviceContext_OMSetRenderTargets(d3d->d3d_device_ctx,
0U, NULL, NULL);
/* resize the internal buffers of the swap chain to the new size */
#ifdef HAVE_WIN10
res = IDXGISwapChain1_ResizeBuffers(d3d->dxgi_swapchain,
0U, /* preserve buffer count */
width, height,
DXGI_FORMAT_UNKNOWN, /* preserve format */
0U);
#else
res = IDXGISwapChain_ResizeBuffers(d3d->dxgi_swapchain,
0U, /* preserve buffer count */
width, height,
DXGI_FORMAT_UNKNOWN, /* preserve format */
0U);
#endif
if ((res == DXGI_ERROR_DEVICE_REMOVED) ||
(res == DXGI_ERROR_DEVICE_RESET) ||
(res == DXGI_ERROR_DRIVER_INTERNAL_ERROR))
{
return;
}
if (FAILED(res))
{
printf("ResizeBuffers() failed\n");
fflush(stdout);
return;
}
/* get the internal buffer of the swap chain */
#ifdef HAVE_WIN10
res = IDXGISwapChain1_GetBuffer(d3d->dxgi_swapchain, 0,
&IID_ID3D11Texture2D,
(void **)&back_buffer);
#else
res = IDXGISwapChain_GetBuffer(d3d->dxgi_swapchain, 0,
&IID_ID3D11Texture2D,
(void **)&back_buffer);
#endif
if (FAILED(res))
{
printf("swapchain GetBuffer() failed\n");
fflush(stdout);
return;
}
ZeroMemory(&desc_rtv, sizeof(D3D11_RENDER_TARGET_VIEW_DESC));
desc_rtv.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc_rtv.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
/* create the new render target view from this internal buffer */
res = ID3D11Device_CreateRenderTargetView(d3d->d3d_device,
(ID3D11Resource *)back_buffer,
&desc_rtv,
&d3d->d3d_render_target_view);
ID3D11Texture2D_Release(back_buffer);
}
/*** triangle ***/
typedef struct
{
Vertex vertices[3];
unsigned int indices[3];
ID3D11Buffer *vertex_buffer;
ID3D11Buffer *index_buffer; /* not useful for a single triangle */
UINT stride;
UINT offset;
UINT count;
UINT index_count;
} Triangle;
Triangle *triangle_new(D3d *d3d,
int w, int h,
int x1, int y1,
int x2, int y2,
int x3, int y3,
unsigned char r,
unsigned char g,
unsigned char b,
unsigned char a)
{
D3D11_BUFFER_DESC desc;
D3D11_SUBRESOURCE_DATA sr_data;
Triangle *t;
HRESULT res;
t = (Triangle *)malloc(sizeof(Triangle));
if (!t)
return NULL;
t->vertices[0].x = XF(w, x1);
t->vertices[0].y = YF(h, y1);
t->vertices[0].r = r;
t->vertices[0].g = g;
t->vertices[0].b = b;
t->vertices[0].a = a;
t->vertices[1].x = XF(w, x2);
t->vertices[1].y = YF(h, y2);
t->vertices[1].r = r;
t->vertices[1].g = g;
t->vertices[1].b = b;
t->vertices[1].a = a;
t->vertices[2].x = XF(w, x3);
t->vertices[2].y = YF(h, y3);
t->vertices[2].r = r;
t->vertices[2].g = g;
t->vertices[2].b = b;
t->vertices[2].a = a;
/* useful only for the rectangle later */
t->indices[0] = 0;
t->indices[1] = 1;
t->indices[2] = 2;
t->stride = sizeof(Vertex);
t->offset = 0U;
t->index_count = 3U;
desc.ByteWidth = sizeof(t->vertices);
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc.MiscFlags = 0U;
desc.StructureByteStride = 0U;
sr_data.pSysMem = t->vertices;
sr_data.SysMemPitch = 0U;
sr_data.SysMemSlicePitch = 0U;
res = ID3D11Device_CreateBuffer(d3d->d3d_device,
&desc,
&sr_data,
&t->vertex_buffer);
if (FAILED(res))
{
free(t);
return NULL;
}
desc.ByteWidth = sizeof(t->indices);
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc.MiscFlags = 0U;
desc.StructureByteStride = 0U;
sr_data.pSysMem = t->indices;
sr_data.SysMemPitch = 0U;
sr_data.SysMemSlicePitch = 0U;
res = ID3D11Device_CreateBuffer(d3d->d3d_device,
&desc,
&sr_data,
&t->index_buffer);
if (FAILED(res))
{
free(t);
return NULL;
}
return t;
}
void triangle_free(Triangle *t)
{
if (!t)
return;
ID3D11Buffer_Release(t->index_buffer);
ID3D11Buffer_Release(t->vertex_buffer);
free(t);
}
void d3d_render(D3d *d3d)
{
#ifdef HAVE_WIN10
DXGI_PRESENT_PARAMETERS pp;
#endif
const FLOAT color[4] = { 0.10f, 0.18f, 0.24f, 1.0f };
RECT rect;
HRESULT res;
FCT;
if (!GetClientRect(d3d->win->win, &rect))
{
return;
}
/* scene */
Triangle *t;
t = triangle_new(d3d,
rect.right - rect.left,
rect.bottom - rect.top,
320, 120,
480, 360,
160, 360,
255, 255, 0, 255); /* r, g, b, a */
/* clear render target */
ID3D11DeviceContext_ClearRenderTargetView(d3d->d3d_device_ctx,
d3d->d3d_render_target_view,
color);
/* Input Assembler (IA) */
/* TRIANGLESTRIP only useful for the rectangle later */
ID3D11DeviceContext_IASetPrimitiveTopology(d3d->d3d_device_ctx,
D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
ID3D11DeviceContext_IASetInputLayout(d3d->d3d_device_ctx,
d3d->d3d_input_layout);
ID3D11DeviceContext_IASetVertexBuffers(d3d->d3d_device_ctx,
0,
1,
&t->vertex_buffer,
&t->stride,
&t->offset);
ID3D11DeviceContext_IASetIndexBuffer(d3d->d3d_device_ctx,
t->index_buffer,
DXGI_FORMAT_R32_UINT,
0);
/* vertex shader */
ID3D11DeviceContext_VSSetShader(d3d->d3d_device_ctx,
d3d->d3d_vertex_shader,
NULL,
0);
/* pixel shader */
ID3D11DeviceContext_PSSetShader(d3d->d3d_device_ctx,
d3d->d3d_pixel_shader,
NULL,
0);
/* set viewport in the Rasterizer Stage */
ID3D11DeviceContext_RSSetViewports(d3d->d3d_device_ctx, 1U, &d3d->viewport);
/* Output merger */
ID3D11DeviceContext_OMSetRenderTargets(d3d->d3d_device_ctx,
1U, &d3d->d3d_render_target_view,
NULL);
/* draw */
ID3D11DeviceContext_DrawIndexed(d3d->d3d_device_ctx,
t->index_count,
0, 0);
triangle_free(t);
/*
* present frame, that is flip the back buffer and the front buffer
* if no vsync, we present immediately
*/
#ifdef HAVE_WIN10
pp.DirtyRectsCount = 0;
pp.pDirtyRects = NULL;
pp.pScrollRect = NULL;
pp.pScrollOffset = NULL;
res = IDXGISwapChain1_Present1(d3d->dxgi_swapchain,
d3d->vsync ? 1 : 0, 0, &pp);
#else
res = IDXGISwapChain_Present(d3d->dxgi_swapchain,
d3d->vsync ? 1 : 0, 0);
#endif
if (res == DXGI_ERROR_DEVICE_RESET || res == DXGI_ERROR_DEVICE_REMOVED)
{
printf("device removed or lost, need to recreate everything\n");
fflush(stdout);
}
else if (res == DXGI_STATUS_OCCLUDED)
{
printf("window is not visible, so vsync won't work. Let's sleep a bit to reduce CPU usage\n");
fflush(stdout);
}
}
int main()
{
Window *win;
D3d *d3d;
/* remove scaling on HiDPI */
#ifdef HAVE_WIN10
SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_SYSTEM_AWARE);
#endif
win = window_new(100, 100, 800, 480);
if (!win)
return 1;
d3d = d3d_init(win, 0);
if (!d3d)
goto del_window;
SetWindowLongPtr(win->win, GWLP_USERDATA, (LONG_PTR)win);
window_show(win);
/* message loop */
while (1)
{
MSG msg;
BOOL ret;
ret = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE);
if (ret)
{
do
{
if (msg.message == WM_QUIT)
goto beach;
TranslateMessage(&msg);
DispatchMessageW(&msg);
} while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE));
}
}
beach:
d3d_shutdown(d3d);
window_del(win);
return 0;
del_window:
window_del(win);
printf(" error\n");
fflush(stdout);
return 1;
}
Vertex shader:
struct vs_input
{
float2 position : POSITION;
float4 color : COLOR;
};
struct ps_input
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
ps_input main(vs_input input )
{
ps_input output;
output.position = float4(input.position, 0.0f, 1.0f);
output.color = input.color;
return output;
}
Pixel shader:
struct ps_input
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
float4 main(ps_input input) : SV_TARGET
{
return input.color;
}
Thank you.
If you want to use pixel coordinates for your vertices, you can use one of these two formats:
DXGI_FORMAT_R32G32_FLOAT (the same as you use right now, pixel coordinates in floating point)
DXGI_FORMAT_R32G32_UINT (pixel coordinates as int, vertex shader position input becomes uint2 position : POSITION)
If you use float, the conversion is done on the C side; if you use UINT, the conversion is done on the vertex shader side. The speed difference would need profiling; if the number of vertices is low, I'd expect it to be negligible.
You can then easily remap those values into the -1 to 1 range in the vertex shader (which is quite efficient); you only need to pass the inverse viewport size in a constant buffer (a C-side sketch follows the shader below).
So your vertex shader becomes:
struct vs_input
{
float2 position : POSITION;
//uint2 position : POSITION; If you use UINT
float4 color : COLOR;
};
struct ps_input
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
cbuffer cbViewport : register(b0)
{
float2 inverseViewportSize;
}
ps_input main(vs_input input )
{
ps_input output;
float2 p = input.position; //if you use UINT, conversion is done here
p *= inverseViewportSize;
p *= 2.0f;
p -= 1.0f;
p.y *= -1.0f; // clip space is bottom to top, pixel coordinates are top to bottom
output.position = float4(p, 0.0f, 1.0f);
output.color = input.color;
return output;
}
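For completeness, here is a minimal C-side sketch of creating and binding the cbViewport constant buffer that the shader above reads from register b0. It is not part of the original answer; it reuses the COBJMACROS style and the d3d fields from the question, and the names ViewportCB and viewport_cb_create are purely illustrative. Constant buffers must be a multiple of 16 bytes, hence the padding; if you switch POSITION to DXGI_FORMAT_R32G32_UINT, only the input element format and the Vertex struct change, this buffer stays the same.
typedef struct
{
    float inverse_viewport[2]; /* 1/width, 1/height */
    float padding[2];          /* pad the cbuffer to 16 bytes */
} ViewportCB;
static ID3D11Buffer *viewport_cb_create(ID3D11Device *device, float width, float height)
{
    D3D11_BUFFER_DESC desc;
    D3D11_SUBRESOURCE_DATA data;
    ViewportCB cb;
    ID3D11Buffer *buffer = NULL;
    cb.inverse_viewport[0] = 1.0f / width;
    cb.inverse_viewport[1] = 1.0f / height;
    cb.padding[0] = cb.padding[1] = 0.0f;
    desc.ByteWidth = sizeof(ViewportCB);
    desc.Usage = D3D11_USAGE_DEFAULT;
    desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
    desc.CPUAccessFlags = 0U;
    desc.MiscFlags = 0U;
    desc.StructureByteStride = 0U;
    data.pSysMem = &cb;
    data.SysMemPitch = 0U;
    data.SysMemSlicePitch = 0U;
    if (FAILED(ID3D11Device_CreateBuffer(device, &desc, &data, &buffer)))
        return NULL;
    return buffer;
}
/* bind it once to slot b0 before drawing: */
/* ID3D11DeviceContext_VSSetConstantBuffers(d3d->d3d_device_ctx, 0U, 1U, &cb_viewport); */
/* on WM_SIZE, refresh the contents, e.g. with ID3D11DeviceContext_UpdateSubresource() */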

Taking screenshot of window (handle) in C++

I'm trying to take a screenshot of a particular window (HWND) on Windows using C++. The following code works on Notepad but not on another specific process. Instead, the code returns a completely different screenshot for the other process:
#include <Windows.h>
HBITMAP dump_client_window(const HWND window_handle)
{
RECT window_handle_rectangle;
GetClientRect(window_handle, &window_handle_rectangle);
const HDC hdc_screen = GetDC(nullptr);
const HDC hdc = CreateCompatibleDC(hdc_screen);
const auto cx = window_handle_rectangle.right - window_handle_rectangle.left;
const auto cy = window_handle_rectangle.bottom - window_handle_rectangle.top;
const HBITMAP bitmap = CreateCompatibleBitmap(hdc_screen, cx, cy);
const auto old_bitmap = SelectObject(hdc, bitmap);
PrintWindow(window_handle, hdc, PW_CLIENTONLY);
// Cleanup
SelectObject(hdc, old_bitmap);
DeleteDC(hdc);
ReleaseDC(nullptr, hdc_screen);
return bitmap;
}
What could be the reason for this? If I use DirectX 11 to take the screenshot of the window, it works correctly for both processes:
#include <dxgi.h>
#include <inspectable.h>
#include <dxgi1_2.h>
#include <d3d11.h>
#include <winrt/Windows.System.h>
#include <winrt/Windows.Graphics.Capture.h>
#include <Windows.Graphics.Capture.Interop.h>
#include <windows.graphics.directx.direct3d11.interop.h>
#include <roerrorapi.h>
#include <ShlObj_core.h>
#include <dwmapi.h>
#include <filesystem>
#include "ImageFormatConversion.hpp"
#pragma comment(lib, "Dwmapi.lib")
#pragma comment(lib, "windowsapp.lib")
void capture_window(HWND window_handle, const std::wstring& output_file_path)
{
// Init COM
init_apartment(winrt::apartment_type::multi_threaded);
// Create Direct 3D Device
winrt::com_ptr<ID3D11Device> d3d_device;
winrt::check_hresult(D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, D3D11_CREATE_DEVICE_BGRA_SUPPORT,
nullptr, 0, D3D11_SDK_VERSION, d3d_device.put(), nullptr, nullptr));
winrt::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice device;
const auto dxgiDevice = d3d_device.as<IDXGIDevice>();
{
winrt::com_ptr<IInspectable> inspectable;
winrt::check_hresult(CreateDirect3D11DeviceFromDXGIDevice(dxgiDevice.get(), inspectable.put()));
device = inspectable.as<winrt::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice>();
}
auto idxgi_device2 = dxgiDevice.as<IDXGIDevice2>();
winrt::com_ptr<IDXGIAdapter> adapter;
winrt::check_hresult(idxgi_device2->GetParent(winrt::guid_of<IDXGIAdapter>(), adapter.put_void()));
winrt::com_ptr<IDXGIFactory2> factory;
winrt::check_hresult(adapter->GetParent(winrt::guid_of<IDXGIFactory2>(), factory.put_void()));
ID3D11DeviceContext* d3d_context = nullptr;
d3d_device->GetImmediateContext(&d3d_context);
RECT rect{};
DwmGetWindowAttribute(window_handle, DWMWA_EXTENDED_FRAME_BOUNDS, &rect, sizeof(RECT));
const auto size = winrt::Windows::Graphics::SizeInt32{ rect.right - rect.left, rect.bottom - rect.top };
winrt::Windows::Graphics::Capture::Direct3D11CaptureFramePool m_frame_pool =
winrt::Windows::Graphics::Capture::Direct3D11CaptureFramePool::Create(
device,
winrt::Windows::Graphics::DirectX::DirectXPixelFormat::B8G8R8A8UIntNormalized,
2,
size);
const auto activation_factory = winrt::get_activation_factory<
winrt::Windows::Graphics::Capture::GraphicsCaptureItem>();
auto interop_factory = activation_factory.as<IGraphicsCaptureItemInterop>();
winrt::Windows::Graphics::Capture::GraphicsCaptureItem capture_item = { nullptr };
interop_factory->CreateForWindow(window_handle, winrt::guid_of<ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>(),
winrt::put_abi(capture_item));
auto is_frame_arrived = false;
winrt::com_ptr<ID3D11Texture2D> texture;
const auto session = m_frame_pool.CreateCaptureSession(capture_item);
m_frame_pool.FrameArrived([&](auto& frame_pool, auto&)
{
if (is_frame_arrived)
{
return;
}
auto frame = frame_pool.TryGetNextFrame();
struct __declspec(uuid("A9B3D012-3DF2-4EE3-B8D1-8695F457D3C1"))
IDirect3DDxgiInterfaceAccess : ::IUnknown
{
virtual HRESULT __stdcall GetInterface(GUID const& id, void** object) = 0;
};
auto access = frame.Surface().as<IDirect3DDxgiInterfaceAccess>();
access->GetInterface(winrt::guid_of<ID3D11Texture2D>(), texture.put_void());
is_frame_arrived = true;
return;
});
session.StartCapture();
// Message pump
MSG message;
while (!is_frame_arrived)
{
if (PeekMessage(&message, nullptr, 0, 0, PM_REMOVE) > 0)
{
DispatchMessage(&message);
}
}
session.Close();
D3D11_TEXTURE2D_DESC captured_texture_desc;
texture->GetDesc(&captured_texture_desc);
captured_texture_desc.Usage = D3D11_USAGE_STAGING;
captured_texture_desc.BindFlags = 0;
captured_texture_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
captured_texture_desc.MiscFlags = 0;
winrt::com_ptr<ID3D11Texture2D> user_texture = nullptr;
winrt::check_hresult(d3d_device->CreateTexture2D(&captured_texture_desc, nullptr, user_texture.put()));
d3d_context->CopyResource(user_texture.get(), texture.get());
D3D11_MAPPED_SUBRESOURCE resource;
winrt::check_hresult(d3d_context->Map(user_texture.get(), NULL, D3D11_MAP_READ, 0, &resource));
BITMAPINFO l_bmp_info;
// BMP 32 bpp
ZeroMemory(&l_bmp_info, sizeof(BITMAPINFO));
l_bmp_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
l_bmp_info.bmiHeader.biBitCount = 32;
l_bmp_info.bmiHeader.biCompression = BI_RGB;
l_bmp_info.bmiHeader.biWidth = captured_texture_desc.Width;
l_bmp_info.bmiHeader.biHeight = captured_texture_desc.Height;
l_bmp_info.bmiHeader.biPlanes = 1;
l_bmp_info.bmiHeader.biSizeImage = captured_texture_desc.Width * captured_texture_desc.Height * 4;
std::unique_ptr<BYTE> p_buf(new BYTE[l_bmp_info.bmiHeader.biSizeImage]);
UINT l_bmp_row_pitch = captured_texture_desc.Width * 4;
auto sptr = static_cast<BYTE*>(resource.pData);
auto dptr = p_buf.get() + l_bmp_info.bmiHeader.biSizeImage - l_bmp_row_pitch;
UINT l_row_pitch = std::min<UINT>(l_bmp_row_pitch, resource.RowPitch);
for (size_t h = 0; h < captured_texture_desc.Height; ++h)
{
memcpy_s(dptr, l_bmp_row_pitch, sptr, l_row_pitch);
sptr += resource.RowPitch;
dptr -= l_bmp_row_pitch;
}
// Save bitmap buffer into the file
WCHAR l_my_doc_path[MAX_PATH];
winrt::check_hresult(SHGetFolderPathW(nullptr, CSIDL_PERSONAL, nullptr, SHGFP_TYPE_CURRENT, l_my_doc_path));
FILE* lfile = nullptr;
if (auto lerr = _wfopen_s(&lfile, output_file_path.c_str(), L"wb"); lerr != 0)
{
return;
}
if (lfile != nullptr)
{
BITMAPFILEHEADER bmp_file_header;
bmp_file_header.bfReserved1 = 0;
bmp_file_header.bfReserved2 = 0;
bmp_file_header.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + l_bmp_info.bmiHeader.biSizeImage;
bmp_file_header.bfType = 'MB';
bmp_file_header.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
fwrite(&bmp_file_header, sizeof(BITMAPFILEHEADER), 1, lfile);
fwrite(&l_bmp_info.bmiHeader, sizeof(BITMAPINFOHEADER), 1, lfile);
fwrite(p_buf.get(), l_bmp_info.bmiHeader.biSizeImage, 1, lfile);
fclose(lfile);
convert_image_encoding(output_file_path, L"png");
}
}
Why is the DirectX 11 code so complex, long, and slow (about 800 ms to 1 s per call, including cold-start initialization)? The latter version also causes blinking borders around the captured window, which I might want to get rid of. I also seem to be forced into the inefficient route of writing the BMP image to disk, loading it back to convert it to PNG, and then writing it to disk again to produce the final result I want.
Any suggestions or help with any of these points are welcome, especially regarding why the first capture code can yield unexpected images depending on the window being captured. Other than that, I like the first version for its speed, brevity, and simplicity.
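One plausible explanation for the first snippet (a hypothesis, since the post does not name the other process): windows that render through GPU/DWM composition often hand PrintWindow stale or blank content when only PW_CLIENTONLY is passed. Since Windows 8.1 the PW_RENDERFULLCONTENT flag asks DWM to render the full composed content into the DC; older SDK headers may not define it. A minimal drop-in tweak for the PrintWindow call in dump_client_window:
#ifndef PW_RENDERFULLCONTENT
#define PW_RENDERFULLCONTENT 0x00000002 // available since Windows 8.1
#endif
// capture the client area including GPU-composed content
PrintWindow(window_handle, hdc, PW_CLIENTONLY | PW_RENDERFULLCONTENT);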

Error with the output of camera calibration in OpenCV 3.2 with C++ in Visual Studio 2015. Can't save the output values

I am getting an error when using OpenCV camera calibration. When I debug my code, it just opens the webcam and never writes the camera calibration output values to the folder location.
I am using OpenCV 3.2 with VS 2015.
My code:
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/aruco.hpp"
#include "opencv2/calib3d.hpp"
#include <sstream>
#include <iostream>
#include <fstream>
using namespace std;
using namespace cv;
const float calibrationSquareDimension = 0.01905f; //meters
const float arucoSquareDimension = 0.1016f; //meters
const Size chessboardDimensions = Size(9,6);
void createArucoMarkers()
{
Mat outputMarker;
Ptr<aruco::Dictionary> markerDictionary = aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME::DICT_4X4_50);
for (int i = 0; i < 50; i++)
{
aruco::drawMarker(markerDictionary, i, 500, outputMarker, 1);
ostringstream convert;
string imageName = "4x4Marker_";
convert << imageName << i << ".jpg";
imwrite(convert.str(), outputMarker);
}
}
void createKnownBoardPosition(Size boardsize, float squareEdgeLength, vector<Point3f>& corners)
{
for (int i = 0; i < boardsize.height; i++)
{
for (int j = 0; j < boardsize.width; j++)
{
corners.push_back(Point3f(j * squareEdgeLength, i * squareEdgeLength, 0.0f));
}
}
}
void getchessboardcorners(vector<Mat> images, vector<vector<Point2f>>& allfoundcorners, bool showresults = false)
{
for (vector<Mat>::iterator iter = images.begin(); iter != images.end(); iter++)
{
vector<Point2f> pointBuf;
bool found = findChessboardCorners(*iter, Size(9, 6), pointBuf, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
if (found)
{
allfoundcorners.push_back(pointBuf);
}
if (showresults)
{
drawChessboardCorners(*iter, Size(9, 6), pointBuf, found);
imshow("Looking for Corners", *iter);
waitKey(0);
}
}
}
void cameraCalibration(vector<Mat> calibrationImages, Size boardSize, float squareEdgeLength, Mat& cameraMatrix, Mat& distanceCoefficients)
{
vector<vector<Point2f>> checkerboardImageSpacePoints;
getchessboardcorners(calibrationImages, checkerboardImageSpacePoints, false);
vector<vector<Point3f>> worldSpaceCornerPoints(1);
createKnownBoardPosition(boardSize, squareEdgeLength, worldSpaceCornerPoints[0]);
worldSpaceCornerPoints.resize(checkerboardImageSpacePoints.size(), worldSpaceCornerPoints[0]);
vector<Mat> rVectors, tVectors;
distanceCoefficients = Mat::zeros(8, 1, CV_64F);
calibrateCamera(worldSpaceCornerPoints, checkerboardImageSpacePoints, boardSize, cameraMatrix, distanceCoefficients, rVectors, tVectors );
}
bool saveCameraCalibration(string name, Mat cameraMatrix, Mat distanceCoefficients)
{
ofstream outStream(name);
if (outStream)
{
uint16_t rows = cameraMatrix.rows;
uint16_t columns = cameraMatrix.cols;
for (int r = 0; r < rows; r++)
{
for (int c = 0; c < columns; c++)
{
double value = cameraMatrix.at<double>(r, c);
outStream << value << endl;
}
}
rows = distanceCoefficients.rows;
columns = distanceCoefficients.cols;
for (int r = 0; r < rows; r++)
{
for (int c = 0; c < columns; c++)
{
double value = cameraMatrix.at<double>(r, c);
outStream << value << endl;
}
}
outStream.close();
return true;
}
return false;
}
int main(int argv, char** argc)
{
Mat frame;
Mat drawToFrame;
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
Mat distanceCoefficients;
vector<Mat> savedImages;
vector<vector<Point2f>> markerCorners, rejectedCandidates;
VideoCapture vid(0);
if (!vid.isOpened())
{
return 0;
}
int framesPersecond = 20;
namedWindow("Webcam", CV_WINDOW_AUTOSIZE);
while (true)
{
if (!vid.read(frame))
break;
vector<Vec2f> foundPoints;
bool found = false;
found = findChessboardCorners(frame, chessboardDimensions, foundPoints, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE );
frame.copyTo(drawToFrame);
drawChessboardCorners(drawToFrame, chessboardDimensions, foundPoints, found);
if (found)
imshow("Webcam", drawToFrame);
else
imshow("Webcam", frame);
char character = waitKey(1000 / framesPersecond);
switch (character)
{
case ' ':
//saving image
if(found)
{
Mat temp;
frame.copyTo(temp);
savedImages.push_back(temp);
}
break;
case 13:
//start calibration
if (savedImages.size() > 15)
{
cameraCalibration(savedImages, chessboardDimensions, calibrationSquareDimension, cameraMatrix, distanceCoefficients);
saveCameraCalibration("CameraCalibration", cameraMatrix, distanceCoefficients);
}
break;
case 27:
//exit
return 0;
break;
}
}
return 0;
}
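Two observations on the posted code (just what stands out, not an accepted answer): the calibration/save path only runs when Enter is pressed after more than 15 frames have been captured with the space bar, which may be why nothing appears in the folder, and the second loop in saveCameraCalibration reads cameraMatrix again instead of distanceCoefficients. A minimal sketch of a corrected save function (assuming both Mats are CV_64F, as produced by calibrateCamera above):
#include <fstream>
#include <string>
#include <opencv2/core.hpp>
bool saveCameraCalibrationFixed(const std::string& name, const cv::Mat& cameraMatrix, const cv::Mat& distanceCoefficients)
{
    std::ofstream outStream(name);
    if (!outStream)
        return false;
    // camera matrix (3x3)
    for (int r = 0; r < cameraMatrix.rows; r++)
        for (int c = 0; c < cameraMatrix.cols; c++)
            outStream << cameraMatrix.at<double>(r, c) << std::endl;
    // distortion coefficients (8x1) -- the original loop read cameraMatrix here
    for (int r = 0; r < distanceCoefficients.rows; r++)
        for (int c = 0; c < distanceCoefficients.cols; c++)
            outStream << distanceCoefficients.at<double>(r, c) << std::endl;
    return true;
}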

How to get picture buffer data in ffmpeg?

I'm trying to pass a bitmap from ffmpeg to Android.
It already works, but it displays the picture directly on the surface passed from Java to native code.
How can I get the frame buffer bitmap data so I can pass it to Java?
I've tried to save the out_frame buffer data:
unsigned char bmpFileHeader[14] = {'B', 'M', 0,0,0,0, 0,0, 0,0, 54, 0,0,0};
unsigned char bmpInfoHeader[40] = {40,0,0,0, 0,0,0,0, 0,0,0,0, 1,0, 24,0};
unsigned char bmpPad[3] = {0, 0, 0};
void saveBuffer(int fileIndex, int width, int height, unsigned char *buffer, int buffer_size) {
unsigned char filename[1024];
sprintf(filename, "/storage/sdcard0/3d_player_%d.bmp", fileIndex);
LOGI(10, "saving ffmpeg bitmap file: %d to %s", fileIndex, filename);
FILE *bitmapFile = fopen(filename, "wb");
if (!bitmapFile) {
LOGE(10, "failed to create ffmpeg bitmap file");
return;
}
unsigned char filesize = 54 + 3 * width * height; // 3 = (r,g,b)
bmpFileHeader[2] = (unsigned char)(filesize);
bmpFileHeader[3] = (unsigned char)(filesize >> 8);
bmpFileHeader[4] = (unsigned char)(filesize >> 16);
bmpFileHeader[5] = (unsigned char)(filesize >> 24);
bmpInfoHeader[4] = (unsigned char)(width);
bmpInfoHeader[5] = (unsigned char)(width >> 8);
bmpInfoHeader[6] = (unsigned char)(width >> 16);
bmpInfoHeader[7] = (unsigned char)(width >> 24);
bmpInfoHeader[8] = (unsigned char)(height);
bmpInfoHeader[9] = (unsigned char)(height >> 8);
bmpInfoHeader[10] = (unsigned char)(height >> 16);
bmpInfoHeader[11] = (unsigned char)(height >> 24);
fwrite(bmpFileHeader, 1, 14, bitmapFile);
fwrite(bmpInfoHeader, 1, 40, bitmapFile);
int i;
for (i=0; i<height; i++) {
fwrite(buffer + width * (height - 1) * 3, 3, width, bitmapFile);
fwrite(bmpPad, 1, (4-(width * 3) % 4) % 4, bitmapFile);
}
fflush(bitmapFile);
fclose(bitmapFile);
}
int player_decode_video(struct DecoderData * decoder_data, JNIEnv * env,
struct PacketData *packet_data) {
int got_frame_ptr;
struct Player *player = decoder_data->player;
int stream_no = decoder_data->stream_no;
AVCodecContext * ctx = player->input_codec_ctxs[stream_no];
AVFrame * frame = player->input_frames[stream_no];
AVStream * stream = player->input_streams[stream_no];
int interrupt_ret;
int to_write;
int err = 0;
AVFrame *rgb_frame = player->rgb_frame;
ANativeWindow_Buffer buffer;
ANativeWindow * window;
#ifdef MEASURE_TIME
struct timespec timespec1, timespec2, diff;
#endif // MEASURE_TIME
LOGI(10, "player_decode_video decoding");
int frameFinished;
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
int ret = avcodec_decode_video2(ctx, frame, &frameFinished,
packet_data->packet);
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
diff = timespec_diff(timespec1, timespec2);
LOGI(3, "decode_video timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME
if (ret < 0) {
LOGE(1, "player_decode_video Fail decoding video %d\n", ret);
return -ERROR_WHILE_DECODING_VIDEO;
}
if (!frameFinished) {
LOGI(10, "player_decode_video Video frame not finished\n");
return 0;
}
// saving in buffer converted video frame
LOGI(7, "player_decode_video copy wait");
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
pthread_mutex_lock(&player->mutex_queue);
window = player->window;
if (window == NULL) {
pthread_mutex_unlock(&player->mutex_queue);
goto skip_frame;
}
ANativeWindow_setBuffersGeometry(window, ctx->width, ctx->height,
WINDOW_FORMAT_RGBA_8888);
if (ANativeWindow_lock(window, &buffer, NULL) != 0) {
pthread_mutex_unlock(&player->mutex_queue);
goto skip_frame;
}
pthread_mutex_unlock(&player->mutex_queue);
int format = buffer.format;
if (format < 0) {
LOGE(1, "Could not get window format")
}
enum PixelFormat out_format;
if (format == WINDOW_FORMAT_RGBA_8888) {
out_format = PIX_FMT_RGBA;
LOGI(6, "Format: WINDOW_FORMAT_RGBA_8888");
} else if (format == WINDOW_FORMAT_RGBX_8888) {
out_format = PIX_FMT_RGB0;
LOGE(1, "Format: WINDOW_FORMAT_RGBX_8888 (not supported)");
} else if (format == WINDOW_FORMAT_RGB_565) {
out_format = PIX_FMT_RGB565;
LOGE(1, "Format: WINDOW_FORMAT_RGB_565 (not supported)");
} else {
LOGE(1, "Unknown window format");
}
avpicture_fill((AVPicture *) rgb_frame, buffer.bits, out_format,
buffer.width, buffer.height);
rgb_frame->data[0] = buffer.bits;
if (format == WINDOW_FORMAT_RGBA_8888) {
rgb_frame->linesize[0] = buffer.stride * 4;
} else {
LOGE(1, "Unknown window format");
}
LOGI(6,
"Buffer: width: %d, height: %d, stride: %d",
buffer.width, buffer.height, buffer.stride);
int i = 0;
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
diff = timespec_diff(timespec1, timespec2);
LOGI(1,
"lockPixels and fillimage timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
LOGI(7, "player_decode_video copying...");
AVFrame * out_frame;
int rescale;
if (ctx->width == buffer.width && ctx->height == buffer.height) {
// This always should be true
out_frame = rgb_frame;
rescale = FALSE;
} else {
out_frame = player->tmp_frame2;
rescale = TRUE;
}
if (ctx->pix_fmt == PIX_FMT_YUV420P) {
__I420ToARGB(frame->data[0], frame->linesize[0], frame->data[2],
frame->linesize[2], frame->data[1], frame->linesize[1],
out_frame->data[0], out_frame->linesize[0], ctx->width,
ctx->height);
} else if (ctx->pix_fmt == PIX_FMT_NV12) {
__NV21ToARGB(frame->data[0], frame->linesize[0], frame->data[1],
frame->linesize[1], out_frame->data[0], out_frame->linesize[0],
ctx->width, ctx->height);
} else {
LOGI(3, "Using slow conversion: %d ", ctx->pix_fmt);
struct SwsContext *sws_context = player->sws_context;
sws_context = sws_getCachedContext(sws_context, ctx->width, ctx->height,
ctx->pix_fmt, ctx->width, ctx->height, out_format,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
player->sws_context = sws_context;
if (sws_context == NULL) {
LOGE(1, "could not initialize conversion context from: %d"
", to :%d\n", ctx->pix_fmt, out_format);
// TODO some error
}
sws_scale(sws_context, (const uint8_t * const *) frame->data,
frame->linesize, 0, ctx->height, out_frame->data,
out_frame->linesize);
}
if (rescale) {
// Never occurs
__ARGBScale(out_frame->data[0], out_frame->linesize[0], ctx->width,
ctx->height, rgb_frame->data[0], rgb_frame->linesize[0],
buffer.width, buffer.height, __kFilterNone);
out_frame = rgb_frame;
}
// TODO: (4ntoine) frame decoded and rescaled, ready to call callback with frame picture from buffer
int bufferSize = buffer.width * buffer.height * 3; // 3 = (r,g,b);
static int bitmapCounter = 0;
if (bitmapCounter < 10) {
saveBuffer(bitmapCounter++, buffer.width, buffer.height, (unsigned char *)out_frame->data, bufferSize);
}
but out_frame is empty and the file contains only the header followed by a body of 0x00 bytes.
How do I get the picture buffer data in ffmpeg?
Solved, in short: you should take the buffer from ANativeWindow_Buffer, i.e. buffer.bits. Note that the buffer is RGBA while a BMP body is usually RGB (3 bytes per pixel). To save it as a BMP you need to add the BMP header and write the lines with row padding.
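To make the last point concrete, here is a minimal sketch (not from the original post) of writing the pixel part of such a BMP from buffer.bits, assuming WINDOW_FORMAT_RGBA_8888 and that buffer.stride is in pixels, as defined by ANativeWindow_Buffer; the 54-byte header is written beforehand as in saveBuffer() above.
#include <stdio.h>
static void write_rgba_as_bmp_rows(FILE *f, const unsigned char *rgba, int width, int height, int stride_px)
{
    static const unsigned char pad[3] = { 0, 0, 0 };
    int pad_len = (4 - (width * 3) % 4) % 4;
    int x, y;
    for (y = height - 1; y >= 0; y--) { /* BMP rows are stored bottom-up */
        const unsigned char *src = rgba + (size_t)y * stride_px * 4;
        for (x = 0; x < width; x++) {
            unsigned char bgr[3];
            bgr[0] = src[x * 4 + 2]; /* BMP expects B, G, R order */
            bgr[1] = src[x * 4 + 1];
            bgr[2] = src[x * 4 + 0];
            fwrite(bgr, 1, 3, f);
        }
        fwrite(pad, 1, pad_len, f); /* pad each row to a 4-byte boundary */
    }
}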
