File Name Path Bug - winapi

I'm starting to learn programming and I'm using Visual Studio. I have this simple program that worked a few days ago, but after working on another project it now gives me an error:
// Include file header
#include <Windows.h>
#include <iostream>
#include <sstream>
#include <string>
#include <string.h>
#include <gif_lib.h>
bool gif_write(const char *fileName); // forward declaration so SaveDialogCreation can call it
void SaveDialogCreation(HWND hWnd)
{
OPENFILENAMEA ofn;
char szFileName[MAX_PATH] = "";
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFilter = "Text Files (*.txt)\0*.txt\0All Files (*.*)\0*.*\0";
ofn.lpstrFile = szFileName;
ofn.nMaxFile = MAX_PATH;
ofn.lpstrDefExt = "txt";
ofn.Flags = OFN_EXPLORER | OFN_PATHMUSTEXIST | OFN_HIDEREADONLY | OFN_OVERWRITEPROMPT;
// open save dialog
if (GetSaveFileNameA(&ofn)) // the API takes a pointer to the OPENFILENAMEA structure
{
gif_write(szFileName);
}
}
//gif_write code function
bool gif_write(const char *fileName)
{
// this uses the giflib_win library
int error;
// Open gif file
GifFileType* gifFile = EGifOpenFileName(fileName, false, &error);
// error while open gif file
if (!gifFile) {
std::cout << "EGifOpenFileName() failed - " << error << std::endl;
return false;
}
GifColorType colors[64];
GifColorType* c = colors;
int level[4] = { 0, 85, 170, 255 };
for (int r = 0; r < 4; ++r) {
for (int g = 0; g < 4; ++g) {
for (int b = 0; b < 4; ++b, ++c) {
c->Red = level[r];
c->Green = level[g];
c->Blue = level[b];
}
}
}
// Write pixel to gif
GifByteType pix[16] = {
0, 1, 2, 3, // B
0, 4, 8, 12, // G
0, 16, 32, 48, // R
0, 21, 42, 63, // BK
};
gifFile->SWidth = 4;
gifFile->SHeight = 4;
gifFile->SColorResolution = 8;
gifFile->SBackGroundColor = 0;
gifFile->SColorMap = GifMakeMapObject(64, colors);
SavedImage gifImage;
gifImage.ImageDesc.Left = 0;
gifImage.ImageDesc.Top = 0;
gifImage.ImageDesc.Width = 4;
gifImage.ImageDesc.Height = 4;
gifImage.ImageDesc.Interlace = false;
gifImage.ImageDesc.ColorMap = nullptr;
gifImage.RasterBits = (GifByteType*)malloc(16);
gifImage.ExtensionBlockCount = 0;
gifImage.ExtensionBlocks = nullptr;
memcpy(gifImage.RasterBits, pix, 16);
GifMakeSavedImage(gifFile, &gifImage);
if (EGifSpew(gifFile) == GIF_ERROR) {
std::cout << "EGifSpew() failed - " << gifFile->Error << std::endl;
EGifCloseFile(gifFile, &error);
return false;
}
// Close gif file
EGifCloseFile(gifFile, &error);
return true;
}
I can compile it, but when I use the save dialog and gif_write() runs, it saves the GIF file, yet the file name ends up being only the first character of the name I typed.
How can I solve this?

It can compile but it crashed when I ran it, so there is definitely a semantic problem with the code. A possible one is OFN_EXPLORER, which, per the documentation, indicates that
any customizations made to the Open or Save As dialog box use the new
Explorer-style customization methods. By default, the Open and Save As
dialog boxes use the Explorer-style user interface regardless of whether
this flag is set. This flag is necessary only if you provide a hook
procedure or custom template, or set the OFN_ALLOWMULTISELECT flag.
In case you need an example:
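Here is a minimal sketch (my own illustration, not the original code) of the Save As call without OFN_EXPLORER, passing a pointer to the OPENFILENAMEA structure; the helper name AskSaveFileName and its parameters are made up:
#include <Windows.h>
#include <commdlg.h>

// Ask the user for a save path; returns true and fills 'buffer' on success.
bool AskSaveFileName(HWND hWnd, char* buffer, DWORD bufferSize)
{
    OPENFILENAMEA ofn;
    ZeroMemory(&ofn, sizeof(ofn));
    buffer[0] = '\0';

    ofn.lStructSize = sizeof(ofn);
    ofn.hwndOwner   = hWnd;
    ofn.lpstrFilter = "Text Files (*.txt)\0*.txt\0All Files (*.*)\0*.*\0";
    ofn.lpstrFile   = buffer;     // receives the full path chosen by the user
    ofn.nMaxFile    = bufferSize;
    ofn.lpstrDefExt = "txt";
    ofn.Flags       = OFN_PATHMUSTEXIST | OFN_HIDEREADONLY | OFN_OVERWRITEPROMPT;

    // GetSaveFileNameA takes a pointer to the structure, not the structure itself.
    return GetSaveFileNameA(&ofn) != FALSE;
}
If the call succeeds, buffer holds the full path, which can then be handed to gif_write().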

Related

Taking screenshot of window (handle) in C++

I'm trying to take a screenshot of a particular window (HWND) on Windows using C++. The following code works on Notepad but not on another specific process. Instead, the code returns a completely different screenshot for the other process:
#include <Windows.h>
HBITMAP dump_client_window(const HWND window_handle)
{
RECT window_handle_rectangle;
GetClientRect(window_handle, &window_handle_rectangle);
const HDC hdc_screen = GetDC(nullptr);
const HDC hdc = CreateCompatibleDC(hdc_screen);
const auto cx = window_handle_rectangle.right - window_handle_rectangle.left;
const auto cy = window_handle_rectangle.bottom - window_handle_rectangle.top;
const HBITMAP bitmap = CreateCompatibleBitmap(hdc_screen, cx, cy);
const auto old_bitmap = SelectObject(hdc, bitmap); // select once and remember the previous bitmap so it can be restored
PrintWindow(window_handle, hdc, PW_CLIENTONLY);
// Cleanup
SelectObject(hdc, old_bitmap);
DeleteDC(hdc);
ReleaseDC(nullptr, hdc_screen);
return bitmap;
}
What could be the reason for it? If I use DirectX11 for taking the screenshot of the window, it works correctly for both processes:
#include <dxgi.h>
#include <inspectable.h>
#include <dxgi1_2.h>
#include <d3d11.h>
#include <winrt/Windows.System.h>
#include <winrt/Windows.Graphics.Capture.h>
#include <Windows.Graphics.Capture.Interop.h>
#include <windows.graphics.directx.direct3d11.interop.h>
#include <roerrorapi.h>
#include <ShlObj_core.h>
#include <dwmapi.h>
#include <algorithm> // std::min
#include <filesystem>
#include "ImageFormatConversion.hpp"
#pragma comment(lib, "Dwmapi.lib")
#pragma comment(lib, "windowsapp.lib")
void capture_window(HWND window_handle, const std::wstring& output_file_path)
{
// Init COM
init_apartment(winrt::apartment_type::multi_threaded);
// Create Direct 3D Device
winrt::com_ptr<ID3D11Device> d3d_device;
winrt::check_hresult(D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, D3D11_CREATE_DEVICE_BGRA_SUPPORT,
nullptr, 0, D3D11_SDK_VERSION, d3d_device.put(), nullptr, nullptr));
winrt::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice device;
const auto dxgiDevice = d3d_device.as<IDXGIDevice>();
{
winrt::com_ptr<IInspectable> inspectable;
winrt::check_hresult(CreateDirect3D11DeviceFromDXGIDevice(dxgiDevice.get(), inspectable.put()));
device = inspectable.as<winrt::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice>();
}
auto idxgi_device2 = dxgiDevice.as<IDXGIDevice2>();
winrt::com_ptr<IDXGIAdapter> adapter;
winrt::check_hresult(idxgi_device2->GetParent(winrt::guid_of<IDXGIAdapter>(), adapter.put_void()));
winrt::com_ptr<IDXGIFactory2> factory;
winrt::check_hresult(adapter->GetParent(winrt::guid_of<IDXGIFactory2>(), factory.put_void()));
ID3D11DeviceContext* d3d_context = nullptr;
d3d_device->GetImmediateContext(&d3d_context);
RECT rect{};
DwmGetWindowAttribute(window_handle, DWMWA_EXTENDED_FRAME_BOUNDS, &rect, sizeof(RECT));
const auto size = winrt::Windows::Graphics::SizeInt32{ rect.right - rect.left, rect.bottom - rect.top };
winrt::Windows::Graphics::Capture::Direct3D11CaptureFramePool m_frame_pool =
winrt::Windows::Graphics::Capture::Direct3D11CaptureFramePool::Create(
device,
winrt::Windows::Graphics::DirectX::DirectXPixelFormat::B8G8R8A8UIntNormalized,
2,
size);
const auto activation_factory = winrt::get_activation_factory<
winrt::Windows::Graphics::Capture::GraphicsCaptureItem>();
auto interop_factory = activation_factory.as<IGraphicsCaptureItemInterop>();
winrt::Windows::Graphics::Capture::GraphicsCaptureItem capture_item = { nullptr };
interop_factory->CreateForWindow(window_handle, winrt::guid_of<ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>(),
winrt::put_abi(capture_item));
auto is_frame_arrived = false;
winrt::com_ptr<ID3D11Texture2D> texture;
const auto session = m_frame_pool.CreateCaptureSession(capture_item);
m_frame_pool.FrameArrived([&](auto& frame_pool, auto&)
{
if (is_frame_arrived)
{
return;
}
auto frame = frame_pool.TryGetNextFrame();
struct __declspec(uuid("A9B3D012-3DF2-4EE3-B8D1-8695F457D3C1"))
IDirect3DDxgiInterfaceAccess : ::IUnknown
{
virtual HRESULT __stdcall GetInterface(GUID const& id, void** object) = 0;
};
auto access = frame.Surface().as<IDirect3DDxgiInterfaceAccess>();
access->GetInterface(winrt::guid_of<ID3D11Texture2D>(), texture.put_void());
is_frame_arrived = true;
return;
});
session.StartCapture();
// Message pump
MSG message;
while (!is_frame_arrived)
{
if (PeekMessage(&message, nullptr, 0, 0, PM_REMOVE) > 0)
{
DispatchMessage(&message);
}
}
session.Close();
D3D11_TEXTURE2D_DESC captured_texture_desc;
texture->GetDesc(&captured_texture_desc);
captured_texture_desc.Usage = D3D11_USAGE_STAGING;
captured_texture_desc.BindFlags = 0;
captured_texture_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
captured_texture_desc.MiscFlags = 0;
winrt::com_ptr<ID3D11Texture2D> user_texture = nullptr;
winrt::check_hresult(d3d_device->CreateTexture2D(&captured_texture_desc, nullptr, user_texture.put()));
d3d_context->CopyResource(user_texture.get(), texture.get());
D3D11_MAPPED_SUBRESOURCE resource;
winrt::check_hresult(d3d_context->Map(user_texture.get(), NULL, D3D11_MAP_READ, 0, &resource));
BITMAPINFO l_bmp_info;
// BMP 32 bpp
ZeroMemory(&l_bmp_info, sizeof(BITMAPINFO));
l_bmp_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
l_bmp_info.bmiHeader.biBitCount = 32;
l_bmp_info.bmiHeader.biCompression = BI_RGB;
l_bmp_info.bmiHeader.biWidth = captured_texture_desc.Width;
l_bmp_info.bmiHeader.biHeight = captured_texture_desc.Height;
l_bmp_info.bmiHeader.biPlanes = 1;
l_bmp_info.bmiHeader.biSizeImage = captured_texture_desc.Width * captured_texture_desc.Height * 4;
std::unique_ptr<BYTE[]> p_buf(new BYTE[l_bmp_info.bmiHeader.biSizeImage]); // array form so delete[] is used
UINT l_bmp_row_pitch = captured_texture_desc.Width * 4;
auto sptr = static_cast<BYTE*>(resource.pData);
auto dptr = p_buf.get() + l_bmp_info.bmiHeader.biSizeImage - l_bmp_row_pitch;
UINT l_row_pitch = std::min<UINT>(l_bmp_row_pitch, resource.RowPitch);
for (size_t h = 0; h < captured_texture_desc.Height; ++h)
{
memcpy_s(dptr, l_bmp_row_pitch, sptr, l_row_pitch);
sptr += resource.RowPitch;
dptr -= l_bmp_row_pitch;
}
// Save bitmap buffer into the file
WCHAR l_my_doc_path[MAX_PATH];
winrt::check_hresult(SHGetFolderPathW(nullptr, CSIDL_PERSONAL, nullptr, SHGFP_TYPE_CURRENT, l_my_doc_path));
FILE* lfile = nullptr;
if (auto lerr = _wfopen_s(&lfile, output_file_path.c_str(), L"wb"); lerr != 0)
{
return;
}
if (lfile != nullptr)
{
BITMAPFILEHEADER bmp_file_header;
bmp_file_header.bfReserved1 = 0;
bmp_file_header.bfReserved2 = 0;
bmp_file_header.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + l_bmp_info.bmiHeader.biSizeImage;
bmp_file_header.bfType = 'MB';
bmp_file_header.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
fwrite(&bmp_file_header, sizeof(BITMAPFILEHEADER), 1, lfile);
fwrite(&l_bmp_info.bmiHeader, sizeof(BITMAPINFOHEADER), 1, lfile);
fwrite(p_buf.get(), l_bmp_info.bmiHeader.biSizeImage, 1, lfile);
fclose(lfile);
convert_image_encoding(output_file_path, L"png");
}
}
Why is the DirectX11 code so complex/long and slow (about 800 ms - 1 s per call, including cold-start initialization)? Also, the latter version draws a blinking border around the captured window, which I would like to get rid of. I also seem to be forced into the inefficient route of storing the BMP image on disk, loading it back to convert it to PNG, and then storing it again to produce the final result I want on disk.
Any suggestions or help with any of these things are welcome, especially about why the first capture code can yield unexpected images depending on the window being captured. Other than that, I like the first version for its speed, brevity and simplicity.
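Regarding why dump_client_window() works for Notepad but not for some other processes: a hedged guess, not a confirmed diagnosis, is that the problem windows are rendered with hardware acceleration / DirectComposition, which a plain PrintWindow(..., PW_CLIENTONLY) often cannot capture. Newer SDK headers define PW_RENDERFULLCONTENT (0x00000002), which asks DWM to render the window's current content into the DC and frequently fixes exactly this symptom:
#ifndef PW_RENDERFULLCONTENT
#define PW_RENDERFULLCONTENT 0x00000002 // available in Windows 8.1+ SDK headers
#endif

// Ask DWM to render the window's full current content into the memory DC.
if (!PrintWindow(window_handle, hdc, PW_CLIENTONLY | PW_RENDERFULLCONTENT))
{
    // A FALSE return here would already explain a blank or unrelated bitmap.
}
As for the blinking border in the Windows.Graphics.Capture version, recent Windows builds expose GraphicsCaptureSession.IsBorderRequired, which can be set to false (it may require requesting permission first), so the border is not inherent to that API; and the BMP-to-disk round trip can be avoided by encoding the mapped texture bytes to PNG in memory (for example with WIC or any PNG library) before writing the final file.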

Programmatically take screenshots and save as PNG

I've been using the following code for quite a while:
#include <fstream>
#include <gdiplus.h>
#include <windows.h>
#include <iostream>
using namespace std;
void CaptureScreen(const char* filename)
{
HDC hScreenDC = GetDC(0);
HDC hMemoryDC = CreateCompatibleDC(hScreenDC);
int upper_left_x = GetSystemMetrics(SM_XVIRTUALSCREEN);
int upper_left_y = GetSystemMetrics(SM_YVIRTUALSCREEN);
int bitmap_dx = GetSystemMetrics(SM_CXVIRTUALSCREEN ) * 1.25f;
int bitmap_dy = GetSystemMetrics(SM_CYVIRTUALSCREEN ) * 1.25f;
// create file
ofstream file(filename, ios::binary);
if(!file) return;
// save bitmap file headers
BITMAPFILEHEADER fileHeader;
BITMAPINFOHEADER infoHeader;
fileHeader.bfType = 0x4d42;
fileHeader.bfSize = 0;
fileHeader.bfReserved1 = 0;
fileHeader.bfReserved2 = 0;
fileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
infoHeader.biSize = sizeof(infoHeader);
infoHeader.biWidth = bitmap_dx;
infoHeader.biHeight = -bitmap_dy;
infoHeader.biPlanes = 1;
infoHeader.biBitCount = 16;
infoHeader.biCompression = BI_RGB;
infoHeader.biSizeImage = 0;
infoHeader.biXPelsPerMeter = 0;
infoHeader.biYPelsPerMeter = 0;
infoHeader.biClrUsed = 0;
infoHeader.biClrImportant = 0;
file.write((char*)&fileHeader, sizeof(fileHeader));
file.write((char*)&infoHeader, sizeof(infoHeader));
// dibsection information
BITMAPINFO info;
info.bmiHeader = infoHeader;
// ------------------
// THE IMPORTANT CODE
// ------------------
// create a dibsection and blit the window contents to the bitmap
BYTE* memory = 0;
HBITMAP bitmap = CreateDIBSection(hScreenDC, &info, DIB_RGB_COLORS, (void**)&memory, 0, 0);
SelectObject(hMemoryDC, bitmap);
BitBlt(hMemoryDC, 0, 0, bitmap_dx, bitmap_dy, hScreenDC, upper_left_x, upper_left_y, SRCCOPY);
DeleteDC(hMemoryDC);
ReleaseDC(NULL, hScreenDC);
// save dibsection data
int bytes = (((16*bitmap_dx + 31) & (~31))/8)*bitmap_dy;
file.write((const char *)memory, bytes);
DeleteObject(bitmap);
}
int main()
{
CaptureScreen("ok.jpg");
return 0;
}
But it generates a very large file because the bitmap data is written uncompressed.
I'm looking for a way to capture a screenshot, save it to a buffer in PNG format, send it over a TCP connection, and save it as a PNG file on the receiving end.
I believe it has something to do with assigning BI_PNG to infoHeader.biCompression and a different calculation of bytes, but I can't figure out exactly what.
LodePNG (http://lodev.org/lodepng/) can be used to encode the captured pixels to PNG in memory:
#include <fstream>
#include <gdiplus.h>
#include <windows.h>
#include <iostream>
#include "lodepng.h"
const int bits_per_pixel = 24;
using namespace std;
void CaptureScreen(const char* filename)
{
HDC hScreenDC = GetDC(0);
HDC hMemoryDC = CreateCompatibleDC(hScreenDC);
int upper_left_x = GetSystemMetrics(SM_XVIRTUALSCREEN);
int upper_left_y = GetSystemMetrics(SM_YVIRTUALSCREEN);
int bitmap_dx = GetSystemMetrics(SM_CXVIRTUALSCREEN ) * 1.25f;
int bitmap_dy = GetSystemMetrics(SM_CYVIRTUALSCREEN ) * 1.25f;
// create file
ofstream file(filename, ios::binary);
if(!file) return;
// save bitmap file headers
BITMAPFILEHEADER fileHeader;
BITMAPINFOHEADER infoHeader;
fileHeader.bfType = 0x4d42;
fileHeader.bfSize = 0;
fileHeader.bfReserved1 = 0;
fileHeader.bfReserved2 = 0;
fileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
infoHeader.biSize = sizeof(infoHeader);
infoHeader.biWidth = bitmap_dx;
infoHeader.biHeight = -bitmap_dy;
infoHeader.biPlanes = 1;
infoHeader.biBitCount = bits_per_pixel;
infoHeader.biCompression = BI_RGB;
infoHeader.biSizeImage = 0;
infoHeader.biXPelsPerMeter = 0;
infoHeader.biYPelsPerMeter = 0;
infoHeader.biClrUsed = 0;
infoHeader.biClrImportant = 0;
file.write((char*)&fileHeader, sizeof(fileHeader));
file.write((char*)&infoHeader, sizeof(infoHeader));
// dibsection information
BITMAPINFO info;
info.bmiHeader = infoHeader;
// ------------------
// THE IMPORTANT CODE
// ------------------
// create a dibsection and blit the window contents to the bitmap
BYTE* memory = 0;
HBITMAP bitmap = CreateDIBSection(hScreenDC, &info, DIB_RGB_COLORS, (void**)&memory, 0, 0);
SelectObject(hMemoryDC, bitmap);
BitBlt(hMemoryDC, 0, 0, bitmap_dx, bitmap_dy, hScreenDC, upper_left_x, upper_left_y, SRCCOPY);
DeleteDC(hMemoryDC);
ReleaseDC(NULL, hScreenDC);
// save dibsection data
int bytes = (((bits_per_pixel*bitmap_dx + 31) & (~31))/8)*bitmap_dy;
file.write((const char *)memory, bytes);
unsigned char *out_buffer;
size_t out_buffer_len;
unsigned error;
if ( bits_per_pixel == 24 )
{
// convert memory from bgr format to rgb
for ( unsigned i = 0; i< bytes-2; i+=3)
{
int tmp = memory[i+2];
memory[i+2] = memory[i];
memory[i] = tmp;
}
error = lodepng_encode24(&out_buffer,
&out_buffer_len,
memory,
bitmap_dx,
bitmap_dy);
}
if ( bits_per_pixel == 32 )
{
// convert memory from bgr format to rgb
for ( unsigned i = 0; i< bytes-3; i+=4)
{
int tmp = memory[i+2];
memory[i+2] = memory[i];
memory[i] = tmp;
}
error = lodepng_encode32(&out_buffer,
&out_buffer_len,
memory,
bitmap_dx,
bitmap_dy);
}
if ( error )
{
std::cout << "error: " << error << '\n';
return;
}
lodepng_save_file(out_buffer, out_buffer_len, "stam.png");
free(out_buffer); // release the PNG buffer allocated by lodepng
DeleteObject(bitmap);
}
int main()
{
CaptureScreen("ok.jpg");
return 0;
}
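Since lodepng_encode24() already returns the finished PNG as an in-memory buffer (out_buffer / out_buffer_len), nothing has to touch the disk before sending it over TCP. Below is a rough sketch of pushing that buffer over an already-connected Winsock socket; the helper name send_png_buffer and the connected SOCKET are my own assumptions, and both ends are assumed to be Windows/little-endian so a raw length prefix is acceptable:
#include <winsock2.h>
#pragma comment(lib, "ws2_32.lib")

// Send an 8-byte length prefix followed by the PNG bytes, so the receiver
// knows exactly how much to read before writing its own .png file.
bool send_png_buffer(SOCKET s, const unsigned char* png, size_t len)
{
    unsigned long long prefix = len;
    if (send(s, (const char*)&prefix, sizeof(prefix), 0) == SOCKET_ERROR)
        return false;

    size_t sent = 0;
    while (sent < len) // send() may accept fewer bytes than requested
    {
        int n = send(s, (const char*)png + sent, (int)(len - sent), 0);
        if (n == SOCKET_ERROR)
            return false;
        sent += (size_t)n;
    }
    return true;
}
The receiving side reads the 8-byte length first, then reads exactly that many bytes and writes them straight to a .png file.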

Error with the output of Camera Calibration OpenCV 3.2 with C++ Visual Studio 2015. Can't save output value

I am getting an error when using OpenCV camera calibration. When I debug my code, it just opens the webcam and never writes the camera calibration output values to the folder location.
I am using OpenCV 3.2 with Visual Studio 2015.
My code:
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/aruco.hpp"
#include "opencv2/calib3d.hpp"
#include <sstream>
#include <iostream>
#include <fstream>
using namespace std;
using namespace cv;
const float calibrationSquareDimension = 0.01905f; //meters
const float arucoSquareDimension = 0.1016f; //meters
const Size chessboardDimensions = Size(9,6);
void createArucoMarkers()
{
Mat outputMarker;
Ptr<aruco::Dictionary> markerDictionary = aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME::DICT_4X4_50);
for (int i = 0; i < 50; i++)
{
aruco::drawMarker(markerDictionary, i, 500, outputMarker, 1);
ostringstream convert;
string imageName = "4x4Marker_";
convert << imageName << i << ".jpg";
imwrite(convert.str(), outputMarker);
}
}
void createKnownBoardPosition(Size boardsize, float squareEdgeLength, vector<Point3f>& corners)
{
for (int i = 0; i < boardsize.height; i++)
{
for (int j = 0; j < boardsize.width; j++)
{
corners.push_back(Point3f(j * squareEdgeLength, i * squareEdgeLength, 0.0f));
}
}
}
void getchessboardcorners(vector<Mat> images, vector<vector<Point2f>>& allfoundcorners, bool showresults = false)
{
for (vector<Mat>::iterator iter = images.begin(); iter != images.end(); iter++)
{
vector<Point2f> pointBuf;
bool found = findChessboardCorners(*iter, Size(9, 6), pointBuf, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
if (found)
{
allfoundcorners.push_back(pointBuf);
}
if (showresults)
{
drawChessboardCorners(*iter, Size(9, 6), pointBuf, found);
imshow("Looking for Corners", *iter);
waitKey(0);
}
}
}
void cameraCalibration(vector<Mat> calibrationImages, Size boardSize, float squareEdgeLength, Mat& cameraMatrix, Mat& distanceCoefficients)
{
vector<vector<Point2f>> checkerboardImageSpacePoints;
getchessboardcorners(calibrationImages, checkerboardImageSpacePoints, false);
vector<vector<Point3f>> worldSpaceCornerPoints(1);
createKnownBoardPosition(boardSize, squareEdgeLength, worldSpaceCornerPoints[0]);
worldSpaceCornerPoints.resize(checkerboardImageSpacePoints.size(), worldSpaceCornerPoints[0]);
vector<Mat> rVectors, tVectors;
distanceCoefficients = Mat::zeros(8, 1, CV_64F);
calibrateCamera(worldSpaceCornerPoints, checkerboardImageSpacePoints, boardSize, cameraMatrix, distanceCoefficients, rVectors, tVectors );
}
bool saveCameraCalibration(string name, Mat cameraMatrix, Mat distanceCoefficients)
{
ofstream outStream(name);
if (outStream)
{
uint16_t rows = cameraMatrix.rows;
uint16_t columns = cameraMatrix.cols;
for (int r = 0; r < rows; r++)
{
for (int c = 0; c < columns; c++)
{
double value = cameraMatrix.at<double>(r, c);
outStream << value << endl;
}
}
rows = distanceCoefficients.rows;
columns = distanceCoefficients.cols;
for (int r = 0; r < rows; r++)
{
for (int c = 0; c < columns; c++)
{
double value = distanceCoefficients.at<double>(r, c); // note: the original read cameraMatrix again here by mistake
outStream << value << endl;
}
}
outStream.close();
return true;
}
return false;
}
int main(int argc, char** argv)
{
Mat frame;
Mat drawToFrame;
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
Mat distanceCoefficients;
vector<Mat> savedImages;
vector<vector<Point2f>> markerCorners, rejectedCandidates;
VideoCapture vid(0);
if (!vid.isOpened())
{
return 0;
}
int framesPersecond = 20;
namedWindow("Webcam", CV_WINDOW_AUTOSIZE);
while (true)
{
if (!vid.read(frame))
break;
vector<Vec2f> foundPoints;
bool found = false;
found = findChessboardCorners(frame, chessboardDimensions, foundPoints, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE );
frame.copyTo(drawToFrame);
drawChessboardCorners(drawToFrame, chessboardDimensions, foundPoints, found);
if (found)
imshow("Webcam", drawToFrame);
else
imshow("Webcam", frame);
char character = waitKey(1000 / framesPersecond);
switch (character)
{
case ' ':
//saving image
if(found)
{
Mat temp;
frame.copyTo(temp);
savedImages.push_back(temp);
}
break;
case 13:
//start calibration
if (savedImages.size() > 15)
{
cameraCalibration(savedImages, chessboardDimensions, calibrationSquareDimension, cameraMatrix, distanceCoefficients);
saveCameraCalibration("CameraCalibration", cameraMatrix, distanceCoefficients);
}
break;
case 27:
//exit
return 0;
break;
}
}
return 0;
}
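As a side note (an assumption about what you ultimately want, not something stated above), OpenCV already ships a serializer for calibration data: cv::FileStorage writes the matrices to YAML/XML and reads them back, so there is no need for a hand-rolled text format. A rough sketch; the function name and file name are arbitrary:
#include <opencv2/core.hpp>
#include <string>

// Persist both calibration matrices with cv::FileStorage.
bool saveCameraCalibrationYml(const std::string& name,
                              const cv::Mat& cameraMatrix,
                              const cv::Mat& distanceCoefficients)
{
    cv::FileStorage fs(name, cv::FileStorage::WRITE);
    if (!fs.isOpened())
        return false;
    fs << "cameraMatrix" << cameraMatrix;
    fs << "distanceCoefficients" << distanceCoefficients;
    fs.release(); // flush and close the file
    return true;
}
Reading the values back is the mirror image, e.g. cv::FileStorage fs(name, cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix;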

Gtk2, Gdk2 memory usage gets bigger and bigger every frame update

I have been trying to make a program that takes the active window and displays it in its own window.
I have successfully achieved that goal. But the problem is, it uses a lot of RAM, and it keeps using more with every frame update (20 fps).
Here is the source code:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <gdk/gdkx.h>
#include <gtk/gtk.h>
int funcfinished = 1;
GtkWidget *window;
GdkPixbuf *fupdate_pixbuf;
GtkStyle *fupdate_style;
GdkPixmap *fupdate_background;
gint fupdate_xorig;
gint fupdate_yorig;
gint fupdate_width;
gint fupdate_height;
GdkPixbuf *fupdate_screenshot;
GdkWindow *fupdate_window;
gboolean frameupdate()
{
if(funcfinished == 1)
{
/*********[FuncFinish]*********/
funcfinished = 0;
fupdate_pixbuf = NULL;
fupdate_style = NULL;
fupdate_background = NULL;
fupdate_screenshot = NULL;
fupdate_window = NULL;
fupdate_xorig = 0;
fupdate_yorig = 0;
fupdate_width = 0;
fupdate_height = 0;
/*********[Func]*********/
fupdate_window = gdk_screen_get_active_window(gdk_screen_get_default());
gdk_drawable_get_size(fupdate_window, &fupdate_width, &fupdate_height);
fupdate_pixbuf = gdk_pixbuf_get_from_drawable(NULL, fupdate_window, NULL, 0, 0, 0, 0, fupdate_width, fupdate_height);
gdk_pixbuf_render_pixmap_and_mask(fupdate_pixbuf, &fupdate_background, NULL, 0);
fupdate_style = gtk_style_new();
fupdate_style->bg_pixmap[0] = fupdate_background;
gtk_widget_set_style(GTK_WIDGET(window), GTK_STYLE(fupdate_style));
/*********[FuncFinish]*********/
fupdate_pixbuf = NULL;
fupdate_style = NULL;
fupdate_background = NULL;
fupdate_screenshot = NULL;
fupdate_window = NULL;
fupdate_xorig = 0;
fupdate_yorig = 0;
fupdate_width = 0;
fupdate_height = 0;
funcfinished = 1;
}
else
{
printf("Skipped 1 frame update");
}
return TRUE;
}
int main(int argc, char *argv[])
{
gtk_init(&argc, &argv);
window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
gtk_window_set_title(GTK_WINDOW(window), "Hay Day Autobot");
gtk_window_set_default_size(GTK_WINDOW(window), 400, 300);
g_signal_connect(window, "destroy", G_CALLBACK (gtk_main_quit), NULL);
g_timeout_add(50, frameupdate, 0);
gtk_widget_show(window);
gtk_main();
return 0;
}
I also made a video of it in action, showing off the problem:
https://www.youtube.com/watch?v=GNCwNetLLBM
You are not releasing the memory that you allocate in the frame update function. For each function that you use there, you should look it up in the documentation and see what it says under "return value".
For example, gdk_screen_get_active_window() lists its return value as "transfer full". That means that "full" ownership of the return value is "transferred" to you when you call that function; ownership means that you are responsible for freeing the memory. Usually the documentation will also say how to do that. In this case you can read
The returned window should be unrefed using g_object_unref() when no longer needed.
On the other hand, gdk_screen_get_default() is "transfer none", so you don't need to do anything there.
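For instance, the two objects in frameupdate() whose documentation clearly says the caller owns them could be released like this before the pointers are reset to NULL (a sketch; whether the GdkPixmap from gdk_pixbuf_render_pixmap_and_mask() and the GtkStyle also need an explicit unref depends on how the style and gtk_widget_set_style() take ownership, so check those the same way):
/* Release the objects this function owns before clearing the pointers. */
g_object_unref(fupdate_pixbuf);  /* new pixbuf from gdk_pixbuf_get_from_drawable() */
g_object_unref(fupdate_window);  /* gdk_screen_get_active_window() is "transfer full" */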

How to get picture buffer data in ffmpeg?

I'm trying to pass a bitmap from ffmpeg to Android.
It already works, but it displays the picture directly on the surface passed from Java to native code.
How can I get the frame buffer bitmap data so that I can pass it to Java?
I've tried to save the out_frame buffer data:
unsigned char bmpFileHeader[14] = {'B', 'M', 0,0,0,0, 0,0, 0,0, 54, 0,0,0};
unsigned char bmpInfoHeader[40] = {40,0,0,0, 0,0,0,0, 0,0,0,0, 1,0, 24,0};
unsigned char bmpPad[3] = {0, 0, 0};
void saveBuffer(int fileIndex, int width, int height, unsigned char *buffer, int buffer_size) {
char filename[1024];
sprintf(filename, "/storage/sdcard0/3d_player_%d.bmp", fileIndex);
LOGI(10, "saving ffmpeg bitmap file: %d to %s", fileIndex, filename);
FILE *bitmapFile = fopen(filename, "wb");
if (!bitmapFile) {
LOGE(10, "failed to create ffmpeg bitmap file");
return;
}
unsigned int filesize = 54 + 3 * width * height; // 3 = (r,g,b); a single unsigned char would overflow here
bmpFileHeader[2] = (unsigned char)(filesize);
bmpFileHeader[3] = (unsigned char)(filesize >> 8);
bmpFileHeader[4] = (unsigned char)(filesize >> 16);
bmpFileHeader[5] = (unsigned char)(filesize >> 24);
bmpInfoHeader[4] = (unsigned char)(width);
bmpInfoHeader[5] = (unsigned char)(width >> 8);
bmpInfoHeader[6] = (unsigned char)(width >> 16);
bmpInfoHeader[7] = (unsigned char)(width >> 24);
bmpInfoHeader[8] = (unsigned char)(height);
bmpInfoHeader[9] = (unsigned char)(height >> 8);
bmpInfoHeader[10] = (unsigned char)(height >> 16);
bmpInfoHeader[11] = (unsigned char)(height >> 24);
fwrite(bmpFileHeader, 1, 14, bitmapFile);
fwrite(bmpInfoHeader, 1, 40, bitmapFile);
int i;
for (i=0; i<height; i++) {
fwrite(buffer + width * (height - 1 - i) * 3, 3, width, bitmapFile); // advance one row per iteration (bottom-up)
fwrite(bmpPad, 1, (4-(width * 3) % 4) % 4, bitmapFile);
}
fflush(bitmapFile);
fclose(bitmapFile);
}
int player_decode_video(struct DecoderData * decoder_data, JNIEnv * env,
struct PacketData *packet_data) {
int got_frame_ptr;
struct Player *player = decoder_data->player;
int stream_no = decoder_data->stream_no;
AVCodecContext * ctx = player->input_codec_ctxs[stream_no];
AVFrame * frame = player->input_frames[stream_no];
AVStream * stream = player->input_streams[stream_no];
int interrupt_ret;
int to_write;
int err = 0;
AVFrame *rgb_frame = player->rgb_frame;
ANativeWindow_Buffer buffer;
ANativeWindow * window;
#ifdef MEASURE_TIME
struct timespec timespec1, timespec2, diff;
#endif // MEASURE_TIME
LOGI(10, "player_decode_video decoding");
int frameFinished;
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
int ret = avcodec_decode_video2(ctx, frame, &frameFinished,
packet_data->packet);
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
diff = timespec_diff(timespec1, timespec2);
LOGI(3, "decode_video timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME
if (ret < 0) {
LOGE(1, "player_decode_video Fail decoding video %d\n", ret);
return -ERROR_WHILE_DECODING_VIDEO;
}
if (!frameFinished) {
LOGI(10, "player_decode_video Video frame not finished\n");
return 0;
}
// saving in buffer converted video frame
LOGI(7, "player_decode_video copy wait");
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
pthread_mutex_lock(&player->mutex_queue);
window = player->window;
if (window == NULL) {
pthread_mutex_unlock(&player->mutex_queue);
goto skip_frame;
}
ANativeWindow_setBuffersGeometry(window, ctx->width, ctx->height,
WINDOW_FORMAT_RGBA_8888);
if (ANativeWindow_lock(window, &buffer, NULL) != 0) {
pthread_mutex_unlock(&player->mutex_queue);
goto skip_frame;
}
pthread_mutex_unlock(&player->mutex_queue);
int format = buffer.format;
if (format < 0) {
LOGE(1, "Could not get window format")
}
enum PixelFormat out_format;
if (format == WINDOW_FORMAT_RGBA_8888) {
out_format = PIX_FMT_RGBA;
LOGI(6, "Format: WINDOW_FORMAT_RGBA_8888");
} else if (format == WINDOW_FORMAT_RGBX_8888) {
out_format = PIX_FMT_RGB0;
LOGE(1, "Format: WINDOW_FORMAT_RGBX_8888 (not supported)");
} else if (format == WINDOW_FORMAT_RGB_565) {
out_format = PIX_FMT_RGB565;
LOGE(1, "Format: WINDOW_FORMAT_RGB_565 (not supported)");
} else {
LOGE(1, "Unknown window format");
}
avpicture_fill((AVPicture *) rgb_frame, buffer.bits, out_format,
buffer.width, buffer.height);
rgb_frame->data[0] = buffer.bits;
if (format == WINDOW_FORMAT_RGBA_8888) {
rgb_frame->linesize[0] = buffer.stride * 4;
} else {
LOGE(1, "Unknown window format");
}
LOGI(6,
"Buffer: width: %d, height: %d, stride: %d",
buffer.width, buffer.height, buffer.stride);
int i = 0;
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
diff = timespec_diff(timespec1, timespec2);
LOGI(1,
"lockPixels and fillimage timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME
#ifdef MEASURE_TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
LOGI(7, "player_decode_video copying...");
AVFrame * out_frame;
int rescale;
if (ctx->width == buffer.width && ctx->height == buffer.height) {
// This always should be true
out_frame = rgb_frame;
rescale = FALSE;
} else {
out_frame = player->tmp_frame2;
rescale = TRUE;
}
if (ctx->pix_fmt == PIX_FMT_YUV420P) {
__I420ToARGB(frame->data[0], frame->linesize[0], frame->data[2],
frame->linesize[2], frame->data[1], frame->linesize[1],
out_frame->data[0], out_frame->linesize[0], ctx->width,
ctx->height);
} else if (ctx->pix_fmt == PIX_FMT_NV12) {
__NV21ToARGB(frame->data[0], frame->linesize[0], frame->data[1],
frame->linesize[1], out_frame->data[0], out_frame->linesize[0],
ctx->width, ctx->height);
} else {
LOGI(3, "Using slow conversion: %d ", ctx->pix_fmt);
struct SwsContext *sws_context = player->sws_context;
sws_context = sws_getCachedContext(sws_context, ctx->width, ctx->height,
ctx->pix_fmt, ctx->width, ctx->height, out_format,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
player->sws_context = sws_context;
if (sws_context == NULL) {
LOGE(1, "could not initialize conversion context from: %d"
", to :%d\n", ctx->pix_fmt, out_format);
// TODO some error
}
sws_scale(sws_context, (const uint8_t * const *) frame->data,
frame->linesize, 0, ctx->height, out_frame->data,
out_frame->linesize);
}
if (rescale) {
// Never occurs
__ARGBScale(out_frame->data[0], out_frame->linesize[0], ctx->width,
ctx->height, rgb_frame->data[0], rgb_frame->linesize[0],
buffer.width, buffer.height, __kFilterNone);
out_frame = rgb_frame;
}
// TODO: (4ntoine) frame decoded and rescaled, ready to call callback with frame picture from buffer
int bufferSize = buffer.width * buffer.height * 3; // 3 = (r,g,b);
static int bitmapCounter = 0;
if (bitmapCounter < 10) {
saveBuffer(bitmapCounter++, buffer.width, buffer.height, (unsigned char *)out_frame->data, bufferSize);
}
but out_frame is empty and the file contains the header followed by a body of 0x00 bytes.
How can I get the picture buffer data in ffmpeg?
Solved. In short: take the pixel data from the ANativeWindow_Buffer, i.e. buffer.bits. Note that the buffer is RGBA (4 bytes per pixel) while this BMP writer expects RGB (3 bytes per pixel). To save it as a BMP you need to write the BMP header and then write each line, converted to 3-byte pixels, with row padding.
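A sketch of that conversion, assuming the window buffer really is WINDOW_FORMAT_RGBA_8888 and reusing the saveBuffer() helper above; the helper name rgba_to_rgb is made up, and note that buffer.stride is measured in pixels, not bytes:
#include <stdlib.h>
#include <android/native_window.h>

/* Repack the RGBA window buffer into the tightly packed 3-byte-per-pixel
 * layout that saveBuffer() expects. A real BMP stores channels as B,G,R,
 * so swap indices 0 and 2 below if the colors come out inverted. */
static unsigned char *rgba_to_rgb(const ANativeWindow_Buffer *buffer) {
    int width = buffer->width;
    int height = buffer->height;
    unsigned char *rgb = (unsigned char *)malloc((size_t)width * height * 3);
    if (!rgb)
        return NULL;
    const unsigned char *src = (const unsigned char *)buffer->bits;
    int x, y;
    for (y = 0; y < height; y++) {
        const unsigned char *row = src + (size_t)y * buffer->stride * 4;
        unsigned char *dst = rgb + (size_t)y * width * 3;
        for (x = 0; x < width; x++) {
            dst[x * 3 + 0] = row[x * 4 + 0];
            dst[x * 3 + 1] = row[x * 4 + 1];
            dst[x * 3 + 2] = row[x * 4 + 2];
        }
    }
    return rgb; /* caller frees */
}
It would be called right after ANativeWindow_lock() succeeds, e.g. unsigned char *rgb = rgba_to_rgb(&buffer); then saveBuffer(n, buffer.width, buffer.height, rgb, buffer.width * buffer.height * 3); and finally free(rgb);.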

Resources