I'm pretty new to the Windows API/GDI. All I want to do in this piece of code is take a full-screen screenshot into my memory buffer.
int __cdecl main(void)
{
    int width = GetSystemMetrics( SM_CXSCREEN );
    int height = GetSystemMetrics( SM_CYSCREEN );
    char *bmpBuf;
    FILE* bmpFile;
    HDC dcScreen = GetDC( NULL );
    HDC dcCapt = CreateCompatibleDC( dcScreen );
    HBITMAP hbmpCapt = CreateCompatibleBitmap( dcScreen, width, height );
    BITMAP bmpCapt;
    BITMAPINFOHEADER bi;
    size_t bmpSize;

    SelectObject( dcCapt, hbmpCapt );
    BitBlt( dcCapt, 0,0, width, height, dcScreen, 0,0, SRCCOPY | CAPTUREBLT );
    GetObject( hbmpCapt, sizeof(BITMAP), &bmpCapt );

    bmpSize = ((bmpCapt.bmWidth*32+31)/32)*4*bmpCapt.bmHeight;
    bmpBuf = (char*)malloc( bmpSize );
    GetDIBits( dcCapt, hbmpCapt, 0, (UINT)bmpCapt.bmHeight, bmpBuf, (LPBITMAPINFO)&bi, DIB_RGB_COLORS );

    bmpFile = fopen( "screenshot.raw", "w" );
    if( !bmpFile )
        printf( "Can't open file\n" );
    else {
        fwrite( bmpBuf, 1, bmpSize, bmpFile );
        fclose( bmpFile );
    }
    printf( "Image( %d x %d ) written\n", bmpCapt.bmWidth, bmpCapt.bmHeight );

    DeleteDC( dcCapt );
    DeleteObject( hbmpCapt );
    return 0;
}
I managed to save my memory buffer into a .raw file. However, when I open this .raw file in Photoshop (telling it the size and pixel format), I see all black. So there must be something wrong in my code; I just can't find the bug!
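For what it's worth, the usual culprit with GetDIBits used this way is that the BITMAPINFOHEADER passed to it is never filled in, so the function does not know what pixel format to produce; opening the output file in text mode ("w" instead of "wb") can also mangle the bytes on Windows. A minimal sketch of how that part might look (same variable names as above, and this is only my guess at the fix):
memset( &bi, 0, sizeof(bi) );              /* GetDIBits reads these fields, so they must be set */
bi.biSize        = sizeof(BITMAPINFOHEADER);
bi.biWidth       = bmpCapt.bmWidth;
bi.biHeight      = bmpCapt.bmHeight;       /* positive height = bottom-up rows */
bi.biPlanes      = 1;
bi.biBitCount    = 32;                     /* request 32-bit BGRA pixels */
bi.biCompression = BI_RGB;
GetDIBits( dcCapt, hbmpCapt, 0, (UINT)bmpCapt.bmHeight, bmpBuf,
           (LPBITMAPINFO)&bi, DIB_RGB_COLORS );
bmpFile = fopen( "screenshot.raw", "wb" ); /* binary mode so no newline translation */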
I have to use GDI to print documents on printers in general. I can get the device with no problem, and I also communicate the desired page size to it like this:
HANDLE hPrinter;
OpenPrinterW( printerName, &hPrinter, 0 );
DocumentPropertiesW( 0, hPrinter, printerName,
                     pDevMode, NULL, DM_OUT_BUFFER );

if( settings.paperLength > 0 && settings.paperWidth > 0 )
{
    if( pDevMode->dmFields & DM_PAPERSIZE )
    {
        pDevMode->dmPaperSize = 0;
    }
    if( pDevMode->dmFields & DM_PAPERLENGTH )
    {
        pDevMode->dmPaperLength = settings.paperLength;
    }
    if( pDevMode->dmFields & DM_PAPERWIDTH )
    {
        pDevMode->dmPaperWidth = settings.paperWidth;
    }
}

DocumentPropertiesW( 0, hPrinter, printerName, 0, pDevMode,
                     DM_IN_BUFFER | DM_OUT_BUFFER );
ClosePrinter( hPrinter );
Then I StretchBlt my document and print it.
DOCINFOW di;
memset ((void *) &di, 0, sizeof (di));
di.cbSize = sizeof (DOCINFOW);
di.lpszDocName = utf_to_widechar("Document").data();

HDC hdc = this->deviceContext;
auto cxpage = GetDeviceCaps( hdc, HORZRES );
auto cypage = GetDeviceCaps( hdc, VERTRES );
auto hdcMem = CreateCompatibleDC( hdc );

BITMAPINFO bi;
bi.bmiHeader = {sizeof(bi.bmiHeader), settings.width, settings.height,
                1, 24, BI_RGB, 0, 0x0ec4, 0x0ec4, 0, 0};
LPVOID ppvBits;
auto hBitmap = CreateDIBSection( 0, &bi, DIB_RGB_COLORS, &ppvBits, 0, 0 );
auto hbmOld = SelectObject( hdcMem, hBitmap );

StartDocW( hdc, &di );
for(int i = 0; i < this->colorTables.size(); ++i)
{
    int size = settings.width * settings.height * 3;
    StartPage( hdc );
    SetDIBits( 0, hBitmap, 0, settings.height,
               (PVOID)&(colorTables[i][0]), &bi, DIB_RGB_COLORS );
    SetMapMode( hdc, MM_ISOTROPIC );
    SetWindowExtEx( hdc, cxpage, cypage, 0 );
    SetViewportExtEx( hdc, cxpage, cypage, 0 );
    SetViewportOrgEx( hdc, 0, 0, 0 );
    StretchBlt( hdc, 0, 0, cxpage, cypage, hdcMem, 0, 0,
                settings.width, settings.height, SRCCOPY );
    EndPage( hdc );
}
EndDoc( hdc );
SelectObject( hdcMem, hbmOld );
DeleteDC( hdcMem );
Now, my document is 210mm x 99mm. Most printers handle this more or less properly: the print comes out at the right size and sits either in the top-left corner, or centered at the top if the printer supports smaller paper sizes. The issue is that I am now working with an HP printer and it just doesn't behave this way: the print is stretched over the whole A4 page, which gives both the wrong size and the wrong proportions. I know the printer can actually do it, because I can use other software to print such documents properly. It just seems that HP doesn't fully support some GDI functions. Is there some way to sort this out?
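I can't tell from the snippets alone, but two things might be worth double-checking (these are guesses, not a confirmed diagnosis): the second DocumentPropertiesW call passes 0 as the output DEVMODE even though DM_OUT_BUFFER is set (the fourth parameter is the output buffer, the fifth the input), and nothing shown here applies the modified DEVMODE to the DC that actually receives the StretchBlt. A rough sketch of applying it, reusing pDevMode and the printing DC from the code above:
// Merge the changes and let the driver validate them back into pDevMode
pDevMode->dmFields |= DM_PAPERLENGTH | DM_PAPERWIDTH;   // tell the driver which fields to honor
pDevMode->dmPaperLength = settings.paperLength;         // tenths of a millimeter
pDevMode->dmPaperWidth  = settings.paperWidth;
DocumentPropertiesW( 0, hPrinter, printerName, pDevMode, pDevMode,
                     DM_IN_BUFFER | DM_OUT_BUFFER );
// Apply it to the DC used for printing, either at creation time...
HDC hdcPrint = CreateDCW( L"WINSPOOL", printerName, NULL, pDevMode );
// ...or on an already-created printer DC:
ResetDCW( this->deviceContext, pDevMode );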
How do I create a DIB in ARGB format? I want to blit an image (which has some transparent parts in it) using this DIB.
I tried the following code, but it's not working properly:
unsigned char * rawdata;   /* filled with the QImage raw data */
unsigned char * buffer = NULL;
memset(&bmi, 0, sizeof(bmi));
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth = width;/* Width of your image buffer */
bmi.bmiHeader.biHeight = -height; /* Height of your image buffer */
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biCompression = BI_RGB;
HBITMAP g_dibbmp = CreateDIBSection(hDesktopDC, &bmi, DIB_RGB_COLORS, (void **)&buffer, 0, 0);
if (!buffer)
{ /* ERROR */
printf("ERROR DIB could not create buffer\n");
}
else
{
printf("DIB created buffer successfully\n");
memcpy(buffer,rawdata,sizeof(rawdata));
}
Please help.
Regards,
Techtotie.
Here's a snippet I put together from pieces of working code. The main differences I see are that it sets the color/alpha mask bits and backs the DIB section with a file mapping (memory section).
// assumes height and width passed in
int bpp = 32; // Bits per pixel
int stride = (width * (bpp / 8));
unsigned int byteCount = (unsigned int)(stride * height);

HANDLE hMemSection = ::CreateFileMapping( INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0, byteCount, NULL );
if (hMemSection == NULL)
    return false;

BITMAPV5HEADER bmh;
memset( &bmh, 0, sizeof( BITMAPV5HEADER ) );
bmh.bV5Size = sizeof( BITMAPV5HEADER );
bmh.bV5Width = width;
bmh.bV5Height = -height;        // negative height = top-down rows
bmh.bV5Planes = 1;
bmh.bV5BitCount = 32;
bmh.bV5Compression = BI_RGB;
bmh.bV5AlphaMask = 0xFF000000;
bmh.bV5RedMask   = 0x00FF0000;
bmh.bV5GreenMask = 0x0000FF00;
bmh.bV5BlueMask  = 0x000000FF;

void* pBits = NULL;             // receives a pointer to the DIB's pixel memory
HDC hdc = ::GetDC( NULL );
HBITMAP hDIB = ::CreateDIBSection( hdc, (BITMAPINFO *) &bmh, DIB_RGB_COLORS,
                                   &pBits, hMemSection, (DWORD) 0 );
::ReleaseDC( NULL, hdc );

// Much later when done manipulating the bitmap
::CloseHandle( hMemSection );
Thanks for your answer.
But my problem got solved; it was not actually a problem with the DIB creation.
It was due to the wrong API that I was using for blitting.
I was using BitBlt for blitting, but that API does not take care of the alpha channel. Instead I tried
TransparentBlt (see http://msdn.microsoft.com/en-us/library/windows/desktop/dd145141(v=vs.85).aspx)
and it worked, as that API takes care of copying the alpha values from the source DC to the destination DC.
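One small note for anyone reading this later: TransparentBlt works with a single color key (its last parameter) rather than the per-pixel alpha stored in an ARGB DIB; for true per-pixel transparency, AlphaBlend is usually the function to reach for. A rough sketch, assuming hdcSrc is a memory DC with the 32-bit (premultiplied-alpha) DIB selected and hdcDest is the target DC (both names are placeholders):
BLENDFUNCTION bf = { 0 };
bf.BlendOp             = AC_SRC_OVER;
bf.SourceConstantAlpha = 255;          // rely on per-pixel alpha only
bf.AlphaFormat         = AC_SRC_ALPHA; // source pixels carry premultiplied alpha
// Composite a width x height area onto the destination at (x, y); link against msimg32.lib
AlphaBlend( hdcDest, x, y, width, height,
            hdcSrc, 0, 0, width, height, bf );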
I am trying to adapt this code so that SURF can be run on color frames/images, and then use the Kalman_Color_Object_Track code to track the detected object by its color value with a Kalman filter. These are the steps I intend to take, but I am stuck because this SURF detection code does not accept/work with color images:
"book1.png" is the color image
After the rectangle around the object is detected in the incoming frames, the Mat structure is converted to an IplImage (since that is what the Kalman_Color_Object_Track code expects) with:
dest_image=cvCloneImage(&(IplImage)image);
mat_frame=cvCloneImage(&(IplImage)frame);
Then the Kalman_Color_Object_Track( mat_frame, dest_image, 30 ); method is called.
Questions: (A) How can I make this code work so that SURF features are extracted and detected on color images? (B) What should be passed in the call to Kalman_Color_Object_Track()? (C) Where exactly in the object detection module should it be called?
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"

using namespace cv;

IplImage *mat_dest_image=0;
IplImage *mat_frame=0;

/* Object detection and recognition from video */
int main()
{
    Mat object = imread( "book1.png" );
    if( !object.data )
    {
        std::cout<< "Error reading object " << std::endl;
        return -1;
    }

    //Detect the keypoints using SURF Detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;
    namedWindow("Good Matches");
    namedWindow("Tracking");

    std::vector<Point2f> obj_corners(4);
    //Get the corners from the object
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    char key = 'a';
    int framecount = 0;
    VideoCapture cap("booksvideo.avi");

    for(;;)
    {
        Mat frame;
        cap >> frame;
        imshow("Good Matches", frame);

        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<std::vector<DMatch> > matches;
        std::vector<DMatch> good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        //cvtColor(frame, image, CV_RGB2GRAY);
        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );
            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image)
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );

            mat_dest_image=cvCloneImage(&(IplImage)image);
            mat_frame=cvCloneImage(&(IplImage)frame);

            Kalman_Color_Object_Track( ); // The tracking method
        }

        //Show detected matches
        imshow( "Good Matches", img_matches );

        for( int i = 0; i < good_matches.size(); i++ )
        {
            printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx );
        }

        waitKey(0);
    }
    return 0;
}
This paper implemented the SIFT descriptor on color images by computing gradient histograms for each channel independently. Perhaps you could try the same approach for SURF features.
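As a rough illustration of that idea (not the paper's exact method), you could split the BGR frame into its channels, run the SURF detector and extractor on each channel, and stack the results; a sketch against the same OpenCV 2.x API as the code above:
// Per-channel SURF (sketch): detect on each BGR channel separately,
// then concatenate the descriptors so color information is not discarded.
std::vector<Mat> channels;
split( object, channels );              // 'object' is the BGR Mat loaded above
std::vector<KeyPoint> kp_all;
Mat des_all;
for( size_t c = 0; c < channels.size(); c++ )
{
    std::vector<KeyPoint> kp;
    Mat des;
    detector.detect( channels[c], kp );
    extractor.compute( channels[c], kp, des );
    kp_all.insert( kp_all.end(), kp.begin(), kp.end() );
    des_all.push_back( des );           // row-wise concatenation; row i matches kp_all[i]
}
// kp_all / des_all can then be fed to FlannBasedMatcher exactly like kp_object / des_object.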
I'm on Windows 7, and I am trying to display an icon with transparency in my context menu, but it doesn't work.
I am trying to use LoadImage like this:
m_hMenuBmp = (HBITMAP)::LoadImage(g_hInst, L"C:\\Users\\nicolas\\AppData\\Roaming\\MyApp\\icon.bmp", IMAGE_BITMAP, 16, 16, LR_LOADFROMFILE | LR_LOADTRANSPARENT );
My icon.bmp is set to 256 colors with white ( 255, 255, 255 ) as the background...
I don't know why this isn't working.
I also tried Raymond Chen's ARGB method, but it didn't work either:
int cx = GetSystemMetrics(SM_CXSMICON);
int cy = GetSystemMetrics(SM_CYSMICON);
BITMAPINFO bmi = {0};
bmi.bmiHeader.biSize =sizeof(bmi.bmiHeader);
bmi.bmiHeader.biWidth = cx;
bmi.bmiHeader.biHeight = cy;
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biCompression = BI_RGB;
DWORD *pBits;
m_hMenuBmp = CreateDIBSection(NULL, &bmi, DIB_RGB_COLORS, (void **)&pBits, NULL , 0);
if (m_hMenuBmp)
{
for (int y = 0; y < cy ; y++ )
{
for (int x = 0; x < cx; x++)
{
BYTE bAlpha = x * x * 255 / cx / cx;
DWORD dv = (bAlpha << 24) | (bAlpha << 16) | bAlpha ;
pBits[y *cx + x] - dv;
}
}
}
And I don't know why, but my icon isn't displayed with this method either.
I found a way to do this easily:
HICON hIcon = (HICON)LoadImage( NULL, L"icon.ico", IMAGE_ICON, 16, 16, LR_LOADFROMFILE );
HDC hDC = ::GetDC( NULL );
m_hMenuBmp = ::CreateCompatibleBitmap( hDC, 16, 16 );
HDC hDCTemp = ::CreateCompatibleDC( hDC );
::ReleaseDC( NULL, hDC );
HBITMAP hBitmapOld = ( HBITMAP ) ::SelectObject( hDCTemp, m_hMenuBmp );
::DrawIconEx( hDCTemp, 0, 0, hIcon, 16, 16, 0, ::GetSysColorBrush( COLOR_MENU ), DI_NORMAL );
::SelectObject( hDCTemp, hBitmapOld );
::DeleteDC( hDCTemp );
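For completeness, once m_hMenuBmp has been rendered it still has to be attached to the menu item; one way to do that (hMenu and idMenuItem are placeholders here) is SetMenuItemInfo:
MENUITEMINFO mii = { 0 };
mii.cbSize   = sizeof(mii);
mii.fMask    = MIIM_BITMAP;                         // only change the bitmap
mii.hbmpItem = m_hMenuBmp;                          // the 16x16 bitmap drawn above
SetMenuItemInfo( hMenu, idMenuItem, FALSE, &mii );  // FALSE = idMenuItem is a command ID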
I was able to get this to work:
HBITMAP hBitmap = (HBITMAP)::LoadImage(NULL, "C:\\moo\\res\\bitmap1.bmp", IMAGE_BITMAP, 0, 0, LR_LOADFROMFILE | LR_LOADTRANSPARENT | LR_LOADMAP3DCOLORS);
m_pic.SetBitmap(hBitmap);
The trick was LR_LOADMAP3DCOLORS together with LR_LOADTRANSPARENT. This was for a dialog box, by the way. Without LR_LOADMAP3DCOLORS, my white background stayed white.
I've been writing some code to do a screen grab of a window (in Windows). The code works fine; however, prior to the screen grab I have to bring the window I want to capture to the front and force a redraw.
I force the redraw with InvalidateRect, and then I have to pump some messages from the message loop in order for the WM_PAINT to get processed. This is obviously a bit lame, as I don't know how many messages to pump.
I tried using RedrawWindow with RDW_ALLCHILDREN, however the app I am grabbing a screen from is an MDI app and doesn't seem to redraw all of its children.
So my question is, is there a better way to redraw the window prior to the screen grab?
Cheers
Rich
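Not a full answer, but two things that might be worth a try: calling RedrawWindow with RDW_UPDATENOW added (so the repaint happens synchronously rather than waiting for WM_PAINT to be dispatched), and PrintWindow, which asks the window to render straight into your DC so it does not even need to be in the foreground. A quick sketch (hwndTarget and hdcMem stand for whatever window handle and memory DC you already use):
// Synchronous repaint of the window and all its children
RedrawWindow( hwndTarget, NULL, NULL,
              RDW_INVALIDATE | RDW_UPDATENOW | RDW_ALLCHILDREN );
// Or: let the window paint itself directly into the capture DC
PrintWindow( hwndTarget, hdcMem, 0 );   // hdcMem must have the capture bitmap selected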
Since you have not mentioned the language you are using, I hope the following code in C++ helps you!
void getScreenShot( int texWidth, int texHeight, unsigned char* pBuffer, HWND handle )
{
    /* Local variables */
    HDC screenDC;
    RECT screenRect;
    int extraBytesPerRow;
    BITMAPINFO bitmapInfo;
    HDC bitmapDC;
    void* bitmapDataPtr;
    HBITMAP hBitmap;
    HBITMAP hPrevBitmap;
    unsigned char* pIn;
    unsigned char* pOut;
    int rowIndex;
    int colIndex;

    /* Get a DC from the desktop window */
    screenDC = GetDC( handle );
    GetClientRect( handle, &screenRect );

    /* Determine the extra bytes we need per row (each row of bitmap data must end on a 32-bit boundary) */
    extraBytesPerRow = ( texWidth * 3 ) % 4;
    extraBytesPerRow = extraBytesPerRow ? 4 - extraBytesPerRow : 0;

    /* Set up the bitmap info structure */
    memset( &bitmapInfo, 0, sizeof( bitmapInfo ) );
    bitmapInfo.bmiHeader.biSize = sizeof( BITMAPINFOHEADER );
    bitmapInfo.bmiHeader.biWidth = texWidth;
    bitmapInfo.bmiHeader.biHeight = texHeight;
    bitmapInfo.bmiHeader.biPlanes = 1;
    bitmapInfo.bmiHeader.biBitCount = 24;
    bitmapInfo.bmiHeader.biCompression = BI_RGB;

    /* Create a bitmap device context (bitmapDataPtr will be a pointer to the bits in the bitmap) */
    bitmapDC = CreateCompatibleDC( NULL );
    hBitmap = CreateDIBSection( bitmapDC, ( BITMAPINFO* )&bitmapInfo.bmiHeader, DIB_RGB_COLORS, &bitmapDataPtr, NULL, 0 );
    hPrevBitmap = ( HBITMAP )SelectObject( bitmapDC, hBitmap );

    /* BitBlt or StretchBlt the image from the input DC into our bitmap DC */
    if ( ( texWidth != screenRect.right ) || ( texHeight != screenRect.bottom ) )
    {
        SetStretchBltMode( bitmapDC, HALFTONE );
        StretchBlt( bitmapDC, 0, 0, texWidth, texHeight, screenDC, 0, 0, screenRect.right, screenRect.bottom, SRCCOPY );
    }
    else
    {
        BitBlt( bitmapDC, 0, 0, texWidth, texHeight, screenDC, 0, 0, SRCCOPY );
    }

    /* Copy the data from the bitmap to the user's buffer (bitmap data is BGR and 4-byte aligned on each row; we want tightly-packed RGB) */
    pIn = ( unsigned char* )bitmapDataPtr;
    pOut = pBuffer;
    for ( rowIndex = 0; rowIndex < texHeight; rowIndex++ )
    {
        for ( colIndex = 0; colIndex < texWidth; colIndex++ )
        {
            pOut[ 0 ] = pIn[ 2 ];
            pOut[ 1 ] = pIn[ 1 ];
            pOut[ 2 ] = pIn[ 0 ];
            pOut += 3;
            pIn += 3;
        }
        pIn += extraBytesPerRow;
    }

    /* Free memory used by the bitmap */
    SelectObject( bitmapDC, hPrevBitmap );
    DeleteObject( hBitmap );
    DeleteDC( bitmapDC );

    /* Release the screen DC */
    ReleaseDC( handle, screenDC );
}
You don't actually need to force a redraw, but in case the window is minimised you might need to restore it before you call the function with its window handle. texWidth and texHeight are the dimensions of the window you are about to capture; to get them you can use GetWindowRect(..), or check out the link here: link
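A minimal usage sketch (the window title, buffer size, and error handling are just placeholders):
HWND hwnd = FindWindowW( NULL, L"Untitled - Notepad" );   /* placeholder window title */
RECT rc;
GetClientRect( hwnd, &rc );
unsigned char* pixels = (unsigned char*)malloc( rc.right * rc.bottom * 3 );  /* tightly-packed RGB */
getScreenShot( rc.right, rc.bottom, pixels, hwnd );
/* ... use the RGB data ... */
free( pixels );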