AlphaBlend and 16-bit color mode - windows

Win32 graphics is not my gig, but I have to do some alpha blending. The following code works fine in 32-bit color mode but displays nothing except the white background in 16-bit mode. Sorry for the length, but I don't know where it's going wrong. This is as compact as I could make it.
hbm is a 32-bit ARGB bitmap with varying per-pixel alpha, size 16x16 (so, cx = cy = 16).
// Create a memory DC to construct the bits
HDC hdc = GetDC(hWnd);
HDC hdcMem = CreateCompatibleDC(hdc);
HBITMAP hbmMem = CreateBitmap(cx, cy, 1, 32, NULL);
SelectObject(hdcMem, hbmMem);
// Fill the BG
RECT rc = { 0, 0, cx, cy };
FillRect(hdcMem, &rc, (HBRUSH)GetStockObject(WHITE_BRUSH));
// Get the bitmap bits
BITMAPINFO bmi;
ZeroMemory(&bmi, sizeof(bmi));
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth = cx;
bmi.bmiHeader.biHeight = cy;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biCompression = BI_RGB;
std::unique_ptr<BYTE[]> pvBits(new BYTE[cx * cy * 4]);
GetDIBits(hdcMem, hbm, 0, cy, reinterpret_cast<void*>(pvBits.get()), &bmi, DIB_RGB_COLORS);
// Premultiply all color channel values by the per-pixel alpha.
int ctPixels = cx * cy;
BYTE *prgba = pvBits.get();
for (int i = 0; i < ctPixels; ++i)
{
    int alpha = *(prgba + 3);
    for (int j = 0; j <= 2; ++j)
    {
        int k = *prgba;
        *prgba++ = k * alpha / 255;
    }
    ++prgba;
}
// Put the new bits back
SetDIBits(hdcMem, hbm, 0, cy, reinterpret_cast<void*>(pvBits.get()), &bmi, DIB_RGB_COLORS);
// Alpha blend into memory DC
HDC hdcSrc = CreateCompatibleDC(hdcMem);
HBITMAP hbmOld = static_cast<HBITMAP>(SelectObject(hdcSrc, hbm));
BLENDFUNCTION bfn = { AC_SRC_OVER, 0, 255, AC_SRC_ALPHA };
AlphaBlend(hdcMem, 0, 0, cx, cy, hdcSrc, 0, 0, cx, cy, bfn);
SelectObject(hdcSrc, hbmOld);
DeleteDC(hdcSrc);
// Blit the memory DC to the screen
BitBlt(hdc, 0, 0, cx, cy, hdcMem, 0, 0, SRCCOPY);
I have some vague suspicions about CreateCompatibleDC but other than that I'm flying blind.
Any help appreciated. TIA.

According to Microsoft's documentation, per-pixel alpha values are only supported with 32-bit bitmaps. SetDIBits converts your 32-bit DIB into a 16-bit DDB when the display is using 16-bit colour, and a 16-bit DDB has no space to store alpha values. You'll have to set bfn.SourceConstantAlpha to your alpha value instead to get this to work. You won't need to premultiply your bitmap in that case either.
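For illustration, a minimal sketch of that constant-alpha fallback, keeping the variable names from the question (the 128 is an arbitrary example alpha), would be:
// Constant-alpha blend for 16-bit display modes: AlphaFormat is 0
// rather than AC_SRC_ALPHA, and no premultiply pass is required.
BLENDFUNCTION bfn = { AC_SRC_OVER, 0, 128, 0 };
AlphaBlend(hdcMem, 0, 0, cx, cy, hdcSrc, 0, 0, cx, cy, bfn);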

Related

How to draw into device context

I have a bitmap image in the form of an array of 32-bit integers (ARGB pixels: uint32 *mypixels), plus an int width and an int height. I need to output them to a printer.
I have the printer context: HDC hdcPrinter;
As I learned, I need first to create a compatible context:
HDC hdcMem = CreateCompatibleDC(hdcPrinter);
Then I need to create an HBITMAP object, select it into the compatible context, and render:
HBITMAP hBitmap = ...?
SelectObject(hdcMem, hBitmap);
BitBlt(hdcPrinter, 0, 0, width, height, hdcMem, 0, 0, SRCCOPY);
And finally clean up:
DeleteObject(hBitmap);
DeleteDC(hdcMem);
My question is how do I create an HBITMAP object and put mypixels into it?
I found two options:
HBITMAP hBitmap = CreateCompatibleBitmap(hdcPrinter, width, height);
Looks good, but how do mypixels get into this bitmap?
HBITMAP hBitmap = CreateDIBSection(hdcPrinter /*or hdcMem?*/, ...);
Will it work? Is it better than option 1?
This function creates a bitmap and initialises it with an image. It's a bit fiddly to access the bits directly, but it can be done.
HBITMAP MakeBitmap(unsigned char *rgba, int width, int height, VOID **buff)
{
    VOID *pvBits;      // pointer to DIB section bits
    HBITMAP answer;
    BITMAPINFO bmi;
    HDC hdcScreen, hdc;
    int x, y;
    int red, green, blue, alpha;
    // set up bitmap info (zeroed first so the fields we skip are 0)
    ZeroMemory(&bmi, sizeof(bmi));
    bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    bmi.bmiHeader.biWidth = width;
    bmi.bmiHeader.biHeight = height;   // positive height = bottom-up DIB
    bmi.bmiHeader.biPlanes = 1;
    bmi.bmiHeader.biBitCount = 32;     // four 8-bit components
    bmi.bmiHeader.biCompression = BI_RGB;
    bmi.bmiHeader.biSizeImage = width * height * 4;
    hdcScreen = GetDC(NULL);
    hdc = CreateCompatibleDC(hdcScreen);
    ReleaseDC(NULL, hdcScreen);        // don't leak the screen DC
    answer = CreateDIBSection(hdc, &bmi, DIB_RGB_COLORS, &pvBits, NULL, 0x0);
    // copy the pixels in, premultiplying each channel by alpha as
    // AlphaBlend's AC_SRC_ALPHA expects (>> 8 is a fast approximation
    // of / 255); rows are written bottom-up to match the DIB layout
    for (y = 0; y < height; y++)
    {
        for (x = 0; x < width; x++)
        {
            red = rgba[(y * width + x) * 4];
            green = rgba[(y * width + x) * 4 + 1];
            blue = rgba[(y * width + x) * 4 + 2];
            alpha = rgba[(y * width + x) * 4 + 3];
            red = (red * alpha) >> 8;
            green = (green * alpha) >> 8;
            blue = (blue * alpha) >> 8;
            ((UINT32 *)pvBits)[(height - y - 1) * width + x] =
                (alpha << 24) | (red << 16) | (green << 8) | blue;
        }
    }
    DeleteDC(hdc);
    *buff = pvBits;
    return answer;
}
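Tying this back to the question, a minimal sketch of using the returned bitmap with the printer DC (reusing mypixels, width, height, and hdcPrinter from above; untested) would be:
VOID *bits = NULL;
HBITMAP hBitmap = MakeBitmap((unsigned char *)mypixels, width, height, &bits);
HDC hdcMem = CreateCompatibleDC(hdcPrinter);
HBITMAP hbmOld = (HBITMAP)SelectObject(hdcMem, hBitmap);
BitBlt(hdcPrinter, 0, 0, width, height, hdcMem, 0, 0, SRCCOPY);
SelectObject(hdcMem, hbmOld);  // deselect before deleting
DeleteObject(hBitmap);
DeleteDC(hdcMem);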

Blend 2 bitmaps

I have two buffers pointing to RGB32 images of different sizes, so my idea is to scale one buffer to match the other and alpha-blend the two images.
Currently I mix StretchBlt (for scaling performance) with the GDI+ DrawImage function and a ColorMatrix for alpha blending. This seems a bit slow, and it also has issues with the buffer being used by a different component that uses DirectX. For the buffer issue I tried copying the rows in reverse order, and it works except in the DirectX-related component.
Bitmap bmp1(width, height, 4bytesperpixel, RGB32, bufferpointer1);
Bitmap blend(width, height, 4bytesperpixel);
Graphics g(&blend);
// using GDI functions
Bitmap bmp2(scaleWidth, scaleHeight, 4bytesperpixel, RGB32, bufferpointer2);
HDC memdc = g.GetHDC();
//// scaling bufferpointer2 to the actual width & height
StretchDIBits(memdc, x, y, width, height, 0, 0, scaleWidth, scaleHeight, bufferpointer2, ..)
g.ReleaseDC(memdc); // so that the content is copied to the bitmap
//// then alpha-blending bmp1 on top of the scaled image bmp2,
//// using LockBits to copy the bitmap bytes and unlocking it
So I would need to replace the GDI+ functions and use Win32 function like AlphaBlend for this. I tried something like this and it shows a black screen
BITMAPINFO bminfo1 = {};
bminfo1.bmiHeader.biSize = sizeof( BITMAPINFO );
bminfo1.bmiHeader.biWidth = w;
bminfo1.bmiHeader.biHeight = h;
bminfo1.bmiHeader.biBitCount = m_nBytesPerPixel * 8;
bminfo1.bmiHeader.biCompression = BI_RGB;
bminfo1.bmiHeader.biPlanes = 1;
BITMAPINFO bminfo2 = {};
bminfo2.bmiHeader.biSize = sizeof( BITMAPINFO );
bminfo2.bmiHeader.biWidth = sW;
bminfo2.bmiHeader.biHeight = sH;
bminfo2.bmiHeader.biBitCount = m_nBytesPerPixel * 8;
bminfo2.bmiHeader.biCompression = BI_RGB;
bminfo2.bmiHeader.biPlanes = 1;
char* pBytes1, *pBytes2;
HDC hmemdc1 = CreateCompatibleDC(GetDC(0));
HDC hmemdc2 = CreateCompatibleDC(GetDC(0));
HBITMAP hBitmap1 = CreateDIBSection(hmemdc1, &bminfo1, DIB_RGB_COLORS, (void**) &pBytes1, NULL, 0);
SetDIBits(hmemdc1, hBitmap1, 0, bminfo1.bmiHeader.biHeight, pBuffer[0], &bminfo1, DIB_RGB_COLORS);
HBITMAP hBitmap2 = CreateDIBSection(hmemdc2, &bminfo2, DIB_RGB_COLORS, (void**) &pBytes2, NULL, 0);
SelectObject(hmemdc2,hBitmap2);
StretchDIBits(hmemdc2, 0, 0, w, h, 0, 0,
sW, sH, pBuffer[1], &bminfo2, DIB_RGB_COLORS, SRCCOPY );
BLENDFUNCTION bStruct;
bStruct.BlendOp = AC_SRC_OVER;
bStruct.BlendFlags = 0;
bStruct.SourceConstantAlpha = 255;
bStruct.AlphaFormat = AC_SRC_ALPHA;
SelectObject(hmemdc1,hBitmap1);
SelectObject(hmemdc2,hBitmap2);
//blend bmp2 on bmp1
BOOL res = AlphaBlend(hmemdc1, 0, 0, w, h, hmemdc2, 0, 0, w, h, bStruct);
//for testing output
SelectObject(hmemdc1,hBitmap1);
BitBlt(GetDC(0),0,0,width,height,hmemdc1,100,100,SRCCOPY);
//copy the bitmap buffer
memcpy(out, pBytes1, (w * m_nBytesPerPixel) * h);
I am not sure if it is possible to use AlphaBlend function to mix bitmaps per-pixel based from 2 memory DCs. Any help would be highly appreciated.
This part is wrong:
bminfo1.bmiHeader.biSize = sizeof( BITMAPINFO );
It should be sizeof(BITMAPINFOHEADER); otherwise it ruins everything. Also, you can't use GetDC(0) for any proper painting. Use instead:
HDC hdc = GetDC(hwnd);
...
ReleaseDC(hwnd, hdc);
or use the HDC from BeginPaint. Since you are using GDI+, you must already have HBITMAP handles available from bmp->GetHBITMAP(); there is no reason to copy out to memory and back to an HBITMAP.
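For instance (a hypothetical sketch; bmp1 is assumed to be one of the Gdiplus::Bitmap objects from the question):
// Extract an HBITMAP straight from a GDI+ bitmap; the Color argument
// is the background used for any transparent areas.
HBITMAP hbm1 = NULL;
bmp1.GetHBITMAP(Gdiplus::Color(0, 0, 0, 0), &hbm1);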
For AlphaBlend, set SourceConstantAlpha = 128 in case the alpha channel is not set.
void blend(HDC hdc, RECT rc, HBITMAP hbitmap1, HBITMAP hbitmap2)
{
    HDC memdc1 = CreateCompatibleDC(hdc);
    HDC memdc2 = CreateCompatibleDC(hdc);
    BITMAP bmp1, bmp2;
    GetObject(hbitmap1, sizeof(BITMAP), &bmp1);
    GetObject(hbitmap2, sizeof(BITMAP), &bmp2);
    SelectObject(memdc1, hbitmap1);
    SelectObject(memdc2, hbitmap2);
    BLENDFUNCTION blend = { 0 };  // BlendOp 0 == AC_SRC_OVER
    blend.SourceConstantAlpha = 128;
    SetStretchBltMode(hdc, COLORONCOLOR);
    AlphaBlend(memdc2, 0, 0, bmp2.bmWidth, bmp2.bmHeight,
        memdc1, 0, 0, bmp1.bmWidth, bmp1.bmHeight, blend);
    StretchBlt(hdc, 0, 0, rc.right, rc.bottom,
        memdc2, 0, 0, bmp2.bmWidth, bmp2.bmHeight, SRCCOPY);
    //or create another memdc to get dibs
    DeleteDC(memdc1);
    DeleteDC(memdc2);
}
In case you want to get the DIB bits, don't draw on hdc; instead create a third memory DC and another HBITMAP, then use GetDIBits:
HDC memdc = CreateCompatibleDC(hdc);
HBITMAP hbmp = CreateCompatibleBitmap(hdc, rc.right, rc.bottom);
SelectObject(memdc, hbmp);
SetStretchBltMode(memdc, COLORONCOLOR);
StretchBlt(memdc, 0, 0, rc.right, rc.bottom,
    memdc2, 0, 0, bmp2.bmWidth, bmp2.bmHeight, SRCCOPY);
int w = rc.right;
int h = rc.bottom;
BITMAPINFOHEADER bmpInfoHeader = { sizeof(BITMAPINFOHEADER) };
bmpInfoHeader.biWidth = w;
bmpInfoHeader.biHeight = h;
bmpInfoHeader.biBitCount = 32;
bmpInfoHeader.biCompression = BI_RGB;
bmpInfoHeader.biPlanes = 1;
DWORD size = w * 4 * h;
char *dib = new char[size];
// (ideally deselect hbmp from memdc first; GetDIBits expects the
// bitmap not to be selected into a DC)
GetDIBits(hdc, hbmp, 0, h, dib, (BITMAPINFO*)&bmpInfoHeader, DIB_RGB_COLORS);
...
DeleteDC(memdc);
DeleteObject(hbmp);
delete[] dib;
Edit
Method 2: this should be faster because it uses one StretchBlt and one AlphaBlend. This way you can use pre-computed alphas, although it's not necessary.
Use the other method, with two AlphaBlend calls, only if you want to blend both images with the background.
void modify_bits(HDC hdc, HBITMAP hbitmap)
{   //expecting a 32-bit bitmap
    BITMAP bm = { 0 };
    GetObject(hbitmap, sizeof(bm), &bm);
    int w = bm.bmWidth;
    int h = bm.bmHeight;
    BITMAPINFOHEADER bmpInfoHeader = { sizeof(BITMAPINFOHEADER),
        w, h, 1, 32, BI_RGB, 0, 0, 0, 0, 0 };
    BYTE* bits = new BYTE[w * h * 4];
    if (GetDIBits(hdc, hbitmap, 0, h, bits, (BITMAPINFO*)&bmpInfoHeader, DIB_RGB_COLORS)) {
        // walk every pixel: force alpha to 128 and premultiply B, G, R
        BYTE* p = bits;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                p[3] = 128;
                p[0] = p[0] * p[3] / 255;
                p[1] = p[1] * p[3] / 255;
                p[2] = p[2] * p[3] / 255;
                p += 4;
            }
        }
        SetDIBits(hdc, hbitmap, 0, h, bits, (BITMAPINFO*)&bmpInfoHeader, DIB_RGB_COLORS);
    }
    delete[] bits;
}
void blend2(HDC hdc, RECT rc, HBITMAP hbitmap1, HBITMAP hbitmap2)
{
    int w = rc.right;
    int h = rc.bottom;
    modify_bits(hdc, hbitmap2);
    HDC memdc1 = CreateCompatibleDC(hdc);
    HDC memdc2 = CreateCompatibleDC(hdc);
    BITMAP bmp1, bmp2;
    GetObject(hbitmap1, sizeof(BITMAP), &bmp1);
    GetObject(hbitmap2, sizeof(BITMAP), &bmp2);
    int w1 = bmp1.bmWidth;
    int h1 = bmp1.bmHeight;
    int w2 = bmp2.bmWidth;
    int h2 = bmp2.bmHeight;
    SelectObject(memdc1, hbitmap1);
    SelectObject(memdc2, hbitmap2);
    BLENDFUNCTION blend = { 0 };
    blend.BlendOp = AC_SRC_OVER;
    blend.BlendFlags = 0;
    blend.SourceConstantAlpha = 255;
    blend.AlphaFormat = AC_SRC_ALPHA;
    SetStretchBltMode(hdc, COLORONCOLOR);
    //draw first image normally:
    StretchBlt(hdc, 0, 0, w, h, memdc1, 0, 0, w1, h1, SRCCOPY);
    //AlphaBlend the second image:
    AlphaBlend(hdc, 0, 0, w, h, memdc2, 0, 0, w2, h2, blend);
    DeleteDC(memdc1);
    DeleteDC(memdc2);
}
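As a hypothetical call site (hwnd, hbm1, and hbm2 are assumed here; both bitmaps must be 32-bit for AC_SRC_ALPHA to work):
// e.g. inside a WM_PAINT handler
PAINTSTRUCT ps;
HDC hdc = BeginPaint(hwnd, &ps);
RECT rc;
GetClientRect(hwnd, &rc);
blend2(hdc, rc, hbm1, hbm2); // hbm2 is drawn over hbm1 at 50% alpha
EndPaint(hwnd, &ps);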

Rendering Windows screenshot capture bitmap as DirectX texture

I'm making progress on a '3d desktop' DirectX app that needs to display the current contents of a desktop window (e.g. "Calculator") as a 2D texture on a rectangular surface in DirectX 11. I'm so close, but really struggling with the screenshot BMP -> Texture2D step. I do have screenshot -> HBITMAP and DDS file -> rendered texture working, but can't complete screenshot -> rendered texture.
So far I have working the 'capture the window as a screenshot' bit:
RECT user_window_rectangle;
HWND user_window = FindWindow(NULL, TEXT("Calculator"));
GetClientRect(user_window, &user_window_rectangle);
HDC hdcScreen = GetDC(NULL);
HDC hdc = CreateCompatibleDC(hdcScreen);
UINT screenshot_width = user_window_rectangle.right - user_window_rectangle.left;
UINT screenshot_height = user_window_rectangle.bottom - user_window_rectangle.top;
hbmp = CreateCompatibleBitmap(hdcScreen, screenshot_width, screenshot_height);
SelectObject(hdc, hbmp);
PrintWindow(user_window, hdc, PW_CLIENTONLY);
At this point I have the window bitmap referenced by HBITMAP hbmp.
Also working is my code to render a DDS file as a texture on a directx/3d rectangle:
ID3D11Device *dev;
ID3D11DeviceContext *dev_context;
...
dev_context->PSSetShaderResources(0, 1, &shader_resource_view);
dev_context->PSSetSamplers(0, 1, &tex_sampler_state);
...
DirectX::TexMetadata tex_metadata;
DirectX::ScratchImage image;
hr = LoadFromDDSFile(L"Earth.dds", DirectX::DDS_FLAGS_NONE, &tex_metadata, image);
hr = CreateShaderResourceView(dev, image.GetImages(), image.GetImageCount(), tex_metadata, &shader_resource_view);
Pixel shader is:
Texture2D ObjTexture;
SamplerState ObjSamplerState;
float4 PShader(float4 pos : SV_POSITION, float4 color : COLOR, float2 tex : TEXCOORD) : SV_TARGET
{
    return ObjTexture.Sample(ObjSamplerState, tex);
}
The sampler state (linear filtering) is:
D3D11_SAMPLER_DESC sampler_desc;
ZeroMemory(&sampler_desc, sizeof(sampler_desc));
sampler_desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR; // a zeroed struct would give point filtering
sampler_desc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
sampler_desc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
sampler_desc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
sampler_desc.MinLOD = 0;
sampler_desc.MaxLOD = D3D11_FLOAT32_MAX;
hr = dev->CreateSamplerState(&sampler_desc, &tex_sampler_state);
Question: how do I replace the LoadFromDDSFile bit with some equivalent that takes the HBITMAP from the windows screencapture and ends up with it on the graphics card as ObjTexture ?
Below is my best shot at bridging from the screenshot HBITMAP hbmp to the shader resource screenshot_texture, but it gives a memory access violation from the graphics driver (I think due to my "data.pSysMem = &bmp.bmBits", but no idea really):
BITMAP bmp;
GetObject(hbmp, sizeof(BITMAP), (LPSTR)&bmp);
D3D11_TEXTURE2D_DESC screenshot_desc = CD3D11_TEXTURE2D_DESC(DXGI_FORMAT_R8G8B8A8_UNORM, bmp.bmWidth, bmp.bmHeight, 1,
1,
D3D11_BIND_SHADER_RESOURCE
);
int bytes_per_pixel = 4;
D3D11_SUBRESOURCE_DATA data;
ZeroMemory(&data, sizeof(D3D11_SUBRESOURCE_DATA));
data.pSysMem = &bmp.bmBits; //pixel buffer
data.SysMemPitch = bytes_per_pixel * bmp.bmWidth;// line size in byte
data.SysMemSlicePitch = bytes_per_pixel * bmp.bmWidth * bmp.bmHeight;// total buffer size in byte
hr = dev->CreateTexture2D(
&screenshot_desc, //texture format
&data, // pixel buffer use to fill the texture
&screenshot_texture // created texture
);
SOLUTION
The main issue was that using &bmp.bmBits directly as a pixel buffer caused memory conflicts within the graphics driver; this was resolved by using malloc to allocate an appropriately sized block of memory to store the pixel data. Thanks to Chuck Walbourn for helping with my poking around in the dark to work out how the pixel data is actually stored (it was 32 bits/pixel by default). It's still possible some of the code relies on luck to read the pixel data correctly, but it's been improved with Chuck's input.
My basic technique was:
FindWindow to get the client window on the desktop
CreateCompatibleBitmap and SelectObject and PrintWindow to get a HBITMAP to the snapshot
malloc to allocate the correct amount of space for a (byte*)pixel buffer
GetDIBits to populate the (byte*)pixel buffer from the HBITMAP
CreateTexture2D to build the texture buffer
CreateShaderResourceView to map the texture to the graphics pixel shader
So working code to screenshot a windows desktop window and pass that as a texture to a direct3d app is:
RECT user_window_rectangle;
HWND user_window = FindWindow(NULL, TEXT("Calculator")); //the window can't be min
if (user_window == NULL)
{
    MessageBoxA(NULL, "Can't find Calculator", "Camvas", MB_OK);
    return;
}
GetClientRect(user_window, &user_window_rectangle);
//create
HDC hdcScreen = GetDC(NULL);
HDC hdc = CreateCompatibleDC(hdcScreen);
UINT screenshot_width = user_window_rectangle.right - user_window_rectangle.left;
UINT screenshot_height = user_window_rectangle.bottom - user_window_rectangle.top;
hbmp = CreateCompatibleBitmap(hdcScreen, screenshot_width, screenshot_height);
SelectObject(hdc, hbmp);
//Print to memory hdc
PrintWindow(user_window, hdc, PW_CLIENTONLY);
BITMAPINFOHEADER bmih;
ZeroMemory(&bmih, sizeof(BITMAPINFOHEADER));
bmih.biSize = sizeof(BITMAPINFOHEADER);
bmih.biPlanes = 1;
bmih.biBitCount = 32;
bmih.biWidth = screenshot_width;
bmih.biHeight = -(int)screenshot_height; // negative height = top-down DIB
bmih.biCompression = BI_RGB;
bmih.biSizeImage = 0;
int bytes_per_pixel = bmih.biBitCount / 8;
BYTE *pixels = (BYTE*)malloc(bytes_per_pixel * screenshot_width * screenshot_height);
BITMAPINFO bmi = { 0 };
bmi.bmiHeader = bmih;
int row_count = GetDIBits(hdc, hbmp, 0, screenshot_height, pixels, &bmi, DIB_RGB_COLORS);
D3D11_TEXTURE2D_DESC screenshot_desc = CD3D11_TEXTURE2D_DESC(
DXGI_FORMAT_B8G8R8A8_UNORM, // format
screenshot_width, // width
screenshot_height, // height
1, // arraySize
1, // mipLevels
D3D11_BIND_SHADER_RESOURCE, // bindFlags
D3D11_USAGE_DYNAMIC, // usage
D3D11_CPU_ACCESS_WRITE, // cpuaccessFlags
1, // sampleCount
0, // sampleQuality
0 // miscFlags
);
D3D11_SUBRESOURCE_DATA data;
ZeroMemory(&data, sizeof(D3D11_SUBRESOURCE_DATA));
data.pSysMem = pixels; // texArray; // &bmp.bmBits; //pixel buffer
data.SysMemPitch = bytes_per_pixel * screenshot_width;// line size in byte
data.SysMemSlicePitch = bytes_per_pixel * screenshot_width * screenshot_height;
hr = dev->CreateTexture2D(
&screenshot_desc, //texture format
&data, // pixel buffer use to fill the texture
&screenshot_texture // created texture
);
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
srvDesc.Format = screenshot_desc.Format;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srvDesc.Texture2D.MostDetailedMip = 0;
srvDesc.Texture2D.MipLevels = screenshot_desc.MipLevels;
dev->CreateShaderResourceView(screenshot_texture, &srvDesc, &shader_resource_view);
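Since the texture is created with D3D11_USAGE_DYNAMIC and CPU write access, it can presumably be refreshed each frame without recreating it. A hedged sketch of that (reusing dev_context, pixels, and the sizes from above):
// Re-upload a fresh capture into the dynamic texture via Map/Unmap,
// honoring the driver's row pitch rather than assuming tight packing.
D3D11_MAPPED_SUBRESOURCE mapped;
hr = dev_context->Map(screenshot_texture, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped);
if (SUCCEEDED(hr))
{
    BYTE *dst = (BYTE*)mapped.pData;
    for (UINT y = 0; y < screenshot_height; ++y)
        memcpy(dst + y * mapped.RowPitch,
            pixels + y * bytes_per_pixel * screenshot_width,
            bytes_per_pixel * screenshot_width);
    dev_context->Unmap(screenshot_texture, 0);
}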
You are making a lot of assumptions here that the BITMAP returned is actually in 32-bit RGBA form. It is likely not in that format at all, and in any case you need to validate that bmPlanes is 1 and bmBitsPixel is 32 if you are assuming 4 bytes per pixel. You should read more about the BMP format.
BMPs use BGRA order, so you can use DXGI_FORMAT_B8G8R8A8_UNORM for the case of bmBitsPixel being 32.
Secondly, you need to derive the pitch from bmWidthBytes, not bmWidth.
data.pSysMem = &bmp.bmBits; //pixel buffer
data.SysMemPitch = bmp.bmWidthBytes;// line size in byte
data.SysMemSlicePitch = bmp.bmWidthBytes * bmp.bmHeight;// total buffer size in byte
If bmBitsPixel is 24, there is no DXGI format equivalent to that. You have to copy the data to a 32-bit format such as DXGI_FORMAT_B8G8R8X8_UNORM.
If bmBitsPixel is 15 or 16, you can use DXGI_FORMAT_B5G5R5A1_UNORM on a system with Direct3D 11.1, but remember that 16-bit DXGI formats are not always supported depending on the driver. Otherwise you'll have to convert this data to something else.
For bmBitsPixel values of 1, 2, 4, or 8 you have to convert them as there are no DXGI texture formats that are equivalent.
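As a rough illustration of the 24-bpp case (my sketch, not from the answer; bmp is the BITMAP from GetObject and bmBits is assumed to point at valid pixel data), widening BGR rows into a BGRX buffer could look like:
// Widen 24-bpp BGR scanlines (padded to bmWidthBytes) into tightly
// packed 32-bpp BGRX pixels for DXGI_FORMAT_B8G8R8X8_UNORM.
BYTE *src24 = (BYTE*)bmp.bmBits;
BYTE *dst32 = (BYTE*)malloc((size_t)bmp.bmWidth * bmp.bmHeight * 4);
for (LONG y = 0; y < bmp.bmHeight; ++y)
{
    BYTE *srcRow = src24 + (size_t)y * bmp.bmWidthBytes;
    BYTE *dstRow = dst32 + (size_t)y * bmp.bmWidth * 4;
    for (LONG x = 0; x < bmp.bmWidth; ++x)
    {
        dstRow[x * 4 + 0] = srcRow[x * 3 + 0]; // B
        dstRow[x * 4 + 1] = srcRow[x * 3 + 1]; // G
        dstRow[x * 4 + 2] = srcRow[x * 3 + 2]; // R
        dstRow[x * 4 + 3] = 0xFF;              // X: opaque filler
    }
}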

Create ARGB DIB

How do I create a DIB in ARGB format? I want to blit an image (one with some transparent parts) using this DIB.
I tried the following code, but it's not working properly:
unsigned char *rawdata; // filled from QImage raw data
unsigned char *buffer = NULL;
BITMAPINFO bmi;
memset(&bmi, 0, sizeof(bmi));
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth = width;/* Width of your image buffer */
bmi.bmiHeader.biHeight = -height; /* Height of your image buffer */
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biCompression = BI_RGB;
HBITMAP g_dibbmp = CreateDIBSection(hDesktopDC, &bmi, DIB_RGB_COLORS, (void **)&buffer, 0, 0);
if (!buffer)
{ /* ERROR */
printf("ERROR DIB could not create buffer\n");
}
else
{
printf("DIB created buffer successfully\n");
memcpy(buffer, rawdata, width * height * 4); // sizeof(rawdata) would only copy the size of the pointer
}
Please help.
Here's a snippet I put together from pieces of working code. The main differences I see are setting the mask bits and using a file-mapping memory section.
// assumes height and width passed in
int bpp = 32; // Bits per pixel
int stride = (width * (bpp / 8));
unsigned int byteCount = (unsigned int)(stride * height);
HANDLE hMemSection = ::CreateFileMapping( INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0, byteCount, NULL );
if (hMemSection == NULL)
return false;
BITMAPV5HEADER bmh;
memset( &bmh, 0, sizeof( BITMAPV5HEADER ) );
bmh.bV5Size = sizeof( BITMAPV5HEADER );
bmh.bV5Width = width;
bmh.bV5Height = -height;
bmh.bV5Planes = 1;
bmh.bV5BitCount = 32;
bmh.bV5Compression = BI_RGB;
bmh.bV5AlphaMask = 0xFF000000;
bmh.bV5RedMask = 0x00FF0000;
bmh.bV5GreenMask = 0x0000FF00;
bmh.bV5BlueMask = 0x000000FF;
void *pBits = NULL;
HDC hdc = ::GetDC( NULL );
HBITMAP hDIB = ::CreateDIBSection( hdc, (BITMAPINFO *) &bmh, DIB_RGB_COLORS,
    &pBits, hMemSection, (DWORD) 0 );
::ReleaseDC( NULL, hdc );
// Much later when done manipulating the bitmap
::CloseHandle( hMemSection );
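A hypothetical follow-up, copying the rawdata from the question into the section (note the explicit byteCount rather than sizeof on a pointer):
if (hDIB && pBits)
    memcpy(pBits, rawdata, byteCount); // byteCount = stride * height, computed above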
Thanks for your answer.
My problem got solved, though. It was not actually a problem with the DIB creation;
it was due to the wrong API being used for blitting.
I was using BitBlt, but that API does not take the alpha channel into account. Instead I tried
TransparentBlt (refer: http://msdn.microsoft.com/en-us/library/windows/desktop/dd145141(v=vs.85).aspx)
and it worked, as this API takes care of the transparency when copying from the source DC to the destination DC.
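For reference, a minimal hypothetical call (hdcDest, hdcSrc, and the sizes are placeholders); note that TransparentBlt keys on a single RGB colour rather than reading per-pixel alpha, while AlphaBlend is the call that consumes an alpha channel:
// Everything matching the crTransparent colour (here magenta) is skipped.
TransparentBlt(hdcDest, 0, 0, width, height,
    hdcSrc, 0, 0, width, height,
    RGB(255, 0, 255));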

UpdateLayeredWindow and DrawText

I'm using UpdateLayeredWindow to display an application window. I have created my own custom buttons and I would like to create my own static text. The problem is that when I try to draw the text on the HDC, the DrawText or TextOut functions overwrite the alpha channel of my picture and the text becomes transparent. I have tried to find a solution to this but could not find any. My custom controls are designed so that they do all their drawing in a member function called Draw(HDC hDc), so they can only access the HDC. I would like to keep this design. Can anyone help me? I am using MFC and I want to achieve the desired result without using GDI+.
I know this is an old post ... but I just had this very same problem ... and it was driving me CRAZY.
Eventually, I stumbled upon this post by Mike Sutton to the microsoft.public.win32.programmer.gdi newsgroup ... from almost 7 years ago!
Basically, DrawText (and TextOut) do not play nicely with the alpha channel and UpdateLayeredWindow; you need to premultiply the R, G, and B channels with the alpha channel.
In Mike's post, he shows how he creates another DIB (device independent bitmap) upon which he draws the text ... and alpha blends that into the other bitmap.
After doing this, my text looked perfect!
Just in case the link to the newsgroup post dies, I am going to include the code here. All credit goes to Mike Sutton (#mikedsutton).
Here is the code that creates the alpha blended bitmap with the text on it:
HBITMAP CreateAlphaTextBitmap(LPCSTR inText, HFONT inFont, COLORREF inColour)
{
    int TextLength = (int)strlen(inText);
    if (TextLength <= 0) return NULL;
    // Create DC and select font into it
    HDC hTextDC = CreateCompatibleDC(NULL);
    HFONT hOldFont = (HFONT)SelectObject(hTextDC, inFont);
    HBITMAP hMyDIB = NULL;
    // Get text area
    RECT TextArea = {0, 0, 0, 0};
    DrawText(hTextDC, inText, TextLength, &TextArea, DT_CALCRECT);
    if ((TextArea.right > TextArea.left) && (TextArea.bottom > TextArea.top))
    {
        BITMAPINFOHEADER BMIH;
        memset(&BMIH, 0x0, sizeof(BITMAPINFOHEADER));
        void *pvBits = NULL;
        // Specify DIB setup
        BMIH.biSize = sizeof(BMIH);
        BMIH.biWidth = TextArea.right - TextArea.left;
        BMIH.biHeight = TextArea.bottom - TextArea.top;
        BMIH.biPlanes = 1;
        BMIH.biBitCount = 32;
        BMIH.biCompression = BI_RGB;
        // Create and select DIB into DC
        hMyDIB = CreateDIBSection(hTextDC, (LPBITMAPINFO)&BMIH, 0, (LPVOID*)&pvBits, NULL, 0);
        HBITMAP hOldBMP = (HBITMAP)SelectObject(hTextDC, hMyDIB);
        if (hOldBMP != NULL)
        {
            // Set up DC properties
            SetTextColor(hTextDC, 0x00FFFFFF);
            SetBkColor(hTextDC, 0x00000000);
            SetBkMode(hTextDC, OPAQUE);
            // Draw text to buffer
            DrawText(hTextDC, inText, TextLength, &TextArea, DT_NOCLIP);
            BYTE* DataPtr = (BYTE*)pvBits;
            BYTE FillR = GetRValue(inColour);
            BYTE FillG = GetGValue(inColour);
            BYTE FillB = GetBValue(inColour);
            BYTE ThisA;
            for (int LoopY = 0; LoopY < BMIH.biHeight; LoopY++) {
                for (int LoopX = 0; LoopX < BMIH.biWidth; LoopX++) {
                    ThisA = *DataPtr; // Move alpha and pre-multiply with RGB
                    *DataPtr++ = (FillB * ThisA) >> 8;
                    *DataPtr++ = (FillG * ThisA) >> 8;
                    *DataPtr++ = (FillR * ThisA) >> 8;
                    *DataPtr++ = ThisA; // Set Alpha
                }
            }
            // De-select bitmap
            SelectObject(hTextDC, hOldBMP);
        }
    }
    // De-select font and destroy temp DC
    SelectObject(hTextDC, hOldFont);
    DeleteDC(hTextDC);
    // Return DIBSection
    return hMyDIB;
}
Here is the code that drives the CreateAlphaTextBitmap method:
void TestAlphaText(HDC inDC, int inX, int inY)
{
    const char *DemoText = "Hello World!\0";
    RECT TextArea = {0, 0, 0, 0};
    HFONT TempFont = CreateFont(50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "Arial\0");
    HBITMAP MyBMP = CreateAlphaTextBitmap(DemoText, TempFont, 0xFF);
    DeleteObject(TempFont);
    if (MyBMP)
    {
        // Create temporary DC and select new Bitmap into it
        HDC hTempDC = CreateCompatibleDC(inDC);
        HBITMAP hOldBMP = (HBITMAP)SelectObject(hTempDC, MyBMP);
        if (hOldBMP)
        {
            // Get Bitmap image size
            BITMAP BMInf;
            GetObject(MyBMP, sizeof(BITMAP), &BMInf);
            // Fill blend function and blend new text to window
            BLENDFUNCTION bf;
            bf.BlendOp = AC_SRC_OVER;
            bf.BlendFlags = 0;
            bf.SourceConstantAlpha = 0x80;
            bf.AlphaFormat = AC_SRC_ALPHA;
            AlphaBlend(inDC, inX, inY, BMInf.bmWidth, BMInf.bmHeight,
                hTempDC, 0, 0, BMInf.bmWidth, BMInf.bmHeight, bf);
            // Clean up
            SelectObject(hTempDC, hOldBMP);
            DeleteObject(MyBMP);
            DeleteDC(hTempDC);
        }
    }
}
