I have 2 buffers pointing to RGB32 images of different sizes, so my idea is to scale one buffer to match the other and alpha-blend the two images.
Currently I mix StretchBlt (for scaling performance) with the GDI+ DrawImage function and a ColorMatrix for alpha blending. This seems a bit slow, and it also has issues with the buffer being used by a different component that uses DirectX. For the buffer issue I tried copying the rows in reverse order, which works everywhere except in the DirectX-related component.
Bitmap bmp1(width, height, width * 4, PixelFormat32bppRGB, bufferpointer1);
Bitmap blend(width, height, PixelFormat32bppRGB);
Graphics g(&blend);
// using GDI functions:
Bitmap bmp2(scaleWidth, scaleHeight, scaleWidth * 4, PixelFormat32bppRGB, bufferpointer2);
HDC memdc = g.GetHDC();
//// scaling bufferpointer2 to the actual width & height
StretchDIBits(memdc, x, y, width, height, 0, 0, scaleWidth, scaleHeight, bufferpointer2, ...);
g.ReleaseDC(memdc); // so that the content is copied to the bitmap
//// Then alpha-blending bmp1 on top of the scaled image bmp2 (DrawImage with a ColorMatrix)
//// Using LockBits to copy the bitmap bytes out and unlocking it.
So I would need to replace the GDI+ functions and use a Win32 function like AlphaBlend for this. I tried something like the following, and it shows a black screen:
BITMAPINFO bminfo1 = {};
bminfo1.bmiHeader.biSize = sizeof( BITMAPINFO );
bminfo1.bmiHeader.biWidth = w;
bminfo1.bmiHeader.biHeight = h;
bminfo1.bmiHeader.biBitCount = m_nBytesPerPixel * 8;
bminfo1.bmiHeader.biCompression = BI_RGB;
bminfo1.bmiHeader.biPlanes = 1;
BITMAPINFO bminfo2 = {};
bminfo2.bmiHeader.biSize = sizeof( BITMAPINFO );
bminfo2.bmiHeader.biWidth = sW;
bminfo2.bmiHeader.biHeight = sH;
bminfo2.bmiHeader.biBitCount = m_nBytesPerPixel * 8;
bminfo2.bmiHeader.biCompression = BI_RGB;
bminfo2.bmiHeader.biPlanes = 1;
char* pBytes1, *pBytes2;
HDC hmemdc1 = CreateCompatibleDC(GetDC(0));
HDC hmemdc2 = CreateCompatibleDC(GetDC(0));
HBITMAP hBitmap1 = CreateDIBSection(hmemdc1, &bminfo1, DIB_RGB_COLORS, (void**) &pBytes1, NULL, 0);
SetDIBits(hmemdc1, hBitmap1, 0, bminfo1.bmiHeader.biHeight, pBuffer[0], &bminfo1, DIB_RGB_COLORS);
HBITMAP hBitmap2 = CreateDIBSection(hmemdc2, &bminfo2, DIB_RGB_COLORS, (void**) &pBytes2, NULL, 0);
SelectObject(hmemdc2,hBitmap2);
StretchDIBits(hmemdc2, 0, 0, w, h, 0, 0,
sW, sH, pBuffer[1], &bminfo2, DIB_RGB_COLORS, SRCCOPY );
BLENDFUNCTION bStruct;
bStruct.BlendOp = AC_SRC_OVER;
bStruct.BlendFlags = 0;
bStruct.SourceConstantAlpha = 255;
bStruct.AlphaFormat = AC_SRC_ALPHA;
SelectObject(hmemdc1,hBitmap1);
SelectObject(hmemdc2,hBitmap2);
//blend bmp2 on bmp1
BOOL res = AlphaBlend(hmemdc1, 0, 0, w, h, hmemdc2, 0, 0, w, h, bStruct);
//for testing output
SelectObject(hmemdc1,hBitmap1);
BitBlt(GetDC(0),0,0,width,height,hmemdc1,100,100,SRCCOPY);
//copy the bitmap buffer
memcpy(out, pBytes1, (w * m_nBytesPerPixel) * h);
I am not sure if it is possible to use the AlphaBlend function to mix bitmaps per-pixel from two memory DCs. Any help would be highly appreciated.
This part is wrong:
bminfo1.bmiHeader.biSize = sizeof( BITMAPINFO );
It should be sizeof(BITMAPINFOHEADER), otherwise it ruins everything. Also, you can't use GetDC(0) for any proper painting. Use instead:
HDC hdc = GetDC(hwnd);
...
ReleaseDC(hwnd, hdc);
or use the HDC from BeginPaint. Since you are using GDI+, you can get HBITMAP handles directly from bmp->GetHBITMAP(); there is no reason to convert to a memory buffer and back to an HBITMAP.
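For instance, a minimal sketch of pulling HBITMAP handles straight out of the question's GDI+ Bitmap objects (bmp1/bmp2 are the question's variables; the Color argument only matters where a source pixel has no alpha):
HBITMAP hbmp1 = NULL, hbmp2 = NULL;
bmp1.GetHBITMAP(Gdiplus::Color(0, 0, 0, 0), &hbmp1); // fully transparent background colour
bmp2.GetHBITMAP(Gdiplus::Color(0, 0, 0, 0), &hbmp2);
// ... pass hbmp1/hbmp2 to the blend() helper below ...
DeleteObject(hbmp1);
DeleteObject(hbmp2);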
For AlphaBlend, set SourceConstantAlpha = 128 in case the alpha channel is not set.
void blend(HDC hdc, RECT rc, HBITMAP hbitmap1, HBITMAP hbitmap2)
{
HDC memdc1 = CreateCompatibleDC(hdc);
HDC memdc2 = CreateCompatibleDC(hdc);
BITMAP bmp1, bmp2;
GetObject(hbitmap1, sizeof(BITMAP), &bmp1);
GetObject(hbitmap2, sizeof(BITMAP), &bmp2);
SelectObject(memdc1, hbitmap1);
SelectObject(memdc2, hbitmap2);
BLENDFUNCTION blend = { 0 };
blend.SourceConstantAlpha = 128;
SetStretchBltMode(hdc, COLORONCOLOR);
AlphaBlend(memdc2, 0, 0, bmp2.bmWidth, bmp2.bmHeight, memdc1, 0, 0, bmp1.bmWidth, bmp1.bmHeight, blend);
StretchBlt(hdc, 0, 0, rc.right, rc.bottom, memdc2, 0, 0, bmp2.bmWidth, bmp2.bmHeight, SRCCOPY);
//or create another memdc to get dibs
DeleteDC(memdc1);
DeleteDC(memdc2);
}
In case you want to get the DIB bits, don't draw on hdc; instead create a third memory DC and another HBITMAP, then use GetDIBits:
HDC memdc = CreateCompatibleDC(hdc);
HBITMAP hbmp = CreateCompatibleBitmap(hdc, rc.right, rc.bottom);
SelectObject(memdc, hbmp);
SetStretchBltMode(memdc, COLORONCOLOR);
StretchBlt(memdc, 0, 0, rc.right, rc.bottom,
memdc2, 0, 0, bmp2.bmWidth, bmp2.bmHeight, SRCCOPY);
int w = rc.right;
int h = rc.bottom;
BITMAPINFOHEADER bmpInfoHeader = { sizeof(BITMAPINFOHEADER) };
bmpInfoHeader.biWidth = w;
bmpInfoHeader.biHeight = h;
bmpInfoHeader.biBitCount = 32;
bmpInfoHeader.biCompression = BI_RGB;
bmpInfoHeader.biPlanes = 1;
DWORD size = w * 4 * h;
char *dib = new char[size];
GetDIBits(hdc, hbmp, 0, h, dib, (BITMAPINFO*)&bmpInfoHeader, DIB_RGB_COLORS);
...
DeleteDC(memdc);
DeleteObject(hbmp);
delete[]dib;
Edit
Method 2: This method should be faster because it uses one StretchBlt and one AlphaBlend. This way you can use pre-computed alphas, although that's not necessary.
Use the other method, with two AlphaBlend calls, only if you want to blend both images with the background.
void modify_bits(HDC hdc, HBITMAP hbitmap)
{ //expecting 32-bit bitmap
BITMAP bm = { 0 };
GetObject(hbitmap, sizeof(bm), &bm);
int w = bm.bmWidth;
int h = bm.bmHeight;
BITMAPINFOHEADER bmpInfoHeader = { sizeof(BITMAPINFOHEADER),
w, h, 1, 32, BI_RGB, 0, 0, 0, 0, 0 };
BYTE* bits = new BYTE[w * h * 4];
if (GetDIBits(hdc, hbitmap, 0, h, bits, (BITMAPINFO*)&bmpInfoHeader, DIB_RGB_COLORS)) {
BYTE* p = bits;
for (int y = 0; y < h; y++) { // rows
for (int x = 0; x < w; x++) { // pixels within a row (BGRA byte order)
p[3] = 128; // force alpha to 50%
p[0] = p[0] * p[3] / 255; // premultiply blue
p[1] = p[1] * p[3] / 255; // premultiply green
p[2] = p[2] * p[3] / 255; // premultiply red
p += 4;
}
}
SetDIBits(hdc, hbitmap, 0, h, bits, (BITMAPINFO*)&bmpInfoHeader, DIB_RGB_COLORS);
}
delete[] bits;
}
void blend2(HDC hdc, RECT rc, HBITMAP hbitmap1, HBITMAP hbitmap2)
{
int w = rc.right;
int h = rc.bottom;
modify_bits(hdc, hbitmap2);
HDC memdc1 = CreateCompatibleDC(hdc);
HDC memdc2 = CreateCompatibleDC(hdc);
BITMAP bmp1, bmp2;
GetObject(hbitmap1, sizeof(BITMAP), &bmp1);
GetObject(hbitmap2, sizeof(BITMAP), &bmp2);
int w1 = bmp1.bmWidth;
int h1 = bmp1.bmHeight;
int w2 = bmp2.bmWidth;
int h2 = bmp2.bmHeight;
SelectObject(memdc1, hbitmap1);
SelectObject(memdc2, hbitmap2);
BLENDFUNCTION blend = { 0 };
blend.BlendOp = AC_SRC_OVER;
blend.BlendFlags = 0;
blend.SourceConstantAlpha = 255;
blend.AlphaFormat = AC_SRC_ALPHA;
SetStretchBltMode(hdc, COLORONCOLOR);
//draw first image normally:
StretchBlt(hdc, 0, 0, w, h, memdc1, 0, 0, w1, h1, SRCCOPY);
//AlphaBlend the second image:
AlphaBlend(hdc, 0, 0, w, h, memdc2, 0, 0, w2, h2, blend);
DeleteDC(memdc1);
DeleteDC(memdc2);
}
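For completeness, a minimal usage sketch (hwnd, hbmp1 and hbmp2 are assumed names; the two HBITMAPs could come from the GDI+ bitmaps as shown earlier), called from the window procedure:
case WM_PAINT:
{
    PAINTSTRUCT ps;
    HDC hdc = BeginPaint(hwnd, &ps);
    RECT rc;
    GetClientRect(hwnd, &rc);
    blend2(hdc, rc, hbmp1, hbmp2); // draws hbmp1, then alpha-blends hbmp2 on top
    EndPaint(hwnd, &ps);
    break;
}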
I'm making progress developing a '3D desktop' DirectX app that needs to display the current contents of a desktop window (e.g. "Calculator") as a 2D texture on a rectangular surface in DirectX 11. I'm so close, but really struggling with the screenshot BMP -> Texture2D step. I do have screenshot -> HBITMAP and DDS file -> rendered texture working successfully, but can't complete screenshot -> rendered texture.
So far I have working the 'capture the window as a screenshot' bit:
RECT user_window_rectangle;
HWND user_window = FindWindow(NULL, TEXT("Calculator"));
GetClientRect(user_window, &user_window_rectangle);
HDC hdcScreen = GetDC(NULL);
HDC hdc = CreateCompatibleDC(hdcScreen);
UINT screenshot_width = user_window_rectangle.right - user_window_rectangle.left;
UINT screenshot_height = user_window_rectangle.bottom - user_window_rectangle.top;
hbmp = CreateCompatibleBitmap(hdcScreen, screenshot_width, screenshot_height);
SelectObject(hdc, hbmp);
PrintWindow(user_window, hdc, PW_CLIENTONLY);
At this point I have the window bitmap referenced by HBITMAP hbmp.
Also working is my code to render a DDS file as a texture on a directx/3d rectangle:
ID3D11Device *dev;
ID3D11DeviceContext *dev_context;
...
dev_context->PSSetShaderResources(0, 1, &shader_resource_view);
dev_context->PSSetSamplers(0, 1, &tex_sampler_state);
...
DirectX::TexMetadata tex_metadata;
DirectX::ScratchImage image;
hr = LoadFromDDSFile(L"Earth.dds", DirectX::DDS_FLAGS_NONE, &tex_metadata, image);
hr = CreateShaderResourceView(dev, image.GetImages(), image.GetImageCount(), tex_metadata, &shader_resource_view);
Pixel shader is:
Texture2D ObjTexture;
SamplerState ObjSamplerState;
float4 PShader(float4 pos : SV_POSITION, float4 color : COLOR, float2 tex : TEXCOORD) : SV_TARGET
{
return ObjTexture.Sample( ObjSamplerState, tex );
}
The samplerstate (defaulting to linear) is:
D3D11_SAMPLER_DESC sampler_desc;
ZeroMemory(&sampler_desc, sizeof(sampler_desc));
sampler_desc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
sampler_desc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
sampler_desc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
sampler_desc.MinLOD = 0;
sampler_desc.MaxLOD = D3D11_FLOAT32_MAX;
hr = dev->CreateSamplerState(&sampler_desc, &tex_sampler_state);
Question: how do I replace the LoadFromDDSFile bit with some equivalent that takes the HBITMAP from the windows screencapture and ends up with it on the graphics card as ObjTexture ?
Below is my best shot at bridging from the screenshot HBITMAP hbmp to the shader resource screenshot_texture, but it gives a memory access violation from the graphics driver (I think due to my "data.pSysMem = &bmp.bmBits", but no idea really):
GetObject(hbmp, sizeof(BITMAP), (LPSTR)&bmp)
D3D11_TEXTURE2D_DESC screenshot_desc = CD3D11_TEXTURE2D_DESC(DXGI_FORMAT_R8G8B8A8_UNORM, bmp.bmWidth, bmp.bmHeight, 1,
1,
D3D11_BIND_SHADER_RESOURCE
);
int bytes_per_pixel = 4;
D3D11_SUBRESOURCE_DATA data;
ZeroMemory(&data, sizeof(D3D11_SUBRESOURCE_DATA));
data.pSysMem = &bmp.bmBits; //pixel buffer
data.SysMemPitch = bytes_per_pixel * bmp.bmWidth;// line size in byte
data.SysMemSlicePitch = bytes_per_pixel * bmp.bmWidth * bmp.bmHeight;// total buffer size in byte
hr = dev->CreateTexture2D(
&screenshot_desc, //texture format
&data, // pixel buffer use to fill the texture
&screenshot_texture // created texture
);
:::::::::::::::::::::::::SOLUTION::::::::::::::::::::::::::::::::::::::::::
The main issue was that trying to use &bmp.bmBits directly as a pixel buffer caused memory conflicts within the graphics driver; this was resolved by using malloc to allocate an appropriately sized block of memory to store the pixel data. Thanks to Chuck Walbourn for helping with my poking around in the dark to work out how the pixel data is actually stored (it was actually 32 bits/pixel by default). It's still possible/likely that some of the code relies on luck to read the pixel data correctly, but it's been improved with Chuck's input.
My basic technique was:
FindWindow to get the client window on the desktop
CreateCompatibleBitmap and SelectObject and PrintWindow to get a HBITMAP to the snapshot
malloc to allocate the correct amount of space for a (byte*)pixel buffer
GetDIBits to populate the (byte*)pixel buffer from the HBITMAP
CreateTexture2D to build the texture buffer
CreateShaderResourceView to map the texture to the graphics pixel shader
So working code to screenshot a windows desktop window and pass that as a texture to a direct3d app is:
RECT user_window_rectangle;
HWND user_window = FindWindow(NULL, TEXT("Calculator")); // the window can't be minimized
if (user_window == NULL)
{
MessageBoxA(NULL, "Can't find Calculator", "Camvas", MB_OK);
return;
}
GetClientRect(user_window, &user_window_rectangle);
//create
HDC hdcScreen = GetDC(NULL);
HDC hdc = CreateCompatibleDC(hdcScreen);
UINT screenshot_width = user_window_rectangle.right - user_window_rectangle.left;
UINT screenshot_height = user_window_rectangle.bottom - user_window_rectangle.top;
hbmp = CreateCompatibleBitmap(hdcScreen, screenshot_width, screenshot_height);
SelectObject(hdc, hbmp);
//Print to memory hdc
PrintWindow(user_window, hdc, PW_CLIENTONLY);
BITMAPINFOHEADER bmih;
ZeroMemory(&bmih, sizeof(BITMAPINFOHEADER));
bmih.biSize = sizeof(BITMAPINFOHEADER);
bmih.biPlanes = 1;
bmih.biBitCount = 32;
bmih.biWidth = screenshot_width;
bmih.biHeight = -(LONG)screenshot_height; // negative height = top-down DIB
bmih.biCompression = BI_RGB;
bmih.biSizeImage = 0;
int bytes_per_pixel = bmih.biBitCount / 8;
BYTE *pixels = (BYTE*)malloc(bytes_per_pixel * screenshot_width * screenshot_height);
BITMAPINFO bmi = { 0 };
bmi.bmiHeader = bmih;
int row_count = GetDIBits(hdc, hbmp, 0, screenshot_height, pixels, &bmi, DIB_RGB_COLORS);
D3D11_TEXTURE2D_DESC screenshot_desc = CD3D11_TEXTURE2D_DESC(
DXGI_FORMAT_B8G8R8A8_UNORM, // format
screenshot_width, // width
screenshot_height, // height
1, // arraySize
1, // mipLevels
D3D11_BIND_SHADER_RESOURCE, // bindFlags
D3D11_USAGE_DYNAMIC, // usage
D3D11_CPU_ACCESS_WRITE, // cpuaccessFlags
1, // sampleCount
0, // sampleQuality
0 // miscFlags
);
D3D11_SUBRESOURCE_DATA data;
ZeroMemory(&data, sizeof(D3D11_SUBRESOURCE_DATA));
data.pSysMem = pixels; // pixel buffer filled by GetDIBits
data.SysMemPitch = bytes_per_pixel * screenshot_width;// line size in byte
data.SysMemSlicePitch = bytes_per_pixel * screenshot_width * screenshot_height;
hr = dev->CreateTexture2D(
&screenshot_desc, //texture format
&data, // pixel buffer use to fill the texture
&screenshot_texture // created texture
);
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
srvDesc.Format = screenshot_desc.Format;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srvDesc.Texture2D.MostDetailedMip = 0;
srvDesc.Texture2D.MipLevels = screenshot_desc.MipLevels;
dev->CreateShaderResourceView(screenshot_texture, &srvDesc, &shader_resource_view);
You are making a lot of assumptions here that the BITMAP returned is actually in 32-bit RGBA form. It is likely not in that format at all, and in any case you need to validate that bmPlanes is 1 and bmBitsPixel is 32 if you are assuming 4 bytes per pixel. You should read more about the BMP format.
BMPs use BGRA order, so you can use DXGI_FORMAT_B8G8R8A8_UNORM for the case of bmBitsPixel being 32.
Secondly, you need to derive the pitch from bmWidthBytes and not bmWidth.
data.pSysMem = &bmp.bmBits; //pixel buffer
data.SysMemPitch = bmp.bmWidthBytes;// line size in byte
data.SysMemSlicePitch = bmp.bmWidthBytes * bmp.bmHeight;// total buffer size in byte
If bmBitsPixel is 24, there is no DXGI format equivalent to that. You have to copy the data to a 32-bit format such as DXGI_FORMAT_B8G8R8X8_UNORM.
If bmBitsPixel is 15 or 16, you can use DXGI_FORMAT_B5G5R5A1_UNORM on a system with Direct3D 11.1, but remember that 16-bit DXGI formats are not always supported depending on the driver. Otherwise you'll have to convert this data to something else.
For bmBitsPixel values of 1, 2, 4, or 8 you have to convert them as there are no DXGI texture formats that are equivalent.
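To illustrate the 24-bit case above, here is a hedged sketch of expanding 24-bpp BGR rows into a 32-bpp BGRX buffer suitable for DXGI_FORMAT_B8G8R8X8_UNORM (the function name and parameters are illustrative, not part of the original answer):
// src/srcPitch describe the 24-bpp source rows; dst must be width * height * 4 bytes.
void ConvertBGRToBGRX(const BYTE* src, UINT srcPitch, BYTE* dst, UINT width, UINT height)
{
    for (UINT y = 0; y < height; ++y)
    {
        const BYTE* s = src + y * srcPitch;
        BYTE* d = dst + y * width * 4;
        for (UINT x = 0; x < width; ++x)
        {
            d[0] = s[0]; // B
            d[1] = s[1]; // G
            d[2] = s[2]; // R
            d[3] = 0xFF; // X (opaque filler)
            s += 3;
            d += 4;
        }
    }
}
The resulting buffer can then be passed as pSysMem with SysMemPitch = width * 4.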
Win32 graphics is not my gig, but I have to do some alpha blending. The following code works fine in 32-bit color mode but displays nothing except the white background in 16-bit mode. Sorry for the length, but I don't know where it's going wrong. This is as compact as I could make it.
hbm is a 32-bit ARGB bitmap with varying per-pixel alpha, size 16x16 (so, cx = cy = 16).
// Create a memory DC to construct the bits
HDC hdc = GetDC(hWnd);
HDC hdcMem = CreateCompatibleDC(hdc);
HBITMAP hbmMem = CreateBitmap(cx, cy, 1, 32, NULL);
SelectObject(hdcMem, hbmMem);
// Fill the BG
RECT rc = { 0, 0, cx, cy };
FillRect(hdcMem, &rc, (HBRUSH)GetStockObject(WHITE_BRUSH));
// Get the bitmap bits
BITMAPINFO bmi;
ZeroMemory(&bmi, sizeof(bmi));
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth = cx;
bmi.bmiHeader.biHeight = cy;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biCompression = BI_RGB;
std::unique_ptr<BYTE[]> pvBits(new BYTE[cx * cy * 4]);
GetDIBits(hdcMem, hbm, 0, cy, reinterpret_cast<void*>(pvBits.get()), &bmi, DIB_RGB_COLORS);
// Premultiply all color channel values by the per-pixel alpha.
int ctPixels = cx * cy;
BYTE *prgba = pvBits.get();
for (int i = 0; i < ctPixels; ++i)
{
int alpha = *(prgba + 3);
for (int j = 0; j <= 2; ++j)
{
int k = *prgba;
*prgba++ = k * alpha / 255;
}
++prgba;
}
// Put the new bits back
SetDIBits(hdcMem, hbm, 0, cy, reinterpret_cast<void*>(pvBits.get()), &bmi, DIB_RGB_COLORS);
// Alpha blend into memory DC
HDC hdcSrc = CreateCompatibleDC(hdcMem);
HBITMAP hbmOld = static_cast<HBITMAP>(SelectObject(hdcSrc, hbm));
BLENDFUNCTION bfn = { AC_SRC_OVER, 0, 255, AC_SRC_ALPHA };
AlphaBlend(hdcMem, 0, 0, cx, cy, hdcSrc, 0, 0, cx, cy, bfn);
SelectObject(hdcSrc, hbmOld);
DeleteDC(hdcSrc);
// Blit the memory DC to the screen
BitBlt(hdc, 0, 0, cx, cy, hdcMem, 0, 0, SRCCOPY);
I have some vague suspicions about CreateCompatibleDC but other than that I'm flying blind.
Any help appreciated. TIA.
According to Microsoft's documentation, per-pixel alpha values are only supported with 32-bit bitmaps. SetDIBits converts your 32-bit DIB into a 16-bit DDB if the display is using 16-bit colour, and a 16-bit DDB has no space to store alpha values. You'll have to set bfn.SourceConstantAlpha to your alpha value instead to get this to work. You won't need to premultiply your bitmap in that case either.
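For example, a minimal sketch of the constant-alpha variant (128 is just an example value; AC_SRC_ALPHA is dropped because there is no per-pixel alpha to honour, and the DC/handle names are the ones from the question):
BLENDFUNCTION bfn = { 0 };
bfn.BlendOp = AC_SRC_OVER;
bfn.BlendFlags = 0;
bfn.SourceConstantAlpha = 128; // blend the whole source at ~50%
bfn.AlphaFormat = 0;           // no per-pixel alpha
AlphaBlend(hdcMem, 0, 0, cx, cy, hdcSrc, 0, 0, cx, cy, bfn);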
How do I create a DIB with ARGB format? I want to blit an image (that has some transparent parts in it) using this DIB.
I tried the following code, but it's not working properly:
unsigned char * rawdata; // filled with QImage raw data
unsigned char * buffer = NULL;
memset(&bmi, 0, sizeof(bmi));
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth = width;/* Width of your image buffer */
bmi.bmiHeader.biHeight = -height; /* Height of your image buffer */
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biCompression = BI_RGB;
HBITMAP g_dibbmp = CreateDIBSection(hDesktopDC, &bmi, DIB_RGB_COLORS, (void **)&buffer, 0, 0);
if (!buffer)
{ /* ERROR */
printf("ERROR DIB could not create buffer\n");
}
else
{
printf("DIB created buffer successfully\n");
memcpy(buffer, rawdata, width * height * 4);
}
Please help.
Regards,
Techtotie.
Here's a snippet I put together from pieces of working code. The main differences I see are setting the mask bits and using a memory section.
// assumes height and width passed in
int bpp = 32; // Bits per pixel
int stride = (width * (bpp / 8));
unsigned int byteCount = (unsigned int)(stride * height);
HANDLE hMemSection = ::CreateFileMapping( INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0, byteCount, NULL );
if (hMemSection == NULL)
return false;
BITMAPV5HEADER bmh;
memset( &bmh, 0, sizeof( BITMAPV5HEADER ) );
bmh.bV5Size = sizeof( BITMAPV5HEADER );
bmh.bV5Width = width;
bmh.bV5Height = -height;
bmh.bV5Planes = 1;
bmh.bV5BitCount = 32;
bmh.bV5Compression = BI_RGB;
bmh.bV5AlphaMask = 0xFF000000;
bmh.bV5RedMask = 0x00FF0000;
bmh.bV5GreenMask = 0x0000FF00;
bmh.bV5BlueMask = 0x000000FF;
void *pBits = NULL;
HDC hdc = ::GetDC( NULL );
HBITMAP hDIB = ::CreateDIBSection( hdc, (BITMAPINFO *) &bmh, DIB_RGB_COLORS,
&pBits, hMemSection, (DWORD) 0 );
::ReleaseDC( NULL, hdc );
// Much later, when done manipulating the bitmap
::DeleteObject( hDIB );
::CloseHandle( hMemSection );
Thanks for your answer, but my problem got solved. It was not actually a problem with the DIB creation; it was due to the wrong API being used for blitting. I was using BitBlt, but that API does not take care of the alpha channel. Instead I tried TransparentBlt (refer to http://msdn.microsoft.com/en-us/library/windows/desktop/dd145141(v=vs.85).aspx) and it worked, as this API takes care of copying the alpha values from the source DC to the destination DC.
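For reference, a hedged sketch of what such a TransparentBlt call can look like (the destination DC, the colour key and the width/height names are illustrative; TransparentBlt requires linking against Msimg32.lib):
// Illustrative only: blit g_dibbmp (the DIB section created above) to hDesktopDC,
// treating pure black as the transparent colour key.
HDC hdcSrc = CreateCompatibleDC(hDesktopDC);
HGDIOBJ oldBmp = SelectObject(hdcSrc, g_dibbmp);
TransparentBlt(hDesktopDC, 0, 0, width, height,
               hdcSrc, 0, 0, width, height,
               RGB(0, 0, 0));
SelectObject(hdcSrc, oldBmp);
DeleteDC(hdcSrc);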
Once I have loaded a bitmap from a file with LoadImage:
HBITMAP renderBMP = (HBITMAP)LoadImage( NULL, filePath, IMAGE_BITMAP, 0, 0, LR_DEFAULTSIZE | LR_LOADFROMFILE );
is there a way to easily access and edit the pixels individually?
I can use this to get the bitmap object, but it doesn't seem to help,
BITMAP bm;
GetObject(renderBMP, sizeof(bm), &bm);
because the value of bmBits in the structure is 0.
UPDATE:
Now I am getting a bug with this solution:
struct Pixel { unsigned char r,g,b,a; };
void Frame::PushMemory(HDC hdc)
{
BITMAPINFO bi;
ZeroMemory(&bi.bmiHeader, sizeof(BITMAPINFOHEADER));
bi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
GetDIBits(hdc, renderBMP, 0, bi.bmiHeader.biHeight, NULL, &bi, DIB_RGB_COLORS);
/* Allocate memory for bitmap bits */
Pixel* pixels = new Pixel[bi.bmiHeader.biHeight * bi.bmiHeader.biWidth];
int n = sizeof(Pixel) * bi.bmiHeader.biHeight * bi.bmiHeader.biWidth;
int m = bi.bmiHeader.biSizeImage;
GetDIBits(hdc, renderBMP, 0, bi.bmiHeader.biHeight, pixels, &bi, DIB_RGB_COLORS);
// Recompute the output
//ComputeOutput(pixels);
// Push back to windows
//SetDIBits(hdc, renderBMP, 0, bi.bmiHeader.biHeight, pixels, &bi, DIB_RGB_COLORS );
//delete pixels;
}
I get this error:
Run-Time Check Failure #2 - Stack around the variable 'bi' was corrupted.
The last three lines don't seem to matter whether commented in or not.
Use GetDIBits to access the pixels. It copies all pixels into the specified buffer. After modifying the pixels you can use SetDIBits to write them back to the bitmap.
EDIT:
Example of code:
LPVOID lpvBits=NULL; // pointer to bitmap bits array
BITMAPINFO bi;
ZeroMemory(&bi.bmiHeader, sizeof(BITMAPINFOHEADER));
bi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
if (!GetDIBits(hDC, hBmp, 0, height, NULL, &bi, DIB_RGB_COLORS))
return NULL;
/* Allocate memory for bitmap bits */
if ((lpvBits = new char[bi.bmiHeader.biSizeImage]) == NULL)
return NULL;
if (!GetDIBits(hDC, hBmp, 0, height, lpvBits, &bi, DIB_RGB_COLORS))
return NULL;
/* do something with bits */
::SetDIBits( hDC, hBmp, 0, height, ( LPVOID )lpvBits, &bi, DIB_RGB_COLORS );
If you pass the LR_CREATEDIBSECTION flag to LoadImage it creates a special kind of bitmap with a usermode memory section containing the bits of the bitmap.
GetObject on a DIB section bitmap will fill in the bmBits pointer of the BITMAP structure, or can even fill in a DIBSECTION struct with extra data.
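A minimal sketch of that approach (filePath is the question's variable; the per-pixel tweak at the end is just an illustration):
HBITMAP renderBMP = (HBITMAP)LoadImage(NULL, filePath, IMAGE_BITMAP, 0, 0,
                                       LR_LOADFROMFILE | LR_CREATEDIBSECTION);
DIBSECTION ds = { 0 };
if (GetObject(renderBMP, sizeof(DIBSECTION), &ds) == sizeof(DIBSECTION) &&
    ds.dsBm.bmBitsPixel == 32)
{
    BYTE* pixels = (BYTE*)ds.dsBm.bmBits;           // direct pointer to the pixel data
    for (int y = 0; y < ds.dsBm.bmHeight; ++y)
    {
        BYTE* row = pixels + y * ds.dsBm.bmWidthBytes;
        for (int x = 0; x < ds.dsBm.bmWidth; ++x)
        {
            row[x * 4 + 0] = 255 - row[x * 4 + 0];  // e.g. invert the blue channel
        }
    }
}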