TextOut() doesn't seem to display a Unicode surrogate pair - winapi

I tried to display U+1D400 (as the surrogate pair high = 0xD835, low = 0xDC00) using TextOut(), to no avail. Why?
// WM_PAINT handler: tries to render U+1D400 (MATHEMATICAL BOLD CAPITAL A)
// through its UTF-16 surrogate pair, using the Cambria Math font.
case WM_PAINT:
PAINTSTRUCT ps;
BeginPaint(hwnd, &ps);
// Convert the point size to a negative lfHeight (character-height request).
int iLogPixelsY;
iLogPixelsY = GetDeviceCaps(ps.hdc, LOGPIXELSY);
LOGFONT lf;
int iPts;
iPts = 11;
memset(&lf, 0, sizeof(LOGFONT));
lf.lfHeight = -iPts * iLogPixelsY / 72;
lf.lfOutPrecision = OUT_TT_ONLY_PRECIS; // request a TrueType face only
wcscpy_s(lf.lfFaceName, L"Cambria Math");
HFONT hFont;
hFont = CreateFontIndirect(&lf);
hFont = (HFONT)SelectObject(ps.hdc, hFont); // hFont now holds the previous font
// UTF-16 encoding of U+1D400: high surrogate then low surrogate.
wchar_t tx[2];
tx[0] = 0xD835;
tx[1] = 0xDC00;
// NOTE(review): the length argument is 1, but the surrogate pair occupies
// two WCHARs — this is exactly the bug identified in the answer below.
TextOut(ps.hdc, 10, 100, tx, 1);
DeleteObject(SelectObject(ps.hdc, hFont)); // restore old font, delete ours
EndPaint(hwnd, &ps);
break;

You are calling TextOut specifying a string length of 1, but according to this documentation, you should pass 2 since it is a surrogate pair.

Related

Retrieving the palette of a bitmap image

I am loading a bitmap image from a file (type BMP) via the GDI function LoadImage, which returns a BITMAP handle.
I know how to access the bitmap bits. But the image is in the format 8BPP, hence palettized. How can I obtain the palette entries ?
Select the bitmap in to dc and call GetDIBColorTable. A temporary memory dc can be used here:
// Read the 8bpp palette: select the bitmap into a temporary memory DC and
// query its DIB color table (up to 256 entries for an 8bpp image).
RGBQUAD rgb[256] = { 0 };
HDC memdc = CreateCompatibleDC(hdc);
auto oldbmp = SelectObject(memdc, hbitmap);
GetDIBColorTable(memdc, 0, 256, rgb);
SelectObject(memdc, oldbmp); // deselect before destroying the DC
DeleteDC(memdc);
Alternatively use GetDIBits to read BITMAPINFO. You have to reserve enough memory to read the color table + all bytes + sizeof(BITMAPINFO).
Color table will be copied to BITMAPINFO -> bmiColors
Gdi+ is another option. Here is GDI example:
// Load a BMP as a DIB section and retrieve its palette two ways:
// (1) GetDIBColorTable through a memory DC (8bpp only), and
// (2) GetDIBits, which copies the color table into bmpinfo->bmiColors.
int main()
{
    HBITMAP hbitmap = (HBITMAP)LoadImage(0, L"source.bmp",
        IMAGE_BITMAP, 0, 0, LR_CREATEDIBSECTION | LR_DEFAULTSIZE | LR_LOADFROMFILE);
    if (!hbitmap)
        return 0;

    BITMAP bm;
    GetObject(hbitmap, sizeof(bm), &bm);
    int width = bm.bmWidth;
    int height = bm.bmHeight;

    // Round the color depth up to a standard DIB bit count (1/4/8/16/24/32).
    WORD clrbits = (WORD)(bm.bmPlanes * bm.bmBitsPixel);
    if (clrbits == 1) clrbits = 1;        // FIX: was `clrbits == 8 -> 1`, which
    else if (clrbits <= 4) clrbits = 4;   // wrongly mapped 8bpp to a 2-entry
    else if (clrbits <= 8) clrbits = 8;   // palette and skipped the 8bpp branch
    else if (clrbits <= 16) clrbits = 16;
    else if (clrbits <= 24) clrbits = 24;
    else clrbits = 32;

    HDC hdc = GetDC(0);

    // Option 1: read the color table directly via a temporary memory DC.
    if (clrbits == 8)
    {
        RGBQUAD rgb[256] = { 0 };
        HDC memdc = CreateCompatibleDC(hdc);
        auto oldbmp = SelectObject(memdc, hbitmap);
        GetDIBColorTable(memdc, 0, 256, rgb);
        SelectObject(memdc, oldbmp);
        DeleteDC(memdc);
    }

    // Option 2: GetDIBits. Reserve header + palette (palettized formats only).
    int palette_size = (clrbits < 24) ? sizeof(RGBQUAD) * (1 << clrbits) : 0;
    BITMAPINFO* bmpinfo = (BITMAPINFO*)new BYTE[sizeof(BITMAPINFO) + palette_size];
    int width_in_bytes = ((width * clrbits + 31) & ~31) / 8; // rows are DWORD-aligned
    DWORD size = width_in_bytes * height;
    bmpinfo->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    bmpinfo->bmiHeader.biWidth = width;
    bmpinfo->bmiHeader.biHeight = height;
    bmpinfo->bmiHeader.biPlanes = bm.bmPlanes;
    bmpinfo->bmiHeader.biBitCount = bm.bmBitsPixel;
    bmpinfo->bmiHeader.biClrUsed = (clrbits < 24) ? (1 << clrbits) : 0;
    bmpinfo->bmiHeader.biCompression = BI_RGB;
    bmpinfo->bmiHeader.biSizeImage = size;

    BYTE* bits = new BYTE[size];
    GetDIBits(hdc, hbitmap, 0, height, bits, bmpinfo, 0);
    //palette size should be 1024 for 256 color
    //it should be stored in `bmpinfo->bmiColors`

    delete[]bits;
    delete[](BYTE*)bmpinfo;
    DeleteObject(hbitmap);
    ReleaseDC(0, hdc);
    return 0;
}

Blend 2 bitmaps

I have 2 buffers pointing to RGB32 images of different sizes, so my idea is to scale one buffer to match the other one and alphablend these images.
Currently I am able to mix StretchBlt (for scaling performance) and the GDI+ DrawImage function with a color matrix for alpha blending. This seems to be a bit slow, and it also has issues with the buffer being used by a different component that uses DirectX. For the buffer issue I tried to copy the rows in reverse order, and it works except in the DirectX-related component.
Bitmap bmp1(width, height, 4bytesperpixel, RGB32, bufferpointer1);
Bitmap blend(width, height, 4bytesperpixel);
Graphics g(&newbmp)
using GDI function
Bitmap bmp2(scaleWidth, scaleHeight, 4bytesperpixel, RGB32, bufferpointer2)
HDC memdc = g.GetHDC();
//// scaling the bufferpointer2 to actual width & height
StretchDIBits(memdc, x,y, width, height, 0, 0,scaleWidth, scaleHeight, bufferpointer2,..)
g.ReleaseDC(memdc); // so that content is copied to the bitmap
//// Then alphablending bmp1 on top of the scaled imaged bmp2
//// Using lockbits to copy the bitmap bytes and unlocking it.
So I would need to replace the GDI+ functions and use Win32 function like AlphaBlend for this. I tried something like this and it shows a black screen
// Question code (as posted): wrap two raw RGB32 buffers in DIB sections,
// scale the second to the first's size, then AlphaBlend them.
// NOTE(review): biSize is wrong (should be sizeof(BITMAPINFOHEADER)) —
// this is the issue called out in the answer below.
BITMAPINFO bminfo1 = {};
bminfo1.bmiHeader.biSize = sizeof( BITMAPINFO );
bminfo1.bmiHeader.biWidth = w;
bminfo1.bmiHeader.biHeight = h;
bminfo1.bmiHeader.biBitCount = m_nBytesPerPixel * 8;
bminfo1.bmiHeader.biCompression = BI_RGB;
bminfo1.bmiHeader.biPlanes = 1;
BITMAPINFO bminfo2 = {};
bminfo2.bmiHeader.biSize = sizeof( BITMAPINFO );
bminfo2.bmiHeader.biWidth = sW;
bminfo2.bmiHeader.biHeight = sH;
bminfo2.bmiHeader.biBitCount = m_nBytesPerPixel * 8;
bminfo2.bmiHeader.biCompression = BI_RGB;
bminfo2.bmiHeader.biPlanes = 1;
char* pBytes1, *pBytes2;
// NOTE(review): each GetDC(0) below leaks a screen DC (never released).
HDC hmemdc1 = CreateCompatibleDC(GetDC(0));
HDC hmemdc2 = CreateCompatibleDC(GetDC(0));
HBITMAP hBitmap1 = CreateDIBSection(hmemdc1, &bminfo1, DIB_RGB_COLORS, (void**) &pBytes1, NULL, 0);
// NOTE(review): this line is garbled as posted — `bmiHeader.bih` is not a
// member; presumably the scan-line count (the height) was intended.
SetDIBits(hmemdc1, hBitmap1, 0, bminfo1.bmiHeader.bih, pBuffer[0], &bminfo1, DIB_RGB_COLORS);
HBITMAP hBitmap2 = CreateDIBSection(hmemdc2, &bminfo2, DIB_RGB_COLORS, (void**) &pBytes2, NULL, 0);
SelectObject(hmemdc2,hBitmap2);
// Scale buffer 2 up to the first buffer's dimensions.
StretchDIBits(hmemdc2, 0, 0, w, h, 0, 0,
sW, sH, pBuffer[1], &bminfo2, DIB_RGB_COLORS, SRCCOPY );
BLENDFUNCTION bStruct;
bStruct.BlendOp = AC_SRC_OVER;
bStruct.BlendFlags = 0;
bStruct.SourceConstantAlpha = 255; // rely on per-pixel alpha only
bStruct.AlphaFormat = AC_SRC_ALPHA;
SelectObject(hmemdc1,hBitmap1);
SelectObject(hmemdc2,hBitmap2);
//blend bmp2 on bmp1
BOOL res = AlphaBlend(hmemdc1, 0, 0, w, h, hmemdc2, 0, 0, w, h, bStruct);
//for testing output
SelectObject(hmemdc1,hBitmap1);
BitBlt(GetDC(0),0,0,width,height,hmemdc1,100,100,SRCCOPY);
//copy the bitmap buffer
memcpy(out, pBytes1, (w * m_nBytesPerPixel) * h);
I am not sure if it is possible to use AlphaBlend function to mix bitmaps per-pixel based from 2 memory DCs. Any help would be highly appreciated.
This part is wrong:
bminfo1.bmiHeader.biSize = sizeof( BITMAPINFO );
It should be sizeof(BITMAPINFOHEADER) otherwise it ruins everything. Also you can't use GetDC(0) for any proper painting. Use instead:
HDC hdc = GetDC(hwnd);
...
ReleaseDC(hwnd, hdc);
or use HDC from BeginPaint. Since you are using GDI+ then you must have HBITMAP handles from bmp->GetHBITMAP(), there is no reason to convert to memory and back to HBITMAP
For AlphaBlend set SourceConstantAlpha = 128; in case alpha channel is not set.
// Blend hbitmap1 over hbitmap2 with 50% constant alpha, then stretch the
// blended result onto hdc to fill rc. Caller keeps ownership of both bitmaps.
void blend(HDC hdc, RECT rc, HBITMAP hbitmap1, HBITMAP hbitmap2)
{
    HDC memdc1 = CreateCompatibleDC(hdc);
    HDC memdc2 = CreateCompatibleDC(hdc);

    BITMAP bmp1, bmp2;
    GetObject(hbitmap1, sizeof(BITMAP), &bmp1);
    GetObject(hbitmap2, sizeof(BITMAP), &bmp2);

    // FIX: remember the DCs' stock bitmaps so the caller-owned bitmaps can be
    // deselected before DeleteDC (the original deleted the DCs while the
    // caller's bitmaps were still selected).
    HGDIOBJ old1 = SelectObject(memdc1, hbitmap1);
    HGDIOBJ old2 = SelectObject(memdc2, hbitmap2);

    BLENDFUNCTION blend = { 0 };
    blend.BlendOp = AC_SRC_OVER;       // explicit for clarity (AC_SRC_OVER == 0)
    blend.SourceConstantAlpha = 128;   // 50/50 blend; AlphaFormat 0 = no per-pixel alpha

    SetStretchBltMode(hdc, COLORONCOLOR);
    // Blend image 1 into image 2 in place...
    AlphaBlend(memdc2, 0, 0, bmp2.bmWidth, bmp2.bmHeight, memdc1, 0, 0, bmp1.bmWidth, bmp1.bmHeight, blend);
    // ...then scale the blended result to the target rectangle.
    StretchBlt(hdc, 0, 0, rc.right, rc.bottom, memdc2, 0, 0, bmp2.bmWidth, bmp2.bmHeight, SRCCOPY);
    //or create another memdc to get dibs

    SelectObject(memdc1, old1);
    SelectObject(memdc2, old2);
    DeleteDC(memdc1);
    DeleteDC(memdc2);
}
In case you want to get dibs, then don't draw on hdc, instead create a third memdc and another HBITMAP, then use GetDIBits
HDC memdc = CreateCompatibleDC(hdc);
HBITMAP hbmp = CreateCompatibleBitmap(hdc, rc.right, rc.bottom);
SelectObject(memdc, hbmp);
SetStretchBltMode(memdc, COLORONCOLOR);
StretchBlt(memdc, 0, 0, rc.right, rc.bottom,
memdc2, 0, 0, bmp2.bmWidth, bmp2.bmHeight, SRCCOPY);
int w = rc.right;
int h = rc.bottom;
BITMAPINFOHEADER bmpInfoHeader = { sizeof(BITMAPINFOHEADER) };
bmpInfoHeader.biWidth = w;
bmpInfoHeader.biHeight = h;
bmpInfoHeader.biBitCount = 32;
bmpInfoHeader.biCompression = BI_RGB;
bmpInfoHeader.biPlanes = 1;
DWORD size = w * 4 * h;
char *dib = new char[size];
GetDIBits(hdc, hbmp, 0, h, dib, (BITMAPINFO*)&bmpInfoHeader, DIB_RGB_COLORS);
...
DeleteDC(memdc);
DeleteObject(hbitmap);
delete[]dib;
Edit
Method 2: This method should be faster because it uses one StretchBlt and one AlphaBlend. This way you can use pre-computed alphas, although it's not necessary.
Use the other method with 2 AlphaBlend only if you want to blend both images with background.
// Set every pixel's alpha to 128 and premultiply its color channels, since
// AlphaBlend with AC_SRC_ALPHA expects premultiplied source data.
void modify_bits(HDC hdc, HBITMAP hbitmap)
{ //expecting 32-bit bitmap
    BITMAP bm = { 0 };
    GetObject(hbitmap, sizeof(bm), &bm);
    int w = bm.bmWidth;
    int h = bm.bmHeight;
    BITMAPINFOHEADER bmpInfoHeader = { sizeof(BITMAPINFOHEADER),
        w, h, 1, 32, BI_RGB, 0, 0, 0, 0, 0 };
    BYTE* bits = new BYTE[w * h * 4];
    if (GetDIBits(hdc, hbitmap, 0, h, bits, (BITMAPINFO*)&bmpInfoHeader, DIB_RGB_COLORS)) {
        // FIX: the original nested loops were mislabeled (x over width outer,
        // y over height inner) while walking the buffer linearly; one flat
        // loop over all w*h BGRA pixels says what actually happens.
        BYTE* p = bits;
        for (int i = 0; i < w * h; i++) {
            p[3] = 128;               // constant 50% alpha
            p[0] = p[0] * p[3] / 255; // premultiply B
            p[1] = p[1] * p[3] / 255; // premultiply G
            p[2] = p[2] * p[3] / 255; // premultiply R
            p += 4;
        }
        SetDIBits(hdc, hbitmap, 0, h, bits, (BITMAPINFO*)&bmpInfoHeader, DIB_RGB_COLORS);
    }
    delete[] bits;
}
// Draw hbitmap1 stretched to rc, then AlphaBlend hbitmap2 on top of it.
// hbitmap2's bits are first premultiplied with alpha=128 by modify_bits.
void blend2(HDC hdc, RECT rc, HBITMAP hbitmap1, HBITMAP hbitmap2)
{
    int w = rc.right;
    int h = rc.bottom;
    modify_bits(hdc, hbitmap2);

    HDC memdc1 = CreateCompatibleDC(hdc);
    HDC memdc2 = CreateCompatibleDC(hdc);

    BITMAP bmp1, bmp2;
    GetObject(hbitmap1, sizeof(BITMAP), &bmp1);
    GetObject(hbitmap2, sizeof(BITMAP), &bmp2);
    int w1 = bmp1.bmWidth;
    int h1 = bmp1.bmHeight;
    int w2 = bmp2.bmWidth;
    int h2 = bmp2.bmHeight;

    // FIX: keep the stock bitmaps so the caller's bitmaps can be deselected
    // before DeleteDC (the original never restored them).
    HGDIOBJ old1 = SelectObject(memdc1, hbitmap1);
    HGDIOBJ old2 = SelectObject(memdc2, hbitmap2);

    BLENDFUNCTION blend = { 0 };
    blend.BlendOp = AC_SRC_OVER;
    blend.BlendFlags = 0;
    blend.SourceConstantAlpha = 255;  // use the per-pixel alpha only
    blend.AlphaFormat = AC_SRC_ALPHA; // source carries premultiplied alpha

    SetStretchBltMode(hdc, COLORONCOLOR);
    //draw first image normally:
    StretchBlt(hdc, 0, 0, w, h, memdc1, 0, 0, w1, h1, SRCCOPY);
    //AlphaBlend the second image:
    AlphaBlend(hdc, 0, 0, w, h, memdc2, 0, 0, w2, h2, blend);

    SelectObject(memdc1, old1);
    SelectObject(memdc2, old2);
    DeleteDC(memdc1);
    DeleteDC(memdc2);
}

AlphaBlend and 16-bit color mode

Win32 graphics is not my gig, but I have to do some alpha blending. The following code works fine in 32-bit color mode but displays nothing except the white background in 16-bit mode. Sorry for the length, but I don't know where it's going wrong. This is as compact as I could make it.
hbm is a 32-bit ARGB bitmap with varying per-pixel alpha, size 16x16 (so, cx = cy = 16).
// Question code (as posted): premultiply a 32bpp ARGB bitmap's channels and
// AlphaBlend it into a white memory DC, then blit that to the window.
// Create a memory DC to construct the bits
HDC hdc = GetDC(hWnd);
HDC hdcMem = CreateCompatibleDC(hdc);
HBITMAP hbmMem = CreateBitmap(cx, cy, 1, 32, NULL);
SelectObject(hdcMem, hbmMem);
// Fill the BG
RECT rc = { 0, 0, cx, cy };
FillRect(hdcMem, &rc, (HBRUSH)GetStockObject(WHITE_BRUSH));
// Get the bitmap bits
BITMAPINFO bmi;
ZeroMemory(&bmi, sizeof(bmi));
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth = cx;
bmi.bmiHeader.biHeight = cy;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biCompression = BI_RGB;
std::unique_ptr<BYTE[]> pvBits(new BYTE[cx * cy * 4]);
GetDIBits(hdcMem, hbm, 0, cy, reinterpret_cast<void*>(pvBits.get()), &bmi, DIB_RGB_COLORS);
// Premultiply all color channel values by the per-pixel alpha.
// (AlphaBlend with AC_SRC_ALPHA requires premultiplied color channels.)
int ctPixels = cx * cy;
BYTE *prgba = pvBits.get();
for (int i = 0; i < ctPixels; ++i)
{
int alpha = *(prgba + 3); // 4th byte of each BGRA pixel is the alpha
for (int j = 0; j <= 2; ++j)
{
int k = *prgba;
*prgba++ = k * alpha / 255;
}
++prgba; // skip past the alpha byte
}
// Put the new bits back
// NOTE(review): on a 16-bit display SetDIBits converts this to a 16bpp DDB
// with no room for alpha — the root cause identified in the answer below.
SetDIBits(hdcMem, hbm, 0, cy, reinterpret_cast<void*>(pvBits.get()), &bmi, DIB_RGB_COLORS);
// Alpha blend into memory DC
HDC hdcSrc = CreateCompatibleDC(hdcMem);
HBITMAP hbmOld = static_cast<HBITMAP>(SelectObject(hdcSrc, hbm));
BLENDFUNCTION bfn = { AC_SRC_OVER, 0, 255, AC_SRC_ALPHA };
AlphaBlend(hdcMem, 0, 0, cx, cy, hdcSrc, 0, 0, cx, cy, bfn);
SelectObject(hdcSrc, hbmOld);
DeleteDC(hdcSrc);
// Blit the memory DC to the screen
BitBlt(hdc, 0, 0, cx, cy, hdcMem, 0, 0, SRCCOPY);
I have some vague suspicions about CreateCompatibleDC but other than that I'm flying blind.
Any help appreciated. TIA.
According to Microsoft's documentation, per-pixel alpha values are only supported with 32-bit bitmaps. SetDIBits converts your 32-bit DIB into a 16-bit DDB if the display is using 16-bit colour, and a 16-bit DDB has no space to store alpha values. You'll have to set bfn.SourceConstantAlpha to your alpha value instead to get this to work. You won't need to premultiply your bitmap in that case either.

Capture bitmap of entire software window with winapi

I'm trying to capture several software window and print them to my software. The goal is to be able to view two different software side to side even if they overlap.
// Capture the client area of each tracked window and paint the captures
// side by side into hWnd during WM_PAINT processing.
void MyWindowFinder::drawTo(HWND hWnd){
    MyWindow* currentWindow = anchorWindow;
    PAINTSTRUCT ps;
    HDC hdcRendering = BeginPaint(hWnd, &ps);
    int widthToRender = 0;
    for(int i = 0; i < this->anchorWindow->winID ; i++){
        RECT clientRect;
        GetClientRect(currentWindow->hwnd, &clientRect);
        int width = clientRect.right - clientRect.left;
        int height = clientRect.bottom - clientRect.top;

        HDC hdcCapture = GetDC(currentWindow->hwnd);
        if (!hdcCapture){
            break; // FIX: was `return`, which skipped EndPaint entirely
        }
        HDC hdcMemCapture = CreateCompatibleDC(hdcCapture);
        HBITMAP hBitmap = CreateCompatibleBitmap(hdcCapture, width, height);
        HGDIOBJ oldCaptureBitmap = SelectObject(hdcMemCapture, hBitmap);

        // Snapshot the source window's client area into the memory bitmap.
        BitBlt(hdcMemCapture, 0, 0, width, height, hdcCapture, 0, 0, SRCCOPY|CAPTUREBLT);
        // Copy the capture onto the paint DC at the next horizontal offset.
        // (FIX: removed the SelectObject of hBitmap into a second memory DC —
        // a bitmap can only be selected into one DC at a time, and that
        // selection was never used for drawing anyway.)
        BitBlt(hdcRendering, widthToRender, 0, width, height, hdcMemCapture, 0, 0, SRCCOPY);
        widthToRender += width;

        SelectObject(hdcMemCapture, oldCaptureBitmap);
        DeleteDC(hdcMemCapture);
        // FIX: a DC obtained with GetDC must be released with ReleaseDC,
        // never DeleteDC.
        ReleaseDC(currentWindow->hwnd, hdcCapture);
        DeleteObject(hBitmap);
        currentWindow = currentWindow->previousWindow;
    }
    // FIX: removed the unused hdcMemRendering and the DeleteDC on the
    // BeginPaint DC — a paint DC must only be released via EndPaint.
    EndPaint(hWnd, &ps);
}
This works pretty well, and I can display two overlapping windows side by side in my software:
However I'm facing a tricky problem. When I click on any element of one of my children software, they are not drawn in my software.
Do you have any idea why, and how I could solve this problem ?

GetDIBits: Where's that pixel? (x, y coordinates)

So, I'm testing the following function FindPixel with the following app. The HWND and COLORREF are constants that I've determined with Spy++ and Color Cop for debugging; the final version of the program will find these automatically.
I've confirmed that the part of this algorithm which determines whether the color exists works (ie: if the color exists anywhere in the window, the if statement for that is true eventually, and if it doesn't the if statement is never true), however, I cannot figure out how to isolate which pixel this occurs on. The line SetCursorPos(rect.left+i, rect.top+i2); does not move the mouse anywhere near the correct location.
The window I'm debugging this with is entirely white save for the one pixel with the value 16776960. The function can tell that it's there, but the values of (i, i2) are not the (x, y) coordinates of where it occurs.
Is there something I'm missing here?
#include <Windows.h>
// Question code (as posted): search hWnd's pixels for `target` and move the
// cursor there. NOTE(review): the indexing below assumes each row is packed
// at exactly width*3 bytes, but 24bpp DIB rows are padded to a DWORD
// boundary — this missing stride is the bug the self-answer below fixes.
void FindPixel(HWND hWnd, COLORREF target)
{
HDC hDC = GetDC(hWnd);
HDC memDC = CreateCompatibleDC (hDC);
BYTE *ScreenData = NULL;
HBITMAP hBitmap;
BITMAPINFOHEADER bmHeader = {0};
RECT rect;
int width, height;
int i, i2;
GetWindowRect(hWnd, &rect);
width = rect.right-rect.left;
height = rect.bottom-rect.top;
// 4 bytes per pixel allocated, though only 3 are consumed per pixel below.
ScreenData = (BYTE*)malloc(4*width*height);
hBitmap = CreateCompatibleBitmap(hDC, width, height);
bmHeader.biSize = sizeof(BITMAPINFOHEADER);
bmHeader.biPlanes = 1;
bmHeader.biBitCount = 24;
bmHeader.biWidth = width;
// Negative height requests a top-down DIB (row 0 is the top of the image).
bmHeader.biHeight = -height;
bmHeader.biCompression = BI_RGB;
SelectObject(memDC, hBitmap);
// Snapshot the window into the memory bitmap, then pull out the raw bits.
BitBlt(memDC, 0, 0, width, height, hDC, 0, 0, SRCCOPY);
GetDIBits(hDC, hBitmap, 0, height, ScreenData, (BITMAPINFO*)&bmHeader, DIB_RGB_COLORS);
// i=0;
for(i = 0; i < width; i++)
{
for(i2 = 0; i2 < height; i2++)
{
// DIB pixel bytes are ordered B,G,R — hence the +2/+1/+0 channel offsets.
if(RGB((ScreenData[3*((i2*width)+i)+2]),(ScreenData[3*((i2*width)+i)+1]), ScreenData[3*((i2*width)+i)])==target)
{
SetCursorPos(rect.left+i, rect.top+i2);
DeleteObject(hBitmap);
DeleteDC(memDC);
free(ScreenData);
ReleaseDC(hWnd, hDC);
return;
}
}
}
DeleteObject(hBitmap);
DeleteDC(memDC);
free(ScreenData);
ReleaseDC(hWnd, hDC);
}
// Debug driver: waits a moment for the target window to settle, then hunts
// for the hard-coded colour in the hard-coded window (values obtained with
// Spy++ and Color Cop; the final program will discover them automatically).
int APIENTRY WinMain(HINSTANCE hi, HINSTANCE hpi, LPSTR lpcl, int nsc)
{
    Sleep(1000);
    const HWND hWnd = (HWND)0x000C04BC; // debug-only handle from Spy++
    const COLORREF px = 16776960;       // 0x00FFFF00
    FindPixel(hWnd, px);
    return 0;
}
The problem was indeed that I was not taking the stride into account. Here is the working loop:
// Scan row by row, honoring the DIB stride: 24bpp scanlines are padded to a
// DWORD boundary, so pixel (x, y) lives at ScreenData[y*stride + 3*x].
// (FIX: the posted loop iterated i up to stride*height — a BYTE count, not a
// pixel count — while indexing ScreenData[3*i], overrunning the buffer, and
// computed x as `i % stride`, mixing pixel and byte indices.)
stride = ((((width * 24) + 31) & ~31) >> 3);  // bytes per scanline
for(i = 0; i < height; i++)
{
    for(i2 = 0; i2 < width; i2++)
    {
        const BYTE *p = ScreenData + (i * stride) + (3 * i2);
        if(RGB(p[2], p[1], p[0]) == target)
        {
            // biHeight was negative (top-down DIB), so row i maps directly
            // to a screen y offset.
            SetCursorPos(rect.left + i2, rect.top + i);
            DeleteObject(hBitmap);
            DeleteDC(memDC);
            free(ScreenData);
            ReleaseDC(hWnd, hDC);
            return;
        }
    }
}

Resources