Minimum width of a list control without horizontal scrollbar - winapi

I have a list control in report mode.
I fill this list control with data and then auto-size all columns with LVM_SETCOLUMNWIDTH. Depending on the data, the list control may end up with a horizontal scrollbar or not.
So far so good. But now I'd like to get the minimum width the list control should have so no horizontal scrollbar is needed. Knowing that size I could resize the list control in order to get rid of the horizontal scrollbar.
Any ideas?

Since you already know the required width, you can use that information and have the system calculate the corresponding window width for you. Either of the following APIs can be used: AdjustWindowRect or AdjustWindowRectEx. The height can be ignored.
int requiredWidth = 0;
for ( int column = 0; column < columnCount; ++column ) {
    // sum the widths of the (already auto-sized) columns
    requiredWidth += ListView_GetColumnWidth( hList, column );
}
RECT r = { 0, 0, requiredWidth, 1 };
DWORD style = (DWORD)::GetWindowLongPtr( hList, GWL_STYLE );
DWORD styleEx = (DWORD)::GetWindowLongPtr( hList, GWL_EXSTYLE );
::AdjustWindowRectEx( &r, style, FALSE, styleEx );
int windowWidth = r.right - r.left;
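One caveat: AdjustWindowRectEx does not take the scroll bar styles into account. If the resized control will still show a vertical scrollbar, you may need to widen the result yourself; a small, hedged addition to the snippet above:
// AdjustWindowRectEx ignores WS_VSCROLL/WS_HSCROLL, so reserve room for the
// vertical scrollbar if the list view will keep one.
if ( style & WS_VSCROLL )
    windowWidth += ::GetSystemMetrics( SM_CXVSCROLL );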

A lazy solution is to increase the width until the scrollbar disappears.
RECT r;
::GetWindowRect(hlist, &r);
RECT rc;
::GetClientRect(hparent, &rc);
POINT p { rc.right, 0 };
::ClientToScreen(hparent, &p);
int limit = p.x - r.right;
for (int i = 0; i < limit; i++)
{
    if (!(::GetWindowLong(hlist, GWL_STYLE) & WS_HSCROLL))
        break;
    r.right++;
    ::SetWindowPos(hlist, 0, 0, 0, r.right - r.left, r.bottom - r.top,
                   SWP_NOREDRAW | SWP_NOMOVE | SWP_NOZORDER);
}
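Because the loop resizes with SWP_NOREDRAW, it may be worth forcing a single repaint once the scrollbar is gone; a small follow-up, not part of the original answer:
// Repaint once at the end, since the intermediate resizes were not redrawn.
::InvalidateRect(hlist, nullptr, TRUE);
::UpdateWindow(hlist);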

Related

Dealing with cursor masks

I'm trying to get the data (byte[]) of the current mouse cursor, but one type of cursor (known as MaskedColor) needs special treatment.
http://elektronotdienst-nuernberg.de/bugs/cursor.html
It's the grab and grabbing cursors from this page.
I wonder if there's any way to somehow combine the mask with the color data.
The color data comes with alpha = 0, so there's no transparency (which is available in the mask).
The mask looks like this (32x32, 128 bytes):
And the color info is this (32x32, 4096 bytes, but with alpha=0):
Getting the cursor references:
var cursorInfo = new CursorInfo(false); //Create struct.

if (!User32.GetCursorInfo(out cursorInfo))
    return;

if (cursorInfo.Flags != ScreenToGif.Native.Constants.CursorShowing)
{
    Gdi32.DeleteObject(cursorInfo.CursorHandle);
    return;
}

var iconHandle = User32.CopyIcon(cursorInfo.CursorHandle);

if (iconHandle == IntPtr.Zero)
{
    Gdi32.DeleteObject(cursorInfo.CursorHandle);
    return;
}

if (!User32.GetIconInfo(iconHandle, out var iconInfo))
{
    User32.DestroyIcon(iconHandle);
    Gdi32.DeleteObject(cursorInfo.CursorHandle);
    return;
}
Getting the cursor color and mask buffers:
try
{
    //Color.
    var colorHeader = new BitmapInfoHeader(false);
    Gdi32.GetDIBits(_windowDeviceContext, iconInfo.Color, 0, 0, null, ref colorHeader, DibColorModes.RgbColors);

    //Mask.
    var maskHeader = new BitmapInfoHeader(false);
    Gdi32.GetDIBits(_windowDeviceContext, iconInfo.Mask, 0, 0, null, ref maskHeader, DibColorModes.RgbColors);
Whenever there's a color buffer, draw to a bitmap to get the cursor animation steps.
    if (colorHeader.Height != 0)
    {
        //Create bitmap.
        var compatibleBitmap = Gdi32.CreateCompatibleBitmap(_windowDeviceContext, colorHeader.Width, colorHeader.Height);
        var oldBitmap = Gdi32.SelectObject(_compatibleDeviceContext, compatibleBitmap);

        //Draw image.
        var ok = User32.DrawIconEx(_compatibleDeviceContext, 0, 0, cursorInfo.CursorHandle, 0, 0, _cursorStep, IntPtr.Zero, DrawIconFlags.Image);

        if (!ok)
        {
            _cursorStep = 0;
            User32.DrawIconEx(_compatibleDeviceContext, 0, 0, cursorInfo.CursorHandle, 0, 0, _cursorStep, IntPtr.Zero, DrawIconFlags.Image);
        }
        else
            _cursorStep++;
Here comes the tricky part: if there's a mask, I need to apply it to the cursor's color buffer to know which pixels are transparent and which are not.
I tried calling DrawIconEx() passing Normal (which is Mask + Color) as the last parameter, but it made no difference.
        if (maskHeader.SizeImage > 0)
        {
            //User32.DrawIconEx(_compatibleDeviceContext, 0, 0, cursorInfo.CursorHandle, 0, 0, _cursorStep, IntPtr.Zero, DrawIconFlags.Mask);
            var ok2 = Gdi32.MaskBlt(_compatibleDeviceContext, 0, 0, colorHeader.Width, colorHeader.Height, compatibleBitmap, 0, 0, iconInfo.Mask, 0, 0, (int)CopyPixelOperations.SourceErase);
        }
The last part is to actually get the final cursor as a byte[].
        colorHeader.Height *= -1;
        var colorBuffer = new byte[colorHeader.SizeImage];

        Gdi32.GetDIBits(_windowDeviceContext, compatibleBitmap, 0, (uint)(colorHeader.Height * -1), colorBuffer, ref colorHeader, DibColorModes.RgbColors);

        //Send colorBuffer

        //Erase bitmaps.
        Gdi32.SelectObject(_compatibleDeviceContext, oldBitmap);
        Gdi32.DeleteObject(compatibleBitmap);
        return;
    }
For a monochrome (mask-only) cursor, just get the mask itself as a byte[].
    var maskBuffer = new byte[maskHeader.SizeImage];
    maskHeader.Height *= -1;

    Gdi32.GetDIBits(_windowDeviceContext, iconInfo.Mask, 0, (uint)(maskHeader.Height * -1), maskBuffer, ref maskHeader, DibColorModes.RgbColors);

    //Send maskBuffer
}
finally
{
    Gdi32.DeleteObject(iconInfo.Color);
    Gdi32.DeleteObject(iconInfo.Mask);
    User32.DestroyIcon(iconHandle);
    Gdi32.DeleteObject(cursorInfo.CursorHandle);
}
Edit
Well, I can merge the mask buffer with the color buffer manually, but the issue is that I don't have a straightforward way to detect if the cursor needs the mask or not.
It works for the masked-color cursors:
But it butchers normal cursors (semi-transparent pixels turn opaque):
Both have a mask and color data, but the only difference is that the masked cursor will have its color buffer with all alpha = 0.
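That matches how Windows composes these alpha-less cursors: the AND mask is applied to the destination first and the color bitmap is then XORed on top, instead of alpha blending. A minimal plain-GDI sketch of that classic two-pass draw, just to illustrate the mask semantics (hdcDest, hdcMask and hdcColor are assumed memory DCs holding the destination, the AND mask and the color bitmap; they are not part of my code):
// screen = (screen AND mask) XOR color
BitBlt(hdcDest, 0, 0, 32, 32, hdcMask, 0, 0, SRCAND);     // mask bit 1 keeps the screen pixel, 0 clears it
BitBlt(hdcDest, 0, 0, 32, 32, hdcColor, 0, 0, SRCINVERT); // XOR the color data into the cleared pixels
With that in mind, the manual merge looks like this: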
var colorHeight = colorHeader.Height * -1;
var colorWidth = colorHeader.Width; //Bug: For some reason, after calling GetDIBits() for the mask, the width of the color struct shifts.

var maskBuffer2 = new byte[maskHeader.SizeImage];
maskHeader.Height *= -1;

Gdi32.GetDIBits(_windowDeviceContext, iconInfo.Mask, 0, (uint)(maskHeader.Height * -1), maskBuffer2, ref maskHeader, DibColorModes.RgbColors);

var targetPitch = colorBuffer.Length / colorHeight;
var cursorPitch = maskBuffer2.Length / (maskHeader.Height * -1);

//Merge mask with color (rows iterate scanlines, columns iterate pixels).
for (var row = 0; row < colorHeight; row++)
{
    //128 in binary.
    byte mask = 0x80;

    for (var col = 0; col < colorWidth; col++)
    {
        //Reads the current mask bit and merges it into the pixel's alpha.
        //Each mask byte holds information for 8 pixels.
        var targetIndex = row * targetPitch + col * 4;
        var xor = (maskBuffer2[row * cursorPitch + col / 8] & mask) == mask;

        colorBuffer[targetIndex + 3] = (byte)(xor ? 255 : 0);

        //Shifts the mask bit until it reaches 1, then resets it back to 128.
        if (mask == 0x01)
            mask = 0x80;
        else
            mask = (byte)(mask >> 1);
    }
}
The non-elegant way to know for sure that I need the mask is to check all the alpha values in the color buffer: if they are all zero, then I need the mask.
var needsMask = true;

for (var index = 0; index < colorBuffer.Length; index += 4)
{
    //The alpha byte is the fourth byte of each BGRA pixel.
    if (colorBuffer[index + 3] == 0)
        continue;

    //If there's any non-zero alpha value, it means that the mask is not necessary.
    needsMask = false;
    break;
}
if (!needsMask)
{
    //Send colorBuffer.
    return;
}

//Send colorBuffer + maskBuffer.
At least it gives me the correct result:

difference when rendering horizontal line using TextOut char by char vs all at once

I am writing a Win32 low-level GUI app that emulates a console app. I use a fixed-width font; my test uses Cascadia Mono, but I have the same issue with any fixed-width font.
The console app is trying to draw a horizontal line using the U+2500 character.
I output the characters that the app passes me one by one. When I do that I get gaps between the horizontal line characters; when I output them in one call to TextOut those gaps are filled in.
I made this using the VS C++ Windows app template and added this code to the WM_PAINT handler:
auto nHeight = -MulDiv(48, GetDeviceCaps(hdc, LOGPIXELSY), 72);
auto hfont = CreateFont(
    nHeight,
    0,
    0,
    0,
    100, //200,
    0,
    0,
    0,
    DEFAULT_CHARSET,
    OUT_OUTLINE_PRECIS,
    CLIP_DEFAULT_PRECIS,
    CLEARTYPE_QUALITY,
    FIXED_PITCH,
    L"Cascadia Mono"
);

TEXTMETRIC tm;
SelectObject(hdc, hfont);
GetTextMetrics(hdc, &tm);

auto str = L"kkkkkk─────k";
TextOut(hdc, 0, 0, str, 12);

for (int i = 0; i < 12; i++)
{
    TextOut(hdc, i * tm.tmAveCharWidth, tm.tmHeight, &str[i], 1);
}
This displays:
You can see that this is not due to me miscalculating the char cell width; the strings are exactly aligned, there are just some added pixels in the upper one. Also notice some extra 'knobbiness' where the joins are. Very odd. Also note that the right edge of the last k before the line starts is slightly chopped off in the char-by-char one, but not in the all-at-once one.
So why am I doing it char by char? Because I need to specify font weight, background and foreground for each cell.
Instead of using TextOut, you can use DrawText, which is a bit more high-level, like this:
for (int i = 0; i < 12; i++)
{
    RECT rc;
    rc.left = i * tm.tmAveCharWidth;
    rc.top = tm.tmHeight;
    rc.right = rc.left + 50; // todo: make sure this is ok
    rc.bottom = rc.top + 100;
    DrawText(hdc, (LPWSTR)&str[i], 1, &rc, 0);
}
And it seems to fix the "lineness" of it, although it's not 100% exactly the same (there are some pixels that show a difference):
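Another option that may be worth experimenting with, since the goal is per-cell weight/background/foreground anyway, is ExtTextOut with an explicit cell rectangle and ETO_OPAQUE, so every cell paints its own background and the cells tile edge to edge. This is only a sketch (the third-row position and the per-cell color calls are my assumptions, not part of the question or answer):
for (int i = 0; i < 12; i++)
{
    RECT cell;
    cell.left = i * tm.tmAveCharWidth;
    cell.top = 2 * tm.tmHeight;                  // a third row, below the two test rows
    cell.right = cell.left + tm.tmAveCharWidth;
    cell.bottom = cell.top + tm.tmHeight;
    // SetTextColor(hdc, ...); SetBkColor(hdc, ...);  // per-cell colors would go here
    ExtTextOut(hdc, cell.left, cell.top, ETO_OPAQUE, &cell, &str[i], 1, nullptr);
}
Whether this closes the gaps depends on the font's actual advance width for U+2500, so it may still be worth comparing tm.tmAveCharWidth against GetCharWidth32 for that glyph.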

Animating sprites at random rates

I have a program that displays 5 explosions in a line at the same time. I would like to know how to get each explosion to animate at different rates. Here is the code that actually animates the sprites:
//=========================================================================
//
// Helper functions
//
//========================================================================
void Sprite_Draw_Frame(LPDIRECT3DTEXTURE9 texture, int destx, int desty, int framenum, int framew, int frameh, int columns)
{
    D3DXVECTOR3 position( (float)destx, (float)desty, 0);
    D3DCOLOR white = D3DCOLOR_XRGB(255, 255, 255);

    RECT rect;
    rect.left = (framenum % columns) * framew;
    rect.top = (framenum / columns) * frameh;
    rect.right = rect.left + framew;
    rect.bottom = rect.top + frameh;

    spriteobj->Draw(texture, &rect, NULL, &position, white);
}

void Sprite_Animate(int &frame, int startframe, int endframe, int direction, int &starttime, int delay)
{
    if ((int)GetTickCount() > starttime + delay)
    {
        starttime = GetTickCount();
        frame += direction;

        if (frame > endframe) frame = startframe;
        if (frame < startframe) frame = endframe;
    }
}
//============================================================================
//
// Function calls
//
//==============================================================================
I tried to use random numbers for the delay variable in Sprite_Animate so that the frames would be delayed at different rates. However, the explosions continued to animate in sync.
The Sprite_Animate function just updates the global variables frame and starttime to continue drawing each new frame with the Sprite_Draw_Frame function.
//animate and draw the sprite
for (int i = 0; i < 5; i++)
{
    Sprite_Animate(frame, 0, 29, 1, starttime, rand() % 100);
    Sprite_Draw_Frame(explosion, (i * 100), 100, frame, 128, 128, 6);
}
There is a simple solution to this, and if anyone wants the full code I can send it to you by request.
So I figured it out. The reason why they were all in sync even with random delay values is that they were all sharing the same global variables frame and starttime.
So instead I created an EXPLOSION structure with the member variables frame and starttime, and used a vector to contain one per explosion. After that it just had to be iterated through.
The new code is here in case someone else comes along this problem.
//The structure to contain each explosion's own frame and starttime variables
struct EXPLOSION
{
    int frame;
    int starttime;
};

//============================================================================
//
// Function calls
//
//==============================================================================

//animate and draw the sprite
for (int i = 0; i < 5; i++)
{
    Sprite_Animate(explosions[i].frame, 0, 29, 1, explosions[i].starttime, rand() % 100);
    Sprite_Draw_Frame(explosion, i * 100, 100, explosions[i].frame, 128, 128, 6);
}
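For completeness, the vector has to be filled before the render loop runs. A possible setup (my own sketch, not from the original post) that also staggers the starting frames so the five explosions drift apart immediately:
//Give each explosion its own state before the render loop starts (needs <vector>).
std::vector<EXPLOSION> explosions(5);
for (auto &e : explosions)
{
    e.frame = rand() % 30;             //start on a random frame (0-29)
    e.starttime = (int)GetTickCount(); //each explosion keeps its own timer
}
Storing a fixed per-explosion delay (an extra member, not in the struct above) instead of re-rolling rand() % 100 every frame would make the individual rates steadier and more clearly distinct.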

GetDIBits: Where's that pixel? (x, y coordinates)

So, I'm testing the following function FindPixel with the following app. The HWND and COLORREF are constants that I've determined with Spy++ and Color Cop for debugging; the final version of the program will find these automatically.
I've confirmed that the part of this algorithm which determines whether the color exists works (i.e. if the color exists anywhere in the window, the if statement for it eventually becomes true, and if it doesn't, the if statement is never true). However, I cannot figure out how to isolate which pixel this occurs on. The line SetCursorPos(rect.left+i, rect.top+i2); does not move the mouse anywhere near the correct location.
The window I'm debugging this with is entirely white save for the one pixel with the value 16776960. The function can tell that it's there, but the values of (i, i2) are not the (x, y) coordinates of where it occurs.
Is there something I'm missing here?
#include <Windows.h>

void FindPixel(HWND hWnd, COLORREF target)
{
    HDC hDC = GetDC(hWnd);
    HDC memDC = CreateCompatibleDC(hDC);
    BYTE *ScreenData = NULL;
    HBITMAP hBitmap;
    BITMAPINFOHEADER bmHeader = {0};
    RECT rect;
    int width, height;
    int i, i2;

    GetWindowRect(hWnd, &rect);
    width = rect.right - rect.left;
    height = rect.bottom - rect.top;

    ScreenData = (BYTE*)malloc(4 * width * height);
    hBitmap = CreateCompatibleBitmap(hDC, width, height);

    bmHeader.biSize = sizeof(BITMAPINFOHEADER);
    bmHeader.biPlanes = 1;
    bmHeader.biBitCount = 24;
    bmHeader.biWidth = width;
    bmHeader.biHeight = -height;
    bmHeader.biCompression = BI_RGB;

    SelectObject(memDC, hBitmap);
    BitBlt(memDC, 0, 0, width, height, hDC, 0, 0, SRCCOPY);
    GetDIBits(hDC, hBitmap, 0, height, ScreenData, (BITMAPINFO*)&bmHeader, DIB_RGB_COLORS);

    // i=0;
    for (i = 0; i < width; i++)
    {
        for (i2 = 0; i2 < height; i2++)
        {
            if (RGB((ScreenData[3*((i2*width)+i)+2]), (ScreenData[3*((i2*width)+i)+1]), ScreenData[3*((i2*width)+i)]) == target)
            {
                SetCursorPos(rect.left + i, rect.top + i2);
                DeleteObject(hBitmap);
                DeleteDC(memDC);
                free(ScreenData);
                ReleaseDC(hWnd, hDC);
                return;
            }
        }
    }

    DeleteObject(hBitmap);
    DeleteDC(memDC);
    free(ScreenData);
    ReleaseDC(hWnd, hDC);
}

int APIENTRY WinMain(HINSTANCE hi, HINSTANCE hpi, LPSTR lpcl, int nsc)
{
    const COLORREF px = 16776960;
    const HWND hWnd = (HWND)0x000C04BC;

    Sleep(1000);
    FindPixel(hWnd, px);
    return 0;
}
int APIENTRY WinMain(HINSTANCE hi, HINSTANCE hpi, LPSTR lpcl, int nsc)
{
const COLORREF px = 16776960;
const HWND hWnd = (HWND)0x000C04BC;
Sleep(1000);
FindPixel(hWnd, px);
return 0;
}
The problem was indeed that I was not taking the stride into account. Here is the working loop:
stride = ((((width * 24) + 31) & ~31) >> 3); // bytes per scanline, rounded up to a DWORD boundary
totalpx = width * height;

for (i = 0; i < totalpx; i++)
{
    int x = i % width;
    int y = (i - x) / width;

    if (RGB(
        (ScreenData[(y * stride) + (3 * x) + 2]),
        (ScreenData[(y * stride) + (3 * x) + 1]),
        (ScreenData[(y * stride) + (3 * x) + 0])) == target)
    {
        SetCursorPos(rect.left + x, rect.top + y);
        DeleteObject(hBitmap);
        DeleteDC(memDC);
        free(ScreenData);
        ReleaseDC(hWnd, hDC);
        return;
    }
}
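For what it's worth, an alternative that avoids the stride arithmetic altogether is to ask GetDIBits for 32-bit pixels, since 32bpp rows are always DWORD-aligned. A hedged sketch built on the same variables as the code above:
// Request BGRA pixels: with biBitCount = 32 every row is exactly 4 * width bytes.
bmHeader.biBitCount = 32;
GetDIBits(hDC, hBitmap, 0, height, ScreenData, (BITMAPINFO*)&bmHeader, DIB_RGB_COLORS);

for (int y = 0; y < height; y++)
{
    for (int x = 0; x < width; x++)
    {
        const BYTE *px = &ScreenData[4 * ((y * width) + x)]; // B, G, R, X
        if (RGB(px[2], px[1], px[0]) == target)
        {
            SetCursorPos(rect.left + x, rect.top + y);
            // ...clean up and return exactly as in the loop above.
        }
    }
}
The 4 * width * height buffer allocated earlier is already the right size for this layout.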

UpdateLayeredWindow and DrawText

I'm using UpdateLayeredWindow to display an application window. I have created my own custom buttons and I would like to create my own static text. The problem is that when I try to draw the text on the hdc, the DrawText or TextOut functions overwrite the alpha channel of my picture and the text becomes transparent. I tried to find a solution to this but I could not find any. My custom controls are designed in such a way that they do all the drawing in a member function called Draw(HDC hDc), so they can only access the hdc. I would like to keep this design. Can anyone help me? I am using MFC and I want to achieve the desired result without the use of GDI+.
I know this is an old post ... but I just had this very same problem ... and it was driving me CRAZY.
Eventually, I stumbled upon this post by Mike Sutton on the microsoft.public.win32.programmer.gdi newsgroup ... from almost 7 years ago!
Basically, DrawText (and TextOut) do not play nicely with the alpha channel and UpdateLayeredWindow; you need to premultiply the R, G, and B channels with the alpha channel.
In Mike's post, he shows how he creates another DIB (device independent bitmap) upon which he draws the text ... and alpha blends that into the other bitmap.
After doing this, my text looked perfect!
Just in case the link to the newsgroup post dies, I am going to include the code here. All credit goes to Mike Sutton (#mikedsutton).
Here is the code that creates the alpha blended bitmap with the text on it:
HBITMAP CreateAlphaTextBitmap(LPCSTR inText, HFONT inFont, COLORREF inColour)
{
    int TextLength = (int)strlen(inText);
    if (TextLength <= 0) return NULL;

    // Create DC and select font into it
    HDC hTextDC = CreateCompatibleDC(NULL);
    HFONT hOldFont = (HFONT)SelectObject(hTextDC, inFont);
    HBITMAP hMyDIB = NULL;

    // Get text area
    RECT TextArea = {0, 0, 0, 0};
    DrawText(hTextDC, inText, TextLength, &TextArea, DT_CALCRECT);
    if ((TextArea.right > TextArea.left) && (TextArea.bottom > TextArea.top))
    {
        BITMAPINFOHEADER BMIH;
        memset(&BMIH, 0x0, sizeof(BITMAPINFOHEADER));
        void *pvBits = NULL;

        // Specify DIB setup
        BMIH.biSize = sizeof(BMIH);
        BMIH.biWidth = TextArea.right - TextArea.left;
        BMIH.biHeight = TextArea.bottom - TextArea.top;
        BMIH.biPlanes = 1;
        BMIH.biBitCount = 32;
        BMIH.biCompression = BI_RGB;

        // Create and select DIB into DC
        hMyDIB = CreateDIBSection(hTextDC, (LPBITMAPINFO)&BMIH, 0, (LPVOID*)&pvBits, NULL, 0);
        HBITMAP hOldBMP = (HBITMAP)SelectObject(hTextDC, hMyDIB);
        if (hOldBMP != NULL)
        {
            // Set up DC properties
            SetTextColor(hTextDC, 0x00FFFFFF);
            SetBkColor(hTextDC, 0x00000000);
            SetBkMode(hTextDC, OPAQUE);

            // Draw text to buffer
            DrawText(hTextDC, inText, TextLength, &TextArea, DT_NOCLIP);
            BYTE* DataPtr = (BYTE*)pvBits;
            BYTE FillR = GetRValue(inColour);
            BYTE FillG = GetGValue(inColour);
            BYTE FillB = GetBValue(inColour);
            BYTE ThisA;
            for (int LoopY = 0; LoopY < BMIH.biHeight; LoopY++) {
                for (int LoopX = 0; LoopX < BMIH.biWidth; LoopX++) {
                    ThisA = *DataPtr; // Move alpha and pre-multiply with RGB
                    *DataPtr++ = (FillB * ThisA) >> 8;
                    *DataPtr++ = (FillG * ThisA) >> 8;
                    *DataPtr++ = (FillR * ThisA) >> 8;
                    *DataPtr++ = ThisA; // Set Alpha
                }
            }

            // De-select bitmap
            SelectObject(hTextDC, hOldBMP);
        }
    }

    // De-select font and destroy temp DC
    SelectObject(hTextDC, hOldFont);
    DeleteDC(hTextDC);

    // Return DIBSection
    return hMyDIB;
}
Here is the code that drives the CreateAlphaTextBitmap method:
void TestAlphaText(HDC inDC, int inX, int inY)
{
    const char *DemoText = "Hello World!\0";
    RECT TextArea = {0, 0, 0, 0};

    HFONT TempFont = CreateFont(50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "Arial\0");
    HBITMAP MyBMP = CreateAlphaTextBitmap(DemoText, TempFont, 0xFF);
    DeleteObject(TempFont);

    if (MyBMP)
    {
        // Create temporary DC and select new Bitmap into it
        HDC hTempDC = CreateCompatibleDC(inDC);
        HBITMAP hOldBMP = (HBITMAP)SelectObject(hTempDC, MyBMP);
        if (hOldBMP)
        {
            // Get Bitmap image size
            BITMAP BMInf;
            GetObject(MyBMP, sizeof(BITMAP), &BMInf);

            // Fill blend function and blend new text to window
            BLENDFUNCTION bf;
            bf.BlendOp = AC_SRC_OVER;
            bf.BlendFlags = 0;
            bf.SourceConstantAlpha = 0x80;
            bf.AlphaFormat = AC_SRC_ALPHA;
            AlphaBlend(inDC, inX, inY, BMInf.bmWidth, BMInf.bmHeight, hTempDC, 0, 0, BMInf.bmWidth, BMInf.bmHeight, bf);

            // Clean up
            SelectObject(hTempDC, hOldBMP);
            DeleteObject(MyBMP);
            DeleteDC(hTempDC);
        }
    }
}
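For completeness, after the text has been blended into the layered window's back-buffer DC, the premultiplied result still has to be pushed to the screen with UpdateLayeredWindow using ULW_ALPHA. A hedged sketch of that last step (hWnd, hdcScreen, hdcMem, ptDst and sizeWnd are assumed names, not part of Mike's code):
// Push the premultiplied BGRA back buffer to the layered window.
BLENDFUNCTION blend = { AC_SRC_OVER, 0, 255, AC_SRC_ALPHA };
POINT ptSrc = { 0, 0 };
UpdateLayeredWindow(hWnd, hdcScreen, &ptDst, &sizeWnd, hdcMem, &ptSrc, 0, &blend, ULW_ALPHA);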
