Can you get the RGB value from an HBRUSH or from a brush ID? For example, I'm looking for the RGB value of GRAY_BRUSH.
You want to use the GetObject function to retrieve a LOGBRUSH structure that contains the brush color:
static COLORREF lbColor;
HBRUSH hb = GetSysColorBrush(COLOR_BACKGROUND);

LOGBRUSH br = { 0 };
if (GetObject(hb, sizeof(br), &br))
{
    lbColor = br.lbColor;

    // Split the COLORREF into its individual channels.
    RGBQUAD rgbq = { 0 };
    rgbq.rgbBlue = GetBValue(lbColor);
    rgbq.rgbGreen = GetGValue(lbColor);
    rgbq.rgbRed = GetRValue(lbColor);
    rgbq.rgbReserved = 0;
    //...
}
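The same approach works for the stock GRAY_BRUSH mentioned in the question; a minimal sketch, assuming the handle comes from GetStockObject:
// Sketch: query the color of the stock GRAY_BRUSH.
HBRUSH hGray = (HBRUSH)GetStockObject(GRAY_BRUSH);

LOGBRUSH lb = { 0 };
if (GetObject(hGray, sizeof(lb), &lb))
{
    // For GRAY_BRUSH this is typically RGB(128, 128, 128).
    BYTE r = GetRValue(lb.lbColor);
    BYTE g = GetGValue(lb.lbColor);
    BYTE b = GetBValue(lb.lbColor);
}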
I used DrawIconEx (via GDI/D3D11 interoperability and CopyResource) to generate an ID3D11Texture2D that has many pixels with an alpha channel value of 0. I verified the texture's contents with a D3D11_USAGE_STAGING copy and Map to inspect the pixel values, and by saving a PNG with ScreenGrab (the relevant code needs one change: for DXGI_FORMAT_B8G8R8A8_UNORM, use GUID_WICPixelFormat32bppBGRA instead of GUID_WICPixelFormat24bppBGR).
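For reference, the staging/Map verification is roughly the following sketch; device, context, and gpu_texture are placeholder names rather than the ones in my project:
// Copy the GPU texture into a CPU-readable staging texture, then Map it
// to inspect the BGRA pixel values. All variable names here are placeholders.
D3D11_TEXTURE2D_DESC desc = {};
gpu_texture->GetDesc(&desc);
desc.Usage = D3D11_USAGE_STAGING;
desc.BindFlags = 0;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
desc.MiscFlags = 0;

CComPtr<ID3D11Texture2D> staging;
HRESULT hr = device->CreateTexture2D(&desc, nullptr, &staging);
if (SUCCEEDED(hr))
{
    context->CopyResource(staging, gpu_texture);

    D3D11_MAPPED_SUBRESOURCE mapped = {};
    if (SUCCEEDED(context->Map(staging, 0, D3D11_MAP_READ, 0, &mapped)))
    {
        const BYTE* first_row = static_cast<const BYTE*>(mapped.pData); // rows are mapped.RowPitch bytes apart
        // ... inspect the B, G, R, A bytes of each pixel ...
        context->Unmap(staging, 0);
    }
}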
When I render the texture the way Tutorial 5: Texturing does, pixels whose alpha value is 0 are rendered as black, which is not what I want; I would like those pixels to come out transparent. What do I need to do to achieve that? Here is my relevant code:
HRESULT CGraphRender::Init()
{
    ...
    // Create an alpha enabled blend state description.
    _blend_state = nullptr;
    D3D11_BLEND_DESC blendDesc;
    ZeroMemory(&blendDesc, sizeof(D3D11_BLEND_DESC));
    blendDesc.RenderTarget[0].BlendEnable = TRUE;
    blendDesc.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
    blendDesc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
    blendDesc.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
    blendDesc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
    blendDesc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ZERO;
    blendDesc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
    blendDesc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
    hr = _d3d_device->CreateBlendState(&blendDesc, &_blend_state);
    RETURN_ON_FAIL(hr);
    ...
}
HRESULT CGraphRender::Clear_3D(float color[])
{
    ID3D11RenderTargetView* rtv[] = { _back_rendertarget_view };
    _immediate_context->OMSetRenderTargets(_countof(rtv), rtv, nullptr);
    _immediate_context->ClearRenderTargetView(_back_rendertarget_view, color);

    float blendFactor[4] = { 1.f, 1.f, 1.f, 1.f };
    _immediate_context->OMSetBlendState(_blend_state, blendFactor, 0xffffffff);
    return S_OK;
}
The problem has been solved: call OMSetBlendState(_blend_state, ...) right before rendering the "alpha" texture, and restore the previous blend state after rendering:
HRESULT CGraphRender::DrawTexture(const std::shared_ptr<CDrawTextureShader>& texture, const RECT& dst_rect, const BOOL& is_blend_alpha)
{
    CComPtr<ID3D11DeviceContext> immediate_context;
    _d3d_device->GetImmediateContext(&immediate_context);
    if (!immediate_context)
    {
        return E_UNEXPECTED;
    }
    if (is_blend_alpha)
    {
        // Save the current blend state and enable alpha blending for this draw only.
        CComPtr<ID3D11BlendState> old_blend_state;
        FLOAT old_blend_factor[4] = { 0.f };
        UINT old_sample_mask = 0;
        immediate_context->OMGetBlendState(&old_blend_state, old_blend_factor, &old_sample_mask);

        float blend_factor[4] = { 1.f, 1.f, 1.f, 1.f };
        immediate_context->OMSetBlendState(_blend_state, blend_factor, 0xffffffff);

        HRESULT hr = texture->Render(immediate_context, dst_rect);

        // Restore whatever blend state was active before.
        immediate_context->OMSetBlendState(old_blend_state, old_blend_factor, old_sample_mask);
        return hr;
    }
    else
    {
        return texture->Render(immediate_context, dst_rect);
    }
}
I created a D3D11 device and can render images smoothly, but in order to also support GDI I tried several methods:
1. Through swapchain -> GetBuffer(ID3D11Texture2D) -> CreateDxgiSurfaceRenderTarget -> ID2D1GdiInteropRenderTarget -> GetDC, finally getting the DC. It runs normally on my Windows 10 machine, but on Windows 7 the GetDC call raises a _com_error exception.
2. Via swapchain -> GetBuffer(IDXGISurface1) -> GetDC, with the same result as 1.
I suspect that on Windows 7 the ID3D11Texture2D/IDXGISurface1 obtained from GetBuffer has restrictions on GDI use, so I switched to dynamically creating my own ID3D11Texture2D. Using the DC alone or the D3D11 drawing interface alone now works fine, but when I interoperate, the GDI operations end up drawn on the custom-created ID3D11Texture2D instead of the swap chain's back buffer:
_d3d->Clear();
_d3d->DrawImage();
HDC hdc = _d3d->GetDC();
DrawRectangleByGDI(hdc);
_d3d->ReleaseDC();
_d3d->Present();
So how can I arrange things so that both the D3D and the DC drawing end up on the same ID3D11Texture2D? That would also make my CopyResource usage more convenient.
HRESULT CGraphRender::Resize(const UINT32& width, const UINT32& height)
{
    _back_texture2d = nullptr;
    _back_rendertarget_view = nullptr;
    _dc_texture2d = nullptr;
    _dc_render_target = nullptr;

    float dpi = GetDpiFromD2DFactory(_d2d_factory);

    // Backbuffer
    HRESULT hr = _swap_chain->ResizeBuffers(2, width, height, DXGI_FORMAT_B8G8R8A8_UNORM, _is_gdi_compatible ? DXGI_SWAP_CHAIN_FLAG_GDI_COMPATIBLE : 0);
    RETURN_ON_FAIL(hr);

    hr = _swap_chain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&_back_texture2d);
    RETURN_ON_FAIL(hr);

    hr = CreateD3D11Texture2D(_d3d_device, width, height, &_dc_texture2d);
    RETURN_ON_FAIL(hr);

    D3D11_RENDER_TARGET_VIEW_DESC rtv;
    rtv.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
    rtv.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
    rtv.Texture2D.MipSlice = 0;
    hr = _d3d_device->CreateRenderTargetView(_back_texture2d, &rtv, &_back_rendertarget_view);
    RETURN_ON_FAIL(hr);
    ...
}
HRESULT CGraphRender::Clear(float color[])
{
    CComPtr<ID3D11DeviceContext> immediate_context;
    _d3d_device->GetImmediateContext(&immediate_context);
    if (!immediate_context)
    {
        return E_UNEXPECTED;
    }
    ID3D11RenderTargetView* ref_renderTargetView = _back_rendertarget_view;
    immediate_context->OMSetRenderTargets(1, &ref_renderTargetView, nullptr);
    immediate_context->ClearRenderTargetView(_back_rendertarget_view, color);
    return S_OK;
}
HDC CGraphRender::GetDC()
{
    if (_is_gdi_compatible)
    {
        CComPtr<IDXGISurface1> gdi_surface;
        HRESULT hr = _dc_texture2d->QueryInterface(__uuidof(IDXGISurface1), (void**)&gdi_surface);
        if (SUCCEEDED(hr))
        {
            HDC hdc = nullptr;
            hr = gdi_surface->GetDC(TRUE, &hdc);
            if (SUCCEEDED(hr))
            {
                return hdc;
            }
        }
    }
    return nullptr;
}
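The ReleaseDC() used in the drawing sequence above is not shown here; a minimal sketch, assuming it simply re-queries the same _dc_texture2d, would be:
// Sketch only: release the GDI DC obtained from the DXGI surface.
void CGraphRender::ReleaseDC()
{
    if (_is_gdi_compatible && _dc_texture2d)
    {
        CComPtr<IDXGISurface1> gdi_surface;
        if (SUCCEEDED(_dc_texture2d->QueryInterface(__uuidof(IDXGISurface1), (void**)&gdi_surface)))
        {
            gdi_surface->ReleaseDC(nullptr); // nullptr means no dirty rectangle is specified
        }
    }
}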
HRESULT CGraphRender::CopyTexture(ID3D11Texture2D* dst_texture, ID3D11Texture2D* src_texture, POINT* dst_topleft/* = nullptr*/, POINT* src_topleft/* = nullptr*/)
{
    if (!dst_texture && !src_texture)
    {
        return E_INVALIDARG;
    }
    CComPtr<ID3D11DeviceContext> immediate_context;
    _d3d_device->GetImmediateContext(&immediate_context);
    if (!immediate_context)
    {
        return E_UNEXPECTED;
    }
    ID3D11Texture2D* dst_texture_real = dst_texture ? dst_texture : _dc_texture2d;
    POINT dst_topleft_real = dst_topleft ? (*dst_topleft) : POINT{ 0, 0 };
    ID3D11Texture2D* src_texture_real = src_texture ? src_texture : _dc_texture2d;
    POINT src_topleft_real = src_topleft ? (*src_topleft) : POINT{ 0, 0 };

    D3D11_TEXTURE2D_DESC src_desc = { 0 };
    src_texture_real->GetDesc(&src_desc);
    D3D11_TEXTURE2D_DESC dst_desc = { 0 };
    dst_texture_real->GetDesc(&dst_desc);

    if (!dst_topleft_real.x && !src_topleft_real.x && !dst_topleft_real.y && !src_topleft_real.y && dst_desc.Width == src_desc.Width && dst_desc.Height == src_desc.Height)
    {
        immediate_context->CopyResource(dst_texture_real, src_texture_real);
    }
    else
    {
        D3D11_BOX src_box;
        src_box.left = min((UINT)src_topleft_real.x, (UINT)dst_topleft_real.x + dst_desc.Width);
        src_box.top = min((UINT)src_topleft_real.y, (UINT)dst_topleft_real.y + dst_desc.Height);
        src_box.right = min((UINT)src_box.left + src_desc.Width, (UINT)dst_topleft_real.x + dst_desc.Width);
        src_box.bottom = min((UINT)src_box.top + src_desc.Height, (UINT)dst_topleft_real.y + dst_desc.Height);
        src_box.front = 0;
        src_box.back = 1;
        ATLASSERT(src_box.left < src_box.right);
        ATLASSERT(src_box.top < src_box.bottom);
        immediate_context->CopySubresourceRegion(dst_texture_real, 0, dst_topleft_real.x, dst_topleft_real.y, 0, src_texture_real, 0, &src_box);
    }
    return S_OK;
}
I don't think Windows 7 supports what you're trying to do. Here are some alternatives.
Switch from GDI to something else that can render 2D graphics with D3D11. Direct2D is the most straightforward choice here; add DirectWrite if you want text in addition to rectangles.
If your 2D content is static or changes only rarely, you can use GDI+ to render into an in-memory RGBA device context, create a Direct3D 11 texture from that data, and render a full-screen triangle with that texture (a sketch follows below).
You can overlay another Win32 window on top of your Direct3D 11 rendering window and use GDI to render into that one. The GDI window on top must have the WS_EX_LAYERED extended style, and you must update it with the UpdateLayeredWindow API. This method is the most complicated and least reliable, though.
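For that second approach, here is a minimal sketch of creating the texture from CPU-side pixel data; device, pixels, width, and height are assumed names, and the pixel data is assumed to be top-down 32-bit BGRA rows produced by GDI+:
// Sketch only: wrap CPU-side BGRA pixels in an immutable D3D11 texture.
D3D11_TEXTURE2D_DESC desc = {};
desc.Width = width;
desc.Height = height;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_IMMUTABLE;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;

D3D11_SUBRESOURCE_DATA init = {};
init.pSysMem = pixels;        // top-down BGRA rows
init.SysMemPitch = width * 4; // bytes per row

CComPtr<ID3D11Texture2D> texture;
HRESULT hr = device->CreateTexture2D(&desc, &init, &texture);
// Then create a shader resource view from `texture` and draw a full-screen triangle with it.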
I can fill a polygon with a color using this code:
void FillColorPolygon(POINT pts[], int ilnum, long fillColor)
{
    COLORREF fillcol;
    fillcol = Gc_disp::ColorSet(fillColor);

    HBRUSH hBrushNew = CreateSolidBrush(fillcol);
    HBRUSH hBrushOld = (HBRUSH)SelectObject(m_hDC, hBrushNew);
    SetPolyFillMode(m_hDC, WINDING);
    Polygon(m_hDC, pts, (short)ilnum);
    SelectObject(m_hDC, hBrushOld);
    DeleteObject(hBrushNew);
}
The ColorSet function returns a fully opaque color:
COLORREF Gc_disp::ColorSet(long col)
{
    COLORREF rcol = RGB(0, 0, 0);
    if (col >= 0 && col <= GRIP_MAXCOLORS + 1)
        rcol = g_tblColor[col];
    return rcol;
}
But I don't know how to fill the color at 50% opacity.
Edit:
After following Jonathan's advice, I tried the AlphaBlend function.
First I tried making a bitmap transparent, and that seems to work:
void FillColorPolygonAlpha(POINT pts[], int ilnum, long fillColor)
{
    BLENDFUNCTION m_bf;
    m_bf.BlendOp = AC_SRC_OVER;
    m_bf.BlendFlags = 0;
    m_bf.SourceConstantAlpha = 0xC8;
    m_bf.AlphaFormat = 0;

    CBitmap m_bitmap;
    CImage image;
    image.Load(_T("C:\\Blas_grande.png"));
    CBitmap bitmap;
    m_bitmap.Attach(image.Detach());

    int m_nWidth, m_nHeight;
    BITMAP aBmp;
    m_bitmap.GetBitmap(&aBmp);
    m_nWidth = aBmp.bmWidth;
    m_nHeight = aBmp.bmHeight;

    CDC* pDC = CDC::FromHandle(GetDC());
    CDC dcMem;
    dcMem.CreateCompatibleDC(pDC);
    CBitmap* pOldBitmap = dcMem.SelectObject(&m_bitmap);
    AlphaBlend(m_hDC, 0, 0, m_nWidth, m_nHeight, dcMem, 0, 0, m_nWidth, m_nHeight, m_bf);
    dcMem.SelectObject(pOldBitmap);
}
The image was drawn with transparency as expected.
But it doesn't work when I try to fill a polygon.
I modified the FillColorPolygonAlpha() function to fill a polygon instead:
void FillColorPolygonAlpha(POINT pts[], int ilnum, long fillColor)
{
    BLENDFUNCTION m_bf;
    m_bf.BlendOp = AC_SRC_OVER;
    m_bf.BlendFlags = 0;
    m_bf.SourceConstantAlpha = 0xC8;
    m_bf.AlphaFormat = 0;

    CDC* pDC = CDC::FromHandle(GetDC());
    CDC dcMem;
    dcMem.CreateCompatibleDC(pDC);

    COLORREF fillcol;
    fillcol = Gc_disp::ColorSet(fillColor);
    HBRUSH hBrushNew = CreateSolidBrush(fillcol);
    Polygon(dcMem, pts, (short)ilnum);
    AlphaBlend(m_hDC, 0, 0, m_nWidth, m_nHeight, dcMem, 0, 0, m_nWidth, m_nHeight, m_bf);
}
It doesn't draw any polygon.
I solved this problem by using GDI+, because a GDI+ SolidBrush accepts an alpha value for transparency:
void FillColorPolygonAlpha(POINT pts[], int ilnum, long fillColor, int alpha)
{
    COLORREF rcol;
    rcol = Gc_disp::ColorSet(fillColor);

    Gdiplus::Graphics gr(m_hDC);
    Gdiplus::SolidBrush semiTransBrush(Gdiplus::Color(alpha, GetRValue(rcol), GetGValue(rcol), GetBValue(rcol)));

    // Convert the POINT array to GDI+ points. Note: iterate ilnum times,
    // not sizeof(pts) times (pts decays to a pointer here).
    CArray<Gdiplus::Point, Gdiplus::Point> arrPoints;
    for (int i = 0; i < ilnum; i++)
    {
        Gdiplus::Point pt(pts[i].x, pts[i].y);
        arrPoints.Add(pt);
    }
    Gdiplus::Point* pPoints = arrPoints.GetData();
    gr.FillPolygon(&semiTransBrush, pPoints, ilnum);
}
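One caveat, in case it is not already handled elsewhere in the project: GDI+ has to be initialized once per process before any Gdiplus objects are used. A minimal sketch:
#include <gdiplus.h>
#pragma comment(lib, "gdiplus.lib")

// Call once at startup, before creating Gdiplus::Graphics or Gdiplus::SolidBrush.
ULONG_PTR gdiplusToken = 0;
Gdiplus::GdiplusStartupInput gdiplusStartupInput;
Gdiplus::GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, nullptr);

// ... use GDI+ ...

// Call once at shutdown.
Gdiplus::GdiplusShutdown(gdiplusToken);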
I have a set of labels [8][8], each with its own ID, and a routine I call to change a label's color given its hWnd, but nothing happens; yet if I don't check for a specific ID in the WM_CTLCOLORSTATIC case, all the labels change color.
DWORD WINAPI changecolor(LPVOID lpParameter)
{
    clrLabelBkGnd = RGB(255, 255, 0x00);
    InvalidateRect(hWndLabel[0][0], NULL, TRUE);
    return 0;
}
Callback function:
case WM_CTLCOLORSTATIC:
    ctrlID = GetDlgCtrlID((HWND)lParam);
    if (ctrlID == 1000) {
        hdc = reinterpret_cast<HDC>(wParam);
        SetBkColor(hdc, clrLabelBkGnd);
        return reinterpret_cast<LRESULT>(hBrushLabel);
    }
    else break;
Main program:
/* fill the label IDs */
for (int i = 0; i < 8; i++) {
    for (int j = 0; j < 8; j++) {
        labelId[i][j] = (i * 8 + j) + 1000;
    }
}
In this example, when I check for ID 1000, which is the ID of hWndLabel[0][0], nothing is colored; but if I don't check for an ID at all, or if I test for an ID > 1000 in the WM_CTLCOLORSTATIC case, all the labels get colored even though I only invalidate hWndLabel[0][0].
This part is wrong:
case WM_CTLCOLORSTATIC:
    if (LOWORD(wParam) == 1000) {
        hdc = reinterpret_cast<HDC>(wParam);
Since wParam is the handle to the device context, why are you using its low word as the ID of the control?
Take a look at WM_CTLCOLORSTATIC:
wParam
Handle to the device context for the static control window.
lParam
Handle to the static control.
What you need to use is lParam.
DWORD ctrlID = GetDlgCtrlID((HWND)lParam);
if (ctrlID == 1000)
{
}
UPDATE: Based on the comments you provided, you need to have a mechanism to retain the ID of the label that has been invalidated.
DWORD WINAPI changecolor(LPVOID lpParameter)
{
    clrLabelBkGnd = RGB(255, 255, 0x00);
    someVariableToHoldLabelIdWithRightScope = labelId[0][0]; // Or GetDlgCtrlID(hWndLabel[0][0]);
    InvalidateRect(hWndLabel[0][0], NULL, TRUE);
    return 0;
}
Then, when you handle the color:
case WM_CTLCOLORSTATIC:
    ctrlID = GetDlgCtrlID((HWND)lParam);
    if (ctrlID == someVariableToHoldLabelIdWithRightScope)
    {
        hdc = reinterpret_cast<HDC>(wParam);
        SetBkColor(hdc, clrLabelBkGnd);
        return reinterpret_cast<LRESULT>(hBrushLabel);
    }
    else break;
If you invalidate more than one label at a time, then one variable like this is not enough. You need to have a list/array/queue of IDs.
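For instance, a minimal sketch of that idea, assuming a std::set of pending control IDs that is visible to both the worker routine and the window procedure:
#include <set>

std::set<int> pendingLabelIds; // IDs of labels whose color change is pending

// In changecolor(), remember which label was invalidated:
pendingLabelIds.insert(labelId[0][0]);
InvalidateRect(hWndLabel[0][0], NULL, TRUE);

// In WM_CTLCOLORSTATIC:
int ctrlID = GetDlgCtrlID((HWND)lParam);
if (pendingLabelIds.count(ctrlID) != 0)
{
    HDC hdc = reinterpret_cast<HDC>(wParam);
    SetBkColor(hdc, clrLabelBkGnd);
    return reinterpret_cast<LRESULT>(hBrushLabel);
}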
Marius answered your question - you are misusing the parameters of WM_CTLCOLORSTATIC, which is why your painting is not working correctly.
I would suggest a different solution to your problem. Have a list of colors, one set of Text/BkGnd colors for each label. Make changecolor() update the color entries for just the specified label as needed and then invalidate that label to trigger a repaint. WM_CTLCOLORSTATIC can then use the current colors of whichever label is currently being painted. No need to keep track of the changed Control ID between the call to changecolor() and the triggering of WM_CTLCOLORSTATIC (doing so is error prone anyway - think of what would happen if you wanted to change another label's coloring before WM_CTLCOLORSTATIC of a previous change is processed).
I would suggest a std::map to associate each label HWND to a struct holding that label's colors, eg:
#include <map>

struct sLabelColors
{
    COLORREF clrText;
    COLORREF clrBkGnd;
    HBRUSH hBrushBkGnd;
};

std::map<HWND, sLabelColors> labelColors;

hWndLabel[0][0] = CreateWindowEx(...);
if (hWndLabel[0][0] != NULL)
{
    sLabelColors &colors = labelColors[hWndLabel[0][0]];
    colors.clrText = GetSysColor(COLOR_WINDOWTEXT);
    colors.clrBkGnd = GetSysColor(COLOR_WINDOW);
    colors.hBrushBkGnd = NULL;
}
case WM_CTLCOLORSTATIC:
{
    HDC hdc = reinterpret_cast<HDC>(wParam);
    sLabelColors &colors = labelColors[(HWND)lParam];
    SetTextColor(hdc, colors.clrText);
    SetBkColor(hdc, colors.clrBkGnd);
    if (!colors.hBrushBkGnd) colors.hBrushBkGnd = CreateSolidBrush(colors.clrBkGnd);
    return reinterpret_cast<LRESULT>(colors.hBrushBkGnd);
}
case WM_PARENTNOTIFY:
{
    if (LOWORD(wParam) == WM_DESTROY)
    {
        HWND hWnd = (HWND)lParam;
        std::map<HWND, sLabelColors>::iterator iter = labelColors.find(hWnd);
        if (iter != labelColors.end())
        {
            if (iter->second.hBrushBkGnd) DeleteObject(iter->second.hBrushBkGnd);
            labelColors.erase(iter);
        }
    }
    break;
}
DWORD WINAPI changecolor(LPVOID lpParameter)
{
    sLabelColors &colors = labelColors[hWndLabel[0][0]];
    if (colors.hBrushBkGnd) {
        DeleteObject(colors.hBrushBkGnd);
        colors.hBrushBkGnd = NULL;
    }
    colors.clrBkGnd = RGB(255, 255, 0x00);
    InvalidateRect(hWndLabel[0][0], NULL, TRUE);
    return 0;
}
I am trying to generate a binary image from the depthMap() function in OpenNI, which provides an array of type int. With that image I want to do blob tracking.
The problem is that I am not able to generate a clean binary image from the depth map. As I understand it, the depth image shows a bright pixel for everything that is close to the sensor, and pixels get darker the farther away they are. So I check every pixel in the (one-dimensional) array to see whether it lies above my minimum and below my maximum threshold, which defines the range I want to extract data from.
Here is my code:
// import library
import SimpleOpenNI.*;
import processing.opengl.*; // opengl
import blobDetection.*; // blobs
// declare SimpleOpenNI object
SimpleOpenNI context;
BlobDetection theBlobDetection;
BlobBall blobBalls;
PrintWriter output;
// threshold for binaryImage
int minThreshold, maxThreshold;
// Size of the kinect Image
int kinectWidth = 640;
int kinectHeight = 480;
//
float globalX, globalY;
// Colors
color bgColor = color(0, 0, 123);
color white = color(255,255,255);
color black = color(0,0,0);
// PImage to hold incoming imagery
int[] distanceArray;
PImage cam, forBlobDetect;
void setup() {
  output = createWriter("positions.txt");

  // init threshold
  minThreshold = 960;
  maxThreshold = 2500;

  // same as Kinect dimensions
  size(kinectWidth, kinectHeight);
  background(bgColor);

  // initialize SimpleOpenNI object
  context = new SimpleOpenNI(this);
  if (context.isInit() == false) {
    println("Can't init SimpleOpenNI, maybe the camera is not connected!");
    exit();
  }
  else {
    // mirror the image to be more intuitive
    context.setMirror(true);
    context.enableDepth();
    // context.enableScene();
    distanceArray = context.depthMap();
    forBlobDetect = new PImage(width, height);
    theBlobDetection = new BlobDetection(forBlobDetect.width, forBlobDetect.height);
    theBlobDetection.setThreshold(0.2);
  }
}
void draw() {
  noStroke();

  // update the SimpleOpenNI object
  context.update();

  // put the image into a PImage
  cam = context.depthImage();

  // copy the image into the smaller blob image
  // forBlobDetect.copy(cam, 0, 0, cam.width, cam.height, 0, 0, forBlobDetect.width, forBlobDetect.height);

  // blur the blob image
  forBlobDetect.filter(BLUR, 2);

  int pos = 0;
  int currentDepthValue = 0;
  distanceArray = context.depthMap();
  for (int x = 0; x < cam.width; x++) {
    for (int y = 0; y < cam.height; y++) {
      pos = y*cam.width+x;
      currentDepthValue = distanceArray[pos];
      // println(currentDepthValue);
      if ((currentDepthValue > minThreshold) && (currentDepthValue < maxThreshold)) {
        forBlobDetect.pixels[pos] = black;
      } else {
        forBlobDetect.pixels[pos] = white;
      }
    }
  }

  // for(int i=0; i < distanceArray.length; i++) {
  //   currentDepthValue = distanceArray[i];
  //   // println(currentDepthValue);
  //   if(currentDepthValue > minThreshold) /*&& (currentDepthValue < maxThreshold)*/) {
  //     forBlobDetect.pixels[pos] = white;
  //   } else {
  //     forBlobDetect.pixels[pos] = black;
  //   }
  // }

  // detect the blobs
  theBlobDetection.computeBlobs(forBlobDetect.pixels);

  // display the image
  image(cam, 0, 0);
  image(forBlobDetect, 0, 0, width/2, height/2);
  // image(context.sceneImage(), context.depthWidth(), 0);
}
A really silly mistake on my part: I had misunderstood the 11-bit array.
Thanks to the "Making Things See" examples I solved it:
https://github.com/atduskgreg/Making-Things-See-Examples/tree/master/ax02_depth_range_limit