I am trying to detect the display scale factor in Windows 10 in C++. Below is the code I am using.
// Code to detect the display scale factor
#include <windows.h>
#include <cmath>
#include <iostream>

void getDisplayScale(double &h_Scale, double &v_Scale)
{
    //auto activeWindow = GetActiveWindow();
    HWND activeWindow = GetDesktopWindow();
    HMONITOR monitor = MonitorFromWindow(activeWindow, MONITOR_DEFAULTTONEAREST);

    // Get the logical width and height of the monitor
    MONITORINFOEX monitorInfoEx;
    monitorInfoEx.cbSize = sizeof(monitorInfoEx);
    GetMonitorInfo(monitor, &monitorInfoEx);
    long cxLogical = monitorInfoEx.rcMonitor.right - monitorInfoEx.rcMonitor.left;
    long cyLogical = monitorInfoEx.rcMonitor.bottom - monitorInfoEx.rcMonitor.top;

    // Get the physical width and height of the monitor
    DEVMODE devMode;
    devMode.dmSize = sizeof(devMode);
    devMode.dmDriverExtra = 0;
    EnumDisplaySettings(monitorInfoEx.szDevice, ENUM_CURRENT_SETTINGS, &devMode);
    DWORD cxPhysical = devMode.dmPelsWidth;
    DWORD cyPhysical = devMode.dmPelsHeight;

    // Calculate the scaling factor
    h_Scale = ((double)cxPhysical / (double)cxLogical);
    v_Scale = ((double)cyPhysical / (double)cyLogical);

    // Round off to 2 decimal places
    h_Scale = round(h_Scale * 100.0) / 100.0;
    v_Scale = round(v_Scale * 100.0) / 100.0;

    std::cout << "Horizontal scaling: " << h_Scale << "\n";
    std::cout << "Vertical scaling: " << v_Scale;
}
It works properly as a standalone console project.
When I put it in my MFC code, however, it returns 100% only. Even if the scale is 125%, it returns 100%.
How do I fix this?
Thanks
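[A likely cause, noted here as an assumption: the standalone console build was running DPI-aware while the MFC process is not. For a process that is not DPI-aware, Windows virtualizes both GetMonitorInfo and EnumDisplaySettings to the same scaled resolution, so the physical/logical ratio always comes out 1.0, i.e. 100%. A minimal sketch of opting in to per-monitor DPI awareness at startup (needs shellscalingapi.h and Shcore.lib, available from Windows 8.1 on):

// Hedged sketch, not from the original question: opt the process in to
// per-monitor DPI awareness before any window is created, so the
// logical and physical sizes queried above can actually differ.
#include <shellscalingapi.h>        // SetProcessDpiAwareness (Win 8.1+)
#pragma comment(lib, "Shcore.lib")

bool enablePerMonitorDpiAwareness()
{
    HRESULT hr = SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE);
    return SUCCEEDED(hr);           // fails if the awareness was already set
}

In an MFC app the equivalent is usually done with a DPI-awareness entry in the application manifest rather than in code.]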
The title pretty much explains it all: after using the MoveWindow function the window does move to the second monitor, but after minimizing the app and opening it again the window appears back on the first screen. I'm guessing there is a function to tell Windows that the window is now on the second screen rather than the first, but I can't seem to find it.
RECT applicationRect;
HWND applicationHWND;

applicationHWND = GetForegroundWindow();
GetWindowRect(applicationHWND, &applicationRect);

const int width = abs(abs(applicationRect.right) - abs(applicationRect.left));
const int height = abs(abs(applicationRect.bottom) - abs(applicationRect.top));

short screen = 0;
if (applicationRect.left < -8) { // Possibly on the left screen
    if (abs(applicationRect.left) > abs(applicationRect.right)) screen = 1;
    else screen = 0;
}

// Assumes two 1920-pixel-wide monitors placed side by side
const int newleft = (screen == 0) * (applicationRect.left - 1920) + (screen == 1) * (applicationRect.left + 1920);
MoveWindow(applicationHWND, newleft, applicationRect.top, width, height, false);
This is the function that moves the window to the other screen.
Now I can reproduce it: this happens when the target window is maximized. In that case MoveWindow cannot change the normal-position value (that is, WINDOWPLACEMENT.rcNormalPosition). When the window is restored from the maximized state, Windows uses this value to decide which monitor to restore it on. You need to check whether the window is maximized first, and then call SetWindowPlacement to change this position. For non-maximized windows as well, you can use SetWindowPlacement instead of MoveWindow.
#include <windows.h>
#include <iostream>

int main()
{
    RECT applicationRect;
    HWND applicationHWND;
    BOOL ret = 0;

    applicationHWND = GetForegroundWindow();
    GetWindowRect(applicationHWND, &applicationRect);

    const int width = abs(abs(applicationRect.right) - abs(applicationRect.left));
    const int height = abs(abs(applicationRect.bottom) - abs(applicationRect.top));

    short screen = 0;
    if (applicationRect.left < -8) { // Possibly on the left screen
        if (abs(applicationRect.left) > abs(applicationRect.right)) screen = 1;
        else screen = 0;
    }
    const int newleft = (screen == 0) * (applicationRect.left - 1920) + (screen == 1) * (applicationRect.left + 1920);

    WINDOWPLACEMENT place = {};
    place.length = sizeof(WINDOWPLACEMENT);
    GetWindowPlacement(applicationHWND, &place);

    // Move the visible window only when it is maximized; for a restored
    // window the SetWindowPlacement call below is enough.
    if (place.showCmd == SW_SHOWMAXIMIZED)
    {
        MoveWindow(applicationHWND, newleft, applicationRect.top, width, height, false);
    }

    // Shift the stored normal position so the window restores on the
    // target monitor instead of snapping back to the original one.
    place.rcNormalPosition.left = (screen == 0) * (place.rcNormalPosition.left - 1920) + (screen == 1) * (place.rcNormalPosition.left + 1920);
    place.rcNormalPosition.right = (screen == 0) * (place.rcNormalPosition.right - 1920) + (screen == 1) * (place.rcNormalPosition.right + 1920);
    place.flags |= WPF_ASYNCWINDOWPLACEMENT;
    ret = SetWindowPlacement(applicationHWND, &place);
}
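As a side note (my addition, not part of the original answer): the hardcoded 1920 in both snippets assumes two 1920-pixel-wide monitors placed side by side. A sketch of deriving the shift from the actual monitor rectangle instead, under the same two-equal-monitors assumption:

// Hypothetical helper: width of the monitor the window currently
// occupies, usable as the horizontal shift between two equal monitors.
int monitorShift(HWND hwnd)
{
    HMONITOR mon = MonitorFromWindow(hwnd, MONITOR_DEFAULTTONEAREST);
    MONITORINFO mi = { sizeof(mi) };
    GetMonitorInfo(mon, &mi);
    return mi.rcMonitor.right - mi.rcMonitor.left;
}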
I'm testing PIXIjs for simple 2D graphics: basically I'm sliding tiles with some background color and border animations, plus I'm masking some parts of the layout.
While it works great on desktops, it's much slower than the same slide + animations done with pure CSS on mobile devices (where, by the way, I'm using Crosswalk + Cordova, so the browser is always the same).
For moving tiles and animating colors I'm calling requestAnimationFrame for each tile, and I've disabled PIXI's ticker:
ticker.autoStart = false;
ticker.stop();
Could this slowness be due to a weaker GPU on mobile devices, or is it just about the way I use PIXI?
I'm not showing the full code because it's quite long (~800 lines).
The following is the routine I use for each tile once a slide is captured:
const animateTileBorderAndText = (tileObj, steps, _color, radius, textSize, strokeThickness, _config) => {
    let pixiTile = tileObj.tile;
    let s = 0;
    let graphicsData = pixiTile.graphicsData[0];
    let shape = graphicsData.shape;
    let textStyle = pixiTile.children[0].style;
    let textInc = (textSize - textStyle.fontSize) / steps;
    let strokeInc = (strokeThickness - textStyle.strokeThickness) / steps;
    let prevColor = graphicsData.fillColor;
    let color = _color !== null ? _color : prevColor;
    let alpha = pixiTile.alpha;
    let h = shape.height;
    let w = shape.width;
    let rad = shape.radius;
    let radiusInc = (radius - rad) / steps;
    let r = (prevColor & 0xFF0000) >> 16;
    let g = (prevColor & 0x00FF00) >> 8;
    let b = prevColor & 0x0000FF;
    let rc = (color & 0xFF0000) >> 16;
    let rg = (color & 0x00FF00) >> 8;
    let rb = color & 0x0000FF;
    let redStep = (rc - r) / steps;
    let greenStep = (rg - g) / steps;
    let blueStep = (rb - b) / steps;
    let paintColor = prevColor;
    let goPaint = color !== prevColor;

    let animate = (t) => {
        if (s === steps) {
            textStyle.fontSize = textSize;
            textStyle.strokeThickness = strokeThickness;
            //pixiTile.tint = color;
            if (!_config.SEMAPHORES.slide) {
                _config.SEMAPHORES.slide = true;
                PUBSUB.publish(_config.SLIDE_CODE, _config.torusModel.getData());
            }
            return true;
        }
        if (goPaint) {
            r += redStep;
            g += greenStep;
            b += blueStep;
            paintColor = (r << 16) + (g << 8) + b;
        }
        textStyle.fontSize += textInc;
        textStyle.strokeThickness += strokeInc;
        pixiTile.clear();
        pixiTile.beginFill(paintColor, alpha);
        pixiTile.drawRoundedRect(0, 0, h, w, rad + radiusInc * (s + 1));
        pixiTile.endFill();
        s++;
        return requestAnimationFrame(animate);
    };
    return animate();
};
The above function is called after the following one, which is called for each tile to make it slide:
const slideSingleTile = (tileObj, delta, axe, conf, SEM, tilesMap) => {
    let tile = tileObj.tile;
    let steps = conf.animationSteps;
    SEM.slide = false;
    let s = 0;
    let stepDelta = delta / steps;
    let endPos = tile[axe] + delta;

    let slide = (time) => {
        if (s === steps) {
            tile[axe] = endPos;
            tileObj.resetPosition();
            tilesMap[tileObj.row][tileObj.col] = tileObj;
            return tileObj.onSlideEnd(axe == 'x' ? 0 : 2);
        }
        tile[axe] += stepDelta;
        s++;
        return requestAnimationFrame(slide);
    };
    return slide();
};
For each finger gesture, a single column/row (of an NxM matrix of tiles) is slid and animated using the above two functions.
It's the first time I use canvas.
I read that canvas is way faster than DOM animations, and I read very good reviews of PIXIjs, so I believe I'm doing something wrong.
Can someone help?
In the end I'm a complete donk...
The issue is not with pixijs.
Basically I was forcing 60 FPS! The number of steps to complete the animation is set to 12, which implies a 200 ms animation at 60 FPS using requestAnimationFrame (12 frames / 60 FPS = 0.2 s), but on low-end devices it is obviously going to run slower: at 30 FPS the same 12 steps take 400 ms.
CSS animations take a duration as a parameter, so they automatically adapt the frame rate to the device's hardware.
To solve the issue I'm adapting the number of steps during the animation: if the animation takes longer than 200 ms, I reduce the number of steps proportionally.
I hope this is of help to any web developer who is used to CSS animations and has just started developing with canvas.
[The final fix, which works unconditionally: use SetDIBitsToDevice, not BitBlt, to copy out the post-text-draw image data. With this change, all occurrences of the problem are gone.]
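[A sketch of that final fix, with placeholder names (hdcDest, pvBits, bmi, width, height) rather than the original app's variables:

// Copy the DIB bits straight to the destination DC; unlike BitBlt this
// reads from the bit array itself, sidestepping the problem described below.
SetDIBitsToDevice(hdcDest,
                  0, 0,             // destination x, y
                  width, height,    // size of the copied rectangle
                  0, 0,             // source x, y
                  0,                // first scan line in the array
                  height,           // number of scan lines
                  pvBits,           // pointer to the DIB bits
                  &bmi,             // BITMAPINFO of the DIB section
                  DIB_RGB_COLORS);]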
I fixed the problem I'm having, but for the life of me I can't figure out why it occurred.
1. Create a bitmap with CreateDIBitmap; get a pointer to the bitmap bits.
2. Select the bitmap into a memory DC.
3. Background-fill the bitmap by directly writing the bitmap memory.
4. TextOut.
5. No text displays.
What fixed the problem: changing item 3 from a direct fill to a call to FillRect. All is well; it works perfectly.
This is under Windows 10, but from what little I could find on the web, it spans all versions of Windows. NO operations work on the bitmap after the manual write - not even calling FillRect. No savvy, Kimosabe. Elsewhere in the app I even build gradient fills by directly writing to that bitmap memory, and there is no problem. But once TextOut is called after the manual fill, the bitmap is (effectively) locked and no further functions work on it - nor do any return an error.
I'm using a font with a 90-degree escapement. I have not tried it with a "normal" font (0-degree escapement). DrawTextEx with DT_CALCRECT specifically states that it only works on 0-degree-escapement fonts, so I had to use TextOut for this reason.
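(For reference, a 90-degree-escapement font of the kind described can be created like this; the face name and size here are arbitrary:

// lfEscapement/lfOrientation are in tenths of a degree, so 900 = 90 degrees.
HFONT vertical = CreateFontW(-24, 0, 900, 900, FW_NORMAL,
                             FALSE, FALSE, FALSE, DEFAULT_CHARSET,
                             OUT_DEFAULT_PRECIS, CLIP_DEFAULT_PRECIS,
                             CLEARTYPE_QUALITY, DEFAULT_PITCH | FF_DONTCARE,
                             L"Segoe UI");)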
Very bizarre.
No, there were no stupid mistakes like using the same text color as the background color. I've spent too long on this for that. One option people have available is that the endless energy that would normally be spent destroying the question and/or the person who asked it could instead be used to write a few lines of code and try it for yourself.
Here's a function to make a bitmap. Don't pass a plain colour, pass a gradient fill, say going from white to pinkish.
Does it display correctly? If so, does the TextOut call on top of that work?
static HBITMAP MakeBitmap(unsigned char *rgba, int width, int height, VOID **buff)
{
    VOID *pvBits;          // pointer to DIB section
    HBITMAP answer;
    BITMAPINFO bmi;
    HDC hdc;
    int x, y;
    int red, green, blue, alpha;

    // set up bitmap info (zeroed first so the unused header fields are clean)
    memset(&bmi, 0, sizeof(bmi));
    bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    bmi.bmiHeader.biWidth = width;
    bmi.bmiHeader.biHeight = height;
    bmi.bmiHeader.biPlanes = 1;
    bmi.bmiHeader.biBitCount = 32;       // four 8-bit components
    bmi.bmiHeader.biCompression = BI_RGB;
    bmi.bmiHeader.biSizeImage = width * height * 4;

    hdc = CreateCompatibleDC(GetDC(0));
    answer = CreateDIBSection(hdc, &bmi, DIB_RGB_COLORS, &pvBits, NULL, 0x0);

    // premultiply alpha and convert RGBA to the DIB's BGRA layout, flipping
    // vertically because a positive biHeight means a bottom-up DIB
    for (y = 0; y < height; y++)
    {
        for (x = 0; x < width; x++)
        {
            red = rgba[(y * width + x) * 4];
            green = rgba[(y * width + x) * 4 + 1];
            blue = rgba[(y * width + x) * 4 + 2];
            alpha = rgba[(y * width + x) * 4 + 3];
            red = (red * alpha) >> 8;
            green = (green * alpha) >> 8;
            blue = (blue * alpha) >> 8;
            ((UINT32 *)pvBits)[(height - y - 1) * width + x] =
                (alpha << 24) | (red << 16) | (green << 8) | blue;
        }
    }
    DeleteDC(hdc);

    *buff = pvBits;
    return answer;
}
I am trying to display a 24-bit uncompressed bitmap with an odd width using standard Win32 API calls, but it seems like I have a stride problem.
According to MSDN:
https://msdn.microsoft.com/en-us/library/windows/desktop/dd318229%28v=vs.85%29.aspx
"For uncompressed RGB formats, the minimum stride is always the image width in bytes, rounded up to the nearest DWORD. You can use the following formula to calculate the stride:
stride = ((((biWidth * biBitCount) + 31) & ~31) >> 3)"
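As a quick worked example of that formula: for a 24-bit image 101 pixels wide, the raw row is 101 * 3 = 303 bytes, and ((101 * 24 + 31) & ~31) >> 3 = 304 bytes, i.e. one padding byte at the end of each row.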
but this simply does not work for me. Below is the code:
void Init()
{
    pImage = ReadBMP("data\\bird.bmp");
    size_t imgSize = pImage->width * pImage->height * 3;

    BITMAPINFOHEADER bmih;
    bmih.biSize = sizeof(BITMAPINFOHEADER);
    bmih.biBitCount = 24;
    // This is probably where the bug is
    LONG stride = ((((pImage->width * bmih.biBitCount) + 31) & ~31) >> 3);
    //bmih.biWidth = pImage->width;
    bmih.biWidth = stride;
    bmih.biHeight = -((LONG)pImage->height);
    bmih.biPlanes = 1;
    bmih.biCompression = BI_RGB;
    bmih.biSizeImage = 0;
    bmih.biXPelsPerMeter = 1;
    bmih.biYPelsPerMeter = 1;
    bmih.biClrUsed = 0;
    bmih.biClrImportant = 0;

    BITMAPINFO dbmi;
    ZeroMemory(&dbmi, sizeof(dbmi));
    dbmi.bmiHeader = bmih;
    dbmi.bmiColors->rgbBlue = 0;
    dbmi.bmiColors->rgbGreen = 0;
    dbmi.bmiColors->rgbRed = 0;
    dbmi.bmiColors->rgbReserved = 0;

    HDC hdc = ::GetDC(NULL);
    mTestBMP = CreateDIBitmap(hdc,
                              &bmih,
                              CBM_INIT,
                              pImage->pSrc,
                              &dbmi,
                              DIB_RGB_COLORS);
    hdc = ::GetDC(NULL);
}
and here is the drawing function:
RawBMP *pImage;
HBITMAP mTestBMP;

void UpdateScreen(HDC srcHDC)
{
    if (pImage != nullptr && mTestBMP != 0x00)
    {
        HDC hdc = CreateCompatibleDC(srcHDC);
        SelectObject(hdc, mTestBMP);
        BitBlt(srcHDC,
               0,              // x
               0,              // y
               // I tried passing the stride here and it did not work either
               pImage->width,  // width of the image
               pImage->height, // height
               hdc,
               0,              // x and
               0,              // y of upper left corner
               SRCCOPY);
        DeleteDC(hdc);
    }
}
If I pass the original image width (an odd number) instead of the stride
LONG stride = ((((pImage->width * bmih.biBitCount) + 31) & ~31) >> 3);
//bmih.biWidth = stride;
bmih.biWidth = pImage->width;
the picture looks skewed (comparison screenshots omitted here), and if I pass the stride according to MSDN, then nothing shows up because the stride is too large.
Any clues? Thank you!
Thanks Jonathan for the solution: I need to copy row by row with the proper padding for odd-width images. More or less the code for 24-bit uncompressed images:
const uint32_t bitCount = 24;
// The MSDN formula already rounds any width (odd or even) up to the
// nearest DWORD boundary, so it can be used unconditionally; note that a
// plain width * 3 is only DWORD-aligned when the width is a multiple of 4.
LONG strideInBytes = ((((width * bitCount) + 31) & ~31) >> 3);

// allocate the new buffer
unsigned char *pBuffer = new unsigned char[strideInBytes * height];
memset(pBuffer, 0xaa, strideInBytes * height);

// copy row by row, leaving the padding bytes at the end of each row
for (uint32_t yy = 0; yy < height; yy++)
{
    uint32_t rowSizeInBytes = width * 3;
    unsigned char *pDest = &pBuffer[yy * strideInBytes];
    unsigned char *pSrc = &pData[yy * rowSizeInBytes];
    memcpy(pDest, pSrc, rowSizeInBytes);
}
rawBMP->pSrc = pBuffer;
rawBMP->width = width;
rawBMP->height = height;
rawBMP->stride = strideInBytes;
I've been going through these tutorials (only 2 links allowed for me): https://code.msdn.microsoft.com/Direct3D-Tutorial-Win32-829979ef
and reading through the Direct3D 11 Graphics Pipeline: https://msdn.microsoft.com/en-us/library/windows/desktop/ff476882%28v=vs.85%29.aspx
I currently have a Pixel (aka. Fragment) Shader coded in HLSL, consisting of the following code:
//Pixel Shader input.
struct psInput
{
    float4 Position: SV_POSITION;
    float4 Color: COLOR;
};

//Pixel (aka. Fragment) Shader.
float4 PS(psInput input): SV_TARGET
{
    return input.Color;
}
What I (think I) would like to do is multisample and access nearby pixel data for each pixel in my Pixel Shader so that I can perform a sort of custom anti-aliasing like FXAA (http://developer.download.nvidia.com/assets/gamedev/files/sdk/11/FXAA_WhitePaper.pdf). From my understanding, I need to pass a texture to HLSL using PSSetShaderResources for each render, but beyond that I have no idea. So, my question is:
How do I send nearby pixel data to a Pixel-Shader in Direct3D 11?
Being able to do this kind of thing would also be extremely beneficial to my understanding of how C++ and HLSL interact with each other, beyond the standard "pass some float4's to the shader" that I find in tutorials. It seems that this is the most crucial aspect of D3D development, and yet I can't find very many examples of it online.
I've considered traditional MSAA (MultiSample Anti-Aliasing), but I can't find any information on how to do it successfully in D3D 11 beyond that I need to be using a "BitBlt" (bit-block transfer) model swap chain first. (See DXGI_SAMPLE_DESC1 and DXGI_SAMPLE_DESC; only a count of 1 and a quality of 0 (no AA) will result in things being drawn.) Additionally, I would like to know how to perform the above for general understanding in case I need it for other aspects of my project. Answers on how to perform MSAA in D3D 11 are welcome too though.
Please use D3D 11 and HLSL code only.
To do custom anti-aliasing like FXAA you'll need to render the scene to an offscreen render target (a sketch of the first two steps follows the list):
- Create an ID3D11Texture2D with bind flags D3D11_BIND_RENDER_TARGET and D3D11_BIND_SHADER_RESOURCE.
- Create an ID3D11ShaderResourceView and an ID3D11RenderTargetView for the texture created in step one.
- Render the scene to the ID3D11RenderTargetView created in step two.
- Set the backbuffer as the render target and bind the ID3D11ShaderResourceView created in step two to the correct pixel shader slot.
- Render a fullscreen triangle covering the entire screen; in the pixel shader you'll be able to sample the texture containing the scene (use the Load() function).
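A sketch of steps one and two, assuming an existing ID3D11Device* named device and backbuffer-sized width/height:

// Offscreen scene texture usable both as a render target and as a
// shader resource for the post-process (FXAA) pass.
D3D11_TEXTURE2D_DESC td = {};
td.Width = width;
td.Height = height;
td.MipLevels = 1U;
td.ArraySize = 1U;
td.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
td.SampleDesc.Count = 1U;
td.Usage = D3D11_USAGE_DEFAULT;
td.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;

ID3D11Texture2D* sceneTexture = nullptr;
ID3D11RenderTargetView* sceneRTV = nullptr;
ID3D11ShaderResourceView* sceneSRV = nullptr;
device->CreateTexture2D(&td, nullptr, &sceneTexture);
device->CreateRenderTargetView(sceneTexture, nullptr, &sceneRTV);
device->CreateShaderResourceView(sceneTexture, nullptr, &sceneSRV);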
When you tried to do traditional MSAA, did you remember to set MultisampleEnable in the rasterizer state?
And again I answer my own question, sort of (I never did use FXAA...). I am providing my answer here to be nice to those who are following in my footsteps.
It turns out I was missing the depth stencil view for MSAA. You want SampleCount to be 1U for disabled MSAA, 2U for 2XMSAA, 4U for 4XMSAA, 8U for 8XMSAA, etc. (Use ID3D11Device::CheckMultisampleQualityLevels to "probe" for viable MSAA levels...) You pretty much always want to use a quality level of 0U for disabled MSAA and 1U for enabled MSAA.
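(For the probing mentioned above, a small sketch; D3DDevice matches the variable used in the code below:

// Walk down from 8x until the device reports at least one quality level
// for the backbuffer format.
UINT desiredCount = 8U;
UINT qualityLevels = 0U;
while (desiredCount > 1U)
{
    D3DDevice->CheckMultisampleQualityLevels(DXGI_FORMAT_R8G8B8A8_UNORM,
                                             desiredCount, &qualityLevels);
    if (qualityLevels > 0U)
        break;                  // desiredCount is supported
    desiredCount /= 2U;         // 8 -> 4 -> 2 -> give up (no MSAA)
})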
Below is my working MSAA code (you should be able to fill in the rest). Note that I used DXGI_FORMAT_D24_UNORM_S8_UINT and D3D11_DSV_DIMENSION_TEXTURE2DMS, and that the Format values for the depth texture and depth stencil view are the same and the SampleCount and SampleQuality values are the same.
Good luck!
unsigned int SampleCount = 1U;
unsigned int SampleQuality = (SampleCount > 1U ? 1U : 0U);

//Create swap chain.
IDXGIFactory2* dxgiFactory2 = nullptr;
d3dResult = dxgiFactory->QueryInterface(__uuidof(IDXGIFactory2), reinterpret_cast<void**>(&dxgiFactory2));
if (dxgiFactory2)
{
    //DirectX 11.1 or later.
    d3dResult = D3DDevice->QueryInterface(__uuidof(ID3D11Device1), reinterpret_cast<void**>(&D3DDevice1));
    if (SUCCEEDED(d3dResult))
    {
        D3DDeviceContext->QueryInterface(__uuidof(ID3D11DeviceContext1), reinterpret_cast<void**>(&D3DDeviceContext1));
    }

    DXGI_SWAP_CHAIN_DESC1 swapChain;
    ZeroMemory(&swapChain, sizeof(swapChain));
    swapChain.Width = width;
    swapChain.Height = height;
    swapChain.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    swapChain.SampleDesc.Count = SampleCount;
    swapChain.SampleDesc.Quality = SampleQuality;
    swapChain.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    swapChain.BufferCount = 2U;

    d3dResult = dxgiFactory2->CreateSwapChainForHwnd(D3DDevice, w32Window, &swapChain, nullptr, nullptr, &SwapChain1);
    if (SUCCEEDED(d3dResult))
    {
        d3dResult = SwapChain1->QueryInterface(__uuidof(IDXGISwapChain), reinterpret_cast<void**>(&SwapChain));
    }
    dxgiFactory2->Release();
}
else
{
    //DirectX 11.0.
    DXGI_SWAP_CHAIN_DESC swapChain;
    ZeroMemory(&swapChain, sizeof(swapChain));
    swapChain.BufferCount = 2U;
    swapChain.BufferDesc.Width = width;
    swapChain.BufferDesc.Height = height;
    swapChain.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    swapChain.BufferDesc.RefreshRate.Numerator = 60U;
    swapChain.BufferDesc.RefreshRate.Denominator = 1U;
    swapChain.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    swapChain.OutputWindow = w32Window;
    swapChain.SampleDesc.Count = SampleCount;
    swapChain.SampleDesc.Quality = SampleQuality;
    swapChain.Windowed = true;

    d3dResult = dxgiFactory->CreateSwapChain(D3DDevice, &swapChain, &SwapChain);
}

//Disable Alt + Enter and Print Screen shortcuts.
dxgiFactory->MakeWindowAssociation(w32Window, DXGI_MWA_NO_PRINT_SCREEN | DXGI_MWA_NO_ALT_ENTER);
dxgiFactory->Release();
if (FAILED(d3dResult))
{
    return false;
}

//Create render target view.
ID3D11Texture2D* backBuffer = nullptr;
d3dResult = SwapChain->GetBuffer(0U, __uuidof(ID3D11Texture2D), reinterpret_cast<void**>(&backBuffer));
if (FAILED(d3dResult))
{
    return false;
}
d3dResult = D3DDevice->CreateRenderTargetView(backBuffer, nullptr, &RenderTargetView);
backBuffer->Release();
if (FAILED(d3dResult))
{
    return false;
}

//Create depth stencil texture.
ID3D11Texture2D* DepthStencilTexture = nullptr;
D3D11_TEXTURE2D_DESC depthTextureLayout;
ZeroMemory(&depthTextureLayout, sizeof(depthTextureLayout));
depthTextureLayout.Width = width;
depthTextureLayout.Height = height;
depthTextureLayout.MipLevels = 1U;
depthTextureLayout.ArraySize = 1U;
depthTextureLayout.Usage = D3D11_USAGE_DEFAULT;
depthTextureLayout.CPUAccessFlags = 0U;
depthTextureLayout.MiscFlags = 0U;
depthTextureLayout.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthTextureLayout.SampleDesc.Count = SampleCount;
depthTextureLayout.SampleDesc.Quality = SampleQuality;
depthTextureLayout.BindFlags = D3D11_BIND_DEPTH_STENCIL;

d3dResult = D3DDevice->CreateTexture2D(&depthTextureLayout, nullptr, &DepthStencilTexture);
if (FAILED(d3dResult))
{
    return false;
}

//Create depth stencil.
D3D11_DEPTH_STENCIL_DESC depthStencilLayout;
depthStencilLayout.DepthEnable = true;
depthStencilLayout.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
depthStencilLayout.DepthFunc = D3D11_COMPARISON_LESS;
depthStencilLayout.StencilEnable = true;
depthStencilLayout.StencilReadMask = 0xFF;
depthStencilLayout.StencilWriteMask = 0xFF;
depthStencilLayout.FrontFace.StencilFailOp = D3D11_STENCIL_OP_KEEP;
depthStencilLayout.FrontFace.StencilDepthFailOp = D3D11_STENCIL_OP_INCR;
depthStencilLayout.FrontFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
depthStencilLayout.FrontFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
depthStencilLayout.BackFace.StencilFailOp = D3D11_STENCIL_OP_KEEP;
depthStencilLayout.BackFace.StencilDepthFailOp = D3D11_STENCIL_OP_INCR;
depthStencilLayout.BackFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
depthStencilLayout.BackFace.StencilFunc = D3D11_COMPARISON_ALWAYS;

ID3D11DepthStencilState* depthStencilState;
D3DDevice->CreateDepthStencilState(&depthStencilLayout, &depthStencilState);
D3DDeviceContext->OMSetDepthStencilState(depthStencilState, 1U);

//Create depth stencil view.
D3D11_DEPTH_STENCIL_VIEW_DESC depthStencilViewLayout;
ZeroMemory(&depthStencilViewLayout, sizeof(depthStencilViewLayout));
depthStencilViewLayout.Format = depthTextureLayout.Format;
depthStencilViewLayout.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2DMS;
depthStencilViewLayout.Texture2D.MipSlice = 0U;
d3dResult = D3DDevice->CreateDepthStencilView(DepthStencilTexture, &depthStencilViewLayout, &DepthStencilView);
DepthStencilTexture->Release();
if (FAILED(d3dResult))
{
    return false;
}

//Set output-merger render targets.
D3DDeviceContext->OMSetRenderTargets(1U, &RenderTargetView, DepthStencilView);