DirectX 11: invalid argument when creating vertex shader

Here is the shader code (file name: Shader.shader):
struct VOut
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
VOut VMain(float4 position : POSITION, float4 color : COLOR)
{
VOut output;
output.position = position;
output.color = color;
return output;
}
float4 PMain(float4 position : SV_POSITION, float4 color : COLOR) : SV_TARGET
{
return color;
}
Here is how I compile my shaders:
RasterShader::RasterShader(ID3D11Device* device,LPCWSTR vFile,LPCSTR vEntry,LPCSTR vVersion,LPCWSTR pFile,LPCSTR pEntry,LPCSTR pVersion)
{
ID3DBlob* compiledCode=nullptr;
vShader = nullptr;
pShader = nullptr;
errors = 0;
if (FAILED(Compile(vFile,vEntry,vVersion,&compiledCode)))
{
errors = 1;
return;
}
if (FAILED(device->CreateVertexShader(compiledCode->GetBufferPointer(), compiledCode->GetBufferSize(),nullptr, &vShader)))
{
compiledCode->Release();
MessageBox(NULL, L"Failed To Create Vertex Shader", L"Failed Vertex Shader", MB_OK);
errors = 1;
return;
}
D3D11_INPUT_ELEMENT_DESC desc[] =
{
{"POSITION",0,DXGI_FORMAT_R32G32B32_FLOAT,0,0,D3D11_INPUT_PER_VERTEX_DATA,0}
,{"COLOR",0,DXGI_FORMAT_R32G32B32_FLOAT,0,12,D3D11_INPUT_PER_VERTEX_DATA,0}
};
if (FAILED(device->CreateInputLayout(desc, 2,compiledCode->GetBufferPointer(),compiledCode->GetBufferSize(),&inputLayout)))
{
compiledCode->Release();
MessageBox(NULL, L"Failed To Create Input Layout", L"Failed Input Layout", MB_OK);
errors = 1;
return;
}
compiledCode->Release();
if (FAILED(Compile(pFile,pEntry,pVersion,&compiledCode)))
{
errors = 1;
return;
}
if (FAILED(device->CreatePixelShader(compiledCode->GetBufferPointer(), compiledCode->GetBufferSize(), nullptr, &pShader)))
{
compiledCode->Release();
MessageBox(NULL, L"Failed To Create Pixel Shader", L"Failed Pixel Shader", MB_OK);
errors = 1;
return;
}
compiledCode->Release();
}
HRESULT RasterShader::Compile(LPCWSTR fileName,LPCSTR entry,LPCSTR version,ID3DBlob** code)
{
ID3DBlob* errors=nullptr;
HRESULT hr = D3DCompileFromFile(fileName,nullptr,nullptr
,entry,version
,0,0,code,&errors);
if (FAILED(hr))
{
if (errors!=nullptr)
{
CString data((char*)errors->GetBufferPointer());
MessageBox(NULL, data.GetBuffer(), L"Shader Compile Errors", MB_OK);
data.ReleaseBuffer();
errors->Release();
}
if (code && *code) { (*code)->Release(); *code = nullptr; }
}
return hr;
}
RasterShader * RasterShader::Create(ID3D11Device* device,LPCWSTR vFile,LPCSTR vMain,LPCSTR vVersion,LPCWSTR pFile,LPCSTR pMain,LPCSTR pVersion)
{
RasterShader* shader = new RasterShader(device,vFile,vMain,vVersion,pFile,pMain,pVersion);
if (shader->errors == 1)
{
delete shader;
shader = nullptr;
}
return shader;
}
Here is how I create my shader:
shader = RasterShader::Create(directx->getDevice(), L"Shader.shader","VMain","vs_4_0",L"Shader.shader","PMain","ps_4_0");
if (shader == nullptr)
{
errors = 1;
return;
}
Here is how I create my device:
D3D_FEATURE_LEVEL levels[] = {
D3D_FEATURE_LEVEL_9_1,
D3D_FEATURE_LEVEL_9_2,
D3D_FEATURE_LEVEL_9_3,
D3D_FEATURE_LEVEL_10_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_11_1
};
//CREATE DEVICE AND CONTEXT
HRESULT hr = D3D11CreateDevice(nullptr,D3D_DRIVER_TYPE_HARDWARE,0,D3D11_CREATE_DEVICE_BGRA_SUPPORT | D3D11_CREATE_DEVICE_DEBUG
,levels,ARRAYSIZE(levels),D3D11_SDK_VERSION
,&device,&level,&context);
The returned/supported feature level is 9_1.
The program breaks at the point where I create my vertex shader, with the message "Failed To Create Vertex Shader".
When I inspect the HRESULT returned by device->CreateVertexShader(), I get the error code
E_INVALIDARG
With the debug layer enabled I got this error:
CreateVertexShader: Encoded Vertex Shader size doesn't match specified size. [ STATE_CREATION ERROR #166: CREATEVERTEXSHADER_INVALIDSHADERBYTECODE]
I tried inquiring about the size of my buffer. Not sure if this is helpful or if I did it right.
wchar_t buffer[256];
wsprintf(buffer, L"%d",sizeof((*code)->GetBufferPointer()));
MessageBox(NULL, buffer, L"A", MB_OK);
It prints 4
wchar_t buffer[256];
wsprintf(buffer, L"%d",sizeof((*code)->GetBufferSize()));
MessageBox(NULL, buffer, L"A", MB_OK);
It prints 164
Any help would be greatly appreciated. Thank you.

Eureka, I got it!
It turns out my shader profile should be combined with my feature level as follows:
Vertex Shader : vs_4_0_level_9_1
Pixel Shader : ps_4_0_level_9_1
Thanks everyone :)
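A minimal sketch of what this looks like at the call site from above, plus one related note: D3D11CreateDevice walks the pFeatureLevels array in order and creates the device with the first level it can, so listing the levels from highest to lowest lets the device come up at the best level the hardware supports instead of 9_1. The call below simply mirrors the RasterShader::Create line above with the level_9_1 profiles; treat it as a sketch, not the original code.
// Feature levels in descending order: the first level in the array that the
// hardware supports is the one the device is created with.
D3D_FEATURE_LEVEL levels[] = {
    D3D_FEATURE_LEVEL_11_1,
    D3D_FEATURE_LEVEL_11_0,
    D3D_FEATURE_LEVEL_10_1,
    D3D_FEATURE_LEVEL_10_0,
    D3D_FEATURE_LEVEL_9_3,
    D3D_FEATURE_LEVEL_9_2,
    D3D_FEATURE_LEVEL_9_1
};
// If the device still ends up on a 9_x feature level, compile with the
// 10Level9 profiles, as found above:
shader = RasterShader::Create(directx->getDevice(), L"Shader.shader", "VMain", "vs_4_0_level_9_1", L"Shader.shader", "PMain", "ps_4_0_level_9_1");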

Related

Direct3D 11 and 2D: pass the coordinates of a vertex as int and not float

My goal is to write a backend for a toolkit using only Direct3D 11 for 2D (no additional library like Direct2D, SpriteBatch, or anything else).
Note that this is the first time I have used Direct3D, and I'm currently learning D3D11.
So for now, I can display a triangle or rectangle of the color I want.
The vertex structure in my C code contains 2 floats for the position and 4 unsigned chars for the color. In my vertex shader, the vertex structure has 2 floats for the position of the vertex and 4 floats for the color.
I have noticed that if I use DXGI_FORMAT_R8G8B8A8_UNORM for the color in my D3D11_INPUT_ELEMENT_DESC array, then the color is converted automatically from the range 0 to 255 to the range 0.0f to 1.0f. That seems reasonable when I read the documentation (the DXGI_FORMAT enumeration, description of _UNORM):
"Unsigned normalized integer; which is interpreted in a resource as an unsigned integer, and is interpreted in a shader as an unsigned normalized floating-point value in the range [0, 1]. All 0's maps to 0.0f, and all 1's maps to 1.0f. A sequence of evenly spaced floating-point values from 0.0f to 1.0f are represented. For instance, a 2-bit UNORM represents 0.0f, 1/3, 2/3, and 1.0f."
Or at least that is how I interpret this doc (I may be wrong). And the color of the triangle is correct.
What I would like is the same for pixels: if I pass integers for the coordinates (x between 0 and the window width - 1, and y between 0 and the window height - 1), they should be interpreted as the correct signed normalized floating-point values by the vertex shader (-1.0f to 1.0f for x, and 1.0f to -1.0f for y). I tried several values in my Vertex C struct and D3D11_INPUT_ELEMENT_DESC array, without luck. So I have 2 questions:
Is it possible?
If it is not possible, is it faster to convert the coordinates in the C code or in the shader code (with the viewport passed in a constant buffer)? See the macros XF and YF in the code below for the conversion from int to float.
Below is my complete code that displays a simple triangle, followed by the HLSL code for the vertex and pixel shaders. I use the C API of Direct3D. I support Windows 7 and Windows 10.
Source code:
/* Windows 10 */
#define _WIN32_WINNT 0x0A00
#if defined _WIN32_WINNT && _WIN32_WINNT >= 0x0A00
# define HAVE_WIN10
#endif
#include <stdio.h>
#ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
/* C API for d3d11 */
#define COBJMACROS
#include <guiddef.h>
#ifdef HAVE_WIN10
# include <dxgi1_3.h>
#else
# include <dxgi.h>
#endif
#include <d3d11.h>
#include "d3d11_vs.h"
#include "d3d11_ps.h"
/* comment out for no debug information */
#define _DEBUG
#ifdef _DEBUG
# define FCT \
do { printf(" * %s\n", __FUNCTION__); fflush(stdout); } while (0)
#else
# define FCT \
do { } while (0)
#endif
#define XF(w,x) ((float)(2 * (x) - (w)) / (float)(w))
#define YF(h,y) ((float)((h) - 2 * (y)) / (float)(h))
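/* For example, with an 800x480 client area:
 * XF(800, 0) == -1.0f, XF(800, 400) == 0.0f, XF(800, 800) == 1.0f
 * YF(480, 0) ==  1.0f, YF(480, 240) == 0.0f, YF(480, 480) == -1.0f
 */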
typedef struct Window Window;
typedef struct D3d D3d;
struct Window
{
HINSTANCE instance;
RECT rect;
HWND win;
D3d *d3d;
};
struct D3d
{
#ifdef HAVE_WIN10
IDXGIFactory2 *dxgi_factory;
IDXGISwapChain1 *dxgi_swapchain;
#else
IDXGIFactory *dxgi_factory;
IDXGISwapChain *dxgi_swapchain;
#endif
ID3D11Device *d3d_device;
ID3D11DeviceContext *d3d_device_ctx;
ID3D11RenderTargetView *d3d_render_target_view;
ID3D11InputLayout *d3d_input_layout;
ID3D11VertexShader *d3d_vertex_shader;
ID3D11PixelShader *d3d_pixel_shader;
D3D11_VIEWPORT viewport;
Window *win;
unsigned int vsync : 1;
};
typedef struct
{
FLOAT x;
FLOAT y;
BYTE r;
BYTE g;
BYTE b;
BYTE a;
} Vertex;
void d3d_resize(D3d *d3d, UINT width, UINT height);
void d3d_render(D3d *d3d);
/************************* Window *************************/
LRESULT CALLBACK
_window_procedure(HWND window,
UINT message,
WPARAM window_param,
LPARAM data_param)
{
switch (message)
{
case WM_CLOSE:
PostQuitMessage(0);
return 0;
case WM_KEYUP:
if (window_param == 'Q')
{
PostQuitMessage(0);
}
return 0;
case WM_ERASEBKGND:
/* no need to erase back */
return 1;
/* GDI notifications */
case WM_CREATE:
#ifdef _DEBUG
printf(" * WM_CREATE\n");
fflush(stdout);
#endif
return 0;
case WM_SIZE:
{
Window *win;
#ifdef _DEBUG
printf(" * WM_SIZE\n");
fflush(stdout);
#endif
win = (Window *)GetWindowLongPtr(window, GWLP_USERDATA);
d3d_resize(win->d3d,
(UINT)LOWORD(data_param), (UINT)HIWORD(data_param));
return 0;
}
case WM_PAINT:
{
#ifdef _DEBUG
printf(" * WM_PAINT\n");
fflush(stdout);
#endif
if (GetUpdateRect(window, NULL, FALSE))
{
PAINTSTRUCT ps;
Window *win;
BeginPaint(window, &ps);
win = (Window *)GetWindowLongPtr(window, GWLP_USERDATA);
d3d_render(win->d3d);
EndPaint(window, &ps);
}
return 0;
}
default:
return DefWindowProc(window, message, window_param, data_param);
}
}
Window *window_new(int x, int y, int w, int h)
{
WNDCLASS wc;
RECT r;
Window *win;
win = (Window *)calloc(1, sizeof(Window));
if (!win)
return NULL;
win->instance = GetModuleHandle(NULL);
if (!win->instance)
goto free_win;
memset(&wc, 0, sizeof(WNDCLASS));
wc.style = CS_HREDRAW | CS_VREDRAW;
wc.lpfnWndProc = _window_procedure;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.hInstance = win->instance;
wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.hbrBackground = NULL;
wc.lpszMenuName = NULL;
wc.lpszClassName = "D3D";
if (!RegisterClass(&wc))
goto free_library;
r.left = 0;
r.top = 0;
r.right = w;
r.bottom = h;
if (!AdjustWindowRectEx(&r,
WS_OVERLAPPEDWINDOW | WS_SIZEBOX,
FALSE,
0U))
goto unregister_class;
win->win = CreateWindowEx(0U,
"D3D", "Test",
WS_OVERLAPPEDWINDOW | WS_SIZEBOX,
x, y,
r.right - r.left,
r.bottom - r.top,
NULL,
NULL, win->instance, NULL);
if (!win->win)
goto unregister_class;
return win;
unregister_class:
UnregisterClass("D2D", win->instance);
free_library:
FreeLibrary(win->instance);
free_win:
free(win);
return NULL;
}
void window_del(Window *win)
{
if (!win)
return;
DestroyWindow(win->win);
UnregisterClass("D2D", win->instance);
FreeLibrary(win->instance);
free(win);
}
void window_show(Window *win)
{
ShowWindow(win->win, SW_SHOWNORMAL);
}
/************************** D3D11 **************************/
static void d3d_refresh_rate_get(D3d *d3d, UINT *num, UINT *den)
{
DXGI_MODE_DESC *display_mode_list = NULL; /* 28 bytes */
IDXGIAdapter *dxgi_adapter;
IDXGIOutput *dxgi_output;
UINT nbr_modes;
UINT i;
HRESULT res;
*num = 0U;
*den = 1U;
if (!d3d->vsync)
return;
/* adapter of primary desktop : pass 0U */
res = IDXGIFactory_EnumAdapters(d3d->dxgi_factory, 0U, &dxgi_adapter);
if (FAILED(res))
return;
/* output of primary desktop : pass 0U */
res = IDXGIAdapter_EnumOutputs(dxgi_adapter, 0U, &dxgi_output);
if (FAILED(res))
goto release_dxgi_adapter;
/* number of mode that fit the format */
res = IDXGIOutput_GetDisplayModeList(dxgi_output,
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_ENUM_MODES_INTERLACED,
&nbr_modes, NULL);
if (FAILED(res))
goto release_dxgi_output;
printf("display mode list : %d\n", nbr_modes);
fflush(stdout);
display_mode_list = (DXGI_MODE_DESC *)malloc(nbr_modes * sizeof(DXGI_MODE_DESC));
if (!display_mode_list)
goto release_dxgi_output;
/* fill the mode list */
res = IDXGIOutput_GetDisplayModeList(dxgi_output,
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_ENUM_MODES_INTERLACED,
&nbr_modes, display_mode_list);
if (FAILED(res))
goto free_mode_list;
for (i = 0; i < nbr_modes; i++)
{
if ((display_mode_list[i].Width == (UINT)GetSystemMetrics(SM_CXSCREEN)) &&
(display_mode_list[i].Height == (UINT)GetSystemMetrics(SM_CYSCREEN)))
{
*num = display_mode_list[i].RefreshRate.Numerator;
*den = display_mode_list[i].RefreshRate.Denominator;
break;
}
}
#ifdef _DEBUG
{
DXGI_ADAPTER_DESC adapter_desc;
IDXGIAdapter_GetDesc(dxgi_adapter, &adapter_desc);
printf(" * video mem: %llu B, %llu MB\n",
adapter_desc.DedicatedVideoMemory,
adapter_desc.DedicatedVideoMemory / 1024 / 1024);
fflush(stdout);
wprintf(L" * description: %ls\n", adapter_desc.Description);
fflush(stdout);
}
#endif
free_mode_list:
free(display_mode_list);
release_dxgi_output:
IDXGIOutput_Release(dxgi_output);
release_dxgi_adapter:
IDXGIFactory_Release(dxgi_adapter);
}
D3d *d3d_init(Window *win, int vsync)
{
D3D11_INPUT_ELEMENT_DESC desc_ie[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R8G8B8A8_UNORM, 0, 2 * sizeof(float), D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
#ifdef HAVE_WIN10
DXGI_SWAP_CHAIN_DESC1 desc;
DXGI_SWAP_CHAIN_FULLSCREEN_DESC desc_fs;
#else
DXGI_SWAP_CHAIN_DESC desc;
#endif
D3d *d3d;
RECT r;
HRESULT res;
UINT flags;
UINT num;
UINT den;
D3D_FEATURE_LEVEL feature_level[4];
d3d = (D3d *)calloc(1, sizeof(D3d));
if (!d3d)
return NULL;
d3d->vsync = vsync;
win->d3d = d3d;
d3d->win = win;
/* create the DXGI factory */
flags = 0;
#ifdef HAVE_WIN10
# ifdef _DEBUG
flags = DXGI_CREATE_FACTORY_DEBUG;
# endif
res = CreateDXGIFactory2(flags, &IID_IDXGIFactory2, (void **)&d3d->dxgi_factory);
#else
res = CreateDXGIFactory(&IID_IDXGIFactory, (void **)&d3d->dxgi_factory);
#endif
if (FAILED(res))
goto free_d3d;
/* single threaded for now */
flags = D3D11_CREATE_DEVICE_SINGLETHREADED |
D3D11_CREATE_DEVICE_BGRA_SUPPORT;
#ifdef HAVE_WIN10
# ifdef _DEBUG
flags |= D3D11_CREATE_DEVICE_DEBUG;
# endif
#endif
feature_level[0] = D3D_FEATURE_LEVEL_11_1;
feature_level[1] = D3D_FEATURE_LEVEL_11_0;
feature_level[2] = D3D_FEATURE_LEVEL_10_1;
feature_level[3] = D3D_FEATURE_LEVEL_10_0;
/* create device and device context with hardware support */
res = D3D11CreateDevice(NULL,
D3D_DRIVER_TYPE_HARDWARE,
NULL,
flags,
feature_level,
4U, /* feature_level has 4 entries */
D3D11_SDK_VERSION,
&d3d->d3d_device,
NULL,
&d3d->d3d_device_ctx);
if (FAILED(res))
goto release_dxgi_factory2;
if (!GetClientRect(win->win, &r))
goto release_d3d_device;
/*
* create the swap chain. It needs some settings...
* the size of the internal buffers
* the image format
* the number of back buffers (>= 2 for flip model, see SwapEffect field)
*
* Settings are different in win 7 and win10
*/
d3d_refresh_rate_get(d3d, &num, &den);
#ifdef HAVE_WIN10
desc.Width = r.right - r.left;
desc.Height = r.bottom - r.top;
desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.Stereo = FALSE;
#else
desc.BufferDesc.Width = r.right - r.left;
desc.BufferDesc.Height = r.bottom - r.top;
desc.BufferDesc.RefreshRate.Numerator = num;
desc.BufferDesc.RefreshRate.Denominator = den;
desc.BufferDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
desc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
#endif
desc.SampleDesc.Count = 1U;
desc.SampleDesc.Quality = 0U;
desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
desc.BufferCount = 2U;
#ifdef HAVE_WIN10
desc.Scaling = DXGI_SCALING_NONE;
#else
desc.OutputWindow = win->win;
desc.Windowed = TRUE;
#endif
desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
#ifdef HAVE_WIN10
desc.AlphaMode = DXGI_ALPHA_MODE_UNSPECIFIED;
#endif
desc.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
#ifdef HAVE_WIN10
desc_fs.RefreshRate.Numerator = num;
desc_fs.RefreshRate.Denominator = den;
desc_fs.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
desc_fs.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
desc_fs.Windowed = TRUE;
#endif
#ifdef HAVE_WIN10
res = IDXGIFactory2_CreateSwapChainForHwnd(d3d->dxgi_factory,
(IUnknown *)d3d->d3d_device,
win->win,
&desc,
&desc_fs,
NULL,
&d3d->dxgi_swapchain);
#else
res = IDXGIFactory_CreateSwapChain(d3d->dxgi_factory,
(IUnknown *)d3d->d3d_device,
&desc,
&d3d->dxgi_swapchain);
#endif
if (FAILED(res))
goto release_d3d_device;
/* Vertex shader */
res = ID3D11Device_CreateVertexShader(d3d->d3d_device,
d3d_vertex_shader,
sizeof(d3d_vertex_shader),
NULL,
&d3d->d3d_vertex_shader);
if (FAILED(res))
{
printf(" * CreateVertexShader() failed\n");
goto release_dxgi_swapchain;
}
/* Pixel shader */
res = ID3D11Device_CreatePixelShader(d3d->d3d_device,
d3d_pixel_shader,
sizeof(d3d_pixel_shader),
NULL,
&d3d->d3d_pixel_shader);
if (FAILED(res))
{
printf(" * CreatePixelShader() failed\n");
goto release_vertex_shader;
}
/* create the input layout */
res = ID3D11Device_CreateInputLayout(d3d->d3d_device,
desc_ie,
sizeof(desc_ie) / sizeof(D3D11_INPUT_ELEMENT_DESC),
d3d_vertex_shader,
sizeof(d3d_vertex_shader),
&d3d->d3d_input_layout);
if (FAILED(res))
{
printf(" * CreateInputLayout() failed\n");
goto release_pixel_shader;
}
return d3d;
release_pixel_shader:
ID3D11PixelShader_Release(d3d->d3d_pixel_shader);
release_vertex_shader:
ID3D11VertexShader_Release(d3d->d3d_vertex_shader);
release_dxgi_swapchain:
#ifdef HAVE_WIN10
IDXGISwapChain1_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain1_Release(d3d->dxgi_swapchain);
#else
IDXGISwapChain_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain_Release(d3d->dxgi_swapchain);
#endif
release_d3d_device:
ID3D11DeviceContext_Release(d3d->d3d_device_ctx);
ID3D11Device_Release(d3d->d3d_device);
release_dxgi_factory2:
#ifdef HAVE_WIN10
IDXGIFactory2_Release(d3d->dxgi_factory);
#else
IDXGIFactory_Release(d3d->dxgi_factory);
#endif
free_d3d:
free(d3d);
return NULL;
}
void d3d_shutdown(D3d *d3d)
{
#ifdef _DEBUG
ID3D11Debug *d3d_debug;
HRESULT res;
#endif
if (!d3d)
return;
#ifdef _DEBUG
res = ID3D11Debug_QueryInterface(d3d->d3d_device, &IID_ID3D11Debug,
(void **)&d3d_debug);
#endif
ID3D11PixelShader_Release(d3d->d3d_pixel_shader);
ID3D11VertexShader_Release(d3d->d3d_vertex_shader);
ID3D11InputLayout_Release(d3d->d3d_input_layout);
ID3D11RenderTargetView_Release(d3d->d3d_render_target_view);
#ifdef HAVE_WIN10
IDXGISwapChain1_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain1_Release(d3d->dxgi_swapchain);
#else
IDXGISwapChain_SetFullscreenState(d3d->dxgi_swapchain, FALSE, NULL);
IDXGISwapChain_Release(d3d->dxgi_swapchain);
#endif
ID3D11DeviceContext_Release(d3d->d3d_device_ctx);
ID3D11Device_Release(d3d->d3d_device);
#ifdef HAVE_WIN10
IDXGIFactory2_Release(d3d->dxgi_factory);
#else
IDXGIFactory_Release(d3d->dxgi_factory);
#endif
free(d3d);
#ifdef _DEBUG
if (SUCCEEDED(res))
{
ID3D11Debug_ReportLiveDeviceObjects(d3d_debug, D3D11_RLDO_DETAIL);
ID3D11Debug_Release(d3d_debug);
}
#endif
}
void d3d_resize(D3d *d3d, UINT width, UINT height)
{
D3D11_RENDER_TARGET_VIEW_DESC desc_rtv;
ID3D11Texture2D *back_buffer;
HRESULT res;
FCT;
/* set viewport, depends on size of the window */
d3d->viewport.TopLeftX = 0.0f;
d3d->viewport.TopLeftY = 0.0f;
d3d->viewport.Width = (float)width;
d3d->viewport.Height = (float)height;
d3d->viewport.MinDepth = 0.0f;
d3d->viewport.MaxDepth = 1.0f;
/* release the render target view */
if (d3d->d3d_render_target_view)
ID3D11RenderTargetView_Release(d3d->d3d_render_target_view);
/* unset the render target view in the output merger */
ID3D11DeviceContext_OMSetRenderTargets(d3d->d3d_device_ctx,
0U, NULL, NULL);
/* resize the internal buffers of the swap chain to the new size */
#ifdef HAVE_WIN10
res = IDXGISwapChain1_ResizeBuffers(d3d->dxgi_swapchain,
0U, /* preserve buffer count */
width, height,
DXGI_FORMAT_UNKNOWN, /* preserve format */
0U);
#else
res = IDXGISwapChain_ResizeBuffers(d3d->dxgi_swapchain,
0U, /* preserve buffer count */
width, height,
DXGI_FORMAT_UNKNOWN, /* preserve format */
0U);
#endif
if ((res == DXGI_ERROR_DEVICE_REMOVED) ||
(res == DXGI_ERROR_DEVICE_RESET) ||
(res == DXGI_ERROR_DRIVER_INTERNAL_ERROR))
{
return;
}
if (FAILED(res))
{
printf("ResizeBuffers() failed\n");
fflush(stdout);
return;
}
/* get the internal buffer of the swap chain */
#ifdef HAVE_WIN10
res = IDXGISwapChain1_GetBuffer(d3d->dxgi_swapchain, 0,
&IID_ID3D11Texture2D,
(void **)&back_buffer);
#else
res = IDXGISwapChain_GetBuffer(d3d->dxgi_swapchain, 0,
&IID_ID3D11Texture2D,
(void **)&back_buffer);
#endif
if (FAILED(res))
{
printf("swapchain GetBuffer() failed\n");
fflush(stdout);
return;
}
ZeroMemory(&desc_rtv, sizeof(D3D11_RENDER_TARGET_VIEW_DESC));
desc_rtv.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc_rtv.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
/* create the new render target view from this internal buffer */
res = ID3D11Device_CreateRenderTargetView(d3d->d3d_device,
(ID3D11Resource *)back_buffer,
&desc_rtv,
&d3d->d3d_render_target_view);
ID3D11Texture2D_Release(back_buffer);
}
/*** triangle ***/
typedef struct
{
Vertex vertices[3];
unsigned int indices[3];
ID3D11Buffer *vertex_buffer;
ID3D11Buffer *index_buffer; /* not useful for a single triangle */
UINT stride;
UINT offset;
UINT count;
UINT index_count;
} Triangle;
Triangle *triangle_new(D3d *d3d,
int w, int h,
int x1, int y1,
int x2, int y2,
int x3, int y3,
unsigned char r,
unsigned char g,
unsigned char b,
unsigned char a)
{
D3D11_BUFFER_DESC desc;
D3D11_SUBRESOURCE_DATA sr_data;
Triangle *t;
HRESULT res;
t = (Triangle *)malloc(sizeof(Triangle));
if (!t)
return NULL;
t->vertices[0].x = XF(w, x1);
t->vertices[0].y = YF(h, y1);
t->vertices[0].r = r;
t->vertices[0].g = g;
t->vertices[0].b = b;
t->vertices[0].a = a;
t->vertices[1].x = XF(w, x2);
t->vertices[1].y = YF(h, y2);
t->vertices[1].r = r;
t->vertices[1].g = g;
t->vertices[1].b = b;
t->vertices[1].a = a;
t->vertices[2].x = XF(w, x3);
t->vertices[2].y = YF(h, y3);
t->vertices[2].r = r;
t->vertices[2].g = g;
t->vertices[2].b = b;
t->vertices[2].a = a;
/* useful only for the rectangle later */
t->indices[0] = 0;
t->indices[1] = 1;
t->indices[2] = 2;
t->stride = sizeof(Vertex);
t->offset = 0U;
t->index_count = 3U;
desc.ByteWidth = sizeof(t->vertices);
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc.MiscFlags = 0U;
desc.StructureByteStride = 0U;
sr_data.pSysMem = t->vertices;
sr_data.SysMemPitch = 0U;
sr_data.SysMemSlicePitch = 0U;
res = ID3D11Device_CreateBuffer(d3d->d3d_device,
&desc,
&sr_data,
&t->vertex_buffer);
if (FAILED(res))
{
free(t);
return NULL;
}
desc.ByteWidth = sizeof(t->indices);
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc.MiscFlags = 0U;
desc.StructureByteStride = 0U;
sr_data.pSysMem = t->indices;
sr_data.SysMemPitch = 0U;
sr_data.SysMemSlicePitch = 0U;
res = ID3D11Device_CreateBuffer(d3d->d3d_device,
&desc,
&sr_data,
&t->index_buffer);
if (FAILED(res))
{
free(t);
return NULL;
}
return t;
}
void triangle_free(Triangle *t)
{
if (!t)
return;
ID3D11Buffer_Release(t->index_buffer);
ID3D11Buffer_Release(t->vertex_buffer);
free(t);
}
void d3d_render(D3d *d3d)
{
#ifdef HAVE_WIN10
DXGI_PRESENT_PARAMETERS pp;
#endif
const FLOAT color[4] = { 0.10f, 0.18f, 0.24f, 1.0f };
RECT rect;
HRESULT res;
FCT;
if (!GetClientRect(d3d->win->win, &rect))
{
return;
}
/* scene */
Triangle *t;
t = triangle_new(d3d,
rect.right - rect.left,
rect.bottom - rect.top,
320, 120,
480, 360,
160, 360,
255, 255, 0, 255); /* r, g, b, a */
/* clear render target */
ID3D11DeviceContext_ClearRenderTargetView(d3d->d3d_device_ctx,
d3d->d3d_render_target_view,
color);
/* Input Assembler (IA) */
/* TRIANGLESTRIP only useful for the rectangle later */
ID3D11DeviceContext_IASetPrimitiveTopology(d3d->d3d_device_ctx,
D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
ID3D11DeviceContext_IASetInputLayout(d3d->d3d_device_ctx,
d3d->d3d_input_layout);
ID3D11DeviceContext_IASetVertexBuffers(d3d->d3d_device_ctx,
0,
1,
&t->vertex_buffer,
&t->stride,
&t->offset);
ID3D11DeviceContext_IASetIndexBuffer(d3d->d3d_device_ctx,
t->index_buffer,
DXGI_FORMAT_R32_UINT,
0);
/* vertex shader */
ID3D11DeviceContext_VSSetShader(d3d->d3d_device_ctx,
d3d->d3d_vertex_shader,
NULL,
0);
/* pixel shader */
ID3D11DeviceContext_PSSetShader(d3d->d3d_device_ctx,
d3d->d3d_pixel_shader,
NULL,
0);
/* set viewport in the Rasterizer Stage */
ID3D11DeviceContext_RSSetViewports(d3d->d3d_device_ctx, 1U, &d3d->viewport);
/* Output merger */
ID3D11DeviceContext_OMSetRenderTargets(d3d->d3d_device_ctx,
1U, &d3d->d3d_render_target_view,
NULL);
/* draw */
ID3D11DeviceContext_DrawIndexed(d3d->d3d_device_ctx,
t->index_count,
0, 0);
triangle_free(t);
/*
* present frame, that is flip the back buffer and the front buffer
* if no vsync, we present immediately
*/
#ifdef HAVE_WIN10
pp.DirtyRectsCount = 0;
pp.pDirtyRects = NULL;
pp.pScrollRect = NULL;
pp.pScrollOffset = NULL;
res = IDXGISwapChain1_Present1(d3d->dxgi_swapchain,
d3d->vsync ? 1 : 0, 0, &pp);
#else
res = IDXGISwapChain_Present(d3d->dxgi_swapchain,
d3d->vsync ? 1 : 0, 0);
#endif
if (res == DXGI_ERROR_DEVICE_RESET || res == DXGI_ERROR_DEVICE_REMOVED)
{
printf("device removed or lost, need to recreate everything\n");
fflush(stdout);
}
else if (res == DXGI_STATUS_OCCLUDED)
{
printf("window is not visible, so vsync won't work. Let's sleep a bit to reduce CPU usage\n");
fflush(stdout);
}
}
int main()
{
Window *win;
D3d *d3d;
/* remove scaling on HiDPI */
#ifdef HAVE_WIN10
SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_SYSTEM_AWARE);
#endif
win = window_new(100, 100, 800, 480);
if (!win)
return 1;
d3d = d3d_init(win, 0);
if (!d3d)
goto del_window;
SetWindowLongPtr(win->win, GWLP_USERDATA, (LONG_PTR)win);
window_show(win);
/* message loop */
while (1)
{
MSG msg;
BOOL ret;
ret = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE);
if (ret)
{
do
{
if (msg.message == WM_QUIT)
goto beach;
TranslateMessage(&msg);
DispatchMessageW(&msg);
} while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE));
}
}
beach:
d3d_shutdown(d3d);
window_del(win);
return 0;
del_window:
window_del(win);
printf(" error\n");
fflush(stdout);
return 1;
}
Vertex shader:
struct vs_input
{
float2 position : POSITION;
float4 color : COLOR;
};
struct ps_input
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
ps_input main(vs_input input )
{
ps_input output;
output.position = float4(input.position, 0.0f, 1.0f);
output.color = input.color;
return output;
}
Pixel shader:
struct ps_input
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
float4 main(ps_input input) : SV_TARGET
{
return input.color;
}
Thank you.
If you want to use pixel coordinates for your vertices, you can use one of these two formats:
DXGI_FORMAT_R32G32_FLOAT (same as you use right now, pixel coordinates in floating point)
DXGI_FORMAT_R32G32_UINT (pixel coordinates as int, the vertex shader position input becomes uint2 position : POSITION)
If you use FLOAT, the conversion to float is done on the C side; if you use UINT, the conversion is done on the vertex shader side. Any speed difference would need profiling; if the number of vertices is low, I'd expect it to be negligible.
You can then easily remap those values into the -1 to 1 range in the vertex shader (which is quite efficient); you only need to pass the inverse viewport size in a constant buffer.
So your vertex shader becomes:
struct vs_input
{
float2 position : POSITION;
//uint2 position : POSITION; If you use UINT
float4 color : COLOR;
};
struct ps_input
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
cbuffer cbViewport : register(b0)
{
float2 inverseViewportSize;
}
ps_input main(vs_input input )
{
ps_input output;
float2 p = input.position; //if you use UINT, conversion is done here
p *= inverseViewportSize;
p *= 2.0f;
p -= 1.0f;
p.y *= -1.0f; // clip space is bottom to top, pixel coordinates are top to bottom
output.position = float4(p, 0.0f, 1.0f);
output.color = input.color;
return output;
}
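On the application side, the inverseViewportSize constant buffer at register b0 has to be created and bound before drawing. Here is a minimal sketch of that, written with the C++ API for brevity; with the C API used in the question, the equivalent COBJMACROS calls (ID3D11Device_CreateBuffer, ID3D11DeviceContext_VSSetConstantBuffers) would be used. The device, context, width, and height variables are assumed to come from the surrounding setup code.
// Constant buffer layout matching cbViewport; padded to 16 bytes because
// constant buffer sizes must be multiples of 16.
struct CbViewport
{
    float inverseViewportSize[2];
    float padding[2];
};

CbViewport cb = { { 1.0f / (float)width, 1.0f / (float)height }, { 0.0f, 0.0f } };

D3D11_BUFFER_DESC desc = {};
desc.ByteWidth = sizeof(CbViewport);
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;

D3D11_SUBRESOURCE_DATA init = {};
init.pSysMem = &cb;

ID3D11Buffer *cb_viewport = NULL;
HRESULT hr = device->CreateBuffer(&desc, &init, &cb_viewport);
if (SUCCEEDED(hr))
    context->VSSetConstantBuffers(0, 1, &cb_viewport); // slot 0 == register(b0)
// On resize, refresh the contents with UpdateSubresource (or Map/Unmap on a
// dynamic buffer) so the shader always sees the current viewport size.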

How do I get D3D11 to render pixels with an alpha value of 0 in the texture as transparent?

I used DrawIconEx (GDI/D3D11 interoperability and CopyResource) to generate an ID3D11Texture2D that has many pixels with an alpha channel value of 0. This texture has been verified by inspecting the pixel values via D3D11_USAGE_STAGING/Map and by saving a PNG with ScreenGrab (the relevant code needs to be modified for DXGI_FORMAT_B8G8R8A8_UNORM: use GUID_WICPixelFormat32bppBGRA instead of GUID_WICPixelFormat24bppBGR).
When I use the texture rendering method from Tutorial 5: Texturing, pixels with an alpha value of 0 are rendered as black, which is not what I want; I would like those pixels to be rendered as transparent. What needs to be done to achieve this? Here is my relevant code:
HRESULT CGraphRender::Init()
{
...
// Create an alpha enabled blend state description.
_blend_state = nullptr;
D3D11_BLEND_DESC blendDesc;
ZeroMemory(&blendDesc, sizeof(D3D11_BLEND_DESC));
blendDesc.RenderTarget[0].BlendEnable = TRUE;
blendDesc.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
blendDesc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
blendDesc.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
blendDesc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ZERO;
blendDesc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
hr = _d3d_device->CreateBlendState(&blendDesc, &_blend_state);
RETURN_ON_FAIL(hr);
....
}
HRESULT CGraphRender::Clear_3D(float color[])
{
ID3D11RenderTargetView* rtv[] = { _back_rendertarget_view };
_immediate_context->OMSetRenderTargets(_countof(rtv), rtv, nullptr);
_immediate_context->ClearRenderTargetView(_back_rendertarget_view, color);
float blendFactor[4] = { 1.f, 1.f, 1.f, 1.f };
_immediate_context->OMSetBlendState(_blend_state, blendFactor, 0xffffffff);
return S_OK;
}
The problem has been solved: save the current blend state with OMGetBlendState, set _blend_state before rendering the "alpha" texture, and restore the saved blend state after rendering:
HRESULT CGraphRender::DrawTexture(const std::shared_ptr<CDrawTextureShader>& texture, const RECT& dst_rect, const BOOL& is_blend_alpha)
{
CComPtr<ID3D11DeviceContext> immediate_context;
_d3d_device->GetImmediateContext(&immediate_context);
if (!immediate_context)
{
return E_UNEXPECTED;
}
if (is_blend_alpha)
{
CComPtr<ID3D11BlendState> old_blend_state;
FLOAT old_blend_factor[4] = { 0.f };
UINT old_sample_mask = 0;
immediate_context->OMGetBlendState(&old_blend_state, old_blend_factor, &old_sample_mask);
float blend_factor[4] = { 1.f, 1.f, 1.f, 1.f };
immediate_context->OMSetBlendState(_blend_state, blend_factor, 0xffffffff);
HRESULT hr = texture->Render(immediate_context, dst_rect);
immediate_context->OMSetBlendState(old_blend_state, old_blend_factor, old_sample_mask);
return hr;
}
else
{
return texture->Render(immediate_context, dst_rect);
}
}
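As a side note on the restore step: if there is no specific previous state to preserve, binding a null blend state selects the device default (blending disabled), so the save/restore pair could be replaced by something like the sketch below (using the same immediate_context as above).
// Pass NULL to go back to the device's default blend state (blending disabled)
// once the alpha texture has been drawn.
float default_factor[4] = { 1.0f, 1.0f, 1.0f, 1.0f };
immediate_context->OMSetBlendState(nullptr, default_factor, 0xffffffff);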

How to cache an AVI file using FFmpeg

I am reading an AVI file using FFmpeg.
I want to cache the file into a vector and reuse it later.
This is my code.
typedef struct {
AVFormatContext *fmt_ctx;
int stream_idx;
AVStream *video_stream;
AVCodecContext *codec_ctx;
AVCodec *decoder;
AVPacket *packet;
AVFrame *av_frame;
AVFrame *gl_frame;
struct SwsContext *conv_ctx;
unsigned int frame_tex;
}AppData;
AppData data;
Here I am caching the decoded frames into a std::vector:
std::vector< AVFrame* > cache;
bool initReadFrame()
{
do {
glBindTexture(GL_TEXTURE_2D, data.frame_tex);
int error = av_read_frame(data.fmt_ctx, data.packet);
if (error)
{
av_free_packet(data.packet);
return false;
}
if (data.packet->stream_index == data.stream_idx)
{
int frame_finished = 0;
if (avcodec_decode_video2(data.codec_ctx, data.av_frame, &frame_finished,
data.packet) < 0) {
av_free_packet(data.packet);
return false;
}
if (frame_finished)
{
if (!data.conv_ctx)
{
data.conv_ctx = sws_getContext(data.codec_ctx->width,
data.codec_ctx->height, data.codec_ctx->pix_fmt,
data.codec_ctx->width, data.codec_ctx->height, AV_PIX_FMT_RGBA,
SWS_BICUBIC, NULL, NULL, NULL);
}
sws_scale(data.conv_ctx, data.av_frame->data, data.av_frame->linesize, 0,
data.codec_ctx->height, data.gl_frame->data, data.gl_frame->linesize);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, data.codec_ctx->width,
data.codec_ctx->height, GL_RGBA, GL_UNSIGNED_BYTE,
data.gl_frame->data[0]);
cache.push_back(av_frame_clone(data.gl_frame)); // Pushing AVFrame* to vector
}
}
av_free_packet(data.packet);
} while (data.packet->stream_index != data.stream_idx);
return true;
}
Here I am trying to read the cached frames and update GL_TEXTURE_2D:
void playCache()
{
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, data.codec_ctx->width,
data.codec_ctx->height, GL_RGBA, GL_UNSIGNED_BYTE,
cache[temp]->data[0]);
temp++;
}
The issue I am facing is that when I try to read the cached data, the application crashes.
You are storing a dead reference in your cache.
cache.push_back(av_frame_clone(data.gl_frame));
The documentation says:
av_frame_clone: Create a new frame that references the same data as src.
When you destroy src, you lose its content and you can't access it from your cache.
You can try to move the ref to your new frame, or to copy its values.
Move:
AVFrame* cachedValue = av_frame_alloc();
av_frame_move_ref(cachedValue, data.gl_frame);
cache.push_back(cachedValue);
Copy
AVFrame *cachedValue= av_frame_alloc();
cachedValue->format = data.gl_frame->format;
cachedValue->width = data.gl_frame->width;
cachedValue->height = data.gl_frame->height;
cachedValue->channels = data.gl_frame->channels;
cachedValue->channel_layout = data.gl_frame->channel_layout;
cachedValue->nb_samples = data.gl_frame->nb_samples;
av_frame_get_buffer(cachedValue, 32);
av_frame_copy(cachedValue, data.gl_frame);
av_frame_copy_props(cachedValue, data.gl_frame);
cache.push_back(cachedValue);
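Whichever variant you pick, the cached frames hold their own references, so they should be released once the cache is no longer needed. A minimal sketch, assuming the same cache vector:
// Free every cached frame; av_frame_free also unreferences its buffers
// and sets the pointer to NULL.
for (AVFrame*& frame : cache)
    av_frame_free(&frame);
cache.clear();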
/////////////////////////////////////////////////
avformat_network_init();
initializeAppData();
// open video
if (avformat_open_input(&data.fmt_ctx, stdstrPathOfVideo.c_str(), NULL, NULL) < 0) {
clearAppData();
return;
}
// find stream info
if (avformat_find_stream_info(data.fmt_ctx, NULL) < 0) {
clearAppData();
return;
}
// dump debug info
// av_dump_format(data.fmt_ctx, 0, "D:\\E\\Event\\2019\\AVI_Badges\\Generic\\Generic.avi", 0);
// find the video stream
for (unsigned int i = 0; i < data.fmt_ctx->nb_streams; ++i)
{
if (data.fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
data.stream_idx = i;
break;
}
}
if (data.stream_idx == -1)
{
clearAppData();
return;
}
data.video_stream = data.fmt_ctx->streams[data.stream_idx];
data.codec_ctx = data.video_stream->codec;
// find the decoder
data.decoder = avcodec_find_decoder(data.codec_ctx->codec_id);
if (data.decoder == NULL)
{
clearAppData();
return;
}
// open the decoder
if (avcodec_open2(data.codec_ctx, data.decoder, NULL) < 0)
{
clearAppData();
return;
}
// allocate the video frames
data.av_frame = av_frame_alloc();
data.gl_frame = av_frame_alloc();
int size = avpicture_get_size(AV_PIX_FMT_RGBA, data.codec_ctx->width,
data.codec_ctx->height);
uint8_t *internal_buffer = (uint8_t *)av_malloc(size * sizeof(uint8_t));
avpicture_fill((AVPicture *)data.gl_frame, internal_buffer, AV_PIX_FMT_RGBA,
data.codec_ctx->width, data.codec_ctx->height);
data.packet = (AVPacket *)av_malloc(sizeof(AVPacket));

HLSL Stream Out Entries don't work correctly

I want to implement a particle system based on a stream-out structure in my larger project. I read a few articles about the method and built a single particle. It works almost correctly, but in the geometry shader with stream out I can't get the values of InitVel.z and Age; they are always 0. If I change the order (for example, putting Age before Position), Age works fine, but the 6th float in the order is still 0. It looks like only the first 5 values get pushed.
I have no idea what I'm doing wrong, because I have tried almost everything: creating an input layout for the vertex that matches the SO declaration entries, changing the stride to a static 28, and changing it to 32 (but in that case the drawing is chaotic, so the stride size is probably fine). I think it is a problem with the limit on the number of entries in the SO declaration, but on MSDN I saw that the limit for DirectX is D3D11_SO_STREAM_COUNT (4) * D3D11_SO_OUTPUT_COMPONENT_COUNT (128), not 5. Please can you look at this code and point me toward implementing it correctly? Thanks a lot for the help.
Structure of particle
struct Particle{
Particle() {}
Particle(float x, float y, float z,float vx, float vy, float vz,float
l /*UINT typ*/)
:InitPos(x, y, z), InitVel(vx, vy, vz), Age(l) /*, Type(typ)*/{}
XMFLOAT3 InitPos;
XMFLOAT3 InitVel;
float Age;
//UINT Type;
};
SO Entry
D3D11_SO_DECLARATION_ENTRY PartlayoutSO[] =
{
{ 0,"POSITION", 0, 0 , 3, 0 }, // output all components of position
{ 0,"VELOCITY", 0, 0, 3, 0 },
{ 0,"AGE", 0, 0, 1, 0 }
//{ 0,"TYPE", 0, 0, 1, 0 }
};
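For reference, the three entries above declare 3 + 3 + 1 = 7 output components, i.e. 28 bytes per vertex, which has to match the stride passed to CreateGeometryShaderWithStreamOutput. A minimal compile-time check against the Particle struct above (an addition for illustration, not part of the original code):
// 3 floats (InitPos) + 3 floats (InitVel) + 1 float (Age) = 28 bytes.
static_assert(sizeof(Particle) == 7 * sizeof(float),
    "stream-out stride must match the SO declaration components");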
Global Variables
//streamout shaders
ID3D11VertexShader* Part_VSSO;
ID3D11GeometryShader* Part_GSSO;
ID3DBlob *Part_GSSO_Buffer;
ID3DBlob *Part_VSSO_Buffer;
//normal shaders
ID3D11VertexShader* Part_VS;
ID3D11GeometryShader* Part_GS;
ID3DBlob *Part_GS_Buffer;
ID3D11PixelShader* Part_PS;
ID3DBlob *Part_VS_Buffer;
ID3DBlob *Part_PS_Buffer;
ID3D11Buffer* PartVertBufferInit;
//ID3D11Buffer* Popy;
ID3D11Buffer* mDrawVB;
ID3D11Buffer* mStreamOutVB;
ID3D11InputLayout* PartVertLayout;// I try to set input layout too
void ParticleSystem::InitParticles()
{
mFirstRun = true;
srand(time(NULL));
hr = D3DCompileFromFile(L"ParticleVertexShaderSO4.hlsl", NULL,
D3D_COMPILE_STANDARD_FILE_INCLUDE, "main", "vs_5_0", NULL, NULL,
&Part_VSSO_Buffer, NULL);
hr = D3DCompileFromFile(L"ParticleGeometryShaderSO4.hlsl", NULL,
D3D_COMPILE_STANDARD_FILE_INCLUDE, "main", "gs_5_0", NULL, NULL,
&Part_GSSO_Buffer, NULL);
UINT StrideArray[1] = { sizeof(Particle) }; // I tried setting a static 28 bytes (7 floats * 4 bytes each)
hr = device->CreateVertexShader(Part_VSSO_Buffer->GetBufferPointer(),
Part_VSSO_Buffer->GetBufferSize(), NULL, &Part_VSSO);
hr = device->CreateGeometryShaderWithStreamOutput(Part_GSSO_Buffer->GetBufferPointer(),
Part_GSSO_Buffer->GetBufferSize(), PartlayoutSO, 3 /* sizeof(PartlayoutSO) */,
StrideArray, 1, D3D11_SO_NO_RASTERIZED_STREAM, NULL, &Part_GSSO);
//Draw Shaders
hr = D3DCompileFromFile(L"ParticleVertexShaderDRAW4.hlsl", NULL,
D3D_COMPILE_STANDARD_FILE_INCLUDE, "main", "vs_5_0", NULL, NULL,
&Part_VS_Buffer, NULL);
hr = D3DCompileFromFile(L"ParticleGeometryShaderDRAW4.hlsl", NULL,
D3D_COMPILE_STANDARD_FILE_INCLUDE, "main", "gs_5_0", NULL, NULL,
&Part_GS_Buffer, NULL);
hr = D3DCompileFromFile(L"ParticlePixelShaderDRAW4.hlsl", NULL,
D3D_COMPILE_STANDARD_FILE_INCLUDE, "main", "ps_5_0", NULL, NULL,
&Part_PS_Buffer, NULL);
hr = device->CreateVertexShader(Part_VS_Buffer->GetBufferPointer(),
Part_VS_Buffer->GetBufferSize(), NULL, &Part_VS);
hr = device->CreateGeometryShader(Part_GS_Buffer->GetBufferPointer(),
Part_GS_Buffer->GetBufferSize(), NULL, &Part_GS);
hr = device->CreatePixelShader(Part_PS_Buffer->GetBufferPointer(),
Part_PS_Buffer->GetBufferSize(), NULL, &Part_PS);
BuildVertBuffer();
}
void ParticleSystem::BuildVertBuffer()
{
D3D11_BUFFER_DESC vertexBufferDesc1;
ZeroMemory(&vertexBufferDesc1, sizeof(vertexBufferDesc1));
vertexBufferDesc1.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc1.ByteWidth = sizeof(Particle)*1; //*numParticles;
vertexBufferDesc1.BindFlags = D3D11_BIND_VERTEX_BUFFER; // | D3D11_BIND_STREAM_OUTPUT;
vertexBufferDesc1.CPUAccessFlags = 0;
vertexBufferDesc1.MiscFlags = 0;
vertexBufferDesc1.StructureByteStride = 0;// I tried to comment this too
Particle p;
ZeroMemory(&p, sizeof(Particle));
p.InitPos = XMFLOAT3(0.0f, 0.0f, 0.0f);
p.InitVel = XMFLOAT3(0.0f, 0.0f, 0.0f);
p.Age = 0.0f;
//p.Type = 100.0f;
D3D11_SUBRESOURCE_DATA vertexBufferData1;
ZeroMemory(&vertexBufferData1, sizeof(vertexBufferData1));
vertexBufferData1.pSysMem = &p; // was &p
vertexBufferData1.SysMemPitch = 0;
vertexBufferData1.SysMemSlicePitch = 0;
hr = device->CreateBuffer(&vertexBufferDesc1, &vertexBufferData1,
&PartVertBufferInit);
ZeroMemory(&vertexBufferDesc1, sizeof(vertexBufferDesc1));
vertexBufferDesc1.ByteWidth = sizeof(Particle) * numParticles;
vertexBufferDesc1.BindFlags = D3D11_BIND_VERTEX_BUFFER |
D3D11_BIND_STREAM_OUTPUT;
hr = device->CreateBuffer(&vertexBufferDesc1, 0, &mDrawVB);
hr = device->CreateBuffer(&vertexBufferDesc1, 0, &mStreamOutVB);
}
void ParticleSystem::LoadDataParticles()
{
UINT stride = sizeof(Particle);
UINT offset = 0;
//Create the Input Layout
//device->CreateInputLayout(Partlayout, numElementsPart, Part_VSSO_Buffer-
//>GetBufferPointer(),
// Part_VSSO_Buffer->GetBufferSize(), &PartVertLayout);
//Set the Input Layout
//context->IASetInputLayout(PartVertLayout);
//Set Primitive Topology
context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
if (mFirstRun)
{
// context->CopyResource(Popy, PartVertBufferInit);
context->IASetVertexBuffers(0, 1, &PartVertBufferInit, &stride,
&offset);
}
else
{
context->IASetVertexBuffers(0, 1, &mDrawVB, &stride, &offset);
}
context->SOSetTargets(1, &mStreamOutVB, &offset);
context->VSSetShader(Part_VSSO, NULL, 0);
context->GSSetShader(Part_GSSO, NULL, 0);
context->PSSetShader(NULL, NULL, 0);
//context->PSSetShader(Part_PS, NULL, 0);
ID3D11DepthStencilState* depthState;//disable depth
D3D11_DEPTH_STENCIL_DESC depthStateDesc;
depthStateDesc.DepthEnable = false;
depthStateDesc.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ZERO;
device->CreateDepthStencilState(&depthStateDesc, &depthState);
context->OMSetDepthStencilState(depthState, 0);
if (mFirstRun)
{
//mFirstRun;
context->Draw(1, 0);
mFirstRun = false;
}
else
{
context->DrawAuto();
}
//}
// done streaming-out--unbind the vertex buffer
ID3D11Buffer* bufferArray[1] = { 0 };
context->SOSetTargets(1, bufferArray, &offset);
// ping-pong the vertex buffers
std::swap(mStreamOutVB, mDrawVB);
// Draw the updated particle system we just streamed-out.
//Create the Input Layout
//device->CreateInputLayout(Partlayout, numElementsPart, Part_VS_Buffer-
//>GetBufferPointer(),
// Part_VS_Buffer->GetBufferSize(), &PartVertLayout);
//Set the normal Input Layout
//context->IASetInputLayout(PartVertLayout);
context->IASetVertexBuffers(0, 1, &mDrawVB, &stride, &offset);
ZeroMemory(&depthStateDesc, sizeof(depthStateDesc));
depthStateDesc.DepthEnable = true;
depthStateDesc.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ZERO;
device->CreateDepthStencilState(&depthStateDesc, &depthState);
context->OMSetDepthStencilState(depthState, 0);
//I tried add normal layout here the same like Entry SO but no changes
//Set Primitive Topology
//context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
context->VSSetShader(Part_VS, NULL, 0);
context->GSSetShader(Part_GS, NULL, 0);
context->PSSetShader(Part_PS, NULL, 0);
context->DrawAuto();
//mFirstRun = true;
context->GSSetShader(NULL, NULL, 0);
}
void ParticleSystem::RenderParticles()
{
//mFirstRun = true;
LoadDataParticles();
}
And here is the shader code:
VertexShader to stream out
struct Particle
{
float3 InitPos : POSITION;
float3 InitVel : VELOCITY;
float Age : AGE;
//uint Type : TYPE;
};
Particle main(Particle vin)
{
return vin;// just push data into geomtrywithso
}
Geometry shader with stream output:
struct Particle
{
float3 InitPos : POSITION;
float3 InitVel : VELOCITY;
float Age : AGE;
//uint Type : TYPE;
};
float RandomPosition(float offset)
{
float u = Time + offset;// (Time + offset);
float v = ObjTexture13.SampleLevel(ObjSamplerState, u, 0).r;
return (v);
}
[maxvertexcount(6)]
void main(
point Particle gin[1],
inout PointStream< Particle > Output
)
{
//gin[0].Age = Time;
if ( StartPart == 1.0f )
{
//if (gin[0].Age < 100.0f)
//{
for (int i = 0; i < 6; i++)
{
float3 VelRandom; //= 5.0f * RandomPosition((float)i / 5.0f);
VelRandom.y = 10.0f+i;
VelRandom.x = 35 * i* RandomPosition((float)i / 5.0f);//+ offse;
VelRandom.z = 10.0f;//35*i * RandomPosition((float)i / 5.0f);
Particle p;
p.InitPos = VelRandom;//float3(0.0f, 5.0f, 0.0f); //+ VelRandom;
p.InitVel = float3(10.0f, 10.0f, 10.0f);
p.Age = 0.0f;//VelRandom.y;
//p.Type = PT_FLARE;
Output.Append(p);
}
Output.Append(gin[0]);
}
else if (StartPart == 0.0f)
{
if (gin[0].Age >= 0)
{
Output.Append(gin[0]);
}
}
}
If I change Age in the geometry shader with SO (for example Age += Time from the constant buffer), it is fine once in the geometry shader, but in the draw shader it is 0, and the next time it is read in the geometry shader with SO it is 0 again.
Vertex shader to draw
struct VertexOut
{
float3 Pos : POSITION;
float4 Colour : COLOR;
//uint Type : TYPE;
};
struct Particle
{
float3 InitPos : POSITION;
float3 InitVel : VELOCITY;
float Age : AGE;
// uint Type : TYPE;
};
VertexOut main(Particle vin)
{
VertexOut vout;
float3 gAccelW = float3(0.0f, -0.98f, 0.0f);
float t = vin.Age;
//float b = Time/10000;
// constant Acceleration equation
vout.Pos = vin.InitVel+ (0.7f * gAccelW)*Time/100;
//vout.Pos.x = t;
vout.Colour = float4(1.0f, 0.0f, 0.0f, 1.0f);
//vout.Age = vout.Pos.y;
//vout.Type = vin.Type;
return vout;
}
Geometry shader to change point into line
struct VertexOut
{
float3 Pos : POSITION;
float4 Colour : COLOR;
//uint Type : TYPE;
};
struct GSOutput
{
float4 Pos : SV_POSITION;
float4 Colour : COLOR;
//float2 Tex : TEXCOORD;
};
[maxvertexcount(2)]
void main(
point VertexOut gin[1],
inout LineStream< GSOutput > Output
)
{
float3 gAccelW = float3(0.0f, -0.98f, 0.0f);
//if (gin[0].Type != PT_EMITTER)
{
float4 v[2];
v[0] = float4(gin[0].Pos, 1.0f);
v[1] = float4((gin[0].Pos + gAccelW), 1.0f);
GSOutput gout;
[unroll]
for (int i = 0; i < 2; ++i)
{
gout.Pos = mul(v[i], WVP);// mul(v[i], gViewProj);
gout.Colour = gin[0].Colour;
Output.Append(gout);
}
}
}
And the pixel shader:
struct GSOutput
{
float4 Pos : SV_POSITION;
float4 Colour : COLOR;
};
float4 main(GSOutput pin) : SV_TARGET
{
return pin.Colour;
}

CreateBitmapFromWicBitmap access violation

Here's my problem:
I'm working on a test project that initializes Direct2D to draw some stuff.
For now, I need to create a BMP background, so I looked at some tutorials on MSDN about loading a BMP for use with Direct2D. After some coding and debugging I came to the only problem I can't fully comprehend and, well, I'm stuck here. The problem is simple: I get an ACCESS VIOLATION at this line:
pRenderTarget->CreateBitmapFromWicBitmap( pConverter, NULL, ppBitmap);
I fixed all the issues I could find and tested every HRESULT.
Here's the full code of Background.cpp:
#include "Background.h"
using namespace D2D1;
Background::Background()
{
pIWICFactory = nullptr;
pDecoder = nullptr;
pSource = nullptr;
pStream = nullptr;
pConverter = nullptr;
ppBitmap = nullptr;
destinationWidth = 0;
destinationHeight = 0;
file_path = L"Background.bmp";
}
bool Background::init(HWND hwnd)
{
CoInitializeEx(0, COINIT_MULTITHREADED);
CoCreateInstance( CLSID_WICImagingFactory, NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pIWICFactory));
pIWICFactory->CreateDecoderFromFilename(file_path, NULL, GENERIC_READ, WICDecodeMetadataCacheOnLoad, &pDecoder);
pIWICFactory->CreateStream(&pStream);
pStream->InitializeFromFilename(file_path, GENERIC_READ);
pDecoder->Initialize(pStream,WICDecodeMetadataCacheOnLoad);
pDecoder->GetFrame(0, &pSource);
pIWICFactory->CreateFormatConverter(&pConverter);
pConverter->Initialize(pSource, GUID_WICPixelFormat32bppPBGRA, WICBitmapDitherTypeNone, NULL, 0.f, WICBitmapPaletteTypeMedianCut);
pRenderTarget->CreateBitmapFromWicBitmap( pConverter, NULL, ppBitmap);
return true;
}
Background::~Background()
{
CoUninitialize();
pIWICFactory->Release();
pDecoder->Release();
pSource->Release();
pStream->Release();
pConverter->Release();
CoUninitialize();
}
And here's the parent class, Render.cpp (there's no connection between how I managed the class inheritance and the problem; I tried creating a new project that contained only one class, including both the render and background code):
#include "Render.h"
using namespace D2D1;
Render::Render()
{
pD2DFactory = nullptr;
pRenderTarget = nullptr;
pGreenBrush = nullptr;
}
bool Render::Init(HWND hwnd)
{
HRESULT hr = D2D1CreateFactory( D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory );
RECT rc;
GetClientRect(hwnd, &rc);
hr = pD2DFactory->CreateHwndRenderTarget(RenderTargetProperties(), HwndRenderTargetProperties(hwnd, SizeU( rc.right - rc.left, rc.bottom - rc.top)),&pRenderTarget);
if (SUCCEEDED(hr))
pRenderTarget->CreateSolidColorBrush(ColorF(ColorF::Green), &pGreenBrush );
else
return false;
return true;
}
bool Render::Draw(HWND hwnd)
{
RECT rc;
GetClientRect(hwnd, &rc);
pRenderTarget->BeginDraw();
pRenderTarget->FillRectangle(
RectF(
rc.left + 500.0f,
rc.top + 250.0f,
rc.right - 500.0f,
rc.bottom - 500.0f),
pGreenBrush);
HRESULT hr = pRenderTarget->EndDraw();
if (SUCCEEDED(hr))
return true;
else
return false;
}
void Render::ShutDown()
{
if (pD2DFactory)
pD2DFactory->Release();
if (pRenderTarget)
pRenderTarget->Release();
if (pGreenBrush)
pGreenBrush->Release();
}
From the double 'pp' I assume you declared ppBitmap as ID2D1Bitmap**, also because you pass it by value to CreateBitmapFromWicBitmap.
If that is correct, the solution is simple: declare a pointer (pBitmap), not a pointer-to-pointer (ppBitmap).
// in class declaration or c'tor(?)
ID2D1Bitmap* pBitmap = nullptr; // instead of ID2D1Bitmap**
// at the point of creation
pRenderTarget->CreateBitmapFromWicBitmap(pConverter, NULL, &pBitmap);
In your example you declared it as a null pointer and passed it to a function that expects the address of a pointer, so the function ends up writing through address 0x00000000, which causes the access violation.
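Applied to the Background class from the question, the fix might look like the sketch below (a hypothetical adaptation: the member pBitmap replaces ppBitmap and the HRESULT is checked).
// Background.h: ID2D1Bitmap* pBitmap = nullptr;   (instead of ID2D1Bitmap** ppBitmap)
// Background::init, after pConverter->Initialize(...):
HRESULT hr = pRenderTarget->CreateBitmapFromWicBitmap(pConverter, NULL, &pBitmap);
if (FAILED(hr))
    return false;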
