Direct2D draws nothing on a "hardware mode" render target - winapi

I'm trying to use Direct2D on a render target created by Direct3D, following this MSDN document. The point of not using an HWND render target is to let the application switch targets easily, so I can e.g. render the static painting to a bitmap and use direct commands for any runtime-dependent painting. The code that creates the interfaces is the following:
bool CreateD2DC(HWND hh)
{
    D2D& d = *this;
    D3D_FEATURE_LEVEL featureLevels[] =
    {
        D3D_FEATURE_LEVEL_11_1,
        D3D_FEATURE_LEVEL_11_0,
        D3D_FEATURE_LEVEL_10_1,
        D3D_FEATURE_LEVEL_10_0,
        D3D_FEATURE_LEVEL_9_3,
        D3D_FEATURE_LEVEL_9_2,
        D3D_FEATURE_LEVEL_9_1
    };
    auto de = D3D_DRIVER_TYPE_HARDWARE;
    if (IsCurrentSessionRemoteable())
        de = D3D_DRIVER_TYPE_SOFTWARE;
    D3D_FEATURE_LEVEL m_featureLevel;
    D3D11CreateDevice(
        nullptr,                          // specify null to use the default adapter
        de,
        0,
        D3D11_CREATE_DEVICE_BGRA_SUPPORT, // optionally set debug and Direct2D compatibility flags
        featureLevels,                    // list of feature levels this app can support
        ARRAYSIZE(featureLevels),         // number of possible feature levels
        D3D11_SDK_VERSION,
        &d.device,                        // returns the Direct3D device created
        &m_featureLevel,                  // returns feature level of device created
        &d.context                        // returns the device immediate context
    );
    if (!d.device)
        return false;
    d.dxgiDevice = d.device;
    if (!d.dxgiDevice)
        return false;

    D2D1_CREATION_PROPERTIES dp;
    dp.threadingMode = D2D1_THREADING_MODE::D2D1_THREADING_MODE_SINGLE_THREADED;
    dp.debugLevel = D2D1_DEBUG_LEVEL::D2D1_DEBUG_LEVEL_NONE;
    dp.options = D2D1_DEVICE_CONTEXT_OPTIONS::D2D1_DEVICE_CONTEXT_OPTIONS_NONE;
    D2D1CreateDevice(d.dxgiDevice, dp, &d.m_d2dDevice);
    if (!d.m_d2dDevice)
        return false;
    d.m_d2dDevice->CreateDeviceContext(
        D2D1_DEVICE_CONTEXT_OPTIONS_NONE,
        &d.m_d2dContext);
    if (!d.m_d2dContext)
        return false;

    // Allocate a descriptor.
    DXGI_SWAP_CHAIN_DESC1 swapChainDesc = { 0 };
    swapChainDesc.Width = 0;  // use automatic sizing
    swapChainDesc.Height = 0;
    swapChainDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; // this is the most common swapchain format
    swapChainDesc.Stereo = false;
    swapChainDesc.SampleDesc.Count = 1; // don't use multi-sampling
    swapChainDesc.SampleDesc.Quality = 0;
    swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    swapChainDesc.BufferCount = 2;      // use double buffering to enable flip
    swapChainDesc.Scaling = DXGI_SCALING_NONE;
    swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL; // all apps must use this SwapEffect
    swapChainDesc.Flags = 0;

    d.dxgiDevice->GetAdapter(&d.dxgiAdapter);
    if (!d.dxgiAdapter)
        return false;
    // Get the factory object that created the DXGI device.
    d.dxgiAdapter->GetParent(IID_PPV_ARGS(&d.dxgiFactory));
    if (!d.dxgiFactory)
        return false;
    d.dxgiFactory->CreateSwapChainForHwnd(
        d.device,
        hh, &swapChainDesc,
        nullptr, // no fullscreen descriptor
        nullptr, // allow on all displays
        &d.m_swapChain);
    if (!d.m_swapChain)
        return false;
    d.dxgiDevice->SetMaximumFrameLatency(1);
    d.m_swapChain->GetBuffer(0, IID_PPV_ARGS(&d.backBuffer));
    if (!d.backBuffer)
        return false;

    // Now we set up the Direct2D render target bitmap linked to the swapchain.
    D2D1_BITMAP_PROPERTIES1 bitmapProperties;
    bitmapProperties.bitmapOptions = D2D1_BITMAP_OPTIONS_TARGET | D2D1_BITMAP_OPTIONS_CANNOT_DRAW;
    bitmapProperties.pixelFormat.alphaMode = D2D1_ALPHA_MODE_IGNORE;
    bitmapProperties.pixelFormat.format = DXGI_FORMAT_B8G8R8A8_UNORM;
    bitmapProperties.dpiX = 96;
    bitmapProperties.dpiY = 96;
    bitmapProperties.colorContext = 0;

    // Direct2D needs the DXGI version of the back-buffer surface pointer.
    d.m_swapChain->GetBuffer(0, IID_PPV_ARGS(&d.dxgiBackBuffer));
    if (!d.dxgiBackBuffer)
        return false;
    // Get a D2D surface from the DXGI back buffer to use as the D2D render target.
    d.m_d2dContext->CreateBitmapFromDxgiSurface(
        d.dxgiBackBuffer,
        &bitmapProperties,
        &d.m_d2dTargetBitmap);
    if (!d.m_d2dTargetBitmap)
        return false;
    // Now we can set the Direct2D render target.
    d.m_d2dContext->SetTarget(d.m_d2dTargetBitmap);
    return true;
}
The problem is this line:
auto de = D3D_DRIVER_TYPE_HARDWARE;
When it is used, Direct2D draws nothing, although EndDraw() returns S_OK. If I use D3D_DRIVER_TYPE_SOFTWARE instead, everything works correctly.
What am I doing wrong?
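One thing worth checking first: the snippet ignores every HRESULT, so the null checks tell you that something failed but not why. A minimal diagnostic sketch (reusing the question's variable names; an illustration, not the author's actual code) that surfaces the first failing call:

HRESULT hr = D3D11CreateDevice(
    nullptr, de, 0,
    D3D11_CREATE_DEVICE_BGRA_SUPPORT,
    featureLevels, ARRAYSIZE(featureLevels),
    D3D11_SDK_VERSION,
    &d.device, &m_featureLevel, &d.context);
if (FAILED(hr))
{
    // E_INVALIDARG or DXGI_ERROR_UNSUPPORTED here usually means the
    // driver type / feature-level combination is not available.
    wchar_t msg[64];
    swprintf_s(msg, L"D3D11CreateDevice failed: 0x%08X\n", static_cast<unsigned>(hr));
    OutputDebugStringW(msg);
    return false;
}

Applying the same pattern to D2D1CreateDevice, CreateSwapChainForHwnd, and CreateBitmapFromDxgiSurface would narrow down which stage behaves differently between the hardware and software paths.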

Related

Making a Cubemap in DX11 from 6 Textures

I want to load in a skysphere. I know how to do it using a dds file, but I want to try to do it from 6 separate texture files.
The problem I have is that when I load the texture cube, only 3 of the textures are visible; the other 3 are not. I'll show you my code and how it looks in Nsight. Right now I am using just 6 uniformly colored png files, all 512x512 in size.
std::vector<std::string> paths = { "../Resources/Textures/posX.png", "../Resources/Textures/negX.png",
                                   "../Resources/Textures/posY.png", "../Resources/Textures/negY.png",
                                   "../Resources/Textures/posZ.png", "../Resources/Textures/negZ.png" };
ID3D11Texture2D* cubeTexture = NULL;
WRL::ComPtr<ID3D11ShaderResourceView> shaderResourceView = NULL;

// Description of each face
D3D11_TEXTURE2D_DESC texDesc = {};
texDesc.Width = 512;
texDesc.Height = 512;
texDesc.MipLevels = 1;
texDesc.ArraySize = 6;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags = 0;
texDesc.MiscFlags = D3D11_RESOURCE_MISC_TEXTURECUBE;

// The shader resource view description
D3D11_SHADER_RESOURCE_VIEW_DESC SMViewDesc = {};
SMViewDesc.Format = texDesc.Format;
SMViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE;
SMViewDesc.TextureCube.MipLevels = texDesc.MipLevels;
SMViewDesc.TextureCube.MostDetailedMip = 0;

D3D11_SUBRESOURCE_DATA pData[6] = {};
for (int i = 0; i < 6; i++)
{
    ID3D11Resource* res = nullptr;
    std::wstring pathWString(paths[i].begin(), paths[i].end());
    HRESULT hr = DirectX::CreateWICTextureFromFileEx(Renderer::getDevice(), pathWString.c_str(), 0,
        D3D11_USAGE_STAGING, 0, D3D11_CPU_ACCESS_READ, 0,
        WIC_LOADER_FLAGS::WIC_LOADER_DEFAULT,
        &res, 0);
    assert(SUCCEEDED(hr));
    D3D11_MAPPED_SUBRESOURCE destRes = {};
    Renderer::getContext()->Map(res, 0, D3D11_MAP_READ, 0, &destRes);
    pData[i].pSysMem = destRes.pData;
    pData[i].SysMemPitch = destRes.RowPitch;
    pData[i].SysMemSlicePitch = destRes.DepthPitch;
    Renderer::getContext()->Unmap(res, 0);
    RELEASE_COM(res);
}
Renderer::getDevice()->CreateTexture2D(&texDesc, &pData[0], &cubeTexture);
Renderer::getDevice()->CreateShaderResourceView(cubeTexture, &SMViewDesc, shaderResourceView.GetAddressOf());
When graphics debugging, this is what the cubemap looks like: 3 of the textures are loaded in twice, overwriting the other 3.
The documentation says the subresource index relates to mip levels.
If I loop 9 times instead of 6, the other 3 images are shown instead of the current 3. All 6 should have unique colors.
What I'm doing in the code is creating a Texture2D description and a shader resource view description, then fetching data from the imported images using WIC; each image goes into a resource, which I map and copy into a subresource struct.
When I look at the addresses in the subresource array, all 6 are always unique, so the textures seem to load correctly. I have tried moving the row pitch around and changing the image size, but that only affects the individual images inside the texture cube; it doesn't move the duplicates around, if you see what I mean.
Any help is greatly appreciated.
So I found some code in a Frank D. Luna example that does something similar.
Here is the code I use that works; mip levels had to be taken into account.
I hope this helps if someone has a similar issue in the future.
ID3D11Texture2D* cubeTexture = NULL;
WRL::ComPtr<ID3D11ShaderResourceView> shaderResourceView = NULL;

// Description of each face
D3D11_TEXTURE2D_DESC texDesc = {};
D3D11_TEXTURE2D_DESC texDesc1 = {};
// The shader resource view description
D3D11_SHADER_RESOURCE_VIEW_DESC SMViewDesc = {};

ID3D11Texture2D* tex[6] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
for (int i = 0; i < 6; i++)
{
    std::wstring pathWString(paths[i].begin(), paths[i].end());
    HRESULT hr = DirectX::CreateWICTextureFromFileEx(Renderer::getDevice(), pathWString.c_str(), 0,
        D3D11_USAGE_STAGING, 0, D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE, 0,
        WIC_LOADER_FLAGS::WIC_LOADER_DEFAULT,
        (ID3D11Resource**)&tex[i], 0);
    assert(SUCCEEDED(hr));
}

tex[0]->GetDesc(&texDesc1);
texDesc.Width = texDesc1.Width;
texDesc.Height = texDesc1.Height;
texDesc.MipLevels = texDesc1.MipLevels;
texDesc.ArraySize = 6;
texDesc.Format = texDesc1.Format;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags = 0;
texDesc.MiscFlags = D3D11_RESOURCE_MISC_TEXTURECUBE;

SMViewDesc.Format = texDesc.Format;
SMViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE;
SMViewDesc.TextureCube.MipLevels = texDesc.MipLevels;
SMViewDesc.TextureCube.MostDetailedMip = 0;

Renderer::getDevice()->CreateTexture2D(&texDesc, NULL, &cubeTexture);
for (int i = 0; i < 6; i++)
{
    for (UINT mipLevel = 0; mipLevel < texDesc.MipLevels; ++mipLevel)
    {
        D3D11_MAPPED_SUBRESOURCE mappedTex2D;
        HRESULT hr = Renderer::getContext()->Map(tex[i], mipLevel, D3D11_MAP_READ, 0, &mappedTex2D);
        assert(SUCCEEDED(hr));
        Renderer::getContext()->UpdateSubresource(cubeTexture,
            D3D11CalcSubresource(mipLevel, i, texDesc.MipLevels),
            0, mappedTex2D.pData, mappedTex2D.RowPitch, mappedTex2D.DepthPitch);
        Renderer::getContext()->Unmap(tex[i], mipLevel);
    }
}
for (int i = 0; i < 6; i++)
{
    RELEASE_COM(tex[i]);
}
Renderer::getDevice()->CreateShaderResourceView(cubeTexture, &SMViewDesc, shaderResourceView.GetAddressOf());
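For reference, D3D11CalcSubresource is just the standard subresource indexing formula; this is its actual inline definition from d3d11.h, reproduced here because it explains why the array slice has to be scaled by the mip count:

// Subresources are ordered array-slice-major, mip-minor.
inline UINT D3D11CalcSubresource(UINT MipSlice, UINT ArraySlice, UINT MipLevels)
{
    return MipSlice + ArraySlice * MipLevels;
}

With MipLevels = 1 the index is simply the face number, which is why iterating with the wrong index or count shifts which faces get written.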

Moving from AVR to ESP32 to control a LED strip using 1 pin and SPI/DMA

I'm working on an old AVR-based project that controls a LED strip (42 LEDs) using one pin, with high-frequency SPI in DMA.
I'm converting the code to esp-idf, but I'm facing some problems, probably caused by bus/config parameters.
Here is the code:
AVR:
USART_SPI_RGB_OPTIONS.baudrate = USART_SPI_RGB_BAUDRATE;
USART_SPI_RGB_OPTIONS.spimode = USART_SPI_RGB_CHMODE;
USART_SPI_RGB_OPTIONS.data_order = USART_SPI_RGB_DATA_ORDER;
usart_init_spi(USART_SPI_RGB_PORT, &USART_SPI_RGB_OPTIONS);

void dma_init(void)
{
    struct dma_channel_config dmach_conf;
    memset(&dmach_conf, 0, sizeof(dmach_conf));
    dma_channel_set_burst_length(&dmach_conf, DMA_CH_BURSTLEN_1BYTE_gc);
    dma_channel_set_transfer_count(&dmach_conf, DMA_BUFFER_SIZE);
    dma_channel_set_src_reload_mode(&dmach_conf, DMA_CH_SRCRELOAD_TRANSACTION_gc);
    dma_channel_set_dest_reload_mode(&dmach_conf, DMA_CH_DESTRELOAD_NONE_gc);
    dma_channel_set_src_dir_mode(&dmach_conf, DMA_CH_SRCDIR_INC_gc);
    dma_channel_set_source_address(&dmach_conf, (uint16_t)(uintptr_t)RGBMemoryMap);
    dma_channel_set_dest_dir_mode(&dmach_conf, DMA_CH_DESTDIR_FIXED_gc);
    dma_channel_set_destination_address(&dmach_conf, (uint16_t)(uintptr_t)USART_SPI_RGB_PORT.DATA);
    dma_channel_set_trigger_source(&dmach_conf, DMA_CH_TRIGSRC_USARTD0_DRE_gc);
    dma_channel_set_single_shot(&dmach_conf);
    dma_enable();
    dma_channel_write_config(DMA_CHANNEL, &dmach_conf);
    dma_channel_enable(DMA_CHANNEL);
}
ESP-IDF:
void initSPISettings()
{
    //memset(&SPI_settings, 0, sizeof(SPI_settings));
    SPI_settings.host = HSPI_HOST;
    SPI_settings.dma_chan = SPI_DMA_CH2;

    // buscfg
    SPI_settings.buscfg.flags = 0;
    SPI_settings.buscfg.miso_io_num = -1;
    SPI_settings.buscfg.mosi_io_num = GPIO_NUM_32;
    SPI_settings.buscfg.sclk_io_num = -1;
    SPI_settings.buscfg.quadwp_io_num = -1;
    SPI_settings.buscfg.quadhd_io_num = -1;
    SPI_settings.buscfg.max_transfer_sz = LED_DMA_BUFFER_SIZE;

    // devcfg
    SPI_settings.devcfg.clock_speed_hz = DMA_SPEED;
    SPI_settings.devcfg.dummy_bits = 0;
    SPI_settings.devcfg.mode = 0;
    SPI_settings.devcfg.flags = SPI_DEVICE_NO_DUMMY;
    SPI_settings.devcfg.spics_io_num = -1;
    SPI_settings.devcfg.queue_size = 1;
    SPI_settings.devcfg.command_bits = 0;
    SPI_settings.devcfg.address_bits = 0;
}

void initSPI()
{
    esp_err_t err;
    initSPISettings();
    err = spi_bus_initialize(SPI_settings.host, &SPI_settings.buscfg, SPI_settings.dma_chan);
    ESP_ERROR_CHECK(err);
    // Attach the device to the SPI bus
    err = spi_bus_add_device(SPI_settings.host, &SPI_settings.devcfg, &SPI_settings.spi);
    ESP_ERROR_CHECK(err);
}

void updateRGB()
{
    spi_transaction_t t;
    esp_err_t err;
    printf("Start transaction to DMA...\r\n");
    // for (int i = (LED_DMA_BUFFER_SIZE - LED_RESET_COUNT); i < LED_DMA_BUFFER_SIZE; i++) {
    //     // Setting more bytes to 0x00 at the end of memory map to assure correct RGB init
    //     ledDMAbuffer[i] = 0x00;
    // }
    t.flags = 0;
    t.length = LED_DMA_BUFFER_SIZE * 8; // length is in bits
    t.tx_buffer = ledDMAbuffer;
    t.rx_buffer = NULL;
    t.rxlength = 0;
    err = spi_device_transmit(SPI_settings.spi, &t);
    ESP_ERROR_CHECK(err);
}

ledDMAbuffer = heap_caps_malloc(LED_DMA_BUFFER_SIZE, MALLOC_CAP_DMA); // Critical: must be DMA-capable memory.
Do you have any ideas what I'm missing?
I fill the data in the right way (I'm sure of that). If I control 1 LED there is no problem at all, but as the number of LEDs grows I see strange behaviour: for example, with 2 LEDs blinking red/green, the first one works properly while the second one stays red.
Thanks to all.
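For context, the usual trick behind this kind of one-pin setup (an assumption based on the description; the question does not show the buffer-filling code) is to expand each LED data bit into several SPI bits so that the SPI waveform approximates the strip's pulse timing. A minimal sketch for a WS2812-style strip with the SPI clock near 3.2 MHz, where each LED bit becomes 4 SPI bits, so each color byte occupies 4 bytes of the DMA buffer:

#include <cstdint>

// Hypothetical encoder sketch: expands one LED color byte into 4 SPI bytes.
// At ~3.2 MHz SPI, the pattern 1000 approximates a "0" pulse and 1110 a
// "1" pulse; verify the timing against the strip's datasheet (or a scope).
static void encode_byte(uint8_t value, uint8_t* out /* 4 bytes */)
{
    static const uint8_t lut[4] = {
        0x88, // bit pair 00 -> 1000 1000
        0x8E, // bit pair 01 -> 1000 1110
        0xE8, // bit pair 10 -> 1110 1000
        0xEE, // bit pair 11 -> 1110 1110
    };
    for (int i = 0; i < 4; i++)
    {
        out[i] = lut[(value >> (6 - 2 * i)) & 0x3];
    }
}

If later LEDs misbehave while the first is fine, the mismatch is typically in this expansion ratio, the SPI clock, or too short a reset gap at the end of the buffer (the commented-out zero padding in updateRGB).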

How to support (on win7) GDI, D3D11 interoperability?

I created a D3D11 device and can perform operations such as rendering pictures smoothly, but in order to also support GDI I tried several methods:
1. Through swapchain -> GetBuffer(ID3D11Texture2D) -> CreateDxgiSurfaceRenderTarget -> ID2D1GdiInteropRenderTarget -> GetDC, finally getting the DC. It runs normally on my Win10, but on Win7 GetDC raises a _com_error exception.
2. Via swapchain -> GetBuffer(IDXGISurface1) -> GetDC: same exception as in 1.
I suspect that the ID3D11Texture2D/IDXGISurface1 obtained from GetBuffer on Win7 carries some restrictions on GDI use, so I switched to creating a new ID3D11Texture2D myself. Using the DC alone or the D3D11 drawing interface alone now works fine, but when I interoperate, the GDI operations end up drawn on the custom-created ID3D11Texture2D instead of the swapchain's back buffer:
_d3d->Clear();
_d3d->DrawImage();
HDC hdc = _d3d->GetDC();
DrawRectangleByGDI(hdc);
_d3d->ReleaseDC();
_d3d->Present();
So how can I arrange it so that both the D3D and the DC drawing paths end up on the same ID3D11Texture2D? That would also be convenient for my CopyResource.
HRESULT CGraphRender::Resize(const UINT32& width, const UINT32& height)
{
    _back_texture2d = nullptr;
    _back_rendertarget_view = nullptr;
    _dc_texture2d = nullptr;
    _dc_render_target = nullptr;
    float dpi = GetDpiFromD2DFactory(_d2d_factory);

    // Backbuffer
    HRESULT hr = _swap_chain->ResizeBuffers(2, width, height, DXGI_FORMAT_B8G8R8A8_UNORM,
        _is_gdi_compatible ? DXGI_SWAP_CHAIN_FLAG_GDI_COMPATIBLE : 0);
    RETURN_ON_FAIL(hr);
    hr = _swap_chain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&_back_texture2d);
    RETURN_ON_FAIL(hr);
    hr = CreateD3D11Texture2D(_d3d_device, width, height, &_dc_texture2d);
    RETURN_ON_FAIL(hr);

    D3D11_RENDER_TARGET_VIEW_DESC rtv;
    rtv.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
    rtv.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
    rtv.Texture2D.MipSlice = 0;
    hr = _d3d_device->CreateRenderTargetView(_back_texture2d, &rtv, &_back_rendertarget_view);
    RETURN_ON_FAIL(hr);
    ...
}

HRESULT CGraphRender::Clear(float color[])
{
    CComPtr<ID3D11DeviceContext> immediate_context;
    _d3d_device->GetImmediateContext(&immediate_context);
    if (!immediate_context)
    {
        return E_UNEXPECTED;
    }
    ID3D11RenderTargetView* ref_renderTargetView = _back_rendertarget_view;
    immediate_context->OMSetRenderTargets(1, &ref_renderTargetView, nullptr);
    immediate_context->ClearRenderTargetView(_back_rendertarget_view, color);
    return S_OK;
}

HDC CGraphRender::GetDC()
{
    if (_is_gdi_compatible)
    {
        CComPtr<IDXGISurface1> gdi_surface;
        HRESULT hr = _dc_texture2d->QueryInterface(__uuidof(IDXGISurface1), (void**)&gdi_surface);
        if (SUCCEEDED(hr))
        {
            HDC hdc = nullptr;
            hr = gdi_surface->GetDC(TRUE, &hdc);
            if (SUCCEEDED(hr))
            {
                return hdc;
            }
        }
    }
    return nullptr;
}

HRESULT CGraphRender::CopyTexture(ID3D11Texture2D* dst_texture, ID3D11Texture2D* src_texture, POINT* dst_topleft/* = nullptr*/, POINT* src_topleft/* = nullptr*/)
{
    if (!dst_texture && !src_texture)
    {
        return E_INVALIDARG;
    }
    CComPtr<ID3D11DeviceContext> immediate_context;
    _d3d_device->GetImmediateContext(&immediate_context);
    if (!immediate_context)
    {
        return E_UNEXPECTED;
    }
    ID3D11Texture2D* dst_texture_real = dst_texture ? dst_texture : _dc_texture2d;
    POINT dst_topleft_real = dst_topleft ? (*dst_topleft) : POINT{ 0, 0 };
    ID3D11Texture2D* src_texture_real = src_texture ? src_texture : _dc_texture2d;
    POINT src_topleft_real = src_topleft ? (*src_topleft) : POINT{ 0, 0 };

    D3D11_TEXTURE2D_DESC src_desc = { 0 };
    src_texture_real->GetDesc(&src_desc);
    D3D11_TEXTURE2D_DESC dst_desc = { 0 };
    dst_texture_real->GetDesc(&dst_desc);

    if (!dst_topleft_real.x && !src_topleft_real.x && !dst_topleft_real.y && !src_topleft_real.y
        && dst_desc.Width == src_desc.Width && dst_desc.Height == src_desc.Height)
    {
        immediate_context->CopyResource(dst_texture_real, src_texture_real);
    }
    else
    {
        D3D11_BOX src_box;
        src_box.left = min((UINT)src_topleft_real.x, (UINT)dst_topleft_real.x + dst_desc.Width);
        src_box.top = min((UINT)src_topleft_real.y, (UINT)dst_topleft_real.y + dst_desc.Height);
        src_box.right = min((UINT)src_box.left + src_desc.Width, (UINT)dst_topleft_real.x + dst_desc.Width);
        src_box.bottom = min((UINT)src_box.top + src_desc.Height, (UINT)dst_topleft_real.y + dst_desc.Height);
        src_box.front = 0;
        src_box.back = 1;
        ATLASSERT(src_box.left < src_box.right);
        ATLASSERT(src_box.top < src_box.bottom);
        immediate_context->CopySubresourceRegion(dst_texture_real, 0, dst_topleft_real.x, dst_topleft_real.y, 0,
            src_texture_real, 0, &src_box);
    }
    return S_OK;
}
I don't think Windows 7 supports what you're trying to do. Here are some alternatives.
Switch from GDI to something else that can render 2D graphics with D3D11. Direct2D is the most straightforward choice here; add DirectWrite if you want text in addition to rectangles.
If your 2D content is static or changes only rarely, you can use GDI+ to render into an in-memory RGBA device context, create a Direct3D 11 texture from that data, and render a full-screen triangle with that texture; see the sketch after this list.
You can overlay another Win32 window on top of your Direct3D 11 one and use GDI to render into that. The GDI window on top must have the WS_EX_LAYERED extended style, and you must update it with the UpdateLayeredWindow API. This method is the most complicated and least reliable, though.
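A minimal sketch of the second alternative (a hypothetical helper, not part of the question's code), showing only the upload step; the pixels and stride would come from something like Gdiplus::Bitmap::LockBits with a 32-bpp format:

// Upload CPU-rendered pixels (e.g. from GDI+) into a shader-visible texture.
ID3D11Texture2D* TextureFromPixels(ID3D11Device* device, const void* pixels,
                                   UINT width, UINT height, UINT stride)
{
    D3D11_TEXTURE2D_DESC desc = {};
    desc.Width = width;
    desc.Height = height;
    desc.MipLevels = 1;
    desc.ArraySize = 1;
    desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; // matches GDI+ 32bpp ARGB byte order
    desc.SampleDesc.Count = 1;
    desc.Usage = D3D11_USAGE_IMMUTABLE;       // content is static; recreate when it changes
    desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;

    D3D11_SUBRESOURCE_DATA init = {};
    init.pSysMem = pixels;
    init.SysMemPitch = stride;

    ID3D11Texture2D* texture = nullptr;
    device->CreateTexture2D(&desc, &init, &texture);
    return texture; // caller owns the reference; nullptr on failure
}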

Controlling sprite with 2 dimensional array SDL2

I'm currently learning to create my first game ever, and I chose SDL2 as the engine. I can animate the main character sprite from separate images, but I figured it would be ridiculously tedious to control the sprite that way.
So I loaded the sprite sheet into a 2-dimensional array, hoping to drive all of the sprite's actions from that one sheet.
#define HOR_FRAMES 16
#define VER_FRAMES 16

// These are for the images and sprites
Sprite catGif;
SDL_Rect catRect[VER_FRAMES][HOR_FRAMES];

bool load()
{
    bool success = true;
    // Load cat sprite sheet
    if (!catGif.loadFromFile("./assets/pets/fighter_cat_sprite/cat_sprite_base_64.png",
                             sceneRenderer, 0X00, 0x00, 0x00))
    {
        printf("%s", SDL_GetError());
        success = false;
    }
    else
    {
        int initX = 0;
        int initY = 0;
        for (int ver = 0; ver < VER_FRAMES; ver++)
        {
            for (int hor = 0; hor < HOR_FRAMES; hor++)
            {
                catRect[ver][hor].x = initX;
                catRect[ver][hor].y = initY;
                catRect[ver][hor].w = 64;
                catRect[ver][hor].h = 64;
                initX += 64;
            }
            initX = 0;
            initY += 64;
        }
        initX = 0;
        initY = 0;
    }
    return success;
}
Then I created a simple enum to define the actions, which are the rows in the sprite sheet:
enum Actions
{
    IDLE,
    WALK,
    TOTAL
};
After that, in main(), I added the control logic and set the variables in a way that I think is appropriate for rendering.
#include "init.h"
#include "Sprite.h"
LTexture background(SCREEN_WIDTH, SCREEN_HEIGHT);
SDL_Rect bgRect;
LTexture foreground(SCREEN_WIDTH, SCREEN_HEIGHT);
SDL_Rect fgRect;
int frame = 0;
int maxFrame = 0;
SDL_RendererFlip flipType;
int main(int argc, char* argv[])
{
init();
load();
int action;
bool quitFlag = false;
SDL_Event event;
while (!quitFlag)
{
action = IDLE;
while (SDL_PollEvent(&event) != 0) // Handle events in queue
{
if (event.type == SDL_QUIT ||
event.key.keysym.sym == SDLK_ESCAPE)
quitFlag = true;
}
const Uint8* states = SDL_GetKeyboardState(NULL);
if (states[SDL_SCANCODE_A])
{
maxFrame = 8;
action = WALK;
flipType = SDL_FLIP_HORIZONTAL;
catGif.moveLeft();
}
else if (states[SDL_SCANCODE_D])
{
maxFrame = 8;
action = WALK;
flipType = SDL_FLIP_NONE;
catGif.moveRight();
}
else
{
maxFrame = 4;
action = IDLE;
}
// Drawing
background.clipRender(0, 0, sceneRenderer, &bgRect);
foreground.clipRender(0, SCREEN_HEIGHT / 2, sceneRenderer, &fgRect);
SDL_Rect* currFrame = &catRect[action][frame / maxFrame];
int x = catGif.xGetter();
int y = catGif.yGetter();
catGif.render(x, y, sceneRenderer, currFrame, NULL, 0, flipType);
// Update
SDL_RenderPresent(sceneRenderer);
frame++;
if (frame / maxFrame == maxFrame)
{
frame = 0;
}
}
clean();
return 0;
}
The sprite plays smoothly while I hold the A/D keys. However, after I release them, the sheet keeps playing through to the end and then the sprite disappears, even though the action variable has been reset and there is no way it can increment itself; I'm pretty sure of that.
Please help me understand how that happens, so I can hopefully fix it or take another approach. Thank you.
I use Code::Blocks 16.01 on Fedora Linux.
And this is the sprite sheet: cat_sprite_sheet
You must reset frame to 0 when changing the animation state:
{
    maxFrame = ...;
    frame = 0;
    ...
}
The bug comes from the == check in your frame increment. Say the frame counter is 40 when you release A/D: maxFrame is set back to 4, so frame / maxFrame = 40 / 4 = 10, which is already greater than 4 and will never again be equal to maxFrame. frame therefore keeps increasing forever, and your sprite disappears once catRect[action][frame / maxFrame] goes out of bounds. Resetting frame to 0 solves this.
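Putting both fixes together, the tail of the main loop could look like this (a minimal sketch; the >= guard is a safety net in addition to resetting frame on every state change):

// Advance the animation, wrapping instead of running past the sheet.
frame++;
if (frame / maxFrame >= maxFrame)
{
    frame = 0;
}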

Image saved from Fingerprint sensor seems corrupted

I have been trying to put together code to save the image from a fingerprint sensor. I have already tried forums, and this is my current code. It saves a file with the correct file size, but when I open the image it is not a fingerprint; it looks corrupted. Here is what it looks like.
My code is given below. Any help will be appreciated. I am new to Windows development.
bool SaveBMP(BYTE* Buffer, int width, int height, long paddedsize, LPCTSTR bmpfile)
{
    BITMAPFILEHEADER bmfh;
    BITMAPINFOHEADER info;
    memset(&bmfh, 0, sizeof(BITMAPFILEHEADER));
    memset(&info, 0, sizeof(BITMAPINFOHEADER));

    // Next we fill the file header with data:
    bmfh.bfType = 0x4d42; // 0x4d42 = 'BM'
    bmfh.bfReserved1 = 0;
    bmfh.bfReserved2 = 0;
    bmfh.bfSize = sizeof(BITMAPFILEHEADER) +
                  sizeof(BITMAPINFOHEADER) + paddedsize;
    bmfh.bfOffBits = 0x36;

    // ...and the info header:
    info.biSize = sizeof(BITMAPINFOHEADER);
    info.biWidth = width;
    info.biHeight = height;
    info.biPlanes = 1;
    info.biBitCount = 8;
    info.biCompression = BI_RGB;
    info.biSizeImage = 0;
    info.biXPelsPerMeter = 0x0ec4;
    info.biYPelsPerMeter = 0x0ec4;
    info.biClrUsed = 0;
    info.biClrImportant = 0;

    HANDLE file = CreateFile(bmpfile, GENERIC_WRITE, FILE_SHARE_READ,
                             NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

    // Now we write the file header and info header:
    unsigned long bwritten;
    if (WriteFile(file, &bmfh, sizeof(BITMAPFILEHEADER), &bwritten, NULL) == false)
    {
        CloseHandle(file);
        return false;
    }
    if (WriteFile(file, &info, sizeof(BITMAPINFOHEADER), &bwritten, NULL) == false)
    {
        CloseHandle(file);
        return false;
    }
    // ...and finally the image data:
    if (WriteFile(file, Buffer, paddedsize, &bwritten, NULL) == false)
    {
        CloseHandle(file);
        return false;
    }
    // Now we can close our function with
    CloseHandle(file);
    return true;
}

HRESULT CaptureSample()
{
    HRESULT hr = S_OK;
    WINBIO_SESSION_HANDLE sessionHandle = NULL;
    WINBIO_UNIT_ID unitId = 0;
    WINBIO_REJECT_DETAIL rejectDetail = 0;
    PWINBIO_BIR sample = NULL;
    SIZE_T sampleSize = 0;

    // Connect to the system pool.
    hr = WinBioOpenSession(
        WINBIO_TYPE_FINGERPRINT, // Service provider
        WINBIO_POOL_SYSTEM,      // Pool type
        WINBIO_FLAG_RAW,         // Access: Capture raw data
        NULL,                    // Array of biometric unit IDs
        0,                       // Count of biometric unit IDs
        WINBIO_DB_DEFAULT,       // Default database
        &sessionHandle           // [out] Session handle
    );

    // Capture a biometric sample.
    wprintf_s(L"\n Calling WinBioCaptureSample - Swipe sensor...\n");
    hr = WinBioCaptureSample(
        sessionHandle,
        WINBIO_NO_PURPOSE_AVAILABLE,
        WINBIO_DATA_FLAG_RAW,
        &unitId,
        &sample,
        &sampleSize,
        &rejectDetail
    );
    wprintf_s(L"\n Swipe processed - Unit ID: %d\n", unitId);
    wprintf_s(L"\n Captured %d bytes.\n", sampleSize);

    PWINBIO_BIR_HEADER BirHeader = (PWINBIO_BIR_HEADER)(((PBYTE)sample) + sample->HeaderBlock.Offset);
    PWINBIO_BDB_ANSI_381_HEADER AnsiBdbHeader = (PWINBIO_BDB_ANSI_381_HEADER)(((PBYTE)sample) + sample->StandardDataBlock.Offset);
    PWINBIO_BDB_ANSI_381_RECORD AnsiBdbRecord = (PWINBIO_BDB_ANSI_381_RECORD)(((PBYTE)AnsiBdbHeader) + sizeof(WINBIO_BDB_ANSI_381_HEADER));
    PBYTE firstPixel = (PBYTE)((PBYTE)AnsiBdbRecord) + sizeof(WINBIO_BDB_ANSI_381_RECORD);

    SaveBMP(firstPixel, AnsiBdbRecord->HorizontalLineLength, AnsiBdbRecord->VerticalLineLength,
            AnsiBdbRecord->BlockLength, "D://test.bmp");

    wprintf_s(L"\n Press any key to exit.");
    _getch();
    return hr;
}
IInspectable is correct: the corruption looks like it's coming from your implicit use of color tables:
info.biBitCount = 8;
info.biCompression = BI_RGB;
If your data is actually 24-bit RGB, you can set info.biBitCount = 24; to render a valid bitmap. If it's lower (or higher) than that, you'll need to do some conversion work. You can check AnsiBdbHeader->PixelDepth to confirm that it's the 8 bits per pixel you expect.
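If the sensor really does deliver 8 bits per pixel, the alternative (a sketch, assuming plain 8-bpp grayscale data) is to keep biBitCount = 8 and write an explicit 256-entry grayscale color table between the two header writes in SaveBMP, adjusting the offsets to match:

// Sketch: 256-entry grayscale palette for an 8-bpp BMP, written right
// after BITMAPINFOHEADER. The headers must account for it:
//   bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER)
//             + 256 * sizeof(RGBQUAD);
// and bfSize grows by 256 * sizeof(RGBQUAD) as well.
RGBQUAD palette[256];
for (int i = 0; i < 256; i++)
{
    palette[i].rgbBlue     = (BYTE)i;
    palette[i].rgbGreen    = (BYTE)i;
    palette[i].rgbRed      = (BYTE)i;
    palette[i].rgbReserved = 0;
}
WriteFile(file, palette, sizeof(palette), &bwritten, NULL);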
It also looks like passing AnsiBdbRecord->BlockLength to SaveBMP isn't quite right. The docs for the WINBIO_BDB_ANSI_381_RECORD structure say of BlockLength:
Contains the number of bytes in this structure plus the number of bytes of sample image data.
So you'll want to subtract sizeof(WINBIO_BDB_ANSI_381_RECORD) before passing it as your bitmap buffer size.
Side note: make sure you free the memory involved after the capture.
WinBioFree(sample);
WinBioCloseSession(sessionHandle);
