Image saved from fingerprint sensor seems corrupted - Windows

I have been trying to put together code to save an image from a fingerprint sensor. I have already searched the forums; my current code saves a file with the correct size, but when I open the image it is not a fingerprint, it looks corrupted instead.
My code is given below. Any help will be appreciated. I am new to Windows development.
bool SaveBMP(BYTE* Buffer, int width, int height, long paddedsize, LPCTSTR bmpfile)
{
    BITMAPFILEHEADER bmfh;
    BITMAPINFOHEADER info;
    memset(&bmfh, 0, sizeof(BITMAPFILEHEADER));
    memset(&info, 0, sizeof(BITMAPINFOHEADER));

    // Next we fill the file header with data:
    bmfh.bfType = 0x4d42; // 0x4d42 = 'BM'
    bmfh.bfReserved1 = 0;
    bmfh.bfReserved2 = 0;
    bmfh.bfSize = sizeof(BITMAPFILEHEADER) +
                  sizeof(BITMAPINFOHEADER) + paddedsize;
    bmfh.bfOffBits = 0x36;

    // ... and the info header:
    info.biSize = sizeof(BITMAPINFOHEADER);
    info.biWidth = width;
    info.biHeight = height;
    info.biPlanes = 1;
    info.biBitCount = 8;
    info.biCompression = BI_RGB;
    info.biSizeImage = 0;
    info.biXPelsPerMeter = 0x0ec4;
    info.biYPelsPerMeter = 0x0ec4;
    info.biClrUsed = 0;
    info.biClrImportant = 0;

    HANDLE file = CreateFile(bmpfile, GENERIC_WRITE, FILE_SHARE_READ,
                             NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

    // Now we write the file header and info header:
    unsigned long bwritten;
    if (WriteFile(file, &bmfh, sizeof(BITMAPFILEHEADER), &bwritten, NULL) == false)
    {
        CloseHandle(file);
        return false;
    }
    if (WriteFile(file, &info, sizeof(BITMAPINFOHEADER), &bwritten, NULL) == false)
    {
        CloseHandle(file);
        return false;
    }

    // ... and finally the image data:
    if (WriteFile(file, Buffer, paddedsize, &bwritten, NULL) == false)
    {
        CloseHandle(file);
        return false;
    }

    // Now we can close our function with
    CloseHandle(file);
    return true;
}
HRESULT CaptureSample()
{
    HRESULT hr = S_OK;
    WINBIO_SESSION_HANDLE sessionHandle = NULL;
    WINBIO_UNIT_ID unitId = 0;
    WINBIO_REJECT_DETAIL rejectDetail = 0;
    PWINBIO_BIR sample = NULL;
    SIZE_T sampleSize = 0;

    // Connect to the system pool.
    hr = WinBioOpenSession(
        WINBIO_TYPE_FINGERPRINT, // Service provider
        WINBIO_POOL_SYSTEM,      // Pool type
        WINBIO_FLAG_RAW,         // Access: Capture raw data
        NULL,                    // Array of biometric unit IDs
        0,                       // Count of biometric unit IDs
        WINBIO_DB_DEFAULT,       // Default database
        &sessionHandle           // [out] Session handle
    );

    // Capture a biometric sample.
    wprintf_s(L"\n Calling WinBioCaptureSample - Swipe sensor...\n");
    hr = WinBioCaptureSample(
        sessionHandle,
        WINBIO_NO_PURPOSE_AVAILABLE,
        WINBIO_DATA_FLAG_RAW,
        &unitId,
        &sample,
        &sampleSize,
        &rejectDetail
    );
    wprintf_s(L"\n Swipe processed - Unit ID: %d\n", unitId);
    wprintf_s(L"\n Captured %d bytes.\n", sampleSize);

    PWINBIO_BIR_HEADER BirHeader = (PWINBIO_BIR_HEADER)(((PBYTE)sample) + sample->HeaderBlock.Offset);
    PWINBIO_BDB_ANSI_381_HEADER AnsiBdbHeader = (PWINBIO_BDB_ANSI_381_HEADER)(((PBYTE)sample) + sample->StandardDataBlock.Offset);
    PWINBIO_BDB_ANSI_381_RECORD AnsiBdbRecord = (PWINBIO_BDB_ANSI_381_RECORD)(((PBYTE)AnsiBdbHeader) + sizeof(WINBIO_BDB_ANSI_381_HEADER));
    PBYTE firstPixel = (PBYTE)((PBYTE)AnsiBdbRecord) + sizeof(WINBIO_BDB_ANSI_381_RECORD);

    SaveBMP(firstPixel, AnsiBdbRecord->HorizontalLineLength,
            AnsiBdbRecord->VerticalLineLength, AnsiBdbRecord->BlockLength, "D://test.bmp");

    wprintf_s(L"\n Press any key to exit.");
    _getch();
    return hr;
}

IInspectable is correct; the corruption looks like it's coming from your implicit use of color tables:
info.biBitCount = 8;
info.biCompression = BI_RGB;
If your data is actually just 24-bit RGB, you can do info.biBitCount = 24; to render a valid bitmap. If it's lower (or higher) than that, then you'll need to do some conversion work. You can check AnsiBdbHeader->PixelDepth to confirm that it's the 8 bits per pixel that you expect.
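And if PixelDepth really is 8, the fix on the BMP side is to write the palette that an 8-bpp bitmap implies. Here's a minimal sketch of a grayscale variant of your SaveBMP to illustrate the layout (not tested against your sensor; the negative biHeight is an assumption that the sample's rows are stored top-down, and paddedsize assumes each scan line is already padded to a 4-byte multiple, as in your original code):
bool SaveGray8BMP(const BYTE* pixels, int width, int height,
                  long paddedsize, LPCWSTR bmpfile)
{
    // An 8-bpp bitmap is palettized: a 256-entry color table sits between
    // the BITMAPINFOHEADER and the pixel data, and bfOffBits must include it.
    RGBQUAD palette[256];
    for (int i = 0; i < 256; ++i)
    {
        palette[i].rgbRed = palette[i].rgbGreen = palette[i].rgbBlue = (BYTE)i;
        palette[i].rgbReserved = 0;
    }

    BITMAPFILEHEADER bmfh = {};
    BITMAPINFOHEADER info = {};
    bmfh.bfType = 0x4d42; // 'BM'
    bmfh.bfOffBits = sizeof(bmfh) + sizeof(info) + sizeof(palette); // 14 + 40 + 1024
    bmfh.bfSize = bmfh.bfOffBits + paddedsize;

    info.biSize = sizeof(info);
    info.biWidth = width;
    info.biHeight = -height; // negative: rows stored top-down (assumption)
    info.biPlanes = 1;
    info.biBitCount = 8;
    info.biCompression = BI_RGB;

    HANDLE file = CreateFileW(bmpfile, GENERIC_WRITE, FILE_SHARE_READ,
                              NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (file == INVALID_HANDLE_VALUE)
        return false;

    DWORD written = 0;
    bool ok = WriteFile(file, &bmfh, sizeof(bmfh), &written, NULL)
           && WriteFile(file, &info, sizeof(info), &written, NULL)
           && WriteFile(file, palette, sizeof(palette), &written, NULL)
           && WriteFile(file, pixels, paddedsize, &written, NULL);
    CloseHandle(file);
    return ok;
}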
It also looks like passing AnsiBdbRecord->BlockLength to SaveBMP isn't quite right. The docs for this field say:
WINBIO_BDB_ANSI_381_RECORD structure
BlockLength
Contains the number of bytes in this structure plus the number of bytes of sample image data.
So you'll want to make sure to subtract sizeof(WINBIO_BDB_ANSI_381_RECORD) before passing it as your bitmap buffer size.
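In other words, something like (keeping your original call otherwise):
long imageSize = (long)(AnsiBdbRecord->BlockLength - sizeof(WINBIO_BDB_ANSI_381_RECORD));
SaveBMP(firstPixel, AnsiBdbRecord->HorizontalLineLength,
        AnsiBdbRecord->VerticalLineLength, imageSize, "D://test.bmp");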
Side note, make sure you free the memory involved after the capture.
WinBioFree(sample);
WinBioCloseSession(sessionHandle);

Related

Performance loss with CopyResource() and then Map()/Unmap()

The problem is that using these methods roughly halves the FPS. For example, I had about 5000 fps in a 3D scene, and it dropped to about 2500. I know the cause is that the application stalls waiting for the copy to complete. But it's only 4 bytes... If I use the D3D11_MAP_FLAG_DO_NOT_WAIT flag, Map() always returns DXGI_ERROR_WAS_STILL_DRAWING. What can I do so that I can use this method without losing fps? Here is my code:
Init
D3D11_BUFFER_DESC outputDesc;
outputDesc.Usage = D3D11_USAGE_DEFAULT;
outputDesc.ByteWidth = sizeof(float);
outputDesc.BindFlags = D3D11_BIND_UNORDERED_ACCESS;
outputDesc.CPUAccessFlags = 0;
outputDesc.StructureByteStride = sizeof(float);
outputDesc.MiscFlags = D3D11_RESOURCE_MISC_BUFFER_STRUCTURED;
FOG_TRACE(mDevice->CreateBuffer(&outputDesc, nullptr, &outputBuffer));
outputDesc.Usage = D3D11_USAGE_STAGING;
outputDesc.BindFlags = 0;
outputDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
FOG_TRACE(mDevice->CreateBuffer(&outputDesc, nullptr, &outputResultBuffer));
D3D11_UNORDERED_ACCESS_VIEW_DESC uavDesc{};
uavDesc.Buffer.FirstElement = 0;
uavDesc.Buffer.Flags = D3D11_BUFFER_UAV_FLAG_APPEND;
uavDesc.Buffer.NumElements = 1;
uavDesc.Format = DXGI_FORMAT_UNKNOWN;
uavDesc.ViewDimension = D3D11_UAV_DIMENSION_BUFFER;
FOG_TRACE(mDevice->CreateUnorderedAccessView(outputBuffer, &uavDesc, &unorderedAccessView));
Update
const UINT offset = 0;
mDeviceContext->OMSetRenderTargetsAndUnorderedAccessViews(1, &mRenderTargetView, mDepthStencilView, 1, 1, &unorderedAccessView, &offset);
mDeviceContext->ClearDepthStencilView(mDepthStencilView, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);
ObjectManager::Draw();
mDeviceContext->CopyResource(outputResultBuffer, outputBuffer);
D3D11_MAPPED_SUBRESOURCE mappedBuffer;
HRESULT hr;
FOG_TRACE(hr = mDeviceContext->Map(outputResultBuffer, 0, D3D11_MAP_READ, 0/*D3D11_MAP_FLAG_DO_NOT_WAIT*/, &mappedBuffer));
if (SUCCEEDED(hr))
{
    float* copy = (float*)(mappedBuffer.pData);
    OutputDebugString(String::ToStr(*copy) + L"\n");
}
mDeviceContext->Unmap(outputResultBuffer, 0);
const UINT var[4]{};
mDeviceContext->ClearUnorderedAccessViewUint(unorderedAccessView, var);
I've already profiled and checked everything possible; the problem is precisely this pending copy. I would be very grateful if someone could explain everything in detail :)
The solution turned out to be very simple, though it took me a long time to find! I just don't issue a new copy until I have read the previous data. Here is a small crutch:
static bool isWait = false;
if (!isWait)
{
    mDeviceContext->CopyResource(outputResultBuffer, outputBuffer);
}
D3D11_MAPPED_SUBRESOURCE mappedBuffer;
HRESULT hr;
FOG_TRACE(hr = mDeviceContext->Map(outputResultBuffer, 0, D3D11_MAP_READ, D3D11_MAP_FLAG_DO_NOT_WAIT, &mappedBuffer));
if (SUCCEEDED(hr))
{
    float* copy = (float*)(mappedBuffer.pData);
    OutputDebugString(String::ToStr(*copy) + L"\n");
    mDeviceContext->Unmap(outputResultBuffer, 0);
    const UINT var[4]{};
    mDeviceContext->ClearUnorderedAccessViewUint(unorderedAccessView, var);
    isWait = false;
}
else
{
    isWait = true;
}
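For what it's worth, a more conventional way to hide the stall is to round-robin a small ring of staging buffers, so that each frame you Map() a copy issued a couple of frames earlier while the GPU writes into a different slot. A minimal sketch of the idea (the ring size of 3, the staging array, and its creation are assumptions, not part of the code above; the first couple of reads return stale data):
// Assumed: outputBuffer as above, plus three staging copies of it
// created with D3D11_USAGE_STAGING / D3D11_CPU_ACCESS_READ.
static const UINT kLatency = 3;
static ID3D11Buffer* staging[kLatency];
static UINT frame = 0;

// Write the newest result into one slot...
mDeviceContext->CopyResource(staging[frame % kLatency], outputBuffer);

// ...and read the oldest slot, copied kLatency - 1 frames ago,
// which the GPU has almost certainly finished by now.
UINT readSlot = (frame + 1) % kLatency;
D3D11_MAPPED_SUBRESOURCE mapped;
if (SUCCEEDED(mDeviceContext->Map(staging[readSlot], 0, D3D11_MAP_READ,
                                  D3D11_MAP_FLAG_DO_NOT_WAIT, &mapped)))
{
    float value = *(float*)mapped.pData;
    mDeviceContext->Unmap(staging[readSlot], 0);
    // ... use value ...
}
++frame;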

Extending Media Foundation Encoder to support 10-bit video encoding

The HEVC Media Foundation encoder in Windows will only encode 8-bit video. My NVIDIA graphics card also supports 10-bit HDR and alpha-mode video encoding, so I decided to create my own IMFTransform that uses the NVIDIA SDK.
I've registered my DLL using MFTRegister:
MFT_REGISTER_TYPE_INFO aMediaTypesIn[] =
{
    { MFMediaType_Video, MFVideoFormat_ARGB32 },
    { MFMediaType_Video, MFVideoFormat_RGB32 },
    { MFMediaType_Video, MFVideoFormat_RGB32 },
    { MFMediaType_Video, MFVideoFormat_RGB10 },
    { MFMediaType_Video, MyFakeFmt },
};
MFT_REGISTER_TYPE_INFO aMediaTypesOut[] =
{
    { MFMediaType_Video, MFVideoFormat_H264 },
    { MFMediaType_Video, MFVideoFormat_H265 },
    { MFMediaType_Video, MFVideoFormat_HEVC },
};

// Size of the arrays.
const DWORD cNumMediaTypesI = ARRAY_SIZE(aMediaTypesIn);
const DWORD cNumMediaTypesO = ARRAY_SIZE(aMediaTypesOut);

hr = MFTRegister(
    GUID_NVidiaEncoder,         // CLSID.
    MFT_CATEGORY_VIDEO_ENCODER, // Category.
MyFakeFmt is a nonexistent input type used to fool the Sink Writer into picking my encoder when SetInputMediaType is called, instead of Microsoft's predefined transform. This works OK.
int wi = 1920;
int he = 1080;
int fps = 30;
int br = 4000;
auto fmt = MFVideoFormat_H264;
bool Our = 1;
const wchar_t* fil = L"r:\\1.mp4";

std::vector<DWORD> frame;
frame.resize(wi * he);

// Test
CComPtr<IMFSinkWriter> wr;
DeleteFile(fil);
CComPtr<IMFAttributes> attrs;
MFCreateAttributes(&attrs, 0);
auto hr = MFCreateSinkWriterFromURL(fil, 0, attrs, &wr);

DWORD str = (DWORD)-1;
CComPtr<IMFMediaType> mt2;
MFCreateMediaType(&mt2);
mt2->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
mt2->SetGUID(MF_MT_SUBTYPE, fmt);
MFSetAttributeRatio(mt2, MF_MT_FRAME_RATE, fps, 1);
hr = MFSetAttributeSize(mt2, MF_MT_FRAME_SIZE, wi, he);
MFSetAttributeRatio(mt2, MF_MT_PIXEL_ASPECT_RATIO, 1, 1);
mt2->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
mt2->SetUINT32(MF_MT_VIDEO_NOMINAL_RANGE, MFNominalRange_Normal);
mt2->SetUINT32(MF_MT_AVG_BITRATE, br * 1000);
hr = wr->AddStream(mt2, &str);

CComPtr<IMFMediaType> mt1;
MFCreateMediaType(&mt1);
mt1->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
mt1->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32);
hr = MFSetAttributeSize(mt1, MF_MT_FRAME_SIZE, wi, he);

// Force our selection
if (Our)
{
    mt1->SetGUID(MF_MT_SUBTYPE, MyFakeFmt);
    hr = wr->SetInputMediaType(str, mt1, 0);
}
mt1->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32);
hr = wr->SetInputMediaType(str, mt1, 0);

hr = wr->BeginWriting();
for (int i = 0; i < 15; i++)
{
    auto i2 = i % 5;
    if (i2 == 0) Frm(frame, wi, he, 0xFFFFFFFF);
    if (i2 == 1 || i2 == 4) Frm(frame, wi, he, 0xFF0000FF); // some colors
    if (i2 == 2) Frm(frame, wi, he, 0xFFFF00FF);
    if (i2 == 3) Frm(frame, wi, he, 0xFF00FFFF);

    CComPtr<IMFSample> s;
    MFCreateSample(&s);
    int secs = 1;
    hr = s->SetSampleDuration(10 * 1000 * 1000 * secs);
    hr = s->SetSampleTime(10 * 1000 * 1000 * i);

    CComPtr<IMFMediaBuffer> b;
    MFCreateMemoryBuffer((DWORD)(frame.size() * 4), &b);
    b->SetCurrentLength((DWORD)(frame.size() * 4));
    BYTE* by = 0;
    DWORD ml = 0, cl = 0;
    b->Lock(&by, &ml, &cl);
    memcpy(by, frame.data(), frame.size() * 4);
    b->Unlock();
    hr = s->AddBuffer(b);
    b = 0;
    hr = wr->WriteSample(str, s);
}
hr = wr->Finalize();
wr = 0;
The problems start with the call to Finalize to end the writing. Up to that point, everything seems to work normally. Note that I have tested the NVIDIA IMFTransform I've created with input frames, and it encodes and outputs them correctly as raw data.
When I call Finalize and the type is MFVideoFormat_H264, the call succeeds. However, the generated mp4 plays weirdly.
For some reason, MediaInfo also shows 1 FPS. Why?
When the output is MFVideoFormat_HEVC, Finalize fails with 0xc00d4a45: "Sink could not create valid output file because required headers were not provided to the sink."
I've also tried converting the raw .h264 file I'm saving to mp4 with ffmpeg, and this works; the resulting mp4 plays correctly.
Adding an MF_MT_MPEG_SEQUENCE_HEADER didn't help (besides, I think this is only needed for H.264):
const char* bl4 = "\x00\x00\x00\x01\x67\x42\xC0\x28\x95\xB0\x1E\x00\x89\xF9\x70\x16\xC8\x00\x00\x03\x00\x08\x00\x00\x03\x01\xE0\x6D\x04\x42\x37\x00\x00\x00\x01\x68\xCA\x8F\x20";
mt2->SetBlob(MF_MT_MPEG_SEQUENCE_HEADER, (UINT8*)bl4, 39);
What do you make of all that?
Thanks

Problem getting USB_BANDWIDTH_INFO structure

I'm writing a Windows application in C++ that reads images from an external USB camera and displays them (which works nicely). I'd like to monitor the used USB bandwidth at the same time. I know that there is a USB_BANDWIDTH_INFO structure (https://learn.microsoft.com/en-us/windows/win32/api/usbuser/ns-usbuser-usb_bandwidth_info), but I have no clue how to use it. More precisely: the structure itself is pretty clear, but how do I get/read it? (I didn't find any example code explaining that.)
According to the MSDN:
The USB_BANDWIDTH_INFO structure is used with the IOCTL_USB_USER_REQUEST I/O control request to retrieve information about the allocated bandwidth.
So you need to call DeviceIoControl with IOCTL_USB_USER_REQUEST.
Referring to the official sample, you can find:
DWORD
GetHostControllerInfo(
    HANDLE hHCDev,
    PUSBHOSTCONTROLLERINFO hcInfo)
{
    USBUSER_CONTROLLER_INFO_0 UsbControllerInfo;
    DWORD dwError = 0;
    DWORD dwBytes = 0;
    BOOL bSuccess = FALSE;

    memset(&UsbControllerInfo, 0, sizeof(UsbControllerInfo));

    // set the header and request sizes
    UsbControllerInfo.Header.UsbUserRequest = USBUSER_GET_CONTROLLER_INFO_0;
    UsbControllerInfo.Header.RequestBufferLength = sizeof(UsbControllerInfo);

    //
    // Query for the USB_CONTROLLER_INFO_0 structure
    //
    bSuccess = DeviceIoControl(hHCDev,
                               IOCTL_USB_USER_REQUEST,
                               &UsbControllerInfo,
                               sizeof(UsbControllerInfo),
                               &UsbControllerInfo,
                               sizeof(UsbControllerInfo),
                               &dwBytes,
                               NULL);
    if (!bSuccess)
    {
        dwError = GetLastError();
        OOPS();
    }
    else
    {
        hcInfo->ControllerInfo = (PUSB_CONTROLLER_INFO_0) ALLOC(sizeof(USB_CONTROLLER_INFO_0));
        if (NULL == hcInfo->ControllerInfo)
        {
            dwError = GetLastError();
            OOPS();
        }
        else
        {
            // copy the data into our USB Host Controller's info structure
            memcpy(hcInfo->ControllerInfo, &UsbControllerInfo.Info0, sizeof(USB_CONTROLLER_INFO_0));
        }
    }
    return dwError;
}
You can modify it like this. For this request, the input and output buffers are the same USBUSER_BANDWIDTH_INFO_REQUEST structure, i.e. the request header followed by the USB_BANDWIDTH_INFO payload:
USBUSER_BANDWIDTH_INFO_REQUEST UsbBandInfoRequest{};
DWORD dwError = 0;
DWORD dwBytes = 0;
BOOL bSuccess = FALSE;

// set the header and request sizes
UsbBandInfoRequest.Header.UsbUserRequest = USBUSER_GET_BANDWIDTH_INFORMATION;
UsbBandInfoRequest.Header.RequestBufferLength = sizeof(UsbBandInfoRequest);

bSuccess = DeviceIoControl(hHCDev,
                           IOCTL_USB_USER_REQUEST,
                           &UsbBandInfoRequest,
                           sizeof(UsbBandInfoRequest),
                           &UsbBandInfoRequest,
                           sizeof(UsbBandInfoRequest),
                           &dwBytes,
                           NULL);
if (!bSuccess)
{
    dwError = GetLastError();
}
// On success, UsbBandInfoRequest.BandwidthInformation holds the data.
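As for where hHCDev comes from: it is a handle to the USB host controller device. In case it helps, here is a rough sketch of one way to obtain it, modeled on what the official sample does (the helper name is mine, and error handling is trimmed):
#include <windows.h>
#include <initguid.h>   // must precede usbiodef.h so the GUID gets defined
#include <usbiodef.h>   // GUID_DEVINTERFACE_USB_HOST_CONTROLLER
#include <setupapi.h>
#pragma comment(lib, "setupapi.lib")

// Hypothetical helper: opens the first USB host controller present.
HANDLE OpenFirstHostController()
{
    HANDLE hHCDev = INVALID_HANDLE_VALUE;
    HDEVINFO devs = SetupDiGetClassDevs(&GUID_DEVINTERFACE_USB_HOST_CONTROLLER,
                                        NULL, NULL,
                                        DIGCF_PRESENT | DIGCF_DEVICEINTERFACE);
    if (devs == INVALID_HANDLE_VALUE)
        return hHCDev;

    SP_DEVICE_INTERFACE_DATA ifData;
    ifData.cbSize = sizeof(ifData);
    if (SetupDiEnumDeviceInterfaces(devs, NULL,
            &GUID_DEVINTERFACE_USB_HOST_CONTROLLER, 0, &ifData))
    {
        DWORD needed = 0;
        SetupDiGetDeviceInterfaceDetail(devs, &ifData, NULL, 0, &needed, NULL);
        PSP_DEVICE_INTERFACE_DETAIL_DATA detail =
            (PSP_DEVICE_INTERFACE_DETAIL_DATA)malloc(needed);
        detail->cbSize = sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA);
        if (SetupDiGetDeviceInterfaceDetail(devs, &ifData, detail, needed, NULL, NULL))
        {
            // The detail data holds the device path to hand to CreateFile.
            hHCDev = CreateFile(detail->DevicePath, GENERIC_WRITE, FILE_SHARE_WRITE,
                                NULL, OPEN_EXISTING, 0, NULL);
        }
        free(detail);
    }
    SetupDiDestroyDeviceInfoList(devs);
    return hHCDev;
}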

Direct2D draws nothing on a "hardware mode" render target

I'm trying to use Direct2D on a render target created by Direct3D, according to this MSDN document. The idea behind not using an HWND render target is to let the application switch targets easily, so I can e.g. render the static painting to a bitmap and use direct commands for any runtime-dependent painting. The code that creates the interfaces is the following:
bool CreateD2DC(HWND hh)
{
    D2D& d = *this;

    D3D_FEATURE_LEVEL featureLevels[] =
    {
        D3D_FEATURE_LEVEL_11_1,
        D3D_FEATURE_LEVEL_11_0,
        D3D_FEATURE_LEVEL_10_1,
        D3D_FEATURE_LEVEL_10_0,
        D3D_FEATURE_LEVEL_9_3,
        D3D_FEATURE_LEVEL_9_2,
        D3D_FEATURE_LEVEL_9_1
    };

    auto de = D3D_DRIVER_TYPE_HARDWARE;
    if (IsCurrentSessionRemoteable())
        de = D3D_DRIVER_TYPE_SOFTWARE;

    D3D_FEATURE_LEVEL m_featureLevel;
    D3D11CreateDevice(
        nullptr,                          // specify null to use the default adapter
        de,
        0,
        D3D11_CREATE_DEVICE_BGRA_SUPPORT, // optionally set debug and Direct2D compatibility flags
        featureLevels,                    // list of feature levels this app can support
        ARRAYSIZE(featureLevels),         // number of possible feature levels
        D3D11_SDK_VERSION,
        &d.device,                        // returns the Direct3D device created
        &m_featureLevel,                  // returns feature level of device created
        &d.context                        // returns the device immediate context
    );
    if (!d.device)
        return 0;

    d.dxgiDevice = d.device;
    if (!d.dxgiDevice)
        return 0;

    D2D1_CREATION_PROPERTIES dp;
    dp.threadingMode = D2D1_THREADING_MODE::D2D1_THREADING_MODE_SINGLE_THREADED;
    dp.debugLevel = D2D1_DEBUG_LEVEL::D2D1_DEBUG_LEVEL_NONE;
    dp.options = D2D1_DEVICE_CONTEXT_OPTIONS::D2D1_DEVICE_CONTEXT_OPTIONS_NONE;
    D2D1CreateDevice(d.dxgiDevice, dp, &d.m_d2dDevice);
    if (!d.m_d2dDevice)
        return 0;

    d.m_d2dDevice->CreateDeviceContext(
        D2D1_DEVICE_CONTEXT_OPTIONS_NONE,
        &d.m_d2dContext);
    if (!d.m_d2dContext)
        return 0;

    // Allocate a descriptor.
    DXGI_SWAP_CHAIN_DESC1 swapChainDesc = { 0 };
    swapChainDesc.Width = 0; // use automatic sizing
    swapChainDesc.Height = 0;
    swapChainDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; // this is the most common swapchain format
    swapChainDesc.Stereo = false;
    swapChainDesc.SampleDesc.Count = 1; // don't use multi-sampling
    swapChainDesc.SampleDesc.Quality = 0;
    swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    swapChainDesc.BufferCount = 2; // use double buffering to enable flip
    swapChainDesc.Scaling = DXGI_SCALING_NONE;
    swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL; // all apps must use this SwapEffect
    swapChainDesc.Flags = 0;

    d.dxgiDevice->GetAdapter(&d.dxgiAdapter);
    if (!d.dxgiAdapter)
        return 0;

    // Get the factory object that created the DXGI device.
    d.dxgiAdapter->GetParent(IID_PPV_ARGS(&d.dxgiFactory));
    if (!d.dxgiFactory)
        return 0;

    d.dxgiFactory->CreateSwapChainForHwnd(
        d.device,
        hh, &swapChainDesc,
        nullptr,
        nullptr, // allow on all displays
        &d.m_swapChain);
    if (!d.m_swapChain)
        return 0;

    d.dxgiDevice->SetMaximumFrameLatency(1);

    d.m_swapChain->GetBuffer(0, IID_PPV_ARGS(&d.backBuffer));
    if (!d.backBuffer)
        return 0;

    // Now we set up the Direct2D render target bitmap linked to the swapchain.
    D2D1_BITMAP_PROPERTIES1 bitmapProperties;
    bitmapProperties.bitmapOptions = D2D1_BITMAP_OPTIONS_TARGET | D2D1_BITMAP_OPTIONS_CANNOT_DRAW;
    bitmapProperties.pixelFormat.alphaMode = D2D1_ALPHA_MODE_IGNORE;
    bitmapProperties.pixelFormat.format = DXGI_FORMAT_B8G8R8A8_UNORM;
    bitmapProperties.dpiX = 96;
    bitmapProperties.dpiY = 96;
    bitmapProperties.colorContext = 0;

    // Direct2D needs the dxgi version of the backbuffer surface pointer.
    d.m_swapChain->GetBuffer(0, IID_PPV_ARGS(&d.dxgiBackBuffer));
    if (!d.dxgiBackBuffer)
        return 0;

    // Get a D2D surface from the DXGI back buffer to use as the D2D render target.
    d.m_d2dContext->CreateBitmapFromDxgiSurface(
        d.dxgiBackBuffer,
        &bitmapProperties,
        &d.m_d2dTargetBitmap);
    if (!d.m_d2dTargetBitmap)
        return 0;

    // Now we can set the Direct2D render target.
    d.m_d2dContext->SetTarget(d.m_d2dTargetBitmap);
    return true;
}
The problem is this code:
auto de = D3D_DRIVER_TYPE_HARDWARE;
When it is used, Direct2D draws nothing, although EndDraw() returns S_OK. If I use D3D_DRIVER_TYPE_SOFTWARE instead, everything works correctly.
What am I doing wrong?

MQSendMessage() failed with MQ_ERROR_INVALID_PARAMETER (0xC00E0006)

Is this a problem with the MSMQ configuration or with the code I've implemented?
I've written Windows service code (Win32 C++) that sends a log message to a local private queue. The code works fine when built for 32-bit Windows (7/8/Vista), but when the same code is built for x64, MQSendMessage() fails with MQ_ERROR_INVALID_PARAMETER (0xC00E0006). What could be the problem? Please help me out in this regard. Thanks in advance.
I've tried changing NUMBEROFPROPERTIES from 3 to 7 on x64 Windows 7, but the problem remains the same. What should I do to avoid this?
Here is my sample code
#define ClientQueue L".\\Private$\\TestQueue"
#define LogMsgLable L"TestLOG"
#define MIN_PRIVATE_QUEUE_NAME_LENGTH 55

DWORD MSMQSendMessage()
{
    // Define the required constants and variables.
    const int NUMBEROFPROPERTIES = 7; // Number of properties
    DWORD cPropId = 0;                // Property counter
    HRESULT hr = MQ_OK;               // Return code
    HANDLE hQueue = NULL;             // Queue handle

    // Define an MQMSGPROPS structure.
    MQMSGPROPS msgProps;
    MSGPROPID aMsgPropId[NUMBEROFPROPERTIES] = {0};
    MQPROPVARIANT aMsgPropVar[NUMBEROFPROPERTIES] = {0};
    HRESULT aMsgStatus[NUMBEROFPROPERTIES] = {0};

    // Specify the message properties to be sent.
    aMsgPropId[cPropId] = PROPID_M_LABEL;     // Property ID
    aMsgPropVar[cPropId].vt = VT_LPWSTR;      // Type indicator
    aMsgPropVar[cPropId].pwszVal = L"ADCLOG"; // The message label
    cPropId++;

    // Specify storage of messages on the hard disk,
    // i.e. mark the message as recoverable.
    aMsgPropId[cPropId] = PROPID_M_DELIVERY;
    aMsgPropVar[cPropId].vt = VT_UI1;
    aMsgPropVar[cPropId].bVal = MQMSG_DELIVERY_RECOVERABLE;
    cPropId++;

    aMsgPropId[cPropId] = PROPID_M_ACKNOWLEDGE; // Property ID
    aMsgPropVar[cPropId].vt = VT_UI1;           // Type indicator
    aMsgPropVar[cPropId].bVal = MQMSG_ACKNOWLEDGMENT_FULL_RECEIVE;
    cPropId++;

    // We need to set the size of the message body;
    // if we don't set it, 4 MB is taken as the default message size.
    // To set it we have PROPID_M_BODY.
    ULONG ulBufferSize = sizeof( 15);
    char *lLog_msg = NULL;
    lLog_msg = (char*)GlobalAlloc(GPTR, sizeof( 15));
    ZeroMemory(lLog_msg, sizeof( 15));
    strcpy(lLog_msg, "HelloWorld");

    aMsgPropId[cPropId] = PROPID_M_BODY;                  // Property ID
    aMsgPropVar[cPropId].vt = VT_VECTOR | VT_UI1;         // Type indicator
    aMsgPropVar[cPropId].caub.pElems = (UCHAR *)lLog_msg; // Body buffer
    aMsgPropVar[cPropId].caub.cElems = ulBufferSize;      // Buffer size
    cPropId++;

    // Here we should not put VT_NULL in the type; it is defined with VT_UI4.
    aMsgPropId[cPropId] = PROPID_M_BODY_TYPE; // Property ID
    aMsgPropVar[cPropId].vt = VT_UI4;         // Type indicator
    cPropId++;

    // Initialize the MQMSGPROPS structure.
    msgProps.cProp = cPropId;
    msgProps.aPropID = aMsgPropId;
    msgProps.aPropVar = aMsgPropVar;
    msgProps.aStatus = aMsgStatus;

    // Create a direct format name for the queue.
    WCHAR *gFormatName = NULL;
    DWORD dwBufferLength = 0;
    dwBufferLength = MIN_PRIVATE_QUEUE_NAME_LENGTH; // Private queue format name buffer size, at least 54
    gFormatName = (WCHAR *)malloc(dwBufferLength * sizeof(WCHAR));
    if (gFormatName == NULL)
    {
        printf("malloc failed\n");
        return MQ_ERROR_INSUFFICIENT_RESOURCES;
    }
    SecureZeroMemory(gFormatName, dwBufferLength * sizeof(WCHAR));

    hr = MQPathNameToFormatName(ClientQueue,
                                gFormatName,
                                &dwBufferLength);
    if (FAILED(hr))
    {
        if (hr == MQ_ERROR_FORMATNAME_BUFFER_TOO_SMALL)
        {
            if (gFormatName != NULL)
            {
                gFormatName = (WCHAR *)realloc(gFormatName, dwBufferLength * sizeof(WCHAR));
                if (gFormatName == NULL)
                {
                    printf("realloc failed\n");
                    return MQ_ERROR_INSUFFICIENT_RESOURCES;
                }
            }
            SecureZeroMemory(gFormatName, dwBufferLength * sizeof(WCHAR));
            hr = MQPathNameToFormatName(ClientQueue,
                                        gFormatName,
                                        &dwBufferLength);
            if (FAILED(hr))
            {
                printf("MQPathNameToFormatName2 failed:%x\n", hr);
                return hr;
            }
        }
        else
        {
            printf("MQPathNameToFormatName failed:%x\n", hr);
            return hr;
        }
    }

    // Call MQOpenQueue to open the queue with send access.
    hr = MQOpenQueue(
        gFormatName,    // Format name of the queue
        MQ_SEND_ACCESS, // Access mode
        MQ_DENY_NONE,   // Share mode
        &hQueue         // OUT: Queue handle
    );
    if (FAILED(hr))
    {
        printf("MQOpenQueue failed:%x\n", hr);
        goto ret;
    }
    if (gFormatName)
        free(gFormatName);

    // Call MQSendMessage to send the message to the queue.
    hr = MQSendMessage(
        hQueue,    // Queue handle
        &msgProps, // Message property structure
        NULL       // Not in a transaction
    );
    if (FAILED(hr))
    {
        printf("MQSendMessage failed:%x\n", hr);
        MQCloseQueue(hQueue);
        goto ret;
    }

    // Call MQCloseQueue to close the queue.
    hr = MQCloseQueue(hQueue);
    if (hr != 0)
    {
        printf("MQCloseQueue failed:%x", hr);
        goto ret;
    }

ret:
    if (lLog_msg)
    {
        GlobalFree(lLog_msg);
        lLog_msg = NULL;
    }
    return hr;
}
Your code works on 32-bit Windows purely by chance. Take a look at this:
ULONG ulBufferSize = sizeof( 15);
char *lLog_msg = NULL;
lLog_msg = ( char*)GlobalAlloc( GPTR, sizeof( 15));
ZeroMemory( lLog_msg, sizeof( 15)) ;
strcpy(lLog_msg, "HelloWorld");
You seem to misunderstand what the sizeof operator does. It is a compile-time operator that is replaced with the size of its argument. In this case, the compiler replaces sizeof(15) with the number 4, because a literal constant like 15 has type int, which occupies 4 bytes. So in the code above you are allocating 4 bytes of memory and then copying 11 bytes into it, thereby corrupting memory.
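To make that concrete, a quick check (note that sizeof("HelloWorld") is 11, the ten characters plus the terminating NUL, which is exactly what strcpy writes):
#include <stdio.h>

int main(void)
{
    printf("%zu\n", sizeof(15));           // 4: size of an int literal
    printf("%zu\n", sizeof("HelloWorld")); // 11: 10 chars + terminating NUL
    return 0;
}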
To fix this, simply remove sizeof. The code above should look like this:
ULONG ulBufferSize = 15;
char *lLog_msg = NULL; // this is pointless since you set it in the next line
lLog_msg = ( char*)GlobalAlloc( GPTR, ulBufferSize);
ZeroMemory( lLog_msg, ulBufferSize) ;
strcpy(lLog_msg, "HelloWorld");
