KNEM cookie and declared region - linux-kernel

First question: I wasn't able to find PROT_WRITE and PROT_READ defined anywhere, and that is giving me a hard time compiling. I replaced them with 0 and 1, but it doesn't seem to work.
Second, "rejected (unexisting region cookie)"
int rank;
MPI_Init( &argc, &argv );
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
MPI_Win win;
int knem_fd = open("/dev/knem", O_RDWR);
int err;
uint64_t size = 64;

if( rank == 0 ){
    char *inbuf = malloc(size);
    for( int i = 0; i < size; i++ )
        inbuf[i] = rand() % 26 + 97;
    print_array( inbuf, size, '0' );

    struct knem_cmd_create_region create;
    struct knem_cmd_param_iovec knem_iov[1];
    knem_iov[0].base = (uint64_t)&inbuf;
    knem_iov[0].len = size;
    create.iovec_array = (uintptr_t) &knem_iov[0];
    create.iovec_nr = 1;
    create.flags = KNEM_FLAG_SINGLEUSE;
    //create.protection = 1;
    err = ioctl( knem_fd, KNEM_CMD_CREATE_REGION, &create );

    MPI_Send( &(create.cookie), 1, MPI_UINT64_T, 1, 0, MPI_COMM_WORLD );
    MPI_Barrier( MPI_COMM_WORLD );
} else if( rank == 1 ){
    char *obuf = malloc(size);
    int err;

    struct knem_cmd_copy copy;
    struct knem_cmd_create_region create;
    struct knem_cmd_param_iovec knem_iov[1];
    knem_iov[0].base = (uint64_t)&obuf;
    knem_iov[0].len = size;
    create.iovec_array = (uintptr_t) &knem_iov[0];
    create.iovec_nr = 1;
    //create.protection = 0;
    create.flags = KNEM_FLAG_SINGLEUSE;
    err = ioctl( knem_fd, KNEM_CMD_CREATE_REGION, &create );

    MPI_Recv( &(copy.src_cookie), 1, MPI_UINT64_T, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
    copy.src_offset = 0;
    copy.dst_cookie = create.cookie;
    copy.dst_offset = 0;
    copy.flags = 0;
    err = ioctl(knem_fd, KNEM_CMD_COPY, &copy);
    print_array( obuf, size, '1' );
    MPI_Barrier( MPI_COMM_WORLD );
}
Ranks 0 and 1 both create a region, rank 0 sends its cookie to rank 1, and rank 1 then goes and grabs the data from rank 0. I checked that the received cookie is the same as the sent cookie, but the copy still fails to find the declared region.

PROT_READ and PROT_WRITE are mmap flags; you need to include <sys/mman.h> to get them. In the second part of the code, you need to set copy.src_cookie to create.cookie (or just use an inline copy to avoid creating that region at all, since it will be destroyed immediately anyway because of the SINGLEUSE flag). Also, make sure you check the return values of all ioctl calls before continuing. The copy cannot work if create.cookie wasn't initialized because the create ioctl failed.
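For illustration, a minimal sketch of both points (fill in create.protection with the mmap flags and check the ioctl return value before using the cookie), assuming only the field names visible in the snippets above; declare_region() is an invented helper and 0 is used purely as an "invalid cookie" sentinel for the example:
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>      /* PROT_READ / PROT_WRITE */
/* plus the KNEM ioctl header already used by the snippets above */

/* Hypothetical helper: declare one buffer as a KNEM region and return its cookie. */
static uint64_t declare_region( int knem_fd, void *buf, uint64_t len, int prot )
{
    struct knem_cmd_param_iovec iov;
    struct knem_cmd_create_region create;

    iov.base = (uintptr_t) buf;           /* the buffer itself, not the address of the pointer variable */
    iov.len  = len;

    create.iovec_array = (uintptr_t) &iov;
    create.iovec_nr    = 1;
    create.flags       = KNEM_FLAG_SINGLEUSE;
    create.protection  = prot;            /* e.g. PROT_READ on the sending rank, PROT_WRITE on the receiving one */

    if( ioctl( knem_fd, KNEM_CMD_CREATE_REGION, &create ) < 0 ) {
        perror( "KNEM_CMD_CREATE_REGION" );
        return 0;                         /* never send or use an uninitialized cookie */
    }
    return create.cookie;
}
Rank 0 would only MPI_Send the cookie if declare_region() succeeded, and rank 1 would likewise check its own region (and the KNEM_CMD_COPY return value) before reading obuf.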

Related

What causes cases with high ZeroMQ latency and how to avoid them?

I am trying to use ZeroMQ for fast message passing. Messages need to be delivered in less than 1 [ms]. I did some testing (inproc, single process on Linux, no TCP) and found that usually there is no problem with that. The latency is about 10 - 100 [us], depending on how often the messages are sent (why?). Sometimes, however, messages are received after 6 [ms], which is unacceptable.
What can cause some messages to be delayed?
Maybe the process is preempted?
Or is it because of the polling used (zmq_poll())?
Example results from my test:
avg lag = 28 [us]
max lag = 5221 [us]
std dev = 25.85 [us]
big lag = 180 x above 200 [us]
"big lag" means number of cases where latency was over 200 [us]. In my tests there are 500 000 messages sent so the value 180 means that latency over 200 [us] was recorded in 180 / 500000 = 0,036%. It's a quite low number but I'd like it to be zero. Even on the expense of average latency.
The test source code is below :
#include <stdlib.h>
#include <stdio.h>   /* printf */
#include <math.h>
#include <time.h>    /* struct timespec, clock_gettime, nanosleep, time */
#include <zmq.h>
#include <pthread.h>
#define SOCKETS_NUM 5
#define RUNS 100000
void *context;
int numbers[SOCKETS_NUM];
struct {
struct timespec send_time;
struct timespec receive_time;
} times[SOCKETS_NUM * RUNS], *ptimes;
static void * worker_thread(void * dummy) {
int * number = dummy;
char endpoint[] = "inproc://endpointX";
endpoint[17] = (char)('0' + *number);
void * socket = zmq_socket(context, ZMQ_PUSH);
zmq_connect(socket, endpoint);
struct timespec sleeptime, remtime;
int rnd = rand() / 3000;
sleeptime.tv_sec = 0;
sleeptime.tv_nsec = rnd;
nanosleep(&sleeptime, &remtime);
clock_gettime(CLOCK_REALTIME, &(ptimes[*number].send_time));
zmq_send(socket, "Hello", 5, 0);
zmq_close(socket);
return NULL;
}
static void run_test(zmq_pollitem_t items[]) {
pthread_t threads[SOCKETS_NUM];
for (int i = 0; i < SOCKETS_NUM; i++) {
pthread_create(&threads[i], NULL, worker_thread, &numbers[i]);
}
char buffer[10];
int to_receive = SOCKETS_NUM;
for (int i = 0; i < SOCKETS_NUM; i++) {
int rc = zmq_poll(items, SOCKETS_NUM, -1);
for (int j = 0; j < SOCKETS_NUM; j++) {
if (items[j].revents & ZMQ_POLLIN) {
clock_gettime(CLOCK_REALTIME, &(ptimes[j].receive_time));
zmq_recv(items[j].socket, buffer, 10, 0);
}
}
to_receive -= rc;
if (to_receive == 0) break;
}
for (int i = 0; i < SOCKETS_NUM; i++) {
pthread_join(threads[i], NULL);
}
}
int main(void)
{
context = zmq_ctx_new();
zmq_ctx_set(context, ZMQ_THREAD_SCHED_POLICY, SCHED_FIFO);
zmq_ctx_set(context, ZMQ_THREAD_PRIORITY, 99);
void * responders[SOCKETS_NUM];
char endpoint[] = "inproc://endpointX";
for (int i = 0; i < SOCKETS_NUM; i++) {
responders[i] = zmq_socket(context, ZMQ_PULL);
endpoint[17] = (char)('0' + i);
zmq_bind(responders[i], endpoint);
numbers[i] = i;
}
time_t tt;
time_t t = time(&tt);
srand((unsigned int)t);
zmq_pollitem_t poll_items[SOCKETS_NUM];
for (int i = 0; i < SOCKETS_NUM; i++) {
poll_items[i].socket = responders[i];
poll_items[i].events = ZMQ_POLLIN;
}
ptimes = times;
for (int i = 0; i < RUNS; i++) {
run_test(poll_items);
ptimes += SOCKETS_NUM;
}
long int lags[SOCKETS_NUM * RUNS];
long int total_lag = 0;
long int max_lag = 0;
long int big_lag = 0;
for (int i = 0; i < SOCKETS_NUM * RUNS; i++) {
lags[i] = (times[i].receive_time.tv_nsec - times[i].send_time.tv_nsec + (times[i].receive_time.tv_sec - times[i].send_time.tv_sec) * 1000000000) / 1000;
if (lags[i] > max_lag) max_lag = lags[i];
total_lag += lags[i];
if (lags[i] > 200) big_lag++;
}
long int avg_lag = total_lag / SOCKETS_NUM / RUNS;
double SD = 0.0;
for (int i = 0; i < SOCKETS_NUM * RUNS; ++i) {
SD += pow((double)(lags[i] - avg_lag), 2);
}
double std_lag = sqrt(SD / SOCKETS_NUM / RUNS);
printf("avg lag = %l5d [us]\n", avg_lag);
printf("max lag = %l5d [us]\n", max_lag);
printf("std dev = %8.2f [us]\n", std_lag);
printf("big lag = %l5d x above 200 [us]\n", big_lag);
for (int i = 0; i < SOCKETS_NUM; i++) {
zmq_close(responders[i]);
}
zmq_ctx_destroy(context);
return 0;
}
Q : "...I'd like it to be zero."
Cool to say, yet hard to achieve.
As you run an ultra-fast, memory-mapped inproc:// Transport Class, the main focus will be performance tweaking of the Context()-processing. Here you spend an awful lot of setup-overhead & termination-overhead operations just to send a 5 [B] payload 1E5 times, so I guess there will never be a queue-management related issue, as there will never be any "stack-growing" at all.
1 ) ( suppose we leave the code as-is ) a natural first step for performance tuning is to at least set the per-socket ZMQ_AFFINITY mapping onto the context's I/O-thread(s), so the work serving a socket is not jumping or wandering from core to core. It may be interesting to see whether that many ~ 5E5 socket setups/terminations on the PUSH-er side, each without ever sending more than a single shot of 5 [B] over the memory-mapped line, could get some help (for those large overheads & maintenance) from configuring the context-instance with SOCKETS_NUM I/O-threads, using the ZMQ_IO_THREADS setting ( when fighting for "RealTime"-ness with SCHED_FIFO, having only one I/O-thread does not help much, does it? ). A small sketch of these knobs is attached below, after this list.
2 ) the next level of experimentation is to re-balance the ZMQ_THREAD_AFFINITY_CPU_ADD maps (pinning the global context's I/O-threads onto CPU-cores) and the per-socket ZMQ_AFFINITY maps onto the context's I/O-thread(s). Given a sufficient number of CPU-cores, there may be some performance / ultra-low-latency benefit from making the gang of I/O-threads serving one socket-instance stay "together" on the same CPU-core, yet here we get into territory where the actual hardware, the real system's background workloads and the still-"spare" resources for this "RealTime"-ambition-motivated experimenting become hard to predict without in-vivo testing & validation.
3 ) tweaking per-socket zmq_setsockopt() parameters may help, yet given the nano-scaled socket lifetime here ( rather an expensive, one-time-used "consumable-disposable" ), do not expect any breakthrough from this.
4 ) anything trying to measure with nanosecond resolution, the more so when used for "durations" of something, ought to use CLOCK_MONOTONIC_RAW, which avoids ntp-injected adjustments, astronomy-correcting leap-second injections et al ( a short timing sketch is attached at the end of this answer ).
5 ) the zmq_poll()-strategy: I would not go this way. Using timeout == -1 blocks the whole circus, a thing I strongly discourage in any distributed-computing system, the more so in one with a "RealTime" ambition. Spinning the PULL-side up to maximum performance may go via having 1:1 PUSH/PULL threads on either side, or, if trying to challenge the grooming, keep the 5 PUSH-er threads as you have them and collect all ingress messages on just a single, Zero-Copy, well-oiled PULL-er ( easier polling; a payload-based index-helper can tell which send-side timestamp a receive-side timestamp belongs to ). Either way, a blocking poller is almost the anti-pattern for challenging any low-latency soft-realtime toys.
Anyway, do not hesitate to refactor the code and to use profiling tools to better see where you "acquire" the big_lag-s ( my guesses are above ).
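A minimal sketch of the knobs from points 1 ) and 2 ), using only the options named above; the thread count, CPU-core numbers and affinity bitmask are illustrative assumptions, not tuned values, and ZMQ_THREAD_AFFINITY_CPU_ADD needs a reasonably recent libzmq:
#include <sched.h>
#include <stdint.h>
#include <zmq.h>

#define SOCKETS_NUM 5

int main( void )
{
    void *context = zmq_ctx_new();

    /* context-level knobs -- set before the first socket is created */
    zmq_ctx_set( context, ZMQ_IO_THREADS,          SOCKETS_NUM ); /* one I/O-thread per PULL-socket ( an assumption ) */
    zmq_ctx_set( context, ZMQ_THREAD_SCHED_POLICY, SCHED_FIFO );
    zmq_ctx_set( context, ZMQ_THREAD_PRIORITY,     99 );
    zmq_ctx_set( context, ZMQ_THREAD_AFFINITY_CPU_ADD, 2 );       /* pin the context's I/O-threads onto core #2 ... */
    zmq_ctx_set( context, ZMQ_THREAD_AFFINITY_CPU_ADD, 3 );       /* ... and core #3 ( illustrative only ) */

    /* socket-level knob -- map this socket onto the context's I/O-thread #0 only */
    void    *pull   = zmq_socket( context, ZMQ_PULL );
    uint64_t io_map = 1ULL << 0;                                   /* bitmask over the context's I/O-threads */
    zmq_setsockopt( pull, ZMQ_AFFINITY, &io_map, sizeof io_map );
    zmq_bind( pull, "inproc://endpoint0" );

    zmq_close( pull );
    zmq_ctx_destroy( context );
    return 0;
}
The original test code, annotated, follows for reference.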
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <zmq.h>
#include <pthread.h>
#define SOCKETS_NUM 5
#define RUNS 100000
void *context;
int numbers[SOCKETS_NUM];
struct {
struct timespec send_time;
struct timespec recv_time;
} times[SOCKETS_NUM * RUNS],
*ptimes;
static void *worker_thread( void *dummy ) { //-------------------------- an overhead-expensive one-shot PUSH-based "Hello"-sender & .close()
int *number = dummy;
char endpoint[] = "inproc://endpointX";
endpoint[17] = (char)( '0' + *number );
int rnd = rand() / 3000;
void *socket = zmq_socket( context, ZMQ_PUSH );
struct timespec remtime,
sleeptime;
sleeptime.tv_sec = 0;
sleeptime.tv_nsec = rnd;
zmq_connect( socket, endpoint );
nanosleep( &sleeptime, &remtime ); // anything between < 0 : RAND_MAX/3000 > [ns] ... easily >> 32, as #define RAND_MAX 2147483647 ~ 715 827 [ns]
clock_gettime( CLOCK_REALTIME, &( ptimes[*number].send_time) ); //............................................................................ CLK_set_NEAR_SEND
// any CLOCK re-adjustments may and will skew any non-MONOTONIC_CLOCK
zmq_send( socket, "Hello", 5, 0 );
zmq_close( socket );
return NULL;
}
static void run_test( zmq_pollitem_t items[] ) { //--------------------- zmq_poll()-blocked zmq_recv()-orchestrator ( called ~ 1E5 x !!! resources' nano-use & setup + termination overheads matter )
char buffer[10];
int to_receive = SOCKETS_NUM;
pthread_t threads[SOCKETS_NUM];
for ( int i = 0; i < SOCKETS_NUM; i++ ) { //------------------------ thread-maker ( a per-socket PUSH-er[]-s )
pthread_create( &threads[i], NULL, worker_thread, &numbers[i] );
}
for ( int i = 0; i < SOCKETS_NUM; i++ ) { //------------------------ [SERIAL]-------- [i]-stepping
int rc = zmq_poll( items, SOCKETS_NUM, -1 ); //----------------- INFINITE ??? --- blocks /\/\/\/\/\/\/\/\/\/\/\ --- several may flag ZMQ_POLLIN
for ( int j = 0; j < SOCKETS_NUM; j++ ) { //-------------------- ALL-CHECKED in a loop for an items[j].revents
if ( items[j].revents & ZMQ_POLLIN ) { //------------------- FIND IF IT WAS THIS ONE
clock_gettime( CLOCK_REALTIME, &( ptimes[j].recv_time ) );//...................................................................... CLK_set_NEAR_poll()_POSACK'd R2recv
zmq_recv( items[j].socket, buffer, 10, 0 ); //---------- READ-IN from any POSACK'd by zmq_poll()-er flag(s)
}
}
to_receive -= rc; // ---------------------------------------------------------------------------------------------- SUB rc
if (to_receive == 0) break;
}
for ( int i = 0; i < SOCKETS_NUM; i++ ) { //------------------------ thread-killer
pthread_join( threads[i], NULL );
}
}
int main( void ) {
context = zmq_ctx_new();
zmq_ctx_set( context, ZMQ_THREAD_SCHED_POLICY, SCHED_FIFO );
zmq_ctx_set( context, ZMQ_THREAD_PRIORITY, 99 );
void *responders[SOCKETS_NUM];
char endpoint[] = "inproc://endpointX";
for ( int i = 0; i < SOCKETS_NUM; i++ ) {
responders[i] = zmq_socket( context, ZMQ_PULL ); // ------------ PULL instances into []
endpoint[17] = (char)( '0' + i );
zmq_bind( responders[i], endpoint ); //------------------------- .bind()
numbers[i] = i;
}
time_t tt;
time_t t = time(&tt);
srand( (unsigned int)t );
zmq_pollitem_t poll_items[SOCKETS_NUM];
for ( int i = 0; i < SOCKETS_NUM; i++ ) { //------------------------ zmq_pollitem_t array[] ---pre-fill---
poll_items[i].socket = responders[i];
poll_items[i].events = ZMQ_POLLIN;
}
ptimes = times;
for ( int i = 0; i < RUNS; i++ ) { //------------------------------- 1E5 RUNs
run_test( poll_items ); // ------------------------------------- RUN TEST
ptimes += SOCKETS_NUM;
}
long int lags[SOCKETS_NUM * RUNS];
long int total_lag = 0;
long int max_lag = 0;
long int big_lag = 0;
for ( int i = 0; i < SOCKETS_NUM * RUNS; i++ ) {
lags[i] = ( times[i].recv_time.tv_nsec
- times[i].send_time.tv_nsec
+ ( times[i].recv_time.tv_sec
- times[i].send_time.tv_sec
) * 1000000000
) / 1000; // --------------------------------------- [us]
if ( lags[i] > max_lag ) max_lag = lags[i];
total_lag += lags[i];
if ( lags[i] > 200 ) big_lag++;
}
long int avg_lag = total_lag / SOCKETS_NUM / RUNS;
double SD = 0.0;
for ( int i = 0; i < SOCKETS_NUM * RUNS; ++i ) {
SD += pow( (double)( lags[i] - avg_lag ), 2 );
}
double std_lag = sqrt( SD / SOCKETS_NUM / RUNS );
printf("avg lag = %l5d [us]\n", avg_lag);
printf("max lag = %l5d [us]\n", max_lag);
printf("std dev = %8.2f [us]\n", std_lag);
printf("big lag = %l5d x above 200 [us]\n", big_lag);
for ( int i = 0; i < SOCKETS_NUM; i++ ) {
zmq_close( responders[i] );
}
zmq_ctx_destroy( context );
return 0;
}
Using nanosleep for a random (non-cardinal, safely outside of any control-loop activity) sleep is rather a risky luxury, as in earlier kernels it caused problems:
In order to support applications requiring much more precise pauses (e.g., in order to control some time-critical hardware), nanosleep() would handle pauses of up to 2 ms by busy waiting with microsecond precision when called from a thread scheduled under a real-time policy like SCHED_FIFO or SCHED_RR. This special extension was removed in kernel 2.5.39, hence is still present in current 2.4 kernels, but not in 2.6 kernels.
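And the timing sketch referenced in point 4 ) above: duration-style timestamps taken with the Linux-specific CLOCK_MONOTONIC_RAW instead of CLOCK_REALTIME, reduced to a self-contained toy ( timestamps from this clock stay comparable across the threads of one process, but are not wall-clock times ):
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* microseconds elapsed between two timespec samples */
static int64_t elapsed_us( const struct timespec *t0, const struct timespec *t1 )
{
    return ( ( t1->tv_sec  - t0->tv_sec  ) * 1000000000LL
           + ( t1->tv_nsec - t0->tv_nsec ) ) / 1000;
}

int main( void )
{
    struct timespec send_time, recv_time;

    clock_gettime( CLOCK_MONOTONIC_RAW, &send_time );  /* not skewed by ntp slewing / leap seconds */
    /* ... the code whose latency is being measured ... */
    clock_gettime( CLOCK_MONOTONIC_RAW, &recv_time );

    printf( "lag = %lld [us]\n", (long long)elapsed_us( &send_time, &recv_time ) );
    return 0;
}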

Not getting ACK after sending XMODEM frame

I have a device connected to a serial port waiting for a file to be transmitted using the XMODEM protocol.
I have tried constructing a message in XMODEM format and sending it, however I'm not getting the expected ACK for the transfer.
Below are the relevant bits of code:
Format of XMODEM message:
struct xmodem_packet
{
uint8_t start;
uint8_t block;
uint8_t block_neg;
uint8_t payload[128];
uint16_t crc;
};
Opening and configuring port:
HANDLE portHandler = CreateFile(L"COM9", GENERIC_READ | GENERIC_WRITE, 0, 0, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0);
DCB config = { 0 };
COMMTIMEOUTS timeout = { 0 };
// Configure
config.DCBlength = sizeof(config);
GetCommState(portHandler, &config);
config.BaudRate = CBR_115200;
config.ByteSize = 8;
config.StopBits = ONESTOPBIT;
config.Parity = NOPARITY;
SetCommState(portHandler, &config);
timeout.ReadIntervalTimeout = 50;
timeout.ReadTotalTimeoutConstant = 50;
timeout.ReadTotalTimeoutMultiplier = 50;
timeout.WriteTotalTimeoutConstant = 50;
timeout.WriteTotalTimeoutMultiplier = 10;
SetCommTimeouts(portHandler, &timeout);
Prepare module for XMODEM transfer:
DWORD toRead = 1;
DWORD wasWriten = 0;
DWORD wasRead = 0;
char responce = 0;
WriteFile(portHandler, "set load xmodem\n", 3+4+6+3, &wasWriten, NULL);
WriteFile(portHandler, "\n", 2, &wasWriten, NULL); // Doesn't work without this
Construct XMODEM frame:
xmodem_frame frame;
frame.start = SOH;
frame.block = 0;
frame.block_neg = 0;
memcpy(frame.payload, "test_data", 128);
swap16(crc16(frame.payload, sizeof(frame.payload)));
Send frame and look for ACK:
WriteFile(portHandler, &frame, sizeof(frame), &wasWriten, NULL);
ReadFile(portHandler, &responce, toRead, &wasRead, NULL);
if (responce == 6)
std::cout << "ACK was recieved";
else
std::cout << "ACK wasn't recieved";
I was expecting to get an ACK, however "ACK wasn't recieved" is always printed.

DirectX 11 Map deviceContext failed

I have an application that renders a cube and does some other stuff. Unfortunately, when I want to move some vertices with the mouse, I get an error: when I map the vertex buffer, the vertex data recorded in another struct array is empty. To test the application, I simply mapped the buffer right after creating it, to see whether the debugger would show me the real numbers. That fails too; everything is populated with a 0.00 value (positions, normals, tangents...). What is the problem?
Here you can find the code.
D3D11_BUFFER_DESC bd;
ZeroMemory( &bd, sizeof(bd) );
bd.Usage = D3D11_USAGE_DYNAMIC; //D3D11_USAGE_DEFAULT; //D3D11_USAGE_DYNAMIC;// D3D11_USAGE_DEFAULT;
bd.ByteWidth = CBSize( sizeof( VERTEX_PNTTB)* NumVertices);
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE; // 0; // D3D11_CPU_ACCESS_READ; //D3D11_CPU_ACCESS_WRITE; // 0;
bd.StructureByteStride = 0;
bd.MiscFlags = 0;
//bd.StructureByteStride
//bd.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA InitData;
ZeroMemory( &InitData, sizeof(InitData) );
InitData.pSysMem = (void*)vertices; //(void*)(mesh->GetVertexes().data()); //vertices; //(float*)vertices; // (UINT*)vertices;
InitData.SysMemPitch=0;
ID3D11Buffer* vbuff = NULL;
hr = device->CreateBuffer(&bd, &InitData, &vbuff); // &(m->vertexBuffer)); //&m_pVertexBuffer );
//if( FAILED( hr ) )
// return hr;
//}
m->vertexBuffer = vbuff;
D3D11_MAPPED_SUBRESOURCE mappedResource;
ID3D11Buffer* &buff = vbuff;
//g_pImmediateContext->CopyResource(buff, mElements[ 0 ]->getElement().model ->Meshes()[selectedMeshIndex]->VertexBuffer());
hr = g_pImmediateContext->Map( buff, 0, D3D11_MAP_WRITE_DISCARD ,0, &mappedResource); // D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
if (SUCCEEDED(hr))
{
VERTEX_PNTTB *vertices = (VERTEX_PNTTB *)mappedResource.pData;
// Fill vertex buffer
//vertices[0].position = vertices[0]; // XMFLOAT3(toX0,toY0,0); //-1;//toX0; //-1;//vf1.x; // x0;
/*for(UINT i=0; i<faces_indices.size(); i++)
{
vertices[ faces_indices[i] ].position.x = vertices[faces_indices[i] ].position.x + DirectX::XMFLOAT3(20*dx,20*dy,0).x;
vertices[ faces_indices[i] ].position.y = vertices[faces_indices[i] ].position.y + DirectX::XMFLOAT3(20*dx,20*dy,0).y;
vertices[ faces_indices[i] ].position.z = vertices[faces_indices[i] ].position.z + DirectX::XMFLOAT3(20*dx,20*dy,0).z;
}*/
//g_pImmediateContext->Unmap( mElements[ 0 ]->getElement().model ->Meshes()[selectedMeshIndex]->VertexBuffer(), 0);
g_pImmediateContext->Unmap( buff, 0);
}
Generally you don't want to create Vertex Buffers or Index Buffers in CPU-readable memory, as it has a major negative performance impact. You'll find it's much better to have a static VB/IB and another copy of the data in standard RAM for the CPU to modify.
That said, you can create the Direct3D 11 buffer in shared memory by using D3D11_USAGE_DYNAMIC and D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE. You then call Map with D3D11_MAP_READ_WRITE.
You definitely should not use D3D11_MAP_WRITE_DISCARD here: it gives you a fresh piece of memory that has no data in it, and it will overwrite the old data when you call Unmap.
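For the first suggestion, a minimal sketch of keeping the authoritative vertex data in a CPU-side array and pushing the whole array back into the GPU buffer after each edit; it assumes a buffer created with D3D11_USAGE_DEFAULT, and cpuVertices is an invented name standing in for the application's own copy of the mesh:
// CPU-side master copy of the mesh -- this is what the mouse edits modify.
std::vector<VERTEX_PNTTB> cpuVertices( NumVertices );

// ... move the selected vertices inside cpuVertices ...

// Re-upload the whole buffer. For a D3D11_USAGE_DEFAULT vertex buffer this is done
// with UpdateSubresource; no Map/Unmap and no CPU-access flags are needed.
g_pImmediateContext->UpdateSubresource(
    m->vertexBuffer,        // destination resource
    0,                      // subresource index
    nullptr,                // NULL box = update the whole buffer
    cpuVertices.data(),     // source data in system RAM
    0, 0 );                 // row / depth pitch (unused for buffers)
If the buffer stays D3D11_USAGE_DYNAMIC instead, the same idea can work by calling Map with D3D11_MAP_WRITE_DISCARD and memcpy-ing the complete cpuVertices array in, since the full contents are rewritten from the CPU copy anyway.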

SetupDiEnumDeviceInterfaces returns ERROR_INVALID_PARAMETER when querying bluetooth devices

I'm trying to get a HANDLE for a Bluetooth Low Energy device by using CreateFile().
To do that, I need to extract the device path of the device.
I get an ERROR_INVALID_PARAMETER error when calling SetupDiEnumDeviceInterfaces. It seems that the second parameter (DeviceInfoData) is the problem.
Any ideas what the problem could be?
EDITED: Simplified code
HDEVINFO hDevInfo;
DWORD i;
// Create a HDEVINFO with all present devices.
hDevInfo = SetupDiGetClassDevs(&GUID_DEVCLASS_BLUETOOTH, 0, 0, DIGCF_PRESENT);
if (hDevInfo == INVALID_HANDLE_VALUE)
{
// Insert error handling here.
return;//1;
}
PSP_DEVINFO_DATA DeviceInfoData = new SP_DEVINFO_DATA;
DeviceInfoData->cbSize = sizeof(SP_DEVINFO_DATA);
for (i = 0; SetupDiEnumDeviceInfo(hDevInfo, i, DeviceInfoData); i++)
{
DeviceInfoData->cbSize = sizeof(SP_DEVINFO_DATA);
char detailDataBuf[0x100];
PSP_DEVICE_INTERFACE_DETAIL_DATA detailData = (PSP_DEVICE_INTERFACE_DETAIL_DATA)detailDataBuf;
ULONG length;
ULONG requiredLength = 0;
bool bResult = FALSE;
for(DWORD j = 0; j < 10; j++ )
{
SP_DEVICE_INTERFACE_DATA interfaceData;
interfaceData.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA);
bResult = SetupDiEnumDeviceInterfaces(hDevInfo, DeviceInfoData, &GUID_DEVCLASS_BLUETOOTH, j, &interfaceData );
if (!bResult) {
int lastError = GetLastError(); // always returns ERROR 259
continue;
}
// Get the size of the buffer required to receive the device info
SetupDiGetDeviceInterfaceDetail(hDevInfo, &interfaceData, NULL, 0, &requiredLength, NULL );
if( requiredLength >= sizeof( detailDataBuf ) )
break;
// Get the name of the device
detailData->cbSize = sizeof( SP_DEVICE_INTERFACE_DETAIL_DATA );
length = requiredLength;
bResult = SetupDiGetDeviceInterfaceDetail(hDevInfo, &interfaceData, detailData, length, &requiredLength, NULL ) != 0;
if( !bResult )
break;
}
}
EDITED2:
Passing in NULL for DeviceInfoData: this simplified case always returns false.
HDEVINFO hDevInfo = SetupDiGetClassDevs(&GUID_DEVCLASS_BLUETOOTH, 0, 0, DIGCF_PRESENT | DIGCF_DEVICEINTERFACE);
bool bResult = FALSE;
for(DWORD j = 0; j < 10; j++ )
{
SP_DEVICE_INTERFACE_DATA interfaceData;
interfaceData.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA);
bResult = SetupDiEnumDeviceInterfaces(hDevInfo, NULL, &GUID_DEVCLASS_BLUETOOTH, j, &interfaceData );
if (!bResult) {
int lastError = GetLastError(); // ERROR 259
continue;
}
}
The documentation says:
DeviceInfoData [in, optional]
A pointer to an SP_DEVINFO_DATA structure that specifies a device information element in DeviceInfoSet.
In other words, it must point to an element of the device information set, which the pointer you are passing does not. If you don't want to filter the results to the interfaces of a specific device in the device information set, pass NULL.
(Note that this is an input parameter, as indicated by the "in" tag. The output is passed via the fifth parameter, DeviceInterfaceData.)
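For illustration, a minimal sketch of that calling pattern, passing NULL for DeviceInfoData and using the usual two-call size query for SetupDiGetDeviceInterfaceDetail; it links against setupapi.lib, and whether the GUID used in the question is actually registered as a device interface class (rather than a device setup class) is left as an assumption to verify:
#include <windows.h>
#include <setupapi.h>
#include <vector>

void EnumerateInterfaces(const GUID& interfaceGuid)
{
    HDEVINFO hDevInfo = SetupDiGetClassDevs(&interfaceGuid, 0, 0,
                                            DIGCF_PRESENT | DIGCF_DEVICEINTERFACE);
    if (hDevInfo == INVALID_HANDLE_VALUE)
        return;

    SP_DEVICE_INTERFACE_DATA interfaceData;
    interfaceData.cbSize = sizeof(interfaceData);

    // NULL DeviceInfoData: enumerate interfaces of the whole set, not of one device.
    for (DWORD j = 0;
         SetupDiEnumDeviceInterfaces(hDevInfo, NULL, &interfaceGuid, j, &interfaceData);
         j++)
    {
        // First call: query the required detail-buffer size.
        DWORD requiredLength = 0;
        SetupDiGetDeviceInterfaceDetail(hDevInfo, &interfaceData, NULL, 0,
                                        &requiredLength, NULL);
        if (requiredLength == 0)
            continue;

        std::vector<BYTE> buffer(requiredLength);
        PSP_DEVICE_INTERFACE_DETAIL_DATA detailData =
            reinterpret_cast<PSP_DEVICE_INTERFACE_DETAIL_DATA>(buffer.data());
        detailData->cbSize = sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA);

        // Second call: fetch the device path to hand to CreateFile().
        if (SetupDiGetDeviceInterfaceDetail(hDevInfo, &interfaceData, detailData,
                                            requiredLength, NULL, NULL))
        {
            // detailData->DevicePath is the path for CreateFile().
        }
    }
    // GetLastError() == ERROR_NO_MORE_ITEMS (259) when the enumeration simply ran out.
    SetupDiDestroyDeviceInfoList(hDevInfo);
}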

The ".text" section accessed in memory and loaded from disc differs for GCC compiled application

The content of the '.text' section is accessed using code like this:
1) For the application which is loaded into memory (i.e. executing):
//accessing code in memory
PIMAGE_DOS_HEADER pDOSHeader = NULL;
pDOSHeader = static_cast<PIMAGE_DOS_HEADER>( (void*)hModule);
...
PIMAGE_NT_HEADERS pNTHeader = reinterpret_cast<PIMAGE_NT_HEADERS>((byte*)hModule + pDOSHeader->e_lfanew );
...
PIMAGE_FILE_HEADER pFileHeader = reinterpret_cast<PIMAGE_FILE_HEADER>((byte*)&pNTHeader->FileHeader );
...
PIMAGE_OPTIONAL_HEADER pOptionalHeader =
reinterpret_cast<PIMAGE_OPTIONAL_HEADER>((byte*)&pNTHeader->OptionalHeader );
...
PIMAGE_SECTION_HEADER pSectionHeader = reinterpret_cast<PIMAGE_SECTION_HEADER>(
(byte*)&pNTHeader->OptionalHeader +
pNTHeader->FileHeader.SizeOfOptionalHeader );
//so iterate headers and select one with right name
const char TEXT[] = ".text";
const char BSSTEXT[] = ".textbss";
unsigned int nSectionCount = pNTHeader->FileHeader.NumberOfSections;
char szSectionName[ IMAGE_SIZEOF_SHORT_NAME + 1 ];
szSectionName[ IMAGE_SIZEOF_SHORT_NAME ] = '\0';
for( unsigned int i = 0; i < nSectionCount; i++ )
{
memcpy( szSectionName, pSectionHeader->Name,
IMAGE_SIZEOF_SHORT_NAME );
if( 0 == strncmp( TEXT, szSectionName,
IMAGE_SIZEOF_SHORT_NAME ) )
{
break;
}
pSectionHeader++;
}
pVirtualAddress = (void*)(pSectionHeader->VirtualAddress);
dwCodeSize = pSectionHeader->Misc.VirtualSize;
//seems reasonable: To calculate the real starting address of a given section in memory,
//add the base address of the image to the section's VirtualAddress stored in this field.
pCodeStart = (void*)(((byte*)hModule) +(size_t)((byte*)pVirtualAddress) );
pCodeEnd = (void*)((byte*)pCodeStart + dwCodeSize);
2) For the application file read from the HDD and mapped into memory:
//loading code from file and mapping
hFile = CreateFile( filename, FILE_READ_DATA, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0);
...
hFileMapping = CreateFileMapping( hFile, NULL, PAGE_READONLY, 0, 0, NULL );
...
pBaseAddress = MapViewOfFile( hFileMapping, FILE_MAP_READ, 0, 0, 0 );
...
PIMAGE_DOS_HEADER pDOSHeader = static_cast<PIMAGE_DOS_HEADER>( pBaseAddress);
...
PIMAGE_NT_HEADERS pNTHeader = reinterpret_cast<PIMAGE_NT_HEADERS>(
(PBYTE)pBaseAddress + pDOSHeader->e_lfanew );
...
PIMAGE_FILE_HEADER pFileHeader = reinterpret_cast<PIMAGE_FILE_HEADER>(
(PBYTE)&pNTHeader->FileHeader );
...
PIMAGE_OPTIONAL_HEADER pOptionalHeader =
reinterpret_cast<PIMAGE_OPTIONAL_HEADER>(
(PBYTE)&pNTHeader->OptionalHeader );
PIMAGE_SECTION_HEADER pSectionHeader =
reinterpret_cast<PIMAGE_SECTION_HEADER>(
(PBYTE)&pNTHeader->OptionalHeader +
pNTHeader->FileHeader.SizeOfOptionalHeader );
DWORD dwEntryPoint = pNTHeader->OptionalHeader.AddressOfEntryPoint;
UINT nSectionCount = pNTHeader->FileHeader.NumberOfSections;
const char TEXT[] = ".text";
const char BSSTEXT[] = ".textbss";
char szSectionName[ IMAGE_SIZEOF_SHORT_NAME + 1 ];
szSectionName[ IMAGE_SIZEOF_SHORT_NAME ] = '\0';
for( unsigned int i = 0; i < nSectionCount; i++ )
{
memcpy( szSectionName, pSectionHeader->Name,
IMAGE_SIZEOF_SHORT_NAME );
if( 0 == strncmp( TEXT, szSectionName,
IMAGE_SIZEOF_SHORT_NAME ) )
{
break;
}
pSectionHeader++;
}
// Use this when probing On Disk. It is where things
// are on disk - not where they will be in memory
dwRawData = pSectionHeader->PointerToRawData;
// Use this when probing On Disk. It is where things
// are on disk - not where they will be in memory
pCodeStart = (void*)((byte*)pBaseAddress +
pSectionHeader->PointerToRawData );
pEntryPoint = (void*)(((byte*)pBaseAddress) + dwEntryPoint);
dwCodeSize = pSectionHeader->Misc.VirtualSize;
pCodeEnd = (void*)((byte*)pCodeStart + pSectionHeader->Misc.VirtualSize );
If the application is built with Visual Studio, all the bytes between pCodeStart and pCodeEnd match in both cases.
But if the application is built with GCC (MinGW), the bytes immediately following pCodeStart and immediately preceding pCodeEnd are the same, yet somewhere in the middle some differing bytes appear.
Why does this happen?
