Userland interrupt timer access such as via KeQueryInterruptTime (or similar) - windows

Is there a "Nt" or similar (i.e. non-kernelmode-driver) function equivalent for KeQueryInterruptTime or anything similar? There seems to be no such thing as NtQueryInterruptTime, at least I've not found it.
What I want is some kind of reasonably accurate and reliable, monotonic timer (thus not QPC) which is reasonably efficient and has no surprises such as an overflowing 32-bit counter, and no unnecessary "smartness": no time zones, no complicated structures.
So ideally, I want something like timeGetTime with a 64 bit value. It doesn't even have to be the same timer.
There exists GetTickCount64 starting with Vista, which would be acceptable as such, but I'd not like to break XP support only for such a stupid reason.
Reading the quadword at 0x7FFE0008 as indicated here ... well, works ... and it proves that the actual internal counter is indeed 64 bits under XP (it's also as fast as it could possibly get), but meh... let's not talk about what kind of nasty hack it is to read some undocumented, hardcoded memory location.
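For the record, here is roughly what that hack amounts to (a sketch assuming the KSYSTEM_TIME layout from published ntddk.h headers; the High1Time/High2Time check is what makes the read consistent on 32-bit):
// KUSER_SHARED_DATA is mapped read-only at 0x7FFE0000 in every process;
// InterruptTime is a KSYSTEM_TIME at offset 0x08, in 100 ns units.
typedef struct _MY_KSYSTEM_TIME {
    ULONG LowPart;
    LONG  High1Time;
    LONG  High2Time;
} MY_KSYSTEM_TIME;

ULONGLONG ReadInterruptTime(void)
{
    volatile MY_KSYSTEM_TIME* t = (volatile MY_KSYSTEM_TIME*)0x7FFE0008;
    LONG hi;
    ULONG lo;
    do {
        hi = t->High1Time;
        lo = t->LowPart;
    } while (hi != t->High2Time); // kernel updated mid-read; retry
    return ((ULONGLONG)(ULONG)hi << 32) | lo;
}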
There must certainly be something in between calling an artificially stupefied (scaling a 64 bit counter down to 32 bits) high-level API function and reading a raw memory address?

Here's an example of a thread-safe wrapper for GetTickCount() extending the tick count value to 64 bits and in that being equivalent to GetTickCount64().
To avoid undesired counter rollovers, make sure to call this function a few times every 49.7 days. You can even have a dedicated thread whose only purpose is to call this function and then sleep for some 20 days in an infinite loop.
ULONGLONG MyGetTickCount64(void)
{
    static volatile LONGLONG Count = 0;
    LONGLONG curCount1, curCount2;
    LONGLONG tmp;

    // Atomically read the current 64-bit count (a CAS with equal values is a pure read).
    curCount1 = InterlockedCompareExchange64(&Count, 0, 0);

    // Splice the fresh low 32 bits from GetTickCount() onto the saved high 32 bits.
    curCount2 = curCount1 & 0xFFFFFFFF00000000;
    curCount2 |= GetTickCount();

    // If the low part went backwards, GetTickCount() has wrapped; carry into the high part.
    if ((ULONG)curCount2 < (ULONG)curCount1)
    {
        curCount2 += 0x100000000;
    }

    // Publish the new count; if another thread beat us to it, return its value instead.
    tmp = InterlockedCompareExchange64(&Count, curCount2, curCount1);
    if (tmp == curCount1)
    {
        return curCount2;
    }
    else
    {
        return tmp;
    }
}
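For completeness, a sketch of the dedicated keep-alive thread mentioned above (the name is made up, and 20 days is arbitrary; any period comfortably under 49.7 days will do):
DWORD WINAPI TickKeepAliveThread(LPVOID lpParameter)
{
    UNREFERENCED_PARAMETER(lpParameter);
    for (;;)
    {
        MyGetTickCount64();                // observe the counter so no 32-bit wrap is missed
        Sleep(20UL * 24 * 60 * 60 * 1000); // ~20 days, well inside the 49.7-day window
    }
    return 0;
}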
EDIT: And here's a complete application that tests MyGetTickCount64().
// Compiled with Open Watcom C 1.9: wcl386.exe /we /wx /q gettick.c
#include <windows.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
//
// The below code is an ugly implementation of InterlockedCompareExchange64()
// that is apparently missing in Open Watcom C 1.9.
// It must work with MSVC++ too, however.
//
UINT8 Cmpxchg8bData[] =
{
    0x55,             // push ebp
    0x89, 0xE5,       // mov  ebp, esp
    0x57,             // push edi
    0x51,             // push ecx
    0x53,             // push ebx
    0x8B, 0x7D, 0x10, // mov  edi, [ebp + 0x10]
    0x8B, 0x07,       // mov  eax, [edi]
    0x8B, 0x57, 0x04, // mov  edx, [edi + 0x4]
    0x8B, 0x7D, 0x0C, // mov  edi, [ebp + 0xc]
    0x8B, 0x1F,       // mov  ebx, [edi]
    0x8B, 0x4F, 0x04, // mov  ecx, [edi + 0x4]
    0x8B, 0x7D, 0x08, // mov  edi, [ebp + 0x8]
    0xF0,             // lock:
    0x0F, 0xC7, 0x0F, // cmpxchg8b [edi]
    0x5B,             // pop  ebx
    0x59,             // pop  ecx
    0x5F,             // pop  edi
    0x5D,             // pop  ebp
    0xC3              // ret
};
LONGLONG (__cdecl *Cmpxchg8b)(LONGLONG volatile* Dest, LONGLONG* Exch, LONGLONG* Comp) =
    (LONGLONG (__cdecl *)(LONGLONG volatile*, LONGLONG*, LONGLONG*))Cmpxchg8bData;

LONGLONG MyInterlockedCompareExchange64(LONGLONG volatile* Destination,
                                        LONGLONG Exchange,
                                        LONGLONG Comparand)
{
    return Cmpxchg8b(Destination, &Exchange, &Comparand);
}

#ifdef InterlockedCompareExchange64
#undef InterlockedCompareExchange64
#endif
#define InterlockedCompareExchange64(Destination, Exchange, Comparand) \
    MyInterlockedCompareExchange64(Destination, Exchange, Comparand)
//
// This stuff makes a thread-safe printf().
// We don't want characters output by one thread to be mixed
// with characters output by another. We want printf() to be
// "atomic".
// We use a critical section around vprintf() to achieve "atomicity".
//
static CRITICAL_SECTION PrintfCriticalSection;

int ts_printf(const char* Format, ...)
{
    int count;
    va_list ap;
    EnterCriticalSection(&PrintfCriticalSection);
    va_start(ap, Format);
    count = vprintf(Format, ap);
    va_end(ap);
    LeaveCriticalSection(&PrintfCriticalSection);
    return count;
}
#define TICK_COUNT_10MS_INCREMENT 0x800000
//
// This is the simulated tick counter.
// Its low 32 bits are going to be returned by
// our, simulated, GetTickCount().
//
// TICK_COUNT_10MS_INCREMENT is what the counter is
// incremented by every time. The value is so chosen
// that the counter quickly overflows in its
// low 32 bits.
//
static volatile LONGLONG SimulatedTickCount = 0;
//
// This is our simulated 32-bit GetTickCount()
// that returns a count that often overflows.
//
ULONG SimulatedGetTickCount(void)
{
    return (ULONG)SimulatedTickCount;
}
//
// This thread function will increment the simulated tick counter
// whose value's low 32 bits we'll be reading in SimulatedGetTickCount().
//
DWORD WINAPI SimulatedTickThread(LPVOID lpParameter)
{
    UNREFERENCED_PARAMETER(lpParameter);
    for (;;)
    {
        LONGLONG c;
        Sleep(10);
        // Get the counter value, add TICK_COUNT_10MS_INCREMENT to it and
        // store the result back, retrying on concurrent modification.
        do
        {
            c = InterlockedCompareExchange64(&SimulatedTickCount, 0, 0);
        } while (InterlockedCompareExchange64(&SimulatedTickCount,
                                              c + TICK_COUNT_10MS_INCREMENT,
                                              c) != c);
    }
    return 0;
}
volatile LONG CountOfObserved32bitOverflows = 0;
volatile LONG CountOfObservedUpdateRaces = 0;
//
// This prints statistics that includes the true 64-bit value of
// SimulatedTickCount that we can't get from SimulatedGetTickCount() as it
// returns only its lower 32 bits.
//
// The stats also include:
// - the number of times that MyGetTickCount64() observes an overflow of
// SimulatedGetTickCount()
// - the number of times MyGetTickCount64() fails to update its internal
// counter because of a concurrent update in another thread.
//
void PrintStats(void)
{
    LONGLONG true64bitCounter = InterlockedCompareExchange64(&SimulatedTickCount, 0, 0);
    ts_printf(" 0x%08X`%08X <- true 64-bit count; ovfs: ~%d; races: %d\n",
              (ULONG)(true64bitCounter >> 32),
              (ULONG)true64bitCounter,
              CountOfObserved32bitOverflows,
              CountOfObservedUpdateRaces);
}
//
// This is our poor man's implementation of GetTickCount64()
// on top of GetTickCount().
//
// It's thread safe.
//
// When used with actual GetTickCount() instead of SimulatedGetTickCount()
// it must be called at least a few times in 49.7 days to ensure that
// it doesn't miss any overflows in GetTickCount()'s return value.
//
ULONGLONG MyGetTickCount64(void)
{
    static volatile LONGLONG Count = 0;
    LONGLONG curCount1, curCount2;
    LONGLONG tmp;

    curCount1 = InterlockedCompareExchange64(&Count, 0, 0);

    curCount2 = curCount1 & 0xFFFFFFFF00000000;
    curCount2 |= SimulatedGetTickCount();

    if ((ULONG)curCount2 < (ULONG)curCount1)
    {
        curCount2 += 0x100000000;
        InterlockedIncrement(&CountOfObserved32bitOverflows);
    }

    tmp = InterlockedCompareExchange64(&Count, curCount2, curCount1);
    if (tmp != curCount1)
    {
        curCount2 = tmp;
        InterlockedIncrement(&CountOfObservedUpdateRaces);
    }

    return curCount2;
}
//
// This is an error counter. If a thread that uses MyGetTickCount64() notices
// any problem with what MyGetTickCount64() returns, it bumps up this error
// counter and stops. If one of threads sees a non-zero value in this
// counter due to an error in another thread, it stops as well.
//
volatile LONG Error = 0;
//
// This is a thread function that will be using MyGetTickCount64(),
// validating its return value and printing some stats once in a while.
//
// This function is meant to execute concurrently in multiple threads
// to create race conditions inside of MyGetTickCount64() and test it.
//
DWORD WINAPI TickUserThread(LPVOID lpParameter)
{
    DWORD user = (DWORD)lpParameter; // thread number
    ULONGLONG ticks[4];
    ticks[3] = ticks[2] = ticks[1] = MyGetTickCount64();
    while (!Error)
    {
        ticks[0] = ticks[1];
        ticks[1] = MyGetTickCount64();

        // Every ~100 ms sleep a little (slightly lowers CPU load, to about 90%)
        if (ticks[1] > ticks[2] + TICK_COUNT_10MS_INCREMENT * 10L)
        {
            ticks[2] = ticks[1];
            Sleep(1 + rand() % 20);
        }

        // Every ~1000 ms print the last value from MyGetTickCount64().
        // Thread 1 also prints stats here.
        if (ticks[1] > ticks[3] + TICK_COUNT_10MS_INCREMENT * 100L)
        {
            ticks[3] = ticks[1];
            ts_printf("%u:0x%08X`%08X\n", user, (ULONG)(ticks[1] >> 32), (ULONG)ticks[1]);
            if (user == 1)
            {
                PrintStats();
            }
        }

        if (ticks[0] > ticks[1])
        {
            ts_printf("%u:Non-monotonic tick counts: 0x%016llX > 0x%016llX!\n",
                      user,
                      ticks[0],
                      ticks[1]);
            PrintStats();
            InterlockedIncrement(&Error);
            return -1;
        }
        else if (ticks[0] + 0x100000000 <= ticks[1])
        {
            ts_printf("%u:Too big tick count jump: 0x%016llX -> 0x%016llX!\n",
                      user,
                      ticks[0],
                      ticks[1]);
            PrintStats();
            InterlockedIncrement(&Error);
            return -1;
        }

        Sleep(0); // be nice, yield to other threads.
    }
    return 0;
}
//
// This prints stats upon Ctrl+C and terminates the program.
//
BOOL WINAPI ConsoleEventHandler(DWORD Event)
{
    if (Event == CTRL_C_EVENT)
    {
        PrintStats();
    }
    return FALSE;
}
int main(void)
{
    HANDLE simulatedTickThreadHandle;
    HANDLE tickUserThreadHandle;
    DWORD dummy;

    // This is for the missing InterlockedCompareExchange64() workaround.
    VirtualProtect(Cmpxchg8bData, sizeof(Cmpxchg8bData), PAGE_EXECUTE_READWRITE, &dummy);

    InitializeCriticalSection(&PrintfCriticalSection);

    if (!SetConsoleCtrlHandler(&ConsoleEventHandler, TRUE))
    {
        ts_printf("SetConsoleCtrlHandler(&ConsoleEventHandler) failed with error 0x%X\n", GetLastError());
        return -1;
    }

    // Start the tick simulator thread.
    simulatedTickThreadHandle = CreateThread(NULL, 0, &SimulatedTickThread, NULL, 0, NULL);
    if (simulatedTickThreadHandle == NULL)
    {
        ts_printf("CreateThread(&SimulatedTickThread) failed with error 0x%X\n", GetLastError());
        return -1;
    }

    // Start one thread that'll be using MyGetTickCount64().
    tickUserThreadHandle = CreateThread(NULL, 0, &TickUserThread, (LPVOID)2, 0, NULL);
    if (tickUserThreadHandle == NULL)
    {
        ts_printf("CreateThread(&TickUserThread) failed with error 0x%X\n", GetLastError());
        return -1;
    }

    // The other thread using MyGetTickCount64() will be the main thread.
    TickUserThread((LPVOID)1);

    //
    // The app terminates upon any error condition detected in TickUserThread()
    // in any of the threads or by Ctrl+C.
    //
    return 0;
}
As a test I've been running this test app under Windows XP for 5+ hours on an otherwise idle machine that has 2 CPUs (idle, to avoid potential long starvation times and therefore avoid missing counter overflows that occur every 5 seconds) and it's still doing well.
Here's the latest output from the console:
2:0x00000E1B`C8800000
1:0x00000E1B`FA800000
0x00000E1B`FA800000 <- true 64-bit count; ovfs: ~3824; races: 110858
As you can see, MyGetTickCount64() has observed 3824 32-bit overflows and failed to update the value of Count with its second InterlockedCompareExchange64() 110858 times. So, overflows indeed occur and the last number means that the variable is, in fact, being concurrently updated by the two threads.
You can also see that the 64-bit tick counts that the two threads receive from MyGetTickCount64() in TickUserThread() don't have anything missing in the top 32 bits and are pretty close to the actual 64-bit tick count in SimulatedTickCount, whose 32 low bits are returned by SimulatedGetTickCount(). 0x00000E1BC8800000 is visually behind 0x00000E1BFA800000 due to thread scheduling and infrequent stat prints; it's behind by exactly 100*TICK_COUNT_10MS_INCREMENT, or 1 second. Internally, of course, the difference is much smaller.
Now, on availability of InterlockedCompareExchange64()... It's a bit odd that it's officially available only since Windows Vista and Windows Server 2003, given that Server 2003 is in fact built from the same code base as Windows XP.
But the most important thing here is that this function is built on top of the Pentium CMPXCHG8B instruction that's been available since 1998 or earlier (1), (2). And I can see this instruction in my Windows XP's (SP3) binaries. It's in ntkrnlpa.exe/ntoskrnl.exe (the kernel) and ntdll.dll (the DLL that exports kernel's NtXxxx() functions that everything's built upon). Look for a byte sequence of 0xF0, 0x0F, 0xC7 and disassemble the code around that place to see that these bytes aren't there coincidentally.
You can check availability of this instruction through the CPUID instruction (EDX bit 8 of CPUID functions 0x00000001 and 0x80000001) and refuse to run instead of crashing if the instruction isn't there, but these days you're unlikely to find a machine that doesn't support it. If you do, it won't be a good machine for Windows XP, and probably not for your application, anyway.
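For example, a sketch of such a check using MSVC's __cpuid intrinsic (on GCC you'd use <cpuid.h> instead; the function name is made up):
#include <intrin.h>

// Returns nonzero if CMPXCHG8B is supported: CPUID leaf 1, EDX bit 8 ("CX8").
int HasCmpxchg8b(void)
{
    int regs[4]; // EAX, EBX, ECX, EDX
    __cpuid(regs, 1);
    return (regs[3] >> 8) & 1;
}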

Thanks to Google Books, which kindly offered the relevant literature for free, I came up with an easy and fast implementation of GetTickCount64 which works perfectly well on pre-Vista systems too (and it is still somewhat less nasty than reading a value from a hardcoded memory address).
It is in fact as easy as calling interrupt 0x2A, which maps to KiGetTickCount. In GCC inline assembly, this gives:
static __inline__ __attribute__((always_inline)) unsigned long long get_tick_count64()
{
    unsigned long long ret;
    __asm__ __volatile__ ("int $0x2a" : "=A"(ret) : : );
    return ret;
}
Due to the way KiGetTickCount works, the function should probably better be called GetTickCount46, as it performs a right shift by 18, returning 46 bits, not 64. Though the same is true for the original Vista version, too.
Note that KiGetTickCount clobbers edx, this is relevant if you plan to implement your own faster implementation of the 32-bit version (must add edx to the clobber list in that case!).
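A sketch of what that would look like (keeping only the low dword of the 46-bit count, so edx must be declared clobbered):
static __inline__ __attribute__((always_inline)) unsigned long get_tick_count_low32()
{
    unsigned long ret;
    // The handler returns the count in edx:eax; we keep eax only,
    // so edx has to appear in the clobber list.
    __asm__ __volatile__ ("int $0x2a" : "=a"(ret) : : "edx");
    return ret; // note: not scaled to milliseconds like GetTickCount()
}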

Here's another approach, a variant of Alex's wrapper but using only 32-bit interlocks. It only actually returns a 60-bit number, but that's still good for about thirty-six million years. :-)
It does need to be called more often, at least once every three days. That shouldn't normally be a major drawback.
ULONGLONG MyTickCount64(void)
{
    static volatile DWORD count = 0xFFFFFFFF;
    DWORD previous_count, current_tick32, previous_count_zone, current_tick32_zone;
    ULONGLONG current_tick64;

    previous_count = InterlockedCompareExchange(&count, 0, 0);
    current_tick32 = GetTickCount();

    if (previous_count == 0xFFFFFFFF)
    {
        // count has never been written
        DWORD initial_count;
        initial_count = current_tick32 >> 28;
        previous_count = InterlockedCompareExchange(&count, initial_count, 0xFFFFFFFF);
        if (previous_count == 0xFFFFFFFF)
        {   // This thread wrote the initial value for count
            previous_count = initial_count;
        }
        else if (previous_count != initial_count)
        {   // Another thread wrote the initial value for count,
            // and it differs from the one we calculated
            current_tick32 = GetTickCount();
        }
    }

    previous_count_zone = previous_count & 15;
    current_tick32_zone = current_tick32 >> 28;

    if (current_tick32_zone == previous_count_zone)
    {
        // The top four bits of the 32-bit tick count haven't changed since count was last written.
        current_tick64 = previous_count;
        current_tick64 <<= 28;
        current_tick64 += current_tick32 & 0x0FFFFFFF;
        return current_tick64;
    }

    if (current_tick32_zone == previous_count_zone + 1 ||
        (current_tick32_zone == 0 && previous_count_zone == 15))
    {
        // The top four bits of the 32-bit tick count have been incremented since count was last written.
        InterlockedCompareExchange(&count, previous_count + 1, previous_count);
        current_tick64 = previous_count + 1;
        current_tick64 <<= 28;
        current_tick64 += current_tick32 & 0x0FFFFFFF;
        return current_tick64;
    }

    // Oops, we weren't called often enough, we're stuck
    return 0xFFFFFFFF;
}
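Note that the stuck condition is signalled in-band, so callers have to check for it (a usage sketch; the sentinel collides with one legitimate tick value, which is a trade-off of this scheme):
ULONGLONG t = MyTickCount64();
if (t == 0xFFFFFFFF)
{
    // More than ~3 days elapsed between calls; an overflow of the top
    // four bits may have been missed, so the value can't be trusted.
}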

Related

Is there an efficient way to calculate ceiling of log_b(a)?

I need to accurately calculate ceil(log_b(a)) where a and b are both integers. If I simply use the typical change-of-base formula with floating-point math functions, I wind up with errors due to rounding.
You can use this identity:
b^(log_b(a)) = a
So binary search x = log_b(a) such that b^x is the biggest value still less than a, and afterwards just increment the final result.
Here is a small C++ example for 32 bits:
//---------------------------------------------------------------------------
DWORD u32_pow(DWORD a, DWORD b) // = a^b
{
    int i, bits = 32;
    DWORD d = 1;
    for (i = 0; i < bits; i++)
    {
        d *= d;
        if (DWORD(b & 0x80000000)) d *= a;
        b <<= 1;
    }
    return d;
}
//---------------------------------------------------------------------------
DWORD u32_log2(DWORD a) // = ceil(log2(a))
{
    DWORD x;
    for (x = 32; ((a & 0x80000000) == 0) && (x > 1); x--, a <<= 1);
    return x;
}
//---------------------------------------------------------------------------
DWORD u32_log(DWORD b, DWORD a) // = ceil(logb(a))
{
    DWORD x, m, bx;
    // edge cases
    if (b <  2) return 0;
    if (a <  2) return 0;
    if (a <= b) return 1;
    m = 1 << (u32_log2(a) - 1); // max limit for b=2, all other bases lead to smaller exponents anyway
    for (x = 0; m; m >>= 1)
    {
        x |= m;
        bx = u32_pow(b, x);
        if (bx >= a) x ^= m;
    }
    return x + 1;
}
//---------------------------------------------------------------------------
Where DWORD is any unsigned 32-bit int type... For more info about pow, log, exp and binary search see:
Power by squaring for negative exponents
Note that u32_log2 is not really needed (unless you want bigints); you can use a constant bit width instead. Also, some CPUs like x86 have a single asm instruction (bsr) returning the same result much faster than the for loop...
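For example (a sketch; _BitScanReverse is the MSVC intrinsic, GCC/clang have __builtin_clz, and u32_log2_fast is a name I made up):
#include <intrin.h>

DWORD u32_log2_fast(DWORD a) // same result as u32_log2: floor(log2(a)) + 1, and 1 for a == 0
{
    unsigned long index;
    if (!_BitScanReverse(&index, a)) return 1; // a == 0: the loop version also yields 1
    return (DWORD)index + 1;
}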
Now the next step is to exploit the fact that the u32_pow bin search is the same as the u32_log bin search, so we can merge the two functions and get rid of one nested for loop completely, improving complexity considerably, like this:
//---------------------------------------------------------------------------
DWORD u32_pow(DWORD a, DWORD b) // = a^b
{
    int i, bits = 32;
    DWORD d = 1;
    for (i = 0; i < bits; i++)
    {
        d *= d;
        if (DWORD(b & 0x80000000)) d *= a;
        b <<= 1;
    }
    return d;
}
//---------------------------------------------------------------------------
DWORD u32_log2(DWORD a) // = ceil(log2(a))
{
    DWORD x;
    for (x = 32; ((a & 0x80000000) == 0) && (x > 1); x--, a <<= 1);
    return x;
}
//---------------------------------------------------------------------------
DWORD u32_log(DWORD b, DWORD a) // = ceil(logb(a))
{
    const int _bits = 32;  // DWORD bitwidth
    DWORD bb[_bits];       // squares-of-b LUT to speed up b^x
    DWORD x, m, bx, bx0, bit, bits;
    // edge cases
    if (b <  2) return 0;
    if (a <  2) return 0;
    if (a <= b) return 1;
    // max limit for x where b=2, all other bases lead to smaller x
    bits = u32_log2(a);
    // compute bb LUT
    bb[0] = b;
    for (bit = 1; bit <  bits; bit++) bb[bit] = bb[bit - 1] * bb[bit - 1];
    for (       ; bit < _bits; bit++) bb[bit] = 1;
    // bin search x and b^x at the same time
    for (bx = 1, x = 0, bit = bits - 1, m = 1 << bit; m; m >>= 1, bit--)
    {
        x |= m; bx0 = bx; bx *= bb[bit];
        if (bx >= a) { x ^= m; bx = bx0; }
    }
    return x + 1;
}
//---------------------------------------------------------------------------
The only drawback is that we need a LUT of squares of b, i.e. b, b^2, b^4, b^8, ... up to bits entries.
Beware that squaring doubles the number of bits, so you should also handle overflow if b or a are too big...
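One way to do that for the 32-bit case (a sketch I'm adding, not part of the original answer: clamp the LUT and test the product by division so it is never actually formed when it would overflow):
DWORD u32_log_safe(DWORD b, DWORD a) // = ceil(logb(a)), overflow-safe variant
{
    const int _bits = 32;
    DWORD bb[_bits];
    DWORD x, m, bx, bit, bits;
    // edge cases
    if (b <  2) return 0;
    if (a <  2) return 0;
    if (a <= b) return 1;
    bits = u32_log2(a);
    // Clamp LUT entries whose exact square would no longer fit in 32 bits;
    // a clamped entry is >= a for any 32-bit a, so its bit is always rejected.
    bb[0] = b;
    for (bit = 1; bit < bits; bit++)
        bb[bit] = (bb[bit - 1] > 0xFFFF) ? 0xFFFFFFFF : bb[bit - 1] * bb[bit - 1];
    // Test bx*bb[bit] >= a via division (bb[bit] >= 2 here, so this is safe)
    // instead of multiplying, so no intermediate product can wrap around.
    for (bx = 1, x = 0, bit = bits - 1, m = 1 << bit; m; m >>= 1, bit--)
    {
        x |= m;
        if (bx > (a - 1) / bb[bit]) x ^= m; // reject: product would be >= a
        else                        bx *= bb[bit];
    }
    return x + 1;
}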
[Edit2] more optimization
A benchmark on normal ints (on bigints the bin search is much, much faster) revealed that the bin-search version is the same speed as the naive version (because of the many auxiliary operations besides the multiplications):
DWORD u32_log_naive(DWORD b, DWORD a) // = ceil(logb(a))
{
    int x, bx;
    if (b <  2) return 0;
    if (a <  2) return 0;
    if (a <= b) return 1;
    for (x = 2, bx = b; bx *= b; x++)
        if (bx >= a) break;
    return x;
}
We can optimize more:
we can comment out the computation of the unused squares:
//for ( ;bit<_bits;bit++) bb[bit]=1;
with this, the bin search becomes faster on ints too, but not by much
we can use a faster log2 instead of the naive one
see: Fastest implementation of log2(int) and log2(float)
Putting it all together (x86 CPUs):
DWORD u32_log(DWORD b, DWORD a) // = ceil(logb(a))
{
    const int _bits = 32;  // DWORD bitwidth
    DWORD bb[_bits];       // squares-of-b LUT to speed up b^x
    DWORD x, m, bx, bx0, bit, bits;
    // edge cases
    if (b <  2) return 0;
    if (a <  2) return 0;
    if (a <= b) return 1;
    // max limit for x where b=2, all other bases lead to smaller x
    asm {
        bsr eax, a;   // bits = u32_log2(a);
        mov bits, eax;
    }
    // compute bb LUT
    bb[0] = b;
    for (bit = 1; bit < bits; bit++) bb[bit] = bb[bit - 1] * bb[bit - 1];
    // for ( ;bit<_bits;bit++) bb[bit]=1;
    // bin search x and b^x at the same time
    for (bx = 1, x = 0, bit = bits - 1, m = 1 << bit; m; m >>= 1, bit--)
    {
        x |= m; bx0 = bx; bx *= bb[bit];
        if (bx >= a) { x ^= m; bx = bx0; }
    }
    return x + 1;
}
However, the speedup is only slight: for example, naive 137 ms vs. bin search 133 ms. Note that the faster log2 made almost no difference, but that is because of how my compiler handles inline asm (not sure why BDS2006 and BCC32 are so slow at switching between asm and C++, but they are; that is why, in older C++ Builders, inline asm functions were not a good choice for speed optimization unless a major speedup was expected)...

C++11 on modern Intel: am I crazy or are non-atomic aligned 64-bit load/store actually atomic?

Can I base a mission-critical application on the results of this test, that 100 threads reading a pointer set a billion times by a main thread never see a tear?
Any other potential problems doing this besides tearing?
Here's a stand-alone demo that compiles with g++ -g tear.cxx -o tear -pthread.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>
using namespace std;

void* pvTearTest;
atomic<int> iTears( 0 );

void TearTest( void ) {
    while (1) {
        void* pv = (void*) pvTearTest;
        intptr_t i = (intptr_t) pv;
        if ( ( i >> 32 ) != ( i & 0xFFFFFFFF ) ) {
            printf( "tear: pv = %p\n", pv );
            iTears++;
        }
        if ( ( i >> 32 ) == 999999999 )
            break;
    }
}

int main( int argc, char** argv ) {
    printf( "\n\nTEAR TEST: are normal pointer read/writes atomic?\n" );
    vector<thread> athr;
    // Create lots of threads and have them do the test simultaneously.
    for ( int i = 0; i < 100; i++ )
        athr.emplace_back( TearTest );
    for ( int i = 0; i < 1000000000; i++ )
        pvTearTest = (void*) (intptr_t)
            ( ( i % (1L<<32) ) * 0x100000001 );
    for ( auto& thr: athr )
        thr.join();
    if ( iTears )
        printf( "%d tears\n", iTears.load() );
    else
        printf( "\n\nTEAR TEST: SUCCESS, no tears\n" );
}
The actual application is a malloc()'ed and sometimes realloc()'d array (size is power of two; realloc doubles storage) that many child threads will absolutely be hammering in a mission-critical but also high-performance-critical way.
From time to time a thread will need to add a new entry to the array, and will do so by setting the next array entry to point to something, then increment an atomic<int> iCount. Finally it will add data to some data structures that would cause other threads to attempt to dereference that cell.
It all seems fine (except I'm not positive if the increment of count is assured of happening before following non-atomic updates)... except for one thing: realloc() will typically change the address of the array, and further frees the old one, the pointer to which is still visible to other threads.
OK, so instead of realloc(), I malloc() a new array, manually copy the contents, set the pointer to the array. I would free the old array but I realize other threads may still be accessing it: they read the array base; I free the base; a third thread allocates it writes something else there; the first thread then adds the indexed offset to the base and expects a valid pointer. I'm happy to leak those though. (Given the doubling growth, all old arrays combined are about the same size as the current array so overhead is simply an extra 16 bytes per item, and it's memory that soon is never referenced again.)
So, here's the crux of the question: once I allocate the bigger array, can I write its base address with a non-atomic write, in utter safety? Or despite my billion-access test, do I actually have to make it atomic<> and thus slow all worker threads to read that atomic?
(As this is surely environment dependent, we're talking 2012-or-later Intel, g++ 4 to 9, and Red Hat of 2012 or later.)
EDIT: here is a modified test program that matches my planned scenario much more closely, with only a small number of writes. I've also added a count of the reads. I see when switching from void* to atomic I go from 2240 reads/sec to 660 reads/sec (with optimization disabled). The machine language for the read is shown after the source.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>
using namespace std;

chrono::time_point<chrono::high_resolution_clock> tp1, tp2;

// void*:         1169.093u 0.027s  2:26.75 796.6% 0+0k 0+0io 0pf+0w
// atomic<void*>: 6656.864u 0.348s 13:56.18 796.1% 0+0k 0+0io 0pf+0w

// Different definitions of the target variable.
atomic<void*> pvTearTest;
//void* pvTearTest;

// Children sum the tears they find, and at end, total checks performed.
atomic<int> iTears( 0 );
atomic<uint64_t> iReads( 0 );

bool bEnd = false; // main thr sets true; children all finish.

void TearTest( void ) {
    uint64_t i;
    for ( i = 0; ! bEnd; i++ ) {
        intptr_t iTearTest = (intptr_t) (void*) pvTearTest;
        // Make sure top 4 and bottom 4 bytes are the same. If not it's a tear.
        if ( ( iTearTest >> 32 ) != ( iTearTest & 0xFFFFFFFF ) ) {
            printf( "tear: pv = %lx\n", iTearTest );
            iTears++;
        }
        // Output periodically to prove we're seeing changing values.
        if ( ( (i+1) % 50000000 ) == 0 )
            printf( "got: pv = %lx\n", iTearTest );
    }
    iReads += i;
}

int main( int argc, char** argv ) {
    printf( "\n\nTEAR TEST: are normal pointer read/writes atomic?\n" );
    vector<thread> athr;
    // Create lots of threads and have them do the test simultaneously.
    for ( int i = 0; i < 100; i++ )
        athr.emplace_back( TearTest );
    tp1 = chrono::high_resolution_clock::now();
#if 0
    // Change target as fast as possible for fixed number of updates.
    for ( int i = 0; i < 1000000000; i++ )
        pvTearTest = (void*) (intptr_t)
            ( ( i % (1L<<32) ) * 0x100000001 );
#else
    // More like our actual app: change target only periodically, for fixed time.
    for ( int i = 0; i < 100; i++ ) {
        pvTearTest.store( (void*) (intptr_t) ( ( i % (1L<<32) ) * 0x100000001 ),
                          std::memory_order_release );
        this_thread::sleep_for(10ms);
    }
#endif
    bEnd = true;
    for ( auto& thr: athr )
        thr.join();
    tp2 = chrono::high_resolution_clock::now();
    chrono::duration<double> dur = tp2 - tp1;
    printf( "%ld reads in %.4f secs: %.2f reads/usec\n",
            iReads.load(), dur.count(), iReads.load() / dur.count() / 1000000 );
    if ( iTears )
        printf( "%d tears\n", iTears.load() );
    else
        printf( "\n\nTEAR TEST: SUCCESS, no tears\n" );
}
Dump of assembler code for function TearTest():
0x0000000000401256 <+0>: push %rbp
0x0000000000401257 <+1>: mov %rsp,%rbp
0x000000000040125a <+4>: sub $0x10,%rsp
0x000000000040125e <+8>: movq $0x0,-0x8(%rbp)
0x0000000000401266 <+16>: movzbl 0x6e83(%rip),%eax # 0x4080f0 <bEnd>
0x000000000040126d <+23>: test %al,%al
0x000000000040126f <+25>: jne 0x40130c <TearTest()+182>
=> 0x0000000000401275 <+31>: mov $0x4080d8,%edi
0x000000000040127a <+36>: callq 0x40193a <std::atomic<void*>::operator void*() const>
0x000000000040127f <+41>: mov %rax,-0x10(%rbp)
0x0000000000401283 <+45>: mov -0x10(%rbp),%rax
0x0000000000401287 <+49>: sar $0x20,%rax
0x000000000040128b <+53>: mov -0x10(%rbp),%rdx
0x000000000040128f <+57>: mov %edx,%edx
0x0000000000401291 <+59>: cmp %rdx,%rax
0x0000000000401294 <+62>: je 0x4012bb <TearTest()+101>
0x0000000000401296 <+64>: mov -0x10(%rbp),%rax
0x000000000040129a <+68>: mov %rax,%rsi
0x000000000040129d <+71>: mov $0x40401a,%edi
0x00000000004012a2 <+76>: mov $0x0,%eax
0x00000000004012a7 <+81>: callq 0x401040 <printf@plt>
0x00000000004012ac <+86>: mov $0x0,%esi
0x00000000004012b1 <+91>: mov $0x4080e0,%edi
0x00000000004012b6 <+96>: callq 0x401954 <std::__atomic_base<int>::operator++(int)>
0x00000000004012bb <+101>: mov -0x8(%rbp),%rax
0x00000000004012bf <+105>: lea 0x1(%rax),%rcx
0x00000000004012c3 <+109>: movabs $0xabcc77118461cefd,%rdx
0x00000000004012cd <+119>: mov %rcx,%rax
0x00000000004012d0 <+122>: mul %rdx
0x00000000004012d3 <+125>: mov %rdx,%rax
0x00000000004012d6 <+128>: shr $0x19,%rax
0x00000000004012da <+132>: imul $0x2faf080,%rax,%rax
0x00000000004012e1 <+139>: sub %rax,%rcx
0x00000000004012e4 <+142>: mov %rcx,%rax
0x00000000004012e7 <+145>: test %rax,%rax
0x00000000004012ea <+148>: jne 0x401302 <TearTest()+172>
0x00000000004012ec <+150>: mov -0x10(%rbp),%rax
0x00000000004012f0 <+154>: mov %rax,%rsi
0x00000000004012f3 <+157>: mov $0x40402a,%edi
0x00000000004012f8 <+162>: mov $0x0,%eax
0x00000000004012fd <+167>: callq 0x401040 <printf@plt>
0x0000000000401302 <+172>: addq $0x1,-0x8(%rbp)
0x0000000000401307 <+177>: jmpq 0x401266 <TearTest()+16>
0x000000000040130c <+182>: mov -0x8(%rbp),%rax
0x0000000000401310 <+186>: mov %rax,%rsi
0x0000000000401313 <+189>: mov $0x4080e8,%edi
0x0000000000401318 <+194>: callq 0x401984 <std::__atomic_base<unsigned long>::operator+=(unsigned long)>
0x000000000040131d <+199>: nop
0x000000000040131e <+200>: leaveq
0x000000000040131f <+201>: retq
Yes, on x86 aligned loads are atomic, BUT this is an architectural detail that you should NOT rely on!
Since you are writing C++ code, you have to abide by the rules of the C++ standard, i.e., you have to use atomics instead of volatile. The fact
that volatile has been part of that language long before the introduction
of threads in C++11 should be a strong enough indication that volatile was
never designed or intended to be used for multi-threading. It is important to
note that in C++ volatile is something fundamentally different from volatile
in languages like Java or C# (in these languages volatile is in
fact related to the memory model and therefore much more like an atomic in C++).
In C++, volatile is used for what is often referred to as "unusual memory".
This is typically memory that can be read or modified outside the current process,
for example when using memory mapped I/O. volatile forces the compiler to
execute all operations in the exact order as specified. This prevents
some optimizations that would be perfectly legal for atomics, while also allowing
some optimizations that are actually illegal for atomics. For example:
volatile int x;
int y;
volatile int z;
x = 1;
y = 2;
z = 3;
z = 4;
...
int a = x;
int b = x;
int c = y;
int d = z;
In this example, there are two assignments to z, and two read operations on x.
If x and z were atomics instead of volatile, the compiler would be free to treat
the first store as irrelevant and simply remove it. Likewise it could just reuse the
value returned by the first load of x, effectively generating code like int b = a.
But since x and z are volatile, these optimizations are not possible. Instead,
the compiler has to ensure that all volatile operations are executed in the
exact order as specified, i.e., the volatile operations cannot be reordered with
respect to each other. However, this does not prevent the compiler from reordering
non-volatile operations. For example, the operations on y could freely be moved
up or down - something that would not be possible if x and z were atomics. So
if you were to try implementing a lock based on a volatile variable, the compiler
could simply (and legally) move some code outside your critical section.
Last but not least it should be noted that marking a variable as volatile does
not prevent it from participating in a data race. In those rare cases where you
have some "unusual memory" (and therefore really require volatile) that is
also accessed by multiple threads, you have to use volatile atomics.
Since aligned loads are actually atomic on x86, the compiler will translate an atomic.load() call to a simple mov instruction, so an atomic load is not slower than reading a volatile variable. An atomic.store() is actually slower than writing a volatile variable, but for good reasons, since in contrast to the volatile write it is by default sequentially consistent. You can relax the memory orders, but you really have to know what you are doing!!
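For the array-publication pattern in the question, that would look roughly like this (a sketch; the names are made up):
#include <atomic>

std::atomic<void*> g_array{nullptr};

// Publisher: fill the new block first, then release-store the pointer, so the
// block's contents happen-before any reader that acquire-loads the pointer.
void publish(void* newBlock) {
    g_array.store(newBlock, std::memory_order_release);
}

// Reader: on x86 an acquire load compiles to a plain mov, so it costs no
// more than the non-atomic read, but it is well-defined C++.
void* snapshot() {
    return g_array.load(std::memory_order_acquire);
}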
If you want to learn more about the C++ memory model, I can recommend this paper: Memory Models for C/C++ Programmers

How do x86 jump instructions check their respective flags?

As I understand, conditional jumps check the status of a flag set after the CMP instruction. For example:
CMP AX,DX ; Set compare flags
JGE DONE ; Go to DONE label if AX >= DX
...
DONE:
...
How are the flags actually checked in this situation? If I'm not mistaken, the flags are individual bits in a special register; so are the bits checked one at a time, or all at once? In C pseudocode:
unsigned flags = 0; /* reset all flags */

/* define the flags */
const unsigned GREATER = 1 << 1;
const unsigned EQUAL   = 1 << 2;
const unsigned LESS    = 1 << 3;

unsigned AX = 4; /* initialize AX */
unsigned DX = 3; /* initialize DX */

/* CMP AX,DX */
int result = AX - DX;
if (result > 0) {
    flags |= GREATER;
} else if (result == 0) {
    flags |= EQUAL;
} else if (result < 0) {
    flags |= LESS;
}

/* -------------------------------- */
/* JGE Method 1 */
if (flags & GREATER) {
    goto DONE;
}
if (flags & EQUAL) {
    goto DONE;
}

/* or JGE Method 2 */
if (flags & (GREATER|EQUAL)) {
    goto DONE;
}
Don't look too deeply into the flag-setting code -- I know actual x86 processors' flags are set as a natural side effect rather than explicitly -- my concern is how those flags are actually checked: bit by bit, or with an encompassing bitmask.
Well, it depends on how the machine is built.
Let me point you to a PDF of Harry Porter's Relay Computer, a particularly simple example of a computer.
Look on slide 115, where he's showing how the instructions are processed.
Basically there's a big blob of combinational logic implementing a Finite State Machine that controls how each instruction is executed.
Most of the stepping in the FSM is concerned with moving data and addresses between registers, using the address and data busses.
If you're wondering what combinational logic is, it is a blob having a bunch of boolean inputs, and a bunch of boolean outputs, and each output is a boolean function of some of the inputs. The blob has no memory. The way it gets memory is by feeding some of the outputs back to the inputs.
So to answer your question, in the context of that computer, it probably tests all the condition bits at the same time, in a boolean expression.
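To make that concrete for x86: JGE is defined to be taken exactly when SF == OF, so the hardware evaluates one boolean expression over the flags rather than testing bits one at a time. In the question's pseudocode style, using the real EFLAGS bit positions:
const unsigned SF = 1u << 7;   /* sign flag */
const unsigned OF = 1u << 11;  /* overflow flag */

/* JGE taken <=> SF == OF, a single combinational expression */
if (((flags & SF) != 0) == ((flags & OF) != 0)) {
    goto DONE;
}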

No-overflow cast on x64

I have an existing C codebase that works on x86.
I'm now compiling it for x64.
What I'd like to do is cast a size_t to a DWORD, and throw an exception if there's a loss of data.
Q: Is there an idiom for this?
Here's why I'm doing this:
A bunch of Windows APIs accept DWORDs as arguments, and the code currently assumes sizeof(DWORD)==sizeof(size_t). That assumption holds for x86, but not for x64. So when compiling for x64, passing a size_t in place of a DWORD argument generates a compile-time warning.
In virtually all of these cases the actual size is not going to exceed 2^32. But I want to code it defensively and explicitly.
This is my first x64 project, so... be gentle.
see boost::numeric_cast
http://www.boost.org/doc/libs/1_33_1/libs/numeric/conversion/doc/numeric_cast.html
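Usage is a one-liner; it throws if the value doesn't round-trip (a sketch, with the wrapper name made up):
#include <boost/numeric/conversion/cast.hpp>
#include <windows.h>

DWORD SizeToDword(size_t sz)
{
    // Throws boost::numeric::positive_overflow (a bad_numeric_cast)
    // if sz doesn't fit into a DWORD.
    return boost::numeric_cast<DWORD>(sz);
}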
I just defined a function to perform the cast.
I included assert-like behavior to ensure I'm not silently rubbishing pointers.
DWORD ConvertSizeTo32bits(size_t sz, char *file, int line)
{
    if (!(0 <= sz && sz <= INT32_MAX)) {
        EmitLogMessage("Invalid Pointer size: %d file(%s) line(%d)",
                       sz, file, line);
        ExitProcess( 0 );
    }
    return (DWORD) sz;
}
#define size_t_to_DWORD(st,dw) if ((DWORD)(st) != st) RaiseException(exLossOfData, 0, 0, NULL); else dw = (DWORD)(st)
size_t st;
DWORD dw;
st = 0xffffffff;
size_t_to_DWORD(st,dw); // this succeeds
st = 0xffffffff1;
size_t_to_DWORD(st,dw); // this throws
EDIT:
Or better yet, do this so you can use it in an expression:
DWORD MyRaiseException()
{
    RaiseException(1, 0, 0, NULL);
    return 0;
}

// Outer parentheses keep the conditional intact when the macro is used
// inside a larger expression.
#define size_t_to_DWORD(st) ((DWORD)(st) != (st) ? MyRaiseException() : (DWORD)(st))

void main(void)
{
    size_t st;
    DWORD dw;
    st = 0xffffffff1;
    dw = size_t_to_DWORD(st);
    printf("%u %u\n", st, dw);
}

How do I patch a Windows API at runtime so that it returns 0 in x64?

In x86, I get the function address using GetProcAddress() and write a simple XOR EAX,EAX; RET 4; in it. Simple and effective. How do I do the same in x64?
bool DisableSetUnhandledExceptionFilter()
{
    const BYTE PatchBytes[5] = { 0x33, 0xC0, 0xC2, 0x04, 0x00 }; // XOR EAX,EAX; RET 4;

    // Obtain the address of SetUnhandledExceptionFilter
    HMODULE hLib = GetModuleHandle( _T("kernel32.dll") );
    if( hLib == NULL )
        return false;

    BYTE* pTarget = (BYTE*)GetProcAddress( hLib, "SetUnhandledExceptionFilter" );
    if( pTarget == 0 )
        return false;

    // Patch SetUnhandledExceptionFilter
    if( !WriteMemory( pTarget, PatchBytes, sizeof(PatchBytes) ) )
        return false;

    // Ensure the patched bytes are flushed out of the instruction cache
    FlushInstructionCache(GetCurrentProcess(), pTarget, sizeof(PatchBytes));

    // Success
    return true;
}

static bool WriteMemory( BYTE* pTarget, const BYTE* pSource, DWORD Size )
{
    // Check parameters
    if( pTarget == 0 )
        return false;
    if( pSource == 0 )
        return false;
    if( Size == 0 )
        return false;
    if( IsBadReadPtr( pSource, Size ) )
        return false;

    // Modify protection attributes of the target memory page
    DWORD OldProtect = 0;
    if( !VirtualProtect( pTarget, Size, PAGE_EXECUTE_READWRITE, &OldProtect ) )
        return false;

    // Write memory
    memcpy( pTarget, pSource, Size );

    // Restore memory protection attributes of the target memory page
    DWORD Temp = 0;
    if( !VirtualProtect( pTarget, Size, OldProtect, &Temp ) )
        return false;

    // Success
    return true;
}
This example is adapted from code found here: http://www.debuginfo.com/articles/debugfilters.html#overwrite .
In x64 the return value is in RAX, which is the 64-bit version of EAX. But because the upper 32 bits are cleared whenever a 32-bit sub-register is written, "xor eax, eax" is equivalent to "xor rax, rax" and doesn't need to be changed.
However, because the calling convention is different on x64, the same return instruction won't work there:
In x86, WinAPI functions use the stdcall convention, where the callee pops the arguments from the stack; hence the "ret 4" instruction, which pops the one argument of SetUnhandledExceptionFilter off the stack (you may want to fix that comment in your code).
In x64 the stack is not cleaned by the callee, so a normal "retn" instruction needs to be used:
const BYTE PatchBytes[3] = { 0x33, 0xC0, 0xC3 }; // XOR EAX,EAX; RET;
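If the same source has to build for both architectures, the byte sequence can be selected at compile time, e.g.:
#ifdef _WIN64
    // x64: the caller cleans the stack, so a plain RET suffices.
    static const BYTE PatchBytes[] = { 0x33, 0xC0, 0xC3 };             // xor eax,eax; ret
#else
    // x86 stdcall: the callee pops the single 4-byte argument.
    static const BYTE PatchBytes[] = { 0x33, 0xC0, 0xC2, 0x04, 0x00 }; // xor eax,eax; ret 4
#endif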
Use a library like Microsoft Detours or EasyHook, both of which support exactly this kind of patching, and at least one of which works on x64.
