Why does L2 hardware prefetcher perform worse with only 1 KiB or 2 KiB access size? - performance

I have a simple multi-threaded program where each thread performs random reads on a given file (in memory), divided evenly amongst the threads. Each thread reads from the file into a buffer and sets a value. This is really a program designed to test memory bandwidth. Here is the program:
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
#include <errno.h>
#include <stdbool.h>
#include <ctype.h>
#include <inttypes.h>
#include <pthread.h>
#include <assert.h>
#include <time.h>
#define NS_IN_SECOND 1000000000
uint64_t nano_time(void) {
struct timespec ts;
if (clock_gettime(CLOCK_REALTIME, &ts) == 0)
return ts.tv_sec * NS_IN_SECOND + ts.tv_nsec;
return 0; // avoid falling off the end if clock_gettime fails
}
// avx512 test
#include <stdint.h>
void *__memmove_chk_avx512_no_vzeroupper(void *dest, void *src, size_t s);
/**
* To create 4 GB file: This will allocate space on disk
* $ dd < /dev/zero bs=1048576 count=4096 > testfile
*
* 100 GiB
* dd if=/dev/zero of=bigmmaptest bs=1M count=102400
* To clear cache:
* $ sync; echo 1 > /proc/sys/vm/drop_caches
*/
//#define SAMPLE_LATENCY 1
#define BYTES_IN_GB (1024*1024*1024)
// Block sized will be used for read and the same will be used for striding
// when iterating over a file in mmap.
#define DEFAULT_BLOCK_SIZE 4096 //8192
#define NANOSECONDS_IN_SECOND 1000000000
const char DEFAULT_NAME[] = "/mnt/tmp/mmaptest";
#define EXIT_MSG(...) \
do { \
printf(__VA_ARGS__); \
_exit(-1); \
} while (0)
uint64_t read_mmap_test(int fd, int tid, size_t block_size, size_t filesize, char* buf,
off_t *offsets, uint64_t *begin, uint64_t *end);
uint64_t write_mmap_test(int fd, int tid, size_t block_size, size_t filesize, char* buf,
off_t *offsets, uint64_t *begin, uint64_t *end);
uint64_t mmap_test(int fd, int tid, size_t block_size, size_t filesize, char *buf,
char optype, off_t *offsets, uint64_t *begin, uint64_t *end);
uint64_t read_syscall_test(int fd, int tid, size_t block_size, size_t filesize,
off_t *offsets, uint64_t *begin, uint64_t *end);
uint64_t write_syscall_test(int fd, int tid, size_t block_size, size_t filesize,
off_t *offsets, uint64_t *begin, uint64_t *end);
uint64_t syscall_test(int fd, int tid, size_t block_size, size_t filesize,
char optype, off_t *offsets, uint64_t *begin, uint64_t *end);
size_t get_filesize(const char* filename);
void print_help_message(const char *progname);
char* map_buffer(int fd, size_t size);
void *run_tests(void *);
static int silent = 0;
typedef struct {
int tid;
int fd;
char *mapped_buffer;
int read_mmap;
int read_syscall;
int write_mmap;
int write_syscall;
off_t *offsets;
size_t block_size;
size_t chunk_size;
int retval;
uint64_t start_time;
uint64_t end_time;
} threadargs_t;
size_t filesize;
int main(int argc, char **argv) {
char *fname = (char*) DEFAULT_NAME;
char *mapped_buffer = NULL;
int c, fd, i, flags = O_RDWR, numthreads = 1, ret, option_index;
static int randomaccess = 0,
read_mmap = 0, read_syscall = 0,
write_mmap = 0, write_syscall = 0,
mixed_mmap = 0, write_tr = 0;
off_t *offsets = 0;
size_t block_size = DEFAULT_BLOCK_SIZE, numblocks,
new_file_size = 0;
uint64_t min_start_time, max_end_time = 0, retval;
// permissions
uint64_t mode = S_IRWXU | S_IRWXG;
pthread_t *threads;
threadargs_t *threadargs;
static struct option long_options[] =
{
// Options set a flag
{"randomaccess", no_argument, &randomaccess, 1},
{"readmmap", no_argument, &read_mmap, 1},
{"readsyscall", no_argument, &read_syscall, 1},
{"silent", no_argument, &silent, 1},
{"writemmap", no_argument, &write_mmap, 1},
{"writesyscall", no_argument, &write_syscall, 1},
{"mixedmmap", no_argument, &mixed_mmap, 1},
// Options take an argument
{"block", required_argument, 0, 'b'},
{"file", required_argument, 0, 'f'},
{"help", no_argument, 0, 'h'},
{"size", no_argument, 0, 's'},
{"threads", required_argument, 0, 't'},
{"writethreads", no_argument, 0, 'w'},
{0, 0, 0, 0}
};
//read operations
while(1) {
c = getopt_long(argc, argv, "b:f:hs:t:w:",
long_options, &option_index);
// is end of the option
if (c == -1)
break;
switch(c)
{
case 0:
break;
case 'b':
block_size = atoi(optarg);
break;
case 'f':
fname = optarg;
break;
case 'h':
print_help_message(argv[0]);
_exit(0);
case 's':
new_file_size = (size_t)(atoi(optarg)) * BYTES_IN_GB;
break;
case 't':
numthreads = (int) (atoi(optarg));
break;
case 'w':
write_tr = atoi(optarg);
break;
default:
break;
}
}
if(!silent){
printf("PID: %d\n", getpid());
printf("Using file %s \n", fname);
}
if ((filesize = get_filesize(fname)) == -1) {
if (read_mmap || read_syscall) {
printf("Cannot obtain file size for %s: %s"
"File must exist prior to running read tests.\n",
fname, strerror(errno));
_exit(-1);
}
else
filesize = new_file_size;
}
fd = open((const char*)fname, flags, mode);
if(fd <0) {
printf("Clould not open/create file %s: %s\n",
fname, strerror(errno));
_exit(-1);
}
if(block_size == 0 || block_size > filesize){
printf("Invalid block size: %zu for file of size "
"%zu. Block size must be greater than 0 and no "
"greater than the file size.\n",
block_size, filesize);
_exit(-1);
}
/*
* Generate random block number for random file access.
* Sequential for sequential access
*/
numblocks = filesize/block_size;
if(filesize % block_size > 0)
numblocks++;
offsets = (off_t *) malloc(numblocks * sizeof(off_t));
if(offsets == 0){
printf("Failed to allocate memory: %s\n", strerror(errno));
_exit(-1);
}
for (uint64_t i = 0; i < numblocks; i++)
if(randomaccess)
offsets[i] = ((int)random() % numblocks) * block_size;
else
offsets[i] = i*block_size;
if (numblocks % numthreads != 0)
EXIT_MSG("We have %" PRIu64 " blocks and %d threads. "
"Threads must evenly divide blocks. "
"Please fix the args.\n",
(uint_least64_t)numblocks, numthreads);
if( read_mmap || write_mmap || mixed_mmap)
assert((mapped_buffer = map_buffer(fd, filesize)) != NULL);
threads = (pthread_t*)malloc(numthreads * sizeof(pthread_t));
threadargs =
(threadargs_t*)malloc(numthreads * sizeof(threadargs_t));
if (threads == NULL || threadargs == NULL)
EXIT_MSG("Could not allocate thread array for %d threads.\n", numthreads);
for (i = 0; i < numthreads; i++) {
if(mixed_mmap){
if (i < write_tr) {
write_mmap = 1;
} else {
read_mmap = 1;
}
}
threadargs[i].fd = fd;
threadargs[i].tid = i;
threadargs[i].block_size = block_size;
threadargs[i].chunk_size = filesize/numthreads;
threadargs[i].mapped_buffer = mapped_buffer;
threadargs[i].offsets = &offsets[numblocks/numthreads * i];
threadargs[i].read_mmap = read_mmap;
threadargs[i].read_syscall = read_syscall;
threadargs[i].write_mmap = write_mmap;
threadargs[i].write_syscall = write_syscall;
int ret = pthread_create(&threads[i], NULL, run_tests, &threadargs[i]);
if (ret!=0)
EXIT_MSG("pthread_create for %dth thread failed: %s\n",
i, strerror(errno));
}
for (i = 0; i< numthreads; i++){
ret = pthread_join(threads[i], NULL);
if (ret !=0)
EXIT_MSG("Thread %d failed in join: %s\n",
i, strerror(errno));
}
// for mixed mode determine read and write aggregate b/w.
if(mixed_mmap) {
// Write b/w
min_start_time = threadargs[0].start_time;
max_end_time = 0;
// Since tid 0 to write_tr-1 did writes, find it's min and max.
for(i=0; i < write_tr; i++){
min_start_time = (threadargs[i].start_time < min_start_time)?
threadargs[i].start_time:min_start_time;
max_end_time = (threadargs[i].end_time > max_end_time)?
threadargs[i].end_time:max_end_time;
}
printf("Write: %.2f\n",
(double)write_tr*(filesize/numthreads)/(double)(max_end_time-min_start_time)
* NANOSECONDS_IN_SECOND / BYTES_IN_GB);
// Read b/w
min_start_time = threadargs[write_tr].start_time;
max_end_time = 0;
for(i=write_tr; i < numthreads; i++){
min_start_time = (threadargs[i].start_time < min_start_time)?
threadargs[i].start_time:min_start_time;
max_end_time = (threadargs[i].end_time > max_end_time)?
threadargs[i].end_time:max_end_time;
}
printf("Read: %.2f\n",
(double)(numthreads-write_tr)*(filesize/numthreads)/(double)(max_end_time-min_start_time)
* NANOSECONDS_IN_SECOND / BYTES_IN_GB);
}
/**
* For total run time. Find the smallest start time
* and largest end time across all threads.
*/
min_start_time = threadargs[0].start_time;
max_end_time = 0;
for (i=0; i< numthreads; i++){
min_start_time = (threadargs[i].start_time < min_start_time)?
threadargs[i].start_time:min_start_time;
max_end_time = (threadargs[i].end_time > max_end_time)?
threadargs[i].end_time:max_end_time;
}
printf("%.2f\n",
(double)filesize/(double)(max_end_time-min_start_time)
* NANOSECONDS_IN_SECOND / BYTES_IN_GB);
munmap(mapped_buffer, filesize);
close(fd);
}
void * run_tests(void *args) {
uint64_t retval;
threadargs_t t = *(threadargs_t*)args;
if(t.read_mmap) {
if(!silent)
printf("Running read mmap test:\n");
retval = read_mmap_test(t.fd, t.tid, t.block_size, t.chunk_size,
t.mapped_buffer, t.offsets,
&((threadargs_t*)args)->start_time,
&((threadargs_t*)args)->end_time);
}
else if(t.read_syscall) {
if(!silent)
printf("Running read syscall test:\n");
retval = read_syscall_test(t.fd, t.tid, t.block_size, t.chunk_size,
t.offsets,
&((threadargs_t*)args)->start_time,
&((threadargs_t*)args)->end_time);
}
else if(t.write_mmap) {
if(!silent)
printf("Running write mmap test:\n");
retval = write_mmap_test(t.fd, t.tid, t.block_size, t.chunk_size,
t.mapped_buffer, t.offsets,
&((threadargs_t*)args)->start_time,
&((threadargs_t*)args)->end_time);
}
else if(t.write_syscall) {
if(!silent)
printf("Running write syscall test:\n");
retval = write_syscall_test(t.fd, t.tid, t.block_size, t.chunk_size,
t.offsets,
&((threadargs_t*)args)->start_time,
&((threadargs_t*)args)->end_time);
}
return (void*) 0;
}
#define READ 1
#define WRITE 2
/**
********* SYSCALL section
*/
uint64_t read_syscall_test(int fd, int tid, size_t block_size, size_t filesize,
off_t *offsets, uint64_t *begin, uint64_t *end) {
return syscall_test(fd, tid, block_size, filesize, READ, offsets,
begin, end);
}
uint64_t write_syscall_test(int fd, int tid, size_t block_size, size_t filesize,
off_t *offsets, uint64_t *begin, uint64_t *end) {
return syscall_test(fd, tid, block_size, filesize, WRITE, offsets,
begin, end);
}
uint64_t syscall_test(int fd, int tid, size_t block_size, size_t filesize,
char optype, off_t *offsets, uint64_t *begin, uint64_t *end) {
bool done = false;
char * buffer = NULL;
int i = 0;
size_t total_bytes_transferred = 0;
uint64_t begin_time, end_time, ret_token = 0;
buffer = (char*)malloc(block_size);
if(buffer == NULL) {
printf("Failed to allocate memory: %s\n", strerror(errno));
return -1;
}
memset((void*)buffer, 0, block_size);
begin_time= nano_time();
while(!done) {
size_t bytes_transferred = 0;
if(optype == READ)
bytes_transferred = pread(fd, buffer, block_size, offsets[i++]);
else if (optype == WRITE)
bytes_transferred = pwrite(fd, buffer, block_size, offsets[i++]);
if (bytes_transferred == 0)
done = true;
else if(bytes_transferred == -1){
printf("Failed to IO: %s\n", strerror(errno));
return -1;
}
else {
total_bytes_transferred += bytes_transferred;
if (optype == WRITE && total_bytes_transferred == filesize)
done = true;
// Do random operation
ret_token += buffer[0];
}
if (i*block_size >= filesize)
done = true;
}
end_time = nano_time();
if(!silent){
printf("%s: %" PRIu64 " bytes transferred in %" PRIu64 ""
" ns.\n", (optype == READ)?"read-syscall":"write-syscall",
(uint_least64_t)total_bytes_transferred, (end_time-begin_time));
// Throughput in GB/s
printf("(tid %d) %.2f\n", tid,
(double)filesize/(double)(end_time-begin_time)
* NANOSECONDS_IN_SECOND / BYTES_IN_GB);
}
*begin = begin_time;
*end = end_time;
return ret_token;
}
/**
* MMAP tests
*/
uint64_t read_mmap_test(int fd, int tid, size_t block_size, size_t filesize,
char *buf, off_t *offsets, uint64_t *begin, uint64_t *end) {
return mmap_test(fd, tid, block_size, filesize, buf, READ, offsets, begin, end);
}
uint64_t write_mmap_test(int fd, int tid, size_t block_size, size_t filesize,
char *buf, off_t *offsets, uint64_t *begin, uint64_t *end){
return mmap_test(fd, tid, block_size, filesize, buf, WRITE, offsets, begin, end);
}
// Add memory addr
#if SAMPLE_LATENCY
#define BEGIN_LAT_SAMPLE \
if (num_samples < MAX_LAT_SAMPLES && i%LAT_SAMPL_INTERVAL == 0) \
lat_begin_time = nano_time();
#define END_LAT_SAMPLE \
if (num_samples < MAX_LAT_SAMPLES && i%LAT_SAMPL_INTERVAL == 0) { \
lat_end_time = nano_time(); \
latency_samples[i/LAT_SAMPL_INTERVAL % MAX_LAT_SAMPLES] = \
lat_end_time - lat_begin_time; \
num_samples++; \
}
#define MAX_LAT_SAMPLES 50
//#define LAT_SAMPL_INTERVAL (1000*1048576)
#define LAT_SAMPL_INTERVAL block_size
#else
#define BEGIN_LAT_SAMPLE ;
#define END_LAT_SAMPLE
#endif
uint64_t mmap_test(int fd, int tid, size_t block_size, size_t filesize, char *mapped_buffer,
char optype, off_t *offsets, uint64_t *begin, uint64_t *end) {
bool done = false;
char *buffer = NULL;
uint64_t i, j, numblocks, ret;
uint64_t begin_time, end_time, ret_token = 0;
#if SAMPLE_LATENCY
uint64_t lat_begin_time, lat_end_time;
size_t latency_samples[MAX_LAT_SAMPLES];
int num_samples = 0;
memset((void*)latency_samples, 0, sizeof(latency_samples));
#endif
buffer = (char*)malloc(block_size);
if(buffer == NULL) {
printf("Failed to allocate memory: %s\n", strerror(errno));
return -1;
}
memset((void*)buffer, 1, block_size);
begin_time = nano_time();
for(i=0; i<filesize; i+=block_size){
off_t offset = offsets[i/block_size];
BEGIN_LAT_SAMPLE;
if(optype == READ) {
//__memmove_chk_avx512_no_vzeroupper(buffer, &mapped_buffer[offset], block_size);
memcpy(buffer, &mapped_buffer[offset], block_size);
ret_token += buffer[0];
}
else if (optype == WRITE) {
//__memmove_chk_avx512_no_vzeroupper(&mapped_buffer[offset], buffer, block_size);
memcpy(&mapped_buffer[offset], buffer, block_size);
ret_token += mapped_buffer[i];
}
END_LAT_SAMPLE;
}
end_time = nano_time();
if(!silent) {
printf("%s: %" PRIu64 " bytes read in %" PRIu64 " ns.\n",
(optype==READ)?"readmap":"writemap",
(uint_least64_t)filesize, (end_time-begin_time));
// print GB/s
printf("(tid %d) %.2f\n", tid,
(double)filesize/(double)(end_time-begin_time)
* NANOSECONDS_IN_SECOND / BYTES_IN_GB);
}
*begin = begin_time;
*end = end_time;
#if SAMPLE_LATENCY
printf("\nSample latency for %ld byte block:\n", block_size);
for (i = 0; i < MAX_LAT_SAMPLES; i++)
printf("\t%ld: %ld\n", i, latency_samples[i]);
#endif
return ret_token;
}
char* map_buffer(int fd, size_t size) {
char *mapped_buffer = NULL;
// Populate
mapped_buffer = (char*)mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_POPULATE, fd, 0);
// Shared
// mapped_buffer = (char*)mmap(NULL, size, PROT_READ | PROT_WRITE,
// MAP_SHARED, fd, 0);
// Anon test
// mapped_buffer = (char*)mmap(NULL, size, PROT_READ | PROT_WRITE,
// MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if(mapped_buffer == MAP_FAILED)
EXIT_MSG("Failed to mmap file of size %zu: %s\n",
size, strerror(errno));
// Might also need to gurantee page aligned - posix_memalign()
// int mret = madvise(mapped_buffer, filesize, MADV_HUGEPAGE);
// if(mret!=0) {
// fprintf(stderr, "failed madvise: %s\n", strerror(errno));
// }
return mapped_buffer;
}
size_t get_filesize(const char* filename){
int retval;
struct stat st;
retval = stat(filename, &st);
if(retval)
return -1;
else
return st.st_size;
}
void print_help_message(const char *progname) {
/* take only the last portion of the path */
const char *basename = strrchr(progname, '/');
basename = basename ? basename + 1 : progname;
printf("usage: %s [OPTION]\n", basename);
printf(" -h, --help\n"
" Print this help and exit.\n");
printf(" -b, --block[=BLOCKSIZE]\n"
" Block size used for read system calls.\n"
" For mmap tests, the size of the stride when iterating\n"
" over the file.\n"
" Defaults to %d.\n", DEFAULT_BLOCK_SIZE);
printf(" -f, --file[=FILENAME]\n"
" Perform all tests on this file (defaults to %s).\n",
DEFAULT_NAME);
printf(" --readsyscall\n"
" Perform a read test using system calls.\n");
printf(" --readmmap\n"
" Perform a read test using mmap.\n");
printf(" --writesyscall\n"
" Perform a write test using system calls.\n");
printf(" --writemmap\n"
" Perform a write test using mmap.\n");
printf(" --randomaccess\n"
" Perform random access.\n");
printf(" --threads\n"
" Number of threads to use. Defaults to one.\n");
printf(" --mixedmmap\n"
" Perfom read and write concurrently at different offsets\n");
printf(" -w, -writethreads[=0]\n"
" Number of threads that should perform write\n");
}
To compile:
$ gcc testm.c -o testm -lpthread -static -O2 -fno-builtin-memcpy
Commands to run the program:
$ dd if=/dev/zero of=bigmmaptest bs=1M count=25600 # 25 GiB file
$ ./testm -b 1024 -f bigmmaptest --threads 16 --randomaccess --readmmap
I am on a 32 core Xeon 5218 2nd Gen. L1d KiB /L2 MiB /L3 MiB -- 512 / 16 / 22
When the memcpy size is 1 KiB I get 21.7 GB/s but when the size is 256B I get 26.68 GB/s and 34.8 GB/s when the size is 4 KiB. Why is there a drop in the middle?
I observe that 2 KiB also performs poorly when compared to 256B and 4 KiB.
What's more interesting is that when I disable the L2 hardware prefetcher, without any other changes, my bandwidth automatically increases for 1 KiB and 2 KiB. Without prefetch, 2 KiB memcpy gives 34.8 GB/s. All of these are aggregate bandwidths.
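(For anyone reproducing this: on these Intel CPUs the prefetchers are controlled through the documented MSR 0x1A4, where bit 0 disables the L2 hardware prefetcher, i.e. the streamer. With msr-tools it can be toggled without rebooting; the commands below assume the msr kernel module is available:)
$ sudo modprobe msr
$ sudo wrmsr -a 0x1a4 0x1   # set bit 0: L2 HW prefetcher off on all cores
$ sudo wrmsr -a 0x1a4 0x0   # re-enable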
With perf, I did measure L2 load-store misses but they turned out to not change drastically. This effect is also not seen for 8 threads and below.
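(In case it helps: Skylake-derived cores also have prefetch-specific L2 events that separate demand misses from prefetch traffic; exact event names depend on the CPU and perf version, but I mean something along these lines:)
$ perf stat -e l2_rqsts.miss,l2_rqsts.pf_hit,l2_rqsts.pf_miss \
    ./testm -b 1024 -f bigmmaptest --threads 16 --randomaccess --readmmap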
I am on Linux 5.0.4. I am using the glibc memcpy (gcc 7.5.0), and even with -O2 I observe the above quirk: 1 KiB access size gives 18.76 GiB/s with L2 prefetch and 30.32 GiB/s without. For comparison, 256 B access size provides 24.7 GiB/s with prefetch and 24.8 GiB/s without. Clearly, the drop in performance is because of the L2 cache pollution caused by the prefetcher, as it is not observed with smaller thread counts. I was considering whether SMT could be the reason for the increased pollution, but I observe the effect distinctly at 16 threads on 16 physical cores.
Skimming through the glibc memcpy code, I can see that any access below the size of 4 KiB uses AVX 256-bit instructions, so there is nothing changing there.

The smaller 256B size not seeing a drop from the L2 streamer might be due to the sequence of cache misses being too short to activate the streamer and waste bandwidth (and slots in the LFBs and L2 <-> L3 superqueue) on requests that won't be useful.
For aligned 4k, there are no bytes within the same page that you're not fetching, so the L2 prefetcher is positively useful, or at least not harmful. (Demand loads come in pretty quickly for later lines when running memcpy, so I'm guessing speeds were about the same with/without HW prefetch enabled, unless HW prefetch helps it get started on a new 4k chunk while still waiting for the end of the previous one.)
The L2 only sees physical addresses, and AFAIK it doesn't try to prefetch across a 4k boundary. (Even if it's within the same 2M hugepage, because it doesn't know that either.) The "next-page prefetcher" Intel mentions being new in Ivy Bridge is AFAIK just a TLB prefetch, not data.
So with aligned 4k memcpy, HW prefetch stops automatically at the end of the data you're actually going to read, not wasting any bandwidth. Since mmap gives you page-aligned memory, these 4k memcopies are from a single source page.
(The destination is irrelevant as it probably stays hot in L1d cache, with maybe an occasional eviction to L2, and the reload from it after memcpy can come from store-forwarding, not even having to wait for memcpy's store to commit to L1d.)
Prediction: If your smaller memcpy source starts partway into a 4k page but still ends at the end of a 4k page, you'd probably see similar behaviour to prefetch disabled. e.g. generate a random page number, and start 3072 bytes into it, doing a 1 KiB copy. So all your 1 KiB copies come from the ends of pages, never middles.
(You'd still have more dTLB misses per byte memcpyed, because each TLB entry only covers 1 KiB of the data you ever actually read. You did use MAP_POPULATE, so you shouldn't be seeing page faults in the timed region, assuming you have enough RAM.)
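A sketch of that offset generation, reusing the question's variables (numpages and COPY_SIZE are names I'm introducing just for this illustration):
// Make every copy end exactly at a 4 KiB page boundary, so there are no
// further lines in the page for the L2 streamer to prefetch.
#define PAGE_SIZE 4096
#define COPY_SIZE 1024                               /* the 1 KiB access size */
size_t numpages = filesize / PAGE_SIZE;
for (uint64_t i = 0; i < numblocks; i++) {
    off_t page = (off_t)(random() % numpages) * PAGE_SIZE;
    offsets[i] = page + (PAGE_SIZE - COPY_SIZE);     /* start 3072 bytes in */
}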
L1d KiB /L2 MiB /L3 MiB -- 512 / 16 / 22
Those are aggregate totals, but L1d and L2 are private per-core! You have 32kiB L1d and 1MiB L2 per core, because this is Cascade Lake, same layout as Skylake-X.
And BTW, I'd consider using a fast PRNG like xorshift+ or xorshift* inside the timing loop; that's easily random enough to defeat prefetching. Even a simple LFSR, or an LCG with a power-of-2 modulus, would do that (and be very cheap: just an imul and an add). It avoids having to read offsets from another array, if you really want to isolate just the memcpy memory accesses. Probably doesn't make a difference though. One advantage of a very simple PRNG with a period equal to the space you're trying to cover (like an LCG) is that you won't generate the same address twice, giving you a random permutation of the blocks. But with a big enough block of memory, random cache hits even from L3 are unlikely even without that hard-to-achieve property.
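A sketch of the kind of thing I mean, using xorshift64* with its standard constants (the state must be seeded nonzero); an illustration, not what the question's code does:
#include <stdint.h>
// xorshift64*: fast non-cryptographic PRNG; a few shifts/xors plus one
// multiply per number, easily random enough to defeat the HW prefetchers.
static inline uint64_t xorshift64star(uint64_t *state) {
    uint64_t x = *state;
    x ^= x >> 12;
    x ^= x << 25;
    x ^= x >> 27;
    *state = x;
    return x * 0x2545F4914F6CDD1DULL;
}
// inside the timing loop, instead of reading offsets[]:
//   off_t offset = (off_t)(xorshift64star(&seed) % numblocks) * block_size;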
Your current array of offsets is fine. (I didn't look at the code super closely, so I'm just assuming there aren't bugs.)

Related

Does clflush instruction flush block only from Level 1 Cache?

I have a multi-core system with 4 cores, each of them having private L1 and L2 caches and a shared LLC. The caches are inclusive, meaning that the higher-level caches are supersets of the lower-level caches. Can I directly flush a block on the LLC or does it have to go through the lower level first?
I am trying to understand flush+reload and flush+flush cache side-channel attacks.
clflush is architecturally required / guaranteed to evict the line from all levels of cache, making it useful for committing data to non-volatile DIMMs. (e.g. Battery-backed DRAM or 3D XPoint).
The wording in the manual seems pretty clear:
Invalidates from every level of the cache hierarchy in the cache coherence domain ... If that cache line contains modified data at any level of the cache hierarchy, that data is written back to memory
I think if multiple cores have a line in Shared state, clflush / clflushopt on one core has to evict it from the private caches of all cores. (This would happen anyway as part of evicting from inclusive L3 cache, but Skylake-X changed to a NINE (not-inclusive not-exclusive) L3 cache.)
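For that NV-DIMM use case, the usual pattern is flush-then-fence; a minimal sketch (using clflush for portability, where clflushopt/clwb would need a CPUID check):
#include <immintrin.h>
#include <stddef.h>
// Flush every cache line of buf, then fence so the flushes are ordered
// before later stores (required for clflushopt/clwb, harmless for clflush).
static void flush_range(const void *buf, size_t len)
{
    const char *p = (const char *)buf;
    for (size_t i = 0; i < len; i += 64)   // assuming 64-byte cache lines
        _mm_clflush(p + i);                // _mm_clwb writes back without evicting
    _mm_sfence();
}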
Can I directly flush a block on the LLC or does it have to go through the lower level first?
Not clear what you're asking. Are you asking if you can ask the CPU to flush a block from L3 only, without disturbing L1/L2? You already know L3 is inclusive on most Intel CPUs, so the net effect would be the same as clflush. For cores to talk to L3, they have to go through their own L1d and L2.
clflush still works if the data is only present in L3 but not the private L1d or L2 of the core executing it. It's not a "hint" like a prefetch, or a local-only thing.
In future Silvermont-family CPUs, there will be a cldemote instruction that lets you flush a block to the LLC, but not all the way to DRAM. (And it's only a hint, so it doesn't force the CPU to obey it if the write-back path is busy with evictions to make room for demand-loads.)
It can't be true that CLFLUSH always evicts from every cache level. I just wrote a little program (C++17) where flushing cache lines always takes below 5ns on my machine (3990X):
#include <iostream>
#include <chrono>
#include <cstring>
#include <vector>
#include <charconv>
#include <sstream>
#include <cmath>
#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__GNUC__)
#include <x86intrin.h>
#endif
using namespace std;
using namespace chrono;
size_t parseSize( char const *str );
string blockSizeStr( size_t blockSize );
int main( int argc, char **argv )
{
static size_t const DEFAULT_MAX_BLOCK_SIZE = (size_t)512 * 1024;
size_t blockSize = argc < 2 ? DEFAULT_MAX_BLOCK_SIZE : parseSize( argv[1] );
if( blockSize == -1 )
return EXIT_FAILURE;
blockSize = blockSize >= 4096 ? blockSize : 4096;
vector<char> block( blockSize );
size_t size = 4096;
static size_t const ITERATIONS_64K = 100;
do
{
uint64_t avg = 0;
size = size <= blockSize ? size : blockSize;
size_t iterations = (size_t)((double)0x10000 / size * ITERATIONS_64K + 0.5);
iterations += (size_t)!iterations;
for( size_t it = 0; it != iterations; ++it )
{
// make sure the cache lines get modified by
// writing a different value each iteration
for( size_t i = 0; i != size; ++i )
block[i] = (i + it) % 0x100;
auto start = high_resolution_clock::now();
for( char *p = &*block.begin(), *end = p + size; p < end; p += 64 )
_mm_clflush( p );
avg += duration_cast<nanoseconds>( high_resolution_clock::now() - start ).count();
}
double nsPerCl = ((double)(int64_t)avg / iterations) / (double)(ptrdiff_t)(size / 64);
cout << blockSizeStr( size ) << " " << nsPerCl << "ns" << endl;
} while( (size *= 2) <= blockSize );
}
size_t parseSize( char const *str )
{
double dSize;
from_chars_result fcr = from_chars( str, str + strlen( str ), dSize, chars_format::general );
if( fcr.ec != errc() )
return -1;
if( !*(str = fcr.ptr) || str[1] )
return -1;
static const
struct suffix_t
{
char suffix;
size_t mult;
} suffixes[]
{
{ 'k', 1024 },
{ 'm', (size_t)1024 * 1024 },
{ 'g', (size_t)1024 * 1024 * 1024 }
};
char cSuf = tolower( *str );
for( suffix_t const &suf : suffixes )
if( suf.suffix == cSuf )
{
dSize = trunc( dSize * (ptrdiff_t)suf.mult );
if( dSize < 1.0 || dSize >= (double)numeric_limits<ptrdiff_t>::max() )
return -1;
return (ptrdiff_t)dSize;
}
return -1;
}
string blockSizeStr( size_t blockSize )
{
ostringstream oss;
double dSize = (double)(ptrdiff_t)blockSize;
if( dSize < 1024.0 )
oss << blockSize;
else if( dSize < 1024.0 * 1024.0 )
oss << dSize / 1024.0 << "kB";
else if( blockSize < (size_t)1024 * 1024 * 1024 )
oss << dSize / (1024.0 * 1024.0) << "MB";
else
oss << (double)blockSize / (1024.0 * 1024.0 * 1024.0) << "GB";
return oss.str();
}
There's no DDR-whatever memory that can handle flushing a single cache line below 5ns.

get /dev/random in kernel module

I need to get both /dev/random and /dev/urandom within a kernel module.
The get_random_bytes API is provided for /dev/urandom.
But there is no API for /dev/random, so I tried ioctl and reading the file in kernel space.
Here is what I have done.
Using the RNDGETPOOL ioctl
In include/linux/random.h, RNDGETPOOL is declared:
/* Get the contents of the entropy pool. (Superuser only.) */
#define RNDGETPOOL _IOR( 'R', 0x02, int [2] )
But it won't work, so I checked drivers/char/random.c and noticed RNDGETPOOL is gone!!
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
int size, ent_count;
int __user *p = (int __user *)arg;
int retval;
switch (cmd) {
case RNDGETENTCNT:
/* inherently racy, no point locking */
if (put_user(input_pool.entropy_count, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
credit_entropy_bits(&input_pool, ent_count);
return 0;
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ent_count, p++))
return -EFAULT;
if (ent_count < 0)
return -EINVAL;
if (get_user(size, p++))
return -EFAULT;
retval = write_pool(&input_pool, (const char __user *)p,
size);
if (retval < 0)
return retval;
credit_entropy_bits(&input_pool, ent_count);
return 0;
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/* Clear the entropy pool counters. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
rand_initialize();
return 0;
default:
return -EINVAL;
}
}
I searched Google and found out the RNDGETPOOL ioctl has been removed. Done!
Using the random_read function from drivers/char/random.c:997:
static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
Here is my kernel module's function that accesses /dev/random.
static void read_file()
{
struct file *file;
loff_t pos = 0;
//ssize_t wc;
unsigned char buf_ent[21]={0,};
int ent_c;
int i;
ssize_t length = 0;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
file = filp_open("/dev/random", O_RDONLY, 0);
file->f_op->unlocked_ioctl(file, RNDGETENTCNT, &ent_c);
if(ent_c < sizeof(char))
{
printk("not enough entropy\n");
}
printk("ent counter : %d\n", ent_c);
//file->f_op->unlocked_ioctl(file, RNDGETPOOL, &ent_st.buf);
length = file->f_op->read(file, buf_ent, ent_c/ 8, &pos);
if(length <0)
{
printk("failed to random_read\n");
}
printk("length : %d\n", length);
printk("ent: ");
for(i=0;i<length; i++)
{
printk("%02x", buf_ent[i]);
}
printk("\n");
filp_close(file,0);
set_fs(old_fs);
}
The outputs seem to be random.
First try:
[1290902.992048] ent_c : 165
[1290902.992060] length : 20
[1290902.992060] ent: d89290f4a5eea8e087a63943ed0129041e80b568
Second try:
[1290911.493990] ent_c : 33
[1290911.493994] length : 4
[1290911.493994] ent: 7832640a
By the way, the random_read function's buffer argument has the __user keyword, but buf in my code is in kernel space.
Is it appropriate to use the random_read function in kernel space?
The in-kernel interface to get random bytes is get_random_bytes():
static void read_file(void)
{
unsigned char buf_ent[21];
get_random_bytes(buf_ent, 21);
print_hex_dump_bytes("ent: ", DUMP_PREFIX_NONE, buf_ent, 21);
}

MPI - scattering filepaths to processes

I have 4 filepaths in the global_filetable and I am trying to scatter 2 filepaths to each process.
Process 0 has the proper 2 paths, but there is something strange in process 1 (null)...
EDIT:
Here's the full code:
#include <stdio.h>
#include <stdlib.h> // malloc
#include <string.h> // strncpy
#include <limits.h> // PATH_MAX
#include <mpi.h>
int main(int argc, char *argv[])
{
char** global_filetable = (char**)malloc(4 * PATH_MAX * sizeof(char));
for(int i = 0; i < 4; ++i) {
global_filetable[i] = (char*)malloc(PATH_MAX *sizeof(char));
strncpy(global_filetable[i], "/path/", PATH_MAX);
}
/*for(int i = 0; i < 4; ++i) {
printf("%s\n", global_filetable[i]);
}*/
int rank, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
char** local_filetable = (char**)malloc(2 * PATH_MAX * sizeof(char));
MPI_Scatter(global_filetable, 2*PATH_MAX, MPI_CHAR, local_filetable, 2*PATH_MAX , MPI_CHAR, 0, MPI_COMM_WORLD);
{
/* now all processors print their local data: */
for (int p = 0; p < size; ++p) {
if (rank == p) {
printf("Local process on rank %d is:\n", rank);
for (int i = 0; i < 2; i++) {
printf("path: %s\n", local_filetable[i]);
}
}
MPI_Barrier(MPI_COMM_WORLD);
}
}
MPI_Finalize();
return 0;
}
Output:
Local process on rank 0 is:
path: /path/
path: /path/
Local process on rank 1 is:
path: (null)
path: (null)
Do you have any idea why I am having those nulls?
First, your allocation is inconsistent:
char** local_filetable = (char**)malloc(2 * PATH_MAX * sizeof(char));
The type char** indicates an array of char*, but you allocate a contiguous memory block, which would indicate a char*.
The easiest way would be to use the contiguous memory as char* for both global and local filetables. Depending on what get_filetable() actually does, you may have to convert. You can then index it like this:
char* entry = &filetable[i * PATH_MAX];
You can then simply scatter like this:
MPI_Scatter(global_filetable, 2 * PATH_MAX, MPI_CHAR,
local_filetable, 2 * PATH_MAX, MPI_CHAR, 0, MPI_COMM_WORLD);
Note that there is no more displacement, every rank just gets an equal sized chunk of the contiguous memory.
The next step would be to define a C and MPI struct encapsulating PATH_MAX characters so you can get rid of the constant usage of PATH_MAX and crude indexing.
I think this is much nicer (less complex, less memory management) than using actual char**. You would only need that if memory waste or redundant data transfer becomes an issue.
P.S. Make sure to never put more than PATH_MAX - 1 characters in a filetable entry, to keep space for the trailing \0.
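A sketch of that datatype idea: a contiguous MPI type of PATH_MAX chars makes the counts mean "number of paths" rather than raw chars (path_type is a hypothetical name):
// One MPI datatype per filetable entry.
MPI_Datatype path_type;
MPI_Type_contiguous(PATH_MAX, MPI_CHAR, &path_type);
MPI_Type_commit(&path_type);
// each rank receives 2 paths
MPI_Scatter(global_filetable, 2, path_type,
            local_filetable,  2, path_type, 0, MPI_COMM_WORLD);
MPI_Type_free(&path_type);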
Okay, I'm stupid.
char global_filetable[NUMBER_OF_STRINGS][PATH_MAX];
for(int i = 0; i < 4; ++i) {
strcpy(global_filetable[i], "/path/");
}
char local_filetable[2][PATH_MAX];
Now it works!

How to know the values of CR registers from linux user and kernel modes

I would like to know the CR0-CR4 register values on x86. Can I write inline assembly to read them out? Are there any other methods? (e.g., does the OS keep any file structures that record these values?)
The Linux kernel has functions to read and write the control registers: read_crX and write_crX for the standard CRs, and xgetbv/xsetbv for the extended ones.
User-mode applications need an LKM to use these functions indirectly.
In theory you just need to create an LKM with one or more devices and handle IO requests by reading or writing the CRs. In practice you usually have more than one CPU, so you need to handle MP.
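(Since the question asks about inline assembly: in kernel context, read_cr0() boils down to a single mov from the control register. A minimal sketch of the read side on x86-64 is below; this faults with #GP outside ring 0, which is exactly why user mode needs the LKM.)
// Sketch only: what the kernel's read_crX() helpers essentially do.
static inline unsigned long my_read_cr0(void)
{
    unsigned long val;
    asm volatile("mov %%cr0, %0" : "=r"(val));
    return val;
}
static inline unsigned long my_read_cr4(void)
{
    unsigned long val;
    asm volatile("mov %%cr4, %0" : "=r"(val));
    return val;
}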
I used the kernel module for CPUID as a template and created this LKM.
CODE IS WITHOUT ANY WARRANTY, TESTED ON DEBIAN 8 ON 64 bit VM ONLY
#include <linux/module.h> /* Needed by all modules */
#include <linux/kernel.h> /* Needed for KERN_INFO */
#include <linux/fs.h> /* Needed for KERN_INFO */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/xcr.h>
#define MAKE_MINOR(cpu, reg) (cpu<<8 | reg)
#define GET_MINOR_REG(minor) (minor & 0xff)
#define GET_MINOR_CPU(minor) (minor >> 8)
#define XCR_MINOR_BASE 0x80
static int major_n = 0;
static struct class *ctrlreg_class;
struct ctrlreg_info
{
unsigned int reg;
unsigned long value;
unsigned int error;
};
static void ctrlreg_smp_do_read(void* p)
{
struct ctrlreg_info* info = p;
info->error = 0;
printk(KERN_INFO "ctrlreg: do read of reg%u\n", info->reg);
switch (info->reg)
{
case 0: info->value = read_cr0(); break;
case 2: info->value = read_cr2(); break;
case 3: info->value = read_cr3(); break;
case 4: info->value = read_cr4(); break;
#ifdef CONFIG_X86_64
case 8: info->value = read_cr8(); break;
#endif
case XCR_MINOR_BASE: info->value = xgetbv(0); break;
default:
info->error = -EINVAL;
}
}
static void ctrlreg_smp_do_write(void* p)
{
struct ctrlreg_info* info = p;
info->error = 0;
switch (info->reg)
{
case 0: write_cr0(info->value); break;
case 2: write_cr2(info->value); break;
case 3: write_cr3(info->value); break;
case 4: write_cr4(info->value); break;
#ifdef CONFIG_X86_64
case 8: write_cr8(info->value); break;
#endif
case XCR_MINOR_BASE: xsetbv(0, info->value); break;
default:
info->error = -EINVAL;
}
}
static ssize_t ctrlreg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file_inode(file));
unsigned int cpu = GET_MINOR_CPU(minor);
unsigned int reg = GET_MINOR_REG(minor);
struct ctrlreg_info info = {.reg = reg};
int err;
printk(KERN_INFO "ctrlreg: read for cpu%u reg%u\n", cpu, reg);
printk(KERN_INFO "ctrlreg: read of %zu bytes\n", count);
if (count < sizeof(unsigned long))
return -EINVAL;
printk(KERN_INFO "ctrlreg: scheduling read\n");
err = smp_call_function_single(cpu, ctrlreg_smp_do_read, &info, 1);
if (IS_ERR_VALUE(err))
return err;
printk(KERN_INFO "ctrlreg: read success: %x\n", info.error);
if (IS_ERR_VALUE(info.error))
return err;
err = copy_to_user(buf, &info.value, sizeof(unsigned long));
printk(KERN_INFO "ctrlreg: read copy result: %x ( %lu )\n", err, sizeof(unsigned long));
if (IS_ERR_VALUE(err))
return err;
printk(KERN_INFO "ctrlreg: read done\n");
return sizeof(unsigned long);
}
static ssize_t ctrlreg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file_inode(file));
unsigned int cpu = GET_MINOR_CPU(minor);
unsigned int reg = GET_MINOR_REG(minor);
struct ctrlreg_info info = {.reg = reg};
int err;
printk(KERN_INFO "ctrlreg: write for cpu%u reg%u\n", cpu, reg);
printk(KERN_INFO "ctrlreg: write of %zu bytes\n", count);
if (count < sizeof(unsigned long))
return -EINVAL;
printk(KERN_INFO "ctrlreg: scheduling write\n");
err = copy_from_user(&info.value, buf, sizeof(unsigned long));
printk(KERN_INFO "ctrlreg: write copy data: %x ( %lu )\n", err, sizeof(unsigned long));
if (IS_ERR_VALUE(err))
return err;
err = smp_call_function_single(cpu, ctrlreg_smp_do_write, &info, 1);
if (IS_ERR_VALUE(err))
return err;
printk(KERN_INFO "ctrlreg: write success: %x\n", info.error);
if (IS_ERR_VALUE(info.error))
return err;
printk(KERN_INFO "ctrlreg: write done\n");
return sizeof(unsigned long);
}
static void ctrlreg_can_open(void *p)
{
unsigned int* reg = p;
unsigned int reg_num = *reg;
unsigned int ebx, edx, eax, ecx;
unsigned int support_xgetbv, support_ia32e;
*reg = 0; //Success
printk(KERN_INFO "ctrlreg: can open reg %u\n", reg_num);
if (reg_num <= 4 && reg_num != 1)
return;
#ifdef CONFIG_X86_64
if (reg_num == 8)
return;
#endif
cpuid_count(0x0d, 1, &eax, &ebx, &ecx, &edx);
support_xgetbv = cpuid_ecx(1) & 0x04000000;
support_ia32e = cpuid_edx(0x80000001) & 0x20000000;
printk(KERN_INFO "ctrlreg: xgetbv = %d\n", support_xgetbv);
printk(KERN_INFO "ctrlreg: ia32e = %d\n", support_ia32e);
if (support_xgetbv && support_ia32e)
return;
printk(KERN_INFO "ctrlreg: open denied");
*reg = -EIO;
}
static int ctrlreg_open(struct inode *inode, struct file *file)
{
unsigned int cpu;
unsigned int reg;
unsigned int minor;
int err;
minor = iminor(file_inode(file));
cpu = GET_MINOR_CPU(minor);
reg = GET_MINOR_REG(minor);
printk(KERN_INFO "ctrlreg: open device for cpu%u reg%u\n", cpu, reg);
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
return -ENXIO; /* No such CPU */
err = smp_call_function_single(cpu, ctrlreg_can_open, &reg, 1);
if (IS_ERR_VALUE(err))
return err;
return reg;
}
static const struct file_operations ctrlreg_fops =
{
.owner = THIS_MODULE,
.read = ctrlreg_read,
.write = ctrlreg_write,
.open = ctrlreg_open
};
static int ctrlreg_device_create(int cpu)
{
struct device *dev = NULL;
int i;
printk(KERN_INFO "ctrlreg: device create for cpu %d\n", cpu);
//CR0, 2-4, 8
for (i = 0; i <= 8; i++)
{
if ((i>4 && i<8) || i == 1)
continue; //Skip non existent regs
printk(KERN_INFO "ctrlreg: device cpu%dcr%d\n", cpu, i);
dev = device_create(ctrlreg_class, NULL, MKDEV(major_n, MAKE_MINOR(cpu, i)), NULL, "cpu%dcr%d", cpu, i);
if (IS_ERR(dev))
return PTR_ERR(dev);
}
//XCR0
for (i = 0; i <= 0; i++)
{
printk(KERN_INFO "ctrlreg: device cpu%dxcr%d\n", cpu, i);
dev = device_create(ctrlreg_class, NULL, MKDEV(major_n, MAKE_MINOR(cpu, (XCR_MINOR_BASE+i))), NULL, "cpu%dxcr%d", cpu, i);
if (IS_ERR(dev))
return PTR_ERR(dev);
}
return 0;
}
static void ctrlreg_device_destroy(int cpu)
{
int i;
//CR0, 2-4, 8
for (i = 0; i <= 8; i++)
{
if ((i>4 && i<8) || i == 1)
continue; //Skip non existent regs
device_destroy(ctrlreg_class, MKDEV(major_n, MAKE_MINOR(cpu, i)));
}
//XCR0
for (i = 0; i <= 0; i++)
device_destroy(ctrlreg_class, MKDEV(major_n, MAKE_MINOR(cpu, (XCR_MINOR_BASE+i))));
}
static int ctrlreg_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
int err = 0;
switch (action)
{
case CPU_UP_PREPARE:
err = ctrlreg_device_create(cpu);
break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
ctrlreg_device_destroy(cpu);
break;
}
return notifier_from_errno(err);
}
static struct notifier_block __refdata ctrlreg_class_cpu_notifier =
{
.notifier_call = ctrlreg_class_cpu_callback,
};
static char* ctrlreg_devnode(struct device *dev, umode_t *mode)
{
unsigned int minor = MINOR(dev->devt), cpu = GET_MINOR_CPU(minor), reg = GET_MINOR_REG(minor);
if (reg < XCR_MINOR_BASE)
return kasprintf(GFP_KERNEL, "crs/cpu%u/cr%u", cpu, reg);
else
return kasprintf(GFP_KERNEL, "crs/cpu%u/xcr%u", cpu, reg-XCR_MINOR_BASE);
}
int __init ctrlreg_init(void)
{
int err = 0, i = 0;
printk(KERN_INFO "ctrlreg: init\n");
if ((major_n = __register_chrdev(0, 0, NR_CPUS, "crs", &ctrlreg_fops)) < 0)
return major_n;
printk(KERN_INFO "ctrlreg: major number is %u\n", major_n);
ctrlreg_class = class_create(THIS_MODULE, "ctrlreg");
if (IS_ERR(ctrlreg_class))
{
err = PTR_ERR(ctrlreg_class);
goto out_chrdev;
}
printk(KERN_INFO "ctrlreg: class created\n");
ctrlreg_class->devnode = ctrlreg_devnode;
cpu_notifier_register_begin();
for_each_online_cpu(i)
{
err = ctrlreg_device_create(i);
if (IS_ERR_VALUE(err))
goto out_class;
}
__register_hotcpu_notifier(&ctrlreg_class_cpu_notifier);
cpu_notifier_register_done();
printk(KERN_INFO "ctrlreg: init success\n");
err = 0;
goto out;
out_class:
i = 0;
for_each_online_cpu(i)
{
ctrlreg_device_destroy(i);
}
cpu_notifier_register_done();
class_destroy(ctrlreg_class);
out_chrdev:
__unregister_chrdev(major_n, 0, NR_CPUS, "crs");
out:
return err;
}
static void __exit ctrlreg_exit(void)
{
int cpu = 0;
cpu_notifier_register_begin();
for_each_online_cpu(cpu)
ctrlreg_device_destroy(cpu);
class_destroy(ctrlreg_class);
__unregister_chrdev(major_n, 0, NR_CPUS, "crs");
__unregister_hotcpu_notifier(&ctrlreg_class_cpu_notifier);
cpu_notifier_register_done();
}
module_init(ctrlreg_init);
module_exit(ctrlreg_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kee Nemesis 241");
MODULE_DESCRIPTION("Read and write Control Registers");
This module creates the following dev nodes:
/dev/crs/cpu0/cr0
/dev/crs/cpu0/cr2
/dev/crs/cpu0/cr3
/dev/crs/cpu0/cr4
/dev/crs/cpu0/cr8
/dev/crs/cpu0/xcr0
/dev/crs/cpu1/cr0
/dev/crs/cpu1/cr2
/dev/crs/cpu1/cr3
/dev/crs/cpu1/cr4
/dev/crs/cpu1/cr8
/dev/crs/cpu1/xcr0
...
You can read/write these dev nodes. The minimum read/write length is 4 bytes on 32-bit systems and 8 bytes on 64-bit ones (Linux does some buffering anyway).
To compile this LKM, save the code above as ctrlreg.c and create this Makefile
obj-m += ctrlreg.o
all:
make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
clean:
make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
then use make to get ctrlreg.ko.
To load the module use sudo insmod ctrlreg.ko, to remove it sudo rmmod ctrlreg.
I have also written a small user mode utility to read CR:
CODE IS WITHOUT ANY WARRANTY, TESTED ON DEBIAN 8 ON 64 bit VM ONLY
#include <stdio.h>
#include <stdlib.h>
#define MAX_PATH 256
int main(int argc, char* argv[])
{
unsigned int cpu, reg;
FILE* fin;
char device[MAX_PATH];
unsigned long data;
if (argc < 3 || argc > 4)
return fprintf(stderr, "Usage:\n\t\t cr cpu reg [value]\n"), 1;
if (sscanf(argv[1], "cpu%u", &cpu) != 1)
return fprintf(stderr, "Invalid value '%s' for cpu\n", argv[1]), 2;
if (sscanf(argv[2], "cr%u", &reg) != 1 && sscanf(argv[2], "xcr%u", &reg) != 1)
return fprintf(stderr, "Invalid value '%s' for reg\n", argv[2]), 3;
if (argc == 4 && sscanf(argv[3], "%lu", &data) != 1)
return fprintf(stderr, "Invalid numeric value '%s'\n", argv[3]), 6;
snprintf(device, MAX_PATH, "/dev/crs/cpu%u/%s", cpu, argv[2]);
fin = fopen(device, argc == 4 ? "wb" : "rb");
if (!fin)
return fprintf(stderr, "Cannot open device %s\n", device), 4;
if (argc == 4)
{
if (fwrite(&data, sizeof(data), 1, fin) != 1)
return fprintf(stderr, "Cannot write device %s (%d)\n", device, ferror(fin)), 5;
}
else
{
if (fread(&data, sizeof(data), 1, fin) != 1)
return fprintf(stderr, "Cannot read device %s (%d)\n", device, ferror(fin)), 7;
printf("%016x\n", data);
}
fclose(fin);
return 0;
}
Save the code as cr.c and compile it.
To read cr0 of the second CPU you can use:
cr cpu1 cr0
To write the value 0 (be careful) into it:
cr cpu1 cr0 0

Page faults on OS X when reading with MMAP

I am trying to benchmark file system I/O on Mac OS X using mmap.
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>
#include <math.h>
char c;
int main(int argc, char ** argv)
{
if (argc != 2)
{
printf("no files\n");
exit(1);
}
int fd = open(argv[1], O_RDONLY);
fcntl(fd, F_NOCACHE, 1);
int offset=0;
int size=0x100000;
int pagesize = getpagesize();
struct stat stats;
fstat(fd, &stats);
int filesize = stats.st_size;
printf("%d byte pages\n", pagesize);
printf("file %s # %d bytes\n", argv[1], filesize);
while(offset < filesize)
{
if(offset + size > filesize)
{
int pages = ceil((filesize-offset)/(double)pagesize);
size = pages*pagesize;
}
printf("mapping offset %x with size %x\n", offset, size);
void * mem = mmap(0, size, PROT_READ, 0, fd, offset);
if(mem == -1)
return 0;
offset+=size;
int i=0;
for(; i<size; i+=pagesize)
{
c = *((char *)mem+i);
}
munmap(mem, size);
}
return 0;
}
The idea is that I'll map a file or a portion of it and then cause page faults by dereferencing it. I am slowly losing my sanity since this doesn't work at all, and I've done similar things on Linux before.
Change this line
void * mem = mmap(0, size, PROT_READ, 0, fd, offset);
to
void * mem = mmap(0, size, PROT_READ, MAP_PRIVATE, fd, offset);
And, don't compare mem with -1. Use this instead:
if(mem == MAP_FAILED) { ... }
It's both more readable and more portable.
General advice: if you're on a different UNIX platform from what you're used to, it's a good idea to open the man page. For mmap on OS X, it can be found here. It says:
Conforming applications must specify either MAP_PRIVATE or MAP_SHARED.
So, specifying 0 as the fourth argument is not OK on OS X. I believe this is true for BSD in general.
