GLUT batching motion callbacks on Mojave? - macos

I have a GLUT-based program that I'm moving from macOS High Sierra (10.13.6) to macOS Mojave (10.14.4). Yes, I know that GLUT is deprecated on both. When I compile and run the program on Mojave, GLUT seems to be batching up calls to the glutMotionFunc() callback in a way that it doesn't when it is compiled and/or run on macOS 10.13.
I've written a simple version of the program to isolate the behavior, which I've included here. When there is no active click-and-drag motion on the GLUT window, the display callback sleeps for a "long time" (by default 300 msec), but when there is such motion detected, it sleeps for just 8 msec.
The expected behavior is that a click followed by rapid back-and-forth dragging motion will cause the program to stay in the 8 msec sleeping behavior, enabling it to sample and respond to mouse motion with low latency. When compiled or run on macOS 10.13.6, it behaves this way. When compiled and run on macOS 10.14.4, it gets a few motion callbacks in a row, but then doesn't get any after the 8 msec display callback finishes, so the next display callback takes 300 msec. In practice, this makes the observed mouse motion very jerky.
I did enough binary chopping to isolate the change in behavior to a single byte in the binary. (I know enough about systems programming to figure out how to do this, but not enough about macOS to know just what it means.) If I take a binary compiled on macOS 10.13 and change the "sdk" field of the LC_VERSION_MIN_MACOSX load command to mean "10.14" rather than "10.13", it shows the jerky behavior. If I take a binary compiled on macOS 10.14 and change the "sdk" field of the LC_BUILD_VERSION load command to mean "10.13" rather than "10.14", it works again.
Is my program doing something obviously dumb that would cause this change? Or is this a Mojave bug? Also, does anyone happen to know what actually changes when I mess with the "sdk" field of the load commands?
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <GLUT/glut.h>
/* ------------------------------------------------------------------ */
typedef long long hrtime_t;
hrtime_t
gethrtime(void)
{
struct timeval tp;
hrtime_t rv;
gettimeofday(&tp, NULL);
rv = ((unsigned long long)tp.tv_sec * 1000000ull + tp.tv_usec);
return (rv);
}
static void
stamp(void)
{
static hrtime_t then = 0;
const hrtime_t now = gethrtime();
const hrtime_t delta = (then == 0 ? 0 : now - then);
printf("%16lld %5lld.%03lld ", now, delta / 1000, delta % 1000);
then = now;
}
/* ------------------------------------------------------------------ */
#define NPTS 1024
int Xs[NPTS], Ys[NPTS];
int W, R;
static void
point(int x, int y)
{
Xs[W] = x;
Ys[W] = y;
W = (W + 1) % NPTS;
}
static int
line_pending(void)
{
return (R < W && (R + 1) % NPTS < W);
}
static int
get_line(int *sxp, int *syp, int *exp, int *eyp)
{
if (line_pending()) {
*sxp = Xs[R];
*syp = Ys[R];
R = (R + 1) % NPTS;
*exp = Xs[R];
*eyp = Ys[R];
return (1);
} else {
return (0);
}
}
/* ------------------------------------------------------------------ */
static void
keyboard_cb(unsigned char key, int x, int y)
{
if (key == 'q') {
exit(0);
}
glutPostRedisplay();
}
static void
mouse_cb(int button, int state, int x, int y)
{
if (button == GLUT_LEFT_BUTTON) {
stamp();
printf("[%4d, %4d]: mouse %s\n",
x, y, (state == GLUT_DOWN) ? "down" : "up");
point(x, y);
}
glutPostRedisplay();
}
static void
motion_cb(int x, int y)
{
stamp();
printf("[%4d, %4d]: motion\n", x, y);
point(x, y);
glutPostRedisplay();
}
/* ------------------------------------------------------------------ */
static hrtime_t Delay;
static void
display_cb(void)
{
printf("\n");
stamp();
if (line_pending()) {
printf("display: found motion\n");
while (line_pending()) {
int sx, sy, ex, ey;
get_line(&sx, &sy, &ex, &ey);
glBegin(GL_LINES);
glVertex2f(sx, 512 - sy);
glVertex2f(ex, 512 - ey);
glEnd();
printf("drawing [%4d %4d]-[%4d %4d]\n", sx, sy, ex, ey);
}
glFlush();
stamp();
printf("display: now sleeping 8 msec\n");
(void) usleep(8000);
} else {
printf("display: no motion, doing %llu msec render\n", Delay);
(void) usleep(1000 * Delay);
}
stamp();
printf("display: done\n");
}
int
main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA | GLUT_DEPTH);
glutInitWindowSize(512, 512);
glutInitWindowPosition(768, 0);
glutCreateWindow("");
glClearColor(0, 0, 0, 0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 512.0, 0.0, 512.0, -1.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glColor3f(1.0, 1.0, 1.0);
glFlush();
Delay = (argc > 1 ? atoi(argv[1]) : 300);
glutDisplayFunc(display_cb);
glutIdleFunc(glutPostRedisplay);
glutKeyboardFunc(keyboard_cb);
glutMouseFunc(mouse_cb);
glutMotionFunc(motion_cb);
glutMainLoop();
return (0);
}
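For reference, the relevant load command and its sdk field can be inspected with otool (the fields shown differ between LC_VERSION_MIN_MACOSX and LC_BUILD_VERSION, and ./myprog below is just a placeholder for the compiled binary); the values are normally determined at link time by the SDK in use and the -mmacosx-version-min deployment target:
otool -l ./myprog | grep -E -A 4 'LC_VERSION_MIN_MACOSX|LC_BUILD_VERSION'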

Related

Windows 10 poor performance compared to Windows 7 (page fault handling is not scalable, severe lock contention when no of threads > 16)

We set up two identical HP Z840 Workstations with the following specs
2 x Xeon E5-2690 v4 @ 2.60GHz (Turbo Boost ON, HT OFF, total 28 logical CPUs)
32GB DDR4 2400 Memory, Quad-channel
and installed Windows 7 SP1 (x64) and Windows 10 Creators Update (x64) on each.
Then we ran a small memory benchmark (code below, built with VS2015 Update 3, 64-bit architecture) which performs memory allocation-fill-free simultaneously from multiple threads.
#include <Windows.h>
#include <vector>
#include <ppl.h>
unsigned __int64 ZQueryPerformanceCounter()
{
unsigned __int64 c;
::QueryPerformanceCounter((LARGE_INTEGER *)&c);
return c;
}
unsigned __int64 ZQueryPerformanceFrequency()
{
unsigned __int64 c;
::QueryPerformanceFrequency((LARGE_INTEGER *)&c);
return c;
}
class CZPerfCounter {
public:
CZPerfCounter() : m_st(ZQueryPerformanceCounter()) {};
void reset() { m_st = ZQueryPerformanceCounter(); };
unsigned __int64 elapsedCount() { return ZQueryPerformanceCounter() - m_st; };
unsigned long elapsedMS() { return (unsigned long)(elapsedCount() * 1000 / m_freq); };
unsigned long elapsedMicroSec() { return (unsigned long)(elapsedCount() * 1000 * 1000 / m_freq); };
static unsigned __int64 frequency() { return m_freq; };
private:
unsigned __int64 m_st;
static unsigned __int64 m_freq;
};
unsigned __int64 CZPerfCounter::m_freq = ZQueryPerformanceFrequency();
int main(int argc, char ** argv)
{
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
int ncpu = sysinfo.dwNumberOfProcessors;
if (argc == 2) {
ncpu = atoi(argv[1]);
}
{
printf("No of threads %d\n", ncpu);
try {
concurrency::Scheduler::ResetDefaultSchedulerPolicy();
int min_threads = 1;
int max_threads = ncpu;
concurrency::SchedulerPolicy policy
(2 // two entries of policy settings
, concurrency::MinConcurrency, min_threads
, concurrency::MaxConcurrency, max_threads
);
concurrency::Scheduler::SetDefaultSchedulerPolicy(policy);
}
catch (concurrency::default_scheduler_exists &) {
printf("Cannot set concurrency runtime scheduler policy (Default scheduler already exists).\n");
}
static int cnt = 100;
static int num_fills = 1;
CZPerfCounter pcTotal;
// malloc/free
printf("malloc/free\n");
{
CZPerfCounter pc;
for (int i = 1 * 1024 * 1024; i <= 8 * 1024 * 1024; i *= 2) {
concurrency::parallel_for(0, 50, [i](size_t x) {
std::vector<void *> ptrs;
ptrs.reserve(cnt);
for (int n = 0; n < cnt; n++) {
auto p = malloc(i);
ptrs.emplace_back(p);
}
for (int x = 0; x < num_fills; x++) {
for (auto p : ptrs) {
memset(p, num_fills, i);
}
}
for (auto p : ptrs) {
free(p);
}
});
printf("size %4d MB, elapsed %8.2f s, \n", i / (1024 * 1024), pc.elapsedMS() / 1000.0);
pc.reset();
}
}
printf("\n");
printf("Total %6.2f s\n", pcTotal.elapsedMS() / 1000.0);
}
return 0;
}
Surprisingly, the result is very bad in Windows 10 CU compared to Windows 7. I plotted the results below for 1MB and 8MB chunk sizes, varying the number of threads from 2, 4, ..., up to 28. While Windows 7 gave slightly worse performance as we increased the number of threads, Windows 10 scaled much worse.
We have tried to make sure all Windows updates are applied, updated drivers, and tweaked BIOS settings, without success. We also ran the same benchmark on several other hardware platforms, and all gave a similar curve for Windows 10. So it seems to be a problem with Windows 10.
Does anyone have similar experience, or know what might be going on here (maybe we missed something)? This behavior has caused a significant performance hit for our multithreaded application.
*** EDITED
Using https://github.com/google/UIforETW (thanks to Bruce Dawson) to analyze the benchmark, we found that most of the time is spent inside the kernel's KiPageFault. Digging further down the call tree, everything leads to ExpWaitForSpinLockExclusiveAndAcquire. It seems that this lock contention is causing the issue.
*** EDITED
Collected Server 2012 R2 data on the same hardware. Server 2012 R2 is also worse than Win7, but still a lot better than Win10 CU.
*** EDITED
It happens in Server 2016 as well. I added the tag windows-server-2016.
*** EDITED
Using info from @Ext3h, I modified the benchmark to use VirtualAlloc and VirtualLock. I can confirm a significant improvement compared to when VirtualLock is not used. Overall, Win10 is still 30% to 40% slower than Win7 when both use VirtualAlloc and VirtualLock.
Microsoft seems to have fixed this issue with Windows 10 Fall Creators Update and Windows 10 Pro for Workstation.
Here is the updated graph.
Win 10 FCU and WKS have lower overhead than Win 7. In exchange, VirtualLock seems to have higher overhead.
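For reference, the VirtualAlloc/VirtualLock variant mentioned above replaces the malloc/memset/free body of the benchmark's parallel_for lambda with something along these lines (a sketch, not the exact modified benchmark; it reuses cnt, num_fills and the chunk size i from the code above, and omits error handling):
std::vector<void *> ptrs;
ptrs.reserve(cnt);
for (int n = 0; n < cnt; n++) {
    // Commit the block, then touch it via VirtualLock/VirtualUnlock so the
    // pages are faulted in up front rather than one soft fault at a time in memset.
    void *p = VirtualAlloc(NULL, i, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    VirtualLock(p, i);
    VirtualUnlock(p, i);
    ptrs.emplace_back(p);
}
for (int x = 0; x < num_fills; x++) {
    for (auto p : ptrs) {
        memset(p, num_fills, i);
    }
}
for (auto p : ptrs) {
    VirtualFree(p, 0, MEM_RELEASE);
}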
Unfortunately not an answer, just some additional insight.
Little experiment with a different allocation strategy:
#include <Windows.h>
#include <thread>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <atomic>
#include <iostream>
#include <chrono>
class AllocTest
{
public:
virtual void* Alloc(size_t size) = 0;
virtual void Free(void* allocation) = 0;
};
class BasicAlloc : public AllocTest
{
public:
void* Alloc(size_t size) override {
return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
}
void Free(void* allocation) override {
VirtualFree(allocation, NULL, MEM_RELEASE);
}
};
class ThreadAlloc : public AllocTest
{
public:
ThreadAlloc() {
t = std::thread([this]() {
std::unique_lock<std::mutex> qlock(this->qm);
do {
this->qcv.wait(qlock, [this]() {
return shutdown || !q.empty();
});
{
std::unique_lock<std::mutex> rlock(this->rm);
while (!q.empty())
{
q.front()();
q.pop();
}
}
rcv.notify_all();
} while (!shutdown);
});
}
~ThreadAlloc() {
{
std::unique_lock<std::mutex> lock1(this->rm);
std::unique_lock<std::mutex> lock2(this->qm);
shutdown = true;
}
qcv.notify_all();
rcv.notify_all();
t.join();
}
void* Alloc(size_t size) override {
void* target = nullptr;
{
std::unique_lock<std::mutex> lock(this->qm);
q.emplace([this, &target, size]() {
target = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
VirtualLock(target, size);
VirtualUnlock(target, size);
});
}
qcv.notify_one();
{
std::unique_lock<std::mutex> lock(this->rm);
rcv.wait(lock, [&target]() {
return target != nullptr;
});
}
return target;
}
void Free(void* allocation) override {
{
std::unique_lock<std::mutex> lock(this->qm);
q.emplace([allocation]() {
VirtualFree(allocation, NULL, MEM_RELEASE);
});
}
qcv.notify_one();
}
private:
std::queue<std::function<void()>> q;
std::condition_variable qcv;
std::condition_variable rcv;
std::mutex qm;
std::mutex rm;
std::thread t;
std::atomic_bool shutdown = false;
};
int main()
{
SetProcessWorkingSetSize(GetCurrentProcess(), size_t(4) * 1024 * 1024 * 1024, size_t(16) * 1024 * 1024 * 1024);
BasicAlloc alloc1;
ThreadAlloc alloc2;
AllocTest *allocator = &alloc2;
const size_t buffer_size =1*1024*1024;
const size_t buffer_count = 10*1024;
const unsigned int thread_count = 32;
std::vector<void*> buffers;
buffers.resize(buffer_count);
std::vector<std::thread> threads;
threads.resize(thread_count);
void* reference = allocator->Alloc(buffer_size);
std::memset(reference, 0xaa, buffer_size);
auto func = [&buffers, allocator, buffer_size, buffer_count, reference, thread_count](int thread_id) {
for (int i = thread_id; i < buffer_count; i+= thread_count) {
buffers[i] = allocator->Alloc(buffer_size);
std::memcpy(buffers[i], reference, buffer_size);
allocator->Free(buffers[i]);
}
};
for (int i = 0; i < 10; i++)
{
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
for (int t = 0; t < thread_count; t++) {
threads[t] = std::thread(func, t);
}
for (int t = 0; t < thread_count; t++) {
threads[t].join();
}
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
std::cout << duration << std::endl;
}
DebugBreak();
return 0;
}
Under all sane conditions, BasicAlloc is faster, just as it should be. In fact, on a quad-core CPU (no HT), there is no configuration in which ThreadAlloc could outperform it. ThreadAlloc is consistently around 30% slower. (Which is actually surprisingly little, and it holds true even for tiny 1kB allocations!)
However, if the CPU has around 8-12 virtual cores, then it eventually reaches the point where BasicAlloc actually scales negatively, while ThreadAlloc just "stalls" on the base line overhead of soft faults.
If you profile the two different allocation strategies, you can see that for a low thread count, KiPageFault shifts from memcpy on BasicAlloc to VirtualLock on ThreadAlloc.
For higher thread and core counts, eventually ExpWaitForSpinLockExclusiveAndAcquire starts emerging from virtually zero load to up to 50% with BasicAlloc, while ThreadAlloc only maintains the constant overhead from KiPageFault itself.
Well, the stall with ThreadAlloc is also pretty bad. No matter how many cores or NUMA nodes your system has, you are currently hard-capped at around 5-8GB/s in new allocations, across all processes in the system, limited solely by single-thread performance. All the dedicated memory-management thread achieves is not wasting CPU cycles on a contended critical section.
You would have expected Microsoft to have a lock-free strategy for assigning pages on different cores, but apparently that's not even remotely the case.
The spin-lock was also already present in the Windows 7 and earlier implementations of KiPageFault. So what did change?
Simple answer: KiPageFault itself became much slower. No clue what exactly caused it to slow down, but the spin-lock simply never became an obvious limit, because 100% contention was never possible before.
If someone wishes to disassemble KiPageFault to find the most expensive part - be my guest.

gsl gnu solving first order ordinary differential equation

I visited the GNU GSL website and I don't find the example there for solving a differential equation to be intuitive at all (especially because it uses a 2nd-order differential equation). https://www.gnu.org/software/gsl/manual/html_node/ODE-Example-programs.html#ODE-Example-programs
Can somebody point me to a descriptive guide on how to solve a very simple first-order differential equation?
For example, suppose my function is y'=x+2y (or any such function); how do I write code in GSL to solve it with a given fixed step size and initial condition?
For y'=f(x,y)=x+2y the arrays all have dimension 1, which is normally something to avoid, but here it is instructional. For the explicit solvers, i.e., those not containing imp in the name, you do not need the Jacobian:
#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_odeiv2.h>
int odefunc (double x, const double y[], double f[], void *params)
{
f[0] = x+2*y[0];
return GSL_SUCCESS;
}
int * jac; /* unused here: explicit steppers do not need a Jacobian */
int main ()
{
int dim = 1;
gsl_odeiv2_system sys = {odefunc, NULL, dim, NULL};
gsl_odeiv2_driver * d = gsl_odeiv2_driver_alloc_y_new (&sys, gsl_odeiv2_step_rkf45, 1e-6, 1e-6, 0.0);
int i;
double x0 = 0.0, xf = 100.0; /* start and end of integration interval */
double x = x0;
double y[1] = { 1.0 }; /* initial value */
for (i = 1; i <= 100; i++)
{
double xi = x0 + i * (xf-x0) / 100.0;
int status = gsl_odeiv2_driver_apply (d, &x, xi, y);
if (status != GSL_SUCCESS)
{
printf ("error, return value=%d\n", status);
break;
}
printf ("%.8e %.8e\n", x, y[0]);
}
gsl_odeiv2_driver_free (d);
return 0;
}
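Assuming the code is saved as ode1.c (the name is arbitrary), it can be built and run with the usual GSL link flags:
gcc -o ode1 ode1.c -lgsl -lgslcblas -lm
./ode1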
You may want to look up the book "Introduction to Computational Modeling Using C and Open-Source Tools" by Jose M. Garrido.
Lutzl, please review:
#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_odeiv2.h>
int odefunc (double x, const double y[], double f[], void *params)
{
f[0] = x+2*y[0];
return GSL_SUCCESS;
}
int jac(double x, const double y[], double *dfdy, double dfdx[], void *params) {
gsl_matrix_view dfdy_mat = gsl_matrix_view_array(dfdy, 1, 1);
gsl_matrix *m = &dfdy_mat.matrix;
gsl_matrix_set(m, 0, 0, 2.0); /* df/dy = 2 for f(x,y) = x + 2y */
dfdx[0] = 1.0; /* df/dx = 1 */
return GSL_SUCCESS;
}
int main ()
{
int dim =1;
gsl_odeiv2_system sys = {odefunc, jac, dim, NULL};
gsl_odeiv2_driver * d = gsl_odeiv2_driver_alloc_y_new (&sys, gsl_odeiv2_step_rk1imp,1e-7,1e-7, 0.0);
int i;
double x0 = 0.0, xf = 1.0; /* start and end of integration interval */
double x = x0;
double y[1] = { 1.0 }; /* initial value */
while (x < xf)
{
double xi = x + 0.25; /* step the target point forward each iteration */
int status = gsl_odeiv2_driver_apply (d, &x, xi, y);
if (status != GSL_SUCCESS)
{
printf ("error, return value=%d\n", status);
break;
}
printf ("%.8e %.8e\n", x, y[0]);
}
gsl_odeiv2_driver_free (d);
return 0;
}

Generate functions at compile time

I have an image. Every pixel contains information about RGB intensity. Now I want to sum the intensity of these channels, but I also want to choose which channels' intensities to sum. A straightforward implementation of this would look like this:
int intensity(const unsigned char* pixel, bool red, bool green, bool blue){
return 0 + (red ? pixel[0] : 0) + (green ? pixel[1] : 0) + (blue ? pixel[2] : 0);
}
Because I will call this function for every pixel in the image, I want to discard all conditions if I can. So I guess I have to have a function for every case:
std::function<int(const unsigned char* pixel)> generateIntensityAccumulator(
const bool& accumulateRChannel,
const bool& accumulateGChannel,
const bool& accumulateBChannel)
{
if (accumulateRChannel && accumulateGChannel && accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[0]) + static_cast<int>(pixel[1]) + static_cast<int>(pixel[2]);
};
}
if (!accumulateRChannel && accumulateGChannel && accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[1]) + static_cast<int>(pixel[2]);
};
}
if (!accumulateRChannel && !accumulateGChannel && accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[2]);
};
}
if (!accumulateRChannel && !accumulateGChannel && !accumulateBChannel){
return [](const unsigned char* pixel){
return 0;
};
}
if (accumulateRChannel && !accumulateGChannel && !accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[0]);
};
}
if (!accumulateRChannel && accumulateGChannel && !accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[1]);
};
}
if (accumulateRChannel && !accumulateGChannel && accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[0]) + static_cast<int>(pixel[2]);
};
}
if (accumulateRChannel && accumulateGChannel && !accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[0]) + static_cast<int>(pixel[1]);
};
}
}
Now I can use this generator before entering the image loop and call the function without any conditions:
...
auto accumulator = generateIntensityAccumulator(true, false, true);
for(auto pixel : pixels){
auto intensity = accumulator(pixel);
}
...
But it is a lot of writing for such a simple task, and I have a feeling that there is a better way to accomplish this: for example, make the compiler do the dirty work for me and generate all the above cases. Can someone point me in the right direction?
Using a std::function like this will cost you dearly, because you don't give the compiler a chance to optimize by inlining what it can.
What you are trying to do is a good job for templates. And since you use integral numbers, the expression itself may be optimized away, sparing you the need to write a specialization of each version. Look at this example:
#include <array>
#include <chrono>
#include <iostream>
#include <random>
#include <vector>
template <bool AccumulateR, bool AccumulateG, bool AccumulateB>
inline int accumulate(const unsigned char *pixel) {
static constexpr int enableR = static_cast<int>(AccumulateR);
static constexpr int enableG = static_cast<int>(AccumulateG);
static constexpr int enableB = static_cast<int>(AccumulateB);
return enableR * static_cast<int>(pixel[0]) +
enableG * static_cast<int>(pixel[1]) +
enableB * static_cast<int>(pixel[2]);
}
int main(void) {
std::vector<std::array<unsigned char, 3>> pixels(
1e7, std::array<unsigned char, 3>{0, 0, 0});
// Fill up with randomness
std::random_device rd;
std::uniform_int_distribution<unsigned char> dist(0, 255);
for (auto &pixel : pixels) {
pixel[0] = dist(rd);
pixel[1] = dist(rd);
pixel[2] = dist(rd);
}
// Measure perf
using namespace std::chrono;
auto t1 = high_resolution_clock::now();
int sum1 = 0;
for (auto const &pixel : pixels)
sum1 += accumulate<true, true, true>(pixel.data());
auto t2 = high_resolution_clock::now();
int sum2 = 0;
for (auto const &pixel : pixels)
sum2 += accumulate<false, true, false>(pixel.data());
auto t3 = high_resolution_clock::now();
std::cout << "Sum 1 " << sum1 << " in "
<< duration_cast<milliseconds>(t2 - t1).count() << "ms\n";
std::cout << "Sum 2 " << sum2 << " in "
<< duration_cast<milliseconds>(t3 - t2).count() << "ms\n";
}
Compiled with Clang 3.9 with -O2, yields this result on my CPU:
Sum 1 -470682949 in 7ms
Sum 2 1275037960 in 2ms
Please note that we have an overflow here; you may need to use something bigger than an int. A uint64_t might do. If you inspect the assembly code, you will see that the two versions of the function are inlined and optimized differently.
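For instance, with <cstdint> included, the benchmark loops above could accumulate into a 64-bit integer instead (shown only to illustrate the point; the printed sums will then no longer wrap around):
// A wider accumulator avoids the signed-int overflow noted above.
std::int64_t sum1 = 0;
for (auto const &pixel : pixels)
    sum1 += accumulate<true, true, true>(pixel.data());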
First things first. Don't write a std::function that takes a single pixel; write one that takes a contiguous range of pixels (a scanline of pixels).
Second, you want to write a template version of intensity:
template<bool red, bool green, bool blue>
int intensity(const unsigned char* pixel){
return (red ? pixel[0] : 0) + (green ? pixel[1] : 0) + (blue ? pixel[2] : 0);
}
pretty simple, eh? That will optimize down to your hand-crafted version.
template<std::size_t index>
int intensity(const unsigned char* pixel){
return intensity< (index&1) != 0, (index&2) != 0, (index&4) != 0 >(pixel);
}
this one maps from the bits of index to which of the intensity<bool, bool, bool> to call. Now for the scanline version:
template<std::size_t index, std::size_t pixel_stride=3>
int sum_intensity(const unsigned char* pixel, std::size_t count){
int value = 0;
while(count--) {
value += intensity<index>(pixel);
pixel += pixel_stride;
}
return value;
}
We can now generate our scanline intensity calculator:
using scanline_fn = int(*)( const unsigned char* pel, std::size_t pixels );
scanline_fn scanline_intensity(bool red, bool green, bool blue) {
static const scanline_fn table[] = {
sum_intensity<0b000>, sum_intensity<0b001>,
sum_intensity<0b010>, sum_intensity<0b011>,
sum_intensity<0b100>, sum_intensity<0b101>,
sum_intensity<0b110>, sum_intensity<0b111>,
};
std::size_t index = red + green*2 + blue*4;
return table[index];
}
and done.
These techniques can be made generic, but you don't need the generic ones.
If your pixel stride is not 3 (say there is an alpha channel), sum_intensity needs to be told the stride (ideally as a template parameter).
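For completeness, a hypothetical call site might look like this (scanline and width are placeholders for your own image data; the last line shows the stride as the second template parameter):
// Resolve the specialized function once, outside the per-pixel loop.
auto sum = scanline_intensity(/*red=*/true, /*green=*/false, /*blue=*/true);
int total = sum(scanline, width);  // no per-pixel branching inside
// With an RGBA layout (stride 4): sum_intensity<0b101, 4>(scanline, width);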

Is it possible to do interactive user input and output simulation in VHDL or Verilog?

For example, I would like to run a simulation for an interactive game like: https://github.com/fabioperez/space-invaders-vhdl without an FPGA, such that:
signals are set by keyboard keys
outputs can be displayed on a window
http://www.nand2tetris.org/ does this, but it uses a simplified custom educational language for it.
VHDL's textio's read(input, ...) and write(output, ...) get somewhat close, but not quite:
read(input, ...) waits for a newline; we'd want something that can detect whether a keyboard key is pressed or not
write(output, ...): we would need some way to flush data to ensure that the renderer that will emulate, say, a display gets it
we need some way to throttle simulation speed
Of course, I don't need to do everything in VHDL: I just need a minimal way to communicate with VHDL synchronously with other programs, and then I can do the e.g. display with SDL in C.
Also asked at: https://github.com/tgingold/ghdl/issues/92
Verilator
Verilator is a perfect solution for this application.
It exposes the Verilog simulation loop to C++ (and transpiles the Verilog to C++), allowing you to set inputs, and get outputs from C++.
See the CONNECTING TO C++ example from the docs: http://www.veripool.org/projects/verilator/wiki/Manual-verilator
So you can just plug that into SDL / ncurses / etc. without any IPC.
For a simulator independent solution, it might be worth looking into the foreign language APIs of VHDL (VHPI) / Verilog (DPI) as mentioned in this comment, but there are few examples of how to use those, and you'll have to worry about IPC.
Minimal runnable example:
A related project that implements nand2tetris in Verilator + SDL can be found at: https://hackaday.io/project/160865-nand2tetris-in-verilog-part3-verilator-and-sdl2
Install dependencies on Ubuntu 22.04:
sudo apt install libsdl2-dev verilator
Makefile
.POSIX:
.PHONY: all clean run
RUN ?= move
OUT_EXT ?= .out
VERILATOR_DIR = ./obj_dir/
all: $(VERILATOR_DIR)Vmove display$(OUT_EXT)
$(VERILATOR_DIR)Vmove: move.v move.cpp fps.hpp
verilator -Wall --cc move.v --exe move.cpp
make -C obj_dir -f Vmove.mk Vmove CXXFLAGS='--std=c++11 -Wall' LIBS='-lSDL2'
display$(OUT_EXT): display.cpp
g++ -o '$@' '$<' -lm -lSDL2
clean:
rm -rf obj_dir *'$(OUT_EXT)'
run: all
'$(VERILATOR_DIR)V$(RUN)'
move.v
module move(
input wire clock,
input wire reset,
input wire up,
input wire down,
input wire left,
input wire right,
output reg [1:0] x,
output reg [1:0] y
);
always @ (posedge clock) begin
if (reset == 1'b1) begin
x <= 0;
y <= 0;
end
else begin
if (up == 1'b1) begin
y <= y - 1;
end
if (down == 1'b1) begin
y <= y + 1;
end
if (left == 1'b1) begin
x <= x - 1;
end
if (right == 1'b1) begin
x <= x + 1;
end
end
end
endmodule
move.cpp
const char *help = "asdw: move | q: quit";
#include <cmath>
#include <cstdlib>
#include <time.h>
#include <SDL2/SDL.h>
#include "Vmove.h"
#include "verilated.h"
#include "fps.hpp"
#define WINDOW_WIDTH 512
#define RECTS_PER_WINDOW (4)
#define RECT_WIDTH (WINDOW_WIDTH / RECTS_PER_WINDOW)
#define FASTEST_TICK_PERIOD_S (1.0 / 4.0)
int main(int argc, char **argv) {
SDL_Event event;
SDL_Renderer *renderer;
SDL_Window *window;
double current_time_s, last_tick_time_s = 0.0;
unsigned int current_time, last_time = 0;
const Uint8 *keystate;
Verilated::commandArgs(argc, argv);
Vmove *top = new Vmove;
SDL_Init(SDL_INIT_TIMER | SDL_INIT_VIDEO);
SDL_CreateWindowAndRenderer(WINDOW_WIDTH, WINDOW_WIDTH, 0, &window, &renderer);
SDL_SetWindowTitle(window, help);
fps_init();
top->clock = 0;
top->eval();
top->reset = 1;
top->clock = 1;
top->eval();
while (1) {
current_time = SDL_GetTicks();
current_time_s = current_time / 1000.0;
/* Deal with keyboard input. */
while (SDL_PollEvent(&event) == 1) {
if (event.type == SDL_QUIT) {
goto quit;
} else if (event.type == SDL_KEYDOWN) {
switch(event.key.keysym.sym) {
case SDLK_q:
goto quit;
default:
break;
}
}
}
keystate = SDL_GetKeyboardState(NULL);
if (keystate[SDL_SCANCODE_ESCAPE]) {
top->reset = 1;
}
if (keystate[SDL_SCANCODE_A]) {
top->left = 1;
}
if (keystate[SDL_SCANCODE_D]) {
top->right = 1;
}
if (keystate[SDL_SCANCODE_W]) {
top->up = 1;
}
if (keystate[SDL_SCANCODE_S]) {
top->down = 1;
}
if (current_time != last_time) {
if (current_time_s - last_tick_time_s > FASTEST_TICK_PERIOD_S) {
/* Draw world. */
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 0);
SDL_RenderClear(renderer);
{
SDL_Rect rect;
rect.w = RECT_WIDTH;
rect.h = RECT_WIDTH;
rect.x = top->x * RECT_WIDTH;
rect.y = top->y * RECT_WIDTH;
SDL_SetRenderDrawColor(renderer, 255, 0, 0, 255);
SDL_RenderFillRect(renderer, &rect);
}
SDL_RenderPresent(renderer);
top->clock = 0;
top->eval();
top->clock = 1;
top->eval();
top->up = 0;
top->down = 0;
top->left = 0;
top->right = 0;
top->reset = 0;
/* Update time tracking. */
last_tick_time_s = current_time_s;
fps_update_and_print();
}
}
last_time = current_time;
}
quit:
top->final();
delete top;
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
return EXIT_SUCCESS;
}
display.cpp
/*
Test a simple virtual SDL display, without user input.
*/
#include <cstdlib>
#include <cmath>
#include <iostream>
#include <SDL2/SDL.h>
#define WINDOW_WIDTH 600
#define WINDOW_HEIGHT (WINDOW_WIDTH)
#define N_PIXELS_WIDTH 10
#define N_PIXELS_HEIGHT (N_PIXELS_WIDTH)
#define N_PIXELS (N_PIXELS_WIDTH * N_PIXELS_HEIGHT)
#define PIXEL_WIDTH (WINDOW_WIDTH / N_PIXELS_WIDTH)
#define PIXEL_HEIGHT (WINDOW_HEIGHT / N_PIXELS_HEIGHT)
#define MAX_COLOR 255
#define PI2 (2*(acos(-1.0)))
#define FREQ (0.05)
int main(int argc, char **argv, char **env) {
SDL_Event event;
SDL_Rect rect;
SDL_Renderer *renderer;
SDL_Window *window;
const unsigned int max_color_half = MAX_COLOR / 2;
int quit;
double current_time_s;
size_t cur, i , j;
unsigned int
bs[N_PIXELS],
current_time,
gs[N_PIXELS],
last_time,
rs[N_PIXELS],
val
;
quit = 0;
SDL_Init(SDL_INIT_TIMER | SDL_INIT_VIDEO);
SDL_CreateWindowAndRenderer(WINDOW_WIDTH, WINDOW_WIDTH, 0, &window, &renderer);
rect.w = PIXEL_WIDTH;
rect.h = PIXEL_HEIGHT;
last_time = SDL_GetTicks();
while (!quit) {
while (SDL_PollEvent(&event) == 1) {
if (event.type == SDL_QUIT) {
quit = 1;
}
}
current_time = SDL_GetTicks();
if (current_time != last_time) {
for (i = 0; i < N_PIXELS_WIDTH; ++i) {
for (j = 0; j < N_PIXELS_WIDTH; ++j) {
cur = j * N_PIXELS_WIDTH + i;
val = (1 + i) * (1 + j) * PI2 * FREQ * current_time / 1000.0;
rs[cur] = max_color_half * (1.0 + std::sin(1 * val));
gs[cur] = max_color_half * (1.0 + std::sin(2 * val));
bs[cur] = max_color_half * (1.0 + std::sin(3 * val));
}
}
}
for (i = 0; i < N_PIXELS_WIDTH; ++i) {
for (j = 0; j < N_PIXELS_WIDTH; ++j) {
cur = j *N_PIXELS_WIDTH + i;
SDL_SetRenderDrawColor(renderer, rs[cur], gs[cur], bs[cur], 255);
rect.x = i * PIXEL_WIDTH;
rect.y = j * PIXEL_HEIGHT;
SDL_RenderFillRect(renderer, &rect);
}
}
SDL_RenderPresent(renderer);
}
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
return EXIT_SUCCESS;
}
GitHub upstream.
Connectal connects software running on actual CPUs to RTL (BSV, which can link to VHDL and Verilog) on FPGAs or simulators. BSV is free for academic and research use and for open source projects. In any case, Connectal is open source and the software to simulator connection uses SystemVerilog DPI, which you could use in your project without using BSV.
Connectal has one example that displays the output from the FGPA/simulator on a display. It uses Qt to display on the computer monitor when simulating. From an FPGA it displays directly on an HDMI display.
CPUs simulated in Verilog or VHDL tend to be too slow for interactive use, but I have connected a CPU simulated with qemu to devices or accelerators in verilator or on FPGA. Performance of qemu is quite good. I think it would work for your purposes.
I added a plugin FpgaOps API so that the simulator or FPGA could handle CPU load/store instructions:
struct FpgaOps {
uint64_t (*read)(hwaddr addr);
void (*write)(hwaddr addr, uint64_t value);
void (*close)(void);
void *(*alloc_mem)(size_t size);
};
In my case, I used Connectal to implement the FpgaOps plugin. This code is under hw/riscv but is not specific to riscv, so it could be used with any processor architecture supported by qemu.
No need for anything too clever or customised to interact with your sim, provided you're willing to simulate a real hardware interface like a UART.
The Verilog simulation of my TTL CPU includes a (reasonably) accurate model of a UM245R UART, and the UART supports interactive IO via the Verilog file interface.
One file for input and the other for output; bidirectional.
I use this for interacting with the simulated hardware so that I can develop the software and test it via automated tests without having to mess with the hardware.
I even have CHIP8 games running on the simulated hardware and the CHIP8 GUI is drawn by sending control codes from the UART back to a graphics terminal.
The UART is here ...
https://github.com/Johnlon/spam-1/blob/master/verilog/uart/um245r.v
At some point I'll do a write up on it.

Piecemeal processing of a matrix - CUDA

OK, so let's say I have an (N x N) matrix that I would like to process. This matrix is quite large for my computer, and if I try to send it to the device all at once I get an 'out of memory' error.
So is there a way to send sections of the matrix to the device? One way I can see to do it is to copy portions of the matrix on the host, send these manageable portions from the host to the device, and then put them back together at the end.
Here is something I have tried, but the cudaMemcpy in the for loop returns error code 11, 'invalid argument.'
int h_N = 10000;
size_t h_size_m = h_N*sizeof(float);
h_A = (float*)malloc(h_size_m*h_size_m);
int d_N = 2500;
size_t d_size_m = d_N*sizeof(float);
InitializeMatrices(h_N);
int i;
int iterations = (h_N*h_N)/(d_N*d_N);
for( i = 0; i < iterations; i++ )
{
float* h_array_ref = h_A+(i*d_N*d_N);
cudasafe( cudaMemcpy(d_A, h_array_ref, d_size_m*d_size_m, cudaMemcpyHostToDevice), "cudaMemcpy");
cudasafe( cudaFree(d_A), "cudaFree(d_A)" );
}
What I'm trying to accomplish with the above code is this: instead of sending the entire matrix to the device, I simply send a pointer to a place within that matrix and reserve enough space on the device to do the work, and then with the next iteration of the loop move the pointer forward within the matrix, and so on.
Not only can you do this (assuming your problem is easily decomposed this way into sub-arrays), it can be a very useful thing to do for performance; once you get the basic approach you've described working, you can start using asynchronous memory copies and double-buffering to overlap some of the memory transfer time with the time spent computing what is already on-card.
But first, get the simple thing working. Below is a 1D example (multiplying a vector by a scalar and adding another scalar), but using a linearized 2D array would be the same; the key part is
CHK_CUDA( cudaMalloc(&xd, batchsize*sizeof(float)) );
CHK_CUDA( cudaMalloc(&yd, batchsize*sizeof(float)) );
tick(&gputimer);
int nbatches = 0;
for (int nstart=0; nstart < n; nstart+=batchsize) {
int size=batchsize;
if ((nstart + batchsize) > n) size = n - nstart;
CHK_CUDA( cudaMemcpy(xd, &(x[nstart]), size*sizeof(float), cudaMemcpyHostToDevice) );
blocksize = (size+nblocks-1)/nblocks;
cuda_saxpb<<<nblocks, blocksize>>>(xd, a, b, yd, size);
CHK_CUDA( cudaMemcpy(&(ycuda[nstart]), yd, size*sizeof(float), cudaMemcpyDeviceToHost) );
nbatches++;
}
gputime = tock(&gputimer);
CHK_CUDA( cudaFree(xd) );
CHK_CUDA( cudaFree(yd) );
You allocate the buffers at the start, and then loop through until you're done, each time doing the copy, starting the kernel, and then copying back. You free at the end.
The full code is
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <cuda.h>
#include <sys/time.h>
#include <math.h>
#define CHK_CUDA(e) {if (e != cudaSuccess) {fprintf(stderr,"Error: %s\n", cudaGetErrorString(e)); exit(-1);}}
__global__ void cuda_saxpb(const float *xd, const float a, const float b,
float *yd, const int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i<n) {
yd[i] = a*xd[i]+b;
}
return;
}
void cpu_saxpb(const float *x, float a, float b, float *y, int n) {
int i;
for (i=0;i<n;i++) {
y[i] = a*x[i]+b;
}
return;
}
int get_options(int argc, char **argv, int *n, int *s, int *nb, float *a, float *b);
void tick(struct timeval *timer);
double tock(struct timeval *timer);
int main(int argc, char **argv) {
int n=1000;
int nblocks=10;
int batchsize=100;
float a = 5.;
float b = -1.;
int err;
float *x, *y, *ycuda;
float *xd, *yd;
double abserr;
int blocksize;
int i;
struct timeval cputimer;
struct timeval gputimer;
double cputime, gputime;
err = get_options(argc, argv, &n, &batchsize, &nblocks, &a, &b);
if (batchsize > n) {
fprintf(stderr, "Resetting batchsize to size of vector, %d\n", n);
batchsize = n;
}
if (err) return 0;
x = (float *)malloc(n*sizeof(float));
if (!x) return 1;
y = (float *)malloc(n*sizeof(float));
if (!y) {free(x); return 1;}
ycuda = (float *)malloc(n*sizeof(float));
if (!ycuda) {free(y); free(x); return 1;}
/* run CPU code */
tick(&cputimer);
cpu_saxpb(x, a, b, y, n);
cputime = tock(&cputimer);
/* run GPU code */
/* only have to allocate once */
CHK_CUDA( cudaMalloc(&xd, batchsize*sizeof(float)) );
CHK_CUDA( cudaMalloc(&yd, batchsize*sizeof(float)) );
tick(&gputimer);
int nbatches = 0;
for (int nstart=0; nstart < n; nstart+=batchsize) {
int size=batchsize;
if ((nstart + batchsize) > n) size = n - nstart;
CHK_CUDA( cudaMemcpy(xd, &(x[nstart]), size*sizeof(float), cudaMemcpyHostToDevice) );
blocksize = (size+nblocks-1)/nblocks;
cuda_saxpb<<<nblocks, blocksize>>>(xd, a, b, yd, size);
CHK_CUDA( cudaMemcpy(&(ycuda[nstart]), yd, size*sizeof(float), cudaMemcpyDeviceToHost) );
nbatches++;
}
gputime = tock(&gputimer);
CHK_CUDA( cudaFree(xd) );
CHK_CUDA( cudaFree(yd) );
abserr = 0.;
for (i=0;i<n;i++) {
abserr += fabs(ycuda[i] - y[i]);
}
printf("Y = a*X + b, problemsize = %d\n", n);
printf("CPU time = %lg millisec.\n", cputime*1000.);
printf("GPU time = %lg millisec (done with %d batches of %d).\n",
gputime*1000., nbatches, batchsize);
printf("CUDA and CPU results differ by %lf\n", abserr);
free(x);
free(y);
free(ycuda);
return 0;
}
int get_options(int argc, char **argv, int *n, int *s, int *nb, float *a, float *b) {
const struct option long_options[] = {
{"nvals" , required_argument, 0, 'n'},
{"nblocks" , required_argument, 0, 'B'},
{"batchsize" , required_argument, 0, 's'},
{"a", required_argument, 0, 'a'},
{"b", required_argument, 0, 'b'},
{"help", no_argument, 0, 'h'},
{0, 0, 0, 0}};
int c;
int option_index;
int tempint;
while (1) {
c = getopt_long(argc, argv, "n:B:a:b:s:h", long_options, &option_index);
if (c == -1) break;
switch(c) {
case 'n': tempint = atoi(optarg);
if (tempint < 1 || tempint > 500000) {
fprintf(stderr,"%s: Cannot use number of points %s;\n Using %d\n", argv[0], optarg, *n);
} else {
*n = tempint;
}
break;
case 's': tempint = atoi(optarg);
if (tempint < 1 || tempint > 50000) {
fprintf(stderr,"%s: Cannot use number of points %s;\n Using %d\n", argv[0], optarg, *s);
} else {
*s = tempint;
}
break;
case 'B': tempint = atoi(optarg);
if (tempint < 1 || tempint > 1000 || tempint > *n) {
fprintf(stderr,"%s: Cannot use number of blocks %s;\n Using %d\n", argv[0], optarg, *nb);
} else {
*nb = tempint;
}
break;
case 'a': *a = atof(optarg);
break;
case 'b': *b = atof(optarg);
break;
case 'h':
puts("Calculates y[i] = a*x[i] + b on the GPU.");
puts("Options: ");
puts(" --nvals=N (-n N): Set the number of values in y,x.");
puts(" --batchsize=N (-s N): Set the number of values to transfer at a time.");
puts(" --nblocks=N (-B N): Set the number of blocks used.");
puts(" --a=X (-a X): Set the parameter a.");
puts(" --b=X (-b X): Set the parameter b.");
puts(" --niters=N (-I X): Set number of iterations to calculate.");
puts("");
return +1;
}
}
return 0;
}
void tick(struct timeval *timer) {
gettimeofday(timer, NULL);
}
double tock(struct timeval *timer) {
struct timeval now;
gettimeofday(&now, NULL);
return (now.tv_usec-timer->tv_usec)/1.0e6 + (now.tv_sec - timer->tv_sec);
}
Running this one gets:
$ ./batched-saxpb --nvals=10240 --batchsize=10240 --nblocks=20
Y = a*X + b, problemsize = 10240
CPU time = 0.072 millisec.
GPU time = 0.117 millisec (done with 1 batches of 10240).
CUDA and CPU results differ by 0.000000
$ ./batched-saxpb --nvals=10240 --batchsize=5120 --nblocks=20
Y = a*X + b, problemsize = 10240
CPU time = 0.066 millisec.
GPU time = 0.133 millisec (done with 2 batches of 5120).
CUDA and CPU results differ by 0.000000
$ ./batched-saxpb --nvals=10240 --batchsize=2560 --nblocks=20
Y = a*X + b, problemsize = 10240
CPU time = 0.067 millisec.
GPU time = 0.167 millisec (done with 4 batches of 2560).
CUDA and CPU results differ by 0.000000
The GPU time goes up in this case (we're doing more memory copies) but the answers stay the same.
Edited: The original version of this code had an option for running multiple iterations of the kernel for timing purposes, but that's unnecessarily confusing in this context so it's removed.
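As for the asynchronous-copy/double-buffering step mentioned at the top of this answer, the batched loop would change along these lines (a sketch only: it assumes x and ycuda are allocated as pinned host memory, reuses the cuda_saxpb kernel and the CHK_CUDA macro from above, and omits error checking on the asynchronous calls):
/* Two device buffers and two streams, so the copies for batch k+1 can
   overlap with the kernel still running on batch k. */
float *xd2[2], *yd2[2];
cudaStream_t stream[2];
for (int buf = 0; buf < 2; buf++) {
    CHK_CUDA( cudaMalloc(&xd2[buf], batchsize*sizeof(float)) );
    CHK_CUDA( cudaMalloc(&yd2[buf], batchsize*sizeof(float)) );
    CHK_CUDA( cudaStreamCreate(&stream[buf]) );
}
int buf = 0;
for (int nstart = 0; nstart < n; nstart += batchsize, buf ^= 1) {
    int size = batchsize;
    if ((nstart + batchsize) > n) size = n - nstart;
    int bs = (size + nblocks - 1) / nblocks;
    cudaMemcpyAsync(xd2[buf], &(x[nstart]), size*sizeof(float),
                    cudaMemcpyHostToDevice, stream[buf]);
    cuda_saxpb<<<nblocks, bs, 0, stream[buf]>>>(xd2[buf], a, b, yd2[buf], size);
    cudaMemcpyAsync(&(ycuda[nstart]), yd2[buf], size*sizeof(float),
                    cudaMemcpyDeviceToHost, stream[buf]);
}
cudaDeviceSynchronize();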
