accelerate pairwise force calculation in cuda C++ - performance

Recently I wrote a Molecular Dynamics code that calculates the ion-electron force using CUDA parallel computing.
The kernel is listed below:
__global__ void force_kernel(double *x, double *y, double *z, int N) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    while (i < N) {
        double dx, dy, dz, dr;
        double Fx = 0.0, Fy = 0.0, Fz = 0.0;
        for (int j = 0; j < N; j++) {
            dx = x[i] - x[j];
            dy = y[i] - y[j];
            dz = z[i] - z[j];
            dr = sqrt(dx*dx + dy*dy + dz*dz);
            dr = dr*dr*dr;
            Fx += k*q*q*dx/dr;   // k and q are constants defined elsewhere
            Fy += k*q*q*dy/dr;
            Fz += k*q*q*dz/dr;   // force = k*q^2*r/r^3, written in Cartesian coordinates
        }
        // rest of the code manipulates the force and is irrelevant to my question; keeping it short
        i += blockDim.x * gridDim.x;
    }
}
x, y, z are the positions of the particles, and dx, dy, dz are the x/y/z separations. Fx, Fy, Fz accumulated in the for loop are the total force acting on the i-th particle: you compute x[i]-x[j] (and likewise for y and z) and run through all j to find the total force, and the kernel handles all i in parallel.
I found this to be slow, and I know the GPU is reading the arrays from global memory. When I replace x[i] with a constant it becomes 10 times faster, because the value is then read from a register (L1 cache). My arrays are too big (more than 20,000 double-precision elements) to fit into registers. But can it still be made a little faster using other memory spaces? I know there is constant memory and shared memory, but I don't know how to implement them. I think x[i] sitting in global memory is what makes this slow, with all threads trying to read it at the same time. Is there any way to improve the speed?

Here is a basic version using shared memory to optimize the access pattern a bit.
#define KERNEL_BLOCKSIZE 256
__global__ void __launch_bounds__(KERNEL_BLOCKSIZE)
kernel(const double* x, const double* y, const double* z, int N,
double k, double q, double* fake_out)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
/*
* threads beyond the bound still participate in value fetching, so we cannot
* return early
*/
const bool active = i < N;
double xi, yi, zi;
if(active)
xi = x[i], yi = y[i], zi = z[i];
const double kqq = k * q * q;
double Fx = 0., Fy = 0., Fz = 0.;
__shared__ double xt[KERNEL_BLOCKSIZE];
__shared__ double yt[KERNEL_BLOCKSIZE];
__shared__ double zt[KERNEL_BLOCKSIZE];
for(int j = 0; j < N; j += blockDim.x) {
__syncthreads();
const int thread_j = j + threadIdx.x;
if(thread_j < N) {
xt[threadIdx.x] = x[thread_j];
yt[threadIdx.x] = y[thread_j];
zt[threadIdx.x] = z[thread_j];
}
__syncthreads();
for(int l = 0, M = min(KERNEL_BLOCKSIZE, N - j); l < M; ++l) {
const double dx = xi - xt[l], dy = yi - yt[l], dz = zi - zt[l];
// 1 / sqrt(dx*dx + dy*dy + dz*dz)
const double rnorm = rnorm3d(dx, dy, dz);
const double dr = rnorm * rnorm * rnorm;
const double scale = kqq * dr;
Fx += scale * dx;
Fy += scale * dy;
Fz += scale * dz;
}
}
if(active)
fake_out[i] = norm3d(Fx, Fy, Fz);
}
It's nothing fancy and it doesn't solve the inherent issues with the O(N²) runtime. I made the following changes:
Get rid of the while loop. The loop counter was declared as int i, and the maximum grid dimension on current CUDA devices is 2^31-1, so we can always launch a grid large enough that each thread handles exactly one i.
Given the quadratic runtime, we have no chance of ever needing such a huge grid anyway. But if we did have a problem that is larger, we could just launch multiple kernels operating on subsets.
Use shared memory to buffer blocks of positions. I picked 256 as a fixed block size; that tends to work well. 512 is another size worth experimenting with.
The whole dr calculation can be folded into a single predefined math function (rnorm3d).
To get something that at least compiles into reasonable code, I added an output.
Double buffering
We can reduce the number of __syncthreads() that are required by using double buffering. However, that doubles the shared memory usage. Platforms that have only 64 kiB of shared memory will suffer limited occupancy. It requires benchmarking to see which version works better.
__global__ void __launch_bounds__(KERNEL_BLOCKSIZE)
kernel_dbuf(const double* x, const double* y, const double* z, int N,
double k, double q, double* fake_out)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const bool active = i < N;
double xi, yi, zi;
if(active)
xi = x[i], yi = y[i], zi = z[i];
const double kqq = k * q * q;
double Fx = 0., Fy = 0., Fz = 0.;
__shared__ double xt[2][KERNEL_BLOCKSIZE];
__shared__ double yt[2][KERNEL_BLOCKSIZE];
__shared__ double zt[2][KERNEL_BLOCKSIZE];
int dbuf = 0;
for(int j = 0; j < N; dbuf ^= 1, j += blockDim.x) {
const int thread_j = j + threadIdx.x;
if(thread_j < N) {
xt[dbuf][threadIdx.x] = x[thread_j];
yt[dbuf][threadIdx.x] = y[thread_j];
zt[dbuf][threadIdx.x] = z[thread_j];
}
__syncthreads();
for(int l = 0, M = min(KERNEL_BLOCKSIZE, N - j); l < M; ++l) {
const double dx = xi - xt[dbuf][l];
const double dy = yi - yt[dbuf][l];
const double dz = zi - zt[dbuf][l];
// 1 / sqrt(dx*dx + dy*dy + dz*dz)
const double rnorm = rnorm3d(dx, dy, dz);
const double dr = rnorm * rnorm * rnorm;
const double scale = kqq * dr;
Fx += scale * dx;
Fy += scale * dy;
Fz += scale * dz;
}
}
if(active)
fake_out[i] = norm3d(Fx, Fy, Fz);
}
Launch the kernel like this:
__host__ void
launch(const double* x, const double* y, const double* z, int N,
double k, double q, double* fake_out, cudaStream_t stream)
{
const int numBlocks = (N + KERNEL_BLOCKSIZE - 1) / KERNEL_BLOCKSIZE;
kernel<<<numBlocks, KERNEL_BLOCKSIZE, 0, stream>>>(x, y, z, N, k, q, fake_out);
}
Other thoughts
People have already commented on the inherent inefficiency of the algorithm
I guess there is a good reason why k and q are separate variables and you don't just pass a precomputed k * q * q to the kernel
Using doubles should always be a last resort when computing on a GPU, in my opinion. Possible avenues to reduce the precision, at least for parts of the algorithm:
Replace the dr computation with one that is less prone to overflows. Like this:
float scale = 1.f / max(max(abs(dx), abs(dy)), abs(dz));
float rnorm = rnorm3df(dx * scale, dy * scale, dz * scale) * scale;
float dr = rnorm * rnorm * rnorm;
Use Kahan summation for Fx, Fy, Fz (a sketch follows after this list)
Use double only for Fx, Fy, Fz but not x, y, z positions or other computations
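For reference, here is a minimal sketch of the Kahan idea; the helper name is mine and it is not part of the code above. The trick is to carry a compensation value per accumulator that captures the low-order bits lost in each addition. Note that aggressive floating-point optimizations (reassociation) can partially defeat it, so it is worth checking the generated code.
__device__ __forceinline__ void kahan_add(double term, double &sum, double &comp)
{
    const double y = term - comp;   // apply the compensation from previous additions
    const double t = sum + y;       // rounded new sum
    comp = (t - sum) - y;           // recover what was lost in this addition
    sum = t;
}

// Inside the inner loop of the shared-memory kernel it would be used as:
//   double cFx = 0., cFy = 0., cFz = 0.;    // alongside Fx, Fy, Fz
//   ...
//   kahan_add(scale * dx, Fx, cFx);
//   kahan_add(scale * dy, Fy, cFy);
//   kahan_add(scale * dz, Fz, cFz);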

Related

Is there any optimization function in Rcpp

The following is my Rcpp code, and I want to minimize the objective function logtpoi(x,theta) with respect to theta in R using 'nlminb'. I found it to be slow.
I have two questions:
Can anyone improve my Rcpp code? Thank you very much.
Are there any optimization functions in Rcpp? If yes, maybe I can use them directly in Rcpp. And how would I use them? Thank you very much.
My code:
#include <RcppArmadillo.h>
using namespace Rcpp;
using namespace arma;
// [[Rcpp::depends("RcppArmadillo")]]
// [[Rcpp::export]]
List dtpoi0(const IntegerVector& x, const NumericVector& theta){
//x is 3-dim vector; theta is a 6-dim parameter vector.
//be careful the order of theta1,...,theta6.
double theta1 = theta[0]; double theta2 = theta[1];
double theta3 = theta[2]; double theta4 = theta[3];
double theta5 = theta[4]; double theta6 = theta[5];
int x1 = x[0]; int x2 = x[1]; int x3 = x[2];
IntegerVector z1 = IntegerVector::create(x1,x2);
IntegerVector z2 = IntegerVector::create(x1,x3);
IntegerVector z3 = IntegerVector::create(x2,x3);
int s1 = min(z1); int s2 = min(z2); int s3 = min(z3);
arma::imat missy(1,3,fill::zeros); arma::irowvec ijk={0,0,0};
for (int i = 0; i <= s1; ++i) {
for (int j = 0; j <= s2; ++j) {
for (int k = 0; k <= s3; ++k) {
if ((i+j <= s1) & (i+k <= s2) & ( j+k <= s3))
{ ijk = {i,j,k};
missy = join_cols(missy,ijk);}
}
}
}
IntegerMatrix misy = as<IntegerMatrix>(wrap(missy));
IntegerVector u1 = IntegerVector::create(0);
IntegerVector u2 = IntegerVector::create(0);
IntegerVector u3 = IntegerVector::create(0);
IntegerVector u4 = IntegerVector::create(0);
IntegerVector u5 = IntegerVector::create(0);
IntegerVector u6 = IntegerVector::create(0);
int total = misy.nrow();
double fvalue = 0;
NumericVector part1(1); NumericVector part2(1);
NumericVector part3(1); NumericVector part4(1);
NumericVector part5(1); NumericVector part6(1);
for (int l = 1; l < total; ++l) {
u1 = IntegerVector::create(x1-misy(l,0)-misy(l,1));
u2 = IntegerVector::create(x2-misy(l,0)-misy(l,2));
u3 = IntegerVector::create(x3-misy(l,1)-misy(l,2));
u4 = IntegerVector::create(misy(l,0));
u5 = IntegerVector::create(misy(l,1));
u6 = IntegerVector::create(misy(l,2));
part1 = dpois(u1,theta1);
part2 = dpois(u2,theta2);
part3 = dpois(u3,theta3);
part4 = dpois(u4,theta4);
part5 = dpois(u5,theta5);
part6 = dpois(u6,theta6);
fvalue = fvalue + (part1*part2*part3*part4*part5*part6)[0]; }
return(List::create(Named("misy") = misy,Named("fvalue") = fvalue));
}
// [[Rcpp::export]]
NumericVector dtpoi(const IntegerMatrix& x, const NumericVector& theta){
//x is n*3 matrix, n is the number of observations.
int n = x.nrow();
NumericVector density(n);
for (int i = 0; i < n; ++i){
density(i) = dtpoi0(x.row(i),theta)["fvalue"];
}
return(density);
}
// [[Rcpp::export]]
double logtpoi0(const IntegerMatrix& x,const NumericVector theta){
// theta must be a 6-dimension parameter.
double nln = -sum(log( dtpoi(x,theta) + 1e-60 ));
if(arma::is_finite(nln)) {nln = nln;} else {nln = -1e10;}
return(nln);
}
Huge caveat ahead: I don’t really know Armadillo. But I’ve had a stab at it because the code looks interesting.
A few general things:
You don’t need to declare things before you assign them for the first time. In particular, it’s generally not necessary to declare vectors outside a loop if they’re only used inside the loop. Doing so is probably no more efficient than declaring them inside the loop. However, if your code is too slow it makes sense to carefully profile it and test whether that assumption holds.
Many of your declarations are just aliases for vector elements and don’t seem necessary.
Your z{1…3} vectors aren’t necessary. C++ has a min function to find the minimum of two elements.
dtpoi0 contains two main loops. Both of these have been heavily modified in my code:
The first loop iterates over many ks that are never used, due to the internal if that tests whether i + j exceeds s1. By pulling this check into the loop condition of j, we perform fewer k loops.
Your if uses & instead of &&. Like in R, using && rather than & causes short-circuiting. While this is probably not more efficient in this case, using && is idiomatic, whereas & causes head-scratching (my code uses and which is an alternative way of spelling && in C++; I prefer its readability).
The second loop effectively performs a matrix operation manually. I feel that there should be a way of expressing this purely with matrix operations — but as mentioned I’m not an Armadillo user. Still, my changes attempt to vectorise as much of this operation as possible (if nothing else this makes the code shorter). The dpois inner product is unfortunately still inside a loop.
The logic of logtpoi0 can be made more idiomatic and (IMHO) more readable by using the conditional operator instead of if.
const-correctness is a big deal in C++, since it weeds out accidental modifications. Use const liberally when declaring variables that are not supposed to change.
In terms of efficiency, the biggest hit when calling dtpoi or logtpoi0 is probably the conversion of missy to misy, which causes allocations and memory copies. Only convert to IntegerMatrix when necessary, i.e. when actually returning that value to R. For that reason, I’ve split dtpoi0 into two parts.
Another inefficiency is the fact that the first loop in dtpoi0 grows a matrix by appending rows. That’s a big no-no. However, rewriting the code to avoid this isn’t trivial (a possible sketch is shown at the end of this answer).
#include <algorithm>
#include <RcppArmadillo.h>
// [[Rcpp::depends("RcppArmadillo")]]
using namespace Rcpp;
using namespace arma;
imat dtpoi0_mat(const IntegerVector& x) {
const int s1 = std::min(x[0], x[1]);
const int s2 = std::min(x[0], x[2]);
const int s3 = std::min(x[1], x[2]);
imat missy(1, 3, fill::zeros);
for (int i = 0; i <= s1; ++i) {
for (int j = 0; j <= s2 and i + j <= s1; ++j) {
for (int k = 0; k <= s3 and i + k <= s2 and j + k <= s3; ++k) {
missy = join_cols(missy, irowvec{i, j, k});
}
}
}
return missy;
}
double dtpoi0_fvalue(const IntegerVector& x, const NumericVector& theta, imat& missy) {
double fvalue = 0.0;
ivec xx = as<ivec>(x);
missy.each_row([&](irowvec& v) {
const ivec u(join_cols(xx - v(uvec{0, 0, 1}) - v(uvec{1, 2, 2}), v));
double prod = 1;
for (int i = 0; i < u.n_elem; ++i) {
prod *= R::dpois(u[i], theta[i], 0);
}
fvalue += prod;
});
return fvalue;
}
double dtpoi0_fvalue(const IntegerVector& x, const NumericVector& theta) {
imat missy = dtpoi0_mat(x);
return dtpoi0_fvalue(x, theta, missy);
}
// [[Rcpp::export]]
List dtpoi0(const IntegerVector& x, const NumericVector& theta) {
imat missy = dtpoi0_mat(x);
const double fvalue = dtpoi0_fvalue(x, theta, missy);
return List::create(Named("misy") = as<IntegerMatrix>(wrap(missy)), Named("fvalue") = fvalue);
}
// [[Rcpp::export]]
NumericVector dtpoi(const IntegerMatrix& x, const NumericVector& theta) {
//x is n*3 matrix, n is the number of observations.
int n = x.nrow();
NumericVector density(n);
for (int i = 0; i < n; ++i){
density(i) = dtpoi0_fvalue(x.row(i), theta);
}
return density;
}
// [[Rcpp::export]]
double logtpoi0(const IntegerMatrix& x, const NumericVector theta) {
// theta must be a 6-dimension parameter.
const double nln = -sum(log(dtpoi(x, theta) + 1e-60));
return is_finite(nln) ? nln : -1e10;
}
Important: This compiles, but I can’t test its correctness. It’s entirely possible (even likely!) that my refactor introduced errors. It should therefore only be viewed as a solution sketch, and should by no means be copied and pasted into an application.
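Following up on the point about growing missy by appending rows: one possible (equally untested) way to avoid the repeated join_cols is to count the admissible triples first and then fill a preallocated matrix in a second pass. The function name is mine; the leading all-zero row is kept to mirror dtpoi0_mat above.
#include <algorithm>
#include <RcppArmadillo.h>
// [[Rcpp::depends("RcppArmadillo")]]
using namespace Rcpp;
using namespace arma;

imat dtpoi0_mat_prealloc(const IntegerVector& x) {
    const int s1 = std::min(x[0], x[1]);
    const int s2 = std::min(x[0], x[2]);
    const int s3 = std::min(x[1], x[2]);

    // First pass: count the rows (starting at 1 to keep the leading zero row).
    uword n_rows = 1;
    for (int i = 0; i <= s1; ++i)
        for (int j = 0; j <= s2 and i + j <= s1; ++j)
            for (int k = 0; k <= s3 and i + k <= s2 and j + k <= s3; ++k)
                ++n_rows;

    // Second pass: fill the preallocated matrix, no reallocations or copies.
    imat missy(n_rows, 3, fill::zeros);
    uword r = 1;
    for (int i = 0; i <= s1; ++i)
        for (int j = 0; j <= s2 and i + j <= s1; ++j)
            for (int k = 0; k <= s3 and i + k <= s2 and j + k <= s3; ++k, ++r) {
                missy(r, 0) = i;
                missy(r, 1) = j;
                missy(r, 2) = k;
            }
    return missy;
}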

Optimizing global memory load in CUDA

My task:
I have two matrices: A (18 x 4194304) and B (18 x 1024).
I have to take each 18-element vector from A, compute the distance to each 18-element vector from B, and find the minimum distance and its index.
My code :
__device__
void GetMin(float &dist, int &idx)
{
float dist2;
int idx2;
dist2 = __shfl_down_sync(0xFFFFFFFF, dist, 16, 32);
idx2 = __shfl_down_sync(0xFFFFFFFF, idx, 16);
if (dist > dist2)
{
dist = dist2;
idx = idx2;
}
dist2 = __shfl_down_sync(0xFFFFFFFF, dist, 8, 32);
idx2 = __shfl_down_sync(0xFFFFFFFF, idx, 8);
if (dist > dist2)
{
dist = dist2;
idx = idx2;
}
dist2 = __shfl_down_sync(0xFFFFFFFF, dist, 4, 32);
idx2 = __shfl_down_sync(0xFFFFFFFF, idx, 4);
if (dist > dist2)
{
dist = dist2;
idx = idx2;
}
dist2 = __shfl_down_sync(0xFFFFFFFF, dist, 2, 32);
idx2 = __shfl_down_sync(0xFFFFFFFF, idx, 2);
if (dist > dist2)
{
dist = dist2;
idx = idx2;
}
dist2 = __shfl_down_sync(0xFFFFFFFF, dist, 1, 32);
idx2 = __shfl_down_sync(0xFFFFFFFF, idx, 1);
if (dist > dist2)
{
dist = dist2;
idx = idx2;
}
}
__global__
void CalcMinDist_kernel(const float *A, const float *B, float *output, const int nNumPixels, int nNumImages)
{
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int ty = threadIdx.y;
int lane_id = tx % 32;
float dist = 0;
int idx = 0;
float fMin = 99999999;
int nMinIdx = -1;
for(int i = lane_id; i < 1024; i += 32)
{
dist = 0;
for(int j = 0; j < nNumImages; ++j)
{
int img_idx = blockIdx.x * ty + j * nNumPixels;
dist += (A[img_idx] - B[i * nNumImages + j]) *
(A[img_idx] - B[i * nNumImages + j]);
}
idx = i;
GetMin(dist, idx);
if(threadIdx.x == 0)
{
if(fMin > dist)
{
fMin = dist;
nMinIdx = idx;
}
}
}
if(threadIdx.x == 0)
{
output[blockIdx.x * ty] = nMinIdx;
}
}
Looking at the profiler, I'm memory bound, and do have ~90% occupancy. Is there any way to speed up this operation?
Let me know if I need to provide any other information.
Actually, I would look at the algorithm first. This is a geometric problem - treat it as such.
You should represent the B data using a different data structure, e.g. by clustering it or by building a partition structure (e.g. a k-d tree). That will let you avoid actually computing the distance to most B elements. (You could also consider a projection onto fewer dimensions, but the benefit of this may be more elusive.)
With respect to the access pattern - you would probably benefit from having consecutive threads work on consecutive elements of the 18-element-long vectors, rather than having each thread work on a complete 18-element-long vector individually. That would better fit the memory layout - right now, a warp read touches many elements which are at distance 18 from each other. If I understand the code correctly, anyway.
(I also think the GetMin() could avoid some of the index swaps, but that's not significant since you only perform very few of those.)
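Purely to illustrate the layout suggestion, here is a sketch (not a drop-in replacement): it assumes A has been stored transposed, so that A_T[j * nNumPixels + p] holds component j of pixel p, and it maps one thread to one A vector with a plain per-thread minimum instead of the warp reduction. The kernel name and that mapping are my assumptions.
__global__ void CalcMinDist_transposed(const float* __restrict__ A_T,
                                       const float* __restrict__ B,
                                       float* __restrict__ output,
                                       int nNumPixels, int nNumImages)
{
    const int p = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per A vector
    if (p >= nNumPixels) return;

    float fMin = 3.402823466e+38f;   // FLT_MAX
    int nMinIdx = -1;
    for (int i = 0; i < 1024; ++i) {                       // loop over all B vectors
        float dist = 0.f;
        for (int j = 0; j < nNumImages; ++j) {
            // consecutive threads (consecutive p) read consecutive addresses
            const float d = A_T[j * nNumPixels + p] - B[i * nNumImages + j];
            dist += d * d;
        }
        if (dist < fMin) { fMin = dist; nMinIdx = i; }
    }
    output[p] = (float)nMinIdx;   // index stored as float, as in the original
}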

What is the best solution to calculate and store maximum values using the GPU? My current one is not satisfactory

I have the following kernel that runs on my device:
__global__ void kernel1(float *Vb, int *sS, int *sE, int *bI, float *eR, int S, int K, int B, int N)
{
const unsigned long long int blockId = blockIdx.x //1D
+ blockIdx.y * gridDim.x //2D
+ gridDim.x * gridDim.y * blockIdx.z; //3D
const unsigned long long int threadId = blockId * blockDim.x + threadIdx.x;
int s = threadId / (K * B), k = (threadId - (s * K * B)) / B, b = threadId - (s * K * B) - (k * B);
if (s < S && k < K && b < B)
{
float sum = 0;
for (int t = sS[k]; t <= sE[k]; t++)
sum += eR[s * N + bI[b * N + t]];
if (sum > Vb[b * K + k])
{
Vb[b * K + k] = sum;
}
}
}
I basically calculate some sums based on the eR[SxN] and bI[BxN] matrices (that are mapped as simple 1D arrays) and on the sE[K] and sS[K] arrays, and I try to store the maximum value obtained for each (s,(k,b)) pair in the Vb[BxK] matrix (also mapped as a 1D array).
The problem I have is that in the end the Vb matrix does not contain the maximum values calculated for each pair. From what I can figure out, the problem arises because all GPU threads run in parallel (which is, of course, a good thing) and they all reach the "if (sum > Vb[b * K + k])" statement at the same time, so they all evaluate the Vb[b * K + k] element based on its original value. Because of this, the final value stored in Vb[b * K + k] is the sum calculated by the last thread that set the element (the last sum larger than the original element value), and not the overall maximum.
In order to correct for this I tried transforming Vb into a [SxKxB] cube in order to calculate the sums for all (s,k,b) pairs and then maxing out the elements for each s on the CPU. The kernel looks like this:
__global__ void kernel2(float *Vb, int *sS, int *sE, int *bI, float *eR, int S, int K, int B, int N)
{
const unsigned long long int blockId = blockIdx.x //1D
+ blockIdx.y * gridDim.x //2D
+ gridDim.x * gridDim.y * blockIdx.z; //3D
const unsigned long long int threadId = blockId * blockDim.x + threadIdx.x;
int s = threadId / (K * B), k = (threadId - (s * K * B)) / B, b = threadId - (s * K * B) - (k * B);
if (s < S && k < K && b < B)
{
float sum = 0;
for (int t = sS[k]; t <= sE[k]; t++)
sum += eR[s * N + bI[b * N + t]];
Vb[s * K * B + k * B + b] = sum;
}
}
This works fine for relatively small S, K and B, but when these are large (say S = 100000, K = 12, B = 1000), the memory requirements of the Vb matrix (about 4.5GB) far exceed the free device memory (about 600-700MB).
So my questions are:
1. Is there any way to make the first kernel work as expected (in the end obtaining the maximum sum)?
2. what do you think is the best approach for this problem when working with large sets of data?
a. splitting up the data into multiple chunks and running multiple instances of kernel2? (I think this dramatically increases the time necessary for the calculations)
b. investing in hardware with larger memory capabilities?
c. I have read that there is the possibility of directly using the host memory from the device (zero-copy memory) but I am not familiar with how it works right now. Might this be a solution? (so I can focus on learning and implementing it)
d. another approach (please suggest)...the simpler the better.
A positive and efficient solution to the first question would be much preferred.
My device is a GeForce GT 220 with 1GB total memory and compute capability 1.2 (latest driver). I am using CUDA5.5 in VS2012 on Windows 8.1 64-bit.
You can implement and use a float version of atomicMax() but performance may not be good -- especially on a CC 1.2 device. Might be worth a try though.
Borrowed from https://stackoverflow.com/a/17401122/442006:
__device__ static float atomicMax(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
Then:
__global__ void kernel1(float *Vb, int *sS, int *sE, int *bI, float *eR, int S, int K, int B, int N)
{
const unsigned long long int blockId = blockIdx.x //1D
+ blockIdx.y * gridDim.x //2D
+ gridDim.x * gridDim.y * blockIdx.z; //3D
const unsigned long long int threadId = blockId * blockDim.x + threadIdx.x;
int s = threadId / (K * B), k = (threadId - (s * K * B)) / B, b = threadId - (s * K * B) - (k * B);
if (s < S && k < K && b < B)
{
float sum = 0;
for (int t = sS[k]; t <= sE[k]; t++)
sum += eR[s * N + bI[b * N + t]];
atomicMax(Vb + b * K + k, sum);
}
}
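Not part of the original answer, but for completeness: a host-side sketch (the block size, the 2D grid split, and the function name are my assumptions) of how Vb could be initialised and kernel1 launched. Vb has to start below any possible sum, e.g. at -FLT_MAX, so that the first atomicMax for each element always succeeds; the 2D grid works around the 65535 limit on gridDim.x of compute capability 1.x devices.
#include <algorithm>
#include <cfloat>
#include <vector>
#include <cuda_runtime.h>

void run_kernel1(float *d_Vb, int *d_sS, int *d_sE, int *d_bI, float *d_eR,
                 int S, int K, int B, int N)
{
    // Initialise Vb (B*K elements) with -FLT_MAX so any sum can win.
    std::vector<float> init((size_t)B * K, -FLT_MAX);
    cudaMemcpy(d_Vb, init.data(), init.size() * sizeof(float),
               cudaMemcpyHostToDevice);

    // One thread per (s, k, b) triple, spread over a 2D grid because
    // gridDim.x is limited to 65535 on compute capability 1.x.
    const int threads = 256;
    const long long total = (long long)S * K * B;
    const long long blocks = (total + threads - 1) / threads;
    dim3 grid((unsigned)std::min(blocks, 65535LL),
              (unsigned)((blocks + 65534) / 65535));
    kernel1<<<grid, threads>>>(d_Vb, d_sS, d_sE, d_bI, d_eR, S, K, B, N);
}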

Kernel crashes while trying to do a simple value assignment

I am learning CUDA and still at the very beginner level. I am trying a simple assignment but my code crashes when I run it and I am not sure why. Any help would be appreciated.
EDIT: It crashes on cudaMemcpy, and in the Image structure pixelVal is of type int**. Is that the cause?
Original C++ code:
void Image::reflectImage(bool flag, Image& oldImage)
/*Reflects the Image based on users input*/
{
int rows = oldImage.N;
int cols = oldImage.M;
Image tempImage(oldImage);
for(int i = 0; i < rows; i++)
{
for(int j = 0; j < cols; j++)
tempImage.pixelVal[rows - (i + 1)][j] = oldImage.pixelVal[i][j];
}
oldImage = tempImage;
}
My CUDA kernel & code:
#define NTPB 512
__global__ void fliph(int* a, int* b, int r, int c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= r || j >= c)
return;
a[(r - i * c) + j] = b[i * c + j];
}
void Image::reflectImage(bool flag, Image& oldImage)
/*Reflects the Image based on users input*/
{
int rows = oldImage.N;
int cols = oldImage.M;
Image tempImage(oldImage);
if(flag == true) //horizontal reflection
{
//Allocate device memory
int* dpixels;
int* oldPixels;
int n = rows * cols;
cudaMalloc((void**)&dpixels, n * sizeof(int));
cudaMalloc((void**)&oldPixels, n * sizeof(int));
cudaMemcpy(dpixels, tempImage.pixelVal, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(oldPixels, oldImage.pixelVal, n * sizeof(int), cudaMemcpyHostToDevice);
int nblks = (n + NTPB - 1) / NTPB;
fliph<<<nblks, NTPB>>>(dpixels, oldPixels, rows, cols);
cudaMemcpy(tempImage.pixelVal, dpixels, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dpixels);
cudaFree(oldPixels);
}
oldImage = tempImage;
}
You have to create a 2D grid in order to process the image using the 2D indices i and j. In the current case, the kernel is processing only the first column of the image.
To create a 2D indexing mechanism, create a 2D block and 2D grid like this:
const int BLOCK_DIM = 16;
dim3 Block(BLOCK_DIM,BLOCK_DIM);
dim3 Grid;
Grid.x = (cols + Block.x - 1)/Block.x;
Grid.y = (rows + Block.y - 1)/Block.y;
fliph<<<Grid, Block>>>(dpixels, oldPixels, rows, cols);
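Regarding the EDIT in the question: if pixelVal really is an int** (an array of per-row pointers), then the cudaMemcpy calls above copy pointer values rather than pixel data, which is another likely source of the crash. A sketch (assuming that hypothetical layout and function name) of flattening into a contiguous host buffer before copying:
#include <vector>
#include <cuda_runtime.h>

// Copies a rows x cols image stored as an array of row pointers into a
// contiguous device buffer d_pixels (which must hold rows * cols ints).
void copyImageToDevice(int** pixelVal, int rows, int cols, int* d_pixels)
{
    std::vector<int> flat((size_t)rows * cols);
    for (int i = 0; i < rows; ++i)
        for (int j = 0; j < cols; ++j)
            flat[(size_t)i * cols + j] = pixelVal[i][j];
    cudaMemcpy(d_pixels, flat.data(), flat.size() * sizeof(int),
               cudaMemcpyHostToDevice);
}
The copy back to the host would do the reverse: cudaMemcpy into a flat buffer, then scatter into pixelVal[i][j].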

FIR filter in CUDA (as a 1D convolution)

I'm trying to implement a FIR (Finite Impulse Response) filter in CUDA. My approach is quite simple and looks somewhat like this:
#include <cuda.h>
__global__ void filterData(const float *d_data,
const float *d_numerator,
float *d_filteredData,
const int numeratorLength,
const int filteredDataLength)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sum = 0.0f;
if (i < filteredDataLength)
{
for (int j = 0; j < numeratorLength; j++)
{
// The first (numeratorLength-1) elements contain the filter state
sum += d_numerator[j] * d_data[i + numeratorLength - j - 1];
}
d_filteredData[i] = sum;
}
}
int main(void)
{
// (Skipping error checks to make code more readable)
int dataLength = 18042;
int filteredDataLength = 16384;
int numeratorLength= 1659;
// Pointers to data, filtered data and filter coefficients
// (Skipping how these are read into the arrays)
float *h_data = new float[dataLength];
float *h_filteredData = new float[filteredDataLength];
float *h_filter = new float[numeratorLength];
// Create device pointers
float *d_data = nullptr;
cudaMalloc((void **)&d_data, dataLength * sizeof(float));
float *d_numerator = nullptr;
cudaMalloc((void **)&d_numerator, numeratorLength * sizeof(float));
float *d_filteredData = nullptr;
cudaMalloc((void **)&d_filteredData, filteredDataLength * sizeof(float));
// Copy data to device
cudaMemcpy(d_data, h_data, dataLength * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_numerator, h_filter, numeratorLength * sizeof(float), cudaMemcpyHostToDevice);
// Launch the kernel
int threadsPerBlock = 256;
int blocksPerGrid = (filteredDataLength + threadsPerBlock - 1) / threadsPerBlock;
filterData<<<blocksPerGrid,threadsPerBlock>>>(d_data, d_numerator, d_filteredData, numeratorLength, filteredDataLength);
// Copy results to host
cudaMemcpy(h_filteredData, d_filteredData, filteredDataLength * sizeof(float), cudaMemcpyDeviceToHost);
// Clean up
cudaFree(d_data);
cudaFree(d_numerator);
cudaFree(d_filteredData);
// Do stuff with h_filteredData...
// Clean up some more
delete [] h_data;
delete [] h_filteredData;
delete [] h_filter;
}
The filter works, but as I'm new to CUDA programming I'm not sure how to optimize it.
A slight problem that I see is that dataLength, filteredDataLength, and numeratorLength are not known beforehand in the application I intend to use the filter in. Also, even though dataLength is a multiple of 32 in the above code, it is not guaranteed to be that in the final application.
When I compare my code above to ArrayFire, my code takes about three times longer to execute.
Does anyone have any ideas on how to speed things up?
EDIT: Have changed all filterLength to numeratorLength.
I can suggest the following to speed up your code:
Use shared memory: it is a small cache-like memory, but far faster than global memory. You can find more about it by looking for the __shared__ keyword in the CUDA documentation. For example, you can pre-fetch the filter numerator and big chunks of data into shared memory; this will significantly enhance your performance. You need to pay extra attention to data alignment in this case, as it really matters and can slow down your code.
Think about unrolling the for-loop of the numerator sum. You can check the reduce-vector example in the CUDA documentation.
You can also think about parallelizing the numerator loop itself. This can be done by adding an extra dimension (say 'y') to your thread block. You would need to make sum a shared vector as well, with dimension numeratorLength; the reduce-vector example also shows how to quickly take the sum of this vector at the end.
A minimal sketch of the first suggestion (staging the numerator in shared memory) follows.
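The kernel name and the TILE constant below are mine, not from the original code, and the kernel assumes it is launched with blockDim.x == TILE (e.g. 256 threads per block, as in the question); take it as a sketch rather than a tuned implementation.
#define TILE 256

__global__ void filterDataSharedNumerator(const float *d_data,
                                          const float *d_numerator,
                                          float *d_filteredData,
                                          const int numeratorLength,
                                          const int filteredDataLength)
{
    __shared__ float s_num[TILE];
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    float sum = 0.0f;

    // Walk over the numerator in TILE-sized chunks so any numeratorLength fits.
    for (int tile = 0; tile < numeratorLength; tile += TILE) {
        const int tileLen = min(TILE, numeratorLength - tile);
        if (threadIdx.x < tileLen)
            s_num[threadIdx.x] = d_numerator[tile + threadIdx.x];
        __syncthreads();

        if (i < filteredDataLength) {
            for (int j = 0; j < tileLen; j++) {
                // same indexing as the original kernel, with jj = tile + j
                const int jj = tile + j;
                sum += s_num[j] * d_data[i + numeratorLength - jj - 1];
            }
        }
        __syncthreads();   // keep the tile alive until every thread is done with it
    }

    if (i < filteredDataLength)
        d_filteredData[i] = sum;
}
It can be launched exactly like the original filterData kernel; staging chunks of d_data in shared memory as well (the second half of the first suggestion) would follow the same pattern.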
You are attempting to calculate the filter output by directly evaluating the 1D convolution through a CUDA kernel.
In the case when the filter impulse response duration is long, one thing you can do to evaluate the filtered input is to perform the calculations directly in the conjugate (frequency) domain using FFTs. Below I'm reporting a sample code using CUDA Thrust and the cuFFT library. It is a direct translation of the Matlab-based example reported at
Low-Pass Filtering by FFT Convolution
Let me point out that some optimizations are possible with this code, but I preferred to leave it as it is so that it can be more easily compared to its Matlab counterpart.
#include <stdio.h>
#include <math.h>
#include <cufft.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#define pi_f 3.14159265358979f // Greek pi in single precision
/****************/
/* SIN OPERATOR */
/****************/
class sin_op {
float fk_, Fs_;
public:
sin_op(float fk, float Fs) { fk_ = fk; Fs_ = Fs; }
__host__ __device__ float operator()(float x) const { return sin(2.f*pi_f*x*fk_/Fs_); }
};
/*****************/
/* SINC OPERATOR */
/*****************/
class sinc_op {
float fc_, Fs_;
public:
sinc_op(float fc, float Fs) { fc_ = fc; Fs_ = Fs; }
__host__ __device__ float operator()(float x) const
{
if (x==0) return (2.f*fc_/Fs_);
else return (2.f*fc_/Fs_)*sin(2.f*pi_f*fc_*x/Fs_)/(2.f*pi_f*fc_*x/Fs_);
}
};
/********************/
/* HAMMING OPERATOR */
/********************/
class hamming_op {
int L_;
public:
hamming_op(int L) { L_ = L; }
__host__ __device__ float operator()(int x) const
{
return 0.54-0.46*cos(2.f*pi_f*x/(L_-1));
}
};
/*********************************/
/* MULTIPLY CUFFTCOMPLEX NUMBERS */
/*********************************/
struct multiply_cufftComplex {
__device__ cufftComplex operator()(const cufftComplex& a, const cufftComplex& b) const {
cufftComplex r;
r.x = a.x * b.x - a.y * b.y;
r.y = a.x * b.y + a.y * b.x;
return r;
}
};
/********/
/* MAIN */
/********/
int main(){
// Signal parameters:
int M = 256; // signal length
const int N = 4;
float f[N] = { 440, 880, 1000, 2000 }; // frequencies
float Fs = 5000.; // sampling rate
// Generate a signal by adding up sinusoids:
thrust::device_vector<float> d_x(M,0.f); // pre-allocate 'accumulator'
thrust::device_vector<float> d_n(M); // discrete-time grid
thrust::sequence(d_n.begin(), d_n.end(), 0, 1);
thrust::device_vector<float> d_temp(M);
for (int i=0; i<N; i++) {
float fk = f[i];
thrust::transform(d_n.begin(), d_n.end(), d_temp.begin(), sin_op(fk,Fs));
thrust::transform(d_temp.begin(), d_temp.end(), d_x.begin(), d_x.begin(), thrust::plus<float>());
}
// Filter parameters:
int L = 257; // filter length
float fc = 600.f; // cutoff frequency
// Design the filter using the window method:
thrust::device_vector<float> d_hsupp(L);
thrust::sequence(d_hsupp.begin(), d_hsupp.end(), -(L-1)/2, 1);
thrust::device_vector<float> d_hideal(L);
thrust::transform(d_hsupp.begin(), d_hsupp.end(), d_hideal.begin(), sinc_op(fc,Fs));
thrust::device_vector<float> d_l(L);
thrust::sequence(d_l.begin(), d_l.end(), 0, 1);
thrust::device_vector<float> d_h(L);
thrust::transform(d_l.begin(), d_l.end(), d_h.begin(), hamming_op(L));
// h is our filter
thrust::transform(d_hideal.begin(), d_hideal.end(), d_h.begin(), d_h.begin(), thrust::multiplies<float>());
// --- Choose the next power of 2 greater than L+M-1
int Nfft = pow(2,(ceil(log2((float)(L+M-1))))); // or 2^nextpow2(L+M-1)
// Zero pad the signal and impulse response:
thrust::device_vector<float> d_xzp(Nfft,0.f);
thrust::device_vector<float> d_hzp(Nfft,0.f);
thrust::copy(d_x.begin(), d_x.end(), d_xzp.begin());
thrust::copy(d_h.begin(), d_h.end(), d_hzp.begin());
// Transform the signal and the filter:
cufftHandle plan;
cufftPlan1d(&plan, Nfft, CUFFT_R2C, 1);
thrust::device_vector<cufftComplex> d_X(Nfft/2+1);
thrust::device_vector<cufftComplex> d_H(Nfft/2+1);
cufftExecR2C(plan, (cufftReal*)thrust::raw_pointer_cast(d_xzp.data()), (cufftComplex*)thrust::raw_pointer_cast(d_X.data()));
cufftExecR2C(plan, (cufftReal*)thrust::raw_pointer_cast(d_hzp.data()), (cufftComplex*)thrust::raw_pointer_cast(d_H.data()));
thrust::device_vector<cufftComplex> d_Y(Nfft/2+1);
thrust::transform(d_X.begin(), d_X.end(), d_H.begin(), d_Y.begin(), multiply_cufftComplex());
cufftPlan1d(&plan, Nfft, CUFFT_C2R, 1);
thrust::device_vector<float> d_y(Nfft);
cufftExecC2R(plan, (cufftComplex*)thrust::raw_pointer_cast(d_Y.data()), (cufftReal*)thrust::raw_pointer_cast(d_y.data()));
getchar();
}
Besides my other answer, which I expect will be more convenient for convolution kernels with long duration, below I'm reporting a different implementation, which is more in line with the OP's initial attempt and which I expect will be more convenient for convolution kernels with short duration. Such an implementation is based on a hand-written kernel exploiting caching in shared memory. More details can be found in the book by D.B. Kirk and W.-m. W. Hwu
Programming Massively Parallel Processors, Second Edition: A Hands-on Approach
#include <stdio.h>
#include <stdlib.h>
#include "TimingGPU.cuh"
#include "Utilities.cuh"
#define RG 10
#define BLOCKSIZE 8
/****************/
/* CPU FUNCTION */
/****************/
void h_convolution_1D(const float * __restrict__ h_Signal, const float * __restrict__ h_ConvKernel, float * __restrict__ h_Result_CPU,
const int N, const int K) {
for (int i = 0; i < N; i++) {
float temp = 0.f;
int N_start_point = i - (K / 2);
for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
temp += h_Signal[N_start_point+ j] * h_ConvKernel[j];
}
h_Result_CPU[i] = temp;
}
}
/********************/
/* BASIC GPU KERNEL */
/********************/
__global__ void d_convolution_1D_basic(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU,
const int N, const int K) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
float temp = 0.f;
int N_start_point = i - (K / 2);
for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
temp += d_Signal[N_start_point+ j] * d_ConvKernel[j];
}
d_Result_GPU[i] = temp;
}
/***************************/
/* GPU KERNEL WITH CACHING */
/***************************/
__global__ void d_convolution_1D_caching(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU,
const int N, const int K) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float d_Tile[BLOCKSIZE];
d_Tile[threadIdx.x] = d_Signal[i];
__syncthreads();
float temp = 0.f;
int N_start_point = i - (K / 2);
for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
if ((N_start_point + j >= blockIdx.x * blockDim.x) && (N_start_point + j < (blockIdx.x + 1) * blockDim.x))
// --- The signal element is in the tile loaded in the shared memory
temp += d_Tile[threadIdx.x + j - (K / 2)] * d_ConvKernel[j];
else
// --- The signal element is not in the tile loaded in the shared memory
temp += d_Signal[N_start_point + j] * d_ConvKernel[j];
}
d_Result_GPU[i] = temp;
}
/********/
/* MAIN */
/********/
int main(){
const int N = 15; // --- Signal length
const int K = 5; // --- Convolution kernel length
float *h_Signal = (float *)malloc(N * sizeof(float));
float *h_Result_CPU = (float *)malloc(N * sizeof(float));
float *h_Result_GPU = (float *)malloc(N * sizeof(float));
float *h_ConvKernel = (float *)malloc(K * sizeof(float));
float *d_Signal; gpuErrchk(cudaMalloc(&d_Signal, N * sizeof(float)));
float *d_Result_GPU; gpuErrchk(cudaMalloc(&d_Result_GPU, N * sizeof(float)));
float *d_ConvKernel; gpuErrchk(cudaMalloc(&d_ConvKernel, K * sizeof(float)));
for (int i=0; i < N; i++) { h_Signal[i] = (float)(rand() % RG); }
for (int i=0; i < K; i++) { h_ConvKernel[i] = (float)(rand() % RG); }
gpuErrchk(cudaMemcpy(d_Signal, h_Signal, N * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_ConvKernel, h_ConvKernel, K * sizeof(float), cudaMemcpyHostToDevice));
h_convolution_1D(h_Signal, h_ConvKernel, h_Result_CPU, N, K);
d_convolution_1D_basic<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_Signal, d_ConvKernel, d_Result_GPU, N, K);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}
printf("Test basic passed\n");
d_convolution_1D_caching<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_Signal, d_ConvKernel, d_Result_GPU, N, K);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}
printf("Test caching passed\n");
return 0;
}
