I'm working on stereo vision and I have a problem with the sort from the Thrust library. When I use it inside my kernel function, the application runs but goes wrong, because not all of the threads requested in my <<< >>> launch actually execute; when I remove the sort everything runs (but the result isn't good). I have searched for an alternative but have found nothing except Thrust for GPU sorting. Thanks.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <iostream>
#include <time.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_functions.h>
#include <device_launch_parameters.h>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/core/cuda.hpp>
#include <algorithm>
#include <functional>
#include <array>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/generate.h>
#include <thrust/equal.h>
#include <thrust/sequence.h>
#include <thrust/for_each.h>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
const int correlationWindow = 81;
const int widthWindow = (int)sqrt((float)correlationWindow);
const int searchWindow = 52;
__constant__ int widthWindow2 = 9;
__device__
void makeVector(float *mat, float *vec, int col, int x, int y) {
int ind = 0;
for (int i = x; i < x + widthWindow2; i++) {
for (int j = y; j < y + widthWindow2; j++) {
vec[ind] = mat[col * i + j];
ind++;
}
}
}
__device__
void disparityUpdate2(int i, int j, int col, int distance, float *d_disparity) {
d_disparity[col * i + j] = ((255 / searchWindow) * distance);
}
void resize(float *d_disparity, Mat &disparity) {
for (int i = 0; i < disparity.rows; i++) {
for (int j = 0; j < disparity.cols; j++) {
int ind = disparity.cols * i + j;
disparity.at<float>(i, j) = d_disparity[ind];
}
}
}
__global__
void computeSMAD2(int minX, float *d_mL, float *d_mR, float *dif, float *windowL, float *windowR, float *d_disparity, int colmLO, int colmL, int seachWindow) {
int mini;
int pOiX(threadIdx.x + minX); // + minX
int pOiY(blockIdx.x + minX);
int newPoIx(max(minX, pOiX - searchWindow));
int newPoIy(pOiY);
/*int pOiX(pox);
int pOiY(poy);
int newPoIx(npox);
int newPoIy(npoy); */
int minPoIx(newPoIx);
int smad = 0;
int bMax = (int)(correlationWindow / 2);
makeVector(d_mL, windowL, colmL, pOiY, pOiX); // NOTE: row / column order
makeVector(d_mR, windowR, colmL, newPoIy, newPoIx);
for (int h = 0; h < correlationWindow; h++) {
dif[h] = windowL[h] - windowR[h];
}
thrust::sort(thrust::seq, dif, dif + correlationWindow);
int median = dif[(correlationWindow - 1) / 2];
for (int h = 0; h < correlationWindow; h++) {
dif[h] = abs(dif[h] - median);
}
thrust::sort(thrust::seq, dif, dif + correlationWindow);
for (int i = 0; i <= bMax - 1; i++) {
smad = smad + pow(dif[i], 2);
}
mini = smad;
newPoIx++;
for (int i = newPoIx; i <= pOiX; i++) {
smad = 0;
makeVector(d_mR, windowR, colmL, newPoIy, i);
for (int h = 0; h < correlationWindow; h++) {
dif[h] = windowL[h] - windowR[h];
}
thrust::sort(thrust::seq, dif, dif + correlationWindow);
median = dif[(correlationWindow - 1) / 2];
for (int h = 0; h < correlationWindow; h++) {
dif[h] = abs(dif[h] - median);
}
thrust::sort(thrust::seq, dif, dif + correlationWindow);
for (int j = 0; j <= bMax - 1; j++) {
if (smad < mini) {
smad = smad + pow(dif[j], 2);
}
else {
break;
}
}
if (smad < mini) {
mini = smad;
minPoIx = i;
}
}
int distance = pOiX - minPoIx;
d_disparity[colmLO * (pOiY - minX) + (pOiX - minX)] = ((255 / searchWindow) * distance);
}
Mat SMAD2(int minX, Mat mLO, Mat mRO) {
Mat mL = Mat::zeros(mLO.rows + 2 * minX, mLO.cols + 2 * minX, CV_32FC1);
Mat mR = Mat::zeros(mLO.rows + 2 * minX, mLO.cols + 2 * minX, CV_32FC1);
Mat disparity = Mat::zeros(mRO.rows, mRO.cols, CV_32FC1);
mLO.copyTo(mL.rowRange(minX, mL.rows - minX).colRange(minX, mL.cols - minX));
mRO.copyTo(mR.rowRange(minX, mL.rows - minX).colRange(minX, mL.cols - minX));
float *d_mL, *windowL;
float *d_mR, *windowR;
float *dif;
float *d_disparity;
cudaMallocManaged(&dif, correlationWindow * sizeof(float));
cudaMallocManaged(&windowL, correlationWindow * sizeof(float));
cudaMallocManaged(&windowR, correlationWindow * sizeof(float));
cudaMallocManaged(&d_mL, mL.rows * mL.cols * sizeof(float));
cudaMallocManaged(&d_mR, mR.rows * mR.cols * sizeof(float));
cudaMallocManaged(&d_disparity, disparity.rows * disparity.cols * sizeof(float));
/*dif = new float[correlationWindow];
windowL = new float[correlationWindow];
windowR = new float[correlationWindow];
d_mL = new float[mL.rows * mL.cols];
d_mR = new float[mR.rows * mR.cols];
d_disparity = new float[disparity.rows * disparity.cols]; */
memcpy(d_mL, mL.data, mL.rows * mL.cols * sizeof(float));
memcpy(d_mR, mR.data, mR.rows * mR.cols * sizeof(float));
memcpy(d_disparity, disparity.data, disparity.rows * disparity.cols * sizeof(float));
int ind = 0;
int colmL = mL.cols;
int colmLO = mLO.cols;
int npox, npoy;
clock_t begin = clock();
computeSMAD2 <<<70, 50>>>(minX, d_mL, d_mR, dif, windowL, windowR, d_disparity, mLO.cols, mL.cols, searchWindow);
//computeSMAD2 <<<mLO.rows, mLO.cols>>>(minX, d_mL, d_mR, dif, windowL, windowR, d_disparity, mLO.cols, mL.cols, searchWindow);
cudaDeviceSynchronize();
/*
for (int poy = minX; poy < mR.rows - minX; poy++)
{
for (int pox = minX; pox < mR.cols - minX; pox++)
{
//////////////////////// LEFT TO RIGHT
npox = max(minX, pox - searchWindow);
npoy = poy;
computeSMAD2(minX, d_mL, d_mR, disparity, d_disparity, windowL, windowR, dif, colmLO, colmL, pox, poy, npox, npoy);
ind++;
}
} */
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
cout << "time " << elapsed_secs << endl;
cudaFree(dif);
cudaFree(windowL);
cudaFree(windowR);
cudaFree(d_mL);
cudaFree(d_mR);
cudaFree(d_disparity);
return disparity;
}
int main(int argc, char* argv[]) {
int minX = (int)floor((float)(widthWindow / 2));
Mat mL2 = Mat::ones(70, 50, CV_8UC1) * 255;
Mat mR2 = Mat::zeros(70, 50, CV_8UC1);
Mat disparity = SMAD2(minX, mL2, mR2);
disparity.convertTo(disparity, CV_8UC1);
Mat im;
hconcat(mL2, mR2, im);
hconcat(im, disparity, im);
imshow("test", im);
waitKey(0);
return 0;
}
Your code is wrong. As far as I can see, all threads sort the same dif array (and fill the same windowL and windowR buffers), which causes a memory violation.
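To make the idea concrete, here is a minimal, untested sketch (only the beginning of the kernel is shown, and it keeps your makeVector and constants unchanged): since correlationWindow is a compile-time constant, each thread can keep its own dif / windowL / windowR buffers in local memory, so they no longer need to be kernel arguments or cudaMallocManaged allocations, and thrust::sort with the thrust::seq policy then operates only on thread-private data.
__global__ void computeSMAD2_local(int minX, float *d_mL, float *d_mR, float *d_disparity, int colmLO, int colmL) {
    // Thread-private scratch buffers (correlationWindow is a compile-time constant).
    float dif[correlationWindow];
    float windowL[correlationWindow];
    float windowR[correlationWindow];
    int pOiX = threadIdx.x + minX;
    int pOiY = blockIdx.x + minX;
    int newPoIx = max(minX, pOiX - searchWindow);
    makeVector(d_mL, windowL, colmL, pOiY, pOiX);
    makeVector(d_mR, windowR, colmL, pOiY, newPoIx);
    for (int h = 0; h < correlationWindow; h++) {
        dif[h] = windowL[h] - windowR[h];
    }
    // Sequential sort of a thread-private array: no race between threads.
    thrust::sort(thrust::seq, dif, dif + correlationWindow);
    // ... rest of the SMAD computation unchanged, using the private buffers ...
}
With 81 floats per buffer this is roughly 1 KB of local memory per thread, which the compiler will likely spill to off-chip local memory; it is correct, just not necessarily fast.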
Related
I'm relatively new to Thrust and I'm trying to perform a segmented scan. Here is my code, which you should be able to run as-is:
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <chrono>
#include <iostream>
// Sequential scan for CPU
float* test_seqScan(float* in, int s, int m) {
float* out = new float[s * m];
for (unsigned int i = 0; i < s; i++) {
out[i * m] = 0;
}
for (unsigned int i = 0; i < s; i++) {
for (unsigned int j = 1; j < m; j++) {
out[i * m + j] = out[i * m + j - 1] + in[i * m + j - 1];
}
}
return out;
}
void test_sumScan(thrust::device_vector<float> dev_in, thrust::device_vector<int> dev_keys, int s, int m) {
// Allocate device memory for output
thrust::device_vector<float> dev_out(s * m);
thrust::exclusive_scan_by_key(thrust::device, dev_keys.begin(), dev_keys.end(), dev_in.begin(), dev_out.begin());
}
int main(){
int s = 100;
int m = 100000;
float* seq_in = new float[s * m];
for (int i = 0; i < s; i++) {
for (int j = 0; j < m; j++) {
seq_in[i * m + j] = j + 1;
}
}
thrust::host_vector<float> par_in(s * m);
for (int i = 0; i < s; i++) {
for (int j = 0; j < m; j++) {
par_in[i * m + j] = j + 1;
}
}
thrust::host_vector<int> keys(s * m);
for (int i = 0; i < s; i++) {
for (int j = 0; j < m; j++) {
keys[i * m + j] = i;
}
}
thrust::device_vector<float> dev_in = par_in;
thrust::device_vector<int> dev_keys = keys;
auto t1 = std::chrono::high_resolution_clock::now();
test_seqScan(seq_in, s, m);
auto t2 = std::chrono::high_resolution_clock::now();
auto duration1 = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
std::cout << "Sequential duration: " << duration1 << "\n\n";
auto t3 = std::chrono::high_resolution_clock::now();
test_sumScan(dev_in, dev_keys, s, m);
auto t4 = std::chrono::high_resolution_clock::now();
auto duration2 = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
std::cout << "Parallel duration: " << duration2 << "\n\n";
}
My issue is that both these snippets of code take exactly the same amount of time to run regardless of how small or large I set s and m. I assume that I'm doing something wrong, but I don't know what; can anyone point out the issue?
I am trying to implement a function in Rcpp that takes a matrix as input and calculates quantiles, as specified by the user, for each row of said matrix. Since I want to use OpenMP, I tried to do it using RcppEigen due to thread-safety concerns.
One reason this looks a bit complicated is that, to calculate quantiles efficiently, I tried to mimic this approach (finding quartiles, first answer), but allow for user input. So essentially I create a vector with indices corresponding to the quantiles in the first step. In the second step I try to access the corresponding values in the for loop.
This is the code I was trying:
// // -*- mode: C++; c-indent-level: 4; c-basic-offset: 4; indent-tabs-mode: nil; -*-
// [[Rcpp::depends(RcppEigen)]]
#include <RcppEigen.h>
// [[Rcpp::plugins(openmp)]]
#ifdef _OPENMP
#include <omp.h>
#endif
// [[Rcpp::plugins(cpp11)]]
#include <random>
// [[Rcpp::export]]
SEXP summaryParC(const Eigen::MatrixXd x,
const Eigen::VectorXd quantiles,
int nrow, int ncol, const int ncores)
{
const int no_quantiles = quantiles.size();
Eigen::MatrixXd result(nrow, no_quantiles);
// this part is just to give me a vector of indices I need later on in the for loop
//-----------------------------------------------
Eigen::VectorXi indices(no_quantiles +1);
indices[0] = -1;
for (int k=0; k<no_quantiles; k++){
if (quantiles[k] < 0.5){
indices[k+1] = floor(quantiles[k] * (ncol-1));
} else {
indices[k+1] = ceil(quantiles[k] * (ncol-1));
}
}
//-----------------------------------------------
#pragma omp parallel num_threads(ncores)
{
#pragma omp for
for(int i = 0; i < nrow; i++){
// I am trying to convert it into a vector so I can sort it
Eigen::VectorXd v = (x.row(i));
auto * ptr = v; // this fails
// here I want to use the pointer to access the n-th element of the vector
for(int q=0; q<no_quantiles; q++){ //quantiles
std::nth_element(ptr + indices[q] + 1, ptr + indices[q+1], ptr + ncol);
result(i,q) = *(ptr + indices[q+1]);
}
}
}
return Rcpp::wrap(result);
}
The reason I wanted to define my own pointer is that Eigen::VectorXd v has nothing like v.begin(). Without OpenMP I would simply define x as a NumericMatrix and v as a NumericVector and everything works fine, but with OpenMP I cannot rely on that being thread-safe, can I?
This works for smaller datasets, but crashes when used on a larger matrix:
// [[Rcpp::export]]
SEXP summaryC(NumericMatrix x,
NumericVector quantiles,
int nrow, int ncol, const int ncores)
{
const int no_quantiles = quantiles.size();
NumericMatrix result(nrow, no_quantiles);
int indices[no_quantiles +1];
//-----------------------------------------------
indices[0] = -1;
for (int k=0; k<no_quantiles; k++){
if (quantiles[k] < 0.5){
indices[k+1] = floor(quantiles[k] * (ncol-1));
} else {
indices[k+1] = ceil(quantiles[k] * (ncol-1));
}
}
//-----------------------------------------------
#pragma omp parallel num_threads(ncores)
{
#pragma omp for
for(int i = 0; i < nrow; i++){
// converting it into a vector so I can sort it
NumericVector v = (x.row(i));
for(int q=0; q<no_quantiles; q++){ //quantiles
std::nth_element(v.begin() + indices[q] + 1, v.begin() + indices[q+1], v.end());
result(i,q) = *(v.begin() + indices[q+1]);
}
}
}
return Rcpp::wrap(result);
}
Thank you very much!
Update:
I implemented Ralf Stubner's approach. The pointer works fine as far as I can tell. (Unfortunately, R still aborts the session when I try to run it. As Dirk Eddelbuettel pointed out, using a pointer does not solve the problem of accessing R memory; see the sketch after this code block for what I think that implies.)
// [[Rcpp::export]]
SEXP summaryParC(Eigen::MatrixXd x,
const Eigen::VectorXd quantiles,
int nrow, int ncol, const int ncores)
{
const int no_quantiles = quantiles.size();
Eigen::MatrixXd result(nrow, no_quantiles);
Eigen::VectorXi indices(no_quantiles +1);
indices[0] = -1;
for (int k=0; k<no_quantiles; k++){
if (quantiles[k] < 0.5){
indices[k+1] = floor(quantiles[k] * (ncol-1));
} else {
indices[k+1] = ceil(quantiles[k] * (ncol-1));
}
}
#pragma omp parallel num_threads(ncores)
{
#pragma omp for
for(int i = 0; i < nrow; i++){
Eigen::VectorXd v = (x.row(i));
double * B = v.data();
double * E = B + nrow;
for(int q=0; q<no_quantiles; q++){ //quantiles
std::nth_element(B + indices[q] + 1, B + indices[q+1], E);
result(i,q) = *(B + indices[q+1]);
}
}
}
return Rcpp::wrap(result);
}
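For reference, here is a sketch of what I understand Dirk's point to imply: touch R objects only on the main thread and let the parallel region work purely on plain C++ buffers. This is untested; the function name summaryCopyC and the hard-coded indices (which assume ncol = 1000, as in the example below) are only for illustration.
// [[Rcpp::plugins(openmp)]]
// [[Rcpp::plugins(cpp11)]]
#include <Rcpp.h>
#include <algorithm>
#include <vector>
// [[Rcpp::export]]
Rcpp::NumericMatrix summaryCopyC(Rcpp::NumericMatrix x, int nrow, int ncol, const int ncores)
{
    // Deep-copy the R matrix into plain C++ memory on the main thread (column-major layout).
    std::vector<double> buf(x.begin(), x.end());
    std::vector<double> res(nrow * 5);
    int indices[6] = {-1, 0, 249, 500, 750, 999}; // assumes ncol == 1000
    #pragma omp parallel num_threads(ncores)
    {
        std::vector<double> v(ncol); // thread-private scratch row
        #pragma omp for
        for (int i = 0; i < nrow; i++) {
            for (int j = 0; j < ncol; j++) v[j] = buf[i + j * nrow]; // gather row i
            for (int q = 0; q < 5; q++) {
                std::nth_element(v.begin() + indices[q] + 1, v.begin() + indices[q+1], v.end());
                res[i + q * nrow] = v[indices[q+1]];
            }
        }
    }
    // Copy the results back into an R matrix on the main thread.
    Rcpp::NumericMatrix result(nrow, 5);
    std::copy(res.begin(), res.end(), result.begin());
    return result;
}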
2nd update: here is a cleaner example of the underlying problem. I am aware that using R structures is problematic with OpenMP, but maybe the example can lead to a better understanding of the underlying reasons.
// [[Rcpp::plugins(openmp)]]
// [[Rcpp::plugins(cpp11)]]
#include <Rcpp.h>
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace Rcpp;
// [[Rcpp::export]]
SEXP summaryC(NumericMatrix x,
int nrow, int ncol, const int ncores)
{
NumericMatrix result(nrow, 5);
int indices[6] = {-1, 0, 249, 500, 750, 999};
// #pragma omp parallel num_threads(ncores)
{
// #pragma omp for
for(int i = 0; i < nrow; i++){
NumericVector v = (x.row(i));
for(int q=0; q < 5; q++){
std::nth_element(v.begin() + indices[q] + 1, v.begin() + indices[q+1], v.end());
result(i,q) = *(v.begin() + indices[q+1]);
}
}
}
return Rcpp::wrap(result);
}
// [[Rcpp::export]]
SEXP summaryParC(NumericMatrix x,
int nrow, int ncol, const int ncores)
{
NumericMatrix result(nrow, 5);
int indices[6] = {-1, 0, 249, 500, 750, 999};
#pragma omp parallel num_threads(ncores)
{
#pragma omp for schedule(dynamic)
for(int i = 0; i < nrow; i++){
{
NumericVector v = (x.row(i));
for(int q=0; q<5; q++){
std::nth_element(v.begin() + indices[q] + 1, v.begin() + indices[q+1], v.end());
result(i,q) = *(v.begin() + indices[q+1]);
}
}
}
}
return Rcpp::wrap(result);
}
// [[Rcpp::export]]
SEXP summaryParCorder(NumericMatrix x,
int nrow, int ncol, const int ncores)
{
NumericMatrix result(nrow, 5);
int indices[6] = {-1, 0, 249, 500, 750, 999};
#pragma omp parallel num_threads(ncores)
{
#pragma omp for ordered schedule(dynamic)
for(int i = 0; i < nrow; i++){
#pragma omp ordered
{
NumericVector v = (x.row(i));
for(int q=0; q<5; q++){
std::nth_element(v.begin() + indices[q] + 1, v.begin() + indices[q+1], v.end());
result(i,q) = *(v.begin() + indices[q+1]);
}
}
}
}
return Rcpp::wrap(result);
}
***** R - code *****
#this works, but summaryParCorder is much slower.
mbm <- microbenchmark::microbenchmark(
summaryC(x = matrix(as.numeric(1:1000000), ncol = 1000),
nrow = 1000, ncol = 1000, ncores = 4),
summaryParCorder(x = matrix(as.numeric(1:1000000), ncol = 1000),
nrow = 1000, ncol = 1000, ncores = 4),
times = 20
)
mbm
# this breaks:
summaryParC(x = matrix(as.numeric(1:1000000), ncol = 1000),
nrow = 1000, ncol = 1000, ncores = 4)
I have not checked for compatibility with OpenMP, but Eigen::VectorXd::data() gives you the required pointer, if the vector in question is not const:
// [[Rcpp::depends(RcppEigen)]]
#include <RcppEigen.h>
// [[Rcpp::export]]
Eigen::VectorXd quantiles(Eigen::VectorXd x, const Eigen::VectorXi& indices) {
Eigen::VectorXd result(indices.size());
std::nth_element(x.data(), x.data() + indices[0], x.data() + x.size());
result(0) = x[indices[0]];
for (int i = 1; i < indices.size(); ++i) {
std::nth_element(x.data() + indices[i - 1] + 1,
x.data() + indices[i],
x.data() + x.size());
result(i) = x[indices[i]];
}
return result;
}
/*** R
set.seed(42)
x <- runif(12)
i <- sort(sample(seq_len(12), 3)) - 1
quantiles(x, i)
*/
Here a full solution including OpenMP:
// [[Rcpp::plugins(openmp)]]
// [[Rcpp::plugins(cpp11)]]
// [[Rcpp::depends(RcppEigen)]]
#include <RcppEigen.h>
using namespace Rcpp;
// [[Rcpp::export]]
NumericMatrix summaryC(NumericMatrix x, int nrow, int ncores)
{
NumericMatrix result(nrow, 5);
int indices[6] = {-1, 0, 249, 500, 750, 999};
for (int i = 0; i < nrow; i++) {
NumericVector v = (x.row(i));
for (int q = 0; q < 5; ++q) {
std::nth_element(v.begin() + indices[q] + 1, v.begin() + indices[q+1], v.end());
result(i,q) = *(v.begin() + indices[q+1]);
}
}
return result;
}
// [[Rcpp::export]]
Eigen::MatrixXd summaryParC(Eigen::MatrixXd x,int nrow, int ncores) {
Eigen::MatrixXd result(nrow, 5);
int indices[6] = {-1, 0, 249, 500, 750, 999};
#pragma omp parallel num_threads(ncores)
{
#pragma omp for schedule(dynamic)
for (int i = 0; i < nrow; i++) {
Eigen::VectorXd v = x.row(i);
for (int q = 0; q < 5; ++q) {
std::nth_element(v.data() + indices[q] + 1,
v.data() + indices[q+1],
v.data() + v.size());
result(i,q) = v[indices[q+1]];
}
}
}
return result;
}
/*** R
x <- matrix(as.numeric(1:1000000), ncol = 1000)
microbenchmark::microbenchmark(
summaryC = summaryC(x = x, nrow = 1000, ncores = 4),
summaryParC = summaryParC(x = x, nrow = 1000, ncores = 4),
times = 100)
*/
I have never seen a crash with this parallel version, and on my dual-core machine it is about 44% faster than the serial code.
I'm currently working on an MPI program and I'm trying to send blocks of a matrix with MPI_Scatterv to all processes.
Process description
The matrix is given as an array.
First I produce a datatype with MPI_Type_vector to create the necessary block out of the original array.
Second I create a MPI_Type_struct that should hold rows of blocks.
#include <math.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#define n 16
int main(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
MPI_Comm comm = MPI_COMM_WORLD;
int p,r;
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &r);
int *arr;
arr = NULL;
if (r == 0){
arr = (int *) malloc(n * n * sizeof(int));
for (int i = 0; i < n * n; i++) arr[i] = i;
for (int i = 0; i < n; i++){
printf("\n");
for (int j = 0; j < n; j++)
printf("%4d", arr[i * n + j]);
}
}
printf("\n");
int ps = sqrt(p);
int ns = n / ps;
if (r == 0) {
printf("ps: %d ns: %d\n", ps, ns);
}
/* create datatype */
MPI_Datatype block;
MPI_Type_vector(ns, ns, n, MPI_INT, &block);
int blocks[ps];
MPI_Aint displs[ps];
for (int i = 0; i < ps; i++) {
blocks[i] = 1;
displs[i] = i * sizeof(int);
}
MPI_Datatype types[ps];
//for (int i = 0; i < ps - 1; i++) types[i] = block;
//types[ps - 1] = MPI_UB;
types[0] = block;
for (int i = 1; i < ps; i++) types[i] = MPI_UB;
//types[0] = block;
//types[1] = MPI_UB;
if (r == 0) {
printf("displs:\n");
for(int i = 0; i < ps; i++) printf("%3ld", displs[i]);
printf("\n");
}
MPI_Datatype row;
MPI_Type_struct(ps, blocks, displs, types, &row);
MPI_Type_commit(&row);
/* prepare scatter */
int sdispl[p]; int sendcounts[p];
for (int i = 0; i < p; i++) {
sdispl[i] = (i % ps) + (i / ps) * (ns * ps);
sendcounts[i] = 1;
}
if (r == 0) {
printf("sdispl: \n");
for (int i = 0; i < 4; i++) printf("%3d", sdispl[i]);
printf("\n");
}
int rcv[ns * ns];
MPI_Scatterv(arr, sendcounts, sdispl, row, rcv, ns * ns, MPI_INT, 0, comm);
int result = 1;
if (r == result) {
printf("result for %d:\n", result);
for (int i = 0; i < ns * ns; i++) {
printf("%4d", rcv[i]);
if ((i+1) % ns == 0) printf("\n");
}
}
if (arr != NULL) free(arr);
MPI_Finalize();
return 0;
}
So far the structure of the blocks is correct.
The problem
The block that was sent to process r = 1 starts with 3 instead of 4. Similarly, the block for process r = 2 starts with 6 and the one for process r = 3 starts with 9.
For r == 4 it jumps to 48.
What it should do
r start
0 0
1 4
2 8
3 12
4 64
5 68
6 ...
15 204
The help I would need
I think I'm making some mistake with displs and sdispl.
Compiling and Running the example
The code is compiled with the following command:
mpicc -o main main.c -lm
I run the code with:
mpirun -np 16 ./main
Thanks for any help in advance!
With Zulan's hint I was able to solve my problem.
The following code is based on the excellent answer about subarrays.
#include <math.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#define n 8
void print_arr(int *arr, int x) {
printf("\n");
for (int i = 0; i < x*x; i++){
if (i % x == 0) printf("\n");
printf("%4d", arr[i]);
}
printf("\n");
}
int main(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
MPI_Comm comm = MPI_COMM_WORLD;
int p, r;
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &r);
/* number of proceses in dim x and dim y */
int ps = sqrt(p);
/* number of elements in dim x and dim y in sarr */
int ns = n/ps;
/* array of data - distributed by process 0 */
int *arr = NULL;
if (r==0) {
arr = (int *) malloc(n * n * sizeof(int));
for (int i = 0; i < n*n; i++) arr[i] = i;
print_arr(arr, n);
}
MPI_Datatype type, resizedtype;
int sizes[2] = {n,n};
int subsizes[2] = {ns,ns};
int starts[2] = {0,0};
MPI_Type_create_subarray(2, sizes, subsizes, starts, MPI_ORDER_C, MPI_INT, &type);
MPI_Type_create_resized(type, 0, ns*sizeof(int), &resizedtype);
MPI_Type_commit(&resizedtype);
int counts[p];
for (int i = 0; i < p; i++) counts[i] = 1;
int displs[p];
for (int i = 0; i < p; i++) displs[i] = i%ps + i/ps * ns * ps;
/* subarray to store distributed data */
int sarr[ns * ns];
/* send submatrices to all processes */
MPI_Scatterv(arr, counts, displs, resizedtype, sarr, ns*ns, MPI_INT, 0, comm);
/* print received data for process pr */
int pr = 3;
if (r == pr)
print_arr(sarr, ns);
/* free arr */
if (arr != NULL) free(arr);
MPI_Finalize();
return 0;
}
You can compile the example with
mpicc -o main main.c
and run it with
mpirun -np 4 ./main
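In case the displacements look like magic: MPI_Scatterv counts them in units of the extent of the send type, and the resized type has an extent of ns ints. So the first element of the block sent to rank r is displs[r] * ns = (r % ps) * ns + (r / ps) * ns * n. With n = 8 and 4 processes (ps = 2, ns = 4) the displacements are {0, 1, 8, 9}, which puts the block origins at elements 0, 4, 32 and 36 of arr, exactly the top-left corner of each 4 x 4 block.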
I'm trying to write a matrix transpose algorithm. I tested this program with a matrix size of 1024, and the result shows that not all elements end up in the right places.
Why isn't my array transposing correctly? Can anyone help me or give me a hint? I would appreciate it. Thanks a lot!
Here is the whole code:
__global__ void transpose_naive (float *out, float *in, int w, int h )
{
unsigned int xIdx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int yIdx = blockDim.y * blockIdx.y + threadIdx.y;
if ( xIdx <=w && yIdx <=h ) {
unsigned int idx_in = xIdx + w * yIdx;
unsigned int idx_out = yIdx + h * xIdx;
out[idx_out] = in[idx_in];
}
}
int main()
{
int nx=1024;
int mem_size = nx*nx*sizeof(float);
int t=32;
dim3 dimGrid(((nx-1)/t) +1, ((nx-1)/t) +1);
dim3 dimBlock(t,t);
float *h_idata = (float*)malloc(mem_size);
float *h_cdata = (float*)malloc(mem_size);
float *d_idata, *d_cdata;
checkCuda(cudaMalloc(&d_idata, mem_size) );
checkCuda(cudaMalloc(&d_cdata, mem_size) );
// host
for (int j = 0; j < nx; j++)
for (int i = 0; i < nx; i++)
h_idata[j*nx + i] = j*nx + i;
// device
checkCuda(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) );
// events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda(cudaEventCreate(&startEvent) );
checkCuda(cudaEventCreate(&stopEvent) );
float ms;
checkCuda( cudaEventRecord(startEvent, 0) );
transpose_naive<<<dimGrid, dimBlock>>>(d_cdata, d_idata,nx,nx);
checkCuda(cudaEventRecord(stopEvent, 0) );
checkCuda(cudaEventSynchronize(stopEvent) );
checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_cdata, d_cdata, mem_size, cudaMemcpyDeviceToHost) );
printf("the time %5f ", ms);
printf("\n");
savetofile(h_idata,"i.txt",nx,nx);
savetofile(h_cdata,"t.txt",nx,nx);
error_exit:
// cleanup
checkCuda(cudaEventDestroy(startEvent) );
checkCuda(cudaEventDestroy(stopEvent) );
checkCuda( cudaFree(d_cdata) );
checkCuda( cudaFree(d_idata) );
free(h_idata);
free(h_cdata);
system("pause");
}
I think there is something wrong with the file output "i.txt" and "t.txt"; otherwise the program looks correct. I have made some minor changes to your code by adding error checking and printing to the standard output stream. I am printing the last 4 x 4 block (rows and columns 1020-1023) to cross-check the transpose. Run it on your system and verify whether the matrix transpose is correct or not.
#include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "device_launch_parameters.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),file, line);
if (abort) exit(code);
}
}
__global__ void transpose_naive(float *out, float *in, int w, int h)
{
unsigned int xIdx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int yIdx = blockDim.y * blockIdx.y + threadIdx.y;
if (xIdx <= w && yIdx <= h) {
unsigned int idx_in = xIdx + w * yIdx;
unsigned int idx_out = yIdx + h * xIdx;
out[idx_out] = in[idx_in];
}
}
int main()
{
int nx = 1024;
int mem_size = nx*nx*sizeof(float);
int t = 32;
dim3 dimGrid(((nx - 1) / t) + 1, (((nx - 1) / t) + 1));
dim3 dimBlock(t, t);
float *h_idata = (float*)malloc(mem_size);
float *h_cdata = (float*)malloc(mem_size);
float *d_idata, *d_cdata;
gpuErrchk(cudaMalloc(&d_idata, mem_size));
gpuErrchk(cudaMalloc(&d_cdata, mem_size));
// host
for (int j = 0; j < nx; j++)
for (int i = 0; i < nx; i++)
h_idata[j*nx + i] = j*nx + i;
// device
gpuErrchk(cudaMemcpy(d_idata,h_idata,mem_size,cudaMemcpyHostToDevice));
// events for timing
cudaEvent_t startEvent, stopEvent;
gpuErrchk(cudaEventCreate(&startEvent));
gpuErrchk(cudaEventCreate(&stopEvent));
float ms;
gpuErrchk(cudaEventRecord(startEvent, 0));
transpose_naive << <dimGrid, dimBlock >> >(d_cdata, d_idata, nx, nx);
gpuErrchk(cudaEventRecord(stopEvent, 0));
gpuErrchk(cudaEventSynchronize(stopEvent));
gpuErrchk(cudaEventElapsedTime(&ms, startEvent, stopEvent));
gpuErrchk(cudaMemcpy(h_cdata,d_cdata,mem_size,cudaMemcpyDeviceToHost));
printf("the time %5f ", ms);
printf("\n");
for (int i = 1020; i < 1024; i++) {
for (int j = 1020; j < 1024; j++) {
printf("%.2f ", h_idata[i*nx + j]);
}
printf("\n");
}
printf("\n");
for (int i = 1020; i < 1024; i++) {
for (int j = 1020; j < 1024; j++) {
printf("%.2f ", h_cdata[i*nx + j]);
}
printf("\n");
}
//savetofile(h_idata, "i.txt", nx, nx);
//savetofile(h_cdata, "t.txt", nx, nx);
//error_exit:
// cleanup
gpuErrchk(cudaEventDestroy(startEvent));
gpuErrchk(cudaEventDestroy(stopEvent));
gpuErrchk(cudaFree(d_cdata));
gpuErrchk(cudaFree(d_idata));
free(h_idata);
free(h_cdata);
//system("pause");
}
The only flaw in the code is the incorrect bounds check in the following line of the kernel:
if ( xIdx <=w && yIdx <=h ) {
Since the indices run from 0 to w-1 and 0 to h-1 in the x and y dimensions respectively, the if condition should be:
if ( xIdx <w && yIdx <h ) {
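For instance, if w and h were 1000 rather than a multiple of the block size, the launch would still round up to 1024 threads per dimension; a thread with xIdx == 1000 would pass the <= test, read in[1000 + 1000 * yIdx] (past the end of row yIdx) and write out[yIdx + 1000000], which is past the end of the output array.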
I'm trying to implement a FIR (Finite Impulse Response) filter in CUDA. My approach is quite simple and looks somewhat like this:
#include <cuda.h>
__global__ void filterData(const float *d_data,
const float *d_numerator,
float *d_filteredData,
const int numeratorLength,
const int filteredDataLength)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sum = 0.0f;
if (i < filteredDataLength)
{
for (int j = 0; j < numeratorLength; j++)
{
// The first (numeratorLength-1) elements contain the filter state
sum += d_numerator[j] * d_data[i + numeratorLength - j - 1];
}
}
d_filteredData[i] = sum;
}
int main(void)
{
// (Skipping error checks to make code more readable)
int dataLength = 18042;
int filteredDataLength = 16384;
int numeratorLength= 1659;
// Pointers to data, filtered data and filter coefficients
// (Skipping how these are read into the arrays)
float *h_data = new float[dataLength];
float *h_filteredData = new float[filteredDataLength];
float *h_filter = new float[numeratorLength];
// Create device pointers
float *d_data = nullptr;
cudaMalloc((void **)&d_data, dataLength * sizeof(float));
float *d_numerator = nullptr;
cudaMalloc((void **)&d_numerator, numeratorLength * sizeof(float));
float *d_filteredData = nullptr;
cudaMalloc((void **)&d_filteredData, filteredDataLength * sizeof(float));
// Copy data to device
cudaMemcpy(d_data, h_data, dataLength * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_numerator, h_numerator, numeratorLength * sizeof(float), cudaMemcpyHostToDevice);
// Launch the kernel
int threadsPerBlock = 256;
int blocksPerGrid = (filteredDataLength + threadsPerBlock - 1) / threadsPerBlock;
filterData<<<blocksPerGrid,threadsPerBlock>>>(d_data, d_numerator, d_filteredData, numeratorLength, filteredDataLength);
// Copy results to host
cudaMemcpy(h_filteredData, d_filteredData, filteredDataLength * sizeof(float), cudaMemcpyDeviceToHost);
// Clean up
cudaFree(d_data);
cudaFree(d_numerator);
cudaFree(d_filteredData);
// Do stuff with h_filteredData...
// Clean up some more
delete [] h_data;
delete [] h_filteredData;
delete [] h_filter;
}
The filter works, but as I'm new to CUDA programming I'm not sure how to optimize it.
A slight problem that I see is that dataLength, filteredDataLength, and numeratorLength are not known beforehand in the application I intend to use the filter in. Also, even though dataLength is a multiple of 32 in the above code, it is not guaranteed to be so in the final application.
When I compare my code above to ArrayFire, my code takes about three times longer to execute.
Does anyone have any ideas on how to speed things up?
EDIT: Have changed all filterLength to numeratorLength.
I can suggest the following to speed up your code:
Use shared memory: it is a tiny, cache-like memory, but much faster than the global card memory. You can find more about it by looking for the __shared__ keyword in the CUDA documentation. For example, you can pre-fetch the filter numerator and big chunks of data into shared memory; this will significantly enhance your performance. You need to pay extra attention to data alignment in this case, as it really matters and can slow down your code. A rough sketch of this idea is shown below.
Think about unrolling the for-loop of the numerator sum. You can check the vector-reduction example in the CUDA documentation.
You can also think about parallelizing the numerator loop itself. This can be done by adding an extra dimension (say 'y') to your thread block. You will need to make sum a shared vector as well, with the dimension of numeratorLength. You can also check the vector-reduction example for how to quickly take the sum of this vector at the end.
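To make the first suggestion concrete, here is a rough, untested sketch (the kernel name filterDataShared is just for illustration): it stages the filter coefficients in dynamically sized shared memory once per block and otherwise keeps the structure of your kernel. It assumes numeratorLength * sizeof(float) fits in the shared-memory budget; with numeratorLength = 1659 that is about 6.5 KB per block.
__global__ void filterDataShared(const float *d_data,
                                 const float *d_numerator,
                                 float *d_filteredData,
                                 const int numeratorLength,
                                 const int filteredDataLength)
{
    extern __shared__ float s_numerator[]; // size supplied at launch time
    // Cooperatively stage the filter coefficients in shared memory.
    for (int j = threadIdx.x; j < numeratorLength; j += blockDim.x)
        s_numerator[j] = d_numerator[j];
    __syncthreads();
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < filteredDataLength)
    {
        float sum = 0.0f;
        for (int j = 0; j < numeratorLength; j++)
        {
            // The first (numeratorLength-1) elements of d_data contain the filter state.
            sum += s_numerator[j] * d_data[i + numeratorLength - j - 1];
        }
        d_filteredData[i] = sum;
    }
}
It would be launched with the shared-memory size as the third launch-configuration parameter, e.g. filterDataShared<<<blocksPerGrid, threadsPerBlock, numeratorLength * sizeof(float)>>>(d_data, d_numerator, d_filteredData, numeratorLength, filteredDataLength). Caching chunks of d_data in shared memory as well would help further, but needs careful handling of the halo regions.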
You are attempting to calculate the filter output by directly evaluating the 1D convolution through a CUDA kernel.
In the case when the filter impulse response duration is long, one thing you can do to evaluate the filtered input is to perform the calculations directly in the conjugate (frequency) domain using FFTs. Below I report sample code using CUDA Thrust and the cuFFT library. It is a direct translation of the Matlab-based example reported at
Low-Pass Filtering by FFT Convolution
Let me point out that some optimizations are possible with this code, but I preferred to leave it as it is so that it can be more easily compared to its Matlab counterpart.
#include <stdio.h>
#include <math.h>
#include <cufft.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#define pi_f 3.14159265358979f // Greek pi in single precision
/****************/
/* SIN OPERATOR */
/****************/
class sin_op {
float fk_, Fs_;
public:
sin_op(float fk, float Fs) { fk_ = fk; Fs_ = Fs; }
__host__ __device__ float operator()(float x) const { return sin(2.f*pi_f*x*fk_/Fs_); }
};
/*****************/
/* SINC OPERATOR */
/*****************/
class sinc_op {
float fc_, Fs_;
public:
sinc_op(float fc, float Fs) { fc_ = fc; Fs_ = Fs; }
__host__ __device__ float operator()(float x) const
{
if (x==0) return (2.f*fc_/Fs_);
else return (2.f*fc_/Fs_)*sin(2.f*pi_f*fc_*x/Fs_)/(2.f*pi_f*fc_*x/Fs_);
}
};
/********************/
/* HAMMING OPERATOR */
/********************/
class hamming_op {
int L_;
public:
hamming_op(int L) { L_ = L; }
__host__ __device__ float operator()(int x) const
{
return 0.54-0.46*cos(2.f*pi_f*x/(L_-1));
}
};
/*********************************/
/* MULTIPLY CUFFTCOMPLEX NUMBERS */
/*********************************/
struct multiply_cufftComplex {
__device__ cufftComplex operator()(const cufftComplex& a, const cufftComplex& b) const {
cufftComplex r;
r.x = a.x * b.x - a.y * b.y;
r.y = a.x * b.y + a.y * b.x;
return r;
}
};
/********/
/* MAIN */
/********/
int main(){
// Signal parameters:
int M = 256; // signal length
const int N = 4;
float f[N] = { 440, 880, 1000, 2000 }; // frequencies
float Fs = 5000.; // sampling rate
// Generate a signal by adding up sinusoids:
thrust::device_vector<float> d_x(M,0.f); // pre-allocate 'accumulator'
thrust::device_vector<float> d_n(M); // discrete-time grid
thrust::sequence(d_n.begin(), d_n.end(), 0, 1);
thrust::device_vector<float> d_temp(M);
for (int i=0; i<N; i++) {
float fk = f[i];
thrust::transform(d_n.begin(), d_n.end(), d_temp.begin(), sin_op(fk,Fs));
thrust::transform(d_temp.begin(), d_temp.end(), d_x.begin(), d_x.begin(), thrust::plus<float>());
}
// Filter parameters:
int L = 257; // filter length
float fc = 600.f; // cutoff frequency
// Design the filter using the window method:
thrust::device_vector<float> d_hsupp(L);
thrust::sequence(d_hsupp.begin(), d_hsupp.end(), -(L-1)/2, 1);
thrust::device_vector<float> d_hideal(L);
thrust::transform(d_hsupp.begin(), d_hsupp.end(), d_hideal.begin(), sinc_op(fc,Fs));
thrust::device_vector<float> d_l(L);
thrust::sequence(d_l.begin(), d_l.end(), 0, 1);
thrust::device_vector<float> d_h(L);
thrust::transform(d_l.begin(), d_l.end(), d_h.begin(), hamming_op(L));
// h is our filter
thrust::transform(d_hideal.begin(), d_hideal.end(), d_h.begin(), d_h.begin(), thrust::multiplies<float>());
// --- Choose the next power of 2 greater than L+M-1
int Nfft = pow(2,(ceil(log2((float)(L+M-1))))); // or 2^nextpow2(L+M-1)
// Zero pad the signal and impulse response:
thrust::device_vector<float> d_xzp(Nfft,0.f);
thrust::device_vector<float> d_hzp(Nfft,0.f);
thrust::copy(d_x.begin(), d_x.end(), d_xzp.begin());
thrust::copy(d_h.begin(), d_h.end(), d_hzp.begin());
// Transform the signal and the filter:
cufftHandle plan;
cufftPlan1d(&plan, Nfft, CUFFT_R2C, 1);
thrust::device_vector<cufftComplex> d_X(Nfft/2+1);
thrust::device_vector<cufftComplex> d_H(Nfft/2+1);
cufftExecR2C(plan, (cufftReal*)thrust::raw_pointer_cast(d_xzp.data()), (cufftComplex*)thrust::raw_pointer_cast(d_X.data()));
cufftExecR2C(plan, (cufftReal*)thrust::raw_pointer_cast(d_hzp.data()), (cufftComplex*)thrust::raw_pointer_cast(d_H.data()));
thrust::device_vector<cufftComplex> d_Y(Nfft/2+1);
thrust::transform(d_X.begin(), d_X.end(), d_H.begin(), d_Y.begin(), multiply_cufftComplex());
cufftPlan1d(&plan, Nfft, CUFFT_C2R, 1);
thrust::device_vector<float> d_y(Nfft);
cufftExecC2R(plan, (cufftComplex*)thrust::raw_pointer_cast(d_Y.data()), (cufftReal*)thrust::raw_pointer_cast(d_y.data()));
getchar();
return 0;
}
Besides my other answer, which I expect will be more convenient for convolution kernels of long duration, below I'm reporting a different implementation, which is more in line with the OP's initial attempt and which I expect will be more convenient for convolution kernels of short duration. Such an implementation is based on a hand-written kernel exploiting caching in shared memory. More details can be found in the book by D.B. Kirk and W.-m. W. Hwu
Programming Massively Parallel Processors, Second Edition: A Hands-on Approach
#include <stdio.h>
#include <stdlib.h>
#include "TimingGPU.cuh"
#include "Utilities.cuh"
#define RG 10
#define BLOCKSIZE 8
/****************/
/* CPU FUNCTION */
/****************/
void h_convolution_1D(const float * __restrict__ h_Signal, const float * __restrict__ h_ConvKernel, float * __restrict__ h_Result_CPU,
const int N, const int K) {
for (int i = 0; i < N; i++) {
float temp = 0.f;
int N_start_point = i - (K / 2);
for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
temp += h_Signal[N_start_point+ j] * h_ConvKernel[j];
}
h_Result_CPU[i] = temp;
}
}
/********************/
/* BASIC GPU KERNEL */
/********************/
__global__ void d_convolution_1D_basic(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU,
const int N, const int K) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
float temp = 0.f;
int N_start_point = i - (K / 2);
for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
temp += d_Signal[N_start_point+ j] * d_ConvKernel[j];
}
d_Result_GPU[i] = temp;
}
/***************************/
/* GPU KERNEL WITH CACHING */
/***************************/
__global__ void d_convolution_1D_caching(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU,
const int N, const int K) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float d_Tile[BLOCKSIZE];
d_Tile[threadIdx.x] = d_Signal[i];
__syncthreads();
float temp = 0.f;
int N_start_point = i - (K / 2);
for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
if ((N_start_point + j >= blockIdx.x * blockDim.x) && (N_start_point + j < (blockIdx.x + 1) * blockDim.x))
// --- The signal element is in the tile loaded in the shared memory
temp += d_Tile[threadIdx.x + j - (K / 2)] * d_ConvKernel[j];
else
// --- The signal element is not in the tile loaded in the shared memory
temp += d_Signal[N_start_point + j] * d_ConvKernel[j];
}
d_Result_GPU[i] = temp;
}
/********/
/* MAIN */
/********/
int main(){
const int N = 15; // --- Signal length
const int K = 5; // --- Convolution kernel length
float *h_Signal = (float *)malloc(N * sizeof(float));
float *h_Result_CPU = (float *)malloc(N * sizeof(float));
float *h_Result_GPU = (float *)malloc(N * sizeof(float));
float *h_ConvKernel = (float *)malloc(K * sizeof(float));
float *d_Signal; gpuErrchk(cudaMalloc(&d_Signal, N * sizeof(float)));
float *d_Result_GPU; gpuErrchk(cudaMalloc(&d_Result_GPU, N * sizeof(float)));
float *d_ConvKernel; gpuErrchk(cudaMalloc(&d_ConvKernel, K * sizeof(float)));
for (int i=0; i < N; i++) { h_Signal[i] = (float)(rand() % RG); }
for (int i=0; i < K; i++) { h_ConvKernel[i] = (float)(rand() % RG); }
gpuErrchk(cudaMemcpy(d_Signal, h_Signal, N * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_ConvKernel, h_ConvKernel, K * sizeof(float), cudaMemcpyHostToDevice));
h_convolution_1D(h_Signal, h_ConvKernel, h_Result_CPU, N, K);
d_convolution_1D_basic<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_Signal, d_ConvKernel, d_Result_GPU, N, K);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch in basic kernel at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}
printf("Test basic passed\n");
d_convolution_1D_caching<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_Signal, d_ConvKernel, d_Result_GPU, N, K);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch in caching kernel at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}
printf("Test caching passed\n");
return 0;
}