Illegal context for vector clause in simple OpenACC kernel - openacc

I'm trying to compile a simple OpenACC benchmark:
void foo(const float * restrict a, int a_stride, float * restrict c, int c_stride) {
#pragma acc parallel copyin(a[0:a_stride*256]) copyout(c[0:c_stride*256])
#pragma acc loop vector(128)
  {
    for (int i = 0; i < 256; ++i) {
      float sum = 0;
      for (int j = 0; j < 256; ++j) {
        sum += *(a + a_stride * i + j);
      }
      *(c + c_stride * i) = sum;
    }
  }
}
with the NVIDIA HPC SDK 21.5 and run into an error:
$ nvc++ -S tmp.cc -Wall -Wextra -O2 -acc -acclibs -Minfo=all -g -gpu=cc80
NVC++-S-0155-Illegal context for gang(num:) or worker(num:) or vector(length:) (tmp.cc: 7)
NVC++/x86-64 Linux 21.5-0: compilation completed with severe errors
Any idea what may cause this? From what I can tell my syntax for vector(128) is legal.

It's illegal OpenACC syntax to use "vector(value)" with a parallel construct. You need to use a "vector_length" clause on the parallel directive to define the vector length. The reason is that "parallel" defines a single compute region to be offloaded, so all vector loops in that region must use the same vector length.
You can use "vector(value)" only with a "kernels" construct, since the compiler can then split the region into multiple kernels, each with a different vector length.
Option 1:
% cat test.c
void foo(const float * restrict a, int a_stride, float * restrict c, int c_stride) {
#pragma acc parallel vector_length(128) copyin(a[0:a_stride*256]) copyout(c[0:c_stride*256])
#pragma acc loop vector
  {
    for (int i = 0; i < 256; ++i) {
      float sum = 0;
      for (int j = 0; j < 256; ++j) {
        sum += *(a + a_stride * i + j);
      }
      *(c + c_stride * i) = sum;
    }
  }
}
% nvc -acc -c test.c -Minfo=accel
foo:
      4, Generating copyout(c[:c_stride*256]) [if not already present]
         Generating copyin(a[:a_stride*256]) [if not already present]
         Generating Tesla code
          5, #pragma acc loop vector(128) /* threadIdx.x */
          7, #pragma acc loop seq
      5, Loop is parallelizable
      7, Loop is parallelizable
Option 2:
% cat test.c
void foo(const float * restrict a, int a_stride, float * restrict c, int c_stride) {
#pragma acc kernels copyin(a[0:a_stride*256]) copyout(c[0:c_stride*256])
#pragma acc loop independent vector(128)
  {
    for (int i = 0; i < 256; ++i) {
      float sum = 0;
      for (int j = 0; j < 256; ++j) {
        sum += *(a + a_stride * i + j);
      }
      *(c + c_stride * i) = sum;
    }
  }
}
% nvc -acc -c test.c -Minfo=accel
foo:
      4, Generating copyout(c[:c_stride*256]) [if not already present]
         Generating copyin(a[:a_stride*256]) [if not already present]
      5, Loop is parallelizable
         Generating Tesla code
          5, #pragma acc loop gang, vector(128) /* blockIdx.x threadIdx.x */
          7, #pragma acc loop seq
      7, Loop is parallelizable

Related

OpenMP reduction on SSE2 vector

I want to compute the average of an image (3 channels of interest + 1 alpha channel we ignore here) for each channel using SSE2 intrinsics. I tried this:
__m128 average = _mm_setzero_ps();
#pragma omp parallel for reduction(+:average)
for(size_t k = 0; k < roi_out->height * roi_out->width * ch; k += ch)
{
  float *in = ((float *)temp) + k;
  average += _mm_load_ps(in);
}
But I get this error with GCC: "user-defined reduction not found for average".
Is that possible with SSE2? What's wrong?
Edit
This works:
float sum[4] = { 0.0f };
#pragma omp parallel for simd reduction(+:sum[:4])
for(size_t k = 0; k < roi_out->height * roi_out->width * ch; k += ch)
{
  float *in = ((float *)temp) + k;
  for (int i = 0; i < ch; ++i) sum[i] += in[i];
}
const __m128 average = _mm_load_ps(sum) / ((float)roi_out->height * roi_out->width);
You can declare a user-defined reduction like this:
#pragma omp declare reduction \
    (addps:__m128:omp_out+=omp_in) \
    initializer(omp_priv=_mm_setzero_ps())
And then use it like:
#pragma omp parallel for reduction(addps:average)
for(size_t k = 0; k < size * ch; k += ch)
{
  average += _mm_loadu_ps(data+k);
}
I think the most important point is that OpenMP needs to know how to obtain a neutral element (here _mm_setzero_ps()) for your reduction.
Full working example: https://godbolt.org/z/Fpqttc
Interesting link: http://pages.tacc.utexas.edu/~eijkhout/pcse/html/omp-reduction.html#User-definedreductions
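For completeness, here is a self-contained sketch along the same lines (my own illustration, not the code from the Godbolt link; it spells out _mm_add_ps instead of relying on GCC's += operator for __m128, and the names data, pixels and ch are made up):
#include <immintrin.h>
#include <stdio.h>

/* Tell OpenMP how to combine two partial __m128 sums (combiner) and what
   the neutral element of the reduction is (initializer). */
#pragma omp declare reduction \
    (addps : __m128 : omp_out = _mm_add_ps(omp_out, omp_in)) \
    initializer(omp_priv = _mm_setzero_ps())

int main(void) {
    enum { ch = 4, pixels = 1024 };            /* hypothetical image size */
    static float data[pixels * ch];
    for (int k = 0; k < pixels * ch; ++k) data[k] = 1.0f;  /* dummy pixels */

    __m128 average = _mm_setzero_ps();
    #pragma omp parallel for reduction(addps : average)
    for (int k = 0; k < pixels * ch; k += ch)
        average = _mm_add_ps(average, _mm_loadu_ps(data + k));

    float out[4];
    _mm_storeu_ps(out, _mm_div_ps(average, _mm_set1_ps((float)pixels)));
    printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
    return 0;
}
Compile with something like gcc -O2 -fopenmp file.c (SSE2 is enabled by default on x86-64).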

How to generate simd code for math function "exp" using openmp?

I have a simple C code as follows:
void calculate_exp(float *out, float *in, int size) {
  for(int i = 0; i < size; i++) {
    out[i] = exp(in[i]);
  }
}
I want to optimize it using OpenMP SIMD. I am new to OpenMP and have tried a few pragmas such as 'omp simd' and 'omp simd safelen', but I am unable to get SIMD code generated. Can anybody help?
You can use one of the following four alternatives to vectorize the exp function.
Note that I have used expf (the float version) instead of exp, which is a double function.
This Godbolt link shows that these functions are vectorized: search for call _ZGVdN8v___expf_finite in the compiler-generated code.
#include <math.h>

int exp_vect_a(float* x, float* y, int N) {
  /* Inform the compiler that N is a multiple of 8, this leads to shorter code */
  N = N & 0xFFFFFFF8;
  x = (float*)__builtin_assume_aligned(x, 32);  /* gcc 8.2 doesn't need aligned x and y to generate `nice` code */
  y = (float*)__builtin_assume_aligned(y, 32);  /* with gcc 7.3 it improves the generated code */
  #pragma omp simd
  for(int i=0; i<N; i++) y[i] = expf(x[i]);
  return 0;
}

int exp_vect_b(float* restrict x, float* restrict y, int N) {
  N = N & 0xFFFFFFF8;
  x = (float*)__builtin_assume_aligned(x, 32);  /* gcc 8.2 doesn't need aligned x and y to generate `nice` code */
  y = (float*)__builtin_assume_aligned(y, 32);  /* with gcc 7.3 it improves the generated code */
  for(int i=0; i<N; i++) y[i] = expf(x[i]);
  return 0;
}

/* This also vectorizes, but it doesn't lead to `nice` code */
int exp_vect_c(float* restrict x, float* restrict y, int N) {
  for(int i=0; i<N; i++) y[i] = expf(x[i]);
  return 0;
}

/* This also vectorizes, but it doesn't lead to `nice` code */
int exp_vect_d(float* x, float* y, int N) {
  #pragma omp simd
  for(int i=0; i<N; i++) y[i] = expf(x[i]);
  return 0;
}
Note that Peter Cordes' comment is very relevant here: the function _ZGVdN8v___expf_finite might give slightly different results than expf, because its focus is on speed rather than on special cases such as inputs that are infinite, subnormal, or NaN. Moreover, its accuracy is a 4-ulp maximum relative error, which is probably slightly less accurate than the standard expf function.
Therefore you need optimization level -Ofast (which allows less accurate code) instead of -O3 to get the code vectorized with gcc. See this libmvec page for further details.
The following test code compiles and runs successfully with gcc 7.3:
#include <math.h>
#include <stdio.h>

/* gcc expv.c -m64 -Ofast -std=c99 -march=skylake -fopenmp -lm */

int exp_vect_d(float* x, float* y, int N) {
  #pragma omp simd
  for(int i=0; i<N; i++) y[i] = expf(x[i]);
  return 0;
}

int main(){
  float x[32];
  float y[32];
  int i;
  int N = 32;
  for(i = 0; i < N; i++) x[i] = i/100.0f;
  x[10] = -89.0f;            /* exp(-89.0f)=2.227e-39, which is a subnormal number */
  x[11] = -1000.0f;          /* output: 0.0                                        */
  x[12] = 1000.0f;           /* output: Inf.                                       */
  x[13] = 0.0f/0.0f;         /* input: NaN: Not a number                           */
  x[14] = 1e20f*1e20f;       /* input: Infinity                                    */
  x[15] = -1e20f*1e20f;      /* input: -Infinity                                   */
  x[16] = 2.3025850929940f;  /* exp(2.3025850929940f)=10.0...                      */
  exp_vect_d(x, y, N);
  for(i = 0; i < N; i++) printf("x=%11.8e, y=%11.8e\n", x[i], y[i]);
  return 0;
}

openacc create data while running inside a kernels

I have a task that is to be accelerated with OpenACC, and I need to do dynamic memory allocation within a kernel computation. I've built a simpler demo of it, shown below.
#include <iostream>
using namespace std;

#pragma acc routine seq
int *routine(int init) {
  int *ptr;
  #pragma acc data create(ptr[:10])
  for (int i = 0; i < 10; ++i) {
    ptr[i] = init + i;
  }
  return ptr;
}

void print_array(int *arr) {
  for (int i = 0; i < 10; ++i) {
    cout << arr[i] << " ";
  }
  cout << endl;
}

int main(void) {
  int *arrs[5];
  #pragma acc kernels
  for (int i = 0; i < 5; ++i) {
    arrs[i] = routine(i);
  }
  for (int i = 0; i < 5; ++i) {
    print_array(arrs[i]);
  }
  return 0;
}
In this demo, I'm calling the routine from inside a kernels construct. The routine is supposed to allocate some data on the GPU and put some values into it.
The code compiles, but it reports a runtime problem, as shown below.
lisanhu#lisanhu-XPS-15-9550:create_and_copyout$ pgc++ -o test main.cc -acc -Minfo=accel
routine(int):
      6, Generating acc routine seq
main:
     23, Generating implicit copyout(arrs[:])
     26, Accelerator restriction: size of the GPU copy of arrs is unknown
         Loop is parallelizable
         Generating implicit copy(arrs[:][:])
         Accelerator kernel generated
         Generating Tesla code
         26, #pragma acc loop gang, vector(32) /* blockIdx.x threadIdx.x */
lisanhu#lisanhu-XPS-15-9550:create_and_copyout$ ./test
call to cuStreamSynchronize returned error 715: Illegal instruction
I'm wondering what I should do to accomplish this task (dynamically allocating memory while executing inside a kernels construct). I'd really appreciate any help.
This is untested, and probably very slow, but this might do what you need it to.
// Fragment only: in addition to the question's includes, this needs
// <openacc.h> (for acc_async_test / acc_deviceptr) and <cstdlib>.
int main() {
  const int num = 20;
  int a[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0};
  int *sizes = (int *)malloc(num * sizeof(int));
  int *ptrs[num];
  int *temp, *temp2;
  int sum;
  int *finished = (int *)malloc(num * sizeof(int));
  for (int x = 0; x < num; ++x) {
    finished[x] = 0;
  }
  #pragma acc kernels copyin(a[0:10]) copyout(ptrs[:num][:1]) async(num*2+1)
  {
    #pragma acc loop private(temp)
    for (int i = 0; i < num; ++i) {
      #pragma acc loop seq async(i)
      for (int j = 0; j < 1; ++j) {
        temp = ptrs[i];
        sizes[i] = ...   // work out how much memory this iteration needs
      }
      while (ptrs[i] == temp);          // spin until the host has replaced ptrs[i]
      ptrs[i] = routine(a, sizes[i]);   // routine here takes the array and a size
    }
  }
  // Host side: poll the async queues, and for each finished queue allocate
  // the requested buffer and hand its device address back to the kernel.
  while (true) {
    sum = 0;
    for (int x = 0; x < num; ++x) {
      sum += finished[x];
    }
    if (sum == num) {
      break;
    }
    for (int x = 0; x < num; ++x) {
      if (acc_async_test(x) != 0 && finished[x] == 0) {
        finished[x] = 1;
        #pragma acc update host(sizes[x:1])
        temp = (int *)malloc(sizes[x] * sizeof(int));
        #pragma acc enter data copyin(temp[0:sizes[x]])
        temp2 = (int *)acc_deviceptr(temp);
        ptrs[x] = temp2;
        #pragma acc update device(ptrs[x:1][0:1])
      }
    }
  }
}
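For what it's worth, another possible direction (my own untested sketch, not part of the answer above): the NVHPC/PGI compilers allow calling malloc from device code inside an acc routine, so the allocation itself can happen on the device. The caveats are that the returned pointer is a device pointer, so host code such as print_array in the question cannot dereference it, and the device heap is small by default (I believe it can be enlarged via an environment variable such as NV_ACC_CUDA_HEAPSIZE; check the NVHPC documentation).
#include <cstdlib>

#pragma acc routine seq
int *routine(int init) {
  // Device-side heap allocation (sketch; assumes the device heap is large enough).
  int *ptr = (int *)malloc(10 * sizeof(int));
  if (ptr != NULL) {
    for (int i = 0; i < 10; ++i) {
      ptr[i] = init + i;
    }
  }
  return ptr;  // a device pointer: only usable from device code
}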

openmp, for loop parallelization and critical zone error

I am new to OpenMP and I am using it to implement the Sieve of Eratosthenes. My code is:
int check_eratothenes(int *p, int pn, int n)
{
  int count = 0;
  bool *out = new bool[int(pow(pn, 2))];
  memset(out, 0, pow(pn, 2));
  #pragma omp parallel
  for (int i = 0; i < n; i ++)
  {
    int j = floor((pn + 1) / p[i]) * p[i];
    #pragma omp critical
    while (j <= pow(pn, 2))
    {
      out[j] = 1;
      j += p[i];
    }
  }
  #pragma omp parallel
  for (int i = pn+1; i < pow(pn, 2); i ++)
  {
    #pragma omp critical
    if (out[i] == 0)
    {
      //cout << i << " ";
      count ++;
    }
  }
  return count;
}
But the OpenMP pragmas above are wrong. The code compiles, but when it runs it takes so long to get a result that I have to press Ctrl+C to stop it. I'm at a loss as to how to solve this, since there are many loops and if statements.
Thanks in advance.
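As a note on where the time goes: #pragma omp parallel without for makes every thread execute the whole loop, and the critical sections then serialize all of that duplicated work, so the program does roughly num_threads times the serial work under heavy lock contention. A minimal sketch of the usual restructuring (my own illustration, assuming p[] holds the first n sieving primes as in the question) uses parallel for and a reduction instead of critical:
#include <cstring>

int check_eratothenes(int *p, int pn, int n)
{
  const int limit = pn * pn;
  bool *out = new bool[limit];
  std::memset(out, 0, limit);

  // Split the i iterations across threads. No critical section is needed:
  // every thread only ever stores the value 1 into out[].
  #pragma omp parallel for
  for (int i = 0; i < n; i++) {
    int j = ((pn + 1) / p[i]) * p[i];  // starting multiple, as in the question
    for (; j < limit; j += p[i])       // < limit also avoids the out-of-bounds write at j == pn*pn
      out[j] = 1;
  }

  // Count survivors with a reduction instead of a critical section.
  int count = 0;
  #pragma omp parallel for reduction(+:count)
  for (int i = pn + 1; i < limit; i++)
    if (!out[i])
      count++;

  delete[] out;
  return count;
}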

gcc openmp thread reuse

I am using GCC's implementation of OpenMP to try to parallelize a program. Basically the assignment is to add OpenMP pragmas to obtain a speedup on a program that finds amicable numbers.
The original serial program was given (shown below, except for the 3 lines I added, marked with comments). We have to parallelize first just the outer loop, then just the inner loop. The outer loop was easy and I get close to ideal speedup for a given number of processors. For the inner loop, I get much worse performance than the original serial program. Basically what I am trying to do is a reduction on the sum variable.
Looking at the CPU usage, I am only using ~30% per core. What could be causing this? Is the program continually making new threads every time it hits the omp parallel for clause? Is there just so much more overhead in doing a barrier for the reduction? Or could it be a memory access issue (e.g. cache thrashing)? From what I've read, most implementations of OpenMP reuse threads over time (i.e. they are pooled), so I am not so sure the first problem is what is wrong.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#define numThread 2

int main(int argc, char* argv[]) {
  int ser[29], end, i, j, a, limit, als;
  als = atoi(argv[1]);
  limit = atoi(argv[2]);
  for (i = 2; i < limit; i++) {
    ser[0] = i;
    for (a = 1; a <= als; a++) {
      ser[a] = 1;
      int prev = ser[a-1];
      if ((prev > i) || (a == 1)) {
        end = sqrt(prev);
        int sum = 0;                                                      // added this
        #pragma omp parallel for reduction(+:sum) num_threads(numThread)  // added this
        for (j = 2; j <= end; j++) {
          if (prev % j == 0) {
            sum += j;
            sum += prev / j;
          }
        }
        ser[a] = sum + 1;  // added this
      }
    }
    if (ser[als] == i) {
      printf("%d", i);
      for (j = 1; j < als; j++) {
        printf(", %d", ser[j]);
      }
      printf("\n");
    }
  }
}
OpenMP thread teams are instantiated on entering the parallel section. This means, indeed, that the thread creation is repeated every time the inner loop starts.
To enable reuse of threads, use a larger parallel section (to control the lifetime of the team) and specifically control the parallelism of the outer/inner loops, as in the code below.
Execution time for test.exe 1 1000000 has gone down from 43s to 22s using this fix (and the number of threads reflects the numThread defined value + 1).
PS: Perhaps stating the obvious, parallelizing the inner loop does not appear to be a sound performance strategy. But that is likely the whole point of this exercise, and I won't critique the question for that.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#define numThread 2

int main(int argc, char* argv[]) {
  int ser[29], end, i, j, a, limit, als;
  als = atoi(argv[1]);
  limit = atoi(argv[2]);
  #pragma omp parallel num_threads(numThread)
  {
    #pragma omp single
    for (i = 2; i < limit; i++) {
      ser[0] = i;
      for (a = 1; a <= als; a++) {
        ser[a] = 1;
        int prev = ser[a-1];
        if ((prev > i) || (a == 1)) {
          end = sqrt(prev);
          int sum = 0;                               // added this
          #pragma omp parallel for reduction(+:sum)  // added this
          for (j = 2; j <= end; j++) {
            if (prev % j == 0) {
              sum += j;
              sum += prev / j;
            }
          }
          ser[a] = sum + 1;  // added this
        }
      }
      if (ser[als] == i) {
        printf("%d", i);
        for (j = 1; j < als; j++) {
          printf(", %d", ser[j]);
        }
        printf("\n");
      }
    }
  }
}
