I visited the GNU GSL website and I don't find the example there for solving a differential equation intuitive at all (especially because it uses a second-order differential equation). https://www.gnu.org/software/gsl/manual/html_node/ODE-Example-programs.html#ODE-Example-programs
Can somebody point me to a descriptive guide on how to solve a very simple first-order differential equation with GSL?
For example, suppose my function is y'=x+2y (or any such function): how do I write code in GSL to solve it with a given fixed step size and initial condition?
For y'=f(x,y)=x+2y the arrays all have dimension 1, which is normally something to avoid, but here it is instructional. For the explicit solvers, i.e., those whose names do not contain imp, you do not need the Jacobian:
#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_odeiv2.h>

int odefunc (double x, const double y[], double f[], void *params)
{
    f[0] = x + 2*y[0];
    return GSL_SUCCESS;
}

int main ()
{
    int dim = 1;
    gsl_odeiv2_system sys = {odefunc, NULL, dim, NULL};

    gsl_odeiv2_driver *d =
        gsl_odeiv2_driver_alloc_y_new (&sys, gsl_odeiv2_step_rkf45, 1e-6, 1e-6, 0.0);

    int i;
    double x0 = 0.0, xf = 100.0;   /* start and end of integration interval */
    double x = x0;
    double y[1] = { 1.0 };         /* initial value */

    for (i = 1; i <= 100; i++)
    {
        double xi = x0 + i * (xf - x0) / 100.0;
        int status = gsl_odeiv2_driver_apply (d, &x, xi, y);
        if (status != GSL_SUCCESS)
        {
            printf ("error, return value=%d\n", status);
            break;
        }
        printf ("%.8e %.8e\n", x, y[0]);
    }

    gsl_odeiv2_driver_free (d);
    return 0;
}
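Since the question also asks about a fixed step size: the driver interface has gsl_odeiv2_driver_apply_fixed_step, which advances the solution by n steps of a fixed size h instead of letting the driver adapt the step to the target point. Below is a minimal sketch of that variant; the step size, step count, and stepper choice are arbitrary choices for illustration, not the only reasonable ones.

#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_odeiv2.h>

int odefunc (double x, const double y[], double f[], void *params)
{
    f[0] = x + 2*y[0];             /* same right-hand side as above */
    return GSL_SUCCESS;
}

int main ()
{
    int dim = 1;
    gsl_odeiv2_system sys = {odefunc, NULL, dim, NULL};
    gsl_odeiv2_driver *d =
        gsl_odeiv2_driver_alloc_y_new (&sys, gsl_odeiv2_step_rk4, 1e-3, 1e-8, 1e-8);

    double x = 0.0;
    double y[1] = { 1.0 };          /* initial value */
    double h = 1e-3;                /* fixed step size */
    int i;

    for (i = 1; i <= 100; i++)
    {
        /* take 10 fixed steps of size h, then print the current state */
        int status = gsl_odeiv2_driver_apply_fixed_step (d, &x, h, 10, y);
        if (status != GSL_SUCCESS)
        {
            printf ("error, return value=%d\n", status);
            break;
        }
        printf ("%.8e %.8e\n", x, y[0]);
    }

    gsl_odeiv2_driver_free (d);
    return 0;
}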
You may want to look up the book "Introduction to Computational Modeling Using C and Open-Source Tools" by Jose M. Garrido.
Lutzl, please review:
#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_odeiv2.h>

int odefunc (double x, const double y[], double f[], void *params)
{
    f[0] = x + 2*y[0];
    return GSL_SUCCESS;
}

/* Jacobian: for f(x,y) = x + 2y, df/dy = 2 and df/dx = 1 */
int jac (double x, const double y[], double *dfdy, double dfdx[], void *params)
{
    gsl_matrix_view dfdy_mat = gsl_matrix_view_array (dfdy, 1, 1);
    gsl_matrix *m = &dfdy_mat.matrix;
    gsl_matrix_set (m, 0, 0, 2.0);
    dfdx[0] = 1.0;
    return GSL_SUCCESS;
}

int main ()
{
    int dim = 1;
    gsl_odeiv2_system sys = {odefunc, jac, dim, NULL};
    gsl_odeiv2_driver *d =
        gsl_odeiv2_driver_alloc_y_new (&sys, gsl_odeiv2_step_rk1imp, 1e-7, 1e-7, 0.0);

    double x0 = 0.0, xf = 1.0;     /* integration interval */
    double x = x0;
    double y[1] = { 1.0 };         /* initial value */

    while (x < xf)
    {
        double xi = x + 0.25;      /* advance the target in steps of 0.25 */
        int status = gsl_odeiv2_driver_apply (d, &x, xi, y);
        if (status != GSL_SUCCESS)
        {
            printf ("error, return value=%d\n", status);
            break;
        }
        printf ("%.8e %.8e\n", x, y[0]);
    }

    gsl_odeiv2_driver_free (d);
    return 0;
}
I have a few for loops that do saturated arithmetic operations.
For instance, my implementation of saturated add is as follows:
static void addsat(Vector &R, Vector &A, Vector &B)
{
    int32_t a, b, r;
    int32_t max_add;
    int32_t min_add;
    const int32_t SAT_VALUE  = (1 << (16-1)) - 1;
    const int32_t SAT_VALUE2 = (-SAT_VALUE - 1);
    const int32_t sat_cond   = (SAT_VALUE <= 0x7fffffff);
    const uint32_t SAT       = 0xffffffff >> 16;

    for (int i = 0; i < R.length; i++)
    {
        a = static_cast<uint32_t>(A.data[i]);
        b = static_cast<uint32_t>(B.data[i]);
        max_add = (int32_t)0x7fffffff - a;
        min_add = (int32_t)0x80000000 - a;
        r = (a > 0 && b > max_add) ? 0x7fffffff : a + b;
        r = (a < 0 && b < min_add) ? 0x80000000 : r;   // keep the result of the previous line
        if (sat_cond == 1)
        {
            std_max(r, r, SAT_VALUE2);
            std_min(r, r, SAT_VALUE);
        }
        else
        {
            r = static_cast<uint16_t>(static_cast<int32_t>(r));
        }
        R.data[i] = static_cast<uint16_t>(r);
    }
}
I see that x86 has a packed saturated-add instruction (paddsw) that could have been the perfect match for this loop. The code does get auto-vectorized, but into a combination of multiple operations that follow my code literally. I would like to know the best way to write this loop so that the auto-vectorizer recognizes it as a saturated-add operation.
Vector structure is:
struct V {
static constexpr int length = 32;
unsigned short data[32];
};
The compiler used is clang 3.8, and the code was compiled for the AVX2 Haswell x86-64 architecture.
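One formulation that auto-vectorizers tend to pattern-match more readily is a plain widened add followed by a clamp to the signed 16-bit range. The sketch below is only an illustration of that idiom: it treats the stored 16-bit values as signed (which is what paddsw/vpaddsw does), so it is not guaranteed to be bit-for-bit equivalent to the loop above, and whether clang 3.8 specifically turns it into a packed saturated add is not guaranteed either.

#include <cstdint>

struct Vector {                     // same layout as the V struct above
    static constexpr int length = 32;
    unsigned short data[32];
};

// Add in 32 bits, then clamp the sum to [-32768, 32767] before storing.
static void addsat_clamped(Vector &R, const Vector &A, const Vector &B)
{
    for (int i = 0; i < R.length; i++)
    {
        int32_t sum = static_cast<int16_t>(A.data[i]) + static_cast<int16_t>(B.data[i]);
        if (sum >  32767) sum =  32767;
        if (sum < -32768) sum = -32768;
        R.data[i] = static_cast<uint16_t>(sum);
    }
}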
The error is:
/usr/include/c++/4.8/bits/stl_algo.h:2159:29: error: invalid conversion from 'int' to 'const char*' [-fpermissive]
if (__comp(*__i, *__first))
I am passing two strings X and Y. The sort should compare XY and YX and pick X if XY > YX, otherwise Y (for example, for 9 and 34 it compares "934" with "349"; since "934" > "349", 9 comes first). X and Y will have values like 33 or 9999.
string Solution::largestNumber(const vector<int> &A) {
    int i, n;
    vector<string> B;
    string str;
    for (i = 0; i < A.size(); i++)
    {
        B[i] = to_string(A[i]);
    }
    sort(A.begin(), A.end(), [](const string lhs, const string rhs){
        return rhs + lhs < lhs + rhs;
    });
    for (i = 0; i < A.size(); i++)
    {
        str += to_string(A[i]);
    }
    return str;
}
You are calling std::sort on a range from A, which is a std::vector<int>, so the compare function should compare ints.
You could fill B initially with 0, 1, ..., A.size()-1; then, given two indexes i1 and i2 to compare, construct the strings and compare them.
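A minimal sketch of that index-based approach (the function name here is made up for illustration):

#include <algorithm>
#include <numeric>
#include <string>
#include <vector>
using namespace std;

// Sort indices into A by comparing the concatenated string forms,
// then build the result by walking the sorted index order.
string largestNumberByIndex(const vector<int> &A) {
    vector<size_t> idx(A.size());
    iota(idx.begin(), idx.end(), 0);                 // fill with 0, 1, ..., A.size()-1
    sort(idx.begin(), idx.end(), [&A](size_t i1, size_t i2) {
        string s1 = to_string(A[i1]);
        string s2 = to_string(A[i2]);
        return s2 + s1 < s1 + s2;                    // the "larger" concatenation goes first
    });
    string str;
    for (size_t i : idx)
        str += to_string(A[i]);
    return str;
}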
the error is: /usr/include/c++/4.8/bits/stl_algo.h:2159:29: error: invalid conversion from 'int' to 'const char*' [-fpermissive] if (__comp(*__i, *__first))
sort(A.begin(), A.end(),[](const string lhs, const string rhs){...});
Your sort comparator expects to compare two std::strings, but the element type of A is int. You created B to hold A's elements converted to std::string, so use B.
Other notes:
string Solution::largestNumber(const vector<int> &A) {
int i,n; // n???
// vector<string> B; // B is empty. Calling B[i] will cause a
// segfault for attempting to access memory
// out of bounds.
vector<string> B(A.size()); // Fix: pass in the size. Now creates B
// with size elements that can be accessed.
string str;
// Consider using algorithms like std::transform
for(i=0; i<A.size(); i++)
{
B[i]=to_string(A[i]); // You won't segfault here now.
}
// Corrected your sort.
sort(B.begin(), B.end(),[](const string lhs, const string rhs){
return rhs+lhs < lhs+rhs;
});
// Appending should be done on the sorted string vector. Consider using
// a ranged-based for loop here.
for(i=0; i<B.size(); i++)
{
str+= B[i];
}
return str;
}
OK, so let's say I have an (N x N) matrix that I would like to process. This matrix is quite large for my computer, and if I try to send it to the device all at once I get an 'out of memory' error.
So is there a way to send sections of the matrix to the device? One way I can see to do it is to copy portions of the matrix on the host, send these manageable portions from the host to the device, and then put the results back together at the end.
Here is something I have tried, but the cudaMemcpy in the for loop returns error code 11, 'invalid argument'.
int h_N = 10000;
size_t h_size_m = h_N*sizeof(float);
h_A = (float*)malloc(h_size_m*h_size_m);
int d_N = 2500;
size_t d_size_m = d_N*sizeof(float);
InitializeMatrices(h_N);
int i;
int iterations = (h_N*h_N)/(d_N*d_N);
for( i = 0; i < iterations; i++ )
{
    float* h_array_ref = h_A + (i*d_N*d_N);
    cudasafe( cudaMemcpy(d_A, h_array_ref, d_size_m*d_size_m, cudaMemcpyHostToDevice), "cudaMemcpy");
    cudasafe( cudaFree(d_A), "cudaFree(d_A)" );
}
What I'm trying to accomplish with the above code is this: instead of sending the entire matrix to the device, I send a pointer to a place within that matrix, reserve enough space on the device to do the work, and then with each iteration of the loop move the pointer forward within the matrix, and so on.
Not only can you do this (assuming your problem decomposes easily into sub-arrays this way), it can be a very useful thing to do for performance; once you get the basic approach you've described working, you can start using asynchronous memory copies and double-buffering to overlap some of the memory-transfer time with the time spent computing what is already on the card (a rough sketch of that variant appears after the timing results below).
But first, get the simple thing working. Below is a 1D example (multiplying a vector by a scalar and adding another scalar), but using a linearized 2D array would be the same; the key part is:
CHK_CUDA( cudaMalloc(&xd, batchsize*sizeof(float)) );
CHK_CUDA( cudaMalloc(&yd, batchsize*sizeof(float)) );
tick(&gputimer);
int nbatches = 0;
for (int nstart=0; nstart < n; nstart+=batchsize) {
int size=batchsize;
if ((nstart + batchsize) > n) size = n - nstart;
CHK_CUDA( cudaMemcpy(xd, &(x[nstart]), size*sizeof(float), cudaMemcpyHostToDevice) );
blocksize = (size+nblocks-1)/nblocks;
cuda_saxpb<<<nblocks, blocksize>>>(xd, a, b, yd, size);
CHK_CUDA( cudaMemcpy(&(ycuda[nstart]), yd, size*sizeof(float), cudaMemcpyDeviceToHost) );
nbatches++;
}
gputime = tock(&gputimer);
CHK_CUDA( cudaFree(xd) );
CHK_CUDA( cudaFree(yd) );
You allocate the buffers at the start, and then loop through until you're done, each time doing the copy, starting the kernel, and then copying back. You free at the end.
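Translated back to the variables in your question, that pattern would look roughly like the sketch below. Two things stand out in the original loop: d_A is never shown being allocated (and is freed inside the loop), and the byte count d_size_m*d_size_m works out to d_N*d_N*sizeof(float)*sizeof(float), which is sizeof(float) times too many bytes; either of those could plausibly produce the 'invalid argument' error. The sketch assumes d_A is meant to hold one d_N x d_N tile of floats and reuses your cudasafe wrapper.

float *d_A;
size_t tile_bytes = (size_t)d_N * d_N * sizeof(float);        /* bytes in one d_N x d_N tile */
cudasafe( cudaMalloc((void **)&d_A, tile_bytes), "cudaMalloc(d_A)" );

for (int i = 0; i < iterations; i++)
{
    float *h_tile = h_A + (size_t)i * d_N * d_N;               /* next tile in the host matrix */
    cudasafe( cudaMemcpy(d_A, h_tile, tile_bytes, cudaMemcpyHostToDevice), "cudaMemcpy" );
    /* ... launch a kernel that works on d_A, then copy results back ... */
}

cudasafe( cudaFree(d_A), "cudaFree(d_A)" );                    /* free once, after the loop */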
The full code is
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <cuda.h>
#include <sys/time.h>
#include <math.h>
#define CHK_CUDA(e) {if (e != cudaSuccess) {fprintf(stderr,"Error: %s\n", cudaGetErrorString(e)); exit(-1);}}
__global__ void cuda_saxpb(const float *xd, const float a, const float b,
float *yd, const int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i<n) {
yd[i] = a*xd[i]+b;
}
return;
}
void cpu_saxpb(const float *x, float a, float b, float *y, int n) {
int i;
for (i=0;i<n;i++) {
y[i] = a*x[i]+b;
}
return;
}
int get_options(int argc, char **argv, int *n, int *s, int *nb, float *a, float *b);
void tick(struct timeval *timer);
double tock(struct timeval *timer);
int main(int argc, char **argv) {
int n=1000;
int nblocks=10;
int batchsize=100;
float a = 5.;
float b = -1.;
int err;
float *x, *y, *ycuda;
float *xd, *yd;
double abserr;
int blocksize;
int i;
struct timeval cputimer;
struct timeval gputimer;
double cputime, gputime;
err = get_options(argc, argv, &n, &batchsize, &nblocks, &a, &b);
if (batchsize > n) {
fprintf(stderr, "Resetting batchsize to size of vector, %d\n", n);
batchsize = n;
}
if (err) return 0;
x = (float *)malloc(n*sizeof(float));
if (!x) return 1;
y = (float *)malloc(n*sizeof(float));
if (!y) {free(x); return 1;}
ycuda = (float *)malloc(n*sizeof(float));
if (!ycuda) {free(y); free(x); return 1;}
/* run CPU code */
tick(&cputimer);
cpu_saxpb(x, a, b, y, n);
cputime = tock(&cputimer);
/* run GPU code */
/* only have to allocate once */
CHK_CUDA( cudaMalloc(&xd, batchsize*sizeof(float)) );
CHK_CUDA( cudaMalloc(&yd, batchsize*sizeof(float)) );
tick(&gputimer);
int nbatches = 0;
for (int nstart=0; nstart < n; nstart+=batchsize) {
int size=batchsize;
if ((nstart + batchsize) > n) size = n - nstart;
CHK_CUDA( cudaMemcpy(xd, &(x[nstart]), size*sizeof(float), cudaMemcpyHostToDevice) );
blocksize = (size+nblocks-1)/nblocks;
cuda_saxpb<<<nblocks, blocksize>>>(xd, a, b, yd, size);
CHK_CUDA( cudaMemcpy(&(ycuda[nstart]), yd, size*sizeof(float), cudaMemcpyDeviceToHost) );
nbatches++;
}
gputime = tock(&gputimer);
CHK_CUDA( cudaFree(xd) );
CHK_CUDA( cudaFree(yd) );
abserr = 0.;
for (i=0;i<n;i++) {
abserr += fabs(ycuda[i] - y[i]);
}
printf("Y = a*X + b, problemsize = %d\n", n);
printf("CPU time = %lg millisec.\n", cputime*1000.);
printf("GPU time = %lg millisec (done with %d batches of %d).\n",
gputime*1000., nbatches, batchsize);
printf("CUDA and CPU results differ by %lf\n", abserr);
free(x);
free(y);
free(ycuda);
return 0;
}
int get_options(int argc, char **argv, int *n, int *s, int *nb, float *a, float *b) {
const struct option long_options[] = {
{"nvals" , required_argument, 0, 'n'},
{"nblocks" , required_argument, 0, 'B'},
{"batchsize" , required_argument, 0, 's'},
{"a", required_argument, 0, 'a'},
{"b", required_argument, 0, 'b'},
{"help", no_argument, 0, 'h'},
{0, 0, 0, 0}};
int c;   /* getopt_long returns an int */
int option_index;
int tempint;
while (1) {
c = getopt_long(argc, argv, "n:B:a:b:s:h", long_options, &option_index);
if (c == -1) break;
switch(c) {
case 'n': tempint = atoi(optarg);
if (tempint < 1 || tempint > 500000) {
fprintf(stderr,"%s: Cannot use number of points %s;\n Using %d\n", argv[0], optarg, *n);
} else {
*n = tempint;
}
break;
case 's': tempint = atoi(optarg);
if (tempint < 1 || tempint > 50000) {
fprintf(stderr,"%s: Cannot use number of points %s;\n Using %d\n", argv[0], optarg, *s);
} else {
*s = tempint;
}
break;
case 'B': tempint = atoi(optarg);
if (tempint < 1 || tempint > 1000 || tempint > *n) {
fprintf(stderr,"%s: Cannot use number of blocks %s;\n Using %d\n", argv[0], optarg, *nb);
} else {
*nb = tempint;
}
break;
case 'a': *a = atof(optarg);
break;
case 'b': *b = atof(optarg);
break;
case 'h':
puts("Calculates y[i] = a*x[i] + b on the GPU.");
puts("Options: ");
puts(" --nvals=N (-n N): Set the number of values in y,x.");
puts(" --batchsize=N (-s N): Set the number of values to transfer at a time.");
puts(" --nblocks=N (-B N): Set the number of blocks used.");
puts(" --a=X (-a X): Set the parameter a.");
puts(" --b=X (-b X): Set the parameter b.");
puts(" --niters=N (-I X): Set number of iterations to calculate.");
puts("");
return +1;
}
}
return 0;
}
void tick(struct timeval *timer) {
gettimeofday(timer, NULL);
}
double tock(struct timeval *timer) {
struct timeval now;
gettimeofday(&now, NULL);
return (now.tv_usec-timer->tv_usec)/1.0e6 + (now.tv_sec - timer->tv_sec);
}
Running this one gets:
$ ./batched-saxpb --nvals=10240 --batchsize=10240 --nblocks=20
Y = a*X + b, problemsize = 10240
CPU time = 0.072 millisec.
GPU time = 0.117 millisec (done with 1 batches of 10240).
CUDA and CPU results differ by 0.000000
$ ./batched-saxpb --nvals=10240 --batchsize=5120 --nblocks=20
Y = a*X + b, problemsize = 10240
CPU time = 0.066 millisec.
GPU time = 0.133 millisec (done with 2 batches of 5120).
CUDA and CPU results differ by 0.000000
$ ./batched-saxpb --nvals=10240 --batchsize=2560 --nblocks=20
Y = a*X + b, problemsize = 10240
CPU time = 0.067 millisec.
GPU time = 0.167 millisec (done with 4 batches of 2560).
CUDA and CPU results differ by 0.000000
The GPU time goes up in this case (we're doing more memory copies) but the answers stay the same.
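For reference, the asynchronous, double-buffered variant mentioned at the start could look roughly like the sketch below. It is not compiled or timed here; it replaces the allocate/loop/free section of the listing above, reuses its variables and the CHK_CUDA macro, and assumes x and ycuda were allocated as pinned memory (cudaMallocHost) so the asynchronous copies can actually overlap with the kernels.

/* two device buffers and two streams: the copies for batch k+1 can overlap
   with the kernel for batch k */
float *xd[2], *yd[2];
cudaStream_t stream[2];
for (int s = 0; s < 2; s++) {
    CHK_CUDA( cudaMalloc(&xd[s], batchsize*sizeof(float)) );
    CHK_CUDA( cudaMalloc(&yd[s], batchsize*sizeof(float)) );
    CHK_CUDA( cudaStreamCreate(&stream[s]) );
}

int buf = 0;
for (int nstart = 0; nstart < n; nstart += batchsize, buf ^= 1) {
    int size = batchsize;
    if ((nstart + batchsize) > n) size = n - nstart;

    CHK_CUDA( cudaMemcpyAsync(xd[buf], &(x[nstart]), size*sizeof(float),
                              cudaMemcpyHostToDevice, stream[buf]) );
    int bs = (size + nblocks - 1)/nblocks;
    cuda_saxpb<<<nblocks, bs, 0, stream[buf]>>>(xd[buf], a, b, yd[buf], size);
    CHK_CUDA( cudaMemcpyAsync(&(ycuda[nstart]), yd[buf], size*sizeof(float),
                              cudaMemcpyDeviceToHost, stream[buf]) );
}
CHK_CUDA( cudaDeviceSynchronize() );   /* wait for all batches to finish */

for (int s = 0; s < 2; s++) {
    CHK_CUDA( cudaFree(xd[s]) );
    CHK_CUDA( cudaFree(yd[s]) );
    CHK_CUDA( cudaStreamDestroy(stream[s]) );
}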
Edited: The original version of this code had an option for running multiple iterations of the kernel for timing purposes, but that's unnecessarily confusing in this context so it's removed.
I'm trying to measure the performance difference between using Boost.Variant and using virtual interfaces. For example, suppose I want to increment different types of numbers uniformly. Using Boost.Variant, I would use a boost::variant over int and float and a static visitor that increments each of them. Using class interfaces, I would use a pure virtual class number, with number_int and number_float classes that derive from it and implement an "increment" method.
From my testing, using interfaces is far faster than using Boost.Variant.
I ran the code at the bottom and received these results:
Virtual: 00:00:00.001028
Variant: 00:00:00.012081
What do you suppose explains this difference? I thought Boost.Variant would be a lot faster.
** Note: Usually Boost.Variant uses heap allocations to guarantee that the variant is always non-empty. But I read in the Boost.Variant documentation that if boost::has_nothrow_copy is true then it doesn't use heap allocations, which should make things significantly faster. For int and float, boost::has_nothrow_copy is true.
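A quick way to check that claim for the types involved (a standalone check sketch, separate from the benchmark code below):

#include <boost/type_traits/has_nothrow_copy.hpp>
#include <iostream>

int main() {
    // Both lines should print 1, so the int/float variant can avoid the heap backup.
    std::cout << boost::has_nothrow_copy<int>::value << "\n";
    std::cout << boost::has_nothrow_copy<float>::value << "\n";
}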
Here is my code for measuring the two approaches against each other.
#include <iostream>
#include <boost/variant/variant.hpp>
#include <boost/variant/static_visitor.hpp>
#include <boost/variant/apply_visitor.hpp>
#include <boost/date_time/posix_time/ptime.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/date_time/posix_time/posix_time_io.hpp>
#include <boost/format.hpp>
const int iterations_count = 100000;
// a visitor that increments a variant by N
template <int N>
struct add : boost::static_visitor<> {
template <typename T>
void operator() (T& t) const {
t += N;
}
};
// a number interface
struct number {
virtual void increment() = 0;
};
// number interface implementation for all types
template <typename T>
struct number_ : number {
number_(T t = 0) : t(t) {}
virtual void increment() {
t += 1;
}
T t;
};
void use_virtual() {
number_<int> num_int;
number* num = &num_int;
for (int i = 0; i < iterations_count; i++) {
num->increment();
}
}
void use_variant() {
typedef boost::variant<int, float, double> number;
number num = 0;
for (int i = 0; i < iterations_count; i++) {
boost::apply_visitor(add<1>(), num);
}
}
int main() {
using namespace boost::posix_time;
ptime start, end;
time_duration d1, d2;
// virtual
start = microsec_clock::universal_time();
use_virtual();
end = microsec_clock::universal_time();
// store result
d1 = end - start;
// variant
start = microsec_clock::universal_time();
use_variant();
end = microsec_clock::universal_time();
// store result
d2 = end - start;
// output
std::cout <<
boost::format(
"Virtual: %1%\n"
"Variant: %2%\n"
) % d1 % d2;
}
For those interested: after getting a bit frustrated, I passed the -O2 option to the compiler, and boost::variant was way faster than a virtual call.
Thanks
It is no surprise that -O2 reduces the variant time: the whole loop gets optimized away. Change the implementation to return the accumulated result to the caller, so that the optimizer cannot remove the loop, and you'll get the real difference:
Output:
Virtual: 00:00:00.000120 = 10000000
Variant: 00:00:00.013483 = 10000000
#include <iostream>
#include <boost/variant/variant.hpp>
#include <boost/variant/static_visitor.hpp>
#include <boost/variant/apply_visitor.hpp>
#include <boost/date_time/posix_time/ptime.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/date_time/posix_time/posix_time_io.hpp>
#include <boost/format.hpp>
const int iterations_count = 100000000;
// a visitor that increments a variant by N
template <int N>
struct add : boost::static_visitor<> {
template <typename T>
void operator() (T& t) const {
t += N;
}
};
// a visitor that increments a variant by N
template <typename T, typename V>
T get(const V& v) {
struct getter : boost::static_visitor<T> {
T operator() (T t) const { return t; }
};
return boost::apply_visitor(getter(), v);
}
// a number interface
struct number {
virtual void increment() = 0;
};
// number interface implementation for all types
template <typename T>
struct number_ : number {
number_(T t = 0) : t(t) {}
virtual void increment() { t += 1; }
T t;
};
int use_virtual() {
number_<int> num_int;
number* num = &num_int;
for (int i = 0; i < iterations_count; i++) {
num->increment();
}
return num_int.t;
}
int use_variant() {
typedef boost::variant<int, float, double> number;
number num = 0;
for (int i = 0; i < iterations_count; i++) {
boost::apply_visitor(add<1>(), num);
}
return get<int>(num);
}
int main() {
using namespace boost::posix_time;
ptime start, end;
time_duration d1, d2;
// virtual
start = microsec_clock::universal_time();
int i1 = use_virtual();
end = microsec_clock::universal_time();
// store result
d1 = end - start;
// variant
start = microsec_clock::universal_time();
int i2 = use_variant();
end = microsec_clock::universal_time();
// store result
d2 = end - start;
// output
std::cout <<
boost::format(
"Virtual: %1% = %2%\n"
"Variant: %3% = %4%\n"
) % d1 % i1 % d2 % i2;
}