Generate random number in MIPS

How can I generate a random number in MIPS, similar to what the C standard library provides? Please help me. This is the C code:
#include <stdint.h>

/* MWC state; must be seeded with nonzero values (these particular seeds are arbitrary) */
static uint32_t m_z = 362436069;
static uint32_t m_w = 521288629;

uint32_t get_random(void);

uint32_t random_in_range(uint32_t low, uint32_t high)
{
    uint32_t range = high - low + 1;
    uint32_t rand_num = get_random();
    return (rand_num % range) + low;
}

// Generate random 32-bit unsigned number
// based on multiply-with-carry method shown
// at http://en.wikipedia.org/wiki/Random_number_generation
uint32_t get_random(void)
{
    uint32_t result;
    m_z = 36969 * (m_z & 65535) + (m_z >> 16);
    m_w = 18000 * (m_w & 65535) + (m_w >> 16);
    result = (m_z << 16) + m_w; /* 32-bit result */
    return result;
}
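For reference, a minimal C harness around the snippet above (my addition; the seed values above are arbitrary nonzero choices, and note the modulo step in random_in_range has a slight bias unless the range divides 2^32 evenly):

#include <stdio.h>

int main(void)
{
    /* hypothetical demo: ten simulated dice rolls in [1, 6] */
    for (int i = 0; i < 10; i++)
        printf("%u\n", random_in_range(1, 6));
    return 0;
}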


Getting random integer without 3 set bits in a row

Is there a performant way to generate an unbiased 64-bit random integer without 3 set bits in a row, assuming a fast and unbiased input PRNG? I don't care about 'wasting bits' of the input source.
That is, something better than the naive rejection-sampling approach:
uint64_t r;
do {
    r = get_rand_64();
} while (r & (r >> 1) & (r >> 2));
...which "works", but is very slow: it iterates ~187 times on average, which matches the acceptance rate, since only about 0.53% of all 64-bit values (TRIBO[64]/2^64, in the notation of the answers below) have no three consecutive set bits.
One possibility I've explored is roughly:
bool p2 = get_rand_bit();
bool p1 = get_rand_bit();
uint64_t r = ((uint64_t)p1 << 1) | p2;
for (int i = 2; i < 64; i++) {
    bool p0 = (p1 && p2) ? false : get_rand_bit();
    r |= (uint64_t)p0 << i;  /* cast before shifting: i can exceed 31 */
    p2 = p1;
    p1 = p0;
}
...however, this is still slow, mainly because with this approach the entire calculation is bit-serial. EDIT: and it's also biased. Easiest to see with a 3-bit integer: 0b011 occurs 1/8th of the time, which is wrong (it should be 1/7th).
I've tried doing various parallel fixups, but haven't been able to come up with anything unbiased. It's useful to play around with 4-bit integers first: e.g. setting all bits involved in a conflict to random values ends up biased, and drawing out the Markov chain for 4 bits makes that obvious.
Is there a better way to do this?
I optimized the lexicographic decoder, resulting in a four-fold speedup relative to my previous answer. There are two new ideas:
Use the one-to-one correspondence implied by the recurrence T(n) = T(k−1) T(n−k) + T(k−2) T(n−k−1) + T(k−2) T(n−k−2) + T(k−3) T(n−k−1) to avoid working one bit at a time (the code instantiates it with k = 13, which is exactly the identity asserted in DecodeNo111);
Cache the small words without 111 in addition to the recurrence values, trading an L1 cache hit for a number of arithmetic operations.
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { kTribonacci14 = 5768 };

/* g_tribonacci[n] counts the n-bit words with no 111. */
static uint64_t g_tribonacci[65];

static void InitTribonacci(void) {
  for (unsigned i = 0; i < 65; i++) {
    g_tribonacci[i] =
        i < 3 ? 1 << i
              : g_tribonacci[i - 1] + g_tribonacci[i - 2] + g_tribonacci[i - 3];
  }
  assert(g_tribonacci[14] == kTribonacci14);
}

/* All 14-bit words with no 111, in increasing order. */
static uint16_t g_words_no_111[kTribonacci14];

static void InitCachedWordsNo111(void) {
  unsigned i = 0;
  for (unsigned word = 0; word < ((unsigned)1 << 14); word++) {
    if ((word & (word >> 1) & (word >> 2)) == 0) {
      assert(i < kTribonacci14);
      g_words_no_111[i++] = (uint16_t)word;
    }
  }
  assert(i == kTribonacci14);
}

/* Handles one term of the recurrence: if *index falls within this term's
   block of left_count * right_count words, emit the cached left word,
   reduce the problem to the right part, and return true. */
static bool CaseNo111(uint64_t *restrict result, unsigned *restrict n,
                      uint64_t *restrict index, unsigned left_n,
                      unsigned right_n) {
  uint64_t left_count = g_tribonacci[left_n];
  uint64_t right_count = g_tribonacci[right_n];
  uint64_t product = left_count * right_count;
  if (*index >= product) {
    *index -= product;
    return false;
  }
  *result = (*result << left_n) + g_words_no_111[*index / right_count];
  *n = right_n;
  *index %= right_count;
  return true;
}

static void Append(uint64_t *result, uint64_t bit) {
  *result = (*result << 1) + bit;
}

/* Maps index in [0, g_tribonacci[n]) to a distinct n-bit word with no 111. */
static uint64_t DecodeNo111(unsigned n, uint64_t index) {
  assert(0 <= n && n <= 64);
  assert(index < g_tribonacci[n]);
  uint64_t result = 0;
  while (n > 14) {
    assert(g_tribonacci[n] == g_tribonacci[12] * g_tribonacci[n - 13] +
                                  g_tribonacci[11] * g_tribonacci[n - 14] +
                                  g_tribonacci[11] * g_tribonacci[n - 15] +
                                  g_tribonacci[10] * g_tribonacci[n - 14]);
    if (CaseNo111(&result, &n, &index, 12, n - 13)) {
      /* 12 free bits, then the separator 0. */
      Append(&result, 0);
    } else if (CaseNo111(&result, &n, &index, 11, n - 14)) {
      /* 11 free bits, then the separator 010. */
      Append(&result, 0);
      Append(&result, 1);
      Append(&result, 0);
    } else if (CaseNo111(&result, &n, &index, 11, n - 15)) {
      /* 11 free bits, then the separator 0110. */
      Append(&result, 0);
      Append(&result, 1);
      Append(&result, 1);
      Append(&result, 0);
    } else if (CaseNo111(&result, &n, &index, 10, n - 14)) {
      /* 10 free bits, then the separator 0110. */
      Append(&result, 0);
      Append(&result, 1);
      Append(&result, 1);
      Append(&result, 0);
    } else {
      assert(false);
    }
  }
  return (result << n) + g_words_no_111[index];
}

static void PrintWord(unsigned n, uint64_t word) {
  assert(0 <= n && n <= 64);
  while (n-- > 0) {
    putchar('0' + ((word >> n) & 1));
  }
  putchar('\n');
}

int main(void) {
  InitTribonacci();
  InitCachedWordsNo111();
  if ((false)) {
    /* Exhaustively print all 20-bit words with no 111, for verification. */
    enum { kN = 20 };
    for (uint64_t i = 0; i < g_tribonacci[kN]; i++) {
      PrintWord(kN, DecodeNo111(kN, i));
    }
  }
  /* Benchmark: decode ten million pseudorandomly chosen indexes. */
  uint64_t sum = 0;
  uint64_t index = 0;
  for (uint32_t i = 0; i < 10000000; i++) {
    sum += DecodeNo111(64, index % g_tribonacci[64]);
    index = (index * 2862933555777941757) + 3037000493;
  }
  return sum & 127;
}
From @John Coleman's comment, here's the start of an approach based on Tribonacci numbers. Basic idea:
Generate an unbiased number in the range [0..T(bits)), where T(0) = 1, T(1) = 2, T(2) = 4, T(n) = T(n-1) + T(n-2) + T(n-3).
Convert to Tribonacci representation.
You're done.
A minimal example is as follows:
// 1, 2, 4, TRIBO[n-3]+TRIBO[n-2]+TRIBO[n-1]
// possible minor perf optimization: reverse TRIBO
static const uint64_t TRIBO[65] = {
    1, 2, 4, 7, 13, 24, 44, 81, 149, 274, 504, 927, 1705, 3136, 5768,
    10609, 19513, 35890, 66012, 121415, 223317, 410744, 755476, 1389537,
    2555757, 4700770, 8646064, 15902591, 29249425, 53798080, 98950096,
    181997601, 334745777, 615693474, 1132436852, 2082876103, 3831006429,
    7046319384, 12960201916, 23837527729, 43844049029, 80641778674,
    148323355432, 272809183135, 501774317241, 922906855808, 1697490356184,
    3122171529233, 5742568741225, 10562230626642, 19426970897100,
    35731770264967, 65720971788709, 120879712950776, 222332455004452,
    408933139743937, 752145307699165, 1383410902447554, 2544489349890656,
    4680045560037375, 8607945812375585, 15832480722303616,
    29120472094716576, 53560898629395777, 98513851446415969};

// exclusive of max
extern uint64_t get_rand_64_range(uint64_t max);

uint64_t get_rand_no111(void) {
    uint64_t idx = get_rand_64_range(TRIBO[64]);
    uint64_t ret = 0;
    for (int i = 63; i >= 0; i--) {
        if (idx >= TRIBO[i]) {
            ret |= ((uint64_t) 1) << i;
            idx -= TRIBO[i];
        }
        // optional: if (idx == 0) { break; }
    }
    return ret;
}
(Warning: retyped from Python code. I suggest testing.)
This satisfies the 'unbiased' portion, and is indeed faster than the naive rejection-sampling approach, but unfortunately is still pretty slow, because it's looping ~64 times.
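To make the decode loop concrete with a small case (my worked example): for a 4-bit variant, the relevant values are TRIBO[0..3] = 1, 2, 4, 7, and idx = 5 decodes as 5 >= TRIBO[2] = 4 (set bit 2, leaving 1), then 1 < TRIBO[1] = 2 (skip bit 1), then 1 >= TRIBO[0] = 1 (set bit 0), giving 0b0101. The greedy scan can never set three consecutive bits i, i-1, i-2: that would require idx >= TRIBO[i] + TRIBO[i-1] + TRIBO[i-2] = TRIBO[i+1], which was already ruled out at the previous position.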
The idea behind the code below is to generate the upper bits with the proper (non-uniform!) distribution, then generate the lower bits conditional on the upper (the code works on 16 upper bits at a time, recursing on the remainder). On my laptop, it’s significantly faster than the baseline, and slightly faster than lexicographic decoding.
You can see the logic behind the non-uniform upper distribution with 4-bit outputs: 00 and 10 have four 2-bit lowers, 01 has three lowers, and 11 has two lowers.
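A quick brute-force check of those counts (my sketch, not part of the original answer):

#include <stdio.h>

int main(void)
{
    int counts[4] = {0, 0, 0, 0};           /* indexed by the upper 2 bits */
    for (unsigned w = 0; w < 16; w++)
        if ((w & (w >> 1) & (w >> 2)) == 0) /* no 111 in the 4-bit word */
            counts[w >> 2]++;
    /* prints: upper 00 -> 4, 01 -> 3, 10 -> 4, 11 -> 2 */
    for (int u = 0; u < 4; u++)
        printf("upper %d%d: %d lowers\n", (u >> 1) & 1, u & 1, counts[u]);
    return 0;
}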
#include <cstdint>
#include <random>

namespace {

using Generator = std::mt19937_64;

template <int bits> std::uint64_t GenerateUniform(Generator &gen) {
  static_assert(0 <= bits && bits <= 63);
  return gen() & ((std::uint64_t{1} << bits) - 1);
}

template <> std::uint64_t GenerateUniform<64>(Generator &gen) { return gen(); }

template <int bits> std::uint64_t GenerateNo111Baseline(Generator &gen) {
  std::uint64_t r;
  do {
    r = GenerateUniform<bits>(gen);
  } while (r & (r >> 1) & (r >> 2));
  return r;
}

template <int bits> struct Tribonacci {
  static constexpr std::uint64_t value = Tribonacci<bits - 1>::value +
                                         Tribonacci<bits - 2>::value +
                                         Tribonacci<bits - 3>::value;
};
template <> struct Tribonacci<0> { static constexpr std::uint64_t value = 1; };
template <> struct Tribonacci<-1> { static constexpr std::uint64_t value = 1; };
template <> struct Tribonacci<-2> { static constexpr std::uint64_t value = 0; };

template <int bits> std::uint64_t GenerateNo111(Generator &gen) {
  constexpr int upper_bits = 16;
  constexpr int lower_bits = bits - upper_bits;
  for (;;) {
    // Drawing the upper inside the rejection loop is what shapes its
    // marginal distribution: each upper survives in proportion to the
    // number of lowers that can follow it.
    const std::uint64_t upper = GenerateNo111Baseline<upper_bits>(gen);
    if ((upper & 1) == 0) {
      // Upper ends in 0: every no-111 lower is compatible.
      return (upper << lower_bits) + GenerateNo111<lower_bits>(gen);
    }
    std::uint64_t outcome = std::uniform_int_distribution<std::uint64_t>{
        0, Tribonacci<lower_bits>::value - 1}(gen);
    if ((upper & 2) == 0) {
      // Upper ends in 01: the lower may additionally start with 10
      // (Tribonacci<lower_bits - 2> cases).
      if (outcome < Tribonacci<lower_bits - 2>::value) {
        return (upper << lower_bits) + (std::uint64_t{1} << (lower_bits - 1)) +
               GenerateNo111<lower_bits - 2>(gen);
      }
      outcome -= Tribonacci<lower_bits - 2>::value;
    }
    // Any upper ending in 1 may be followed by a lower starting with 0
    // (Tribonacci<lower_bits - 1> cases); otherwise reject and redraw.
    if (outcome < Tribonacci<lower_bits - 1>::value) {
      return (upper << lower_bits) + GenerateNo111<lower_bits - 1>(gen);
    }
  }
}

#define BASELINE(bits)                                               \
  template <> std::uint64_t GenerateNo111<bits>(Generator & gen) {   \
    return GenerateNo111Baseline<bits>(gen);                         \
  }
BASELINE(0)
BASELINE(1)
BASELINE(2)
BASELINE(3)
BASELINE(4)
BASELINE(5)
BASELINE(6)
BASELINE(7)
BASELINE(8)
BASELINE(9)
BASELINE(10)
BASELINE(11)
BASELINE(12)
BASELINE(13)
BASELINE(14)
BASELINE(15)
BASELINE(16)
#undef BASELINE
static const std::uint64_t TRIBO[65] = {
    1, 2, 4, 7, 13, 24, 44, 81, 149, 274, 504, 927, 1705, 3136, 5768,
    10609, 19513, 35890, 66012, 121415, 223317, 410744, 755476, 1389537,
    2555757, 4700770, 8646064, 15902591, 29249425, 53798080, 98950096,
    181997601, 334745777, 615693474, 1132436852, 2082876103, 3831006429,
    7046319384, 12960201916, 23837527729, 43844049029, 80641778674,
    148323355432, 272809183135, 501774317241, 922906855808, 1697490356184,
    3122171529233, 5742568741225, 10562230626642, 19426970897100,
    35731770264967, 65720971788709, 120879712950776, 222332455004452,
    408933139743937, 752145307699165, 1383410902447554, 2544489349890656,
    4680045560037375, 8607945812375585, 15832480722303616,
    29120472094716576, 53560898629395777, 98513851446415969};
std::uint64_t get_rand_no111(Generator &gen) {
  std::uint64_t idx =
      std::uniform_int_distribution<std::uint64_t>{0, TRIBO[64] - 1}(gen);
  std::uint64_t ret = 0;
  for (int i = 63; i >= 0; --i) {
    if (idx >= TRIBO[i]) {
      ret |= std::uint64_t{1} << i;
      idx -= TRIBO[i];
    }
  }
  return ret;
}

} // namespace

int main() {
  Generator gen{std::random_device{}()};
  std::uint64_t sum = 0;
  for (std::int32_t i = 0; i < 10000000; i++) {
    if constexpr (true) {
      sum += GenerateNo111<64>(gen);
    } else {
      sum += get_rand_no111(gen);
    }
  }
  return sum & 127;
}
What about the following simple idea:
1. Generate a random r.
2. Build a mask of the window(s) within r that contain 3 or more consecutive 1s.
3. If the mask is 0 (no such window), return r.
4. Substitute the "incorrect" bits under that mask with new random ones.
5. Go to step 2.
Code sample (not tested, only compiled):
uint64_t rand_no3() {
    uint64_t r, mask;
    for(r = get_rand_64() ; ; ) {
        mask = r & (r >> 1) & (r >> 2);
        mask |= (mask << 1) | (mask << 2);
        if(mask == 0)
            return r;
        r ^= mask & get_rand_64();
    }
}
Another variant of the same code, with just a single call site for get_rand_64():
uint64_t rand_no3() {
    uint64_t r = 0, mask = ~0ULL;
    do {
        r ^= mask & get_rand_64();
        mask = r & (r >> 1) & (r >> 2);
        mask |= (mask << 1) | (mask << 2);
    } while(mask != 0);
    return r;
}
Initializing r to zero does not matter here: the mask starts as all ones, so the first loop iteration XORs a fresh random value into every bit of r anyway.
You could generate the number one bit at a time, keeping track of the number of consecutive set bits. Whenever you have two consecutive set bits, you insert an unset bit and set the count back to 0.
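A minimal sketch of that idea (my illustration, reusing the question's get_rand_bit; note that, as the question's edit observes for a similar construction, forcing the 0 this way does not produce a uniform distribution):

#include <stdint.h>

extern int get_rand_bit(void);   /* assumed 0/1 source, as in the question */

uint64_t gen_bitwise_no111(void)
{
    uint64_t r = 0;
    int run = 0;                 /* length of the current run of 1s */
    for (int i = 0; i < 64; i++) {
        int bit = (run == 2) ? 0 : get_rand_bit(); /* insert a 0 after two 1s */
        r |= (uint64_t)bit << i;
        run = bit ? run + 1 : 0;
    }
    return r;
}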

Portable efficient alternative to PDEP without using BMI2?

The documentation for the parallel deposit instruction (PDEP) in Intel's Bit Manipulation Instruction Set 2 (BMI2) describes the following serial implementation for the instruction (C-like pseudocode):
U64 _pdep_u64(U64 val, U64 mask) {
    U64 res = 0;
    for (U64 bb = 1; mask; bb += bb) {
        if (val & bb)
            res |= mask & -mask;
        mask &= mask - 1;
    }
    return res;
}
See also Intel's pdep insn ref manual entry.
This algorithm is O(n), where n is the number of set bits in mask, which obviously has a worst case of O(k) where k is the total number of bits in mask.
Is a more efficient worst case algorithm possible?
Is it possible to make a faster version that assumes that val has at most one bit set, i.e. either equals 0 or equals 1<<r for some value of r from 0 to 63?
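(For a concrete illustration of what PDEP computes: _pdep_u64(0b101, 0b11010) deposits the low bits of val into the set-bit positions of mask, which are indices 1, 3 and 4; so bit 1 = 1, bit 3 = 0, bit 4 = 1, giving 0b10010.)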
The second part of the question, about the special case of a 1-bit deposit, requires two steps. In the first step, we need to determine the bit index r of the single 1-bit in val, with a suitable response in case val is zero. This can easily be accomplished via the POSIX function ffs, or if r is known by other means, as alluded to by the asker in comments. In the second step we need to identify bit index i of the r-th 1-bit in mask, if it exists. We can then deposit the r-th bit of val at bit i.
One way of finding the index of the r-th 1-bit in mask is to tally the 1-bits using a classical population count algorithm based on binary partitioning, and record all of the intermediate group-wise bit counts. We then perform a binary search on the recorded bit-count data to identify the position of the desired bit.
The following C-code demonstrates this using 64-bit data. Whether this is actually faster than the iterative method will very much depend on typical values of mask and val.
#include <stdint.h>
#include <strings.h>  /* ffsll(); on glibc this may instead require _GNU_SOURCE and <string.h> */

/* Find the index of the n-th 1-bit in mask, n >= 0
   The index of the least significant bit is 0
   Return -1 if there is no such bit
*/
int find_nth_set_bit (uint64_t mask, int n)
{
    int t, i = n, r = 0;
    const uint64_t m1 = 0x5555555555555555ULL; // even bits
    const uint64_t m2 = 0x3333333333333333ULL; // even 2-bit groups
    const uint64_t m4 = 0x0f0f0f0f0f0f0f0fULL; // even nibbles
    const uint64_t m8 = 0x00ff00ff00ff00ffULL; // even bytes
    uint64_t c1 = mask;
    uint64_t c2 = c1 - ((c1 >> 1) & m1);
    uint64_t c4 = ((c2 >> 2) & m2) + (c2 & m2);
    uint64_t c8 = ((c4 >> 4) + c4) & m4;
    uint64_t c16 = ((c8 >> 8) + c8) & m8;
    uint64_t c32 = (c16 >> 16) + c16;
    int c64 = (int)(((c32 >> 32) + c32) & 0x7f);
    t = (c32     ) & 0x3f; if (i >= t) { r += 32; i -= t; }
    t = (c16 >> r) & 0x1f; if (i >= t) { r += 16; i -= t; }
    t = (c8  >> r) & 0x0f; if (i >= t) { r +=  8; i -= t; }
    t = (c4  >> r) & 0x07; if (i >= t) { r +=  4; i -= t; }
    t = (c2  >> r) & 0x03; if (i >= t) { r +=  2; i -= t; }
    t = (c1  >> r) & 0x01; if (i >= t) { r +=  1; }
    if (n >= c64) r = -1;
    return r;
}
/* val is either zero or has a single 1-bit.
   Return -1 if val is zero, otherwise the index of the 1-bit
   The index of the least significant bit is 0
*/
int find_bit_index (uint64_t val)
{
    return ffsll (val) - 1;
}

uint64_t deposit_single_bit (uint64_t val, uint64_t mask)
{
    uint64_t res = (uint64_t)0;
    int r = find_bit_index (val);
    if (r >= 0) {
        int i = find_nth_set_bit (mask, r);
        if (i >= 0) res = (uint64_t)1 << i;
    }
    return res;
}
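A small usage example (my addition) for the 1-bit case: val has its single 1-bit at index 2, so it lands on the 2nd (0-based) set bit of mask, matching what _pdep_u64 would produce for a one-bit val:

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t val  = UINT64_C(1) << 2;
    uint64_t mask = UINT64_C(0x16);  /* 0b10110: set bits at indices 1, 2, 4 */
    uint64_t res  = deposit_single_bit(val, mask);
    assert(res == UINT64_C(1) << 4); /* the 2nd set bit of mask is bit 4 */
    printf("0x%" PRIx64 "\n", res);
    return 0;
}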

load vector from large vector with simd based on mask

I hope someone can help here.
I have a large byte vector from which I create a small byte vector (based on a mask), which I then process with SIMD.
Currently the mask is an array of baseOffset + submask (byte[256]) entries, optimized for storage, as there are more than 10^8 of them. I create a max-size subvector, then loop through the mask array, multiplying the baseOffset by 256, and for each set bit offset in the mask I load from the large vector and put the values into the smaller vector sequentially. The smaller vector is then processed via a number of VPMADDUBSW instructions and accumulated. I can change this structure, e.g. walk the bits once to use an 8K-bit array buffer and then create the small vector.
Is there a faster way I can create the subarray?
I pulled the code out of the app into a test program, but the original is in a state of flux (moving to AVX2 and pulling more out of C#).
#include "stdafx.h"
#include <stdio.h>
#include <mmintrin.h>
#include <emmintrin.h>
#include <tmmintrin.h>
#include <smmintrin.h>
#include <immintrin.h>

// from
__declspec(align(16)) char N[4096] = { 9, 5, 5, 5, 9, 5, 5, 5, 5, 5 };
// W
__declspec(align(16)) char W[4096] = { 1, 2, -3, 5, 5, 5, 5, 5, 5, 5 };
__declspec(align(16)) char buffer[4096];

__declspec(align(2))
struct packed_destination {
    char blockOffset;
    __int8 bitMask[32];
};

__m128i sum = _mm_setzero_si128();
packed_destination packed_destinations[10];

void process128(__m128i u, __m128i s)
{
    __m128i calc = _mm_maddubs_epi16(u, s); // pmaddubsw
    __m128i loints = _mm_cvtepi16_epi32(calc);
    __m128i hiints = _mm_cvtepi16_epi32(_mm_shuffle_epi32(calc, 0x4e));
    sum = _mm_add_epi32(_mm_add_epi32(loints, hiints), sum);
}

void process_array(char n[], char w[], int length)
{
    sum = _mm_setzero_si128();
    int length128th = length >> 7;
    for (int i = 0; i < length128th; i++)
    {
        __m128i u = _mm_load_si128((__m128i*)&n[i * 128]);
        __m128i s = _mm_load_si128((__m128i*)&w[i * 128]);
        process128(u, s);
    }
}

void populate_buffer_from_vector(packed_destination packed_destinations[], char n[], int dest_length)
{
    int buffer_dest_index = 0;
    for (int i = 0; i < dest_length; i++)
    {
        int blockOffset = packed_destinations[i].blockOffset << 8;
        // go through mask and copy to buffer
        for (int j = 0; j < 32; j++)
        {
            int joffset = blockOffset + (j << 3); // element index for bit 0 of this mask byte
            int mask = packed_destinations[i].bitMask[j];
            if (mask & 1 << 0)
                buffer[buffer_dest_index++] = n[joffset + 0];
            if (mask & 1 << 1)
                buffer[buffer_dest_index++] = n[joffset + 1];
            if (mask & 1 << 2)
                buffer[buffer_dest_index++] = n[joffset + 2];
            if (mask & 1 << 3)
                buffer[buffer_dest_index++] = n[joffset + 3];
            if (mask & 1 << 4)
                buffer[buffer_dest_index++] = n[joffset + 4];
            if (mask & 1 << 5)
                buffer[buffer_dest_index++] = n[joffset + 5];
            if (mask & 1 << 6)
                buffer[buffer_dest_index++] = n[joffset + 6];
            if (mask & 1 << 7)
                buffer[buffer_dest_index++] = n[joffset + 7];
        }
    }
}

int _tmain(int argc, _TCHAR* argv[])
{
    for (int i = 0; i < 32; ++i)
    {
        packed_destinations[0].bitMask[i] = 0x0f;
        packed_destinations[1].bitMask[i] = 0x04;
    }
    packed_destinations[1].blockOffset = 1;
    populate_buffer_from_vector(packed_destinations, N, 1);
    process_array(buffer, W, 256);
    int val = sum.m128i_i32[0] +
              sum.m128i_i32[1] +
              sum.m128i_i32[2] +
              sum.m128i_i32[3];
    printf("sum is %d\n", val);
    printf("Press Any Key to Continue\n");
    getchar();
    return 0;
}
Normally mask density would be 5-15%; for some workloads it would be 25-100%.
MASKMOVDQU is close, but then we would have to re-pack / swizzle according to the mask before saving.
A couple of optimisations for your existing code:
If your data is sparse then it would probably be a good idea to add a test of each 8-bit mask value prior to testing the individual bits, i.e.
int mask = packed_destinations[i].bitMask[j];
if (mask != 0)
{
    if (mask & 1 << 0)
        buffer[buffer_dest_index++] = n[joffset + 0];
    if (mask & 1 << 1)
        buffer[buffer_dest_index++] = n[joffset + 1];
    ...
Secondly, your process128 function can be optimised considerably:
inline __m128i process128(const __m128i u, const __m128i s, const __m128i sum)
{
    const __m128i vk1 = _mm_set1_epi16(1);
    __m128i calc = _mm_maddubs_epi16(u, s); // pmaddubsw: pairwise u8 * i8 -> i16
    calc = _mm_madd_epi16(calc, vk1);       // pmaddwd: widen pairs of i16 to i32
    return _mm_add_epi32(sum, calc);
}
Note that as well as reducing the SSE instruction count from 6 to 3, I've also made sum a parameter, to get away from any dependency on global variables (it's always a good idea to avoid globals, not only for good software engineering but also because they can inhibit certain compiler optimisations).
It would be interesting to see a profile of your code (using a decent sampling profiler, not via instrumentation), since this would help to prioritise any further optimisation efforts.
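With sum threaded through as a parameter, the caller changes along these lines (my sketch, based on the question's process_array):

void process_array(char n[], char w[], int length)
{
    __m128i sum = _mm_setzero_si128();  /* local accumulator instead of a global */
    int length128th = length >> 7;
    for (int i = 0; i < length128th; i++)
    {
        __m128i u = _mm_load_si128((__m128i*)&n[i * 128]);
        __m128i s = _mm_load_si128((__m128i*)&w[i * 128]);
        sum = process128(u, s, sum);    /* thread the accumulator through */
    }
    /* reduce or store sum here as needed */
}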

CUDA Maximum Reduction Algorithm Not Working

A previous question asked how to find the maximum value of an array in CUDA efficiently: Finding max value in CUDA; the top response provided a link to an NVIDIA presentation on optimizing reduction kernels.
If you are using Visual Studio, simply remove the header reference, and everything between the CPU EXECUTION markers.
I set up a variant which finds the max, but it doesn't match what the CPU finds:
// Returns the maximum value of
// an array of size n
float GetMax(float *maxes, int n)
{
    int i = 0;
    float max = -100000;
    for(i = 0; i < n; i++)
    {
        if(maxes[i] > max)
            max = maxes[i];
    }
    return max;
}

// Too obvious...
__device__ float MaxOf2(float a, float b)
{
    if(a > b) return a;
    else return b;
}

__global__ void MaxReduction(int n, float *g_idata, float *g_odata)
{
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(BLOCKSIZE*2) + tid;
    unsigned int gridSize = BLOCKSIZE*2*gridDim.x;
    sdata[tid] = 0;

    //MMX(index,i)
    //MMX(index,i+blockSize)
    // Final Optimized Kernel
    while (i < n) {
        sdata[tid] = MaxOf2(g_idata[i], g_idata[i+BLOCKSIZE]);
        i += gridSize;
    }
    __syncthreads();

    if (BLOCKSIZE >= 512) { if (tid < 256) { sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
    if (BLOCKSIZE >= 256) { if (tid < 128) { sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
    if (BLOCKSIZE >= 128) { if (tid < 64) { sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 64]); } __syncthreads(); }

    if (tid < 32) {
        if (BLOCKSIZE >= 64) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 32]);
        if (BLOCKSIZE >= 32) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 16]);
        if (BLOCKSIZE >= 16) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 8]);
        if (BLOCKSIZE >= 8) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 4]);
        if (BLOCKSIZE >= 4) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 2]);
        if (BLOCKSIZE >= 2) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 1]);
    }

    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
I have a giant setup to test this algorithm:
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "book.h"

#define ARRAYSIZE 16384
#define GRIDSIZE 60
#define BLOCKSIZE 32
#define SIZEFLOAT 4

using namespace std;

// Function definitions
float GetMax(float *maxes, int n);
__device__ float MaxOf2(float a, float b);
__global__ void MaxReduction(int n, float *g_idata, float *g_odata);

// Returns random floating point number
float RandomReal(float low, float high)
{
    float d;
    d = (float) rand() / ((float) RAND_MAX + 1);
    return (low + d * (high - low));
}

int main()
{
    /*****************VARIABLE SETUP*****************/
    // Pointer to CPU numbers
    float *numbers;
    // Pointer to GPU numbers
    float *dev_numbers;
    // Counter
    int i = 0;
    // Randomize
    srand(time(0));

    // Timers
    // Kernel timers
    cudaEvent_t start_kernel, stop_kernel;
    float elapsedTime_kernel;
    HANDLE_ERROR(cudaEventCreate(&start_kernel));
    HANDLE_ERROR(cudaEventCreate(&stop_kernel));
    // cudaMalloc timers
    cudaEvent_t start_malloc, stop_malloc;
    float elapsedTime_malloc;
    HANDLE_ERROR(cudaEventCreate(&start_malloc));
    HANDLE_ERROR(cudaEventCreate(&stop_malloc));
    // CPU timers
    struct timeval start, stop;
    float elapsedTime = 0;
    /*****************VARIABLE SETUP*****************/

    /*****************CPU ARRAY SETUP*****************/
    // Setup CPU array
    HANDLE_ERROR(cudaHostAlloc((void**)&numbers, ARRAYSIZE * sizeof(float), cudaHostAllocDefault));
    for(i = 0; i < ARRAYSIZE; i++)
        numbers[i] = RandomReal(0, 50000.0);
    /*****************CPU ARRAY SETUP*****************/

    /*****************GPU ARRAY SETUP*****************/
    // Start recording cuda malloc time
    HANDLE_ERROR(cudaEventRecord(start_malloc,0));
    // Allocate memory to GPU
    HANDLE_ERROR(cudaMalloc((void**)&dev_numbers, ARRAYSIZE * sizeof(float)));
    // Transfer CPU array to GPU
    HANDLE_ERROR(cudaMemcpy(dev_numbers, numbers, ARRAYSIZE*sizeof(float), cudaMemcpyHostToDevice));
    // An array to temporarily store maximum values on the GPU
    float *dev_max;
    HANDLE_ERROR(cudaMalloc((void**)&dev_max, GRIDSIZE * sizeof(float)));
    // An array to hold the per-block GPU maxes
    float maxes[GRIDSIZE];
    /*****************GPU ARRAY SETUP*****************/

    /*****************KERNEL EXECUTION*****************/
    // Start recording kernel execution time
    HANDLE_ERROR(cudaEventRecord(start_kernel,0));
    // Run kernel
    MaxReduction<<<GRIDSIZE, BLOCKSIZE, SIZEFLOAT*BLOCKSIZE>>>(ARRAYSIZE, dev_numbers, dev_max);
    // Transfer maxes over
    HANDLE_ERROR(cudaMemcpy(maxes, dev_max, GRIDSIZE * sizeof(float), cudaMemcpyDeviceToHost));
    // Print out the max
    cout << GetMax(maxes, GRIDSIZE) << endl;
    // Stop recording kernel execution time
    HANDLE_ERROR(cudaEventRecord(stop_kernel,0));
    HANDLE_ERROR(cudaEventSynchronize(stop_kernel));
    // Retrieve recording data
    HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime_kernel, start_kernel, stop_kernel));
    // Stop recording cuda malloc time
    HANDLE_ERROR(cudaEventRecord(stop_malloc,0));
    HANDLE_ERROR(cudaEventSynchronize(stop_malloc));
    // Retrieve recording data
    HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime_malloc, start_malloc, stop_malloc));
    // Print results
    printf("%5.3f\t%5.3f\n", elapsedTime_kernel, elapsedTime_malloc);
    /*****************KERNEL EXECUTION*****************/

    /*****************CPU EXECUTION*****************/
    // Capture the start time
    gettimeofday(&start, NULL);
    // Call generic P7Viterbi function
    cout << GetMax(numbers, ARRAYSIZE) << endl;
    // Capture the stop time
    gettimeofday(&stop, NULL);
    // Retrieve time elapsed in milliseconds
    long int elapsed_sec = stop.tv_sec - start.tv_sec;
    long int elapsed_usec = stop.tv_usec - start.tv_usec;
    elapsedTime = (float)(1000.0f * elapsed_sec) + (float)(0.001f * elapsed_usec);
    // Print results
    printf("%5.3f\n", elapsedTime);
    /*****************CPU EXECUTION*****************/

    // Free memory
    cudaFreeHost(numbers);
    cudaFree(dev_numbers);
    cudaFree(dev_max);
    cudaEventDestroy(start_kernel);
    cudaEventDestroy(stop_kernel);
    cudaEventDestroy(start_malloc);
    cudaEventDestroy(stop_malloc);

    // Exit program
    return 0;
}
I ran cuda-memcheck on this test program with the -g and -G switches on, and it reports 0 problems. Can anyone spot the issue?
NOTE: Be sure to have book.h from the CUDA by Example book in your current directory when you compile the program. Source link here: http://developer.nvidia.com/cuda-example-introduction-general-purpose-gpu-programming
Download the source code, and book.h will be under the common directory/folder.
Your kernel looks broken to me. The thread-local search (before the shared memory reduction) should look something like this:
sdata[tid] = g_idata[i];
i += gridSize;
while (i < n) {
    sdata[tid] = MaxOf2(sdata[tid], g_idata[i]);
    i += gridSize;
}
shouldn't it?
Also note that if you run this on Fermi, the shared memory buffer should be declared volatile, and you will get a noticeable improvement in performance if the thread local search is done with a register variable, rather than in shared memory. There is about an 8 times difference in effective bandwidth between the two.
EDIT: Here is a simplified, working version of your reduction kernel. You should note a number of differences compared to your original:
__global__ void MaxReduction(int n, float *g_idata, float *g_odata)
{
    extern __shared__ volatile float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(BLOCKSIZE) + tid;
    unsigned int gridSize = BLOCKSIZE*gridDim.x;

    // Thread-local search held in a register
    float val = g_idata[i];
    i += gridSize;
    while (i < n) {
        val = MaxOf2(g_idata[i],val);
        i += gridSize;
    }
    sdata[tid] = val;
    __syncthreads();

    // This version uses a single warp for the shared memory
    // reduction
#pragma unroll
    for(int i=(tid+32); ((tid<32)&&(i<BLOCKSIZE)); i+=32)
        sdata[tid] = MaxOf2(sdata[tid], sdata[i]);

    if (tid < 16) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 16]);
    if (tid < 8) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 8]);
    if (tid < 4) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 4]);
    if (tid < 2) sdata[tid] = MaxOf2(sdata[tid], sdata[tid + 2]);
    if (tid == 0) g_odata[blockIdx.x] = MaxOf2(sdata[tid], sdata[tid + 1]);
}
This code should also be safe on Fermi. You should also familiarise yourself with the CUDA math library, because there is an fmaxf(x,y) intrinsic which you should use in place of your MaxOf2 function.

Counting with an Integer Divide-based routine - Is there a formulaic approach?

Consider a routine that counts by successive divide w/ remainder operations.
Starting with a 64-bit dividend, the routine divides by a constant divisor.
If the remainder is 0, the routine returns.
Otherwise, a new dividend is constructed by multiplying the remainder by 2^32 and adding the integer quotient.
In code:
#include <assert.h>

/// ULong - 64 bit, unsigned
/// UInt - 32 bit, unsigned
typedef unsigned long long ULong;
typedef unsigned int UInt;

const UInt Divisor = 3; /* arbitrary example value; the question leaves it unspecified */

int TrickyCounter( ULong Dividend)
{
    int count = 0;
    ULong Quotient;
    UInt Remainder;
    do {
        Quotient = Dividend/Divisor;
        Remainder = Dividend%Divisor;
        assert((Quotient >> 32) == 0);
        count = count + 1;
        Dividend = ((ULong)Remainder << 32) + Quotient;
    } while (Remainder != 0);
    return count;
}
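For a concrete trace (my worked example, with Divisor = 3): Dividend = 7 gives Quotient = 2, Remainder = 1, so the next dividend is (1 << 32) + 2 = 4294967298; that divides evenly as 4294967298 / 3 = 1431655766 with remainder 0, so the routine returns a count of 2.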
With an arbitrary Divisor, is there a preferably non-iterating method to calculate the necessary Dividend to get the desired count?
For many initial dividends, this seems to quickly hit the "Assert" condition. Would some dividends cause this to loop forever?
If, instead of a count, the routine returns the quotient, can I calculate the Dividend to produce the number I want returned?
UInt TrickyNumber( ULong Dividend, int count)
{
    ULong Quotient = 0;
    UInt Remainder;
    while (count > 0) {
        Quotient = Dividend/Divisor;
        Remainder = Dividend%Divisor;
        assert((Quotient >> 32) == 0);
        count = count - 1;
        Dividend = ((ULong)Remainder << 32) + Quotient;
    }
    return (UInt)Quotient;
}
Would some dividends cause this to loop forever?
Dividend = 0x1ffffffffL, Divisor = 2 is a fairly obvious example, and the whole family Dividend = (Divisor << 32) - 1 consists of fixed points: the quotient is 2^32 - 1 and the remainder is Divisor - 1, so the next dividend is ((Divisor - 1) << 32) + (2^32 - 1) = (Divisor << 32) - 1 again.
Working from these, many cyclic combinations of initial dividend and divisor can be found, and I'm sure there are more:
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

size_t tricky_counter( uint64_t dividend, const uint32_t divisor )
{
    const size_t cycle_buffer_size = 1024;
    size_t count = 0;
    uint64_t quotient;
    uint32_t remainder;
    uint64_t pre[cycle_buffer_size];
    do {
        pre[ count % cycle_buffer_size ] = dividend;
        quotient = dividend/divisor;
        remainder = dividend%divisor;
        if ( (quotient >> 32) != 0) {
            printf("quotient: 0x%" PRIx64 "\n", quotient);
        }
        count = count + 1;
        dividend = ((uint64_t)remainder << 32) + quotient;
        for (size_t i = 0; i < count && i < cycle_buffer_size; ++i) {
            if (pre[i] == dividend) {
                size_t cycle = 0;
                printf("dividend repeats: \n");
                while (i != count % cycle_buffer_size) {
                    //~ printf(" 0x%" PRIx64 " / %" PRId32 " \n", pre[i], divisor);
                    i = (i + 1) % cycle_buffer_size;
                    ++cycle;
                }
                printf(" 0x%" PRIx64 " / %" PRId32 " cycle size %zu \n", dividend, divisor, cycle);
                return 0;
            }
        }
    } while (remainder != 0);
    return count;
}
int main ( void )
{
    for (uint64_t k = 1; k < 256; ++k)
        for (uint64_t x = 2; x < 1024; ++x)
            tricky_counter( ((x-1) << 32) + 0x01010101L * k, x);
}
