Is there a performant way to generate an unbiased 64b random integer without 3 set bits in a row, assuming a fast-and-unbiased input PRNG? I don't care about 'wasting bits' of the input source.
That is, something better than the naive rejection-sampling approach:
uint64_t r;
do {
    r = get_rand_64();
} while (r & (r >> 1) & (r >> 2));
...which "works", but is very slow. It looks like it's iterating ~187x on average or so.
One possibility I've explored is roughly:
bool p2 = get_rand_bit();
bool p1 = get_rand_bit();
uint64_t r = (p1 << 1) | p2;
for (int i = 2; i < 64; i++) {
    bool p0 = (p1 && p2) ? false : get_rand_bit();
    r |= (uint64_t)p0 << i;   // cast before shifting so bits 32..63 aren't lost
    p2 = p1;
    p1 = p0;
}
...however, this is still slow. Mainly because using this approach the entire calculation is bit-serial. EDIT: and it's also biased. Easiest to see with a 3-bit integer - 0b011 occurs 1/8th of the time, which is wrong (should be 1/7th).
I've tried doing various parallel fixups, but haven't been able to come up with anything unbiased. It's useful to play around with 4-bit integers first: e.g. setting all bits involved in a conflict to random values ends up biased, and drawing out the Markov chain for 4 bits makes that obvious.
Is there a better way to do this?
I optimized the lexicographic decoder, resulting in a four-fold speedup relative to my previous answer. There are two new ideas:
Use the one-to-one correspondence implied by the recurrence T(n) = T(k−1) T(n−k) + T(k−2) T(n−k−1) + T(k−2) T(n−k−2) + T(k−3) T(n−k−1) to avoid working one bit at a time;
Cache the small words without 111 in addition to the recurrence values, incurring an L1 cache hit to save a number of arithmetic operations.
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
enum { kTribonacci14 = 5768 };
static uint64_t g_tribonacci[65];
static void InitTribonacci(void) {
for (unsigned i = 0; i < 65; i++) {
g_tribonacci[i] =
i < 3 ? 1 << i
: g_tribonacci[i - 1] + g_tribonacci[i - 2] + g_tribonacci[i - 3];
}
assert(g_tribonacci[14] == kTribonacci14);
}
static uint16_t g_words_no_111[kTribonacci14];
static void InitCachedWordsNo111(void) {
unsigned i = 0;
for (unsigned word = 0; word < ((unsigned)1 << 14); word++) {
if ((word & (word >> 1) & (word >> 2)) == 0) {
assert(i < kTribonacci14);
g_words_no_111[i++] = (uint16_t)word;
}
}
assert(i == kTribonacci14);
}
static bool CaseNo111(uint64_t *restrict result, unsigned *restrict n,
uint64_t *restrict index, unsigned left_n,
unsigned right_n) {
uint64_t left_count = g_tribonacci[left_n];
uint64_t right_count = g_tribonacci[right_n];
uint64_t product = left_count * right_count;
if (*index >= product) {
*index -= product;
return false;
}
*result = (*result << left_n) + g_words_no_111[*index / right_count];
*n = right_n;
*index %= right_count;
return true;
}
static void Append(uint64_t *result, uint64_t bit) {
*result = (*result << 1) + bit;
}
static uint64_t DecodeNo111(unsigned n, uint64_t index) {
assert(0 <= n && n <= 64);
assert(index < g_tribonacci[n]);
uint64_t result = 0;
while (n > 14) {
assert(g_tribonacci[n] == g_tribonacci[12] * g_tribonacci[n - 13] +
g_tribonacci[11] * g_tribonacci[n - 14] +
g_tribonacci[11] * g_tribonacci[n - 15] +
g_tribonacci[10] * g_tribonacci[n - 14]);
if (CaseNo111(&result, &n, &index, 12, n - 13)) {
Append(&result, 0);
} else if (CaseNo111(&result, &n, &index, 11, n - 14)) {
Append(&result, 0);
Append(&result, 1);
Append(&result, 0);
} else if (CaseNo111(&result, &n, &index, 11, n - 15)) {
Append(&result, 0);
Append(&result, 1);
Append(&result, 1);
Append(&result, 0);
} else if (CaseNo111(&result, &n, &index, 10, n - 14)) {
Append(&result, 0);
Append(&result, 1);
Append(&result, 1);
Append(&result, 0);
} else {
assert(false);
}
}
return (result << n) + g_words_no_111[index];
}
static void PrintWord(unsigned n, uint64_t word) {
assert(0 <= n && n <= 64);
while (n-- > 0) {
putchar('0' + ((word >> n) & 1));
}
putchar('\n');
}
int main(void) {
InitTribonacci();
InitCachedWordsNo111();
if ((false)) {
enum { kN = 20 };
for (uint64_t i = 0; i < g_tribonacci[kN]; i++) {
PrintWord(kN, DecodeNo111(kN, i));
}
}
uint64_t sum = 0;
uint64_t index = 0;
for (uint32_t i = 0; i < 10000000; i++) {
sum += DecodeNo111(64, index % g_tribonacci[64]);
index = (index * 2862933555777941757) + 3037000493;
}
return sum & 127;
}
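To use this decoder as the generator the question asks for, feed it an unbiased index in [0, T(64)). A minimal sketch, assuming the functions above have been initialized and that get_rand_64_range(max) is an unbiased primitive returning a uniform value in [0, max), the same helper the Tribonacci-representation answer further down assumes:
/* Sketch only: assumes InitTribonacci() and InitCachedWordsNo111() have run,
   and that get_rand_64_range(max) returns an unbiased value in [0, max). */
extern uint64_t get_rand_64_range(uint64_t max);

uint64_t get_rand_no111_fast(void) {
    uint64_t idx = get_rand_64_range(g_tribonacci[64]);
    return DecodeNo111(64, idx);
}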
From John Coleman's comment, here's the start of an approach based on Tribonacci numbers. The basic idea:
Generate an unbiased number in the range [0..T(bits)), where T(0) = 1, T(1) = 2, T(2) = 4, T(n) = T(n-1) + T(n-2) + T(n-3).
Convert to Tribonacci representation.
You're done.
A minimal example is as follows:
// 1, 2, 4, TRIBO[n-3]+TRIBO[n-2]+TRIBO[n-1]
// possible minor perf optimization: reverse TRIBO
static const uint64_t TRIBO[65] = {1, 2, 4, 7, 13, 24, 44, 81, 149, 274, 504, 927, 1705, 3136, 5768, 10609, 19513, 35890, 66012, 121415, 223317, 410744, 755476, 1389537, 2555757, 4700770, 8646064, 15902591, 29249425, 53798080, 98950096, 181997601, 334745777, 615693474, 1132436852, 2082876103, 3831006429, 7046319384, 12960201916, 23837527729, 43844049029, 80641778674, 148323355432, 272809183135, 501774317241, 922906855808, 1697490356184, 3122171529233, 5742568741225, 10562230626642, 19426970897100, 35731770264967, 65720971788709, 120879712950776, 222332455004452, 408933139743937, 752145307699165, 1383410902447554, 2544489349890656, 4680045560037375, 8607945812375585, 15832480722303616, 29120472094716576, 53560898629395777, 98513851446415969];
// exclusive of max
extern uint64_t get_rand_64_range(uint64_t max);
uint64_t get_rand_no111(void) {
    uint64_t idx = get_rand_64_range(TRIBO[64]);
    uint64_t ret = 0;
    for (int i = 63; i >= 0; i--) {
        if (idx >= TRIBO[i]) {
            ret |= ((uint64_t) 1) << i;
            idx -= TRIBO[i];
        }
        // optional: if (idx == 0) {break;}
    }
    return ret;
}
(Warning: retyped from Python code. I suggest testing.)
This satisfies the 'unbiased' portion, and is indeed faster than the naive rejection-sampling approach, but unfortunately is still pretty slow, because it's looping ~64 times.
The idea behind the code below is to generate the upper bits with the proper (non-uniform!) distribution, then generate the remaining bits conditional on them, working in 16-bit chunks. On my laptop, it’s significantly faster than the baseline, and slightly faster than lexicographic decoding.
You can see the logic behind the non-uniform upper distribution with 4-bit outputs: 00 and 10 each have four valid 2-bit lowers, 01 has three, and 11 has two (4 + 4 + 3 + 2 = 13, the total number of valid 4-bit words).
#include <cstdint>
#include <random>
namespace {
using Generator = std::mt19937_64;
template <int bits> std::uint64_t GenerateUniform(Generator &gen) {
static_assert(0 <= bits && bits <= 63);
return gen() & ((std::uint64_t{1} << bits) - 1);
}
template <> std::uint64_t GenerateUniform<64>(Generator &gen) { return gen(); }
template <int bits> std::uint64_t GenerateNo111Baseline(Generator &gen) {
std::uint64_t r;
do {
r = GenerateUniform<bits>(gen);
} while (r & (r >> 1) & (r >> 2));
return r;
}
template <int bits> struct Tribonacci {
static constexpr std::uint64_t value = Tribonacci<bits - 1>::value +
Tribonacci<bits - 2>::value +
Tribonacci<bits - 3>::value;
};
template <> struct Tribonacci<0> { static constexpr std::uint64_t value = 1; };
template <> struct Tribonacci<-1> { static constexpr std::uint64_t value = 1; };
template <> struct Tribonacci<-2> { static constexpr std::uint64_t value = 0; };
template <int bits> std::uint64_t GenerateNo111(Generator &gen) {
  constexpr int upper_bits = 16;
  constexpr int lower_bits = bits - upper_bits;
  for (;;) {
    // Draw a 16-bit no-111 upper chunk uniformly; the rejection below is what
    // skews the accepted uppers towards those that admit more completions.
    const std::uint64_t upper = GenerateNo111Baseline<upper_bits>(gen);
    if ((upper & 1) == 0) {
      // Upper ends in 0: every no-111 lower is compatible.
      return (upper << lower_bits) + GenerateNo111<lower_bits>(gen);
    }
    std::uint64_t outcome = std::uniform_int_distribution<std::uint64_t>{
        0, Tribonacci<lower_bits>::value - 1}(gen);
    if ((upper & 2) == 0) {
      // Upper ends in 01: the lower may start with "10"...
      if (outcome < Tribonacci<lower_bits - 2>::value) {
        return (upper << lower_bits) + (std::uint64_t{1} << (lower_bits - 1)) +
               GenerateNo111<lower_bits - 2>(gen);
      }
      outcome -= Tribonacci<lower_bits - 2>::value;
    }
    // ...or (whether the upper ends in 01 or 11) start with "0".
    if (outcome < Tribonacci<lower_bits - 1>::value) {
      return (upper << lower_bits) + GenerateNo111<lower_bits - 1>(gen);
    }
    // Otherwise reject this upper and start over, so that every accepted
    // (upper, lower) combination is equally likely.
  }
}
#define BASELINE(bits) \
template <> std::uint64_t GenerateNo111<bits>(Generator & gen) { \
return GenerateNo111Baseline<bits>(gen); \
}
BASELINE(0)
BASELINE(1)
BASELINE(2)
BASELINE(3)
BASELINE(4)
BASELINE(5)
BASELINE(6)
BASELINE(7)
BASELINE(8)
BASELINE(9)
BASELINE(10)
BASELINE(11)
BASELINE(12)
BASELINE(13)
BASELINE(14)
BASELINE(15)
BASELINE(16)
#undef BASELINE
static const std::uint64_t TRIBO[65] = {
    1, 2, 4, 7, 13, 24, 44, 81, 149, 274, 504, 927, 1705, 3136, 5768,
    10609, 19513, 35890, 66012, 121415, 223317, 410744, 755476, 1389537,
    2555757, 4700770, 8646064, 15902591, 29249425, 53798080, 98950096,
    181997601, 334745777, 615693474, 1132436852, 2082876103, 3831006429,
    7046319384, 12960201916, 23837527729, 43844049029, 80641778674,
    148323355432, 272809183135, 501774317241, 922906855808, 1697490356184,
    3122171529233, 5742568741225, 10562230626642, 19426970897100,
    35731770264967, 65720971788709, 120879712950776, 222332455004452,
    408933139743937, 752145307699165, 1383410902447554, 2544489349890656,
    4680045560037375, 8607945812375585, 15832480722303616, 29120472094716576,
    53560898629395777, 98513851446415969};
std::uint64_t get_rand_no111(Generator &gen) {
std::uint64_t idx =
std::uniform_int_distribution<std::uint64_t>{0, TRIBO[64] - 1}(gen);
std::uint64_t ret = 0;
for (int i = 63; i >= 0; --i) {
if (idx >= TRIBO[i]) {
ret |= std::uint64_t{1} << i;
idx -= TRIBO[i];
}
}
return ret;
}
} // namespace
int main() {
Generator gen{std::random_device{}()};
std::uint64_t sum = 0;
for (std::int32_t i = 0; i < 10000000; i++) {
if constexpr (true) {
sum += GenerateNo111<64>(gen);
} else {
sum += get_rand_no111(gen);
}
}
return sum & 127;
}
What about the following simple idea:
1. Generate a random r.
2. Within r, build a mask of the window(s) that contain 3 or more consecutive 1s.
3. If the mask is 0 (no run of 3 or more set bits), return r.
4. Substitute the "incorrect" bits under that mask with new random ones.
5. Go to 2.
Code sample (not tested, only compiled):
uint64_t rand_no3() {
    uint64_t r, mask;
    for (r = get_rand_64(); ; ) {
        mask = r & (r >> 1) & (r >> 2);
        mask |= (mask << 1) | (mask << 2);
        if (mask == 0)
            return r;
        r ^= mask & get_rand_64();
    }
}
Another variant of the same code, with just a single call site for get_rand_64():
uint64_t rand_no3() {
    uint64_t r = 0, mask = ~0ULL;
    do {
        r ^= mask & get_rand_64();
        mask = r & (r >> 1) & (r >> 2);
        mask |= (mask << 1) | (mask << 2);
    } while (mask != 0);
    return r;
}
Here r starts at 0 and mask at all-ones, so the first loop iteration simply fills r with a fresh random value (initializing r avoids reading an indeterminate variable, which is undefined behavior).
You could generate the number one bit at a time, keeping track of the number of consecutive set bits. Whenever you have two consecutive set bits, you insert an unset bit and set the count back to 0.
I have a question about the 64/32-bit division algorithm as it appears in Hacker's Delight, Chapter 9-4 Unsigned Long Division, Figure 9-3, "div1u". Online it can be seen here, from where I copy-pasted it as follows:
unsigned divlu2(unsigned u1, unsigned u0, unsigned v,
unsigned *r) {
const unsigned b = 65536; // Number base (16 bits).
unsigned un1, un0, // Norm. dividend LSD's.
vn1, vn0, // Norm. divisor digits.
q1, q0, // Quotient digits.
un32, un21, un10,// Dividend digit pairs.
rhat; // A remainder.
int s; // Shift amount for norm.
if (u1 >= v) { // If overflow, set rem.
if (r != NULL) // to an impossible value,
*r = 0xFFFFFFFF; // and return the largest
return 0xFFFFFFFF;} // possible quotient.
s = nlz(v); // 0 <= s <= 31.
v = v << s; // Normalize divisor.
vn1 = v >> 16; // Break divisor up into
vn0 = v & 0xFFFF; // two 16-bit digits.
un32 = (u1 << s) | (u0 >> 32 - s) & (-s >> 31);
un10 = u0 << s; // Shift dividend left.
un1 = un10 >> 16; // Break right half of
un0 = un10 & 0xFFFF; // dividend into two digits.
q1 = un32/vn1; // Compute the first
rhat = un32 - q1*vn1; // quotient digit, q1.
again1:
if (q1 >= b || q1*vn0 > b*rhat + un1) {
q1 = q1 - 1;
rhat = rhat + vn1;
if (rhat < b) goto again1;}
un21 = un32*b + un1 - q1*v; // Multiply and subtract.
q0 = un21/vn1; // Compute the second
rhat = un21 - q0*vn1; // quotient digit, q0.
again2:
if (q0 >= b || q0*vn0 > b*rhat + un0) {
q0 = q0 - 1;
rhat = rhat + vn1;
if (rhat < b) goto again2;}
if (r != NULL) // If remainder is wanted,
*r = (un21*b + un0 - q0*v) >> s; // return it.
return q1*b + q0;
}
Specifically, I'm interested in the bounds of the variable un21. How large can it be? Somewhat surprisingly, it can be larger than v, but by how much?
In other words, under again2 there is the test q0 >= b. If I wanted to know whether the division (q0 = un21/vn1) eventually overflows, is it enough to test (un21 >> 16) == vn1, or does it have to read (un21 >> 16) >= vn1, instead of q0 >= b?
The idea is to know in advance, prior to calculating the quotient, whether the division overflows or not.
Since the division operation (/) is expensive on an FPGA, is it possible to perform division of two Q15-format numbers (16-bit fixed-point) using basic shift operations?
Could someone help me by providing an example?
Thanks in advance!
Fixed-point arithmetic is just integer arithmetic with a bit of scaling thrown in. Q15 is a purely fractional format stored as a signed 16-bit integer with a scale factor of 2^15, able to represent values in the interval [-1, 1); for example, 0.5 is stored as 0.5 * 2^15 = 16384. Clearly, division only makes sense in Q15 when the divisor's magnitude exceeds the dividend's magnitude, as otherwise the quotient's magnitude exceeds the representable range.
Before embarking on a custom Verilog implementation of fixed-point division, you would want to check your FPGA vendor's library offerings, as a fixed-point library including pipelined division is often available. There are also open-source projects that may be relevant, such as this one.
When using integer division operators for fixed-point division, we need to adjust for the fact that the division will remove the scale factor, i.e. (a * 2^scale) / (b * 2^scale) = (a/b), while the correct fixed-point result is (a/b * 2^scale). This is easily fixed by pre-multiplying the dividend by 2^scale, as in the following C implementation:
int16_t div_q15 (int16_t dividend, int16_t divisor)
{
return (int16_t)(((int32_t)dividend << 15) / (int32_t)divisor);
}
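As a quick sanity check of that adjustment, here is a minimal test sketch (the one-line function from above is repeated so the snippet compiles on its own). Dividing 0.25 by 0.5 in Q15 means dividing 8192 by 16384, and the expected quotient is 16384, i.e. 0.5:
#include <stdint.h>
#include <stdio.h>

static int16_t div_q15 (int16_t dividend, int16_t divisor)
{
    return (int16_t)(((int32_t)dividend << 15) / (int32_t)divisor);
}

int main (void)
{
    int16_t q = div_q15 (8192, 16384);     /* 0.25 / 0.5 in Q15 */
    printf ("%d (%f)\n", q, q / 32768.0);  /* prints: 16384 (0.500000) */
    return 0;
}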
Wikipedia gives a reasonable overview of how to implement binary division on a bit-by-bit basis using add, subtract, and shift operations. These methods are closely related to the longhand division taught in grade school. For FPGAs, the use of the non-restoring method is often preferred, as pointed out by this paper, for example:
Nikolay Sorokin, "Implementation of high-speed fixed-point dividers on FPGA". Journal of Computer Science & Technology, Vol. 6, No. 1, April 2006, pp. 8-11.
Here is C code that shows how the non-restoring method may be used for the division of 16-bit two's-complement operands:
/* bit-wise non-restoring two's complement division */
void int16_div (int16_t dividend, int16_t divisor, int16_t *quot, int16_t *rem)
{
const int operand_bits = (int) (sizeof (int16_t) * CHAR_BIT);
uint16_t d = (uint16_t)divisor;
uint16_t nd = 0 - d; /* -divisor */
uint16_t r, q = 0; /* remainder, quotient */
uint32_t dd = (uint32_t)d << operand_bits; /* expanded divisor */
uint32_t pp = dividend; /* partial remainder */
int i;
for (i = operand_bits - 1; i >= 0; i--) {
if ((int32_t)(pp ^ dd) < 0) {
q = (q << 1) + 0; /* record quotient bit -1 (as 0) */
pp = (pp << 1) + dd;
} else {
q = (q << 1) + 1; /* record quotient bit +1 (as 1) */
pp = (pp << 1) - dd;
}
}
/* convert quotient from digit set {-1,1} to plain two's complement */
q = (q << 1) + 1;
/* remainder is upper half of partial remainder */
r = (uint16_t)(pp >> operand_bits);
/* fix up cases where we worked past a partial remainder of zero */
if (r == d) { /* remainder equal to divisor */
q = q + 1;
r = 0;
} else if (r == nd) { /* remainder equal to -divisor */
q = q - 1;
r = 0;
}
/* for truncating division, remainder must have same sign as dividend */
if (r && ((int16_t)(dividend ^ r) < 0)) {
if ((int16_t)q < 0) {
q = q + 1;
r = r - d;
} else {
q = q - 1;
r = r + d;
}
}
*quot = (int16_t)q;
*rem = (int16_t)r;
}
Note that there are multiple ways of dealing with the various special cases that arise in non-restoring division. For example, one frequently sees code that detects a zero partial remainder pp and exits the loop over the quotient bits early in this case. Here I assume that an FPGA implementation would unroll the loop completely to create a pipelined implementation, in which case early termination is not helpful. Instead, a final correction is applied to those quotients that are affected by ignoring a partial remainder of zero.
In order to create a Q15 division from the above, we have to make just a single change: incorporating the up-scaling of the dividend. Instead of:
uint32_t pp = dividend; /* partial remainder */
we now use this:
uint32_t pp = (uint32_t)dividend << 15; /* partial remainder; incorporate Q15 scaling (cast first so a negative dividend is not left-shifted as a signed int) */
The resulting C code (sorry, I won't provide ready-to-use Verilog code) including the test framework is:
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <math.h>
/* bit-wise non-restoring two's complement division */
void q15_div (int16_t dividend, int16_t divisor, int16_t *quot, int16_t *rem)
{
const int operand_bits = (int) (sizeof (int16_t) * CHAR_BIT);
uint16_t d = (uint16_t)divisor;
uint16_t nd = 0 - d; /* -divisor */
uint16_t r, q = 0; /* remainder, quotient */
uint32_t dd = (uint32_t)d << operand_bits; /* expanded divisor */
uint32_t pp = (uint32_t)dividend << 15; /* partial remainder, incorporate Q15 scaling (cast avoids left-shifting a negative signed int) */
int i;
for (i = operand_bits - 1; i >= 0; i--) {
if ((int32_t)(pp ^ dd) < 0) {
q = (q << 1) + 0; /* record quotient bit -1 (as 0) */
pp = (pp << 1) + dd;
} else {
q = (q << 1) + 1; /* record quotient bit +1 (as 1) */
pp = (pp << 1) - dd;
}
}
/* convert quotient from digit set {-1,1} to plain two's complement */
q = (q << 1) + 1;
/* remainder is upper half of partial remainder */
r = (uint16_t)(pp >> operand_bits);
/* fix up cases where we worked past a partial remainder of zero */
if (r == d) { /* remainder equal to divisor */
q = q + 1;
r = 0;
} else if (r == nd) { /* remainder equal to -divisor */
q = q - 1;
r = 0;
}
/* for truncating division, remainder must have same sign as dividend */
if (r && ((int16_t)(dividend ^ r) < 0)) {
if ((int16_t)q < 0) {
q = q + 1;
r = r - d;
} else {
q = q - 1;
r = r + d;
}
}
*quot = (int16_t)q;
*rem = (int16_t)r;
}
int main (void)
{
uint16_t dividend, divisor, ref_q, res_q, res_r;
double quot, fxscale = (1 << 15);
dividend = 0;
do {
printf ("\r%04x", dividend);
divisor = 1;
do {
quot = trunc (fxscale * (int16_t)dividend / (int16_t)divisor);
/* Q15 can only represent numbers in [-1, 1) */
if ((quot >= -1.0) && (quot < 1.0)) {
ref_q = (int16_t)((((int32_t)(int16_t)dividend) << 15) /
((int32_t)(int16_t)divisor));
q15_div ((int16_t)dividend, (int16_t)divisor,
(int16_t *)&res_q, (int16_t *)&res_r);
if (res_q != ref_q) {
printf ("!r dividend=%04x (%f) divisor=%04x (%f) res=%04x (%f) ref=%04x (%f)\n",
dividend, (int16_t)dividend / fxscale,
divisor, (int16_t)divisor / fxscale,
res_q, (int16_t)res_q / fxscale,
ref_q, (int16_t)ref_q / fxscale);
}
}
divisor++;
} while (divisor);
dividend++;
} while (dividend);
return EXIT_SUCCESS;
}
I'm looking for an algorithm which computes all permutations of a bitstring of given length (n) and number of set bits (k). For example, for n=4 and k=2 the algorithm shall output:
1100
1010
1001
0011
0101
0110
I'm aware of Gosper's Hack, which generates the needed permutations in lexicographic order. But I need them to be generated in such a manner that two consecutive permutations differ in only two (or at least a constant number of) bit positions (like in the above example).
Another bithack to do that would be awesome, but an algorithmic description would also help me a lot.
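(For reference, a common formulation of the Gosper's-hack step mentioned above, which produces the lexicographic order rather than the constant-distance order being asked for; this sketch assumes a GCC/Clang-style __builtin_ctz.)
unsigned next_bit_permutation(unsigned v) {
    /* Gosper's hack: next higher integer with the same number of set bits. */
    unsigned t = v | (v - 1);  /* set all bits below the lowest set bit */
    return (t + 1) | (((~t & -~t) - 1) >> (__builtin_ctz(v) + 1));
}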
Walking bit algorithm
To generate permutations of a binary sequence by swapping exactly one set bit with an unset bit in each step (i.e. the Hamming distance between consecutive permutations equals two), you can use this "walking bit" algorithm. The way it works is similar to creating the (reverse) lexicographical order, but the set bits walk right and left alternately, and as a result some parts of the sequence are mirrored. This is probably better explained with an example; for n=4 and k=2 the algorithm produces:
1100
1010
1001
0101
0110
0011
Recursive implementation
A recursive algorithm would receive a sequence of n bits, with k bits set, either all on the left or all on the right. It would then keep a 1 at the end, recurse with the rest of the sequence, move the set bit and keep 01 at the end, recurse with the rest of the bits, move the set bit and keep 001 at the end, etc... until the last recursion with only set bits. As you can see, this creates alternating left-to-right and right-to-left recursions.
When the algorithm is called with a sequence with only one bit set, this is the deepest recursion level, and the set bit walks from one end to the other.
Code example 1
Here's a simple recursive JavaScript implementation:
function walkingBits(n, k) {
var seq = [];
for (var i = 0; i < n; i++) seq[i] = 0;
walk (n, k, 1, 0);
function walk(n, k, dir, pos) {
for (var i = 1; i <= n - k + 1; i++, pos += dir) {
seq[pos] = 1;
if (k > 1) walk(n - i, k - 1, i%2 ? dir : -dir, pos + dir * (i%2 ? 1 : n - i))
else document.write(seq + "<BR>");
seq[pos] = 0;
}
}
}
walkingBits(7,3);
Translated into C++ that could be something like this:
#include <iostream>
#include <string>
void walkingBits(int n, int k, int dir = 1, int pos = 0, bool top = true) {
static std::string seq;
if (top) seq.resize(n, '0');
for (int i = 1; i <= n - k + 1; i++, pos += dir) {
seq[pos] = '1';
if (k > 1) walkingBits(n - i, k - 1, i % 2 ? dir : -dir, pos + dir * (i % 2 ? 1 : n - i), false);
else std::cout << seq << '\n';
seq[pos] = '0';
}
if (top) seq.clear();
}
int main() {
walkingBits(7, 3);
}
(See also this C++11 version, written by VolkerK in response to a question about the above code.)
(Rextester seems to have been hacked, so I've pasted Volker's code below.)
#include <iostream>
#include <vector>
#include <functional>
void walkingBits(size_t n, size_t k) {
std::vector<bool> seq(n, false);
std::function<void(const size_t, const size_t, const int, size_t)> walk = [&](const size_t n, const size_t k, const int dir, size_t pos){
for (size_t i = 1; i <= n - k + 1; i++, pos += dir) {
seq[pos] = true;
if (k > 1) {
walk(n - i, k - 1, i % 2 ? dir : -dir, pos + dir * (i % 2 ? 1 : n - i));
}
else {
for (bool v : seq) {
std::cout << v;
}
std::cout << std::endl;
}
seq[pos] = false;
}
};
walk(n, k, 1, 0);
}
int main() {
walkingBits(7, 3);
return 0;
}
Code example 2
Or, if you prefer code where elements of an array are actually being swapped:
function walkingBits(n, k) {
var seq = [];
for (var i = 0; i < n; i++) seq[i] = i < k ? 1 : 0;
document.write(seq + "<BR>");
walkRight(n, k, 0);
function walkRight(n, k, pos) {
if (k == 1) for (var p = pos + 1; p < pos + n; p++) swap(p - 1, p)
else for (var i = 1; i <= n - k; i++) {
[walkLeft, walkRight][i % 2](n - i, k - 1, pos + i);
swap(pos + i - 1, pos + i + (i % 2 ? 0 : k - 1));
}
}
function walkLeft(n, k, pos) {
if (k == 1) for (var p = pos + n - 1; p > pos; p--) swap(p - 1, p)
else for (var i = 1; i <= n - k; i++) {
[walkRight, walkLeft][i % 2](n - i, k - 1, pos);
swap(pos + n - i - (i % 2 ? 1 : k), pos + n - i);
}
}
function swap(a, b) {
var c = seq[a]; seq[a] = seq[b]; seq[b] = c;
document.write(seq + "<BR>");
}
}
walkingBits(7,3);
Code example 3
Here the recursion is rolled out into an iterative implementation, with each of the set bits (i.e. each of the recursion levels) represented by an object {o,d,n,p} which holds the offset from the leftmost position, the direction the set bit is moving in, the number of bits (i.e. the length of this part of the sequence), and the current position of the set bit within this part.
function walkingBits(n, k) {
var b = 0, seq = [], bit = [{o: 0, d: 1, n: n, p: 0}];
for (var i = 0; i < n; i++) seq.push(0);
while (bit[0].p <= n - k) {
seq[bit[b].o + bit[b].p * bit[b].d] = 1;
while (++b < k) {
bit[b] = {
o: bit[b-1].o + bit[b-1].d * (bit[b-1].p %2 ? bit[b-1].n-1 : bit[b-1].p+1),
d: bit[b-1].d * (bit[b-1].p %2 ? -1 : 1),
n: bit[b-1].n - bit[b-1].p - 1,
p: 0
}
seq[bit[b].o + bit[b].p * bit[b].d] = 1;
}
document.write(seq + "<BR>");
b = k - 1;
do seq[bit[b].o + bit[b].p * bit[b].d] = 0;
while (++bit[b].p > bit[b].n + b - k && b--);
}
}
walkingBits(7, 3); // n >= k > 0
Transforming lexicographical order into walking bit
Because the walking bit algorithm is a variation of the algorithm to generate the permutations in (reverse) lexicographical order, each permutation in the lexicographical order can be transformed into its corresponding permutation in the walking bit order, by mirroring the appropriate parts of the binary sequence.
So you can use any algorithm (e.g. Gosper's Hack) to create the permutations in lexicographical or reverse lexicographical order, and then transform each one to get the walking bit order.
Practically, this means iterating over the binary sequence from left to right, and if you find a set bit after an odd number of zeros, reversing the rest of the sequence and iterating over it from right to left, and so on...
Code example 4
In the code below the permutations for n,k = 7,3 are generated in reverse lexicographical order, and then transformed one-by-one:
function lexi2walk(lex) {
var seq = [], ofs = 0, pos = 0, dir = 1;
for (var i = 0; i < lex.length; ++i) {
if (seq[ofs + pos * dir] = lex[i]) {
if (pos % 2) ofs -= (dir *= -1) * (pos + lex.length - 1 - i)
else ofs += dir * (pos + 1);
pos = 0;
} else ++pos;
}
return seq;
}
function revLexi(seq) {
var max = true, pos = seq.length, set = 1;
while (pos-- && (max || !seq[pos])) if (seq[pos]) ++set; else max = false;
if (pos < 0) return false;
seq[pos] = 0;
while (++pos < seq.length) seq[pos] = set-- > 0 ? 1 : 0;
return true;
}
var s = [1,1,1,0,0,0,0];
document.write(s + " → " + lexi2walk(s) + "<br>");
while (revLexi(s)) document.write(s + " → " + lexi2walk(s) + "<br>");
Homogeneous Gray path
The permutation order created by this algorithm is similar, but not identical, to the one created by the "homogeneous Gray path for combinations" algorithm described by D. Knuth in The Art of Computer Programming vol. 4a, sect. 7.2.1.3, formula (31) & fig. 26c.
This is easy to achieve with recursion:
public static void nextPerm(List<Integer> list, int num, int index, int n, int k) {
    if (k == 0) {
        list.add(num);
        return;
    }
    if (index == n) return;
    int mask = 1 << index;
    nextPerm(list, num ^ mask, index + 1, n, k - 1);
    nextPerm(list, num, index + 1, n, k);
}
Running this with the client:
public static void main(String[] args) {
    ArrayList<Integer> list = new ArrayList<Integer>();
    nextPerm(list, 0, 0, 4, 2);
    // print each result as a zero-padded 4-bit binary string
    for (int x : list)
        System.out.println(String.format("%4s", Integer.toBinaryString(x)).replace(' ', '0'));
}
Output:
0011
0101
1001
0110
1010
1100
The idea is to start with the initial number, and consider changing a bit, one index at a time, and to keep track of how many times you changed the bits. Once you changed the bits k times (when k == 0), store the number and terminate the branch.
I'm trying to use the repeated squaring algorithm (using recursion) to perform matrix exponentiation. I've included header files from the NEWMAT library instead of using arrays. The original matrix has elements in the range (-5,5), all numbers being of type float.
# include "C:\User\newmat10\newmat.h"
# include "C:\User\newmat10\newmatio.h"
# include "C:\User\newmat10\newmatap.h"
# include <iostream>
# include <time.h>
# include <ctime>
# include <cstdlib>
# include <iomanip>
using namespace std;
Matrix repeated_squaring(Matrix A, int exponent, int n) //Recursive function
{
A(n,n);
IdentityMatrix I(n);
if (exponent == 0) //Matrix raised to zero returns an Identity Matrix
return I;
else
{
if ( exponent%2 == 1 ) // if exponent is odd
return (A * repeated_squaring (A*A, (exponent-1)/2, n));
else //if exponent is even
return (A * repeated_squaring( A*A, exponent/2, n));
}
}
Matrix direct_squaring(Matrix B, int k, int no) //Brute Force Multiplication
{
B(no,no);
Matrix C = B;
for (int i = 1; i <= k; i++)
C = B*C;
return C;
}
//----Creating a matrix with elements b/w (-5,5)----
float unifRandom()
{
int a = -5;
int b = 5;
float temp = (float)((b-a)*( rand()/RAND_MAX) + a);
return temp;
}
Matrix initialize_mat(Matrix H, int ord)
{
H(ord,ord);
for (int y = 1; y <= ord; y++)
for(int z = 1; z<= ord; z++)
H(y,z) = unifRandom();
return(H);
}
//---------------------------------------------------
void main()
{
int exponent, dimension;
cout<<"Insert exponent:"<<endl;
cin>>exponent;
cout<< "Insert dimension:"<<endl;
cin>>dimension;
cout<<"The number of rows/columns in the square matrix is: "<<dimension<<endl;
cout<<"The exponent is: "<<exponent<<endl;
Matrix A(dimension,dimension),B(dimension,dimension);
Matrix C(dimension,dimension),D(dimension,dimension);
B= initialize_mat(A,dimension);
cout<<"Initial Matrix: "<<endl;
cout<<setw(5)<<setprecision(2)<<B<<endl;
//-----------------------------------------------------------------------------
cout<<"Repeated Squaring Result: "<<endl;
clock_t time_before1 = clock();
C = repeated_squaring (B, exponent , dimension);
cout<< setw(5) <<setprecision(2) <<C;
clock_t time_after1 = clock();
float diff1 = ((float) time_after1 - (float) time_before1);
cout << "It took " << diff1/CLOCKS_PER_SEC << " seconds to complete" << endl<<endl;
//---------------------------------------------------------------------------------
cout<<"Direct Squaring Result:"<<endl;
clock_t time_before2 = clock();
D = direct_squaring (B, exponent , dimension);
cout<<setw(5)<<setprecision(2)<<D;
clock_t time_after2 = clock();
float diff2 = ((float) time_after2 - (float) time_before2);
cout << "It took " << diff2/CLOCKS_PER_SEC << " seconds to complete" << endl<<endl;
}
I face the following problems:
The random number generator returns only "-5" as each element in the output.
The Matrix multiplication yield different results with brute force multiplication and using the repeated squaring algorithm.
I'm timing the execution time of my code to compare the times taken by brute force multiplication and by repeated squaring.
Could someone please find out what's wrong with the recursion and with the matrix initialization?
NOTE: While compiling this program, make sure you've linked against the NEWMAT library.
Thanks in advance!
rand() returns an int, so rand()/RAND_MAX will truncate to an integer, i.e. 0 (except when rand() happens to equal RAND_MAX). Try your repeated-squaring algorithm by hand with exponent = 1, 2 and 3 and you'll find a surplus A * and a gross inefficiency.
The final working code has the following improvements:
Matrix repeated_squaring(Matrix A, int exponent, int n) //Recursive function
{
A(n,n);
IdentityMatrix I(n);
if (exponent == 0) //Matrix raised to zero returns an Identity Matrix
return I;
if (exponent == 1)
return A;
{
if (exponent % 2 == 1) // if exponent is odd
return (A*repeated_squaring (A*A, (exponent-1)/2, n));
else //if exponent is even
return (repeated_squaring(A*A, exponent/2, n));
}
}
Matrix direct_squaring(Matrix B, int k, int no) //Brute Force Multiplication
{
B(no,no);
Matrix C(no,no);
C=B;
for (int i = 0; i < k-1; i++)
C = B*C;
return C;
}
//----Creating a matrix with elements b/w (-5,5)----
float unifRandom()
{
int a = -5;
int b = 5;
float temp = (float) ((b-a)*((float) rand()/RAND_MAX) + a);
return temp;
}