I need to start over with the same random number sequence on every execution of my app.
srand/rand do not exist anymore. What should I do instead?
private extension Array {
    private func randomValues(_ seed: UInt32, num: Int) -> [Element] {
        srand(seed)  // srand/rand are no longer available
        var indices = [Int]()
        indices.reserveCapacity(num)
        let range = 0..<self.count
        for _ in 0..<num {
            var random = 0
            repeat {
                random = randomNumberInRange(range)  // helper built on rand()
            } while indices.contains(random)
            indices.append(random)
        }
        return indices.map { self[$0] }
    }
}
You can use srand48(seed) and drand48() in Swift 3.
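For example, a minimal sketch (the seed value and the scaling to 0..<10 are only illustrative):
import Foundation

srand48(42)                // fixed seed -> the same sequence on every run
for _ in 0..<5 {
    let d = drand48()      // Double in [0, 1)
    let n = Int(d * 10)    // scale to an Int in 0..<10
    print(d, n)
}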
Unless you're developing with Swift for non-Apple platforms, you can get a much better randomization API in GameplayKit: several algorithms (trading randomness against speed), seedable sources, distribution control, etc.
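For instance, a hedged sketch of that approach (GKMersenneTwisterRandomSource is one of several seedable sources; the seed, bounds, and counts here are only illustrative):
import GameplayKit

let source = GKMersenneTwisterRandomSource(seed: 42)   // same seed -> same sequence
let d6 = GKRandomDistribution(randomSource: source, lowestValue: 1, highestValue: 6)
for _ in 0..<5 {
    print(source.nextInt(upperBound: 100), d6.nextInt())
}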
I can't find a way to use seeded random in Swift 3 Beta 1, so I had to write a silly wrapper function in C:
// ----------------------------------------------
// my_random.h
// ----------------------------------------------
#ifndef my_random_h
#define my_random_h

long next_random(void);

#endif /* my_random_h */

// ----------------------------------------------
// my_random.c
// ----------------------------------------------
#include <stdlib.h>
#include "my_random.h"

long next_random(void) {
    return random();
}
You can use the bridging header to import it into Swift. Then you can call it in Swift like this:
srandom(42)
for _ in 0..<10 {
    let x = next_random()
    print(x)
}
random() is better than rand(); read the man pages for a discussion of these two functions.
Edit:
A workaround, as @riskter suggested, is to use GameplayKit:
import GameplayKit

let seed = Data(bytes: [42]) // use any [UInt8] array
let source = GKARC4RandomSource(seed: seed)
for _ in 0..<10 {
    let x = source.nextInt()
    print(x)
}
For a simple repeatable random list try using a Linear Congruential Generator:
import Foundation

class LinearCongruentialGenerator {
    var state = 0  // seed of 0 by default
    let a, c, m, shift: Int

    // Microsoft rand() constants by default
    init() {
        self.a = 214013
        self.c = 2531011
        self.m = Int(pow(2.0, 31.0))  // 2^31 or 2147483648
        self.shift = 16
    }

    init(a: Int, c: Int, m: Int, shift: Int) {
        self.a = a
        self.c = c
        self.m = m
        self.shift = shift
    }

    func seed(seed: Int) {
        state = seed
    }

    func random() -> Int {
        state = (a * state + c) % m
        return state >> shift
    }
}

let microsoftLinearCongruentialGenerator = LinearCongruentialGenerator()
print("Microsoft rand:")
for _ in 0...10 {
    print(microsoftLinearCongruentialGenerator.random())
}
More info here:
https://rosettacode.org/wiki/Linear_congruential_generator
I just happened to put this together for Swift 4. I am aware Swift 4.2 has new random extensions that are different from this, but like the OP, I needed the generator to be seedable during testing. Maybe someone will find it helpful. If you don't seed it, it uses arc4random; otherwise it uses drand48. It avoids modulo bias either way.
import Foundation

class Random {
    static var number = unseededGenerator  // the current generator

    /**
     * Returns a random Int in 0..<n
     */
    func get(anIntLessThan n: Int) -> Int {
        return generatingFunction(n)
    }

    class func set(seed: Int) {
        number = seedableGenerator
        srand48(seed)
    }

    // Don't normally need to call the rest
    typealias GeneratingFunction = (Int) -> Int

    static let unseededGenerator = Random() {
        Int(arc4random_uniform(UInt32($0)))
    }

    static let seedableGenerator = Random() {
        Int(drand48() * Double($0))
    }

    init(_ gf: @escaping GeneratingFunction) {
        self.generatingFunction = gf
    }

    private let generatingFunction: GeneratingFunction
}
func randomTest() {
    Random.set(seed: 65) // comment this line out for unseeded
    for _ in 0..<10 {
        print(
            Random.number.get(anIntLessThan: 2),
            terminator: " "
        )
    }
}

// Run
randomTest()
I'm trying to select 2 random items out of a list using the RNG class. The problem is that occasionally I get the same 2 numbers, and I'd like them to be unique. I tried using a while loop to get another number if it's the same as the last one, but adding even a simple while loop results in an "Exceeded prepaid gas" error. What am I not understanding?
// simplified for posting question
var lengthOfList = 10
var numItemsWanted = 2

// Get more RNG numbers than I need in case of duplicates
const rng = new RNG<u32>(lengthOfList, lengthOfList)

for (let i = 0; i < numItemsWanted; i++) {
    var r = rng.next()
    while (r == rng.last()) {
        r = rng.next()
    }
    newList.push(oldList[r])
}
Working:
// simplified for posting question
var lengthOfList = 10
var numItemsWanted = 2

// Get more RNG numbers than I need in case of duplicates
const rng = new RNG<u32>(lengthOfList, lengthOfList)

let r = rng.next()
let last = r + 1
for (let i = 0; i < numItemsWanted; i++) {
    newList.push(oldList[r])
    last = r
    r = rng.next()
    while (r == last) {
        r = rng.next()
    }
}
This is about near-sdk-as, the smart contract development kit for AssemblyScript on the NEAR platform.
You can see how RNG is used in this example:
https://github.com/Learn-NEAR/NCD.L1.sample--lottery/blob/ff6cddaa8cac4d8fe29dd1a19b38a6e3c7045363/src/lottery/assembly/lottery.ts#L12-L13
class Lottery {
    private chance: f64 = 0.20

    play(): bool {
        const rng = new RNG<u32>(1, u32.MAX_VALUE);
        const roll = rng.next();
        logging.log("roll: " + roll.toString());
        return roll <= <u32>(<f64>u32.MAX_VALUE * this.chance);
    }
}
And here is how the constructor is implemented:
https://github.com/near/near-sdk-as/blob/f3707a1672d6da6f6d6a75cd645f8cbdacdaf495/sdk-core/assembly/math.ts#L152
The first argument is the length of the buffer holding random numbers generated from the seed; you can use the next() method to get more numbers from this buffer with each call:
export class RNG<T> {
    constructor(len: u32, public max: u32 = 10_000) {
        let real_len = len * sizeof<T>();
        this.buffer = math.randomBuffer(real_len);
        this._last = this.get(0);
    }
    next(): T {} // implementation elided
}
If you remove the item from oldList once it is picked, it becomes impossible to pick it again.
Another approach is to shuffle your oldList and then pick the first two items.
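For example, a rough sketch of the removal approach (assuming the same RNG<u32> API used above and plain AssemblyScript arrays; the pool copy and the modulo indexing are only illustrative and ignore the small modulo bias):
const rng = new RNG<u32>(lengthOfList, lengthOfList)
const pool = oldList.slice(0)                       // work on a copy
for (let i = 0; i < numItemsWanted; i++) {
    const r = <i32>(rng.next() % <u32>pool.length)  // index into what is left
    newList.push(pool[r])
    pool.splice(r, 1)                               // remove the picked item so it cannot repeat
}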
The idea is to have a data structure whose elements can be accessed only randomly, based on a probability factor defined by the user for each element. So if the probability that a structure containing 100 elements yields x is 0.5, then, theoretically, retrieving a random element a hundred times should return x about ~50 times.
I couldn't find a ready-made solution that does this, so this is my take on it:
import kotlin.math.absoluteValue

/**
 * @author mhashim6 on 13/10/2019
 */
class ProbabilitySet<T>(private val items: Array<out Pair<T, Float>>) {

    private var probabilityIndices: List<Int>

    private fun calcFutureSize(count: Int, probability: Float) =
        ((count / (1f - probability)) - count).toInt().absoluteValue

    init {
        probabilityIndices = items.withIndex().flatMap { (i, item) ->
            item.act { (_, probability) ->
                calcFutureSize(items.size, probability).minus(items.size).act { delta ->
                    Iterable { ConstIterator(delta, i) }
                }
            }
        }
    }

    fun next(): T = items.random().first
}

class ConstIterator(private var size: Int, private val const: Int) : IntIterator() {
    override fun nextInt(): Int {
        size--
        return const
    }

    override fun hasNext(): Boolean = size > 0
}

fun <E> probabilitySetOf(vararg items: Pair<E, Float>) = ProbabilitySet(items)

inline fun <T, R> T.act(action: (T) -> R) = action(this)
I tried to make it mutable, but I ran into a lot of complexities regarding time and memory, so it's immutable for now.
Is this a viable implementation?
Is there an existing implementation of this already?
How could it be made mutable?
I assume that if the sum of the elements' probabilities is not equal to 1, each element's actual probability must be calculated by dividing its original probability by the sum of all elements' probabilities. For example, a ProbabilitySet consisting of "A" to 0.1F and "B" to 0.3F returns "A" in 25% of cases and "B" in 75% of cases.
Here is my implementation of a mutable ProbabilitySet with add running in O(1) and next running in O(log N):
import kotlin.random.Random

class ProbabilitySet<E>(
    private val random: Random = Random.Default
) {
    private val nodes = mutableListOf<Node>()
    private var sum = 0F

    fun add(element: E, probability: Float) {
        require(probability >= 0) { "[$element]'s probability ($probability) is less than 0" }
        val oldSum = sum
        sum += probability
        nodes += Node(oldSum..sum, element)
    }

    fun isEmpty() = sum == 0F

    fun next(): E {
        if (isEmpty()) throw NoSuchElementException("ProbabilitySet is empty")
        val index = random.nextFloat() * sum
        return nodes[nodes.binarySearch {
            when {
                it.range.start > index -> 1
                it.range.endInclusive < index -> -1
                else -> 0
            }
        }].element
    }

    private inner class Node(
        val range: ClosedRange<Float>,
        val element: E
    )
}
Factory method:
fun <E> probabilitySetOf(vararg items: Pair<E, Float>, random: Random = Random.Default) =
    ProbabilitySet<E>(random).apply {
        items.forEach { (element, probability) -> add(element, probability) }
    }
Use case:
val set = probabilitySetOf("A" to 0.4F, "B" to 0.3F)
println(set.next())
set.add("C", 0.9F)
println(set.next())
I am learning Rust. I am trying to calculate a list of prime numbers up to some number. For that I need to create a vector (vec1) inside an if block and use it outside the scope of the if.
I tried code with the same logic in MATLAB and it works.
A simplified version of the actual code looks like this:
fn main() {
    let mut initiate = 1;
    let mut whilechecker = 2;
    while whilechecker > 0 {
        whilechecker = whilechecker - 1;
        if initiate == 1 {
            let mut vec1 = vec![2];
        }
        for i in &vec1 {
            if *i == 2 {
                break;
            }
        } // for
        initiate = 2;
        vec1.push(5);
    } // while
} // main
It is supposed to put a list of prime numbers in vec1, but since this is simplified code, it just needs to compile and produce a vector (vec1).
However, the compiler says:
cannot find value `vec1` in this scope
at `for i in &vec1 {` and at `vec1.push(5);`.
Can you make it compile?
There's no reason for the complicated `if initiate == 1` check. Just move the initialization of the vector outside the while loop, so it gets done only once:
fn main() {
    let mut whilechecker = 2;
    let mut vec1 = vec![2];
    while whilechecker > 0 {
        whilechecker = whilechecker - 1;
        for i in &vec1 {
            if *i == 2 {
                break;
            }
        } // for
        vec1.push(5);
    } // while
} // main
I don't quite follow what you actually want, but here is an example that may help you define the variable in the outer scope.
fn main() {
    let mut initiate = 1;
    let mut whilechecker = 2;
    let mut vec1 = Vec::new();
    while whilechecker > 0 {
        if initiate == 1 {
            let mut vec1 = vec![2];
        }
        for i in &vec1 {
            if *i == 2 {
                break;
            }
        }
        initiate = 2;
        vec1.push(5);
        whilechecker = whilechecker - 1;
    }
    println!("{:?}", vec1);
}
The output of the given code is:
[5, 5]
I need to implement a for loop that goes from one floating point number to another with the step as another floating point number.
I know how to implement that in C-like languages:
for (float i = -1.0; i < 1.0; i += 0.01) { /* ... */ }
I also know that in Rust I can specify the loop step using step_by, and that gives me what I want if I have the boundary values and step as integers:
#![feature(iterator_step_by)]
fn main() {
    for i in (0..30).step_by(3) {
        println!("Index {}", i);
    }
}
When I do that with floating point numbers, it results in a compilation error:
#![feature(iterator_step_by)]
fn main() {
    for i in (-1.0..1.0).step_by(0.01) {
        println!("Index {}", i);
    }
}
And here is the compilation output:
error[E0599]: no method named `step_by` found for type `std::ops::Range<{float}>` in the current scope
--> src/main.rs:4:26
|
4 | for i in (-1.0..1.0).step_by(0.01) {
| ^^^^^^^
|
= note: the method `step_by` exists but the following trait bounds were not satisfied:
`std::ops::Range<{float}> : std::iter::Iterator`
`&mut std::ops::Range<{float}> : std::iter::Iterator`
How can I implement this loop in Rust?
If you haven't yet, I invite you to read Goldberg's What Every Computer Scientist Should Know About Floating-Point Arithmetic.
The problem with floating points is that your code may be doing 200 or 201 iterations, depending on whether the last step of the loop ends up being i = 0.99 or i = 0.999999 (which is still < 1 even if really close).
To avoid this footgun, Rust does not allow iterating over a range of f32 or f64. Instead, it forces you to use integral steps:
for i in -100i8..100 {
    let i = f32::from(i) * 0.01;
    // ...
}
See also:
How do I convert between numeric types safely and idiomatically?
As a real iterator:
Playground
/// produces: [ linear_interpol(start, end, i/steps) | i <- 0..steps ]
/// (does NOT include "end")
///
/// linear_interpol(a, b, p) = (1 - p) * a + p * b
pub struct FloatIterator {
    current: u64,
    current_back: u64,
    steps: u64,
    start: f64,
    end: f64,
}

impl FloatIterator {
    pub fn new(start: f64, end: f64, steps: u64) -> Self {
        FloatIterator {
            current: 0,
            current_back: steps,
            steps: steps,
            start: start,
            end: end,
        }
    }

    /// calculates number of steps from (end - start) / step
    pub fn new_with_step(start: f64, end: f64, step: f64) -> Self {
        let steps = ((end - start) / step).abs().round() as u64;
        Self::new(start, end, steps)
    }

    pub fn length(&self) -> u64 {
        self.current_back - self.current
    }

    fn at(&self, pos: u64) -> f64 {
        let f_pos = pos as f64 / self.steps as f64;
        (1. - f_pos) * self.start + f_pos * self.end
    }

    /// panics (in debug) when len doesn't fit in usize
    fn usize_len(&self) -> usize {
        let l = self.length();
        debug_assert!(l <= ::std::usize::MAX as u64);
        l as usize
    }
}
impl Iterator for FloatIterator {
    type Item = f64;

    fn next(&mut self) -> Option<Self::Item> {
        if self.current >= self.current_back {
            return None;
        }
        let result = self.at(self.current);
        self.current += 1;
        Some(result)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let l = self.usize_len();
        (l, Some(l))
    }

    fn count(self) -> usize {
        self.usize_len()
    }
}

impl DoubleEndedIterator for FloatIterator {
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.current >= self.current_back {
            return None;
        }
        self.current_back -= 1;
        let result = self.at(self.current_back);
        Some(result)
    }
}

impl ExactSizeIterator for FloatIterator {
    fn len(&self) -> usize {
        self.usize_len()
    }

    //fn is_empty(&self) -> bool {
    //    self.length() == 0u64
    //}
}
pub fn main() {
    println!(
        "count: {}",
        FloatIterator::new_with_step(-1.0, 1.0, 0.01).count()
    );
    for f in FloatIterator::new_with_step(-1.0, 1.0, 0.01) {
        println!("{}", f);
    }
}
This is basically doing the same as in the accepted answer, but you might prefer to write something like:
for i in (-100..100).map(|x| x as f64 * 0.01) {
    println!("Index {}", i);
}
Another answer using iterators, but in a slightly different way (playground):
extern crate num;

use num::{Float, FromPrimitive};

fn linspace<T>(start: T, stop: T, nstep: u32) -> Vec<T>
where
    T: Float + FromPrimitive,
{
    let delta: T = (stop - start) / T::from_u32(nstep - 1).expect("out of range");
    return (0..(nstep))
        .map(|i| start + T::from_u32(i).expect("out of range") * delta)
        .collect();
}

fn main() {
    for f in linspace(-1f32, 1f32, 3) {
        println!("{}", f);
    }
}
Under nightly you can use the conservative_impl_trait feature to avoid the Vec allocation (playground):
#![feature(conservative_impl_trait)]
extern crate num;

use num::{Float, FromPrimitive};

fn linspace<T>(start: T, stop: T, nstep: u32) -> impl Iterator<Item = T>
where
    T: Float + FromPrimitive,
{
    let delta: T = (stop - start) / T::from_u32(nstep - 1).expect("out of range");
    return (0..(nstep))
        .map(move |i| start + T::from_u32(i).expect("out of range") * delta);
}

fn main() {
    for f in linspace(-1f32, 1f32, 3) {
        println!("{}", f);
    }
}
For the reasons mentioned by others, one shouldn't be looping using floats under most circumstances.
For those cases where it is appropriate, it can be done (although not as ergonomically, which is probably good design; Rust should make it more difficult to juggle running chainsaws).
Since Rust 1.34, std::iter::successors() enables looping directly with a floating point index:
use std::iter;

const START: f64 = -1.0;
const END: f64 = 1.0;
// Increment by 0.1 (instead of 0.01 per the question) for output brevity
const INCREMENT: f64 = 0.1;

fn main() {
    iter::successors(Some(START), |i| {
        let next = i + INCREMENT;
        (next < END).then_some(next)
    })
    .for_each(|i| println!("{i}"));
}
Note there are 21 lines of output, although only 20 were probably expected given the condition of i < 1.0 (as opposed to i <= 1.0) in the sample code of your question.
This is due to the precision and/or cumulative rounding errors present in the output, even though the source code specifies iterating from -1.0 to 1.0 in increments of exactly 0.1. (Feel free to switch the START value to 0.0 or 0.3 to see different series output, also with precision/cumulative rounding errors).
Playground example
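The accumulation error is easy to see in isolation; here is a minimal sketch (plain f64 arithmetic, independent of successors) of the same repeated addition:
fn main() {
    let mut x: f64 = -1.0;
    for _ in 0..20 {
        x += 0.1; // the same accumulation the successors chain performs
    }
    // Mathematically x is exactly 1.0 here, but repeated rounding leaves it
    // just below 1.0, which is why a `next < END` check still accepts a 21st value.
    println!("{} (x < 1.0 is {})", x, x < 1.0);
}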
I implemented the Miller-Rabin strong pseudoprime test in Rust using BigUint to support arbitrarily large primes. Running it over the numbers between 5 and 10^6 took about 40 s with cargo run --release.
I implemented the same algorithm with Java's BigInteger, and the same test took 10 s to finish. Rust appears to be four times slower. I assume this is caused by the implementation of num::bigint.
Is this just the current state of num::bigint, or can anyone spot any obvious improvement in my code? (Mainly in how I used the language. Regardless of whether my implementation of the algorithm is good or bad, it is implemented almost exactly the same way in both languages, so that should not cause the difference in performance.)
I did notice there are lots of clone() calls required by Rust's ownership model, which could well impact the speed to some degree. But I guess there is no way around that, am I right?
Here is the code:
extern crate rand;
extern crate num;
extern crate core;
extern crate time;

use std::time::{Duration};
use time::{now, Tm};
use rand::Rng;
use num::{Zero, One};
use num::bigint::{RandBigInt, BigUint, ToBigUint};
use num::traits::{ToPrimitive};
use num::integer::Integer;
use core::ops::{Add, Sub, Mul, Div, Rem, Shr};

fn find_r_and_d(i: BigUint) -> (u64, BigUint) {
    let mut d = i;
    let mut r = 0;
    loop {
        if d.clone().rem(&2u64.to_biguint().unwrap()) == Zero::zero() {
            d = d.shr(1usize);
            r = r + 1;
        } else {
            break;
        }
    }
    return (r, d);
}
fn might_be_prime(n: &BigUint) -> bool {
    let nsub1 = n.sub(1u64.to_biguint().unwrap());
    let two = 2u64.to_biguint().unwrap();
    let (r, d) = find_r_and_d(nsub1.clone());
    'WitnessLoop: for kk in 0..6u64 {
        let a = rand::thread_rng().gen_biguint_range(&two, &nsub1);
        let mut x = mod_exp(&a, &d, &n);
        if x == 1u64.to_biguint().unwrap() || x == nsub1 {
            continue;
        }
        for rr in 1..r {
            x = x.clone().mul(x.clone()).rem(n);
            if x == 1u64.to_biguint().unwrap() {
                return false;
            } else if x == nsub1 {
                continue 'WitnessLoop;
            }
        }
        return false;
    }
    return true;
}
fn mod_exp(base: &BigUint, exponent: &BigUint, modulus: &BigUint) -> BigUint {
    let one = 1u64.to_biguint().unwrap();
    let mut result = one.clone();
    let mut base_clone = base.clone();
    let mut exponent_clone = exponent.clone();
    while exponent_clone > 0u64.to_biguint().unwrap() {
        if exponent_clone.clone() & one.clone() == one {
            result = result.mul(&base_clone).rem(modulus);
        }
        base_clone = base_clone.clone().mul(base_clone).rem(modulus);
        exponent_clone = exponent_clone.shr(1usize);
    }
    return result;
}

fn main() {
    let now1 = now();
    for n in 5u64..1_000_000u64 {
        let b = n.to_biguint().unwrap();
        if might_be_prime(&b) {
            println!("{}", n);
        }
    }
    let now2 = now();
    println!("{}", now2.to_timespec().sec - now1.to_timespec().sec);
}
You can remove most of the clones pretty easily. BigUint implements all the arithmetic ops traits for &BigUint as well, not just for owned values. With that it becomes faster, but still only about half as fast as Java...
Also (not related to performance, just readability): you don't need to call add, sub, mul and shr explicitly; the regular +, -, * and >> operators call them for you.
For instance, you could rewrite might_be_prime and mod_exp like this, which already gives a good speedup on my machine (from 40 s down to about 24 s on average):
fn might_be_prime(n: &BigUint) -> bool {
    let one = BigUint::one();
    let nsub1 = n - &one;
    let two = BigUint::new(vec![2]);
    let mut rng = rand::thread_rng();
    let (r, d) = find_r_and_d(nsub1.clone());
    let mut x;
    let mut a: BigUint;
    'WitnessLoop: for _ in 0..6u64 {
        a = rng.gen_biguint_range(&two, &nsub1);
        // mod_exp shifts its exponent argument down to zero, so give it a
        // fresh copy of d on every witness round
        let mut d_round = d.clone();
        x = mod_exp(&mut a, &mut d_round, &n);
        if &x == &one || x == nsub1 {
            continue;
        }
        for _ in 1..r {
            x = (&x * &x) % n;
            if &x == &one {
                return false;
            } else if x == nsub1 {
                continue 'WitnessLoop;
            }
        }
        return false;
    }
    true
}
fn mod_exp(base: &mut BigUint, exponent: &mut BigUint, modulus: &BigUint) -> BigUint {
    let one = BigUint::one();
    let zero = BigUint::zero();
    let mut result = BigUint::one();
    while &*exponent > &zero {
        if &*exponent & &one == one {
            result = (result * &*base) % modulus;
        }
        *base = (&*base * &*base) % modulus;
        *exponent = &*exponent >> 1usize;
    }
    result
}
Note that I've moved the println! out of the timed section, so that we're not benchmarking I/O.
fn main() {
    let now1 = now();
    let v = (5u64..1_000_000u64)
        .filter_map(|n| n.to_biguint())
        .filter(|n| might_be_prime(&n))
        .collect::<Vec<BigUint>>();
    let now2 = now();
    for n in v {
        println!("{}", n);
    }
    println!("time spent seconds: {}", now2.to_timespec().sec - now1.to_timespec().sec);
}