BFS algorithm, tracking the path - algorithm

Anyone here familiar with BFS and path tracking? I have written the algorithm for the first time and it finds the shortest path, but tracing the shortest path is the part I'm stuck at. The list below is a list of previous indexes (y, x), and when it reaches (0, 5) there are two paths, one to the left and another to the right; I only want to include the path that leads to the destination, but I have no clue how to make that work. I keep track of the previous node, but once we get to (0, 5) the setting of previous starts messing up, because there are two paths. Because of this I can't backtrack from the destination.
How have you kept track of previous and made it work? I have read so many articles but still haven't found anything that really explains it to me.
Any help is greatly appreciated
[
(0, 0),
(1, 0),
(2, 0),
(2, 1),
(2, 2),
(2, 3),
(3, 3),
(4, 3),
(5, 3),
(5, 4),
(5, 5),
(4, 5),
(3, 5),
(2, 5),
(1, 5),
(0, 5), <-- starts to mess up here because there are two paths to take, left and right
(0, 6), <-- to the right
(0, 4), <-- to the left
(0, 7), <-- to the right
(0, 3), <-- to the left
(0, 8), <-- to the right
(1, 7), <-- to the left and one down
(0, 2), <-- to the left
(0, 9) <-- to the right (also happens to be the destination)
]
Code:
use std::collections::VecDeque;
// BFS over a 10x10 grid. Cell values: 3 = wall, `target` = goal marker.
// Returns true when a cell containing `target` is reachable from (0, 0).
// NOTE(review): `previous_nodes` records the order in which nodes were
// expanded (each entry is simply the node expanded before this one), not a
// per-node parent pointer, so it cannot be backtracked into a single path
// once the frontier forks — which is exactly the bug the question describes.
fn bfs(arr: [[i32; 10]; 10], target: i32) -> bool {
// Per-cell "already expanded" flags.
let mut visited = [[false, false, false, false, false, false, false, false, false, false]; 10];
// FIFO frontier of (y, x) coordinates; FIFO order is what makes this BFS.
let mut queue: VecDeque<(i32, i32)> = VecDeque::new();
queue.push_back((0, 0));
let mut previous_nodes = Vec::new();
let mut previous = (-1, -1);
while !queue.is_empty() {
let (y, x) = queue.pop_front().unwrap();
// A coordinate may be enqueued several times; expand it only once.
if visited[y as usize][x as usize] == true {
continue;
}
visited[y as usize][x as usize] = true;
previous_nodes.push(previous);
previous = (y, x);
// ANSI clear-screen, then redraw the grid as a step-by-step animation.
print!("{}[2J", 27 as char);
for y in 0..visited.len() {
for x in 0..visited.len() {
if arr[y][x] == target && visited[y][x] == true {
print!("X ");
} else if visited[y][x] == true {
print!("0 ");
} else if arr[y][x] == 3 {
print!("# ");
} else {
print!(". ");
}
}
print!("\n");
}
print!("\n");
// Goal reached: dump the (flawed) expansion trace and report success.
if arr[y as usize][x as usize] == target {
for entry in previous_nodes {
println!("{:?}", entry);
}
return true;
}
// Enqueue the four orthogonal neighbours that are in bounds and not walls.
if x + 1 < arr.len() as i32 && arr[y as usize][(x + 1) as usize] != 3 {
queue.push_back((y, x + 1));
}
if y + 1 < arr.len() as i32 && arr[(y + 1) as usize][x as usize] != 3 {
queue.push_back((y + 1, x));
}
if x - 1 >= 0 && arr[y as usize][(x - 1) as usize] != 3 {
queue.push_back((y, x - 1));
}
if y - 1 >= 0 && arr[(y - 1) as usize][x as usize] != 3 {
queue.push_back((y - 1, x));
}
}
// Frontier exhausted without reaching the target.
false
}
fn main() {
    // 10x10 maze: 0 = open cell, 3 = wall, 1 = the search target.
    let grid = [
        [0, 3, 0, 0, 0, 0, 0, 0, 0, 1],
        [0, 3, 3, 3, 3, 0, 3, 0, 3, 3],
        [0, 0, 0, 0, 3, 0, 3, 0, 0, 0],
        [3, 3, 3, 0, 3, 0, 3, 3, 3, 0],
        [0, 0, 3, 0, 3, 0, 3, 0, 3, 0],
        [0, 0, 3, 0, 0, 0, 3, 0, 3, 0],
        [0, 3, 3, 3, 3, 3, 3, 0, 3, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 3, 3, 3, 3, 3, 3, 3, 3, 3],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ];
    // Search for the cell holding value 1 and report whether it was reached.
    let found = bfs(grid, 1);
    println!("Found: {}", found);
}

First off, I started with a little bit of code cleanup to make it a bit more concise and made a rust playground to experiment with. There are two main approaches to solving this problem. Either you keep track of the path taken in the queue or the visited nodes. The easiest approach for you would likely be to simply adapt your code to have the visited nodes array point to the previous node in the path from each visited node. playground link
/// BFS over a 10x10 grid that also reconstructs the shortest path.
/// `visited[y][x]` stores the predecessor of (y, x) on the BFS tree, so the
/// path can be walked backwards from the target to the start.
/// Returns the path (start ..= target) or None when the target is unreachable.
fn bfs(arr: [[i32; 10]; 10], target: i32) -> Option<Vec<(i32, i32)>> {
let mut visited = [[None; 10]; 10];
let mut queue: VecDeque<(i32, i32)> = VecDeque::new();
queue.push_back((0, 0));
// Put some filler into the first location so the start counts as visited;
// the backtracking loop below stops at (0, 0) and never reads this value.
visited[0][0] = Some((0, 0));
while let Some((y, x)) = queue.pop_front() {
// Print debug info
println!("\nExpanding ({}, {})", x, y);
for y in 0..visited.len() {
for x in 0..visited.len() {
if arr[y][x] == target && visited[y][x].is_some() {
print!("X ");
} else if visited[y][x].is_some() {
print!("0 ");
} else if arr[y][x] == 3 {
print!("# ");
} else {
print!(". ");
}
}
println!();
}
// Check if this position is the target
if arr[y as usize][x as usize] == target {
// Follow predecessor links back to the start, then reverse.
let mut path_taken = Vec::new();
path_taken.push((y, x));
let mut prev_x = x;
let mut prev_y = y;
while prev_x != 0 || prev_y != 0 {
let (py, px) = visited[prev_y as usize][prev_x as usize].unwrap();
path_taken.push((py, px));
prev_y = py;
prev_x = px;
}
return Some(path_taken.into_iter().rev().collect())
}
// Iterate over adjacent offsets
for (dx, dy) in &[(1, 0), (0, 1), (-1, 0), (0, -1)] {
// Check if offset is within bounds
if x + dx < 0
|| y + dy < 0
|| (y + dy) as usize >= arr.len()
|| (x + dx) as usize >= arr[(y + dy) as usize].len()
{
continue;
}
// Check if offset points to valid location (3 = wall)
if arr[(y + dy) as usize][(x + dx) as usize] == 3 {
continue;
}
// Already has a predecessor: it was reached earlier on a path
// that is at least as short, so skip it.
if visited[(y + dy) as usize][(x + dx) as usize].is_some() {
continue;
}
// Record the predecessor and enqueue the neighbour.
visited[(y + dy) as usize][(x + dx) as usize] = Some((y, x));
queue.push_back((y + dy, x + dx));
}
}
None
}
However, I personally prefer the approach of keeping track of the path taken in the queue to reduce the memory requirement for small paths. While it is not as true to your original question, my favorite version of this would be to write BFS in a way that better represents how it described mathematically using type parameters. playground link
/// Generic BFS from `start` to `end`; `expand` yields a node's neighbours.
/// Each queue entry carries its own path (a persistent, Rc-shared cons list),
/// so no separate predecessor map is needed.
/// Returns the path as a `SearchPath` chain, or None when `end` is unreachable.
fn bfs<N, F, R>(start: N, end: N, expand: F) -> Option<SearchPath<N>>
where N: Copy + Eq + Hash,
F: Fn(N) -> R,
R: IntoIterator<Item=N> {
let mut visited = HashSet::new();
let mut queue = VecDeque::new();
queue.push_back(SearchPath(start, None));
visited.insert(start);
while let Some(SearchPath(node, path)) = queue.pop_front() {
if node == end {
return Some(SearchPath(node, path))
}
// Share the path so far between all successors (cheap Rc clone).
let path = Rc::new(SearchPath(node, path.clone()));
for edge in expand(node) {
// Mark at enqueue time so each node is queued at most once.
if !visited.contains(&edge) {
visited.insert(edge);
queue.push_back(SearchPath(edge, Some(path.clone())));
}
}
}
None
}
/// A node plus an optional link to the rest of the path — a cons list shared
/// via `Rc`, so many queue entries can reuse a common prefix.
#[derive(Clone, PartialEq, Eq)]
pub struct SearchPath<N> (N, Option<Rc<SearchPath<N>>>);
// Render the path root-first, e.g. `(0, 0) -> (0, 1) -> (1, 1)`.
impl<N: Debug> Debug for SearchPath<N> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match &self.1 {
Some(v) => write!(f, "{:?} -> {:?}", v, &self.0),
None => write!(f, "{:?}", &self.0)
}
}
}
Going further down the rabbit hole, we can do even more with this if we add some more type parameters. It may be a bit harder to read now, but what this lets us do is implement a bunch of different search approaches from graph theory using search as a base. Essentially, this new function search boils down to the core components of many search methods.
/// A general purpose graph search.
/// - start: Initial value to use in the search queue
/// - expand: A function that takes a path, expands the edges of the top node,
/// then places the next elements in the queue according to the current search
/// approach. Additional ordering constraints may also be applied.
/// - next_node: A helper function which takes a path and evaluates whether the
/// search goal has been reached. If the goal has been reached, it returns
/// None and the current path becomes the overall result. Otherwise it
/// returns the top node of the given path so that the node can be expanded.
fn search<N, P, F, R>(start: P, expand: F, next_node: R) -> Option<P>
where
N: Eq + Hash,
F: Fn(&P, &mut VecDeque<P>),
R: Fn(&P) -> Option<N>,
{
let mut visited = HashSet::new();
let mut queue = VecDeque::new();
queue.push_back(start);
while let Some(path) = queue.pop_front() {
// `None` signals "goal reached": the current path is the answer.
let node = match next_node(&path) {
Some(v) => v,
None => return Some(path),
};
// Expand each node at most once, regardless of how many paths reach it.
if visited.contains(&node) {
continue;
}
visited.insert(node);
expand(&path, &mut queue);
}
None
}
#[derive(Clone, PartialEq, Eq)]
// Accumulated cost, current node, and the tail of the path. Note the tail
// links are plain `SearchPath` nodes: only the head of the chain carries the
// running cost.
pub struct WeightedSearchPath<N>(i32, N, Option<Rc<SearchPath<N>>>);
/// An example using search to find the most efficient path with weighted graph edges
fn weighted_search<N, F, R>(start: N, end: N, expand: F) -> Option<WeightedSearchPath<N>>
where
N: Copy + Eq + Hash,
F: Fn(N) -> R,
R: IntoIterator<Item=(i32, N)>,
{
search(
WeightedSearchPath(0, start, None),
// Expansion: push each weighted edge, then keep the queue sorted by
// accumulated cost so the cheapest path is always popped first
// (Dijkstra-style; a BinaryHeap would avoid the full re-sort each step).
|WeightedSearchPath(cost, node, path), queue| {
let path = Rc::new(SearchPath(*node, path.clone()));
for (weight, edge) in expand(*node) {
queue.push_back(WeightedSearchPath(cost + weight, edge, Some(path.clone())));
}
queue.make_contiguous().sort_by_key(|x| x.0);
},
// Goal test: returning None stops the search with the current path.
|WeightedSearchPath(_, node, _)| {
if *node == end {
return None;
}
Some(*node)
},
)
}

Related

Why does my Rust StackDFS implementation produce incorrect results? [closed]

Closed. This question needs debugging details. It is not currently accepting answers.
Edit the question to include desired behavior, a specific problem or error, and the shortest code necessary to reproduce the problem. This will help others answer the question.
Closed 1 year ago.
Improve this question
I am reading Louridas's book about algorithms and now I am at Chapter 2 trying to implement the StackDFS function. The purpose of this function is to "visit" every element of the graph.
Algorithm from the book:
StackDFS(G, node)→visited
//Input: G = (V,E), a graph
//node, the starting vertex in G
//Output: visited, an array of size |V | such that visited[i] is true if we
//have visited node i, false otherwise
1 S ←CreateStack()
2 visited ←CreateArray(|V |)
3 for i ←0 to |V | do
4 visited[i] ← false
5 Push(S, node)
6 while not IsStackEmpty(S) do
7 c ←Pop(s)
8 visited[c] ← true
9 foreach v in AdjacencyList(G, c) do
10 if not visited[v] then
11 Push(S, v)
12 return visited
My code:
use petgraph::graph::NodeIndex;
use petgraph::Undirected;
fn main() {
// Undirected graph with edges (0-1), (0-3), (1-2), (2-4).
let mut new = petgraph::Graph::<i32, (i32, i32), Undirected>::new_undirected();
new.extend_with_edges(&[(0, 1), (0, 3), (1, 2), (2, 4)]);
// Start the DFS from vertex 2.
let index = 2;
// One "seen" flag per vertex, all initially false.
let mut visited = Vec::new();
for i in 0..new.node_count() {
visited.push(false);
}
StackDFS(&mut new, &index, &mut visited);
}
// Iterative (stack-based) DFS intended to mark, in `visited`, every vertex
// reachable from `node`.
// NOTE(review): the inner loop asks for the neighbours of the *starting*
// vertex (`*node`) on every pass instead of the just-popped vertex `c`, so
// vertices further than one edge from `node`'s neighbourhood are never
// reached — this is the bug behind the [false, true, true, false, true] output.
fn StackDFS<T>(G: &petgraph::Graph<T, (T, T), Undirected>, node: &usize, visited: &mut Vec<bool>) {
let mut s: Vec<usize> = Vec::with_capacity(G.node_count());
// for i in 0..G.node_count(){
// visited.push(false);
//}
s.push(*node);
while s.is_empty() == false {
let c = s.pop().unwrap();
visited[c] = true;
for el in G.neighbors(NodeIndex::new(*node)) {
if visited[el.index()] == false {
s.push(el.index());
}
}
}
println!("{:?}", visited);
}
The output I get:
[false, true, true, false, true]
The output I expect:
[true, true, true, true, true]
I used a Vec instead of a stack, but I don't think this is the problem because Vec can provide the same functionality as a stack.
for el in G.neighbors(NodeIndex::new(*node)) {
You are always accessing the neighbors of the same starting node. You are supposed to get the neighbors of the currently processed node.
Improved version:
use petgraph::{graph::NodeIndex, Undirected}; // 0.6.0
fn main() {
// Undirected graph with edges (0-1), (0-3), (1-2), (2-4).
let mut new = petgraph::Graph::<i32, (i32, i32), Undirected>::new_undirected();
new.extend_with_edges(&[(0, 1), (0, 3), (1, 2), (2, 4)]);
// Start the DFS from vertex 2; every vertex is reachable in this graph.
let index = 2;
let mut visited = vec![false; new.node_count()];
stack_dfs(&new, index, &mut visited);
}
/// Iterative depth-first traversal: marks every vertex reachable from
/// `start_node` in `visited`, then prints the flags.
fn stack_dfs<T>(
g: &petgraph::Graph<T, (T, T), Undirected>,
start_node: usize,
visited: &mut Vec<bool>,
) {
    // Explicit work stack instead of recursion.
    let mut pending = Vec::with_capacity(g.node_count());
    pending.push(start_node);
    while let Some(current) = pending.pop() {
        visited[current] = true;
        // Queue up every neighbour of the *current* vertex we haven't seen.
        for neighbour in g.neighbors(NodeIndex::new(current)) {
            let idx = neighbour.index();
            if visited[idx] {
                continue;
            }
            pending.push(idx);
        }
    }
    println!("{:?}", visited);
}

How to divide a set into two sets such that the difference of the average is minimum?

As I understand, it is related to the partition problem.
But I would like to ask about a slightly different problem in which I care not about the sum but the average. In this case, it needs to optimize 2 constraints (sum and number of items) at the same time. It seems to be a harder problem and I cannot see any solutions online.
Are there any solutions for this variant? Or how does it relate to the partition problem?
Example:
input X = [1,1,1,1,1,6]
output based on sum: A = [1,1,1,1,1], B=[6]
output based on average: A = [1], B=[1,1,1,1,6]
On some inputs, a modification of the dynamic program for the usual partition problem will give a speedup. We have to classify each partial solution by its count and sum instead of just sum, which slows things down a bit. Python 3 below (note that the use of dictionaries implicitly collapses functionally identical partial solutions):
def children(ab, x):
    """Yield the two ways of placing ``x`` into the partial split ``ab``."""
    left, right = ab
    yield left + [x], right
    yield left, right + [x]
def proper(ab):
    """A split is 'proper' when neither side is empty (truthy result)."""
    return ab[0] and ab[1]
def avg(lst):
    """Arithmetic mean of a non-empty sequence."""
    total = sum(lst)
    return total / len(lst)
def abs_diff_avg(ab):
    """Absolute difference between the means of the two halves of ``ab``."""
    first, second = ab
    return abs(sum(first) / len(first) - sum(second) / len(second))
def min_abs_diff_avg(lst):
    """Split ``lst`` into two non-empty parts minimising the gap between
    their averages (dynamic program over (sum, size) of the first part)."""
    # Partial solutions keyed by (sum, size); identical keys collapse, which
    # is what keeps the state space manageable.
    solutions = {(0, 0): ([], [])}
    for x in lst:
        nxt = {}
        for ab in solutions.values():
            for a, b in children(ab, x):
                nxt[(sum(a), len(a))] = (a, b)
        solutions = nxt
    return min(filter(proper, solutions.values()), key=abs_diff_avg)
# Demo: best split of [1, 1, 1, 1, 1, 6] by average difference
# (expected to be a split like ([1], [1, 1, 1, 1, 6])).
print(min_abs_diff_avg([1, 1, 1, 1, 1, 6]))
let S_i be the sum of a subset of v of size i
let S be the total sum of v, n the length of v
the err to minimize is
err_i = |avg(S_i) - avg(S-S_i)|
err_i = |S_i/i - (S-S_i)/(n-i)|
err_i = |(nS_i - iS)/(i(n-i))|
algorithm below does:
for all tuple sizes (1,...,n/2) as i
- for all tuples of size i-1 as t_{i-1}
- generate all possible tuple of size i from t_{i-1} by adjoining one elem from v
- track best tuple in regard of err_i
The only cut I found being:
for two tuples of size i having the same sum, keep the one whose last element's index is the smallest
e.g given tuples A, B (where X is some taken element from v)
A: [X,....,X....]
B: [.,X,.....,X..]
keep A because its right-most element has the minimal index
(idea being that at size 3, A will offer the same candidates as B plus some more)
// One DP step: extend every tuple of size i-1 with one later element of `v`,
// producing the candidate tuples of size i. Tuples are deduplicated by sum:
// among equal sums only the one whose last index `l` is smallest is kept,
// since it dominates the others when extended further.
// Each tuple is { v: chosen indices, s: sum of chosen values, l: last index }.
function generateTuples (v, tuples) {
const nextTuples = new Map()
for (const [, t] of tuples) {
for (let l = t.l + 1; l < v.length; ++l) {
const s = t.s + v[l]
if (!nextTuples.has(s) || nextTuples.get(s).l > l) {
const nextTuple = { v: t.v.concat(l), s, l }
nextTuples.set(s, nextTuple)
}
}
}
return nextTuples
}
// Split `v` into two sets minimising |avg(A) - avg(B)|: enumerate subsets of
// size 1..n/2 level by level (via generateTuples) and keep the best subset.
function processV (v) {
// err_i = |(n*S_i - i*S) / (i*(n-i))|, derived from |avg(S_i) - avg(S-S_i)|.
const fErr = (() => {
const n = v.length
const S = v.reduce((s, x) => s + x, 0)
return ({ s: S_i, v }) => {
const i = v.length
return Math.abs((n * S_i - i * S) / (i * (n - i)))
}
})()
// Seed: the empty tuple (sum 0, no last index yet).
let tuples = new Map([[0, { v: [], s: 0, l: -1 }]])
let best = null
let err = 9e3
for (let i = 0; i < Math.ceil(v.length / 2); ++i) {
const nextTuples = generateTuples(v, tuples)
for (const [, t] of nextTuples) {
if (fErr(t) <= err) {
best = t
err = fErr(t)
}
}
tuples = nextTuples
}
// Materialise the split: indices listed in `best.v` go to the first set.
const s1Indices = new Set(best.v)
return {
sol: v.reduce(([v1, v2], x, i) => {
(s1Indices.has(i) ? v1 : v2).push(x)
return [v1, v2]
}, [[], []]),
err
}
}
// Demo runs.
console.log('best: ', processV([1, 1, 1, 1, 1, 6]))
console.log('best: ', processV([1, 2, 3, 4, 5]))
console.log('best: ', processV([1, 3, 5, 7, 7, 8]))

find Missing Range in a given a range

We need to find the missing range when main range is given and all sub ranges are given.
main range :[-10, 10]
sub Ranges: [-10, -5] , [-4, -3], [-2, 3], [7, 10]
Assumptions:
1) Range values can go upto 2^63.
2)sub ranges wont overlap and their order can be different.
for ex: the can be [-10, -5],[7, 10], [-2, 3], [-4, -3]
what is best algorithm to find the missing range here?
Assuming the intervals are unsorted, I do not see avoiding a sorting cost since each interval can be a singleton ([n,n]). That cost can be O(n log n) for a comparison sort or O(n) for a radix sort. From now on, let's assume that input intervals are sorted and contain no overlaps. Here is a O(n) single pass Python implementation:
# Sub-ranges covering parts of the main range; input order may be arbitrary.
xs = [[-10, -5], [-4, -3], [-2, 3], [7, 10]]
bounds = (-10, 10)
missing = []

# Sort once so every gap can be found in a single left-to-right sweep.
ordered = sorted(xs)

# Gap before the first sub-range?
if bounds[0] < ordered[0][0]:
    missing.append((bounds[0], ordered[0][0] - 1))

# Sweep: merge contiguous intervals, recording every gap in between.
current = ordered[0]
for nxt in ordered[1:]:
    if current[1] + 1 == nxt[0]:
        # Contiguous: extend the merged interval.
        current = (current[0], nxt[1])
    else:
        # Gap detected between the merged interval and the next one.
        missing.append((current[1] + 1, nxt[0] - 1))
        current = nxt

# Gap after the last sub-range?
if bounds[1] > ordered[-1][1]:
    missing.append((ordered[-1][1] + 1, bounds[1]))

print(missing)
# [(4, 6)]
The approach is to use a functional style reduce with a stinky side-effect. When the function f_reduce encounters two intervals (a, b) and (c, d), we return a compound interval (a, d) if b + 1 == c. Otherwise, a gap is detected and stored; the returned interval is (c, d). The pre and post processing steps are dealing with nuisance cases when gaps occur on the two extreme ranges of the interval.
Try using the following way, it works in O(n) where n is the range width.
// Difference-array sweep over the whole numeric range (O(range width)).
// entire range is initially 0.
int arr[range_max - range_min + 2] = {0};
//for each sub_range increment the values by 1.
for(int i = 0; i<n; i++){
arr[sub_range_min[i] - range_min] += 1;
// NOTE(review): suspected typo — for a difference array the decrement should
// be at the position just past the sub-range's END, i.e.
// arr[sub_range_max[i] - range_min + 1], not sub_range_min[i] - range_max + 1.
arr[sub_range_min[i] - range_max + 1] -= 1;
}
// Prefix-sum pass turns the deltas into per-position coverage counts.
for(int i = 1; i< range_max - range_min + 2; i++){
arr[i] += arr[i-1];
}
// all the uncovered area in the range by the sub_ranges will be marked 0 in the array.
Looks like you can pass through array and find index i where
xi != y(i-1)
Pair
(y(i-1), xi)
is the answer
Assuming only one missing interval:
We may do it in O(k) where k is the number of subranges.
Make a chain (like Chasles) of the connected subranges (since they trivially do not overlap).
At the end, three possible cases:
only one chain: the missing subrange is at the beginning
or at the end
two chains: it is in between
At the current subinterval, check if it is a prolongement of a chain.
if not create another chain.
if yes, increase the chain, like ssssnake. Then maybe it connects that chain with another one. Then reduce the two chains and the sub interval as a single big chain
A chain may simply be representated with its left and right
And to find the chain to increase, may simply use a hashmap on left and another one on right
// Find the single missing sub-range of [minRange, maxRange] not covered by
// `subs`, in O(k) for k sub-ranges: connected sub-ranges are merged into
// chains (keyed by their left and right endpoints); one chain left means the
// gap is at an end of the main range, two chains mean it lies between them.
// Assumes exactly one missing range and non-overlapping sub-ranges.
function getMissingSub (subs, minRange, maxRange) {
const chainsByLeft = new Map () // left -> [left, whatsoever]
const chainsByRight = new Map () // right -> [whatsoever, right]
// before: [[a, [a, whatsoever]]]
// after: [[newVal, [newval, whatsoever]]]
function prolongeLeft (x, newVal) {
const chain = chainsByLeft.get(x)
const old = chain[0]
chain[0] = newVal
chainsByLeft.set(newVal, chain)
chainsByLeft.delete(old)
return chain
}
function prolongeRight (x, newVal) {
const chain = chainsByRight.get(x)
const old = chain[1]
chain[1] = newVal
chainsByRight.set(newVal, chain)
chainsByRight.delete(old)
return chain
}
subs.forEach(([a,b]) => {
if (chainsByLeft.has(b) || chainsByRight.has(a)) {
if (chainsByLeft.has(b)) {
// prolonge on the left
const chain = prolongeLeft(b, a)
// The new sub-range may also bridge two chains: merge them.
if (chainsByRight.has(a) ) {
prolongeRight(a, chain[1])
}
} else {
const chain = prolongeRight(a, b)
if (chainsByLeft.has(b) ) {
prolongeLeft(b, chain[0])
}
}
} else {
// new chain
const chain = [a, b]
chainsByLeft.set(a, chain)
chainsByRight.set(b, chain)
}
})
let missingRange
if (chainsByLeft.size === 1) {
// One chain: the gap touches one end of the main range.
const [, [left, right]] = chainsByLeft.entries().next().value
if (left === minRange) {
missingRange = [right, maxRange]
} else {
missingRange = [minRange, left]
}
} else {
// Two chains: the gap lies between them.
const [[, [l1, r1]], [, [l2, r2]]] = chainsByLeft.entries()
if (r1 < r2) {
missingRange = [r1, l2]
} else {
missingRange = [r2, l1]
}
}
return { missingRange, chainsByLeft }
}
// Demo runs for the single-gap variant.
const dump = ({ missingRange: [a,b] }) => console.log(`missing [${a}, ${b}]`)
dump(getMissingSub([[0, 1],[1, 2]], 0, 4))
dump(getMissingSub([[0, 1],[1, 2]], -1, 2))
dump(getMissingSub([[0, 1],[2, 3]], 0, 3))
dump(getMissingSub([[-10, -5] , [-4, -3], [-2, 3], [7, 10]], -10, 10))
If you have several missing ranges, obviously you can have more than two chains, then you may need a sort to order the chains and directly find the gap between consecutive chains
//COPY PASTED FROM BEFORE
// Verbatim copy of the single-gap getMissingSub so this second snippet is
// self-contained; the multi-gap getMissingSubs below reuses its chain maps.
function getMissingSub (subs, minRange, maxRange) {
const chainsByLeft = new Map () // left -> [left, whatsoever]
const chainsByRight = new Map () // right -> [whatsoever, right]
// before: [[a, [a, whatsoever]]]
// after: [[newVal, [newval, whatsoever]]]
function prolongeLeft (x, newVal) {
const chain = chainsByLeft.get(x)
const old = chain[0]
chain[0] = newVal
chainsByLeft.set(newVal, chain)
chainsByLeft.delete(old)
return chain
}
function prolongeRight (x, newVal) {
const chain = chainsByRight.get(x)
const old = chain[1]
chain[1] = newVal
chainsByRight.set(newVal, chain)
chainsByRight.delete(old)
return chain
}
subs.forEach(([a,b]) => {
if (chainsByLeft.has(b) || chainsByRight.has(a)) {
if (chainsByLeft.has(b)) {
// prolonge on the left
const chain = prolongeLeft(b, a)
if (chainsByRight.has(a) ) {
prolongeRight(a, chain[1])
}
} else {
const chain = prolongeRight(a, b)
if (chainsByLeft.has(b) ) {
prolongeLeft(b, chain[0])
}
}
} else {
// new chain
const chain = [a, b]
chainsByLeft.set(a, chain)
chainsByRight.set(b, chain)
}
})
let missingRange
if (chainsByLeft.size === 1) {
const [, [left, right]] = chainsByLeft.entries().next().value
if (left === minRange) {
missingRange = [right, maxRange]
} else {
missingRange = [minRange, left]
}
} else {
const [[, [l1, r1]], [, [l2, r2]]] = chainsByLeft.entries()
if (r1 < r2) {
missingRange = [r1, l2]
} else {
missingRange = [r2, l1]
}
}
return { missingRange, chainsByLeft }
}
//ENDCOYP PASTED
// Generalisation to several missing ranges: reuse getMissingSub to build the
// chains, then sort them and collect every gap between consecutive chains.
// The sentinel chains [min,min] and [max,max] catch gaps at the two extremes.
function getMissingSubs(subs, minRange, maxRange) {
const { missingRange, chainsByLeft } = getMissingSub.apply(null, arguments)
const missingRanges = []
;[[minRange, minRange], ...chainsByLeft.values(), [maxRange, maxRange]]
.sort((a,b) => a[0]-b[0]).reduce((chain, next) => {
if (chain[1] !== next[0]) {
missingRanges.push([chain[1], next[0]])
}
return next
})
return { missingRanges }
}
// Demo runs for the multi-gap variant.
const dump2 = ({ missingRanges }) => console.log(`missing2 ${JSON.stringify(missingRanges)}`)
dump2(getMissingSubs([[0, 1],[1, 2]], 0, 4))
dump2(getMissingSubs([[0, 1],[1, 2]], -1, 2))
dump2(getMissingSubs([[0, 1],[2, 3]], 0, 3))
dump2(getMissingSubs([[-10, -5] , [-4, -3], [-2, 3], [7, 10]], -10, 10))

Fixed Scala code using Partition Numbers with Stream calculate, BUT too slowly

I would like advice about how to proceed.
1. Incorrect usage of Scala. I should try to more improve the code.
2. The efficiency of the algorithm is poor. I should think of an efficient algorithm.
Goal:
Can quickly calculate the max number from more than 1,000 Partition Numbers collections.
Partition Number:
e.g.,
5 -> (5), (1, 4), (2, 3), (1, 1, 3), (1, 2, 2), (1, 1, 1, 2), (1, 1, 1, 1, 1)
I ask that "I want to convert from Python to Scala that Partition Function using Vector", and I was taught to use Stream yesterday.
I fixed the code, and it works for 10, 50, and so on. But with big numbers (e.g., 100, 1,000 or 10,000) it couldn't calculate the max number.
It calculate from Stream.last to Stream.head.
In my understanding, the Stream type can add an element at the head only, so the order of the numbers is reversed from the first code.
code
import scala.math.floor
// Enumerates all partitions of `point` and, for each partition, compounds
// `startNum` by each part treated as a percentage; tracks the maximum result.
class PartitionNumbers(startNum: Int, point: Int) {
var maxNum = 0
var tmpNum = 0
// Stream of `n` ones, used when a partition is completed with 1s.
private def appendOnes(n: Int, s: Stream[Int] = Stream.empty[Int]): Stream[Int] = {
if (n == 0) s
else appendOnes(n - 1, 1 #:: s)
}
// Recursively builds every partition of n with parts <= k, evaluating each
// complete partition via `calculate` and keeping the best value in `maxNum`.
// NOTE(review): `n == 1 | k == 1` uses the non-short-circuiting `|`;
// `||` is the conventional operator here (same result for Booleans).
private def partition(n: Int, k: Int, tmpStream: Stream[Int] = Stream.empty): Int = {
if (n == 0) tmpNum = calculate(tmpStream)
else if (n == 1 | k == 1) tmpNum = calculate(appendOnes(n))
else {
if (n >= k) partition(n - k, k, k #:: tmpStream)
partition(n, k - 1, tmpStream)
}
if (maxNum < tmpNum) maxNum = tmpNum
maxNum
}
def searchMax(n: Int = point): Int = {
partition(n, n)
}
// Applies the parts from last to head: num := floor(num * (100 + part) / 100).
def calculate(usePointsStream: Stream[Int], num: Int = startNum): Int = {
if (usePointsStream.isEmpty) {
num
} else {
calculate(usePointsStream.init, floor(num * (100 + usePointsStream.last) / 100).toInt)
}
}
}
output example
// Usage examples; the last case is the one reported as far too slow.
val pn_1 = new PartitionNumbers(100, 10)
println(pn_1.searchMax()) // -> 110
val pn_2 = new PartitionNumbers(1000, 50)
println(pn_2.searchMax()) // -> 1630
val pn_3 = new PartitionNumbers(10000, 100)
println(pn_3.searchMax()) // Can't calculate within 3 minutes using Ryzen 7 2700X.

How many PR numbers exist in a given range?

It is not a homework problem. I am just curious about this problem. And my approach is simple brute-force :-)
My brute-force C++ code:
// Brute force: count PR numbers in [l, r] by testing every value.
// A PR number has (1) no pair of adjacent digits that are coprime and
// (2) is divisible by every single-digit prime occurring among its digits.
int main()
{
ll l,r;
cin>>l>>r;
ll f=0;
ll i=l;
while(i<=r)
{
ll j=0;
string s;
ll c=0;
s=to_string(i);
// cout<<s<<" ";
ll x=s.length();
if(x==1)
{
// Single digit: no adjacent pair exists, condition 1 holds vacuously.
c=0;
}
else
{
j=0;
//whil
// Condition 1: set c=1 if any adjacent digit pair is coprime.
while(j<=x-2)
{
string b,g;
b="1";
g="1";
b=s[j];
g=s[j+1];
ll k1,k2;
k1=stoi(b);
k2=stoi(g);
if(__gcd(k1,k2)==1)
{
c=1;
break;
}
j++;
}
}
// Condition 2: set d=1 if some prime digit of i does not divide i.
ll d=0;
j=0;
while(j<=x-1)
{
if( s[j]=='2' || s[j]=='3' || s[j]=='5' || s[j]=='7')
{
string b;
b="1";
b=s[j];
ll k1=stoi(b);
if(i%k1==0)
{
//d=0;
}
else
{
d=1;
break;
}
}
j++;
}
// Count i only when neither condition was violated.
if(c==1 || d==1)
{
// cout<<"NO";
}
else
{
f++;
// cout<<"PR";
}
// cout<<"\n";
i++;
}
cout<<f;
return 0;
}
You are given 2 integers 'L' and 'R' . You are required to find the count of all the PR numbers in the range 'L' to 'R' inclusively. PR number are the numbers which satisfy following properties:
No pair of adjacent digits are co-prime i.e. adjacent digits in a PR number will not be co-prime to each other.
PR number is divisible by all the single digit prime numbers which occur as a digit in the PR number.
Note: Two numbers 'a' and 'b' are co-prime, if gcd(a,b)=1.
Also, gcd(0,a)=a;
Example:
Input: [2,5].
Output: '4'.
(Note: '1' is not a prime-number, though its very common)
(All the integers: '2','3','4','5') satisfy the condition of PR numbers :-)
Constraints on 'L','R': 1 <= L, R <= 10^18
What can be the the most efficient algorithm to solve this ?
Note: This will solve only part 1 which is No pair of adjacent digits are co-prime i.e. adjacent digits in a PR number will not be co-prime to each other.
Here is a constructive approach in python: instead of going through all numbers in the range and filtering by conditions, we will just construct all numbers that satisfy the condition. Note that if we have a valid sequence of digits, for it to continue being valid only the rightmost digit matters in order to decide what the next digit will be.
def ways(max_number, prev_digit, current_number):
    """Count numbers in [0, max_number] reachable by extending
    ``current_number`` so that no two adjacent digits are coprime
    (condition 1 of the PR problem).

    ``prev_digit`` is the rightmost digit of ``current_number`` (0 for the
    empty prefix); it alone determines which digits may be appended next.
    """
    if current_number > max_number:
        return 0
    # The prefix itself counts as one valid number.
    total = 1
    # Digits allowed after `prev_digit` (gcd(0, a) = a, so 0 pairs with
    # everything except 1, and 1 pairs with nothing).
    if prev_digit == 0:
        nexts = [2, 3, 4, 5, 6, 7, 8, 9]
        if current_number != 0:
            # A leading zero is not a number, so 0 is appendable only later.
            nexts = [0] + nexts
    elif prev_digit in (2, 4, 8):
        nexts = [0, 2, 4, 6, 8]
    elif prev_digit in (3, 9):
        nexts = [0, 3, 6, 9]
    elif prev_digit in (5, 7):
        nexts = [0, prev_digit]
    else:  # prev_digit == 6
        nexts = [0, 2, 3, 4, 6, 8, 9]
    for digit in nexts:
        total += ways(max_number, digit, current_number * 10 + digit)
    return total
As we are generating all valid numbers up to max_number without any repeats, the complexity of this function is O(amount of numbers between 0 and max_number that satisfy condition 1). To calculate the range a to b, we just need to do ways(b) - ways(a - 1).
Takes less than 1 second to calculate these numbers from 0 to 1 million, as there are only 42935 numbers that satisfy the result. As there are few numbers that satisfy the condition, we can then check if they are multiples of their prime digits to satisfy also condition 2. I leave this part up to the reader as there are multiple ways to do it.
TL;DR: This is more commonly called "digit dynamic programming with bitmask"
In more competitive-programming-familiar terms, you'd compute dp[n_digit][mod_2357][is_less_than_r][digit_appeared][last_digit] = number of numbers with n_digit digits (including leading zeroes), less than the number formed by first n_digit digits of R and with the other properties match. Do it twice with R and L-1 then take the difference. The number of operations required would be about 19 (number of digits) * 210 (mod) * 2 * 24 (it's only necessary to check for appearance of single-digit primes) * 10 * 10, which is obviously manageable by today computers.
Think about how you'd check whether a number is valid.
Not the normal way. Using a finite state automaton that take the input from left to right, digit by digit.
For simplicity, assume the input has a fixed number of digits (so that comparison with L/R is easier. This is possible because the number has at most as many digits as R).
It's necessary for each state to keep track of:
which digit appeared in the number (use a bit mask, there are 4 1-digit primes)
is the number in range [L..R] (either this is guaranteed to be true/false by the prefix, otherwise the prefix matches with that of L/R)
what is the value of the prefix mod each single digit prime
the most recent digit (to check whether all pairs of consecutive digits are coprime)
After the finite state automaton is constructed, the rest is simple. Just use dynamic programming to count the number of path to any accepted state from the starting state.
Remark: This method can be used to count the number of any type of object that can be verified using a finite state automaton (roughly speaking, you can check whether the property is satisfied using a program with constant memory usage, and takes the object piece-by-piece in some order)
We need a table where we can look up the count of suffixes that would match a prefix to construct valid numbers. Given a prefix's
right digit
prime combination
mod combination
and a suffix length, we'd like the count of suffixes that have searchable:
left digit
length
prime combination
mod combination
I started coding in Python, then switched to JavaScript to be able to offer a snippet. Comments in the code describe each lookup table. There are a few of them to allow for faster enumeration. There are samples of prefix-suffix calculations to illustrate how one can build an arbitrary upper-bound using the table, although at least some, maybe all of the prefix construction and aggregation could be made during the tabulation.
// Greatest common divisor via Euclid's algorithm.
function gcd(a,b){
  return b ? gcd(b, a % b) : a
}
// (Started writing in Python,
// then switched to JavaScript...
// 'xrange(4)' -> [0, 1, 2, 3]
// 'xrange(2, 4)' -> [2, 3]
// Python-style range: xrange(n) -> [0..n-1], xrange(l, r) -> [l..r-1].
function xrange(){
  const r = arguments[1] || arguments[0]
  const l = arguments.length > 1 ? arguments[0] : 0
  const out = []
  for (let i = l; i < r; ++i) out.push(i)
  return out
}
// A lookup table and its reverse,
// mapping each of the 210 mod combinations,
// [n % 2, n % 3, n % 5, n % 7], to a key
// from 0 to 209.
// 'mod_combs[0]' -> [0, 0, 0, 0]
// 'mod_combs[209]' -> [1, 2, 4, 6]
// 'mod_keys[[0,0,0,0]]' -> 0
// 'mod_keys[[1,2,4,6]]' -> 209
let mod_combs = {}
let mod_keys = {}
// Keys run 0..209: 2 * 3 * 5 * 7 = 210 distinct remainder combinations.
let mod_key_count = 0
for (let m2 of xrange(2)){
for (let m3 of xrange(3)){
for (let m5 of xrange(5)){
for (let m7 of xrange(7)){
mod_keys[[m2, m3, m5, m7]] = mod_key_count
mod_combs[mod_key_count] = [m2, m3, m5, m7]
mod_key_count += 1
}
}
}
}
// The main lookup table built using the
// dynamic program
// [mod_key 210][l_digit 10][suffix length 20][prime_comb 16]
let table = new Array(210)
for (let mk of xrange(210)){
table[mk] = new Array(10)
for (let l_digit of xrange(10)){
table[mk][l_digit] = new Array(20)
for (let sl of xrange(20)){
// Entries hold suffix counts; everything starts at zero.
table[mk][l_digit][sl] = new Array(16).fill(0)
}
}
}
// We build prime combinations from 0 (no primes) to
// 15 (all four primes), using a bitmask of up to four bits.
// Index = digit; value = that digit's bit (2->1, 3->2, 5->4, 7->8).
let prime_set = [0, 0, 1<<0, 1<<1, 0, 1<<2, 0, 1<<3, 0, 0]
// The possible digits that could
// follow a digit
// Digits that may legally follow `digit` with no coprime adjacent pair
// (using gcd(0, a) = a). Digit 1 has no valid successors, so calling this
// with 1 yields undefined, matching the original behaviour.
function get_valid_digits(digit){
  switch (digit) {
    case 0: return [0, 2, 3, 4, 5, 6, 7, 8, 9]
    case 2:
    case 4:
    case 8: return [0, 2, 4, 6, 8]
    case 3:
    case 9: return [0, 3, 6, 9]
    case 6: return [0, 2, 3, 4, 6, 8, 9]
    case 5: return [0, 5]
    case 7: return [0, 7]
  }
}
// Build the table bottom-up
// Single digits
// Base case: each single digit d seeds one suffix of length 1 at
// table[d's remainders][d][1][d's prime bitmask].
for (let i of xrange(10)){
let mod_key = mod_keys[[i % 2, i % 3, i % 5, i % 7]]
let length = 1
let l_digit = i
let prime_comb = prime_set[i]
table[mod_key][l_digit][length][prime_comb] = 1
}
// Build the rest of the table bottom-up.
// For demonstration we only table lengths 2..5, since either
// JavaScript, this program, or both seem to be too slow for a
// full demo.
for (let length = 2; length < 6; length++){
    // We're appending a new left digit
    for (let new_l_digit = 0; new_l_digit < 10; new_l_digit++){
        // The digit 1 is never valid
        if (new_l_digit == 1)
            continue
        // Value the new left digit contributes at this position.
        // Invariant across the two inner loops, so hoisted here.
        let placed = Math.pow(10, length - 1) * new_l_digit
        // The possible digits that could sit directly to the
        // right of our new left digit
        let ds = get_valid_digits(new_l_digit)
        // For each such right-hand digit, iterate over all the
        // prime-mask and residue combinations. Populated entries
        // are valid suffixes; their counts aggregate into the
        // resulting (residue, prime-mask) bucket.
        for (let l_digit of ds){
            for (let p_comb = 0; p_comb < 16; p_comb++){
                // `let` added: this was an accidental implicit
                // global in the original (breaks in strict mode).
                let new_prime_comb = prime_set[new_l_digit] | p_comb
                for (let m_key = 0; m_key < 210; m_key++){
                    // suffix's residue combination
                    let [m2, m3, m5, m7] = mod_combs[m_key]
                    // residue combination after prepending the digit
                    let new_mod_key = mod_keys[[(placed + m2) % 2, (placed + m3) % 3, (placed + m5) % 5, (placed + m7) % 7]]
                    // Aggregate any populated entries into the new
                    // table entry
                    table[new_mod_key][new_l_digit][length][new_prime_comb] += table[m_key][l_digit][length - 1][p_comb]
                }
            }
        }
    }
}
// If we need only a subset of the mods set to
// zero, we need to check all instances where
// this subset is zero. For example,
// for the prime combination, [2, 3], we need to
// check all mod combinations where the first two
// are zero since we don't care about the remainders
// for 5 and 7: [0,0,0,0], [0,0,0,1],... [0,0,4,6]
// Return all needed combinations given some
// predetermined, indexed remainders.
// `remainders` maps a prime index (0..3 for primes 2,3,5,7)
// to the residue that slot must equal; any index absent from
// `remainders` is a "don't care" slot that is enumerated over
// its full residue range.
function prime_comb_to_mod_keys(remainders){
let mod_map = [2, 3, 5, 7]
let mods = []
// Seed each slot: fixed slots start at 0 (their value is
// overwritten at the leaves anyway); free slots start at their
// maximum residue (prime - 1) so f() can count downwards.
for (let i of xrange(4))
mods.push(!remainders.hasOwnProperty(i) ? mod_map[i] - 1 : 0)
// Recursively enumerate every combination of the free slots,
// returning the mod-key of each complete combination.
function f(ms, i){
if (i == ms.length){
// At a leaf: stamp the fixed residues into place, then
// look up the key for this residue combination.
for (let idx in remainders)
ms[idx] = remainders[idx]
return [mod_keys[ms]]
}
let result = []
// Branch on every smaller value of slot i. Fixed slots were
// seeded with 0, so this loop body never runs for them —
// only free slots fan out here.
for (let m=ms[i] - 1; m>=0; m--){
let _ms = ms.slice()
_ms[i] = m
result = result.concat(f(_ms, i + 1))
}
// Also keep slot i at its current (maximum) value.
return result.concat(f(ms, i + 1))
}
return f(mods, 0)
}
// Given a prefix, the suffix length, and the combined prime
// mask, compute the residue each flagged prime requires of the
// suffix (so that prefix + suffix ≡ 0 mod that prime), then
// expand to every matching mod-key via prime_comb_to_mod_keys.
function get_matching_mods(prefix, len_suffix, prime_comb){
    let primes = [2, 3, 5, 7]
    let shifted_prefix = Math.pow(10, len_suffix) * prefix
    let remainders = {}
    primes.forEach((p, i) => {
        if (prime_comb & (1 << i))
            remainders[i] = (p - (shifted_prefix % p)) % p
    })
    return prime_comb_to_mod_keys(remainders)
}
// A brute-force cross-check for the table. Counts (and lists)
// the valid numbers of `length` digits that start with `prefix`.
// A number is valid when no digit is 1, each adjacent digit pair
// shares a common factor, and every prime digit divides the
// whole number. Returns [count, [numbers...]].
function confirm(prefix, length){
    let result = [0, []]
    // ps[d] is nonzero exactly when digit d is prime
    let ps = [0, 0, 2, 3, 0, 5, 0, 7, 0, 0]
    let p_len = String(prefix).length
    // Validate one candidate suffix; returns [1, [num]] or [0, []].
    function check(suffix){
        let num = Math.pow(10, length - p_len) * prefix + suffix
        let temp = num
        // `let` added: `prev` was an accidental implicit global
        // in the original (leaked state, breaks in strict mode).
        let prev = 0
        while (temp){
            let d = temp % 10
            // gcd(0, d) == d, so the rightmost digit only fails
            // the adjacency test when it is 1 (caught anyway).
            if (d == 1 || gcd(prev, d) == 1 || (ps[d] && num % d))
                return [0, []]
            prev = d
            temp = ~~(temp / 10)
        }
        return [1, [num]]
    }
    // `let` added: `suffix` was an accidental implicit global.
    for (let suffix of xrange(Math.pow(10, length - p_len))){
        let [a, b] = check(suffix)
        result[0] += a
        result[1] = result[1].concat(b)
    }
    return result
}
// OR together the prime bitmasks of every digit in `prefix`.
function get_prime_comb(prefix){
    let mask = 0
    for (let n = prefix; n; n = ~~(n / 10))
        mask |= prime_set[n % 10]
    return mask
}
// Compare the tabled count against the brute-force count for a
// given prefix and total length. To read the table we aggregate
// over every digit allowed to follow the prefix's last digit,
// every suffix prime mask, and every residue key where the
// combined (prefix | suffix) prime mask's residues sum to zero
// with the appropriate mod. Returns "tabled, brute[: list]".
function test(prefix, length, show=false){
    let r_digit = prefix % 10
    let len_suffix = length - String(prefix).length
    let prefix_prime_comb = get_prime_comb(prefix)
    let count = 0
    for (let l_digit of get_valid_digits(r_digit)){
        for (let prime_comb of xrange(16)){
            let mod_ks = get_matching_mods(prefix, len_suffix, prefix_prime_comb | prime_comb)
            for (let mk of mod_ks)
                count += table[mk][l_digit][len_suffix][prime_comb]
        }
    }
    let c = confirm(prefix, length)
    return `${ count }, ${ c[0] }${ show ? ': ' + c[1] : '' }`
}
// Spot-check a couple of arbitrary multi-digit prefixes...
for (let length of [3, 4])
    for (let prefix of [2, 30]){
        console.log(`prefix, length: ${ prefix }, ${ length }`)
        console.log(`tabled, brute-force: ${ test(prefix, length, true) }\n\n`)
    }
// ...then every single-digit prefix at the full demo length.
let length = 6
for (let l_digit = 2; l_digit < 10; l_digit++){
    console.log(`prefix, length: ${ l_digit }, ${ length }`)
    console.log(`tabled, brute-force: ${ test(l_digit, length) }\n\n`)
}

Resources