I saw the boolean expressions for the N Queens problem from here.
My modified N queens rules are simpler:
For a p*p chessboard I want to place N queens so that:
queens are placed adjacently, filling rows first, and
the board size p is the smallest for which the p*p board can hold N queens.
For example, say N = 17, then we need a 5*5 chessboard and the placement will be:
Q_Q_Q_Q_Q
Q_Q_Q_Q_Q
Q_Q_Q_Q_Q
Q_Q_*_*_*
*_*_*_*_*
The question: I am trying to come up with a boolean expression for this problem.
This problem can be solved using the Python packages humanize and omega.
"""Solve variable size square fitting."""
import humanize
from omega.symbolic.fol import Context
def pick_chessboard(q):
ctx = Context()
# compute size of chessboard
#
# picking a domain for `p`
# requires partially solving the
# problem of computing `p`
ctx.declare(p=(0, q))
s = f'''
(p * p >= {q}) # chessboard fits the queens, and
/\ ((p - 1) * (p - 1) < {q}) # is the smallest such board
'''
u = ctx.add_expr(s)
d, = list(ctx.pick_iter(u)) # assert unique solution
p = d['p']
print(f'chessboard size: {p}')
# compute number of full rows
ctx.declare(x=(0, p))
s = f'x = {q} / {p}' # integer division
u = ctx.add_expr(s)
d, = list(ctx.pick_iter(u))
r = d['x']
print(f'{r} rows are full')
# compute number of queens on the last row
s = f'x = {q} % {p}' # modulo
u = ctx.add_expr(s)
d, = list(ctx.pick_iter(u))
n = d['x']
k = r + 1
kword = humanize.ordinal(k)
print(f'{n} queens on the {kword} row')
if __name__ == '__main__':
q = 10 # number of queens
pick_chessboard(q)
Representing multiplication (and integer division and modulo) with binary decision diagrams has complexity exponential in the number of variables, as proved in: https://doi.org/10.1109/12.73590
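As a cross-check (my own sketch, not part of the answer above): because the placement rule is so regular, the same three quantities can also be computed with plain integer arithmetic, without BDDs. The function name pick_chessboard_arith is just illustrative.

import math

def pick_chessboard_arith(q):
    """Plain-arithmetic version of the computation above (illustrative)."""
    p = math.isqrt(q)              # largest p with p*p <= q
    if p * p < q:
        p += 1                     # smallest p with p*p >= q
    r = q // p                     # number of full rows
    n = q % p                      # queens on the next, partially filled row
    print(f'chessboard size: {p}')
    print(f'{r} rows are full')
    print(f'{n} queens on row {r + 1}')

pick_chessboard_arith(17)          # 5x5 board, 3 full rows, 2 queens on row 4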
Let N be a (linear) single-layer perceptron with weight matrix w of dimension nxn.
I want to train N under the Boolean constraint that the condition number k(w) of the weights w remains below a given threshold k_0 at each step of the optimisation.
Is there a standard way to implement this constraint (in pytorch, say)?
After each optimizer step, go through the list of parameters and recondition all matrices:
(code looked at for a few seconds, but not tested)
import math
import torch

def recondition_(x, max_cond):  # would need to be fixed for non-square x
    u, s, vh = torch.linalg.svd(x)
    curr_cond = s[0] / s[-1]  # singular values come back sorted in descending order
    if curr_cond > max_cond:
        ratio = curr_cond / max_cond
        # rescale the singular values geometrically so that the new
        # condition number becomes exactly max_cond
        mult = torch.linspace(0, math.log(ratio), len(s)).exp()
        s = mult * s
        x[:] = torch.mm(u, torch.mm(torch.diag(s), vh))
Training loop:
...
optimizer.step()
with torch.no_grad():
    for p in model.parameters():
        if p.dim() == 2:
            recondition_(p, max_cond)
...
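A quick way to sanity-check recondition_ (a hypothetical example of mine, not from the answer) is to look at the condition number right after the in-place update:

import torch

w = torch.randn(8, 8)
recondition_(w, max_cond=10.0)
s = torch.linalg.svdvals(w)          # singular values, descending
print((s[0] / s[-1]).item())         # should be at most ~10 up to rounding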
I have a very large absorbing Markov chain. I want to obtain the fundamental matrix of this chain to calculate the expected number of steps before absorption. From this question I know that this can be calculated by the equation
(I - Q) t = 1
where 1 is the all-ones column vector and t is the vector of expected steps.
which can be obtained by using the following python code:
import numpy

def expected_steps_fast(Q):
    I = numpy.identity(Q.shape[0])
    o = numpy.ones(Q.shape[0])
    return numpy.linalg.solve(I - Q, o)
However, I would like to calculate it using some kind of iterative method similar to the power iteration method used to calculate the PageRank. This method would allow me to approximate the expected number of steps before absorption in a mapreduce-like system.
Does something similar exist?
If you have a sparse matrix, check if scipy.sparse.linalg.spsolve works. No guarantees about numerical robustness, but at least for trivial examples it's significantly faster than solving with dense matrices.
import networkx as nx
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

def example(n):
    """Generate a very simple transition matrix from a directed graph
    """
    g = nx.DiGraph()
    for i in range(n - 1):
        g.add_edge(i + 1, i)
        g.add_edge(i, i + 1)
    g.add_edge(n - 1, n)
    g.add_edge(n, n)
    m = nx.to_numpy_matrix(g)
    # normalize rows to ensure m is a valid right stochastic matrix
    m = m / np.sum(m, axis=1)
    return m

A = sp.csr_matrix(example(2000)[:-1, :-1])
Ad = np.array(A.todense())

def sparse_solve(Q):
    I = sp.identity(Q.shape[0], format='csr')
    o = np.ones(Q.shape[0])
    return spla.spsolve(I - Q, o)

def dense_solve(Q):
    I = np.identity(Q.shape[0])
    o = np.ones(Q.shape[0])
    return np.linalg.solve(I - Q, o)
Timings for sparse solution:
%timeit sparse_solve(A)
1000 loops, best of 3: 1.08 ms per loop
Timings for dense solution:
%timeit dense_solve(Ad)
1 loops, best of 3: 216 ms per loop
Like Tobias mentions in the comments, I would have expected other solvers to outperform the generic one, and they may for very large systems. For this toy example, the generic solve seems to work well enough.
I arrived at this answer thanks to @tobias-ribizel's suggestion of using the Neumann series. If we start from the equation
(I - Q) t = 1
and expand the inverse with the Neumann series
(I - Q)^-1 = sum_{k=0}^inf Q^k
then multiplying each term of the series by the vector 1 lets us operate separately over each row of the matrix Q and approximate t successively with the partial sums
t_n = sum_{k=0}^n Q^k 1
This is the python code I use to calculate this:
def expected_steps_iterative(Q, n=10):
    N = Q.shape[0]
    acc = np.ones(N)
    r_k_1 = np.ones(N)
    for k in range(1, n):
        r_k = np.zeros(N)
        for i in range(N):
            for j in range(N):
                r_k[i] += r_k_1[j] * Q[i, j]
        if np.allclose(acc, acc + r_k, rtol=1e-8):
            acc += r_k
            break
        acc += r_k
        r_k_1 = r_k
    return acc
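As an aside (not part of the original answer): the two inner loops compute the matrix-vector product Q . r_k_1, so with NumPy the same Neumann iteration can be written much more compactly, for example:

import numpy as np

def expected_steps_neumann(Q, n=10, rtol=1e-8):
    """Partial sums of the Neumann series: t ~ sum_{k=0}^{n-1} Q^k 1."""
    r_k = np.ones(Q.shape[0])        # the k = 0 term, Q^0 @ 1
    acc = r_k.copy()
    for _ in range(1, n):
        r_k = Q @ r_k                # next term of the series
        acc += r_k
        if np.allclose(acc - r_k, acc, rtol=rtol):
            break                    # the new term no longer changes the sum
    return acc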
And this is the code using Spark. This code expects that Q is an RDD where each row is a tuple (row_id, dict of weights for that row of the matrix).
def expected_steps_spark(sc, Q, n=10):
    def dict2np(d, sz):
        vec = np.zeros(sz)
        for k, v in d.iteritems():
            vec[k] = v
        return vec
    sz = Q.count()
    acc = np.ones(sz)
    x = {i: 1.0 for i in range(sz)}
    for k in range(1, n):
        bc_x = sc.broadcast(x)
        x_old = x
        x = Q.map(lambda (u, ol): (u, reduce(lambda s, j: s + bc_x.value[j]*ol[j], ol, 0.0)))
        x = x.collectAsMap()
        v_old = dict2np(x_old, sz)
        v = dict2np(x, sz)
        acc += v
        if np.allclose(v, v_old, rtol=1e-8):
            break
    return acc
I have a number A (built from the digits 0,1,2,3). I want to find x and y such that x^y is the biggest such power not exceeding A:
x^y <= A and x^y is maximal
Also, x and y must not be decimal numbers, only "integers".
For example:
A = 7 => x^y = 2^2
A = 28 => x^y = 3^3
A = 33 => x^y = 2^5
etc
Edit:
As izomorphius suggested in a comment, there will always be the solution x = A, y = 1. But that is not the desired result. I want x and y to be as close to each other as possible.
A naive solution could be:
The "closest yet not higher" number to A by doing a^y for some constant a is:
a^floor(log_a(A)) [where log_a(A) is the logarithm with base a of A, which can be calculated as log(A)/log(a) in most programming languages]
By iterating over all a in the range [2, A) you can find this number.
This solution is O(A * f(A)) where f(A) is your pow/log complexity
P.S. If you want your exponent (y) to be larger than 1, you can simply iterate over the range [2, sqrt(A)] - it will reduce the time complexity to O(sqrt(A) * f(A)) - and will get you only numbers with an exponent larger than 1.
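A minimal sketch of that search (my own illustration of the approach described above, restricted to exponents y >= 2):

import math

def best_power(A):
    """Largest a**floor(log_a(A)) over a in [2, sqrt(A)], with exponent >= 2."""
    best = None
    for a in range(2, math.isqrt(A) + 1):
        y = int(math.log(A) / math.log(a))   # ~ floor(log_a(A))
        while a ** y > A:                    # guard against rounding up
            y -= 1
        while a ** (y + 1) <= A:             # guard against rounding down
            y += 1
        if y >= 2 and (best is None or a ** y > best[0] ** best[1]):
            best = (a, y)
    return best

print(best_power(7), best_power(28), best_power(33))   # (2, 2) (3, 3) (2, 5)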
It is not clear what you are asking, but I will try to guess.
We first solve the equation z^z = a for a real number z. Let u and v be z rounded down and up, respectively. Among the three candidates (u,u), (v,u), (u,v) we choose the largest one that does not exceed a.
Example: Consider the case a = 2000. We solve z^z = 2000 by numerical methods (see below) to get an approximate solution z = 4.8278228255818725. We round down and up to obtain u = 4 and v = 5. We now have three candidates, 4^4 = 256, 4^5 = 1024 and 5^4 = 625. They are all smaller than 2000, so we take the one that gives the largest answer, which is x = 4, y = 5.
Here is Python code. The function solve_approx does what you want. It works well for a >= 3. I am sure you can cope with the cases a = 1 and a = 2 by yourself.
import math

def solve(a):
    """Solve the equation x^x = a using Newton's method"""
    x = math.log(a) / math.log(math.log(a))  # Initial estimate
    while abs(x ** x - a) > 0.1:
        x = x - (x ** x - a) / (x ** x * (1 + math.log(x)))
    return x

def solve_approx(a):
    """Find two integer numbers x and y such that x^y is smaller than
    a but as close to it as possible, and try to make x and y as equal
    as possible."""
    # First we solve exactly to find z such that z^z = a
    z = solve(a)
    # We round z down and up
    u = math.floor(z)
    v = math.ceil(z)
    # We now have three possible candidates to choose from:
    # u ** u, v ** u, u ** v
    candidates = [(u, u), (v, u), (u, v)]
    # We filter out those that are too big:
    candidates = [(x, y) for (x, y) in candidates if x ** y <= a]
    # And we select the one that gives the largest result
    candidates.sort(key=(lambda key: key[0] ** key[1]))
    return candidates[-1]
Here is a little demo:
>>> solve_approx(5)
(2, 2)
>>> solve_approx(100)
(3, 4)
>>> solve_approx(200)
(3, 4)
>>> solve_approx(1000)
(5, 4)
>>> solve_approx(1000000)
(7, 7)
I'm working on a project for fun and I need an algorithm to do as follows:
Generate a list of numbers of Length n which add up to x
I would settle for list of integers, but ideally, I would like to be left with a set of floating point numbers.
I would be very surprised if this problem wasn't heavily studied, but I'm not sure what to look for.
I've tackled similar problems in the past, but this one is decidedly different in nature. Previously I've generated different combinations of a list of numbers that add up to x. I'm sure that I could simply brute-force this problem, but that hardly seems like the ideal solution.
Anyone have any idea what this may be called, or how to approach it? Thanks all!
Edit: To clarify, I mean that the list should be length N while the numbers themselves can be of any size.
edit2: Sorry for my improper use of 'set', I was using it as a catch all term for a list or an array. I understand that it was causing confusion, my apologies.
This is how to do it in Python
import random

def random_values_with_prescribed_sum(n, total):
    x = [random.random() for i in range(n)]
    k = total / sum(x)
    return [v * k for v in x]
Basically you pick n random numbers, compute their sum and compute a scale factor so that the sum will be what you want it to be.
Note that this approach will not produce "uniform" slices, i.e. the distribution you will get will tend to be more "egalitarian" than it would be if it were picked at random among all distributions with the given sum.
To see the reason you can just picture what the algorithm does in the case of two numbers with a prescribed sum (e.g. 1):
The point P is a generic point obtained by picking two random numbers, and it is uniform inside the square [0,1]x[0,1]. The point Q is obtained by scaling P so that the sum is 1, i.e. by projecting P onto the segment from (0,1) to (1,0). As the picture makes clear, points close to the middle of that segment have a higher probability: for example, the exact middle of the segment is reached by projecting any point on the diagonal (0,0)-(1,1), while the point (0,1) is reached only by projecting points from the edge (0,0)-(0,1)... and the diagonal's length is sqrt(2)=1.4142... while the square side is only 1.0.
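To see the effect numerically, here is a quick experiment of mine (not part of the answer) for the two-number case: the first coordinate of the scaled pair clusters around 0.5 more tightly than it would for a point picked uniformly on the segment (whose standard deviation would be 1/sqrt(12) ~= 0.2887).

import random

samples = []
for _ in range(100_000):
    a, b = random.random(), random.random()
    samples.append(a / (a + b))          # first coordinate after scaling to sum 1
mean = sum(samples) / len(samples)
std = (sum((s - mean) ** 2 for s in samples) / len(samples)) ** 0.5
print(mean, std)                         # mean ~ 0.5, std ~ 0.24 < 0.2887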
Actually, you need to generate a partition of x into n parts. This is usually done in the following way: a partition of x into n non-negative parts can be represented by reserving n + x - 1 free places, putting n - 1 borders at arbitrary positions, and stones in the rest. The stone groups between consecutive borders (and at the ends) add up to x, thus the number of possible partitions is the binomial coefficient C(n + x - 1, n - 1).
So your algorithm could be as follows: choose an arbitrary (n - 1)-subset of an (n + x - 1)-set; it uniquely determines a partition of x into n parts.
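As an illustration (my own sketch, not part of the original answer), the subset choice can be done with Python's random.sample; the gaps between the chosen border positions are the parts:

import random

def random_partition(x, n):
    """Uniformly random split of x into n non-negative integer parts
    (stars and bars: n - 1 borders among n + x - 1 slots)."""
    borders = sorted(random.sample(range(n + x - 1), n - 1))
    # gaps before, between, and after the borders are the stone counts
    edges = [-1] + borders + [n + x - 1]
    return [edges[i + 1] - edges[i] - 1 for i in range(n)]

parts = random_partition(10, 4)
print(parts, sum(parts))  # four non-negative integers that sum to 10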
In Knuth's TAOCP the chapter 3.4.2 discusses random sampling. See Algortihm S there.
Algorithm S: (choose n arbitrary records from a total of N)
1. t = 0, m = 0;
2. u = random, uniformly distributed on (0, 1)
3. if (N - t)*u >= n - m, skip the t-th record and increase t by 1; otherwise include the t-th record in the sample, and increase m and t by 1
4. if m < n, return to 2; otherwise, the algorithm is finished
The solution for non-integers is algorithmically trivial: you just select n arbitrary numbers that don't sum to 0, and normalize them by their sum (then scale by the desired total).
If you want to sample uniformly in the region of N-1-dimensional space defined by x1 + x2 + ... + xN = x, then you're looking at a special case of sampling from a Dirichlet distribution. The sampling procedure is a little more involved than generating uniform deviates for the xi. Here's one way to do it, in Python:
import random

# N is the number of values and x the desired total, as in the question
xs = [random.gammavariate(1, 1) for a in range(N)]
xs = [x * v / sum(xs) for v in xs]
If you don't care too much about the sampling properties of your results, you can just generate uniform deviates and correct their sum afterwards.
Here is a version of the above algorithm in Javascript
function getRandomArbitrary(min, max) {
    return Math.random() * (max - min) + min;
};

function getRandomArray(min, max, n) {
    var arr = [];
    for (var i = 0, l = n; i < l; i++) {
        arr.push(getRandomArbitrary(min, max))
    };
    return arr;
};

function randomValuesPrescribedSum(min, max, n, total) {
    var arr = getRandomArray(min, max, n);
    var sum = arr.reduce(function(pv, cv) { return pv + cv; }, 0);
    var k = total/sum;
    var delays = arr.map(function(x) { return k*x; })
    return delays;
};
You can call it with
var myarray = randomValuesPrescribedSum(0,1,3,3);
And then check it with
var sum = myarray.reduce(function(pv, cv) { return pv + cv;},0);
This code does a reasonable job. I think it produces a different distribution than 6502's answer, but I am not sure which is better or more natural. Certainly his code is clearer/nicer.
import random

def parts(total_sum, num_parts):
    points = [random.random() for i in range(num_parts - 1)]
    points.append(0)
    points.append(1)
    points.sort()
    ret = []
    for i in range(1, len(points)):
        ret.append((points[i] - points[i - 1]) * total_sum)
    return ret

def test(total_sum, num_parts):
    ans = parts(total_sum, num_parts)
    assert abs(sum(ans) - total_sum) < 1e-7
    print(ans)

test(5.5, 3)
test(10, 1)
test(10, 5)
In python:
a: create a list of (random #'s 0 to 1) times total; append 0 and total to the list
b: sort the list, measure the distance between each element
c: round the list elements
import random
import time

TOTAL = 15
PARTS = 4
PLACES = 3

def random_sum_split(parts, total, places):
    a = [0, total] + [random.random()*total for i in range(parts-1)]
    a.sort()
    b = [(a[i] - a[i-1]) for i in range(1, (parts+1))]
    if places == None:
        return b
    else:
        b.pop()
        c = [round(x, places) for x in b]
        c.append(round(total-sum(c), places))
        return c

def tick():
    if info.tick == 1:
        start = time.time()
        alpha = random_sum_split(PARTS, TOTAL, PLACES)
        end = time.time()
        log('alpha: %s' % alpha)
        log('total: %.7f' % sum(alpha))
        log('parts: %s' % PARTS)
        log('places: %s' % PLACES)
        log('elapsed: %.7f' % (end-start))
yields:
[2014-06-13 01:00:00] alpha: [0.154, 3.617, 6.075, 5.154]
[2014-06-13 01:00:00] total: 15.0000000
[2014-06-13 01:00:00] parts: 4
[2014-06-13 01:00:00] places: 3
[2014-06-13 01:00:00] elapsed: 0.0005839
to the best of my knowledge this distribution is uniform
I am having trouble fully understanding the K-Means++ algorithm. I am interested in exactly how the first k centroids are picked, namely the initialization, as the rest is like in the original K-Means algorithm.
Is the probability function used based on distance or Gaussian?
At the same time, is the point most distant from the other centroids picked as a new centroid?
I would appreciate a step by step explanation and an example. The one in Wikipedia is not clear enough. Also a very well commented source code would help. If you are using 6 arrays then please tell us which one is for what.
Interesting question. Thank you for bringing this paper to my attention - K-Means++: The Advantages of Careful Seeding
In simple terms, cluster centers are initially chosen at random from the set of input observation vectors, where the probability of choosing vector x is high if x is not near any previously chosen centers.
Here is a one-dimensional example. Our observations are [0, 1, 2, 3, 4]. Let the first center, c1, be 0. The probability that the next cluster center, c2, is x is proportional to ||c1-x||^2. So, P(c2 = 1) = 1a, P(c2 = 2) = 4a, P(c2 = 3) = 9a, P(c2 = 4) = 16a, where a = 1/(1+4+9+16).
Suppose c2=4. Then, P(c3 = 1) = 1a, P(c3 = 2) = 4a, P(c3 = 3) = 1a, where a = 1/(1+4+1).
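Those numbers can be reproduced with a couple of NumPy lines (a quick check of the arithmetic, not part of the original answer):

import numpy as np

X = np.array([0, 1, 2, 3, 4])
centers = [0]                                         # c1 = 0
d2 = np.min([(X - c) ** 2 for c in centers], axis=0)  # squared distance to nearest center
print(d2 / d2.sum())                                  # [0, 1/30, 4/30, 9/30, 16/30]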
I've coded the initialization procedure in Python; I don't know if this helps you.
import scipy

def initialize(X, K):
    C = [X[0]]
    for k in range(1, K):
        D2 = scipy.array([min([scipy.inner(c - x, c - x) for c in C]) for x in X])
        probs = D2 / D2.sum()
        cumprobs = probs.cumsum()
        r = scipy.rand()
        for j, p in enumerate(cumprobs):
            if r < p:
                i = j
                break
        C.append(X[i])
    return C
EDIT with clarification: The output of cumsum gives us boundaries to partition the interval [0,1]. These partitions have length equal to the probability of the corresponding point being chosen as a center. So then, since r is uniformly chosen between [0,1], it will fall into exactly one of these intervals (because of break). The for loop checks to see which partition r is in.
Example:
probs = [0.1, 0.2, 0.3, 0.4]
cumprobs = [0.1, 0.3, 0.6, 1.0]
if r < cumprobs[0]:
    # this event has probability 0.1
    i = 0
elif r < cumprobs[1]:
    # this event has probability 0.2
    i = 1
elif r < cumprobs[2]:
    # this event has probability 0.3
    i = 2
elif r < cumprobs[3]:
    # this event has probability 0.4
    i = 3
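As a small aside (not in the original answer), the same interval lookup can be done without an explicit loop using numpy.searchsorted:

import numpy as np

cumprobs = np.array([0.1, 0.3, 0.6, 1.0])
r = 0.45
i = int(np.searchsorted(cumprobs, r))   # index of the partition containing r
print(i)                                # 2, since 0.3 <= 0.45 < 0.6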
One Liner.
Say we need to select 2 cluster centers. Instead of selecting them all randomly (like we do in simple k-means), we select the first one randomly, then find the points that are farthest from the first center (these points most probably do not belong to the first cluster, since they are far from it) and assign the second cluster center near those far points.
I have prepared a full source implementation of k-means++ based on the book "Collective Intelligence" by Toby Segaran and the k-means++ initialization provided here.
Indeed there are two distance functions here. For the initial centroids a standard one based on numpy.inner is used, and then for fixing the centroids the Pearson one is used. Maybe the Pearson one can also be used for the initial centroids. They say it is better.
from __future__ import division

def readfile(filename):
    lines = [line for line in file(filename)]
    rownames = []
    data = []
    for line in lines:
        p = line.strip().split(' ')  # single space as separator
        #print p
        # First column in each row is the rowname
        rownames.append(p[0])
        # The data for this row is the remainder of the row
        data.append([float(x) for x in p[1:]])
        #print [float(x) for x in p[1:]]
    return rownames, data
from math import sqrt

def pearson(v1, v2):
    # Simple sums
    sum1 = sum(v1)
    sum2 = sum(v2)
    # Sums of the squares
    sum1Sq = sum([pow(v, 2) for v in v1])
    sum2Sq = sum([pow(v, 2) for v in v2])
    # Sum of the products
    pSum = sum([v1[i]*v2[i] for i in range(len(v1))])
    # Calculate r (Pearson score)
    num = pSum - (sum1*sum2/len(v1))
    den = sqrt((sum1Sq - pow(sum1, 2)/len(v1)) * (sum2Sq - pow(sum2, 2)/len(v1)))
    if den == 0: return 0
    return 1.0 - num/den
import numpy
from numpy.random import *

def initialize(X, K):
    C = [X[0]]
    for _ in range(1, K):
        #D2 = numpy.array([min([numpy.inner(c-x,c-x) for c in C]) for x in X])
        D2 = numpy.array([min([numpy.inner(numpy.array(c) - numpy.array(x),
                                           numpy.array(c) - numpy.array(x)) for c in C]) for x in X])
        probs = D2 / D2.sum()
        cumprobs = probs.cumsum()
        #print "cumprobs=",cumprobs
        r = rand()
        #print "r=",r
        i = -1
        for j, p in enumerate(cumprobs):
            if r < p:
                i = j
                break
        C.append(X[i])
    return C

def kcluster(rows, distance=pearson, k=4):
    # (Most of this function -- the k-means loop from "Collective Intelligence",
    # seeded with initialize() instead of random centroids -- did not survive;
    # only its closing "move centroids to the average of their members" step did.)
    for i in range(k):
        avgs = [0.0] * len(rows[0])
        if len(bestmatches[i]) > 0:
            for rowid in bestmatches[i]:
                for m in range(len(rows[rowid])):
                    avgs[m] += rows[rowid][m]
            for j in range(len(avgs)):
                avgs[j] /= len(bestmatches[i])
            clusters[i] = avgs
    return bestmatches
rows, data = readfile('/home/toncho/Desktop/data.txt')

kclust = kcluster(data, k=4)

print "Result:"
for c in kclust:
    out = ""
    for r in c:
        out += rows[r] + ' '
    print "[" + out[:-1] + "]"

print 'done'
data.txt:
p1 1 5 6
p2 9 4 3
p3 2 3 1
p4 4 5 6
p5 7 8 9
p6 4 5 4
p7 2 5 6
p8 3 4 5
p9 6 7 8