I have written a poker simulator that calculates the probability of winning in Texas Hold'em by running a simulation of games (i.e. a Monte Carlo simulation). Currently it runs around 10,000 simulations in 10 seconds, which in general is good enough. However, there are apps on the iPhone that run around 100x faster. I'm wondering whether there's anything I can do to speed up the program significantly. I have already replaced strings with lists and found many ways to speed up the program by around 2-3 times. But what can I do to possibly speed it up 50x-100x? I checked with a profiler but couldn't find any significant bottleneck. I also compiled it with Cython (without making any changes) but that had no effect on speed either. Any suggestions are appreciated. The full listing is below:
__author__ = 'Nicolas Dickreuter'
import time
import numpy as np
from collections import Counter
class MonteCarlo(object):
def EvalBestHand(self, hands):
scores = [(i, self.score(hand)) for i, hand in enumerate(hands)]
winner = sorted(scores, key=lambda x: x[1], reverse=True)[0][0]
return hands[winner],scores[winner][1][-1]
def score(self, hand):
crdRanksOriginal = '23456789TJQKA'
originalSuits='CDHS'
rcounts = {crdRanksOriginal.find(r): ''.join(hand).count(r) for r, _ in hand}.items()
score, crdRanks = zip(*sorted((cnt, rank) for rank, cnt in rcounts)[::-1])
potentialThreeOfAKind = score[0] == 3
potentialTwoPair = score == (2, 2, 1, 1, 1)
potentialPair = score == (2, 1, 1, 1, 1, 1)
if score[0:2]==(3,2) or score[0:2]==(3,3): # fullhouse (three of a kind and pair, or two three of a kind)
crdRanks = (crdRanks[0],crdRanks[1])
score = (3,2)
elif score[0:4]==(2,2,2,1):
score=(2,2,1) # three pair are not worth more than two pair
sortedCrdRanks = sorted(crdRanks,reverse=True) # avoid for example 11,8,6,7
crdRanks=(sortedCrdRanks[0],sortedCrdRanks[1],sortedCrdRanks[2],sortedCrdRanks[3])
elif len(score) >= 5: # high card, flush, straight and straight flush
# straight
if 12 in crdRanks: # adjust if 5 high straight
crdRanks += (-1,)
sortedCrdRanks = sorted(crdRanks,reverse=True) # sort again as if pairs the first rank matches the pair
for i in range(len(sortedCrdRanks) - 4):
straight = sortedCrdRanks[i] - sortedCrdRanks[i + 4] == 4
if straight:
crdRanks=(sortedCrdRanks[i],sortedCrdRanks[i+1],sortedCrdRanks[i+2],sortedCrdRanks[i+3],sortedCrdRanks[i+4])
break
# flush
suits = [s for _, s in hand]
flush = max(suits.count(s) for s in suits) >= 5
if flush:
for flushSuit in originalSuits: # get the suit of the flush
if suits.count(flushSuit)>=5:
break
flushHand = [k for k in hand if flushSuit in k]
rcountsFlush = {crdRanksOriginal.find(r): ''.join(flushHand).count(r) for r, _ in flushHand}.items()
score, crdRanks = zip(*sorted((cnt, rank) for rank, cnt in rcountsFlush)[::-1])
crdRanks = tuple(sorted(crdRanks,reverse=True)) # ignore original sorting where pairs had influence
# check for straight in flush
if 12 in crdRanks: # adjust if 5 high straight
crdRanks += (-1,)
for i in range(len(crdRanks) - 4):
straight = crdRanks[i] - crdRanks[i + 4] == 4
# no pair, straight, flush, or straight flush
score = ([(5,), (2, 1, 2)], [(3, 1, 3), (5,)])[flush][straight]
if score == (1,) and potentialThreeOfAKind: score = (3, 1)
elif score == (1,) and potentialTwoPair: score = (2, 2, 1)
elif score == (1,) and potentialPair: score = (2, 1, 1)
if score[0]==5:
handType="StraightFlush"
#crdRanks=crdRanks[:5] # five card rule makes no difference {:5] would be incorrect
elif score[0]==4:
handType="FoufOfAKind"
crdRanks=crdRanks[:2]
elif score[0:2]==(3,2):
handType="FullHouse"
# crdRanks=crdRanks[:2] # already implmeneted above
elif score[0:3]==(3,1,3):
handType="Flush"
crdRanks=crdRanks[:5] # !! to be verified !!
elif score[0:3]==(3,1,2):
handType="Straight"
crdRanks=crdRanks[:5] # !! to be verified !!
elif score[0:2]==(3,1):
handType="ThreeOfAKind"
crdRanks=crdRanks[:3]
elif score[0:2]==(2,2):
handType="TwoPair"
crdRanks=crdRanks[:3]
elif score[0]==2:
handType="Pair"
crdRanks=crdRanks[:4]
elif score[0]==1:
handType="HighCard"
crdRanks=crdRanks[:5]
else: raise Exception('Card Type error!')
return score, crdRanks, handType
def createCardDeck(self):
values = "23456789TJQKA"
suites = "CDHS"
Deck=[]
[Deck.append(x+y) for x in values for y in suites]
return Deck
def distributeToPlayers(self, Deck, PlayerAmount, PlayerCardList, TableCardsList):
Players =[]
CardsOnTable = []
knownPlayers = 0
for PlayerCards in PlayerCardList:
FirstPlayer=[]
FirstPlayer.append(Deck.pop(Deck.index(PlayerCards[0])))
FirstPlayer.append(Deck.pop(Deck.index(PlayerCards[1])))
Players.append(FirstPlayer)
knownPlayers += 1
for c in TableCardsList:
CardsOnTable.append(Deck.pop(Deck.index(c))) # remove cards that are on the table from the deck
for n in range(0, PlayerAmount - knownPlayers):
plr=[]
plr.append(Deck.pop(np.random.random_integers(0,len(Deck)-1)))
plr.append(Deck.pop(np.random.random_integers(0,len(Deck)-1)))
Players.append(plr)
return Players, Deck
def distributeToTable(self, Deck, TableCardsList):
remaningRandoms = 5 - len(TableCardsList)
for n in range(0, remaningRandoms):
TableCardsList.append(Deck.pop(np.random.random_integers(0,len(Deck)-1)))
return TableCardsList
def RunMonteCarlo(self, originalPlayerCardList, originalTableCardsList, PlayerAmount, gui, maxRuns=6000,maxSecs=5):
winnerlist = []
EquityList = []
winnerCardTypeList=[]
wins = 0
runs=0
timeout_start=time.time()
for m in range(maxRuns):
runs+=1
try:
if gui.active==True:
gui.progress["value"] = int(round(m*100/maxRuns))
except:
pass
Deck = self.createCardDeck()
PlayerCardList = originalPlayerCardList[:]
TableCardsList = originalTableCardsList[:]
Players, Deck = self.distributeToPlayers(Deck, PlayerAmount, PlayerCardList, TableCardsList)
Deck5Cards = self.distributeToTable(Deck, TableCardsList)
PlayerFinalCardsWithTableCards = []
for o in range(0, PlayerAmount):
PlayerFinalCardsWithTableCards.append(Players[o]+Deck5Cards)
bestHand,winnerCardType=self.EvalBestHand(PlayerFinalCardsWithTableCards)
winner = (PlayerFinalCardsWithTableCards.index(bestHand))
#print (winnerCardType)
CollusionPlayers = 0
if winner < CollusionPlayers + 1:
wins += 1
winnerCardTypeList.append(winnerCardType)
# winnerlist.append(winner)
# self.equity=wins/m
# if self.equity>0.99: self.equity=0.99
# EquityList.append(self.equity)
if time.time()>timeout_start+maxSecs:
break
self.equity = wins / runs
self.winnerCardTypeList = Counter(winnerCardTypeList)
for key, value in self.winnerCardTypeList.items():
self.winnerCardTypeList[key] = value / runs
self.winTypesDict=self.winnerCardTypeList.items()
# show how the montecarlo converges
# xaxis=range(500,monteCarloRuns)
# plt.plot(xaxis,EquityList[499:monteCarloRuns])
# plt.show()
return self.equity,self.winTypesDict
if __name__ == '__main__':
Simulation = MonteCarlo()
mycards=[['AS', 'KS']]
cardsOnTable = []
players = 3
start_time = time.time()
Simulation.RunMonteCarlo(mycards, cardsOnTable, players, 1, maxRuns=200000, maxSecs=120)
print("--- %s seconds ---" % (time.time() - start_time))
equity = Simulation.equity # considering draws as wins
print (equity)
This algorithm is too long and complex for any sensible suggestion apart from generic ones. So here you go: vectorise everything you can and work with vectors of data instead of loops, lists, insertions and removals. Monte Carlo simulations allow for this quite nicely because all samples are independent.
For example, if you want 1 million retries, generate a NumPy array of 1 million random values in one line. If you want to test an outcome, place 1 million outcomes into a NumPy array and test all of them at once against a given condition, producing a NumPy array of booleans.
If you manage to vectorize this you will get tremendous performance increases.
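As a rough illustration of the pattern (a minimal sketch only, not poker-specific: the uniform draws and the 0.55 threshold stand in for whatever your per-game outcome and win condition would be):

import numpy as np

rng = np.random.default_rng(0)
n_sims = 1_000_000

# One vectorised draw for all simulations instead of a Python loop over games.
outcomes = rng.random(n_sims)   # placeholder per-simulation results
wins = outcomes > 0.55          # every simulation tested in a single comparison
equity = wins.mean()            # fraction of simulated wins
print(equity)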
Inefficient array operations are probably causing most of the slowness. In particular, I see a lot of lines like this:
Deck.pop(some_index)
Every time you do that, the list has to shift all the elements after that index. You can eliminate this (as well as repeated random functions) by using:
from random import shuffle
# Setup
originalDeck = createCardDeck()
# Individual run
Deck = originalDeck[:]
shuffle(Deck) # shuffles in place
# Draw a card from the end so nothing needs to shift
card = Deck.pop()
# Concise and efficient!
There are some other things you could do, like change long if-else chains into constant time lookups with a dict or custom class. Right now it checks for a straight flush first down to a high card last for every hand, which seems rather inefficient. I doubt that it would make a big difference though.
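For illustration, the hand-type dispatch at the end of score() could become a table lookup along these lines (a sketch only; the keys are my reading of the score encoding in the question and would have to be verified against it):

# Hypothetical prefix table; longer prefixes must be tried first so that
# e.g. (3, 1, 3) is matched before (3, 1).
HAND_TYPES = {
    (5,):      "StraightFlush",
    (4,):      "FourOfAKind",
    (3, 2):    "FullHouse",
    (3, 1, 3): "Flush",
    (3, 1, 2): "Straight",
    (3, 1):    "ThreeOfAKind",
    (2, 2):    "TwoPair",
    (2,):      "Pair",
    (1,):      "HighCard",
}

def hand_type(score):
    for length in (3, 2, 1):
        label = HAND_TYPES.get(tuple(score[:length]))
        if label is not None:
            return label
    raise ValueError("unknown score: %r" % (score,))

As said, this alone will not give you 50x, but it replaces a chain of comparisons per evaluated hand with a handful of dict lookups.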
Related
I have written some code and am trying to use Numba to accelerate it. The main goal of the code is to group some values based on a condition. In this regard, iter_ is used to make the code converge towards satisfying the condition. I prepared a small case below to reproduce the sample code:
import numpy as np
import numba as nb
rng = np.random.default_rng(85)
# --------------------------------------- small data volume ---------------------------------------
# values_ = {'R0': np.array([0.01090976, 0.01069902, 0.00724112, 0.0068463 , 0.01135723, 0.00990762,
# 0.01090976, 0.01069902, 0.00724112, 0.0068463 , 0.01135723]),
# 'R1': np.array([0.01836379, 0.01900166, 0.01864162, 0.0182823 , 0.01840322, 0.01653088,
# 0.01900166, 0.01864162, 0.0182823 , 0.01840322, 0.01653088]),
# 'R2': np.array([0.02430913, 0.02239156, 0.02225379, 0.02093393, 0.02408692, 0.02110411,
# 0.02239156, 0.02225379, 0.02093393, 0.02408692, 0.02110411])}
#
# params = {'R0': [3, 0.9490579204466154, 1825, 7.070272000000002e-05],
# 'R1': [0, 0.9729203826820172, 167 , 7.070272000000002e-05],
# 'R2': [1, 0.6031363088057902, 1316, 8.007296000000003e-05]}
#
# Sno, dec_, upd_ = 2, 100, 200
# -------------------------------------------------------------------------------------------------
# ----------------------------- UPDATED (medium and large data volumes) ---------------------------
# values_ = np.load("values_med.npy", allow_pickle=True)[()]
# params = np.load("params_med.npy", allow_pickle=True)[()]
values_ = np.load("values_large.npy", allow_pickle=True)[()]
params = np.load("params_large.npy", allow_pickle=True)[()]
Sno, dec_, upd_ = 2000, 1000, 200
# -------------------------------------------------------------------------------------------------
# values_ = [*values_.values()]
# params = [*params.values()]
# @nb.jit(forceobj=True)
# def test(values_, params, Sno, dec_, upd_):
final_dict = {}
for i, j in enumerate(values_.keys()):
    Rand_vals = []
    goal_sum = params[j][1] * params[j][3]
    tel = goal_sum / dec_ * 10
    if params[j][0] != 0:
        for k in range(Sno):
            final_sum = 0.0
            iter_ = 0
            t = 1
            while not np.allclose(goal_sum, final_sum, atol=tel):
                iter_ += 1
                vals_group = rng.choice(values_[j], size=params[j][0], replace=False)
                # final_sum = 0.0016 * np.sum(vals_group)  # -----> For small data volume
                final_sum = np.sum(vals_group ** 3)  # -----> UPDATED For med or large data volume
                if iter_ == upd_:
                    t += 1
                    tel = t * tel
            values_[j] = np.delete(values_[j], np.where(np.in1d(values_[j], vals_group)))
            Rand_vals.append(vals_group)
    else:
        Rand_vals = [np.array([])] * Sno
    final_dict["R" + str(i)] = Rand_vals
# return final_dict
# test(values_, params, Sno, dec_, upd_)
# test(values_, params, Sno, dec_, upd_)
At first, @nb.jit was used to apply Numba to this code (forceobj=True is used to avoid warnings and …), which has an adverse effect on performance. nopython mode was checked too, with @nb.njit, which gives the following error because the dictionary type of the inputs is not supported (as mentioned in 1, 2):
cannot determine Numba type of <class 'dict'>
I don't know if (or how) it could be handled by Dict from numba.typed (by converting the created Python dictionaries to Numba Dict), or whether converting the dictionaries to lists of arrays has any advantage. I think parallelization may be possible if some code lines, e.g. Rand_vals.append(vals_group) or the else section or …, were taken out of the function or modified so as to get the same results as before, but I don't have any idea how to do so.
I will be grateful for any help utilizing Numba on this code. Numba parallelization would be the most desired solution (probably the best applicable method in terms of performance) if it is possible.
Data:
medium data volume: values_med, params_med
large data volume: values_large, params_large
This code can be converted to Numba but it is not straightforward.
First of all, the dictionary and list types must be defined, since Numba njit functions cannot directly operate on reflected lists (aka pure-Python lists). This is a bit tedious to do in Numba and the resulting code is a bit verbose:
String = nb.types.unicode_type
ValueArray = nb.float64[::1]
ValueDict = nb.types.DictType(String, ValueArray)
ParamDictValue = nb.types.Tuple([nb.int_, nb.float64, nb.int_, nb.float64])
ParamDict = nb.types.DictType(String, ParamDictValue)
FinalDictValue = nb.types.ListType(ValueArray)
FinalDict = nb.types.DictType(String, FinalDictValue)
Then you need to convert the input dictionaries:
nbValues = nb.typed.typeddict.Dict.empty(String, ValueArray)
for key, value in values_.items():
    nbValues[key] = value.copy()

nbParams = nb.typed.typeddict.Dict.empty(String, ParamDictValue)
for key, value in params.items():
    nbParams[key] = (nb.int_(value[0]), nb.float64(value[1]), nb.int_(value[2]), nb.float64(value[3]))
Then, you need to write the core function. np.allclose and np.isin are not implemented in Numba, so they have to be reimplemented manually. But the main point is that Numba does not support the rng NumPy object, and I think it will not support it any time soon. Note that Numba has a random number implementation that tries to mimic the behaviour of NumPy, but the management of the seed is a bit different. Note also that results should be the same as with the np.random.xxx NumPy functions if the seed is set to the same value (NumPy and Numba have different seed variables that are not synchronized).
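For reference, np.allclose(goal_sum, final_sum, atol=tel) on scalars reduces to the comparison used in the while condition of the function below; if you prefer to keep it as a named helper, a minimal njit-compatible sketch (my own, not part of the original code) could be:

import numba as nb

@nb.njit
def scalar_allclose(a, b, rtol=1e-05, atol=1e-08):
    # Same formula NumPy documents for np.allclose, restricted to scalars.
    return abs(a - b) <= (atol + rtol * abs(b))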
@nb.njit(FinalDict(ValueDict, ParamDict, nb.int_, nb.int_, nb.int_))
def nbTest(values_, params, Sno, dec_, upd_):
    final_dict = nb.typed.Dict.empty(String, FinalDictValue)

    for i, j in enumerate(values_.keys()):
        Rand_vals = nb.typed.List.empty_list(ValueArray)
        goal_sum = params[j][1] * params[j][3]
        tel = goal_sum / dec_ * 10

        if params[j][0] != 0:
            for k in range(Sno):
                final_sum = 0.0
                iter_ = 0
                t = 1
                vals_group = np.empty(0, dtype=nb.float64)

                while np.abs(goal_sum - final_sum) > (1e-05 * np.abs(final_sum) + tel):
                    iter_ += 1
                    vals_group = np.random.choice(values_[j], size=params[j][0], replace=False)
                    # final_sum = 0.0016 * np.sum(vals_group)  # (for small data volume)
                    final_sum = np.sum(vals_group ** 3)  # (for med or large data volume)
                    if iter_ == upd_:
                        t += 1
                        tel = t * tel

                # Perform an in-place deletion
                vals, gr = values_[j], vals_group
                cur = 0
                for l in range(vals.size):
                    found = False
                    for m in range(gr.size):
                        found |= vals[l] == gr[m]
                    if not found:
                        # Keep the value (delete it otherwise)
                        vals[cur] = vals[l]
                        cur += 1
                values_[j] = vals[:cur]

                Rand_vals.append(vals_group)
        else:
            for k in range(Sno):
                Rand_vals.append(np.empty(0, dtype=nb.float64))

        final_dict["R" + str(i)] = Rand_vals

    return final_dict
Note that the replacement implementation of np.isin is quite naive but it works pretty well in practice on your input example.
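If you want to check that the manual loop keeps the same elements as the NumPy call it replaces, a quick plain-Python test (outside Numba, purely for verification) could look like:

import numpy as np

rng = np.random.default_rng(0)
vals = rng.random(50)
group = rng.choice(vals, size=5, replace=False)

# The manual double loop keeps exactly the elements that np.isin reports
# as absent from `group`.
kept_manual = np.array([v for v in vals if not np.any(v == group)])
kept_isin = vals[~np.isin(vals, group)]
assert np.array_equal(kept_manual, kept_isin)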
The function can be called using the following way:
nbFinalDict = nbTest(nbValues, nbParams, Sno, dec_, upd_)
Finally, the dictionary should be converted back to basic Python objects:
finalDict = dict()
for key, value in nbFinalDict.items():
    finalDict[key] = list(value)
This implementation is fast for small inputs but not for large ones, since np.random.choice takes almost all the time (>96%). The thing is that this function is clearly not optimal when the number of requested items is small (which is your case). Indeed, it surprisingly runs in time linear in the size of the input array and not linear in the number of requested items.
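A quick way to check that claim on your own machine (the absolute timings are machine dependent; the point is only how the cost scales with the array size for a fixed sample size):

import timeit
import numpy as np

rng = np.random.default_rng(0)
for m in (1_000, 10_000, 100_000):
    arr = rng.random(m)
    t = timeit.timeit(lambda: rng.choice(arr, size=12, replace=False), number=200)
    print(m, round(t, 4))   # if the claim holds, this grows with m although size stays at 12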
Further Optimizations
The algorithm can be completely rewritten to extract only 12 random items and discard them from the main current array in a much more efficient way. The idea is to swap n items (the small target sample) at the end of the array with other items at random locations, then check the sum, repeat this process until a condition is fulfilled, and finally extract the view to the last n items before resizing the view so as to discard them. All of this can be done in O(n) time rather than O(m) time, where m is the size of the main current array and n << m (e.g. 12 vs 20_000). It can also be computed without any expensive allocation. Here is the resulting code:
@nb.njit(nb.void(ValueArray, nb.int_, nb.int_))
def swap(arr, i, j):
    arr[i], arr[j] = arr[j], arr[i]

@nb.njit(FinalDict(ValueDict, ParamDict, nb.int_, nb.int_, nb.int_))
def nbTest(values_, params, Sno, dec_, upd_):
    final_dict = nb.typed.Dict.empty(String, FinalDictValue)

    for i, j in enumerate(values_.keys()):
        Rand_vals = nb.typed.List.empty_list(ValueArray)
        goal_sum = params[j][1] * params[j][3]
        tel = goal_sum / dec_ * 10
        values = values_[j]
        n = params[j][0]

        if n != 0:
            for k in range(Sno):
                final_sum = 0.0
                iter_ = 0
                t = 1
                m = values.size
                assert n <= m
                group = values[-n:]

                while np.abs(goal_sum - final_sum) > (1e-05 * np.abs(final_sum) + tel):
                    iter_ += 1

                    # Swap the group view with other random items
                    for pos in range(m - n, m):
                        swap(values, pos, np.random.randint(0, m))

                    # For small data volume:
                    # final_sum = 0.0016 * np.sum(group)

                    # For med/large data volume
                    final_sum = 0.0
                    for v in group:
                        final_sum += v ** 3

                    if iter_ == upd_:
                        t += 1
                        tel *= t

                assert iter_ > 0
                values = values[:m-n]
                Rand_vals.append(group)
        else:
            for k in range(Sno):
                Rand_vals.append(np.empty(0, dtype=nb.float64))

        final_dict["R" + str(i)] = Rand_vals

    return final_dict
In addition to being faster, this implementation has the benefit of also being simpler. The results look quite similar to those of the previous implementation, although the randomness makes checking them tricky (especially since this function does not use the same method to choose the random sample). Note that, as opposed to the previous one, this implementation does not remove the items of values that are in group (this is probably not wanted, though).
Benchmark
Here are the results of the last implementation on my machine (compilation and conversion timings excluded):
Provided small input (embedded in the question):
- Initial code: 42.71 ms
- Numba code: 0.11 ms
Medium input:
- Initial code: 3481 ms
- Numba code: 11 ms
Large input:
- Initial code: 6728 ms
- Numba code: 20 ms
Note that the conversion of the inputs takes about the same time as the computation itself.
This last implementation is 316 to 388 times faster than the initial code, depending on the input size.
Notes
Note that the compilation takes a few seconds due to the dict and list types.
Note that while it may be possible to parallelise the implementation, only the outermost loop can be parallelised. The thing is that there are only a few items to compute and the time is already quite small (not the best case for multi-threading). (In the first version, the many temporary arrays created by rng.choice would also prevent the parallel loop from scaling well.) Additionally, lists/dicts cannot be written from multiple threads safely, so one would need to use NumPy arrays in the whole function to be able to do that (or add additional conversions that are already expensive). Moreover, Numba parallelism tends to significantly increase the compilation time, which is already significant. Finally, the result will be less deterministic, since each Numba thread has its own random number generator seed and the items computed by each thread cannot be predicted with prange (this depends on the parallel runtime chosen for the target platform). Note that NumPy has one global seed used by the usual random functions by default (the deprecated way), while RNG objects have their own seed (the new preferred way).
This is another approach to the probably well-known Codility task about the frog crossing the river. Sorry if this question is asked in a bad manner; this is my first post here.
The goal is to find the earliest time when the frog can jump to the other side of the river.
For example, given X = 5 and array A such that:
A[0] = 1
A[1] = 3
A[2] = 1
A[3] = 4
A[4] = 2
A[5] = 3
A[6] = 5
A[7] = 4
the function should return 6.
Example test: (5, [1, 3, 1, 4, 2, 3, 5, 4])
Full task content:
https://app.codility.com/programmers/lessons/4-counting_elements/frog_river_one/
So that was my first obvious approach:
def solution(X, A):
    lista = list(range(1, X + 1))
    if X < 1 or len(A) < 1:
        return -1
    found = -1
    for element in lista:
        if element in A:
            if A.index(element) > found:
                found = A.index(element)
        else:
            return -1
    return found

X = 5
A = [1, 2, 4, 5, 3]
solution(X, A)
This solution is 100% correct and gets 0% in performance tests.
So I thought fewer lines + a list comprehension would get a better score:
def solution(X, A):
    if X < 1 or len(A) < 1:
        return -1
    try:
        found = max([A.index(element) for element in range(1, X + 1)])
    except ValueError:
        return -1
    return found

X = 5
A = [1, 2, 4, 5, 3]
solution(X, A)
This one also works and has 0% performance but it's faster anyway.
I also found solution by deanalvero (https://github.com/deanalvero/codility/blob/master/python/lesson02/FrogRiverOne.py):
def solution(X, A):
    # write your code in Python 2.6
    frog, leaves = 0, [False] * (X)
    for minute, leaf in enumerate(A):
        if leaf <= X:
            leaves[leaf - 1] = True
            while leaves[frog]:
                frog += 1
                if frog == X:
                    return minute
    return -1
This solution gets 100% in correctness and performance tests.
My question arises probably because I don't quite understand this time complexity thing. Please tell me how the last solution is better than my second solution? It has a while loop inside a for loop! It should be slow, but it's not.
Here is a solution in which you would get 100% in both correctness and performance.
def solution(X, A):
    i = 0
    dict_temp = {}
    while i < len(A):
        dict_temp[A[i]] = i
        if len(dict_temp) == X:
            return i
        i += 1
    return -1
The answer has already been given, but I'll add an optional solution that I think might help you understand:
def save_frog(x, arr):
    # creating the steps the frog should make
    steps = set([i for i in range(1, x + 1)])
    # creating the steps the frog already did
    froggy_steps = set()
    for index, leaf in enumerate(arr):
        froggy_steps.add(leaf)
        if froggy_steps == steps:
            return index
    return -1
I think I got the best performance using set(). Take a look at the performance test runtimes in seconds and compare them with yours:
def solution(X, A):
    positions = set()
    seconds = 0
    for i in range(0, len(A)):
        if A[i] not in positions and A[i] <= X:
            positions.add(A[i])
            seconds = i
            if len(positions) == X:
                return seconds
    return -1
The number of nested loops doesn't directly tell you anything about the time complexity. Let n be the length of the input array. The inside of the while loop needs on average O(1) time, although its worst-case time complexity is O(n). The fast solution uses a boolean array leaves where at every index it has the value true if there is a leaf and false otherwise. Over the entire algorithm the body of the while loop is executed no more than n times. The outer for loop is also executed only n times. This means the time complexity of the algorithm is O(n).
The key is that both of your initial solutions are quadratic. They involve O(n) inner scans for each of the parent elements (resulting in O(n**2)).
The fast solution initially appears to suffer the same fate as it's obvious it contains a loop within a loop. But the inner while loop does not get fully scanned for each 'leaf'. Take a look at where 'frog' gets initialized and you'll note that the while loop effectively picks up where it left off for each leaf.
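One way to see that amortised bound is to count the inner iterations explicitly; a small instrumented copy of the fast solution (illustration only):

def solution_with_counter(X, A):
    frog, leaves = 0, [False] * X
    inner_steps = 0   # total number of times the while body runs
    for minute, leaf in enumerate(A):
        if leaf <= X:
            leaves[leaf - 1] = True
        while frog < X and leaves[frog]:
            frog += 1
            inner_steps += 1
        if frog == X:
            return minute, inner_steps
    return -1, inner_steps

# frog only ever moves forward, so inner_steps can never exceed X no matter
# how long A is; the whole run stays O(len(A) + X).
print(solution_with_counter(5, [1, 3, 1, 4, 2, 3, 5, 4]))   # (6, 5)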
Here is my 100% solution that considers the sum of numeric progression.
def solution(X, A):
    covered = [False] * (X + 1)
    n = len(A)
    Sx = ((1 + X) * X) / 2  # sum of the numeric progression
    for i in range(n):
        if not covered[A[i]]:
            Sx -= A[i]
            covered[A[i]] = True
            if Sx == 0:
                return i
    return -1
Optimized solution from @sphoenix: no need to compare two sets, that comparison is not really efficient.
def solution(X, A):
    found = set()
    for pos, i in enumerate(A, 0):
        if i <= X:
            found.add(i)
            if len(found) == X:
                return pos
    return -1
And one more optimized solution, using a boolean array:
def solution(X, A):
    steps, leaves = X, [False] * X
    for minute, leaf in enumerate(A, 0):
        if not leaves[leaf - 1]:
            leaves[leaf - 1] = True
            steps -= 1
            if 0 == steps:
                return minute
    return -1
The last one is better: it uses fewer resources. A set costs more memory and CPU than a boolean list, because every element has to be hashed and stored in a hash table, while the list keeps a single flag per position and is accessed by direct indexing.
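A rough way to measure that difference (numbers depend on the machine and on the input size; this only shows how to compare the two bookkeeping strategies):

import timeit

setup = "X = 100_000; vals = list(range(1, X + 1))"

set_version = '''
seen = set()
for v in vals:
    seen.add(v)
    if len(seen) == X:
        break
'''

list_version = '''
seen = [False] * X
left = X
for v in vals:
    if not seen[v - 1]:
        seen[v - 1] = True
        left -= 1
        if left == 0:
            break
'''

print("set :", min(timeit.repeat(set_version, setup, number=20, repeat=3)))
print("list:", min(timeit.repeat(list_version, setup, number=20, repeat=3)))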
def solution(X, A):
    # if there are not enough items in the list
    if X > len(A):
        return -1
    # else check all items
    else:
        d = {}
        for i, leaf in enumerate(A):
            d[leaf] = i
            if len(d) == X:
                return i
    # if all else fails
    return -1
I tried to use instructions that are as simple as possible.
def solution(X, A):
    if X > len(A):  # check for no answer simple
        return -1
    elif X == 1:  # check for single element
        return 0
    else:
        std_set = {i for i in range(1, X + 1)}  # list of standard order
        this_set = set(A)  # set of unique element in list
        if sum(std_set) > sum(this_set):  # check for no answer complex
            return -1
        else:
            for i in range(0, len(A) - 1):
                if std_set:
                    if A[i] in std_set:
                        std_set.remove(A[i])  # remove each element in standard set
                if not std_set:  # if all removed, return last filled position
                    return i
I guess this code might not fulfil the runtime requirements, but it is the simplest I could think of.
I am using OrderedDict from collections and the sum of the first n numbers to check whether the frog will be able to cross or not.
def solution(X, A):
    from collections import OrderedDict as od
    if sum(set(A)) != (X * (X + 1)) // 2:
        return -1
    k = list(od.fromkeys(A).keys())[-1]
    for x, y in enumerate(A):
        if y == k:
            return x
This code gives 100% for correctness and performance, runs in O(N)
def solution(x, a):
    # write your code in Python 3.6
    # initialize all positions to zero
    # i.e. if x = 2; x + 1 = 3
    # x_positions = [0,1,2]
    x_positions = [0] * (x + 1)
    min_time = -1
    for k in range(len(a)):
        # since we are looking for min time, ensure that you only
        # count the positions that matter
        if a[k] <= x and x_positions[a[k]] == 0:
            x_positions[a[k]] += 1
            min_time = k
        # ensure that all positions are available for the frog to jump
        if sum(x_positions) == x:
            return min_time
    return -1
100% performance using sets
def solution(X, A):
    positions = set()
    for i in range(len(A)):
        if A[i] not in positions:
            positions.add(A[i])
            if len(positions) == X:
                return i
    return -1
I have a problem which, when simplified:
- has a loop which samples new points
- evaluates them with a complex/slow function
- accepts them if the value is above an ever-increasing threshold.
Here is example code for illustration:
from numpy.random import uniform
from time import sleep

def userfunction(x):
    # do something complicated
    # but the computation always takes roughly the same time
    sleep(1)  # comment this out if too slow
    xnew = uniform()  # in reality, a non-trivial function of x
    y = -0.5 * xnew**2
    return xnew, y

x0, cur = userfunction([])
x = [x0]  # a sequence of points
while cur < -2e-16:
    # this should be parallelised
    # search for a new point higher than a threshold
    x1, next = userfunction(x)
    if next <= cur:
        # throw away (this branch is taken 99% of the time)
        pass
    else:
        cur = next
        print(cur)
        x.append(x1)  # note that userfunction depends on x
print(x)
I want to parallelise this (e.g. across a cluster), but the problem is that I need to terminate the other workers when a successful point has been found, or at least inform them of the new x (if they manage to get above the new threshold with an older x, the result is still acceptable). As long as no point has been successful, I need the workers to keep repeating.
I am looking for tools/frameworks which can handle this type of problem, in any scientific programming language (C, C++, Python, Julia, etc., no Fortran please).
Can this be solved with MPI semi-elegantly? I don't understand how I can inform/interrupt/update workers with MPI.
Update: added code comments to say that most tries are unsuccessful and do not influence the variable that userfunction depends on.
If userfunction() does not take too long, then here is an option that qualifies for "MPI semi-elegantly".
In order to keep things simple, let's assume rank 0 is only an orchestrator and does not compute anything.
on rank 0
cur = 0
x = []
while cur < -2e-16:
    MPI_Recv(buf=cur+x1, src=MPI_ANY_SOURCE)
    x.append(x1)
    MPI_Ibcast(buf=cur+x, root=0, request=req)
    MPI_Wait(request=req)
on rank != 0
x0, cur = userfunction([])
x = [x0]  # a sequence of points
while cur < -2e-16:
    MPI_Ibcast(buf=newcur+newx, root=0, request=req)
    # search for a new point higher than a threshold
    x1, next = userfunction(x)
    if next <= cur:
        # throw away (this branch is taken 99% of the time)
        MPI_Test(request=req, flag=found)
        if found:
            MPI_Wait(request)
    else:
        cur = next
        MPI_Send(buffer=cur+x1, dest=0)
        MPI_Wait(request)
Extra logic is needed to correctly handle:
- rank 0 doing computation too
- several ranks finding the solution at the same time; the subsequent messages must be consumed by rank 0
Strictly speaking, a task is not "interrupted" when a solution is found on another task. Instead, each task periodically checks whether the solution was found by another task. So there is a delay between the time a solution is found somewhere and the time all tasks stop looking for solutions, but if userfunction() does not take "too long", this looks very acceptable to me.
I solved it roughly with the following code.
This transmits only curmax at the moment, but one can send the other array with a second broadcast+tag.
import numpy
import time
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

import logging
logging.basicConfig(filename='mpitest%d.log' % rank, level=logging.DEBUG)
logFormatter = logging.Formatter("[%(name)s %(levelname)s]: %(message)s")
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
consoleHandler.setLevel(logging.INFO)
logging.getLogger().addHandler(consoleHandler)
log = logging.getLogger(__name__)

if rank == 0:
    curmax = numpy.random.random()
    seq = [curmax]
    log.info('%d broadcasting starting value %f...' % (rank, curmax))
    comm.Ibcast(numpy.array([curmax]))
    was_updated = False
    while True:
        # check if news available
        status = MPI.Status()
        a_avail = comm.iprobe(source=MPI.ANY_SOURCE, tag=12, status=status)
        if a_avail:
            sugg = comm.recv(source=status.Get_source(), tag=12)
            log.info('%d received new limit from %d: %s' % (rank, status.Get_source(), sugg))
            if sugg < curmax:
                curmax = sugg
                seq.append(curmax)
                log.info('%d updating to %s' % (rank, curmax))
                was_updated = True
            else:
                # ignore
                pass
        # check if next message is already waiting:
        if comm.iprobe(source=MPI.ANY_SOURCE, tag=12):
            # consume it first before broadcasting outdated info
            continue
        if was_updated:
            log.info('%d broadcasting new limit %f...' % (rank, curmax))
            comm.Ibcast(numpy.array([curmax]))
            was_updated = False
        else:
            # no message waiting for us and no broadcast done, so pause
            time.sleep(0.1)
else:
    log.info('%d waiting for root to send us starting value...' % (rank))
    nextmax = numpy.empty(1, dtype=float)
    comm.Ibcast(nextmax).Wait()
    amax = float(nextmax)

    numpy.random.seed(rank)
    update_req = comm.Ibcast(nextmax)
    while True:
        a = numpy.random.uniform()
        if a < amax:
            log.info('%d found new: %s, sending to root' % (rank, a))
            amax = a
            comm.isend(a, dest=0, tag=12)
        s = update_req.Get_status()
        # log.info('%d bcast status: %s' % (rank, s))
        if s:
            update_req.Wait()
            log.info('%d receiving new limit from root, %s' % (rank, nextmax))
            amax = float(nextmax)
            update_req = comm.Ibcast(nextmax)
Problem: I am struggling to understand/visualize the Dynamic Programming approach for "A type of balanced 0-1 matrix" in the "Dynamic Programming" Wikipedia article.
Wikipedia Link: https://en.wikipedia.org/wiki/Dynamic_programming#A_type_of_balanced_0.E2.80.931_matrix
I couldn't understand how the memoization works when dealing with a multidimensional array. For example, when trying to solve the Fibonacci series with DP, using an array to store previous state results is easy, as the index value of the array stores the solution for that state.
Can someone explain the DP approach for the "0-1 balanced matrix" in a simpler manner?
Wikipedia offered both a crappy explanation and a not ideal algorithm. But let's work with it as a starting place.
First let's take the backtracking algorithm. Rather than putting the cells of the matrix "in some order", let's do everything in the first row, then everything in the second row, then everything in the third row, and so on. Clearly that will work.
Now let's modify the backtracking algorithm slightly. Instead of going cell by cell, we'll go row by row. So we make a list of the n choose n/2 possible rows which are half 0 and half 1. Then have a recursive function that looks something like this:
def count_0_1_matrices(n, filled_rows=None):
    if filled_rows is None:
        filled_rows = []
    if some_column_exceeds_threshold(n, filled_rows):
        # Cannot have more than n/2 0s or 1s in any column
        return 0
    elif len(filled_rows) == n:
        # All n rows placed without breaking a column constraint
        return 1
    else:
        answer = 0
        for row in possible_rows(n):
            answer = answer + count_0_1_matrices(n, filled_rows + [row])
        return answer
This is a backtracking algorithm like what we had before. We are just doing whole rows at a time, not cells.
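possible_rows is left abstract in these snippets; one simple way to enumerate all rows with exactly n/2 ones is itertools.combinations (the complete program at the end of this answer uses a hand-rolled generator instead, but this is equivalent):

from itertools import combinations

def possible_rows(n):
    # All length-n 0/1 tuples containing exactly n // 2 ones.
    return [tuple(1 if i in ones else 0 for i in range(n))
            for ones in combinations(range(n), n // 2)]

print(possible_rows(2))        # [(1, 0), (0, 1)]
print(len(possible_rows(4)))   # 6, i.e. 4 choose 2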
But notice, we're passing around more information than we need. There is no need to pass in the exact arrangement of rows. All that we need to know is how many 1s are needed in each remaining column. So we can make the algorithm look more like this:
def count_0_1_matrices(n, still_needed=None):
    if still_needed is None:
        still_needed = [int(n/2) for _ in range(n)]
    # Did we overrun any column?
    for i in still_needed:
        if i < 0:
            return 0
    # Did we reach the end of our matrix?
    if 0 == sum(still_needed):
        return 1
    # Calculate the answer by recursion.
    answer = 0
    for row in possible_rows(n):
        next_still_needed = [still_needed[i] - row[i] for i in range(n)]
        answer = answer + count_0_1_matrices(n, next_still_needed)
    return answer
This version is almost the recursive function in the Wikipedia version. The main difference is that our base case is that after every row is finished we need nothing more, while Wikipedia would have us code up the base case to check the last row after every other one is done.
To get from this to a top-down DP, you only need to memoize the function, which in Python you can do by defining and then adding a @memoize decorator, like this:
from functools import wraps

def memoize(func):
    cache = {}
    @wraps(func)
    def wrap(*args):
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    return wrap
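For what it's worth, the standard library ships the same idea as functools.lru_cache (or functools.cache on Python 3.9+), which the last answer in this thread uses. Either way, the arguments have to be hashable, which is why the tuple(sorted(...)) conversions below matter. A tiny demonstration:

from functools import lru_cache

calls = 0

@lru_cache(maxsize=None)
def expensive(key):
    global calls
    calls += 1
    return sum(key)

expensive((1, 2, 3))
expensive((1, 2, 3))      # served from the cache, the body is not re-run
print(calls)              # 1
# expensive([1, 2, 3])    # would raise TypeError: lists are not hashable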
But remember that I criticized the Wikipedia algorithm? Let's start improving it! The first big improvement is this. Do you notice that the order of the elements of still_needed can't matter, just their values? So just sorting the elements will stop you from doing the calculation separately for each permutation. (There can be a lot of permutations!)
@memoize
def count_0_1_matrices(n, still_needed=None):
    if still_needed is None:
        still_needed = [int(n/2) for _ in range(n)]
    # Did we overrun any column?
    for i in still_needed:
        if i < 0:
            return 0
    # Did we reach the end of our matrix?
    if 0 == sum(still_needed):
        return 1
    # Calculate the answer by recursion.
    answer = 0
    for row in possible_rows(n):
        next_still_needed = [still_needed[i] - row[i] for i in range(n)]
        # tuple(...) so the memoized arguments stay hashable
        answer = answer + count_0_1_matrices(n, tuple(sorted(next_still_needed)))
    return answer
That little innocuous sorted doesn't look important, but it saves a lot of work! And now that we know that still_needed is always sorted, we can simplify our checks for whether we are done, and whether anything went negative. Plus we can add an easy check to filter out the case where we have too many 0s in a column.
@memoize
def count_0_1_matrices(n, still_needed=None):
    if still_needed is None:
        still_needed = [int(n/2) for _ in range(n)]
    # Did we overrun any column? (still_needed is sorted ascending, so check the minimum)
    if still_needed[0] < 0:
        return 0
    total = sum(still_needed)
    if 0 == total:
        # We reached the end of our matrix.
        return 1
    elif total*2/n < still_needed[-1]:
        # We have total*2/n rows left, but won't get enough 1s for a
        # column.
        return 0
    # Calculate the answer by recursion.
    answer = 0
    for row in possible_rows(n):
        next_still_needed = [still_needed[i] - row[i] for i in range(n)]
        answer = answer + count_0_1_matrices(n, tuple(sorted(next_still_needed)))
    return answer
And, assuming you implement possible_rows, this should both work and be significantly more efficient than what Wikipedia offered.
=====
Here is a complete working implementation. On my machine it calculated the 6'th term in under 4 seconds.
#! /usr/bin/env python

from sys import argv
from functools import wraps

def memoize(func):
    cache = {}
    @wraps(func)
    def wrap(*args):
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    return wrap

@memoize
def count_0_1_matrices(n, still_needed=None):
    if 0 == n:
        return 1
    if still_needed is None:
        still_needed = [int(n/2) for _ in range(n)]
    # Did we overrun any column?
    if still_needed[0] < 0:
        return 0
    total = sum(still_needed)
    if 0 == total:
        # We reached the end of our matrix.
        return 1
    elif total*2/n < still_needed[-1]:
        # We have total*2/n rows left, but won't get enough 1s for a
        # column.
        return 0
    # Calculate the answer by recursion.
    answer = 0
    for row in possible_rows(n):
        next_still_needed = [still_needed[i] - row[i] for i in range(n)]
        answer = answer + count_0_1_matrices(n, tuple(sorted(next_still_needed)))
    return answer

@memoize
def possible_rows(n):
    return [row for row in _possible_rows(n, n/2)]

def _possible_rows(n, k):
    if 0 == n:
        yield tuple()
    else:
        if k < n:
            for row in _possible_rows(n-1, k):
                yield tuple(row + (0,))
        if 0 < k:
            for row in _possible_rows(n-1, k-1):
                yield tuple(row + (1,))

n = 2
if 1 < len(argv):
    n = int(argv[1])
print(count_0_1_matrices(2*n))
You're memoizing states that are likely to be repeated. The state that needs to be remembered in this case is the vector (k is implicit). Let's look at one of the examples you linked to. Each pair in the vector argument (of length n) represents "the number of zeros and ones that have yet to be placed in that column."
Take the example on the left, where the vector is ((1, 1), (1, 1), (1, 1), (1, 1)) at k = 2: the assignments leading to it were 1 0 1 0 at k = 3 and 0 1 0 1 at k = 4. But we could get to the same state, ((1, 1), (1, 1), (1, 1), (1, 1)) at k = 2, from a different set of assignments, for example 0 1 0 1 at k = 3 and 1 0 1 0 at k = 4. If we memoize the result for the state ((1, 1), (1, 1), (1, 1), (1, 1)), we can avoid recalculating the recursion for that branch again.
Please let me know if there's anything I could better clarify.
Further elaboration in response to your comment:
The Wikipedia example seems to be pretty much a brute force with memoization. The algorithm seems to attempt to enumerate all the matrices but uses memoization to exit early from repeated states. How do we enumerate all possibilities? To take their example, n = 4, we start with the vector [(2,2),(2,2),(2,2),(2,2)] where zeros and ones are yet to be placed. (Since the sum of each tuple in the vector is k, we could have a simpler vector where k and the count of either ones or zeros is maintained.)
At every stage, k, in the recursion, we enumerate all possible configurations for the next vector. If the state exists in our hash, we simply return the value for that key. Otherwise, we assign the vector as a new key in the hash (in which case this recursion branch will continue).
For example:
Vector [(2,2),(2,2),(2,2),(2,2)]
Possible assignments of 1's: [1 1 0 0], [1 0 1 0], [1 0 0 1] ... etc.
First branch: [(2,1),(2,1),(1,2),(1,2)]
is this vector a key in the hash?
if yes, return value lookup
else, assign this vector as a key in the hash where the value is the sum
of the function calls with the next possible vectors as their arguments
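A minimal self-contained version of that idea, memoizing on the sorted "ones still needed per column" vector (my own illustration, not the Wikipedia code; it should print the known counts 2 and 90 for the 2x2 and 4x4 cases):

from functools import lru_cache
from itertools import combinations

def count_balanced(n):
    # All rows with exactly n // 2 ones.
    rows = [tuple(1 if i in ones else 0 for i in range(n))
            for ones in combinations(range(n), n // 2)]

    @lru_cache(maxsize=None)
    def go(still_needed):
        if all(v == 0 for v in still_needed):
            return 1
        total = 0
        for row in rows:
            nxt = tuple(sorted(s - r for s, r in zip(still_needed, row)))
            if nxt[0] >= 0:   # no column received more ones than it still needed
                total += go(nxt)
        return total

    return go(tuple([n // 2] * n))

print(count_balanced(2), count_balanced(4))   # 2 90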
Building on the excellent answer by https://stackoverflow.com/users/585411/btilly, I've updated their algorithm to exclude "0" cases in the still_needed tuple. The code is about 50% faster, largely because of more cache hits on the collapsible tuple.
import time
from typing import Tuple
from sys import argv
from functools import cache

@cache
def possible_rows(n, k=None) -> Tuple[int]:
    if k is None:
        k = n / 2
    return [row for row in _possible_rows(n, k)]

def _possible_rows(n, k) -> Tuple[int]:
    if 0 == n:
        yield tuple()
    else:
        if k < n:
            for row in _possible_rows(n-1, k):
                yield tuple(row + (0,))
        if 0 < k:
            for row in _possible_rows(n-1, k-1):
                yield tuple(row + (1,))

def count(n: int, k: int) -> int:
    if n == 0:
        return 1
    still_needed = tuple([k] * n)
    return count_0_1_matrices(k, still_needed)

@cache
def count_0_1_matrices(k: int, still_needed: Tuple[int]):
    """
    Assume still_needed contains only positive ints, and is sorted ascending
    """
    # Calculate the answer by recursion.
    answer = 0
    for row in possible_rows(len(still_needed), k):
        # Decrement the still_needed value tuple by the row tuple and only keep
        # positive results. Sorting is important for cache hits.
        next_still_needed = tuple(sorted([sn - r for sn, r in zip(still_needed, row) if sn > r]))
        # Only continue if we still need values and there are enough rows left
        if not next_still_needed:
            answer += 1
        elif len(next_still_needed) >= k and sum(next_still_needed) >= next_still_needed[-1] * k:
            # sum / k -> how many rows left. We need enough rows left to continue down this path.
            answer += count_0_1_matrices(k, next_still_needed)
    return answer

if __name__ == "__main__":
    n = 7
    if 1 < len(argv):
        n = int(argv[1])
    start = time.time()
    result = count(2*n, n)
    print(f"{result} in {time.time() - start} seconds")
Wondering what is the best way to solve this problem: randomly play a song from a list of given songs in such a way that no song is repeated until all the songs have been played.
My algorithm basically calls get_random on the reduced set of remaining songs to find the next song to play. get_random rounds the count up to a power of 2 and then repeatedly halves the range.
Is this the best I can do, or are there better algorithms I could come up with? I just need the idea.
import random
import math

def get_random(number):
    if number == 0:
        return number
    if number == 1:
        return number
    # make the number a power of 2 (log2, so the range covers the whole list)
    orig_no = number
    number = 1 << (math.floor(math.log2(number)) + 1)
    left = 0
    right = number
    # check if the toss falls in the current half and then change the half for the next step.
    # the half sizes go 1, 2, 4, 8, 16, 32, 64, 128
    while left < right:
        toss_value = random.randint(0, 1)  # fair coin toss (f1() in the original post)
        if toss_value:
            right = math.floor((left + right) / 2)
        else:
            left = math.floor((left + right) / 2) + 1
    if left >= orig_no:
        return get_random(orig_no)
    else:
        return left

songs = ["i am here", "your are beautiful", "soorry", "i am happy", "where am i",
         "what did i do", "nothing wrong with you"]
for i in range(0, len(songs) - 1):
    value = get_random(len(songs))
    print(songs[value])
    songs.pop(value)
Shuffle the array of songs, play all the songs, and shuffle it again....
You can use Fisher–Yates shuffle to shuffle the array. Since you are using Python, there's already random.shuffle for you.
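A minimal sketch of that approach (the generator name is just for illustration):

import random

def shuffled_playlist(songs):
    # Yield every song exactly once in random order, then reshuffle and repeat,
    # so no song repeats until the whole list has been played.
    order = list(songs)
    while True:
        random.shuffle(order)   # in-place Fisher-Yates shuffle, O(n)
        yield from order

player = shuffled_playlist(["a", "b", "c", "d"])
print([next(player) for _ in range(8)])   # two full passes through the list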