Breadth-first algorithm implementation

I am trying to implement a "Breadth-First" Algorithm as a variation of something I've seen in a book.
My issue is that the algorithm is not adding the elements of every node into the queue.
For instance, if I search for "black lab" under the name 'mariela' in the search() function, I get the correct output: "simon is a black lab".
However, I ought to be able to look for "black lab" starting from "walter", which is connected to "mariela", which is connected to "simon", who is a "black lab". This is not working.
Have I made a rookie mistake in my implementation of this algorithm, or have I set up my graph wrong?
As always, any/all help is much appreciated!
from collections import deque

# TEST GRAPH -------------
graph = {}
graph['walter'] = ['luci', 'kaiser', 'andrea', 'mariela']
graph['andrea'] = ['echo', 'dante', 'walter', 'mariela']
graph['mariela'] = ['ginger', 'simon', 'walter', 'andrea']
graph['kaiser'] = 'german shepherd'
graph['luci'] = 'black cat'
graph['echo'] = 'pitbull'
graph['dante'] = 'pitbull'
graph['ginger'] = 'orange cat'
graph['simon'] = 'black lab'

def condition_met(name):
    if graph[name] == 'black lab':
        return name

def search(name):
    search_queue = deque()
    search_queue += graph[name]  # add all elements of "name" to queue
    searchedAlready = []  # holding array for people already searched through
    while search_queue:  # while queue not empty...
        person = search_queue.popleft()  # pull 1st person from queue
        if person not in searchedAlready:  # if person hasn't been searched through yet...
            if condition_met(person):
                print person + ' is a black labrador'
                return True
            else:
                search_queue += graph[person]
                searchedAlready.append(person)
    return False

search('walter')
#search('mariela')

You have several problems in your implementation, both Python-related and algorithmic.
Rewrite it as:
# @param graph graph to search
# @param start the node to start at
# @param value the value to search for
def search(graph, start, value):
    explored = []
    queue = [start]
    while len(queue) > 0:
        # next node to explore (pop from the front for breadth-first order)
        node = queue.pop(0)
        # only explore if not already explored
        if node not in explored:
            # node found, search complete
            if node == value:
                return True
            # add children of node to queue
            else:
                explored.append(node)
                queue.extend(graph.get(node, []))  # extend is faster than concat (+=); leaf values have no children
    return False
graph = {}
graph['walter'] = ['luci', 'kaiser', 'andrea', 'mariela']
graph['andrea'] = ['echo', 'dante', 'walter', 'mariela']
graph['mariela'] = ['ginger', 'simon', 'walter', 'andrea']
# children should be a list
graph['kaiser'] = ['german shepherd']
graph['luci'] = ['black cat']
graph['echo'] = ['pitbull']
graph['dante'] = ['pitbull']
graph['ginger'] = ['orange cat']
graph['simon'] = ['black lab']
print search(graph, 'mariela', 'walter')
Here is a demo https://repl.it/IkRA/0
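Since the original attempt already used collections.deque, here is a minimal sketch of the same idea with popleft(), which gives first-in-first-out (breadth-first) order. bfs_search is just an illustrative name, the graph dictionary is the list-valued one defined above, and the print uses Python 3 syntax:
from collections import deque

def bfs_search(graph, start, value):
    explored = set()
    queue = deque([start])  # popleft() gives true FIFO / breadth-first order
    while queue:
        node = queue.popleft()
        if node in explored:
            continue
        if node == value:
            return True
        explored.add(node)
        queue.extend(graph.get(node, []))  # nodes with no entry (e.g. 'black lab') have no children

    return False

print(bfs_search(graph, 'walter', 'black lab'))  # True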

Related

How to add a maximum travel time duration for the sum of all routes in VRP Google OR-TOOLS

I am new to programming and used Google OR-Tools to create my VRP model. In my current model, I have included a general time window and a capacity constraint per vehicle, creating a capacitated vehicle routing problem with time windows. I followed the OR-Tools guides, which include a maximum travel duration for each vehicle.
However, I want to include a maximum travel duration for the sum of all routes, whereas the maximum travel duration for each vehicle does not matter (so I set it to 100,000). Accordingly, I want to create something in the model/solution printer that tells me how many addresses could not be visited due to the constraint on the maximum travel duration for the sum of all routes. From the examples I have seen I think it would be fairly easy, but my knowledge of programming is limited, so my attempts have not succeeded. Can anyone help me?
import pandas as pd
import openpyxl
import numpy as np
import math
from random import sample
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
from scipy.spatial.distance import squareform, pdist
from haversine import haversine
#STEP - create data
# import/read excel file
data = pd.read_excel(r'C:\Users\Jean-Paul\Documents\Thesis\OR TOOLS\Data.xlsx', engine = 'openpyxl')
df = pd.DataFrame(data, columns= ['number','lat','lng']) # create dataframe with 10805 addresses + address of the depot
#print (df)
# randomly sample X addresses from the dataframe and their corresponding number/latitude/longtitude
df_sample = df.sample(n=100)
#print (df_data)
# read first row of the excel file (= coordinates of the depot)
df_depot = pd.DataFrame(data, columns= ['number','lat','lng']).iloc[0:1]
#print (df_depot)
# combine dataframe of depot and sample into one dataframe
df_data = pd.concat([df_depot, df_sample], ignore_index=True, sort=False)
#print (df_data)
#STEP - create distance matrix data
# determine distance between latitude and longtitude
df_data.set_index('number', inplace=True)
matrix_distance = pd.DataFrame(squareform(pdist(df_data, metric=haversine)), index=df_data.index, columns=df_data.index)
matrix_list = np.array(matrix_distance)
#print (matrix_distance) # create table of distances between addresses including headers
#print (matrix_list) # converting table to list of lists and exclude headers
#STEP - create time matrix data
travel_time = matrix_list / 15 * 60 # divide distance by travel speed 15 km/h and multiply by 60 minutes
#print (travel_time) # converting distance matrix to travel time matrix
#STEP - create time window data
# create list for each sample - couriers have to visit this address within 0-X minutes of time using a list of lists
window_range = []
for i in range(len(df_data)):
    list = [0, 240]
    window_range.append(list) # create list of lists with a time window range for each address
#print (window_range)
#STEP - create demand data
# create list for each sample - all addresses demand 1 parcel except the depot
demand_range = []
for i in range(len(df_data.iloc[0:1])):
    list = 0
    demand_range.append(list)
for j in range(len(df_data.iloc[1:])):
    list2 = 1
    demand_range.append(list2)
#print (demand_range)
#STEP - create fleet size data # amount of vehicles in the fleet
fleet_size = 6
#print (fleet_size)
#STEP - create capacity data for each vehicle
fleet_capacity = []
for i in range(fleet_size): # capacity per vehicle
    list = 20
    fleet_capacity.append(list)
#print (fleet_capacity)
#STEP - create data model that stores all data for the problem
def create_data_model():
    data = {}
    data['time_matrix'] = travel_time
    data['time_windows'] = window_range
    data['num_vehicles'] = fleet_size
    data['depot'] = 0 # index of the depot
    data['demands'] = demand_range
    data['vehicle_capacities'] = fleet_capacity
    return data
#STEP - creating the solution printer
def print_solution(data, manager, routing, solution):
    """Prints solution on console."""
    print(f'Objective: {solution.ObjectiveValue()}')
    time_dimension = routing.GetDimensionOrDie('Time')
    total_time = 0
    for vehicle_id in range(data['num_vehicles']):
        index = routing.Start(vehicle_id)
        plan_output = 'Route for vehicle {}:\n'.format(vehicle_id)
        while not routing.IsEnd(index):
            time_var = time_dimension.CumulVar(index)
            plan_output += '{0} Time({1},{2}) -> '.format(
                manager.IndexToNode(index), solution.Min(time_var),
                solution.Max(time_var))
            index = solution.Value(routing.NextVar(index))
        time_var = time_dimension.CumulVar(index)
        plan_output += '{0} Time({1},{2})\n'.format(manager.IndexToNode(index),
                                                    solution.Min(time_var),
                                                    solution.Max(time_var))
        plan_output += 'Time of the route: {}min\n'.format(
            solution.Min(time_var))
        print(plan_output)
        total_time += solution.Min(time_var)
    print('Total time of all routes: {}min'.format(total_time))
#STEP - create the VRP solver
def main():
    # instantiate the data problem
    data = create_data_model()
    # create the routing index manager
    manager = pywrapcp.RoutingIndexManager(len(data['time_matrix']),
                                           data['num_vehicles'], data['depot'])
    # create routing model
    routing = pywrapcp.RoutingModel(manager)

    #STEP - create demand callback and dimension for capacity
    # create and register a unary transit callback for the demands
    def demand_callback(from_index):
        """Returns the demand of the node."""
        # convert from routing variable Index to demands NodeIndex
        from_node = manager.IndexToNode(from_index)
        return data['demands'][from_node]
    demand_callback_index = routing.RegisterUnaryTransitCallback(
        demand_callback)
    routing.AddDimensionWithVehicleCapacity(
        demand_callback_index,
        0,  # null capacity slack
        data['vehicle_capacities'],  # vehicle maximum capacities
        True,  # start cumul to zero
        'Capacity')

    #STEP - create time callback
    # create and register a transit callback
    def time_callback(from_index, to_index):
        """Returns the travel time between the two nodes."""
        # convert from routing variable Index to time matrix NodeIndex
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return data['time_matrix'][from_node][to_node]
    transit_callback_index = routing.RegisterTransitCallback(time_callback)
    # define cost of each arc (costs in terms of travel time)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

    #STEP - create a dimension for the travel time (TIMEWINDOW) - a dimension keeps track of quantities that accumulate over a vehicle's route
    # add time windows constraint
    time = 'Time'
    routing.AddDimension(
        transit_callback_index,
        2,  # allow waiting time (does not have an influence in this model)
        100000,  # maximum total route length in minutes per vehicle (does not have an influence because of capacity constraint)
        False,  # do not force start cumul to zero
        time)
    time_dimension = routing.GetDimensionOrDie(time)
    # add time window constraints for each location except depot
    for location_idx, time_window in enumerate(data['time_windows']):
        if location_idx == data['depot']:
            continue
        index = manager.NodeToIndex(location_idx)
        time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
    # add time window constraint for each vehicle start node
    depot_idx = data['depot']
    for vehicle_id in range(data['num_vehicles']):
        index = routing.Start(vehicle_id)
        time_dimension.CumulVar(index).SetRange(
            data['time_windows'][depot_idx][0],
            data['time_windows'][depot_idx][1])

    #STEP - instantiate route start and end times to produce feasible times
    for i in range(data['num_vehicles']):
        routing.AddVariableMinimizedByFinalizer(
            time_dimension.CumulVar(routing.Start(i)))
        routing.AddVariableMinimizedByFinalizer(
            time_dimension.CumulVar(routing.End(i)))

    #STEP - setting default search parameters and a heuristic method for finding the first solution
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)

    #STEP - solve the problem with the search parameters and print solution
    solution = routing.SolveWithParameters(search_parameters)
    if solution:
        print_solution(data, manager, routing, solution)

if __name__ == '__main__':
    main()
See @Mizux's answer, which goes under the hood of the solver to build a summed cost over all vehicle route lengths:
https://stackoverflow.com/a/68756570/13773745
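For readers who only want the rough shape of that approach, here is a hedged sketch (not the linked answer verbatim) of how one might bound the sum of all route end times inside main() after the 'Time' dimension is created, and then list the addresses the solver decided to drop. max_total_time, drop_penalty and dropped_nodes are illustrative names and values, not part of the question's code:
# assumed values for illustration only
max_total_time = 1000   # shared time budget in minutes for the whole fleet
drop_penalty = 10000    # cost of skipping an address

# allow the solver to drop any non-depot node at a penalty
for node in range(1, len(data['time_matrix'])):
    routing.AddDisjunction([manager.NodeToIndex(node)], drop_penalty)

# bound the sum of the per-vehicle route end times
solver = routing.solver()
route_ends = [time_dimension.CumulVar(routing.End(v))
              for v in range(data['num_vehicles'])]
solver.Add(solver.Sum(route_ends) <= max_total_time)

# after solving, report which addresses were dropped
def dropped_nodes(routing, manager, solution):
    dropped = []
    for index in range(routing.Size()):
        if routing.IsStart(index) or routing.IsEnd(index):
            continue
        if solution.Value(routing.NextVar(index)) == index:
            dropped.append(manager.IndexToNode(index))
    return dropped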

Understanding the distance metric in company name matching using KNN

I am trying to understand the following code that I found for matching a messy list of company names to a clean list of company names. My question is what the 'Ratio' metric is calculated from. It appears that the ratio comes from scorer = fuzz.token_sort_ratio, which I understand is part of the fuzzywuzzy package and is therefore a Levenshtein-based calculation, correct? I'm trying to understand why the author uses this as the scorer rather than the distance output from KNN. When I try changing the metric inside NearestNeighbors, it doesn't appear to change the results. Does the metric in NearestNeighbors matter then?
Original article:
https://audhiaprilliant.medium.com/fuzzy-string-matching-optimization-using-tf-idf-and-knn-b07fce69b58f
# imports needed by the snippet (not shown in the article excerpt)
from typing import Tuple
import itertools
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
from fuzzywuzzy import fuzz, process
# preprocess_string is defined elsewhere in the article

def build_vectorizer(
    clean: pd.Series,
    analyzer: str = 'char',
    ngram_range: Tuple[int, int] = (1, 4),
    n_neighbors: int = 1,
    **kwargs
) -> Tuple:
    # Create vectorizer
    vectorizer = TfidfVectorizer(analyzer = analyzer, ngram_range = ngram_range, **kwargs)
    X = vectorizer.fit_transform(clean.values.astype('U'))
    # Fit nearest neighbors corpus
    nbrs = NearestNeighbors(n_neighbors = n_neighbors, metric = 'cosine').fit(X)
    return vectorizer, nbrs

# String matching - KNN
def tfidf_nn(
    messy,
    clean,
    n_neighbors = 1,
    **kwargs
):
    # Fit clean data and transform messy data
    vectorizer, nbrs = build_vectorizer(clean, n_neighbors = n_neighbors, **kwargs)
    input_vec = vectorizer.transform(messy)
    # Determine best possible matches
    distances, indices = nbrs.kneighbors(input_vec, n_neighbors = n_neighbors)
    nearest_values = np.array(clean)[indices]
    return nearest_values, distances

# String matching - match fuzzy
def find_matches_fuzzy(
    row,
    match_candidates,
    limit = 5
):
    row_matches = process.extract(
        row, dict(enumerate(match_candidates)),
        scorer = fuzz.token_sort_ratio,
        limit = limit
    )
    result = [(row, match[0], match[1]) for match in row_matches]
    return result

# String matching - TF-IDF
def fuzzy_nn_match(
    messy,
    clean,
    column,
    col,
    n_neighbors = 100,
    limit = 5, **kwargs):
    nearest_values, _ = tfidf_nn(messy, clean, n_neighbors, **kwargs)
    results = [find_matches_fuzzy(row, nearest_values[i], limit) for i, row in enumerate(messy)]
    df = pd.DataFrame(itertools.chain.from_iterable(results),
                      columns = [column, col, 'Ratio']
                      )
    return df

# String matching - Fuzzy
def fuzzy_tf_idf(
    df: pd.DataFrame,
    column: str,
    clean: pd.Series,
    mapping_df: pd.DataFrame,
    col: str,
    analyzer: str = 'char',
    ngram_range: Tuple[int, int] = (1, 3)
) -> pd.Series:
    # Create vectorizer
    clean = clean.drop_duplicates().reset_index(drop = True)
    messy_prep = df[column].drop_duplicates().dropna().reset_index(drop = True).astype(str)
    messy = messy_prep.apply(preprocess_string)
    result = fuzzy_nn_match(messy = messy, clean = clean, column = column, col = col, n_neighbors = 1)
    # Map value from messy to clean
    return result
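To make the two numbers concrete, here is a small self-contained sketch (with made-up company names) that prints both the cosine distance NearestNeighbors computes on the TF-IDF character n-grams and the token_sort_ratio score that process.extract uses as the 'Ratio':
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
from fuzzywuzzy import fuzz

clean_names = ['Acme Corporation', 'Globex Inc']   # made-up examples
messy_name = ['acme corp']

vec = TfidfVectorizer(analyzer='char', ngram_range=(1, 4))
X = vec.fit_transform(clean_names)
nbrs = NearestNeighbors(n_neighbors=1, metric='cosine').fit(X)
dist, idx = nbrs.kneighbors(vec.transform(messy_name))
print(dist[0][0], clean_names[idx[0][0]])          # cosine distance on TF-IDF n-grams

print(fuzz.token_sort_ratio('acme corp', 'Acme Corporation'))  # 0-100 edit-distance-based ratio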

Algorithm used to prevent a player from getting their same card

I have a game I am coding in Tabletop Simulator where every player (P) is given a card (C). Once memorized, all players put their cards back into the deck (D), which is shuffled, and then each player is dealt one of the cards from the same deck (D). I am trying to code the simplest algorithm that prevents a player from getting their own card back. I assume this should be simple to code directly, rather than re-running simulations until one happens to succeed.
Say you have the following:
deck, a randomized deck containing all the cards (including those the players have seen).
seen_card_id_by_player, a lookup table that gives you the guid of the card a player has seen.
Then the solution is simply
local card_ids = {}
for i, card_data in ipairs(deck.getObjects()) do
    table.insert(card_ids, card_data.guid)
end

for player, seen_card_id in pairs(seen_card_id_by_player) do
    local card_id = table.remove(card_ids)
    if card_id == seen_card_id then
        -- Put the seen card back at a random position and take that card instead.
        local i = math.random(1, #card_ids)
        card_ids[i], card_id = card_id, card_ids[i]
    end
    -- Deal the specific card.
    deck.takeObject({
        guid = card_id,
        position = player.getHandTransform().position,
        flip = true,
    })
end
When we pick the card the player has already seen, it is placed back at a random location among the remaining cards. This ensures that every card has an equal chance of being drawn by the next player. This is the underlying principle of the Fisher-Yates shuffle.
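(As a quick aside, a minimal Python sketch of the Fisher-Yates idea being referenced, for readers less familiar with it; the game code itself stays in Lua.)
import random

def fisher_yates_shuffle(items):
    # Walk from the back of the list; each slot receives a uniformly random
    # element chosen from the ones not yet locked in.
    for i in range(len(items) - 1, 0, -1):
        j = random.randint(0, i)
        items[i], items[j] = items[j], items[i]
    return items

print(fisher_yates_shuffle(list('ABCDE')))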
Full demonstration
function broadcast_error(msg)
    broadcastToAll(msg, { r=1, g=0, b=0 })
end

function get_cards_seen_by_players()
    local player_ids = Player.getAvailableColors()
    local error = false
    local seen_card_by_player = {}
    for i, player_id in ipairs(player_ids) do
        local player = Player[player_id]
        local hand_objs = player.getHandObjects()
        local player_error = false
        if #hand_objs > 1 then
            player_error = true
        elseif #hand_objs == 1 then
            local card = hand_objs[1]
            if card.tag ~= "Card" then
                player_error = true
            else
                seen_card_by_player[player] = card
            end
        end
        if player_error then
            broadcast_error(player_id .. " doesn't have a valid hand.")
            error = true
        end
    end
    if error then
        return nil
    end
    return seen_card_by_player
end

function run()
    local deck = getObjectFromGUID("...")
    local seen_card_by_player = get_cards_seen_by_players()
    if seen_card_by_player == nil or next(seen_card_by_player) == nil then
        return
    end

    local seen_card_id_by_player = {}
    for player, card in pairs(seen_card_by_player) do
        local card_id = card.guid
        seen_card_id_by_player[player] = card_id
        card.putObject(deck)
    end
    deck.randomize()

    local card_ids = {}
    for i, card_data in ipairs(deck.getObjects()) do
        table.insert(card_ids, card_data.guid)
    end
    for player, seen_card_id in pairs(seen_card_id_by_player) do
        local card_id = table.remove(card_ids)
        if card_id == seen_card_id then
            -- Put the seen card back at a random position and take that card instead.
            local i = math.random(1, #card_ids)
            card_ids[i], card_id = card_id, card_ids[i]
        end
        deck.takeObject({
            guid = card_id,
            position = player.getHandTransform().position,
            flip = true,
        })
    end
end
Create a game with a deck of cards. Place the above code in Global, replacing ... with the deck's GUID. To run the demonstration, deal one card to any number of players, then use /execute Global.call("run") in the chat window.

How can I get the score from Question-Answer Pipeline? Is there a bug when Question-answer pipeline is used?

When I run the following code
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
model = AutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
text = r"""
As checked Dis is not yet on boarded to ARB portal, hence we cannot upload the invoices in portal
"""
questions = [
"Dis asked if it is possible to post the two invoice in ARB.I have not access so I wanted to check if you would be able to do it.",
]
for question in questions:
    inputs = tokenizer.encode_plus(question, text, add_special_tokens=True, return_tensors="pt")
    input_ids = inputs["input_ids"].tolist()[0]
    text_tokens = tokenizer.convert_ids_to_tokens(input_ids)
    answer_start_scores, answer_end_scores = model(**inputs)
    answer_start = torch.argmax(
        answer_start_scores
    )  # Get the most likely beginning of answer with the argmax of the score
    answer_end = torch.argmax(answer_end_scores) + 1  # Get the most likely end of answer with the argmax of the score
    answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
    print(f"Question: {question}")
    print(f"Answer: {answer}\n")
The answer that I get here is:
Question: Dis asked if it is possible to post the two invoice in ARB.I have not access so I wanted to check if you would be able to do it.
Answer: dis is not yet on boarded to ARB portal
How do I get a score for this answer? By score I mean something similar to what I get when I run the question-answering pipeline.
I have to take this approach since the question-answering pipeline gives me a KeyError for the code below:
from transformers import pipeline
nlp = pipeline("question-answering")
context = r"""
As checked Dis is not yet on boarded to ARB portal, hence we cannot upload the invoices in portal.
"""
print(nlp(question="Dis asked if it is possible to post the two invoice in ARB?", context=context))
This is my attempt to get the score. I cannot figure out what feature.p_mask is, so for now I could not remove the non-context indexes that contribute to the softmax.
# ... assuming imports and question and context
model_name="deepset/roberta-base-squad2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
inputs = tokenizer(question, context,
                   add_special_tokens=True,
                   return_tensors='pt')
input_ids = inputs['input_ids'].tolist()[0]
outputs = model(**inputs)
# used to compute score
start = outputs.start_logits.detach().numpy()
end = outputs.end_logits.detach().numpy()
# from source code
# Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
#?? undesired_tokens = np.abs(np.array(feature.p_mask) - 1) & feature.attention_mask
# Generate mask
undesired_tokens = inputs['attention_mask']
undesired_tokens_mask = undesired_tokens == 0.0
# Make sure non-context indexes in the tensor cannot contribute to the softmax
start_ = np.where(undesired_tokens_mask, -10000.0, start)
end_ = np.where(undesired_tokens_mask, -10000.0, end)
# Normalize logits and spans to retrieve the answer
start_ = np.exp(start_ - np.log(np.sum(np.exp(start_), axis=-1, keepdims=True)))
end_ = np.exp(end_ - np.log(np.sum(np.exp(end_), axis=-1, keepdims=True)))
# Compute the score of each tuple(start, end) to be the real answer
outer = np.matmul(np.expand_dims(start_, -1), np.expand_dims(end_, 1))
# Remove candidate with end < start and end - start > max_answer_len
max_answer_len = 15
candidates = np.tril(np.triu(outer), max_answer_len - 1)
scores_flat = candidates.flatten()
idx_sort = [np.argmax(scores_flat)]
start, end = np.unravel_index(idx_sort, candidates.shape)[1:]
end += 1
score = candidates[0, start, end-1]
start, end, score = start.item(), end.item(), score.item()
print(tokenizer.decode(input_ids[start:end]))
print(score)
See the pipeline's source code for more detail.
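For what it's worth, one rough way to attach a pipeline-style score to the first snippet above is to softmax the logits and multiply the probabilities of the chosen start and end positions. This is only a sketch: it reuses answer_start_scores, answer_end_scores, answer_start and answer_end from that loop, and it skips the p_mask handling the real pipeline performs:
import torch

# Turn the raw logits into probabilities over token positions.
start_probs = torch.softmax(answer_start_scores, dim=-1)
end_probs = torch.softmax(answer_end_scores, dim=-1)

# Rough pipeline-style score: probability of the chosen start times the chosen end.
# (answer_end was defined as argmax + 1 above, hence the -1 here.)
score = (start_probs[0, answer_start] * end_probs[0, answer_end - 1]).item()
print(f"Score: {score:.4f}")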

You are given a list of names and a list of email addresses. How would you automatically assign the 'best' email?

I am having a bit of trouble with the runtime of an algorithm that matches names with the most likely email address. The function itself works well (in that it pairs the name and email address correctly), but the runtime is so large that it is impractical on big data sets. I am a beginner at coding and would love to hear what solutions you might offer.
Quick note: I implemented the Levenshtein algorithm here. If there are more efficient algorithms, comment below!
from string import digits
import copy
import re

# levenshtein algorithm found on https://www.python-course.eu/levenshtein_distance.php
def call_counter(func):
    def helper(*args, **kwargs):
        helper.calls += 1
        return func(*args, **kwargs)
    helper.calls = 0
    helper.__name__ = func.__name__
    return helper

def memoize(func):
    mem = {}
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in mem:
            mem[key] = func(*args, **kwargs)
        return mem[key]
    return memoizer

@call_counter
@memoize
def levenshtein(s, t):
    if s == "":
        return len(t)
    if t == "":
        return len(s)
    if s[-1] == t[-1]:
        cost = 0
    else:
        cost = 1
    res = min([levenshtein(s[:-1], t)+1,
               levenshtein(s, t[:-1])+1,
               levenshtein(s[:-1], t[:-1]) + cost])
    return res
def emailmatch(emails_file, name_file):
    name_email_match = {}  # store the matching emails in a dictionary
    with open(name_file, 'r') as names:
        match_name = 0
        for individual in names:
            with open(emails_file, 'r') as address_emails:
                first_name = individual[:(individual.index(" "))].lower()
                last_name = individual[(individual.rindex(" ")):].lower()
                full_name = (first_name + last_name).lower()
                full_name_period = (first_name + "." + last_name).lower()
                best_match = ""  # this holds the best matching email
                minimum = 999
                for emails in address_emails:
                    email = emails[0:(emails.index('@'))]
                    temp = min(levenshtein(last_name, email),
                               levenshtein(first_name, email),
                               levenshtein(full_name, email),
                               levenshtein(full_name_period, email))
                    if (temp < minimum):
                        minimum = temp
                        best_match = emails
                name_email_match[individual] = best_match
    return name_email_match

emailmatch('emails.txt', 'names.txt')
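As a possible speed-up for the recursion above (which repeats an enormous amount of work unless the memoize cache hits, and even then builds large string keys), the classic two-row dynamic-programming version runs in O(len(s) * len(t)) time with only two rows of memory. levenshtein_iterative is an illustrative name, not part of the original code:
def levenshtein_iterative(s, t):
    # Classic dynamic-programming Levenshtein: O(len(s) * len(t)) time, two rows of memory.
    if len(s) < len(t):
        s, t = t, s  # distance is symmetric; keep the shorter string as the row
    previous = list(range(len(t) + 1))
    for i, cs in enumerate(s, 1):
        current = [i]
        for j, ct in enumerate(t, 1):
            current.append(min(previous[j] + 1,                  # deletion
                               current[j - 1] + 1,               # insertion
                               previous[j - 1] + (cs != ct)))    # substitution
        previous = current
    return previous[-1]

print(levenshtein_iterative('jane.doe', 'janedoe'))  # 1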
