Creating a nested dictionary from a flat list with Python - algorithm

I have a list of files in this form:
base/images/graphs/one.png
base/images/tikz/two.png
base/refs/images/three.png
base/one.txt
base/chapters/two.txt
I would like to convert them to a nested dictionary of this sort:
{ "name": "base" , "contents":
[{"name": "images" , "contents":
[{"name": "graphs", "contents":[{"name":"one.png"}] },
{"name":"tikz", "contents":[{"name":"two.png"}]}
]
},
{"name": "refs", "contents":
[{"name":"images", "contents": [{"name":"three.png"}]}]
},
{"name":"one.txt", },
{"name": "chapters", "contents":[{"name":"two.txt"}]
]
}
The trouble is with my attempted solution: given input like "images/datasetone/grapha.png" and "images/datasetone/graphb.png", each file ends up in a separate dictionary named "datasetone", whereas I would like both to be in the same parent dictionary, since they are in the same directory. How do I create this nested structure without duplicating parent dictionaries when more than one file shares a common path?
Here is what I had come up with, which fails:
def path_to_tree(params):
    start = {}
    for item in params:
        parts = item.split('/')
        depth = len(parts)
        if depth > 1:
            if "contents" in start.keys():
                start["contents"].append(create_base_dir(parts[0], parts[1:]))
            else:
                start["contents"] = [create_base_dir(parts[0], parts[1:])]
        else:
            if "contents" in start.keys():
                start["contents"].append(create_leaf(parts[0]))
            else:
                start["contents"] = [create_leaf(parts[0])]
    return start

def create_base_dir(base, parts):
    l = {}
    if len(parts) >= 1:
        l["name"] = base
        l["contents"] = [create_base_dir(parts[0], parts[1:])]
    elif len(parts) == 0:
        l = create_leaf(base)
    return l

def create_leaf(base):
    l = {}
    l["name"] = base
    return l
b=["base/images/graphs/one.png","base/images/graphs/oneb.png","base/images/tikz/two.png","base/refs/images/three.png","base/one.txt","base/chapters/two.txt"]
d =path_to_tree(b)
from pprint import pprint
pprint(d)
In this example you can see that we end up with as many dictionaries named "base" as there are files in the list, but only one is necessary; the subdirectories should be listed in its "contents" array.

This does not assume that all paths start with the same component, so we need a list at the top level:
from pprint import pprint

def addBits2Tree(bits, tree):
    if len(bits) == 1:
        tree.append({'name': bits[0]})
    else:
        for t in tree:
            if t['name'] == bits[0]:
                addBits2Tree(bits[1:], t['contents'])
                return
        newTree = []
        addBits2Tree(bits[1:], newTree)
        t = {'name': bits[0], 'contents': newTree}
        tree.append(t)

def addPath2Tree(path, tree):
    bits = path.split("/")
    addBits2Tree(bits, tree)

tree = []
for p in b:
    print(p)
    addPath2Tree(p, tree)
pprint(tree)
Which produces the following for your example path list:
[{'contents': [{'contents': [{'contents': [{'name': 'one.png'},
                                           {'name': 'oneb.png'}],
                              'name': 'graphs'},
                             {'contents': [{'name': 'two.png'}],
                              'name': 'tikz'}],
               'name': 'images'},
              {'contents': [{'contents': [{'name': 'three.png'}],
                             'name': 'images'}],
               'name': 'refs'},
              {'name': 'one.txt'},
              {'contents': [{'name': 'two.txt'}], 'name': 'chapters'}],
  'name': 'base'}]

Omitting the redundant name tags, you can go on with:
import json

result = {}
records = ["base/images/graphs/one.png", "base/images/tikz/two.png",
           "base/refs/images/three.png", "base/one.txt", "base/chapters/two.txt"]
recordsSplit = map(lambda x: x.split("/"), records)

for record in recordsSplit:
    here = result
    for item in record[:-1]:
        if item not in here:
            here[item] = {}
        here = here[item]
    if "###content###" not in here:
        here["###content###"] = []
    here["###content###"].append(record[-1])

print(json.dumps(result, indent=4))
The # characters are used for uniqueness (there could be a folder named "content" somewhere in the hierarchy). Just run it and see the result.
EDIT : Fixed a few typos, added the output.
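If you need the exact "name"/"contents" shape from the question, a small recursive conversion can be applied to result afterwards. This is only a minimal sketch; to_name_contents is a hypothetical helper, not part of the original answer:

def to_name_contents(name, node):
    # Hypothetical helper: convert the sentinel-keyed dict produced above
    # back into the {"name": ..., "contents": [...]} shape from the question.
    entry = {"name": name}
    contents = []
    for key, value in node.items():
        if key == "###content###":
            contents.extend({"name": leaf} for leaf in value)
        else:
            contents.append(to_name_contents(key, value))
    if contents:
        entry["contents"] = contents
    return entry

tree = [to_name_contents(name, node) for name, node in result.items()]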

Related

PyYAML loader with duplicate keys

I am using PyYAML to load a (large) YAML file which has duplicate keys. I would like to preserve all keys and modify the duplicate keys according to project needs. But it seems PyYAML silently overwrites earlier results with the last key, and I never get a chance to modify them (loss of information), resulting in this dict: {'blocks': {'a': 'b2:11 c2:22'}}
A simple example:
import yaml
given_str = '''
blocks:
    a:
        b1:1
        c1:2
    a:
        b2:11
        c2:22'''
p = yaml.load(given_str)
How can I load the YAML with duplicate keys so that I get a chance to recursively traverse it and modify the keys as I need? I need to load the YAML and then transfer it into a database.
Assuming your input YAML has no merge keys ('<<'), no tags and no comments you want
to preserve, you can use the following:
import sys
from pathlib import Path
from collections.abc import Hashable
from typing import Any, Dict

import ruamel.yaml
from ruamel.yaml.constructor import ConstructorError

file_in = Path('input.yaml')


class MyConstructor(ruamel.yaml.constructor.SafeConstructor):
    def construct_mapping(self, node, deep=False):
        """deep is True when creating an object/mapping recursively,
        in that case we want the underlying elements available during construction
        """
        if not isinstance(node, ruamel.yaml.nodes.MappingNode):
            raise ConstructorError(
                None, None, f'expected a mapping node, but found {node.id!s}', node.start_mark,
            )
        total_mapping = self.yaml_base_dict_type()
        if getattr(node, 'merge', None) is not None:
            todo = [(node.merge, False), (node.value, False)]
        else:
            todo = [(node.value, True)]
        for values, check in todo:
            mapping: Dict[Any, Any] = self.yaml_base_dict_type()
            for key_node, value_node in values:
                # keys can be list -> deep
                key = self.construct_object(key_node, deep=True)
                # lists are not hashable, but tuples are
                if not isinstance(key, Hashable):
                    if isinstance(key, list):
                        key = tuple(key)
                if not isinstance(key, Hashable):
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'found unhashable key',
                        key_node.start_mark,
                    )
                value = self.construct_object(value_node, deep=deep)
                if key in mapping:
                    pat = key + '_undup_{}'
                    index = 0
                    while True:
                        nkey = pat.format(index)
                        if nkey not in mapping:
                            key = nkey
                            break
                        index += 1
                mapping[key] = value
            total_mapping.update(mapping)
        return total_mapping


yaml = ruamel.yaml.YAML(typ='safe')
yaml.default_flow_style = False
yaml.Constructor = MyConstructor
data = yaml.load(file_in)
yaml.dump(data, sys.stdout)
which gives:
blocks:
  a: b1:1 c1:2
  a_undup_0: b2:11 c2:22
Please note that the values for both a keys are multiline plain scalars. For b1 and c1 to be keys, the mapping value indicator (:, the colon) needs to be followed by a whitespace character:
a:
    b1: 1
    c1: 2
After reading many forums, I think the best solution is to create a wrapper for the YAML loader that handles the duplicates. @Anthon - any comment?
import yaml
from collections import defaultdict, Counter


####### Preserving Duplicates ###################
def parse_preserving_duplicates(input_file):
    class PreserveDuplicatesLoader(yaml.CLoader):
        pass

    def map_constructor(loader, node, deep=False):
        """Walk the tree, removing degeneracy in any duplicate keys."""
        keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
        vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
        key_count = Counter(keys)
        data = defaultdict(dict)  # map all data, renaming duplicates
        c = 0
        for key, value in zip(keys, vals):
            if key_count[key] > 1:
                data[f'{key}{c}'] = value
                c += 1
            else:
                data[key] = value
        return data

    PreserveDuplicatesLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                                             map_constructor)
    return yaml.load(input_file, PreserveDuplicatesLoader)
##########################################################


with open('input.yaml', 'r') as f:  # path to your YAML file
    fp = parse_preserving_duplicates(f)

Storing dictionaries in json file

I am trying to store dictionaries that users input into a JSON file. The code is:
@client.command()
async def shibaku0(ctx, coin1, coin2, coin3, coin4, coin5, coin6, shibakunumber, oslink):
    await ctx.message.delete()
    with open('Shibaku0.json', 'r') as f:
        coins_data = json.load(f)
    coins_data[str(ctx.author.id)]["coins"] = (coin1, coin2, coin3, coin4, coin5, coin6)
    shibakunumber[str(ctx.author.id)]["shibakunumber"] = (shibakunumber)
    oslink[str(ctx.author.id)]["oslink"] = (oslink)
    with open('Shibaku0.json', 'w') as f:
        json.dump(coins_data, f)
    embed = discord.Embed(title="Shibaku0", url=f'{oslink}', description=f'No. {shibakunumber}')
    embed.add_field(name="Coins: ", value=f'{coin1} {coin2} {coin3} {coin4} {coin5} {coin6}', inline=True)
    embed.set_footer(text=f"{ctx.author.name}'s Shibaku")
    await ctx.send(embed=embed)
I want to store the "coins", "shibakunumber", and "oslink" values, but I am getting this error message when I try to run the code:
TypeError: list indices must be integers or slices, not str
To make a JSON file that is an associative array (JSON object / dict), the file needs to contain a single object (starting with {). Each key of that object associates a user id with the data for that user, i.e. a nested object.
Here's a sample :
import json
# sample_data/Obj.json
# {
# "user1": {"coins": "coinstringvalue", "shibakunumber": 1, "oslink": "linkstringvalue"},
# "user2": {"coins": "coinstringvalue2", "shibakunumber": 2, "oslink": "linkstringvalue2"}
# }
mydata = json.load(open('sample_data/Obj.json', 'r'))
print(mydata['user1'])
print(mydata['user2'])
print(mydata['user2']['shibakunumber'])
This gives the output:
{'coins': 'coinstringvalue', 'shibakunumber': 1, 'oslink': 'linkstringvalue'}
{'coins': 'coinstringvalue2', 'shibakunumber': 2, 'oslink': 'linkstringvalue2'}
2
Note that mydata is a dict in Python.
Here's an example of starting with an empty json file.
import json
# sample_data/empty.json
# {}
mydata = json.load(open('sample_data/empty.json', 'r'))
print(mydata)
# author_id = str(ctx.author.id)
author_id = str(1161214)
if author_id not in mydata:
    mydata[author_id] = dict()
mydata[author_id]['coins'] = ("coin1", "coin2")
mydata[author_id]['shibakunumber'] = 2
mydata[author_id]['oslink'] = "somelink"
json.dump(mydata, open('sample_data/new_file.json', 'w'))
loaded_data = json.load(open('sample_data/new_file.json', 'r'))
print(loaded_data)
Produces:
mydata {}
loaded_data {'1161214': {'coins': ['coin1', 'coin2'], 'shibakunumber': 2, 'oslink': 'somelink'}}
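Applied back to the command from the question, the fix is to index into the per-user object inside coins_data rather than into the parameters themselves. This is only a sketch along the lines of the sample above, assuming Shibaku0.json starts out as an empty JSON object {}; the embed-sending part of the command stays the same:

@client.command()
async def shibaku0(ctx, coin1, coin2, coin3, coin4, coin5, coin6, shibakunumber, oslink):
    await ctx.message.delete()
    with open('Shibaku0.json', 'r') as f:
        coins_data = json.load(f)
    author_id = str(ctx.author.id)
    if author_id not in coins_data:
        coins_data[author_id] = {}  # create the nested object for this user
    coins_data[author_id]["coins"] = (coin1, coin2, coin3, coin4, coin5, coin6)
    coins_data[author_id]["shibakunumber"] = shibakunumber
    coins_data[author_id]["oslink"] = oslink
    with open('Shibaku0.json', 'w') as f:
        json.dump(coins_data, f)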

Transform a list of files (JSON) to a dataframe

Spark Version: '2.0.0.2.5.0.0-1245'
So, my original question changed a bit but it's still the same issue.
What I want to do is load a huge amount of JSON files and transform those to a DataFrame - also probably save them as CSV or parquet file for further processing. Each JSON file represents one row in the final DataFrame.
import os
import glob
HDFS_MOUNT = # ...
DATA_SET_BASE = # ...
schema = StructType([
    StructField("documentId", StringType(), True),
    StructField("group", StringType(), True),
    StructField("text", StringType(), True)
])
# Get the file paths
file_paths = glob.glob(os.path.join(HDFS_MOUNT, DATA_SET_BASE, '**/*.json'))
file_paths = [f.replace(HDFS_MOUNT + '/', '') for f in file_paths]
print('Found {:d} files'.format(len(file_paths))) # 676 files
sql = SQLContext(sc)
df = sql.read.json(file_paths, schema=schema)
print('Loaded {:d} rows'.format(df.count())) # 9660 rows (what !?)
Besides the fact that there are 9660 rows instead of 676 (the number of available files), I also have the problem that the content seems to be None:
df.head(2)[0].asDict()
gives
{
    'documentId': None,
    'group': None,
    'text': None,
}
Example Data
This is just fake data of course but it resembles the actual data.
Note: some fields may be missing, e.g. text is not always present.
a.json
{
    "documentId": "001",
    "group": "A",
    "category": "indexed_document",
    "linkIDs": ["adiojer", "asdi555", "1337"]
}
b.json
{
    "documentId": "002",
    "group": "B",
    "category": "indexed_document",
    "linkIDs": ["linkId", "1000"],
    "text": "This is the text of this document"
}
Assuming that all your files have the same structure and are in the same directory:
df = sql_cntx.read.json('/hdfs/path/to/folder/*.json')
There might be a problem if any of the columns has null values for all rows; then Spark will not be able to infer the schema, so you have the option of telling Spark which schema to use:
from pyspark import SparkContext, SQLContext
from pyspark.sql.types import StructType, StructField, StringType, LongType
sc = SparkContext(appName="My app")
sql_cntx = SQLContext(sc)
schema = StructType([
    StructField("field1", StringType(), True),
    StructField("field2", LongType(), True)
])
df = sql_cntx.read.json('/hdfs/path/to/folder/*.json', schema=schema)
UPD: In case the files contain multi-line formatted JSON, you can try this code:
sc = SparkContext(appName='Test')
sql_context = SQLContext(sc)
rdd = sc.wholeTextFiles('/tmp/test/*.json').values()
df = sql_context.read.json(rdd, schema=schema)
df.show()

Count number of dictionaries in a dictionary in Swift

I have a bunch of accounts stored in a string dictionary and I would like to count the number of existing accounts, so basically a ".count", but to find the number of dictionaries created.
var dictionary: [String : [String]] = ["" : []]
let storeString = "StoreString"

func addUpdateArray(strings: [String], index: Int) {
    let locator = storeString + index.description
    dictionary[locator] = strings
}

addUpdateArray(["Account1", "Hy"], 1)
addUpdateArray(["Account2", "Hey"], 3)
And now I would like to see how many accounts have been created in that dictionary. Is there a way?
Something like this?
var accounts = [String: [String: String]]() // or whatever your structure is

accounts["Edmund"] = [
    "amount": "23.87",
    "currency": "dollars"
]
accounts["Baldrick"] = [
    "amount": "23.87",
    "currency": "dollars"
]
accounts["Percy"] = [
    "amount": "87.00",
    "currency": "peso"
]

println(accounts.keys.array.count) // 3
If you have dictionary of dictionaries and you want to count the number of actual values inside, you can do it like this:
var accounts = [
    "accountsGroup1": ["account1", "account2", "account3", "account4"],
    "accountsGroup2": ["account1", "account2"],
    "accountsGroup3": ["account1", "account2", "account3", "account4"]
]

let accountsCount = accounts.values.map { $0.count }
let numberOfAllAccounts = reduce(accountsCount, 0) { $0 + $1 }
println(numberOfAllAccounts)

How to compute "shortest distance" between two words?

Recently I had an interview and I was asked to write an algorithm to find the minimum number of one-letter changes to get from a particular word to a given word, e.g. Cat -> Cot -> Cog -> Dog.
I don't want the solution to the problem, just guide me through how I can use BFS in this algorithm.
According to this Scrabble list, the shortest path between CAT and DOG is:
['CAT', 'COT', 'COG', 'DOG']
from urllib import urlopen

def get_words():
    try:
        html = open('three_letter_words.txt').read()
    except IOError:
        html = urlopen('http://www.yak.net/kablooey/scrabble/3letterwords.html').read()
        with open('three_letter_words.txt', 'w') as f:
            f.write(html)
    b = html.find('<PRE>')  # ignore the html before the <PRE>
    while True:
        a = html.find("<B>", b) + 3
        b = html.find("</B>", a)
        word = html[a: b]
        if word == "ZZZ":
            break
        assert(len(word) == 3)
        yield word

words = list(get_words())

def get_template(word):
    c1, c2, c3 = word[0], word[1], word[2]
    t1 = 1, c1, c2
    t2 = 2, c1, c3
    t3 = 3, c2, c3
    return t1, t2, t3

d = {}
for word in words:
    template = get_template(word)
    for ti in template:
        d[ti] = d.get(ti, []) + [word]  # add the word to the set of words with that template

for ti in get_template('COG'):
    print d[ti]
# ['COB', 'COD', 'COG', 'COL', 'CON', 'COO', 'COO', 'COP', 'COR', 'COS', 'COT', 'COW', 'COX', 'COY', 'COZ']
# ['CIG', 'COG']
# ['BOG', 'COG', 'DOG', 'FOG', 'HOG', 'JOG', 'LOG', 'MOG', 'NOG', 'TOG', 'WOG']

import networkx
G = networkx.Graph()
for word_list in d.values():
    for word1 in word_list:
        for word2 in word_list:
            if word1 != word2:
                G.add_edge(word1, word2)

print G['COG']
# {'COP': {}, 'COS': {}, 'COR': {}, 'CIG': {}, 'COT': {}, 'COW': {}, 'COY': {}, 'COX': {}, 'COZ': {}, 'DOG': {}, 'CON': {}, 'COB': {}, 'COD': {}, 'COL': {}, 'COO': {}, 'LOG': {}, 'TOG': {}, 'JOG': {}, 'BOG': {}, 'HOG': {}, 'FOG': {}, 'WOG': {}, 'NOG': {}, 'MOG': {}}
print networkx.shortest_path(G, 'CAT', 'DOG')
['CAT', 'OCA', 'DOC', 'DOG']
As a bonus we can get the farthest:
print max(networkx.all_pairs_shortest_path(G, 'CAT')['CAT'].values(), key=len)
#['CAT', 'CAP', 'YAP', 'YUP', 'YUK']
At first sight I thought about Levenshtein distance, but here you need to use BFS. So I think you should start by building a tree: the given word is the root, the next nodes are words with the first letter changed, the nodes after that have the second letter changed, and so on. While you build the graph you run BFS, and when you find the target word you store the path length. At the end of the algorithm you choose the minimal distance.
1. Begin with just the starting word in your path set.
2. If the ending word of any path in your path set is the desired word, stop; that path is the desired path.
3. Replace each path in your path set with every possible path that starts with that path but is one word longer.
4. Go to step 2. (A sketch of these steps is given below.)
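A minimal sketch of these steps in Python, assuming a word_list of valid words and a hypothetical helper one_letter_apart (neither is part of the original answer):

from collections import deque

def one_letter_apart(a, b):
    # True if the words have the same length and differ in exactly one position.
    return len(a) == len(b) and sum(x != y for x, y in zip(a, b)) == 1

def shortest_word_path(start, target, word_list):
    paths = deque([[start]])  # step 1: the path set starts with just the starting word
    seen = {start}
    while paths:
        path = paths.popleft()
        if path[-1] == target:  # step 2: a path ending at the desired word is the answer
            return path
        for word in word_list:  # step 3: extend the path by every word one letter away
            if word not in seen and one_letter_apart(path[-1], word):
                seen.add(word)
                paths.append(path + [word])
    return None  # no path exists

print(shortest_word_path('CAT', 'DOG', ['CAT', 'COT', 'COG', 'COD', 'DOG']))
# ['CAT', 'COT', 'COG', 'DOG']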
If we build a directed acyclic graph from the destination word back to the source word in a breadth-wise fashion, and we do a dictionary lookup to check whether we have already seen a word before adding it to the tree, then the first occurrence of the source word gives the shortest path in the reverse direction, from the 'target word' to the 'source word'.
From this we can print the path from the 'source' to the 'target'.
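A rough sketch of that idea, reusing the hypothetical one_letter_apart helper from the sketch above (again, an assumption, not code from the original answer): searching backwards from the target and recording each word's parent means the parent chain from the source reads off the path in source-to-target order.

from collections import deque

def reverse_bfs_path(source, target, words):
    parent = {target: None}  # dictionary lookup also marks words already seen
    queue = deque([target])
    while queue:
        current = queue.popleft()
        if current == source:
            # First occurrence of the source word: follow the parent links,
            # which run from source towards target because we searched backwards.
            path = []
            while current is not None:
                path.append(current)
                current = parent[current]
            return path
        for word in words:
            if word not in parent and one_letter_apart(current, word):
                parent[word] = current
                queue.append(word)
    return None

print(reverse_bfs_path('CAT', 'DOG', ['CAT', 'COT', 'COG', 'COD', 'DOG']))
# ['CAT', 'COT', 'COG', 'DOG']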
