Storing dictionaries in json file - discord.py

I am trying to store user input in dictionaries in a JSON file. The code is:
@client.command()
async def shibaku0(ctx, coin1, coin2, coin3, coin4, coin5, coin6, shibakunumber, oslink):
    await ctx.message.delete()
    with open('Shibaku0.json', 'r') as f:
        coins_data = json.load(f)
    coins_data[str(ctx.author.id)]["coins"] = (coin1, coin2, coin3, coin4, coin5, coin6)
    shibakunumber[str(ctx.author.id)]["shibakunumber"] = (shibakunumber)
    oslink[str(ctx.author.id)]["oslink"] = (oslink)
    with open('Shibaku0.json', 'w') as f:
        json.dump(coins_data, f)
    embed = discord.Embed(title="Shibaku0", url=f'{oslink}', description=f'No. {shibakunumber}')
    embed.add_field(name="Coins: ", value=f'{coin1} {coin2} {coin3} {coin4} {coin5} {coin6}', inline=True)
    embed.set_footer(text=f"{ctx.author.name}'s Shibaku")
    await ctx.send(embed=embed)
I want to store the "coins", "shibakunumber", and "oslink", but I am getting this error message when I try to run the code:
TypeError: list indices must be integers or slices, not str

To make a JSON file that holds an associative array (a JSON object / Python dict), the file needs to contain a single object at the top level, i.e. start with {. Each key of that object associates a user id with the data for that user, i.e. a nested object.
Here's a sample:
import json
# sample_data/Obj.json
# {
# "user1": {"coins": "coinstringvalue", "shibakunumber": 1, "oslink": "linkstringvalue"},
# "user2": {"coins": "coinstringvalue2", "shibakunumber": 2, "oslink": "linkstringvalue2"}
# }
mydata = json.load(open('sample_data/Obj.json', 'r'))
print(mydata['user1'])
print(mydata['user2'])
print(mydata['user2']['shibakunumber'])
This gives the output:
{'coins': 'coinstringvalue', 'shibakunumber': 1, 'oslink': 'linkstringvalue'}
{'coins': 'coinstringvalue2', 'shibakunumber': 2, 'oslink': 'linkstringvalue2'}
2
Note that mydata is a dict in Python.
Here's an example of starting with an empty JSON file.
import json
# sample_data/empty.json
# {}
mydata = json.load(open('sample_data/empty.json', 'r'))
print('mydata', mydata)
# author_id = str(ctx.author.id)
author_id = str(1161214)
if author_id not in mydata:
    mydata[author_id] = dict()
mydata[author_id]['coins'] = ("coin1", "coin2")
mydata[author_id]['shibakunumber'] = 2
mydata[author_id]['oslink'] = "somelink"
json.dump(mydata, open('sample_data/new_file.json', 'w'))
loaded_data = json.load(open('sample_data/new_file.json', 'r'))
print('loaded_data', loaded_data)
Produces:
mydata {}
loaded_data {'1161214': {'coins': ['coin1', 'coin2'], 'shibakunumber': 2, 'oslink': 'somelink'}}
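Applied to the original command, a minimal sketch of the same pattern could look like this (an illustration only, assuming Shibaku0.json already contains at least {} and that client, json and discord are set up as in the question):

@client.command()
async def shibaku0(ctx, coin1, coin2, coin3, coin4, coin5, coin6, shibakunumber, oslink):
    await ctx.message.delete()
    with open('Shibaku0.json', 'r') as f:
        coins_data = json.load(f)
    author_id = str(ctx.author.id)
    if author_id not in coins_data:
        coins_data[author_id] = {}  # one nested object per user
    coins_data[author_id]["coins"] = [coin1, coin2, coin3, coin4, coin5, coin6]
    coins_data[author_id]["shibakunumber"] = shibakunumber
    coins_data[author_id]["oslink"] = oslink
    with open('Shibaku0.json', 'w') as f:
        json.dump(coins_data, f)
    embed = discord.Embed(title="Shibaku0", url=f'{oslink}', description=f'No. {shibakunumber}')
    embed.add_field(name="Coins: ", value=f'{coin1} {coin2} {coin3} {coin4} {coin5} {coin6}', inline=True)
    embed.set_footer(text=f"{ctx.author.name}'s Shibaku")
    await ctx.send(embed=embed)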

Related

JSR223 Assertion: Passing a custom key to JsonOutput.toJson()

I am doing a validation using the following code:
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
String sourceTargetPassJson = "{\"Apple\":[\"Apple\",\"Orange\",\"Kiwi\",\"Avocado\"],\"Orange\":[\"Orange\",\"Kiwi\",\"Avocado\"]}";
def sourceVar = vars.get("source_type");
def targetVar = vars.get("target_type");
log.info("Source :" + sourceVar + ", Target :" + targetVar + ".");
def jsonSlurper = new JsonSlurper();
def response = jsonSlurper.parseText(sourceTargetPassJson);
def json = JsonOutput.toJson(response.sourceVar);
log.info("Response: " + json);
vars.put("json", json);
Here, the line def json = JsonOutput.toJson(response.sourceVar); is failing because there is no key called sourceVar; the actual key is held in the variable sourceVar, which is obtained from the JMeter variable source_type. Given that the above assertion is in a loop controller, I want to validate based on the source_type obtained on each iteration.
Example :
Iteration 1, sourceVar = "Apple", json should have ["Apple","Orange","Kiwi","Avocado"].
Iteration 2, sourceVar = "Orange", json should have ["Orange","Kiwi","Avocado"].
How do I obtain the value for the variable json here based on the custom value of sourceVar?
The issue was resolved by the following change:
String json = response[sourceVar];
So, if sourceVar is "Apple", the value of json obtained is [Apple, Orange, Kiwi, Avocado].

PyYAML loader with duplicate keys

I am using PyYAML to load a (large) YAML file which has duplicate keys. I would like to preserve all keys and modify the duplicate keys according to project needs, but PyYAML silently overwrites earlier values with the last key, so I never get a chance to modify them (loss of information), and I end up with this dict: {'blocks': {'a': 'b2:11 c2:22'}}
simple example YAML:
import yaml
given_str = '''
blocks:
    a:
        b1:1
        c1:2
    a:
        b2:11
        c2:22'''
p = yaml.load(given_str)
How can I load the YAML with duplicate keys so that I get a chance to recursively traverse it and modify keys as my need. I need to load YAML and then transfer it into a database.
Assuming your input YAML has no merge keys ('<<'), no tags and no comments you want
to preserve, you can use the following:
import sys
import ruamel.yaml
from pathlib import Path
from typing import Any, Dict
from collections.abc import Hashable
from ruamel.yaml.constructor import ConstructorError

file_in = Path('input.yaml')


class MyConstructor(ruamel.yaml.constructor.SafeConstructor):
    def construct_mapping(self, node, deep=False):
        """deep is True when creating an object/mapping recursively,
        in that case want the underlying elements available during construction
        """
        if not isinstance(node, ruamel.yaml.nodes.MappingNode):
            raise ConstructorError(
                None, None, f'expected a mapping node, but found {node.id!s}', node.start_mark,
            )
        total_mapping = self.yaml_base_dict_type()
        if getattr(node, 'merge', None) is not None:
            todo = [(node.merge, False), (node.value, False)]
        else:
            todo = [(node.value, True)]
        for values, check in todo:
            mapping: Dict[Any, Any] = self.yaml_base_dict_type()
            for key_node, value_node in values:
                # keys can be list -> deep
                key = self.construct_object(key_node, deep=True)
                # lists are not hashable, but tuples are
                if not isinstance(key, Hashable):
                    if isinstance(key, list):
                        key = tuple(key)
                if not isinstance(key, Hashable):
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'found unhashable key',
                        key_node.start_mark,
                    )
                value = self.construct_object(value_node, deep=deep)
                if key in mapping:
                    pat = key + '_undup_{}'
                    index = 0
                    while True:
                        nkey = pat.format(index)
                        if nkey not in mapping:
                            key = nkey
                            break
                        index += 1
                mapping[key] = value
            total_mapping.update(mapping)
        return total_mapping


yaml = ruamel.yaml.YAML(typ='safe')
yaml.default_flow_style = False
yaml.Constructor = MyConstructor
data = yaml.load(file_in)
yaml.dump(data, sys.stdout)
which gives:
blocks:
  a: b1:1 c1:2
  a_undup_0: b2:11 c2:22
Please note that the values for both a keys are multiline plain scalars. For b1 and c1 to be keys, the mapping value indicator (:, the colon) needs to be followed by a whitespace character:
a:
    b1: 1
    c1: 2
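For a quick illustration of that whitespace rule, here is a hypothetical check with PyYAML's safe loader (not part of the original answer):

import yaml

# without a space after the colon, b1:1 and c1:2 are just continuation lines
# of one plain scalar, so 'a' maps to a single string
print(yaml.safe_load("a:\n  b1:1\n  c1:2"))    # {'a': 'b1:1 c1:2'}

# with a space after the colon they become keys of a nested mapping
print(yaml.safe_load("a:\n  b1: 1\n  c1: 2"))  # {'a': {'b1': 1, 'c1': 2}}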
After reading many forums, I think the best solution is to create a wrapper for the YAML loader that renames duplicates. @Anthon - any comment?
import yaml
from collections import defaultdict, Counter


####### Preserving Duplicates ###################
def parse_preserving_duplicates(input_file):
    class PreserveDuplicatesLoader(yaml.CLoader):
        pass

    def map_constructor(loader, node, deep=False):
        """Walk tree, removing degeneracy in any duplicate keys"""
        keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
        vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
        key_count = Counter(keys)
        data = defaultdict(dict)  # map all data, removing duplicates
        c = 0
        for key, value in zip(keys, vals):
            if key_count[key] > 1:
                data[f'{key}{c}'] = value
                c += 1
            else:
                data[key] = value
        return data

    PreserveDuplicatesLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                                             map_constructor)
    return yaml.load(input_file, PreserveDuplicatesLoader)
##########################################################


with open('input.yaml', 'r') as f:
    data = parse_preserving_duplicates(f)
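A quick way to sanity-check this on the YAML from the question is a hypothetical usage sketch like the following (yaml.CLoader requires PyYAML built against libyaml; if it is missing, subclass yaml.SafeLoader instead; the colons are followed by spaces here so that b1/c1 become real keys):

given_str = '''
blocks:
    a:
        b1: 1
        c1: 2
    a:
        b2: 11
        c2: 22'''

print(parse_preserving_duplicates(given_str))
# roughly: {'blocks': {'a0': {'b1': 1, 'c1': 2}, 'a1': {'b2': 11, 'c2': 22}}}
# (returned as nested defaultdicts; duplicate keys are renamed a0, a1, ...)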

Discord.py json file prints all information instead of only the one being asked for

I have code which should let the user view a specific item in their list, which is stored in a JSON file, but it prints everything from the JSON file. This is the code for the command:
@client.command(aliases = ["shib, shibaku, Shib"])
async def Shibaku(ctx, int = 0):
    if int == 1:
        with open('Shibaku1.json', 'r') as f:
            coins_data = json.load(f)
        for oslink in coins_data[str(ctx.author.id)]:
            await ctx.send(f'{oslink}')
    else:
        await ctx.send("Empty Slot")
7 items are stored in the json and they all get printed:
{
    "331971067788787733": [
        "\ud83d\ude04",
        "\ud83d\ude02",
        "\ud83d\ude00",
        "\ud83d\ude06",
        "\ud83d\ude18",
        "\ud83d\ude1d",
        "100",
        "https://opensea.io/assets/0xb70b759ad676b227a01f7d406e2dc9c67103aaeb/908"
    ]
}
I only need to print the link at the end.
If you want to send only the last element of the list, use coins_data[str(ctx.author.id)][-1]:
@client.command(aliases = ["shib, shibaku, Shib"])
async def Shibaku(ctx, int = 0):
    if int == 1:
        with open('Shibaku1.json', 'r') as f:
            coins_data = json.load(f)
        await ctx.send(coins_data[str(ctx.author.id)][-1])
    else:
        await ctx.send("Empty Slot")

Get position of token in BERT's output layer

We are interested in the BERT vectors for each token. By BERT vector we mean the word vector for a specific token in BERT's output layer. So we would like to find out which token produces which BERT vector. We wrote some code, but we are not sure if it is correct or how to test it.
In the code we process a sentence with BERT. We construct a list of position ids and hand them to the model. Afterwards we use the same position ids to map the tokens to the output layer. Then there is some code that calculates the character offsets of each vector in the input sentence.
Is this the correct way to use position_ids to map tokens to their output vectors?
from transformers import BertModel, BertConfig, BertTokenizer
import torch

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')


def sentence_to_vector(input_sentence):
    tokens_encoded = tokenizer.encode(input_sentence, add_special_tokens=True)
    input_ids = torch.tensor(tokens_encoded).unsqueeze(0)  # Batch size 1
    seq_length = input_ids.size(1)

    # code to construct position_ids from here:
    # https://github.com/huggingface/transformers/blob/8da280ebbeca5ebd7561fd05af78c65df9161f92/pytorch_pretrained_bert/modeling.py#L188:L189
    position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
    position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

    outputs = model(input_ids, position_ids=position_ids)
    tokens = tokenizer.convert_ids_to_tokens(input_ids[0])

    # from the BertModel documentation (example at the bottom):
    # The last hidden-state is the first element of the output tuple
    # https://huggingface.co/transformers/model_doc/bert.html#transformers.BertModel
    # ttv = {}  # token to vector
    # for i in position_ids[0]:
    #     ttv[tokens[i]] = outputs[0][0][position_ids[0][i]]

    data = []
    last_offset = 0
    for i in range(0, len(position_ids[0])):
        token = tokens[position_ids[0][i]]
        vector = outputs[0][0][position_ids[0][i]]

        pos_begin = None
        pos_end = None
        if not token == "[CLS]" and not token == "[SEP]":
            pos_begin = input_sentence.find(token, last_offset)
            pos_end = pos_begin + len(token)
            last_offset = pos_end

        data.append({
            "token": token,
            "pos_begin": pos_begin,
            "pos_end": pos_end,
            "vector": vector
        })
    return data


input_sentence = "do the chicken dance!"
data = sentence_to_vector(input_sentence)

for token in data:
    print(token["token"] + "\t" + str(token["pos_begin"]) + "\t" + str(token["pos_end"]) + "\t" + str(token["vector"][0:3]) + "...")

How to automatically turn BibTex citation into something parseable by Zotero?

I have a citation system which publishes users notes to a wiki (Researchr). Programmatically, I have access to the full BibTeX record of each entry, and I also display this on the individual pages (for example - click on BibTeX). This is in the interest of making it easy for users of other citation manager to automatically import the citation of a paper that interests them. I would also like other citation managers, especially Zotero, to be able to automatically detect and import a citation.
Zotero lists a number of ways of exposing metadata that it will understand, including meta tags with RDF, COiNS, Dublin Core and unAPI. Is there a Ruby library for converting BibTeX to any of these standards automatically - or a Javascript library? I could probably create something, but if something existed, it would be far more robust (BibTeX has so many publication types and fields etc).
There's a BibTeX2RDF converter available here; it might be what you're after.
unAPI is not a data standard - it's a way to serve data (to Zotero and other programs). Zotero imports Bibtex, so serving Bibtex via unAPI works just fine. Inspire is an example of a site that does that:
http://inspirehep.net/
Nowadays one can simply import BibTeX files of type .bib directly into Zotero. However, I noticed my BibTeX files were often less complete than Zotero's records (in particular they often missed a DOI), and I did not find an "auto-complete" function (based on the data in the BibTeX entries) in Zotero.
So I import the .bib file with Zotero, to ensure they are all in there. Then I run a Python script that gets all the missing DOIs it can find for the entries in that .bib file, and exports them to a space-separated .txt file:
# pip install habanero
from habanero import Crossref
import re


def titletodoi(keyword):
    cr = Crossref()
    result = cr.works(query=keyword)
    items = result["message"]["items"]
    item_title = items[0]["title"]
    tmp = ""
    for it in item_title:
        tmp += it
    title = keyword.replace(" ", "").lower()
    title = re.sub(r"\W", "", title)
    # print('title: ' + title)
    tmp = tmp.replace(" ", "").lower()
    tmp = re.sub(r"\W", "", tmp)
    # print('tmp: ' + tmp)
    if title == tmp:
        doi = items[0]["DOI"]
        return doi
    else:
        return None


def get_dois(titles):
    dois = []
    for title in titles:
        try:
            doi = titletodoi(title)
            print(f"doi={doi}, title={title}")
            if doi is not None:
                dois.append(doi)
        except:
            pass
            # print("An exception occurred")
    print(f"dois={dois}")
    return dois


def read_titles_from_file(filepath):
    with open(filepath) as f:
        lines = f.read().splitlines()
        split_lines = splits_lines(lines)
        return split_lines


def splits_lines(lines):
    split_lines = []
    for line in lines:
        new_lines = line.split(";")
        for new_line in new_lines:
            split_lines.append(new_line)
    return split_lines


def write_dois_to_file(dois, filename, separation_char):
    textfile = open(filename, "w")
    for doi in dois:
        textfile.write(doi + separation_char)
    textfile.close()


filepath = "list_of_titles.txt"
titles = read_titles_from_file(filepath)
dois = get_dois(titles)
write_dois_to_file(dois, "dois_space.txt", " ")
write_dois_to_file(dois, "dois_per_line.txt", "\n")
The DOIs in that .txt file are fed into Zotero's magic wand (Add Item by Identifier). Next, I (manually) remove the duplicates by choosing the latest added entry (because that is the one from the magic wand, with the most data).
After that, I run another script to update all the reference id's in my .tex and .bib files to those generated by Zotero:
# Importing library
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import *
import os, fnmatch
import Levenshtein as lev


# Let's define a function to customize our entries.
# It takes a record and returns this record.
def customizations(record):
    """Use some functions delivered by the library

    :param record: a record
    :returns: -- customized record
    """
    record = type(record)
    record = author(record)
    record = editor(record)
    record = journal(record)
    record = keyword(record)
    record = link(record)
    record = page_double_hyphen(record)
    record = doi(record)
    return record


def get_references(filepath):
    with open(filepath) as bibtex_file:
        parser = BibTexParser()
        parser.customization = customizations
        bib_database = bibtexparser.load(bibtex_file, parser=parser)
        # print(bib_database.entries)
    return bib_database


def get_reference_mapping(main_filepath, sub_filepath):
    found_sub = []
    found_main = []
    main_into_sub = []
    main_references = get_references(main_filepath)
    sub_references = get_references(sub_filepath)
    for main_entry in main_references.entries:
        for sub_entry in sub_references.entries:
            # Match the reference ID if 85% similar titles are detected
            lev_ratio = lev.ratio(
                remove_curly_braces(main_entry["title"]).lower(),
                remove_curly_braces(sub_entry["title"]).lower(),
            )
            if lev_ratio > 0.85:
                print(f"lev_ratio={lev_ratio}")
                if main_entry["ID"] != sub_entry["ID"]:
                    print(f'replace: {sub_entry["ID"]} with: {main_entry["ID"]}')
                    main_into_sub.append([main_entry, sub_entry])
                # Keep track of which entries have been found
                found_sub.append(sub_entry)
                found_main.append(main_entry)
    return (
        main_into_sub,
        found_main,
        found_sub,
        main_references.entries,
        sub_references.entries,
    )


def remove_curly_braces(string):
    left = string.replace("{", "")
    right = left.replace("}", "")
    return right


def replace_references(main_into_sub, directory):
    for pair in main_into_sub:
        main = pair[0]["ID"]
        sub = pair[1]["ID"]
        print(f"replace: {sub} with: {main}")
        # UNCOMMENT IF YOU WANT TO ACTUALLY DO THE PRINTED REPLACEMENT
        # findReplace(latex_root_dir, sub, main, "*.tex")
        # findReplace(latex_root_dir, sub, main, "*.bib")


def findReplace(directory, find, replace, filePattern):
    for path, dirs, files in os.walk(os.path.abspath(directory)):
        for filename in fnmatch.filter(files, filePattern):
            filepath = os.path.join(path, filename)
            with open(filepath) as f:
                s = f.read()
            s = s.replace(find, replace)
            with open(filepath, "w") as f:
                f.write(s)


def list_missing(main_references, sub_references):
    for sub in sub_references:
        if not sub["ID"] in list(map(lambda x: x["ID"], main_references)):
            print(f'the following reference has a changed title: {sub["ID"]}')


latex_root_dir = "some_path/"
main_filepath = f"{latex_root_dir}latex/Literature_study/zotero.bib"
sub_filepath = f"{latex_root_dir}latex/Literature_study/references.bib"

(
    main_into_sub,
    found_main,
    found_sub,
    main_references,
    sub_references,
) = get_reference_mapping(main_filepath, sub_filepath)

replace_references(main_into_sub, latex_root_dir)
list_missing(main_references, sub_references)

# For those references which have a Levenshtein ratio below 0.85 you can specify a manual swap:
manual_swap = []  # main into sub
# manual_swap.append(["cantley_impact_2021", "cantley2021impact"])
# manual_swap.append(["widemann_envision_2021", "widemann2020envision"])
for pair in manual_swap:
    main = pair[0]
    sub = pair[1]
    print(f"replace: {sub} with: {main}")
    # UNCOMMENT IF YOU WANT TO ACTUALLY DO THE PRINTED REPLACEMENT
    # findReplace(latex_root_dir, sub, main, "*.tex")
    # findReplace(latex_root_dir, sub, main, "*.bib")
