PyMC: Directly changing an object's name doesn't apply when pulling out traces

Here is a bare bit of code which produces an error:
import pymc
import numpy as np

a = pymc.Normal('a', 1, 1)
b = np.empty(4, dtype=object)
for i in range(4):
    b[i] = 1*a
    b[i].__name__ = 'b_%i' % i
M = pymc.MCMC([a, b])
M.sample(10)
M.trace('b_0')  # Causes a KeyError: 'b_0'
I don't understand why I get a KeyError: 'b_0' when I try to extract the trace of b_0 (and of all the other b's). Are the traces just not being saved? If so, is there a switch I can flip directly to change that, without having to create the objects with the @deterministic decorator?

I looked through it; apparently the trace was indeed not being saved. Also, the "flag variable" for keeping the trace isn't .trace, it's .keep_trace.
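For reference, a minimal sketch of that fix applied to the loop above (assuming PyMC 2, where keep_trace is the flag named in the answer):

for i in range(4):
    b[i] = 1*a
    b[i].__name__ = 'b_%i' % i
    b[i].keep_trace = True  # flag the node so its trace is saved during sampling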

Related

How can I export layered drawings from drawio to create "animated" slides in beamer?

When preparing lectures, or conference presentations with beamer, I usually use layered drawings. Then for graphics included in consecutive slides ("frames" in beamer), I simply use different sets of layers.
For graphics created in IPE, I have created a dedicated expallviews.lua script.
Unfortunately, for graphics created with diagrams.net run locally as drawio-desktop, no such automated export of the various layers exists. The only way is to manually select the visible layers in the GUI and then export the consecutive drawings to a set of PDF files.
Is there a more convenient method to solve that problem?
The described problem has been reported in issues 405 and 737 in the drawio-desktop repository.
After reviewing those issues, I found a method based on changing the visibility of layers automatically (instead of manually via the GUI) and exporting such drawings to a set of PDF files. The proposed method is described in a comment on issue 405. It uses a simple Python script:
#!/usr/bin/python3
"""
This script modifies the visibility of layers in the XML
file with a diagram generated by drawio.
It works around the lack of a possibility to export
only the selected layers from the CLI version of drawio.
Written by Wojciech M. Zabolotny 6.10.2022
(wzab01<at>gmail.com or wojciech.zabolotny<at>pw.edu.pl)
The code is published under LGPL V2 license
"""
import xml.etree.ElementTree as et
import xml.parsers.expat as pe
import sys
import argparse

PARSER = argparse.ArgumentParser()
PARSER.add_argument("--layers", help="Selected layers: \"all\", a comma-separated list of integers, or integer ranges like \"0-3,6,7\"", default="all")
PARSER.add_argument("--layer_prefix", help="Layer name prefix", default="Layer_")
PARSER.add_argument("--outfile", help="Output file", default="output.drawio")
PARSER.add_argument("--infile", help="Input file", default="input.drawio")
ARGS = PARSER.parse_args()
INFILENAME = ARGS.infile
OUTFILENAME = ARGS.outfile

# Find all elements with 'value' starting with the layer prefix.
# Return tuples with the element and the rest of 'value' after the prefix.
def find_layers(el_start):
    res = []
    for el in el_start:
        val = el.get('value')
        if val is not None:
            if val.find(ARGS.layer_prefix) == 0:
                # This is a layer element. Add it, and its name
                # after the prefix, to the list.
                res.append((el, val[len(ARGS.layer_prefix):]))
                continue
        # If it is not a layer element, scan its children
        res.extend(find_layers(el))
    return res

# Analyse the layer selection and build the list of layers
# that should be visible. Customize this part if you want
# a more sophisticated method for selecting layers.
# Currently only "all", a comma-separated list of integers,
# or ranges of integers are supported.
def build_visible_list(layers):
    if layers == "all":
        return layers
    res = []
    for lay in layers.split(','):
        # Is it a range?
        s = lay.find("-")
        if s > 0:
            # This is a range
            first = int(lay[:s])
            last = int(lay[(s + 1):])
            res.extend(range(first, last + 1))
        else:
            res.append(int(lay))
    return res

def is_visible(layer_tuple, visible_list):
    if visible_list == "all":
        return True
    if int(layer_tuple[1]) in visible_list:
        return True
    return False

try:
    EL_ROOT = et.fromstring(open(INFILENAME, "r").read())
except et.ParseError as perr:
    # Handle the parsing error
    ROW, COL = perr.position
    print(
        "Parsing error "
        + str(perr.code)
        + " ("
        + pe.ErrorString(perr.code)
        + ") in column "
        + str(COL)
        + " of line "
        + str(ROW)
        + " of the file "
        + INFILENAME
    )
    sys.exit(1)

visible_list = build_visible_list(ARGS.layers)
layers = find_layers(EL_ROOT)
for layer_tuple in layers:
    if is_visible(layer_tuple, visible_list):
        print("set " + layer_tuple[1] + " to visible")
        layer_tuple[0].attrib['visible'] = "1"
    else:
        print("set " + layer_tuple[1] + " to invisible")
        layer_tuple[0].attrib['visible'] = "0"

# Now write the modified file
t = et.ElementTree(EL_ROOT)
with open(OUTFILENAME, 'w') as f:
    t.write(f, encoding='unicode')
The maintained version of that script, together with a demonstration of its use, is also available in my GitHub repository.
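For completeness, a usage sketch (assuming the script above is saved as select_layers.py, that the drawing uses layers named Layer_0 through Layer_3, and that the drawio CLI is on the PATH; the export flags are the ones documented for drawio-desktop and may vary between versions):

import subprocess

# Hypothetical batch export: reveal one more layer per slide of 'figure.drawio'.
N_LAYERS = 4  # assumed number of Layer_<i> layers in the drawing
for i in range(N_LAYERS):
    out = "figure_%d.drawio" % i
    subprocess.run(["python3", "select_layers.py", "--infile", "figure.drawio",
                    "--outfile", out, "--layers", "0-%d" % i], check=True)
    subprocess.run(["drawio", "--export", "--format", "pdf",
                    "--output", "figure_%d.pdf" % i, out], check=True)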

Losing the crs when writing to .gpkg with geopandas

When I write my .gpkg I am losing the CRS. I have tried setting the CRS with .set_crs, and also adding the CRS when writing the .gpkg (which produces a warning: "fiona._env - WARNING - dataset filename.gpkg does not support layer creation option EPSG").
My code:
for layername in fiona.listlayers(file):
    vector = geopandas.read_file(file, layer=layername)
    vector.set_crs(4326)
    vector.to_file(filename + ".gpkg", layer=layername, driver='GPKG')
or
for layername in fiona.listlayers(file):
    vector = geopandas.read_file(file, layer=layername)
    vector.to_file(filename + ".gpkg", layer=layername, driver='GPKG', epsg=4326)
Neither works.
vector.set_crs(4326) does not work in place by default. You either need to assign the result back or specify inplace=True.
for layername in fiona.listlayers(file):
    vector = geopandas.read_file(file, layer=layername)
    # vector.set_crs(4326, inplace=True)  # one option
    vector = vector.set_crs(4326)  # other option
    vector.to_file(filename + ".gpkg", layer=layername, driver='GPKG')
Your second attempt does not work because to_file has no epsg keyword; the unrecognized argument is passed along to Fiona and GDAL, which silently ignore it.
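To confirm the CRS now survives the round trip, a quick check (reusing the filename and layername from above) is to read the layer back:

import geopandas

check = geopandas.read_file(filename + ".gpkg", layer=layername)
print(check.crs)  # should report EPSG:4326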

Is there any good way to rewrite the edgetpu old code by using pycoral api?

I'm a beginner using coral devboard mini.
I want to start a Smart Bird Feeder project.
https://coral.ai/projects/bird-feeder/
I've been trying to execute the code by referring to that project page, but I can't run bird_classify.py.
The error is as follows:
RuntimeError: Internal: Unsupported data type in custom op handler: 0
Node number 0 (edgetpu-custom-op) failed to prepare.
Originally, the samples in this project seem to be deprecated: the edgetpu API requires the old runtime version 13 instead of the current 14 (tflite is 2.5). I have downloaded the old runtime directly and re-installed it in /usr/lib/python3/dist-packages, but I cannot uninstall the new version, so the versions still do not match.
Is there a better way to do this?
Also, I've decided to give up on reproducing the same environment as the sample, and to use the pycoral API to run the code instead.
If there is a good way to rewrite the code to use pycoral, please let me know.
Thanks
#!/usr/bin/python3
"""
Coral Smart Bird Feeder
Uses ClassificationEngine from the EdgeTPU API to analyze animals in
camera frames. Sounds a deterrent if a squirrel is detected.
Users define model, labels file, storage path, deterrent sound, and
optionally can set this to training mode for collecting images for a custom
model.
"""
import argparse
import time
import re
import logging
import sys

import gstreamer

sys.path.append('/usr/lib/python3/dist-packages/edgetpu')
from edgetpu.classification.engine import ClassificationEngine
from PIL import Image
from playsound import playsound
from pycoral.adapters import classify
from pycoral.adapters import common
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter

def save_data(image, results, path, ext='png'):
    """Saves camera frame and model inference results
    to user-defined storage directory."""
    tag = '%010d' % int(time.monotonic() * 1000)
    name = '%s/img-%s.%s' % (path, tag, ext)
    image.save(name)
    print('Frame saved as: %s' % name)
    logging.info('Image: %s Results: %s', tag, results)

def load_labels(path):
    """Parses provided label file for use in model inference."""
    p = re.compile(r'\s*(\d+)(.+)')
    with open(path, 'r', encoding='utf-8') as f:
        lines = (p.match(line).groups() for line in f.readlines())
        return {int(num): text.strip() for num, text in lines}

def print_results(start_time, last_time, end_time, results):
    """Print results to terminal for debugging."""
    inference_rate = ((end_time - start_time) * 1000)
    fps = (1.0 / (end_time - last_time))
    print('\nInference: %.2f ms, FPS: %.2f fps' % (inference_rate, fps))
    for label, score in results:
        print(' %s, score=%.2f' % (label, score))

def do_training(results, last_results, top_k):
    """Compares current model results to previous results and returns
    true if at least one label difference is detected. Used to collect
    images for training a custom model."""
    new_labels = [label[0] for label in results]
    old_labels = [label[0] for label in last_results]
    shared_labels = set(new_labels).intersection(old_labels)
    if len(shared_labels) < top_k:
        print('Difference detected')
        return True

def user_selections():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', required=True,
                        help='.tflite model path')
    parser.add_argument('--labels', required=True,
                        help='label file path')
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='class score threshold')
    parser.add_argument('--storage', required=True,
                        help='File path to store images and results')
    parser.add_argument('--sound', required=True,
                        help='File path to deterrent sound')
    parser.add_argument('--print', default=False, required=False,
                        help='Print inference results to terminal')
    parser.add_argument('--training', default=False, required=False,
                        help='Training mode for image collection')
    args = parser.parse_args()
    return args

def main():
    """Creates camera pipeline, and pushes pipeline through ClassificationEngine
    model. Logs results to user-defined storage. Runs either in training mode to
    gather images for custom model creation or in deterrent mode that sounds an
    'alarm' if a defined label is detected."""
    args = user_selections()
    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = ClassificationEngine(args.model)
    labels = load_labels(args.labels)
    storage_dir = args.storage
    # Initialize logging file
    logging.basicConfig(filename='%s/results.log' % storage_dir,
                        format='%(asctime)s-%(message)s',
                        level=logging.DEBUG)
    last_time = time.monotonic()
    last_results = [('label', 0)]

    def user_callback(image, svg_canvas):
        nonlocal last_time
        nonlocal last_results
        start_time = time.monotonic()
        results = engine.classify_with_image(image, threshold=args.threshold, top_k=args.top_k)
        end_time = time.monotonic()
        results = [(labels[i], score) for i, score in results]
        if args.print:
            print_results(start_time, last_time, end_time, results)
        if args.training:
            if do_training(results, last_results, args.top_k):
                save_data(image, results, storage_dir)
        else:
            # Custom model mode:
            # The labels can be modified to detect/deter user-selected items
            if results[0][0] != 'background':
                save_data(image, results, storage_dir)
            if 'fox squirrel, eastern fox squirrel, Sciurus niger' in results:
                playsound(args.sound)
                logging.info('Deterrent sounded')
        last_results = results
        last_time = end_time

    result = gstreamer.run_pipeline(user_callback)

if __name__ == '__main__':
    main()
I suggest that you follow one of the examples available from the Coral examples. There is an example named classify_image.py which uses the Edge TPU (tflite) that I found works. After you install the Coral examples, you have to drill down through the directory hierarchy; in my case, from root it is /home/pi/ml-projects/coral/pycoral/tensorflow/examples/lite/examples. There are 17 files in that last examples directory. I'm using: numpy 1.19.3, pycoral 2.0.0, scipy 1.7.1, tensorflow 2.4.0, tflite-runtime 2.5.0.post1. I've installed the following Edge TPU runtime: edgetpu_runtime_20201105.zip.
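As a starting point for the rewrite itself, here is a minimal sketch of how the classify_with_image call above could map onto the pycoral API (the model, label, and image file names are placeholders):

from PIL import Image
from pycoral.adapters import classify, common
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter

# make_interpreter replaces ClassificationEngine
interpreter = make_interpreter('model_edgetpu.tflite')
interpreter.allocate_tensors()
labels = read_label_file('labels.txt')  # replaces the custom load_labels()

# pycoral expects the caller to resize the image to the model's input size
image = Image.open('frame.png').convert('RGB').resize(
    common.input_size(interpreter), Image.LANCZOS)
common.set_input(interpreter, image)
interpreter.invoke()

# replaces classify_with_image(...); returns Class(id, score) tuples
classes = classify.get_classes(interpreter, top_k=3, score_threshold=0.1)
results = [(labels.get(c.id, c.id), c.score) for c in classes]
print(results)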

Multiple Securities Trading Algorithm

I am very new to Python and I am having trouble executing my algorithmic trading strategy on more than one security at a time. I am currently using these lines of code for the stocks:
data_p = pd.read_csv('AAPL_30m.csv', index_col = 0, parse_dates = True)
data_p.drop(columns = ['Adj Close'])
Does anyone know how I would go about properly adding more securities?
Since no data is provided, I can only give you a rough idea on how this can be done. Change directory to the folder with all your data series in csv files:
import pandas as pd
import os
os.chdir(r'C:\Users\username\Downloads\new')
files = os.listdir()
Assume the files in the folder are:
['AAPL.csv',
'AMZN.csv',
'GOOG.csv']
Then start with an empty dictionary d and loop through all the files in the directory, reading each as a pandas DataFrame. Eventually you can combine all of them into one big DataFrame (if you find that more useful):
d = {}
for f in files:
    name = f.split('.')[0]
    df = pd.read_csv(f)
    ...
    # *** Do your processing ***
    ...
    d[name] = df.copy()
dff = pd.concat(d)
Since I do not know your format and your index, I assume you can do pd.concat(d); alternatively, you may also try out pd.DataFrame(d).
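Applied to the snippet from the question, the per-file processing might look like this (a sketch; the file names are hypothetical, and note that drop returns a new DataFrame unless you reassign it):

import pandas as pd

d = {}
for f in ['AAPL_30m.csv', 'AMZN_30m.csv', 'GOOG_30m.csv']:  # hypothetical files
    name = f.split('_')[0]  # ticker symbol, e.g. 'AAPL'
    df = pd.read_csv(f, index_col=0, parse_dates=True)
    df = df.drop(columns=['Adj Close'])  # drop() is not in-place; reassign
    d[name] = df
dff = pd.concat(d, names=['Ticker', 'Datetime'])  # MultiIndex: (ticker, timestamp)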

Script working in Python2 but not in Python 3 (hashlib)

I worked today on a simple script to checksum files with all available hashlib algorithms (md5, sha1, ...). I wrote and debugged it with Python 2, but when I decided to port it to Python 3 it just won't work. The funny thing is that it works for small files, but not for big files. I thought there was a problem with the way I was buffering the file, but the error message makes me think it is related to the way I am doing the hexdigest (I think). Here is a copy of my entire script, so feel free to copy it, use it, and help me figure out what the problem is with it. The error I get when checksumming a 250 MB file is:
"'utf-8' codec can't decode byte 0xf3 in position 10: invalid continuation byte"
I googled it, but can't find anything that fixes it. Also, if you see better ways to optimize it, please let me know. My main goal is to make it work 100% in Python 3. Thanks
#!/usr/local/bin/python3
import hashlib
import argparse

def hashFile(algorithm="md5", filepaths=[], blockSize=4096):
    algorithmType = getattr(hashlib, algorithm.lower())()  # Default: hashlib.md5()
    # Open file and extract data in chunks
    for path in filepaths:
        try:
            with open(path) as f:
                while True:
                    dataChunk = f.read(blockSize)
                    if not dataChunk:
                        break
                    algorithmType.update(dataChunk.encode())
                yield algorithmType.hexdigest()
        except Exception as e:
            print(e)

def main():
    # DEFINE ARGUMENTS
    parser = argparse.ArgumentParser()
    parser.add_argument('filepaths', nargs="+", help='Specifies the path of the file(s) to hash')
    parser.add_argument('-a', '--algorithm', action='store', dest='algorithm', default="md5",
                        help='Specifies what algorithm to use ("md5", "sha1", "sha224", "sha384", "sha512")')
    arguments = parser.parse_args()
    algo = arguments.algorithm
    if algo.lower() in ("md5", "sha1", "sha224", "sha384", "sha512"):
        for hashValue in hashFile(algo, arguments.filepaths):
            print(hashValue)
    else:
        print("Algorithm {0} is not available in this script".format(algo))

if __name__ == "__main__":
    main()
Here is the code that works in Python 2; I will just put it here in case you want to use it without having to modify the one above.
#!/usr/bin/python
import hashlib
import argparse

def hashFile(algorithm="md5", filepaths=[], blockSize=4096):
    '''
    Hashes a file. In order to reduce the amount of memory used by the script,
    it hashes the file in chunks instead of putting the whole file in memory
    '''
    algorithmType = hashlib.new(algorithm)  # getattr(hashlib, algorithm.lower())()  # Default: hashlib.md5()
    # Open file and extract data in chunks
    for path in filepaths:
        try:
            with open(path, mode='rb') as f:
                while True:
                    dataChunk = f.read(blockSize)
                    if not dataChunk:
                        break
                    algorithmType.update(dataChunk)
                yield algorithmType.hexdigest()
        except Exception as e:
            print e

def main():
    # DEFINE ARGUMENTS
    parser = argparse.ArgumentParser()
    parser.add_argument('filepaths', nargs="+", help='Specifies the path of the file(s) to hash')
    parser.add_argument('-a', '--algorithm', action='store', dest='algorithm', default="md5",
                        help='Specifies what algorithm to use ("md5", "sha1", "sha224", "sha384", "sha512")')
    arguments = parser.parse_args()
    # Call generator function to yield hash value
    algo = arguments.algorithm
    if algo.lower() in ("md5", "sha1", "sha224", "sha384", "sha512"):
        for hashValue in hashFile(algo, arguments.filepaths):
            print hashValue
    else:
        print "Algorithm {0} is not available in this script".format(algo)

if __name__ == "__main__":
    main()
I haven't tried it in Python 3, but I get the same error in Python 2.7.5 for binary files (the only difference is that mine is with the ascii codec). Instead of encoding the data chunks, open the file directly in binary mode:
with open(path, 'rb') as f:
    while True:
        dataChunk = f.read(blockSize)
        if not dataChunk:
            break
        algorithmType.update(dataChunk)
    yield algorithmType.hexdigest()
Apart from that, I'd use the method hashlib.new instead of getattr, and hashlib.algorithms_available to check if the argument is valid.
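Putting those suggestions together, a sketch of the hashing function might look like this. It also creates a fresh hash object per file, so each digest covers only that file (the original reused one object across all files, yielding cumulative digests):

import hashlib

def hashFile(algorithm="md5", filepaths=(), blockSize=4096):
    # hashlib.algorithms_available lists every algorithm this build supports
    if algorithm.lower() not in hashlib.algorithms_available:
        raise ValueError("Algorithm {0} is not available".format(algorithm))
    for path in filepaths:
        h = hashlib.new(algorithm.lower())  # fresh hash object per file
        with open(path, 'rb') as f:         # binary mode: no decoding, no .encode()
            while True:
                dataChunk = f.read(blockSize)
                if not dataChunk:
                    break
                h.update(dataChunk)
        yield h.hexdigest()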
