I execute `include("C:\Users\Administrator\Desktop\ERGO.jl-main\notebooks\example.jl")`, but it reports an error. Here is part of the code:
function main()
using Images, ImageView, DataFrames, CSV, Statistics, LinearAlgebra
import Glob
import Distributions
import JSON
import ImageMagick
using Logging
import Gtk
import DataStructures
import CSV
import Random
import StatsPlots
import ProgressMeter.@showprogress
## These are const, if you change them while this notebook is running behavior will be undefined.
const ROI_PX = 7; # --> ROI is 7*2+1 x 7*2+1 pixels
const PX_NM = 100; # 1 pixel is 100nm
const FRAMESIZE=64; # x/y dim of frame
rootpath = "../data"
@assert ispath(rootpath)
fpath = joinpath(rootpath, "sequence-MT0.N1.HD-AS-Exp-as-list");
pospath = rootpath
outdir = joinpath(rootpath, "output")
if !ispath(outdir)
mkpath(outdir)
end
#info "Using $(fpath) as inputdirectory, $(outdir) as output"
....
end
**ERROR: LoadError: LoadError: UndefVarError: @showprogress not defined**
Stacktrace:
 [1] top-level scope
   @ :0
 [2] include(fname::String)
   @ Base.MainInclude .\client.jl:444
 [3] top-level scope
   @ none:1
in expression starting at C:\Users\Administrator\Desktop\ERGO.jl-main\notebooks\example.jl:53
in expression starting at C:\Users\Administrator\Desktop\ERGO.jl-main\notebooks\example.jl:4
I don't know how to solve this error. Could someone help me? Thanks a lot!
Don't put package loading inside a function:
julia> function main()
           import ProgressMeter.@showprogress
           @showprogress for _ = 1:10
           end
       end
ERROR: LoadError: UndefVarError: @showprogress not defined
in expression starting at REPL[1]:3
julia> import ProgressMeter.@showprogress

julia> function main()
           @showprogress for _ = 1:10
           end
       end
main (generic function with 1 method)
Put them outside of your main(). The reason is that macro expansion happens before runtime, so @showprogress must already be defined when the body of main() is parsed, not when main() is called.
Related
I am trying to wrap some Fortran modules so they can be called from Python using the ctypes library. I am going directly from Fortran to Python without writing any C code, simply compiling a shared library.
Yet I am struggling with what the best approach for memory allocation should be, namely who should own the data: should I allocate inside the Fortran module and then expose the data through ctypes, or should I allocate with numpy and pass the array as an argument to Fortran?
I am also open to the intermediate step of writing C wrappers (Fortran -> C -> Python), but then the same question arises.
Thanks!
PS: I know F2Py exists I know how to use it but I am interested in going the ctypes way.
mod.f90:
module angio_global
use iso_c_binding
implicit none
integer,parameter :: dp = kind(1.0d0)   ! kind parameter for the _dp literals used below
integer :: lx,ly
real*8 :: raio_init
real*8,allocatable :: phi(:,:)
real*8 :: epsilon,rho_phi
contains
subroutine init_fields(phi_,lx_,ly_)
integer ,intent(in) :: lx_,ly_
real*8,intent(in) :: phi_(lx_,ly_)
integer :: i,j
real*8 :: dx,dy
lx = lx_
ly = ly_
allocate(phi(0:lx+1,0:ly+1))
phi(1:lx,1:ly) = phi_
raio_init = 20.0_dp
epsilon = 1.0_dp
rho_phi = 1.0_dp
do j=1,ly
do i=1,lx
dx = i-lx/2.0_dp
dy = j-ly/2.0_dp
if(dx**2+dy**2<raio_init**2) phi(i,j) = 1.0_dp
enddo
enddo
end subroutine
subroutine wrap_init_fields(phi_,lx_,ly_) bind(c)
integer(c_int),intent(in),value :: lx_,ly_
real(c_double),intent(in) :: phi_(lx_,ly_)
call init_fields(phi_,lx_,ly_)
end subroutine
subroutine wrap_get_phi(phi_,lx_,ly_) bind(c)
integer(c_int),intent(in),value :: lx_,ly_
real(c_double),intent(inout) :: phi_(lx_,ly_)
phi_ = phi(1:lx,1:ly)
end subroutine
end module
caller.py:
from ctypes import CDLL, c_int, c_double,c_float
import numpy as np
from numpy.ctypeslib import ndpointer
import matplotlib.pyplot as plt
angio = CDLL('./global.so')
def init_fields(phi):
    Nx,Ny = np.shape(phi)
    phi_ptr = ndpointer(dtype=phi.dtype,shape=phi.shape,flags="F_CONTIGUOUS")
    angio.wrap_init_fields.argtypes = [phi_ptr,c_int,c_int]
    angio.wrap_init_fields(phi,Nx,Ny)
    return

def get_phi(phi):
    Nx,Ny = np.shape(phi)
    phi_ptr = ndpointer(dtype=phi.dtype,shape=phi.shape,flags="F_CONTIGUOUS")
    angio.wrap_get_phi.argtypes = [phi_ptr,c_int,c_int]
    angio.wrap_get_phi(phi,Nx,Ny)
phi = np.zeros((200,200),dtype = np.float64,order="F")-1.0
init_fields(phi)
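As a usage sketch of the numpy-owns-the-data route shown above: Python allocates Fortran-ordered arrays, hands one to wrap_init_fields, and reads the module's state back through wrap_get_phi into a second array. Only the shapes, wrapper names and the global.so filename are taken from the code above; the rest is illustrative, not a definitive recipe.

import numpy as np
import matplotlib.pyplot as plt
from ctypes import CDLL, c_int
from numpy.ctypeslib import ndpointer

angio = CDLL('./global.so')

# Fortran-ordered float64 arrays so the memory layout matches real(c_double) :: phi_(lx_,ly_)
phi_in = np.zeros((200, 200), dtype=np.float64, order="F") - 1.0
phi_out = np.empty_like(phi_in)

# Declare the argument types once; ndpointer also validates dtype/contiguity at call time.
ptr = ndpointer(dtype=np.float64, ndim=2, flags="F_CONTIGUOUS")
angio.wrap_init_fields.argtypes = [ptr, c_int, c_int]
angio.wrap_get_phi.argtypes = [ptr, c_int, c_int]

nx, ny = phi_in.shape
angio.wrap_init_fields(phi_in, nx, ny)   # module copies the data into its own phi array
angio.wrap_get_phi(phi_out, nx, ny)      # module writes phi(1:lx,1:ly) back into phi_out

plt.imshow(phi_out.T, origin="lower")
plt.show()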
I'm using Python's watchdog module to listen for created events on a certain directory, where I do some processing on newly created .csv files. When I test my code with nothing in the handler, the watchdog fires correctly for all pasted/created files, but when I add my code/logic inside the handler it fires for fewer files than expected (52 out of 60 files, for example).
OS used: Windows 10, code is expected to work on a Windows server.
Python version: 3.7.3
Code:
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import pandas as pd
import numpy as np
from error_handling import ErrorHandler
import os
from timeloop import Timeloop
from datetime import timedelta
import json
from fill_data import OnMyWatch2
class OnMyWatch:
    # Set the directory on watch
    watchDirectory = "."

    def __init__(self):
        self.observer1 = Observer()

    def run(self):
        self.observer1.schedule(Handler() , self.watchDirectory, recursive = True)
        self.observer1.start()
        try:
            while True:
                time.sleep(1)
        except:
            self.observer1.stop()
            print("Observer Stopped")
        self.observer1.join()

class Handler(FileSystemEventHandler):
    @staticmethod
    def on_any_event(event):
        if event.is_directory:
            return None
        elif event.event_type == 'created':
            try:
                # Event is created, you can process it now
                print("Watchdog received created event - % s." % event.src_path)
                pathlower = str(event.src_path).lower()
                if ".csv" in pathlower:
                    print("File is csv file")
                    # Once any code is added here the problem happens
                    # Example:
                    # df = pd.read_csv(event.src_path, names= ALL_INI.columnlistbundle.split("|"), sep="|", encoding='latin-1', engine='python')
                    # arraySelectedColumnsBundle = ALL_INI.selectedcolumnsbundle.split(",")
                    # bundle_df = df[np.array(arraySelectedColumnsBundle)]
                else:
                    print("File is not csv file")
            except Exception as e:
                ErrorHandler(0, 'In Observer ', '-7', 'Exception ' + str(e), '', 1, '')

if __name__ == '__main__':
    if os.path.isdir("."):
        watch = OnMyWatch()
        watch.run()
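One common pattern for this kind of symptom, sketched here purely as an illustration, is to keep the event handler as cheap as possible and hand the file paths to a worker thread through a queue, so the observer thread is never blocked while new files keep arriving. process_csv below is a hypothetical stand-in for the pandas logic, not part of the original code.

import queue
import threading
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

work_queue = queue.Queue()

def process_csv(path):
    # Hypothetical stand-in for the pandas processing from the question.
    print("processing", path)

def worker():
    while True:
        path = work_queue.get()      # blocks until a path is available
        try:
            process_csv(path)
        finally:
            work_queue.task_done()

class QueueingHandler(FileSystemEventHandler):
    def on_created(self, event):
        if not event.is_directory and event.src_path.lower().endswith(".csv"):
            work_queue.put(event.src_path)   # cheap: just enqueue and return

if __name__ == "__main__":
    threading.Thread(target=worker, daemon=True).start()
    observer = Observer()
    observer.schedule(QueueingHandler(), ".", recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()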
I'm trying to code a password generator but it's not working and I can't understand the error.
import random
import string
lw = list(string.ascii_lowercase)
uw = list(string.ascii_uppercase)
ns = list(string.digits)
password = ""
def addLW():
    f = randrange(1, len(lw))
    password = password + lw[f]

def addUW():
    f = randrange(1, len(lw))
    password = password + uw[f]

def addN():
    f = randrange(1, len(lw))
    password = password + ns[f]

funcs = [addLW, addUW, addN]

maxx = input("Password generator.\nMax: ")
if maxx.isdigit():
    maxx = int(maxx)
    for i in range(maxx):
        func = random.choice(funcs)
        func()
    print(f"Password: {password}")
else:
    print("Error")
Full error:
Traceback (most recent call last):
File "Password Generator.py", line 29, in <module>
func()
File "Password Generator.py", line 14, in addUW
f = randrange(1, len(lw))
NameError: name 'randrange' is not defined
I don't understand because I've already imported 'random'...
import random
You've imported random. That means your global namespace now contains a binding to the random module's namespace. It does not contain randrange() or anything else within that namespace, so you need to explicitly use random.randrange() if you want the lookup to find that function.
You can bring randrange itself in to the global namespace with:
from random import randrange
but that suffers from a few issues:
that only gives you randrange(), not any other stuff from random;
it will quickly pollute your global namespace with names if you need other things; and
it will get tedious if you want to import a large number of things (unless you import *, but see the previous bullet point about polluting the global namespace).
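A quick sketch of the difference:

import random

print(random.randrange(1, 10))   # works: the name is looked up inside the random module
# print(randrange(1, 10))        # NameError: 'randrange' is not defined in this namespace

from random import randrange
print(randrange(1, 10))          # works now: the function itself is bound in this namespace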
Recently I wrote some multiprocessing code in Python 3.4 to download images. It works blazingly fast at first, but then I get the following error and cannot run the program anymore.
Traceback (most recent call last):
File "multiprocessing_d.py", line 23, in <module>
main()
File "multiprocessing_d.py", line 16, in main
p.map(download, lines)
File "/usr/local/lib/python3.4/multiprocessing/pool.py", line 260, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/usr/local/lib/python3.4/multiprocessing/pool.py", line 608, in get
raise self._value
multiprocessing.pool.MaybeEncodingError: Error sending result: '<multiprocessing.pool.ExceptionWithTraceback object at 0x7f1e047f32e8>'. Reason: 'TypeError("cannot serialize '_io.BufferedReader' object",)'
My code is as follows:
download_helper.py
import sys
import os
from pathlib import Path
url_prefix = r"Some prefix"
def setup_download_dir(dictionary):
    download_dir = Path(dictionary)
    if not download_dir.exists():
        download_dir.mkdir()
    return dictionary

def download_link(dictionary, line):
    from urllib.request import urlretrieve
    itemid = line.split()[0].decode()
    link = line.split()[1].decode()
    if (link.startswith("http")):
        image_url = link
    else:
        image_url = url_prefix + link
    if os.path.isfile(dictionary + "/" + itemid + ".jpg"):
        #print("Already have " + itemid + ".jpg")
        pass
    else:
        urlretrieve(image_url, dictionary + "/" + itemid + ".jpg")
multiprocessing_d.py
from functools import partial
from multiprocessing.pool import Pool
import sys
from time import time
from download_helper import setup_download_dir, download_link
def main():
    file_path = sys.argv[1]
    dic_path = sys.argv[2]
    download_dir = setup_download_dir(dic_path)
    download = partial(download_link, download_dir)
    with open(file_path, 'rb') as f:
        lines = f.readlines()
        ts = time()
        p = Pool(processes=16, maxtasksperchild=1)
        p.map(download, lines)
        p.close()
        p.join()
        print('Took {}s'.format(time() - ts))
        f.close()

if __name__ == "__main__":
    main()
I've tried searching online but didn't find much useful information. My suspicion is that some exception is raised in urlretrieve, but I don't know how to debug it. Any comments or suggestions would be appreciated!
James
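The error text itself says the pool failed while sending a result back: a worker raised an exception, and that exception apparently carries an open _io.BufferedReader (for example an HTTP error response from urlretrieve), which cannot be pickled back to the parent process. One way to confirm the suspicion about urlretrieve, offered here only as a debugging sketch and not as the thread's answer, is to catch exceptions inside the worker and return plain strings, so nothing unpicklable ever has to travel through the pool:

# Hypothetical debugging wrapper around download_link; names mirror the code above.
import traceback
from functools import partial
from multiprocessing.pool import Pool

from download_helper import setup_download_dir, download_link

def safe_download(download_dir, line):
    try:
        download_link(download_dir, line)
        return (line, None)                      # success
    except Exception:
        return (line, traceback.format_exc())    # plain strings always pickle

def run(file_path, dic_path):
    download_dir = setup_download_dir(dic_path)
    with open(file_path, 'rb') as f:
        lines = f.readlines()
    with Pool(processes=16) as p:
        results = p.map(partial(safe_download, download_dir), lines)
    for line, err in results:
        if err is not None:
            print("FAILED:", line)
            print(err)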
I'm working with Graphite monitoring using Carbon and Ceres as the storage method. I have some problems with correcting bad data. It seems that (due to various problems) I've ended up with overlapping files. That is, since Carbon / Ceres stores the data as timestamp#interval.slice, I can have two or more files with overlapping time ranges.
There are two kinds of overlaps:
File A:   +------------+        orig file
File B:       +-----+           subset
File C:           +---------+   overlap
This is causing problems because the existing tools available (ceres-maintenance defrag and rollup) don't cope with these overlaps. Instead, they skip the directory and move on. This is a problem, obviously.
I've created a script that fixes this problem, as follows:
For subsets, just delete the subset file.
For overlaps, use the filesystem 'truncate' on the orig file at the point where the next file starts. While it is possible instead to cut off the start of the overlapping file and rename it properly, I would suggest that this is fraught with danger.
I've found that it's possible to do this in two ways:
Walk the dirs and iterate over the files, fixing as you go: find the subset files and remove them; or
Walk the dirs and fix all the problems in a dir before moving on. This is BY FAR the faster approach, since the dir walk is hugely time consuming.
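Before the full script, here is a minimal sketch of the truncation arithmetic the overlap case relies on (the 8-byte point size and the start#frequency filename layout are taken from the script below; the example numbers are made up):

# Sketch of the truncation-size computation for the overlap case.
# A slice file named "<timeStart>#<freq>.slice" holds 8-byte points at a fixed
# interval, so it covers [timeStart, timeStart + numPoints * freq).

def truncated_size_bytes(first_start, first_freq, second_start):
    """Bytes to keep in the first file so it ends where the second file begins."""
    keep_seconds = second_start - first_start
    keep_points = keep_seconds // first_freq
    return keep_points * 8

# Example: first file starts at 1346805360 with 60s resolution and the next
# (overlapping) file starts 600s later -> keep 10 points = 80 bytes.
print(truncated_size_bytes(1346805360, 60, 1346805360 + 600))   # 80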
Code:
#!/usr/bin/env python2.6
################################################################################
import io
import os
import time
import sys
import string
import logging
import unittest
import datetime
import random
import zmq
import json
import socket
import traceback
import signal
import select
import simplejson
import cPickle as pickle
import re
import shutil
import collections
from pymongo import Connection
from optparse import OptionParser
from pprint import pprint, pformat
################################################################################
class SliceFile(object):
    def __init__(self, fname):
        self.name = fname
        basename = fname.split('/')[-1]
        fnArray = basename.split('#')
        self.timeStart = int(fnArray[0])
        self.freq = int(fnArray[1].split('.')[0])
        self.size = None
        self.numPoints = None
        self.timeEnd = None
        self.deleted = False

    def __repr__(self):
        out = "Name: %s, tstart=%s tEnd=%s, freq=%s, size=%s, npoints=%s." % (
            self.name, self.timeStart, self.timeEnd, self.freq, self.size, self.numPoints)
        return out

    def setVars(self):
        self.size = os.path.getsize(self.name)
        self.numPoints = int(self.size / 8)
        self.timeEnd = self.timeStart + (self.numPoints * self.freq)
################################################################################
class CeresOverlapFixup(object):
    def __del__(self):
        import datetime
        self.writeLog("Ending at %s" % (str(datetime.datetime.today())))
        self.LOGFILE.flush()
        self.LOGFILE.close()

    def __init__(self):
        self.verbose = False
        self.debug = False
        self.LOGFILE = open("ceresOverlapFixup.log", "a")
        self.badFilesList = set()
        self.truncated = 0
        self.subsets = 0
        self.dirsExamined = 0
        self.lastStatusTime = 0

    def getOptionParser(self):
        return OptionParser()

    def getOptions(self):
        parser = self.getOptionParser()
        parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False, help="debug mode for this program, writes debug messages to logfile." )
        parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="verbose mode for this program, prints a lot to stdout." )
        parser.add_option("-b", "--basedir", action="store", type="string", dest="basedir", default=None, help="base directory location to start converting." )
        (options, args) = parser.parse_args()
        self.debug = options.debug
        self.verbose = options.verbose
        self.basedir = options.basedir
        assert self.basedir, "must provide base directory."

    # Examples:
    # ./updateOperations/1346805360#60.slice
    # ./updateOperations/1349556660#60.slice
    # ./updateOperations/1346798040#60.slice
    def getFileData(self, inFilename):
        ret = SliceFile(inFilename)
        ret.setVars()
        return ret

    def removeFile(self, inFilename):
        os.remove(inFilename)
        #self.writeLog("removing file: %s" % (inFilename))
        self.subsets += 1

    def truncateFile(self, fname, newSize):
        if self.verbose:
            self.writeLog("Truncating file, name=%s, newsize=%s" % (pformat(fname), pformat(newSize)))
        IFD = None
        try:
            IFD = os.open(fname, os.O_RDWR|os.O_CREAT)
            os.ftruncate(IFD, newSize)
            os.close(IFD)
            self.truncated += 1
        except:
            self.writeLog("Exception during truncate: %s" % (traceback.format_exc()))
        try:
            os.close(IFD)
        except:
            pass
        return

    def printStatus(self):
        now = self.getNowTime()
        if ((now - self.lastStatusTime) > 10):
            self.writeLog("Status: time=%d, Walked %s dirs, subsetFilesRemoved=%s, truncated %s files." % (now, self.dirsExamined, self.subsets, self.truncated))
            self.lastStatusTime = now

    def fixupThisDir(self, inPath, inFiles):
        # self.writeLog("Fixing files in dir: %s" % (inPath))
        if not '.ceres-node' in inFiles:
            # self.writeLog("--> Not a slice directory, skipping.")
            return
        self.dirsExamined += 1
        sortedFiles = sorted(inFiles)
        sortedFiles = [x for x in sortedFiles if ((x != '.ceres-node') and (x.count('#') > 0)) ]
        lastFile = None
        fileObjList = []
        for thisFile in sortedFiles:
            wholeFilename = os.path.join(inPath, thisFile)
            try:
                curFile = self.getFileData(wholeFilename)
                fileObjList.append(curFile)
            except:
                self.badFilesList.add(wholeFilename)
                self.writeLog("ERROR: file %s, %s" % (wholeFilename, traceback.format_exc()))
        # name is timeStart, really.
        fileObjList = sorted(fileObjList, key=lambda thisObj: thisObj.name)
        while fileObjList:
            self.printStatus()
            changes = False
            firstFile = fileObjList[0]
            removedFiles = []
            for curFile in fileObjList[1:]:
                if (curFile.timeEnd <= firstFile.timeEnd):
                    # have subset file. elim.
                    self.removeFile(curFile.name)
                    removedFiles.append(curFile.name)
                    self.subsets += 1
                    changes = True
                    if self.verbose:
                        self.writeLog("Subset file situation. First=%s, overlap=%s" % (firstFile, curFile))
            fileObjList = [x for x in fileObjList if x.name not in removedFiles]
            if (len(fileObjList) < 2):
                break
            secondFile = fileObjList[1]
            # LT is right. FirstFile's timeEnd is always the first open time after first is done.
            # so, first starts#100, len=2, end=102, positions used=100,101. second start#102 == OK.
            if (secondFile.timeStart < firstFile.timeEnd):
                # truncate first file.
                # file_A (last): +---------+
                # file_B (curr):      +----------+
                # solve by truncating previous file at startpoint of current file.
                newLenFile_A_seconds = int(secondFile.timeStart - firstFile.timeStart)
                newFile_A_datapoints = int(newLenFile_A_seconds / firstFile.freq)
                newFile_A_bytes = int(newFile_A_datapoints) * 8
                if (not newFile_A_bytes):
                    fileObjList = fileObjList[1:]
                    continue
                assert newFile_A_bytes, "Must have size. newLenFile_A_seconds=%s, newFile_A_datapoints=%s, newFile_A_bytes=%s." % (newLenFile_A_seconds, newFile_A_datapoints, newFile_A_bytes)
                self.truncateFile(firstFile.name, newFile_A_bytes)
                if self.verbose:
                    self.writeLog("Truncate situation. First=%s, overlap=%s" % (firstFile, secondFile))
                self.truncated += 1
                fileObjList = fileObjList[1:]
                changes = True
            if not changes:
                fileObjList = fileObjList[1:]
    def getNowTime(self):
        return time.time()

    def walkDirStructure(self):
        startTime = self.getNowTime()
        self.lastStatusTime = startTime
        updateStatsDict = {}
        self.okayFiles = 0
        emptyFiles = 0
        for (thisPath, theseDirs, theseFiles) in os.walk(self.basedir):
            self.printStatus()
            self.fixupThisDir(thisPath, theseFiles)
            self.dirsExamined += 1
        endTime = time.time()
        # time.sleep(11)
        self.printStatus()
        self.writeLog( "now = %s, started at %s, elapsed time = %s seconds." % (startTime, endTime, endTime - startTime))
        self.writeLog( "Done.")

    def writeLog(self, instring):
        print instring
        print >> self.LOGFILE, instring
        self.LOGFILE.flush()

    def main(self):
        self.getOptions()
        self.walkDirStructure()