What's wrong with Windows Task Scheduler + pytesseract + multiprocessing? - windows

I have Python code that uses pytesseract and multiprocessing. When I start it manually from PyCharm, it works fine with any value of threads. When I start it from the Windows Task Scheduler with threads=1, it also works fine.
However, if I start it from the Windows Task Scheduler with threads=2 or more, it finishes without processing the images and without raising any errors.
I get log messages like the ones below: the script starts but does nothing, and there are no errors in the Windows logs.
2020-05-24 13:09:31,834;START
2020-05-24 13:09:31,834;threads: 2
2020-05-24 13:10:31,832;START
2020-05-24 13:10:31,832;threads: 2
2020-05-24 13:11:31,851;START
2020-05-24 13:11:31,851;threads: 2
Code
from PIL import Image
import pytesseract
from pytesseract import Output
import datetime
from glob import glob
import os
import multiprocessing
import cv2
import logging

def loggerinit(name, filename, overwrite):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # create the logging file handler
    fh = logging.FileHandler(filename, encoding='UTF-8')
    formatter = logging.Formatter('%(asctime)s;%(message)s')
    fh.setFormatter(formatter)
    # add handler to logger object
    logger.addHandler(fh)
    return logger

def getfiles(dirname, mask):
    return glob(os.path.join(dirname, mask))

def tess_file(fname):
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # the grayscale image has a single channel, so GRAY2RGB (not BGR2RGB) is needed here
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    im_for_T = Image.fromarray(img)
    pytesseract.pytesseract.tesseract_cmd = 'C://Tesseract-OCR//tesseract.exe'
    # TESSDATA_PREFIX only has an effect as an environment variable
    os.environ['TESSDATA_PREFIX'] = 'C:/Program Files/Tesseract-OCR/tessdata'
    try:
        os.environ['OMP_THREAD_LIMIT'] = '1'
        tess_data = pytesseract.image_to_osd(im_for_T, output_type=Output.DICT)
        return fname, tess_data
    except Exception:
        return fname, None

if __name__ == '__main__':
    logger = loggerinit('tess', 'tess.log', False)
    files = getfiles('Croped', '*.jpg')
    t1 = datetime.datetime.now()
    logger.info('START')
    threads = 2
    logger.info('threads: ' + str(threads))
    p = multiprocessing.Pool(threads)
    results = p.map(tess_file, files)
    e = []
    for r in results:
        if r[1] is None:  # tess_file returns (fname, None) when OCR failed
            e.append('OCR error: ' + r[0])
        else:
            print(r[0], ". rotate: ", r[1]['rotate'])
    p.close()
    p.join()
    t2 = datetime.datetime.now()
    delta = (t2 - t1).total_seconds()
    print('Total time: ', delta)
    print('Files: ', len(files))
    logger.info('Files: ' + str(len(files)))
    logger.info('Stop. Total time: ' + str(delta))
    # Print errors, if any
    for ee in e:
        print(ee)
What's wrong? How can I fix this issue?
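The only extra diagnostic I could think of so far is logging from inside each worker process, to see whether the pool's children ever start under the Task Scheduler. A minimal sketch (the initializer and the log file name are illustrative, not part of the script above):
import logging
import multiprocessing
import os

def worker_init():
    # Runs once in every child process. If nothing shows up in this log
    # when the task runs from the scheduler, the children never started.
    logging.basicConfig(filename='tess_workers.log', level=logging.INFO,
                        format='%(asctime)s;%(process)d;%(message)s')
    logging.info('worker started, pid=%d', os.getpid())

if __name__ == '__main__':
    pool = multiprocessing.Pool(2, initializer=worker_init)
    pool.close()
    pool.join()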

Related

Fine-tuning BERT for NER on macOS

I'm using a MacBook Air with macOS Monterey 12.5 (there are updates available; Ventura 13.1), Python 3.10.8 (I also tried 3.11).
Pylance pointed out that none of the imports I was trying to use were being resolved, so I changed the VS Code interpreter to Python 3.10.
Anyway, here's the code:
import pandas as pd
import torch
import numpy as np
from tqdm import tqdm
from transformers import BertTokenizerFast
from transformers import BertForTokenClassification
from torch.utils.data import Dataset, DataLoader

df = pd.read_csv('ner.csv')
labels = [i.split() for i in df['labels'].values.tolist()]
unique_labels = set()
for lb in labels:
    [unique_labels.add(i) for i in lb if i not in unique_labels]
# print(unique_labels)
labels_to_ids = {k: v for v, k in enumerate(sorted(unique_labels))}
ids_to_labels = {v: k for v, k in enumerate(sorted(unique_labels))}
# print(labels_to_ids)
text = df['text'].values.tolist()
example = text[36]
# print(example)

tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
text_tokenized = tokenizer(example, padding='max_length', max_length=512, truncation=True, return_tensors='pt')
'''
print(text_tokenized)
print(tokenizer.decode(text_tokenized.input_ids[0]))
'''

def align_label_example(tokenized_input, labels):
    word_ids = tokenized_input.word_ids()
    previous_word_idx = None
    label_ids = []
    for word_idx in word_ids:
        if word_idx is None:
            label_ids.append(-100)
        elif word_idx != previous_word_idx:
            try:
                label_ids.append(labels_to_ids[labels[word_idx]])
            except (KeyError, IndexError):
                label_ids.append(-100)
        else:
            label_ids.append(labels_to_ids[labels[word_idx]] if label_all_tokens else -100)
        previous_word_idx = word_idx
    return label_ids

label = labels[36]
label_all_tokens = False
new_label = align_label_example(text_tokenized, label)
'''
print(new_label)
print(tokenizer.convert_ids_to_tokens(text_tokenized['input_ids'][0]))
'''

def align_label(texts, labels):
    tokenized_inputs = tokenizer(texts, padding='max_length', max_length=512, truncation=True)
    word_ids = tokenized_inputs.word_ids()
    previous_word_idx = None
    label_ids = []
    for word_idx in word_ids:
        if word_idx is None:
            label_ids.append(-100)
        elif word_idx != previous_word_idx:
            try:
                label_ids.append(labels_to_ids[labels[word_idx]])
            except (KeyError, IndexError):
                label_ids.append(-100)
        else:
            try:
                label_ids.append(labels_to_ids[labels[word_idx]] if label_all_tokens else -100)
            except (KeyError, IndexError):
                label_ids.append(-100)
        previous_word_idx = word_idx
    return label_ids

class DataSequence(torch.utils.data.Dataset):
    def __init__(self, df):
        lb = [i.split() for i in df['labels'].values.tolist()]
        txt = df['text'].values.tolist()
        self.texts = [tokenizer(str(i), padding='max_length', max_length=512,
                                truncation=True, return_tensors='pt') for i in txt]
        self.labels = [align_label(i, j) for i, j in zip(txt, lb)]

    def __len__(self):
        return len(self.labels)

    def get_batch_data(self, idx):
        # returns the tokenized text for one sample (needed by __getitem__)
        return self.texts[idx]

    def get_batch_labels(self, idx):
        return torch.LongTensor(self.labels[idx])

    def __getitem__(self, idx):
        batch_data = self.get_batch_data(idx)
        batch_labels = self.get_batch_labels(idx)
        return batch_data, batch_labels

df = df[0:1000]
df_train, df_val, df_test = np.split(df.sample(frac=1, random_state=42),
                                     [int(.8 * len(df)), int(.9 * len(df))])

class BertModel(torch.nn.Module):
    def __init__(self):
        super(BertModel, self).__init__()
        self.bert = BertForTokenClassification.from_pretrained('bert-base-cased', num_labels=len(unique_labels))

    def forward(self, input_id, mask, label):
        output = self.bert(input_ids=input_id, attention_mask=mask, labels=label, return_dict=False)
        return output

def train_loop(model, df_train, df_val):
    train_dataset = DataSequence(df_train)
    val_dataset = DataSequence(df_val)
    train_dataloader = DataLoader(train_dataset, num_workers=4, batch_size=BATCH_SIZE, shuffle=True)
    val_dataloader = DataLoader(val_dataset, num_workers=4, batch_size=BATCH_SIZE)
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
    if use_cuda:
        model = model.cuda()
    best_acc = 0
    best_loss = 1000
    for epoch_num in range(EPOCHS):
        total_acc_train = 0
        total_loss_train = 0
        model.train()
        for train_data, train_label in tqdm(train_dataloader):
            train_label = train_label.to(device)
            mask = train_data['attention_mask'].squeeze(1).to(device)
            input_id = train_data['input_ids'].squeeze(1).to(device)
            optimizer.zero_grad()
            loss, logits = model(input_id, mask, train_label)
            for i in range(logits.shape[0]):
                logits_clean = logits[i][train_label[i] != -100]
                label_clean = train_label[i][train_label[i] != -100]
                predictions = logits_clean.argmax(dim=1)
                acc = (predictions == label_clean).float().mean()
                total_acc_train += acc
                total_loss_train += loss.item()
            loss.backward()
            optimizer.step()
        model.eval()
        total_acc_val = 0
        total_loss_val = 0
        for val_data, val_label in val_dataloader:
            val_label = val_label.to(device)
            mask = val_data['attention_mask'].squeeze(1).to(device)
            input_id = val_data['input_ids'].squeeze(1).to(device)
            loss, logits = model(input_id, mask, val_label)
            for i in range(logits.shape[0]):
                logits_clean = logits[i][val_label[i] != -100]
                label_clean = val_label[i][val_label[i] != -100]
                predictions = logits_clean.argmax(dim=1)
                acc = (predictions == label_clean).float().mean()
                total_acc_val += acc
                total_loss_val += loss.item()
        val_accuracy = total_acc_val / len(df_val)
        val_loss = total_loss_val / len(df_val)
        print(
            f'Epochs: {epoch_num + 1} | Loss: {total_loss_train / len(df_train): .3f} | Accuracy: {total_acc_train / len(df_train): .3f} | Val_Loss: {total_loss_val / len(df_val): .3f} | Accuracy: {total_acc_val / len(df_val): .3f}')

LEARNING_RATE = 5e-3
EPOCHS = 5
BATCH_SIZE = 2

model = BertModel()
train_loop(model, df_train, df_val)
And the debugger says:
Exception has occurred: RuntimeError (note: full exception trace is shown but execution is paused at: <module>)
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
File "/Users/filipedonatti/Projects/pyCodes/second_try.py", line 141, in train_loop
for train_data, train_label in tqdm(train_dataloader):
File "/Users/filipedonatti/Projects/pyCodes/second_try.py", line 197, in <module>
train_loop(model, df_train, df_val)
File "<string>", line 1, in <module> (Current frame)
By the way, despite using a Mac I have downloaded Anaconda Navigator; however, I've been trying and executing this code in VS Code. I installed numpy, torch, datasets and the other libraries through Brew and the pip3 command.
I'm at a loss. I can run the code in a Google Colab or Jupyter notebook, and I know that training models on my humble Mac is not advised, but I am just exercising this so I can later train and use the model on a much more powerful machine.
Please help me with this issue, I've been trying to find a solution for days.
Peace and happy holidays.
I've tried solving the issue by writing:
if __name__ == '__main__':
    freeze_support()
I've tried using this:
import parallelTestModule
extractor = parallelTestModule.ParallelExtractor()
extractor.runInParallel(numProcesses=2, numThreads=4)
So...
It turns out the correct way to solve this is to wrap the training call in a function, as such:
def run():
    model = BertModel()
    torch.multiprocessing.freeze_support()
    print('loop')
    train_loop(model, df_train, df_val)

if __name__ == '__main__':
    run()
That is, the model creation and the train_loop call at the end are moved into run(). Issue solved. For more, see this link: https://github.com/pytorch/pytorch/issues/5858
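In isolation, the spawn-safe idiom looks like this; a minimal sketch with a dummy dataset (nothing below comes from the training script itself):
import torch
from torch.utils.data import DataLoader, TensorDataset

def main():
    # On macOS/Windows the DataLoader workers are started with 'spawn',
    # which re-imports this module, so anything that kicks off work has
    # to live behind the __main__ guard.
    ds = TensorDataset(torch.arange(8).float().unsqueeze(1))
    dl = DataLoader(ds, batch_size=2, num_workers=2)
    for (batch,) in dl:
        print(batch.squeeze(1).tolist())

if __name__ == '__main__':
    main()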

I am trying to bundle my Python (3.5.3) Tkinter app using cx_Freeze (5.1.1). When I hide the command prompt, the app doesn't work.

As suggested here, I set base="Win32GUI" in my setup.py file to hide the command prompt. It does hide the command prompt, but then the app does not work. Basically, I am trying to make a native Windows MSI for the GUI that I have built around youtube-dl, a command-line tool used to download media from some of the most popular video hosting sites. Any help is much appreciated. Here is my app.py:
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog
from tkinter.ttk import Progressbar
import youtube_dl
import threading
import os

download_folder = os.path.expanduser("~") + "/Downloads/"
download_folder_chosen = ""

window = Tk()
window.title("IOB Youtube Downloader")
window.geometry('510x100')

def my_hook(d):
    if d:
        if d['status'] == 'downloading':
            percent_done = d['_percent_str']
            percent_done = percent_done.replace(" ", "")
            percent_done = percent_done.replace("%", "")
            bar['value'] = percent_done
            bar.grid(column=1, row=2, pady=15)
            bar_lbl.configure(text=percent_done + "%")
            bar_lbl.grid(column=1, row=3)
            txt['state'] = DISABLED
            btn['state'] = DISABLED
        if d['status'] == 'finished':
            bar.grid_forget()
            txt['state'] = NORMAL
            btn['state'] = NORMAL
            bar_lbl.configure(text="Download Completed !!!")
            bar_lbl.grid(column=1, row=2)
            messagebox.showinfo('IOB Youtube Downloader', 'Download Complete')
        if d['status'] == 'error':
            print("\n" * 10)
            print(d)
            messagebox.showerror('IOB Youtube Downloader', 'Download Error')
    else:
        bar_lbl.configure(text="Download Error. Please try again !!!")
        bar_lbl.grid(column=1, row=2)

def start_thread():
    t1 = threading.Thread(target=clicked, args=())
    t1.start()

def clicked():
    res = txt.get()
    if download_folder_chosen != "":
        location = download_folder_chosen + "/"
    else:
        location = download_folder
    ydl_opts = {
        'progress_hooks': [my_hook],
        'format': 'best',
        'outtmpl': location + u'%(title)s-%(id)s.%(ext)s',
    }
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([res])
    except:
        messagebox.showerror('IOB Youtube Downloader', 'Download Error')

def choose_directory():
    global download_folder_chosen
    current_directory = filedialog.askdirectory()
    download_folder_chosen = current_directory
    messagebox.showinfo('IOB Youtube Downloader', 'Download Location:- ' + download_folder_chosen)

style = ttk.Style()
style.theme_use('default')
style.configure("blue.Horizontal.TProgressbar", background='blue')
bar = Progressbar(window, length=200, style='black.Horizontal.TProgressbar')
bar_lbl = Label(window, text="")
lbl = Label(window, text="Paste URL")
lbl.grid(column=0, row=0)
txt = Entry(window, width=60)
txt.grid(column=1, row=0)
btn = Button(window, text="Download", command=start_thread)
btn.grid(column=2, row=0)
btn2 = Button(window, text="...", command=choose_directory)
btn2.grid(column=3, row=0)
window.iconbitmap('favicon.ico')
window.mainloop()
And here is the setup.py file that I use to build the bundled exe with cx_Freeze:
from cx_Freeze import setup, Executable
import sys
import os

base = None
if sys.platform == 'win32':
    base = "Win32GUI"
    os.environ["TCL_LIBRARY"] = r"C:\Python35\tcl\tcl8.6"
    os.environ["TK_LIBRARY"] = r"C:\Python35\tcl\tk8.6"

setup(
    name="IOB Youtube Downloader",
    options={"build_exe": {"packages": ["tkinter"],
                           "include_files": [r"C:\Python35\DLLs\tk86t.dll",
                                             r"C:\Python35\DLLs\tcl86t.dll",
                                             r"E:\Youtube_Downloader\Src\favicon.ico"]}},
    version="1.0",
    author="IO-Bridges",
    description="Download videos from all popular video streaming sites.",
    executables=[Executable(
        r"downloader.py",
        # base=base,  # <---- here setting the base
        shortcutName="IOB Youtube Downloader",
        shortcutDir="DesktopFolder",
        icon="favicon.ico"
    )]
)
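One detail that may matter here: with base="Win32GUI" the frozen app has no console, so sys.stdout and sys.stderr can be None, and youtube-dl's progress output will then raise as soon as a download starts. A sketch of a guard for the top of app.py (the log path is illustrative, and this is a common workaround rather than a confirmed fix for this app):
import os
import sys

# In a console-less frozen build there is nowhere for print() to write;
# redirect stdout/stderr to a file so chatty libraries don't crash.
if getattr(sys, 'frozen', False) and (sys.stdout is None or sys.stderr is None):
    log = open(os.path.join(os.path.expanduser('~'), 'iob_downloader.log'), 'a')
    sys.stdout = sys.stdout or log
    sys.stderr = sys.stderr or log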

Python 3.4 Multiprocess throws TypeError("cannot serialize '_io.BufferedReader' object",)

Recently I wrote some multiprocess code in Python 3.4 to download images. It works blazingly fast at first, but then I get the following error and cannot start the program anymore.
Traceback (most recent call last):
File "multiprocessing_d.py", line 23, in <module>
main()
File "multiprocessing_d.py", line 16, in main
p.map(download, lines)
File "/usr/local/lib/python3.4/multiprocessing/pool.py", line 260, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/usr/local/lib/python3.4/multiprocessing/pool.py", line 608, in get
raise self._value
multiprocessing.pool.MaybeEncodingError: Error sending result: '<multiprocessing.pool.ExceptionWithTraceback object at 0x7f1e047f32e8>'. Reason: 'TypeError("cannot serialize '_io.BufferedReader' object",)'
My code is as follows.
download_helper.py
import sys
import os
from pathlib import Path

url_prefix = r"Some prefix"

def setup_download_dir(dictionary):
    download_dir = Path(dictionary)
    if not download_dir.exists():
        download_dir.mkdir()
    return dictionary

def download_link(dictionary, line):
    from urllib.request import urlretrieve
    itemid = line.split()[0].decode()
    link = line.split()[1].decode()
    if link.startswith("http"):
        image_url = link
    else:
        image_url = url_prefix + link
    if os.path.isfile(dictionary + "/" + itemid + ".jpg"):
        # print("Already have " + itemid + ".jpg")
        pass
    else:
        urlretrieve(image_url, dictionary + "/" + itemid + ".jpg")
multiprocessing_d.py
from functools import partial
from multiprocessing.pool import Pool
import sys
from time import time
from download_helper import setup_download_dir, download_link

def main():
    file_path = sys.argv[1]
    dic_path = sys.argv[2]
    download_dir = setup_download_dir(dic_path)
    download = partial(download_link, download_dir)
    with open(file_path, 'rb') as f:  # the with block closes f; no explicit f.close() needed
        lines = f.readlines()
    ts = time()
    p = Pool(processes=16, maxtasksperchild=1)
    p.map(download, lines)
    p.close()
    p.join()
    print('Took {}s'.format(time() - ts))

if __name__ == "__main__":
    main()
I've tried searching online but didn't find much useful information. My suspicion is that an exception might be raised in urlretrieve, but I don't know how to debug it. Any comments or suggestions would be appreciated!
James
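One way to confirm that suspicion is to keep the pool from ever having to pickle an exception: catch everything inside the worker and hand back a plain string instead. The traceback above shows the failure happening while sending the result back, because the exception raised in the child (likely from urlretrieve) carries an unpicklable _io.BufferedReader with it. A sketch of such a wrapper (safe_download is a new helper, not part of the original code):
import traceback
from functools import partial
from download_helper import download_link

def safe_download(dictionary, line):
    # Errors return to the parent as strings, which always pickle,
    # rather than as exception objects, which may not.
    try:
        download_link(dictionary, line)
        return None
    except Exception:
        return 'failed on {!r}:\n{}'.format(line, traceback.format_exc())
Then, in main(), p.map(partial(safe_download, download_dir), lines) returns a list whose non-None entries are the tracebacks of every failed download.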

The Image will not Show in a Loop, Why not?

(Skip to the hash-tags if in a hurry.)
This program will only work if it ends on the image showing.
I want to use it as a function inside another looping program, but it will not work there. It will display the stats of the Pokemon (p.whatever), but the image will not show. The image shows in IDLE Python 3.4, but not in the terminal. I've been stuck on this for months.
Here is the program that works (in IDLE Python 3.4, not the terminal):
import pykemon

print('What are you looking for?')
askedpokemon = input()
pokemonInDatabase = False
while pokemonInDatabase == False:
    pokemonInDatabase = True
    try:
        if any(ch in askedpokemon for ch in '123456789'):  # input looks like an id number
            p = pykemon.get(pokemon_id=askedpokemon)
        else:
            askedpokemon = askedpokemon.lower()
            p = pykemon.get(pokemon=askedpokemon)
        # Turns askedpokemon into a number
        askedpokemon = p.resource_uri
        askedpokemon = askedpokemon.replace('/api/v1/pokemon/', ' ')
        askedpokemon = askedpokemon.replace('/', ' ')
        askedpokemon = askedpokemon.strip()
    except pykemon.exceptions.ResourceNotFoundError:
        print(askedpokemon + " is not a valid Pokemon name or id number.")
        print('Try another')
        askedpokemon = input()
        pokemonInDatabase = False
print(p)
pTypes = p.types
for key, value in pTypes.items():
    pTypes = str(key)
print('     Type: ' + pTypes)
print('       HP: ' + str(p.hp))
print('   Attack: ' + str(p.attack))
print('  Defense: ' + str(p.defense))
print('   Sp Atk: ' + str(p.sp_atk))
print('   Sp Def: ' + str(p.sp_def))
print('    Speed: ' + str(p.speed))
print('Exp Yield: ' + str(p.exp))
#######################################################
import time
import urllib
import urllib.request
import tkinter as tk

root = tk.Tk()
url = "http://assets22.pokemon.com/assets/cms2/img/pokedex/full/526.png"
if len(askedpokemon) < 3:
    if len(askedpokemon) == 2:
        askedpokemon = '0' + askedpokemon
    if len(askedpokemon) == 1:
        askedpokemon = '00' + askedpokemon
url = url.replace('526', askedpokemon)
u = urllib.request.urlopen(url)
raw_data = u.read()
u.close()

import base64
b64_data = base64.encodestring(raw_data)
image = tk.PhotoImage(data=b64_data)
label = tk.Label(image=image)
label.pack()
##########################################################
Below is the working program with its modules.
https://drive.google.com/file/d/0B3Q4wQpL0nDUYWFFSjV3cUhXVWc/view?usp=sharing
Here is an MCVE that illustrates the problem; call the file tem.py.
import tkinter as tk
root = tk.Tk()
image = tk.PhotoImage(file='python.png')
label = tk.Label(image=image)
label.pack()
When you run this in a terminal, it runs, but the root window closes after label.pack(), before you can see it. Either put root.mainloop() at the end of the code or run it with python -i tem.py (as IDLE, in effect, does). The -i switch tells Python to drop into interactive mode at the end of the program instead of exiting. IDLE does this so one can interact with the live program before it is closed.
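With that change, tem.py becomes:
import tkinter as tk

root = tk.Tk()
image = tk.PhotoImage(file='python.png')
label = tk.Label(image=image)
label.pack()
root.mainloop()  # blocks until the window is closed, so the image stays visible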

Graphite / Carbon / Ceres node overlap

I'm working with Graphite monitoring using Carbon and Ceres as the storage method. I have some problems with correcting bad data. It seems that (due to various problems) I've ended up with overlapping files. That is, since Carbon / Ceres stores the data as timestamp#interval.slice, I can have two or more files with overlapping time ranges.
There are two kinds of overlaps:
File A:  +------------+          orig file
File B:     +-----+               subset
File C:           +---------+     overlap
This is causing problems because the existing tools available (ceres-maintenance defrag and rollup) don't cope with these overlaps. Instead, they skip the directory and move on. This is a problem, obviously.
I've created a script that fixes this problem, as follows:
For subsets, just delete the subset file.
For overlaps, use the filesystem truncate on the original file at the point where the next file starts. While it is possible to cut off the start of the overlapping file and rename it properly, I would suggest that this is fraught with danger.
I've found that it's possible to do this in two ways:
Walk the dirs, iterate over the files, fix problems as you go, and remove the subset files as you find them;
Walk the dirs and fix all the problems in a dir before moving on. This is BY FAR the faster approach, since the dir walk is hugely time-consuming.
Code:
#!/usr/bin/env python2.6
################################################################################
import os
import time
import datetime
import traceback
from optparse import OptionParser
from pprint import pformat
################################################################################
class SliceFile(object):
    def __init__(self, fname):
        self.name = fname
        basename = fname.split('/')[-1]
        fnArray = basename.split('#')
        self.timeStart = int(fnArray[0])
        self.freq = int(fnArray[1].split('.')[0])
        self.size = None
        self.numPoints = None
        self.timeEnd = None
        self.deleted = False

    def __repr__(self):
        out = "Name: %s, tstart=%s tEnd=%s, freq=%s, size=%s, npoints=%s." % (
            self.name, self.timeStart, self.timeEnd, self.freq, self.size, self.numPoints)
        return out

    def setVars(self):
        self.size = os.path.getsize(self.name)
        self.numPoints = int(self.size / 8)
        self.timeEnd = self.timeStart + (self.numPoints * self.freq)

################################################################################
class CeresOverlapFixup(object):
    def __del__(self):
        self.writeLog("Ending at %s" % (str(datetime.datetime.today())))
        self.LOGFILE.flush()
        self.LOGFILE.close()

    def __init__(self):
        self.verbose = False
        self.debug = False
        self.LOGFILE = open("ceresOverlapFixup.log", "a")
        self.badFilesList = set()
        self.truncated = 0
        self.subsets = 0
        self.dirsExamined = 0
        self.lastStatusTime = 0

    def getOptionParser(self):
        return OptionParser()

    def getOptions(self):
        parser = self.getOptionParser()
        parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False, help="debug mode for this program, writes debug messages to logfile.")
        parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="verbose mode for this program, prints a lot to stdout.")
        parser.add_option("-b", "--basedir", action="store", type="string", dest="basedir", default=None, help="base directory location to start converting.")
        (options, args) = parser.parse_args()
        self.debug = options.debug
        self.verbose = options.verbose
        self.basedir = options.basedir
        assert self.basedir, "must provide base directory."

    # Examples:
    # ./updateOperations/1346805360#60.slice
    # ./updateOperations/1349556660#60.slice
    # ./updateOperations/1346798040#60.slice
    def getFileData(self, inFilename):
        ret = SliceFile(inFilename)
        ret.setVars()
        return ret

    def removeFile(self, inFilename):
        os.remove(inFilename)
        # self.writeLog("removing file: %s" % (inFilename))
        self.subsets += 1

    def truncateFile(self, fname, newSize):
        if self.verbose:
            self.writeLog("Truncating file, name=%s, newsize=%s" % (pformat(fname), pformat(newSize)))
        IFD = None
        try:
            IFD = os.open(fname, os.O_RDWR | os.O_CREAT)
            os.ftruncate(IFD, newSize)
            os.close(IFD)
            self.truncated += 1
        except:
            self.writeLog("Exception during truncate: %s" % (traceback.format_exc()))
            try:
                os.close(IFD)
            except:
                pass
        return

    def printStatus(self):
        now = self.getNowTime()
        if ((now - self.lastStatusTime) > 10):
            self.writeLog("Status: time=%d, Walked %s dirs, subsetFilesRemoved=%s, truncated %s files." % (now, self.dirsExamined, self.subsets, self.truncated))
            self.lastStatusTime = now

    def fixupThisDir(self, inPath, inFiles):
        # self.writeLog("Fixing files in dir: %s" % (inPath))
        if not '.ceres-node' in inFiles:
            # self.writeLog("--> Not a slice directory, skipping.")
            return
        self.dirsExamined += 1
        sortedFiles = sorted(inFiles)
        sortedFiles = [x for x in sortedFiles if ((x != '.ceres-node') and (x.count('#') > 0))]
        fileObjList = []
        for thisFile in sortedFiles:
            wholeFilename = os.path.join(inPath, thisFile)
            try:
                curFile = self.getFileData(wholeFilename)
                fileObjList.append(curFile)
            except:
                self.badFilesList.add(wholeFilename)
                self.writeLog("ERROR: file %s, %s" % (wholeFilename, traceback.format_exc()))
        # name is timeStart, really.
        fileObjList = sorted(fileObjList, key=lambda thisObj: thisObj.name)
        while fileObjList:
            self.printStatus()
            changes = False
            firstFile = fileObjList[0]
            removedFiles = []
            for curFile in fileObjList[1:]:
                if (curFile.timeEnd <= firstFile.timeEnd):
                    # have subset file. elim. (removeFile() counts the subset)
                    self.removeFile(curFile.name)
                    removedFiles.append(curFile.name)
                    changes = True
                    if self.verbose:
                        self.writeLog("Subset file situation. First=%s, overlap=%s" % (firstFile, curFile))
            fileObjList = [x for x in fileObjList if x.name not in removedFiles]
            if (len(fileObjList) < 2):
                break
            secondFile = fileObjList[1]
            # LT is right. FirstFile's timeEnd is always the first open time after first is done.
            # so, first starts#100, len=2, end=102, positions used=100,101. second start#102 == OK.
            if (secondFile.timeStart < firstFile.timeEnd):
                # truncate first file.
                # file_A (last): +---------+
                # file_B (curr):      +----------+
                # solve by truncating previous file at startpoint of current file.
                newLenFile_A_seconds = int(secondFile.timeStart - firstFile.timeStart)
                newFile_A_datapoints = int(newLenFile_A_seconds / firstFile.freq)
                newFile_A_bytes = int(newFile_A_datapoints) * 8  # 8 bytes per datapoint in a slice
                if (not newFile_A_bytes):
                    fileObjList = fileObjList[1:]
                    continue
                assert newFile_A_bytes, "Must have size. newLenFile_A_seconds=%s, newFile_A_datapoints=%s, newFile_A_bytes=%s." % (newLenFile_A_seconds, newFile_A_datapoints, newFile_A_bytes)
                # truncateFile() counts the truncation
                self.truncateFile(firstFile.name, newFile_A_bytes)
                if self.verbose:
                    self.writeLog("Truncate situation. First=%s, overlap=%s" % (firstFile, secondFile))
                fileObjList = fileObjList[1:]
                changes = True
            if not changes:
                fileObjList = fileObjList[1:]

    def getNowTime(self):
        return time.time()

    def walkDirStructure(self):
        startTime = self.getNowTime()
        self.lastStatusTime = startTime
        for (thisPath, theseDirs, theseFiles) in os.walk(self.basedir):
            self.printStatus()
            self.fixupThisDir(thisPath, theseFiles)
        endTime = time.time()
        self.printStatus()
        self.writeLog("now = %s, started at %s, elapsed time = %s seconds." % (startTime, endTime, endTime - startTime))
        self.writeLog("Done.")

    def writeLog(self, instring):
        print instring
        print >> self.LOGFILE, instring
        self.LOGFILE.flush()

    def main(self):
        self.getOptions()
        self.walkDirStructure()

if __name__ == '__main__':
    CeresOverlapFixup().main()
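With the entry point above, the script is run against the base of the Ceres tree, for example (the script name and path are illustrative):
python2.6 ceresOverlapFixup.py -v -b /opt/graphite/storage/ceres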
