Graphite / Carbon / Ceres node overlap - slice

I'm working with Graphite monitoring, using Carbon and Ceres as the storage backend. I have some problems correcting bad data: it seems that (due to various problems) I've ended up with overlapping slice files. Since Carbon/Ceres stores the data as timestamp#interval.slice files (e.g. 1346805360#60.slice starts at Unix time 1346805360 and holds one datapoint per 60 seconds), I can have two or more files with overlapping time ranges.
There are two kinds of overlaps:
File A: +------------+         orig file
File B:    +-----+             subset
File C:        +---------+     overlap
This is causing problems because the existing tools (ceres-maintenance defrag and rollup) don't cope with these overlaps; instead, they skip the directory and move on. This is obviously a problem.

I've created a script that fixes this problem, as follows:
For subsets, just delete the subset file.
For overlaps, truncate the earlier file (with the filesystem truncate call) at the point where the next file starts. While it would also be possible to cut off the start of the later file and rename it accordingly, I would suggest that approach is fraught with danger. A sketch of the truncation arithmetic follows.
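As a minimal sketch of that arithmetic (assuming 8 bytes per datapoint, as Ceres uses; the names are illustrative, not from the script below):

def truncated_size(first_start, first_freq, next_start):
    # whole datapoints the earlier slice may keep before the next slice begins
    datapoints = (next_start - first_start) // first_freq
    return datapoints * 8  # new byte length for the earlier slice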
I've found that it's possible to do this in two ways:
Walk the dirs and iterate over the files, fixing subsets and overlaps as you go; or
Walk the dirs and fix all the problems in a dir before moving on (a minimal sketch of this follows).
The second is BY FAR the faster approach, since the dir walk is hugely time-consuming.
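A sketch of the per-directory approach (fixup_dir stands in for the per-directory logic; the full script below implements it):

import os

for path, dirs, files in os.walk(basedir):
    fixup_dir(path, files)  # resolve every subset/overlap in this dir before moving on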
Code:
#!/usr/bin/env python2.6
################################################################################
import os
import time
import datetime
import traceback
from optparse import OptionParser
from pprint import pformat
################################################################################
class SliceFile(object):
    """Parses a Ceres slice filename (timestamp#interval.slice) and its size."""
    def __init__(self, fname):
        self.name = fname
        basename = fname.split('/')[-1]
        fnArray = basename.split('#')
        self.timeStart = int(fnArray[0])
        self.freq = int(fnArray[1].split('.')[0])
        self.size = None
        self.numPoints = None
        self.timeEnd = None
        self.deleted = False

    def __repr__(self):
        out = "Name: %s, tstart=%s tEnd=%s, freq=%s, size=%s, npoints=%s." % (
            self.name, self.timeStart, self.timeEnd, self.freq, self.size, self.numPoints)
        return out

    def setVars(self):
        self.size = os.path.getsize(self.name)
        self.numPoints = int(self.size / 8)  # 8 bytes per datapoint
        self.timeEnd = self.timeStart + (self.numPoints * self.freq)
################################################################################
class CeresOverlapFixup(object):
    def __init__(self):
        self.verbose = False
        self.debug = False
        self.LOGFILE = open("ceresOverlapFixup.log", "a")
        self.badFilesList = set()
        self.truncated = 0
        self.subsets = 0
        self.dirsExamined = 0
        self.lastStatusTime = 0

    def __del__(self):
        self.writeLog("Ending at %s" % (str(datetime.datetime.today())))
        self.LOGFILE.flush()
        self.LOGFILE.close()

    def getOptionParser(self):
        return OptionParser()

    def getOptions(self):
        parser = self.getOptionParser()
        parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False,
                          help="debug mode for this program, writes debug messages to logfile.")
        parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
                          help="verbose mode for this program, prints a lot to stdout.")
        parser.add_option("-b", "--basedir", action="store", type="string", dest="basedir", default=None,
                          help="base directory location to start converting.")
        (options, args) = parser.parse_args()
        self.debug = options.debug
        self.verbose = options.verbose
        self.basedir = options.basedir
        assert self.basedir, "must provide base directory."

    # Example slice names:
    # ./updateOperations/1346805360#60.slice
    # ./updateOperations/1349556660#60.slice
    # ./updateOperations/1346798040#60.slice
    def getFileData(self, inFilename):
        ret = SliceFile(inFilename)
        ret.setVars()
        return ret

    def removeFile(self, inFilename):
        os.remove(inFilename)
        #self.writeLog("removing file: %s" % (inFilename))
        self.subsets += 1

    def truncateFile(self, fname, newSize):
        if self.verbose:
            self.writeLog("Truncating file, name=%s, newsize=%s" % (pformat(fname), pformat(newSize)))
        IFD = None
        try:
            IFD = os.open(fname, os.O_RDWR | os.O_CREAT)
            os.ftruncate(IFD, newSize)
            os.close(IFD)
            self.truncated += 1
        except:
            self.writeLog("Exception during truncate: %s" % (traceback.format_exc()))
            try:
                os.close(IFD)
            except:
                pass
        return
    def printStatus(self):
        now = self.getNowTime()
        if ((now - self.lastStatusTime) > 10):
            self.writeLog("Status: time=%d, Walked %s dirs, subsetFilesRemoved=%s, truncated %s files." % (
                now, self.dirsExamined, self.subsets, self.truncated))
            self.lastStatusTime = now

    def fixupThisDir(self, inPath, inFiles):
        # self.writeLog("Fixing files in dir: %s" % (inPath))
        if not '.ceres-node' in inFiles:
            # self.writeLog("--> Not a slice directory, skipping.")
            return
        sortedFiles = sorted(inFiles)
        sortedFiles = [x for x in sortedFiles if ((x != '.ceres-node') and (x.count('#') > 0))]
        fileObjList = []
        for thisFile in sortedFiles:
            wholeFilename = os.path.join(inPath, thisFile)
            try:
                curFile = self.getFileData(wholeFilename)
                fileObjList.append(curFile)
            except:
                self.badFilesList.add(wholeFilename)
                self.writeLog("ERROR: file %s, %s" % (wholeFilename, traceback.format_exc()))
        # Sort by start time rather than by name: a lexical sort only works
        # while every timestamp has the same number of digits.
        fileObjList = sorted(fileObjList, key=lambda thisObj: thisObj.timeStart)
        while fileObjList:
            self.printStatus()
            changes = False
            firstFile = fileObjList[0]
            removedFiles = []
            for curFile in fileObjList[1:]:
                if (curFile.timeEnd <= firstFile.timeEnd):
                    # Subset file: eliminate it. removeFile() also counts it.
                    self.removeFile(curFile.name)
                    removedFiles.append(curFile.name)
                    changes = True
                    if self.verbose:
                        self.writeLog("Subset file situation. First=%s, overlap=%s" % (firstFile, curFile))
            fileObjList = [x for x in fileObjList if x.name not in removedFiles]
            if (len(fileObjList) < 2):
                break
            secondFile = fileObjList[1]
            # LT is right: firstFile's timeEnd is the first open time after first is done.
            # e.g. first starts at 100, 2 points, freq=1 -> end=102, positions used=100,101;
            # so a second file starting at 102 is OK.
            if (secondFile.timeStart < firstFile.timeEnd):
                # Overlap: truncate the first file at the start point of the second.
                # file_A (first):  +---------+
                # file_B (second):      +----------+
                newLenFile_A_seconds = int(secondFile.timeStart - firstFile.timeStart)
                newFile_A_datapoints = int(newLenFile_A_seconds / firstFile.freq)
                newFile_A_bytes = int(newFile_A_datapoints) * 8
                if (not newFile_A_bytes):
                    fileObjList = fileObjList[1:]
                    continue
                self.truncateFile(firstFile.name, newFile_A_bytes)  # truncateFile() also counts it
                if self.verbose:
                    self.writeLog("Truncate situation. First=%s, overlap=%s" % (firstFile, secondFile))
                fileObjList = fileObjList[1:]
                changes = True
            if not changes:
                fileObjList = fileObjList[1:]
    def getNowTime(self):
        return time.time()

    def walkDirStructure(self):
        startTime = self.getNowTime()
        self.lastStatusTime = startTime
        for (thisPath, theseDirs, theseFiles) in os.walk(self.basedir):
            self.printStatus()
            self.fixupThisDir(thisPath, theseFiles)
            self.dirsExamined += 1
        endTime = time.time()
        self.printStatus()
        self.writeLog("started at %s, ended at %s, elapsed time = %s seconds." % (startTime, endTime, endTime - startTime))
        self.writeLog("Done.")

    def writeLog(self, instring):
        print instring
        print >> self.LOGFILE, instring
        self.LOGFILE.flush()

    def main(self):
        self.getOptions()
        self.walkDirStructure()

if __name__ == '__main__':
    CeresOverlapFixup().main()

Related

Fine tuning BERT for NER attempt on macOS

I'm using a MacBook Air with macOS Monterey 12.5 (there are updates available, up to Ventura 13.1).
Python version 3.10.8; I also tried 3.11.
Pylance pointed out that none of the imports I was trying to execute were being resolved, so I changed the VS Code interpreter to Python 3.10.
Anyways, here's the code:
import pandas as pd
import torch
import numpy as np
from tqdm import tqdm
from transformers import BertTokenizerFast
from transformers import BertForTokenClassification
from torch.utils.data import Dataset, DataLoader
df = pd.read_csv('ner.csv')
labels = [i.split() for i in df['labels'].values.tolist()]
unique_labels = set()
for lb in labels:
    unique_labels.update(lb)
# print(unique_labels)
labels_to_ids = {k: v for v, k in enumerate(sorted(unique_labels))}
ids_to_labels = {v: k for v, k in enumerate(sorted(unique_labels))}
# print(labels_to_ids)
text = df['text'].values.tolist()
example = text[36]
#print(example)
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
text_tokenized = tokenizer(example, padding='max_length', max_length=512, truncation=True, return_tensors='pt')
'''
print(text_tokenized)
print(tokenizer.decode(text_tokenized.input_ids[0]))
'''
def align_label_example(tokenized_input, labels):
    word_ids = tokenized_input.word_ids()
    previous_word_idx = None
    label_ids = []
    for word_idx in word_ids:
        if word_idx is None:
            label_ids.append(-100)
        elif word_idx != previous_word_idx:
            try:
                label_ids.append(labels_to_ids[labels[word_idx]])
            except:
                label_ids.append(-100)
        else:
            label_ids.append(labels_to_ids[labels[word_idx]] if label_all_tokens else -100)
        previous_word_idx = word_idx
    return label_ids
label = labels[36]
label_all_tokens = False
new_label = align_label_example(text_tokenized, label)
'''
print(new_label)
print(tokenizer.convert_ids_to_tokens(text_tokenized['input_ids'][0]))
'''
def align_label(texts, labels):
    tokenized_inputs = tokenizer(texts, padding='max_length', max_length=512, truncation=True)
    word_ids = tokenized_inputs.word_ids()
    previous_word_idx = None
    label_ids = []
    for word_idx in word_ids:
        if word_idx is None:
            label_ids.append(-100)
        elif word_idx != previous_word_idx:
            try:
                label_ids.append(labels_to_ids[labels[word_idx]])
            except:
                label_ids.append(-100)
        else:
            try:
                label_ids.append(labels_to_ids[labels[word_idx]] if label_all_tokens else -100)
            except:
                label_ids.append(-100)
        previous_word_idx = word_idx
    return label_ids
class DataSequence(torch.utils.data.Dataset):
    def __init__(self, df):
        lb = [i.split() for i in df['labels'].values.tolist()]
        txt = df['text'].values.tolist()
        self.texts = [tokenizer(str(i),
                                padding='max_length', max_length=512, truncation=True, return_tensors='pt') for i in txt]
        self.labels = [align_label(i, j) for i, j in zip(txt, lb)]

    def __len__(self):
        return len(self.labels)

    def get_batch_data(self, idx):
        return self.texts[idx]

    def get_batch_labels(self, idx):
        return torch.LongTensor(self.labels[idx])

    def __getitem__(self, idx):
        batch_data = self.get_batch_data(idx)
        batch_labels = self.get_batch_labels(idx)
        return batch_data, batch_labels
df = df[0:1000]
df_train, df_val, df_test = np.split(df.sample(frac=1, random_state=42),
                                     [int(.8 * len(df)), int(.9 * len(df))])
class BertModel(torch.nn.Module):
    def __init__(self):
        super(BertModel, self).__init__()
        # note: the tokenizer above is 'bert-base-uncased', while this checkpoint
        # is 'bert-base-cased' -- the two should normally match.
        self.bert = BertForTokenClassification.from_pretrained('bert-base-cased', num_labels=len(unique_labels))

    def forward(self, input_id, mask, label):
        output = self.bert(input_ids=input_id, attention_mask=mask, labels=label, return_dict=False)
        return output
def train_loop(model, df_train, df_val):
    train_dataset = DataSequence(df_train)
    val_dataset = DataSequence(df_val)
    train_dataloader = DataLoader(train_dataset, num_workers=4, batch_size=BATCH_SIZE, shuffle=True)
    val_dataloader = DataLoader(val_dataset, num_workers=4, batch_size=BATCH_SIZE)
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
    if use_cuda:
        model = model.cuda()
    best_acc = 0
    best_loss = 1000
    for epoch_num in range(EPOCHS):
        total_acc_train = 0
        total_loss_train = 0
        model.train()
        for train_data, train_label in tqdm(train_dataloader):
            train_label = train_label.to(device)
            mask = train_data['attention_mask'].squeeze(1).to(device)
            input_id = train_data['input_ids'].squeeze(1).to(device)
            optimizer.zero_grad()
            loss, logits = model(input_id, mask, train_label)
            for i in range(logits.shape[0]):
                logits_clean = logits[i][train_label[i] != -100]
                label_clean = train_label[i][train_label[i] != -100]
                predictions = logits_clean.argmax(dim=1)
                acc = (predictions == label_clean).float().mean()
                total_acc_train += acc
            total_loss_train += loss.item()
            loss.backward()
            optimizer.step()
        model.eval()
        total_acc_val = 0
        total_loss_val = 0
        for val_data, val_label in val_dataloader:
            val_label = val_label.to(device)
            mask = val_data['attention_mask'].squeeze(1).to(device)
            input_id = val_data['input_ids'].squeeze(1).to(device)
            loss, logits = model(input_id, mask, val_label)
            for i in range(logits.shape[0]):
                logits_clean = logits[i][val_label[i] != -100]
                label_clean = val_label[i][val_label[i] != -100]
                predictions = logits_clean.argmax(dim=1)
                acc = (predictions == label_clean).float().mean()
                total_acc_val += acc
            total_loss_val += loss.item()
        val_accuracy = total_acc_val / len(df_val)
        val_loss = total_loss_val / len(df_val)
        print(
            f'Epochs: {epoch_num + 1} | Loss: {total_loss_train / len(df_train): .3f} | Accuracy: {total_acc_train / len(df_train): .3f} | Val_Loss: {total_loss_val / len(df_val): .3f} | Val_Accuracy: {total_acc_val / len(df_val): .3f}')

LEARNING_RATE = 5e-3
EPOCHS = 5
BATCH_SIZE = 2

model = BertModel()
train_loop(model, df_train, df_val)
And the debugger says:
Exception has occurred: RuntimeError (note: full exception trace is shown but execution is paused at: <module>)
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
    if __name__ == '__main__':
        freeze_support()
        ...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
File "/Users/filipedonatti/Projects/pyCodes/second_try.py", line 141, in train_loop
for train_data, train_label in tqdm(train_dataloader):
File "/Users/filipedonatti/Projects/pyCodes/second_try.py", line 197, in <module>
train_loop(model, df_train, df_val)
File "<string>", line 1, in <module> (Current frame)
By the way,
despite using a Mac, I have downloaded Anaconda-Navigator; however, I've been trying and executing this code in VS Code. I've installed numpy, torch, datasets and other libraries through Brew and the pip3 command.
I'm at a loss. I can run the code in a Google Colab or Jupyter notebook, and I know training models on my humble Mac is not advisable, but I am just exercising this so I can train and use the model on a much more powerful machine.
Please help me with this issue; I've been trying to find a solution for days.
Peace and happy holidays.
I've tried solving the issue by writing:
if __name__ == '__main__':
    freeze_support()
I've tried using this:
import parallelTestModule
extractor = parallelTestModule.ParallelExtractor()
extractor.runInParallel(numProcesses=2, numThreads=4)
So...
It turns out the correct way to solve this is to wrap the training in a function, as such:
def run():
    model = BertModel()
    torch.multiprocessing.freeze_support()
    print('loop')
    train_loop(model, df_train, df_val)

if __name__ == '__main__':
    run()
and to remove the module-level model/train_loop lines at the end of the script. Issue solved. For more, see this link: https://github.com/pytorch/pytorch/issues/5858
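As a side note (my own suggestion, not from the linked issue): macOS starts processes with the spawn method, which re-imports the main module for every DataLoader worker, so constructing the loaders with num_workers=0 avoids spawning workers altogether, at the cost of loading data in the main process:

train_dataloader = DataLoader(train_dataset, num_workers=0, batch_size=BATCH_SIZE, shuffle=True)
val_dataloader = DataLoader(val_dataset, num_workers=0, batch_size=BATCH_SIZE)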

What's wrong with Windows Task Scheduler + pytesseract + multiprocessing?

I have Python code using pytesseract & multiprocessing. When I start the code manually from PyCharm, it works fine with any number of threads. When I start it from the Windows Task Scheduler with 'threads=1', it also works fine.
However, if I start it from the Task Scheduler with 'threads=2' or more, it finishes without processing the images and without any errors.
I get log messages like the following: the script starts but does nothing, and there are no errors in the Windows logs.
2020-05-24 13:09:31,834;START
2020-05-24 13:09:31,834;threads: 2
2020-05-24 13:10:31,832;START
2020-05-24 13:10:31,832;threads: 2
2020-05-24 13:11:31,851;START
2020-05-24 13:11:31,851;threads: 2
Code
from PIL import Image
import pytesseract
from pytesseract import Output
import datetime
from glob import glob
import os
import multiprocessing
import cv2
import logging
def loggerinit(name, filename, overwrite):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # create the logging file handler
    fh = logging.FileHandler(filename, encoding='UTF-8')
    formatter = logging.Formatter('%(asctime)s;%(message)s')
    fh.setFormatter(formatter)
    # add handler to logger object
    logger.addHandler(fh)
    return logger

def getfiles(dirname, mask):
    return glob(os.path.join(dirname, mask))

def tess_file(fname):
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    im_for_T = Image.fromarray(img)
    pytesseract.pytesseract.tesseract_cmd = 'C://Tesseract-OCR//tesseract.exe'
    TESSDATA_PREFIX = 'C:/Program Files/Tesseract-OCR/tessdata'
    try:
        os.environ['OMP_THREAD_LIMIT'] = '1'
        tess_data = pytesseract.image_to_osd(im_for_T, output_type=Output.DICT)
        return fname, tess_data
    except:
        return fname, None
if __name__ == '__main__':
    logger = loggerinit('tess', 'tess.log', False)
    files = getfiles('Croped', '*.jpg')
    t1 = datetime.datetime.now()
    logger.info('START')
    threads = 2
    logger.info('threads: ' + str(threads))
    p = multiprocessing.Pool(threads)
    results = p.map(tess_file, files)
    e = []
    for r in results:
        # each result is a (fname, tess_data) tuple; tess_data is None on failure
        if r[1] is None:
            e.append('OCR error: ' + r[0])
        else:
            print(r[0], ". rotate: ", r[1]['rotate'])
    p.close()
    p.join()
    t2 = datetime.datetime.now()
    delta = (t2 - t1).total_seconds()
    print('Total time: ', delta)
    print('Files: ', len(files))
    logger.info('Files: ' + str(len(files)))
    logger.info('Stop. Total time: ' + str(delta))
    # Print errors, if any
    for ee in e:
        print(ee)
What's wrong? How can I fix this issue?
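Two things worth checking (assumptions on my part, not from the post): Task Scheduler usually starts the script with a different working directory, so the relative 'Croped' path can silently match zero files, and with spawn-based multiprocessing the workers re-import the module in that same environment. Anchoring the path to the script location and logging the file count would make both visible:

BASE = os.path.dirname(os.path.abspath(__file__))  # independent of the scheduler's working directory
files = getfiles(os.path.join(BASE, 'Croped'), '*.jpg')
logger.info('found %d files' % len(files))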

Pexpect helpful hints - find EOF with child.readline() and also own computer backup code

I have used this website over a hundred times and it has helped me so much with my coding (in Python, Arduino, terminal commands and Windows prompt). I thought I would put up some knowledge that I found, for things that Stack Overflow could not help me with but may be helpful for others in a similar situation. So have a look at the code below. I hope it helps people with creating their own backup code. I am most proud of the "while '\r\n' in output" part of the code below:
output = child0.readline()
while '\r\n' in output:
    msg.log(output.replace('\r\n', ''), logMode + 's')
    output = child0.readline()
This detects the EOF once the spawned program has finished running: readline() returns lines ending in '\r\n' while output is still flowing, and a string without it at EOF. Hence you can relay the terminal program's output while the program is running.
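For instance, a standalone sketch of the same pattern (using 'ls' in place of rsync):

import pexpect
child = pexpect.spawn('ls -l /tmp', timeout=None)
line = child.readline()
while '\r\n' in line:                  # pexpect lines end in CRLF while output flows
    print line.replace('\r\n', '')
    line = child.readline()            # at EOF, readline() returns a string without CRLF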
I will be adding a Windows version to this code too, possibly with robocopy.
Any questions about the code below, please do not hesitate to ask. NB: I changed people's names and removed my usernames and passwords.
#!/usr/bin/python
# Written by irishcream24, amateur coder
import subprocess
import sys
import os.path
import logAndError  # my own library to handle errors and log events
from inspect import currentframe as CF  # help with logging
from inspect import getframeinfo as GFI  # help with logging
import threading
import fcntl
import pexpect
import time
import socket
import time as t
from sys import platform
if platform == "win32":
    import msvcrt
    portSearch = "Uno"
    portResultPosition = 1
elif platform == "darwin":
    portSearch = "usb"
    portResultPosition = 0
else:
    print 'Unknown operating system'
    print 'Ending Program...'
    sys.exit()
# Check if another instance of the program is running; if so, stop the second.
pid_file = 'program.pid'
fp = open(pid_file, 'w')
try:
    fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
    # another instance is running
    print "Program already running, stopping the second instance..."
    sys.exit(1)
# Determine where main program files are stored
directory = os.path.dirname(os.path.realpath(__file__))
# To print stderr to both screen and file
errCounter = 0
exitFlag = [0]
class tee:
    def __init__(self, _fd1, _fd2):
        self.fd1 = _fd1
        self.fd2 = _fd2

    def __del__(self):
        if self.fd1 != sys.stdout and self.fd1 != sys.stderr:
            self.fd1.close()
        if self.fd2 != sys.stdout and self.fd2 != sys.stderr:
            self.fd2.close()

    def write(self, text):
        global errCounter
        global exitFlag
        if errCounter == 0:
            self.fd1.write('%s: ' % t.strftime("%d/%m/%y %H:%M"))
            self.fd2.write('%s: ' % t.strftime("%d/%m/%y %H:%M"))
            errCounter = 1
            exitFlag[0] = 1
        self.fd1.write(text)
        self.fd2.write(text)

    def flush(self):
        self.fd1.flush()
        self.fd2.flush()
# Error and log handling
errMode = 'pf'  # p = print to screen, f = print to file, e = end program
errorFileAddress = '%s/errorFile.txt' % directory
outputlog = open(errorFileAddress, "a")
sys.stderr = tee(sys.stderr, outputlog)
logFileAddress = '%s/log.txt' % directory
logMode = 'pf'  # p = print to screen, f = print to file
msg = logAndError.logAndError(errorFileAddress, logFileAddress)
# Set computer to be backed up
sourceComputer = 'DebbieMac'
try:
    sourceComputer = sys.argv[1]
except:
    print 'No source argument given.'
if sourceComputer in ('SamMac', 'DebbieMac', 'mediaCentre', 'garageComputer'):
    pass
else:
    msg.error('incorrect source computer supplied!', errMode, GFI(CF()).lineno, exitFlag)
    sys.exit()
# Source and destination setup
backupRoute = 'network'
try:
    backupRoute = sys.argv[2]
except:
    print 'No back up route argument given.'
if backupRoute in ('network', 'direct', 'testNetwork', 'testDirect'):
    pass
else:
    msg.error('incorrect backup route supplied!', errMode, GFI(CF()).lineno, exitFlag)
    sys.exit()
# Source, destination and exclude dictionaries
v = {
    'SamMac network source': '/Users/SamJones',
    'SamMac network destination': '/Volumes/Seagate/Sam_macbook_backup/Backups',
    'SamMac direct source': '/Users/SamJones',
    'SamMac direct destination': '/Volumes/Seagate\ Backup\ Plus\ Drive/Sam_macbook_backup/Backups',
    'SamMac testNetwork source': '/Users/SamJones/Documents/Arduino/arduino_sketches-master',
    'SamMac testNetwork destination': '/Volumes/Seagate/Sam_macbook_backup/Arduino',
    'SamMac exclude': ['.*', '.Trash', 'Library', 'Pictures'],
    'DebbieMac network source': '/Users/DebbieJones',
    'DebbieMac network destination': '/Volumes/Seagate/Debbie_macbook_backup/Backups',
    'DebbieMac direct source': '/Users/DebbieJones',
    'DebbieMac direct destination': '/Volumes/Seagate\ Backup\ Plus\ Drive/Debbie_macbook_backup/Backups',
    'DebbieMac testNetwork source': '/Users/DebbieJones/testFolder',
    'DebbieMac testNetwork destination': '/Volumes/Seagate/Debbie_macbook_backup',
    'DebbieMac testDirect source': '/Users/DebbieJones/testFolder',
    'DebbieMac testDirect destination': '/Volumes/Seagate\ Backup\ Plus\ Drive/Debbie_macbook_backup',
    'DebbieMac exclude': ['.*', '.Trash', 'Library', 'Pictures']
}
# Main threading code
class mainThreadClass(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        PIDMessage = 'Starting backup PID: %s' % os.getpid()
        msg.log(PIDMessage, logMode)
        mainThread()
        msg.log('Process completed successfully\n', logMode)

def mainThread():
    if platform == "win32":
        pass
    elif platform == "darwin":
        if 'network' in backupRoute:
            # Connect to SeagateBackup
            if os.path.ismount('/Volumes/Seagate') == False:
                msg.log('Mounting Seagate Backup Hub', logMode)
                commandM = 'mount volume'
                smbPoint = '"smb://username:password#mediacentre/Seagate"'
                childM = pexpect.spawn("%s '%s %s'" % ('osascript -e', commandM, smbPoint), timeout=None)
                childM.expect(pexpect.EOF)
            else:
                msg.log('Seagate already mounted', logMode)
        # Use rsync to backup files
        commandR = 'rsync -avb '
        for s in v['%s exclude' % sourceComputer]:
            commandR = commandR + "--exclude '%s' " % s
        commandR = commandR + '--delete --backup-dir="../PreviousBackups/%s" ' % time.strftime("%d-%m-%y %H%M")
        commandR = commandR + '%s %s' % (v['%s %s source' % (sourceComputer, backupRoute)], v['%s %s destination' % (sourceComputer, backupRoute)])
        msg.log(commandR, logMode)
        msg.log('Running rsync...rsync output below', logMode)
        child0 = pexpect.spawn(commandR, timeout=None)
        # Handling command output:
        # if there is no '\r\n' in the readline() output, EOF has been reached
        output = child0.readline()
        while '\r\n' in output:
            msg.log(output.replace('\r\n', ''), logMode + 's')
            output = child0.readline()
    return

if __name__ == '__main__':
    # Create new thread
    threadMain = mainThreadClass()
    # Start new thread
    threadMain.start()
logAndError.py
# to handle errors
import time
import sys
import threading
class logAndError:
    def __init__(self, errorAddress, logAddress):
        self.errorAddress = errorAddress
        self.logAddress = logAddress
        self.lock = threading.RLock()

    def error(self, message, errMode, lineNumber=None, exitFlag=[0]):
        message = '%s: %s' % (time.strftime("%d/%m/%y %H:%M"), message)
        # p = print to screen, f = print to file, e = end program
        if 'p' in errMode:
            print message
        if 'f' in errMode and 'e' not in errMode:
            errorFile = open(self.errorAddress, 'a')
            errorFile.write('%s\n' % message)
            errorFile.close()
        return

    def log(self, logmsg, logMode):
        with self.lock:
            logmsg2 = '%s: %s' % (time.strftime("%d/%m/%y %H:%M"), logmsg)
            if 'p' in logMode:
                # s = simple (no date stamp)
                if 's' in logMode:
                    print logmsg
                else:
                    print logmsg2
            if 'f' in logMode:
                if 's' in logMode:
                    logFile = open(self.logAddress, 'a')
                    logFile.write('%s\n' % logmsg)
                    logFile.close()
                else:
                    logFile = open(self.logAddress, 'a')
                    logFile.write('%s\n' % logmsg2)
                    logFile.close()
        return

PyQt Copy folder with progress and transfer rate/speed

I'd like to expand my current code with the ability to show the transfer rate/speed of the files being copied. I'm working on Windows 10 with Python 3.6 and Qt 5.8. Here is my code:
import os
import shutil
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QProgressBar, QFileDialog

class FileCopyProgress(QWidget):
    def __init__(self, parent=None, src=None, dest=None):
        super(FileCopyProgress, self).__init__()
        self.src = src
        self.dest = dest
        self.build_ui()

    def build_ui(self):
        hbox = QVBoxLayout()
        lbl_src = QLabel('Source: ' + self.src)
        lbl_dest = QLabel('Destination: ' + self.dest)
        self.pb = QProgressBar()
        self.pb.setMinimum(0)
        self.pb.setMaximum(100)
        self.pb.setValue(0)
        hbox.addWidget(lbl_src)
        hbox.addWidget(lbl_dest)
        hbox.addWidget(self.pb)
        self.setLayout(hbox)
        self.setWindowTitle('File copy')
        self.auto_start_timer = QTimer()
        self.auto_start_timer.singleShot(2000, lambda: self.copyFilesWithProgress(self.src, self.dest, self.progress, self.copydone))
        self.show()

    def progress(self, done, total):
        progress = int(round((done / float(total)) * 100))
        try:
            self.pb.setValue(progress)
        except:
            pass
        app.processEvents()

    def copydone(self):
        self.pb.setValue(100)
        self.close()

    def countfiles(self, _dir):
        files = []
        if os.path.isdir(_dir):
            for path, dirs, filenames in os.walk(_dir):
                files.extend(filenames)
        return len(files)

    def makedirs(self, dest):
        if not os.path.exists(dest):
            os.makedirs(dest)

    # pyqtSlot()
    def copyFilesWithProgress(self, src, dest, callback_progress, callback_copydone):
        numFiles = self.countfiles(src)
        if numFiles > 0:
            dest = os.path.join(dest, src.replace(BASE_DIR, '').replace('\\', ''))
            print(''.join(['Destination: ', dest]))
            self.makedirs(dest)
            numCopied = 0
            for path, dirs, filenames in os.walk(src):
                for directory in dirs:
                    destDir = path.replace(src, dest)
                    self.makedirs(os.path.join(destDir, directory))
                for sfile in filenames:
                    srcFile = os.path.join(path, sfile)
                    destFile = os.path.join(path.replace(src, dest), sfile)
                    shutil.copy(srcFile, destFile)
                    numCopied += 1
                    callback_progress(numCopied, numFiles)
            callback_copydone()

BASE_DIR = 'C:\\dev'
app = QApplication([])
FileCopyProgress(src=r'C:\dev\pywin32-221', dest=r'C:\dev\copied')
# Run the app
app.exec_()
This code opens a GUI with a progress bar showing progress while copying files. A simple label with the (approximate) current transfer rate/speed would be really nice :)
Unfortunately I can't find any examples. Can someone give me a hint or maybe a working example, please?
EDIT:
I did a remake and now I have transfer rate, time elapsed and time remaining. The data seems realistic. I have only one problem: let's assume I have a folder/file where the time remaining is 7 sec. Currently it starts at 7 sec and gets an update every 1 second. We'd expect the next step to display 6 sec, but instead it goes:
5 sec
3 sec
1.5 sec
1.4 sec
1.3 sec
1.2 sec
1.1 sec and so on
Where is the mistake?
class FileCopyProgress(QWidget):
    def __init__(self, parent=None, src=None, dest=None):
        super(FileCopyProgress, self).__init__()
        self.src = src
        self.dest = dest
        self.rate = "0"
        self.total_time = "0 s"
        self.time_elapsed = "0 s"
        self.time_remaining = "0 s"
        self.build_ui()

    def build_ui(self):
        hbox = QVBoxLayout()
        lbl_src = QLabel('Source: ' + self.src)
        lbl_dest = QLabel('Destination: ' + self.dest)
        self.pb = QProgressBar()
        self.lbl_rate = QLabel('Transfer rate: ' + self.rate)
        self.lbl_time_elapsed = QLabel('Time Elapsed: ' + self.time_elapsed)
        self.lbl_time_remaining = QLabel('Time Remaining: ' + self.time_remaining)
        self.pb.setMinimum(0)
        self.pb.setMaximum(100)
        self.pb.setValue(0)
        hbox.addWidget(lbl_src)
        hbox.addWidget(lbl_dest)
        hbox.addWidget(self.pb)
        hbox.addWidget(self.lbl_rate)
        hbox.addWidget(self.lbl_time_elapsed)
        hbox.addWidget(self.lbl_time_remaining)
        self.setLayout(hbox)
        self.setWindowTitle('File copy')
        self.auto_start_timer = QTimer()
        self.auto_start_timer.singleShot(100, lambda: self.copy_files_with_progress(self.src, self.dest, self.progress, self.copy_done))
        self.copy_timer = QTimer()
        self.copy_timer.timeout.connect(lambda: self.process_informations())
        self.copy_timer.start(1000)
        self.show()

    # pyqtSlot()
    def process_informations(self):
        time_elapsed_raw = time.clock() - self.start_time
        self.time_elapsed = '{:.2f} s'.format(time_elapsed_raw)
        self.lbl_time_elapsed.setText('Time Elapsed: ' + self.time_elapsed)
        # example - total: 100 bytes, copied so far: 12 bytes/s
        time_remaining_raw = self._totalSize / self._copied
        self.time_remaining = '{:.2f} s'.format(time_remaining_raw) if time_remaining_raw < 60. else '{:.2f} min'.format(time_remaining_raw)
        self.lbl_time_remaining.setText('Time Remaining: ' + self.time_remaining)
        rate_raw = (self._copied - self._copied_tmp) / 1024 / 1024
        self.rate = '{:.2f} MB/s'.format(rate_raw)
        self.lbl_rate.setText('Transfer rate: ' + self.rate)
        self._copied_tmp = self._copied

    def progress(self):
        self._progress = (self._copied / self._totalSize) * 100
        try:
            self.pb.setValue(self._progress)
        except:
            pass
        app.processEvents()

    def get_total_size(self, src):
        # total size of files in bytes
        return sum(os.path.getsize(os.path.join(dirpath, filename))
                   for dirpath, dirnames, filenames in os.walk(src)
                   for filename in filenames)

    def copy_done(self):
        self.pb.setValue(100)
        print("done")
        self.close()

    def make_dirs(self, dest):
        if not os.path.exists(dest):
            os.makedirs(dest)

    # pyqtSlot()
    def copy_files_with_progress(self, src, dst, callback_progress, callback_copydone, length=16*1024*1024):
        self._copied = 0
        self._copied_tmp = 0
        self._totalSize = self.get_total_size(src)
        print(''.join(['Pre Dst: ', dst]))
        dst = os.path.join(dst, src.replace(BASE_DIR, '').replace('\\', ''))
        print(''.join(['Src: ', src]))
        print(''.join(['Dst: ', dst]))
        self.make_dirs(dst)
        self.start_time = time.clock()
        for path, dirs, filenames in os.walk(src):
            for directory in dirs:
                destDir = path.replace(src, dst)
                self.make_dirs(os.path.join(destDir, directory))
            for sfile in filenames:
                srcFile = os.path.join(path, sfile)
                destFile = os.path.join(dst, sfile)
                # destFile = os.path.join(path.replace(src, dst), sfile)
                with open(srcFile, 'rb') as fsrc:
                    with open(destFile, 'wb') as fdst:
                        while 1:
                            buf = fsrc.read(length)
                            if not buf:
                                break
                            fdst.write(buf)
                            self._copied += len(buf)
                            callback_progress()
        try:
            self.copy_timer.stop()
        except:
            print('Error: could not stop QTimer')
        callback_copydone()
That's all about logic; think of it the following way:
You have a TOTAL size of all files; let's say you have 100 dashes: [----- ... -----]
Now you record the starting time when you began transferring your files.
Choose some interval, for example 2 secs.
After 2 secs, see how much of the total you have already transferred, in other words how much of the files you already have in the new dir. Let's say you transferred 26 dashes.
The calc would be: 26 dashes / 2 secs = 13 dashes per second.
Going deeper, we have 26% of the content copied in 2 seconds (since the total is 100), or 13% per second.
Even further, we can also calculate the predicted times:
Total Time = 100% / 13% per sec ≈ 7.7 secs
Time Elapsed = 2 secs
Time Remaining = 7.7 - 2 = 5.7 secs
I think you got the idea...
Note: you just have to keep redoing all these calcs every 2 seconds (if you choose 2 secs, of course) and updating the information for the user. If you want to be even more precise, or to show more frequently updated information to the user, just reduce that interval to, say, 0.5 secs and do the calc on top of milliseconds. It's up to you how often you update the information; this is just an overview of how to do the math.
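A sketch of that math in code (hypothetical names; note the remaining time comes from (total - copied) / rate, not from total / copied as in the code above):

import time

def transfer_stats(copied_bytes, total_bytes, start_time):
    elapsed = time.time() - start_time                  # seconds since the copy started
    rate = copied_bytes / elapsed if elapsed else 0.0   # bytes per second, averaged so far
    remaining = (total_bytes - copied_bytes) / rate if rate else float('inf')
    return rate, elapsed, remaining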

Reading memory and Access is Denied

I need to access all the memory of a running process on my local Windows 7 64-bit machine. I am new to the winapi.
Here is my problem: whenever I try to open a process and read its memory, I get an 'Access is denied' error. I searched and found that if I run the main process as Administrator and use PROCESS_ALL_ACCESS in OpenProcess, I should have enough rights. OK, I did that, but nothing changed; on reading memory, I still get 'Access is denied'.
So I kept searching and found another thing, which is enabling SeDebugPrivilege. I have also done that, but nothing changed; I still get the error.
I've read the question and its answer here:
Windows Vista/Win7 Privilege Problem: SeDebugPrivilege & OpenProcess.
But as I said, I am really new to the winapi. I could not solve my problem yet. Is there anything I need to configure in my local operating system?
Here is my Python code with pywin32:
from _ctypes import byref, sizeof, Structure
from ctypes import windll, WinError, c_buffer, c_void_p, create_string_buffer
from ctypes.wintypes import *
import win32security
import win32api
import gc
import ntsecuritycon
from struct import Struct
from win32con import PROCESS_ALL_ACCESS
from struct import calcsize

MEMORY_STATES = {0x1000: "MEM_COMMIT", 0x10000: "MEM_FREE", 0x2000: "MEM_RESERVE"}
MEMORY_PROTECTIONS = {0x10: "PAGE_EXECUTE", 0x20: "PAGE_EXECUTE_READ", 0x40: "PAGE_EXECUTE_READWRITE",
                      0x80: "PAGE_EXECUTE_WRITECOPY", 0x01: "PAGE_NOACCESS", 0x04: "PAGE_READWRITE",
                      0x08: "PAGE_WRITECOPY"}
MEMORY_TYPES = {0x1000000: "MEM_IMAGE", 0x40000: "MEM_MAPPED", 0x20000: "MEM_PRIVATE"}
class MEMORY_BASIC_INFORMATION(Structure):
    _fields_ = [
        ("BaseAddress", c_void_p),
        ("AllocationBase", c_void_p),
        ("AllocationProtect", DWORD),
        ("RegionSize", UINT),
        ("State", DWORD),
        ("Protect", DWORD),
        ("Type", DWORD)
    ]

class SYSTEM_INFO(Structure):
    _fields_ = [("wProcessorArchitecture", WORD),
                ("wReserved", WORD),
                ("dwPageSize", DWORD),
                ("lpMinimumApplicationAddress", DWORD),
                ("lpMaximumApplicationAddress", DWORD),
                ("dwActiveProcessorMask", DWORD),
                ("dwNumberOfProcessors", DWORD),
                ("dwProcessorType", DWORD),
                ("dwAllocationGranularity", DWORD),
                ("wProcessorLevel", WORD),
                ("wProcessorRevision", WORD)]
class PyMEMORY_BASIC_INFORMATION:
    def __init__(self, MBI):
        self.MBI = MBI
        self.set_attributes()

    def set_attributes(self):
        self.BaseAddress = self.MBI.BaseAddress
        self.AllocationBase = self.MBI.AllocationBase
        self.AllocationProtect = MEMORY_PROTECTIONS.get(self.MBI.AllocationProtect, self.MBI.AllocationProtect)
        self.RegionSize = self.MBI.RegionSize
        self.State = MEMORY_STATES.get(self.MBI.State, self.MBI.State)
        # self.Protect = self.MBI.Protect  # uncomment this and comment the next line to do a bitwise check on Protect.
        self.Protect = MEMORY_PROTECTIONS.get(self.MBI.Protect, self.MBI.Protect)
        self.Type = MEMORY_TYPES.get(self.MBI.Type, self.MBI.Type)
ASSUME_ALIGNMENT = True
class TARGET:
    """Given a ctype (initialized or not) this coordinates all the information needed to read, write and compare."""
    def __init__(self, ctype):
        self.alignment = 1
        self.ctype = ctype
        # size of target data
        self.size = sizeof(ctype)
        self.type = ctype._type_
        # get the format type needed for struct.unpack/pack.
        while hasattr(self.type, "_type_"):
            self.type = self.type._type_
        # string_buffers and char arrays have _type_ 'c'
        # but that makes it slightly slower to unpack
        # so swap it for 's'.
        if self.type == "c":
            self.type = "s"
        # calculate byte alignment. this speeds up scanning substantially
        # because we can read and compare every alignment bytes
        # instead of every single byte.
        # although if we are scanning for a string the alignment is defaulted to 1
        # (im not sure if this is correct).
        elif ASSUME_ALIGNMENT:
            # calc alignment
            divider = 1
            for i in xrange(4):
                divider *= 2
                if not self.size % divider:
                    self.alignment = divider
        # size of target ctype.
        self.type_size = calcsize(self.type)
        # length of target / array length.
        self.length = self.size / self.type_size
        self.value = getattr(ctype, "raw", ctype.value)
        # the format string used for struct.pack/unpack.
        self.format = str(self.length) + self.type
        # efficient packer / unpacker for our own format.
        self.packer = Struct(self.format)

    def get_packed(self):
        """Gets the byte representation of the ctype value for use with WriteProcessMemory."""
        return self.packer.pack(self.value)

    def __str__(self):
        return str(self.ctype)[:10] + "..." + " <" + str(self.value)[:10] + "..." + ">"
class Memory(object):
    def __init__(self, process_handle, target):
        self._process_handle = process_handle
        self._target = target
        self.found = []
        self.__scann_process()

    def __scann_process(self):
        """Scans a process's pages for the target value."""
        si = SYSTEM_INFO()
        psi = byref(si)
        windll.kernel32.GetSystemInfo(psi)
        base_address = si.lpMinimumApplicationAddress
        max_address = si.lpMaximumApplicationAddress
        page_address = base_address
        while page_address < max_address:
            page_address = self.__scan_page(page_address)
            if len(self.found) >= 60000000:
                print("[Warning] Scan ended early because too many addresses were found to hold the target data.")
                break
        gc.collect()
        return self.found

    def __scan_page(self, page_address):
        """Scans the entire page for TARGET instance and returns the next page address and found addresses."""
        information = self.VirtualQueryEx(page_address)
        base_address = information.BaseAddress
        region_size = information.RegionSize
        next_region = base_address + region_size
        size = self._target.size
        target_value = self._target.value
        step = self._target.alignment
        unpacker = self._target.packer.unpack
        if information.Type != "MEM_PRIVATE" or \
           region_size < size or \
           information.State != "MEM_COMMIT" or \
           information.Protect not in ["PAGE_EXECUTE_READ", "PAGE_EXECUTE_READWRITE", "PAGE_READWRITE"]:
            return next_region
        page_bytes = self.ReadMemory(base_address, region_size)
        for i in xrange(0, (region_size - size), step):
            partial = page_bytes[i:i + size]
            if unpacker(partial)[0] == target_value:
                self.found.append(base_address + i)
        del page_bytes  # free the buffer
        return next_region

    def ReadMemory(self, address, size):
        cbuffer = c_buffer(size)
        success = windll.kernel32.ReadProcessMemory(
            self._process_handle,
            address,
            cbuffer,
            size,
            0)
        assert success, "ReadMemory Failed with success == %s and address == %s and size == %s.\n%s" % (
            success, address, size, WinError(win32api.GetLastError()))
        return cbuffer.raw

    def VirtualQueryEx(self, address):
        MBI = MEMORY_BASIC_INFORMATION()
        MBI_pointer = byref(MBI)
        size = sizeof(MBI)
        success = windll.kernel32.VirtualQueryEx(
            self._process_handle,
            address,
            MBI_pointer,
            size)
        assert success, "VirtualQueryEx Failed with success == %s.\n%s" % (
            success, WinError(win32api.GetLastError())[1])
        assert success == size, "VirtualQueryEx Failed because not all data was written."
        return PyMEMORY_BASIC_INFORMATION(MBI)
def AdjustPrivilege(priv):
    flags = win32security.TOKEN_ADJUST_PRIVILEGES | win32security.TOKEN_QUERY
    p = win32api.GetCurrentProcess()
    htoken = win32security.OpenProcessToken(p, flags)
    id = win32security.LookupPrivilegeValue(None, priv)
    newPrivileges = [(id, win32security.SE_PRIVILEGE_ENABLED)]
    win32security.AdjustTokenPrivileges(htoken, 0, newPrivileges)
    win32api.CloseHandle(htoken)

def OpenProcess(pid=win32api.GetCurrentProcessId()):
    # ntsecuritycon.SE_DEBUG_NAME = "SeDebugPrivilege"
    AdjustPrivilege(ntsecuritycon.SE_DEBUG_NAME)
    phandle = windll.kernel32.OpenProcess(
        PROCESS_ALL_ACCESS,
        0,
        pid)
    assert phandle, "Failed to open process!\n%s" % WinError(win32api.GetLastError())[1]
    return phandle

PID = 22852
process_handle = OpenProcess(PID)
Memory(process_handle, TARGET(create_string_buffer("1456")))
Here is the error I always get:
AssertionError: ReadMemory Failed with success == 0 and address == 131072 and size == 4096.
[Error 5] Access is denied.
I do not know what other information about my code or my Windows 7 operating system I should provide; if you need to know more, please ask and I will provide it.
I guess this is about a lack of configuration in my operating system, not about pywin32. I'll be waiting for your solutions.
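One thing worth ruling out (a general ctypes observation, not a confirmed fix for this case): without explicit argtypes/restype, ctypes passes Python integers as 32-bit C ints, so 64-bit handles and addresses can be truncated on 64-bit Windows and fail in misleading ways. A hedged sketch of declaring the widths explicitly:

from ctypes import windll, wintypes, c_void_p, c_size_t
windll.kernel32.ReadProcessMemory.argtypes = [
    wintypes.HANDLE, c_void_p, c_void_p, c_size_t, c_void_p]
windll.kernel32.ReadProcessMemory.restype = wintypes.BOOL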
