Why isn't SvcShutdown() being called by my service? - winapi

I cannot figure out why SvcShutdown() is not being called in my Win32 service when Windows shuts down. Everything else works perfectly (stopping, pause/continue, etc.). After scouring the net for hours, I have found nothing.
Any help would be very much appreciated!
Thanks in advance,
James
import win32serviceutil
import servicemanager
import win32service
import win32event
import win32api
import datetime

LOGINFO = 0
LOGWARNING = 1
LOGERROR = 2

class MyService(win32serviceutil.ServiceFramework):
    _svc_name_ = 'MyService'
    _svc_display_name_ = 'MyService service'

    def __init__(self, *args):
        win32serviceutil.ServiceFramework.__init__(self, *args)
        # Create events for service stop, pause & continue
        # CreateEvent(securityAttrs, bManualReset, bInitialState, name)
        self.evStop = win32event.CreateEvent(None, 0, 0, None)
        self.evPause = win32event.CreateEvent(None, 0, 0, None)
        self.evContinue = win32event.CreateEvent(None, 0, 0, None)
        # Create event list for WaitForMultipleObjects()
        self.evhandles = self.evStop, self.evPause, self.evContinue
        # sigStatus must be in this range for a valid SCM event
        self.validSignals = range(win32event.WAIT_OBJECT_0,
                                  win32event.MAXIMUM_WAIT_OBJECTS)
        # Signal ID returned from WFMO(), else None
        self.sigStatus = None
        # Service run state. False means pausing/paused or stopping
        self.runState = True

    def logEvent(self, msg, logtype=LOGINFO, logcategory=None):
        if logtype == LOGINFO:
            servicemanager.LogInfoMsg(str(msg))
        elif logtype == LOGWARNING:
            servicemanager.LogWarningMsg(str(msg))
        elif logtype == LOGERROR:
            servicemanager.LogErrorMsg(str(msg))

    def sleep(self, sec):
        '''A delay method sympathetic to SCM notifications.'''
        while sec > 0:
            # SCM event has taken place?
            if self.notificationFromSCM():
                break
            win32api.Sleep(1000)
            sec = sec - 1

    def notificationFromSCM(self):
        '''Returns True if SCM notification(s) have taken place.
        sigStatus holds the value.
        Note that a call to WaitForMultipleObjects() only returns the event
        status ONCE, after which it is reset (i.e. one call may return
        WAIT_OBJECT_0 and an immediate subsequent call will yield WAIT_TIMEOUT
        or similar).'''
        if self.sigStatus is not None:
            # Still have a live SCM event to process, so exit
            return True
        # WaitForMultipleObjects(handles, bWaitAll, dwMilliseconds)
        self.sigStatus = win32event.WaitForMultipleObjects(self.evhandles, 0, 0)
        if self.sigStatus in self.validSignals:
            return True
        else:
            # Timeout signal or similar, so MUST reset sigStatus
            self.sigStatus = None
            return False

    def SvcDoRun(self):
        self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        self.logEvent('Starting {0} Service...'.format(self._svc_display_name_))
        self.ReportServiceStatus(win32service.SERVICE_RUNNING)
        self.logEvent('{0} Service started.'.format(self._svc_display_name_))
        while True:
            if self.runState:
                try:
                    # Insert service work activity here...
                    self.logEvent('Working: {0}'.format(datetime.datetime.now()))
                    self.sleep(10)
                except Exception as x:
                    self.logEvent('Exception: {0}'.format(x), LOGERROR)
            else:
                self.sleep(30)
            # SCM notification?
            if self.notificationFromSCM():
                if self.sigStatus == self.evhandles.index(self.evStop):
                    # STOP event
                    self.logEvent('Stopping {0} Service...'.format(self._svc_display_name_))
                    break
                elif self.sigStatus == self.evhandles.index(self.evPause):
                    # PAUSE event
                    self.logEvent('Pausing {0} Service...'.format(self._svc_display_name_))
                    self.runState = False
                    # Other cleanup code here...
                    self.logEvent('{0} Service paused.'.format(self._svc_display_name_))
                    self.ReportServiceStatus(win32service.SERVICE_PAUSED)
                elif self.sigStatus == self.evhandles.index(self.evContinue):
                    # CONTINUE event
                    self.logEvent('Resuming {0} service...'.format(self._svc_display_name_))
                    self.runState = True
                    # Reset pause & continue to non-signaled state
                    win32event.ResetEvent(self.evPause)
                    win32event.ResetEvent(self.evContinue)
                    # Other cleanup code here...
                    self.logEvent('{0} Service started.'.format(self._svc_display_name_))
                    self.ReportServiceStatus(win32service.SERVICE_RUNNING)
                # Clear signal flag
                self.sigStatus = None
        # If we get here, the service has been stopped/shut down
        self.logEvent('{0} Service stopped.'.format(self._svc_display_name_))
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)

    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        # Signal STOP event
        win32event.SetEvent(self.evStop)

    def SvcPause(self):
        self.ReportServiceStatus(win32service.SERVICE_PAUSE_PENDING)
        # Signal PAUSE event
        win32event.SetEvent(self.evPause)

    def SvcContinue(self):
        self.ReportServiceStatus(win32service.SERVICE_CONTINUE_PENDING)
        # Signal CONTINUE event
        win32event.SetEvent(self.evContinue)

    def SvcShutdown(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.logEvent('**SvcShutdown event**')
        # Shutdown code here...
        win32event.SetEvent(self.evStop)

Have a look at this:
[python-win32] win32 service not receiving shutdown notification on reboot
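For context: the SCM only delivers SERVICE_CONTROL_SHUTDOWN to services that report SERVICE_ACCEPT_SHUTDOWN in their accepted-controls mask. pywin32's ServiceFramework builds that mask from the Svc* methods you define (its GetAcceptedControls method), so defining SvcShutdown should normally be enough; a hedged way to rule this out is to force the shutdown bit explicitly, assuming the stock GetAcceptedControls behavior:
import win32service
import win32serviceutil

class MyService(win32serviceutil.ServiceFramework):
    # ...rest of the class as in the question above...
    _svc_name_ = 'MyService'
    _svc_display_name_ = 'MyService service'

    def GetAcceptedControls(self):
        # Start from whatever the framework derives from the defined
        # Svc* methods, then explicitly add shutdown notification.
        accepted = win32serviceutil.ServiceFramework.GetAcceptedControls(self)
        return accepted | win32service.SERVICE_ACCEPT_SHUTDOWN
If the mask is right and SvcShutdown still never fires, keep in mind that handlers get very little time at system shutdown, so a log write that happens too late can make it look as if the handler was never called.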

Related

python coroutine asyncio/ await / aiohttp

I'm new to the asyncio world, so going straight to the point:
I want to make a request (aiohttp) to a site.
If waiting for an answer takes more than N seconds, I want to stop the waiting process.
Then do the process again, setting a limit on the number of attempts if needed.
import aiohttp
from time import time

async def search_skiping_add(name_search):
    start_time = time()
    async with aiohttp.ClientSession() as session:
        url = f'https://somesitehere.com'
        r = await session.get(url)
    final_time = time()
    result_time = round(final_time - start_time)
    print(result_time)
I know there may be some way to do it synchronously, but it's also an excuse to start using asyncio.
This should give you an idea of how to use async with aiohttp:
from aiohttp import ClientSession, ClientTimeout
from asyncio import gather, create_task, sleep, run
from traceback import format_exc

def returnPartionedList(inputlist: list, x: int = 100) -> list:
    # Returns inputlist split into parts of x items each (default 100)
    return [inputlist[i:i + x] for i in range(0, len(inputlist), x)]

# You could change validate to an integer and thereby increase the retry count as needed.
async def GetRessource(url: str, session: ClientSession, validate: bool = False) -> dict:
    try:
        async with session.get(url) as response:
            if response.status == 200:
                r: dict = await response.json()  # Use .text() to get the result as a string
                return r
            else:
                r: str = await response.text()
                if not validate:
                    await sleep(3)  # Sleep for x seconds before the retry
                    return await GetRessource(url, session, True)
                print(f"Error, got response code: {response.status} message: {r}")
    except Exception:
        print(f"General Exception:\n{format_exc()}")
    return {}

async def GetUrls(urls: list) -> list:
    resultsList: list = []
    UrlPartitions: list = returnPartionedList(urls, 20)  # Rate limit to 20 requests per loop
    # Timeout is the total time in seconds to wait before terminating a
    # request; the default is 300 seconds (5 minutes).
    async with ClientSession(timeout=ClientTimeout(total=15)) as session:
        for partition in UrlPartitions:
            partitionTasks: list = [create_task(GetRessource(url, session)) for url in partition]
            resultsList.append(await gather(*partitionTasks, return_exceptions=False))
    return resultsList  # Or you can do more processing here before returning

async def main():
    urls: list = ["...", "...", "..."]  # List of URLs to fetch
    results: list = await GetUrls(urls)
    print(results)

if __name__ == "__main__":
    run(main())
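If all you need is the per-request timeout with a bounded number of attempts from the question, a smaller sketch using asyncio.wait_for is also possible; the URL, timeout and attempt count below are placeholder assumptions:
import asyncio
import aiohttp

async def fetch_once(session, url):
    # One complete request: connect, send, read body.
    async with session.get(url) as response:
        return await response.text()

async def fetch_with_retries(url, timeout_s=5.0, attempts=3):
    # Bound each whole attempt to timeout_s seconds; wait_for cancels the
    # in-flight request on timeout, and we retry up to `attempts` times.
    async with aiohttp.ClientSession() as session:
        for attempt in range(1, attempts + 1):
            try:
                return await asyncio.wait_for(fetch_once(session, url), timeout=timeout_s)
            except asyncio.TimeoutError:
                print(f"Attempt {attempt} timed out after {timeout_s}s")
    return None

if __name__ == "__main__":
    print(asyncio.run(fetch_with_retries("https://somesitehere.com")))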

Is it useless to use the Lock() in multiprocess Pool()? [duplicate]

I am having trouble with the multiprocessing module. I am using a Pool of workers with its map method to concurrently analyze lots of files. Each time a file has been processed I would like to have a counter updated so that I can keep track of how many files remain to be processed. Here is sample code:
import os
import multiprocessing

counter = 0

def analyze(file):
    # Analyze the file.
    global counter
    counter += 1
    print(counter)

if __name__ == '__main__':
    files = os.listdir('/some/directory')
    pool = multiprocessing.Pool(4)
    pool.map(analyze, files)
I cannot find a solution for this.
The problem is that the counter variable is not shared between your processes: each separate process is creating its own local instance and incrementing that.
See this section of the documentation for some techniques you can employ to share state between your processes. In your case you might want to share a Value instance between your workers.
Here's a working version of your example (with some dummy input data). Note it uses global values which I would really try to avoid in practice:
from multiprocessing import Pool, Value
from time import sleep

counter = None

def init(args):
    '''Store the counter for later use.'''
    global counter
    counter = args

def analyze_data(args):
    '''Increment the global counter, do something with the input.'''
    global counter
    # The += operation is not atomic, so we need to get a lock:
    with counter.get_lock():
        counter.value += 1
    print(counter.value)
    return args * 10

if __name__ == '__main__':
    # inputs = os.listdir(some_directory)
    #
    # initialize a cross-process counter and the input lists
    #
    counter = Value('i', 0)
    inputs = [1, 2, 3, 4]
    #
    # create the pool of workers, ensuring each one receives the counter
    # as it starts.
    #
    p = Pool(initializer=init, initargs=(counter,))
    i = p.map_async(analyze_data, inputs, chunksize=1)
    i.wait()
    print(i.get())
Counter class without the race-condition bug:
import multiprocessing

class Counter(object):
    def __init__(self):
        self.val = multiprocessing.Value('i', 0)

    def increment(self, n=1):
        with self.val.get_lock():
            self.val.value += n

    @property
    def value(self):
        return self.val.value
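A possible usage sketch (not part of the original answer; names are illustrative): the Counter instance has to reach the workers through the pool initializer, just like the raw Value above, because objects created after the workers are spawned are not shared:
from multiprocessing import Pool

counter = None  # per-worker global, set by init()

def init(shared_counter):
    global counter
    counter = shared_counter

def work(item):
    counter.increment()
    return item * 2

if __name__ == '__main__':
    c = Counter()  # the Counter class defined above
    with Pool(4, initializer=init, initargs=(c,)) as pool:
        print(pool.map(work, range(10)))
    print('processed:', c.value)  # reliably 10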
An extremely simple example, adapted from jkp's answer:
from multiprocessing import Pool, Value
from time import sleep

# Note: the module-level Value is inherited by workers on fork-based
# platforms; on Windows (spawn), pass it via an initializer as above.
counter = Value('i', 0)

def f(x):
    global counter
    with counter.get_lock():
        counter.value += 1
    print("counter.value:", counter.value)
    sleep(1)
    return x

if __name__ == '__main__':
    with Pool(4) as p:
        r = p.map(f, range(1000 * 1000))
Faster Counter class, without using the built-in lock of Value twice:
import multiprocessing

class Counter(object):
    def __init__(self, initval=0):
        self.val = multiprocessing.RawValue('i', initval)
        self.lock = multiprocessing.Lock()

    def increment(self):
        with self.lock:
            self.val.value += 1

    @property
    def value(self):
        return self.val.value
https://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing
https://docs.python.org/2/library/multiprocessing.html#multiprocessing.sharedctypes.Value
https://docs.python.org/2/library/multiprocessing.html#multiprocessing.sharedctypes.RawValue
Here is a solution to your problem based on a different approach from that proposed in the other answers. It uses message passing with multiprocessing.Queue objects (instead of shared memory with multiprocessing.Value objects) and process-safe (atomic) built-in increment and decrement operators += and -= (instead of introducing custom increment and decrement methods) since you asked for it.
First, we define a class Subject for instantiating an object that will be local to the parent process and whose attributes are to be incremented or decremented:
import multiprocessing

class Subject:

    def __init__(self):
        self.x = 0
        self.y = 0
Next, we define a class Proxy for instantiating an object that will be the remote proxy through which the child processes will request the parent process to retrieve or update the attributes of the Subject object. The interprocess communication will use two multiprocessing.Queue attributes, one for exchanging requests and one for exchanging responses. Requests are of the form (sender, action, *args) where sender is the sender name, action is the action name ('get', 'set', 'increment', or 'decrement' the value of an attribute), and args is the argument tuple. Responses are of the form value (to 'get' requests):
class Proxy(Subject):

    def __init__(self, request_queue, response_queue):
        self.__request_queue = request_queue
        self.__response_queue = response_queue

    def _getter(self, target):
        sender = multiprocessing.current_process().name
        self.__request_queue.put((sender, 'get', target))
        return Decorator(self.__response_queue.get())

    def _setter(self, target, value):
        sender = multiprocessing.current_process().name
        action = getattr(value, 'action', 'set')
        self.__request_queue.put((sender, action, target, value))

    @property
    def x(self):
        return self._getter('x')

    @property
    def y(self):
        return self._getter('y')

    @x.setter
    def x(self, value):
        self._setter('x', value)

    @y.setter
    def y(self, value):
        self._setter('y', value)
Then, we define the class Decorator to decorate the int objects returned by the getters of a Proxy object, in order to inform its setters whether the increment or decrement operators += and -= have been used, by adding an action attribute; in that case the setters request an 'increment' or 'decrement' operation instead of a 'set' operation. The increment and decrement operators += and -= call the corresponding augmented assignment special methods __iadd__ and __isub__ if they are defined, and fall back on the binary arithmetic special methods __add__ and __sub__, which are always defined for int objects. For example, proxy.x += value is equivalent to proxy.x = proxy.x.__iadd__(value), which is equivalent to proxy.x = type(proxy).x.__get__(proxy).__iadd__(value), which is equivalent to type(proxy).x.__set__(proxy, type(proxy).x.__get__(proxy).__iadd__(value)):
class Decorator(int):

    def __iadd__(self, other):
        value = Decorator(other)
        value.action = 'increment'
        return value

    def __isub__(self, other):
        value = Decorator(other)
        value.action = 'decrement'
        return value
Then, we define the function worker that will be run in the child processes and request the increment and decrement operations:
def worker(proxy):
    proxy.x += 1
    proxy.y -= 1
Finally, we define a single request queue to send requests to the parent process, and multiple response queues to send responses to the child processes:
if __name__ == '__main__':
    subject = Subject()
    request_queue = multiprocessing.Queue()
    response_queues = {}
    processes = []
    for index in range(4):
        sender = 'child {}'.format(index)
        response_queues[sender] = multiprocessing.Queue()
        proxy = Proxy(request_queue, response_queues[sender])
        process = multiprocessing.Process(
            target=worker, args=(proxy,), name=sender)
        processes.append(process)
    running = len(processes)
    for process in processes:
        process.start()
    while subject.x != 4 or subject.y != -4:
        sender, action, *args = request_queue.get()
        print(sender, 'requested', action, *args)
        if action == 'get':
            response_queues[sender].put(getattr(subject, args[0]))
        elif action == 'set':
            setattr(subject, args[0], args[1])
        elif action == 'increment':
            setattr(subject, args[0], getattr(subject, args[0]) + args[1])
        elif action == 'decrement':
            setattr(subject, args[0], getattr(subject, args[0]) - args[1])
    for process in processes:
        process.join()
The program is guaranteed to terminate when += and -= are process-safe. If you remove process-safety by commenting out the corresponding __iadd__ or __isub__ of Decorator, the program will only terminate by chance. For example, proxy.x += value is equivalent to proxy.x = proxy.x.__iadd__(value), but falls back to proxy.x = proxy.x.__add__(value) if __iadd__ is not defined; that is equivalent to proxy.x = proxy.x + value, which is equivalent to proxy.x = type(proxy).x.__get__(proxy) + value, which is equivalent to type(proxy).x.__set__(proxy, type(proxy).x.__get__(proxy) + value). So the action attribute is not added, and the setter requests a 'set' operation instead of an 'increment' operation.
Example process-safe session (atomic += and -=):
child 0 requested get x
child 0 requested increment x 1
child 0 requested get y
child 0 requested decrement y 1
child 3 requested get x
child 3 requested increment x 1
child 3 requested get y
child 2 requested get x
child 3 requested decrement y 1
child 1 requested get x
child 2 requested increment x 1
child 2 requested get y
child 2 requested decrement y 1
child 1 requested increment x 1
child 1 requested get y
child 1 requested decrement y 1
Example process-unsafe session (non-atomic += and -=):
child 2 requested get x
child 1 requested get x
child 0 requested get x
child 2 requested set x 1
child 2 requested get y
child 1 requested set x 1
child 1 requested get y
child 2 requested set y -1
child 1 requested set y -1
child 0 requested set x 1
child 0 requested get y
child 0 requested set y -2
child 3 requested get x
child 3 requested set x 2
child 3 requested get y
child 3 requested set y -3 # the program stalls here
A more sophisticated solution, based on lock-free atomic operations, as given by the example in the atomics library README:
from multiprocessing import Process, shared_memory
import atomics

def fn(shmem_name: str, width: int, n: int) -> None:
    shmem = shared_memory.SharedMemory(name=shmem_name)
    buf = shmem.buf[:width]
    with atomics.atomicview(buffer=buf, atype=atomics.INT) as a:
        for _ in range(n):
            a.inc()
    del buf
    shmem.close()

if __name__ == "__main__":
    # setup
    width = 4
    shmem = shared_memory.SharedMemory(create=True, size=width)
    buf = shmem.buf[:width]
    total = 10_000
    # run processes to completion
    p1 = Process(target=fn, args=(shmem.name, width, total // 2))
    p2 = Process(target=fn, args=(shmem.name, width, total // 2))
    p1.start(), p2.start()
    p1.join(), p2.join()
    # print results and cleanup
    with atomics.atomicview(buffer=buf, atype=atomics.INT) as a:
        print(f"a[{a.load()}] == total[{total}]")
    del buf
    shmem.close()
    shmem.unlink()
(atomics can be installed via pip install atomics on most major platforms.)
This is a different solution, and the simplest to my taste.
The reasoning is that you create an empty list and append to it each time your function executes, then print len(list) to check progress. (Note that with multiprocessing.Pool each worker process gets its own copy of the list, so this reliably tracks overall progress only with a thread pool, as in the next snippet.)
Here is an example based on your code:
import os
import multiprocessing

counter = []

def analyze(file):
    # Analyze the file.
    counter.append(' ')
    print(len(counter))

if __name__ == '__main__':
    files = os.listdir('/some/directory')
    pool = multiprocessing.Pool(4)
    pool.map(analyze, files)
For future visitors, the hack to add a counter to multiprocessing is as follows:
from multiprocessing.pool import ThreadPool

counter = []

def your_function(url):
    # function/process
    counter.append(' ')  # you can append anything
    return len(counter)

pool = ThreadPool()
result = pool.map(your_function, urls)
Hope this will help.
I'm working on a progress bar in PyQt5, so I use a thread and a pool together:
import threading
import multiprocessing as mp
from queue import Queue

def multi(x):
    return x * x

def pooler(q):
    with mp.Pool() as pool:
        count = 0
        for i in pool.imap_unordered(multi, range(100)):
            print(count, i)
            count += 1
            q.put(count)

def main():
    q = Queue()
    t = threading.Thread(target=pooler, args=(q,))
    t.start()
    print('start')
    process = 0
    while process < 100:
        process = q.get()
        print('p', process)

if __name__ == '__main__':
    main()
I put this in a QThread worker and it works with acceptable latency.

Pexpect helpful hints - find EOF with child.readline() and also own computer backup code

I have used this website over a hundred times and it has helped me so much with my coding (in Python, Arduino, terminal commands and the Windows prompt). I thought I would put up some knowledge I have found, for things that Stack Overflow could not help me with but which may be helpful for others in a similar situation. So have a look at the code below. I hope it helps people create their own backup code. I am most proud of the "while '\r\n' in output" part of the code below:
output = child0.readline()
while '\r\n' in output:
    msg.log(output.replace('\r\n', ''), logMode + 's')
    output = child0.readline()
This finds the EOF once the spawned program has finished, so you can log the terminal program's output while it is still running.
I will be adding a Windows version to this code too. Possibly with robocopy.
Any questions about the code below, please do not hesitate to ask. NB: I changed people's names and removed my username and passwords.
#!/usr/bin/python
# Written by irishcream24, amateur coder

import subprocess
import sys
import os.path
import logAndError  # my own library to handle errors and log events
from inspect import currentframe as CF  # help with logging
from inspect import getframeinfo as GFI  # help with logging
import threading
import fcntl
import pexpect
import time
import socket
import time as t
from sys import platform

if platform == "win32":
    import msvcrt
    portSearch = "Uno"
    portResultPosition = 1
elif platform == "darwin":
    portSearch = "usb"
    portResultPosition = 0
else:
    print 'Unknown operating system'
    print 'Ending Program...'
    sys.exit()

# Check if another instance of the program is running; if so, stop the second.
pid_file = 'program.pid'
fp = open(pid_file, 'w')
try:
    fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
    # another instance is running
    print "Program already running, stopping the second instance..."
    sys.exit(1)

# Determine where main program files are stored
directory = os.path.dirname(os.path.realpath(__file__))

# To print stderr to both screen and file
errCounter = 0
exitFlag = [0]

class tee:
    def __init__(self, _fd1, _fd2):
        self.fd1 = _fd1
        self.fd2 = _fd2

    def __del__(self):
        if self.fd1 != sys.stdout and self.fd1 != sys.stderr:
            self.fd1.close()
        if self.fd2 != sys.stdout and self.fd2 != sys.stderr:
            self.fd2.close()

    def write(self, text):
        global errCounter
        global exitFlag
        if errCounter == 0:
            self.fd1.write('%s: ' % t.strftime("%d/%m/%y %H:%M"))
            self.fd2.write('%s: ' % t.strftime("%d/%m/%y %H:%M"))
            errCounter = 1
            exitFlag[0] = 1
        self.fd1.write(text)
        self.fd2.write(text)

    def flush(self):
        self.fd1.flush()
        self.fd2.flush()

# Error and log handling
errMode = 'pf'  # p = print to screen, f = print to file, e = end program
errorFileAddress = '%s/errorFile.txt' % directory
outputlog = open(errorFileAddress, "a")
sys.stderr = tee(sys.stderr, outputlog)
logFileAddress = '%s/log.txt' % directory
logMode = 'pf'  # p = print to screen, f = print to file
msg = logAndError.logAndError(errorFileAddress, logFileAddress)

# Set computer to be backed up
sourceComputer = 'DebbieMac'
try:
    sourceComputer = sys.argv[1]
except:
    print 'No source argument given.'
if sourceComputer == 'SamMac' or sourceComputer == 'DebbieMac' or sourceComputer == 'mediaCentre' or sourceComputer == 'garageComputer':
    pass
else:
    msg.error('incorrect source computer supplied!', errMode, GFI(CF()).lineno, exitFlag)
    sys.exit()

# Source and destination setup
backupRoute = 'network'
try:
    backupRoute = sys.argv[2]
except:
    print 'No back up route argument given.'
if backupRoute == 'network' or backupRoute == 'direct' or backupRoute == 'testNetwork' or backupRoute == 'testDirect':
    pass
else:
    msg.error('incorrect backup route supplied!', errMode, GFI(CF()).lineno, exitFlag)
    sys.exit()

# Source, destination and exclude dictionaries
v = {
    'SamMac network source': '/Users/SamJones',
    'SamMac network destination': '/Volumes/Seagate/Sam_macbook_backup/Backups',
    'SamMac direct source': '/Users/SamJones',
    'SamMac direct destination': '/Volumes/Seagate\ Backup\ Plus\ Drive/Sam_macbook_backup/Backups',
    'SamMac testNetwork source': '/Users/SamJones/Documents/Arduino/arduino_sketches-master',
    'SamMac testNetwork destination': '/Volumes/Seagate/Sam_macbook_backup/Arduino',
    'SamMac exclude': ['.*', '.Trash', 'Library', 'Pictures'],
    'DebbieMac network source': '/Users/DebbieJones',
    'DebbieMac network destination': '/Volumes/Seagate/Debbie_macbook_backup/Backups',
    'DebbieMac direct source': '/Users/DebbieJones',
    'DebbieMac direct destination': '/Volumes/Seagate\ Backup\ Plus\ Drive/Debbie_macbook_backup/Backups',
    'DebbieMac testNetwork source': '/Users/DebbieJones/testFolder',
    'DebbieMac testNetwork destination': '/Volumes/Seagate/Debbie_macbook_backup',
    'DebbieMac testDirect source': '/Users/DebbieJones/testFolder',
    'DebbieMac testDirect destination': '/Volumes/Seagate\ Backup\ Plus\ Drive/Debbie_macbook_backup',
    'DebbieMac exclude': ['.*', '.Trash', 'Library', 'Pictures']
}

# Main threading code
class mainThreadClass(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        PIDMessage = 'Starting backup PID: %s' % os.getpid()
        msg.log(PIDMessage, logMode)
        mainThread()
        msg.log('Process completed successfully\n', logMode)

def mainThread():
    if platform == "win32":
        pass
    elif platform == "darwin":
        if 'network' in backupRoute:
            # Connect to SeagateBackup
            if os.path.ismount('/Volumes/Seagate') == False:
                msg.log('Mounting Seagate Backup Hub', logMode)
                commandM = 'mount volume'
                smbPoint = '"smb://username:password@mediacentre/Seagate"'
                childM = pexpect.spawn("%s '%s %s'" % ('osascript -e', commandM, smbPoint), timeout=None)
                childM.expect(pexpect.EOF)
            else:
                msg.log('Seagate already mounted', logMode)
        # Use rsync to backup files
        commandR = 'rsync -avb '
        for s in v['%s exclude' % sourceComputer]:
            commandR = commandR + "--exclude '%s' " % s
        commandR = commandR + '--delete --backup-dir="../PreviousBackups/%s" ' % time.strftime("%d-%m-%y %H%M")
        commandR = commandR + '%s %s' % (v['%s %s source' % (sourceComputer, backupRoute)], v['%s %s destination' % (sourceComputer, backupRoute)])
        msg.log(commandR, logMode)
        msg.log('Running rsync...rsync output below', logMode)
        child0 = pexpect.spawn(commandR, timeout=None)
        # Handling command output
        # If no '\r\n' in readline() output, then EOF reached
        output = child0.readline()
        while '\r\n' in output:
            msg.log(output.replace('\r\n', ''), logMode + 's')
            output = child0.readline()
    return

if __name__ == '__main__':
    # Create new threads
    threadMain = mainThreadClass()
    # Start new Threads
    threadMain.start()
logAndError.py
# to handle errors
import time
import sys
import threading

class logAndError:
    def __init__(self, errorAddress, logAddress):
        self.errorAddress = errorAddress
        self.logAddress = logAddress
        self.lock = threading.RLock()

    def error(self, message, errMode, lineNumber=None, exitFlag=[0]):
        message = '%s: %s' % (time.strftime("%d/%m/%y %H:%M"), message)
        # p = print to screen, f = print to file, e = end program
        if 'p' in errMode:
            print message
        if 'f' in errMode and 'e' not in errMode:
            errorFile = open(self.errorAddress, 'a')
            errorFile.write('%s\n' % message)
            errorFile.close()
        return

    def log(self, logmsg, logMode):
        with self.lock:
            logmsg2 = '%s: %s' % (time.strftime("%d/%m/%y %H:%M"), logmsg)
            if 'p' in logMode:
                # s = simple (no date stamp)
                if 's' in logMode:
                    print logmsg
                else:
                    print logmsg2
            if 'f' in logMode:
                if 's' in logMode:
                    logFile = open(self.logAddress, 'a')
                    logFile.write('%s\n' % logmsg)
                    logFile.close()
                else:
                    logFile = open(self.logAddress, 'a')
                    logFile.write('%s\n' % logmsg2)
                    logFile.close()
        return

Multithreading attempt Python fail

The following code is an attempt of mine at a voice recognition program. The voice recognition works fine and can understand me, but I encountered a problem: at certain points in the program the code would sort of freeze, or hang, but without an error.
To get around this I attempted to add a timer using multithreading: it should begin at a = True, and after 3 seconds the program would automatically close. If the recognition worked perfectly, a = False should stop the timer before it closed the program. This obviously hasn't worked, or I wouldn't be here.
I added a few print statements here and there so I could visually see where the code was when running, and I saw that the code for the timer begins, but the code for voice recognition does not.
import speech_recognition as sr
r = sr.Recognizer()
import os, threading, time, sys
import subprocess as sp

print("Voice Recognition Software\n\n***********************************\n")

class myThread(threading.Thread):
    def run():
        print("Checking")
        while True:
            if a == True:
                if a == False:
                    continue
                for x in range(3):
                    time.sleep(1)
                    if a == False:
                        break
                sys.exit()

def program():
    while True:
        print("voice recog has begun")
        r.energy_threshold = 8000
        t = None
        with sr.Microphone() as source:
            print(">")
            a = True
            audio = r.listen(source)
            a = False
        try:
            a = True
            print("Processing...")
            t = r.recognize_google(audio)
            a = False
            print(": " + t)
        except sr.UnknownValueError:
            print("Unknown input")
            continue
        except sr.RequestError as e:
            print("An error occured at GAPI\nA common cause is lack of internet connection")
            continue
        if "open" in t:
            t = t.replace("open", "")
            t = t.replace(" ", "")
            t = t + ".exe"
            print(t)
            for a, d, f in os.walk("C:\\"):
                for files in f:
                    if files == t.lower() or files == t.capitalize() or files == t.upper():
                        pat = os.path.join(a, files)
                        print(pat)
                        sp.call([pat])
                        success = True
                        if success == True:
                            continue
        a = False
        success = False

thread1 = myThread.run()
thread2 = myThread.program()
thread1.start()
thread2.start()
EDIT:
I see some mistakes of my own here, like the indentation of the def function, but even after fixing what I can see, it doesn't work as intended.
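A hedged sketch of the watchdog described above, with the recognizer calls stubbed out as a sleep (names are illustrative). One thing to note: sys.exit() raised in a worker thread only ends that thread, which is one reason the timer above could never close the program; os._exit() does end the process. A threading.Timer armed before each blocking call, and cancelled when it returns, keeps the bookkeeping simple:
import os
import threading
import time

def do_blocking_call():
    # Stand-in for r.listen(...) / r.recognize_google(...); hangs for 5 s.
    time.sleep(5)

def guarded(call, timeout=3.0):
    # Arm a timer before the blocking call and cancel it if the call
    # returns in time. If the timer fires first, kill the whole process;
    # os._exit() is used because sys.exit() from a timer thread would
    # only end that thread, not the program.
    timer = threading.Timer(timeout, lambda: os._exit(1))
    timer.daemon = True
    timer.start()
    try:
        return call()
    finally:
        timer.cancel()

guarded(do_blocking_call)  # exits after ~3 s because the call "hangs"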

Unable to get arduino serial communication working in wxpython GUI

This is the definition which is used to update the labels in the GUI:
def updateV(self, event):
    """"""
    global v
    ser = serial.Serial(port='COM3', baudrate=9600)
    x = ser.read()  # read one byte
    ser.close()
    print x
    if v > 3:
        self.labelOne.SetBackgroundColour('red')
        self.labelOne.SetLabel('Battery Voltage : ' + x)
    else:
        self.labelOne.SetBackgroundColour('white')
        self.labelOne.SetLabel('Battery Voltage : ' + str(v))
    self.Refresh()
This is the simple Arduino code I have been using:
int a;

void setup() {
  Serial.begin(9600); // put your setup code here, to run once:
}

void loop() {
  a = 5;
  Serial.println(a);
  delay(10);
}
I have been using this definition to update the labels in my GUI. I recently started to set up serial communication in the GUI using that code. Using the mainloop() of the wx library, I thought I could update the 'x' value and get it printed on the GUI. But all the GUI window shows is 0.0, even though the Python console prints 5 regularly. Please help! I am pretty new to this.
Your issue is that ser.read() will block. Even if you tweak the timeout of your serial.Serial instance, it will still keep the GUI busy. In that situation I do not know a method to "force" a refresh/wx.Yield(); it simply will not work. The standard solution for blocking calls is to spin up a thread, or to poll regularly (e.g. with wx.Timer). However, I was only able to make threading work. The example is based on wxTerminal in pyserial.
# -*- coding: utf-8 -*-
import wx
import serial
from threading import Thread

ARDUINO_NEWLINE = '\r\n'

class serial_reader(object):

    def __init__(self, callback=None):
        """Creates serial reader.

        :param callback: callable, gets called when byte on serial arrives.
        """
        self.callback = callback
        self.thread = None
        # Signal if serial is alive and should be read
        self.alive = False

    def start_reader(self, serial_cfg):
        """Start the receiver thread.

        :param serial_cfg: dictionary, gets unpacked to parameters for :class:`serial.Serial`
        """
        self.ser_cfg = serial_cfg
        self.serial = serial.Serial(**serial_cfg)
        # set != None so it will not block for longer than timeout on shutdown
        self.serial.timeout = 0.1
        self.alive = True
        self.thread = Thread(target=self.serial_read)
        self.thread.daemon = True
        self.thread.start()

    def stop_reader(self):
        """Stop the receiver thread, wait until it is finished."""
        if self.thread is not None:
            # signal no more reads
            self.alive = False
            # wait until thread has finished
            self.thread.join()
            self.thread = None
            # cleanup
            self.serial.close()

    def serial_read(self):
        """Thread that handles the incoming traffic."""
        while self.alive:
            try:
                text = self.serial.read()
                if text and self.callback:
                    # return value to main loop in thread-safe manner
                    wx.CallAfter(self.callback, text)
            except serial.serialutil.SerialException:
                # will happen when Windows goes in sleep mode
                print 'serial.serialutil.SerialException'

class ser_frm(wx.Frame):

    def __init__(self, *args, **kwds):
        wx.Frame.__init__(self, *args, **kwds)
        self.txt = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE)

class serial_controller(object):

    def __init__(self, app):
        self.app = app
        # buffer for serial data
        self.ser_buf = ''
        self.frm = ser_frm(None, -1, 'testfrm')
        # setup serial configuration
        self.serial_cfg = {'port': 'COM4', 'baudrate': 9600}
        # When parameter dsrdtr is set to True, the Arduino
        # will not reset on serial open, for details see
        # http://playground.arduino.cc/Main/DisablingAutoResetOnSerialConnection
        self.serial_cfg['dsrdtr'] = True
        self.ser_rd = serial_reader(callback=self.on_serial)
        tit = 'Arduino on port {port} at baudrate {baudrate}'.format(**self.serial_cfg)
        self.frm.SetTitle(tit)
        self.ser_rd.start_reader(self.serial_cfg)
        self.frm.Show()
        self.frm.Bind(wx.EVT_CLOSE, self.on_close)

    def on_close(self, evt):
        """Shutdown serial read thread before closing."""
        if self.ser_rd.alive:
            self.ser_rd.stop_reader()
        evt.Skip()

    def on_serial(self, text):
        """Handle input from the serial port."""
        self.ser_buf += text
        if self.ser_buf.endswith(ARDUINO_NEWLINE):
            if self.frm.txt.GetInsertionPoint() > 1000:
                self.frm.txt.SetValue('')
            self.frm.txt.AppendText(self.ser_buf)
            self.ser_buf = ''

if __name__ == "__main__":
    app = wx.App(redirect=False)
    serialctr = serial_controller(app)
    app.MainLoop()
EDIT: It is not necessary to tinker with DSR/DTR on Arduinos with USB on the chip (e.g. the Arduino Micro), so delete the line
self.serial_cfg['dsrdtr'] = True
and it will still work properly.
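For completeness, a rough sketch of the wx.Timer polling alternative mentioned above (the port name is a placeholder, and as noted, the threaded version is what worked reliably for the author). The key point is timeout=0, which makes read() non-blocking so the GUI never stalls:
import wx
import serial

class PollFrame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None, -1, 'Polling example')
        self.txt = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE)
        # timeout=0: read() returns immediately with whatever bytes
        # are buffered (possibly none) instead of blocking.
        self.ser = serial.Serial(port='COM4', baudrate=9600, timeout=0)
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.on_poll, self.timer)
        self.timer.Start(50)  # poll every 50 ms

    def on_poll(self, event):
        data = self.ser.read(64)  # up to 64 buffered bytes
        if data:
            self.txt.AppendText(data.decode('ascii', errors='replace'))

if __name__ == '__main__':
    app = wx.App(redirect=False)
    PollFrame().Show()
    app.MainLoop()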
