Synchronous generator in asyncio - async-await

I have the following scenario:
I have a blocking, synchronous generator.
I have a non-blocking, async function.
I would like to run the blocking generator (executed in a ThreadPool) and the async function on the event loop. How do I achieve this?
The following code simply prints the output from the generator, never from the sleep function.
Thanks!
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import asyncio
import time

def f():
    while True:
        r = np.random.randint(0, 3)
        time.sleep(r)
        yield r

async def gen():
    loop = asyncio.get_event_loop()
    executor = ThreadPoolExecutor()
    gen = await loop.run_in_executor(executor, f)
    for item in gen:
        print(item)
        print('Inside generator')

async def sleep():
    while True:
        await asyncio.sleep(1)
        print('Inside async sleep')

async def combine():
    await asyncio.gather(sleep(), gen())

def main():
    loop = asyncio.get_event_loop()
    loop.run_until_complete(combine())

if __name__ == '__main__':
    main()

run_in_executor doesn't work on generators because it is designed for blocking functions. While a generator is a valid function, it returns immediately when called, providing an object that the caller is supposed to exhaust through repeated invocations of next. (This is what Python's for loop does under the hood.) To use a blocking generator from async code, you have two choices:
wrap each step of the iteration (each individual call to next) in a separate call to run_in_executor (sketched right after this list), or
start a for loop in a separate thread and use a queue to transfer the objects to an async consumer.
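For illustration, the first approach can be sketched like this (iterate_in_executor is a hypothetical name for this sketch; it relies on the two-argument form of next() to signal exhaustion):

import asyncio

async def iterate_in_executor(it):
    # Drive a blocking iterator from async code, one next() call per executor hop.
    loop = asyncio.get_event_loop()
    _END = object()  # sentinel returned by next() once the iterator is exhausted
    while True:
        # next(it, _END) runs in the default executor, keeping the event loop free
        item = await loop.run_in_executor(None, next, it, _END)
        if item is _END:
            break
        yield item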
Either approach can be abstracted into a function that accepts an iterator and returns an equivalent async iterator. This is an implementation of the second approach:
import asyncio, threading

def async_wrap_iter(it):
    """Wrap blocking iterator into an asynchronous one"""
    loop = asyncio.get_event_loop()
    q = asyncio.Queue(1)
    exception = None
    _END = object()

    async def yield_queue_items():
        while True:
            next_item = await q.get()
            if next_item is _END:
                break
            yield next_item
        if exception is not None:
            # the iterator has raised, propagate the exception
            raise exception

    def iter_to_queue():
        nonlocal exception
        try:
            for item in it:
                # This runs outside the event loop thread, so we
                # must use thread-safe API to talk to the queue.
                asyncio.run_coroutine_threadsafe(q.put(item), loop).result()
        except Exception as e:
            exception = e
        finally:
            asyncio.run_coroutine_threadsafe(q.put(_END), loop).result()

    threading.Thread(target=iter_to_queue).start()
    return yield_queue_items()
It can be tested with a trivial sync iterator that uses time.sleep() to block and an async heartbeat function to prove that the event loop is running:
# async_wrap_iter definition as above

import time

def test_iter():
    for i in range(5):
        yield i
        time.sleep(1)

async def test():
    ait = async_wrap_iter(test_iter())
    async for i in ait:
        print(i)

async def heartbeat():
    while True:
        print('alive')
        await asyncio.sleep(.1)

async def main():
    asyncio.create_task(heartbeat())
    await test()

asyncio.run(main())

Related

AttributeError: Can't pickle local object in Multiprocessing

I am very new to Python and I encountered this error.
CODE 1 :
import multiprocessing as mp
import os

def calc(num1, num2):
    def addi(num1, num2):
        print(num1+num2)
    m = mp.Process(target = addi, args = (num1, num2))
    m.start()
    print("here is main", os.getpid())
    m.join()

if __name__ == "__main__":
    # creating processes
    calc(5, 6)
ERROR 1:
ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object 'calc.<locals>.addi'
After reading around a little I understand that pickle cannot be used for local methods and so I also tried the below solution which gave another error.
CODE 2 :
import multiprocessing as mp
import os

def calc(num1, num2):
    global addi
    def addi(num1, num2):
        print(num1+num2)
    m = mp.Process(target = addi, args = (num1, num2))
    m.start()
    print("here is main", os.getpid())
    m.join()

if __name__ == "__main__":
    # creating processes
    calc(5, 6)
ERROR 2:
self = reduction.pickle.load(from_parent)
AttributeError: Can't get attribute 'addi' on <module '__mp_main__' from '/Users
Could someone please help me out with this? I am clueless on what to do next!
The Python version I am using is 3.8.9.
Thank you so much!
Basically, the reason you are getting this error is that multiprocessing uses pickle, which in general can only serialize functions defined at the top level of a module. Function addi is not a top-module level function. In fact, the line global addi is not doing anything, because addi has never been declared in the outer module. So you have three ways to fix this.
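To see the limitation in isolation, here is a minimal sketch (not part of your code) showing that pickle happily serializes a top-module level function by reference but rejects a local one:

import pickle

def outer():
    def inner():
        pass
    return inner

pickle.dumps(outer)    # fine: outer can be looked up at the module's top level
pickle.dumps(outer())  # AttributeError: Can't pickle local object 'outer.<locals>.inner'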
Method 1
You can define addi in the global scope before executing calc function:
import multiprocessing as mp
import os

def addi(num1, num2):
    print(num1 + num2)

def calc(num1, num2):
    m = mp.Process(target=addi, args=(num1, num2))
    m.start()
    print("here is main", os.getpid())
    m.join()

if __name__ == "__main__":
    # creating processes
    calc(5, 6)
Output
here is main 9924
11
Method 2
You can switch to multiprocess, which uses dill instead of pickle, and can serialize such functions.
import multiprocess as mp  # Note that we are importing "multiprocess", no "ing"!
import os

def calc(num1, num2):
    def addi(num1, num2):
        print(num1 + num2)
    m = mp.Process(target=addi, args=(num1, num2))
    m.start()
    print("here is main", os.getpid())
    m.join()

if __name__ == "__main__":
    # creating processes
    calc(5, 6)
Output
here is main 67632
11
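If you want a standalone check of why this works, dill (which multiprocess uses under the hood) can round-trip a local function where pickle cannot; a minimal sketch, not part of the original answer:

import dill

def outer():
    def inner():
        return 42
    return inner

restored = dill.loads(dill.dumps(outer()))  # pickle.dumps would raise AttributeError here
print(restored())  # 42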
Method 2b
While it's a useful library, there are a few valid reasons why you may not want to use multiprocess. A big one is the fact that the standard library's multiprocessing and this fork are not compatible with each other (especially if you use anything from within the subpackage multiprocessing.managers). This means that if you are using this fork in your own project, but also use third-party libraries which themselves use the standard library's multiprocessing instead, you may see unexpected behaviour.
Anyway, in cases where you want to stick with the standard library's multiprocessing and not use the fork, you can use dill yourself to serialize python closures like the function addi by subclassing the Process class and adding some of our own logic. An example is given below:
import dill
from multiprocessing import Process  # Use the standard library only
import os

class DillProcess(Process):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._target = dill.dumps(self._target)  # Save the target function as bytes, using dill

    def run(self):
        if self._target:
            self._target = dill.loads(self._target)  # Unpickle the target function before executing
            self._target(*self._args, **self._kwargs)  # Execute the target function

def calc(num1, num2):
    def addi(num1, num2):
        print(num1 + num2)
    m = DillProcess(target=addi, args=(num1, num2))  # Note how we use DillProcess, and not multiprocessing.Process
    m.start()
    print("here is main", os.getpid())
    m.join()

if __name__ == "__main__":
    # creating processes
    calc(5, 6)
Output
here is main 23360
11
Method 3
This method is for those who cannot use any third-party libraries in their code. I recommend making sure the above methods do not work for you before resorting to this one, because it's a little hacky and you do need to restructure some of your code.
Anyway, this method works by referencing your local functions in the top-module scope, so that they become accessible to pickle. To do this dynamically, we create a placeholder class and add all the local functions as its class attributes. We also need to make sure that each function's __qualname__ attribute is altered to point to its new location, and that this is all done on every run outside the if __name__ ... block (otherwise newly started processes won't see the attributes). Consider a slightly modified version of your code here:
import multiprocessing as mp
import os

def calc(num1, num2):
    def addi(num1, num2):
        print(num1 + num2)

    # Another local function you might have
    def addi2():
        print('hahahaha')

    m = mp.Process(target=addi, args=(num1, num2))
    m.start()
    print("here is main", os.getpid())
    m.join()

if __name__ == "__main__":
    # creating processes
    calc(5, 6)
Below is how you can make it work using the method detailed above:
import multiprocessing as mp
import os

# This is our placeholder class; all local functions will be added as its attributes
class _LocalFunctions:
    @classmethod
    def add_functions(cls, *args):
        for function in args:
            setattr(cls, function.__name__, function)
            function.__qualname__ = cls.__qualname__ + '.' + function.__name__

def calc(num1, num2, _init=False):
    # The _init parameter is to initialize all local functions outside the __main__ block without
    # actually running the whole function. Basically, you shift all local function definitions to
    # the top and add them to our _LocalFunctions class. Now, if the _init parameter is True, then
    # this means that the function call was just to initialize the local functions and you SHOULD
    # NOT do anything else. This means that after they are initialized, you simply return (check below)
    def addi(num1, num2):
        print(num1 + num2)

    # Another local function you might have
    def addi2():
        print('hahahaha')

    # Add all functions to _LocalFunctions class, separating each with a comma:
    _LocalFunctions.add_functions(addi, addi2)

    # IMPORTANT: return and don't actually execute the logic of the function if _init is True!
    if _init is True:
        return

    # Beyond here is where you put the function's actual logic including any assertions, etc.
    m = mp.Process(target=addi, args=(num1, num2))
    m.start()
    print("here is main", os.getpid())
    m.join()

# All factory functions must be initialized BEFORE the "if __name__ ..." clause. If they require
# any parameters, substitute with bogus ones and make sure to put the _init parameter value as True!
calc(0, 0, _init=True)

if __name__ == '__main__':
    a = calc(5, 6)
So there are a few things you would need to change in your code, namely that all local functions inside are defined at the top and all factory functions need to be initialized (for which they need to accept the _init parameter) outside the if __name__ ... clause. But this is probably the best you can do if you can't use dill.
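If you want to confirm that the relocation worked, a quick hypothetical check (not part of the method itself, run after calc(0, 0, _init=True) has executed) is to pickle the function directly:

import pickle

# addi is now reachable as _LocalFunctions.addi and its __qualname__ points there,
# so pickle can serialize it by reference instead of raising AttributeError
pickle.dumps(_LocalFunctions.addi)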

Is there a way to make the "global" command carry into and through a discord.py command?

global t
t = 0

@bot.command()
async def ping(ctx, member: discord.Member):
    while True:
        await ctx.channel.send(member.mention)

        @bot.event
        async def on_message(message):
            try:
                if message.author == member:
                    t = 5  # (A)
                    return
            except:
                pass

        if t == 5:
            break
Line (A) shows an error. I assumed the problem was that the variable t was not carrying through the @bot.event, but it seems like even the global command does not work. Is there some other problem that I just don't see?
Explanation
When defining t, you are already defining it at the module (global) level. Thus, the global t command is redundant.
Instead, you should use global inside each new local scope you want to use t in.
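To see the rule in isolation, here is a minimal sketch independent of discord.py:

t = 0

def set_local():
    t = 5  # creates a brand-new local t; the module-level t is untouched

def set_global():
    global t  # assignments in this scope now rebind the module-level t
    t = 5

set_local()
print(t)  # 0
set_global()
print(t)  # 5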
As of now, the statement t = 5 creates a variable local to on_message. You probably want to edit the global t, which can be accomplished with the following edits to your code:
Code
t = 0

@bot.command()
async def ping(ctx, member: discord.Member):
    while True:
        await ctx.channel.send(member.mention)

        @bot.event
        async def on_message(message):
            global t  # Since we are assigning t in this function, we must state to use t from the global scope
            try:
                if message.author == member:
                    t = 5  # (A)
                    return
            except:
                pass

        if t == 5:
            break
Reference
Scopes and namespaces

Python: Register hooks to existing code to get control when a function is called

I'd like to get control (to execute some pre-emptive tasks) when a function is called in Python without modifying the source program, e.g., when calling test()
def test(i: int, s: str) -> int:
    pass
I'd like a function myobserver to be called, and have some way to inspect (maybe even modify?!) the parameters? Think of it sorta like a mini-debugger, e.g., to add logging to an existing program that can't/shouldn't be modified?
def myobserver(handle):
    name = get_name(handle)
    for n, arg in enumerate(get_arg_iterator(handle)):
        print(f"Argument {n} of function {name}: {arg}")
ETA: I am not looking for the traditional decorator, because adding a decorator requires changing the source code. (In this sense, decorators are nicer than adding a print, but still similar because they require changes to source.)
You are looking for Python decorators:
from functools import wraps

def debugger(func):
    @wraps(func)
    def with_logging(*args, **kwargs):
        print('"'+func.__name__+'({},{})"'.format(*args, **kwargs)+" was invoked")
        # -------------------
        # Your logic here
        # -------------------
        return func(*args, **kwargs)
    return with_logging

@debugger
def test(i: int, s: str) -> int:
    print('We are in test', i, s)

test(10, 'hello')
EDIT
Since the decorator method mentioned above interferes with the source code (you have to apply the @ decorators), I propose the following:
# This is source code to observe, should not be _touched_!
class SourceCode:
    def __init__(self, label):
        self.label = label

    def test1(self, i, s):
        print('For object labeled {}, we are in {} with param {}, {}'.format(self.label, 'test1', i, s))

    def test2(self, k):
        print('For object labeled {}, we are in {} with param {}'.format(self.label, 'test2', k))
What I propose requires some manual effort in writing the hooks; I am not sure if this is feasible (it just occurred to me, hence adding):
from functools import wraps

# the following is pretty much syntactic and generic
def hook(exist_func, debugger):
    @wraps(exist_func)
    def run(*args, **kwargs):
        return debugger(exist_func, *args, **kwargs)
    return run

# here goes your debugger
def myobserver(orig_func, *args, **kwargs):
    # -----------------------
    # Your logic goes here
    # -----------------------
    print('Inside my debugger')
    return orig_func(*args, **kwargs)

# ---------------------------------
obj = SourceCode('Test')

# making the wrapper ready to receive
no_interference_hook1 = hook(obj.test1, myobserver)
no_interference_hook2 = hook(obj.test2, myobserver)

# call your debugger on the function of your choice
no_interference_hook1(10, 'hello')
no_interference_hook2('Another')
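Note that with this approach the caller still has to invoke the wrapper explicitly. If existing call sites should be intercepted as well, one option (a sketch building on the hook above, not part of the original answer) is to rebind the method on the class so every call passes through the observer:

# Rebind the class attribute in place; all instances now go through myobserver
SourceCode.test1 = hook(SourceCode.test1, myobserver)

obj2 = SourceCode('Patched')
obj2.test1(10, 'hello')  # prints 'Inside my debugger' before running the original test1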

Making closeEvent() work outside of a class in PySide

Seemingly the closeEvent method in the following code is called upon clicking x (in the top right corner of the window). I'm guessing the information that allows Python to make that connection is inside the self argument.
Is there a way to implement this in a procedural or functional program?
import sys
from PySide import QtGui

class Example(QtGui.QWidget):

    def __init__(self):
        super(Example, self).__init__()
        self.initUI()

    def initUI(self):
        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle('Message box')
        self.show()

    def closeEvent(self, event):
        reply = QtGui.QMessageBox.question(self, 'Message',
            "Are you sure to quit?", QtGui.QMessageBox.Yes |
            QtGui.QMessageBox.No, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()

def main():
    app = QtGui.QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
Apparently, you can assign your own function to QWidget.closeEvent, given that you take the automatically passed-in instance argument and event into account. Note that the assignment has to happen on the class: a function assigned to an individual instance would receive only the event, with no instance argument.

def myHandler(widget_inst, event):
    print("Handling closeEvent")

QtGui.QWidget.closeEvent = myHandler  # patched on the class, so widget_inst is supplied
This is going to get tedious and is not the way things were intended to be done.
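For completeness, here is a self-contained sketch of the patching approach (PySide assumed available; the class-level assignment is what makes Qt supply the widget instance as the first argument):

import sys
from PySide import QtGui

def myHandler(widget_inst, event):
    print("Handling closeEvent")
    event.accept()  # allow the window to close

app = QtGui.QApplication(sys.argv)
QtGui.QWidget.closeEvent = myHandler  # every QWidget now routes close events here
mywidget = QtGui.QWidget()
mywidget.show()
sys.exit(app.exec_())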

Handling events in wxPython

How to pass an argument to an event handler in wxPython?
Here's my code:
def close_handler(event):
    baz(foo)

...

foo = 'bar'
frame.Bind(wx.EVT_CLOSE, close_handler)
How do I pass foo to the close_handler() function?
import functools

def close_handler(event, foo):
    baz(foo)

foo = 'bar'
func = functools.partial(close_handler, foo=foo)
frame.Bind(wx.EVT_CLOSE, func)
Or to conserve some space:
import functools

def close_handler(event, foo):
    baz(foo)

frame.Bind(wx.EVT_CLOSE, functools.partial(close_handler, foo='bar'))
Have close_handler create a second event-handling function that uses foo:
def close_handler(foo):
    return lambda event: baz(foo)

foo = 'bar'
frame.Bind(wx.EVT_CLOSE, close_handler(foo))
Here lambda event: baz(foo) is an anonymous function that calls baz(foo), silently discarding the event argument. Or you could pass it to baz if you wanted: lambda event: baz(foo, event).
You could also write this without using lambda like so:
def close_handler(foo):
    def event_handler(event):
        return baz(foo)
    return event_handler
