I found an example here, but I don't understand how to make the code work:
class Worker:
    def __init__(self, func, n=3):
        self.func = func
        self.queue = asyncio.Queue()
        self.semaphore = asyncio.Semaphore(n)

    def put(self, *args):
        self.queue.put_nowait(args)

    async def run(self):
        while True:
            args = await self._get()
            if args is None:
                return
            asyncio.ensure_future(self._target(args))

    async def _get(self):
        get_task = asyncio.ensure_future(self.queue.get())
        join_task = asyncio.ensure_future(self.queue.join())
        await asyncio.wait(coros, return_when='FIRST_COMPLETED')
        if get_task.done():
            return task.result()

    async def _target(self, args):
        try:
            async with self.semaphore:
                return await self.func(*args)
        finally:
            self.queue.task_done()
Then I tried:
def work(a1,a2): print('work done',a1,a2)
W = Worker(work,n=3)
W.put(1,2)
W.put(1,2)
W.put(1,2)
result = await W.run() # for Jupyter notebooks
# asyncio.run(W.run()) # for normal python
I get this error:
NameError: name 'coros' is not defined
I admit, the linked solution confused me, and it didn't seem to work. So I just rewrote the Worker class, which hopefully now works for you:
import asyncio

class Worker:
    def __init__(self, func, n=3):
        self.func = func
        self.queue = asyncio.Queue()
        self.semaphore = asyncio.Semaphore(n)

    def put(self, *args):
        self.queue.put_nowait(args)

    async def run(self):
        tasks = []
        while True:
            try:
                args = self.queue.get_nowait()
            except asyncio.QueueEmpty:
                break
            tasks.append(asyncio.ensure_future(self.do_work(args)))
        await asyncio.gather(*tasks)

    async def do_work(self, args):
        async with self.semaphore:
            await self.func(*args)
This seems like a much simpler way to do it. Basically, Worker.run now just starts a task for each item in the queue; each task must first acquire the semaphore before calling the provided work function, and run finishes once all the work is done.
Here is the usage:
async def work(a1, a2):
    print("Starting work...", a1, a2)
    await asyncio.sleep(1)
    print("Finishing work...")
W = Worker(work, n=3)
W.put(1,2)
W.put(3,4)
W.put(5,6)
W.put(7,8)
W.put(9,10)
asyncio.get_event_loop().run_until_complete(W.run())
"""Output
Starting work... 1 2
Starting work... 3 4
Starting work... 5 6
Finishing work...
Finishing work...
Finishing work...
Starting work... 7 8
Starting work... 9 10
Finishing work...
Finishing work...
"""
It should be noted that you cannot use asyncio.run while also using asyncio.Semaphore this way, because asyncio.run always starts a new event loop, while asyncio.Semaphore(n) binds itself to the loop that is current when it is created, which here is the default loop that exists before asyncio.run is called. This causes the semaphore to use a different loop than Worker.run.
So just using asyncio.get_event_loop().run_until_complete works fine, because it uses the default loop (which the semaphore is expecting).
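If you do want to use asyncio.run, one workaround (a minimal sketch, not part of the original answer, reusing the async work function from the usage above) is to create the Worker, and therefore its semaphore, inside the coroutine that asyncio.run executes, so everything is bound to the same loop:
async def main():
    # Creating the Worker here means its Semaphore is created while the loop
    # started by asyncio.run is running, so both end up on the same loop.
    W = Worker(work, n=3)
    W.put(1, 2)
    W.put(3, 4)
    await W.run()

asyncio.run(main())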
class DetectClient(object):
    def __init__(self, url):
        self.url = url
        self.ws = None
        self.connect()
        PeriodicCallback(self.heartbeat, 3000).start()
        IOLoop.instance().start()

    @gen.coroutine
    def connect(self):
        try:
            self.ws = yield websocket_connect(self.url)
        except Exception as e:
            print("connection error, %s" % e)
        else:
            print("detect connected.")
            self.run()
For some reason, it seems that only one client instance can be started. How do I start two instances of this client in the main function, like this?
if __name__ == "__main__":
    DetectClient('ws.//1231231')
    DetectClient('ws.//1231231')
Don't start the IOLoop in every client. You only have to start the IOLoop once, globally.
For running multiple coroutines simultaneously, you can use gen.multi or gen.WaitIterator.
Here's a modified code example (I've not tested it):
from tornado import gen, ioloop
from tornado.ioloop import PeriodicCallback
from tornado.websocket import websocket_connect

class DetectClient(object):
    def __init__(self, url):
        self.url = url
        self.ws = None

    @gen.coroutine
    def connect(self):
        try:
            self.ws = yield websocket_connect(self.url)
        except Exception as e:
            print("connection error, %s" % e)
        else:
            print("detect connected.")
            self.run()
            PeriodicCallback(self.heartbeat, 3000).start()
@gen.coroutine
def main():
    waiter = gen.WaitIterator(
        DetectClient('ws.//1231231').connect(),
        DetectClient('ws.//1231231').connect()
    )
    while not waiter.done():
        try:
            yield waiter.next()
        except Exception as e:
            print(e)
            continue

if __name__ == '__main__':
    loop = ioloop.IOLoop.current()
    loop.run_sync(main)
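If you prefer gen.multi, main could instead look like this (a minimal sketch under the same assumptions, still run with loop.run_sync(main) as above; gen.multi waits for all futures and raises the first exception it sees, rather than letting you handle each one individually like gen.WaitIterator does):
@gen.coroutine
def main():
    # Run both connect() coroutines concurrently and wait for both to finish.
    yield gen.multi([
        DetectClient('ws.//1231231').connect(),
        DetectClient('ws.//1231231').connect(),
    ])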
I have some code for a cog that looks something like this:
class Example(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.counter = 0

    @commands.Cog.listener()
    async def on_message(self, message):
        print("Listener triggered")
        self.counter += 1

    @commands.group()
    async def first(self, ctx):
        if ctx.invoked_subcommand is None:
            await ctx.send("Invalid subcommand")

    @first.command()
    async def second(self, ctx):
        print("Command triggered")
        await ctx.send(f"Current counter: {self.counter}")
When I run this code and send a message to my bot, second gets called before on_message. I have some code in second that expects on_message to be executed first, but I can't figure out a good way to make this happen. Suggestions?
Check out the wait_for event.
I think you are trying to make the bot count the number of valid uses of a command. To do this, try:
def check(message):
    # put your condition for the message being a valid command here
    ...

await client.wait_for('message', check=check)
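As a rough sketch of how that suggestion could look inside the second command from the question (the check condition here is a hypothetical placeholder; adapt it to whatever counts as a valid command for you):
@first.command()
async def second(self, ctx):
    def check(message):
        # Hypothetical condition: only count follow-up messages from the
        # same author in the same channel.
        return message.author == ctx.author and message.channel == ctx.channel

    message = await self.bot.wait_for('message', check=check)
    self.counter += 1  # only incremented once a matching message arrives
    await ctx.send(f"Current counter: {self.counter}")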
I have the following code:
self.current_task = self.bot.wait_for('message', check=check, timeout=10 * 60)
response = await self.current_task
However, I want the bot.wait_for to be cancelled if a condition becomes true in another function within the same class. I've tried calling self.current_task.close(), but it still waits for a response/timeout before returning None.
You can have a variable that tracks whether the condition is met; in the check function, simply check whether it is met, raise an error, and catch it in a try/except block:
def check(m):
    if self.is_condition_met:  # Change the variable accordingly
        raise  # Whatever error you want
    return  # normal check
An example would be:
class SomeCog(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.is_condition_met = False

    @commands.command()
    async def met(self, ctx):
        self.is_condition_met = True

    @commands.command()
    async def foo(self, ctx):
        await ctx.send("Please send a message")

        def check(m):
            if self.is_condition_met:
                raise RuntimeError("condition is met")
            return ctx.author == m.author

        try:
            message = await self.bot.wait_for("message", check=check, timeout=20.0)
        except RuntimeError:
            await ctx.send("Condition was met")
        except asyncio.TimeoutError:
            await ctx.send("Time is over")
If you invoke the foo command first and then the met command, the wait should be cancelled as soon as someone sends a message.
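If you need the wait to end immediately rather than only when the next message arrives, another option (not part of the answer above, just a sketch building on the asker's own self.current_task idea) is to wrap wait_for in a task and cancel that task from the other function:
# In the waiting function: store the wait as a Task so other code can cancel it.
self.current_task = asyncio.ensure_future(
    self.bot.wait_for('message', check=check, timeout=10 * 60)
)
try:
    response = await self.current_task
except asyncio.CancelledError:
    response = None  # the wait was cancelled from elsewhere

# In the other function, when the condition becomes true:
self.current_task.cancel()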
I'm making an Administration cog for my Discord bot, and my code wouldn't recognize 'ctx'. PyCharm suggested replacing 'ctx' with 'self', and I have no idea what 'self' does. From what PyCharm is saying, there are lots of other things I would have to fix as well. PyCharm couldn't identify guild, send, author, and channel, and it also says that return ctx.author.guild_permissions.manage_messages is unreachable code. As a note, if this seems like a really stupid question, I am a beginner who started 2 weeks ago.
As for the code:
class Administration(commands.Cog):
    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_ready(self):
        print("Admin cog ready")

    async def cog_check(self, ctx):
        admin = get(ctx.guild.roles, name="Admin")
        return admin in ctx.author.roles
        return ctx.author.guild_permissions.manage_messages

    @commands.command(aliases=["purge"])
    async def clear(ctx, amount=3):
        """Clears 3 messages"""
        await ctx.channel.purge(limit=amount)

    @commands.command(pass_context=True)
    async def giverole(ctx, user: discord.Member, role: discord.Role):
        """Gives a role to a user"""
        await user.add_roles(role)
        await ctx.send(f"hey {ctx.author.name}, {user.name} has been giving a role called: {role.name}")

    @commands.command(aliases=['make_role'])
    @commands.has_permissions(manage_roles=True)
    async def create_role(ctx, *, name):
        """Creates a role"""
        guild = ctx.guild
        await guild.create_role(name=name)
        await ctx.send(f'Role `{name}` has been created')

    @commands.command(name="slap", aliases=["warn"])
    async def slap(ctx, members: commands.Greedy[discord.Member], *, reason='no reason'):
        """Warns someone"""
        slapped = ", ".join(x.name for x in members)
        await ctx.send('{} just got slapped for {}'.format(slapped, reason))

def setup(client):
    client.add_cog(Administration(client))
In classes (unless it's a staticmethod or classmethod), you always pass self as the first argument.
@commands.command(aliases=["purge"])
async def clear(self, ctx, amount=3):  # Note how I put `self` as the first arg; do the same in all commands in the cog
    """Clears 3 messages"""
    await ctx.channel.purge(limit=amount)
Also, this will never work:
async def cog_check(self, ctx):
    admin = get(ctx.guild.roles, name="Admin")
    return admin in ctx.author.roles
    return ctx.author.guild_permissions.manage_messages
The function will end no matter what when it reaches the first return; you can simply use the and or the or logical operator if you want the second condition to be evaluated as well:
async def cog_check(self, ctx):
    admin = get(ctx.guild.roles, name="Admin")
    # use `and` instead of `or` if both conditions must be true
    return admin in ctx.author.roles or ctx.author.guild_permissions.manage_messages
I am trying to update my code from asyncio under Python 3.6 to asyncio under Python 3.7.
One philosophical element of the transition is that it is strongly encouraged to use a single entry point in your program, in particular a single asyncio.run(main()).
This program works:
import asyncio

async def foo():
    while True:
        await asyncio.sleep(1)
        print("hi")

asyncio.run(foo())
And this program works:
import asyncio

async def foo():
    while True:
        await asyncio.sleep(1)
        print("hi")

async def main():
    await foo()

asyncio.run(main())
But the following program exits with no error before anything is printed:
import asyncio

async def foo():
    while True:
        await asyncio.sleep(1)
        print("hi")

async def bar():
    while True:
        await asyncio.sleep(1)
        print("ho")

async def main():
    asyncio.create_task(foo())
    asyncio.create_task(bar())

asyncio.run(main())
Is the prescribed best practice in asyncio under Python 3.7 to have main await a future which is, say, set when some error occurs?
I know that adding an await future line to the bottom of main makes the third program "work", but I'm still not happy with the use of create_task for the infinite coroutines inside main; the whole point of having a single entry point is that you can catch all unhandled exceptions raised by your program in a single place. But when you have so-called "dangling" tasks like this, it doesn't remove the need to call set_exception_handler on the loop.
In this case you simply need something like:
async def main():
    # wait for both `foo()` and `bar()` to finish
    await asyncio.gather(foo(), bar())
You can also use asyncio.wait(return_when=asyncio.FIRST_COMPLETED) to wait until either foo() or bar() finishes, but that requires additional care to actually retrieve their results in order to correctly propagate the exceptions.
Awaiting an explicit future is more advanced usage, most appropriate when the decision to exit the program must be made inside a deeply nested callback.
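For example, a minimal sketch of the asyncio.wait variant, cancelling whichever coroutine is still running and re-raising any exception from the one that finished first:
async def main():
    tasks = [asyncio.create_task(foo()), asyncio.create_task(bar())]
    # Wait until the first task finishes (which, for these infinite loops,
    # only happens if one of them raises).
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()
    for task in done:
        task.result()  # re-raises the task's exception, if it raised one

asyncio.run(main())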