Can we start multiple Tornado client-side websocket clients?

from tornado import gen
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.websocket import websocket_connect


class DetectClient(object):
    def __init__(self, url):
        self.url = url
        self.ws = None
        self.connect()
        PeriodicCallback(self.heartbeat, 3000).start()
        IOLoop.instance().start()

    @gen.coroutine
    def connect(self):
        try:
            self.ws = yield websocket_connect(self.url)
        except Exception as e:
            print("connection error, %s" % e)
        else:
            print("detect connected.")
            self.run()
It seems only one client instance can be started for some reason. How can I start two instances of this client in the main function, like this?

if __name__ == "__main__":
    DetectClient('ws://1231231')
    DetectClient('ws://1231231')

Don't start the IOLoop in every client; you only have to start the IOLoop once, globally.
For running multiple coroutines simultaneously, you can use gen.multi or, as below, gen.WaitIterator.
Here's a modified code example (I've not tested it); a gen.multi variant is sketched after it:
from tornado import gen, ioloop
from tornado.ioloop import PeriodicCallback
from tornado.websocket import websocket_connect


class DetectClient(object):
    def __init__(self, url):
        self.url = url
        self.ws = None

    @gen.coroutine
    def connect(self):
        try:
            self.ws = yield websocket_connect(self.url)
        except Exception as e:
            print("connection error, %s" % e)
        else:
            print("detect connected.")
            # heartbeat and run are assumed to be defined on the class, as in the question
            self.run()
            PeriodicCallback(self.heartbeat, 3000).start()


@gen.coroutine
def main():
    waiter = gen.WaitIterator(
        DetectClient('ws://1231231').connect(),
        DetectClient('ws://1231231').connect()
    )
    while not waiter.done():
        try:
            yield waiter.next()
        except Exception as e:
            print(e)
            continue


if __name__ == '__main__':
    loop = ioloop.IOLoop.current()
    loop.run_sync(main)
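For completeness, here is a rough sketch of the gen.multi variant mentioned above (untested; it assumes the same DetectClient class and imports as the example):

@gen.coroutine
def main():
    # gen.multi waits for all of the given coroutines/futures to finish.
    yield gen.multi([
        DetectClient('ws://1231231').connect(),
        DetectClient('ws://1231231').connect(),
    ])


if __name__ == '__main__':
    ioloop.IOLoop.current().run_sync(main)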

Related

Why does my Windows service not run my Django server?

I'm trying to create a Windows service to run my Django project's server. I can start the service, but the server doesn't run, so I can't reach it in the Chrome browser. My code is below.
It is based on the code from this post: https://metallapan.se/post/windows-service-pywin32-pyinstaller/.
Does anyone know what I'm missing to create a service that runs my Django server? Thanks in advance.
import os
import time
import sys

import win32serviceutil  # ServiceFramework and commandline helper
import win32service  # Events
import servicemanager  # Simple setup and logging
# from django.views.decorators.csrf import ensure_csrf_cookie


class TestServiceForMeasurewarning:
    """Silly little application stub"""

    def stop(self):
        """Stop the service"""
        self.running = False

    def run(self):
        """Main service loop. This is where work is done!"""
        self.running = True
        while self.running:

            def main():
                os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_tutorial.settings')
                try:
                    from django.core.management import execute_from_command_line
                except ImportError as exc:
                    raise ImportError(
                        "Couldn't import Django. Are you sure it's installed and "
                        "available on your PYTHONPATH environment variable? Did you "
                        "forget to activate a virtual environment?"
                    ) from exc
                # args = ['manage.py', 'runserver', '0.0.0.0:8002']
                args = ['manage.py', 'runserver', '0.0.0.0:8003']
                execute_from_command_line(args)
                # execute_from_command_line(sys.argv)

            if __name__ == '__main__':
                main()


class MyServiceFramework(win32serviceutil.ServiceFramework):
    _svc_name_ = 'Test Service For Measurementwarning'
    _svc_display_name_ = 'MeasurementwarningTestingService'

    def SvcStop(self):
        """Stop the service"""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.service_impl.stop()
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)

    def SvcDoRun(self):
        """Start the service; does not return until stopped"""
        self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        self.service_impl = TestServiceForMeasurewarning()
        self.ReportServiceStatus(win32service.SERVICE_RUNNING)
        # Run the service
        self.service_impl.run()


def init():
    if len(sys.argv) == 1:
        servicemanager.Initialize()
        servicemanager.PrepareToHostSingle(MyServiceFramework)
        servicemanager.StartServiceCtrlDispatcher()
    else:
        win32serviceutil.HandleCommandLine(MyServiceFramework)


if __name__ == '__main__':
    init()
When the service manager executes SvcDoRun, __name__ will not be '__main__', so just remove that if statement. You can also remove the main() function and put its code directly in the service implementation's run() method.
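A rough sketch of that change (untested; it reuses the question's settings module and port, and adds '--noreload' as an assumption, since the autoreloader is usually unwanted when runserver is not started from a console):

class TestServiceForMeasurewarning:
    """Runs the Django development server inside the service."""

    def stop(self):
        self.running = False

    def run(self):
        self.running = True
        os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_tutorial.settings')
        from django.core.management import execute_from_command_line
        # No __name__ check here: this runs when the service manager calls SvcDoRun.
        execute_from_command_line(['manage.py', 'runserver', '--noreload', '0.0.0.0:8003'])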

AttributeError: 'CogLoader' object has no attribute '_BotBase__extensions'

As part of learning OOP, I'm trying to create a class that will handle loading and unloading cogs. However, I'm getting the following error:
Traceback (most recent call last):
File "c:\Users\mirza\Desktop\Work\Working Directory\Project\Bots\2B\bot.py", line 50, in <module>
main()
File "c:\Users\mirza\Desktop\Work\Working Directory\Project\Bots\2B\bot.py", line 43, in main
bot = CustomBotClient()
File "c:\Users\mirza\Desktop\Work\Working Directory\Project\Bots\2B\bot.py", line 21, in __init__
raise e
File "c:\Users\mirza\Desktop\Work\Working Directory\Project\Bots\2B\bot.py", line 18, in __init__
self.load_extension(f"cogs.{filename[:-3]}")
File "C:\Users\mirza\AppData\Local\Programs\Python\Python39\lib\site-packages\discord\ext\commands\bot.py", line 671, in load_extension
if name in self.__extensions:
AttributeError: 'CustomBotClient' object has no attribute '_BotBase__extensions'
This is the code from bot.py that is giving me the error:

import discord
from discord.ext import commands
import os
from dotenv import load_dotenv


class CogLoader(commands.Bot):
    def __init__(self):
        for filename in os.listdir('./cogs'):
            if filename.endswith(".py"):
                try:
                    self.load_extension(f"cogs.{filename[:-3]}")
                except Exception as e:
                    print(f"cogs.{filename[:-3]} cannot be loaded")
                    raise e

    @commands.command()
    async def load(self, ctx, extension):
        self.load_extension(f"cogs.{extension}")

    @commands.command()
    async def unload(self, ctx, extension):
        self.unload_extension(f"cogs.{extension}")

    @commands.command()
    async def reload(self, ctx, extension):
        self.unload_extension(f"cogs.{extension}")
        self.load_extension(f"cogs.{extension}")


class CustomBotClient(CogLoader):
    async def on_ready(self):
        print(f"Bot {self.user} is connected to Discord and ready to roll!")


def main():
    bot = CustomBotClient()
    load_dotenv()
    bot.run(os.getenv("TOKEN"))


if __name__ == "__main__":
    main()
This is the cog that I'm trying to load:

from discord.ext import commands


class Greetings(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="GreetME")
    async def greet_me(self, ctx):
        await ctx.send("Hello! {author.user}")


def setup(bot):
    bot.add_cog(Greetings(bot))
I'm following a YouTube tutorial, but he did not use a class for this, so I'm at a loss here. Any help would be appreciated.
You need to initialize commands.Bot in your CogLoader.__init__.
This can be accomplished through the following code:
class CogLoader(commands.Bot):
    def __init__(self, command_prefix, **options):
        super().__init__(command_prefix, **options)
        for filename in os.listdir('./cogs'):
            if filename.endswith(".py"):
                try:
                    self.load_extension(f"cogs.{filename[:-3]}")
                except Exception as e:
                    print(f"cogs.{filename[:-3]} cannot be loaded")
                    raise e

and

def main():
    bot = CustomBotClient(command_prefix="!")
This way, commands.Bot and BotBase are initialized, so their attributes and methods are set up before any extension is loaded.
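To illustrate why the super().__init__ call matters, here is a tiny standalone sketch (hypothetical class names, not from the original post): BotBase.__init__ is what creates the private __extensions mapping that load_extension later reads, which is exactly the attribute named in the traceback.

from discord.ext import commands


class Broken(commands.Bot):
    def __init__(self):
        # Skips commands.Bot.__init__, so _BotBase__extensions is never created and
        # the first self.load_extension(...) call raises AttributeError.
        pass


class Fixed(commands.Bot):
    def __init__(self, command_prefix, **options):
        # Initializes BotBase (extensions, cogs, commands) before anything is loaded.
        super().__init__(command_prefix, **options)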

Is there a way to cancel a pending bot.wait_for?

I have the following code:
self.current_task = self.bot.wait_for('message', check=check, timeout=10 * 60)
response = await self.current_task
However, I want the bot.wait_for to be cancelled if a condition becomes true in another function within the same class. I've tried doing self.current_task.close(); however, it still waits for a response/timeout before returning a NoneType.
You can have a variable which tracks whether the condition is met; in the check function, simply check whether it's met, raise an error if so, and catch it in a try/except block:
def check(m):
    if self.is_condition_met:  # Change the variable accordingly
        raise  # Whatever error you want
    return  # normal check
An example would be:

import asyncio

from discord.ext import commands


class SomeCog(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.is_condition_met = False

    @commands.command()
    async def met(self, ctx):
        self.is_condition_met = True

    @commands.command()
    async def foo(self, ctx):
        await ctx.send("Please send a message")

        def check(m):
            if self.is_condition_met:
                raise RuntimeError("condition is met")
            return ctx.author == m.author

        try:
            message = await self.bot.wait_for("message", check=check, timeout=20.0)
        except RuntimeError:
            await ctx.send("Condition was met")
        except asyncio.TimeoutError:
            await ctx.send("Time is over")
If you invoke the foo command first and then the met command, the wait should be cancelled as soon as someone sends the next message (the check only runs when a message arrives).
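A different option, closer to what the question was already attempting, is to wrap wait_for in a real asyncio task and cancel it with .cancel() rather than .close(). This is only a rough sketch of that alternative (hypothetical cog and command names, untested), not part of the answer above:

import asyncio

from discord.ext import commands


class CancellableWait(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.current_task = None

    @commands.command()
    async def ask(self, ctx):
        # Wrapping wait_for in a Task gives us a handle that can be cancelled elsewhere.
        self.current_task = asyncio.ensure_future(
            self.bot.wait_for('message',
                              check=lambda m: m.author == ctx.author,
                              timeout=10 * 60)
        )
        try:
            response = await self.current_task
        except asyncio.CancelledError:
            await ctx.send("Wait was cancelled")
        except asyncio.TimeoutError:
            await ctx.send("Timed out")

    @commands.command()
    async def stop(self, ctx):
        if self.current_task is not None:
            self.current_task.cancel()  # cancel() interrupts the pending await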

How do I mock a Python decorator's argument?

How do I mock a cached value in a Python unit test using cachetools? I wrote a decorator named cache_controller (I had valid reasons not to use cachetools' own decorator).
When I try to mock the TTLCache object while writing the test code, I cannot mock it. What would be the reason?
src/helpers/cache.py
from cachetools import TTLCache
total_cache = TTLCache(maxsize=1024, ttl=600)
src/wrappers/cache_controller.py
from functools import wraps


def cache_controller(cache, cache_args: tuple):
    """
    :return: wrapper
    """
    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            print("Cache in CACHE CONROLLER:", cache)
            cache_key = tuple([kwargs[arg] for arg in cache_args])
            print("Cache key in CACHE CONROLLER:", cache_key)
            cached_value = cache.get(cache_key)
            print("Cache value in CACHE CONROLLER:", cached_value)
            if cached_value:
                print(f"{self.__class__.__name__} : {cache_key} : from Local Cache : {cached_value}")
                return cached_value
            result = func(self, *args, **kwargs)
            cache.update({cache_key: result})
            print(f"{self.__class__.__name__} : {cache_key} : Updated to Local Cache : {result}")
            return result
        return wrapper
    return decorator
src/run.py
from .wrappers.cache_controller import cache_controller
from .helpers.cache import total_cache


class ExampleClass(object):
    @cache_controller(
        cache=total_cache,
        cache_args=("a", "b")
    )
    def example_method(self, a: int, b: int):
        return a + b
test/test_cache_controller.py
import unittest
from unittest import TestCase, mock

from cachetools.ttl import TTLCache

from src.run import ExampleClass


class TestCacheController(TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def setUp(self):
        self.fake_cache = TTLCache(maxsize=10, ttl=3600)
        self.fake_cache.__setitem__((5, 6), 1)
        self._class = ExampleClass()

    def tearDown(self):
        self._class = None

    @mock.patch("src.run.total_cache")
    def test_if_cache_filled(self, mock_total_cache):
        mock_total_cache.return_value = self.fake_cache
        result = self._class.example_method(a=5, b=6)
        func_expected = 11
        cache_expected = 1
        print("Result:", result)
        self.assertEqual(result, cache_expected)


if __name__ == '__main__':
    unittest.main()
... and the test run output:
cachetools-mock ❯ python -m test.test_cache_controller
Cache in CACHE CONROLLER: TTLCache([], maxsize=1024, currsize=0)
Cache key in CACHE CONROLLER: (5, 6)
Cache value in CACHE CONROLLER: None
ExampleClass : (5, 6) : Updated to Local Cache : 11
Result: 11
F
======================================================================
FAIL: test_if_cache_filled (__main__.TestCacheController)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/sumeyyeozkaynak/.pyenv/versions/3.7.5/lib/python3.7/unittest/mock.py", line 1255, in patched
return func(*args, **keywargs)
File "/Users/sumeyyeozkaynak/Desktop/.optiwisdom/workspace/cachetools-mock/test/test_cache_controller.py", line 28, in test_if_cache_filled
self.assertEqual(result, cache_expected)
AssertionError: 11 != 1
----------------------------------------------------------------------
Ran 1 test in 0.004s
FAILED (failures=1)
I've seen many replies saying that the arguments a decorator receives are not mockable. That is correct here: the decorator captures the real total_cache object when src.run is imported, so patching the name src.run.total_cache afterwards has no effect on the already-built wrapper.
If ExampleClass is imported inside the unit test method itself, after the patch on the underlying cache is active, the problem is eliminated:
import unittest
from unittest import TestCase, mock


class TestCacheController(TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @mock.patch.dict("src.helpers.cache.total_cache", {(5, 6): 1})
    def test_if_cache_filled(self):
        from src.run import ExampleClass

        result = ExampleClass().example_method(a=5, b=6)
        print("Result:", result)
        self.assertEqual(result, 1)


if __name__ == '__main__':
    unittest.main()
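As a tiny standalone illustration of why this happens (hypothetical names, unrelated to cachetools): the wrapper closes over the cache object passed at decoration time, so rebinding the module-level name later is invisible to it, whereas mutating that same object, which is what mock.patch.dict does, is visible.

def remember(cache):
    def decorator(func):
        def wrapper(x):
            # 'cache' is the object passed when the decorator was applied;
            # re-assigning the module-level name later does not change it.
            if x in cache:
                return cache[x]
            cache[x] = func(x)
            return cache[x]
        return wrapper
    return decorator


shared_cache = {}


@remember(shared_cache)
def double(x):
    return 2 * x


shared_cache[3] = 99          # mutating the captured object IS seen by the wrapper
print(double(3))              # -> 99
shared_cache = {"new": True}  # rebinding the name is NOT seen by the wrapper
print(double(4))              # -> 8, stored in the original dict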

Asyncio worker class to handle parallel jobs

I found an example here, but I don't understand how to make the code work:
class Worker:
    def __init__(self, func, n=3):
        self.func = func
        self.queue = asyncio.Queue()
        self.semaphore = asyncio.Semaphore(n)

    def put(self, *args):
        self.queue.put_nowait(args)

    async def run(self):
        while True:
            args = await self._get()
            if args is None:
                return
            asyncio.ensure_future(self._target(args))

    async def _get(self):
        get_task = asyncio.ensure_future(self.queue.get())
        join_task = asyncio.ensure_future(self.queue.join())
        await asyncio.wait(coros, return_when='FIRST_COMPLETED')
        if get_task.done():
            return task.result()

    async def _target(self, args):
        try:
            async with self.semaphore:
                return await self.func(*args)
        finally:
            self.queue.task_done()
I tried then:
def work(a1,a2): print('work done',a1,a2)
W = Worker(work,n=3)
W.put(1,2)
W.put(1,2)
W.put(1,2)
result = await W.run() # for Jupyter notebooks
# asyncio.run(W.run()) # for normal python
I get error:
NameError: name 'coros' is not defined
I admit, the linked solution confused me, and it didn't seem to work. So, I just rewrote the Worker class which hopefully now works for you:
import asyncio


class Worker:
    def __init__(self, func, n=3):
        self.func = func
        self.queue = asyncio.Queue()
        self.semaphore = asyncio.Semaphore(n)

    def put(self, *args):
        self.queue.put_nowait(args)

    async def run(self):
        tasks = []
        while True:
            try:
                args = self.queue.get_nowait()
            except asyncio.QueueEmpty:
                break
            tasks.append(asyncio.ensure_future(self.do_work(args)))
        await asyncio.gather(*tasks)

    async def do_work(self, args):
        async with self.semaphore:
            await self.func(*args)
This seems like a much simpler way to do it to me. Basically, this changes Worker.run to just start a task for each item in the queue, and each task must first acquire the semaphore before calling the provided work function. And then it finishes after all the work is done.
Here is the usage:
async def work(a1, a2):
    print("Starting work...", a1, a2)
    await asyncio.sleep(1)
    print("Finishing work...")
W = Worker(work, n=3)
W.put(1,2)
W.put(3,4)
W.put(5,6)
W.put(7,8)
W.put(9,10)
asyncio.get_event_loop().run_until_complete(W.run())
"""Output
Starting work... 1 2
Starting work... 3 4
Starting work... 5 6
Finishing work...
Finishing work...
Finishing work...
Starting work... 7 8
Starting work... 9 10
Finishing work...
Finishing work...
"""
It should be noted that you cannot use asyncio.run while creating the asyncio.Semaphore this way, because asyncio.run always starts a new event loop, while asyncio.Semaphore(n) binds to the loop that is the default at the moment it is constructed, i.e. before asyncio.run is called. This causes the semaphore to use a different loop than Worker.run.
So just using asyncio.get_event_loop().run_until_complete works fine, because it runs on the default loop (which the semaphore is expecting).
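If you do want asyncio.run, one way around this (a sketch, untested, reusing the same Worker and work as above) is to construct the Worker, and therefore its Queue and Semaphore, inside the coroutine that asyncio.run executes, so everything is bound to the same loop:

async def main():
    # Created inside the running loop, so the Queue and Semaphore use that loop.
    W = Worker(work, n=3)
    for args in [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]:
        W.put(*args)
    await W.run()


asyncio.run(main())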
