How do I mock the Python decorator argument? (caching)

How do I mock a cached value in a Python unit test when using cachetools? I wrote a decorator named cache_controller (I had valid reasons not to use cachetools' own decorator).
When I try to mock the TTLCache object while writing the test code, the mock never takes effect. What could be the reason?
src/helpers/cache.py
from cachetools import TTLCache
total_cache = TTLCache(maxsize=1024, ttl=600)
src/wrappers/cache_controller.py
from functools import wraps
def cache_controller(cache, cache_args: tuple):
    """
    :return: wrapper
    """
    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            print("Cache in CACHE CONROLLER:", cache)
            cache_key = tuple([kwargs[arg] for arg in cache_args])
            print("Cache key in CACHE CONROLLER:", cache_key)
            cached_value = cache.get(cache_key)
            print("Cache value in CACHE CONROLLER:", cached_value)
            if cached_value:
                print(f"{self.__class__.__name__} : {cache_key} : from Local Cache : {cached_value}")
                return cached_value
            result = func(self, *args, **kwargs)
            cache.update({cache_key: result})
            print(f"{self.__class__.__name__} : {cache_key} : Updated to Local Cache : {result}")
            return result
        return wrapper
    return decorator
src/run.py
from .wrappers.cache_controller import cache_controller
from .helpers.cache import total_cache
class ExampleClass(object):
    @cache_controller(
        cache=total_cache,
        cache_args=("a", "b")
    )
    def example_method(self, a: int, b: int):
        return a + b
test/test_cache_controller.py
import unittest
from cachetools.ttl import TTLCache
from src.run import ExampleClass
from unittest import TestCase, mock
class TestCacheController(TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def setUp(self):
        self.fake_cache = TTLCache(maxsize=10, ttl=3600)
        self.fake_cache[(5, 6)] = 1
        self._class = ExampleClass()

    def tearDown(self):
        self._class = None

    @mock.patch("src.run.total_cache")
    def test_if_cache_filled(self, mock_total_cache):
        mock_total_cache.return_value = self.fake_cache
        result = self._class.example_method(a=5, b=6)
        func_expected = 11
        cache_expected = 1
        print("Result:", result)
        self.assertEqual(result, cache_expected)

if __name__ == '__main__':
    unittest.main()
... and the output when running the test:
cachetools-mock ❯ python -m test.test_cache_controller
Cache in CACHE CONROLLER: TTLCache([], maxsize=1024, currsize=0)
Cache key in CACHE CONROLLER: (5, 6)
Cache value in CACHE CONROLLER: None
ExampleClass : (5, 6) : Updated to Local Cache : 11
Result: 11
F
======================================================================
FAIL: test_if_cache_filled (__main__.TestCacheController)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/sumeyyeozkaynak/.pyenv/versions/3.7.5/lib/python3.7/unittest/mock.py", line 1255, in patched
return func(*args, **keywargs)
File "/Users/sumeyyeozkaynak/Desktop/.optiwisdom/workspace/cachetools-mock/test/test_cache_controller.py", line 28, in test_if_cache_filled
self.assertEqual(result, cache_expected)
AssertionError: 11 != 1
----------------------------------------------------------------------
Ran 1 test in 0.004s
FAILED (failures=1)

I've seen many replies saying that the arguments a decorator receives cannot be mocked. That is correct here: the decorator is evaluated when the class is imported, so it has already captured the real cache object, and when the test then patches it the mocked object is overwritten and never used.
If ExampleClass is imported inside the unit-test method itself, the problem is eliminated.
import unittest
from unittest import TestCase, mock
class TestCacheController(TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @mock.patch.dict("src.helpers.cache.total_cache", {(5, 6): 1})
    def test_if_cache_filled(self):
        from src.run import ExampleClass
        result = ExampleClass().example_method(a=5, b=6)
        print("Result:", result)
        self.assertEqual(result, 1)

if __name__ == '__main__':
    unittest.main()
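As far as I can tell, this works because mock.patch.dict mutates the existing total_cache object in place for the duration of the test, and the decorator closed over that very object, so the injected (5, 6) entry is visible inside the wrapper. A minimal sketch of the same idea without the test harness (assuming the src layout from the question):

from unittest import mock

from src.helpers.cache import total_cache

# patch.dict temporarily inserts the entry into the existing TTLCache; the
# decorator on ExampleClass.example_method holds a reference to that same
# object, so the cached value is returned instead of a + b.
with mock.patch.dict(total_cache, {(5, 6): 1}):
    from src.run import ExampleClass
    assert ExampleClass().example_method(a=5, b=6) == 1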

Related

Why does Windows Service not run my django server?

I'm trying to create a Windows service to run my Django project's development server. I can start the service, but the server doesn't run, so I can't reach it from the Chrome browser. My code is below.
My code is based on this post: https://metallapan.se/post/windows-service-pywin32-pyinstaller/.
Does anyone know what I'm missing to create a service that runs my Django server? Thanks in advance.
import os
import time
import sys

import win32serviceutil  # ServiceFramework and commandline helper
import win32service  # Events
import servicemanager  # Simple setup and logging
# from django.views.decorators.csrf import ensure_csrf_cookie

class TestServiceForMeasurewarning:
    """Silly little application stub"""

    def stop(self):
        """Stop the service"""
        self.running = False

    def run(self):
        """Main service loop. This is where work is done!"""
        self.running = True
        while self.running:
            pass

def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_tutorial.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # args = ['manage.py', 'runserver', '0.0.0.0:8002']
    args = ['manage.py', 'runserver', '0.0.0.0:8003']
    execute_from_command_line(args)
    # execute_from_command_line(sys.argv)

if __name__ == '__main__':
    main()

class MyServiceFramework(win32serviceutil.ServiceFramework):
    _svc_name_ = 'Test Service For Measurementwarning'
    _svc_display_name_ = 'MeasurementwarningTestingService'

    def SvcStop(self):
        """Stop the service"""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.service_impl.stop()
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)

    def SvcDoRun(self):
        """Start the service; does not return until stopped"""
        self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        self.service_impl = TestServiceForMeasurewarning()
        self.ReportServiceStatus(win32service.SERVICE_RUNNING)
        # Run the service
        self.service_impl.run()

def init():
    if len(sys.argv) == 1:
        servicemanager.Initialize()
        servicemanager.PrepareToHostSingle(MyServiceFramework)
        servicemanager.StartServiceCtrlDispatcher()
    else:
        win32serviceutil.HandleCommandLine(MyServiceFramework)

if __name__ == '__main__':
    init()
When the service manager executes SvcDoRun, __name__ will not be '__main__', so just remove that if statement. You can actually remove the main() function and put its code directly into the service implementation's run() method.
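A minimal sketch of that suggestion (untested, and only the application stub is shown; the MyServiceFramework and init() parts stay as in the question). The --noreload flag is an assumption on my part: the autoreloader spawns a child process, which tends not to work when runserver is started from a service.

import os

class TestServiceForMeasurewarning:
    """Application stub that runs the Django development server."""

    def stop(self):
        self.running = False

    def run(self):
        # SvcDoRun calls this method directly, so no __main__ guard is needed here.
        self.running = True
        os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_tutorial.settings')
        from django.core.management import execute_from_command_line
        execute_from_command_line(['manage.py', 'runserver', '--noreload', '0.0.0.0:8003'])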

Can we start multiple clients? (Tornado, client-side)

from tornado import gen
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.websocket import websocket_connect

class DetectClient(object):
    def __init__(self, url):
        self.url = url
        self.ws = None
        self.connect()
        PeriodicCallback(self.heartbeat, 3000).start()
        IOLoop.instance().start()

    @gen.coroutine
    def connect(self):
        try:
            self.ws = yield websocket_connect(self.url)
        except Exception as e:
            print("connection error, %s" % e)
        else:
            print("detect connected.")
            self.run()
It seems only one client instance can be started, for some reason.
How do I start two instances of this client in the main function, like this?
if __name__ == "__main__":
    DetectClient('ws.//1231231')
    DetectClient('ws.//1231231')
Don't start an IOLoop in every client; you only have to start the IOLoop once, globally.
For running multiple coroutines simultaneously, you can use gen.multi or, as in the example below, gen.WaitIterator.
Here's a modified code example (I've not tested it):
from tornado import gen, ioloop
from tornado.ioloop import PeriodicCallback
from tornado.websocket import websocket_connect

class DetectClient(object):
    def __init__(self, url):
        self.url = url
        self.ws = None

    @gen.coroutine
    def connect(self):
        try:
            self.ws = yield websocket_connect(self.url)
        except Exception as e:
            print("connection error, %s" % e)
        else:
            print("detect connected.")
            self.run()
            PeriodicCallback(self.heartbeat, 3000).start()

@gen.coroutine
def main():
    waiter = gen.WaitIterator(
        DetectClient('ws.//1231231').connect(),
        DetectClient('ws.//1231231').connect()
    )
    while not waiter.done():
        try:
            yield waiter.next()
        except Exception as e:
            print(e)
            continue

if __name__ == '__main__':
    loop = ioloop.IOLoop.current()
    loop.run_sync(main)
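If you prefer the gen.multi form mentioned above, the main coroutine could look like this instead (same assumptions as the answer's code, also untested):

@gen.coroutine
def main():
    # gen.multi waits until all of the given coroutines/futures have finished
    yield gen.multi([
        DetectClient('ws.//1231231').connect(),
        DetectClient('ws.//1231231').connect(),
    ])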

AttributeError: 'CogLoader' object has no attribute '_BotBase__extensions'

As part of learning OOP, I'm trying to create a class that will handle the loading and unloading of cogs. However, I'm getting the following error:
Traceback (most recent call last):
File "c:\Users\mirza\Desktop\Work\Working Directory\Project\Bots\2B\bot.py", line 50, in <module>
main()
File "c:\Users\mirza\Desktop\Work\Working Directory\Project\Bots\2B\bot.py", line 43, in main
bot = CustomBotClient()
File "c:\Users\mirza\Desktop\Work\Working Directory\Project\Bots\2B\bot.py", line 21, in __init__
raise e
File "c:\Users\mirza\Desktop\Work\Working Directory\Project\Bots\2B\bot.py", line 18, in __init__
self.load_extension(f"cogs.{filename[:-3]}")
File "C:\Users\mirza\AppData\Local\Programs\Python\Python39\lib\site-packages\discord\ext\commands\bot.py", line 671, in load_extension
if name in self.__extensions:
AttributeError: 'CustomBotClient' object has no attribute '_BotBase__extensions'
This is the code from bot.py that is giving me the error:
import discord
from discord.ext import commands
import os
from dotenv import load_dotenv

class CogLoader(commands.Bot):
    def __init__(self):
        for filename in os.listdir('./cogs'):
            if filename.endswith(".py"):
                try:
                    self.load_extension(f"cogs.{filename[:-3]}")
                except Exception as e:
                    print(f"cogs.{filename[:-3]} cannot be loaded")
                    raise e

    @commands.command()
    async def load(self, ctx, extension):
        self.load_extension(f"cogs.{extension}")

    @commands.command()
    async def unload(self, ctx, extension):
        self.unload_extension(f"cogs.{extension}")

    @commands.command()
    async def reload(self, ctx, extension):
        self.unload_extension(f"cogs.{extension}")
        self.load_extension(f"cogs.{extension}")

class CustomBotClient(CogLoader):
    async def on_ready(self):
        print(f"Bot {self.user} is connected to Discord and ready to roll!")

def main():
    bot = CustomBotClient()
    load_dotenv()
    bot.run(os.getenv("TOKEN"))

if __name__ == "__main__":
    main()
This is the cog that I'm trying to load
from discord.ext import commands

class Greetings(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="GreetME")
    async def greet_me(self, ctx):
        await ctx.send("Hello! {author.user}")

def setup(bot):
    bot.add_cog(Greetings(bot))
I'm following a YouTube tutorial, but the author didn't use a class for this, so I'm at a loss here. Any help would be appreciated.
You need to initialize the commands.Bot in your CogLoader.__init__
This can be accomplished through the following code:
class CogLoader(commands.Bot):
    def __init__(self, command_prefix, **options):
        super().__init__(command_prefix, **options)
        for filename in os.listdir('./cogs'):
            if filename.endswith(".py"):
                try:
                    self.load_extension(f"cogs.{filename[:-3]}")
                except Exception as e:
                    print(f"cogs.{filename[:-3]} cannot be loaded")
                    raise e
and
def main():
    bot = CustomBotClient(command_prefix="!")
This way, commands.Bot and BotBase are initialized, so their attributes and methods are properly set up.
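Putting the pieces together, the rest of the question's bot.py only needs main() adjusted to pass the prefix through (a sketch, assuming the same imports, cogs/ directory and .env token as in the question):

class CustomBotClient(CogLoader):
    async def on_ready(self):
        print(f"Bot {self.user} is connected to Discord and ready to roll!")

def main():
    load_dotenv()
    # command_prefix is forwarded to commands.Bot through CogLoader.__init__
    bot = CustomBotClient(command_prefix="!")
    bot.run(os.getenv("TOKEN"))

if __name__ == "__main__":
    main()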

Why using "fork" works but using "spawn" fails in Python3.8+ `multiprocessing`?

I work on macOS and lately got bitten by the "fork" to "spawn" change in Python 3.8's multiprocessing (see the docs). Below is a simplified example where using "fork" succeeds but using "spawn" fails. The purpose of the code is to create a custom queue object that supports calling size() under macOS, hence the inheritance from the Queue class and getting multiprocessing's context.
import multiprocessing
from multiprocessing import Process
from multiprocessing.queues import Queue
from time import sleep

class Q(Queue):
    def __init__(self):
        super().__init__(ctx=multiprocessing.get_context())
        self.size = 1

    def call(self):
        return print(self.size)

def foo(q):
    q.call()

if __name__ == '__main__':
    multiprocessing.set_start_method('spawn')  # this would fail
    # multiprocessing.set_start_method('fork')  # this would succeed
    q = Q()
    p = Process(target=foo, args=(q,))
    p.start()
    p.join(timeout=1)
The error message output when using "spawn" is shown below.
Process Process-1:
Traceback (most recent call last):
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/Users/fanchen/Private/python_work/sandbox.py", line 23, in foo
q.call()
File "/Users/fanchen/Private/python_work/sandbox.py", line 19, in call
return print(self.size)
AttributeError: 'Q' object has no attribute 'size'
It seems that the child process decides self.size is not necessary for code execution, so it is not copied. My question is: why does this happen?
Code snippet tested under macOS Catalina 10.15.6, Python 3.8.5
The problem is that spawned processes do not share resources with the parent, so to recreate the queue instance properly in each process you need to add serialization and deserialization methods.
Here is working code:
# Portable queue
# The idea of Victor Terron, used in the Lemon project (https://github.com/vterron/lemon/blob/master/util/queue.py).
# Pickling/unpickling methods are added to share Queue instances between processes correctly.
import multiprocessing
import multiprocessing.queues

class SharedCounter(object):
    """ A synchronized shared counter.

    The locking done by multiprocessing.Value ensures that only a single
    process or thread may read or write the in-memory ctypes object. However,
    in order to do n += 1, Python performs a read followed by a write, so a
    second process may read the old value before the new one is written by the
    first process. The solution is to use a multiprocessing.Lock to guarantee
    the atomicity of the modifications to Value.

    This class comes almost entirely from Eli Bendersky's blog:
    http://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing/
    """

    def __init__(self, n=0):
        self.count = multiprocessing.Value('i', n)

    def __getstate__(self):
        return (self.count,)

    def __setstate__(self, state):
        (self.count,) = state

    def increment(self, n=1):
        """ Increment the counter by n (default = 1) """
        with self.count.get_lock():
            self.count.value += n

    @property
    def value(self):
        """ Return the value of the counter """
        return self.count.value

class Queue(multiprocessing.queues.Queue):
    """ A portable implementation of multiprocessing.Queue.

    Because of multithreading / multiprocessing semantics, Queue.qsize() may
    raise the NotImplementedError exception on Unix platforms like Mac OS X
    where sem_getvalue() is not implemented. This subclass addresses this
    problem by using a synchronized shared counter (initialized to zero) and
    increasing / decreasing its value every time the put() and get() methods
    are called, respectively. This not only prevents NotImplementedError from
    being raised, but also allows us to implement a reliable version of both
    qsize() and empty().
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs, ctx=multiprocessing.get_context())
        self._counter = SharedCounter(0)

    def __getstate__(self):
        return super().__getstate__() + (self._counter,)

    def __setstate__(self, state):
        super().__setstate__(state[:-1])
        self._counter = state[-1]

    def put(self, *args, **kwargs):
        super().put(*args, **kwargs)
        self._counter.increment(1)

    def get(self, *args, **kwargs):
        item = super().get(*args, **kwargs)
        self._counter.increment(-1)
        return item

    def qsize(self):
        """ Reliable implementation of multiprocessing.Queue.qsize() """
        return self._counter.value

    def empty(self):
        """ Reliable implementation of multiprocessing.Queue.empty() """
        return not self.qsize()
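If you only need the original Q class from the question to survive the spawn-based transfer, a smaller fix along the same lines (an untested sketch) is to add pickling hooks that carry the extra attribute together with the parent queue's state:

import multiprocessing
from multiprocessing.queues import Queue

class Q(Queue):
    def __init__(self):
        super().__init__(ctx=multiprocessing.get_context())
        self.size = 1

    def __getstate__(self):
        # parent queue state plus our extra attribute
        return (super().__getstate__(), self.size)

    def __setstate__(self, state):
        parent_state, self.size = state
        super().__setstate__(parent_state)

    def call(self):
        return print(self.size)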
You can also use a queue created via multiprocessing.Manager() (manager.Queue()).
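For completeness, a minimal sketch of that alternative (the queue lives in a manager process and is accessed through a proxy, so qsize() also works on macOS, at the cost of extra IPC overhead):

import multiprocessing

def worker(q):
    print(q.get())

if __name__ == '__main__':
    multiprocessing.set_start_method('spawn')
    manager = multiprocessing.Manager()
    q = manager.Queue()
    q.put(1)
    print(q.qsize())  # proxied to the manager process, so it works on macOS too
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    p.join()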

model/view QCompleter in a QLineEdit

Ubuntu 10.04, KDE 4.4.5
Python 2.6.4
Qt 4.6.2
PyQt 4.6.2
I'm trying to create a QCompleter, which works fine if I just build the QLineEdit.
However if I drop the QLineEdit into a QMainWindow, the QCompleter no longer works.
Here is the LineEdit class
# LineEdit class
import sys
from PyQt4 import QtCore, QtGui

class LineEdit(QtGui.QLineEdit):
    def __init__(self, parent=None):
        super(LineEdit, self).__init__(parent)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.completer = QtGui.QCompleter(self)
        self.completer.setCompletionMode(QtGui.QCompleter.UnfilteredPopupCompletion)
        self.pFilterModel = QtGui.QSortFilterProxyModel(self)
        self.pFilterModel.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.completer.setPopup(self.view())
        self.setCompleter(self.completer)
        self.textEdited[unicode].connect(self.pFilterModel.setFilterFixedString)

    def setModel(self, model):
        self.pFilterModel.setSourceModel(model)
        self.completer.setModel(self.pFilterModel)

    def setModelColumn(self, column):
        self.completer.setCompletionColumn(column)
        self.pFilterModel.setFilterKeyColumn(column)

    def view(self):
        return self.completer.popup()

    def index(self):
        return self.currentIndex()
The QCompleter works if I build LineEdit this way:
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)

    model = QtGui.QStandardItemModel()
    for i, word in enumerate(['test', 'blah', 'heh', 'yep']):
        item = QtGui.QStandardItem(word)
        model.setItem(i, 0, item)

    lineEdit = LineEdit()
    lineEdit.setModel(model)
    lineEdit.setModelColumn(0)
    lineEdit.show()

    sys.exit(app.exec_())
This runs fine, but no longer shows the QCompleter:
if __name__ == '__main__':
    class Example(QtGui.QMainWindow):
        def __init__(self):
            QtGui.QMainWindow.__init__(self)
            self.centralWidget = QtGui.QWidget(self)
            self.layout = QtGui.QVBoxLayout(self.centralWidget)

            # can I push this inside the LineEdit class instead?
            model = QtGui.QStandardItemModel()
            for i, word in enumerate(['test', 'blah', 'heh', 'yep', 'hello', 'hi']):
                item = QtGui.QStandardItem(word)
                model.setItem(i, 0, item)

            # Make a LineEdit instance
            self.lineEdit = LineEdit(parent=self.centralWidget)
            self.lineEdit.setModel(model)
            self.lineEdit.setModelColumn(0)

            self.layout.addWidget(self.lineEdit)
            self.setCentralWidget(self.centralWidget)

    app = QtGui.QApplication(sys.argv)
    QtWin = Example()
    QtWin.show()
    sys.exit(app.exec_())
Turned out to be quite simple really; hopefully this will help anyone else using PyQt's QCompleter for auto-completion:
import sys
from PyQt4 import QtCore, QtGui

class LineEdit(QtGui.QLineEdit):
    def __init__(self, parent, completerContents):
        super(LineEdit, self).__init__(parent)
        self.completerList = QtCore.QStringList()
        for content in completerContents:
            self.completerList.append(QtCore.QString(content))
        self.completer = QtGui.QCompleter(self.completerList, self)
        self.completer.setCompletionMode(QtGui.QCompleter.PopupCompletion)
        self.completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.setCompleter(self.completer)

if __name__ == '__main__':
    class Example(QtGui.QMainWindow):
        def __init__(self):
            QtGui.QMainWindow.__init__(self)
            self.centralWidget = QtGui.QWidget(self)
            self.layout = QtGui.QVBoxLayout(self.centralWidget)

            # Example LineEdit call
            self.lineEdit = LineEdit(parent=self.centralWidget,
                                     completerContents=('test', 'blah', 'heh', 'yep', 'hello', 'hi'))

            self.layout.addWidget(self.lineEdit)
            self.setCentralWidget(self.centralWidget)

    app = QtGui.QApplication(sys.argv)
    QtWin = Example()
    QtWin.show()
    sys.exit(app.exec_())
There are two possible reasons for such behavior in the second case:
1. Your completer has no completion model in the second case.
2. Your LineEdit has a different completer set.
I don't know if you can debug this in Python and set a breakpoint on QLineEdit::setCompleter.
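A quick way to check the first possibility without a debugger (a sketch; note that in the question's LineEdit the instance attribute self.completer shadows QLineEdit's completer() getter, so the attribute is inspected directly). This could go right after setModelColumn() in the Example window:

# Does the completer have a completion model by the time the window is built,
# and is it still attached to our line edit?
print(self.lineEdit.completer.model())    # None here would confirm reason 1
print(self.lineEdit.completer.widget())   # should be self.lineEdit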
