How do I test my AJAX function defined in my views file?

I'm using Django and Python 3.7 and trying to test an AJAX request to a view, located at web/views/tax_calculator.py:
from django.http import JsonResponse
from django.shortcuts import render

# Basic function that serves the default page
def get(request):
    return render(request, "web/template/tax_calculator.html", {})

# This is an Ajax call that will calculate the overall taxes you pay for
# an S-Corp vs a sole proprietorship
def post(request):
    state = request.GET.get('state', None)
    gross_income = request.GET.get('gross', None)
    owner_salary = request.GET.get('salary', None)
    data = {
        'sole_pr_taxes': TaxCalculatorService.calc_sole_pr_taxes(state, gross_income),
        's_corp_taxes': TaxCalculatorService.calc_s_corp_taxes(state, gross_income, owner_salary),
    }
    return JsonResponse(data)
Here is my test file, located at web/tests/test_views.py
from django.test import TestCase
from django.test.client import Client
import json
from web.models import *

c = Client()

class ViewTests(TestCase):
    # Basic test to verify we can get valid return data
    def test_calculate_tax(self):
        state = 'MN'
        gross = 100000
        salary = 75000
        json_data = json.dumps({'state': state,
                                'gross': gross,
                                'salary': salary})
        response = c.post('/content/vote/', json_data,
                          content_type='application/json',
                          HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 302)  # this is OK.
        print(response.content)
        self.assertEqual(response.content, 2)
This results in the error below. What else do I need to do to make my test understand my Ajax request?
======================================================================
ERROR: test_calculate_tax (web.tests.test_views.ViewTests)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/Users/davea/Documents/workspace/myproject/web/tests/test_views.py", line 20, in test_calculate_tax
    response = c.post('/content/vote/', json_data,
AttributeError: 'Client' object has no attribute 'post'
----------------------------------------------------------------------
Ran 2 tests in 0.010s
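For reference, a minimal sketch of the conventional test-client pattern, under two assumptions: that '/content/vote/' is really the URL this view is routed to, and that the view reads the posted JSON (as written it reads request.GET, so a JSON body would arrive unseen; a POST handler usually reads json.loads(request.body)):

# A minimal sketch, not a verified fix: django.test.TestCase already
# provides a per-test client as self.client, so no module-level Client
# instance is needed.
import json
from django.test import TestCase

class ViewTests(TestCase):
    def test_calculate_tax(self):
        payload = {'state': 'MN', 'gross': 100000, 'salary': 75000}
        response = self.client.post(
            '/content/vote/',  # assumed to be the URL the view is routed to
            json.dumps(payload),
            content_type='application/json',
            HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        )
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)  # JsonResponse body is JSON bytes
        self.assertIn('sole_pr_taxes', data)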

Related

How to save user data to the database instead of a pickle or JSON file when posting videos to YouTube using Django and the YouTube Data API v3

I'm trying to upload videos to YouTube using Django and MSSQL, and I want to store the user data in the DB so that I can log in from multiple accounts and post videos.
The official documentation provided by YouTube uses the file system: after login, all the user data gets saved to a file. I don't want to store any data in a file, since keeping it in files instead of the DB would be a risk and not good practice. So how can I bypass this step, save the data directly to the DB, and retrieve it when I want to post videos to a specific account?
In short, I want to replace the pickle file implementation with storing it in the database.
Here's my code
def youtubeAuthenticate():
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
    api_service_name = "youtube"
    api_version = "v3"
    client_secrets_file = "client_secrets.json"
    creds = None
    # the file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first time
    if os.path.exists("token.pickle"):
        with open("token.pickle", "rb") as token:
            creds = pickle.load(token)
    # if there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(client_secrets_file, SCOPES)
            creds = flow.run_local_server(port=0)
        # save the credentials for the next run
        with open("token.pickle", "wb") as token:
            pickle.dump(creds, token)
    return build(api_service_name, api_version, credentials=creds)
@api_view(['GET', 'POST'])
def postVideoYT(request):
    youtube = youtubeAuthenticate()
    print('yt', youtube)
    try:
        initialize_upload(youtube, request.data)
    except HttpError as e:
        print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
    return Response("Hello")

def initialize_upload(youtube, options):
    print('options', options)
    print("title", options['title'])
    # tags = None
    # if options.keywords:
    #     tags = options.keywords.split(",")
    body = dict(
        snippet=dict(
            title=options['title'],
            description=options['description'],
            tags=options['keywords'],
            categoryId=options['categoryId']
        ),
        status=dict(
            privacyStatus=options['privacyStatus']
        )
    )
    # Call the API's videos.insert method to create and upload the video.
    insert_request = youtube.videos().insert(
        part=",".join(body.keys()),
        body=body,
        media_body=MediaFileUpload(options['file'], chunksize=-1, resumable=True)
    )
    path = pathlib.Path(options['file'])
    ext = path.suffix
    getSize = os.path.getsize(options['file'])
    resumable_upload(insert_request, ext, getSize)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request, ext, getSize):
    response = None
    error = None
    retry = 0
    while response is None:
        try:
            print("Uploading file...")
            status, response = insert_request.next_chunk()
            if response is not None:
                respData = response
                if 'id' in response:
                    print("Video id '%s' was successfully uploaded." % response['id'])
                else:
                    exit("The upload failed with an unexpected response: %s" % response)
        except HttpError as e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
            else:
                raise
        except RETRIABLE_EXCEPTIONS as e:
            error = "A retriable error occurred: %s" % e
        if error is not None:
            print(error)
            retry += 1
            if retry > MAX_RETRIES:
                exit("No longer attempting to retry.")
            max_sleep = 2 ** retry
            sleep_seconds = random.random() * max_sleep
            print("Sleeping %f seconds and then retrying..." % sleep_seconds)
            time.sleep(sleep_seconds)
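One possible direction (a sketch under assumptions, not an official YouTube API pattern; the model and helper names are hypothetical) is to persist the serialized credentials in a Django model instead of token.pickle:

# Hypothetical sketch: persist the OAuth credentials per account in the
# DB instead of token.pickle. Model and field names are illustrative.
import pickle
from django.db import models

class YouTubeCredentials(models.Model):
    account_email = models.EmailField(unique=True)
    token_blob = models.BinaryField()  # pickled google.oauth2 Credentials

def save_creds(email, creds):
    YouTubeCredentials.objects.update_or_create(
        account_email=email,
        defaults={"token_blob": pickle.dumps(creds)},
    )

def load_creds(email):
    row = YouTubeCredentials.objects.filter(account_email=email).first()
    return pickle.loads(row.token_blob) if row else None

youtubeAuthenticate() would then call load_creds(...) where it currently reads token.pickle, and save_creds(...) where it currently writes it.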

DJANGO-STORAGES, PARAMIKO: connection failure for global connection

I have a strange problem using the SFTP API from django-storages (https://github.com/jschneier/django-storages). I am trying to use it to fetch media files that are stored on a different server, and thus needed to create a proxy for SFTP downloads, since plain Django just sends GET requests to the MEDIA_ROOT. I figured that middleware provides a good hook:
import mimetypes
from django.http import HttpResponse
from storages.backends.sftpstorage import SFTPStorage

class SFTPMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.
        response = self.get_response(request)
        try:
            path = request.get_full_path()
            SFTP = SFTPStorage()  # <- this is where the magic happens
            if SFTP.exists(path):
                file = SFTP._read(path)
                type, encoding = mimetypes.guess_type(path)
                response = HttpResponse(file, content_type=type)
                response['Content-Disposition'] = u'attachment; filename="{filename}"'.format(filename=path)
        except PermissionError:
            pass
        return response
This works fine, but obviously it opens a new connection every time a website call is issued, which I don't want (it also crashes after about 3 reloads; I think it has too many parallel connections by then). So I tried opening just one connection to the server via SFTP by moving the SFTP = SFTPStorage() initialization into the __init__() method, which is only called once:
import mimetypes
from django.http import HttpResponse
from storages.backends.sftpstorage import SFTPStorage

class SFTPMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
        self.SFTP = SFTPStorage()  # <- this is where the magic happens

    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.
        response = self.get_response(request)
        try:
            path = request.get_full_path()
            if self.SFTP.exists(path):
                file = self.SFTP._read(path)
                type, encoding = mimetypes.guess_type(path)
                response = HttpResponse(file, content_type=type)
                response['Content-Disposition'] = u'attachment; filename="{filename}"'.format(filename=path)
        except PermissionError:
            pass
        return response
But this implementation doesn't seem to work; the program gets stuck either before the SFTP.exists() call or after the SFTP._read() call.
Can anybody tell me how to fix this problem? Or does anybody even have a better idea as to how to tackle this problem?
Thanks in advance,
Kingrimursel
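One direction to try (a sketch under an assumption, not a verified fix: the guess here is that concurrent requests interleaving on the single shared connection cause the hang) is to keep the one connection but serialize access to it with a lock:

# Hypothetical variation: one shared SFTPStorage, with a threading.Lock
# so concurrent requests cannot interleave on the same connection.
import mimetypes
import threading
from django.http import HttpResponse
from storages.backends.sftpstorage import SFTPStorage

class SFTPMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
        self.SFTP = SFTPStorage()
        self.lock = threading.Lock()

    def __call__(self, request):
        response = self.get_response(request)
        path = request.get_full_path()
        try:
            with self.lock:  # only one request talks to the server at a time
                if self.SFTP.exists(path):
                    file = self.SFTP._read(path)
                    content_type, _ = mimetypes.guess_type(path)
                    response = HttpResponse(file, content_type=content_type)
                    response['Content-Disposition'] = 'attachment; filename="{}"'.format(path)
        except PermissionError:
            pass
        return response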

How can I build a list of async tasks with arguments for AsyncHTMLSession().run?

From the documentation I have this example, which I've tested and it works:
from requests_html import AsyncHTMLSession

asession = AsyncHTMLSession()

async def get_pythonorg():
    r = await asession.get('https://python.org/')

async def get_reddit():
    r = await asession.get('https://reddit.com/')

async def get_google():
    r = await asession.get('https://google.com/')

result = asession.run(get_pythonorg, get_reddit, get_google)
But what if my URLs are variable? I'd like to do this:
from requests_html import AsyncHTMLSession

urls = ('https://python.org/', 'https://reddit.com/', 'https://google.com/')

asession = AsyncHTMLSession()

async def get_url(url):
    r = await asession.get(url)

tasks = []
for url in urls:
    tasks.append(get_url(url=url))

result = asession.run(*tasks)
but I get:
Traceback (most recent call last):
  File "./test.py", line 17, in <module>
    result = asession.run(*tasks)
  File "/home/deanresin/.local/lib/python3.7/site-packages/requests_html.py", line 772, in run
    asyncio.ensure_future(coro()) for coro in coros
  File "/home/deanresin/.local/lib/python3.7/site-packages/requests_html.py", line 772, in <listcomp>
    asyncio.ensure_future(coro()) for coro in coros
TypeError: 'coroutine' object is not callable
sys:1: RuntimeWarning: coroutine 'get_url' was never awaited
TL;DR:
It is because you are passing coroutine objects, not coroutine functions.
You can do:
from requests_html import AsyncHTMLSession

urls = ('https://python.org/', 'https://reddit.com/', 'https://google.com/')

asession = AsyncHTMLSession()

async def get_url(url):
    r = await asession.get(url)
    # if you want the JavaScript-rendered page:
    await r.html.arender()
    return r

all_responses = asession.run(*[lambda url=url: get_url(url) for url in urls])
Explanation:
The error is coming from result = asession.run(*tasks), so let's look at the source code of AsyncHTMLSession.run():
def run(self, *coros):
    """ Pass in all the coroutines you want to run, it will wrap each one
    in a task, run it and wait for the result. Return a list with all
    results, this is returned in the same order coros are passed in. """
    tasks = [
        asyncio.ensure_future(coro()) for coro in coros
    ]
    done, _ = self.loop.run_until_complete(asyncio.wait(tasks))
    return [t.result() for t in done]
So in the following list comprehension, each coro is expected to be a callable coroutine function, not a coroutine object:
tasks = [
    asyncio.ensure_future(coro()) for coro in coros
]
But your error says TypeError: 'coroutine' object is not callable.
So you are passing a list of coroutine objects, not coroutine functions.
Indeed, when you do this:
tasks = []
for url in urls:
    tasks.append(get_url(url=url))
you are building a list of coroutine objects by calling your coroutine function.
So in order to make a list of coroutine functions, you can use a lambda like this:
[lambda url=url: get_url(url) for url in urls]
Note the url=url, which binds the url parameter at the moment the lambda is defined.
More information about this here.
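A closely related alternative to the default-argument lambda (my addition, not part of the answer above) is functools.partial, which also yields a callable that run() can invoke:

# Alternative sketch using functools.partial instead of a lambda:
# partial(get_url, url) is a callable that, when invoked by run(),
# returns a fresh coroutine object for each URL.
from functools import partial

all_responses = asession.run(*[partial(get_url, url) for url in urls])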

Call DRF ViewSet via Celery task

I have a Django Rest Framework ViewSet:
class MyModelViewSet(generics.RetrieveUpdateDestroyAPIView):
    def perform_destroy(self, instance):
        # do something besides deleting the object
Now I'm writing a Celery periodic task that deletes expired objects based on a filter (let's say end_date < now).
I want the task to reuse and perform the same actions that are executed in the ViewSet's perform_destroy method.
Can this be done? How?
Thanks!
You can solve your issue by using DRF's Request: schedule a Celery task that builds the request and calls the view. It works well; I've implemented this before.
Example code:
from rest_framework.request import Request as DRFRequest
from django.conf import settings
from django.http import HttpRequest
from your_module.views import MyModelViewSet

CELERY_CACHING_QUEUE = getattr(settings, "CELERY_CACHING_QUEUE", None)

def delete_resource(resource_pk: int) -> None:
    """
    This method helps to delete the resource by the id.
    """
    print(f'Starting deleting resource {resource_pk}...')
    request = HttpRequest()
    request.method = 'DELETE'
    request.META = {
        'SERVER_NAME': settings.ALLOWED_HOSTS[0],
        'SERVER_PORT': 443
    }
    drf_request = DRFRequest(request)
    # If your API requires the user to have access permission,
    # you should handle getting the value of
    # user_has_access_permission beforehand.
    # E.g. below
    # drf_request.user = user_has_access_permission
    try:
        view = MyModelViewSet(
            kwargs={
                'pk': resource_pk
            },
            request=drf_request
        )
        view.initial(drf_request)
        view.delete(drf_request)
    except (Exception, KeyError) as e:
        print(f'Cannot delete resource: {resource_pk}, error: {e}')
        return
    print(f'Finished deleting resource {resource_pk}...')

@task(name="delete_resource_task", queue=CELERY_CACHING_QUEUE)
def delete_resource_task(resource_pk: int) -> None:
    """
    Async task helps to delete resource.
    """
    delete_resource(resource_pk)
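For the periodic part, a sketch of how the task might be driven (the end_date filter comes from the question; the MyModel import and the beat wiring are assumptions):

# Hypothetical periodic task: MyModel and its module path are
# assumptions, not part of the answer above.
from django.utils import timezone
from your_module.models import MyModel

@task(name="delete_expired_resources_task", queue=CELERY_CACHING_QUEUE)
def delete_expired_resources_task() -> None:
    expired_pks = MyModel.objects.filter(
        end_date__lt=timezone.now()
    ).values_list('pk', flat=True)
    for pk in expired_pks:
        delete_resource_task.delay(pk)  # one async deletion per object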

How to pass AWS Lambda 1's output as Lambda 2's input using boto3

I have an AWS Lambda function which has some info, and another 6 Lambda functions which query CloudWatch logs using boto3.
How can I call Lambda 2 with Lambda 1's input?
I'm calling 6 other Lambdas because querying the logs takes more than 48 minutes. I've divided the time duration into 6 parts and call each Lambda with its own start and end times, as Lambda execution time is 15 minutes max.
Lambda 1 code:
import json
import boto3
import time

client = boto3.client('lambda')
payload3 = b"""{ "query":"fields #timestamp, #message | sort #timestamp desc", "log_group":"abc.log", "sTime":1585630748456, "eTime":1585713248457}"""

def lambda_handler(event, context):
    response = client.invoke(
        FunctionName='testamurin',
        InvocationType='Event',
        Payload=payload3
    )
    return {
        'statusCode': 200
    }
Lambda 2 code:
import json
import boto3
import time

client = boto3.client('logs')

def lambda_handler(event, context):
    response = client.start_query(
        logGroupName=log_group,
        startTime=sTime,
        endTime=eTime,
        queryString=query
    )
    query_id = response['queryId']
    response = None
    while response == None or response['status'] == 'Running':
        print('Waiting for query to complete ...')
        time.sleep(1)
        response = client.get_query_results(
            queryId=query_id)
    return {
        'body': response
    }
When I run Lambda 1 it succeeds. But in Lambda 2 I get this error:
[ERROR] NameError: name 'log_group' is not defined
Traceback (most recent call last):
  File "/var/task/lambda_function.py", line 16, in lambda_handler
    logGroupName = log_group,
I assume function 1 is calling function 2.
Function 1's input contains first_name and last_name.
Function 1
import json
from boto3 import client as boto3_client

lambda_client = boto3_client('lambda')

def my_handler(event, context):
    response = lambda_client.invoke(
        FunctionName="FUNCTION-2-NAME-FOUND-IN-LAMBDA-CONSOLE",
        InvocationType='RequestResponse',
        Payload=json.dumps(event)
    )
    # do something with the response
    # ...
Function 2
def my_handler(event, context):
    message = 'Hello {} {}!'.format(event['first_name'],
                                    event['last_name'])
    return {
        'message': message
    }
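Applied to the question's code, the same idea means Lambda 2 must read its parameters from event instead of referencing bare names, which is what causes the NameError. A sketch, using the key names from Lambda 1's payload:

# Sketch of the asker's Lambda 2 handler reading its inputs from the
# event payload sent by Lambda 1, which avoids the NameError.
import boto3
import time

client = boto3.client('logs')

def lambda_handler(event, context):
    response = client.start_query(
        logGroupName=event['log_group'],  # keys match Lambda 1's payload
        startTime=event['sTime'],
        endTime=event['eTime'],
        queryString=event['query']
    )
    query_id = response['queryId']
    results = None
    while results is None or results['status'] == 'Running':
        print('Waiting for query to complete ...')
        time.sleep(1)
        results = client.get_query_results(queryId=query_id)
    return {'body': results}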
