Lots of ResourceWarning in FastApi with asyncpg - python-asyncio

I have an async FastApi application with async sqlalchemy, source code:
database.py
from sqlalchemy import (
Column,
String,
)
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.decl_api import DeclarativeMeta
from app.config import settings
engine = create_async_engine(settings.DATABASE_URL, pool_pre_ping=True)
Base: DeclarativeMeta = declarative_base()
async_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
class Titles(Base):
__tablename__ = "titles"
id = Column(String(100), primary_key=True)
title = Column(String(100), unique=True)
async def get_session() -> AsyncSession:
async with async_session() as session:
yield session
routers.py
from . import database
from fastapi_utils.cbv import cbv
from fastapi_utils.inferring_router import InferringRouter
router = InferringRouter()
async def get_titles(session: AsyncSession):
results = await session.execute(select(database.Titles))
return results.scalars().all()
@cbv(router)
class TitlesView:
session: AsyncSession = Depends(database.get_session)
@router.get("/titles", status_code=HTTP_200_OK)
async def get(self) -> List[TitlesSchema]:
results = await get_titles(self.session)
return [TitlesSchema.from_orm(result) for result in results]
main.py
from fastapi import FastAPI
from app.routers import router
def create_app() -> FastAPI:
fast_api_app = FastAPI()
fast_api_app.include_router(router, prefix="/", tags=["Titles"])
return fast_api_app
app = create_app()
manage.py
import asyncio
import sys
from .database import async_session, Base, engine
async def init_models():
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all, checkfirst=True)
if __name__ == "__main__":
asyncio.run(init_models())
sys.stdout.write("Models initiated\n")
It runs with docker:
python manage.py
CMD ["uvicorn", "main:app", "--reload", "--host", "0.0.0.0", "--port", "8000", "--limit-max-requests", "10000"]
And right after I see the message Models initiated; after the init_models() func I see a couple of warnings:
app_1 | Models initiated
app_1 | /usr/local/lib/python3.9/site-packages/asyncpg/connection.py:131: ResourceWarning: unclosed connection <asyncpg.connection.Connection object at 0x7efe5a613c80>; run in asyncio debug mode to show the traceback of connection origin
app_1 | /usr/local/lib/python3.9/asyncio/sslproto.py:320: ResourceWarning: unclosed transport <asyncio.sslproto._SSLProtocolTransport object at 0x7efe5a631700>
app_1 | /usr/local/lib/python3.9/asyncio/selector_events.py:704: ResourceWarning: unclosed transport <_SelectorSocketTransport fd=6>
app_1 | INFO: Uvicorn running on http://0.0.0.0:5000 (Press CTRL+C to quit)
app_1 | INFO: Started reloader process [15] using statreload
app_1 | INFO: Started server process [17]
app_1 | INFO: Waiting for application startup.
app_1 | INFO: Application startup complete.
And after i make changes, i see a bunch of warnings:
app_1 | WARNING: StatReload detected file change in 'ref_info/main.py'. Reloading...
app_1 | INFO: Shutting down
app_1 | INFO: Waiting for application shutdown.
app_1 | INFO: Application shutdown complete.
app_1 | INFO: Finished server process [15]
app_1 | sys:1: ResourceWarning: unclosed file <_io.TextIOWrapper name=0 mode='r' encoding='UTF-8'>
app_1 | INFO: Started server process [16]
app_1 | INFO: Waiting for application startup.
app_1 | INFO: Application startup complete.
Is it ok, and do I just need to hide it? Or did I set something up wrong?

Ok, i solved it.
engine = create_async_engine(
settings.DATABASE_ASYNC_URI,
echo="debug" if settings.DEBUG else False,
)
async_session = sessionmaker(
bind=engine,
class_=AsyncSession,
autoflush=True,
autocommit=False,
expire_on_commit=False,
)
async def get_session() -> AsyncGenerator[AsyncSession, None]:
async with async_session() as session:
assert isinstance(session, AsyncSession)
yield session
async def connect() -> None:
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all, checkfirst=True)
async def disconnect() -> None:
if engine:
await engine.dispose()
Add connect and disconnect to your FastApi app startup and shutdown events

Related

How to test fastapi with oracle, sql alchemy?

I have a fastapi application where I use sqlalchemy and stored procedures.
Now I want to test my endpoints like in the documentation
import pytest
from fastapi.testclient import TestClient
from fastapi import FastAPI
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from ..dependencies import get_db
import cx_Oracle
host = 'xxxx'
port = 1111
sid = 'FUU'
user = 'bar'
password = 'fuubar'
sid = cx_Oracle.makedsn(host, port, sid=sid)
database_url = 'oracle://{user}:{password}@{sid}'.format(
user=user,
password=password,
sid=sid,
)
engine = create_engine(database_url, connect_args={"check_same_thread": False})
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
app = FastAPI()
init_router(app)
@pytest.fixture()
def session():
db = TestingSessionLocal()
try:
yield db
finally:
db.close()
@pytest.fixture()
def client(session):
# Dependency override
def override_get_db():
try:
yield session
finally:
session.close()
app.dependency_overrides[get_db] = override_get_db
yield TestClient(app)
def test_index(client):
res = client.get("/")
assert res.text
assert res.status_code == 200
def test_search_course_by_verid_exist():
response = client.get(
'search', params={"search_query": "1111", "semester": "S2022"})
# course exist
assert response.status_code == 200
I've tried it with creating a new app and/or importing it via getting the app from the main.py
from ..main import app
The method is in my courses router.
@router.get("/search", status_code=status.HTTP_200_OK)
async def search_course(
response: Response,
search_query: Union[str, None] = None,
semester: Union[int, None] = None,
db: Session = Depends(get_db),
):
.....
return response
The index test already fails by returning assert 400 == 200. For the 2nd (test_search_course_by_verid_exist) I'll get
AttributeError: 'function' object has no attribute 'get'
My main has some middleware settings like
app.add_middleware(
SessionMiddleware, secret_key="fastAPI"
) # , max_age=300 this should match Login action timeout in token-settings of a realm
app.add_middleware(
TrustedHostMiddleware,
allowed_hosts=settings.ALLOWED_HOSTS,
)
# MIDDLEWARE
@app.middleware("http")
async def check_route(request: Request, call_next):
....
I'm clueless what I'm missing or if things are just different with cx_Oracle
I've tried changing the testclient from fastapi to the starlette one. I've tried not overriding the db and just import the original db settings (which are basically the same). But nothing works.
I'm not sure if this is the proper way to test FastAPI application, https://fastapi.tiangolo.com/tutorial/testing/
Why you didn't declare client as :
client = TestClient(app)
?
Idk if this was the root problem. But naming my fixtures solved the problem and the db connection is working.
conftest.py
@pytest.fixture(name="db_session", scope="session")
def db_session(app: FastAPI) -> Generator[TestingSessionLocal, Any, None]:
Also created the app fixture
@pytest.fixture(name="app", scope="session")
def app() -> Generator[FastAPI, Any, None]:
"""
Create a fresh database on each test case.
"""
_app = start_application()
yield _app

How code AWS Lambda to report back to pipline about job done?

Edit: changed lambda function and logs output, problem remains :/
Have following lambda function as step in codepipeline:
import boto3
import json
import sys
import os
import pymysql
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
DB_HOST = os.environ['DB_HOST']
DB_USER = os.environ['DB_USER']
DB_PASS = os.environ['DB_PASS'],
DB_PORT = int(os.environ['DB_PORT'])
codepipeline = boto3.client('codepipeline')
cursorType = pymysql.cursors.DictCursor
try:
connection = pymysql.connect(
host=DB_HOST,
user=DB_USER,
password=DB_PASS,
port=DB_PORT,
)
except pymysql.MySQLError as err:
logger.error("Error: Could not connect to MySql db")
logger.error(err)
sys.exit()
logger.info("Success: Connected to MySql db")
def lambda_handler(event, context):
cursor = connection.cursor()
try:
logger.info("Dropping db...")
cursor.execute(f"drop database {DB_NAME}")
logger.info("Creating db...")
cursor.execute(f"create database {DB_NAME}")
logger.info("Db created")
connection.close()
logger.info('Conection closed')
job_id = event['CodePipeline.job']['id']
logger.info(f"Job id `{job_id}`")
response = codepipeline.put_job_success_result(jobId=job_id)
logger.info(response)
except Exception as err:
logger.error(err)
response = codepipeline.put_job_failure_result(
jobId=job_id, failureDetails={'message': message, 'type': 'JobFailed'}
)
return {
"statusCode": 200,
}
Function log from function run:
START RequestId: 02e2f7cb-817d-4e49-90db-5b4cae5c9336 Version: $LATEST
[INFO] 2020-09-23T07:38:34.515Z Found credentials in environment variables.
[INFO] 2020-09-23T07:38:34.598Z Success: Connected to MySql db
{'CodePipeline.job': {'id': '9a8b13ea-d4f8-4aea-8481-60db0b7b5b5d... snip}
Dropping db
Creating db
Db created
Conection closed
[INFO] 2020-09-23T07:38:34.732Z 02e2f7cb-817d-4e49-90db-5b4cae5c9336 Job id 9a8b13ea-d4f8-4aea-8481-60db0b7b5b5d
successfuly done
END RequestId: 02e2f7cb-817d-4e49-90db-5b4cae5c9336
REPORT RequestId: 02e2f7cb-817d-4e49-90db-5b4cae5c9336 Duration: 60060.17 ms Billed Duration: 60000 ms Memory Size: 128 MB Max Memory Used: 76 MB Init Duration: 426.53 ms
2020-09-23T07:39:34.660Z 02e2f7cb-817d-4e49-90db-5b4cae5c9336 Task timed out after 60.06 seconds
[INFO] 2020-09-23T07:39:35.55Z Found credentials in environment variables.
[INFO] 2020-09-23T07:39:35.94Z Success: Connected to MySql db
START RequestId: 02e2f7cb-817d-4e49-90db-5b4cae5c9336 Version: $LATEST
{'CodePipeline.job': {'id': '9a8b13ea-d4f8-4aea-8481-60db0b7b5b5d',... snip}
Dropping db
Creating db
Db created
Conection closed
[INFO] 2020-09-23T07:41:39.974Z 02e2f7cb-817d-4e49-90db-5b4cae5c9336 Job id 9a8b13ea-d4f8-4aea-8481-60db0b7b5b5d
successfuly done
END RequestId: 02e2f7cb-817d-4e49-90db-5b4cae5c9336
REPORT RequestId: 02e2f7cb-817d-4e49-90db-5b4cae5c9336 Duration: 60060.32 ms Billed Duration: 60000 ms Memory Size: 128 MB Max Memory Used: 30 MB
2020-09-23T07:42:39.925Z 02e2f7cb-817d-4e49-90db-5b4cae5c9336 Task timed out after 60.06 seconds
How can I "force" lambda to report back to codepipeline that job is done either ok or not instead of just running in some kind of loop?
Lambda IAM role has policy attached like below:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ssm:Describe*",
"ssm:Get*",
"ssm:List*",
"kms:Decrypt",
"ssm:GetParametersByPath",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeInstances",
"ec2:AttachNetworkInterface",
"codepipeline:PutJobSuccessResult",
"codepipeline:PutJobFailureResult"
],
"Resource": "*"
}
]
}
Help please as I can't find reason why lambda is not "letting know" back to pipeline about job status.
So either create a codepipeline VPC endpoint or move the lambda to a private network... case solved :).

ModuleNotFound : Heroku log showing error on Procfile and fails to launch flask app

I am working on a simple flask Todo list web app with postgres database and I tried deploying it to heroku but it is giving me an error saying
Application error
An error occurred in the application and your page could not be served. If you are the application owner, check your logs for details.
I followed the Quickstart correctly but that did not help
when I check logs by heroku logs --tail It says
ModuleNotFoundError:No module named TodoApp
There are no error logs on command promt also. I can not find any way to check what is going wrong.
This is in my Procfile
web: gunicorn TodoApp:app
This is in my requirements.txt
asn1crypto==0.22.0
certifi==2017.4.17
cffi==1.10.0
chardet==3.0.4
click==6.7
cryptography==1.9
enum34==1.1.6
Flask==0.12.2
Flask-SQLAlchemy==2.2
Flask-WTF==0.14.2
gunicorn==19.7.1
idna==2.5
ipaddress==1.0.18
itsdangerous==0.24
Jinja2==2.9.6
MarkupSafe==1.0
psycopg2==2.7.1
pycparser==2.17
PyJWT==1.5.0
pyOpenSSL==17.0.0
pytz==2017.2
requests==2.18.1
six==1.10.0
SQLAlchemy==1.1.10
twilio==6.4.1
urllib3==1.21.1
Werkzeug==0.12.2
WTForms==2.1
This is my todo.py file
from flask import Flask, render_template , Response
from flask import request , jsonify
from models import db ,db1, User
from flask_sqlalchemy import SQLAlchemy
from message import client
import json
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] =
'postgresql://postgres:Abcd#123456@localhost/learningflask'
db.init_app(app)
@app.route("/")
def index():
return render_template("index.html",User=User.query.all())
@app.route("/getdata", methods=['GET'])
def get_User():
myuser=User.query.all()
if myuser =="":
return 404
return Response(json.dumps({'taskname':[user.taskname for user in
myuser]}), mimetype='application/json')
@app.route('/', methods=['POST'])
def my_form_post():
text=request.form['text']
if text !="":
newuser = User(text)
db.session.add(newuser)
db.session.commit()
message = client.messages.create(
to="", from_="" , body ="A task has
been added to your todo list : "+ text)
print(message.sid)
return render_template("index.html",User=User.query.all())
else:
newuser = request.form.getlist('removeId')
if newuser:
for n in newuser:
User.query.filter_by(uid=n).delete()
db.session.commit()
return render_template("index.html",User=User.query.all())
else:
edituser = request.form.getlist('editId')
if edituser:
updatedText=request.form['updatedText']
for e in edituser:
User.query.filter_by(uid=e).update(dict(taskname=updatedText))
db.session.commit()
return
render_template("index.html",User=User.query.all())
if __name__ == "__main__":
app.run(debug=True)
db.create_all()
Here is the folder structure
The error might be in this line:
web: gunicorn TodoApp:app
you should use the file name before the : and then the flask object you have created.
web: gunicorn Todo:app
this should work. Todo:app is the &lt;filename&gt;:&lt;appname&gt;.

Mocking Twisted web client HTTP requests using HTTPretty

As HTTPretty works on the Python socket layer, even Twisted web requests should be mocked out. But I am seeing some weird behavior when using HTTPretty. It tries to connect to localhost somehow. The below example shows the difference:
import httpretty
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
import requests
@httpretty.activate
def main():
httpretty.register_uri(
httpretty.GET, "http://example.com",
body='[{"title": "Test Deal"}]',
content_type="application/json")
agent = Agent(reactor)
d = agent.request(
'GET',
'http://example.com',
Headers({'User-Agent': ['Twisted Web Client Example']}),
None)
def cbError(message):
print 'Async Failed : %s' % message
d.addErrback(cbError)
def cbShutdown(ignored): reactor.stop()
d.addBoth(cbShutdown)
reactor.run()
print 'Response received from Sync: %s' % \
requests.get('http://example.com').status_code
main()
And the response is :
Async Failed : [Failure instance: Traceback (failure with no frames): <class 'twisted.internet.error.ConnectionRefusedError'>: Connection was refused by other side: 111: Connection refused.
]
Response received from Sync: 200
How can i use httpretty with Twisted web client?
You can't. HTTPretty is blocking HTTP client libraries (like requests). It doesn't mock non-blocking sockets.

How to forward a websocket server in localhost with ngrok

I' trying to run a websocket server on local host and forward it to web using ngrok. But couldn't figure it how. These are the original code's from AutobahnPython git repository https://github.com/tavendo/AutobahnPython.
Server code:
from autobahn.twisted.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
class MyServerProtocol(WebSocketServerProtocol):
def onConnect(self, request):
print("Client connecting: {0}".format(request.peer))
def onOpen(self):
print("WebSocket connection open.")
def onMessage(self, payload, isBinary):
if isBinary:
print("Binary message received: {0} bytes".format(len(payload)))
else:
print("Text message received: {0}".format(payload.decode('utf8')))
## echo back message verbatim
self.sendMessage(payload, isBinary)
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
import sys
from twisted.python import log
from twisted.internet import reactor
log.startLogging(sys.stdout)
factory = WebSocketServerFactory("ws://localhost:9000", debug = False)
factory.protocol = MyServerProtocol
reactor.listenTCP(9000, factory)
reactor.run()
Client Code:
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
class MyClientProtocol(WebSocketClientProtocol):
def onConnect(self, response):
print("Server connected: {0}".format(response.peer))
def onOpen(self):
print("WebSocket connection open.")
def hello():
self.sendMessage(u"Hello, world!".encode('utf8'))
self.sendMessage(b"\x00\x01\x03\x04", isBinary = True)
self.factory.reactor.callLater(1, hello)
## start sending messages every second ..
hello()
def onMessage(self, payload, isBinary):
if isBinary:
print("Binary message received: {0} bytes".format(len(payload)))
else:
print("Text message received: {0}".format(payload.decode('utf8')))
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
import sys
from twisted.python import log
from twisted.internet import reactor
log.startLogging(sys.stdout)
factory = WebSocketClientFactory("ws://localhost:9000", debug = False)
factory.protocol = MyClientProtocol
reactor.connectTCP("127.0.0.1", 9000, factory)
reactor.run()
This is the changed code:
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
class MyClientProtocol(WebSocketClientProtocol):
def onConnect(self, response):
print("Server connected: {0}".format(response.peer))
def onOpen(self):
print("WebSocket connection open.")
def hello():
self.sendMessage(u"Hello, world!".encode('utf8'))
self.sendMessage(b"\x00\x01\x03\x04", isBinary = True)
self.factory.reactor.callLater(1, hello)
## start sending messages every second ..
hello()
def onMessage(self, payload, isBinary):
if isBinary:
print("Binary message received: {0} bytes".format(len(payload)))
else:
print("Text message received: {0}".format(payload.decode('utf8')))
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
import sys
from twisted.python import log
from twisted.internet import reactor
log.startLogging(sys.stdout)
#~ factory = WebSocketClientFactory("ws://localhost:9000", debug = False)
factory = WebSocketClientFactory("ws://ngrok.com:xxxxx", debug = False)
factory.protocol = MyClientProtocol
reactor.connectTCP("ws://ngrok.com", xxxxx, factory)
reactor.run()
This is the ngrok command: ./ngrok -proto=tcp 9000
What am I doing wrong here?
I tried using
ngrok http 8091
(where 8091 is the port where my WebsocketServer is running using Racthet IO) and it works. I can still connect to the Websocket replacing the http with ws on the connection.
Try using this command:
ngrok tcp 8091
It works perfectly for me.

Resources