python coroutine asyncio/ await / aiohttp - async-await

I'm new to the asyncio world.
Going straight to the point: I want to make a request to a site with aiohttp.
If waiting for the answer takes more than N seconds, I want to stop waiting,
then retry the request, with a limit on the number of attempts if needed.
import aiohttp
from time import time

async def search_skiping_add(name_search):
    start_time = time()
    async with aiohttp.ClientSession() as session:
        url = f'https://somesitehere.com'
        r = await session.get(url)
    final_time = time()
    result_time = round(final_time - start_time)
    print(result_time)
I know there may be a way to do this synchronously, but it's also an excuse to start using asyncio.
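For reference, a minimal sketch of the timeout-plus-retry idea using asyncio.wait_for; the helper name, the placeholder URL, the N-second limit and the attempt count are illustrative, not part of the question's code:

import asyncio
import aiohttp

async def fetch_once(session: aiohttp.ClientSession, url: str) -> str:
    async with session.get(url) as response:
        return await response.text()

async def fetch_with_retries(url: str, n_seconds: float = 5, attempts: int = 3):
    async with aiohttp.ClientSession() as session:
        for attempt in range(attempts):
            try:
                # Cancel this attempt if it takes longer than n_seconds
                return await asyncio.wait_for(fetch_once(session, url), timeout=n_seconds)
            except asyncio.TimeoutError:
                print(f"Attempt {attempt + 1} timed out, retrying...")
    return None  # every attempt timed out

# asyncio.run(fetch_with_retries("https://somesitehere.com", n_seconds=2))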

This should give you an idea of how to use async with aiohttp:
from aiohttp import ClientSession, ClientTimeout
from asyncio import gather, create_task, sleep, run
from traceback import format_exc

def returnPartionedList(inputlist: list, x: int = 100) -> list:  # returns inputlist split into chunks of x items, default is 100
    return([inputlist[i:i + x] for i in range(0, len(inputlist), x)])

# You could change validate to an integer and thereby increase the retry count as needed.
async def GetRessource(url: str, session: ClientSession, validate: bool = False) -> dict:
    try:
        async with session.get(url) as response:
            if response.status == 200:
                r: dict = await response.json()  # Use .text() instead to get the result as a string
                return(r)
            else:
                r: str = await response.text()
                if not validate:
                    await sleep(3)  # Sleep for a few seconds before retrying
                    return(await GetRessource(url, session, True))
                print(f"Error, got response code: {response.status} message: {r}")
    except Exception:
        print(f"General Exception:\n{format_exc()}")
    return({})

async def GetUrls(urls: list) -> list:
    resultsList: list = []
    UrlPartitions: list = returnPartionedList(urls, 20)  # Rate limit to 20 requests per loop
    # Timeout is the total time in seconds to wait before terminating a request; the default is 300 seconds (5 minutes).
    async with ClientSession(timeout=ClientTimeout(total=15)) as session:
        for partition in UrlPartitions:
            partitionTasks: list = [create_task(GetRessource(url, session)) for url in partition]
            resultsList.append(await gather(*partitionTasks, return_exceptions=False))
    return(resultsList)  # Or you can do more processing here before returning

async def main():
    urls: list = ["...", "...", "..."]  # list of urls to get from
    results: list = await GetUrls(urls)
    print(results)

if __name__ == "__main__":
    run(main())
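Not part of the answer above, but a common alternative to partitioning the URL list is to cap concurrency with an asyncio semaphore; a rough sketch under that assumption:

from asyncio import Semaphore, gather, run
from aiohttp import ClientSession

async def fetch_limited(url: str, session: ClientSession, limiter: Semaphore) -> str:
    async with limiter:  # at most 20 requests are in flight at any moment
        async with session.get(url) as response:
            return await response.text()

async def get_urls(urls: list) -> list:
    limiter = Semaphore(20)
    async with ClientSession() as session:
        return await gather(*(fetch_limited(url, session, limiter) for url in urls))

# run(get_urls(["https://example.com"]))

This keeps all requests in a single gather call while still limiting how many run at once, instead of waiting for each batch of 20 to finish before starting the next.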

Related

running asyncio with streamlit dashboard

How do I create a Streamlit dashboard that doesn't continuously append the new values coming from asyncio.gather(firstWorker(), secondWorker())? The code below runs, but it quickly turns into a very long dashboard. I was hoping to have two fixed Streamlit title or metric elements representing workerOne and workerTwo, where only val_one and val_two are updated on the dashboard. Streamlit metric and text elements all seem to have the same behavior of continuously appending. Any tips appreciated.
I am using Python 3.10 on Windows 10 and running the dashboard with: $ streamlit run app.py
import streamlit as st
import asyncio
import random as r

val_one = 0
val_two = 0

st.title("Hello World")

async def firstWorker():
    global val_one
    while True:
        await asyncio.sleep(r.randint(1, 3))
        val_one = r.randint(1, 10)
        st.metric("First Worker Executed: ", val_one)

async def secondWorker():
    global val_two
    while True:
        await asyncio.sleep(r.randint(1, 3))
        val_two = r.randint(1, 10)
        st.metric("Second Worker Executed: ", val_two)

async def main():
    await asyncio.gather(
        firstWorker(),
        secondWorker()
    )

if __name__ == '__main__':
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass
    finally:
        print("Closing Loop")
        loop.close()
If possible, I'm trying to avoid an infinitely long dashboard, and I'd also like the asyncio processes to keep running in the background even if the Streamlit dashboard isn't being viewed in a browser.
Put the code after if __name__ == '__main__': into an st.empty() container so the previous content is overwritten whenever an update is made.
import streamlit as st
import asyncio
import random as r

val_one = 0
val_two = 0

st.title("Hello World")

async def firstWorker():
    global val_one
    while True:
        await asyncio.sleep(r.randint(1, 3))
        val_one = r.randint(1, 10)
        st.metric("First Worker Executed: ", val_one)

async def secondWorker():
    global val_two
    while True:
        await asyncio.sleep(r.randint(1, 3))
        val_two = r.randint(1, 10)
        st.metric("Second Worker Executed: ", val_two)

async def main():
    await asyncio.gather(
        firstWorker(),
        secondWorker()
    )

if __name__ == '__main__':
    with st.empty():  # Modified to use an empty container
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            asyncio.run(main())
        except KeyboardInterrupt:
            pass
        finally:
            print("Closing Loop")
            loop.close()
Version 2:
st.title("Hello World")

async def firstWorker():
    await asyncio.sleep(r.randint(1, 3))
    val_one = r.randint(1, 10)
    st.metric("First Worker Executed: ", val_one)

async def secondWorker():
    await asyncio.sleep(r.randint(1, 3))
    val_two = r.randint(1, 10)
    st.metric("Second Worker Executed: ", val_two)

async def main():
    with st.empty():
        while True:
            left_col, right_col = st.columns(2)
            with left_col:
                await asyncio.gather(firstWorker())
            with right_col:
                await asyncio.gather(secondWorker())

if __name__ == '__main__':
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass
    finally:
        print("Closing Loop")
        loop.close()
I don't know why you were using global variables; I removed them this time, unless you need them for something else.
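If you want each worker to write into its own fixed slot rather than sharing one container, a hedged variation (the placeholder names here are mine, not from the answer) is to create one st.empty() placeholder per worker and write the metric into it on every iteration; writing into a placeholder replaces its previous content instead of appending:

import asyncio
import random as r
import streamlit as st

st.title("Hello World")
first_slot = st.empty()   # fixed slot for the first worker
second_slot = st.empty()  # fixed slot for the second worker

async def worker(slot, label):
    while True:
        await asyncio.sleep(r.randint(1, 3))
        # Each write replaces the placeholder's previous metric
        slot.metric(label, r.randint(1, 10))

async def main():
    await asyncio.gather(worker(first_slot, "First Worker Executed:"),
                         worker(second_slot, "Second Worker Executed:"))

asyncio.run(main())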

sys:1: RuntimeWarning: coroutine 'AirthingsWaveDetect.get_sensors' was never awaited

I am a beginner trying to get data from an Airthings Wave Plus into Hubitat using a Raspberry Pi. When running a Python script I get this error message:
readwaveplus.py:391: RuntimeWarning: coroutine 'AirthingsWaveDetect.get_sensor_data' was never awaited
data = airthingsdetect.get_sensor_data()[MAC]
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
Read Error
^CTraceback (most recent call last):
File "readwaveplus.py", line 414, in <module>
time.sleep(SamplePeriod)
KeyboardInterrupt
sys:1: RuntimeWarning: coroutine 'AirthingsWaveDetect.get_sensors' was never awaited
It seems I need to add "await" somewhere, but I am not able to figure out where; I tried adding it in a few places without success.
Here is the script:
async def main():
    logging.basicConfig()
    _LOGGER.setLevel(logging.DEBUG)

    ad = AirthingsWaveDetect(0)
    num_dev_found = await ad.find_devices()
    if num_dev_found > 0:
        devices = await ad.get_info()
        for mac, dev in devices.items():
            _LOGGER.info("Device: {}: {}".format(mac, dev))

        devices_sensors = await ad.get_sensors()
        for mac, sensors in devices_sensors.items():
            for sensor in sensors:
                _LOGGER.info("Sensor: {}: {}".format(mac, sensor))

        sensordata = await ad.get_sensor_data()
        for mac, data in sensordata.items():
            for name, val in data.items():
                _LOGGER.info("Sensor data: {}: {}: {}".format(mac, name, val))

if __name__ == "__main__":
    asyncio.run(main())
import time
import requests

# The period between two measurements in seconds (Default: 300)
SamplePeriod = 300

MAC = 'd8:71:4d:ca:de:dc'

# The hostname or IP address of the MQTT broker Hubitat hub to connect
makerAPIHostname = "192.168.1.38"
makerAPIAppID = "2416"
makerAPIToken = "8ca5e2ac-7d0b-4c24-a89b-f6022844e2ae"
makerAPIDeviceID = "AirThings Wave Plus"

from airthings import AirthingsWaveDetect
scan_interval = 120
airthingsdetect = AirthingsWaveDetect(scan_interval, MAC)

V_MAX = 3.0
V_MIN = 2.0

#---- Initialize ----#
URL = "http://{}/apps/api/{}/devices/{}/{}?access_token={}".format(makerAPIHostname, makerAPIAppID, makerAPIDeviceID, '{}/{}', makerAPIToken)

devices_sensors = airthingsdetect.get_sensors()

while True:
    try:
        data = airthingsdetect.get_sensor_data()[MAC]
    except:
        print('Read Error')
        pass
    else:
        sensorData = "{},{},{},{},{},{},{},{},{}".format(
            data['temperature'],
            data['humidity'],
            data['rel_atm_pressure'],
            data['co2'],
            data['voc'],
            round(data['radon_1day_avg']/37., 2),
            round(data['radon_longterm_avg']/37., 2),
            max(0, min(100, round((data['battery']-V_MIN)/(V_MAX-V_MIN)*100))),
            round(data['illuminance'])
        )
        #print( sensorData )
        try:
            request = URL.format('setValues', sensorData)
            requests.get(request)
        except:
            pass
    finally:
        time.sleep(SamplePeriod)
Any idea? The async calls apparently need an await, but I don't know where to put it. Thanks.
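The warning comes from calling the async methods get_sensors() and get_sensor_data() directly from the synchronous polling loop, which only creates coroutine objects without running them. One hedged way to fix it, keeping the rest of the script as-is and assuming the airthings methods work when driven this way, is to run each coroutine to completion with asyncio.run():

import asyncio

devices_sensors = asyncio.run(airthingsdetect.get_sensors())

while True:
    try:
        # Run the coroutine to completion, then index the returned dict
        data = asyncio.run(airthingsdetect.get_sensor_data())[MAC]
    except Exception:
        print('Read Error')
    else:
        ...  # build sensorData and push it to the Maker API as before
    finally:
        time.sleep(SamplePeriod)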

discord.py not finding commands within a class

If I use .join in discord with the code below, I get the following error:
Ignoring exception in command None:
discord.ext.commands.errors.CommandNotFound: Command "join" is not found
I'm not sure how to fix this (it's going to be part of a music bot). Here is the relevant code:
import discord
import discord.voice_client
from discord import utils
from discord.ext import commands
from discord.utils import *
import discord.utils
import lavalink

intents = discord.Intents.all()  # intents was referenced but never defined in the snippet
bot = commands.Bot(command_prefix=".", intents=intents)

class MusicCog(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.bot.music = lavalink.Client(self.bot.user.id)
        self.bot.music.add_node('localhost', 7000, 'testing', 'na', 'music-node')
        self.bot.add_listener(self.bot.music.voice_update_handler, 'on-socket-response')
        self.bot.music.add_event_hook(self.track_hook)

    @commands.command(name='Join')
    async def join(self, ctx):
        print("join command worked")
        member = utils.find(lambda m: m.id == ctx.author.id, ctx.guild.members)
        if member is not None and member.voice is not None:
            vc = member.voice.channel
            player = self.bot.music.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
            if not player.is_connected:
                player.store('channel', ctx.channel.id)
                await self.connect_to(ctx.guild.id, str(vc.id))

    @commands.command(name="Play")
    async def play(self, ctx, *, query):
        try:
            player = self.bot.music.player_manager.get(ctx.guild.id)
            query = f'ytsearch: {query}'
            results = await player.node.get_tracks(query)
            tracks = results['tracks'][0:10]
            i = 0
            query_result = ''
            for track in tracks:
                i = i + 1
                query_result = query_result + f'{i}) {track["info"]["title"]} - {track["info"]["url"]}\n'
            show_songs = discord.Embed(
                title=None,
                description=None,
                colour=discord.Colour.blue()
            )
            show_songs.description = query_result
            await ctx.channel.send(embed=show_songs)

            def check(m):
                return m.author.id == ctx.author.id

            response = await self.bot.wait_for('message', check=check)
            track = tracks[int(response.content) - 1]
            player.add(requester=ctx.author.id, track=track)
            if not player.is_playing:
                await player.play()
        except Exception as error:
            print(error)

    async def track_hook(self, event):
        if isinstance(event, lavalink.events.QueueEndEvent):
            guild_id = int(event.player.guild.id)
            await self.connect_to(guild_id, None)

    async def connect_to(self, guild_id: int, channel_id: str):
        ws = self.bot._connection._get_websocket(guild_id)
        await ws.voice_state(str(guild_id), channel_id)

def setup(bot):
    bot.add_cog(MusicCog(bot))
I've tried changing @commands.command to @bot.command, which didn't end up working, and I'm almost certain the indentation is correct throughout the code, so at this point I'm not sure how to fix it. Any help would be appreciated!
By default, commands are case sensitive, so you'd have to type .Join or .Play in order to invoke them.
If you want your commands to be case insensitive, simply write:
bot = commands.Bot(command_prefix='.', intents=intents, case_insensitive=True)
PS: the name argument in the commands.command() decorator is what needs to be typed after your prefix to invoke the function.
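For example, a minimal cog sketch (the names and channel message are illustrative) showing how the name argument maps to the invocation string:

from discord.ext import commands

class MusicCog(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    # Registered as "join", so it is invoked with ".join" (or ".Join" when case_insensitive=True)
    @commands.command(name='join', aliases=['j'])
    async def join(self, ctx):
        await ctx.send("Joining your voice channel...")

def setup(bot):
    bot.add_cog(MusicCog(bot))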

Asynchronous server hanging in Python

I am currently using a Python asynchronous server to serve some clients. The server works well at first, then eventually hangs without showing whether it is receiving any requests from clients. When I press Ctrl-C, it then shows that it is receiving the requests from the clients and it does not stop; it just continues to receive requests.
I don't know where this bug is coming from.
Thank you in advance.
import asyncio, json
from collections import Coroutine
from typing import Any
import Engine
import struct

header_struct = struct.Struct("!Q")  # messages up to 2**64 - 1 in length

async def recvall(reader, length):
    blocks = []
    while length:
        block = await reader.read(length)
        if not block:
            raise EOFError('socket closed with {} bytes left in this block'.format(length))
        length -= len(block)
        blocks.append(block)
    return b''.join(blocks)

async def get_block(reader):
    data = await recvall(reader, header_struct.size)
    (block_length,) = header_struct.unpack(data)
    return await recvall(reader, block_length)

async def put_block(writer, message):
    block_length = len(message)
    writer.write(header_struct.pack(block_length))
    writer.write(message)
    # await writer.drain()

async def handle_conversation(reader, writer):
    address__ = writer.get_extra_info("peername")
    print("Accepted connection from {}".format(address__))
    while True:
        # ************************try to check if there is data to send*********************************
        try:
            block = await get_block(reader)
            # decode the data
            data = block.decode()
            decoded_data = json.loads(data)
            # dont forget to make this synchronous
            answer = await Engine.get_answer(decoded_data["Task"], decoded_data["content"])
            # don't forget to check if there is necessary data to push and make sure data is conveyed
            await put_block(writer, answer)
            print(answer)
        except Exception as e:
            raise

if __name__ == '__main__':
    address = Engine.parse_command_line("asyncio server using coroutine")
    # loop = asyncio.get_event_loop()
    # coro = asyncio.start_server(handle_conversation, *address)

    async def main():
        server = await asyncio.start_server(
            handle_conversation, *address)
        addr = server.sockets[0].getsockname()
        print(f'Serving on {addr}')
        async with server:
            await server.serve_forever()

    asyncio.run(main(), debug=True)
Engine Code
import argparse
import json
import time
import upload_pic_and_search
import accept_connections
import connect
import opinion_poll
import share
import zmq
from jsonrpclib import Server

context = zmq.Context()

aphorisms = {"share": share.share_,
             "poll": opinion_poll.add_poll,
             "add_profile_pic": upload_pic_and_search.profile_pic,
             "connect": connect.connect,
             "accept_connection": accept_connections.accept_connection}

def sighn_up(doc):
    """this function will connect to sighn up"""
    proxy = Server('http://localhost:7002')
    answer = proxy.sighn_up(doc)
    return answer

def Verification(doc):
    """Code verification routine"""
    proxy = Server('http://localhost:7002')
    answer = proxy.verify(doc)
    return answer

def login(doc):
    """This function handles authentication"""
    proxy = Server('http://localhost:7002')
    answer = proxy.autheticate(doc)
    return answer

def post(doc):
    """connect to server that handles posts"""
    proxy = Server('http://localhost:6700')
    answer = proxy.post(doc)
    return answer

def comment(doc):
    """connect to the server that stores comments"""
    proxy = Server('http://localhost:6701')
    answer = proxy.comments_(doc)
    return answer

def reply(doc):
    """store the reply"""
    proxy = Server('http://localhost:6702')
    answer = proxy.reply(doc)
    return answer

def share(doc):
    """share the post"""
    proxy = Server('http://localhost:6703')
    answer = proxy.share(doc)
    return answer

def likes(doc):
    """connect to the likes queue"""
    zcontext = zmq.Context()
    osock = zcontext.socket(zmq.PUSH)
    osock.connect("tcp://127.0.0.1:6704")
    osock.send_json(doc)
    return {"Task": "like", "like": True}

def follow(doc):
    """handles the follow routine"""
    zcontext = zmq.Context()
    osock = zcontext.socket(zmq.PUSH)
    osock.connect("tcp://127.0.0.1:6705")
    osock.send_json(doc)

def connect(doc):
    """connect to routine for connection"""
    zcontext = zmq.Context()
    osock = zcontext.socket(zmq.PUSH)
    osock.connect("tcp://127.0.0.1:6706")
    osock.send_json(doc)

def accept_connection(doc):
    """the queue responsible for accepting connections"""
    zcontext = zmq.Context()
    osock = zcontext.socket(zmq.PUSH)
    osock.connect("tcp://127.0.0.1:6707")
    osock.send_json(doc)

def add_profile_pic(doc):
    """Add the profile pic of the user"""
    proxy = Server('http://localhost:7006')
    answer = proxy.profile_pic(doc)
    return answer

def search(doc):
    """search the user in the database"""
    proxy = Server('http://localhost:7006')
    answer = proxy.search(doc)
    return answer

def profile(doc):
    """fetch the user's profile from the database"""
    proxy = Server('http://localhost:7006')
    answer = proxy.profile(doc)
    return answer

async def get_answer(aphorism, content):
    """Return the response to a particular question"""
    # time.sleep(0.0)
    # fetch the responsible function
    # function = aphorisms.get(aphorism, "Error:Unknown aphorism.")
    function = eval(aphorism)
    answer = function(content)
    return send(answer)

def send(data):
    """Prepare the data to be sent via socket"""
    json_data = json.dumps(data)
    data_bytes = json_data.encode()
    return data_bytes

def parse_command_line(description):
    """Parse the command line and return a socket address."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('host', help="IP or hostname")
    parser.add_argument("-p", metavar='port', type=int, default=1060, help="TCP port (default 1060)")
    args = parser.parse_args()
    address = (args.host, args.p)
    return address

def recv_untill(sock, suffix):
    """Receive bytes over socket `sock` until we receive the `suffix`."""
    message = sock.recv(4096)
    if not message:
        raise EOFError("Socket closed")
    while not message.endswith(suffix):
        data = sock.recv(4096)
        if not data:
            raise IOError('received {!r} then socket closed'.format(message))
        message += data
    return message
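One likely reason for the hang, offered as a guess rather than a confirmed diagnosis, is that get_answer() calls blocking code (the jsonrpclib proxies and zmq sockets) directly inside the coroutine, which stalls the event loop for every connected client until that call returns. A hedged sketch of offloading the blocking work to a thread pool, reusing the aphorisms table and send() from the Engine module above:

import asyncio

async def get_answer(aphorism, content):
    """Run the blocking handler in a worker thread so the event loop stays responsive."""
    loop = asyncio.get_running_loop()
    # Look the handler up instead of using eval(); fall back to a simple error payload
    function = aphorisms.get(aphorism)
    if function is None:
        return send({"Error": "Unknown aphorism."})
    answer = await loop.run_in_executor(None, function, content)
    return send(answer)

It may also help to re-enable await writer.drain() in put_block() so the server applies backpressure instead of buffering outgoing writes indefinitely.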

Why asyncio.sleep(delay) completes earlier than given delay?

import asyncio, aiohttp, logging, time, random

pause = 1/10

async def req(i):
    await asyncio.sleep(random.randint(1, 5))

async def run():
    for i in range(100):
        asyncio.ensure_future(req(i))
    t0 = time.time()
    await asyncio.sleep(pause)
    print(time.time() - t0)
    tasks = asyncio.Task.all_tasks()
    if len(tasks) != 1:
        tasks.remove(asyncio.Task.current_task())
        await asyncio.wait(tasks)

loop = asyncio.get_event_loop()
loop.run_until_complete(run())
The output is: 0.093654
Why did await asyncio.sleep(pause) finish after 0.093654 s, earlier than the 0.1 s pause it was given?
It's a bug/feature of asyncio on Windows. You can read the discussion here.
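A small sketch, if you want to observe the effect yourself, is to compare the wall-clock measurement with the event loop's own clock; on Windows the underlying timer has coarse granularity (roughly 15-16 ms), so a 100 ms sleep measured with time.time() can appear slightly shorter or longer than what the loop actually scheduled:

import asyncio, time

async def measure(pause: float = 0.1):
    loop = asyncio.get_running_loop()
    wall_start, loop_start = time.time(), loop.time()
    await asyncio.sleep(pause)
    print("time.time() elapsed:", time.time() - wall_start)   # coarse clock, can read short on Windows
    print("loop.time() elapsed:", loop.time() - loop_start)   # the clock asyncio schedules against

asyncio.run(measure())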
