I would like to run a program that does not properly support my desired resolution+DPI settings.
Also I want to change my default GTK theme to a lighter one.
What I currently have:
#!/bin/bash
xfconf-query -c xsettings -p /Xft/DPI -s 0
GTK_THEME=/usr/share/themes/Adwaita/gtk-2.0/gtkrc /home/unknown/scripts/ch_resolution.py --output DP-0 --resolution 2560x1440 beersmith3
This sets my DPI to 0, changes the GTK theme, and runs a Python script that changes my resolution, launches the program, and restores the resolution when the program exits. This is working properly.
Now I want to change my DPI back to 136 on program exit:
xfconf-query -c xsettings -p /Xft/DPI -s 136
My guess is that I need to use a while loop, but I have no idea how to do it.
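Or is it as simple as putting the restore command on the next line? Since the wrapper only reaches that point after ch_resolution.py (and therefore beersmith3) has exited, something like the following rough, untested sketch is what I had in mind; the EXIT trap is there so the DPI also gets restored if the script is interrupted:
#!/bin/bash
# untested sketch: restore the DPI no matter how this wrapper exits
trap 'xfconf-query -c xsettings -p /Xft/DPI -s 136' EXIT

xfconf-query -c xsettings -p /Xft/DPI -s 0
GTK_THEME=/usr/share/themes/Adwaita/gtk-2.0/gtkrc /home/unknown/scripts/ch_resolution.py --output DP-0 --resolution 2560x1440 beersmith3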
ch_resolution.py
#!/usr/bin/env python3
import argparse
import re
import subprocess
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--output', required=True)
parser.add_argument('--resolution', required=True)
parser.add_argument('APP')
args = parser.parse_args()
device_context = '' # track what device's modes we are looking at
modes = [] # keep track of all the devices and modes discovered
current_modes = [] # remember the user's current settings
# Run xrandr and ask it what devices and modes are supported
xrandrinfo = subprocess.Popen('xrandr -q', shell=True, stdout=subprocess.PIPE)
output = xrandrinfo.communicate()[0].decode().split('\n')
for line in output:
    # luckily the various data from xrandr are separated by whitespace...
    foo = line.split()
    # Check to see if the second word in the line indicates a new context
    # -- if so, keep track of the context of the device we're seeing
    if len(foo) >= 2:  # throw out any weirdly formatted lines
        if foo[1] == 'disconnected':
            # we have a new context, but it should be ignored
            device_context = ''
        if foo[1] == 'connected':
            # we have a new context that we want to test
            device_context = foo[0]
        elif device_context != '':  # we've previously seen a 'connected' dev
            # mode names seem to always be of the format [horiz]x[vert]
            # (there can be non-mode information inside of a device context!)
            if foo[0].find('x') != -1:
                modes.append((device_context, foo[0]))
            # we also want to remember what the current mode is, which xrandr
            # marks with a '*' character, so we can set things back the way
            # we found them at the end:
            if line.find('*') != -1:
                current_modes.append((device_context, foo[0]))

for mode in modes:
    if args.output == mode[0] and args.resolution == mode[1]:
        cmd = 'xrandr --output ' + mode[0] + ' --mode ' + mode[1]
        subprocess.call(cmd, shell=True)
        break
else:
    print('Unable to set mode ' + args.resolution + ' for output ' + args.output)
    sys.exit(1)

subprocess.call(args.APP, shell=True)

# Put things back the way we found them
for mode in current_modes:
    cmd = 'xrandr --output ' + mode[0] + ' --mode ' + mode[1]
    subprocess.call(cmd, shell=True)
edit:
Thanks @AndreLDM for pointing out that I don't need a separate Python script to change the resolution; I don't know why I didn't think of that.
I changed it so I don't need the Python script, and it is working properly now. If this script can be improved, please tell me!
#!/bin/bash
xrandr --output DP-0 --mode 2560x1440
xfconf-query -c xsettings -p /Xft/DPI -s 0
GTK_THEME=/usr/share/themes/Adwaita/gtk-2.0/gtkrc beersmith3
if [ $? == 0 ]
then
    xrandr --output DP-0 --mode 3840x2160
    xfconf-query -c xsettings -p /Xft/DPI -s 136
    exit 0
else
    xrandr --output DP-0 --mode 3840x2160
    xfconf-query -c xsettings -p /Xft/DPI -s 136
    exit 1
fi
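One further change I'm considering (untested): both branches run the same two restore commands, so an EXIT trap would remove the duplication and also restore the resolution and DPI if the script is killed or beersmith3 crashes:
#!/bin/bash
# untested variant: restore resolution and DPI on any exit path (normal exit, error, or Ctrl-C)
trap 'xrandr --output DP-0 --mode 3840x2160; xfconf-query -c xsettings -p /Xft/DPI -s 136' EXIT

xrandr --output DP-0 --mode 2560x1440
xfconf-query -c xsettings -p /Xft/DPI -s 0
GTK_THEME=/usr/share/themes/Adwaita/gtk-2.0/gtkrc beersmith3
exit $?  # the trap still fires, and the wrapper keeps beersmith3's exit code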
If I'm in a deep directory, let's say
/run/media/PhoenixFlame101/Coding/Projects/react-app
the fish prompt currently looks like this:
/r/m/Ph/C/P/react-app >
How do I change it to show only the current directory? Like this:
react-app >
I am also using tide, if that makes any difference.
Edit:
Since @glenn-jackman asked, here's the output of type fish_prompt:
fish_prompt is a function with definition
# Defined in /home/PhoenixFlame101/.config/fish/functions/fish_prompt.fish @ line 2
function fish_prompt
    _tide_status=$status _tide_pipestatus=$pipestatus if not set -e _tide_repaint
        jobs -q && set -lx _tide_jobs
        /usr/bin/fish -c "set _tide_pipestatus $_tide_pipestatus
set _tide_parent_dirs $_tide_parent_dirs
PATH=$(string escape "$PATH") CMD_DURATION=$CMD_DURATION fish_bind_mode=$fish_bind_mode set _tide_prompt_4007 (_tide_2_line_prompt)" &
        builtin disown
        command kill $_tide_last_pid 2>/dev/null
        set -g _tide_last_pid $last_pid
    end
    math $COLUMNS-(string length -V "$_tide_prompt_4007[1]$_tide_prompt_4007[3]")+5 | read -lx dist_btwn_sides
    echo -ns \n''(string replace @PWD@ (_tide_pwd) "$_tide_prompt_4007[1]")''
    string repeat -Nm(math max 0, $dist_btwn_sides-$_tide_pwd_len) ' '
    echo -ns "$_tide_prompt_4007[3]"\n"$_tide_prompt_4007[2] "
end
and type prompt_pwd:
prompt_pwd is a function with definition
# Defined in /usr/share/fish/functions/prompt_pwd.fish @ line 1
function prompt_pwd --description 'short CWD for the prompt'
    set -l options h/help d/dir-length= D/full-length-dirs=
    argparse -n prompt_pwd $options -- $argv
    or return
    if set -q _flag_help
        __fish_print_help prompt_pwd
        return 0
    end
    set -q argv[1]
    or set argv $PWD
    set -ql _flag_d
    and set -l fish_prompt_pwd_dir_length $_flag_d
    set -q fish_prompt_pwd_dir_length
    or set -l fish_prompt_pwd_dir_length 1
    set -l fulldirs 0
    set -ql _flag_D
    and set fish_prompt_pwd_full_dirs $_flag_D
    set -q fish_prompt_pwd_full_dirs
    or set -l fish_prompt_pwd_full_dirs 1
    for path in $argv
        # Replace $HOME with "~"
        set -l realhome ~
        set -l tmp (string replace -r '^'"$realhome"'($|/)' '~$1' $path)
        if test "$fish_prompt_pwd_dir_length" -eq 0
            echo $tmp
        else
            # Shorten to at most $fish_prompt_pwd_dir_length characters per directory
            # with full-length-dirs components left at full length.
            set -l full
            if test $fish_prompt_pwd_full_dirs -gt 0
                set -l all (string split -m (math $fish_prompt_pwd_full_dirs - 1) -r / $tmp)
                set tmp $all[1]
                set full $all[2..]
            else if test $fish_prompt_pwd_full_dirs -eq 0
                # 0 means not even the last component is kept
                string replace -ar '(\.?[^/]{'"$fish_prompt_pwd_dir_length"'})[^/]*' '$1' $tmp
                continue
            end
            string join / (string replace -ar '(\.?[^/]{'"$fish_prompt_pwd_dir_length"'})[^/]*/' '$1/' $tmp) $full
        end
    end
end
I'm not sure what exactly this does, but I hope it helps!
Question
How can I instruct my bash script not to attempt to re-connect to my rsync daemon if the process lock file already exists (to prevent the script from trying to create new connections indefinitely after the first connection has already been made)?
This is an example of one of my rsync-daemon wrapper scripts:
#!/bin/sh
#
#
while [ 1 ]
do
    cputool --load-limit 7.5 -- nice -n -15 rsync -avxP --no-i-r --rsync-path="rsync" --log-file=/var/log/rsync-home.log --exclude 'snap' --exclude 'lost+found' --exclude=".*" --exclude=".*/" 127.0.0.1::home /media/username/external/home-files-only && sync && echo 3 > /proc/sys/vm/drop_caches
    if [ "$?" = "0" ] ; then
        echo "rsync completed normally"
        exit
    else
        echo "Rsync failure. Backing off and retrying..."
        sleep 10
    fi
done
#end of shell script
This is my /etc/rsyncd.conf:
[home]
path = /home/username
list = yes
use chroot = false
strict modes = false
uid = root
gid = root
read only = yes
# Data source information
max connections = 1
lock file = /var/run/rsyncd-home.lock
[prod-bkup]
path = /media/username/external/Server-Backups/Prod/today
list = yes
use chroot = false
strict modes = false
uid = root
gid = root
# Don't allow to modify the source files
read only = yes
max connections = 1
lock file = /var/run/rsyncd-prod-bkup.lock
[test-bkup]
path = /media/username/external/Server-Backups/Test/today
list = yes
use chroot = false
strict modes = false
uid = root
gid = root
# Don't allow to modify the source files
read only = yes
max connections = 1
lock file = /var/run/rsyncd-test-bkup.lock
[VminRoot2]
path = /root/VDI-Files
list = yes
use chroot = false
strict modes = false
uid = root
gid = root
# Don't allow to modify the source files
read only = yes
max connections = 1
lock file = /var/run/rsyncd-VminRoot2.lock
Thanks to @james-brown I now have multiple ways to ensure my script runs only once, correctly...
Solution 1 (quick & dirty):
flock -n <lock file> <script>
Or in my case, using this command to execute my cron job:
flock -n /var/run/rsyncd-home.lock /path/to/my_script.sh
Caveat: this leaves your script vulnerable to stale lock files that may prevent execution at the next time interval.
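A related variant of Solution 1 (just a sketch, reusing the same lock path): take the lock inside the script itself on a file descriptor. flock releases the lock when the holding process exits, so a leftover lock file on disk does not block the next run:
#!/bin/bash
LOCK=/var/run/rsyncd-home.lock

# hold the lock on file descriptor 9 for the lifetime of the script;
# bail out if another instance already holds it
exec 9>"$LOCK"
flock -n 9 || { echo "There is another instance running, exiting"; exit 1; }

# ... the rsync while-loop from the wrapper script goes here ...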
Solution 2:
So I used a bullet-proof method (or so I think... I invite folks to correct my understanding, if need be).
First, I did apt install procmail, then commented out the two lines below in my /etc/rsyncd.conf and ran systemctl restart rsync:
#max connections = 1
#lock file = /var/run/rsyncd-home.lock
From there I edited /usr/local/bin/backupscript.sh as follows:
#!/bin/bash
#
LOCK=/var/run/rsyncd-home.lock
remove_lock()
{
    rm -f "$LOCK"
}

another_instance()
{
    echo "There is another instance running, exiting"
    exit 1
}
lockfile -r 0 -l 3600 "$LOCK" || another_instance
trap remove_lock EXIT
#new using rsyncd & perpetual restart
while [ 1 ]
do
    cputool --load-limit 7.5 -- nice -n -15 rsync -avxP --no-i-r --rsync-path="rsync" --log-file=/var/log/rsync-home.log --exclude 'snap' --exclude="Variety Images" --exclude="Downloads/WebDev/Vmin-Vbox" --exclude 'Downloads/WebDev/Win10-Vbox' --exclude="Videos/other" --exclude 'lost+found' --exclude=".*" --exclude=".*/" 127.0.0.1::home /media/username/external/home-files-only && sync && echo 3 > /proc/sys/vm/drop_caches
    if [ "$?" = "0" ] ; then
        echo "rsync completed normally"
        exit
    else
        echo "Rsync failure. Backing off and retrying..."
        sleep 10
    fi
done
#end of shell script
PRESTO:
The script will only connect to the rsync daemon once, it will re-connect on dropped connections thanks to the while loop, and there is no danger of stale lock files interrupting my backup process at future intervals (i.e. problem solved).
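For completeness, the script still gets started from cron, now without any flock wrapper since the locking happens inside the script itself. A hypothetical crontab entry (the schedule and log path below are placeholders, not my real ones):
# crontab -e (placeholder schedule and log path)
0 2 * * * /usr/local/bin/backupscript.sh >> /var/log/backupscript-cron.log 2>&1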
Very useful reference:
https://www.baeldung.com/linux/bash-ensure-instance-running
I am looking to run multiple instances of a command line script at the same time. I am new to this concept of "multi-threading", so I am a bit at a loss as to why I am seeing the things that I am seeing.
I have tried to execute the sub-processing in two different ways:
1 - Using multiple calls of Popen without a communicate until the end:
command = 'raster2pgsql -I -C -e -s 26911 %s -t 100x100 -F p839.%s_image_sum_sum1 | psql -U david -d projects -h pg3' % (workspace + '\\r_sumsum1{}'.format(i), str(i))
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
command = 'raster2pgsql -I -C -e -s 26911 %s -t 100x100 -F p839.%s_image_sum_sum2 | psql -U david -d projects -h pg3' % (workspace + '\\r_sumsum2{}'.format(i), str(i))
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
command = 'raster2pgsql -I -C -e -s 26911 %s -t 100x100 -F p839.%s_image_sum_sum3 | psql -U david -d projects -h pg3' % (workspace + '\\r_sumsum3{}'.format(i), str(i))
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(stdoutdata, stderrdata) = process.communicate()
This starts up each of the command line items but only completes the last one, leaving the other two hanging.
2 - Attempting to implement an example from "Python threading multiple bash subprocesses?", but nothing happens except a printout of the commands (the program hangs with no command-line processes running, as observed in Windows Task Manager):
import threading
import Queue
import commands
import time
workspace = r'F:\Processing\SM'
image = 't08r_e'
image_name = (image.split('.'))[0]
i = 0
process_image_tif = workspace + '\\{}{}.tif'.format((image.split('r'))[0], str(i))
# thread class to run a command
class ExampleThread(threading.Thread):
    def __init__(self, cmd, queue):
        threading.Thread.__init__(self)
        self.cmd = cmd
        self.queue = queue

    def run(self):
        # execute the command, queue the result
        (status, output) = commands.getstatusoutput(self.cmd)
        self.queue.put((self.cmd, output, status))

# queue where results are placed
result_queue = Queue.Queue()

# define the commands to be run in parallel, run them
cmds = ['raster2pgsql -I -C -e -s 26911 %s -t 100x100 -F p839.%s_image_sum_sum1 | psql -U david -d projects -h pg3' % (workspace + '\\r_sumsum1{}'.format(i), str(i)),
        'raster2pgsql -I -C -e -s 26911 %s -t 100x100 -F p839.%s_image_sum_sum2 | psql -U david -d projects -h pg3' % (workspace + '\\r_sumsum2{}'.format(i), str(i)),
        'raster2pgsql -I -C -e -s 26911 %s -t 100x100 -F p839.%s_image_sum_sum3 | psql -U david -d projects -h pg3' % (workspace + '\\r_sumsum3{}'.format(i), str(i)),
        ]

for cmd in cmds:
    thread = ExampleThread(cmd, result_queue)
    thread.start()

# print results as we get them
while threading.active_count() > 1 or not result_queue.empty():
    while not result_queue.empty():
        (cmd, output, status) = result_queue.get()
        print(cmd)
        print(output)
How can I run all of these commands at the same time and get a result at the end? I am running on Windows with Python 2.7.
My first try didn't work because of the repeated definitions of stdout and stderr. Removing these definitions gives the expected behavior.
I have a set of commands that I am attempting to run in a script. To be exact, the lines are
rm tmp_pipe
mkfifo tmp_pipe
python listen_pipe.py &
while [ true ]; do nc -l -w30 7036 >>tmp_pipe; done &
listen_pipe.py is simply
if __name__ == "__main__":
    f = open("tmp_pipe")
    vals = " "
    while "END" not in vals:
        vals = f.readline()
        if len(vals) > 0:
            print(vals)
        else:
            f = open("tmp_pipe")
If I run the commands in the order shown I get my desired output, which is a connection to an ESP device that streams motion data. The connection resets after 30 seconds if the ESP device leaves the network range or if the device is turned off. The Python script continues to read from the pipe and does not terminate when the TCP connection is reset. However, if I run this code inside a script file, nc fails to connect and the device remains in an unconnected state indefinitely. The script is just
#!/bin/bash
rm tmp_pipe
mkfifo tmp_pipe
python listen_pipe.py &
while [ true ]; do nc -l -w30 7036 >>tmp_pipe; done &
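For what it's worth, one variation I plan to try (I'm not sure it addresses the actual cause): use rm -f so a missing pipe doesn't produce an error, and keep the script itself alive with wait so the background jobs stay under it instead of being orphaned the moment the script returns:
#!/bin/bash
rm -f tmp_pipe                 # -f: don't error if the pipe doesn't exist yet
mkfifo tmp_pipe
python listen_pipe.py &
while true; do nc -l -w30 7036 >> tmp_pipe; done &
wait                           # keep the script (and its children) running in the foreground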
This is being run on Ubuntu 16.04. Any suggestions are greatly welcome; I have been fighting with this code all day. Thanks,
Ian
I'm messing around with jcjohnson/neural-style. I have it up and running just fine. However, I cannot for the life of me make the -output_image or -init flags work. My gut says it's some very simple misunderstanding on my part.
-output_image is driving me especially nuts because it should just be a string to use as the filename; the script even appends the file format in the function that takes -output_image as an argument. I've tried single, double, and no quotes, as well as changing the order of the flags. My gut says it's something trivially simple because nobody is complaining about this online anywhere, but I'm a little in over my head because I'm not especially proficient with either bash, torch, or lua.
I'm using this simple script I hacked together, which works just fine if I leave off both of the troublesome flags:
#!/bin/bash
echo "#### Starting Job B ####"
th neural_style.lua \
-gpu -1 \
-print_iter 1 \
-save_iter 10 \
-num_iterations 3000 \
-style_image a_style.jpg \
-content_image a_content.jpg \
-style_scale 0.15 \
-style_weight 25 \
-content_weight 1e3 \
-output_image a \
-init a_460.png \
The above throws "Invalid argument: "
If it helps, in the script 'neural_style.lua' the flags are defined as
cmd:option('-init', 'random', 'random|image')
...
cmd:option('-output_image', 'out.png')
and used as
local function maybe_save(t)
  local should_save = params.save_iter > 0 and t % params.save_iter == 0
  should_save = should_save or t == params.num_iterations
  if should_save then
    local disp = deprocess(img:double())
    disp = image.minmax{tensor=disp, min=0, max=1}
    local filename = build_filename(params.output_image, t)
    if t == params.num_iterations then
      filename = params.output_image -- ## LOOK HERE ##
    end
    -- Maybe perform postprocessing for color-independent style transfer
    if params.original_colors == 1 then
      disp = original_colors(content_image, disp)
    end
    image.save(filename, disp) -- ## LOOK HERE ##
  end
end
as well as
if params.init == 'random' then
  img = torch.randn(content_image:size()):float():mul(0.001)
elseif params.init == 'image' then
  img = content_image_caffe:clone():float()
else
  error('Invalid init type') -- ## NOT THE ERROR THROWN #
end

if params.gpu >= 0 then
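For what it's worth, going only by the cmd:option definitions quoted above (so this is a guess, not a verified fix): -init appears to accept only the literal strings random or image rather than a filename, and -output_image expects a plain output filename like the default out.png. An invocation consistent with those definitions would look like this (I also dropped the trailing backslash after the last option, just to rule it out):
th neural_style.lua \
  -gpu -1 \
  -print_iter 1 \
  -save_iter 10 \
  -num_iterations 3000 \
  -style_image a_style.jpg \
  -content_image a_content.jpg \
  -style_scale 0.15 \
  -style_weight 25 \
  -content_weight 1e3 \
  -output_image a.png \
  -init image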