currentDate="20160324"
headerDumpFile="header.txt"
#currentDate="$(date +"%Y%m%d")"
printf "Current date in yyyymmdd format: %s\n" "$currentDate"
contId=""
labelList="c12,playlist-play,play,pause,end,playlist-end,heartbeat,ns_st_cl"
params="corporate=abc&user=abc&password=abc&startdate=$currentDate&site=abc&extralabels=$labelList"
url="https://example.com/v1/start?$params"
a=1
while true
do
curl -D $headerDumpFile -v -k -H "Accept-Encoding:gzip" $url > $a.zip
contId= cat $headerDumpFile | grep "X-CS-Continuation-Id:" | awk '{print $NF}'
if [ "$contId" ];then
printf "Breaking the Loop.."
break;
fi
url="https://example.com/v1/start?$params&continuationId=${contId}"
a=$((a + 1))
echo $contId
echo $url
done
When I do echo $url, the value of contId in it is blank, but when I do echo $contId it is printed correctly. Please suggest.
Perhaps this is what you want to achieve:
contId=$(cat $headerDumpFile | grep "X-CS-Continuation-Id:" | awk '{print $NF}')
Or the simpler:
contId=$(awk '/X-CS-Continuation-Id:/ {print $NF}' $headerDumpFile)
Note that, contrary to what you were guessing, echo $contId isn't displaying anything in your code. What is displayed is the output of the bogus contId= cat $headerDumpFile | grep "X-CS-Continuation-Id:" | awk '{print $NF}' line.
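For completeness, a minimal sketch of how the loop could look once the assignment is fixed. It assumes you want to keep paging while the server keeps returning an X-CS-Continuation-Id header; adjust the break condition to whatever your API actually promises:
a=1
url="https://example.com/v1/start?$params"
while true
do
    curl -D "$headerDumpFile" -k -H "Accept-Encoding: gzip" "$url" > "$a.zip"
    # command substitution, so the header value really lands in the variable
    contId=$(awk '/X-CS-Continuation-Id:/ {print $NF}' "$headerDumpFile")
    if [ -z "$contId" ]; then   # assumption: no header means no more pages
        printf "No continuation id, stopping.\n"
        break
    fi
    url="https://example.com/v1/start?$params&continuationId=${contId}"
    a=$((a + 1))
done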
I have a text file which contains the following lines:
"user","password_last_changed","expires_in"
"jeffrey","2021-09-21 12:54:26","90 days"
"root","2021-09-21 11:06:57","0 days"
How can I grab the two fields jeffrey and 90 days from between the double quotes and save them in variables?
If awk is an option, you could save an array and then save the elements as individual variables.
$ IFS="\"" read -ra var <<< $(awk -F, '/jeffrey/{ print $1, $NF }' input_file)
$ var2="${var[3]}"
$ echo "$var2"
90 days
$ var1="${var[1]}"
$ echo "$var1"
jeffrey
while read -r line; do # read in line by line
name=$(echo "$line" | awk -F, '{ print $1 }' | sed 's/"//g')   # grab first col and strip "
expire=$(echo "$line" | awk -F, '{ print $3 }' | sed 's/"//g') # grab third col and strip "
echo "$name" "$expire" # do your business
done < yourfile.txt
IFS=","
arr=( $(cat txt | head -2 | tail -1 | cut -d, -f 1,3 | tr -d '"') )
echo "${arr[0]}"
echo "${arr[1]}"
The result is stored in an array; you can access the elements by index.
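If you are in bash, a variant that avoids leaving IFS set to "," for the rest of the session is to scope it to a single read. A sketch, keeping the file name txt from above; sed -n '2p' picks the second line, just like head -2 | tail -1:
# IFS applies only to this read, so the global value is untouched
IFS=, read -r name expire < <(sed -n '2p' txt | cut -d, -f1,3 | tr -d '"')
echo "$name"     # jeffrey
echo "$expire"   # 90 days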
Maybe the method below, using the sed and awk commands, will help you:
#!/bin/sh
username=$(sed -n '/jeffrey/p' demo.txt | awk -F',' '{print $1}' | tr -d '"')
echo "$username"
expires_in=$(sed -n '/jeffrey/p' demo.txt | awk -F',' '{print $3}' | tr -d '"')
echo "$expires_in"
Output :
jeffrey
90 days
Note:
The above method will only work if the username is distinct.
As far as I know, usernames are not duplicated.
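If duplicates ever did appear, a hedged variant is to let awk stop at the first matching line (stripping the quotes at the same time, so the output matches what is shown above):
# exit after the first match, so duplicate usernames cannot add extra lines
username=$(awk -F',' '/jeffrey/ {gsub(/"/, "", $1); print $1; exit}' demo.txt)
expires_in=$(awk -F',' '/jeffrey/ {gsub(/"/, "", $3); print $3; exit}' demo.txt)
echo "$username"
echo "$expires_in"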
Here is my shell script, where 'i' stands for a line of key.txt. The key file consists of around 8 properties and their values, separated by a space, as shown below.
Key.txt file :
bat slservice
solr slservice
tvs kimservice
ACM kimservice
product kimservice
tax kimservice
tvs kimservice
SNB taxservice
Shell script:
#!/bin/bash
while read i
do
key=$(echo $i | awk '{print $1}')
service=`echo $i | awk '{print $2}'`
ip=`cat IP.txt | grep $service | awk '{print $2}'|awk 'NR==1{print $1}'`
echo "VALIDATING $service properties"
echo "validating $key in $service IP = $ip"
curl -X GET -H "Content-type: application/json" -H "Accept: application/json" http://${ip}/v1/config 2>/dev/null>$service.json
v1_value=$(jq ".\"$key\"" "$service.json" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b")
echo "$key $v1_value" > file.txt
acutal_value=$(cat file.txt | grep "$key" | awk '{print $2}')
LB_name=$(cat mapping.txt | grep "$key" | awk '{print $2}')
LB_ip=$(cat LB.txt | grep "${LB_name}" | awk '{print $2}')
if [ "${acutal_value}" == "${LB_ip}" ]; then
echo "$key of $service value is matching $v1_value = $LB_ip"
else
echo "$key of $service v1/value ${acutal_value} is not matching GCP LB ${LB_ip}"
fi
done < key.txt
The above script works fine and gives the exact output when mapping.txt alone is used as the input file for the LB_name variable.
But in my current situation I have 4 mapping files (mapping1.txt to mapping4.txt), and each mapping file should be used for one full pass over all the properties in the key file before moving on to files 2, 3 and 4.
Here is the exact line that we are talking about:
LB_name=$(cat mapping.txt | grep "$key" | awk '{print $2}')
Waiting for suggestions!
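Not a definitive answer, but one direction is to wrap the existing while read loop in an outer loop over the mapping files and substitute the current file name into that grep. A rough sketch, assuming the four files sit next to the script:
#!/bin/bash
# one full pass over key.txt per mapping file
for mapfile in mapping1.txt mapping2.txt mapping3.txt mapping4.txt
do
    echo "VALIDATING with $mapfile"
    while read -r i
    do
        key=$(echo "$i" | awk '{print $1}')
        LB_name=$(grep "$key" "$mapfile" | awk '{print $2}')
        echo "$key -> $LB_name"
        # ...rest of the original loop body goes here unchanged...
    done < key.txt
done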
Is there any way to write bulk data to a file in a shell script, instead of writing it line by line?
In the script below, I want to write the difference between the arrival time and the generation time of the files to test.csv.
########################################################
echo "Starting the Execution for Time difference\n";
############################################################
# Functions used across the script
datediff() {
Unixtime=`echo $1 $2 $3 $4`
Filetime=`echo $5 $6 $7 $8`
echo $Unixtime;
echo $Filetime;
d1=`date -d "$Unixtime" +%s`
d2=`date -d "$Filetime" +%s`
echo $d1;
echo $d2;
TIME_DIFF=`expr $d1 - $d2`
TIME_DIFF=`expr $TIME_DIFF / 60`
echo $TIME_DIFF;
echo "$Unixtime,$Filetime,$TIME_DIFF,$9" >> ../test.csv
}
rm -f ../test.csv;
for i in `ls -1 | grep -v 'DelayCheck.s*'`
do
DayMonth=`ls -lrt $i | awk '{print $7" "$6" "}'`
Year=`ls --full-time $i | awk '{print $6}' | cut -c1-4`
HourMin=`ls -lrt $i | awk '{print " "$8}'`
timeA=`echo $DayMonth $Year $HourMin`
FileYearMonDay=`ls -ltr $i | awk '{print $9}' | awk -F'--' '{print $3}' | cut -c2-9`
timeB1=`date -d $FileYearMonDay +'%d %b %Y'`
timeB2=`echo $i | awk -F'--' '{print substr($3,10,13)}' | sed -e 's/../:&/2g'`
timeB=`echo $timeB1 $timeB2`
echo "Time A is $timeA";
echo " Time b is $timeB";
datediff $timeA $timeB $i
done
echo $?;
The script works fine, but the problem is that there are over 100k files, so its performance is bad.
I tried to find a way to write bulk data to a file, but I didn't find any solution.
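One common way to cut down the per-row writes is to print each row to stdout and redirect the whole loop to the CSV once, instead of re-opening ../test.csv with >> on every call. A minimal, self-contained sketch of just that technique, with a hypothetical emit_row standing in for your datediff:
# emit_row is a hypothetical stand-in for datediff: it only prints the CSV row
emit_row() {
    echo "$1,$2,$3"
}

for f in *
do
    case "$f" in DelayCheck.s*) continue ;; esac   # same exclusion as the grep -v
    emit_row "$f" "arrivaltime" "generationtime"
done > ../test.csv   # the file is opened once and all rows are written in bulk
In your script that would mean having datediff echo its line instead of appending, and putting > ../test.csv after done. That said, the several ls and date invocations per file are probably the bigger cost for 100k files.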
I have a shell script and I need help making it efficient. I am using a temp file to store and read the data, but I want to keep the data in memory instead.
The script collects metrics from a Postgres database using pgbench, writes them to a temp file, and then reads from it.
I want to stop using temp files and use memory instead.
The script works; I just need help getting rid of reading the data from temp files.
INPUT=`mktemp`
#/usr/pgsql-9.5/bin/pgbench -c1 -j1 -t 1000 -S man > $INPUT
TESTTIME=15 #seconds
echo "Waiting $TESTTIME seconds..."
/usr/pgsql-9.5/bin/pgbench -c1 -j1 -T $TESTTIME -r man > $INPUT
OLDIFS=$IFS
IFS=" "
[ ! -f $INPUT ] && { echo "$INPUT file not found"; exit 99; }
tps=`cat $INPUT |awk '/^tps/ {print $3}' |awk -F'.' '{print $1}' |head -n1`
update_l=`cat $INPUT |awk '/UPDATE/ {print $1}' |tail -n1`
select_l=`cat $INPUT |awk '/SELECT/ {print $1}' |tail -n1`
insert_l=`cat $INPUT |awk '/INSERT/ {print $1}' |tail -n1`
echo ${PLOTTER_PREFIX}.tps $tps kv
echo ${PLOTTER_PREFIX}.update_latency $update_l kv
echo ${PLOTTER_PREFIX}.select_latency $select_l kv
echo ${PLOTTER_PREFIX}.insert_latency $insert_l kv
#{ while read line; do
# # statsite_buildData ${PLOTTER_PREFIX}.latency average ${latency average} kv
# echo ${PLOTTER_PREFIX}.${line} kv
# done } < $INPUT
statsite_sendData
#echo $Test
IFS=$OLDIFS
rm -f $INPUT
You can capture the output of the command to a variable, like so:
output=$(/usr/pgsql-9.5/bin/pgbench -c1 -j1 -T $TESTTIME -r man)
Then just use echo instead of cat and substitute $INPUT with the variable name.
tps=`echo "$output" | awk '/^tps/ {print $3}' | awk -F'.' '{print $1}' |head -n1`
update_l=`echo "$output" | awk '/UPDATE/ {print $1}' | tail -n1`
...
I would also suggest using $() instead of surrounding commands with backticks. So the above would become:
tps=$(echo "$output" | awk '/^tps/ {print $3}' | awk -F'.' '{print $1}' |head -n1)
update_l=$(echo "$output" | awk '/UPDATE/ {print $1}' | tail -n1)
...
#!/bin/sh
if [ "`echo $desc $status | awk -F"," '{print $3}' | awk -F" " '{print $1}' | sed '/^$/d'`" != "OK" ]; then
echo "howdy dody"
fi
echo $desc $status | awk -F"," '{print $3}' | awk -F" " '{print $1}' | sed '/^$/d'
The first if-condition won't run; I'm guessing it's because of improper quoting, but I can't figure it out.
Thanks in advance for any help.
You can also use single quotes around the argument to the -F option as you have around other arguments:
if [ "`echo $desc $status | awk -F',' '{print $3}' | awk -F' ' '{print $1}' | sed '/^$/d'`" != "OK" ]; then
It is much easier to write your test if you wrap it in a function:
mytest()
{
echo "$1 $2" \
| awk -F"," '{print $3}' \
| awk -F" " '{print $1}' \
| sed '/^$/d'
}
This way, you can verify that it works correctly. Once you have gained this confidence:
if [ "$(mytest "$desc" "$status")" != "OK" ]; then
echo "howdy doody"
fi
or
if mytest "$desc" "$status" | grep -q -v '^OK$'; then
echo "howdy doody"
fi
If you're using Bash, I'd recommend $(...) instead of back-quotes. What error messages do you get? My guess is that the -F"," option to awk is not being quoted properly. Try inserting \ to escape the quotation marks.
At first glance, you might want to try escaping some of the double quotes:
if [ "`echo $desc $status | awk -F"," '{print $3}' | awk -F" " '{print $1}' | sed '/^$/d'`" != "OK" ]; then
echo "howdy dody"
fi
to
if [ "`echo $desc $status | awk -F\",\" '{print $3}' | awk -F\" \" '{print $1}' | sed '/^$/d'`" != "OK" ]; then
echo "howdy doody"
fi
Escaping the double quotes is certainly a good idea, but it looks like the $3 and the $1 are intended to be interpreted by awk. They are being interpreted by your shell instead. You probably want to escape the '$'s. (It is possible that you have meaningful values for $1 and $3 in the shell, but not likely.)