AWK performance while processing big files - bash

I have an awk script that I use to calculate how much time some transactions take to complete. The script gets the unique ID of each transaction and stores the minimum and maximum timestamp for each one. Then it calculates the difference and, at the end, shows the results that are over 60 seconds.
It works very well with a few hundred thousand lines (200k), but it takes much longer when used in the real world. I tested it several times and it takes about 15 minutes to process about 28 million lines. Can I consider this good performance, or is it possible to improve it?
I'm open to any kind of suggestion.
Here is the complete code:
zgrep -E "\(([a-z0-9]){15,}:" /path/to/very/big/log | awk '{
    gsub("[()]|:.*","",$4);                         #just removing ugly chars
    ++cont
    min=$4"min"                                     #name for minimum value of current transaction
    max=$4"max"                                     #same as previous, just for readability
    split($2,secs,/[:,]/)                           #split hours, minutes and seconds
    seconds = 3600*secs[1] + 60*secs[2] + secs[3]   #turn everything into seconds
    if(arr[min] > seconds || arr[min] == 0)
        arr[min]=seconds
    if(arr[max] < seconds)
        arr[max]=seconds
    dif=arr[max] - arr[min]
    if(dif > 60)
        result[$4] = dif
}
END{
    for(x in result)
        print x" - "result[x]
    print ":Processed "cont" lines"
}'

You don't need to calculate the dif every time you read a record. Just do it once in the END section.
You don't need that cont variable, just use NR.
You don't need to build separate min and max key names by concatenating onto $4; string concatenation is slow in awk. Use two separate arrays instead.
You shouldn't change $4 as that will force the record to be recompiled.
Try this:
awk '{
    name = $4
    gsub(/[()]|:.*/,"",name)                        #just removing ugly chars
    split($2,secs,/[:,]/)                           #split hours, minutes and seconds
    seconds = 3600*secs[1] + 60*secs[2] + secs[3]   #turn everything into seconds
    if ( !(name in min) ) {                         #first time we see this transaction id
        min[name] = max[name] = seconds
    }
    else {
        if (min[name] > seconds) {
            min[name] = seconds
        }
        if (max[name] < seconds) {
            max[name] = seconds
        }
    }
}
END {
    for (name in min) {
        diff = max[name] - min[name]
        if (diff > 60) {
            print name, "-", diff
        }
    }
    print ":Processed", NR, "lines"
}'
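A simple way to check where the time actually goes in a pipeline like this is to time the stages separately (this is only a rough check; script.awk below is just a hypothetical file holding the awk program above):

time zgrep -E "\(([a-z0-9]){15,}:" /path/to/very/big/log > /dev/null
time zgrep -E "\(([a-z0-9]){15,}:" /path/to/very/big/log | awk -f script.awk > /dev/null

If the first command already takes nearly as long as the second, the awk part is not the bottleneck and the effort is better spent on the zgrep/decompression stage.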

After making some tests, and with the suggestions given by Ed Morton (both for code improvement and for performance testing), I found that the bottleneck was the zgrep command. Here is an example that does several things:
Checks if we have a transaction line (first if)
Cleans the transaction id
Checks if it has already been registered (second if) by looking it up in the array
If it is not registered, checks if it is the appropriate type of transaction and, if so, registers the timestamp in seconds
If it is already registered, saves the new timestamp as the maximum
After all that, it performs the necessary operations to calculate the time difference
Thank you very much to all who helped me.
zcat /veryBigLog.gz | awk '
{
    if($4 ~ /^\([[:alnum:]]/ ){
        name=$4; gsub(/[()]|:.*/,"",name)
        if(!(name in min)){
            if($0 ~ /TypeOFTransaction/ ){
                split($2,secs,/[:,]/)
                seconds = 3600*secs[1] + 60*secs[2] + secs[3]
                max[name] = min[name] = seconds
                print length(min)" new "name" start at "seconds
            }
        }else{
            split($2,secs,/[:,]/)
            seconds = 3600*secs[1] + 60*secs[2] + secs[3]
            if( max[name] < seconds) max[name]=seconds
            print name" new max "max[name]
        }
    }
}
END{
    for(x in min){
        dif=max[x] - min[x]
        print max[x]" max - min "min[x]" : "dif
    }
    print "Processed "NR" Records"
    print "Found "length(min)" MOs"
}'

Related

Awk Standard deviation for each unique identifier

I have the following dataset with multiple different ids in column 1, and I wish to calculate the mean and standard deviation of column 2 for each id:
123456 0.1234
123456 0.5673
123456 0.0011
123456 -0.0947
123457 0.9938
123457 0.0001
123457 0.2839
I have the following code to get the mean per id, but I'm struggling to amend it to get the SD as well:
awk '{sum4[$1] += $2; count4[$1]++}; END{ for (id in sum4) { print id, sum4[id]/count4[id] } }' < want3.txt > mean_id.txt
The desired output is a file with id, mean and SD:
123456 0.149275 0.2926
123457 0.425933 0.5118
Any advice would be much appreciated.
Thanks
Here is another approach which is more memory efficient, but possibly less precise for large means.
$ awk -v t=1 '{s[$1]+=$2; ss[$1]+=$2*$2; c[$1]++}
END {for(k in s) print k,m=s[k]/c[k],sqrt((ss[k]-m^2*c[k])/(c[k]-t))}' file
123456 0.149275 0.292628
123457 0.425933 0.51185
This computes the sample standard deviation. If you have the full distribution, not just a sample, you can set t=0 to get the population standard deviation, which will be slightly lower, but for large N they are practically equivalent (within the margin of error due to measurement errors).
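The formula works because sum((x-m)^2) = sum(x^2) - n*m^2, so only the running sums s[k] and ss[k] need to be kept per id. To get the population figure instead, run the same one-liner with t=0 (file is the same sample input as above); the means are unchanged and the SD column simply comes out slightly smaller:

$ awk -v t=0 '{s[$1]+=$2; ss[$1]+=$2*$2; c[$1]++}
     END {for(k in s) print k,m=s[k]/c[k],sqrt((ss[k]-m^2*c[k])/(c[k]-t))}' file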
With GNU awk. Derived from Ivan's answer, which used the population standard deviation (division by n); I switched to the sample standard deviation (division by n-1).
awk '
{
    numrec[$1] += 1
    sum[$1] += $2
    array[$1,numrec[$1]] = $2
}
END {
    for(w in numrec) {
        for(x=1; x<=numrec[w]; x++)
            sumsq[w] += ((array[w,x]-(sum[w]/numrec[w]))^2)
        printf("%d %.6f %.4f\n", w, sum[w]/numrec[w], sqrt(sumsq[w]/(numrec[w]-1)))
    }
}
' file
Output:
123456 0.149275 0.2926
123457 0.425933 0.5118

Parsing multiple instances of data

I am trying to parse multiple instances of data from a text file. I can grep and grab one line and the lat/lon associated with that find, but I am having issues parsing multiple instances:
... CATEGORICAL ...
SLGT 33618675 34608681 35658642 36668567 38218542 41018363
41588227 41918045 41377903 40177805 38927813 37817869
36678030 35068154 33368262 33078321 32888462 33618675
SLGT 30440169 31710202 33010185 33730148 34010037 33999962
33709892 32869871 30979883 29539912 29430025 30440169
SLGT 41788755 41698893 42069059 42639132 43889124 44438960
44438757 43988717 43278708 42398720 41788755
MRGL 42897922 41907743 40147624 38837627 37637700 35897915
35028021 34038079 33118130 31998226 31698419 32078601
32818733 33848809 34758764 36998623 38588677 39458701
40178757 40608870 41069099 43549479 44499512 44809478
45259379 44989263 45109100 45718986 46478920 46758853
46738752 46398664 44768565 44308457 43198218
MRGL 29720174 31900221 33650181 34160154 34430032 34649931
34159800 32539784 31359767 29739808 29299723 28969581
28959440 99999999 26769674 26579796 26139874
TSTM 45077438 43177245 40597113 99999999 30488085 30248563
29588926 28739072 28569092 99999999 27138160 27578139
27908100 27848061 27518032 26968006 26338005 25698017
25338025 25088048 25058071 25238109 25578128 25888157
26218171 26578170 26988163 27138160 99999999 29200399
31910374 33520340 35190229 35450147 36109944 36399709
35779395 36399167 38559059 40189373 41729594 43029985
42820283 42860489 43580863 44121062 44521135 45281179
46271166 47561286 48251548 48671765 49051814 99999999
38810245 37660271 37120322 36950398 37090559 37380662
38090741 39410791 39980777 40930695 41380598 41370510
41190353 40840299 40220263 38810245
From: https://www.spc.noaa.gov/products/outlook/archive/2019/KWNSPTSDY1_201906241300.txt
Here is my code and results:
#!/bin/sh
sed -n '/^MRGL/,/^TSTM/p;/^TSTM/q' day1_status | sed '$ d' | sed -e 's/MRGL//g' > MRGL
while read line
do
count=1
ncols=$(echo $line | wc -w)
while [ $count -le $ncols ]
do
echo $line | cut -d' ' -f$count
((count++))
done
done < MRGL > MRGL_output.txt
cat MRGL_output.txt | sed ':a;s/\B[0-9]\{2\}\>/.&/;ta'| sed 's/./, -/6' > MRGL_final
Results:
one instance of MRGL and the lat/lon associated with that polygon
more MRGL
32947889 34137855 35307825 36147735 36327622 35797468
27107968 25518232 99999999 27088303 28418215 30208125
30618064
The lines above turned into one value per line
more MRGL_output.txt
32947889
34137855
35307825
36147735
36327622
35797468
27107968
25518232
99999999
27088303
28418215
30208125
30618064
Final format that I need it in
more MRGL_final
32.94, -78.89
34.13, -78.55
35.30, -78.25
36.14, -77.35
36.32, -76.22
35.79, -74.68
27.10, -79.68
25.51, -82.32
99.99, -99.99
27.08, -83.03
28.41, -82.15
30.20, -81.25
30.61, -80.64
Just need to parse multiple instances that show up.
UPDATE for better explanation.
... CATEGORICAL ...
ENH 38298326 40108202 40518094 40357974 39907953 39017948
38038052 36148202 35848297 35888367 36618371 38298326
SLGT 30440169 31710202 33010185 33730148 34010037 33999962
33709892 32869871 30979883 29539912 29430025 30440169
SLGT 33548672 34408661 35918543 36858496 38648520 41018363
41588227 41918045 41377903 40177805 38927813 37817869
36678030 35068154 33368262 33078321 32888462 33548672
SLGT 41788755 41698893 42069059 42639132 43889124 44438960
44438757 43988717 43278708 42398720 41788755
MRGL 29720174 31900221 33650181 34160154 34430032 34649931
34159800 32539784 31359767 30059748 29299723 28969581
28959440 99999999 26769674 26579796 26139874
MRGL 42897922 41907743 40147624 38837627 37637700 35897915
35028021 34038079 33118130 31938225 30758424 30678620
30988709 34128741 36208583 37738554 39508601 40628878
41069099 43549479 44499512 44809478 45259379 44989263
45109100 45718986 46478920 46758853 46738752 46398664
44768565 44308457 43198218
TSTM 30488085 29978211 29408316 29068379 99999999 27138160
27578139 27908100 27848061 27518032 26968006 26338005
25698017 25338025 25088048 25058071 25238109 25578128
25888157 26218171 26578170 26988163 27138160 99999999
45427410 43217292 40247181 99999999 28650405 31910374
33520340 35190229 35450147 36109944 36399709 35779395
36769245 38319148 40189373 41219571 41299753 39959979
38220054 37320091 36560136 36070290 36100295 35840394
36790544 37150626 37880709 39110774 40120876 41150895
41600769 41890540 43070599 43580863 43390914 43401262
44171458 45521497 46131301 47181242 47561286 48251548
48671765 49371856
I want to take the data set above, grab the lat and long for each available risk (ENH, SLGT, MRGL, TSTM), and place them into this format:
"Enhanced Risk"
38.29, -83.26
40.10, -82.02
40.51, -80.94
40.35, -79.74
39.90, -79.53
39.01, -79.48
38.03, -80.52
36.14, -82.02
35.84, -82.97
35.88, -83.67
36.61, -83.71
38.29, -83.26
End:
"Slight Risk"
30.44, -101.69
31.71, -102.02
33.01, -101.85
33.73, -101.48
34.01, -100.37
33.99, -99.62
33.70, -98.92
32.86, -98.71
30.97, -98.83
29.53, -99.12
29.43, -100.25
30.44, -101.69
End:
"Slight Risk"
33.54, -86.72
34.40, -86.61
35.91, -85.43
36.85, -84.96
38.64, -85.20
41.01, -83.63
41.58, -82.27
41.91, -80.45
41.37, -79.03
40.17, -78.05
38.92, -78.13
37.81, -78.69
36.67, -80.30
35.06, -81.54
33.36, -82.62
33.07, -83.21
32.88, -84.62
33.54, -86.72
End:
"Slight Risk"
41.78, -87.55
41.69, -88.93
42.06, -90.59
42.63, -91.32
43.88, -91.24
44.43, -89.60
44.43, -87.57
43.98, -87.17
43.27, -87.08
42.39, -87.20
41.78, -87.55
End:
"Marginal Risk"
29.72, -101.74
31.90, -102.21
33.65, -101.81
34.16, -101.54
34.43, -100.32
34.64, -99.31
34.15, -98.00
32.53, -97.84
31.35, -97.67
30.05, -97.48
29.29, -97.23
28.96, -95.81
28.95, -94.40
26.76, -96.74
26.57, -97.96
26.13, -98.74
End:
Here's a little awk program which seems to work, although I'm not certain about some of the details. In particular, I don't know what the minimum value for longitude is; evidently, a value under the minimum has 100 added to it before the longitude is negated. So you'll have to change LON_THRESHOLD to what you consider the correct value.
I've tried to avoid the usual temptation to golf awk programs into a textual minimum, in the hopes that the way this program works is less obscure. But it's entirely possible that some awkisms snuck in anyway. I added a bit of explanation at the end.
BEGIN { risk["HIGH"] = "High Risk"
risk["ENH"] = "Enhanced Risk"
risk["SLGT"] = "Slight Risk"
risk["MRGL"] = "Marginal Risk"
LON_THRESHOLD = 30
END_STRING = "End:"
}
END { if (in_risk) print END_STRING }
in_risk && substr($0, 1, 1) != " " {
print END_STRING "\n" "\n"
in_risk = 0
}
$1 in risk { printf("\"%s\"\n", risk[$1])
in_risk = 2
}
in_risk { for (i = in_risk; i <= NF; ++i) {
lat = substr($i, 1, 4) / 100
lon = substr($i, 5, 4) / 100
if (lon < LON_THRESHOLD) lon += 100
printf "%5.2f, %.2f\n", lat, -lon
}
in_risk = 1
}
Save that program as, for example, noaa.awk, and then apply it with:
awk -f noaa.awk input.txt
By way of explanation:
Awk programs consist of a series of rules. Each rule has a predicate -- that is, an expression which evaluates to a true or false value -- and an action.
Awk processes each line from its input in turn, running through all of the rules and executing the actions of the ones whose predicates evaluate to a true value. Inside the action, you can use the $ operator to access individual fields in the input (by default, fields are separated with whitespace). $0 is the entire input line, and $n is field number n. Unlike bash/sh, $ is an operator and can be applied to an expression.
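For instance (a small illustration, not part of the original program), the field number can be any expression:

echo "one two three" | awk '{ print $NF, $(NF-1) }'
three two

Here NF is the number of fields, so $NF is the last field and $(NF-1) is the one before it.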
BEGIN and END rules are special, in that they are not real patterns. BEGIN rules are executed exactly once, before any other processing; END rules are executed exactly once after all processing is finished. In this example, as is common, BEGIN is used to initialise reference data, while END is used for any necessary termination -- in this case, printing the final End: line.
In cases like this, where the desired action is really dependent on where we are in the file, it's necessary to build some kind of state machine, and I did that using the variable in_risk, which has three possible values:
0 or undefined: We're not currently in a block corresponding to a risk selector.
1: The current line, if it starts with a space, is part of a previously identified risk selector.
2: The current line has been detected as starting with a risk selector.
The reason for the difference between the last two values is that $1 in a line which starts with a risk selector is the risk selector, whereas in a line which starts with a space, $1 is actually the first number. So when we're iterating over the numbers in a line, we have to start with $2 for lines which start with a risk selector.
If you're just asking how to turn a file of lines like AABBCCDD into lines like AA.BB, -CC.DD:
perl -nE '/^(..)(..)(..)(..)$/ && say "$1.$2, -$3.$4"' MRGL_output.txt
(There's almost certainly better ways to get from your original input to those lines, but I'm not really clear on what your posted code is doing or why)
I think this will process your original input correctly, but can't be sure because the numbers in your sample output don't match up with your sample input so I can't verify:
perl -anE 'if (/^MRGL/ .. /^TSTM/) { exit if /^TSTM/; push @nums, @F }
    END { for (@nums) {
        if (/^(..)(..)(..)(..)$/) { say "$1.$2, -$3.$4" }
    }}' day1_status
Got GNU Awk?
awk -v RS='\\s+' '
/[A-Z]/ {p = /^MRGL$/? 1: 0; next}
p {print gensub(/(..)(..)(..)(..)/, "\\1.\\2, -\\3.\\4", "G")}
' file
-v RS='\\s+' - Use any amount of whitespace as the Record Separator
/[A-Z]/ {...} - On records with uppercase alphabetics, do
p = /^MRGL$/? 1: 0; next - Set flag if record is MRGL, else unset, but always skip any other rules.
p {print gensub(...)} - Print result of gensub if flag is set
/(...)/, "\\1", "G" - Capturing groups, Backreferences, Global substitution.

Awk Calc Avg Rows Below Certain Line

I'm having trouble calculating an average of specific numbers in a column BELOW a specific text identifier using awk. I have two columns of data and I'm trying to start the average at a common identifier that repeats, which is 01/1991. So awk should calculate the average of all lines beginning with 01/1991, using that line and the next 21 lines, for a total of 22 rows per average, covering the years 1991-2012. The desired output is an average for each TextID/Name entry over all the Januarys (01) for the years 1991-2012, shown below:
TextID/Name 1
Avg: 50.34
TextID/Name 2
Avg: 45.67
TextID/Name 3
Avg: 39.97
...
sample data:
TextID/Name 1
01/1991, 57.67
01/1992, 56.43
01/1993, 49.41
..
01/2012, 39.88
TextID/Name 2
01/1991, 45.66
01/1992, 34.77
01/1993, 56.21
..
01/2012, 42.11
TextID/Name 3
01/1991, 32.22
01/1992, 23.71
01/1993, 29.55
..
01/2012, 35.10
continues with the same data for TextID/Name 4
I'm getting an answer using the code shown below, but the average starts being calculated BEFORE the specific identifier line rather than on and below that line (01/1991).
awk '$1="01/1991" {sum+=$2} (NR%22==0){avg=sum/22;print"Average: "avg;sum=0;next}' myfile
Thanks and explanations of the solution is greatly appreciated! I have edited the original answer with more description - thank you again.
If you look at your file, the first field is "01/1991," with a comma at the end, not "01/1991". (Also, $1="01/1991" is an assignment, not a comparison; you would need == for that.) And NR%22==0 looks at line numbers divisible by 22, not at the 22 lines after the point you care about.
You can do something like this instead:
awk '
BEGIN { l=-1; }
$1 == "01/1991," {
    l=22;
    s=0;
}
l > 0  { s+=$2; l--; }
l == 0 { print s/22; l--; }'
It has a counter l that it sets to the number of lines to count, then it sums up that number of lines.
You may want to consider simply summing all lines from one 01/1991 to the next though, which might be more robust.
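Here is a minimal sketch of that more robust idea, keyed on the TextID/Name header lines instead of a fixed 22-line window (the file name myfile and the exact header pattern are assumptions about your data):

awk '
/^TextID/ {                  # new block header: report the previous block, if any
    if (count) printf "%s\nAvg: %.2f\n", name, sum / count
    name = $0
    sum = count = 0
}
/^01\// {                    # a January line such as "01/1991, 57.67"
    sum += $2
    count++
}
END {
    if (count) printf "%s\nAvg: %.2f\n", name, sum / count
}' myfile

This averages however many January lines each block actually contains, so it does not break if a block has more or fewer than 22 years.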
If you're allowed to use Perl instead of Awk, you could do:
#!/usr/bin/env perl
$have_started = 0;
$count = 0;
$sum = 0;
while (<>) {
    $line = $_;
    # Grab the value after the date and comma
    if ($line =~ /\d+\/\d+,\s+([\d\.]+)/) {
        $val = $1;
    }
    # Start summing values after 01/1991
    if (/01\/1991,\s+([\d\.]+)/) {
        $have_started = 1;
        $val = $1;
    }
    # If we have started counting, accumulate
    if ($have_started) {
        $count++;
        $sum += $val;
    }
}
print "Average of all values = " . $sum/$count . "\n";
Run it like so:
$ cat your-text-file.txt | above-perl-script.pl

How to analyze time tracking reports with awk?

I'm tracking my time with two great tools, todotxt and punch. With these one
can generate reports that look like this:
2012-11-23 (2 hours 56 minutes):
first task (52 minutes)
second task (2 hours 4 minutes)
2012-11-24 (2 hours 8 minutes):
second task (2 hours 8 minutes)
My question: what's a convenient way for analyzing this kind of output? E.g.
how could I sum up the time that is spent doing "first task"/"second task" or find out
my total working hours for a longer period such as "2012-11-*"?
So, I'd like to have a command such as punch.sh report /regex-for-date-or-task-i'm-interested-in/.
I read and saw that this is possible with awk. But I don't know how to 1) sum minutes + hours and 2) provide "task names with spaces" as variables for awk.
UPDATE:
I'm also tagging my tasks with +tags to mark different projects (as in first task +projecttag). So it would also be great to sum the time spent on all tasks with a certain tag.
Thanks for any help!
Before running this script, uncomment the appropriate gsub() line for your input. Run it like:
awk -f script.awk file
Contents of script.awk:
BEGIN {
    FS="[( \t)]"
}

/^\t/ {
    line = $0

    #### If your input has...
    ## No tags, show hrs in each task
    # gsub(/^[\t ]*| *\(.*/,"",line)
    ## Tags, show hrs in each task
    # gsub(/^[\t ]*| *\+.*/,"",line)
    ## Tags, show hrs in each tag
    # gsub(/^[^+]*\+| *\(.*/,"",line)
    ####

    for(i=1;i<=NF;i++) {
        if ($i == "hours")   h[line]+=$(i-1)
        if ($i == "minutes") m[line]+=$(i-1)
    }
}

END {
    for (i in m) {
        while (m[i] >= 60) { m[i]-=60; h[i]++ }
        print i ":", (h[i] ? h[i] : "0") " hrs", m[i], "mins"
    }
}
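Regarding the second part of the question (passing a task name or date regex that contains spaces into awk): it can be handed over with -v and matched against each line. A rough sketch under the same report format (the variable name pat and the file name report.txt are made up):

awk -v pat="first task" '
BEGIN { FS="[( \t)]" }              # same field-splitting trick as above
$0 ~ pat {                          # only lines matching the task/date pattern
    for (i=1; i<=NF; i++) {
        if ($i == "hours")   h += $(i-1)
        if ($i == "minutes") m += $(i-1)
    }
}
END {
    h += int(m / 60); m %= 60
    print pat ":", h+0, "hrs", m+0, "mins"
}' report.txt

With pat="first task" this sums only that task's lines; with a date pattern such as pat="2012-11-" it picks up the daily summary lines instead, which gives the total working hours for that period.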

Fast editing subtitles file

I like GNU/Linux and writing bash scripts to automate my tasks. But I am a beginner and have a lot of problems with it. So, I have a subtitle file in a format like this (I'm Polish, so these are Polish subtitles):
00:00:27:W zamierzchłych czasach|ziemia pokryta była lasami.
00:00:31:Od wieków mieszkały|w nich duchy bogów.
00:00:37:Człowiek żył w harmonii ze zwierzętami.
I think you understand this simple format. The problem is that in the movie file there is 1:15 of introduction before the movie starts. I want to add 1:15 to each line of the subtitle file, so the example should look like this:
00:01:43:W zamierzchłych czasach|ziemia pokryta była lasami.
00:01:46:Od wieków mieszkały|w nich duchy bogów.
00:01:52:Człowiek żył w harmonii ze zwierzętami.
Could you help me to write this script?
BTW I'm Polish and I'm still learning English, so if you cannot understand me, please say so.
Here's a solution in awk - probably easier than bash for this kind of problem:
#!/usr/bin/awk -f
# Add 1 minute 15 seconds to every subtitle timestamp.
BEGIN {
    FS=":"
}
{
    hr  = $1
    min = $2
    sec = $3
    sec = sec + 15          # add the 15 seconds
    if (sec >= 60) {        # carry seconds over into minutes
        sec = sec - 60
        min = min + 1
    }
    min = min + 1           # add the 1 minute
    if (min >= 60) {        # carry minutes over into hours
        min = min - 60
        hr = hr + 1
    }
    printf "%02d:%02d:%02d:%s\n", hr, min, sec, $4
}
Suggestions for improvement welcome!
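Assuming the script is saved as, say, shift.awk (the name is arbitrary), it can be run against the subtitle file like this, writing the shifted copy to a new file:

awk -f shift.awk napisy.txt > napisy_shifted.txt

One thing to watch out for: since FS is ":" and only $4 is printed, any subtitle text that itself contains a colon would get cut short; printing substr($0, 10) instead of $4 would keep the whole line after the timestamp.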
