I have a hash which looks like this:
$VAR1 = {
'638047' => {
'commands' => 'perl nSWIFT/bin/tqtest.pl -var clist=',
},
'638049' => {
'commands' => 'perl nSWIFT/bin/tqtest.pl -var clist=',
}
};
I want to create a directory where each file name is a hash key, and each file stores the number generated by running that key's command (perl nSWIFT/bin/tqtest.pl -var clist=). The numbers should be stored in the file named after their key. For example, if the commands produce:
'638047' -> '638050' and '638049' -> '638051'
then the file 638047 should contain 638050, and so on.
I have tried this but am not getting there.
my %stat
my @qbsid_filename = keys %stat;
foreach (@qbsid_filename){
open QBS, ">/root/qbsid/$_";
}
my $newqbsid, @files;
opendir (DIREC, '/root/qbsid') or die $!;
foreach my $qbsid ( keys %stat){
my $cmd = $stat{$qbsid}->{commands};
if ($cmd =~ m/perl(.*)/){
$ex_cmd = $1;
}
$newqbsid = qx | perl $ex_cmd|;
}
close (DIREC);
I am not very good with algorithms, so I am finding this difficult.
Please let me know if I am going wrong somewhere or if you need more information. Thanks.
Try this (note that a hash is initialized with parentheses, not braces):
my %stat = ( ... );
while (my ($qbsid_filename, $qbsid) = each %stat) {
my $cmd = $qbsid->{commands};
next unless $cmd =~ m/^perl/;
# one output file per hash key
open my $qbs, '>', "/root/qbsid/$qbsid_filename"
or die "Cannot open $qbsid_filename: $!";
# run the command and read its output through a pipe
open my $qbscmd, '-|', $cmd
or die "Cannot execute $cmd: $!";
while (<$qbscmd>) {
print $qbs $_;
}
close ($qbscmd);
close ($qbs);
}
Related
I need to create a file with all the HashiCorp Vault key/value pair data using a shell script.
I want to dump all the data from Vault to a flat file.
Please advise on the best way to do it.
Thanks in advance,
Prudhvi
Just for keys and values you can use my little Perl script 'vault-backup', which also freezes the data using the correct vault commands.
Please note that this does NOT create a full backup of your Vault! There are no methods being backed up, or any other (unlistable) stuff outside the secrets. It's only usable for simple keys and values. It also probably isn't usable for multiline or binary values. You can patch the script to support that, if you like. ;)
#!/usr/bin/perl
#
# Usage: vault-backup [<PATH> [stdout]]
use Data::Dumper;
use Storable qw(freeze thaw);
# Set vault environment variables
# Always end with a " && " for the actual command
my $setenv =
"VAULT_ADDR=https://myvault.somewhere.com:8200 && ".
"VAULT_CA_PATH=/etc/yourcertificates/ && ";
my $path = $ARGV[0] || "secret/";
if ($path!~/\/$/) {
$path="$path/";
}
push @list, getData($path);
if ($ARGV[1] eq "stdout") {
print Dumper(\@list);
} else {
my $fn="vault-backup-frozen-".time().".dat";
open W,">$fn";
print W freeze(\#list);
close W;
print STDERR "Wrote data to $fn\n";
}
sub getData {
my $path=shift;
print STDERR "Starting getData($path)\n";
my @ret=();
my $command="$setenv vault kv list -tls-skip-verify $path | tail -n+3 ";
print STDERR "starting command: $command\n";
my @lines = `$command`;
chomp @lines;
foreach my $line (@lines) {
if ($line=~/\/$/) {
my @result = getData($path.$line);
if (scalar(@result)>0) {
# Find deeper results
push @ret, @result;
} else {
# empty final dir, no values
push @ret, { path => $path.$line };
}
} else {
# Found a key!
my $command="$setenv vault kv get -tls-skip-verify $path$line";
print STDERR "starting command: $command\n";
my $values = `$command`;
push @ret, {path=>$path.$line, value=>$values};
}
}
return @ret;
}
To restore the data, you can use the script below. It handles data only, it does not act on metadata.
#!/usr/bin/perl
# Usage: vault-restore <backup-filename>
use Data::Dumper;
use Storable qw(thaw);
my %all_entries;
# Set vault environment variables
# Always end with a " && " for the actual command
my $setenv =
"VAULT_ADDR=https://myothervault.somewhere.com:8200 && ".
"VAULT_CA_PATH=/etc/mycertificates/ && ";
# Read the data
my $fn = $ARGV[0] || die("I need a filename with the frozen data");
open F,"<$fn";
my @list = @{ thaw(join("",<F>)) };
close F;
print STDERR "Read ".scalar(#list)." entries.\n";
# Process the data
foreach my $entry (@list) {
print STDERR "\n# adding entry -> $entry->{path}\n";
addEntry($entry);
}
foreach my $path (keys %all_entries) {
my $keyvalues="";
foreach my $key (keys %{$all_entries{$path}}) {
my $value=$all_entries{$path}{$key};
$keyvalues.="'$key=$value' ";
}
print STDERR "vault kv put $path $keyvalues\n";
# `$command`;
}
sub addEntry {
my $entry=shift;
my $path = $entry->{'path'};
if ($entry->{'value'}) {
my $values = $entry->{value};
my @list=split("\n", $values);
my $metadata_engage=0;
my $data_engage=0;
foreach my $keyvalue (@list) {
if ($keyvalue=~/==== Metadata ====/) {
$metadata_engage=1;
$data_engage=0;
} elsif ($keyvalue=~/==== Data ====/) {
$metadata_engage=0;
$data_engage=1;
} elsif ($data_engage) {
my ($key,$value)=($keyvalue=~/^([^ ]+) +(.*)$/);
if ($key ne "Key" && $key ne "---") {
# print STDERR "key=$key ; value=$value\n";
$all_entries{$path}{$key}=$value;
} else {
# print STDERR "-- separator\n";
}
}
}
} else {
print STDERR "Found a final but empty path: $path\n";
}
}
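Putting the two scripts together, a round trip could look like this (assuming both scripts are executable; the .dat file name is whatever the backup run reports):

./vault-backup secret/
./vault-restore vault-backup-frozen-1498741450.dat

The restore run only prints the vault kv put commands to STDERR for review; the backtick line that would execute them is commented out in the script.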
This is my Perl script:
use strict;
use warnings;
use XML::Twig;
use Data::Dumper;
sub xml2array{
my $path = shift;
my $twig = XML::Twig->new->parsefile($path);
return map { $_ -> att('VirtualPath') } $twig -> get_xpath('//Signals');
}
sub compareMappingToArray {
my $mapping = shift;
my $signalsRef = shift;
my $i = 1;
print "In file : $mapping\n";
open(my $fh, $mapping);
while (my $r = <$fh>) {
chomp $r;
if ($r =~ /\'(ModelSpecific.*)\'/) {
my $s = $1;
my @matches = grep { /^$s$/ } @{$signalsRef};
print "line $i : not found - $s\n" if scalar #matches ==0;
print "line $i : multiple $s\n" if scalar #matches > 1;
}
$i = $i + 1; # keep line index
}
}
my $mapping = "C:/Users/HOR1DY/Desktop/Global/TA_Mapping/CAN/CAN_ESP_002_mapping.pm";
my @virtualpath = xml2array("SignalModel.xml");
compareMappingToArray($mapping, \@virtualpath);
The script works well. Its aim is to compare the files "SignalModel.xml" and "CAN_ESP_002_mapping.pm" and put the lines that didn't match into a .TXT file. Here is what the .TXT file looks like:
In file : C:/Users/HOR1DY/Desktop/Global/TA_Mapping/CAN/CAN_ESP_002_mapping.pm
line 331 : not found - ModelSpecific.EID.NET.CAN_Engine.VCU.Transmit.VCU_202.R2B_VCU_202__byte_3
line 348 : not found - ModelSpecific.EID.NET.CAN_Engine.CMM_WX.Transmit.CMM_HYB_208.R2B_CMM_HYB_208__byte_2
line 368 : not found - ModelSpecific.EID.NET.CAN_Engine.VCU.Transmit.VCU_222.R2B_VCU_222__byte_0
But for this script, I put the two files that need to be compared inside the code. Instead of doing that, I would like to run the script from the Windows cmd line with something like:
C:\Users>perl CANMappingChecker.pl -'file 1' 'file 2'
All the files are in a .zip file, so if the script could go inside the archive and take the two files I need for comparison, that would be perfect.
I really don't know how to do this or what to put inside my script to make it work from the Windows cmd line. Thanks for your help!
Program (or script) parameters are stored in the @ARGV array. shift and pop without any parameter will work on @ARGV when used outside of a sub; in a sub they operate on @_.
See Archive::Zip for zip file handling.
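A minimal sketch of how those two pieces could combine, assuming the archive name and the two member names are passed as command-line arguments (all names are illustrative, not a tested program):

use strict;
use warnings;
use Archive::Zip qw( :ERROR_CODES );

# Usage: perl CANMappingChecker.pl <archive.zip> <xml member> <mapping member>
my ($zipfile, $xml_name, $mapping_name) = @ARGV;
die "Usage: $0 <zip> <xml member> <mapping member>\n" unless @ARGV == 3;

my $zip = Archive::Zip->new;
$zip->read($zipfile) == AZ_OK
    or die "Cannot read archive '$zipfile'";

# extract both members into the current directory, dropping any internal paths
for my $name ($xml_name, $mapping_name) {
    $zip->extractMemberWithoutPaths($name) == AZ_OK
        or die "Cannot extract '$name' from '$zipfile'";
}

# then reuse the existing subs on the extracted copies:
# my @virtualpath = xml2array($xml_name);
# compareMappingToArray($mapping_name, \@virtualpath);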
Using Strawberry Perl 5.22.0 in Windows 7. Is there a more "perlish" way to write this snippet of code? I hate the duplication of file open sections, but cannot think of a way to make it only open once because of the requirement to test the creation time.
...
my $x;
my $fh;
my $sentinelfile = "Logging.yes"; #if this file exists then enable logging
my $logfile = "transfers.log";
my $log = 0; #default to NO logging
$log = 1 if -e $sentinelfile; #enable logging if sentinel file exists
if($log){
#logfile remains open after this so remember to close at end of program!
if (-e $logfile) { #file exists
open($fh, "<", $logfile); #open for read will NOT create if not exist
chomp ($x = <$fh>); #grab first row
close $fh;
if (((scalar time - $x)/3600/24) > 30) { #when ~30 days since created
rename($logfile, $logfile . time); #rename existing logfile
open($fh, ">", $logfile); #open for write and truncate
print $fh time,"\n"; #save create date
print $fh "--------------------------------------------------\n";
} else { #file is not older than 30 days
open($fh, ">>", $logfile); #open for append
}
} else { #file not exist
open($fh, ">", $logfile); #open new for write
print $fh time,"\n"; #save create date
print $fh "--------------------------------------------------\n";
}
} #if $log
...
To recap: logfile logs stuff. First row of file contains the logfile creation date. Second row contains horizontal rule. Rest of file contains text. Around 30 days after file was created, rename file and start a new one. After the above chunk of code the logfile is open and ready for logging stuff. It gets closed at the end of the rest of the program.
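For illustration, under this scheme a fresh logfile begins like so (the epoch value is only an example):

1498741450
--------------------------------------------------
...logged text follows...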
There are other, non-cosmetic problems with your code: a) You do not ever check if your calls to open succeeded; b) You are creating a race condition. The file can come into existence after the -e check has failed. The subsequent open $fh, '>' ... would then clobber it; c) You don't check if your rename call succeeded etc.
The following would be a partial improvement on your existing code:
if ($log) {
if (open $fh, '<', $logfile) { #file exists
chomp ($x = <$fh>);
close $fh
or die "Failed to close '$logfile': $!";
if (((time - $x)/3600/24) > 30) {
my $rotated_logfile = join '.', $logfile, time;
rename $logfile => $rotated_logfile
or die "Failed to rename '$logfile' to '$rotated_logfile': $!";
open $fh, '>', $logfile
or die "Failed to create '$logfile'";
print $fh time, "\n", '-' x 50, "\n";
}
else {
open $fh, '>>', $logfile
or die "Cannot open '$logfile' for appending: $!";
}
}
else {
open $fh, '>', $logfile
or die "Cannot to create '$logfile': $!";
print $fh time, "\n", '-' x 50, "\n";
}
}
It would be better to abstract every bit of discrete functionality to suitably named functions.
For example, here is a completely untested re-write:
use autouse Carp => qw( croak );
use constant SENTINEL_FILE => 'Logging.yes';
use constant ENABLE_LOG => -e SENTINEL_FILE;
use constant HEADER_SEPARATOR => '-' x 50;
use constant SECONDS_PER_DAY => 24 * 60 * 60;
use constant ROTATE_AFTER => 30 * SECONDS_PER_DAY;
my $logfile = 'transfers.log';  # file name carried over from the original snippet
my $fh;
if (ENABLE_LOG) {
if (my $age = read_age( $logfile )) {
if ( is_time_to_rotate( $age ) ) {
rotate_log( $logfile );
}
else {
$fh = open_log( $logfile );
}
}
unless ($fh) {
$fh = create_log( $logfile );
}
}
sub is_time_to_rotate {
my $age = shift;
return $age > ROTATE_AFTER;
}
sub rotate_log {
my $file = shift;
my $saved_file = join '.', $file, time;
rename $file => $saved_file
or croak "Failed to rename '$file' to '$saved_file': $!"
return;
}
sub create_log {
my $file = shift;
open my $fh, '>', $file
or croak "Failed to create '$file': $!";
print $fh time, "\n", HEADER_SEPARATOR, "\n"
or croak "Failed to write header to '$file': $!";
return $fh;
}
sub open_log {
my $file = shift;
open my $fh, '>>', $file
or croak "Failed to open '$file': $!";
return $fh;
}
sub read_age {
my $file = shift;
open my $fh, '<', $file
or return;
defined (my $creation_time = <$fh>)
or croak "Failed to read creation time from '$file': $!";
return time - $creation_time;
}
If you need to read a line of a file, rename it and then work with it, you have to open it twice.
However, you can also do away with using that first line.
On Windows, according to perlport (Files and Filesystems), the inode change time-stamp (ctime) "may really" mark the file creation time. This is likely to be completely suitable for a log file that doesn't get manipulated and moved around. It can be obtained with the -C file-test operator
my $days_float = -C $filename;
Now you can numerically test this against 30. Then there is no need to print the file's creation time to its first line (but you may as well if it is useful for viewing or other tools).
Also, there is the module Win32API::File::Time, with the purpose to
provide maximal access to the file creation, modification, and access times under MSWin32
Please do read the docs for some caveats. I haven't used it but it seems tailored for your need.
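An untested sketch of how it might be used here, going by the module's documented interface (rotate_log is a hypothetical placeholder for whatever rotation you choose):

use Win32API::File::Time qw( GetFileTime );

# on Windows filesystems the third value is the file creation time
my ($atime, $mtime, $ctime) = GetFileTime($logfile);
my $age_days = (time - $ctime) / (24 * 60 * 60);
rotate_log($logfile) if $age_days > 30;  # hypothetical rotation sub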
A good point is raised in a comment: apparently the OS retains the original time-stamp as the file is being renamed. In that case, when the file's too old copy it into a new one (with the new name) and delete it, instead of using rename. Then open that log file anew, so with a new time-stamp.
Here is a complete example
archive_log($logfile) if -f $logfile and -C $logfile > 30;
open my $fh_log, '>>', $logfile or die "Can't open $logfile: $!";
say $fh_log "Log a line";
sub archive_log {
my ($file) = @_;
require POSIX; POSIX->import('strftime');
my $ts = strftime("%Y%m%d_%H:%M:%S", localtime); # 20170629_12:44:10
require File::Copy; File::Copy->import('copy');
my $archive = $file . "_$ts";
copy ($file, $archive) or die "Can't copy $file to $archive: $!";
unlink $file or die "Can't unlink $file: $!";
}
The archive_log archives the current log by copying it and then removes it.
So after that we can just open for append, which creates the file if not there.
The -C operator returns undef for a file that doesn't exist, and since its output is used in a numerical test we need the -f check first.
Since this happens once a month I load the modules at runtime, with require and import, only when the log actually needs to be rotated. If you already use File::Copy then there is no need for this. As for the time-stamp, I threw in something to make this a working example.
I tested this on UNIX, by changing -C to -M and tweaking the timestamp by touch -t -c.
Better yet, to fully reduce the caller's code, move the tests into the sub as well:
my $fh_log = open_log($logfile);
say $fh_log "Log a line";
sub open_log {
my ($file) = @_;
if (-f $file and -C $file > 30) {
# code from archive_log() above, to copy and unlink $file
}
open my $fh_log, '>>', $file or die "Can't open $file: $!";
return $fh_log;
}
Note. On UNIX the file's creation time is not kept anywhere. The closest notion is the ctime above, but this is of course different. For one thing, it changes with many operations, for instance mv, ln, chmod, chown, chgrp (and probably others).
I have hundreds of files, each with a different number of entries (>xxxx), and I want to keep only the entries shared among all files, separately per file. I'm not sure what the best method for this is, maybe Perl! I used bash's sort and uniq, but didn't get the correct answer. The IDs start with > and are followed by 4 characters in all files.
1.fa
>abcd
CTGAATGCC
2.fa
>abcd
AAATGCGCG
>efgh
CGTAC
3.fa
>abcd
ATGCAATA
>efgh
TAACGTAA
>ijkl
TGCAA
Final results, of this example would be:
1.fa
>abcd
CTGAATGCC
2.fa
>abcd
AAATGCGCG
3.fa
>abcd
ATGCAATA
This Perl program will do as you ask. It uses Perl's built-in in-place edit functionality and renames the original files to 1.fa.bak etc. It shouldn't have a problem with blank lines in your data as long as the sequence is always on one line immediately following the ID.
use strict;
use warnings 'all';
my @files = glob '*.fa';
printf "Processing %d file%s\n", scalar @files, @files == 1 ? "" : "s";
exit if @files < 2;
my %ids;
{
local @ARGV = @files;
while ( <> ) {
++$ids{$1} if /^>(\S+)/;
}
}
# remove keys that aren't in all files
delete @ids{ grep { $ids{$_} < @files } keys %ids };
my $n = keys %ids;
printf "%d ID%s common to all files\n", $n, $n == 1 ? '' : "s";
exit unless $n;
{
local @ARGV = @files;
local $^I = '.bak';
while ( <> ) {
next unless /^>(\S+)/ and $ids{$1};
print;
print scalar <>;
}
}
Here is a Perl solution that may help you:
use feature qw(say);
use strict;
use warnings;
my $file_dir = 'files';
chdir $file_dir;
my @files = <*.fa>;
my $num_files = scalar @files;
my %ids;
for my $file (@files) {
open ( my $fh, '<', $file) or die "Could not open file '$file': $!";
while (my $id = <$fh>) {
chomp $id;
chomp (my $sequence = <$fh>);
$ids{$id}++;
}
close $fh;
}
for my $file (@files) {
open ( my $fh, '<', $file) or die "Could not open file '$file': $!";
my $new_name = $file . '.new';
open ( my $fh_write, '>', $new_name ) or die "Could not open file '$new_name': $!";
while (my $id = <$fh>) {
chomp $id;
chomp (my $sequence = <$fh>);
if ( $ids{$id} == $num_files ) {
say $fh_write $id;
say $fh_write $sequence;
}
}
close $fh_write;
close $fh;
}
It assumes that all the .fa files are located in the directory named $file_dir, and it writes the new sequences to new files in the same directory. The new file names get the .new extension.
I'm trying to emulate RapidCRC's ability to check crc32 values within filenames on Windows Vista Ultimate 64-bit. However, I seem to be running into some kind of argument limitation.
I wrote a quick Perl script, created a batch file to call it, then placed a shortcut to the batch file in %APPDATA%\Microsoft\Windows\SendTo
This works great when I select about 20 files or less, right-click and "send to" my batch file script. However, nothing happens at all when I select more than that. I suspect there's a character or number of arguments limit somewhere.
Hopefully I'm missing something simple and that the solution or a workaround isn't too painful.
References:
batch file (crc32_inline.bat):
crc32_inline.pl %*
Perl notes:
I'm using (strawberry) perl v5.10.0
I have C:\strawberry\perl\bin in my path, which is where crc32.bat exists.
perl script (crc32_inline.pl):
#!/usr/bin/env perl
use strict;
use warnings;
use Cwd;
use English qw( -no_match_vars );
use File::Basename;
$OUTPUT_AUTOFLUSH = 1;
my $crc32_cmd = 'crc32.bat';
my $failure_report_basename = 'crc32_failures.txt';
my %failures = ();
print "\n";
foreach my $arg (@ARGV) {
# if the file has a crc, check to see if it matches the calculated
# crc.
if (-f $arg and $arg =~ /\[([0-9a-f]{8})\]/i) {
my $crc = uc $1;
my $basename = basename($arg);
print "checking ${basename}... ";
my $calculated_crc = uc `${crc32_cmd} "${arg}"`;
chomp($calculated_crc);
if ($crc eq $calculated_crc) {
print "passed.\n";
}
else {
print "FAILED (calculated ${calculated_crc})\n";
my $dirname = dirname($arg);
$failures{$dirname}{$basename} = $calculated_crc;
}
}
}
print "\nReport Summary:\n";
if (scalar keys %failures == 0) {
print " All files OK\n";
}
else {
print sprintf(" %d / %d files failed crc32 validation.\n" .
" See %s for details.\n",
scalar keys %failures,
scalar @ARGV,
$failure_report_basename);
my $failure_report_fullname = $failure_report_basename;
if (defined -f $ARGV[0]) {
$failure_report_fullname
= dirname($ARGV[0]) . '/' . $failure_report_basename;
}
$OUTPUT_AUTOFLUSH = 0;
open my $fh, '>' . $failure_report_fullname or die $!;
foreach my $dirname (sort keys %failures) {
print {$fh} $dirname . "\n";
foreach my $basename (sort keys %{$failures{$dirname}}) {
print {$fh} sprintf(" crc32(%s) basename(%s)\n",
$failures{$dirname}{$basename},
$basename);
}
}
close $fh;
$OUTPUT_AUTOFLUSH = 1;
}
print sprintf("\n%s done! (%d seconds elapsed)\n" .
"Press enter to exit.\n",
basename($0),
time() - $BASETIME);
<STDIN>;
I recommend just putting a shortcut to your script in the "Send To" directory instead of going through a batch file (which is subject to cmd.exe's limits on command-line length).
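For example, the shortcut's target could invoke the interpreter directly with the script as its argument (paths here are illustrative):

C:\strawberry\perl\bin\perl.exe C:\scripts\crc32_inline.pl

Explorer then appends the selected file paths straight to that target's command line, so cmd.exe and its shorter limit never get involved.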