Couldn't read config file: No such file or directory at ./try.pl line 8 - perl-module

I am not able to read the contents of the file tutc.txt. I want to write a subroutine that reads the contents of a file, which will be called from the Perl script.
My module is named Module.pm
package Module;
use warnings;
use strict;
use Carp;
use feature "switch";
no warnings 'experimental::smartmatch';
# Constructor and initialisation
sub new {    # class method
    my $class = shift;        # shift without arguments is shift @_ , takes the 1st element of the argument array
    my $self  = {@_};         # create a hash reference from @_, which stores the passed values in a hash
    bless($self, $class);     # turn $self into an object by telling it which class it belongs to, without hardcoding the class name
    $self->{_created} = 1;    # syntax for accessing the contents of a hash reference: $object_name->{property_name}
    return $self;
}
# reading from config file
sub read {
    my ($self, $file) = shift;
    my $self = @_;
    open my $config_fh, $file or return 0;
    $self->{_filename} = $file;    # Store a special property containing the name of the file
    my $section;
    my $config_name;
    my $config_val;
    while (my $line = <$config_fh>)
    {
        chomp $line;
        given ($line) {
            when (/^\[(.*)\]/)
            {
                $section = $1;
            }
            when (/^(?<key>[^=]+)=(?<value>.*)/)
            {
                $section //= '';
                $self->{"$section.$config_name"} = $config_val;
            }
        }
    }
    close $config_fh;
    return $self;
}

sub fetch {
    my ($self, $key) = shift;
    return $self->{$key};
}
My perl file looks like the following:
#!/usr/bin/perl
use Module;
use strict;
use warnings;
my $value = Module->new();
$value->read('/Users/hhansraj/git/edgegrid-curl/tutc.txt') or die "Couldn't read config file: $!";
print "The author's first name is ",$value->fetch('author.firstname'),"\n";
My text file looks like the following:
[author]
firstname=Doug
lastname=Sheppard
[site]
name=Perl.com
url=http://www.perl.com/

In your "read" subroutine, it looks like the first two lines of code (listed below) may be the source of your problem.
my ($self, $file) = shift;
my $self = @_;
In the first line, you're removing the first element of the @_ array (the arguments to the subroutine) and putting it into the $self variable, and nothing is being assigned to the $file variable. In the second line, you are redeclaring the $self variable and assigning to it the size of what's left of the @_ array (an array assigned to a scalar yields its element count). I doubt this is assigning to $self the value/data that you are wanting.
Since the $file variable is never assigned a value, that is probably what's causing the problem with the open call. Also, you did not specify the file mode when opening the file. To fix the missing mode and open the file read-only, you can change the following line:
open my $config_fh, $file or return 0;
to be
open (my $config_fh, "<", $file) or return 0;
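Putting both fixes together, here is a minimal sketch of how the read subroutine could look. Treat it as illustrative rather than a drop-in replacement; it also uses the %+ named captures from your regex, since $config_name and $config_val are never assigned in your loop:
sub read {
    my ($self, $file) = @_;                         # unpack the invocant and the filename from @_
    open my $config_fh, '<', $file or return 0;     # read-only mode, return false if the open fails
    $self->{_filename} = $file;

    my $section = '';
    while (my $line = <$config_fh>) {
        chomp $line;
        if ($line =~ /^\[(.*)\]/) {
            $section = $1;
        }
        elsif ($line =~ /^(?<key>[^=]+)=(?<value>.*)/) {
            $self->{"$section.$+{key}"} = $+{value};    # store under "section.key"
        }
    }
    close $config_fh;
    return $self;
}
With that in place, $value->fetch('author.firstname') should return "Doug" for your sample tutc.txt.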

Related

How to take out certain elements from a pdb file

I am trying to take out certain columns from a pdb file. I already have taken out all lines that start out with ATOM in my code. For some reason my sub functions are not working and I do not know where or how to call them.
My code is:
open (FILE, $ARGV[0])
    or die "Could not open file\n";
my @newlines;
while ( my $line = <FILE> ) {
    if ($line =~ m/^ATOM.*/) {
        push @newlines, $line;
    }
}
my $atomcount = @newlines;
#print "@newlines\n";
#print "$atomcount\n";
##############################################################
# This function will take out the element from each line
# The element is from column 77 and contains one or two letters
sub atomfreq {
    foreach my $record1 (@newlines) {
        my $element = substr($record1, 76, 2);
        print "$element\n";
        return;
    }
}
################################################################
# This function will take out the residue name from each line
# The element is from column 18 and contains 3 letters
sub resfreq {
    foreach my $record2 (@newlines) {
        my $residue = substr($record2, 17, 3);
        print "$residue\n";
        return;
    }
}
As @Ossip already said in this answer, you simply need to call your functions:
sub atomfreq {
...
}
sub resfreq {
...
}
atomfreq();
resfreq();
But I'm not sure whether these functions do what you intended, because the comments imply that they should print every $residue and $element from the @newlines array. You've put a return statement inside the foreach loop, which immediately returns from the whole function (and its loop), so only the first $residue or $element is printed. Because the functions aren't supposed to return anything, you can just drop that statement:
sub atomfreq {
    foreach my $record1 (@newlines) {
        my $element = substr($record1, 76, 2);
        print "$element\n";
    }
}

sub resfreq {
    foreach my $record2 (@newlines) {
        my $residue = substr($record2, 17, 3);
        print "$residue\n";
    }
}
You can just call them right under your other code like this:
atomfreq();
resfreq();

Perl - Uncompressing zip files on windows is too slow

I've created an uncompress function, put together from a few code snippets with a few alterations of my own, which automatically handles the file type.
My current use case is to extract a ~550 MB zip file containing a lot of files (the Qt 5.5 source code) from an SMB share on Windows.
On Linux, this is a tgz file on an NFS share and the function takes 67 seconds to extract it (using a different uncompression method than for zip files).
On Windows it takes more than 15 minutes.
I'm thinking about using a system(7z $source) call as an alternative.
Do you have any suggestions on the fastest method to extract a zip file on Windows?
Please be honest if my uncompress function is crap; I'm no Perl expert... :)
Here's my code:
# uncompress full archive file $archFile to $destPath
sub uncompress
{
    my $fileToExtract = shift;
    my $targetPath    = shift;
    my $silent        = shift;
    my $status;
    my $buff;

    unless (-f $fileToExtract)
    {
        &error ("$fileToExtract is not a file!");
    }
    unless (-d $targetPath)
    {
        &makeDir($targetPath, 1);
    }

    # just look for .tar since all .tar archives with all compressions can be extracted.
    if ($fileToExtract =~ m/.tar/)
    {
        my $pwd = getcwd();
        changeDirectory($targetPath, 1);
        my $tar = Archive::Tar->new();
        $tar->read($fileToExtract);
        $tar->extract();
        changeDirectory($pwd, 1);
        return;
    }
    elsif ($fileToExtract =~ m/.zip$/)
    {
        my $u = new IO::Uncompress::Unzip $fileToExtract or die "Cannot open $fileToExtract: $UnzipError";
        for ($status = 1; $status > 0; $status = $u->nextStream())
        {
            my $header = $u->getHeaderInfo();
            my (undef, $path, $name) = splitpath($header->{Name});
            my $destdir = "$targetPath$path";
            unless (-d $destdir)
            {
                &makeDir( $destdir, 1);
            }
            if ($name =~ m!/$!) {
                last if $status < 0;
                next;
            }
            my $destfile = "$destdir/$name";
            if ($destfile =~ m/\/\/$/)    # skip if no filename is given
            {
                next;
            }
            $destfile =~ s|\/\/|\/|g;     # remove unnecessary doubleslashes
            my $fh = openFileHandle( $destfile, '>', 1 );
            binmode($fh);
            while (($status = $u->read($buff)) > 0) {
                $fh->write($buff);
            }
            $fh->close();
            unless (defined $silent)
            {
                &syslog ("Uncompress $destfile -> $targetPath");
            }
            # set timestamps of file to the ones in the zip
            my $stored_time = $header->{'Time'};
            utime ($stored_time, $stored_time, $destfile);
        }
        if ($status < 0)
        {
            die "Error processing $fileToExtract: $!\n";
        }
    }
    else
    {
        my $ae = Archive::Extract->new( archive => $fileToExtract );
        $ae->extract( to => $targetPath ) or &error("Failed to extract $fileToExtract with error $ae->error");
        unless (defined $silent)
        {
            foreach my $file (@{$ae->files})
            {
                # only print if not a directory
                if ( $file !~ m|/$| )
                {
                    &syslog("Uncompress $fileToExtract -> $targetPath");
                }
            }
        }
    }
    return;
}
You could simply do it in the following manner using Archive::Extract. It provides a generic archive-extracting mechanism, so you don't have to install separate modules for tar and zip.
use Archive::Extract;
my $ae = Archive::Extract->new( archive => $fileToExtract );
my $ok = $ae->extract( to => $targetPath );
If you specifically want to check whether a file is a tar or a zip, you can use the following:
$ae->is_tar
$ae->is_zip
Note that Archive::Extract is a core module, so you won't have to install it separately.
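As an illustration (not tested against your share, and the surrounding variable names are the ones from your sub), the zip branch could be reduced to something like this:
use Archive::Extract;

my $ae = Archive::Extract->new( archive => $fileToExtract );

# handle only tar and zip here; $ae->error reports why an extraction failed
if ($ae->is_zip || $ae->is_tar) {
    $ae->extract( to => $targetPath )
        or die "Failed to extract $fileToExtract: " . $ae->error;
}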

How can I determine if I'd read something from a socket?

The following piece of code is supposed to read a few, undetermined in number, lines from a socket.
use warnings;
use strict;
use IO::Socket::INET;

my $server = shift;
my $port   = shift;

my $sock = new IO::Socket::INET (
    PeerAddr => $server,
    PeerPort => $port,
    Proto    => 'tcp',
    Timeout  => 1,
    Blocking => 0
) or die "Could not connect";

while (my $in = <$sock>) {
    print "$in";
}
print "Received last line\n";
Unfortunately, the $in = <$sock> part blocks even though I have set Blocking => 0 and the server does not send any more text. Hence, "Received last line" is never printed.
So, I tried to improve the behavior with IO::Select:
use warnings;
use strict;
use IO::Socket::INET;
use IO::Select;

my $server = shift;
my $port   = shift;

my $sock = new IO::Socket::INET (
    PeerAddr => $server,
    PeerPort => $port,
    Proto    => 'tcp',
    Timeout  => 1,
    Blocking => 1
) or die "Could not connect";

my $select = new IO::Select;
$select->add($sock);
sleep 1;

while ($select->can_read) {
    my $in = <$sock>;
    print $in;
}
This second approach only prints the first line sent, then seems to block forever.
Since I have seen such examples working, I believe the problem is Windows, on which I am trying to run these scripts.
Is there a way I can achieve a non-blocking read?
When using select (which can_read uses), it defeats the purpose to follow up with blocking I/O. You must also avoid buffered I/O, because the system (i.e. select) doesn't know about any data sitting in your library's buffers. This means you can't mix select with read, readline (aka <> and <$fh>) or eof. You must use sysread.
my %clients;

for my $fh ($select->can_read()) {
    my $client = $clients{$fh} //= {};
    our $buf; local *buf = \( $client->{buf} //= '' );    # alias: $buf refers to $client->{buf}

    my $rv = sysread($fh, $buf, 64*1024, length($buf));
    if (!defined($rv)) {
        my $error = $!;
        $select->remove($fh);
        delete($clients{$fh});
        # ... Handle error ...
        next;
    }
    if (!$rv) {
        $select->remove($fh);
        delete($clients{$fh});
        # ... Handle EOF ... Don't forget to check if there's anything left in $buf.
        next;
    }

    # ... Remove any complete messages from $buf and handle them ...
}
If you want to read a line at a time, you'd use
while ($buf =~ s/^([^\n]*)\n//) {
    process_msg($client, $1);
}
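Applied to the single-socket client from your first snippet, a minimal sketch could look like the following (my adaptation, not a verbatim fix): the timeout passed to can_read bounds how long we wait for more data, and sysread appends whatever arrives to one buffer.
use strict;
use warnings;
use IO::Socket::INET;
use IO::Select;

my $server = shift;
my $port   = shift;

my $sock = IO::Socket::INET->new(
    PeerAddr => $server,
    PeerPort => $port,
    Proto    => 'tcp',
) or die "Could not connect: $!";

my $select = IO::Select->new($sock);
my $buf = '';

# Wait up to one second for the socket to become readable; stop when the
# server goes quiet or closes the connection.
while (my @ready = $select->can_read(1)) {
    my $rv = sysread($sock, $buf, 64 * 1024, length($buf));
    die "Read error: $!" if !defined $rv;
    last if $rv == 0;    # EOF: the server closed the connection
}

# Whatever arrived is now in $buf; print it line by line.
print "$_\n" for split /\n/, $buf;
print "Received last line\n";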

How to handle error thrown by module in perl

I am using the module DBD::Oracle in Perl to insert XML contents into an Oracle 11g instance. While inserting some of the documents in my sample set, the script fails because the module returns "Unsupported named object type for bind parameter". I would like to handle this error and let the loop iteration go on.
Following is my code:
use strict;
use warnings;
use DBI;
use DBD::Oracle qw(:ora_session_modes);
use DBD::Oracle qw(:ora_types);

die("USAGE: $0 <input_directory>") unless ($#ARGV == 0);
my $directory = $ARGV[0];

my $dbh = DBI->connect('dbi:Oracle:dbname', "username", "pass");
my $SQL;

opendir(IMD, $directory) || die ("Cannot open directory");
my @listOfFiles = readdir(IMD);
closedir(IMD);

my $xmltype_string;
my $xml;
my $i = 1;

foreach my $file (@listOfFiles)
{
    unless ($file eq '.' or $file eq '..')
    {
        print "inserting File no. $i \t $file .... \n";
        {
            local $/ = undef;
            open (FILE, "<", "$directory/$file");
            $xml = <FILE>;
            close (FILE);
        }
        $SQL = "insert into sampleTable values ( :ind, :xml)";
        my $sth = $dbh->prepare($SQL);
        $sth->bind_param(":xml", $xml, { ora_type => ORA_XMLTYPE });
        $sth->bind_param(":ind", $i);
        $sth->execute();
        $i++;
    }
}
I am getting the error in bind_param.
Error handling is usually done via the Try::Tiny module:
use Try::Tiny;

try {
    something_that_could_die();
}
catch {
    handle_error($_);
}
finally {
    do_something_either_way();
};    # ← trailing semicolon not optional.
Both catch and finally are optional.
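Applied to the loop from your script, a minimal sketch could look like this (the warn message is illustrative; $dbh, $xml, $i and ORA_XMLTYPE are the variables from your code). A document that triggers the bind error is reported and the loop simply moves on to the next file:
use Try::Tiny;

foreach my $file (@listOfFiles)
{
    next if $file eq '.' or $file eq '..';

    # ... slurp $xml from "$directory/$file" exactly as in your code ...

    try {
        my $sth = $dbh->prepare("insert into sampleTable values ( :ind, :xml)");
        $sth->bind_param(":xml", $xml, { ora_type => ORA_XMLTYPE });
        $sth->bind_param(":ind", $i);
        $sth->execute();
    }
    catch {
        warn "Skipping file no. $i ($file): $_";    # inside catch, $_ holds the error
    };

    $i++;
}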

How can this perl sub be optimised for speed?

The following Perl sub is used to build arrays of hashes.
Each hash to be stored is first checked for uniqueness using a given key; if a hash with the same value for that key already exists in the array, the new one is not stored.
How can this perl sub be optimised for speed?
Example use:
my @members;
...
$member = {};
$member->{'name'} = 'James';
hpush('name', \@members, $member);
The sub:
sub hpush {
    # push a set of key value pairs onto an array as a hash, if the key doesn't already exist
    if (@_ != 3) {
        print STDERR "hpush requires 3 args, ".@_." given\n";
        return;
    }
    my $uniq   = shift;
    my $rarray = shift;
    my $rhash  = shift;
    my $hash = ();
    #print "\nHash:\n";
    for my $key ( keys %{$rhash} ) {
        my $valuea = $rhash->{$key};
        #print "key: $key\n";
        #print "key=>value: $key => $valuea\n";
        $hash->{$key} = $valuea;
    }
    #print "\nCurrent Array:\n";
    for my $node (@{$rarray}) {
        #print "node: $node \n";
        for my $key ( keys %{$node} ) {
            my $valueb = $node->{$key};
            #print "key=>value: $key => $valueb\n";
            if ($key eq $uniq) {
                #print "key=>value: $key => $valueb\n";
                if (($valueb =~ m/^[0-9]+$/) && ($hash->{$key} == $valueb)) {
                    #print "Not pushing i $key -> $valueb\n";
                    return;
                } elsif ($hash->{$key} eq $valueb) {
                    #print "Not pushing s $key -> $valueb\n";
                    return;
                }
            }
        }
    }
    push @{$rarray}, $hash;
    #print "Pushed\n";
}
Note that the Perl isn't mine and I'm a Perl beginner.
This code is rather... not very efficient. First, it copies $rhash to $hash, with a for loop... for some reason. Then it loops through the hash keys, instead of simply using the hash key that it's looking for. Then it does two equivalent checks, apparently an attempt to distinguish numbers from non-numbers and select the appropriate comparison (== or eq). This is all unnecessary.
The code below should be roughly equivalent. I've trimmed it down hard; this should be about as fast as it is possible to get.
use strict;
use warnings;

hpush('name', \@members, $member);

sub hpush {
    my ($uniq, $rarray, $rhash) = @_;

    for my $node (@{$rarray}) {
        if (exists $node->{$uniq}) {
            return if ($node->{$uniq} eq $rhash->{$uniq});
        }
    }
    push @{$rarray}, $rhash;
}
