In the following code I need to print output from the variables $stdout, $stderr. How can I do this without using $ssh = Net::SSH::Perl->new?
#!/usr/bin/perl
use strict;
use warnings;
use Net::SSH::Perl;
use FindBin qw($RealBin);
use File::Basename;
use lib "/nfs/site/proj/dpg/tools/lib/perl";
use Util;
use Getopt::Std;
use Cwd;
use File::Copy;

# (duplicate "use Net::SSH::Perl;" removed -- it was loaded twice)

my $host = 'someip.com.com';
my $pass = '';
my $user = '';
my $cmd  = 'pwd';

# BUG FIX: system() returns a single scalar (the child's wait status); it can
# never populate a ($stdout, $stderr, $exit) list -- the original left
# $stderr and $exit undef.  Capture each stream explicitly instead: backticks
# collect stdout, stderr is redirected to a temp file, and $? holds the status.
my $remote_cmd = 'ssh someip.com cat /nfs/site/home/aa/test11.txt';
my $err_file   = "/tmp/ssh_stderr.$$";

my $stdout = `$remote_cmd 2>$err_file`;   # backticks capture stdout only
my $exit   = $? >> 8;                     # high byte of $? is the exit code

my $stderr = do {
    local $/;                             # slurp mode
    open my $efh, '<', $err_file or die "open $err_file: $!";
    <$efh>;
};
unlink $err_file;

if ( length $stdout ) {
    print "Standard output is \n$stdout";
}
elsif ( length $stderr ) {
    print "Standard error is \n$stderr";
}
I use IO::CaptureOutput; it's simpler.
use strict;
use warnings;
use IO::CaptureOutput qw(capture_exec);

# capture_exec() runs the command and returns its stdout, stderr, a success
# flag, and the raw exit code in a single call.
my $remote_command = "ssh someip.com cat /nfs/site/home/aa/test11.txt";
my ($stdout, $stderr, $success, $exitcode) = capture_exec($remote_command);
You can also use list parameters in capture_exec, which I think is safer.
open3 allows you to read/write all handles:
use FileHandle;
use IPC::Open3;
use Symbol qw(gensym);

my $cmd = "ssh someip.com cat /nfs/site/home/aa/test11.txt";

# BUG FIX: open3() never returns false -- it dies on failure (or the failure
# surfaces in the child), so "open3(...) or die" was a no-op.  Use lexical
# filehandles instead of the global barewords GPW/GPR/GPE, and give stderr
# its own handle via gensym() (open3 requires a pre-created glob for it).
my ($wtr, $rdr);
my $err = gensym;
my $pid = eval { open3($wtr, $rdr, $err, $cmd) };
die "open3 failed for '$cmd': $@" if $@;

# print {$wtr} "write to ssh";
close $wtr;

# CAUTION: draining stdout to EOF before touching stderr can deadlock if the
# child fills its stderr pipe buffer first; for large output prefer IPC::Run
# or select()-based multiplexing.
while (<$rdr>) {
    # read stdout
}
while (<$err>) {
    # read stderr
}
close $rdr;
close $err;

waitpid $pid, 0;   # BUG FIX: reap the child (also populates $?)
I've always been a fan of IPC::Run:
use IPC::Run qw(run);   # BUG FIX: run() is not exported by default

# BUG FIX: run() returns TRUE on success (exit status 0) -- it is not an exit
# code.  The raw wait status lives in $? afterwards, as with system().
# The stdin slot takes \undef to close the child's stdin ("..." was not valid
# Perl in list context and is replaced with the actual remote path).
my $ok = run [ "ssh", "someip.com", "cat", "/nfs/site/home/aa/test11.txt" ],
    \undef, \my $stdout, \my $stderr;
my $exitcode = $? >> 8;
At this point the STDOUT and STDERR results from the command will be stored in those two lexicals.
Though as a solution to the general issue of ssh'ing to a host and retrieving the contents of a file from within a Perl program, you might like IPC::PerlSSH:
use IPC::PerlSSH;
# Starts a persistent Perl helper process on the remote host over ssh;
# the connection is reused for every subsequent call().
my $ips = IPC::PerlSSH->new( Host => "someip.com" );
# Load the stock "FS" function library on the remote side, importing readfile().
$ips->use_library( "FS", qw( readfile ) );
# Returns the remote file's entire contents as a single scalar.
my $content = $ips->call( "readfile", "/nfs/site/home/aa/test11.txt" );
The $ips object here will just hang around and allow for reuse to collect multiple files, execute other commands, and generally reuse the connection, rather than having to set up a new ssh connection every time.
Related
I have a script that runs well with Perl < 5.36:
#!/usr/bin/env perl
use strict;
use warnings FATAL => 'all';
use feature 'say';
use autodie ':all';
use Parallel::ForkManager;

# Run one shell command, reporting and dying on a non-zero exit.
# NOTE(review): autodie ':all' already makes system() fatal on failure, so the
# manual check below is normally unreachable -- confirm which behavior is wanted.
sub execute {
    my $command = shift;
    print "Executing Command: $command\n";
    if (system($command) != 0) {
        my $fail_filename = "$0.fail";
        print "$command failed.\n";
        die;
    }
}

# Run every command in the referenced array, at most 2 concurrently.
sub run_parallel {
    my $cmd = shift;
    # Direct method syntax; "new Parallel::ForkManager(2)" is indirect
    # object notation, which later Perls disable.
    my $manager = Parallel::ForkManager->new(2);
    foreach my $command (@{ $cmd }) {   # '@' sigil restored; '#{ $cmd }' was transcription mangling
        $manager->start and next;
        execute( $command );
        $manager->finish;
    }
    $manager->wait_all_children;   # necessary after all lists
}

my @commands = ('echo "a"','echo "b"','echo "c"','which ls','which rm');
run_parallel(\@commands);
but when I make minor changes with the above to 5.36:
#!/usr/bin/env perl
use 5.036;   # enables strict/warnings/say and DISABLES indirect object syntax
use warnings FATAL => 'all';
use autodie ':all';
use Parallel::ForkManager;

# Run one shell command, reporting and dying on a non-zero exit.
sub execute {
    my $command = shift;
    print "Executing Command: $command\n";
    if (system($command) != 0) {
        my $fail_filename = "$0.fail";
        print "$command failed.\n";
        die;
    }
}

# Run every command in the referenced array, at most 2 concurrently.
sub run_parallel {
    my $cmd = shift;
    # NOTE: this indirect-object call is exactly what "use 5.036" forbids and
    # is the source of the reported "Bareword found where operator expected"
    # error; the fix is: my $manager = Parallel::ForkManager->new(2);
    my $manager = new Parallel::ForkManager(2);
    foreach my $command (@{ $cmd }) {   # '@' sigil restored; '#{ $cmd }' was transcription mangling
        $manager->start and next;
        execute( $command );
        $manager->finish;
    }
    $manager->wait_all_children;   # necessary after all lists
}

my @commands = ('echo "a"','echo "b"','echo "c"','which ls','which rm');
run_parallel(\@commands);
I get an error:
Bareword found where operator expected at debug.pl line 20, near "new Parallel::ForkManager"
All I switched was use 5.036
Is Parallel::ForkManager incompatible with perl 5.36 or am I doing something wrong?
Perl v5.36 with use v5.36 turns off indirect object notation, where the method comes before the invocant:
my $p = new Some::Module; # indirect object notation
my $p = Some::Module->new(); # what you should do
If this is inconvenient for you in the short term, you can require the minimum version so you still get the things turned off by use v5.36:
require v5.36;
If you don't actually use v5.36 features, also consider requiring the minimum version that your code actually needs. In your snippet, I don't immediately see any minimum version requirement (other than just Perl 5).
Loading the 5.36 feature bundle (which you do through use 5.036;) disables the indirect method call syntax as if you had done no feature qw( indirect );.
This is a method call using the indirect syntax:
METHODNAME INVOCANT ARGS
Either re-enable the feature or use the "direct" syntax:
INVOCANT->METHODNAME( ARGS )
In your case,
my $manager = Parallel::ForkManager->new( 2 );
rsync_put and scp_put both show the progress in the terminal.
But when I try to write it into a file I can only see the progress from rsync_put. The problem remains even if I just print the output from the pty [my edit].
Is this a problem with my pty usage?
It is only mentioned for scp_put, that there is no progress when the STDOUT is not a tty.
Note that scp will not generate progress reports unless its stdout stream is attached to a tty.
#!/usr/bin/perl
use strict;
use warnings;
use Net::OpenSSH;
use IO::Pty;   # BUG FIX: IO::Pty was used below but never loaded

my $SSH = Net::OpenSSH->new(
    '192.168.178.33',
    (
        user     => 'TT',
        password => 'TTpassword',
        port     => '22',
    )
);

# Direct method syntax; "new IO::Pty" is indirect object notation.
my $pty   = IO::Pty->new;
my $slave = $pty->slave;

# scp only emits progress reports when its stdout is attached to a tty,
# hence the pty handed to both stdout_fh and stderr_fh.
# NOTE(review): reading the pty only AFTER scp_put returns can block or lose
# output -- the transfer and the reader should run in parallel (see the
# fork-based variant later in this thread).
$SSH->scp_put(
    {
        quiet     => 0,
        verbose   => 1,
        stdout_fh => $pty,
        stderr_fh => $pty,
        bwlimit   => '200',
    },
    '/opt/TT/CHANGES.md',
    '/opt/TT/CHANGES.md',   # stray duplicate comma removed
);

while (<$pty>) {
    print STDOUT "$_";
}
Another solution to your problem is to customize the pure perl implementation of the SCP protocol available from Net::SSH::Any.
I designed the SCP classes from that package to be subclassed so that things like that could be done but never went to actually document that API, so, well, you would be relying on undocumented features (though, I don't plan to change it).
use strict;
use warnings;

package MyPutter {
    use parent 'Net::SSH::Any::SCP::Putter::Standard';
    use Data::Dumper qw(Dumper);

    # Wrap the four traversal callbacks so each logs the path it handles
    # before delegating to the parent implementation.
    for my $name (qw(open_dir open_file close_file close_dir)) {
        my $method = $name;
        my $super = Net::SSH::Any::SCP::Putter::Standard->can($name);
        no strict 'refs';
        *$name = sub {
            my ($putter, $a) = @_;    # '@_' restored; '#_' was transcription mangling
            print STDERR "$method $a->{path} --> $a->{local_path}\n";
            $super->(@_);             # '@_' restored here as well
        };
    }

    # Log a running byte count each time a chunk of the file is read.
    sub read_file {
        my $putter = shift;
        my $a = shift;
        my $data = $putter->SUPER::read_file($a, @_);   # '@_' restored
        $a->{bytes_copied} += length($data);
        print STDERR "read_file $a->{path} --> $a->{local_path} $a->{bytes_copied}/$a->{size}\n";
        return $data;
    }
};

use Net::OpenSSH;

my $ssh = Net::OpenSSH->new("localhost");
my $any = $ssh->any;
# NOTE(review): _new() and run() are undocumented internals of Net::SSH::Any's
# SCP putter classes (acknowledged in the surrounding text) -- verify against
# the installed version before relying on them.
my $putter = MyPutter->_new($any, {}, "/tmp/out2", "/tmp/passwd3");
$putter->run({});   # BUG FIX: missing statement terminator added
Even if scp_put and rsync_put are similar methods, they wrap two unrelated programs (scp and rsync) which regarding that matter, do not behave in the same way.
Specifically, scp checks whether its stdio streams are attached to a pty and when not, it suppresses progress messages. AFAIK that feature can not be disabled.
Update: Reading from the PTY in parallel:
This code reads from the PTY and writes the data to a file in parallel:
use strict;
use warnings;
use Net::OpenSSH;
use POSIX ();
use IO::Pty;

my $ssh = Net::OpenSSH->new("localhost"); #, scp_cmd => "/home/salva/t/openssh-8.9p1/scp");

# Direct method syntax; "new IO::Pty" is indirect object notation.
my $pty   = IO::Pty->new;
my $slave = $pty->slave;
print "slave: $slave, fd: ".fileno($slave)."\n";

# Child 1: drain the pty master and append every byte to a log file.
my $logger_pid = fork;
defined $logger_pid or die "fork failed: $!";   # check fork result in the parent, before branching
unless ($logger_pid) {
    open my $log, ">", "/tmp/scp.log" or die "open /tmp/scp.log: $!";
    select $log;
    $| = 1;
    print("Reading from PTY\n");
    while (1) {
        my $buffer;
        # BUG FIX: the original ignored sysread's return value, so at EOF it
        # busy-looped forever on zero-byte reads; stop on EOF or error.
        my $n = sysread($pty, $buffer, 1);
        last unless $n;
        syswrite($log, $buffer, 1);
    }
    print "PTY closed unexpectedly\n";
    POSIX::_exit(0);   # BUG FIX: _exit requires an explicit status argument
}

# Child 2: run the scp transfer with both output streams on the pty slave.
my $scp_pid = fork;
defined $scp_pid or die "fork failed: $!";
unless($scp_pid) {
    $pty->make_slave_controlling_terminal();
    $ssh->scp_put({stdout_fh => $slave,
                   stderr_fh => $slave,
                   quiet => 0},
                  "/etc/passwd", "/tmp/foo.vdi");
    # BUG FIX: without this the scp child fell through and executed the
    # parent's waitpid/kill code below.
    POSIX::_exit(0);
}

waitpid($scp_pid, 0);
kill 9, $logger_pid;
waitpid($logger_pid, 0);
I have the code as shown below: a BEGIN block which loads the session or, if none has been created yet, creates one. But it doesn't work every time. It's a login script; if I enter the PIN and it's wrong, the script displays the login form again, which is submitted to this same script. Up to 3 attempts are permitted, but it will fail to load the session, usually on attempt 2. It is inconsistent, so can anyone see what might be wrong and why the session sometimes fails to load?
I do have warnings enabled and I have shown that in the code.
I used to start the script with 'print $session->header' but, having changed to 'print $cgi->header;' I can see clearly that the session is undefined, when the script fails. I should add that, if I refresh the failed page perhaps as many as 5 times, the session does eventually reload with all data intact.
#!/usr/bin/perl
#use CGI::Carp qw/warningsToBrowser fatalsToBrowser/;
use strict;
use warnings 'all';
use CGI qw(:all);
use CGI::Session;
use Crypt::PasswdMD5;
use DBI;
use Data::Dumper;

my $cgi = CGI->new;   # NOTE(review): this assignment happens at RUNTIME, after the BEGIN block below
my $session;
my $sessions_dir_location;
my $session_id;

# NOTE(review): BEGIN runs at COMPILE time, before "$cgi = CGI->new" above has
# executed, so $cgi is undef throughout this block and the is_expired branch
# would die calling start_html() on undef.  Session handling probably should
# not live in a BEGIN block at all -- confirm before relying on this.
BEGIN{
# BUG FIX: '@INC' restored -- the mangled '#INC' turned the argument list into
# a comment and broke the statement.
unshift @INC, "/var/www/vhosts/example.com/subDomain.example.com/cgi-bin/library";
my $document_root = $ENV{'DOCUMENT_ROOT'};
$document_root =~ s/\///;
my ( $var
, $www
, $vhosts
, $domain
) = split ('/', $document_root, 5);
$sessions_dir_location = '/' . $var . '/' . $www . '/' . $vhosts . '/' . $domain;
# BUG FIX: load() was called with no arguments, so it searched the DEFAULT
# session store rather than the Directory that new() uses below -- a likely
# cause of the session intermittently coming back undefined.  load() must
# receive the same dsn/arguments as new().
$session = CGI::Session->load(undef, undef,
{Directory=>"$sessions_dir_location/sessions_storage/"}) or die CGI::Session->errstr();
if ( $session->is_expired ) {
print $session->header(),
$cgi->start_html(),
$cgi->p("Your session timed out! Refresh the screen to start new session!"),
$cgi->end_html();
exit(0);
}
if ( $session->is_empty ) {
# Direct method syntax; "new CGI::Session(...)" is indirect object notation.
$session = CGI::Session->new(undef, undef,
{Directory=>"$sessions_dir_location/sessions_storage/"}) or die CGI::Session->errstr;
}
# add the library dir to @INC
use lib do {
use Cwd 'realpath';
my ($dir) = __FILE__ =~ m{^(.*)/};
realpath("$dir/library");
};
use feature 'say';
use FindBin '$RealBin';
use lib $RealBin;
use lib "$RealBin/library";
}
my $self = $cgi->url;
my %login = $cgi->Vars;
print $cgi->header;
# capture and display warnings
# NOTE(review): printing $cgi->header again from inside the warn handler emits
# a second Content-Type line into the body once headers have been sent.
local $SIG{__WARN__} = sub {
my $message = shift;
print $cgi->header;
print qq($message);
};
print qq(<pre>);
print Dumper \%login;
print qq(</pre>);
print qq(<pre>session);
print Dumper \$session; #undef
print qq(</pre>);
#next is line 141
my $session_stored_user_name = $session->param("entered_user_name");
Error message is this:
Can't call method "param" on an undefined value at /var/www/vhosts/example.com/subDomain.example.com/cgi-bin/dashboard-login/login-with-pin.pl line 141, <DAT> line 45.
Please, also, what or where is <DAT> line 45?
If I want to start a Perl Dancer app, I have to run the following command:
perl app.psgi
If I want to pass an option to the application and access it inside the script from @ARGV, I can do it like this:
perl app.psgi --option1 --option2
I can also run this app using "plackup"; however, I cannot pass the options as when running the script directly with Perl. The @ARGV parameter array is empty.
What can I do?
How can I pass command line options to the "app.psgi" script started from "plackup"?
Below is the file of how the script approximately looks like:
#!/usr/bin/env perl
use Dancer2;
use Data::Dumper;
use MIME::Base64 qw( encode_base64 );
use POSIX;

# BUG FIX: defined-or (//) instead of ||, so an explicit but falsy argument
# such as "0" is kept; only an absent argument selects the default.
my $system = shift @ARGV // 'default_system';
print "SYSTEM: $system\n";

my $host = '127.0.0.1';
my $port = 5000;
set host => $host;
set port => $port;

# Both verbs on /expenses just acknowledge the request; params are collected
# but currently unused.
get '/expenses' => sub {
    my %params = params;
    return to_json {status => 'OK'};
};

post '/expenses' => sub {
    my %params = params;
    return to_json {status => 'OK'};
};

dance;
It seems like plackup runs the app in a sandboxed environment where @ARGV is erased.
You can still try using environment variables instead of arguments on the command line. For example, using MY_SYSTEM as an example:
#!/usr/bin/env perl
use Dancer2;
use Data::Dumper;
use MIME::Base64 qw( encode_base64 );
use POSIX;

# Same output as the @ARGV version, but the value now arrives through the
# MY_SYSTEM environment variable set on the plackup command line.
printf "SYSTEM: %s\n", $ENV{MY_SYSTEM};
# [...]
and then run the app using:
$ MY_SYSTEM=Foo plackup app.psgi
I am new to Perl scripting. I am trying to get the count of directories and subdirectories.
So I have searched all the available help on scripting.
But I am unable to get the count of subdirectories. Below is the script I used.
use strict;
use warnings;

my @dirs = ('.');
my $directory_count = 0;
my $file_count = 0;

my $outfile = 'log.txt';
open my $fh, '>', $outfile or die "can't create logfile; $!";

for my $dir (@dirs) {
    # Core opendir/readdir instead of the non-core File::Slurp; readdir
    # (unlike read_dir) includes '.' and '..', so skip them explicitly.
    opendir my $dh, $dir or die "can't open directory $dir; $!";
    for my $file (readdir $dh) {
        next if $file eq '.' || $file eq '..';
        if ( -d "$dir/$file" ) {
            $directory_count++;
        }
        else {
            $file_count++;
        }
    }
    closedir $dh;
}

# BUG FIX: the totals were printed INSIDE the loop, so with more than one
# starting directory the log accumulated one cumulative line per directory.
print $fh "Directories: $directory_count\n";
print $fh "Files: $file_count\n";
close $fh;
Here, I am unable to identify where to add the equivalent of the "dir /s" option, i.e. how to make the count recurse into subdirectories.
Please help it will reduce lot of manual work.
Ravi
Never EVER write your own directory traversal. There are too many pitfalls, gotchas and edge cases. Things like path delimiters, files with spaces, alternate data streams, soft links, hard links, DFS paths... just don't do it.
Use File::Find or if you prefer File::Find::Rule.
As I prefer the former, I'll give an example:
use strict;
use warnings;
use File::Find;

# BUG FIX: start both counters at 0; with the original undef start, a tree
# containing no plain files (or no directories) printed an "uninitialized
# value" warning under 'use warnings'.
my $dir_count  = 0;
my $file_count = 0;

# find() runs this for every entry in its traversal.
# $_ is the current file; $File::Find::name (lowercase 'n') is the full path.
sub count_stuff {
    if ( -d ) { $dir_count++ };
    if ( -f ) { $file_count++ };
}

find ( \&count_stuff, "." );

print "Dirs: $dir_count\n";
print "Files: $file_count\n";
Here is a script that does it: 1) without global variables; and 2) without adding another sub to the namespace.
#!/usr/bin/env perl
use strict;
use warnings;
use File::Find;

run(\@ARGV);

# Entry point: print file/directory totals for each path named on the
# command line.
sub run {
    my $argv = shift;
    for my $top_dir ( @{ $argv } ) {
        my $counts = count_files_and_directories($top_dir);
        printf(
            "%s: %d files and %d directories\n",
            $top_dir,
            $counts->{files},
            $counts->{directories},
        );
    }
    return;
}

# Walk $top without chdir-ing, tallying directories and plain files.
# Returns a hashref: { files => N, directories => M }.
sub count_files_and_directories {
    my $top = shift;
    my %count = (directories => 0, files => 0);
    my $tally = sub {
        $count{directories}++ if -d;
        $count{files}++       if -f;
    };
    find({ wanted => $tally, no_chdir => 1 }, $top);
    return \%count;
}
It seems simpler to use File::Find::Rule.. For example:
use warnings;
use strict;
use File::Find::Rule;

# Collect every directory and every plain file under the current directory.
my @dirs  = File::Find::Rule->new->directory->in('.');
my @files = File::Find::Rule->new->file->in('.');