The following is my Perl code:
use strict;
use File::Find;
use MIME::Base64;
use File::Temp qw(tempfile);
sub loadFiles(); #udf
sub mySub(); #udf
my @files = ();
my $dir = shift || die "Argument missing: directory name\n";
my $finalLoc;
my $filePath;
my $fileContents;
my $base64EncFile;
my $domain = "WTX";
my $devFilePath;
my $deviceDir;
my $position;
my $user = "admin";
my $encPwd = "YzNKcGNtRnRZVEF4";
my $decPwd;
my $response;
my $temp;
my $tempFilename;
loadFiles(); #call
foreach (@files) {
#take the file path into a variable
$filePath = $_;
#replace the '/' with '\' in the file path
$filePath =~ s/\//\\/g;
#take the file path into a variable
$devFilePath = $_;
#replace the '\' with '/' in the file path
$devFilePath =~ s/\\/\//g;
#perform string operation to derive a target file path
$position = index( $devFilePath, "RPDM" );
$deviceDir = "local:///" . substr( $devFilePath, $position );
#open handle on file to read the contents
open( FILE, "< $filePath" );
#read the entire file into a variable, 'fileContents'
$fileContents = do { local $/; <FILE> };
#base64 encode the file contents
$base64EncFile = encode_base64($fileContents);
#replace the <CR><LF> characters in the file and flatten the base64 string
$base64EncFile =~ s/[\x0A\x0D]//g;
#printing file path
print "FilePath=$filePath\n";
#creating a temp file with 9 random characters at the end, example 'tempUKv1vqBTp'
$temp = File::Temp->new(
TEMPLATE => "tempXXXXXXXXX",
UNLINK => 0
) or die "Could not make tempfile: $!";
$tempFilename = $temp->filename;
#Printing temp file name
print "TempFileName=$tempFilename\n";
#open the temp file for writing
open(TEMP, ">$tempFilename");
select(TEMP);
while($base64EncFile){
#??? HOW TO PRINT THE VARIABLE $base64EncFile CONTENTS INTO THE TEMP FILE ???
}
#creating a final request for sending to the web service
my $dpString = "<env:Envelope xmlns:env='http://schemas.xmlsoap.org/soap/envelope/' xmlns:dp='http://www.datapower.com/schemas/management'><env:Body><dp:request domain='$domain'><dp:set-file name='$deviceDir'>". $base64EncFile."</dp:set-file></dp:request></env:Body></env:Envelope>";
#decode the encoded password
$decPwd = decode_base64($encPwd);
system('C:\\apps\\curl-7.15.0\\curl.exe', '-#', '-k', '-u', "admin:$decPwd", '--data-binary', "$dpString", 'https://host/service/fileSet');
print "-----------------------------------------------------------\n";
close(TEMP);
close(FILE);
}
sub loadFiles() {
find( \&mySub, "$dir" ); #custom subroutine find, parse $dir
}
# following gets called recursively for each file in $dir, check $_ to see if you want the file!
sub mySub() {
push @files, $File::Find::name
if (/(\.xml|\.xsl|\.xslt|\.ffd|\.dpa|\.wsdl|\.xsd)$/i)
; # modify the regex as per your needs or pass it as another arg
}
The task I am trying to accomplish is: given a folder argument, the above Perl program should make recursive calls to a given web service endpoint. The problem is that the system command in Perl is unable to send files over 32 KB. While trying to use the File::Temp module, I am not sure how to write the contents of a variable into a temp file (this is my first week using Perl).
Any help to achieve this would be appreciated. Thanks!
Are you asking how to write a string to an open file?
print $fh $string;
should do the trick.
In your example, that would translate to replacing the while loop over $base64EncFile with something like:
print TEMP $base64EncFile;
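If the eventual goal is to get around the command-line length limit, one option is to write the whole SOAP envelope to the temp file and let curl read the request body from disk with --data-binary @file. A minimal sketch, reusing the question's $dpString, $tempFilename and $decPwd (and skipping the select(TEMP) step):
# Sketch: write the request body to the temp file, then point curl at it.
open( my $tmp_fh, '>', $tempFilename ) or die "Cannot write $tempFilename: $!";
print {$tmp_fh} $dpString;    # $dpString already embeds the base64-encoded file
close($tmp_fh);

# The leading '@' tells curl to read the POST body from the named file,
# so the data never has to fit on the command line.
system( 'C:\\apps\\curl-7.15.0\\curl.exe',
        '-#', '-k', '-u', "admin:$decPwd",
        '--data-binary', "\@$tempFilename",
        'https://host/service/fileSet' );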
I have been trying to zip files on a remote Windows server but have not had any success with whatever I tried. Below is the small piece of code. Please tell me where I'm going wrong. This code does not produce any error, but it just does not generate the zip file.
use strict;
use warnings;
# before running check perl module is installed in your PC.
use Archive::Zip;
use File::Basename 'basename';
my @files = ('D:\Scripts\Testing\abc.txt');
# if there is more than one file, add it using a comma as separator
my $zip = Archive::Zip->new;
foreach my $file (@files) {
my $member = basename $file;
printf qq{Adding file "%s" as archive member "%s"\n}, $file, $member;
$zip->addFile( $file, $member );
printf "Member added\n";
}
printf "Writing to zip\n";
$zip->writeToFileNamed('zippedFolders.zip');
#zip file name change it as u want
Could you please try this:
use Cwd;
use strict;
use warnings;
# before running check perl module is installed in your PC.
use Archive::Zip;
use File::Basename;
my (@files, $dirname, $bsename);
my $inFile = "D:\\Scripts\\Testing\\abc.txt"; # if there is more than one file, add it using a comma as separator
my $curdir = getcwd();
# Open the input file and read the list of files from it
open(IN, $inFile) || die "Cannot open $inFile: $!\n";
while(<IN>) {
my $sngfile = $_;
chomp($sngfile);
push(@files, $sngfile);
}
my $zip = Archive::Zip->new();
foreach my $file (@files)
{
$dirname = dirname($file);
$bsename = basename($file);
#Check file exist here your code
if($dirname!~m/\.$/) {
print "$dirname\t$bsename\n";
#printf qq{Adding file "%s" as archive member "%s"\n}, $dirname, $bsename;
$zip->addFile("$dirname/$bsename"); }
}
printf "$curdir\\Writing to zip\n";
$zip->writeToFileNamed("$curdir/zippedFolders.zip"); #zip file name change it as u want
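One thing worth checking in either version: Archive::Zip reports failures through return codes rather than by dying, so a silent run with no zip file usually means an unchecked error. A hedged sketch of the same write with the status tested (AZ_OK comes from the module's :ERROR_CODES export):
use strict;
use warnings;
use Archive::Zip qw( :ERROR_CODES );

my $zip = Archive::Zip->new;
$zip->addFile('D:\Scripts\Testing\abc.txt', 'abc.txt')
    or die "addFile failed\n";
unless ($zip->writeToFileNamed('zippedFolders.zip') == AZ_OK) {
    die "write error: could not create zippedFolders.zip\n";
}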
I have a program that takes a directory name as input from the user, searches all files inside the directory, and prints the contents of each file. Is there any way I can check the extension of a file and read the contents only of files with a specified extension? For example, it should read the contents of files in the ".txt" format.
My code is
use strict;
use warnings;
use File::Basename;
#!/usr/bin/perl
print "enter a directory name\n";
my $dir = <>;
print "you have entered $dir \n";
chomp($dir);
opendir DIR, $dir or die "cannot open directory $!";
while ( my $file = readdir(DIR) ) {
next if ( $file =~ m/^\./ );
my $filepath = "${dir}${file}";
print "$filepath\n";
print " $file \n";
open( my $fh, '<', $filepath ) or die "unable to open the $file $!";
while ( my $row = <$fh> ) {
chomp $row;
print "$row\n";
}
}
To get just the ".txt" files, you can use a file test operator (-f : regular file) and a regex.
my @files = grep { -f && /\.txt$/ } readdir DIR;
Otherwise, you can look for just text files, using perl's -T (ascii-text file test operator)
my @files = grep { -T } readdir DIR;
Otherwise you can try even this:
my @files = grep { -f } glob("$dir/*.txt");
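Note that readdir returns names relative to the directory, so the -f / -T tests above only succeed when the current working directory happens to be the one being read. A sketch that prefixes the directory explicitly (a hedged variant, reusing the question's $dir):
opendir my $dh, $dir or die "cannot open directory $dir: $!";
# build the full path for the file test; the regex still checks only the name
my @files = grep { -f "$dir/$_" && /\.txt$/i } readdir $dh;
closedir $dh;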
You're pretty close here. You have a main loop that looks like this:
while ( my $file = readdir(DIR) ) {
next if $file =~ /^\./; # skip hidden files
# do stuff
}
See where you're skipping loop iterations if the filename starts with a dot. That's an excellent place to put any other skip requirements that you have - like skipping files that don't end with '.txt'.
while ( my $file = readdir(DIR) ) {
next if $file =~ /^\./; # skip hidden files
next unless $file =~ /\.txt$/i; # skip non-text files
# do stuff
}
In the same way as your original test checked for the start of the string (^) followed by a literal dot (\.), we're now searching for a dot (\.) followed by txt and the end of the string ($). Note that I've also added the /i option to the match operator to make the match case-insensitive - so that we match ".TXT" as well as ".txt".
It's worth noting that the extension of a file is a terrible way to work out what the file contains.
Try this. The code below does what you expect.
use warnings;
use strict;
print "Enter the directory name: ";
chomp(my $dir=<>);
print "Enter the file extension type: "; #only type the file format. like txt rtf
chomp(my $ext=<>);
opendir( my $dh, $dir ) or die "cannot open directory $dir: $!";
my @files = grep { m/\.$ext$/ } readdir($dh);
foreach my $each (@files) {
    open( my $fh, '<', "$dir/$each" ) or die "cannot open $dir/$each: $!";
    print <$fh>;
}
I store all the files from the given directory in one array, and I pick out the requested file format using the grep command. Then each matching file is opened and printed inside the foreach loop.
I am trying to read multiple .txt files in a folder. Each file should be read line by line; however, I have failed to read multiple .txt files using glob. Any advice on my code?
my %data;
#FILES = glob("*.txt");
$EmailMsg .= "EG. Folder(week) = Folder(CW01) --CW01 = Week 1 -- Number is week\n ";
$EmailMsg .= "=======================================================================================================\n";
# Try to Loop multiple files here
foreach my $file (@FILES) {
local $/ = undef;
open my $fh, '<', $file;
$data{$file} = <$fh>;
# Read the file one line at a time.
while (my $line = <$fh>) {
chomp $line;
$line =~ s/^\s+//;
$line =~ s/\s+$//;
my ($name, $date, $week) = split /\:/, $line;
if ($name eq "NoneFolder") {
$EmailMsg .= "Folder ($week) - No Folder created on the FTP! Failed to open folder!\n";
}
if ($name eq "EmptyFiles") {
$EmailMsg .= "Folder ($week) - No Files insides the folder! Failed download files!\n";
}
}
}
$EmailMsg .= "=======================================================================================================\n";
$EmailMsg .= "Please note that if you receive this email means that the script is running fine just that no folder is created or no files inside the folder for the week on the FTP.\n";
# close the file.
#close <$fh>;
Currently output:
EG. Folder(week) = Folder(CW01) --CW01 = Week 1 -- Number is week
=======================================================================================================
=======================================================================================================
Please note that if you receive this email means that the script is running fine just that no folder is created or no files inside the folder for the week on the FTP.
It failed to get any .txt files.
You are trying to read each file twice: firstly into the hash %data and then again line by line.
Once you have reached end of file, you have to either reopen the file or use seek to move the read pointer back to the beginning.
You also need to set $/ back to its original value, otherwise your loop will read the entire file instead of one line at a time.
It's not clear whether you really need the second copy of the file data in the hash, but you can avoid having to reset $/ by putting the change within a block, like this
open my $fh, '<', $file;
$data{$file} = do {
local $/ = undef;
<$fh>;
};
and then reset the file pointer to the start again before the while loop.
seek $fh, 0, 0;
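Putting those pieces together, a sketch of how the corrected loop could look (keeping the %data copy from the question; drop the slurp and the seek if you only need the line-by-line pass):
foreach my $file (@FILES) {
    open my $fh, '<', $file or die "cannot open $file: $!";

    # slurp a full copy; $/ is restored automatically when the block ends
    $data{$file} = do { local $/; <$fh> };

    # rewind and read the same handle again, one line at a time
    seek $fh, 0, 0;
    while (my $line = <$fh>) {
        chomp $line;
        $line =~ s/^\s+//;
        $line =~ s/\s+$//;
        my ($name, $date, $week) = split /:/, $line;
        # ... same NoneFolder / EmptyFiles checks as in the question ...
    }
    close $fh;
}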
#!/usr/bin/perl
use strict;
use warnings FATAL => 'all';
my @files = ('Read a file.pl', 'Read a single text file.pl', 'Read only one file.pl',
             'Read the file using while.pl', 'Reading the file.pl');
foreach my $i (@files) {
open(FH, '<', $i) or die "cannot open $i: $!";
{
while (my $row = <FH>) {
chomp $row;
print "$row\n";
}
}
}
The file globbing works for me. You might want to specify a scope for your @FILES variable and check that there actually are files matching the path you have specified:
#!/bin/env perl
use strict;
use warnings;
## glob on all files in home directory
## see: http://perldoc.perl.org/File/Glob.html
use File::Glob ':globally';
my @configs = <~myname/project/etc/*.cfg>;
foreach my $fn (@configs) {
print "file $fn\n";
}
Your code:
my %data;
#here are some .c files,
my @FILES = glob("../*.c");
foreach my $fn (@FILES) {
print "file $fn\n";
}
exit;
This way catches more garbage for about the same amount of code.
my $PATH = shift #ARGV ;
chomp $PATH ;
opendir(TXTFILE,$PATH) || die ("failed to opendir: $PATH") ;
my @file = readdir TXTFILE ;
closedir(TXTFILE) ;
foreach (@file) {
next unless ($_ =~ /\.txt$/i) ; # Only get .txt files
$PATH =~ s/\/$//g ; $PATH =~ s/$/\// ; # Uniform trailing slash
my $thisfile = $PATH . $_ ; # now a fully qualified filename
unless (open(THISFILE,$thisfile)) { # Notify on busted files.
warn ("$thisfile failed to open") ;
next ;
}
while(<THISFILE>) {
# etc. etc.
}
close(THISFILE) ;
}
A file is passed as an argument to my Perl script. The file can be a .txt file or a .zip file containing the .txt file.
I want to write code that looks something like this
if ($file is a zip) {
unzip $file
$file =~ s/zip$/txt/;
}
One way to check the extension is to do a split on . and then match the last result in the array (returned by split).
Is there some better way?
You can use File::Basename for this.
#!/usr/bin/perl
use 5.010;
use strict;
use warnings;
use File::Basename;
my @exts = qw(.txt .zip);
while (my $file = <DATA>) {
chomp $file;
my ($name, $dir, $ext) = fileparse($file, @exts);
given ($ext) {
when ('.txt') {
say "$file is a text file";
}
when ('.zip') {
say "$file is a zip file";
}
default {
say "$file is an unknown file type";
}
}
}
__DATA__
file.txt
file.zip
file.pl
Running this gives:
$ ./files
file.txt is a text file
file.zip is a zip file
file.pl is an unknown file type
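As an aside, given/when was later marked experimental, so on newer Perls the same dispatch is safer written as a plain if/elsif chain. A sketch under the same assumptions ($file and @exts as above):
my ($name, $dir, $ext) = fileparse($file, @exts);
if ($ext eq '.txt') {
    say "$file is a text file";
}
elsif ($ext eq '.zip') {
    say "$file is a zip file";
}
else {
    say "$file is an unknown file type";
}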
Another solution is to make use of File::Type which determines the type of binary file.
use strict;
use warnings;
use File::Type;
my $file = '/path/to/file.ext';
my $ft = File::Type->new();
my $file_type = $ft->mime_type($file);
if ( $file_type eq 'application/octet-stream' ) {
# possibly a text file
}
elsif ( $file_type eq 'application/zip' ) {
# file is a zip archive
}
This way, you do not have to deal with missing/wrong extensions.
How about checking the end of the filename?
if ($file =~ /\.zip$/i) {
and then:
use strict;
use Archive::Extract;
if ($file =~ /\.zip$/i) {
my $ae = Archive::Extract->new(archive => $file);
my $ok = $ae->extract();
my $files = $ae->files();
}
See the Archive::Extract documentation for more information.
You can check the file extension using a regex match as:
if($file =~ /\.zip$/i) {
# $file is a zip file
}
I know this question is several years old, but for anyone that comes here in the future, an easy way to break apart a file path into its constituent path, filename, basename and extension is as follows.
use File::Basename;
my $filepath = '/foo/bar.txt';
my ($basename, $parentdir, $extension) = fileparse($filepath, qr/\.[^.]*$/);
my $filename = $basename . $extension;
You can test its results with the following.
my @test_paths = (
'/foo/bar/fish.wibble',
'/foo/bar/fish.',
'/foo/bar/fish.asdf.d',
'/foo/bar/fish.wibble.',
'/fish.wibble',
'fish.wibble',
);
foreach my $this_path (@test_paths) {
print "Current path: $this_path\n";
my ($this_basename, $parentdir, $extension) = fileparse($this_path, qr/\.[^.]*$/);
my $this_filename = $this_basename . $extension;
foreach my $var (qw/$parentdir $this_filename $this_basename $extension/) {
print "$var = '" . eval($var) . "'\n";
}
print "\n\n";
}
Hope this helps.
Why rely on file extension? Just try to unzip and use appropriate exception handling:
eval {
# try to unzip the file
};
if ($@) {
# not a zip file
}
Maybe a little bit late, but it could be used as an alternative:
sub unzip_all {
my $director = shift;
opendir my $DIRH, "$director" or die;
my @files = readdir $DIRH;
foreach my $file (@files){
my $type = `file $director/$file`;
if ($type =~ m/gzip compressed data/){
system "gunzip $director/$file";
}
}
closedir $DIRH;
return;
}
Here the Linux file utility is executed from Perl using backticks (``). You can pass the path of your folder and check whether any file in it is classified by file as gzip compressed data.
If you do not mind using a perl module, you can use Module::Generic::File, such as:
use Module::Generic::File qw( file );
my $f = file( '/some/where/file.zip' );
if( $f->extension eq 'zip' )
{
# do something
}
Module::Generic::File has a lot of features to handle and manipulate a file.
This morning my friend and I discussed and wrote the code below. The idea behind this Perl script is to create the directory structure and copy the files to the corresponding directories.
#!/usr/bin/perl
use File::Path;
use File::Copy;
use Path::Class;
use File::Basename qw/dirname/;
my $src = "/Vijay/new.txt";
unless (open(MYFILE, "file1")) {
die ("cannot open input file file1\n");
}
$line = <MYFILE>;
while ($line ne "") {
print ($line);
mkdir_and_copy($src,$line);
$line = <MYFILE>;
}
sub mkdir_and_copy {
my ($from, $to) = @_;
my($directory, $filename) = $to =~ m/(.*\/)(.*)$/;
print("creating dir $directory");
system "mkdir -p $directory";
print("copying file $from to $to");
system "cp -f $from $to";
return;
}
The above piece of code creates the directory structure, but fails to copy the files to the corresponding directory. Could you please let us know where exactly we are wrong?
Contents of file1:
test/test1/test2/test.txt
Contents of new.txt:
Shell/Test/test1/test1.txt
Shell/Test/test2/test2.txt
Shell/Test/test3/test3.txt
Output:
> ./mypgm.pl
test/test1/test2/test.txt
creating dir test/test1/test2/copying file /Vijay/new.txt to test/test1/test2/test.txt
cp: cannot access /Vijay/new.txt: No such file or directory
>
The directory Vijay has the file new.txt with the above mentioned content.
Thanks in advance,
Vijay
Hello everyone,
I just modified my code. Please refer to the section of code below.
#!/usr/bin/perl
use File::Path;
use File::Copy;
use File::Basename qw/dirname/;
my $src = "./Vijay/new.txt";
unless (open(MYFILE, "file1"))
{
die ("cannot open input file file1\n");
}
$line = <MYFILE>;
while ($line ne "")
{
print ($line); print("\n");
mkdir_and_copy($src,$line);
$line = ""; }
sub mkdir_and_copy
{
my ($from, $to) = @_;
my ($directory, $filename) = $to =~ m/(.*\/)(.*)$/;
$temp = $directory.$filename;
print("Creating dirrectory $directory \n");
if(! -d $directory)
{
mkpath($directory) #or die "Failed to create path";
}
printf("From: $from \n");
printf("To: $temp \n");
copy($from,$temp) or die "Failed to Copy";
return;
}
Now it creates the exact directory structure and copies the file to the corresponding directory. Could you please tell me whether the above code is proper?
Your goal is not clear to me, but perhaps this will help you solve the problem:
# Perl scripts should always include this.
# Your original script was generating some useful warnings.
use strict;
use warnings;
my $src = "/Vijay/new.txt";
my $f1 = 'file1';
# This is the recommended way to open a file --
# that is, using a lexical file handle.
open(my $file_handle, '<', $f1) or die "open() failed : $f1 : $!";
# This is the typical way of iterating over the lines in a file.
while (my $line = <$file_handle>){
# You probably want to remove the newline
# before passing the line to mkdir_and_copy()
chomp $line;
mkdir_and_copy($src, $line);
}
sub mkdir_and_copy {
my ($from, $to) = @_;
my ($directory, $filename) = $to =~ m/(.*\/)(.*)$/;
# When writing a script that makes system() calls,
# start by simply printing them. After everything
# looks good, convert the print commands to system() calls.
print "system(): mkdir -p $directory", "\n";
print "system(): cp -f $from $to", "\n";
# The return is not needed.
}
When I run the script with the inputs you provided, here's the output:
system(): mkdir -p test/test1/test2/
system(): cp -f /Vijay/new.txt test/test1/test2/test.txt
This can't be your intent. In particular, why are you iterating over file1 when it contains only one line? Perhaps you meant to iterate over new.txt?
The first thing to do if something "doesn't work" is to catch errors and look at them, then investigate the contents of the variables. In your case the variable $to just contains the file name, so the script copies it into the current working directory, I'd imagine, not into the newly created directory.
HOWEVER, the methods you're using to get your job done are not exactly the best. It would be better to actually use File::Path and File::Copy, and in particular your way of splitting a path into directory and filename at the first slash is anything but general. This sort of thing should be done in libraries, of which Perl has many.
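A sketch of that library-based version, using File::Basename to split the target path, File::Path::make_path for the mkdir -p step, and File::Copy::copy for the copy (same $src and file1 inputs as in the question):
use strict;
use warnings;
use File::Basename qw(fileparse);
use File::Path     qw(make_path);
use File::Copy     qw(copy);

my $src = '/Vijay/new.txt';

open my $list, '<', 'file1' or die "cannot open file1: $!";
while (my $to = <$list>) {
    chomp $to;
    next unless length $to;

    my ($filename, $directory) = fileparse($to);
    make_path($directory) unless -d $directory;   # equivalent of mkdir -p
    copy($src, $to) or die "copy $src -> $to failed: $!";
}
close $list;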
I'll bet your $line variable still has a newline appended to it. The input returned from the filehandle input operator (<MYFILE>) includes the record separator (usually the newline character(s) for your OS). Try this:
$line = <MYFILE>;
chomp($line);
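So the usual pattern is to chomp immediately after each read. A minimal sketch of the read loop with that fix, reusing $src and mkdir_and_copy() from the question:
while (my $line = <MYFILE>) {
    chomp $line;                 # strip the trailing newline before using the path
    next unless length $line;    # skip blank lines
    print "$line\n";
    mkdir_and_copy($src, $line);
}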