Automate paired analysis - Perl

I have a growing number of files to process using a simple Perl script I wrote. The script takes two files as input and prints an output. I want to use a bash script (or anything really) to automate the following usage:
perl Program.pl GeneLevels_A GeneLevels_B > GeneLevels_A_B
with every paired, non-directional combination of files in a particular directory.
Here is the Perl script:
#!/usr/bin/perl
use strict;
use warnings;

die "Usage: $0 <File_1> <File_2>\n" unless @ARGV == 2;

my $file1 = shift @ARGV;
my $file2 = shift @ARGV;
my %hash1;
my %hash2;
my $counter = 0;
my $amt = 25;
my $start = 244 - $amt;

open (REF, $file1);
while (<REF>) {
    my $line = $_;
    chomp $line;
    if ($counter < $start) {
        $counter++;
        next;
    }
    my @cells = split('\t', $line);
    my $ID  = $cells[2];
    my $row = $cells[0];
    $hash1{$ID} = $row;
    $counter++;
}
close REF;

$counter = 0;

open (FILE, $file2);
while (<FILE>) {
    my $line = $_;
    chomp $line;
    if ($counter < $start) {
        $counter++;
        next;
    }
    my @cells = split('\t', $line);
    my $ID  = $cells[2];
    my $row = $cells[0];
    $hash2{$ID} = $row;
    $counter++;
}
close FILE;

while ( my ($key, $value) = each(%hash1) ) {
    if ( exists $hash2{$key} ) {
        print "$key\t$value\t$hash2{$key}\n";
    }
}
A good solution would allow me to run the Perl script on every file with an appropriate suffix.
An even better solution would assess the suffixes of existing files to determine which pairs of files have already been processed this way and omit those. For example, if File_A, File_B, File_C, and File_B_C exist, then only File_A_B and File_A_C would be produced. Note that File_A_B and File_B_A are equivalent.

This should work. Better checks for bad arguments would be a good thing to add:
#!/bin/bash

if [ $# != 2 ]; then
    echo "usage: pair <suffix1> <suffix2>"
    exit
fi

suffix1=$1
suffix2=$2

for file1 in *_${suffix1}; do
    fileCheck=$(echo $file1 | sed -e "s#_$suffix2##")
    if [ "$fileCheck" = "$file1" ]; then
        file2=${file1/_$suffix1/_$suffix2}
        if [[ ( ! -f ${file1}_${suffix2} ) && ( ! -f ${file2}_${suffix1} ) ]]; then
            echo processing ${file1}_${suffix2}
            perl Program.pl $file1 $file2 > ${file1}_${suffix2}
        fi
    fi
done
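If you would rather drive the pairing from Perl itself, here is a rough sketch of the all-pairs approach. It assumes the inputs are named GeneLevels_<suffix> with no underscore inside the suffix, so existing outputs such as GeneLevels_A_B can be recognised and skipped in either order; adjust the pattern to your real naming scheme.

#!/usr/bin/perl
use strict;
use warnings;

# Assumption: input suffixes contain no underscore, so GeneLevels_A is an
# input while GeneLevels_A_B (two underscores) is an existing output.
my @inputs = sort grep { /^GeneLevels_[^_]+$/ } glob('GeneLevels_*');

for my $i (0 .. $#inputs) {
    for my $j ($i + 1 .. $#inputs) {
        my ($f1, $f2) = @inputs[$i, $j];
        my ($s1) = $f1 =~ /^GeneLevels_(.+)$/;
        my ($s2) = $f2 =~ /^GeneLevels_(.+)$/;

        # A_B and B_A are equivalent, so skip if an output exists in either order.
        next if -e "GeneLevels_${s1}_${s2}" or -e "GeneLevels_${s2}_${s1}";

        system("perl Program.pl $f1 $f2 > GeneLevels_${s1}_${s2}") == 0
            or warn "Program.pl failed on $f1 and $f2\n";
    }
}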

Related

How to replace specific lines instead of appending to the file

I have two files: one is the user's input file and the other is the original config file. After comparing the two files, I want to apply add/delete/replace operations to the original config file.
User's input file (line by line):
add:L28A:Z:W #add--> DID ID --> Bin ID
del:L28C:B:Q:X:
rpl:L38A:B:M:D:
Original config file:
L28A:B:Q:M:X:
L28C:B:Q:M:X:
L38A:B:Q:M:X:
Based on the user's input file, the first line is an add operation, the second a delete, and the third a replace.
So the output written to the original config file should be:
L28A:B:Q:M:X:Z:W
L28C:M:
L38A:B:M:D:
But my code produces:
L28A:B:Q:M:X:
L28C:B:Q:M:X:
L38A:B:Q:M:X:
L28A:B:Q:M:X:Z:W
L28C:M:
L38A:B:M:D:
How can I replace the original three lines with the new, modified lines?
use strict;
use warnings;
use File::Copy;
use vars qw($requestfile $requestcnt $configfile $config2cnt $my3file $myfile3cnt $new_file $new_filecnt @output);

my $requestfile = "DID1.txt";   # user's input file
my $configfile  = "DID.txt";    # original config file
my $new_file    = "newDID.txt";

readFileinString($requestfile, \$requestcnt);
readFileinString($configfile, \$config2cnt);
copy($configfile, $new_file) or die "The copy operation failed: $!";

while ($requestcnt =~ m/^((\w){3})\:([^\n]+)$/mig)   # Each line from user request
{
    my $action = $1;
    my $requestFullLine = $3;

    while ($requestFullLine =~ m/^((\w){4})\:([^\n]+)$/mig)   # Each line from user request
    {
        my $DID = $1;              # DID
        my $requestBinList = $3;   # Bin List in user request
        #my @First_values = split /\:/, $requestBinList;

        if ($config2cnt =~ m/^$DID\:([^\n]+)$/m)   # configfile
        {
            my $ConfigFullLine = $1;   # Bin list in config
            my $testfile = $1;

            my @First_values  = split /\:/, $ConfigFullLine;
            my @second_values = split /\:/, $requestBinList;

            foreach my $sngletter (@second_values)   # Each line from user request
            {
                if ( grep { $_ eq "$sngletter" } @First_values )
                {
                    print " $DID - $sngletter - Existing bin..\n\n";
                }
                else
                {
                    print "$DID - $sngletter - Not existing bin..\n\n";
                }
            }

            print "Choose option 1.Yes 2.No\n";
            my $option = <STDIN>;

            if ($option == 1) {
                open(DES, '>>', $configfile) or die $!;

                if ($action eq 'add')
                {
                    $ConfigFullLine =~ s/$/$requestBinList/g;
                    my $add = "$DID:$ConfigFullLine";
                    print DES "$add\n";
                    print "New Added Bin Value $add\n\n";
                }
                if ($action eq 'del')
                {
                    foreach my $sngletter (@second_values) {
                        $ConfigFullLine =~ s/$sngletter://g;
                    }
                    print DES "$DID:$ConfigFullLine\n";
                    print "New Deleted Bin Value $DID:$ConfigFullLine\n\n";
                }
                if ($action eq 'rpl')
                {
                    my $ConfigFullLine = $requestBinList;
                    my $replace = "$DID:$ConfigFullLine";
                    print DES "$replace\n";
                    print "Replace Bin Value $replace\n\n";
                }
            }
            elsif ($option == 2)
            {
                print "Start from begining\n";
            }
            else
            {
                print "user chose invalid process or input is wrong\n";
            }
        }
        else
        {
            print "New DID $DID detected\n";
        }
    }
}

sub readFileinString
{
    my $File = shift;
    my $string = shift;
    use File::Basename;
    my $filenames = basename($File);
    open(FILE1, "<$File") or die "\nFailed Reading File: [$File]\n\tReason: $!";
    read(FILE1, $$string, -s $File, 0);
    close(FILE1);
}
The problem is here:
open(DES,'>>',$configfile) or die $!;
You open your file for appending. So you get the original data, followed by your edited data.
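A common fix, sketched below, is to write the edited content to a temporary file and rename it over the config file once everything has been written. The file names and the apply_edits helper here are placeholders for illustration, not part of the original code.

#!/usr/bin/perl
use strict;
use warnings;
use File::Copy qw(move);

# Placeholder names for illustration only.
my $configfile = 'DID.txt';
my $tmpfile    = "$configfile.tmp";

open my $in,  '<', $configfile or die "Cannot read $configfile: $!";
open my $out, '>', $tmpfile    or die "Cannot write $tmpfile: $!";

while (my $line = <$in>) {
    chomp $line;
    # apply_edits() stands in for whatever add/del/rpl logic produces
    # the new version of this line.
    print {$out} apply_edits($line), "\n";
}

close $in;
close $out;

# Replace the original only after the new file is complete.
move($tmpfile, $configfile) or die "Rename failed: $!";

sub apply_edits { return $_[0] }   # placeholder: real edit logic goes here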
Update: It appears that you have a working solution now, but I thought it might be interesting to show you how I would write this.
This program is a Unix filter. That is, it reads from STDIN and writes to STDOUT. I find that far more flexible than hard-coded filenames. You also don't have to explicitly open files - which saves time :-)
It also takes a command-line option, -e, telling it which file contains the edit definitions. So it is called like this (assuming we've called the program edit_files):
$ edit_files -e edit_definitions.txt < your_input_file > your_output_file
And here's the code.
#!/usr/bin/perl

use strict;
use warnings;

use Getopt::Std;

my %opts;
getopts('e:', \%opts);

my %edits = read_edits($opts{e});

while (<>) {
    chomp;

    my ($key, $val) = split /:/, $_, 2; #/ stop faulty syntax highlight

    if (!exists $edits{$key}) {
        print "$_\n";
        next;
    }

    my $edit = $edits{$key};

    if ($edit->[0] eq 'add') {
        print "$_$edit->[1]\n";
    } elsif ($edit->[0] eq 'del') {
        $val =~ s/$_:// for split /:/, $edit->[1]; #/
        print "$key:$val\n";
    } elsif ($edit->[0] eq 'rpl') {
        print "$key:$edit->[1]\n";
    } else {
        warn "$edit->[0] is an invalid edit type\n";
        next;
    }
}

sub read_edits {
    my $file = shift;

    open my $edit_fh, '<', $file or die $!;

    my %edits;

    while (<$edit_fh>) {
        chomp;
        # Remove comments
        s/\s*#.*//; #/
        my ($type, $key, $val) = split /:/, $_, 3; #/
        $edits{$key} = [ $type, $val ];
    }

    return %edits;
}
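For reference, with the question's sample data (the edit definitions saved as edit_definitions.txt and the original config as DID.txt, names chosen just to follow the question's example), a run looks roughly like this:

$ cat edit_definitions.txt
add:L28A:Z:W #add--> DID ID --> Bin ID
del:L28C:B:Q:X:
rpl:L38A:B:M:D:
$ edit_files -e edit_definitions.txt < DID.txt
L28A:B:Q:M:X:Z:W
L28C:M:
L38A:B:M:D: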

Error running a bash command in a Perl script

I wrote a Perl script to automate submitting a lot of jobs on an LSF-based cluster.
The problem is that, for a reason I don't understand, it splits the line containing the job command across two lines, so the job cannot run.
Here is my script:
my $controler = $ARGV[0];

open my $cont, $controler or die "Could not open $controler: $!";

my $str = qx(wc -l $controler | awk '{print $1}');
#my $str2 = system($str);
#my @count = split /\n/, $str;
#print $str;

for (my $f = 1; $f <= $str; $f++) {
    #print $count[0];
    `mkdir ~/Multiple_alignments/ELEMENTS-AUTO-$f`;
    #`mkdir ../SCORES-AUTO-$f`;
}

while ( $cont = <> ) {
    chomp;
    my @lines = split /\t/, $cont;
    my $count2 = $lines[0];
    my $cover  = $lines[1];
    my $length = $lines[2];
    my $rho    = $lines[3];

    my @files = <*maf>;
    foreach my $file (@files) {
        #print $file . "\n";
        my $base = basename($file, ".maf");
        #print "$base\n";
        #print "$cover\n";
        #print "$length\n";
        #print "$rho\n";
        print "`bsub -q q_cf_htc_work -R \"select[type==X86_64 && mem>6000]\" rusage[mem=6000] -M6000 -o /home/vasilis.lenis/Multiple_alignments/out-files/phastCons_$base.o -e /home/vasilis.lenis/Multiple_alignments/out-files/phastCons_$base.e -J phastCons$base phastCons --target-coverage $cover --expected-length $length --rho $rho --most-conserved ../ELEMENTS-AUTO-$count2/most_conserved_$base.bed --msa-format MAF $file mytrees_no3.noncons.mod --no-post-probs`\n";
    }
}
I just cannot understand why it's happening.
(Also, the awk command I have on the third line doesn't work.)
Thank you in advance,
Vasilis.
Thank you very much for your guidance.
I believe I have solved the newline problem: I was using the chomp function incorrectly. It needed to act on the variable I was reading into:
chomp($cont);
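On the awk issue: inside qx(...) the $1 is interpolated by Perl (and is empty), so awk never sees it. Escaping it as \$1, or skipping the shell entirely and counting the lines in Perl, avoids the problem. A minimal sketch of the pure-Perl approach, reusing the variable names from the script above:

#!/usr/bin/perl
use strict;
use warnings;

my $controler = $ARGV[0];
open my $cont, '<', $controler or die "Could not open $controler: $!";

# Count the lines ourselves instead of shelling out to `wc -l | awk`.
my $line_count = 0;
$line_count++ while <$cont>;
close $cont;

print "$controler has $line_count lines\n";

# If the shell really is needed, the awk field variable must be escaped
# so Perl does not interpolate it:
# my $str = qx(wc -l $controler | awk '{print \$1}');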

Perl resume download from this script

I have this Perl-based download script.
I'd like to know how to make sure that when a user downloads a file with this script, they can pause and resume the download (resumable downloads).
This is the code:
#!/usr/bin/perl
use XFSConfig;
use HCE_MD5;
use CGI::Carp qw(fatalsToBrowser);
my $code = (split('/',$ENV{REQUEST_URI}))[-2];
my $hce = HCE_MD5->new($c->{dl_key},"XFileSharingPRO");
my ($file_id,$file_code,$speed,$ip1,$ip2,$ip3,$ip4,$expire) = unpack("LA12SC4L", $hce->hce_block_decrypt(decode($code)) );
print("Content-type:text/html\n\nLink expired"),exit if time > $expire;
$speed||=500;
my $dx = sprintf("%05d",$file_id/$c->{files_per_folder});
my $ip="$ip1.$ip2.$ip3.$ip4";
$ip=~s/\.0$/.\\d+/;
$ip=~s/\.0\./.\\d+./;
$ip=~s/\.0\./.\\d+./;
$ip=~s/^0\./\\d+./;
print("Content-type:text/html\n\nNo file"),exit unless -f "$c->{upload_dir}/$dx/$file_code";
print("Content-type:text/html\n\nWrong IP"),exit if $ip && $ENV{REMOTE_ADDR}!~/^$ip/;
my $fsize = -s "$c->{upload_dir}/$dx/$file_code";
$|++;
open(my $in_fh,"$c->{upload_dir}/$dx/$file_code") || die"Can't open source file";
# unless($ENV{HTTP_ACCEPT_CHARSET}=~/utf-8/i)
# {
# $fname =~ s/([^A-Za-z0-9\-_.!~*'() ])/ uc sprintf "%%%02x",ord $1 /eg;
# $fname =~ tr/ /+/;
# }
print qq{Content-Type: application/octet-stream\n};
print qq{Content-length: $fsize\n};
#print qq{Content-Disposition: attachment; filename="$fname"\n};
print qq{Content-Disposition: attachment\n};
print qq{Content-Transfer-Encoding: binary\n\n};
$speed = int 1024*$speed/10;
my $buf;
while( read($in_fh, $buf, $speed) )
{
    print $buf;
    select(undef, undef, undef, 0.1);
}

sub decode
{
    $_ = shift;
    my( $l );
    tr|a-z2-7|\0-\37|;
    $_ = unpack('B*', $_);
    s/000(.....)/$1/g;
    $l = length;
    $_ = substr($_, 0, $l & ~7) if $l & 7;
    $_ = pack('B*', $_);
}
Thanks
To pause and resume downloads you need to handle the HTTP Range header.
Take a look at http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
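For example, in a CGI script like this one the client's Range request arrives in $ENV{HTTP_RANGE}; honouring it means answering with a 206 status, a Content-Range header, and only the requested byte span. The sketch below is a simplified illustration (placeholder file path, only simple "bytes=N-" or "bytes=N-M" ranges), not a drop-in patch for the script above.

#!/usr/bin/perl
use strict;
use warnings;

my $path  = '/path/to/stored/file';   # placeholder for the real upload path
my $fsize = -s $path;

my ($start, $end) = (0, $fsize - 1);
if (defined $ENV{HTTP_RANGE} && $ENV{HTTP_RANGE} =~ /bytes=(\d+)-(\d*)/) {
    $start = $1;
    $end   = $2 if length $2;
    print "Status: 206 Partial Content\n";
    print "Content-Range: bytes $start-$end/$fsize\n";
}

my $length = $end - $start + 1;
print "Content-Type: application/octet-stream\n";
print "Accept-Ranges: bytes\n";
print "Content-Length: $length\n\n";

open my $in_fh, '<', $path or die "Can't open source file: $!";
binmode $in_fh;
binmode STDOUT;
seek $in_fh, $start, 0;

my $buf;
my $left = $length;
while ($left > 0 and read($in_fh, $buf, $left < 8192 ? $left : 8192)) {
    print $buf;
    $left -= length $buf;
}
close $in_fh;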

How to create the next file or folder in a series of progressively numbered files?

Sorry for the bad title but this is the best I could do! :D
I have a script which creates a new project every time the specified function is called.
Each project must be stored in its own folder, with the name of the project. But, if you don't specify a name, the script will just name it "new projectX", where X is a progressive number.
With time the user could rename the folders or delete some, so every time the script runs, it checks for the smallest number available (not used by another folder) and creates the relevant folder.
Now I have managed to write a program which I think works as intended, but I would like to hear from you whether it's OK or there's something wrong that I'm unable to spot, given my inexperience with the language.
while ( defined( $file = readdir $projects_dir ) )
{
    # check for files whose name starts with "new project"
    if ( $file =~ m/^new project/i )
    {
        push( @files, $file );
    }
}

# remove letters from filenames, only the number is left
foreach $file ( @files )
{
    $file =~ s/[a-z]//ig;
}

@files = sort { $a <=> $b } @files;

# find the smallest number available
my $smallest_number = 0;

foreach $file ( @files )
{
    if ( $smallest_number != $file )
    {
        last;
    }
    $smallest_number += 1;
}

print "Smallest number is $smallest_number";
Here's a basic approach for this sort of problem:
sub next_available_dir {
    my $n = 1;
    my $d;
    $n++ while -e ($d = "new project$n");
    return $d;
}
my $project_dir = next_available_dir();
mkdir $project_dir;
If you're willing to use a naming pattern that plays nicely with Perl's string auto-increment feature, you can simplify the code further, eliminating the need for $n. For example, newproject000.
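A rough sketch of that idea, assuming directory names like newproject000 so that Perl's magic string increment applies:

sub next_available_dir {
    # 'newproject000'++ is 'newproject001', so the name itself is the counter.
    my $d = 'newproject000';
    $d++ while -e $d;
    return $d;
}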
I think I would use something like:
use strict;
use warnings;

sub new_project_dir
{
    my($base) = @_;

    opendir(my $dh, $base) || die "Failed to open directory $base for reading";
    my $file;
    my @numbers;
    while ($file = readdir $dh)
    {
        $numbers[$1] = 1 if ($file =~ m/^new project(\d+)$/);
    }
    closedir($dh) || die "Failed to close directory $base";

    my $i;
    my $max = $#numbers;
    for ($i = 0; $i < $max; $i++)
    {
        next if (defined $numbers[$i]);
        # Directory did not exist when we scanned the directory
        # But maybe it was created since then!
        my $dir = "new project$i";
        next unless mkdir "$base/$dir";
        return $dir;
    }
    # All numbers from 0..$max were in use...so try adding new numbers...
    while ($i < $max + 100)
    {
        my $dir = "new project$i";
        $i++;
        next unless mkdir "$base/$dir";
        return $dir;
    }
    # Still failed - give in...
    die "Something is amiss - all directories 0..$i in use?";
}
Test code:
my $basedir = "base";
mkdir $basedir unless -d $basedir;

for (my $j = 0; $j < 10; $j++)
{
    my $dir = new_project_dir($basedir);
    print "Create: $dir\n";
    if ($j % 3 == 2)
    {
        my $k = int($j / 2);
        my $o = "new project$k";
        rmdir "$basedir/$o";
        print "Remove: $o\n";
    }
}
Try this:
#!/usr/bin/env perl
use strict;
use warnings;

# get the current list of files
# see `perldoc -f glob` for details.
my @files = glob( 'some/dir/new\\ project*' );

# set to first name, in case there are no others
my $next_file = 'new project1';

# check for others
if( @files ){
    # a Schwartzian transform
    @files = map  { $_->[0] }               # get original
             sort { $a->[1] <=> $b->[1] }   # sort by second field, which are numbers
             map  { [ $_, do{ ( my $n = $_ ) =~ s/\D//g; $n } ] }  # pair original value with a digits-only copy
             @files;

    # last file name is the biggest
    $next_file = $files[-1];

    # add one to it
    $next_file =~ s/(.*)(\d+)$/$1.($2+1)/e;
}

print "next file: $next_file\n";
Nothing wrong per se, but that's an awful lot of code to achieve a single objective (getting the minimum index of the directories).
A core module, a couple of subs, and a few Schwartzian transforms will make the code more flexible:
use strict;
use warnings;
use List::Util 'min';

sub num { my ($name) = @_; $name =~ s|\D+||g; return $name }  # 'new project4' -> '4', 'new1_project4' -> '14' (!)

sub min_index {
    my ( $dir, $filter ) = @_;
    $filter = qr/./ unless defined $filter;   # match all if no filter specified

    opendir my $dirHandle, $dir or die $!;
    my @entries = readdir $dirHandle;         # read once, reuse below
    closedir $dirHandle;

    my $lowest_index = min                    # get the smallest ...
        map  { num($_) }                      # ... numerical value ...
        grep { -d }                           # ... from all directories ...
        grep { /$filter/ }                    # ... that match the filter ...
        @entries;                             # ... from the directory contents

    $lowest_index++ while grep { $lowest_index == num($_) } @entries;

    return $lowest_index;
}

# Ready to use!
my $index = min_index( 'some/dir', qr/^new project/ );
my $new_project_name = "new project $index";

Removing files with duplicate content from single directory [Perl, or algorithm]

I have a folder with a large number of files, some of which have exactly the same contents. I want to remove the files with duplicate contents: if two or more files with the same content are found, I'd like to keep one of them and delete the others.
The following is what I came up with, but I don't know if it works :) since I haven't tried it yet.
How would you do it? Perl or a general algorithm.
use strict;
use warnings;

my @files = <"./files/*.txt">;

my $current = 0;
while( $current <= $#files ) {
    # read contents of $files[$current] into $contents1 scalar

    my $compareTo = $current + 1;
    while( $compareTo <= $#files ) {
        # read contents of $files[$compareTo] into $contents2 scalar

        if( $contents1 eq $contents2 ) {
            splice(@files, $compareTo, 1);
            # delete $files[$compareTo] here
        }
        else {
            $compareTo++;
        }
    }
    $current++;
}
Here's a general algorithm (edited for efficiency now that I've shaken off the sleepies -- and I also fixed a bug that no one reported)... :)
It's going to take forever (not to mention a lot of memory) if I compare every single file's contents against every other. Instead, why don't we apply the same search to their sizes first, and then compare checksums for those files of identical size.
So when we calculate each file's size, we can use a hash table to do the matching for us, storing files of the same size together in arrayrefs:
use strict;
use warnings;
use Digest::MD5 qw(md5_hex);

my %files_by_size;
foreach my $file (@ARGV)
{
    push @{$files_by_size{-s $file}}, $file;   # store filename in the bucket for this file size (in bytes)
}
Now we just have to pull out the potential duplicates and check if they are the same (by creating a checksum for each, using Digest::MD5), using the same hashing technique:
while (my ($size, $files) = each %files_by_size)
{
    next if @$files == 1;

    my %files_by_md5;
    foreach my $file (@$files)
    {
        open my $filehandle, '<', $file or die "Can't open $file: $!";
        # enable slurp mode
        local $/;
        my $data = <$filehandle>;
        close $filehandle;

        my $md5 = md5_hex($data);
        push @{$files_by_md5{$md5}}, $file;   # store filename in the bucket for this MD5
    }

    while (my ($md5, $files) = each %files_by_md5)
    {
        next if @$files == 1;
        print "These files are equal: " . join(", ", @$files) . "\n";
    }
}
-fini
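The question also asks to actually delete the extras. In the inner loop above, instead of just printing the matching files, you could keep the first of each set and remove the rest; a sketch (with the unlink left commented out until you trust the output):

while (my ($md5, $files) = each %files_by_md5)
{
    next if @$files == 1;

    my ($keep, @extras) = @$files;   # keep one file, the rest are duplicates
    print "Keeping $keep, would remove: @extras\n";
    # unlink @extras or warn "Could not remove some duplicates: $!";
}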
Perl, with Digest::MD5 module.
use Digest::MD5;

%seen = ();
while( <*> ){
    -d and next;
    $filename = "$_";
    print "doing .. $filename\n";
    $md5 = getmd5($filename) . "\n";
    if ( ! defined( $seen{$md5} ) ){
        $seen{$md5} = "$filename";
    }else{
        print "Duplicate: $filename and $seen{$md5}\n";
    }
}

sub getmd5 {
    my $file = shift;
    open(FH, "<", $file) or die "Cannot open file: $!\n";
    binmode(FH);
    my $md5 = Digest::MD5->new;
    $md5->addfile(FH);
    close(FH);
    return $md5->hexdigest;
}
If Perl is not a must and you are working on *nix, you can use shell tools:
find /path -type f -print0 | xargs -0 md5sum | \
awk '($1 in seen){ print "duplicate: "$2" and "seen[$1] } \
( ! ($1 in seen ) ) { seen[$1]=$2 }'
md5sum *.txt | perl -ne '
    chomp;
    ($sum, $file) = split(" ");
    push @{$files{$sum}}, $file;
    END {
        foreach (keys %files) {
            shift @{$files{$_}};
            unlink @{$files{$_}} if @{$files{$_}};
        }
    }
'
Perl is kinda overkill for this:
md5sum * | sort | uniq -w 32 -D | cut -b 35- | tr '\n' '\0' | xargs -0 rm
(If you are missing some of these utilities or they don't have these flags/functions,
install GNU findutils and coreutils.)
Variations on a theme:
md5sum *.txt | perl -lne '
my ($sum, $file) = split " ", $_, 2;
unlink $file if $seen{$sum} ++;
'
No need to go and keep a list, just to remove one from the list and delete the rest; simply keep track of what you've seen before, and remove any file matching a sum that's already been seen. The 2-limit split is to do the right thing with filenames containing spaces.
Also, if you don't trust this, just change the word unlink to print and it will output a list of files to be removed. You can even tee that output to a file, and then rm $(cat to-delete.txt) in the end if it looks good.
A bash script is more expressive than Perl in this case:
md5sum * |sort -k1|uniq -w32 -d|cut -f2 -d' '|xargs rm
I'd recommend that you do it in Perl, and use File::Find while you're at it.
Who knows what you're doing to generate your list of files, but you might want to combine it with your duplicate checking.
perl -MFile::Find -MDigest::MD5 -e '
    my %m;
    find(sub{
        if(-f && -r){
            open(F, "<", $_);
            binmode F;
            $d = Digest::MD5->new->addfile(F);
            if(exists($m{$d->hexdigest})){
                $m{$d->hexdigest}[5]++;
                push @{$m{$d->hexdigest}[0]}, $File::Find::name;
            }else{
                $m{$d->hexdigest} = [[$File::Find::name],0,0,0,0,1];
            }
            close F;
        }
    }, ".");
    foreach $d (keys %m) {
        if ($m{$d}[5] > 1) {
            print "Probable duplicates: ".join(" , ", @{$m{$d}[0]})."\n\n";
        }
    }'
Here is a way of filtering by size first and by md5 checksum second:
#!/usr/bin/perl

use strict; use warnings;

use Digest::MD5 qw( md5_hex );
use File::Slurp;
use File::Spec::Functions qw( catfile rel2abs );
use Getopt::Std;

my %opts;
getopt('de', \%opts);
$opts{d} = '.' unless defined $opts{d};
$opts{d} = rel2abs $opts{d};

warn sprintf "Checking %s\n", $opts{d};

my $files = get_same_size_files( \%opts );
$files = get_same_md5_files( $files );

for my $size ( keys %$files ) {
    for my $digest ( keys %{ $files->{$size} } ) {
        print "$digest ($size)\n";
        print "$_\n" for @{ $files->{$size}->{$digest} };
        print "\n";
    }
}

sub get_same_md5_files {
    my ($files) = @_;

    my %out;
    for my $size ( keys %$files ) {
        my %md5;
        for my $file ( @{ $files->{$size} } ) {
            my $contents = read_file $file, {binmode => ':raw'};
            push @{ $md5{ md5_hex($contents) } }, $file;
        }
        for my $k ( keys %md5 ) {
            delete $md5{$k} unless @{ $md5{$k} } > 1;
        }
        $out{$size} = \%md5 if keys %md5;
    }
    return \%out;
}

sub get_same_size_files {
    my ($opts) = @_;

    my $checker = defined($opts->{e})
                ? sub { scalar ($_[0] =~ /\.$opts->{e}\z/) }
                : sub { 1 };

    my %sizes;
    my @files = grep { $checker->($_) } read_dir $opts->{d};

    for my $file ( @files ) {
        my $path = catfile $opts->{d}, $file;
        next unless -f $path;

        my $size = (stat $path)[7];
        push @{ $sizes{$size} }, $path;
    }

    for my $k (keys %sizes) {
        delete $sizes{$k} unless @{ $sizes{$k} } > 1;
    }

    return \%sizes;
}
You might want to have a look at how I find duplicate files and remove them, though you will have to modify it to your needs.
http://priyank.co.in/remove-duplicate-files