perl code for splitting a single file by matching character into multiple files - perl

I want to split a large data file into multiple files wherever it matches a '^' character.
#!/usr/bin/perl -w
use strict;
print "enter the data file name";
chomp( my $a=<STDIN> );
open (<READ>,"$a")| "error";
while ($line=<READ>)
{
my @array=split(" ",$line) unless ^ ;
After splitting the data file, a total of 23 files will be created.

Here's a slightly different answer from saiprathapreddy.obula's:
use warnings;
use strict;
my ($file) = @ARGV;
open(my $input, '<', $file) or die "Cannot open $file: $!";
local $/ = "^";
my $i = 0;
while(<$input>){
chomp;
$i++;
open(my $output, ">file$i.txt");
print $output "$_";
}

$,="";
$"="";
my $i=1;
open OUT ,">DATA_${i}.txt";
while(<>){
chomp;
my @F=split(/\^/);
if(@F==1){
print OUT $_,"\n";
}
elsif(@F>1){
$i++;
close OUT;
open OUT ,">DATA_${i}.txt";
print OUT "#F[1..$#F] \n";
}
}
close OUT;

Here is a cleaned-up and tested version of saiprathapreddy.obula's program.
use strict;
use warnings;
open(FILE,'AUTOSAR.txt');
local $/;
my $var = <FILE>;
close FILE;
my @arr = split('\^',$var);
my $i=0;
foreach (@arr) {
$i++;
open(FILE1,">$i.txt");
print FILE1;
close FILE1;
}

use strict;
open(FILE,'AUTOSAR.txt');
local $/;
my $var = <FILE>;
my @arr = split('\^',$var);
my $i=0;
foreach (@arr) {
$i++;
open(FILE1,">$i.txt");
print FILE1 $_;
close FILE1;
}
close FILE;


Printing value from split result Perl

Here I have an abc.txt file:
aaa,1000,kevin
bbb,2000,john
ccc,3000,jane
ddd,4000,kevin
Then I want to print out:
kevin
john
jane
My Perl script is:
open (INFILE, $ARGV[1]) or die "An input file is required as argument\n";
@store=();
while(<INFILE>)
{
chomp();
@data=split(/,/);
#
#
#
if (%store ne "0")
{
print "Printing users:\n";
foreach $key (keys %store)
{print $key."\n";}
}
print "Printing users:\n";
foreach $key (keys %store)
{print $key."\n";}
}
My idea is to store the values into a hash, creating a key for each value. How can I do that in the ### lines?
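For illustration only (this sketch is not taken from the answers below): the hash idea described above boils down to keying a hash on the third field, so duplicates collapse automatically, and then printing the keys. The filename abc.txt is the one from the question; note that hash keys come back in no particular order.
#!/usr/bin/perl
use strict;
use warnings;
open my $in, '<', 'abc.txt' or die "Cannot open abc.txt: $!";
my %store;
while (my $line = <$in>) {
    chomp $line;
    my @data = split /,/, $line;
    $store{ $data[2] } = 1;   # third field (the name) becomes the key
}
close $in;
print "Printing users:\n";
print "$_\n" for keys %store; # duplicates were collapsed by the hash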
You have declared @store but are then using %store. I don't understand why you are doing that, but the code below will give you the desired output. First read the input file, split the data, and then remove the duplicates.
use strict;
use warnings;
my $infile = $ARGV[0];
open my $fh, "<", $infile or die "An input file is required as argument: $!";
my %store;
while(my $line = <$fh>)
{
chomp($line);
my @data = split /,/, $line;
my @removeduplicate = (grep { !$store{$_}++ } @data)[2];
foreach(@removeduplicate){
if( defined $_ and $_ ne ''){
print "$_\n";
}
}
}
close $fh;
Output:
kevin
john
jane
Hmmm, it depends on what you want. Maybe this example will help you:
#!/usr/bin/perl
use strict;
use warnings;
use Data::Dumper; #for debug if you want
my $infile='abc.txt'; #or $ARGV[0], whatever it is
my $fh;
open $fh,'<',$infile or die "problem with $infile $@ $!";
my $inputline;
my %Storage;
my @Values;
while (defined($inputline=<$fh>)) {
chomp $inputline;
@Values=split ',',$inputline;
if (@Values != 3) {
warn "$inputline has formatted badly";
next;
}
#warn if exists $Storage{$Values[1]}; #optional warning for detected duplicates
$Storage{$Values[1]}=[@Values[0,2]]; #create hash data
#duplicates will be removed automatically
}
close $fh;
print Dumper \%Storage; #print how perl stores it
foreach my $Key (keys %Storage) { #example loop
print @{$Storage{$Key}},"\n"; #do anything
}
I hope this template will be enough for you.

Replacing a pattern in file without reading the whole file- Perl

I want to replace a pattern 'good' with 'bad' in my file. Currently what I am doing is:
#!/perl/bin/perl
$filename= "abc.txt";
open my $fh, $filename;
my $text = do { local( $/ ); <$fh> };
close $fh;
$text =~s/good/bad/g;
Is there any way I can do this without reading the whole file??
Edit: Suppose I know that there's only 1 'good' in the file.
P.S. Hi, I am new here. Hope I am doing it correctly.
You could use Tie::File:
#!/usr/bin/perl
use strict;
use Tie::File;
my $filename = ...;
tie my @lines, 'Tie::File', $filename or die;
for (@lines) {
s/good/bad/g and last;
}
To make sure that the whole file is not slurped in, you want to read the lines one by one, e.g. using:
#!/usr/bin/perl
use strict;
use Tie::File;
my $filename = ...;
tie my @lines, 'Tie::File', $filename or die;
for(my $i=0; ; $i++) {
last if !defined $lines[$i]; # eof
last if $lines[$i] =~ s/good/bad/g;
}
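As a side note (not part of the answer above): when the replacement can be made exactly the same length as the pattern, you can also overwrite the match in place and stop at the first hit, so nothing after the matching line is touched. Since 'bad' is one character shorter than 'good', it is padded with a space here purely for illustration; this is only a sketch and assumes that an extra space is harmless in your data (abc.txt is the filename from the question).
#!/usr/bin/perl
use strict;
use warnings;
my $filename = 'abc.txt';
open my $fh, '+<', $filename or die "Cannot open $filename: $!";
my $pos = 0;                       # byte offset of the start of the current line
while (my $line = <$fh>) {
    if ($line =~ s/good/bad /) {   # same-length replacement, so the line size is unchanged
        seek $fh, $pos, 0 or die "seek failed: $!";
        print {$fh} $line;         # overwrite just this line
        last;                      # only one 'good' in the file, so we are done
    }
    $pos = tell $fh;
}
close $fh;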

Extracting specific multiple line of records that is pipe delimited in perl

I have a file that looks like
NAME|JOHN|TOKYO|JPN
AGE|32|M
INFO|SINGLE|PROFESSIONAL|IT
NAME|MARK|MANILA|PH
AGE|37|M
INFO|MARRIED|PROFESSIONAL|BPO
NAME|SAMANTHA|SYDNEY|AUS
AGE|37|F
INFO|MARRIED|PROFESSIONAL|OFFSHORE
NAME|LUKE|TOKYO|JPN
AGE|27|M
INFO|SINGLE|PROFESSIONAL|IT
I want to separate the records by country. I have stored each line in the array variable @fields
my @fields = split(/\|/, $_ );
making $fields[3] the basis for sorting. I want it separated into two output text files:
OUTPUT TEXT FILE 1:
NAME|JOHN|TOKYO|JPN
AGE|32|M
INFO|SINGLE|PROFESSIONAL|IT
NAME|LUKE|TOKYO|JPN
AGE|27|M
INFO|SINGLE|PROFESSIONAL|IT
OUTPUT TEXT FILE 2
NAME|MARK|MANILA|PH
AGE|37|M
INFO|MARRIED|PROFESSIONAL|BPO
NAME|SAMANTHA|SYDNEY|AUS
AGE|37|F
INFO|MARRIED|PROFESSIONAL|OFFSHORE
Putting everything that is from JPN into output text file 1 and non-JPN countries into output text file 2.
Here is the code that I am trying to get to work:
use strict;
use warnings;
use Data::Dumper;
use Carp qw(croak);
my @fields;
my $tmp_var;
my $count;
my ($line, $i);
my $filename = 'data.txt';
open(my $input_fh, '<', $filename ) or croak "Can't open $filename: $!";
open(OUTPUTA, ">", 'JPN.txt') or die "wsl_reformat.pl: could not open $ARGV[0]";
open(OUTPUTB, ">", 'Non-JPN.txt') or die "wsl_reformat.pl: could not open $ARGV[0]";
my $fh;
while (<$input_fh>) {
chomp;
my @fields = split /\|/;
if ($fields[0] eq 'NAME') {
for ($i=1; $i < @fields; $i++) {
if ($fields[3] eq 'JPN') {
$fh = $_;
print OUTPUTA $fh;
}
else {
$fh = $_;
print OUTPUTB $fh;
}
}
}
}
close(OUTPUTA);
close(OUTPUTB)
Still no luck with it :(
Here is the way I think ikegami was suggesting, but I've never tried this before (although it gave the correct results).
#!/usr/bin/perl
use strict;
use warnings;
open my $jpn_fh, ">", 'o33.txt' or die $!;
open my $other_fh, ">", 'o44.txt' or die $!;
my $fh;
while (<DATA>) {
if (/^NAME/) {
if (/JPN$/) {
$fh = $jpn_fh;
}
else {
$fh = $other_fh;
}
}
print $fh $_;
}
close $jpn_fh or die $!;
close $other_fh or die $!;
__DATA__
NAME|JOHN|TOKYO|JPN
AGE|32|M
INFO|SINGLE|PROFESSIONAL|IT
NAME|MARK|MANILA|PH
AGE|37|M
INFO|MARRIED|PROFESSIONAL|BPO
NAME|SAMANTHA|SYDNEY|AUS
AGE|37|F
INFO|MARRIED|PROFESSIONAL|OFFSHORE
NAME|LUKE|TOKYO|JPN
AGE|27|M
INFO|SINGLE|PROFESSIONAL|IT
You didn't say what you needed help with, so I'm assuming it's coming up with an algorithm. Here's a good one:
Open the file to read.
Open the file for the JPN entries.
Open the file for the non-JPN entries.
While not eof,
    Read a line.
    Parse the line.
    If it's the first line of a record,
        If the person's country is JPN,
            Set current file handle to the file handle for JPN entries.
        Else,
            Set current file handle to the file handle for non-JPN entries.
    Print the line to the current file handle.
use feature qw( say );
my $jpn_qfn = '...';
my $other_qfn = '...';
open(my $jpn_fh, '>', $jpn_qfn)
or die("Can't create $jpn_qfn: $!\n");
open(my $other_fh, '>', $other_qfn)
or die("Can't create $other_qfn: $!\n");
my $fh;
while (<>) {
chomp;
my @fields = split /\|/;
if ($fields[0] eq 'NAME') {
$fh = $fields[3] eq 'JPN' ? $jpn_fh : $other_fh;
}
say $fh $_;
}
#!/usr/bin/env perl
use 5.012;
use autodie;
use strict;
use warnings;
# store per country output filehandles
my %output;
# since this is just an example, read from __DATA__ section
while (my $line = <DATA>) {
# split the fields
my @cells = split /[|]/, $line;
# if first field is NAME, this is a new record
if ($cells[0] eq 'NAME') {
# get the country code, strip trailing whitespace
(my $country = $cells[3]) =~ s/\s+\z//;
# if we haven't created an output file for this
# country yet, do so
unless (defined $output{$country}) {
open my $fh, '>', "$country.out";
$output{$country} = $fh;
}
my $out = $output{$country};
# output this and the next two lines to
# country specific output file
print $out $line, scalar <DATA>, scalar <DATA>;
}
}
close $_ for values %output;
__DATA__
NAME|JOHN|TOKYO|JPN
AGE|32|M
INFO|SINGLE|PROFESSIONAL|IT
NAME|MARK|MANILA|PH
AGE|37|M
INFO|MARRIED|PROFESSIONAL|BPO
NAME|SAMANTHA|SYDNEY|AUS
AGE|37|F
INFO|MARRIED|PROFESSIONAL|OFFSHORE
NAME|LUKE|TOKYO|JPN
AGE|27|M
INFO|SINGLE|PROFESSIONAL|IT
Thanks heaps for your help.
I was able to solve this problem in Perl.
Many thanks.
#!/usr/local/bin/perl
use strict;
use warnings;
use Data::Dumper;
use Carp qw(croak);
my @fields;
my $tmp_var;
my ($rec_type, $country);
my $filename = 'data.txt';
open (my $input_fh, '<', $filename ) or croak "Can't open $filename: $!";
open my $OUTPUTA, ">", 'o33.txt' or die $!;
open my $OUTPUTB, ">", 'o44.txt' or die $!;
my $Combline;
while (<$input_fh>) {
$_ = _trim($_);
@fields = split (/\|/, $_);
$rec_type = $fields[0];
$country = $fields[3];
if ($rec_type eq 'NAME') {
if ($country eq 'JPN') {
*Combline = $OUTPUTA;
}
else {
*Combline = $OUTPUTB;
}
}
print Combline;
}
close $OUTPUTA or die $!;
close $OUTPUTB or die $!;
sub _trim {
my $word = shift;
if ( $word ) {
$word =~ s/\s*\|/\|/g; #remove trailing spaces
$word =~ s/"//g; #remove double quotes
}
return $word;
}

Perl - Searching a file for specific text and deleting a range of lines [duplicate]

Possible Duplicate:
How do I change, delete, or insert a line in a file, or append to the beginning of a file in Perl?
How would I use Perl to open a file, look for an item that someone inputs, and, if it is found, delete from that line to 14 lines below?
Something like this will work:
#!/usr/bin/env perl
use Modern::Perl;
use IO::File;
say "Enter a pattern please: ";
chomp (my $input = <>);
my $pattern;
# check that the pattern is good
eval {
$pattern = qr ($input);
}; die $@ if $@;
my $fh = IO::File->new("test.txt", "+<") or die "$!\n";
my #lines = $fh->getlines;
$fh->seek(0,0);
for (my $pos = 0; $pos <= $#lines; ++$pos) {
if ($lines[$pos] =~ $pattern) {
$pos += 14;
} else {
print {$fh} $lines[$pos];
}
}
truncate $fh, $fh->tell; # drop any leftover bytes if lines were removed
$fh->close;
#!/usr/bin/env perl
use strict;
use warnings;
use autodie;
my $filename = 'filename.txt';
my $tmp = 'filename.txt.tmp';
print "Enter a pattern please: ";
chomp (my $input = <>);
my $pattern = qr($input)x;
open my $i_fh, '+<', $filename;
open my $o_fh, '>', $tmp;
while(<$i_fh>){
# move print here if you want to print the matching line
if( /$pattern/ ){
<$i_fh> for 1..14;
next;
}
print {$o_fh} $_ ;
}
close $o_fh;
close $i_fh;
use File::Copy;
move $tmp, $filename;

How many different ways are there to concatenate two files line by line using Perl?

Suppose file1 looks like this:
bye bye
hello
thank you
And file2 looks like this:
chao
hola
gracias
The desired output is this:
bye bye chao
hello hola
thank you gracias
I myself have already come up with five different approaches to solve this problem. But I think there must be more ways, probably more concise and more elegant ways, and I hope I can learn more cool stuff :)
The following is what I have tried so far, based on what I've learnt from the many solutions of my previous problems. Also, I'm trying to sort of digest or internalize the knowledge I've acquired from the Llama book.
Code 1:
#!perl
use autodie;
use warnings;
use strict;
open my $file1,'<','c:/file1.txt';
open my $file2,'<','c:/file2.txt';
while(defined(my $line1 = <$file1>)
and defined(my $line2 = <$file2>)){
die "Files are different sizes!\n" unless eof(file1) == eof(file2);
$line1 .= $line2;
$line1 =~ s/\n/ /;
print "$line1 \n";
}
Code 2:
#!perl
use autodie;
use warnings;
use strict;
open my $file1,'<','c:/file1.txt';
my @file1 = <$file1>;
open my $file2,'<','c:/file2.txt';
my @file2 =<$file2>;
for (my $n=0; $n<=$#file1; $n++) {
$file1[$n] .=$file2[$n];
$file1[$n]=~s/\n/ /;
print $file1[$n];
}
Code 3:
#!perl
use autodie;
use warnings;
use strict;
open my $file1,'<','c:/file1.txt';
open my $file2,'<','c:/file2.txt';
my %hash;
while(defined(my $line1 = <$file1>)
and defined(my $line2 = <$file2>)) {
chomp $line1;
chomp $line2;
my ($key, $val) = ($line1,$line2);
$hash{$key} = $val;
}
print map { "$_ $hash{$_}\n" } sort keys %hash;
Code 4:
#!perl
use autodie;
use warnings;
use strict;
open my $file1,'<','c:/file1.txt';
open my $file2,'<','c:/file2.txt';
while(defined(my $line1 = <$file1>)
and defined(my $line2 = <$file2>)) {
$line1 =~ s/(.+)/$1 $line2/;
print $line1;
}
Code 5:
#!perl
use autodie;
use warnings;
use strict;
open my $file1,'<','c:/file1.txt';
my @file1 =<$file1>;
open my $file2,'<','c:/file2.txt';
my @file2 =<$file2>;
while ((@file1) && (@file2)){
my $m = shift (@file1);
chomp($m);
my $n = shift (@file2);
chomp($n);
$m .=" ".$n;
print "$m \n";
}
I have tried something like this:
foreach $file1 (@file1) && foreach $file2 (@file2) {...}
But Perl gave me a syntax error. I was frustrated. But can we run two foreach loops simultaneously?
Thanks, as always, for any comments, suggestions and of course the generous code sharing :)
This works for any number of files:
use strict;
use warnings;
use autodie;
my @handles = map { open my $h, '<', $_; $h } @ARGV;
while (@handles){
@handles = grep { ! eof $_ } @handles;
my @lines = map { my $v = <$_>; chomp $v; $v } @handles;
print join(' ', @lines), "\n";
}
close $_ for @handles;
The most elegant way doesn't involve perl at all:
paste -d' ' file1 file2
If I were a golfing man, I could rewrite @FM's answer as:
($,,$\)=(' ',"\n");@_=@ARGV;open $_,$_ for @_;print
map{chomp($a=<$_>);$a} @_=grep{!eof $_} @_ while @_
which you might be able to turn into a one-liner but that is just evil. ;-)
Well, here it is, under 100 characters:
C:\Temp> perl -le "$,=' ';@_=@ARGV;open $_,$_ for @_;print map{chomp($a =<$_>);$a} @_=grep{!eof $_ }@_ while @_" file1 file2
If it is OK to slurp (and why the heck not — we are looking for different ways), I think I have discovered the path to insanity:
@_=@ARGV;chomp($x[$.-1]{$ARGV}=$_) && eof
and $.=0 while<>;print "@$_{@_}\n" for @x
C:\Temp> perl -e "@_=@ARGV;chomp($x[$.-1]{$ARGV}=$_) && eof and $.=0 while<>;print qq{@$_{@_}\n} for @x" file1 file2
Output:
bye bye chao
hello hola
thank you gracias
An easier alternative to your Code 5 which allows for an arbitrary number of lines and does not care if files have different numbers of lines (hat tip @FM):
#!/usr/bin/perl
use strict; use warnings;
use File::Slurp;
use List::AllUtils qw( each_arrayref );
my @lines = map [ read_file $_ ], @ARGV;
my $it = each_arrayref @lines;
while ( my @lines = grep { defined and chomp and length } $it->() ) {
print join(' ', @lines), "\n";
}
And, without using any external modules:
#!perl
use autodie; use warnings; use strict;
my ($file1, $file2) = @ARGV;
open my $file1_h,'<', $file1;
my @file1 = grep { chomp; length } <$file1_h>;
open my $file2_h,'<', $file2;
my @file2 = grep { chomp; length } <$file2_h>;
my $n_lines = @file1 > @file2 ? @file1 : @file2;
for my $i (0 .. $n_lines - 1) {
my ($line1, $line2) = map {
defined $_ ? $_ : ''
} $file1[$i], $file2[$i];
print $line1, ' ', $line2, "\n";
}
If you want to concatenate only the lines that appear in both files:
#!perl
use autodie; use warnings; use strict;
my ($file1, $file2) = @ARGV;
open my $file1_h,'<', $file1;
my @file1 = grep { chomp; length } <$file1_h>;
open my $file2_h,'<', $file2;
my @file2 = grep { chomp; length } <$file2_h>;
my $n_lines = @file1 < @file2 ? @file1 : @file2;
for my $i (0 .. $n_lines - 1) {
print $file1[$i], ' ', $file2[$i], "\n";
}
An easy one with minimal error checking:
#!/usr/bin/perl -w
use strict;
open FILE1, '<file1.txt';
open FILE2, '<file2.txt';
while (defined(my $one = <FILE1>) or defined(my $twotemp = <FILE2>)){
my $two = $twotemp ? $twotemp : <FILE2>;
chomp $one if ($one);
chomp $two if ($two);
print ''.($one ? "$one " : '').($two ? $two : '')."\n";
}
And no, you can't run two loops simultaneously within the same thread; you'd have to fork, but that would not be guaranteed to run synchronously.
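To make that last point concrete: rather than two simultaneous foreach loops, the usual Perl idiom is one loop that drives both arrays by index (or an iterator such as each_arrayref, shown in an earlier answer). A minimal sketch, with the arrays inlined here instead of read from files:
use strict;
use warnings;
my @file1 = ('bye bye', 'hello', 'thank you');
my @file2 = ('chao', 'hola', 'gracias');
# one index loop visits both arrays in step
my $last = $#file1 > $#file2 ? $#file1 : $#file2;
for my $i (0 .. $last) {
    my $left  = defined $file1[$i] ? $file1[$i] : '';
    my $right = defined $file2[$i] ? $file2[$i] : '';
    print "$left $right\n";
}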