This server works fine, but if I do this:
bash$ (echo -n "abcd" ;sleep 50 ; echo "efgh") | nc localhost 9090
the server blocks for 50 seconds. In my complete code I have more than one IO::Socket::INET: I have another socket listening on a different port (1234), and I can't process anything on that port while the server is blocked by the sleep. I tried changing getline to getc, but then I only read the first letter, "a", and it still blocks.
Can someone help me?
use common::sense;
use IO::Select;
use IO::Socket;

use constant PORT1   => 9090;
use constant TIMEOUT => 1;

my $event_socket = new IO::Socket::INET(Listen => 1, LocalPort => PORT1, ReuseAddr => 1)
    or die "Can't bind event_socket: $@\n";

my $sel = IO::Select->new;
$sel->add($event_socket);

my $event_emiter = undef;

while (1) {
    foreach my $sock (my @ready = $sel->can_read(TIMEOUT)) {
        if ($sock == $event_socket) {
            my $new = $event_socket->accept;
            binmode($new, ":encoding(UTF-8)");
            $sel->add($new);
            $event_emiter = $new;
            warn "[event socket] connect from ", $new->peerhost, "\n";
        } elsif ($sock == $event_emiter) {
            unless ($sock->eof) {
                my $recv_data = $sock->getline;
                warn "[event socket] LOL '$recv_data'\n";
            } else {
                $sel->remove($sock);
                $sock->close;
                $event_emiter = undef;
                warn "[socket] disconnect\n";
            }
        } else {
            $sel->remove($sock);
            $sock->close;
            warn "[socket] disconnect\n";
        }
    }
}
Rather than reading whatever data is available, you're reading until a newline arrives. With select, always use sysread.
Change
elsif ($sock == $event_emiter) {
    unless ($sock->eof) {
        my $recv_data = $sock->getline;
        warn "[event socket] LOL '$recv_data'\n";
    } else {
        $sel->remove($sock);
        $sock->close;
        $event_emiter = undef;
        warn "[socket] disconnect\n";
    }
}
to
elsif ($sock == $event_emiter) {
    our $buf; local *buf = \$bufs{$sock};   # alias $buf = $bufs{$sock}
    $buf //= '';
    my $rv = sysread($sock, $buf, 64*1024, length($buf));
    if (!$rv) {
        if (defined($rv)) {   # EOF
            # ... Handle anything left in $buf ...
        } else {              # Error
            # ... Handle error ...
        }
        delete $bufs{$sock};
        $sel->remove($sock);
        $sock->close;
        $event_emiter = undef;
        warn "[socket] disconnect\n";
        next;
    }
    while ($buf =~ s/^(.*)\n//) {
        warn "[event socket] LOL '$1'\n";
    }
}
And add my %bufs; outside the select loop.
It blocks here:
my $recv_data = $sock->getline;
This is because getline() is a blocking call that waits for a \n. Instead, you should use sysread() and assemble each command in a separate per-connection buffer.
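A minimal sketch of that idea, using $sel and $sock from the question's code (the %buf_for name and the 64 KiB read size are mine, not from the original post):

my %buf_for;    # per-connection receive buffers, declared outside the select loop

# inside the can_read loop, for a readable client $sock:
$buf_for{$sock} //= '';
my $n = sysread($sock, $buf_for{$sock}, 64 * 1024, length $buf_for{$sock});
if (!defined $n) {
    warn "read error: $!\n";                  # error
} elsif ($n == 0) {
    delete $buf_for{$sock};                   # EOF: the peer closed
    $sel->remove($sock);
    $sock->close;
} else {
    while ($buf_for{$sock} =~ s/^(.*)\n//) {  # pull out complete lines only
        my $command = $1;
        # ... handle one complete command ...
    }
}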
Related
For some reason I am implementing some specific network protocol similar to STOMP in plain pure Perl.
The connection can be either a direct network socket, or an SSL tunnel provided by openssl s_client created by a call to open3 (no IO::Socket::SSL available on the host).
Depending on the dialog, a request to the server may or may not have a response, or may have multiple responses. How can I test the file descriptors for the existence of data? Currently, when no data is available, it waits until the defined timeout.
EDIT: I probably have a vocabulary issue between file handle and file descriptor that hampers my research. I just found that eof() may help, but I cannot use it correctly yet.
While it is a bit complicated to provide an SSCCE, here are the interesting parts of the code:
# creation of a direct socket connection
sub connect_direct_socket {
    my ($host, $port) = @_;
    my $sock = new IO::Socket::INET(PeerAddr => $host,
                                    PeerPort => $port,
                                    Proto    => 'tcp') or die "Can't connect to $host:$port\n";
    $sock->autoflush(1);
    say STDERR "* connected to $host port $port" if $args{verbose} || $args{debug};

    return $sock, $sock, undef;
}
# for HTTPS, we are "cheating" by creating a tunnel with OpenSSL in s_client mode
my $tunnel_pid;

sub connect_ssl_tunnel {
    my ($dest) = @_;
    my ($host, $port);
    $host = $dest->{host};
    $port = $dest->{port};

    my $cmd = "openssl s_client -connect ${host}:${port} -servername ${host} -quiet";  # -quiet -verify_quiet -partial_chain
    $tunnel_pid = open3(*CMD_IN, *CMD_OUT, *CMD_ERR, $cmd);
    say STDERR "* connected via OpenSSL to $host:$port" if $args{verbose} || $args{debug};
    say STDERR "* command = $cmd" if $args{debug};

    $SIG{CHLD} = sub {
        print STDERR "* REAPER: status $? on ${tunnel_pid}\n" if waitpid($tunnel_pid, 0) > 0 && $args{debug};
    };
    return *CMD_IN, *CMD_OUT, *CMD_ERR;
}
# later
($OUT, $IN, $ERR) = connect_direct_socket($url->{host}, $url->{port});
# or
($OUT, $IN, $ERR) = connect_ssl_tunnel($url);
# then I am sending with a
print $OUT $request;
# and read the response with
my $selector = IO::Select->new();
$selector->add($IN);
FRAME:
while (my @ready = $selector->can_read($args{'max-wait'} || $def_max_wait)) {
    last unless @ready;
    foreach my $fh (@ready) {
        if (fileno($fh) == fileno($IN)) {
            my $buf_size = 1024 * 1024;
            my $block = $fh->sysread(my $buf, $buf_size);
            if ($block) {
                if ($buf =~ s/^\n*([^\n].*?)\n\n//s) {
                    # process data here
                }
                if ($buf =~ s/^(.*?)\000\n*//s) {
                    goto EOR;
                    # next FRAME;
                }
            }
            $selector->remove($fh) if eof($fh);
        }
    }
}
EOR:
EDIT 2 and epilogue
As a summary, depending on the protocol dialog:
a request can have an expected response (for instance a CONNECT must return a CONNECTED)
a request to get the pending messages can return a single response, multiple responses at once (without an intermediate request), or no response (and in this case Ikegami's can_read() with no parameter blocks, which is what I want to avoid).
Thanks to Ikegami I have changed my code as follows:
the timeout argument to can_read() is passed as an argument to the sub that processes the responses
for initial connections I am passing a timeout of several seconds
when I expect instant responses I am passing a timeout of 1 second
in the process loop, after any correct response I replace the initial timeout with 0.1 so the loop does not block when no more data is waiting in the filehandle
Here is my updated code:
sub process_stomp_response {
    my $IN      = shift;
    my $timeout = shift;

    my $resp     = [];
    my $buf;                     # allocate the buffer once and not in loop - thanks Ikegami!
    my $buf_size = 1024 * 1024;

    my $selector = IO::Select->new();
    $selector->add($IN);

  FRAME:
    while (1) {
        my @ready = $selector->can_read($timeout);
        last FRAME unless @ready;    # empty array = timed-out
        foreach my $fh (@ready) {
            if (fileno($fh) == fileno($IN)) {
                my $bytes = $fh->sysread($buf, $buf_size);
                # if bytes undef -> error, if 0 -> eof, else number of read bytes
                my %frame;
                if (defined $bytes) {
                    if ($bytes) {
                        if ($buf =~ s/^\n*([^\n].*?)\n\n//s) {
                            # process frame headers here
                            # [...]
                        }
                        if ($buf =~ s/^(.*?)\000\n*//s) {
                            # process frame body here
                            # [...]
                            push @$resp, \%frame;
                            $timeout = 0.1;    # for next read short timeout
                            next FRAME;
                        }
                    } else {
                        # EOF
                        $selector->remove($fh);
                        last FRAME;
                    }
                } else {
                    # something is wrong
                    say STDERR "Error reading STOMP response: $!";
                }
            } else {
                # what? not the given fh
            }
        }
    }
    return $resp;
}
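For example, a usage sketch matching the timeouts described above ($connect_frame and $subscribe_frame are placeholders for whatever frames the protocol needs; $OUT and $IN come from the connect subs earlier):

print $OUT $connect_frame;
my $frames = process_stomp_response($IN, 5);    # initial connection: allow several seconds

print $OUT $subscribe_frame;
$frames = process_stomp_response($IN, 1);       # instant responses expected: 1 second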
Do not use eof in conjunction with select (which can_read wraps). It performs a buffered read, which breaks select.
select will mark a handle as ready for reading when it reaches EOF, and sysread returns zero on EOF. So all you need to do to detect EOF is to check for sysread returning zero.
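In code, the check is just this (a minimal illustration; $fh and $buf stand for a readable handle and its accumulated buffer):

my $rv = sysread( $fh, $buf, 64*1024, length( $buf ) );
if ( defined( $rv ) && $rv == 0 ) {
    # EOF: the peer closed its end of the connection.
}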
Note that using a new buffer for every pass was a mistake: sysread can easily return only part of a message. The following fixes this, and shows how to handle errors and EOF from sysread.
Globals:
my %clients_by_fd;
When you get a new connection:
$selector->add( $fh );
$clients_by_fd{ fileno( $fh ) } = {
    buf => "",
    # Any other info you want here.
};
Event loop:
while ( 1 ) {
    my @ready = $selector->can_read();
    for my $fh ( @ready ) {
        my $client  = $clients_by_fd{ fileno( $fh ) };
        my $buf_ref = \$client->{ buf };

        my $rv = sysread( $fh, $$buf_ref, 1024*1024, length( $$buf_ref ) );
        if ( !$rv ) {
            if ( defined( $rv ) ) {
                # EOF
                if ( length( $$buf_ref ) ) {
                    warn( "Error reading: Incomplete message\n" );
                }
            } else {
                # Error
                warn( "Error reading: $!\n" );
            }

            delete $clients_by_fd{ fileno( $fh ) };
            $selector->remove( $fh );
        }

        while ( $$buf_ref =~ s/^.*?\n\n//s ) {
            process_message( $client, $& );
        }
    }
}
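process_message is left to the application; a hypothetical stub, just to make the example self-contained:

sub process_message {
    my ( $client, $msg ) = @_;
    # $msg is one complete message terminated by "\n\n"; parse it here.
    warn "message from client: $msg";
}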
I have a Perl script for a Unix socket server, server.pl:
use IO::Select;
use IO::Socket;

$lsn = new IO::Socket::INET(Listen => 512, LocalPort => 8888);

my $socket_path = '/tmp/punix.sock';
unlink($socket_path);

$SIG{PIPE} = 'IGNORE';
$|++;

$lsn = IO::Socket::UNIX->new(
    Type   => SOCK_STREAM,
    Local  => $socket_path,
    Listen => 512,
) or die("Can't create server socket: $!\n");

$sel = new IO::Select( $lsn );

while (@ready = $sel->can_read) {
    foreach $fh (@ready) {
        if ($fh == $lsn) {
            # Create a new socket
            $new = $lsn->accept;
            $sel->add($new);
        }
        else {
            # Process socket
            my $input = <$fh>;
            #........ do some work
            #
            # Maybe we have finished with the socket
            $sel->remove($fh);
            $fh->close;
        }
    }
}
Clients connect to the socket in parallel and get results.
This works fine and fast for the first few connections, say 60 out of 100; after that, the remaining 40 connections are processed slowly, about one per second.
server.pl doesn't seem to have any leaks or other issues.
What could be the reason? I've tried Event::Lib as well, with the same issue.
This might relate to my $input = <$fh>;. That's wrong: it blocks until a newline is received. With select, you can only safely use sysread.
our $buf; local *buf = \$bufs{$fh};   # Creates alias $buf for $bufs{$fh}
$buf //= '';
my $rv = sysread($fh, $buf, 64*1024, length($buf));
if (!defined($rv)) {
    ... handle error ...
    next;
}
if (!$rv) {
    ... handle eof ...
    next;
}

while ($buf =~ s/^(.*)\n//) {
    my $line = $1;
    ...
}
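To slot that into server.pl above: declare the buffer hash before the can_read loop and put the block in the else branch in place of my $input = <$fh>; (a sketch using the question's variable names; %bufs is the only new name):

my %bufs;    # one partial-line buffer per client

while (@ready = $sel->can_read) {
    foreach $fh (@ready) {
        if ($fh == $lsn) {
            $new = $lsn->accept;
            $sel->add($new);
        }
        else {
            # the sysread / line-assembly block from above goes here;
            # only call $sel->remove($fh) and $fh->close on EOF or error
        }
    }
}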
I am trying to implement a simple echo client/server over a Unix socket. (My ultimate goal is to exchange JSON data, but this example keeps things simple.) I have no idea why the client process disappears into a black hole when it tries to print to the socket the second time.
server.pl:
use IO::Socket::UNIX;

my $socket_path = '/tmp/mysocket';
unlink $socket_path if -e $socket_path;

my $socket = IO::Socket::UNIX->new(
    Local  => $socket_path,
    Type   => SOCK_STREAM,
    Listen => SOMAXCONN,
);
die "Can't create socket: $!" unless $socket;

while (1) {
    next unless my $connection = $socket->accept;
    chomp( my $line = <$connection> );
    print $connection "$line\n";
}
client.pl:
use IO::Socket::UNIX;

my $socket = IO::Socket::UNIX->new(
    Type => SOCK_STREAM,
    Peer => '/tmp/mysocket',
);
die "Can't create socket: $!" unless $socket;

my $line;

print $socket "one\n";
chomp( $line = <$socket> );
say $line;

print $socket "two\n";
chomp( $line = <$socket> );
say $line;

say "three";
Expected output:
> ./client.pl
> one
> two
> three
Actual output:
> ./client.pl
> one
You put the $socket->accept call inside your while loop. After your server establishes a connection and reads a single line from the client, the next thing it wants to do is establish a new connection, so the first client's second request is never read: the client blocks waiting for a reply while the server blocks waiting for a new connection.
Move the accept call outside the while loop:
my $connection = $socket->accept;
$connection->autoflush(1);
while (my $line = <$connection>) {
    chomp($line);
    print $connection "$line\n";
}
or, if you do want to accept more than one connection,
while (1) {
    next unless my $connection = $socket->accept;
    $connection->autoflush(1);
    while (my $line = <$connection>) {
        chomp($line);
        print $connection "$line\n";
    }
}
Your current solution will also likely be "suffering from buffering", so both the server and the client should call autoflush(1) on their socket handles.
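For example, in client.pl right after the constructor (the handle name is from the question):

my $socket = IO::Socket::UNIX->new(
    Type => SOCK_STREAM,
    Peer => '/tmp/mysocket',
);
die "Can't create socket: $!" unless $socket;
$socket->autoflush(1);    # flush each print to the server immediately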
Now to handle simultaneous connections, the server would usually call fork after getting a connection, and handling that connection in a child process.
while (1) {
    my $connection = $socket->accept;
    if (fork() == 0) {
        $connection->autoflush(1);
        while (my $line = <$connection>) {
            chomp($line);
            print $connection "$line\n";
        }
        close $connection;
        exit;
    }
}
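Two details worth adding to a fork-per-connection server like this (my additions, not part of the original answer): reap exited children so they don't accumulate as zombies, and have the parent close its copy of the connection after forking.

$SIG{CHLD} = 'IGNORE';            # let the kernel reap finished children automatically

while (1) {
    my $connection = $socket->accept or next;
    if (fork() == 0) {
        # ... child handles the connection as above, then exits ...
        exit;
    }
    close $connection;            # parent's copy; the child keeps its own handle
}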
A complete bi-directional example for anyone looking:
Server:
#! /usr/bin/perl -w

use strict;
use IO::Socket::UNIX qw( SOCK_STREAM SOMAXCONN );

my $SOCK_PATH = '/tmp/test.sock';
unlink($SOCK_PATH) if -e $SOCK_PATH;

my $server = IO::Socket::UNIX->new(
    Type   => SOCK_STREAM(),
    Local  => $SOCK_PATH,
    Listen => SOMAXCONN,
) or die("Can't create server socket: $!\n");

while (1) {
    my $connection = $server->accept;
    if (fork() == 0) {
        print "** New connection received **\n";
        $connection->autoflush(1);
        my $count = 1;
        while (my $line = <$connection>) {
            if ($line) {
                chomp($line);
                $connection->print($count . ' -> ' . $line . "\n");    # Sent response back to client, \n terminates
                print "Received and replied to $count '$line'\n";
                $count++;
            }
        }
        close $connection;
        exit;
    }
}
Client:
#!/usr/bin/perl -w

use strict;
use IO::Socket::UNIX qw( SOCK_STREAM );

my $SOCK_PATH = '/tmp/test.sock';

my $client = IO::Socket::UNIX->new(
    Type => SOCK_STREAM(),
    Peer => $SOCK_PATH,
) or die("Can't connect to server: $!\n");
$client->autoflush(1);

## Listen for replies
if (fork() == 0) {
    while (my $line = <$client>) {
        if ($line) {
            chomp($line);
            print("Recv: '" . $line . "'\n");
        }
    }
}

## Send something
for my $itm ('Alpha', 'Beta', 'Gamma', 'Delta') {
    print("Send: " . $itm . "\n");
    print($client $itm . "\n") or warn("Can't send: $!\n");    # send to server, \n terminates
}

print "** Client Finished **\n";
I'm trying to emulate a TCP server on the same PC where the app is running.
I don't know if it can be done in Perl because I'm not very experienced.
With the code below the first reply is working, but I don't know how to implement the second.
#!/usr/bin/perl -w

use IO::Socket::INET;
use strict;

my $socket = IO::Socket::INET->new('LocalPort' => '3000',
                                   'Proto'     => 'tcp',
                                   'Listen'    => SOMAXCONN)
    or die "Can't create socket ($!)\n";

print "Server listening\n";

while (my $client = $socket->accept) {
    my $name = gethostbyaddr($client->peeraddr, AF_INET);
    my $port = $client->peerport;

    while (<$client>) {
        print "$_";
        print $client "RESPONSE1";
    }

    close $client
        or die "Can't close ($!)\n";
}

die "Can't accept socket ($!)\n";
EDIT: Thank you guys for the input. I ended up doing it with PHP and it's working, yay!
Use Net::Server for the connection, and a variable in the sub to keep the current state ($state in this code); something like this:
package MyServer;
use base qw/Net::Server/;
use strict;
use warnings;

sub process_request {
    my $self  = shift;
    my $state = 0;
    while (<STDIN>) {
        s/\r?\n$//;    # like chomp but for crlf too
        if ($state == 0 and $_ eq 'data1') {
            print "> okay1\n";
            $state++;
        } elsif ($state == 1 and $_ eq 'data2') {
            print "> okay2\n";
            $state++;
        } else {
            last if $state == 2;
            $state = 0;
        }
    }
}

my $port = shift || 3000;
MyServer->run( port => $port );
The example in the Net::Server POD suggests using an alarm to time out connections, which might be appropriate here (a sketch follows the session transcript below). With the code above, a session looks like this:
$ nc localhost 3000
data1
> okay1
data2
> okay2
data3
$
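Here is a sketch of that alarm-based timeout wrapped around the same state machine (the 30-second value is only an example):

sub process_request {
    my $self  = shift;
    my $state = 0;
    eval {
        local $SIG{ALRM} = sub { die "timed out\n" };
        alarm(30);                     # disconnect clients idle for 30 seconds
        while (<STDIN>) {
            s/\r?\n$//;
            # ... same state handling as above ...
            alarm(30);                 # reset the clock on every line received
        }
        alarm(0);
    };
    print "> timed out\n" if $@ && $@ eq "timed out\n";
}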
And if you need to move to a forking / preforking / non-blocking / co-routine driven system, there's a Net::Server personality for that.
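For example, switching the base class is enough to get a preforking variant (Net::Server::PreFork ships with the Net::Server distribution):

use base qw/Net::Server::PreFork/;    # instead of qw/Net::Server/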
"ready to go" code:
package MyServer;
use base qw/Net::Server/;
use strict;
use warnings;

sub process_request {
    my $self  = shift;
    my $state = 0;
    $| = 1;
    binmode *STDIN;
    while (read(*STDIN, local $_, 3)) {
        if ($state == 0 and $_ eq "\x{de}\x{c0}\x{ad}") {
            print "\x{c4}\x{1a}\x{20}\x{de}";
            $state++;
        } elsif ($state == 1 and $_ eq "\x{18}\x{c0}\x{0a}") {
            print "\x{11}\x{01}\x{73}\x{93}";
            $state++;
            last;
        }
    }
}

my $port = shift || 3000;
MyServer->run( port => $port );
It seems to me the process_request sub doesn't work correctly when a low port is set (in my case, port 23). In particular, only with a low port, while parsing the input data, the first request contains additional characters (everything is fine with subsequent requests).
Do you have any tips? Thank you
I have a TCP server in Perl (revision 5 version 8 subversion 0, i.e. Perl 5.8.0) which uses this subroutine to log to a file.
sub logger {
    return if ($LOGFILE eq "");
    my ($idt, $str) = @_;
    unless ( defined($str) ) {
        $str = $idt;
        $idt = '';
    }
    my ($s,$m,$h,$J,$M,$A) = localtime(time());
    if (!open(OUT, ">>$LOGFILE")) {
        warn "logger:error open [$LOGFILE]:[$!]\n";
        return;
    }
    if (!printf OUT "%4.4d/%2.2d/%2.2d %2.2d:%2.2d:%2.2d %s %s -> %s\n",
                $A+1900, $M+1, $J, $h, $m, $s, $idt, $HOSTNAME, $str) {
        warn "logger:error print [$LOGFILE]:[$!]\n";
        return;
    }
    if (!close(OUT)) {
        warn "logger:error close [$LOGFILE]:[$!]\n";
        return;
    }
}
So there is simultaneous writing to this log file.
Occasionally, I get this error on STDERR:
printf() on closed filehandle
logger:error print [/my/path/logFile.LOG]:[Bad File Descriptor]
without any error on open!
How can I resolve this problem?
OUT is a package global, so any other code in the process that reopens or closes OUT (including a re-entrant call to logger from a signal handler) can invalidate the handle between your open and your printf. Assuming upgrading to a real logging system such as Log4perl is out of the question, you can use a lexical file handle called $OUT:
sub logger {
    $LOGFILE or return;
    my ($idt, $str) = @_;
    unless ( defined($str) ) {
        $str = $idt;
        $idt = '';
    }
    my ($s,$m,$h,$J,$M,$A) = localtime(time());

    my $OUT;
    unless (open $OUT, '>>', $LOGFILE) {
        warn "logger:error open [$LOGFILE]:[$!]\n";
        return;
    }
    unless (printf $OUT
        "%4.4d/%2.2d/%2.2d %2.2d:%2.2d:%2.2d %s %s -> %s\n",
        $A+1900, $M+1, $J, $h, $m, $s, $idt, $HOSTNAME, $str
    ) {
        warn "logger:error print [$LOGFILE]:[$!]\n";
        return;
    }
    unless (close $OUT) {
        warn "logger:error close [$LOGFILE]:[$!]\n";
        return;
    }
    return 1;
}
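Calls stay exactly the same with either version, e.g.:

logger('conn-42', 'client connected');    # with an identifier
logger('server started');                 # identifier defaults to ''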