Using select with multiple sockets - sockets

I am using lwip with one thread (based on FreeRTOS, where I only want one task). There, I have 2 sockets listening for incoming connections. Therefore, I want to use select to wait for incoming new connections and also I don't want to block while waiting. Therefore, after creating both sockets, binding and listening I go to my infinite loop:
#define TOTAL_MASTERS 2
fd_set current_sockets, ready_sockets;
struct timeval timeout;,
int master1_fd, master2_fd;
struct sockaddr_in address;
// Master 1
if ((master1_fd = lwip_socket(AF_INET, SOCK_STREAM, 0)) < 0) {
xil_printf("TCP server: Error creating Socket\r\n");
return;
}
// Set up to wait for subscribers
memset(&address, 0, sizeof(address));
address.sin_family = AF_INET;
address.sin_port = htons(port1);
address.sin_addr.s_addr = INADDR_ANY;
if (n=bind(master1_fd, (struct sockaddr *)&address, sizeof (address)) < 0) {
port = port11;
xil_printf("[err: %d] TCP server: Unable to bind to port %d\r\n",n, ((port&0xFF)<<8) | ((port>>8)&0xFF));
close(sock);
return;
}
if (n=listen(master1_fd, 1) < 0) {
xil_printf("[err: %d] TCP server: tcp_listen failed\r\n", n);
close(master1_fd);
return;
}
// Master 2
if ((master2_fd = lwip_socket(AF_INET, SOCK_STREAM, 0)) < 0) {
xil_printf("TCP server: Error creating Socket\r\n");
return;
}
// Set up to wait for subscribers
memset(&address, 0, sizeof(address));
address.sin_family = AF_INET;
address.sin_port = htons(port2);
address.sin_addr.s_addr = INADDR_ANY;
if (n=bind(master2_fd, (struct sockaddr *)&address, sizeof (address)) < 0) {
port = port22;
xil_printf("[err: %d] TCP server: Unable to bind to port %d\r\n",n, ((port&0xFF)<<8) | ((port>>8)&0xFF));
close(sock);
return;
}
if (n=listen(master2_fd, 1) < 0) {
xil_printf("[err: %d] TCP server: tcp_listen failed\r\n", n);
close(master2_fd);
return;
}
FD_ZERO(&current_sockets);
FD_SET(master1_fd, &current_sockets);
FD_SET(master2_fd, &current_sockets);
timeout.tv_sec = 0;
timeout.tv_usec = 5000; // 5ms for timeout
while(1)
{
FD_ZERO(&ready_sockets);
ready_sockets = current_sockets;
if(sret = select(TOTAL_PUBLISHERS, &current_sockets, NULL, NULL, &timeout) == 0)
{
//timeout
//xil_printf("Select time out: %d\n", sret);
}
else
{
xil_printf("Something happened: %d\n", sret);
for(int i=0; i<TOTAL_MASTERS; i++){
if(FD_ISSET(i, &ready_sockets)){
if(i == publisher_FPGA_ROS_mymsg.socket_to_wait_for_subscribers_fd){
if ((new_sd = lwip_accept(master1_fd, (struct sockaddr *)&remote, (socklen_t *)&size)) > 0){
if ((read_bytes = lwip_recvfrom(new_sd, message, ARR_SIZE, 0, NULL, NULL)) > 0) {
xil_printf("New client on master 1:\n%s", message);
}
}
}
if(i == publisher_FPGA_ROS_geometry_msgs_point.socket_to_wait_for_subscribers_fd){
if ((new_sd = lwip_accept(master2_fd, (struct sockaddr *)&remote, (socklen_t *)&size)) > 0){
if ((read_bytes = lwip_recvfrom(new_sd, message, ARR_SIZE, 0, NULL, NULL)) > 0) {
xil_printf("New client on master 2:\n%s", message);
}
}
}
}
}
}
}
The problem that I have is that with a timeout it does not react to incoming new clients but always times out. If I change &timeout in select to NULL, then I get incoming connections but only on master1.
Is it possible to wait for incoming connections on more than one socket using only one thread or task?
Thanks for the help.

There are a number of mistakes in your code.
You are mixing lwip and C socket functions. Use only lwip functions for consistency.
Your bind(), listen(), and select() expressions are missing required parentheses, since < and == have a higher precedence than =. You are using parentheses correctly on the lwip_socket(), lwip_accept() and lwip_recvfrom() expressions, though. But really, it is generally considered bad practice to perform assignment and comparison in the same expression; you really should break those up into separate expressions.
You are passing the wrong max descriptor value to the 1st parameter of select(). It needs to be +1 more than the highest socket descriptor you are select()'ing.
You are passing your master list current_sockets to the 2nd parameter of select() instead of passing your ready_sockets copy. So current_sockets is being modified and may not contain all of the listening sockets anymore on the next call to select(). After a few calls, it is likely to end up completely empty.
Your FD_ISSET() check is wrong, too. You are checking file descriptors 0 and 1, which are not the listening sockets you created. You don't need the for loop to check master1_fd and master2_fd, you can pass them as-is to FD_ISSET().
With that said, try this instead:
fd_set current_sockets, ready_sockets;
struct timeval timeout;,
int master1_fd, master2_fd;
struct sockaddr_in address, remote;
socklen_t size;
// Master 1
master1_fd = lwip_socket(AF_INET, SOCK_STREAM, 0);
if (master1_fd < 0) {
xil_printf("[err: %d] TCP server: Error creating Socket\r\n", errno);
return;
}
// Set up to wait for subscribers
memset(&address, 0, sizeof(address));
address.sin_family = AF_INET;
address.sin_port = htons(port1);
address.sin_addr.s_addr = INADDR_ANY;
n = lwip_bind(master1_fd, (struct sockaddr *)&address, sizeof (address));
if (n < 0) {
xil_printf("[err: %d] TCP server: Unable to bind to port %hd\r\n", errno, port1);
lwip_close(master1_fd);
return;
}
n = lwip_listen(master1_fd, 1);
if (n < 0) {
xil_printf("[err: %d] TCP server: tcp_listen failed\r\n", errno);
lwip_close(master1_fd);
return;
}
// Master 2
master2_fd = lwip_socket(AF_INET, SOCK_STREAM, 0);
if (master2_fd < 0) {
xil_printf("[err: %d] TCP server: Error creating Socket\r\n", errno);
lwip_close(master1_fd);
return;
}
// Set up to wait for subscribers
memset(&address, 0, sizeof(address));
address.sin_family = AF_INET;
address.sin_port = htons(port2);
address.sin_addr.s_addr = INADDR_ANY;
n = lwip_bind(master2_fd, (struct sockaddr *)&address, sizeof (address));
if (n < 0) {
xil_printf("[err: %d] TCP server: Unable to bind to port %hd\r\n", errno, port2);
lwip_close(master2_fd);
lwip_close(master1_fd);
return;
}
n = lwip_listen(master2_fd, 1);
if (n < 0) {
xil_printf("[err: %d] TCP server: tcp_listen failed\r\n", errno);
lwip_close(master2_fd);
lwip_close(master1_fd);
return;
}
FD_ZERO(&current_sockets);
FD_SET(master1_fd, &current_sockets);
FD_SET(master2_fd, &current_sockets);
int max_fd;
if (master1_fd > master2_fd)
max_fd = master1_fd;
else
max_fd = master2_fd;
while (1)
{
FD_ZERO(&ready_sockets);
ready_sockets = current_sockets;
timeout.tv_sec = 0;
timeout.tv_usec = 5000; // 5ms for timeout
sret = lwip_select(max_fd+1, &ready_sockets, NULL, NULL, &timeout);
if (sret < 0)
{
//error
//xil_printf("Select error: %d\n", errno);
}
else if (sret == 0)
{
//timeout
//xil_printf("Select time out\n");
}
else
{
xil_printf("Something happened\n");
if (FD_ISSET(master1_fd, &ready_sockets)){
size = sizeof (remote);
new_sd = lwip_accept(master1_fd, (struct sockaddr *)&remote, &size)
if (new_sd > 0){
read_bytes = lwip_recv(new_sd, message, ARR_SIZE, 0);
if (read_bytes > 0) {
xil_printf("New client on master 1:\n%.*s", read_bytes, message);
}
}
}
if (FD_ISSET(master2_fd, &ready_sockets)){
size = sizeof (remote);
new_sd = lwip_accept(master2_fd, (struct sockaddr *)&remote, &size);
if (new_sd > 0){
read_bytes = lwip_recv(new_sd, message, ARR_SIZE, 0);
if (read_bytes > 0) {
xil_printf("New client on master 2:\n%.*s", read_bytes, message);
}
}
}
}
}
However, you still have a big logic hole in your code. You are leaking client sockets, as you never close() the socket descriptors that lwip_accept() returns. And you need to select() the accepted sockets to know when they have data available to be read.
So try something more like this instead:
fd_set ready_sockets;
struct timeval timeout;
int master1_fd, master2_fd, max_fd, curr_fd, i;
int[] sockets; // <-- PSEUDO-CODE!!
struct sockaddr_in address, remote;
socklen_t size;
// Master 1
master1_fd = lwip_socket(AF_INET, SOCK_STREAM, 0);
if (master1_fd < 0) {
xil_printf("[err: %d] TCP server: Error creating Socket\r\n", errno);
return;
}
// Set up to wait for subscribers
memset(&address, 0, sizeof(address));
address.sin_family = AF_INET;
address.sin_port = htons(port1);
address.sin_addr.s_addr = INADDR_ANY;
n = lwip_bind(master1_fd, (struct sockaddr *)&address, sizeof (address));
if (n < 0) {
xil_printf("[err: %d] TCP server: Unable to bind to port %hd\r\n", errno, port1);
lwip_close(master1_fd);
return;
}
n = lwip_listen(master1_fd, 1);
if (n < 0) {
xil_printf("[err: %d] TCP server: tcp_listen failed\r\n", errno);
lwip_close(master1_fd);
return;
}
// Master 2
master2_fd = lwip_socket(AF_INET, SOCK_STREAM, 0);
if (master2_fd < 0) {
xil_printf("[err: %d] TCP server: Error creating Socket\r\n", errno);
lwip_close(master1_fd);
return;
}
// Set up to wait for subscribers
memset(&address, 0, sizeof(address));
address.sin_family = AF_INET;
address.sin_port = htons(port2);
address.sin_addr.s_addr = INADDR_ANY;
n = lwip_bind(master2_fd, (struct sockaddr *)&address, sizeof (address));
if (n < 0) {
xil_printf("[err: %d] TCP server: Unable to bind to port %hd\r\n", errno, port2);
lwip_close(master2_fd);
lwip_close(master1_fd);
return;
}
n = lwip_listen(master2_fd, 1);
if (n < 0) {
xil_printf("[err: %d] TCP server: tcp_listen failed\r\n", errno);
lwip_close(master2_fd);
lwip_close(master1_fd);
return;
}
sockets.add(master1_fd); // <-- PSEUDO-CODE!!!
sockets.add(master2_fd); // <-- PSEUDO-CODE!!!
while (1)
{
max_fd = -1;
for (int i = 0; i < sockets.length; ++i){ // <-- PSEUDO-CODE!!!
curr_fd = sockets[i];
FD_SET(curr_fd, &ready_sockets);
if (curr_fd > max_fd){
max_fd = curr_fd;
}
}
timeout.tv_sec = 0;
timeout.tv_usec = 5000; // 5ms for timeout
sret = lwip_select(max_fd+1, &ready_sockets, NULL, NULL, &timeout);
if (sret < 0)
{
//error
//xil_printf("Select error: %d\n", errno);
}
else if (sret == 0)
{
//timeout
//xil_printf("Select time out\n");
}
else
{
xil_printf("Something happened\n");
i = 0;
while (i < sockets.length){ // <-- PSEUDO-CODE!!!
curr_fd = sockets[i];
if (FD_ISSET(curr_fd, &ready_sockets)){
if ((curr_fd == master1_fd) || (curr_fd == master2_fd)){
size = sizeof (remote);
new_sd = lwip_accept(curr_fd, (struct sockaddr *)&remote, &size)
if (new_sd > 0){
xil_printf("New client on master %d:\n", (curr_fd == master1_fd) ? 1 : 2);
sockets.add(new_sd); // <-- PSEUDO-CODE!!!
}
}
else{
read_bytes = lwip_recv(curr_fd, message, ARR_SIZE, 0);
if (read_bytes > 0) {
xil_printf("%.*s", read_bytes, message);
}
else {
lwip_close(curr_fd);
sockets.remove(i); // <-- PSEUDO-CODE!!!
continue;
}
}
}
++i;
}
}
}
for(i = 0; i < sockets.length; ++i){ // <-- PSEUDO-CODE!!!
lwip_close(sockets[i]);
}
Regarding the PSEUDO-CODE portions of the above code, you did not indicate whether you are using C or C++. In C++, you could simply use a std::vector for the sockets list. But in C, you would have to decide whether you want to use a fixed array with a max capacity or use a dynamically sized array. I didn't really feel like writing a bunch of extra code to manage the list. I leave that as an exercise for you to figure out. How you store the sockets outside of the fd_set being select()'ed is not important to this topic.

Related

TCP connection fails at recv() function

I'm a newbie in socket programming.
I'm trying to interface in TCP between server and client by following a certain protocol.
After the connection is established, the communication starts by client sending a certain packet to the server. My program fails at the recv() function at client side.
Let me share you the client side of my code:
iResult = WSAStartup(MAKEWORD(2, 2), &wsaData);
if (iResult != 0)
{
printf("WSAStartup failed with error: %d\n", iResult);
return 1;
}
ZeroMemory(&hints, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
iResult = getaddrinfo(LOCAL_IP, "8081", &hints, &result);
if (iResult != 0)
{
printf("getaddrinfo failed with error: %d\n", iResult);
WSACleanup();
return 1;
}
// Attempt to connect to an address until one succeeds
for (ptr = result; ptr != NULL; ptr = ptr->ai_next)
{
// Create a SOCKET for connecting to server
ConnectSocket = socket(ptr->ai_family, ptr->ai_socktype,
ptr->ai_protocol);
if (ConnectSocket == INVALID_SOCKET)
{
printf("socket failed with error: %ld\n", WSAGetLastError());
WSACleanup();
return 1;
}
// Connect to server.
iResult = connect(ConnectSocket, ptr->ai_addr, (int)ptr->ai_addrlen);
if (iResult == SOCKET_ERROR)
{
closesocket(ConnectSocket);
ConnectSocket = INVALID_SOCKET;
continue;
}
break;
}
MSG_PDDAU_INFO_PACKET* ps8ptr = new MSG_PDDAU_INFO_PACKET;
memset(ps8bufrecv, 0, DEFAULT_BUFLEN);
s32packetlen = sizeof(MSG_PDDAU_INFO_PACKET);
printf("size %d\n", sizeof(MSG_PDDAU_INFO_PACKET));
iResult = recv(ConnectSocket, ps8bufrecv, s32packetlen, 0);
if (iResult == -1)
{
printf("Here !\n");
fprintf(stderr, "recv Error Occurred %s (%d)\n", strerror(errno), errno);
return -1;
}
memcpy(ps8ptr,ps8bufrecv,sizeof(MSG_PDDAU_INFO_PACKET));
And here is the server side of the code:
nErrorStatus = WSAStartup(wVersionRequested, &wsaData);
serv_sock = socket(PF_INET, SOCK_STREAM, 0);
if(serv_sock == -1)
{
printf( "socket() Error..Error --> Code %d ", WSAGetLastError() );
error_handling("socket error");
}
memset(&serv_addr, 0, sizeof(serv_addr));
serv_addr.sin_family = PF_INET;
serv_addr.sin_addr.s_addr = inet_addr(ip);
serv_addr.sin_port = htons(CLIENT_PORT_NUMBER);
if(bind(serv_sock, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) == -1)
{
printf( "bind() Error..Error --> Code %d ", WSAGetLastError() );
error_handling("bind error");
}
if(listen(serv_sock, 5) == -1)
{
printf( "listen() Error..Error --> Code %d ", WSAGetLastError() );
error_handling("listen error");
}
clnt_addr_size = sizeof(clnt_addr);
clnt_sock = accept(serv_sock, (struct sockaddr*)&clnt_addr, &clnt_addr_size);
if(clnt_sock == -1)
{
printf( "accept() Error..Error --> Code %d ", WSAGetLastError() );
error_handling("accept error");
}
MSG_PDDAU_INFO_PACKET* ps8ptr = new MSG_PDDAU_INFO_PACKET;
ps8ptr->header.msg_id = MSG_PDDAU_INFO_ID;
ps8ptr->header.msg_type = MSG_REQUEST_TYPE;
ps8ptr->time_enable = 0x01;
s32packetlen = sizeof(MSG_PDDAU_INFO_PACKET);
memset(ps8bufsend, 0, DEFAULT_BUFLEN);
memcpy(ps8bufsend,ps8ptr, s32packetlen);
iResult = send(clnt_sock, ps8bufsend, s32packetlen, 0);
if (iResult == -1)
{
fprintf(stderr, "send Error Occurred %s (%d)\n", strerror(errno), errno);
return -1;
}
The code keeps failing at the first recv/send interface after the connection, and the error message looks like below.
Server side:
Time msg id : 5
Time setting failed
Client side:
recv Error Occurred Invalid argument (22)
I don't know what's the correct way to communicate in my case.
It'd be great if anyone can point out my problem here.

Unable to connect to local server in VPN

I have tried below code in multiple iOS, MacOS.
This is server code
/*
 * Server thread entry point: creates a TCP listening socket bound to
 * 10.10.10.20:27042 and accepts clients in an endless loop, logging each
 * connection.  Returns NULL on any setup failure (after perror()).
 *
 * thread_id is unused; it is only the pthread start-routine argument.
 */
void *run_server(void *thread_id) {
    (void)thread_id; // unused
    int server_fd, new_socket;
    struct sockaddr_in server, client;
    int opt = 1;
    socklen_t addrlen = sizeof(client); // accept() wants the size of 'client', as socklen_t
    // Creating socket file descriptor.
    // BUG FIX: socket() returns -1 on failure (0 is a valid descriptor),
    // so test with < 0 rather than == 0.
    if ((server_fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
        perror("socket");
        return NULL;
    }
    // Allow quick rebinding of the port after a restart.
    if (setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt))) {
        perror("setsockopt");
        close(server_fd);
        return NULL;
    }
    memset(&server, 0, sizeof(server)); // zero sin_zero padding too
    server.sin_family = AF_INET;
    server.sin_addr.s_addr = ip("10.10.10.20"); // bind IP (project helper; presumably returns network byte order -- TODO confirm)
    server.sin_port = htons(27042);
    // Forcefully attaching socket to the port
    if (bind(server_fd, (struct sockaddr *)&server, sizeof(server)) < 0) {
        perror("bind");
        close(server_fd);
        return NULL;
    }
    if (listen(server_fd, 3) < 0) {
        perror("listen");
        close(server_fd);
        return NULL;
    }
    // BUG FIX: sin_port is in network byte order; convert for display with
    // ntohs(), not htons() (equivalent on little-endian, but wrong intent).
    printf("Server is running with %u:%d\n", server.sin_addr.s_addr, ntohs(server.sin_port));
    while (1) {
        if ((new_socket = accept(server_fd, (struct sockaddr *)&client, &addrlen)) < 0) {
            perror("accept");
            close(server_fd);
            return NULL;
        }
        // BUG FIX: convert the client port for display as well.
        printf("Client connected with %u:%d\n", client.sin_addr.s_addr, ntohs(client.sin_port));
        // BUG FIX: the original never closed accepted sockets, leaking one
        // descriptor per client until the table filled up.
        close(new_socket);
    }
}
This is client code
/*
 * Attempt a single TCP connection to ip:port and report the result.
 * 'ip' is an IPv4 address in network byte order; 'port' is in host order.
 * The socket is always closed before returning.
 */
void scan(unsigned int ip, int port) {
    int sock;
    struct sockaddr_in serv_addr;
    if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
        printf("\nSocket creation error\n");
        return;
    }
    memset(&serv_addr, 0, sizeof(serv_addr)); // BUG FIX: zero sin_zero padding (was uninitialized)
    serv_addr.sin_family = AF_INET;
    serv_addr.sin_port = htons(port);
    serv_addr.sin_addr.s_addr = ip;
    printf("IP %u, port %d\n", ip, port);
    // connect() returns 0 on success and -1 on failure, so one check
    // replaces the original's redundant (!= 0) and (< 0) pair.
    if (connect(sock, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) != 0) {
        char errorMsg[256];
        strerror_r(errno, errorMsg, 256);
        printf("Error %s\n", errorMsg);
    }
    // BUG FIX: close on success as well; the original leaked one
    // descriptor per successful connection.
    close(sock);
}
/*
 * Scanner thread entry point: tries to connect to 10.10.10.20:27042 once
 * every second, forever.  'thread_id' is the unused pthread argument;
 * the trailing return satisfies the pthread start-routine signature.
 */
void *run_scan(void *thread_id) {
    for (;;) {
        scan(ip("10.10.10.20"), 27042);
        usleep(1000000); /* pause one second between attempts */
    }
    return NULL;
}
And the main function
/*
 * Program entry point: launches the listening server and the connection
 * scanner on separate threads, then parks the main thread forever.
 */
int main(int argc, const char * argv[]) {
    pthread_t server_thread;
    pthread_t scan_thread;
    /* Thread 1: TCP server bound to the VPN interface address. */
    if (pthread_create(&server_thread, NULL, run_server, (void *)1) != 0) {
        printf("Can't create thread run_server");
    }
    /* Thread 2: periodic client that probes the server. */
    if (pthread_create(&scan_thread, NULL, run_scan, (void *)2) != 0) {
        printf("Can't create thread run_scan");
    }
    /* Keep the process alive; the worker threads never exit. */
    for (;;) {
        sleep(1);
    }
    return 0;
}
Problem description:
This code is working normally with local IP (DHCP assigned IP). However when I connect to VPN and bind server IP to PPP network interface IP which is 10.10.10.20 (as above example code). I could not be able to open connect to that IP. The error is "Connection timeout". Note, the other host can still be able to connect to the server with IP 10.10.10.20.
Appreciate if any knowledge sharing regarding this situation.
Found the answer, basically this is a restriction imposed by the Linux kernel.
More info: https://www.softether.org/4-docs/1-manual/B._Troubleshooting_and_Supplemental/11.1_Troubleshooting

Why is the client's file descriptor used while calling the recv and send functions on both server and client sides?

TCPServer
#include<stdio.h>
#include<sys/types.h>
#include<sys/socket.h>
#include<netinet/ip.h>
#include<netinet/in.h>
#include<string.h>
#include<stdlib.h>
/*
 * Minimal TCP chat server: listens on a user-supplied port, accepts a
 * single client, then alternates receiving and sending text messages
 * until either side sends "bye".
 */
int main()
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if(fd == -1)
    {
        printf("socket failed!\n");
        exit(0);
    }
    printf("Enter port: ");
    int port;
    scanf("%d",&port);
    struct sockaddr_in server;
    memset(&server, 0, sizeof(server)); // zero sin_zero padding
    server.sin_family = AF_INET;
    server.sin_port = htons(port);
    server.sin_addr.s_addr = INADDR_ANY;
    int bind_ret = bind(fd, (struct sockaddr*)(&server), sizeof(server));
    if(bind_ret == -1)
    {
        printf("bind failed!\n");
        exit(0);
    }
    int listen_ret = listen(fd, 10);
    if(listen_ret == -1)
    {
        printf("listen failed!\n");
        exit(0);
    }
    struct sockaddr_in client;
    socklen_t l = sizeof(client); // BUG FIX: accept() requires socklen_t, not int
    int client_fd = accept(fd, (struct sockaddr*)(&client), &l);
    if(client_fd == -1)
    {
        printf("accept failed!\n");
        exit(0);
    }
    while(1)
    {
        char msg_recv[50];
        // BUG FIX: read at most sizeof-1 bytes; receiving a full buffer
        // made msg_recv[recv_ret] write one past the end of the array.
        int recv_ret = recv(client_fd, msg_recv, sizeof(msg_recv)-1, 0);
        if(recv_ret == -1)
        {
            printf("recv failed!\n");
            exit(0);
        }
        // BUG FIX: recv() returning 0 means the peer closed the
        // connection; the original looped on an empty message instead.
        if(recv_ret == 0)
        {
            exit(0);
        }
        msg_recv[recv_ret]='\0';
        if(strcmp("bye",msg_recv)==0)
        {
            exit(0);
        }
        printf("Message recieved: %s\n",msg_recv);
        char msg_send[50];
        printf("Enter message: ");
        scanf(" %49s",msg_send); // bounded read: prevent overflow of msg_send[50]
        int send_ret = send(client_fd, msg_send, strlen(msg_send),0);
        // BUG FIX: send() signals failure with -1, not 0.
        if(send_ret == -1)
        {
            printf("send failed!\n");
        }
        if(strcmp("bye",msg_send) == 0)
            exit(0);
    }
}
TCPClient
#include<stdio.h>
#include<sys/socket.h>
#include<sys/types.h>
#include<netinet/ip.h>
#include<netinet/in.h>
#include<string.h>
#include<stdlib.h>
/*
 * Minimal TCP chat client: connects to the local host on a user-supplied
 * port, then alternates sending and receiving text messages until either
 * side sends "bye".
 */
int main()
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if(fd == -1)
    {
        printf("socket failed!\n");
        exit(0);
    }
    int port;
    printf("Enter port number: ");
    scanf("%d",&port);
    struct sockaddr_in client;
    memset(&client, 0, sizeof(client)); // zero sin_zero padding
    client.sin_family = AF_INET;
    client.sin_port = htons(port);
    client.sin_addr.s_addr = INADDR_ANY; // NOTE(review): connects to 0.0.0.0, treated as localhost on most stacks -- confirm intent
    int connect_ret = connect(fd, (struct sockaddr*)(&client), sizeof(client));
    if(connect_ret == -1)
    {
        printf("connect failed!\n");
        exit(0);
    }
    while(1)
    {
        printf("Enter message: ");
        char msg_send[50];
        scanf("%49s",msg_send); // bounded read: prevent overflow of msg_send[50]
        int send_ret = send(fd, msg_send, strlen(msg_send), 0);
        if(send_ret == -1)
        {
            printf("send failed!\n");
            exit(0);
        }
        if(strcmp("bye", msg_send)==0)
        {
            exit(0);
        }
        char msg_recv[50];
        // BUG FIX: read at most sizeof-1 bytes; receiving a full buffer
        // made msg_recv[recv_ret] write one past the end of the array.
        int recv_ret = recv(fd, msg_recv, sizeof(msg_recv)-1, 0);
        if(recv_ret == -1)
        {
            printf("recv failed!\n");
            exit(0);
        }
        // BUG FIX: recv() returning 0 means the server closed the
        // connection; treat it like "bye" instead of looping.
        if(recv_ret == 0)
            exit(0);
        msg_recv[recv_ret]= '\0';
        if(strcmp("bye", msg_recv) == 0)
            exit(0);
        printf("Message recieved: %s \n",msg_recv);
    }
}
In the above program for Server, recv and send were called by passing client_fd as the argument, while in the program for Client, recv and send were called by passing fd as the argument. I wanted to know why on the server side we did not use its own socket file descriptor like we did on the client side?
The server’s fd descriptor is a listen()’ing socket. It can’t perform any I/O, only receive incoming client connections. accept() pulls a pending client connection from fd’s queue and returns a new socket descriptor that can perform I/O with that client.
The client’s fd descriptor is a connect()‘ing socket. It can perform I/O with the server once its connection has been accepted.

In socket programming using C language, how to set time limit on server for accepting client to establish connection?

I'm developing an application which involves 1 client and 1 server.
I want the server to listen for only 5 seconds for a connection. If the client is not attempting to establish a connection then the sever should stop listening and return an error message. If the client attempts to establish a connection then the server should accept the connection.
The server is listening forever if the client is not making an attempt to establish a connection. I want the server to listen for only 5 seconds, how can this be achieved?
This is the server-side output - server is waiting for client forever:
/*
 * Create a TCP listening socket on PORT, wait up to 5 seconds for a
 * client to connect, then hand the accepted connection to receive().
 * Exits the process on any failure or on timeout.
 *
 * PORT, SA (sockaddr typedef) and receive() are project-level names
 * defined elsewhere in this file's translation unit.
 */
void reception(){
    int sockfd, connfd;
    socklen_t len; // BUG FIX: accept() requires socklen_t, not int
    struct sockaddr_in servaddr, cli;
    sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd == -1) {
        printf("socket creation failed...\n");
        exit(0);
    }
    else
        printf("Socket successfully created..\n");
    bzero(&servaddr, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
    servaddr.sin_port = htons(PORT);
    if ((bind(sockfd, (SA*)&servaddr, sizeof(servaddr))) != 0) {
        printf("socket bind failed...\n");
        exit(0);
    }
    else
        printf("Socket successfully binded..\n");
    if ((listen(sockfd, 5)) != 0) {
        printf("Listen failed...\n");
        exit(0);
    }
    else
        printf("Server listening..\n");
    // FIX: accept() blocks forever in blocking mode, so wait with
    // select() BEFORE calling accept().  If no connection is pending
    // within 5 seconds, give up instead of listening forever.
    fd_set rds;
    FD_ZERO(&rds);
    FD_SET(sockfd, &rds);
    struct timeval timeout;
    timeout.tv_sec = 5;
    timeout.tv_usec = 0;
    int res = select(sockfd + 1, &rds, NULL, NULL, &timeout);
    if (res <= 0) {
        if (res < 0)
            printf("select failed...\n");
        else
            printf("Time out...\n");
        exit(0);
    }
    len = sizeof(cli);
    connfd = accept(sockfd, (SA*)&cli, &len);
    if (connfd < 0) {
        printf("server acccept failed...\n");
        exit(0);
    }
    else
        printf("server acccept the client...\n");
    receive(connfd);
    close(connfd); // BUG FIX: also close the accepted connection, not just the listener
    close(sockfd);
}
You are using the listening socket in blocking mode, so accept() will block the calling thread until a client connects. To use a timeout, call select() or (e)poll() first to wait for the listening socket to enter a readable state indicating a connection is pending BEFORE you then call accept().
For example:
...
fd_set rds;
FD_ZERO(&rds);
FD_SET(sockfd, &rds);
struct timeval timeout;
timeout.tv_sec = 5;
timeout.tv_usec = 0;
int res;
if ((res = select(sockfd+1, &rds, NULL, NULL, &timeout)) <= 0)
{
if (res < 0)
printf("select failed...\n");
else
printf("Time out...\n");
exit(0);
}
connfd = accept(sockfd, (SA*)&cli, &len);
...
...
struct pollfd pfd;
pfd.fd = sockfd;
pfd.events = POLLIN;
pfd.revents = 0;
int res;
if ((res = poll(&pfd, 1, 5000)) <= 0)
{
if (res < 0)
printf("poll failed...\n");
else
printf("Time out...\n");
exit(0);
}
connfd = accept(sockfd, (SA*)&cli, &len);
...
int epfd = epoll_create(1);
if (epfd < 0)
{
printf("epoll_create failed...\n");
exit(0);
}
struct epoll_event ev;
ev.events = EPOLLIN;
ev.data.fd = sockfd;
if (epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev) < 0)
{
printf("epoll_ctl failed...\n");
exit(0);
}
struct epoll_event event;
int res;
if ((res = epoll_wait(epfd, &event, 1, 5000)) <= 0)
{
if (res < 0)
printf("epoll_ctl failed...\n");
else
printf("Time out...\n");
exit(0);
}
close(epfd);
connfd = accept(sockfd, (SA*)&cli, &len);
...
Either way, note that there is a very small race condition created by these scenarios. If the client connects and then disconnects before accept() is called, accept() may block waiting for a new pending connection, or it may return a valid file descriptor that fails on subsequent I/O calls. There is no guarantee one way or the other.
To solve the race condition, you can either:
put the listening socket into non-blocking mode. If select() reports the listening socket has a pending connection, but accept() is not able to accept it right away, then you can act accordingly.
use accept() on a listening socket in blocking mode, as you originally were, but use alarm() to schedule accept() to be interrupted if it doesn't exit before 5 seconds have elapsed. See this answer for an example.

Why does bind return the same ephemeral port?

I have a problem where I create two UDP sockets, bind them to the loopback address with port 0 (requesting the stack to assign an ephemeral port). My understanding is that both sockets should be on different ports. In the code example below, both sockets are reported to be on the same IP address and port.
#include <stdio.h>
#include <arpa/inet.h>
/*
 * Demonstrates binding two UDP sockets to 127.0.0.1 with port 0 so the
 * stack assigns ephemeral ports, then prints the address of each.
 *
 * The reason both sockets ended up on the SAME port: after
 * getsockname(fd1), 'addr' held fd1's assigned port, and fd2 was then
 * bound to that explicit port — which SO_REUSEADDR permits.  sin_port is
 * reset to 0 before the second bind so fd2 gets its own ephemeral port.
 */
int main(int, char**)
{
    int fd1 = ::socket(AF_INET, SOCK_DGRAM, 0);
    if (fd1 < 0)
    {
        perror("fd1 socket()");
        return -1;
    }
    int fd2 = ::socket(AF_INET, SOCK_DGRAM, 0);
    if (fd2 < 0)
    {
        perror("fd2 socket()");
        return -1;
    }
    // Set SO_REUSEADDR for both sockets
    int reuse = 1;
    if (::setsockopt(fd1, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse)) < 0)
    {
        perror("fd1 SO_REUSEADDR failed");
        return -1;
    }
    if (::setsockopt(fd2, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse)) < 0)
    {
        perror("fd2 SO_REUSEADDR failed");
        return -1;
    }
    sockaddr_storage storage;
    std::memset(&storage, 0, sizeof(storage)); // zero the whole struct (incl. sin_zero)
    socklen_t addrlen = sizeof(storage);
    sockaddr_in& addr = reinterpret_cast<sockaddr_in&>(storage);
    addr.sin_family = AF_INET;
    addr.sin_port = 0; // BUG FIX: removed dead "sin_port = 1234" store; 0 requests an ephemeral port
    if (::inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr) <= 0)
    {
        perror("Failed to create address 127.0.0.1");
        return -1;
    }
    sockaddr* pAddr = reinterpret_cast<sockaddr*>(&storage);
    if (::bind(fd1, pAddr, addrlen) < 0)
    {
        perror("bind fd1 failed");
        return -1;
    }
    // Get the local address for fd1
    addrlen = sizeof(storage);
    if (::getsockname(fd1, pAddr, &addrlen))
    {
        perror("getsockname for fd1 failed");
        return -1;
    }
    char straddr[INET_ADDRSTRLEN];
    if (!inet_ntop(AF_INET, &addr.sin_addr, straddr, sizeof(straddr)))
    {
        perror("inet_ntop for fd1 failed");
        return -1;
    }
    // BUG FIX: sin_port is network byte order; print with ntohs().
    printf("fd1=%d addr=%s:%d\n", fd1, straddr, ntohs(addr.sin_port));
    // BUG FIX: 'addr' now contains fd1's assigned port (from getsockname),
    // so binding fd2 with it as-is requested the SAME port.  Reset to 0
    // so the stack assigns fd2 a fresh ephemeral port.
    addr.sin_port = 0;
    addrlen = sizeof(storage);
    if (::bind(fd2, pAddr, addrlen) < 0)
    {
        perror("bind fd2 failed");
        return -1;
    }
    // Get the local address for fd2
    addrlen = sizeof(storage);
    if (::getsockname(fd2, pAddr, &addrlen))
    {
        perror("getsockname for fd2 failed");
        return -1;
    }
    if (!inet_ntop(AF_INET, &addr.sin_addr, straddr, sizeof(straddr)))
    {
        perror("inet_ntop for fd2 failed");
        return -1;
    }
    printf("fd2=%d addr=%s:%d\n", fd2, straddr, ntohs(addr.sin_port));
    return 0;
}
This code gives the following output ...
fd1=4 addr=127.0.0.1:1933
fd2=5 addr=127.0.0.1:1933
I need both sockets on the same (local) IP address, but different ports. Can anyone explain why both sockets share the same port? Can anyone suggest a fix?
That is the expected behavior for SO_REUSEADDR on a UDP socket. Remove that setting to return to normal allocation rules.