error 9 Bad file descriptor error using sockets in python

I am trying to implement a very basic client/server example in Python using non-blocking sockets. I have made two threads, one for reading and one for writing.
My client code is below.
    import sys
    import socket
    from time import sleep
    from _thread import *
    import threading

    global s

    def writeThread():
        while True:
            data = str(input('Please input the data you want to send to client 2 ( to end connection type end ) : '))
            data = bytes(data, 'utf8')
            print('You are trying to send : ', data)
            s.sendall(data)

    def readThread():
        while True:
            try:
                msg = s.recv(4096)
            except socket.timeout as e:
                sleep(1)
                print('recv timed out, retry later')
                continue
            except socket.error as e:
                # Something else happened, handle error, exit, etc.
                print(e)
                sys.exit(1)
            else:
                if len(msg) == 0:
                    print('orderly shutdown on server end')
                    sys.exit(0)
                else:
                    # got a message do something :)
                    print('Message is : ', msg)

    if __name__ == '__main__':
        global s
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('', 6188))
        s.settimeout(2)
        wThread = threading.Thread(None, writeThread)
        rThread = threading.Thread(None, readThread)
        wThread.start()
        rThread.start()
        s.close()
Question:
I know this can be implemented with the select module too, but I would like to know how to do it this way.

Your main thread creates the socket, then starts wThread and rThread. Then it closes the socket (and exits, because the program ends after that). So when wThread and rThread try to use the socket, it is no longer open; hence EBADF (errno 9, Bad file descriptor).
Your main thread should not close the socket while the other threads are still running. It could wait for them to end:
    [...]
    s.settimeout(2)
    wThread = threading.Thread(None, writeThread)
    rThread = threading.Thread(None, readThread)
    wThread.start()
    rThread.start()
    wThread.join()
    rThread.join()
    s.close()
However, since the main thread has nothing better to do than wait, it might be better to create only one additional thread (say rThread), then have the main thread take over the task currently performed by the other, i.e.:
    [...]
    s.settimeout(2)
    rThread = threading.Thread(None, readThread)
    rThread.start()
    writeThread()
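One detail the answer leaves open: in this second variant the socket is never closed, because writeThread() loops forever. Below is a minimal sketch of a clean shutdown, using the 'end' keyword that the input prompt already mentions as the exit condition (this is my addition, not part of the original answer):

    def writeThread():
        while True:
            data = input('Please input the data you want to send to client 2 ( to end connection type end ) : ')
            if data == 'end':
                break  # leave the loop so the main thread can shut the socket down
            s.sendall(bytes(data, 'utf8'))

    rThread = threading.Thread(None, readThread)
    rThread.start()
    writeThread()                  # returns once the user types 'end'
    s.shutdown(socket.SHUT_RDWR)   # readThread's recv() now returns b'', its orderly-shutdown path
    rThread.join()
    s.close()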

Related

Unable to capture MQTT log callback

I am having trouble getting the on_log callback to trigger. I have used it in other programs without problems, but this one is being difficult. I have included the relevant code snippets (I hope!). All other callbacks are working fine. This program isn't threaded (except MQTT.start), so there aren't any other actions. Any suggestions on where to look would be appreciated. FWIW, the problem I'm trying to track down is that MQTT stops responding after a few hours. The MQTT server is on a separate server, is used by numerous other processes, and has no known issues.
    # Set up MQTT - wait until we have an ipaddr so we know the network has been started
    logger.debug("Waiting for ip address to be assigned")
    while True:
        ipaddr = get_local_IP()
        if ipaddr is not None:
            logger.info('IP address is {}'.format(ipaddr))
            break
        sleep(2.0)

    logger.debug("Waiting for MQTT broker connection")
    mqttc = mqtt.Client()
    mqttc.on_message = on_message
    mqttc.on_connect = on_connect
    mqttc.on_publish = on_publish
    mqttc.on_subscribe = on_subscribe
    while True:
        try:
            mqttc.connect("192.168.0.18", 1884, 30)
        except IOError as e:
            if e.errno != errno.ENETUNREACH:
                raise
            logger.warning('Network error - retrying')
            sleep(15)
            continue
        logger.debug('Connect initiated without error')
        break
    mqttc.loop_start()
    mqttc.on_log = on_log
    while not MQ_link:
        sleep(1)
    def on_connect(mqttc, obj, flags, rc):
        global MQ_link
        logger.debug("Connected: rc = " + str(rc))
        if rc == 0:
            MQ_link = True

    def on_log(mqttc, obj, level, string):
        verb = string.split('(')[0].strip()  # e.g. "Sending PINGREQ (...)" -> "Sending PINGREQ"
        if verb not in ['Sending PINGREQ', 'Received PINGRESP']:
            logger.debug('LOG: ' + string)
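One thing worth checking (a suggestion of mine, not from the original thread): the code assigns mqttc.on_log only after loop_start(), so log entries emitted while connecting are missed. A minimal sketch, assuming the standard paho-mqtt client API:

    import logging
    import paho.mqtt.client as mqtt

    logger = logging.getLogger(__name__)

    def on_log(mqttc, obj, level, string):
        logger.debug('LOG: ' + string)

    mqttc = mqtt.Client()
    mqttc.on_log = on_log          # assign before connect()/loop_start() so early entries are caught
    # mqttc.enable_logger(logger)  # alternative: route client logging straight into `logger`
    mqttc.connect("192.168.0.18", 1884, 30)
    mqttc.loop_start()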

tornado PeriodicCallback and socket operations inside callback

I am trying to make a non-blocking web application with Tornado.
That application uses PeriodicCallback as a scheduler for grabbing data from news sites:
    for nc_uuid in self.LIVE_NEWSCOLLECTORS.keys():
        self.LIVE_NEWSCOLLECTORS[nc_uuid].agreggator, ioloop = args
        period = int(self.LIVE_NEWSCOLLECTORS[nc_uuid].period) * 60
        if self.timer is not None:
            period = int(self.timer)
        #self.scheduler.add_job(func=self.LIVE_NEWSCOLLECTORS[nc_uuid].getNews, args=[self.source, i], trigger='interval', seconds=10, id=nc_uuid)
        task = tornado.ioloop.PeriodicCallback(lambda: self.LIVE_NEWSCOLLECTORS[nc_uuid].getNews(self.source, i), 1000*10, ioloop)
        task.start()
'getData', which is called as a callback, makes an async HTTP request that parses the response and sends data to a TCPServer for analysis by calling the method process_response:
    @gen.coroutine
    def process_response(self, *args, **kwargs):
        buf = {'sentence': str('text here')}
        data_string = json.dumps(buf)
        s.send(data_string)
        while True:
            try:
                data = s.recv(100000)
                if not data:
                    print "connection closed"
                    s.close()
                    break
                else:
                    print "Received %d bytes: '%s'" % (len(data), data)
                    # s.close()
                    break
            except socket.error, e:
                if e.args[0] == errno.EWOULDBLOCK:
                    print 'error', errno.EWOULDBLOCK
                    time.sleep(1)  # short delay, no tight loops
                else:
                    print e
                    break
        i += 1
Inside process_response I use the basic example for non-blocking socket operations.
process_response prints something like this:

    error 10035
    error 10035
    Received 75 bytes: '{"mode": 1, "keyword": "\u0435\u0432\u0440\u043e", "sentence": "text here"}'

That looks like normal behavior. But while data is being received, the main IOLoop is locked! If I query the webserver, it won't return any data until the periodic callback task finishes...
Where is my mistake?
time.sleep() is a blocking function and must never be used in non-blocking code. Use yield gen.sleep() instead.
Also consider using tornado.iostream.IOStream instead of raw socket operations.
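A minimal sketch of what the question's retry loop looks like with yield gen.sleep() (gen.sleep is available from Tornado 4.1; recv_with_retries is a hypothetical helper of mine, written in Python 2 syntax to match the question's code):

    import errno
    import socket

    from tornado import gen

    @gen.coroutine
    def recv_with_retries(s):
        while True:
            try:
                raise gen.Return(s.recv(100000))
            except socket.error, e:
                if e.args[0] != errno.EWOULDBLOCK:
                    raise
                yield gen.sleep(1)  # yields to the IOLoop; other handlers keep running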

Rust persistent TcpStream

I seem to be struggling with std::io::TcpStream. I'm actually trying to open a TCP connection with another system, but the code below reproduces the problem exactly.
I have a TCP server that simply writes a greeting to the TcpStream upon opening and then loops to keep the connection open.
    fn main() {
        let listener = io::TcpListener::bind("127.0.0.1", 8080);
        let mut acceptor = listener.listen();
        for stream in acceptor.incoming() {
            match stream {
                Err(_) => { /* connection failed */ }
                Ok(stream) => spawn(proc() {
                    handle(stream);
                })
            }
        }
        drop(acceptor);
    }

    fn handle(mut stream: io::TcpStream) {
        stream.write(b"Hello Connection");
        loop {}
    }
All the client does is attempt to read a single byte from the connection and print it.
    fn main() {
        let mut socket = io::TcpStream::connect("127.0.0.1", 8080).unwrap();
        loop {
            match socket.read_byte() {
                Ok(i) => print!("{}", i),
                Err(e) => {
                    println!("Error: {}", e);
                    break
                }
            }
        }
    }
Now the problem is that my client remains blocked on the read until I kill the server or close the TCP connection. This is not what I want: I need to keep a TCP connection open for a very long time and send messages back and forth between client and server. What am I misunderstanding here? I have the exact same problem with the real system I'm communicating with; I only become unblocked once I kill the connection.
Unfortunately, Rust does not have any facility for asynchronous I/O at the moment. There are attempts to rectify the situation, but they are far from complete. That is, there is a desire to make truly asynchronous I/O possible (proposals include selecting over I/O sources and channels at the same time, which would allow waking tasks that are blocked inside an I/O operation via an event over a channel, though it is not clear how this should be implemented on all supported platforms), but there is still a lot to do, and nothing really usable exists now, as far as I'm aware.
You can emulate this to some extent with timeouts, however. This is far from the best solution, but it works. It could look like this (simplified example from my code base):
    let mut socket = UdpSocket::bind(address).unwrap();
    let mut buf = [0u8, ..MAX_BUF_LEN];

    loop {
        socket.set_read_timeout(Some(5000));
        match socket.recv_from(buf) {
            Ok((amt, src)) => { /* handle successful read */ }
            Err(ref e) if e.kind == TimedOut => {}  // continue
            Err(e) => fail!("error receiving data: {}", e)  // bail out
        }
        // do other work, check exit flags, for example
    }
Here recv_from will return an IoError with kind set to TimedOut if no data arrives on the socket within 5 seconds of the recv_from call. You need to reset the timeout at the start of each loop iteration, since it behaves more like a "deadline" than a timeout: once it expires, all subsequent calls will fail with a timeout error.
This is definitely not how it should be done, but Rust currently does not provide anything better. At least it does the job.
Update
There is now an attempt to create an asynchronous event loop and network I/O based on it. It is called mio. It probably can be a good temporary (or even permanent, who knows) solution for asynchronous I/O.

python's asyncio and sharing a socket among worker processes

Is it possible to share a socket among several worker processes using Python's asyncio module?
Below is example code that starts a server listening on port 2000. When a connection is established and the client sends the string "S", the server starts sending data to the client. But all of this happens on only one CPU core. How could I rewrite this example to take advantage of all the CPU cores? I took a look at the asyncio subprocess module, but I'm not sure I can use it to share the socket so that the server can simultaneously accept connections from multiple worker processes in parallel.
    import asyncio
    import datetime

    clients = []

    class MyServerProtocol(asyncio.Protocol):
        def connection_made(self, transport):
            self.transport = transport
            self.peername = transport.get_extra_info("peername")
            print("connection_made: {}".format(self.peername))
            clients.append(self)

        @asyncio.coroutine
        def send_data_stream(self):
            while True:
                yield from asyncio.sleep(3)
                if self in clients:
                    self.transport.write("{} {}\r\n".format('Endless stream of information', str(datetime.datetime.now())).encode())
                    print("sent data to: {}".format(self.peername))
                else:
                    break

        def data_received(self, data):
            print("data_received: {}".format(data.decode()))
            received = data.decode()
            if received == "S":
                asyncio.Task(self.send_data_stream())

        def connection_lost(self, ex):
            print("connection_lost: {}".format(self.peername))
            clients.remove(self)

    if __name__ == '__main__':
        print("starting up..")
        loop = asyncio.get_event_loop()
        asyncio.set_event_loop(loop)
        coro = loop.create_server(MyServerProtocol, port=2000)
        server = loop.run_until_complete(coro)
        for socket in server.sockets:
            print("serving on {}".format(socket.getsockname()))
        loop.run_forever()
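A sketch of one common pattern (my suggestion, not from the original thread): create the listening socket once, fork workers, and let each worker run its own event loop against the shared socket via create_server(sock=...). This assumes a Unix platform (os.fork) and reuses MyServerProtocol from the question:

    import asyncio
    import os
    import socket

    def make_listener(port):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('', port))
        sock.listen(128)
        sock.setblocking(False)
        return sock

    if __name__ == '__main__':
        sock = make_listener(2000)
        for _ in range(os.cpu_count() - 1):
            if os.fork() == 0:
                break  # child inherits `sock` and falls through to serve on it

        loop = asyncio.get_event_loop()  # each process gets its own loop
        server = loop.run_until_complete(
            loop.create_server(MyServerProtocol, sock=sock))
        print("serving on {} in pid {}".format(sock.getsockname(), os.getpid()))
        loop.run_forever()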

How to use ZeroMQ in a GTK/Qt/Clutter application?

In GTK applications all execution takes place inside the gtk_main function, and other graphical frameworks have similar event loops, like app.exec for Qt and clutter_main for Clutter. ZeroMQ, however, is based on the assumption that there is a while (1) ... loop that it is inserted into (see for instance here for examples).
How do you combine these two execution strategies?
I am currently wanting to use ZeroMQ in a Clutter application written in C, so I would of course like direct answers for that, but please add answers for other variants as well.
The proper way to combine zmq and GTK or Clutter is to connect the file descriptor of the zmq queue to the main event loop. The fd can be retrieved with:
    int fd;
    size_t sizeof_fd = sizeof(fd);
    if (zmq_getsockopt(socket, ZMQ_FD, &fd, &sizeof_fd))
        perror("retrieving zmq fd");
Connecting it to the main loop is then a matter of using g_io_add_watch:
    GIOChannel* channel = g_io_channel_unix_new(fd);
    g_io_add_watch(channel, G_IO_IN | G_IO_ERR | G_IO_HUP, callback_func, NULL);
In the callback function, it is necessary to first check whether there is really something to read, before reading; otherwise the function might block waiting for I/O.
    gboolean callback_func(GIOChannel *source, GIOCondition condition, gpointer data)
    {
        uint32_t status;
        size_t sizeof_status = sizeof(status);

        while (1) {
            if (zmq_getsockopt(socket, ZMQ_EVENTS, &status, &sizeof_status)) {
                perror("retrieving event status");
                return 0; // this just removes the callback, but probably
                          // different error handling should be implemented
            }
            if ((status & ZMQ_POLLIN) == 0) {
                break;
            }
            // retrieve one message here
        }
        return 1; // keep the callback active
    }
Please note: this is not actually tested; I did a translation from Python+Clutter, which is what I use, but I'm pretty sure it'll work.
For reference, below is the full Python+Clutter code, which actually works.
    import sys
    from gi.repository import Clutter, GObject
    import zmq

    def Stage():
        "A Stage with a red spinning rectangle"
        stage = Clutter.Stage()
        stage.set_size(400, 400)
        rect = Clutter.Rectangle()
        color = Clutter.Color()
        color.from_string('red')
        rect.set_color(color)
        rect.set_size(100, 100)
        rect.set_position(150, 150)
        timeline = Clutter.Timeline.new(3000)
        timeline.set_loop(True)
        alpha = Clutter.Alpha.new_full(timeline, Clutter.AnimationMode.EASE_IN_OUT_SINE)
        rotate_behaviour = Clutter.BehaviourRotate.new(
            alpha,
            Clutter.RotateAxis.Z_AXIS,
            Clutter.RotateDirection.CW,
            0.0, 359.0)
        rotate_behaviour.apply(rect)
        timeline.start()
        stage.add_actor(rect)
        stage.show_all()
        stage.connect('destroy', lambda stage: Clutter.main_quit())
        return stage, rotate_behaviour

    def Socket(address):
        ctx = zmq.Context()
        sock = ctx.socket(zmq.SUB)
        sock.setsockopt(zmq.SUBSCRIBE, "")
        sock.connect(address)
        return sock

    def zmq_callback(queue, condition, sock):
        print 'zmq_callback', queue, condition, sock
        while sock.getsockopt(zmq.EVENTS) & zmq.POLLIN:
            observed = sock.recv()
            print observed
        return True

    def main():
        res, args = Clutter.init(sys.argv)
        if res != Clutter.InitError.SUCCESS:
            return 1
        stage, rotate_behaviour = Stage()
        sock = Socket(sys.argv[2])
        zmq_fd = sock.getsockopt(zmq.FD)
        GObject.io_add_watch(zmq_fd,
                             GObject.IO_IN | GObject.IO_ERR | GObject.IO_HUP,
                             zmq_callback, sock)
        return Clutter.main()

    if __name__ == '__main__':
        sys.exit(main())
It sounds like the ZeroMQ code simply wants to be executed over and over again, as often as possible. The simplest way is to put the ZeroMQ code into an idle function or a timeout function, and to use non-blocking versions of the ZeroMQ calls where they exist.
For Clutter, you would use clutter_threads_add_idle() or clutter_threads_add_timeout(). For GTK, you would use g_idle_add() or g_timeout_add().
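A sketch of this timeout approach in Python (my illustration, not from the original answer), using GObject.timeout_add and pyzmq's non-blocking receive, which raises zmq.Again when nothing is queued; poll_zmq is a hypothetical helper:

    import zmq
    from gi.repository import GObject

    def poll_zmq(sock):
        while True:
            try:
                msg = sock.recv(zmq.NOBLOCK)  # non-blocking read
            except zmq.Again:
                break  # nothing queued right now, try again on the next tick
            print msg
        return True  # returning True keeps the timeout installed

    # after creating `sock` as in the Clutter example above:
    # GObject.timeout_add(100, poll_zmq, sock)  # poll every 100 ms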
The more difficult, but possibly better, way is to create a separate thread for the ZeroMQ code using g_thread_create(), and just use the while(1) construction with blocking functions as they suggest. If you do that, you will also have to find some way for the threads to communicate with each other - GLib's mutexes and async queues usually do fine.
I found that there is a Qt integration library called Zeromqt. Looking at the source, the core of the integration is the following:
    ZmqSocket::ZmqSocket(int type, QObject *parent) : QObject(parent)
    {
        ...
        notifier_ = new QSocketNotifier(fd, QSocketNotifier::Read, this);
        connect(notifier_, SIGNAL(activated(int)), this, SLOT(activity()));
    }
    ...
    void ZmqSocket::activity()
    {
        uint32_t flags;
        size_t size = sizeof(flags);
        if (!getOpt(ZMQ_EVENTS, &flags, &size)) {
            qWarning("Error reading ZMQ_EVENTS in ZMQSocket::activity");
            return;
        }
        if (flags & ZMQ_POLLIN) {
            emit readyRead();
        }
        if (flags & ZMQ_POLLOUT) {
            emit readyWrite();
        }
        ...
    }
Hence, it relies on Qt's integrated socket handling; Clutter will not have something similar.
You can get a file descriptor for a 0MQ socket (the ZMQ_FD option) and integrate that with your event loop. I presume GTK has some mechanism for handling sockets.
This is an example in Python, using PyQt4. It's derived from a working application.
    import zmq
    from PyQt4 import QtCore, QtGui

    class QZmqSocketNotifier( QtCore.QSocketNotifier ):
        """ Provides Qt event notifier for ZMQ socket events """
        def __init__( self, zmq_sock, event_type, parent=None ):
            """
            Parameters:
            ----------
            zmq_sock : zmq.Socket
                The ZMQ socket to listen on. Must already be connected or bound to a socket address.
            event_type : QtSocketNotifier.Type
                Event type to listen for, as described in documentation for QtSocketNotifier.
            """
            super( QZmqSocketNotifier, self ).__init__( zmq_sock.getsockopt(zmq.FD), event_type, parent )

    class Server(QtGui.QFrame):
        def __init__(self, topics, port, mainwindow, parent=None):
            super(Server, self).__init__(parent)
            self._PORT = port

            # Create notifier to handle ZMQ socket events coming from client
            self._zmq_context = zmq.Context()
            self._zmq_sock = self._zmq_context.socket( zmq.SUB )
            self._zmq_sock.bind( "tcp://*:" + self._PORT )
            for topic in topics:
                self._zmq_sock.setsockopt( zmq.SUBSCRIBE, topic )
            self._zmq_notifier = QZmqSocketNotifier( self._zmq_sock, QtCore.QSocketNotifier.Read )

            # connect signals and slots
            self._zmq_notifier.activated.connect( self._onZmqMsgRecv )
            mainwindow.quit.connect( self._onQuit )

        @QtCore.pyqtSlot()
        def _onZmqMsgRecv(self):
            self._zmq_notifier.setEnabled(False)
            # Verify that there's data in the stream
            sock_status = self._zmq_sock.getsockopt( zmq.EVENTS )
            if sock_status == zmq.POLLIN:
                msg = self._zmq_sock.recv_multipart()
                topic = msg[0]
                callback = self._topic_map[ topic ]
                callback( msg )
            self._zmq_notifier.setEnabled(True)
            self._zmq_sock.getsockopt(zmq.EVENTS)

        def _onQuit(self):
            self._zmq_notifier.activated.disconnect( self._onZmqMsgRecv )
            self._zmq_notifier.setEnabled(False)
            del self._zmq_notifier
            self._zmq_context.destroy(0)
Disabling and then re-enabling the notifier in _onZmqMsgRecv is per the documentation for QSocketNotifier.
The final call to getsockopt is for some reason necessary; otherwise the notifier stops working after the first event. I was actually going to post a new question for this. Does anyone know why this is needed? (A likely explanation, though not confirmed in the thread: the descriptor returned by ZMQ_FD is edge-triggered, and querying ZMQ_EVENTS is what resets the socket's internal event state, so without that final call no new edge is generated for the notifier.)
Note that if you don't destroy the notifier before the ZMQ context, you'll probably get an error like this when you quit the application:
    QSocketNotifier: Invalid socket 16 and type 'Read', disabling...