ForkingPickler: TypeError: cannot pickle 'memoryview' object - sockets

I am trying to send and receive pickled versions of a random value generated by the producer. I am using the multiprocess package (not multiprocessing) and its ForkingPickler to pickle and queue the generated value. However, upon running the sample program below, I get the following error. The reason for using ForkingPickler is that I want to pickle socket objects in the future; for now I am testing with this sample version. Is this a feasible way to go about pickling socket objects?
rv = reduce(self.proto)
TypeError: cannot pickle 'memoryview' object
from random import random
from time import sleep
# imports assumed: the question uses the third-party 'multiprocess' package,
# whose module layout mirrors the stdlib multiprocessing
from multiprocess import Process, JoinableQueue
from multiprocess.reduction import ForkingPickler

# produce work
def producer(queue):
    print('Producer: Running', flush=True)
    # generate work
    for i in range(10):
        # generate a value
        value = random()
        # block
        sleep(value)
        # add to the queue
        fork_value = ForkingPickler.dumps(value)
        queue.put(fork_value)
    # all done
    queue.put(None)
    print(f'Queue Size Consumer: {queue.qsize()}', flush=True)
    print('Producer: Done', flush=True)

# consume work
def consumer(queue):
    print('Consumer: Running', flush=True)
    # consume work
    while True:
        print(f'Queue Size Consumer: {queue.qsize()}', flush=True)
        # get a unit of work
        fork_value = queue.get()
        item = ForkingPickler.loads(fork_value)
        # check for stop
        if item is None:
            break
        # report
        print(f'>got {item}', flush=True)
    # all done
    print('Consumer: Done', flush=True)

# entry point
if __name__ == '__main__':
    # create the shared queue
    queue = JoinableQueue()
    # start the consumer
    consumer_process = Process(target=consumer, args=(queue,))
    consumer_process.start()
    # start the producer
    producer_process = Process(target=producer, args=(queue,))
    producer_process.start()
    # wait for all processes to finish
    consumer_process.join()
    producer_process.join()
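From what I can tell so far (treat this as an assumption on my part), ForkingPickler.dumps() returns a memoryview (the underlying BytesIO buffer), and queue.put() then pickles that object again with the default pickler, which cannot handle memoryview. Converting to plain bytes before queueing avoids the error in my test, though I'm not sure it is the intended approach:

fork_value = bytes(ForkingPickler.dumps(value))  # plain bytes pickle cleanly
queue.put(fork_value)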


Qgis Plugin QgsMapToolEmitPoint QgsMapToolIdentifyFeature initialization

I'm working on a custom plugin in QGIS 3.22.3 with Qt 5.15.2. My goal is to have a tool run when I click a button in my dock widget that can either identify or select features from a layer, so I can pass those attributes to a database function. I have tried creating subclasses of both QgsMapToolEmitPoint and QgsMapToolIdentifyFeature; the __init__ method does get called, but the button 'does nothing'. Slightly modifying my code to subclass QgsMapTool does in fact start the tool and responds to logging from canvasPress events, etc. I'm still new to QGIS and figuring out how everything plays together, so I'm not sure whether I can return features from QgsMapTool or not. I would love it if someone could offer some suggestions or guidance, because I haven't been able to find clarity, and any examples I've tried modifying seem to be standalone scripts run directly from QGIS. Below are where my buttons connect (which gets called in init_gui), the subclass definition, and the function that gets called on button push. The code is very messy because of all the different iterations I've tried, but I left it all here in case I have the right pieces. Again, any leads would be helpful.
def connect_the_buttons(self):
    # # get spots
    b = self.dock.select_spots_tool
    b.setEnabled(True)
    b.setCheckable(True)
    b.clicked.connect(self.getSpots)

def getSpots(self, checked):
    if checked:
        spot_id_List.clear()
        t = self.selectSpots(self.iface)
        #t = gs.selectSpots(self.iface)
        self.iface.mapCanvas().setMapTool(t)
        QApplication.setOverrideCursor(Qt.CrossCursor)
        spots = QgsProject.instance().mapLayersByName('spots')[0]
        self.iface.setActiveLayer(spots)
        tType = type(t)
        log(f'what is t? >> {type(t)}')
        log(f'{tType}')
        log(f'type({t})')
        log(f'{self.selectSpots}')
    else:
        log('not checked')
        QApplication.restoreOverrideCursor()
        spot_id_List.clear()

class selectSpots(QgsMapToolEmitPoint):
    #canvasClicked = pyqtSignal('QgsPointXY')
    # def __new__(cls, *args, **kwargs):
    #     return super(selectSpots, cls).__new__(cls, *args, **kwargs)

    def __init__(self, canvas):
        log('selectSpots __init__ function is running...')
        self.canvas = self.iface.mapCanvas()
        #QgsMapTool.__init__(self, canvas)
        #self.layer = self.iface.activeLayer()
        #self.iface.canvasClicked.connect(self.id_spots)
        #c = iface.mapCanvas()
        #activeSpots = iface.activeLayer()
        QgsMapToolEmitPoint.__init__(self, self.canvas)
        #self.iface.layer().connect(self.getLayerId)
        #self.iface.currentLayerChanged.connect(self.active_changed)
        #QApplication.setOverrideCursor(Qt.CrossCursor)

    # # def getLayerId(self):
    # #     log('connected_to_click layer event')

    # # # def id_spots(self):
    # # #     log('this happend when i clicked')

    def activate(self):
        log('The activate method was called.')

    def canvasDoubleClickEvent(self, e):
        log('Double Clicked')
        log(f'double click event: {e}')

    # # def active_changed(self, layer):
    # #     activeSpots.removeSelection()
    # #     spot_id_List.clear()

    def canvasReleaseEvent(self, event):
        log('canvas release event recorded')

    def canvasPressEvent(self, event):
        log('canvas press event recorded')
        point = event.mapPoint()
        self.canvasClicked.emit(point)
        log(f'point: {point}')
        # # log(f'event: {event}')
        # # #self.handle_spot_selection()
        # # log(f'on press: spot_id_List: {spot_id_List}')
        # # # selectedLayer = self.iface.activeLayer()
        # # # selection = selectedLayer.selectedFeatures()
        # # #self.iface.actionSelect().trigger()
        # # for f in selection:
        # #     spot_id_List.append(f.attribute('id'))
        # # log(f'after press: spot_id_List: {spot_id_List}')
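For context, this is the bare pattern I'm trying to get to (just a sketch with placeholder names like SpotClickTool and on_spot_clicked, not working code from the plugin). My understanding is that the tool needs the canvas passed in and that I have to keep a reference to it on self so it isn't garbage collected as soon as getSpots returns:

from qgis.gui import QgsMapToolEmitPoint

class SpotClickTool(QgsMapToolEmitPoint):
    """Bare-bones click tool; QgsMapToolEmitPoint already provides canvasClicked."""
    def __init__(self, canvas):
        super().__init__(canvas)

# inside the plugin class
def getSpots(self, checked):
    if checked:
        canvas = self.iface.mapCanvas()
        self.spot_tool = SpotClickTool(canvas)              # keep a reference on self
        self.spot_tool.canvasClicked.connect(self.on_spot_clicked)
        canvas.setMapTool(self.spot_tool)
    else:
        self.iface.mapCanvas().unsetMapTool(self.spot_tool)

def on_spot_clicked(self, point, button):
    log(f'clicked at {point}')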

Remove trailing bits from hex pyModBus

I want to build a function that sends a request from Modbus to serial in hex. I more or less have a working function, but I have two issues.
Issue 1
[b'\x06', b'\x1c', b'\x00!', b'\r', b'\x1e', b'\x1d\xd3', b'\r', b'\n', b'\x1e', b'\x1d']
I can't remove the b'\r', b'\n' parts using the .split('\r\n') method, since the elements are not strings.
Issue 2
When getting the value from holding register 40 (which is 33) and using the .to_bytes() method, I keep getting b'\x00!', b'\r', and I'm expecting b'\x21':
r = client.read_holding_registers(40)
re = r.registers[0]
req = re.to_bytes(2, 'big')
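For reference, here is a quick check of both issues in plain Python (nothing pymodbus-specific; this is just my reading of what the bytes mean):

value = 33                        # 0x21
print(value.to_bytes(2, 'big'))   # b'\x00!'  -> two bytes 0x00 0x21; '!' is just how 0x21 is displayed
print(value.to_bytes(1, 'big'))   # b'!'      -> a single 0x21 byte, i.e. the same value as b'\x21'

frame = [b'\x06', b'\x1c', b'\x00!', b'\r', b'\x1e', b'\x1d\xd3', b'\r', b'\n', b'\x1e', b'\x1d']
cleaned = [chunk for chunk in frame if chunk not in (b'\r', b'\n')]   # drop the CR/LF elements
print(cleaned)
print(b''.join(frame).replace(b'\r', b'').replace(b'\n', b''))        # or strip CR/LF from the joined bytes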
My functions to generate my request and to send it through pyserial:
def scanned_code():
    code = client.read_holding_registers(0)
    # code2= client.re
    r = code.registers[0]
    return r

def send_request(data):
    """ Takes input from create_request() and sends data to serial port"""
    try:
        for i in range(data):
            serial_client.write(data[i])
            # serial_client.writelines(data[i])
    except:
        print('no se pudo enviar el paquete <<<--------------------')

def create_request(job):
    """ Request type is 33 looks for job
    [06]
    [1c]
    req=33[0d][0a]
    job=30925[0d][0a][1e]
    [1d]
    """
    r = client.read_holding_registers(40)
    re = r.registers[0]
    req = re.to_bytes(2, 'big')
    num = job.to_bytes(2, 'big')
    data = [
        b'\x06',
        b'\x1C',
        req,
        b'\x0D',
        b'\x1E',
        num,
        b'\x0D',
        b'\x0A',
        b'\x1E',
        b'\x1D'
    ]
    print(data)

while True:
    # verify order_trigger() is True.
    while order_trigger() != False:
        print('inside while loop')
        # set flag coil back to 0
        reset_trigger()
        # get Job no.
        job = scanned_code()
        # check for JOB No. dif. than 0
        if job != 0:
            print(scanned_code())
            send_request(create_request(job))
            # send job request to host to get job data
            # send_request()
            # if TRUE send job request by serial to DVI client
            # get job request data
            # translate job request data to modbus
            # send data to plc
        else:
            print(' no scanned code')
            break
        time.sleep(INTERNAL_SLEEP_TIME)
    print('outside loop')
    time.sleep(EXTERNAL_SLEEP_TIME)
As an additional question: is this the proper way of doing things?
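On that point, one simplification I'm considering (serial_client is the pyserial connection from my setup, not shown above): have create_request() end with return data and let send_request() join the list into a single bytes object, since pyserial's write() accepts bytes. That also avoids the range(data) loop, which would need to be range(len(data)) anyway:

def send_request(data):
    """Join the list of bytes objects and write the whole frame in one call."""
    try:
        serial_client.write(b''.join(data))
    except Exception as exc:
        print('could not send the packet <<<--------------------', exc)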

airflow http callback sensor

Our airflow implementation sends out http requests to get services to do tasks. We want those services to let airflow know when they complete their task, so we are sending a callback url to the service which they will call when their task is complete. I can't seem to find a callback sensor, however. How do people handle this normally?
There is no such thing as a callback or webhook sensor in Airflow. The sensor definition, taken from the documentation, is as follows:
Sensors are a certain type of operator that will keep running until a certain criterion is met. Examples include a specific file landing in HDFS or S3, a partition appearing in Hive, or a specific time of the day. Sensors are derived from BaseSensorOperator and run a poke method at a specified poke_interval until it returns True.
This means that a sensor is an operator that performs polling behavior on external systems. In that sense, your external services should have a way of keeping state for each executed task - either internally or externally - so that a polling sensor can check on that state.
This way you can use, for example, airflow.operators.HttpSensor, which polls an HTTP endpoint until a condition is met. Or, even better, write your own custom sensor, which gives you the opportunity to do more complex processing and keep state.
Otherwise, if the service outputs data in a storage system you can use a sensor that polls a database for example. I believe you get the idea.
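For illustration, a minimal custom sensor along those lines could look like the sketch below (the import paths are Airflow 1.x-era, matching the operator further down; the status endpoint and the 'done' state are placeholders for whatever your service exposes):

import json

from airflow.hooks.http_hook import HttpHook
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults


class ServiceStateSensor(BaseSensorOperator):
    """Polls an HTTP endpoint and succeeds once the service reports completion."""

    @apply_defaults
    def __init__(self, endpoint, http_conn_id='my_service', *args, **kwargs):
        super(ServiceStateSensor, self).__init__(*args, **kwargs)
        self.endpoint = endpoint            # e.g. 'tasks/1234/status' (placeholder)
        self.http_conn_id = http_conn_id

    def poke(self, context):
        http = HttpHook(method='GET', http_conn_id=self.http_conn_id)
        response = http.run(self.endpoint)
        state = json.loads(response.content).get('state')
        self.log.info('Service reported state: %s', state)
        return state == 'done'              # placeholder terminal state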
I'm attaching a custom operator example that I've written for integrating with the Apache Livy API. The sensor does two things: a) submits a Spark job through the REST API and b) waits for the job to be completed.
The operator extends the SimpleHttpOperator and at the same time implements the HttpSensor thus combining both functionalities.
import json
from datetime import datetime
from time import sleep

# imports below assume Airflow 1.x-era module paths
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.hooks.http_hook import HttpHook
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.utils.decorators import apply_defaults


class LivyBatchOperator(SimpleHttpOperator):
    """
    Submits a new Spark batch job through
    the Apache Livy REST API.
    """
    template_fields = ('args',)
    ui_color = '#f4a460'

    @apply_defaults
    def __init__(self,
                 name,
                 className,
                 file,
                 executorMemory='1g',
                 driverMemory='512m',
                 driverCores=1,
                 executorCores=1,
                 numExecutors=1,
                 args=[],
                 conf={},
                 timeout=120,
                 http_conn_id='apache_livy',
                 *arguments, **kwargs):
        """
        If xcom_push is True, response of an HTTP request will also
        be pushed to an XCom.
        """
        super(LivyBatchOperator, self).__init__(
            endpoint='batches', *arguments, **kwargs)
        self.http_conn_id = http_conn_id
        self.method = 'POST'
        self.endpoint = 'batches'
        self.name = name
        self.className = className
        self.file = file
        self.executorMemory = executorMemory
        self.driverMemory = driverMemory
        self.driverCores = driverCores
        self.executorCores = executorCores
        self.numExecutors = numExecutors
        self.args = args
        self.conf = conf
        self.timeout = timeout
        self.poke_interval = 10

    def execute(self, context):
        """
        Executes the task
        """
        payload = {
            "name": self.name,
            "className": self.className,
            "executorMemory": self.executorMemory,
            "driverMemory": self.driverMemory,
            "driverCores": self.driverCores,
            "executorCores": self.executorCores,
            "numExecutors": self.numExecutors,
            "file": self.file,
            "args": self.args,
            "conf": self.conf
        }
        self.log.info(payload)
        headers = {
            'X-Requested-By': 'airflow',
            'Content-Type': 'application/json'
        }
        http = HttpHook(self.method, http_conn_id=self.http_conn_id)
        self.log.info("Submitting batch through Apache Livy API")
        response = http.run(self.endpoint,
                            json.dumps(payload),
                            headers,
                            self.extra_options)
        # parse the JSON response
        obj = json.loads(response.content)
        # get the new batch Id
        self.batch_id = obj['id']
        self.log.info('Batch successfully submitted with Id %s', self.batch_id)
        # start polling the batch status
        started_at = datetime.utcnow()
        while not self.poke(context):
            if (datetime.utcnow() - started_at).total_seconds() > self.timeout:
                raise AirflowSensorTimeout('Snap. Time is OUT.')
            sleep(self.poke_interval)
        self.log.info("Batch %s has finished", self.batch_id)

    def poke(self, context):
        '''
        Function that the sensors defined while deriving this class should
        override.
        '''
        http = HttpHook(method='GET', http_conn_id=self.http_conn_id)
        self.log.info("Calling Apache Livy API to get batch status")
        # call the API endpoint
        endpoint = 'batches/' + str(self.batch_id)
        response = http.run(endpoint)
        # parse the JSON response
        obj = json.loads(response.content)
        # get the current state of the batch
        state = obj['state']
        # check the batch state
        if (state == 'starting') or (state == 'running'):
            # if state is 'starting' or 'running'
            # signal a new polling cycle
            self.log.info('Batch %s has not finished yet (%s)',
                          self.batch_id, state)
            return False
        elif state == 'success':
            # if state is 'success' exit
            return True
        else:
            # for all other states
            # raise an exception and
            # terminate the task
            raise AirflowException(
                'Batch ' + str(self.batch_id) + ' failed (' + state + ')')
Hope this will help you a bit.
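For completeness, wiring the operator into a DAG would look roughly like this (the dag object, jar path and class name are placeholders):

spark_job = LivyBatchOperator(
    task_id='submit_spark_batch',
    name='my-batch-job',                    # placeholder job name
    className='com.example.MySparkApp',     # placeholder main class
    file='hdfs:///jobs/my-spark-app.jar',   # placeholder artifact path
    args=['2019-01-01'],
    timeout=600,
    http_conn_id='apache_livy',
    dag=dag,
)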

An error in my code for a simple FTP

I hit an error when running the code at the bottom; it works like a simple FTP.
I use python2.6.6 and CentOS release 6.8
On most Linux servers it gives the right result, like this (I'm very sorry, I have just signed up and couldn't …):
Client:
[root#Test ftp]# python client.py
path:put|/home/aaa.txt
Server:
[root#Test ftp]# python server.py
connected...
pre_data:put|aaa.txt|4
cmd: put
file_name: aaa.txt
file_size: 4
upload successed.
But I get errors on some servers (such as my own VM on my PC). I have done lots of tests (Python 2.6/2.7, CentOS 6.5/6.7) and found that the error is not caused by them. Here is the error information:
[root#Lewis-VM ftp]# python server.py
connected...
pre_data:put|aaa.txt|7sdfsdf   ### wrong result here: "sdfsdf" is the content of /home/aaa.txt; it shouldn't end up in 'file_size', and that causes the ValueError below
cmd: put
file_name: aaa.txt
file_size: 7sdfsdf
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 10699)
Traceback (most recent call last):
File "/usr/lib64/python2.6/SocketServer.py", line 570, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib64/python2.6/SocketServer.py", line 332, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/lib64/python2.6/SocketServer.py", line 627, in __init__
self.handle()
File "server.py", line 30, in handle
if int(file_size)>recv_size:
ValueError: invalid literal for int() with base 10: '7sdfsdf\n'
What's more, I found that if I insert a time.sleep(1) between sk.send(cmd+"|"+file_name+'|'+str(file_size)) and sk.send(data) in client.py, the error disappears. As I said, I tested on different systems and Python versions and the error is not caused by them, so I guess it comes from some system config? I have read about socket.send() and socket.recv() on python.org but found nothing helpful. Could somebody help explain why this happens?
The code are here:
#!/usr/bin/env python
#coding:utf-8
################
#This is server#
################
import SocketServer
import os

class MyServer(SocketServer.BaseRequestHandler):
    def handle(self):
        base_path = '/home/ftp/file'
        conn = self.request
        print 'connected...'
        while True:
            #####receive pre_data: we should get data like 'put|/home/aaa|7'
            pre_data = conn.recv(1024)
            print 'pre_data:' + pre_data
            cmd, file_name, file_size = pre_data.split('|')
            print 'cmd: ' + cmd
            print 'file_name: ' + file_name
            print 'file_size: ' + file_size
            recv_size = 0
            file_dir = os.path.join(base_path, file_name)
            f = file(file_dir, 'wb')
            Flag = True
            ####receive 1024bytes each time
            while Flag:
                if int(file_size) > recv_size:
                    data = conn.recv(1024)
                    recv_size += len(data)
                else:
                    recv_size = 0
                    Flag = False
                    continue
                f.write(data)
            print 'upload successed.'
            f.close()

instance = SocketServer.ThreadingTCPServer(('127.0.0.1', 9999), MyServer)
instance.serve_forever()


#!/usr/bin/env python
#coding:utf-8
################
#This is client#
################
import socket
import sys
import os

ip_port = ('127.0.0.1', 9999)
sk = socket.socket()
sk.connect(ip_port)

while True:
    input = raw_input('path:')
    #####we should input like 'put|/home/aaa.txt'
    cmd, path = input.split('|')
    file_name = os.path.basename(path)
    file_size = os.stat(path).st_size
    sk.send(cmd + "|" + file_name + '|' + str(file_size))
    send_size = 0
    f = file(path, 'rb')
    Flag = True
    #####read 1024 bytes and send it to server each time
    while Flag:
        if send_size + 1024 > file_size:
            data = f.read(file_size - send_size)
            Flag = False
        else:
            data = f.read(1024)
            send_size += 1024
        sk.send(data)
    f.close()

sk.close()
TCP is a stream of data; that is the problem. TCP does not preserve message boundaries. So when a client calls something like
connection.send("0123456789")
connection.send("ABCDEFGHIJ")
then a naive server like
while True:
    data = conn.recv(1024)
    print data + "_"
may print any of:
0123456789_ABCDEFGHIJ_
0123456789ABCDEFGHIJ_
0_1_2_3_4_5_6_7_8_9_A_B_C_D_E_F_G_H_I_J_
The server has no chance to recognize how many send calls the client made, because the TCP stack on the client side just inserts the data into a stream, and the server must be able to process the received data even if it arrives in a different number of buffers than the client used.
Your server must contain logic to separate the header from the data. All application protocols based on TCP use a mechanism to identify application-level boundaries. For example, HTTP separates headers and body with an empty line and announces the body length in a separate header.
Your program works correctly when the server receives the header with the command, name, and size in a separate buffer; it fails when the client is fast enough to push the data into the stream quickly and the server reads the header and the data in one chunk.
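To make the idea concrete, here is a minimal sketch of delimiter-based framing for this protocol (Python 2, to match the code above): the client terminates the header with '\n', and the server reads bytes up to the first '\n' before it starts counting file content.

def send_header(sock, cmd, file_name, file_size):
    # sendall() keeps sending until the whole header is out
    sock.sendall('%s|%s|%d\n' % (cmd, file_name, file_size))

def recv_header(conn):
    chunks = []
    while True:
        ch = conn.recv(1)          # one byte at a time until the delimiter
        if not ch or ch == '\n':
            break
        chunks.append(ch)
    return ''.join(chunks)

# server side usage:
#   cmd, file_name, file_size = recv_header(conn).split('|')
#   ...then recv() exactly int(file_size) bytes of file data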

gstreamer-1.0 on Raspberry Pi: cannot decode H.264 stream

I'm trying to run a gstreamer-1.0 python script (see below, works fine on an ubuntu laptop) on a Raspberry Pi. However, it seems to be unable to decode the stream:
0:00:11.237415476 9605 0xafb0cc60 ERROR vaapidecode ../../../gst/vaapi/gstvaapidecode.c:1025:gst_vaapidecode_ensure_allowed_caps: failed to retrieve VA display
0:00:11.239490439 9605 0xafb0cc60 WARN decodebin gstdecodebin2.c:2087:connect_pad:<decodebin0> Link failed on pad vaapidecode0:sink
0:00:11.244097356 9605 0xafb0cc60 WARN uridecodebin gsturidecodebin.c:939:unknown_type_cb:<decoder> warning: No decoder available for type 'video/x-h264, stream-format=(string)byte-stream, alignment=(string)nal, width=(int)426, height=(int)240, framerate=(fraction)30/1, parsed=(boolean)true, pixel-aspect-ratio=(fraction)1/1, level=(string)2.1, profile=(string)main'.
I searched for information about the error (the results didn't enlighten me) and the warnings but couldn't really find much advice other than to install gstreamer1.0-libav which had already been installed. Consequently the decoder should be available.
What might be wrong here and how do I fix it?
This is the script:
#!/usr/bin/env python
# GST_DEBUG=3,python:5,gnl*:5 python 01_parsepipeline.py http://www.ustream.tv/channel/17074538 worst novideo.png
from __future__ import print_function
import sys
import gi
from gi.repository import GObject as gobject, Gst as gst
from livestreamer import Livestreamer, StreamError, PluginError, NoPluginError
import cv2
import numpy


def exit(msg):
    print(msg, file=sys.stderr)
    sys.exit()


class Player(object):
    def __init__(self):
        self.fd = None
        self.mainloop = gobject.MainLoop()
        # This creates a playbin pipeline and using the appsrc source
        # we can feed it our stream data
        self.pipeline = gst.parse_launch('uridecodebin uri=appsrc:// name=decoder \
            decoder. ! videorate ! video/x-raw,framerate=1/1 ! tee name=t \
            t. ! queue ! videoconvert ! video/x-raw,format=RGB ! appsink name=appsink \
            decoder. ! queue ! audioconvert ! fakesink')
        if self.pipeline is None:
            exit("couldn't build pipeline")
        decoder = self.pipeline.get_by_name('decoder')
        if decoder is None:
            exit("couldn't get decoder")
        decoder.connect("source-setup", self.on_source_setup)
        vsink = self.pipeline.get_by_name('appsink')
        if vsink is None:
            exit("couldn't get sink")
        vsink.set_property("emit-signals", True)
        vsink.set_property("max-buffers", 1)
        vsink.connect("new-sample", self.on_new_sample)
        # Creates a bus and set callbacks to receive errors
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect("message::eos", self.on_eos)
        self.bus.connect("message::error", self.on_error)

    def on_new_sample(self, sink):
        sample = sink.emit("pull-sample")
        buf = sample.get_buffer()
        caps = sample.get_caps()
        height = caps.get_structure(0).get_value('height')
        width = caps.get_structure(0).get_value('width')
        (result, mapinfo) = buf.map(gst.MapFlags.READ)
        if result == True:
            arr = numpy.ndarray(
                (height,
                 width,
                 3),
                buffer=buf.extract_dup(0, buf.get_size()),
                dtype=numpy.uint8)
            resized_refimage = cv2.resize(refArray, (width, height))
            diff = cv2.norm(arr, resized_refimage, cv2.NORM_L2)
            buf.unmap(mapinfo)
            s = "diff = " + str(diff)
            print(s)
        return gst.FlowReturn.OK

    def exit(self, msg):
        self.stop()
        exit(msg)

    def stop(self):
        # Stop playback and exit mainloop
        self.pipeline.set_state(gst.State.NULL)
        self.mainloop.quit()
        # Close the stream
        if self.fd:
            self.fd.close()

    def play(self, stream):
        # Attempt to open the stream
        try:
            self.fd = stream.open()
        except StreamError as err:
            self.exit("Failed to open stream: {0}".format(err))
        # Start playback
        self.pipeline.set_state(gst.State.PLAYING)
        self.mainloop.run()

    def on_source_setup(self, element, source):
        # When this callback is called the appsrc expects
        # us to feed it more data
        print("source setup")
        source.connect("need-data", self.on_source_need_data)
        print("done")

    def on_pad_added(self, element, pad):
        string = pad.query_caps(None).to_string()
        print(string)
        if string.startswith('video/'):
            #type = pad.get_caps()[0].get_name()
            #print(type)
            #if type.startswith("video"):
            pad.link(self.vconverter.get_static_pad("sink"))

    def on_source_need_data(self, source, length):
        # Attempt to read data from the stream
        try:
            data = self.fd.read(length)
        except IOError as err:
            self.exit("Failed to read data from stream: {0}".format(err))
        # If data is empty it's the end of stream
        if not data:
            source.emit("end-of-stream")
            return
        # Convert the Python bytes into a GStreamer Buffer
        # and then push it to the appsrc
        buf = gst.Buffer.new_wrapped(data)
        source.emit("push-buffer", buf)
        #print("sent " + str(length) + " bytes")

    def on_eos(self, bus, msg):
        # Stop playback on end of stream
        self.stop()

    def on_error(self, bus, msg):
        # Print error message and exit on error
        error = msg.parse_error()[1]
        self.exit(error)


def main():
    if len(sys.argv) < 4:
        exit("Usage: {0} <url> <quality> <reference png image path>".format(sys.argv[0]))
    # Initialize and check GStreamer version
    gi.require_version("Gst", "1.0")
    gobject.threads_init()
    gst.init(None)
    # Collect arguments
    url = sys.argv[1]
    quality = sys.argv[2]
    refImage = sys.argv[3]
    global refArray
    image = cv2.imread(refImage)
    refArray = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # refArray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    refArray = cv2.blur(refArray, (3, 3))
    # Create the Livestreamer session
    livestreamer = Livestreamer()
    # Enable logging
    livestreamer.set_loglevel("debug")
    livestreamer.set_logoutput(sys.stdout)
    # Attempt to fetch streams
    try:
        streams = livestreamer.streams(url)
    except NoPluginError:
        exit("Livestreamer is unable to handle the URL '{0}'".format(url))
    except PluginError as err:
        exit("Plugin error: {0}".format(err))
    if not streams:
        exit("No streams found on URL '{0}'".format(url))
    # Look for specified stream
    if quality not in streams:
        exit("Unable to find '{0}' stream on URL '{1}'".format(quality, url))
    # We found the stream
    stream = streams[quality]
    # Create the player and start playback
    player = Player()
    # Blocks until playback is done
    player.play(stream)


if __name__ == "__main__":
    main()
The pipeline you've set up appears to be trying to invoke a vaapi decoder to hardware-decode h264 - vaapi isn't available on the raspberry pi, since the closed source X server doesn't implement it. You may be able to use omxh264dec from the gstreamer1.0-omx package instead.
If not, you could do software h264 decoding, but that will be slower (maybe unacceptably slow on a raspberry pi).
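If you want to see which H.264 decoders GStreamer can actually find before touching the pipeline, a small probe like the one below may help (a sketch; the element names are the usual plugin names, adjust to your install). Dropping vaapidecode's rank stops uridecodebin from auto-plugging it, so avdec_h264 or omxh264dec gets picked instead:

#!/usr/bin/env python
# List the usual H.264 decoders and de-prioritise vaapidecode so decodebin skips it.
from __future__ import print_function
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

for name in ("omxh264dec", "avdec_h264", "vaapidecode"):
    factory = Gst.ElementFactory.find(name)
    print(name, "available" if factory else "missing")

vaapi = Gst.ElementFactory.find("vaapidecode")
if vaapi is not None:
    # Rank NONE means auto-pluggers (decodebin/uridecodebin) will not choose it
    vaapi.set_rank(Gst.Rank.NONE)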