I'm trying to write a program that loops a short GIF of a sun on an SSD1306 OLED using a Raspberry Pi Pico. I'm copying the code from a tutorial and I'm getting an error when running my code.
from machine import Pin, I2C
from ssd1306 import SSD1306_I2C
import framebuf
import time

WIDTH = 128
HEIGHT = 64

i2c = I2C(0, scl=Pin(1), sda=Pin(0), freq=400000)
display = SSD1306_I2C(WIDTH, HEIGHT, i2c)

images = []
for n in range(1, 28):
    with open('/TEMP/image%s.pbm' % n, 'rb') as f:  # open the image file
        f.readline()  # Magic number
        f.readline()  # Creator comment
        f.readline()  # Dimensions
        data = bytearray(f.read())
    fbuf = framebuf.FrameBuffer(data, 64, 64, framebuf.MONO_HLSB)  # adjust the width and height accordingly
    images.append(fbuf)

while True:
    for i in images:
        display.blit(i, 32, 0)
        display.show()
        time.sleep(0.01)
Then I receive this error:
Traceback (most recent call last):
  File "<stdin>", line 19, in <module>
MemoryError: memory allocation failed, allocating 5000 bytes
Also:
>>> import micropython
>>> micropython.mem_info()
stack: 556 out of 7936
GC: total: 166016, used: 11120, free: 154896
No. of 1-blocks: 158, 2-blocks: 36, max blk sz: 64, max free sz: 9614
Your device does not have enough memory to fit all the images into RAM. Read each image and show it on screen one by one:
from machine import Pin, I2C
from ssd1306 import SSD1306_I2C
import framebuf
import time

WIDTH = 128
HEIGHT = 64

i2c = I2C(0, scl=Pin(1), sda=Pin(0), freq=400000)
display = SSD1306_I2C(WIDTH, HEIGHT, i2c)

while True:
    for n in range(1, 28):
        with open('/TEMP/image%s.pbm' % n, 'rb') as f:  # open the image file
            f.readline()  # Magic number
            f.readline()  # Creator comment
            f.readline()  # Dimensions
            data = bytearray(f.read())  # (your post had a stray space in "bytearray" here, probably a typo when posting)
        fbuf = framebuf.FrameBuffer(data, 64, 64, framebuf.MONO_HLSB)  # adjust the width and height accordingly
        display.blit(fbuf, 32, 0)
        display.show()
        time.sleep(0.01)
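If you still hit occasional MemoryError hiccups from heap fragmentation, a further step (my suggestion, not from the tutorial) is to allocate one buffer up front and reuse it for every frame, so the loop allocates nothing at all. A minimal sketch, assuming every frame is the same 64x64 MONO_HLSB image (64*64/8 = 512 bytes) and the display object set up as above:

FRAME_BYTES = 64 * 64 // 8               # 512 bytes per 64x64 1-bit frame
data = bytearray(FRAME_BYTES)            # allocated once, reused for every frame
fbuf = framebuf.FrameBuffer(data, 64, 64, framebuf.MONO_HLSB)

while True:
    for n in range(1, 28):
        with open('/TEMP/image%s.pbm' % n, 'rb') as f:
            f.readline()                 # Magic number
            f.readline()                 # Creator comment
            f.readline()                 # Dimensions
            f.readinto(data)             # read pixel data into the existing buffer
        display.blit(fbuf, 32, 0)
        display.show()
        time.sleep(0.01)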
For field research where I want to study a plant-insect interaction, I am trying to set up a PICT (Plant Insect Interactions Camera Trap). There is a very detailed description available at https://zenodo.org/record/6301001, but I am still stuck.
I can access the camera through the browser, but the script won't start.
I am an absolute beginner and I have no idea what I am doing wrong. Can anybody help me get this running?
This is the script from the paper, which I saved as camera.py in /home/pi:
import picamera
import socket
import uuid
from datetime import datetime as dt

qual = 22  # image quality between 1 (highest quality, largest size) and 40 (lowest quality, smallest size); typical values are 20 to 25, default is 0
video_duration = 3600  # video duration in seconds
video_number = 1000  # number of video sequences to shoot
UID = uuid.uuid4().hex[:4].upper() + '_' + dt.now().strftime('%Y-%m-%d_%H-%M')  # generate a random unique ID used in the video filename
HostName = socket.gethostname()

with picamera.PiCamera() as camera:
    camera.resolution = (1296, 972)  # max fps is 42 at 1296x972
    camera.framerate = 15  # recommended values are 12, 15, 24, 30
    camera.annotate_frame_num = True
    camera.annotate_text_size = int(round(camera.resolution[0] / 64))
    camera.annotate_background = picamera.Color('black')  # text background colour
    camera.annotate_foreground = picamera.Color('white')  # text colour
    for filename in camera.record_sequence([
            '/home/pi/record/' + HostName + '_' + UID + '_%03d.h264' % (h + 1)
            for h in range(video_number)
            ], quality=qual):
        start = dt.now()  # get the current date and time
        while (dt.now() - start).seconds < video_duration:  # run until video_duration is reached
            camera.annotate_text = HostName + ', ' + str(camera.framerate) + ' fps, Q=' + str(qual) + ', ' + dt.now().strftime('%Y-%m-%d %H:%M:%S')  # tag the video with custom text
            camera.wait_recording(0.2)  # pause the script for a short interval to save power
I am getting the following output:
~ $ python camera.py
Traceback (most recent call last):
  File "camera.py", line 23, in <module>
    ], quality=qual):
  File "/usr/lib/python2.7/dist-packages/picamera/camera.py", line 1270, in record_sequence
    camera_port, output_port = self._get_ports(True, splitter_port)
  File "/usr/lib/python2.7/dist-packages/picamera/camera.py", line 559, in _get_ports
    self._check_camera_open()
  File "/usr/lib/python2.7/dist-packages/picamera/camera.py", line 540, in _check_camera_open
    raise PiCameraClosed("Camera is closed")
picamera.exc.PiCameraClosed: Camera is closed
I want to test the explainability of a multiclass semantic segmentation model (deeplab_v3plus) with SHAP, to know which features contribute most to the semantic classification. However, I get ValueError: max_evals=500 is too low when running my file, and I struggle to understand why.
import glob
from PIL import Image
import torch
from torchvision import transforms
from torchvision.utils import make_grid
import torchvision.transforms.functional as tf
from deeplab import deeplab_v3plus
import shap

def test(args):
    # make a video prez
    model = deeplab_v3plus('resnet101', num_classes=args.nclass, output_stride=16, pretrained_backbone=True)
    model.load_state_dict(torch.load(args.seg_file, map_location=torch.device('cpu')))  # because no GPU is available in the sandbox environment
    model = model.to(args.device)
    model.eval()
    explainer = shap.Explainer(model)
    with torch.no_grad():
        for i, file in enumerate(args.img_folder):
            img = img2tensor(file, args)  # img2tensor is a helper defined elsewhere (not shown)
            pred = model(img)
            print(explainer(img))

if __name__ == '__main__':
    class Arguments:
        def __init__(self):
            self.device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
            self.seg_file = "Model_Woodscape.pth"
            self.img_folder = glob.glob("test_img/*.png")
            self.mean = [0.485, 0.456, 0.406]
            self.std = [0.229, 0.224, 0.225]
            self.h, self.w = 483, 640
            self.nclass = 10
            self.cmap = {
                1: [128, 64, 128],  # "road"
                2: [69, 76, 11],    # "lanemarks"
                3: [0, 255, 0],     # "curb"
                4: [220, 20, 60],   # "person"
                5: [255, 0, 0],     # "rider"
                6: [0, 0, 142],     # "vehicles"
                7: [119, 11, 32],   # "bicycle"
                8: [0, 0, 230],     # "motorcycle"
                9: [220, 220, 0],   # "traffic_sign"
                0: [0, 0, 0]        # "void"
            }

    args = Arguments()
    test(args)
But it returns:
(dee_env) jovyan@jupyter:~/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+$ python test_shap.py
BILINEAR is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BILINEAR instead.
Traceback (most recent call last):
  File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/test_shap.py", line 85, in <module>
    test(args)
  File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/test_shap.py", line 37, in test
    print(explainer(img))
  File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/dee_env/lib/python3.9/site-packages/shap/explainers/_permutation.py", line 82, in __call__
    return super().__call__(
  File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/dee_env/lib/python3.9/site-packages/shap/explainers/_explainer.py", line 266, in __call__
    row_result = self.explain_row(
  File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/dee_env/lib/python3.9/site-packages/shap/explainers/_permutation.py", line 164, in explain_row
    raise ValueError(f"max_evals={max_evals} is too low for the Permutation explainer, it must be at least 2 * num_features + 1 = {2 * len(inds) + 1}!")
ValueError: max_evals=500 is too low for the Permutation explainer, it must be at least 2 * num_features + 1 = 1854721!
In the source code it looks like it's because I don't give enough arguments. I only have three images in my test_img/* folder, is that why?
I have the same issue. A possible solution I found, which seems to work in my case, is to replace this line:

explainer = shap.Explainer(model)

with this line:

explainer = shap.explainers.Permutation(model, max_evals=1854721)
shap.Explainer defaults to algorithm='auto'. From the shap.Explainer documentation:

By default the "auto" option attempts to make the best choice given the passed model and masker, but this choice can always be overridden by passing the name of a specific algorithm.
Since 'permutation' has been selected, you can directly use shap.explainers.Permutation and set max_evals to the value suggested in the error message above.
Given the large number of features in your use case, this might take a really long time. I would suggest using a simpler model just to test the above solution.
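For intuition about where that 1854721 comes from (and why the number of images in test_img does not matter): the Permutation explainer treats every element of a single input as a feature, so with the 483x640 RGB input defined in the Arguments class the minimum is 2 * num_features + 1. A quick check:

h, w, c = 483, 640, 3          # input size from the Arguments class
num_features = h * w * c       # every pixel/channel counts as one feature
print(2 * num_features + 1)    # 1854721, exactly the minimum from the error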
I have connected an SSD1306 OLED and a BME280 to my Pico. Everything works like a charm when connected to I2C pins 0 (SDA) and 1 (SCL). But due to my very bad planning, I have to switch to other I2C pins. Once connected, I cannot get it to work.
I have changed from i2c = machine.I2C(0, scl=machine.Pin(1), sda=machine.Pin(0),freq=400000) to i2c = machine.I2C(0, scl=machine.Pin(13), sda=machine.Pin(12),freq=400000) and others, but to no avail.
Code snippet:
import machine
import bme280
import time
from machine import Pin, I2C, ADC
from ssd1306 import SSD1306_I2C
i2c = machine.I2C(0, scl=machine.Pin(13), sda=machine.Pin(12),freq=400000)
bme = bme280.BME280(i2c=i2c)
oled = SSD1306_I2C(128, 64, i2c)
The error I'm getting is:

Traceback (most recent call last):
  File "<stdin>", line 9, in <module>
  File "/lib/bme280.py", line 75, in __init__
OSError: 5

which corresponds to this line in bme280.py (the standard driver):

dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)
I have performed an I2C scan and it does return the correct addresses on the new pins.
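(For reference, a minimal MicroPython bus scan on the new pins looks like the following, assuming the GP12/GP13 wiring described above:)

from machine import Pin, I2C

i2c = I2C(0, scl=Pin(13), sda=Pin(12), freq=400000)
print([hex(addr) for addr in i2c.scan()])  # lists the addresses of responding devices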
So I had a similar problem to yours: I had an SSD1306 OLED and I kept getting
Traceback (most recent call last):
  File "<stdin>", line 5, in <module>
  File "/lib/ssd1306.py", line 110, in __init__
  File "/lib/ssd1306.py", line 36, in __init__
  File "/lib/ssd1306.py", line 71, in init_display
  File "/lib/ssd1306.py", line 115, in write_cmd
OSError: [Errno 5] EIO
I'm running the QT Py from Adafruit, but it's an RP2040 chip and I'm running MicroPython.
Anyhow, I pressed the restart button on the board and it magically started working. Just like you, my I2C scan was returning the right address.
The code I ended up using that worked for me was:
from machine import Pin, I2C
from ssd1306 import SSD1306_I2C
i2c = I2C(0, sda=Pin(24), scl=Pin(25), freq=40000)
oled = SSD1306_I2C(128,64,i2c)
oled.fill(0)
oled.text("Hello",0,0)
oled.show()
and for the ssd1306 driver, I used
# MicroPython SSD1306 OLED driver, I2C and SPI interfaces

from micropython import const
import framebuf

# register definitions
SET_CONTRAST = const(0x81)
SET_ENTIRE_ON = const(0xA4)
SET_NORM_INV = const(0xA6)
SET_DISP = const(0xAE)
SET_MEM_ADDR = const(0x20)
SET_COL_ADDR = const(0x21)
SET_PAGE_ADDR = const(0x22)
SET_DISP_START_LINE = const(0x40)
SET_SEG_REMAP = const(0xA0)
SET_MUX_RATIO = const(0xA8)
SET_COM_OUT_DIR = const(0xC0)
SET_DISP_OFFSET = const(0xD3)
SET_COM_PIN_CFG = const(0xDA)
SET_DISP_CLK_DIV = const(0xD5)
SET_PRECHARGE = const(0xD9)
SET_VCOM_DESEL = const(0xDB)
SET_CHARGE_PUMP = const(0x8D)

# Subclassing FrameBuffer provides support for graphics primitives
# http://docs.micropython.org/en/latest/pyboard/library/framebuf.html
class SSD1306(framebuf.FrameBuffer):
    def __init__(self, width, height, external_vcc):
        self.width = width
        self.height = height
        self.external_vcc = external_vcc
        self.pages = self.height // 8
        self.buffer = bytearray(self.pages * self.width)
        super().__init__(self.buffer, self.width, self.height, framebuf.MONO_VLSB)
        self.init_display()

    def init_display(self):
        for cmd in (
            SET_DISP | 0x00,  # off
            # address setting
            SET_MEM_ADDR,
            0x00,  # horizontal
            # resolution and layout
            SET_DISP_START_LINE | 0x00,
            SET_SEG_REMAP | 0x01,  # column addr 127 mapped to SEG0
            SET_MUX_RATIO,
            self.height - 1,
            SET_COM_OUT_DIR | 0x08,  # scan from COM[N] to COM0
            SET_DISP_OFFSET,
            0x00,
            SET_COM_PIN_CFG,
            0x02 if self.width > 2 * self.height else 0x12,
            # timing and driving scheme
            SET_DISP_CLK_DIV,
            0x80,
            SET_PRECHARGE,
            0x22 if self.external_vcc else 0xF1,
            SET_VCOM_DESEL,
            0x30,  # 0.83*Vcc
            # display
            SET_CONTRAST,
            0xFF,  # maximum
            SET_ENTIRE_ON,  # output follows RAM contents
            SET_NORM_INV,  # not inverted
            # charge pump
            SET_CHARGE_PUMP,
            0x10 if self.external_vcc else 0x14,
            SET_DISP | 0x01,
        ):  # on
            self.write_cmd(cmd)
        self.fill(0)
        self.show()

    def poweroff(self):
        self.write_cmd(SET_DISP | 0x00)

    def poweron(self):
        self.write_cmd(SET_DISP | 0x01)

    def contrast(self, contrast):
        self.write_cmd(SET_CONTRAST)
        self.write_cmd(contrast)

    def invert(self, invert):
        self.write_cmd(SET_NORM_INV | (invert & 1))

    def show(self):
        x0 = 0
        x1 = self.width - 1
        if self.width == 64:
            # displays with width of 64 pixels are shifted by 32
            x0 += 32
            x1 += 32
        self.write_cmd(SET_COL_ADDR)
        self.write_cmd(x0)
        self.write_cmd(x1)
        self.write_cmd(SET_PAGE_ADDR)
        self.write_cmd(0)
        self.write_cmd(self.pages - 1)
        self.write_data(self.buffer)

class SSD1306_I2C(SSD1306):
    def __init__(self, width, height, i2c, addr=0x3C, external_vcc=False):
        self.i2c = i2c
        self.addr = addr
        self.temp = bytearray(2)
        self.write_list = [b"\x40", None]  # Co=0, D/C#=1
        super().__init__(width, height, external_vcc)

    def write_cmd(self, cmd):
        self.temp[0] = 0x80  # Co=1, D/C#=0
        self.temp[1] = cmd
        self.i2c.writeto(self.addr, self.temp)

    def write_data(self, buf):
        self.write_list[1] = buf
        self.i2c.writevto(self.addr, self.write_list)

class SSD1306_SPI(SSD1306):
    def __init__(self, width, height, spi, dc, res, cs, external_vcc=False):
        self.rate = 10 * 1024 * 1024
        dc.init(dc.OUT, value=0)
        res.init(res.OUT, value=0)
        cs.init(cs.OUT, value=1)
        self.spi = spi
        self.dc = dc
        self.res = res
        self.cs = cs
        import time
        self.res(1)
        time.sleep_ms(1)
        self.res(0)
        time.sleep_ms(10)
        self.res(1)
        super().__init__(width, height, external_vcc)

    def write_cmd(self, cmd):
        self.spi.init(baudrate=self.rate, polarity=0, phase=0)
        self.cs(1)
        self.dc(0)
        self.cs(0)
        self.spi.write(bytearray([cmd]))
        self.cs(1)

    def write_data(self, buf):
        self.spi.init(baudrate=self.rate, polarity=0, phase=0)
        self.cs(1)
        self.dc(1)
        self.cs(0)
        self.spi.write(buf)
        self.cs(1)
So the only advice I can give you is to try pressing the restart button.
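If you want to trigger the same thing from code or the REPL instead of the physical button, MicroPython provides a hard reset (note this restarts the whole board, so save your files first):

import machine

machine.reset()  # hard reset, equivalent to pressing the reset button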
I am trying to send video from a Raspberry Pi to my laptop over WiFi and save the frames as pictures, so I found the code below online, but I get the following errors when I run it.
I run the client code on the Pi using the Thonny IDE that comes preloaded; the server on the laptop is run using Python 3.6 IDLE.
I apologize for the way the code is formatted below and would be very grateful if anybody could help me sort this out.
import sys
import numpy as np
import cv2
import socket

class VideoStreamingTest(object):
    def __init__(self):
        self.server_socket = socket.socket()
        self.server_socket.bind(('0.0.0.0', 9006))
        self.server_socket.listen(0)
        self.connection, self.client_address = self.server_socket.accept()
        self.connection = self.connection.makefile('rb')
        self.streaming()

    def streaming(self):
        try:
            print("Connection from: ", self.client_address)
            print("Streaming...")
            print("Press 'q' to exit")
            stream_bytes = ' '
            while True:
                stream_bytes += self.connection.read(1024)
                first = stream_bytes.find('\xff\xd8')
                last = stream_bytes.find('\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    #image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_GRAYSCALE)
                    image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)
                    cv2.imshow('image', image)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
        finally:
            self.connection.close()
            self.server_socket.close()

if __name__ == '__main__':
    VideoStreamingTest()
I get the following error
Connection from: ('192.168.43.3', 47518)
Streaming...
Press 'q' to exit
Traceback (most recent call last):
  File "C:\Users\John Doe\d-ff\Desktop\AutoRCCar-master 3\test\stream_server_test.py", line 46, in <module>
    VideoStreamingTest()
  File "C:\Users\John Doe\d-ff\Desktop\AutoRCCar-master 3\test\stream_server_test.py", line 16, in __init__
    self.streaming()
  File "C:\Users\John Doe\d-ff\Desktop\AutoRCCar-master 3\test\stream_server_test.py", line 28, in streaming
    stream_bytes += self.connection.read(1024)
TypeError: must be str, not bytes
Client side on the pi
import io
import socket
import struct
import time
import picamera

# create socket and connect to the host
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('ToM', 9006))
connection = client_socket.makefile('wb')

try:
    with picamera.PiCamera() as camera:
        camera.resolution = (320, 240)  # pi camera resolution
        camera.framerate = 5  # frames/sec
        time.sleep(2)  # give the camera 2 secs to initialize
        start = time.time()
        stream = io.BytesIO()
        # send jpeg format video stream
        for foo in camera.capture_continuous(stream, 'jpeg', use_video_port=True):
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            stream.seek(0)
            connection.write(stream.read())
            if time.time() - start > 600:
                break
            stream.seek(0)
            stream.truncate()
    connection.write(struct.pack('<L', 0))
finally:
    connection.close()
    client_socket.close()
I get the following error
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/pi/Desktop/stream_client.py", line 40, in <module>
    connection.close()
  File "/usr/lib/python3.5/socket.py", line 594, in write
    return self._sock.send(b)
BrokenPipeError: [Errno 32] Broken pipe
I first thought it might be because of limited bandwidth, since I was running VNC Viewer (remote desktop) over WiFi on the Pi, but I don't think that's it.
I also had the same problem. After some searching, I found a solution.
In Python 3 we have to specify whether a string is a regular string or a binary string. That's why we use b'string' instead of just 'string'.
Change
stream_bytes = ' '
to
stream_bytes = b' '
Also change
first = stream_bytes.find('\xff\xd8')
last = stream_bytes.find('\xff\xd9')
to
first = stream_bytes.find(b'\xff\xd8')
last = stream_bytes.find(b'\xff\xd9')
Note that you are using cv2.CV_LOAD_IMAGE_UNCHANGED, which is not available in OpenCV 3.0.
Use cv2.IMREAD_COLOR to show the image in color.
Make these changes and your stream should run smoothly.
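Put together, the read loop in streaming() would look roughly like this (a sketch with the byte-string and OpenCV constant changes applied; np.frombuffer replaces the long-deprecated np.fromstring):

stream_bytes = b' '
while True:
    stream_bytes += self.connection.read(1024)
    first = stream_bytes.find(b'\xff\xd8')  # JPEG start-of-image marker
    last = stream_bytes.find(b'\xff\xd9')   # JPEG end-of-image marker
    if first != -1 and last != -1:
        jpg = stream_bytes[first:last + 2]
        stream_bytes = stream_bytes[last + 2:]
        image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break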
connection.write(struct.pack('<L', 0))
Make sure the line above sits inside the try block, so the end-of-stream marker is sent before the connection is closed.
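A minimal sketch of that placement, reusing connection and client_socket from the client script above (the capture loop is shortened to a single capture just to show the structure):

try:
    with picamera.PiCamera() as camera:
        camera.resolution = (320, 240)
        time.sleep(2)                        # let the camera warm up
        stream = io.BytesIO()
        camera.capture(stream, 'jpeg')       # stands in for the capture loop above
        connection.write(struct.pack('<L', stream.tell()))
        connection.flush()
        stream.seek(0)
        connection.write(stream.read())
    connection.write(struct.pack('<L', 0))   # end-of-stream marker, still inside try,
                                             # sent before finally closes the pipe
finally:
    connection.close()
    client_socket.close()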
I have trained a network model and saved its weights and architecture via the checkpoint = ModelCheckpoint(filepath='weights.hdf5') callback. During training, I use multiple GPUs by calling the function below:
def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
        stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
        start = stride * idx
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(merge(outputs, mode='concat', concat_axis=0))

    return Model(input=model.inputs, output=merged)
With the testing code:
from keras.models import Model, load_model
import numpy as np
import tensorflow as tf
model = load_model('cpm_log/deneme.hdf5')
x_test = np.random.randint(0, 255, (1, 368, 368, 3))
output = model.predict(x = x_test, batch_size=1)
print output[4].shape
I got the error below:
Traceback (most recent call last):
  File "cpm_test.py", line 5, in <module>
    model = load_model('cpm_log/Jun5_1000/deneme.hdf5')
  File "/usr/local/lib/python2.7/dist-packages/keras/models.py", line 240, in load_model
    model = model_from_config(model_config, custom_objects=custom_objects)
  File "/usr/local/lib/python2.7/dist-packages/keras/models.py", line 301, in model_from_config
    return layer_module.deserialize(config, custom_objects=custom_objects)
  File "/usr/local/lib/python2.7/dist-packages/keras/layers/__init__.py", line 46, in deserialize
    printable_module_name='layer')
  File "/usr/local/lib/python2.7/dist-packages/keras/utils/generic_utils.py", line 140, in deserialize_keras_object
    list(custom_objects.items())))
  File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 2378, in from_config
    process_layer(layer_data)
  File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 2373, in process_layer
    layer(input_tensors[0], **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 578, in __call__
    output = self.call(inputs, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 659, in call
    return self.function(inputs, **arguments)
  File "/home/muhammed/DEV_LIBS/developments/mocap/pose_estimation/training/cpm/multi_gpu.py", line 12, in get_slice
    def get_slice(data, idx, parts):
NameError: global name 'tf' is not defined
By inspecting the error output, I conclude that the problem is with the parallelization code. However, I can't resolve the issue.
You may need to use custom_objects to enable loading of the model. The Lambda layers built by make_parallel reference the module-level name tf inside get_slice; when Keras deserializes the model, that function is rebuilt in a namespace where tf is not defined, so you have to inject it:

import tensorflow as tf
from keras.models import load_model

model = load_model('model.h5', custom_objects={'tf': tf})
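An alternative that avoids the problem at the source (my suggestion, not from the original post) is to make the Lambda body self-contained, so the reloaded model does not depend on a module-level tf name:

def get_slice(data, idx, parts):
    import tensorflow as tf  # local import: re-executed when the deserialized function runs
    shape = tf.shape(data)
    size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
    stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
    start = stride * idx
    return tf.slice(data, start, size)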