Error from an SPI camera connected to a Raspberry Pi - MicroPython

I've got the following code:
import machine
import time as utime
from OV5642_reg import *

OV5642 = 1
BMP = 0
JPEG = 1
RAW = 2
OV5642_CHIPID_HIGH = 0x300a
OV5642_CHIPID_LOW = 0x300b

class ArducamClass(object):
    def __init__(self, Type):
        self.CameraMode = JPEG
        self.CameraType = Type
        self.SPI_CS = machine.Pin(5, mode=machine.Pin.OUT, value=1)
        self.I2cAddress = 0x3c
        self.spi = machine.SPI(0, baudrate=4000000, polarity=0, phase=0, bits=8,
                               sck=machine.Pin(2), mosi=machine.Pin(3), miso=machine.Pin(4))
        self.i2c = machine.I2C(0, scl=machine.Pin(9), sda=machine.Pin(8), freq=1000000)
        print(self.i2c.scan())
        self.Spi_write(0x07, 0x80)
        utime.sleep(0.1)
        self.Spi_write(0x07, 0x00)
        utime.sleep(0.1)

    def Camera_Detection(self):
        while True:
            if self.CameraType == OV5642:
                self.I2cAddress = 0x3c
                self.wrSensorReg16_8(0xff, 0x01)
                id_h = self.rdSensorReg16_8(OV5642_CHIPID_HIGH)
                id_l = self.rdSensorReg16_8(OV5642_CHIPID_LOW)
                if (id_h == 0x56) and (id_l == 0x42):
                    print('CameraType is OV5642')
                    break
                else:
                    print('Can\'t find OV5642 module')
            utime.sleep(1)

    def Set_Camera_mode(self, mode):
        self.CameraMode = mode

    def wrSensorReg16_8(self, addr, val):
        buffer = bytearray(3)
        buffer[0] = (addr >> 8) & 0xff
        buffer[1] = addr & 0xff
        buffer[2] = val
        self.iic_write(buffer)

    def rdSensorReg16_8(self, addr):
        buffer = bytearray(2)
        rt = bytearray(1)
        buffer[0] = (addr >> 8) & 0xff
        buffer[1] = addr & 0xff
        self.iic_write(buffer)
        self.iic_readinto(rt)
        return rt[0]

    def iic_write(self, buf, *, start=0, end=None):
        if end is None:
            end = len(buf)
        self.i2c.writeto(self.I2cAddress, buf[start:end])

    def iic_readinto(self, buf, *, start=0, end=None):
        if end is None:
            end = len(buf)
        print(buf[start:end])
        print(self.I2cAddress)
        self.i2c.readfrom_into(self.I2cAddress, buf[start:end])

    def Spi_write(self, address, value):
        maskbits = 0x80
        buffer = bytearray(2)
        buffer[0] = address | maskbits
        buffer[1] = value
        self.SPI_CS.value(0)
        self.spi_write(buffer)
        self.SPI_CS.value(1)

    def spi_write(self, buf, *, start=0, end=None):
        if end is None:
            end = len(buf)
        self.spi.write(buf[start:end])

mycam = ArducamClass(OV5642)
mycam.Camera_Detection()
Which gives me the following error:
Traceback (most recent call last):
File "<stdin>", line 89, in <module>
File "<stdin>", line 33, in Camera_Detection
File "<stdin>", line 58, in rdSensorReg16_8
File "<stdin>", line 71, in iic_readinto
OSError: [Errno 5] EIO
I'm sure that the camera is connected correctly (I've tested it using the standard CircuitPython code provided with it and it works), so I think that I'm misinterpreting how the readfrom_into call works... any hint?
I tried to understand what gets written in the readfrom_into part but can't get anything. The starting code (based on CircuitPython) is here: https://github.com/ArduCAM/PICO_SPI_CAM/blob/master/Python/Arducam.py
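Two things may be worth checking here, both hedged guesses rather than a verified fix. First, readfrom_into fills the buffer object you pass it, but buf[start:end] creates a temporary copy, so even a successful read would never land in rt; passing the bytearray itself (or a memoryview slice) avoids that. Second, the CircuitPython original performs the register-address write and the read as one combined transaction, which MicroPython's machine.I2C can approximate with readfrom_mem and a 16-bit register address. A minimal sketch, assuming the port's I2C supports the addrsize keyword:

# Hedged sketch of rdSensorReg16_8 as a single combined transaction.
# readfrom_mem sends the 16-bit register address and reads one byte back,
# avoiding the STOP condition that separate writeto()/readfrom_into() calls insert.
def rdSensorReg16_8(self, addr):
    data = self.i2c.readfrom_mem(self.I2cAddress, addr, 1, addrsize=16)
    return data[0]

If you keep readfrom_into, pass memoryview(buf)[start:end] rather than buf[start:end] so the data read back actually ends up in the caller's buffer.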

Related

How to find expected value of np.array using scipy.stats?

I am trying to get the expected value of a NumPy array, but I am running into a problem when I pass my array into the function. Here is an example of what is happening:
a = np.ones(10)
stats.rv_continuous.expect(args=a)
I get this error:
Traceback (most recent call last):
File "<pyshell#3>", line 1, in <module>
stats.rv_continuous.expect(args=a)
TypeError: expect() missing 1 required positional argument: 'self'
If I try stats.rv_continuous.expect(a), I get this error:
'numpy.ndarray' object has no attribute '_argcheck'
Can someone tell me how to get scipy.stats to work with an array?
Update: following bob's comment I changed the code to:
st=stats.rv_continuous()
ev = st.expect(args=signal_array)
print(ev)
where signal_array is a NumPy array. However, I now get this error:
Traceback (most recent call last):
File "C:\Users\...\OneDrive\Área de Trabalho\TickingClock\Main.py", line 35, in <module>
ev = st.expect(args=signal_array)
File "C:\Users\...\AppData\Local\Programs\Python\Python39\lib\site-packages\scipy\stats\_distn_infrastructure.py", line 2738, in expect
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
File "C:\Users\...\AppData\Local\Programs\Python\Python39\lib\site-packages\scipy\integrate\quadpack.py", line 351, in quad
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
File "C:\Users\...\AppData\Local\Programs\Python\Python39\lib\site-packages\scipy\integrate\quadpack.py", line 465, in _quad
return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
File "C:\Users\...\AppData\Local\Programs\Python\Python39\lib\site-packages\scipy\stats\_distn_infrastructure.py", line 2722, in fun
return x * self.pdf(x, *args, **lockwds)
File "C:\Users\...\AppData\Local\Programs\Python\Python39\lib\site-packages\scipy\stats\_distn_infrastructure.py", line 1866, in pdf
args, loc, scale = self._parse_args(*args, **kwds)
TypeError: _parse_args() got multiple values for argument 'loc'
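For what it's worth, rv_continuous.expect computes E[f(X)] by integrating f against a distribution's pdf, so args is interpreted as the distribution's shape parameters, not as data (which is why loc ends up being passed twice). If the goal is the expected value of sample data, np.mean is enough, or you can build an empirical distribution first. A rough sketch, assuming signal_array holds the samples:

import numpy as np
from scipy import stats

signal_array = np.random.rand(1000)  # stand-in for the actual signal data

# Sample mean: the usual estimate of the expected value of the data.
print(signal_array.mean())

# Alternatively, fit an empirical distribution to the data and ask it for E[X].
dist = stats.rv_histogram(np.histogram(signal_array, bins=50))
print(dist.expect())  # integrates x * pdf(x) over the histogram support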

When I add workers to my neural network I get an error, PyTorch

I have checked a lot of posts and none of them seem to work for me. When I try to add workers to the DataLoader in PyTorch, it just feeds me an error back. I have tried reading it and figuring it out, but I can't seem to find a solution. I assume there is something I'm supposed to add to make the workers able to do their job.
I have 64GB of RAM, an i9-9900K, and a 3080 Ti, so I don't think it's a memory error, is it?
I included the error with 1 worker and with 4 workers because they seem to be different. It also works with zero workers.
Here is the error with 4 workers:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 114, in _main
prepare(preparation_data)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 225, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 277, in _fixup_main_from_path
run_name="__mp_main__")
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\runpy.py", line 263, in run_path
pkg_name=pkg_name, script_name=fname)
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
Traceback (most recent call last):
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 990, in _try_get_data
data = self._data_queue.get(timeout=timeout)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\queue.py", line 172, in get raise Empty
queue.Empty
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "c:/Users/14055/Desktop/Class 1 Project/Chegg.py", line 202, in <module>
training()
File "c:/Users/14055/Desktop/Class 1 Project/Chegg.py", line 122, in training
for data, target in load_data.train_loader:
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 521, in __next__ data = self._next_data()
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 1186, in _next_data idx, data = self._get_data()
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 1142, in _get_data success, data = self._try_get_data()
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 1003, in _try_get_data
raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) from e
RuntimeError: DataLoader worker (pid(s) 23204, 7668, 13636, 6132) exited unexpectedly
Error with 1 worker:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 114, in _main
prepare(preparation_data)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 225, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 277, in _fixup_main_from_path
run_name="__mp_main__")
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\runpy.py", line 263, in run_path
pkg_name=pkg_name, script_name=fname)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "c:\Users\14055\Desktop\Class 1 Project\Chegg.py", line 202, in <module>
training()
File "c:\Users\14055\Desktop\Class 1 Project\Chegg.py", line 122, in training
for data, target in load_data.train_loader:
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 359, in __iter__
return self._get_iterator()
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 305, in _get_iterator
return _MultiProcessingDataLoaderIter(self)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 918, in __init__
w.start()
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 33, in __init__
prep_data = spawn.get_preparation_data(process_obj._name)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 143, in get_preparation_data
_check_not_importing_main()
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 136, in _check_not_importing_main
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
Traceback (most recent call last):
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 990, in _try_get_data
data = self._data_queue.get(timeout=timeout)
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\queue.py", line 172, in get
raise Empty
queue.Empty
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "c:/Users/14055/Desktop/Class 1 Project/Chegg.py", line 202, in <module>
training()
File "c:/Users/14055/Desktop/Class 1 Project/Chegg.py", line 122, in training
for data, target in load_data.train_loader:
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 521, in __next__
data = self._next_data()
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 1186, in _next_data
idx, data = self._get_data()
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 1142, in _get_data
success, data = self._try_get_data()
File "C:\Users\14055\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 1003, in _try_get_data
raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) from e
RuntimeError: DataLoader worker (pid(s) 3372) exited unexpectedly
Code:
from numpy import testing
import torch.cuda
import numpy as np
import time
import array as arr
import os
from datetime import date, datetime
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary

torch.cuda.set_device(0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def load_data():
    num_workers = 1
    load_data.batch_size = 20
    transform = transforms.ToTensor()
    train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform)
    load_data.train_loader = torch.utils.data.DataLoader(train_data,
        batch_size=load_data.batch_size, num_workers=num_workers, pin_memory=True,
        shuffle=True)
    test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform)
    load_data.test_loader = torch.utils.data.DataLoader(test_data,
        batch_size=load_data.batch_size, num_workers=num_workers, pin_memory=True,
        shuffle=True)

def visualize():
    dataiter = iter(load_data.train_loader)
    visualize.images, labels = dataiter.next()
    visualize.images = visualize.images.numpy()
    fig = plt.figure(figsize=(25, 4))
    for idx in np.arange(load_data.batch_size):
        ax = fig.add_subplot(2, load_data.batch_size/2, idx+1, xticks=[], yticks=[])
        ax.imshow(np.squeeze(visualize.images[idx]), cmap='gray')
        ax.set_title(str(labels[idx].item()))
    #plt.show()

def fig_values():
    img = np.squeeze(visualize.images[1])
    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(111)
    ax.imshow(img, cmap='gray')
    width, height = img.shape
    thresh = img.max()/2.5
    for x in range(width):
        for y in range(height):
            val = round(img[x][y], 2) if img[x][y] != 0 else 0
            ax.annotate(str(val), xy=(y, x),
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white' if img[x][y] < thresh else 'black')
    #plt.show()

load_data()
#visualize()
#fig_values()

class NeuralNet(nn.Module):
    def __init__(self, gpu=True):
        super(NeuralNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(num_features=128)
        self.tns1 = nn.Conv2d(in_channels=128, out_channels=4, kernel_size=1, padding=1)
        self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(num_features=16)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(num_features=16)
        self.conv4 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(num_features=32)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.tns2 = nn.Conv2d(in_channels=32, out_channels=16, kernel_size=1, padding=1)
        self.conv5 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1)
        self.bn5 = nn.BatchNorm2d(num_features=16)
        self.conv6 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
        self.bn6 = nn.BatchNorm2d(num_features=32)
        self.conv7 = nn.Conv2d(in_channels=32, out_channels=10, kernel_size=1, padding=1)
        self.gpool = nn.AvgPool2d(kernel_size=7)
        self.drop = nn.Dropout2d(0.1)

    def forward(self, x):
        x = self.tns1(self.drop(self.bn1(F.relu(self.conv1(x)))))
        x = self.drop(self.bn2(F.relu(self.conv2(x))))
        x = self.pool1(x)
        x = self.drop(self.bn3(F.relu(self.conv3(x))))
        x = self.drop(self.bn4(F.relu(self.conv4(x))))
        x = self.tns2(self.pool2(x))
        x = self.drop(self.bn5(F.relu(self.conv5(x))))
        x = self.drop(self.bn6(F.relu(self.conv6(x))))
        x = self.conv7(x)
        x = self.gpool(x)
        x = x.view(-1, 10)
        return F.log_softmax(x).to(device)

#has antioverfit
def training():
    model.to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.003, weight_decay=0.00005, momentum=.9, nesterov=True)
    n_epochs = 20000
    a = np.float64([9, 9, 9, 9, 9])  #antioverfit
    testing_loss = 0.0
    for epoch in range(n_epochs):
        if(testing_loss <= a[4]):  # part of anti overfit
            train_loss = 0.0
            testing_loss = 0.0
            model.train().to(device)
            for data, target in load_data.train_loader:
                optimizer.zero_grad()
                data = data.to(device)      #gpu
                target = target.to(device)  #gpu
                output = model(data).to(device)
                loss = F.nll_loss(output, target)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()*data.size(0)
            train_loss = train_loss/len(load_data.train_loader.dataset)
            print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch+1, train_loss))
            model.eval().to(device)  # Gets Validation loss
            train_loss = 0.0
            with torch.no_grad():
                for data, target in load_data.test_loader:
                    data = data.to(device)
                    target = target.to(device)
                    output = model(data).to(device)
                    loss = F.nll_loss(output, target)
                    testing_loss += loss.item()*data.size(0)
            testing_loss = testing_loss / len(load_data.test_loader.dataset)
            print('Validation loss = ', testing_loss)
            a = np.insert(a, 0, testing_loss)  # part of anti overfit
            a = np.delete(a, 5)
    print('Validation loss = ', testing_loss)

def evalution():
    test_loss = 0.0
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    model.eval().to(device)
    for data, target in load_data.test_loader:
        data = data.to(device)
        target = target.to(device)
        output = model(data).to(device)
        loss = F.nll_loss(output, target)
        test_loss += loss.item()*data.size(0)
        _, pred = torch.max(output, 1)
        correct = np.squeeze(pred.eq(target.data.view_as(pred))).to(device)
        for i in range(load_data.batch_size):
            try:
                label = target.data[i]
                class_correct[label] += correct[i].item()
                class_total[label] += 1
            except IndexError:
                break
    # calculate and print avg test loss
    test_loss = test_loss/len(load_data.test_loader.dataset)
    print('Test Loss: {:.6f}\n'.format(test_loss))
    for i in range(10):
        if class_total[i] > 0:
            print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
                str(i), 100 * class_correct[i] / class_total[i],
                np.sum(class_correct[i]), np.sum(class_total[i])))
        else:
            print('Test Accuracy of %5s: N/A (no training examples)')
    print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))
    acc = (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total))
    name = f"model-{acc}.pt"
    name2 = f"model-{acc}.pth"
    save_path = os.path.join("models", name)
    save_path2 = os.path.join("models", name2)
    torch.save(model, save_path)
    torch.save(model, save_path2)

model = NeuralNet().to(device)
summary(model, input_size=(1, 28, 28))
training()
evalution()
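On Windows, DataLoader workers are started with the spawn method, so each worker re-imports the main script; the module-level load_data(), model creation and training() calls then run again inside every worker, which is what the freeze_support() message is complaining about. A minimal sketch of the usual fix, keeping the rest of the code above unchanged, is to move those calls under a main guard:

# Hypothetical restructuring of the entry point: only the parent process should
# build the loaders, create the model and start training; spawned worker
# processes merely import the module and stop at the guard.
if __name__ == '__main__':
    load_data()
    model = NeuralNet().to(device)
    summary(model, input_size=(1, 28, 28))
    training()
    evalution()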

RuntimeError using NetworkX on example code

Following the examples on https://networkx.github.io/documentation/stable/reference/drawing.html, I tried the following code:
import networkx as nx
G = nx.complete_graph(5)
A = nx.nx_agraph.to_agraph(G)
H = nx.nx_agraph.from_agraph(A)
I get a RuntimeError as follows:
H = nx.nx_agraph.from_agraph(A)
Traceback (most recent call last):
File "/home/nom/anaconda3/envs/wcats/lib/python3.7/site-packages/pygraphviz/agraph.py", line 1750, in iteritems
ah = gv.agnxtattr(self.handle, self.type, ah)
StopIteration: agnxtattr
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<ipython-input-10-19c378da806e>", line 1, in <module>
H = nx.nx_agraph.from_agraph(A)
File "/home/nom/anaconda3/envs/wcats/lib/python3.7/site-packages/networkx/drawing/nx_agraph.py", line 85, in from_agraph
N.graph.update(A.graph_attr)
File "/home/nom/anaconda3/envs/wcats/lib/python3.7/site-packages/pygraphviz/agraph.py", line 1740, in keys
return list(self.__iter__())
File "/home/nom/anaconda3/envs/wcats/lib/python3.7/site-packages/pygraphviz/agraph.py", line 1743, in __iter__
for (k, v) in self.iteritems():
RuntimeError: generator raised StopIteration
This error is so basic that I suspect there's a problem with the package itself. Any suggestions on how I can try to troubleshoot this one?
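One hedged observation: on Python 3.7+, PEP 479 turns a StopIteration that escapes a generator into exactly this RuntimeError, and the generator here (iteritems) lives in pygraphviz, not networkx, so an older pygraphviz build is the usual suspect rather than the example code. A quick check of what the wcats environment is actually loading:

# Builds of pygraphviz older than roughly 1.5 raise StopIteration inside
# iteritems() and trip PEP 479 on Python 3.7+; upgrading pygraphviz in this
# environment is a reasonable first troubleshooting step.
import networkx
import pygraphviz

print(pygraphviz.__version__)
print(networkx.__version__)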

Cannot cast ListType[tuple(float64 x 2)] to list(tuple(float64 x 2)) in numba

Hello, I am trying to use a typed List in numba v0.46.0:
>>> from numba.typed import List
>>> from numba import types
>>> mylist = List.empty_list(item_type=types.Tuple((types.f8, types.f8)))
>>> mylist2 = List.empty_list(item_type=types.List(dtype=types.Tuple((types.f8, types.f8))))
>>> mylist2.append(mylist)
but I got the following error and I am wondering how to fix it:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.7/site-packages/numba/typed/typedlist.py", line 223, in append
_append(self, item)
File "/usr/local/lib/python3.7/site-packages/numba/dispatcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File "/usr/local/lib/python3.7/site-packages/numba/dispatcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File "/usr/local/lib/python3.7/site-packages/numba/six.py", line 668, in reraise
raise value.with_traceback(tb)
numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Internal error at .
Failed in nopython mode pipeline (step: nopython mode backend)
Cannot cast ListType[tuple(float64 x 2)] to list(tuple(float64 x 2)): %".24" = load {i8*, i8*}, {i8*, i8*}* %"item"
File "../../usr/local/lib/python3.7/site-packages/numba/listobject.py", line 434:
def impl(l, item):
    casteditem = _cast(item, itemty)
The inner item type has to be declared as a typed-list type (types.ListType), not a reflected list (types.List); the following should work:
mylist2 = List.empty_list(item_type=types.ListType(itemty=types.Tuple((types.f8, types.f8))))
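That matches the error text: a numba.typed.List instance has type types.ListType, while types.List describes the old reflected list, hence "Cannot cast ListType[...] to list(...)". A small sketch of the corrected snippet, assuming numba 0.45+ typed containers:

from numba import types
from numba.typed import List

inner_ty = types.Tuple((types.f8, types.f8))

mylist = List.empty_list(item_type=inner_ty)
mylist.append((1.0, 2.0))

# The outer list must declare its items as typed lists (types.ListType),
# not reflected lists (types.List).
mylist2 = List.empty_list(item_type=types.ListType(inner_ty))
mylist2.append(mylist)
print(mylist2[0][0])  # (1.0, 2.0)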

TypeError: must be str, not bytes, Python 3, Raspberry Pi

I am trying to send video from a Raspberry Pi to my laptop and save the frames as pictures, so I found the code below online, but I get the following errors when I run it.
I run the client code on the Pi using the Thonny IDE that comes preloaded; the server on the laptop is run using Python 3.6 IDLE.
I apologize for the way the code is formatted below and would be very grateful if anybody could help me sort this out.
import sys
import numpy as np
import cv2
import socket


class VideoStreamingTest(object):
    def __init__(self):
        self.server_socket = socket.socket()
        self.server_socket.bind(('0.0.0.0', 9006))
        self.server_socket.listen(0)
        self.connection, self.client_address = self.server_socket.accept()
        self.connection = self.connection.makefile('rb')
        self.streaming()

    def streaming(self):
        try:
            print("Connection from: ", self.client_address)
            print("Streaming...")
            print("Press 'q' to exit")
            stream_bytes = ' '
            while True:
                stream_bytes += self.connection.read(1024)
                first = stream_bytes.find('\xff\xd8')
                last = stream_bytes.find('\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    #image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_GRAYSCALE)
                    image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)
                    cv2.imshow('image', image)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
        finally:
            self.connection.close()
            self.server_socket.close()


if __name__ == '__main__':
    VideoStreamingTest()
I get the following error
Connection from: ('192.168.43.3', 47518)
Streaming...
Press 'q' to exit
Traceback (most recent call last):
File "C:\Users\John Doe\d-ff\Desktop\AutoRCCar-master
3\test\stream_server_test.py", line 46, in <module>
VideoStreamingTest()
File "C:\Users\John Doe\d-ff\Desktop\AutoRCCar-master
3\test\stream_server_test.py", line 16, in __init__
self.streaming()
File "C:\Users\John Doe\d-ff\Desktop\AutoRCCar-master
3\test\stream_server_test.py", line 28, in streaming
stream_bytes += self.connection.read(1024)
TypeError: must be str, not bytes
Client side on the pi
import io
import socket
import struct
import time
import picamera

# create socket and bind host
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('ToM', 9006))
connection = client_socket.makefile('wb')

try:
    with picamera.PiCamera() as camera:
        camera.resolution = (320, 240)  # pi camera resolution
        camera.framerate = 5            # 10 frames/sec
        time.sleep(2)                   # give 2 secs for camera to initilize
        start = time.time()
        stream = io.BytesIO()
        # send jpeg format video stream
        for foo in camera.capture_continuous(stream, 'jpeg', use_video_port=True):
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            stream.seek(0)
            connection.write(stream.read())
            if time.time() - start > 600:
                break
            stream.seek(0)
            stream.truncate()
    connection.write(struct.pack('<L', 0))
finally:
    connection.close()
    client_socket.close()
I get the following error
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/pi/Desktop/stream_client.py", line 40, in <module>
connection.close()
File "/usr/lib/python3.5/socket.py", line 594, in write
return self._sock.send(b)
BrokenPipeError: [Errno 32] Broken pipe
I first thought it might be because of limited bandwidth, since I was running VNC viewer (remote desktop) over wifi on the Pi, but I don't think it is.
I also had the same problem. After some searching I found a solution.
In Python 3 we have to specify whether a string is a regular string or binary; that's why we use b'string' instead of just 'string'.
Change
stream_bytes = ' '
to
stream_bytes = b' '
Also change
first = stream_bytes.find('\xff\xd8')
last = stream_bytes.find('\xff\xd9')
to
first = stream_bytes.find(b'\xff\xd8')
last = stream_bytes.find(b'\xff\xd9')
Note that you are using cv2.CV_LOAD_IMAGE_UNCHANGED, which is not available in OpenCV 3.0; use cv2.IMREAD_COLOR to show the image in color.
Make these changes and your stream should run smoothly.
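Putting those changes together, a sketch of the corrected receive loop inside streaming() might look like this (assuming OpenCV 3.x, where np.frombuffer is also preferred over the deprecated np.fromstring):

# Corrected fragment of streaming(): byte strings throughout, OpenCV 3.x flag.
stream_bytes = b' '
while True:
    stream_bytes += self.connection.read(1024)
    first = stream_bytes.find(b'\xff\xd8')   # JPEG start-of-image marker
    last = stream_bytes.find(b'\xff\xd9')    # JPEG end-of-image marker
    if first != -1 and last != -1:
        jpg = stream_bytes[first:last + 2]
        stream_bytes = stream_bytes[last + 2:]
        image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break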
For the BrokenPipeError on the client side, also check that the final connection.write(struct.pack('<L', 0)) is placed inside the try block.