Getting 'OSError: -2' while converting a tif image into jpg image using python - python-imaging-library

I'm trying to convert TIFF images into JPG format and use them later in OpenCV. It works fine on my local system, but when I execute it on a Linux server that is not connected to the internet, it fails while saving the Image object in JPG format.
I'm using Python 3.8 and installed all the libraries and their dependencies on the server from wheel files using pip.
Here is the piece of code:
import PIL
from PIL import Image
import cv2


def face_detect(sourceImagepath1, processedFileName, imagename, pdfname):
    temp_path = TEMP_PATH
    processed_path = PROCESSED_PATH
    misc_path = MISC_PATH
    # cascade file path1
    cascpath1 = misc_path + 'frontalface_cascade.xml'
    # Create haar cascade
    faceCascade = cv2.CascadeClassifier(cascpath1)
    # Read image with PIL
    image_pil = Image.open(sourceImagepath1)
    # Save image in jpg format
    image_pil.save(temp_path + processedFileName + '.jpg')
    # Read image with opencv
    image_cv = cv2.imread(temp_path + processedFileName + '.jpg')
    # Convert image into grayscale
    image_gray = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image
    face = faceCascade.detectMultiScale(
        image_gray,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(30, 30)
        # flags = cv2.CASCADE_SCALE_IMAGE
    )
    if len(face) > 0:
        # Coordinates based on auto-face detection
        x, y, w, h = face[0][0], face[0][1], face[0][2], face[0][3]
        crop_image = image_pil.crop([x - 20, y - 30, x + w + 40, y + h + 60])
        crop_image.save(processed_path + imagename)
    # Save tif file as pdf
    image_pil.save(processed_path + pdfname, save_all=True)
    # Close image object
    image_pil.close()
    return len(face)
Here TEMP_PATH, PROCESSED_PATH, and MISC_PATH are global variables of the form '/Users/user/Documents/Temp/'. I'm getting the error on this line:
image_pil.save(temp_path + processedFileName + '.jpg')
Below is the error I'm getting when executing the file:
Traceback (most recent call last):
  File "*path_from_root_directory*/PYTHON_SCRIPTS/Script/staging.py", line 363, in <module>
    auto_face_count = face_detect(sourceImagepath1, processedFileName, imagename, pdfname)
  File "*path_from_root_directory*/PYTHON_SCRIPTS/Script/staging.py", line 71, in greyScaleCheck
    image_pil.save(temp_path + processedFileName + '.jpg')
  File "*path_from_root_directory*/python3.8/site-packages/PIL/Image.py", line 2201, in save
    self._ensure_mutable()
  File "*path_from_root_directory*/python3.8/site-packages/PIL/Image.py", line 624, in _ensure_mutable
    self._copy()
  File "*path_from_root_directory*/python3.8/site-packages/PIL/Image.py", line 617, in _copy
    self.load()
  File "*path_from_root_directory*/python3.8/site-packages/PIL/TiffImagePlugin.py", line 1122, in load
    return self._load_libtiff()
  File "*path_from_root_directory*/python3.8/site-packages/PIL/TiffImagePlugin.py", line 1226, in _load_libtiff
    raise OSError(err)
OSError: -2
I have given full permissions to the Python directory and its sub-directories/files. Does anyone have any idea why I'm getting this error?
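A minimal diagnostic sketch (an assumption-driven starting point, not a confirmed fix): OSError: -2 is raised from inside Pillow's libtiff loader while the image is being decoded, so it is worth checking whether the server's Pillow build reports libtiff support and which compression the failing TIFF uses.
# Diagnostic sketch; assumes Pillow is importable and sourceImagepath1
# points at one of the failing TIFF files.
from PIL import Image, features

print("libtiff support:", features.check("libtiff"))

with Image.open(sourceImagepath1) as im:
    # Tag 259 is the TIFF Compression tag; a codec the server-side libtiff
    # cannot decode would fail exactly where the traceback shows.
    print("mode:", im.mode, "compression:", im.tag_v2.get(259))
    im.load()  # forces the decode that otherwise happens inside save()
If the feature check fails on the server but not locally, or the compression value is unusual, that would point at the Pillow/libtiff build rather than at file permissions.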

Related

Setting up a PICT with Raspberry Pi W zero: Error in picamera *.py : ], quality=qual):

For field research where I want to study plant-insect interactions, I am trying to set up a PICT (Plant Insect Interactions Camera Trap). There is a very detailed description available at https://zenodo.org/record/6301001, but I am still stuck.
I can access the camera through the browser, but the script won't start.
I am an absolute beginner and I have no idea what I am doing wrong. Can anybody help me get this running?
This is the script from the paper, which I saved as camera.py in /home/pi:
import picamera
import socket
import uuid
from datetime import datetime as dt

qual = 22  # level of image quality between 1 (highest quality, largest size) and 40 (lowest quality, smallest size), with typical values 20 to 25, default is 0
video_duration = 3600  # video duration in seconds
video_number = 1000  # number of video sequences to shoot
UID = uuid.uuid4().hex[:4].upper() + '_' + dt.now().strftime('%Y-%m-%d_%H-%M')  # generate random unique ID that will be used in video filename
HostName = socket.gethostname()

with picamera.PiCamera() as camera:
    camera.resolution = (1296, 972)  # max fps is 42 at 1296x972
    camera.framerate = 15  # recommended are 12, 15, 24, 30
    camera.annotate_frame_num = True
    camera.annotate_text_size = int(round(camera.resolution[0] / 64))
    camera.annotate_background = picamera.Color('black')  # text background colour
    camera.annotate_foreground = picamera.Color('white')  # text colour
    for filename in camera.record_sequence([
            '/home/pi/record/' + HostName + '_' + UID + '_%03d.h264' % (h + 1)
            for h in range(video_number)
            ], quality=qual):
        start = dt.now()  # get the current date and time
        while (dt.now() - start).seconds < video_duration:  # run until video_duration is reached
            camera.annotate_text = HostName + ', ' + str(camera.framerate) + ' fps, Q=' + str(qual) + ', ' + dt.now().strftime('%Y-%m-%d %H:%M:%S')  # tag the video with a custom text
            camera.wait_recording(0.2)  # pause the script for a short interval to save power
I am getting the following output:
~ $ python camera.py
Traceback (most recent call last):
  File "camera.py", line 23, in <module>
    ], quality=qual):
  File "/usr/lib/python2.7/dist-packages/picamera/camera.py", line 1270, in record_sequence
    camera_port, output_port = self._get_ports(True, splitter_port)
  File "/usr/lib/python2.7/dist-packages/picamera/camera.py", line 559, in _get_ports
    self._check_camera_open()
  File "/usr/lib/python2.7/dist-packages/picamera/camera.py", line 540, in _check_camera_open
    raise PiCameraClosed("Camera is closed")
picamera.exc.PiCameraClosed: Camera is closed
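As a first check (a sketch under the assumption that python-picamera and the camera interface are enabled for the interpreter that runs camera.py), capturing a single still outside the recording script helps tell camera-setup problems apart from problems in the script itself:
# Sanity-check sketch: run with the same interpreter that runs camera.py.
# If this also fails, the camera interface, not the script, is the problem.
import picamera

with picamera.PiCamera() as camera:
    camera.resolution = (1296, 972)
    camera.capture('/home/pi/test.jpg')  # grab one still frame
print('capture OK')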

Convert pointcloud csv to hdf5 to train on PointCNN network

I am trying to train my point cloud data on PointCNN, so I need to convert my dataset to HDF5 as used in PointCNN. PointCNN uses the modelnet40_ply_hdf5_2048 dataset.
I have tried converting my custom dataset, but I am having issues with the labels.
I tried this to get the labels/shape_names:
shape_ids = {}
shape_ids = [line.rstrip() for line in open(os.path.join(PATH, 'filelist1.txt'))]
shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids]
datapath = [(shape_names[i], os.path.join(PATH, shape_names[i], shape_ids[i]))
            for i in range(len(shape_ids))]
Convert to h5py file
import numpy as np
from tqdm import tqdm
import h5py

filenames = [line.rstrip() for line in open(os.path.join(PATH))]
f = h5py.File("filename", 'w')
data = np.zeros((len(filenames), 1024, 3))
for i in range(0, len(datapath)):
    fn = datapath[i]
    cls = classes[datapath[i][0]]
    label = np.array([cls]).astype(np.int32)
    csvreader = np.genfromtxt("data1/" + filenames[i] + ".csv", delimiter=",").astype(np.float32)
    for j in range(0, 1024):
        data[i, j] = [csvreader[j][0], csvreader[j][1], csvreader[j][2]]
    label
dset1 = f.create_dataset("data", data=data, compression="gzip", compression_opts=4)
dset2 = f.create_dataset("label", data=label, compression="gzip", compression_opts=1)
f.close()
It converted successfully, but when I tried to train on PointCNN I got the following:
PointCNN training
------Building model-------
------Successfully Built model-------
Traceback (most recent call last):
  File "train_pytorch.py", line 174, in <module>
    current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
  File "provider.py", line 28, in shuffle_data
    idx = np.arange(len(labels))
TypeError: len() of unsized object
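For comparison, here is a sketch (not the PointCNN reference converter; it assumes classes and datapath are built as above and that each datapath entry points at a CSV with at least 1024 rows of x,y,z values) that accumulates one label per point cloud into an (N,) array. In the snippet above, label only keeps the last sample's class, and that single value collapses to a 0-d array after np.squeeze, which is what makes len() fail:
import numpy as np
import h5py

data = np.zeros((len(datapath), 1024, 3), dtype=np.float32)
labels = np.zeros((len(datapath),), dtype=np.int32)

for i, (shape_name, csv_path) in enumerate(datapath):
    labels[i] = classes[shape_name]                          # integer class id per cloud
    points = np.genfromtxt(csv_path, delimiter=",").astype(np.float32)
    data[i] = points[:1024, :3]                              # first 1024 x,y,z points

with h5py.File("train0.h5", "w") as f:                       # hypothetical output name
    f.create_dataset("data", data=data, compression="gzip", compression_opts=4)
    f.create_dataset("label", data=labels, compression="gzip", compression_opts=1)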

PIL Autocontrast not supported for this image

I want to apply autocontrast from the PIL library to a .png image. With some images autocontrast works, but with others it doesn't:
raise OSError("not supported for this image mode")
OSError: not supported for this image mode
Here is my image, in case someone can apply autocontrast to it:
my image
import os
from PIL import Image, ImageOps

files = os.listdir('./STARE/train/images2/')
for file in files:
    # inputImage = './STARE/train/images/' + file
    im1 = Image.open('STARE/train/images2/' + file)
    # masque = Image.open('STARE/train/GT/' + file)
    image = file
    image = image[:-4]
    # preprocessing.N4(inputImage, './STARE/train/images/' + image + "_2.tif")
    images_egal = ImageOps.autocontrast(im1)
    images_egal.show()
    images_egal.save('STARE/train/images_egal/' + image + "_2.png")
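One hedged guess: in many Pillow versions ImageOps.autocontrast rejects images whose mode is not "L" or "RGB", so PNGs that open in another mode (RGBA or palette, for example) raise exactly this error. Converting first, inside the loop above, is worth a try:
from PIL import Image, ImageOps

# Sketch: convert to RGB before autocontrast (assumption: the failing PNGs
# are opened in a mode other than "L" or "RGB").
im1 = Image.open('STARE/train/images2/' + file)
images_egal = ImageOps.autocontrast(im1.convert("RGB"))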

How to use pillow library to access image files in sub-directories recursively?

I want to crop and resize multiple images in many sub-directories. The code works if the images are in the same directory, but it fails to read from other directories.
I have tried using os.walk(). It successfully iterates over the files in all subdirectories, but Pillow's Image.open() function fails to access the images and displays the error that "image.." was not found.
import os
from PIL import Image

for dirpath, dirnames, files in os.walk('.'):
    for filename in files:
        t = filename.split(".")
        ext = t[-1]
        if ext in ["jpg"]:
            print(filename)
            coords = (500, 250, 810, 720)
            image_obj = Image.open(filename)
            cropped_image = image_obj.crop(coords)
            resized_image = cropped_image.resize([227, 227])
            # name = "./data2" + str(i) + ".jpg"
            resized_image.save("new" + filename)
I expect the code to recursively crop and resize the images in all the sub-directories. The following error occurred.
frame0.jpg
Traceback (most recent call last):
  File "........./data2/cropitall.py", line 18, in <module>
    image_obj = Image.open(filename)  # path of image to be cropped
  File "C:\Python36\lib\site-packages\PIL\Image.py", line 2652, in open
    fp = builtins.open(filename, "rb")
FileNotFoundError: [Errno 2] No such file or directory: 'frame0.jpg'

Process finished with exit code 1
To open the image you need the entire path to the file, not just the file name.
Instead of
image_obj = Image.open(filename)
do
path = os.path.join(dirpath, filename)
image_obj = Image.open(path)
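An equivalent sketch using pathlib (an alternative to the os.walk approach, not part of the answer above) that yields full paths directly, so no explicit join is needed:
from pathlib import Path
from PIL import Image

coords = (500, 250, 810, 720)
for path in Path('.').rglob('*.jpg'):                # walks sub-directories recursively
    cropped = Image.open(path).crop(coords)
    resized = cropped.resize((227, 227))
    resized.save(path.with_name('new' + path.name))  # save next to the original file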

TypeError: must be str, not bytes , Python 3, Raspberry pi

I am trying to send video from my Raspberry Pi to my laptop and save the frames as pictures, so I found the code below online, but I get the following errors when I run it. I run the client code on the Pi using the Thonny IDE that comes preloaded. I apologize for the way the code is formatted below and would be very grateful if anybody could help me sort this out.
The server on the laptop is run using Python 3.6 IDLE:
import sys
import numpy as np
import cv2
import socket


class VideoStreamingTest(object):
    def __init__(self):
        self.server_socket = socket.socket()
        self.server_socket.bind(('0.0.0.0', 9006))
        self.server_socket.listen(0)
        self.connection, self.client_address = self.server_socket.accept()
        self.connection = self.connection.makefile('rb')
        self.streaming()

    def streaming(self):
        try:
            print("Connection from: ", self.client_address)
            print("Streaming...")
            print("Press 'q' to exit")
            stream_bytes = ' '
            while True:
                stream_bytes += self.connection.read(1024)
                first = stream_bytes.find('\xff\xd8')
                last = stream_bytes.find('\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    # image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_GRAYSCALE)
                    image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)
                    cv2.imshow('image', image)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
        finally:
            self.connection.close()
            self.server_socket.close()


if __name__ == '__main__':
    VideoStreamingTest()
I get the following error:
Connection from: ('192.168.43.3', 47518)
Streaming...
Press 'q' to exit
Traceback (most recent call last):
  File "C:\Users\John Doe\d-ff\Desktop\AutoRCCar-master 3\test\stream_server_test.py", line 46, in <module>
    VideoStreamingTest()
  File "C:\Users\John Doe\d-ff\Desktop\AutoRCCar-master 3\test\stream_server_test.py", line 16, in __init__
    self.streaming()
  File "C:\Users\John Doe\d-ff\Desktop\AutoRCCar-master 3\test\stream_server_test.py", line 28, in streaming
    stream_bytes += self.connection.read(1024)
TypeError: must be str, not bytes
Client-side code on the Pi:
import io
import socket
import struct
import time
import picamera

# create socket and bind host
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('ToM', 9006))
connection = client_socket.makefile('wb')

try:
    with picamera.PiCamera() as camera:
        camera.resolution = (320, 240)  # pi camera resolution
        camera.framerate = 5  # 10 frames/sec
        time.sleep(2)  # give 2 secs for camera to initialize
        start = time.time()
        stream = io.BytesIO()
        # send jpeg format video stream
        for foo in camera.capture_continuous(stream, 'jpeg', use_video_port=True):
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            stream.seek(0)
            connection.write(stream.read())
            if time.time() - start > 600:
                break
            stream.seek(0)
            stream.truncate()
    connection.write(struct.pack('<L', 0))
finally:
    connection.close()
    client_socket.close()
I get the following error:
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/pi/Desktop/stream_client.py", line 40, in <module>
    connection.close()
  File "/usr/lib/python3.5/socket.py", line 594, in write
    return self._sock.send(b)
BrokenPipeError: [Errno 32] Broken pipe
I first thought it might be because of limited bandwidth, since I was running VNC Viewer (remote desktop) over Wi-Fi on the Pi, but I don't think that is the cause.
I also had the same problem. After some searching I found a solution.
In Python 3 we have to specify whether a string is a regular string or binary. That's why we use b'string' instead of just 'string'.
Change
stream_bytes = ' '
to
stream_bytes = b' '
Also change
first = stream_bytes.find('\xff\xd8')
last = stream_bytes.find('\xff\xd9')
to
first = stream_bytes.find(b'\xff\xd8')
last = stream_bytes.find(b'\xff\xd9')
Note that you are using cv2.CV_LOAD_IMAGE_UNCHANGED, which is not available in OpenCV 3.0.
Use cv2.IMREAD_COLOR to show the image in color.
Make these changes and your stream should run smoothly.
Also check that, on the client side, the line
connection.write(struct.pack('<L', 0))
is placed inside the try block.
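Putting these changes together, the body of the server's read loop would look roughly like the sketch below (np.frombuffer replaces the deprecated np.fromstring; that substitution is an extra change, not part of the answer above):
# Sketch of the corrected read loop inside streaming(); assumes
# self.connection, cv2 and numpy are set up as in the original class.
stream_bytes = b' '                                    # bytes, not str
while True:
    stream_bytes += self.connection.read(1024)
    first = stream_bytes.find(b'\xff\xd8')             # JPEG start-of-image marker
    last = stream_bytes.find(b'\xff\xd9')              # JPEG end-of-image marker
    if first != -1 and last != -1:
        jpg = stream_bytes[first:last + 2]
        stream_bytes = stream_bytes[last + 2:]
        image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break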