I would like to use the chaco tools ScatterInspector and/or ScatterInspectorOverlay with enaml. I've set up a very simple controller and view (source below) but cannot determine how to proceed. I have tried unsuccessfully to follow the minimal and old examples I've found.
If I uncomment the overlay part for ScatterInspectorOverlay, the code fails to run with
File ".../chaco/scatter_inspector_overlay.py", line 51, in overlay if not plot or not plot.index or not getattr(plot, "value", True):
If I comment out the overlay part, I of course don't get the overlay behavior I want, and on moving the mouse I also get
File ".../chaco/tools/scatter_inspector.py", line 48, in normal_mouse_move index = plot.map_index((event.x, event.y), threshold=self.threshold)
view.enaml source:
from enaml.widgets.api import (
    Window, Container, EnableCanvas,
)

enamldef ScatterView(Window):
    attr controller
    title = "Scatter Inspector Test"
    initial_size = (640, 480)
    Container:
        EnableCanvas:
            component = controller.scatter_plot
controller.py source:
import enaml
from enaml.stdlib.sessions import show_simple_view

from traits.api import HasTraits, Instance
from chaco.api import Plot, ArrayPlotData, ScatterInspectorOverlay
from chaco.tools.api import ScatterInspector
from numpy import linspace, sin


class ScatterController(HasTraits):

    scatter_plot = Instance(Plot)

    def _scatter_plot_default(self):
        # data
        x = linspace(-14, 14, 100)
        y = sin(x) * x**3
        plotdata = ArrayPlotData(x=x, y=y)
        # plot
        scatter_plot = Plot(plotdata)
        renderer = scatter_plot.plot(("x", "y"), type="scatter", color="red")
        # inspector
        scatter_plot.tools.append(ScatterInspector(scatter_plot))
        # overlay
        # scatter_plot.overlays.append(ScatterInspectorOverlay(
        #     scatter_plot,
        #     hover_color='red',
        #     hover_marker_size=6,
        #     selection_marker_size=6,
        #     selection_color='yellow',
        #     selection_outline_color='purple',
        #     selection_line_width=3
        # ))
        #return
        return scatter_plot


if __name__ == "__main__":
    with enaml.imports():
        from view import ScatterView
    main_controller = ScatterController()
    window = ScatterView(controller=main_controller)
    show_simple_view(window)
The problem with my code above was that I was attaching ScatterInspector to scatter_plot rather than to renderer, and that I was missing the [0] index needed to pull the renderer out of the list returned by plot().
The key thing I really wanted, though, was to be notified when the mouse hovers over a data point and/or a data point is selected. I added when_hover_or_selection_changes, which shows how to do that.
Working controller.py:
...
        # plot
        scatter_plot = Plot(plotdata)
        renderer = scatter_plot.plot(("x", "y"), type="scatter", color="lightblue")[0]
        # inspector
        renderer.tools.append(ScatterInspector(renderer))
        # overlay
        renderer.overlays.append(ScatterInspectorOverlay(
            renderer,
            hover_color="red",
            hover_marker_size=6,
            selection_marker_size=6,
            selection_color="yellow",
            selection_outline_color="purple",
            selection_line_width=3))
...
    # get notified when hover or selection changes
    @on_trait_change('renderer.index.metadata')
    def when_hover_or_selection_changes(self):
        print 'renderer.index.metadata = ', self.renderer.index.metadata
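For reference, a self-contained sketch of the controller with the notification wired up might look like the following (a sketch rather than my verbatim file; the key point is that the renderer has to be held as a trait on the class for the extended name in on_trait_change('renderer.index.metadata') to resolve):

from numpy import linspace, sin

from traits.api import HasTraits, Instance, on_trait_change
from chaco.api import ArrayPlotData, Plot, ScatterInspectorOverlay, ScatterPlot
from chaco.tools.api import ScatterInspector


class ScatterController(HasTraits):

    scatter_plot = Instance(Plot)
    # holding the renderer as a trait lets the extended listener below resolve it
    renderer = Instance(ScatterPlot)

    def _scatter_plot_default(self):
        x = linspace(-14, 14, 100)
        y = sin(x) * x**3
        scatter_plot = Plot(ArrayPlotData(x=x, y=y))
        self.renderer = scatter_plot.plot(
            ("x", "y"), type="scatter", color="lightblue")[0]
        self.renderer.tools.append(ScatterInspector(self.renderer))
        self.renderer.overlays.append(ScatterInspectorOverlay(
            self.renderer, hover_color="red", hover_marker_size=6,
            selection_marker_size=6, selection_color="yellow",
            selection_outline_color="purple", selection_line_width=3))
        return scatter_plot

    @on_trait_change('renderer.index.metadata')
    def when_hover_or_selection_changes(self):
        # ScatterInspector writes 'hover' and 'selections' lists of point
        # indices into the index data source's metadata dict
        print 'renderer.index.metadata = ', self.renderer.index.metadata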
I am trying to run the code below on a Bokeh server to visualize the plots. The final_imdb_dataframe file is in the same directory as the Python code. These are the steps I take to get the server to run:
open my terminal
type "cd", a space, and then the path of the folder containing my Python script and the final_imdb_dataframe
press enter
type: bokeh serve --show "filename.py"
press enter
In my terminal, however, I am getting this error message:
bokeh : The term 'bokeh' is not recognized as the name of a cmdlet, function, script file, or operable program. (the complete error message is attached as a picture)
When I type "pip show bokeh" in the command prompt, it gives me this:
pip show bokeh
Name: bokeh
Version: 2.4.3
Summary: Interactive plots and applications in the browser from Python
Home-page: https://github.com/bokeh/bokeh
Author: Bokeh Team
Author-email: info@bokeh.org
License: BSD-3-Clause
Location: c:\users\fazan\anaconda\lib\site-packages
Requires: Jinja2, numpy, packaging, pillow, PyYAML, tornado, typing-extensions
Required-by: hvplot, panel
Note: you may need to restart the kernel to use updated packages.
So it should work, right?
I don't know what to do anymore...
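Since pip clearly finds the package, the module entry point should exist too, so one thing worth trying (a guess on my part, not something I have verified on this machine) is launching the server through Python itself instead of the bokeh console script, which sidesteps the PATH lookup for bokeh.exe:

python -m bokeh serve --show filename.py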
code:
import pandas as pd
from bokeh.plotting import figure, show
import numpy as np
from bokeh.models import ColumnDataSource, HoverTool, Slider, RangeSlider, Div, Select
from bokeh.io import output_file, curdoc
from bokeh.layouts import column, row, layout
#FIRST PLOT:
#line diagram of amount of movies released each year and avg rating displayed over the years
df = pd.read_csv("final_imdb_dataframe")
#remove the "unnamed" column
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
#creating plot 2
plot2 = figure(plot_width=1000,
               plot_height=400,
               x_axis_label="year",
               title="watchtime and movie rating over the years")
#creating a dropdown menu
source2 = ColumnDataSource(data={'x': df["release_date"], 'y': df["watchtime"]})
plot2.circle(x="x", y="y", size = 3, source = source2, alpha = 0.2)
def update_plot(attr, old, new):
    if new == 'watchtime':
        source2.data = {'x': df["release_date"], 'y': df["watchtime"]}
    else:
        source2.data = {'x': df["release_date"], 'y': df["movie_rating"]}

select = Select(title="keuze menu", options=["watchtime", "movie_rating"], value='watchtime')
#code for updating the plot when the value is changed
select.on_change('value', update_plot)
#SECOND PLOT:
#creating a list of all the years a movie came out
dfrelease = df.release_date.drop_duplicates().tolist()
#sorting the list with release dates to get them in chronological order
dfrelease1 = sorted(dfrelease)
#creating an empty list
total_releases_per_year = []
#adding the amount of movies that came out every year to the list
for i in range(len(dfrelease1)):
    total_releases_per_year.append(len(df[df["release_date"] == dfrelease1[i]]))
#creating a columndatasource of the two lists created above to make it possible for the hovertool to be used
source1 = ColumnDataSource(data=dict(year = dfrelease1, amount_of_movies_made = total_releases_per_year))
#making a hovertool
hover2 = HoverTool(tooltips=[("year", "@year"), ("amount of movies made", "@amount_of_movies_made")])
#creating plot 3
plot3 = figure(plot_width=1000,
               plot_height=400,
               x_axis_label="year",
               y_axis_label="amount of movies",
               title="amount of movies per year")
plot3.line("year", "amount_of_movies_made", source = source1)
plot3.add_tools(hover2)
#THIRD PLOT:
#creating the ColumnDataSource
df_sel = df[df["release_date"] == 2020]
source1 = ColumnDataSource(df_sel)
#scatter plot
p1 = figure(title = "relation 'watchtime' and 'movie rating' over the years",
x_axis_label= "watchtime",
y_axis_label= "rating (0-10)",
plot_width = 1000,
plot_height = 400)
p1.circle(x="watchtime", y="movie_rating", alpha = 0.2, source=source1)
#making the rangeslider
slider = RangeSlider(title="release date", value= (1919, 2021), start=1919, end=2020, step=1)
#making the hover tool
hover1 = HoverTool(tooltips=[("title", "@movie_name"), ("gross", "@gross_collection"), ("votes", "@votes"), ("genre", "@genre"), ("watchtime", "@watchtime"), ("rating", "@movie_rating"), ("release date", "@release_date")])
#making a def callback for the rangeslider to work when moved
def callback(attr, old, new):
    year = slider.value
    source1.data = df[(df["release_date"] >= year[0]) & (df["release_date"] <= year[1])]
slider.on_change("value", callback)
layout = column(slider, p1) #create a layout with slider on top of plot
p1.add_tools(hover1) #adding hover tool
layout3 = column(select, plot2, plot3, slider, p1) #making the layout for the dashboard
curdoc().add_root(layout3)
I tried what I wrote above and expected access to the Bokeh server that would show me my plots. I have done this before on my previous computer (a Mac) and it worked then; now, on my Lenovo, it is not working.
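The error format looks like PowerShell, so another possibility (again just an assumption) is that the folder holding bokeh.exe, normally the Scripts folder next to the lib folder shown by pip above, is not on PATH in that shell. These commands should show whether the executable can be found at all:

where.exe bokeh
python -m bokeh info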
I want to use object detection with TensorFlow Lite to detect a clear face or a covered face, printing "door opens" when a clear face is detected. I could previously run this code smoothly, but after rebooting the Raspberry Pi 4, the TensorFlow Lite runtime still initializes and then the Raspberry Pi 4 drops the SSH connection completely. The following is the code:
######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 10/27/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a live webcam
# feed. It draws boxes and scores around the objects of interest in each frame from the
# webcam. To improve FPS, the webcam object runs in a separate thread from the main program.
# This script will work with either a Picamera or regular USB webcam.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
import simpleaudio as sa
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
    """Camera object that controls video streaming from the Picamera"""
    def __init__(self, resolution=(640, 480), framerate=30):
        # Initialize the PiCamera and the camera image stream
        self.stream = cv2.VideoCapture(0)
        ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        ret = self.stream.set(3, resolution[0])
        ret = self.stream.set(4, resolution[1])

        # Read first frame from the stream
        (self.grabbed, self.frame) = self.stream.read()

        # Variable to control when the camera is stopped
        self.stopped = False

    def start(self):
        # Start the thread that reads frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # Keep looping indefinitely until the thread is stopped
        while True:
            # If the camera is stopped, stop the thread
            if self.stopped:
                # Close camera resources
                self.stream.release()
                return

            # Otherwise, grab the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        # Return the most recent frame
        return self.frame

    def stop(self):
        # Indicate that the camera and thread should be stopped
        self.stopped = True
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
                    required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
                    default='masktracker.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
                    default='facelabelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
                    default=0.5)
parser.add_argument('--resolution', help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',
                    default='640x480')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
                    action='store_true')
args = parser.parse_args()
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
resW, resH = args.resolution.split('x')
imW, imH = int(resW), int(resH)
use_TPU = args.edgetpu
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
    from tflite_runtime.interpreter import Interpreter
    if use_TPU:
        from tflite_runtime.interpreter import load_delegate
else:
    from tensorflow.lite.python.interpreter import Interpreter
    if use_TPU:
        from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
    # If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
    if (GRAPH_NAME == 'masktracker.tflite'):
        GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
    labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
    del(labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
    interpreter = Interpreter(model_path=PATH_TO_CKPT,
                              experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    print(PATH_TO_CKPT)
else:
    interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
global image_capture
image_capture = 0
# Initialize video stream
videostream = VideoStream(resolution=(imW,imH),framerate=30).start()
time.sleep(1)
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:

    # Start timer (for calculating frame rate)
    t1 = cv2.getTickCount()

    # Grab frame from video stream
    frame1 = videostream.read()

    # Acquire frame and resize to expected shape [1xHxWx3]
    frame = frame1.copy()
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)

    # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std

    # Perform the actual detection by running the model with the image as input
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()

    # Retrieve detection results
    boxes = interpreter.get_tensor(output_details[0]['index'])[0]   # Bounding box coordinates of detected objects
    classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
    scores = interpreter.get_tensor(output_details[2]['index'])[0]  # Confidence of detected objects
    #num = interpreter.get_tensor(output_details[3]['index'])[0]    # Total number of detected objects (inaccurate and not needed)

    # Loop over all detections and draw detection box if confidence is above minimum threshold
    for i in range(len(scores)):
        if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):

            # Get bounding box coordinates and draw box
            # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
            ymin = int(max(1, (boxes[i][0] * imH)))
            xmin = int(max(1, (boxes[i][1] * imW)))
            ymax = int(min(imH, (boxes[i][2] * imH)))
            xmax = int(min(imW, (boxes[i][3] * imW)))

            # Draw label
            object_name = labels[int(classes[i])]  # Look up object name from "labels" array using class index
            if (object_name == "face unclear"):
                color = (0, 255, 0)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                print("Face Covered: Door Not Opened")
                if (image_capture == 0):
                    path = r'/home/pi/Desktop/tflite_1/photographs'
                    date_string = time.strftime("%Y-%m-%d_%H%M%S")
                    #print(date_string)
                    cv2.imwrite(os.path.join(path, (date_string + ".jpg")), frame)
                    #cv2.imshow("Photograph",frame)
                    #mp3File = input(alert_audio.mp3)
                    print("Photo Taken")
                    #w_object = sa.WaveObject.from_wave_file('alert_audio.wav')
                    #p_object = w_object.play()
                    #p_object.wait_done()
                    image_capture = 1
            else:
                color = (0, 0, 255)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                print("Face Clear: Door Opened")
                image_capture = 0
            #cv2.rectangle(frame, (xmin,ymin), (xmax,ymax),color, 2)
            #image = np.asarray(ImageGrab.grab())
            label = '%s: %d%%' % (object_name, int(scores[i]*100))  # Example: 'person: 72%'
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)  # Get font size
            label_ymin = max(ymin, labelSize[1] + 10)  # Make sure not to draw label too close to top of window
            cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)  # Draw white box to put label text in
            cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)  # Draw label text

    if ((scores[0] < min_conf_threshold)):
        cv2.putText(frame, "No Face Detected", (260, 260),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, color=(255, 0, 0))
        print("No Face Detected")
        image_capture = 0

    # Draw framerate in corner of frame
    cv2.putText(frame, 'FPS: {0:.2f}'.format(frame_rate_calc), (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)

    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Object detector', frame)

    # Calculate framerate
    t2 = cv2.getTickCount()
    time1 = (t2 - t1) / freq
    frame_rate_calc = 1 / time1

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break

# Clean up
cv2.destroyAllWindows()
videostream.stop()
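If it helps narrow things down: the only GUI call in the script is cv2.imshow, which needs a display. A minimal guard for running headless over SSH would look like the sketch below (an assumption on my side that the missing display is related to the disconnect, not something verified):

import os

# only show the preview window when a display is actually available;
# when headless, stop the loop with Ctrl+C instead of the 'q' key
HAS_DISPLAY = bool(os.environ.get('DISPLAY'))

# ...inside the while loop, in place of the cv2.imshow / cv2.waitKey lines:
if HAS_DISPLAY:
    cv2.imshow('Object detector', frame)
    if cv2.waitKey(1) == ord('q'):
        break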
Any help is appreciated.
Regards,
MD
I'm trying to create a directed graph using networkx and bokeh; however, I also want to show the arrows for each out-edge. I found that the HoloViews library can take a directed=True parameter in its graph options. However, I also want to use Bokeh's styling features, such as adjusting node color/size based on previously set node attributes. The latter only works if I use Bokeh's from_networkx() to get a bokeh.models.renderers.GraphRenderer object and then use its node_renderer and edge_renderer attributes.
The issue is that when using HoloViews' renderer with Bokeh as the backend, it returns a bokeh.plotting.figure.Figure instead of the GraphRenderer. Ultimately, I want to know how to control the node size/color based on some attributes through Bokeh, and simultaneously use HoloViews to display the arrowheads on each edge.
import networkx as nx
import holoviews as hv
from holoviews import opts
hv.extension('bokeh')
from bokeh.io import show, output_file
# ... some code for I/O ...
G = nx.DiGraph(edgeList) # Directed networkx graph
# Set Node/Edge attributes to display upon hover
numConnections = {k:v for k,v in G.out_degree()}
nx.set_node_attributes(G, numConnections, name='numConnections')
# Returns Holoviews graph
hvGraph = hv.Graph.from_networkx(G, nx.spring_layout).opts(tools=['hover'], directed=True, arrowhead_length=0.01)
# Renders Holoviews graph into bokeh.plotting.figure.Figure
hvRendered = hv.render(hvGraph, 'bokeh')
output_file("out.html")
show(hvRendered)
# # The below code runs as expected using Bokeh only, and not Holoviews
# # to produce the directed graph (without arrowed edges):

# from bokeh.models import Plot, Range1d, MultiLine, Circle
# from bokeh.models import LinearColorMapper, ColorBar, BasicTicker
# import bokeh.models as bm
# from bokeh.models.graphs import from_networkx
# from bokeh.models.graphs import NodesAndLinkedEdges, EdgesAndLinkedNodes
# from bokeh.palettes import Inferno6

# # Returns GraphRenderer from bokeh.models.renderers
# graphRenderer = from_networkx(G, nx.spring_layout)
# mapper = LinearColorMapper(palette="Viridis256", low=76, high=0)

# # Node size/color when unselected / selected / hover
# graphRenderer.node_renderer.glyph = Circle(
#     size='node_size',
#     fill_color={'field': "numConnections", "transform": mapper},
#     fill_alpha=.8
# )
# graphRenderer.node_renderer.selection_glyph = Circle(
#     size=25,
#     fill_color=Inferno6[4]
# )
# graphRenderer.node_renderer.hover_glyph = Circle(
#     size=20,
#     fill_color=Inferno6[3]
# )

# # Edge size/color when unselected / selected / hover
# graphRenderer.edge_renderer.glyph = MultiLine(
#     line_color="#CCCCCC",
#     line_alpha=0.8,
#     line_width=3
# )
# graphRenderer.edge_renderer.selection_glyph = MultiLine(
#     line_color=Inferno6[4],
#     line_width=4
# )
# graphRenderer.edge_renderer.hover_glyph = MultiLine(
#     line_color=Inferno6[3],
#     line_width=4
# )

# graphRenderer.node_renderer.data_source.data['numConnections'] = [
#     v for k, v in nx.get_node_attributes(G, 'numConnections').items()]

# graphRenderer.selection_policy = NodesAndLinkedEdges()
# graphRenderer.inspection_policy = NodesAndLinkedEdges()

# bar = ColorBar(color_mapper=mapper, location=(0, 0), title='#connections')

# # Create Bokeh Plot
# plot = Plot(
#     plot_width=20,
#     plot_height=20,
#     x_range=Range1d(-1.1, 1.1),
#     y_range=Range1d(-1.1, 1.1)
# )
# plot.add_tools(
#     bm.HoverTool(tooltips=[("#connections", "@numConnections")]),
#     bm.TapTool(),
#     bm.BoxSelectTool()
# )
# plot.renderers.append(graphRenderer)

# output_file("bokeh.html")
# show(plot)
After rendering the HoloViews graph into a Bokeh Figure (not a models.GraphRenderer), if I try to access the node_renderer attribute on the rendered Bokeh Figure object, it obviously throws an exception.
Traceback (most recent call last): File "holoview.py", line 106, in hvRenderedGraph.node_renderer.selection_glyph = Circle() AttributeError: 'BokehRenderer' object has no attribute 'node_renderer'
You can get the GraphRenderer object with the following code:
from bokeh.models import GraphRenderer
gr = hvRendered.select_one(GraphRenderer)
Then use gr.node_renderer and gr.edge_renderer to adjust the style.
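For example, a minimal sketch along those lines (assuming the hvRendered figure and the graph G from the question; the node data source created for the GraphRenderer carries an 'index' column with the node ids):

from bokeh.models import GraphRenderer, Circle, LinearColorMapper

gr = hvRendered.select_one(GraphRenderer)

# attach the attribute to the node data source, matched to the node ids
node_ids = gr.node_renderer.data_source.data['index']
gr.node_renderer.data_source.data['numConnections'] = [G.out_degree(n) for n in node_ids]

# colour the nodes from that column, as in the pure-Bokeh version
mapper = LinearColorMapper(palette="Viridis256", low=0,
                           high=max(dict(G.out_degree()).values()))
gr.node_renderer.glyph = Circle(
    size=12, fill_color={'field': 'numConnections', 'transform': mapper})

show(hvRendered)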
Using TensorFlow, I am trying to detect one object (a grayscale PNG image). I have trained and exported a model.ckpt successfully. Now I am trying to restore the saved model.ckpt for prediction. Here is the script:
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
if tf.__version__ != '1.4.0':
    raise ImportError('Please upgrade your tensorflow installation to v1.4.0!')
# This is needed to display the images.
#matplotlib inline
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from utils import label_map_util
from utils import visualization_utils as vis_util
MODEL_NAME = 'melon_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('training', 'object_detection.pbtxt')
NUM_CLASSES = 1
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape((im_height, im_width, 1)).astype(np.float64)
# For the sake of simplicity we will use only 2 images:
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'te_data{}.png'.format(i)) for i in range(1, 336) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # Define input and output Tensors for detection_graph
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        for image_path in TEST_IMAGE_PATHS:
            image = Image.open(image_path)
            # the array based representation of the image will be used later in order to prepare the
            # result image with boxes and labels on it.
            image_np = load_image_into_numpy_array(image)
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np, np.squeeze(boxes), np.squeeze(classes).astype(np.float64),
                np.squeeze(scores), category_index, use_normalized_coordinates=True,
                line_thickness=5)
            plt.figure(figsize=IMAGE_SIZE)
            plt.imshow(image_np)
And this is the error:
Traceback (most recent call last): File "cochlear_detection.py",
line 81, in
(boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded}) File
"/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py",
line 889, in run
run_metadata_ptr) File "/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py",
line 1096, in _run
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape()))) ValueError: Cannot feed value of shape (1, 2048, 2048, 1) for Tensor
'image_tensor:0', which has shape '(?, ?, ?, 3)'
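The traceback says the graph's image_tensor expects 3 channels, while load_image_into_numpy_array builds a (height, width, 1) array. A minimal sketch of a loader that feeds 3 channels instead, assuming the exported graph uses the standard 3-channel image_tensor so the grayscale data can simply be replicated across channels:

def load_image_into_numpy_array(image):
    # convert the grayscale PNG to RGB so the fed array matches (?, ?, ?, 3);
    # uint8 is what the stock object detection notebook feeds
    image = image.convert('RGB')
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)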
I'm trying to build a simple Mayavi script application which utilises the mlab iso_surface module.
However, when I run my app it throws up two windows, one showing my mayavi iso_surface plot and the other showing a blank "Edit properties" window. It seems that the mayavi scene is not being displayed in the specified view layout for the "Edit properties" window.
So my question is: Why is the mayavi iso_surface scene not appearing in the view layout, and how do I get it in there?
A simple test script which displays this behaviour is pasted below. I am using Canopy version: 2.1.1.3504 (64 bit), python 3.5.2 on a Windows 10 system.
[Note: I have modified my original question to include another question. How do I update the 's' data with the input from a Range object (mult_s)? I have had a go at doing this below, but with no success. It throws up: TraitError: Cannot set the undefined 's' attribute of a 'ArraySource' object.]
import numpy as np

from traits.api import HasTraits, Instance, Range, on_trait_change
from traitsui.api import View, Item, HGroup
from mayavi import mlab
from mayavi.core.ui.api import MayaviScene, SceneEditor, MlabSceneModel


class Isoplot1(HasTraits):

    scene = Instance(MlabSceneModel, ())
    mult_s = Range(1, 5, 1)

    @on_trait_change('scene.activated')
    def _setup(self):
        # Create x, y, z, s data
        L = 10.
        x, y, z = np.mgrid[-L:L:101j, -L:L:101j, -L:L:101j]
        self.s0 = np.sqrt(4 * x ** 2 + 2 * y ** 2 + z ** 2)

        # create the data pipeline
        self.src1 = mlab.pipeline.scalar_field(x, y, z, self.s0)

        # Create the plot
        self.plot1 = self.scene.mlab.pipeline.iso_surface(
            self.src1, contours=[5, ], opacity=0.5, color=(1, 1, 0)
        )

    @on_trait_change('mult_s')
    def change_s(self):
        self.src1.set(s=self.s0 * self.mult_s)

    # Set the layout
    view = View(Item('scene',
                     editor=SceneEditor(scene_class=MayaviScene),
                     height=400, width=600, show_label=False),
                HGroup('mult_s',),
                resizable=True
                )


isoplot1 = Isoplot1()
isoplot1.configure_traits()
If you use self.scene.mlab.pipeline.scalar_field instead of mlab.pipeline.scalar_field this should not happen.
In general, you should avoid creating any visualization in the initializer. Instead, you should always set up the scene when the scene.activated event is fired. To be safe for use with raw mlab, you should rewrite your code as follows.
from mayavi import mlab
from traits.api import HasTraits, Instance, on_trait_change
from traitsui.api import View, Item
from mayavi.core.ui.api import MayaviScene, SceneEditor, MlabSceneModel
import numpy as np


class Isoplot1(HasTraits):

    scene = Instance(MlabSceneModel, ())

    @on_trait_change('scene.activated')
    def _setup(self):
        # Create x, y, z, s data
        L = 10.
        x, y, z = np.mgrid[-L:L:101j, -L:L:101j, -L:L:101j]
        s = np.sqrt(4 * x ** 2 + 2 * y ** 2 + z ** 2)

        # create the data pipeline
        self.src1 = mlab.pipeline.scalar_field(x, y, z, s)

        # Create the plot
        self.plot1 = self.scene.mlab.pipeline.iso_surface(
            self.src1, contours=[5, ], opacity=0.5, color=(1, 1, 0)
        )

    # Set the layout
    view = View(Item('scene',
                     editor=SceneEditor(scene_class=MayaviScene),
                     height=400, width=600, show_label=False),
                resizable=True
                )


isoplot1 = Isoplot1()
isoplot1.configure_traits()
You probably already know this, but just in case: you can also take a look at some of the other mayavi interactive examples in the documentation.
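As for updating 's' from the mult_s Range in the modified question: below is a sketch of a listener that goes through the source's mlab_source interface instead of src1.set(s=...), which is what raises the undefined-'s' TraitError. This assumes the base array is kept around (e.g. as self.s0, as in the question's _setup) and that src1 was created with mlab.pipeline.scalar_field as above; Range would also need importing from traits.api and 'mult_s' adding back into the View.

    mult_s = Range(1, 5, 1)

    @on_trait_change('mult_s')
    def change_s(self):
        # scalar_field sources expose an mlab_source whose scalars can be
        # reassigned; the iso_surface then re-renders with the new data
        self.src1.mlab_source.scalars = self.s0 * self.mult_s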