Keras model visualization error: name 'file' is not defined

I am trying to use the Keras visualization module:
# Begin a model
model = Sequential()
model.add(Convolution2D(4, 1, 5, input_shape=(1, 1, 49), init='uniform', weights=None, border_mode='valid'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(1, 2)))
model.add(Flatten())
model.add(Dense(600, init='normal'))
model.add(Activation('tanh'))
model.add(Dense(2, init='normal'))
model.add(Activation('softmax'))
model.summary()
sgd = SGD(lr=list_lr[i], decay=0.0, momentum=0.1, nesterov=False)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=sgd)
# using visualization
from keras.utils.visualize_util import plot
plot(model, to_file='/home/wj/DL/model.png')
model.fit(X_train, train_label, batch_size=list_batch[j], nb_epoch=list_epoch[k], shuffle=False, verbose=2, validation_split=0.2)
I get the following error:
Traceback (most recent call last):
File "6.2.4.cnn.py", line 85, in <module>
plot(model, to_file='/home/wj/DL/model.png')
File "/usr/local/lib/python3.4/dist-packages/keras/utils/visualize_util.py", line 67, in plot dot.write_png(to_file)
File "/usr/local/lib/python3.4/dist-packages/pydot.py", line 1809, in <lambda>
lambda path, f=frmt, prog=self.prog : self.write(path, format=f, prog=prog))
File "/usr/local/lib/python3.4/dist-packages/pydot.py", line 1895, in write dot_fd = file(path, "w+b")
NameError: name 'file' is not defined
What am I doing wrong?

Take your model as an example. First, import and define the model:
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten
model = Sequential()
model.add(Convolution2D(4, 1, 5, input_shape=(1, 1, 49), init='uniform', weights=None, border_mode='valid'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(1, 2)))
model.add(Flatten())
model.add(Dense(600, init='normal'))
model.add(Activation('tanh'))
model.add(Dense(2, init='normal'))
model.add(Activation('softmax'))
Before plotting the model, make sure the required libraries are installed. The stock pydot release calls the Python 2 built-in file() (visible in your traceback as dot_fd = file(path, "w+b")), which no longer exists in Python 3, so install a Python 3 compatible fork of pydot:
pip install git+https://github.com/nlhepler/pydot.git
and graphviz:
sudo apt-get install graphviz
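To quickly verify that pydot can actually invoke graphviz, here is a minimal sanity check (the file name pydot_check.png is just an illustration):
import pydot
graph = pydot.Dot(graph_type='digraph')
graph.add_node(pydot.Node('check'))
graph.write_png('pydot_check.png')  # raises an exception if the dot executable is missing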
Then import the necessary utilities and plot your model:
from IPython.display import Image, display, SVG
from keras.utils.visualize_util import model_to_dot
# Show the model in ipython notebook
figure = SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
display(figure)
# Save the model as png file
from keras.utils.visualize_util import plot
plot(model, to_file='model.png', show_shapes=True)
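If you are on a newer Keras release where keras.utils.visualize_util no longer exists, the equivalent utility (to the best of my knowledge) is keras.utils.plot_model:
from keras.utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True)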

How to export a PyTorch model to MATLAB?

I have created and trained a model in PyTorch, and I would like to be able to use the trained model within a MATLAB program. Based on this post, I have been exporting the model to ONNX and then attempting to load the ONNX model in MATLAB. I followed this PyTorch tutorial on how to export a model to ONNX.
However, I get this error:
Error using nnet.internal.cnn.onnx.importONNXNetwork>iHandleTranslationIssues
Unable to import the network because of the following issues:
1 operator(s) : Unable to create an input layer for ONNX input #1 (with name 'input') because its data format is unknown or not supported as a MATLAB input layer.
If you know the input format, pass it by using the "InputDataFormats" parameter.
The input shape declared in the ONNX file is '(batch_size, 12)'.
1 operator(s) : Unable to create an output layer for ONNX network output #1 (with name 'output') because its data format is unknown or not supported as a MATLAB output layer. If you know the output format, pass it using the 'OutputDataFormats' parameter.
To import the ONNX network as a dlnetwork, set the 'TargetNetwork' value to 'dlnetwork'.
To import the ONNX network as a layer graph with weights, use importONNXLayers.
To import the ONNX network as a function, use importONNXFunction.
Error in nnet.internal.cnn.onnx.importONNXNetwork (line 37)
iHandleTranslationIssues(translationIssues);
Error in importONNXNetwork (line 113)
Network = nnet.internal.cnn.onnx.importONNXNetwork(modelfile, varargin{:});
Here is a minimal example that reproduces the error:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.onnx
class FFNN(nn.Module):
    def __init__(self):
        super(FFNN, self).__init__()
        input_size = 12
        self.layer1 = nn.Linear(input_size, 24)
        self.layer2 = nn.Linear(24, 24)
        self.layer3 = nn.Linear(24, 12)
        self.norm1 = nn.BatchNorm1d(12, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        self.layer4 = nn.Linear(12, 6)
        self.layer5 = nn.Linear(6, 1)

    def forward(self, x):
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        x = self.norm1(self.layer3(x))
        x = F.relu(self.layer4(x))
        out = self.layer5(x)
        return out
net = FFNN()
net.eval()
batch_size = 1
input_size = 12
x = torch.randn(batch_size, input_size, requires_grad=True) # random model input for onnx
out = net(x)
input_names = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'Qm', 'l1', 'uPre', 'basal', 'dG']
output_names = ['output']
# Export the model
torch.onnx.export(net,                      # model being run
                  x,                        # model input (or a tuple for multiple inputs)
                  'model.onnx',             # where to save the model (can be a file or file-like object)
                  export_params=True,       # store the trained parameter weights inside the model file
                  opset_version=10,         # the ONNX version to export the model to
                  do_constant_folding=True, # whether to execute constant folding for optimization
                  input_names=['input'],    # the model's input names
                  output_names=['output'],  # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})
Then, in MATLAB:
nnMPC = importONNXNetwork("model.onnx"); % produces the error above
However, I can validate the ONNX model in Python and it loads correctly, so I think the problem is how I am loading it into MATLAB. Here is my validation code:
import onnx
import onnxruntime
import numpy as np
onnx_model = onnx.load('model.onnx')
onnx.checker.check_model(onnx_model)
ort_session = onnxruntime.InferenceSession('model.onnx')
def to_numpy(tensor):
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(out), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")

CuPy error when pushing / popping pycuda context

I am using TensorRT to perform inference with CUDA, and I'd like to use CuPy to preprocess the images that I feed to the TensorRT engine. The preprocessing function, my_function, works fine as long as TensorRT is not run between calls to it (see the code below). The issue is not strictly caused by TensorRT itself, but by the fact that TensorRT inference has to be wrapped by push and pop operations on the pycuda context.
In the following code, the last call to my_function raises this error:
File "/home/ubuntu/myfile.py", line 188, in _pre_process_cuda
img = ndimage.zoom(img, scaling_factor)
File "/home/ubuntu/.local/lib/python3.6/site-packages/cupyx/scipy/ndimage/interpolation.py", line 482, in zoom
kern(input, zoom, output)
File "cupy/core/_kernel.pyx", line 822, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/cuda/function.pyx", line 196, in cupy.cuda.function.Function.linear_launch
File "cupy/cuda/function.pyx", line 164, in cupy.cuda.function._launch
File "cupy_backends/cuda/api/driver.pyx", line 299, in cupy_backends.cuda.api.driver.launchKernel
File "cupy_backends/cuda/api/driver.pyx", line 124, in cupy_backends.cuda.api.driver.check_status
cupy_backends.cuda.api.driver.CUDADriverError: CUDA_ERROR_INVALID_HANDLE: invalid resource handle
Note: I haven't included the full TensorRT inference code below; in fact, simply pushing and popping a pycuda context is enough to trigger the error.
Code:
import numpy as np
import cv2
import time
from PIL import Image
import requests
from io import BytesIO
from matplotlib import pyplot as plt
import cupy as cp
from cupyx.scipy import ndimage
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
def my_function(numpy_frame):
    dtype = 'float32'
    img = cp.array(numpy_frame, dtype='float32')
    # print(img)
    img = ndimage.zoom(img, (0.5, 0.5, 3))
    img = (cp.array(2, dtype=dtype) / cp.array(255, dtype=dtype)) * img - cp.array(1, dtype=dtype)
    img = img.transpose((2, 0, 1))
    img = img.ravel()
    return img
# load image
url = "https://www.pexels.com/photo/109919/download/?search_query=&tracking_id=411xe21veam"
response = requests.get(url)
img = Image.open(BytesIO(response.content))
img = np.array(img)
# initialize tensorrt
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
cfx = cuda.Device(0).make_context()
my_function(img) # ok
my_function(img) # ok
# ----- TENSORRT ---------
cfx.push()
# .... tensorrt inference....
cfx.pop()
# ----- TENSORRT ---------
my_function(img) # <---- error
I even tried to do it in other ways, but unfortunately with the same result:
cfx.push()
my_function(img) # ok
cfx.pop()
cfx.push()
my_function(img) # error
cfx.pop()
There were multiple contexts open. For instance, it seems that each of the following opens a context:
import pycuda.autoinit
cfx = cuda.Device(0).make_context()
cfx.push()
So if you run the three commands above, then simply running one cfx.pop() won't be enough: you will need to call cfx.pop() three times to pop all the contexts.
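One way to avoid the mismatch is to skip pycuda.autoinit entirely and create exactly one context yourself, keeping it current for both CuPy and TensorRT. A minimal sketch, reusing my_function and img from the question:
import pycuda.driver as cuda  # note: no pycuda.autoinit import

cuda.init()
cfx = cuda.Device(0).make_context()  # the only context; it is current after creation
try:
    my_function(img)  # CuPy launches its kernels in the current context
    # ... tensorrt inference in the same context, no extra push/pop needed ...
    my_function(img)  # still works, the current context never changed
finally:
    cfx.pop()     # deactivate the context
    cfx.detach()  # release it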

ValueError: Could not interpret optimizer identifier: <tensorflow.python.keras.optimizers.SGD object at 0x0000013887021208>

I am trying to run this code and I get the error below. Has anyone had the same error before?
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# Compile the model
model.compile(optimizer=sgd, loss=OBJECTIVE_FUNCTION, metrics=LOSS_METRICS)
fit_history = model.fit_generator(
    train_generator,
    steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
    epochs=NUM_EPOCHS,
    validation_data=validation_generator,
    validation_steps=STEPS_PER_EPOCH_VALIDATION,
    callbacks=[cb_checkpointer, cb_early_stopper]
)
model.load_weights("../working/best.hdf5")
Now I have this error:
File "", line 1, in runfile('C:/Users/ResNet50VF72.py', wdir='C:/Users/RESNET')
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder\utils\site\sitecustomize.py", line 705, in runfile execfile(filename, namespace)
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder\utils\site\sitecustomize.py", line 102, in execfile exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/RESNET/ResNet50VF72.py", line 110, in model.compile(optimizer = sgd, loss = OBJECTIVE_FUNCTION, metrics = LOSS_METRICS)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 96, in compile self.optimizer = optimizers.get(optimizer)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\optimizers.py", line 793, in get str(identifier))
ValueError: Could not interpret optimizer identifier : <tensorflow.python.keras.optimizers.SGD object at 0x0000013887021208>
I had the same issue with another optimizer:
ValueError: Could not interpret optimizer identifier: <tensorflow.python.keras.optimizers.Adam object at 0x7f3fc4575ef0>
This was because I created my model using keras and not tensorflow.keras, the solution was switching from:
from keras.models import Sequential
to
from tensorflow.keras.models import Sequential
Alternatively, one could use only keras and not tensorflow.keras; I was mixing old and new code, and it seems it is the mixing of the two that causes the issue (which shouldn't be a surprise).
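For illustration, a minimal self-contained sketch where the model, layers, and optimizer all come from tensorflow.keras (the layer sizes here are arbitrary):
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD

model = Sequential([
    Dense(16, activation='relu', input_shape=(8,)),
    Dense(2, activation='softmax'),
])
sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])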
You should import like this:
from keras.optimizers import gradient_descent_v2
and set your hyperparameters like this:
opt = gradient_descent_v2.SGD(learning_rate=lr, decay=lr/epochs)
Reference:
https://programmerah.com/keras-nightly-import-package-error-cannot-import-name-adam-from-keras-optimizers-29815/

ValueError: Cannot feed value of shape (1, 2048, 2048, 1) for Tensor 'image_tensor:0', which has shape '(?, ?, ?, 3)'

Using TensorFlow, I am trying to detect one object (a grayscale PNG image). I have trained and exported a model.ckpt successfully. Now I am trying to restore the saved model.ckpt for prediction. Here is the script:
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
if tf.__version__ != '1.4.0':
    raise ImportError('Please upgrade your tensorflow installation to v1.4.0!')
# This is needed to display the images.
#matplotlib inline
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from utils import label_map_util
from utils import visualization_utils as vis_util
MODEL_NAME = 'melon_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('training', 'object_detection.pbtxt')
NUM_CLASSES = 1
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape((im_height, im_width, 1)).astype(np.float64)
# For the sake of simplicity we will use only 2 images:
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'te_data{}.png'.format(i)) for i in range(1, 336) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # Define the input and output Tensors for detection_graph
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        for image_path in TEST_IMAGE_PATHS:
            image = Image.open(image_path)
            # The array based representation of the image will be used later in order
            # to prepare the result image with boxes and labels on it.
            image_np = load_image_into_numpy_array(image)
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            (boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections], feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(image_np, np.squeeze(boxes), np.squeeze(classes).astype(np.float64), np.squeeze(scores), category_index, use_normalized_coordinates=True, line_thickness=5)
            plt.figure(figsize=IMAGE_SIZE)
            plt.imshow(image_np)
And this is the error:
Traceback (most recent call last):
File "cochlear_detection.py", line 81, in <module>
(boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections], feed_dict={image_tensor: image_np_expanded})
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 889, in run
run_metadata_ptr)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1096, in _run
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (1, 2048, 2048, 1) for Tensor 'image_tensor:0', which has shape '(?, ?, ?, 3)'
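The shapes in the error match the code above: load_image_into_numpy_array reshapes the image to a single channel, while the frozen graph's image_tensor expects three. A hypothetical fix, assuming the graph takes the usual uint8 image input, is to replicate the grayscale channel three times:
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    arr = np.array(image.getdata()).reshape((im_height, im_width, 1)).astype(np.uint8)
    return np.tile(arr, (1, 1, 3))  # (H, W, 1) -> (H, W, 3) to match '(?, ?, ?, 3)'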

Loading a pretrained model fails when multiple GPUs were used for training

I have trained a network model and saved its weights and architecture via the checkpoint = ModelCheckpoint(filepath='weights.hdf5') callback. During training, I am using multiple GPUs by calling the function below:
def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
        stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
        start = stride * idx
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)
                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]
                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # Merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(merge(outputs, mode='concat', concat_axis=0))
        return Model(input=model.inputs, output=merged)
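For context, a hypothetical sketch of how the training side would wrap the model before compiling (x_train and y_train are placeholder names):
parallel_model = make_parallel(model, gpu_count=2)  # assuming 2 GPUs
parallel_model.compile(optimizer='sgd', loss='mse')
parallel_model.fit(x_train, y_train, batch_size=64)  # the batch is split across GPUs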
With the testing code:
from keras.models import Model, load_model
import numpy as np
import tensorflow as tf
model = load_model('cpm_log/deneme.hdf5')
x_test = np.random.randint(0, 255, (1, 368, 368, 3))
output = model.predict(x = x_test, batch_size=1)
print output[4].shape
I got the error below:
Traceback (most recent call last):
File "cpm_test.py", line 5, in <module>
model = load_model('cpm_log/Jun5_1000/deneme.hdf5')
File "/usr/local/lib/python2.7/dist-packages/keras/models.py", line 240, in load_model
model = model_from_config(model_config, custom_objects=custom_objects)
File "/usr/local/lib/python2.7/dist-packages/keras/models.py", line 301, in model_from_config
return layer_module.deserialize(config, custom_objects=custom_objects)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/__init__.py", line 46, in deserialize
printable_module_name='layer')
File "/usr/local/lib/python2.7/dist-packages/keras/utils/generic_utils.py", line 140, in deserialize_keras_object
list(custom_objects.items())))
File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 2378, in from_config
process_layer(layer_data)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 2373, in process_layer
layer(input_tensors[0], **kwargs)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 578, in __call__
output = self.call(inputs, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 659, in call
return self.function(inputs, **arguments)
File "/home/muhammed/DEV_LIBS/developments/mocap/pose_estimation/training/cpm/multi_gpu.py", line 12, in get_slice
def get_slice(data, idx, parts):
NameError: global name 'tf' is not defined
By inspecting the error output, I concluded that the problem is in the parallelization code. However, I can't resolve the issue.
You may need to pass custom_objects to enable loading of the model. The Lambda layers created in get_slice call tf.shape, tf.concat, and tf.slice, but when Keras deserializes the Lambda function, tf is no longer in its scope, hence the NameError; supplying it explicitly fixes the load:
import tensorflow as tf
model = load_model('model.h5', custom_objects={'tf': tf,})
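If your model closes over other names inside Lambda layers (not just tf), those need to be supplied through custom_objects in the same way.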