Max pooling indices - neural-network

I am trying to find the indices of 2D max pooling in Lasagne:
network = batch_norm(Conv2DLayer(
    network, num_filters=filter_size, filter_size=(kernel, kernel), pad=pad,
    nonlinearity=lasagne.nonlinearities.rectify,
    W=lasagne.init.GlorotUniform(), name="conv"), name="BN")
pool_in = lasagne.layers.get_output(network)
network = MaxPool2DLayer(network, pool_size=(pool_size, pool_size), stride=2, name="pool")
pool_out = lasagne.layers.get_output(network)
ind1 = T.grad(T.sum(pool_out), wrt=pool_in)
When I try to build the model, it raises an error:
DisconnectedInputError: grad method was asked to compute the gradient with respect to a variable that is not part of the computational graph of the cost, or is used only by a non-differentiable operator: Elemwise{mul,no_inplace}.0
Backtrace when the node is created:
File "//anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2871, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "//anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2975, in run_ast_nodes
if self.run_code(code, result):
File "//anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 3035, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-28-0b136cc660e2>", line 1, in <module>
network = build_model()
File "<ipython-input-27-20acc3fe0d98>", line 8, in build_model
pool_in = lasagne.layers.get_output(network)
File "//anaconda/lib/python2.7/site-packages/lasagne/layers/helper.py", line 191, in get_output
all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs)
File "//anaconda/lib/python2.7/site-packages/lasagne/layers/special.py", line 52, in get_output_for
return self.nonlinearity(input)
File "//anaconda/lib/python2.7/site-packages/lasagne/nonlinearities.py", line 157, in rectify
return theano.tensor.nnet.relu(x)
What is the right way to apply functions to the intermediate outputs of Lasagne layers?

I had a similar problem a while ago; check out my solution for 2D and 3D max pooling indices:
Theano max_pool_3d
(It's based on the same Google Groups post, I guess.)
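As for the DisconnectedInputError itself: each call to lasagne.layers.get_output builds a separate computational graph, so the pool_in from the first call is not part of the graph that produces pool_out. A minimal sketch of a fix (conv_layer and pool_layer stand for the two layer objects, since the question reuses the name network for both):

import theano.tensor as T
import lasagne

# One get_output call builds a single graph containing both expressions,
# so the gradient below is well defined.
pool_in, pool_out = lasagne.layers.get_output([conv_layer, pool_layer])

# The gradient of the summed pool output w.r.t. its input is nonzero
# exactly at the positions that won a pooling window.
mask = T.grad(T.sum(pool_out), wrt=pool_in)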


"ValueError: max_evals=500 is too low for the Permutation explainer" shap answers me do I have to give more data (photos)?

I want to test the explainability of a multiclass semantic segmentation model (deeplab_v3plus) with shap, to learn which features contribute most to the semantic classification. However, I get ValueError: max_evals=500 is too low when running my file, and I struggle to understand the reason.
import glob
from PIL import Image
import torch
from torchvision import transforms
from torchvision.utils import make_grid
import torchvision.transforms.functional as tf
from deeplab import deeplab_v3plus
import shap

def test(args):
    # make a video prez
    model = deeplab_v3plus('resnet101', num_classes=args.nclass, output_stride=16, pretrained_backbone=True)
    model.load_state_dict(torch.load(args.seg_file, map_location=torch.device('cpu')))  # because no GPU is available in the sandbox environment
    model = model.to(args.device)
    model.eval()
    explainer = shap.Explainer(model)
    with torch.no_grad():
        for i, file in enumerate(args.img_folder):
            img = img2tensor(file, args)
            pred = model(img)
            print(explainer(img))

if __name__ == '__main__':
    class Arguments:
        def __init__(self):
            self.device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
            self.seg_file = "Model_Woodscape.pth"
            self.img_folder = glob.glob("test_img/*.png")
            self.mean = [0.485, 0.456, 0.406]
            self.std = [0.229, 0.224, 0.225]
            self.h, self.w = 483, 640
            self.nclass = 10
            self.cmap = {
                1: [128, 64, 128],   # "road"
                2: [69, 76, 11],     # "lanemarks"
                3: [0, 255, 0],      # "curb"
                4: [220, 20, 60],    # "person"
                5: [255, 0, 0],      # "rider"
                6: [0, 0, 142],      # "vehicles"
                7: [119, 11, 32],    # "bicycle"
                8: [0, 0, 230],      # "motorcycle"
                9: [220, 220, 0],    # "traffic_sign"
                0: [0, 0, 0],        # "void"
            }

    args = Arguments()
    test(args)
But it returns:
(dee_env) jovyan@jupyter:~/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+$ python test_shap.py
BILINEAR is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BILINEAR instead.
Traceback (most recent call last):
File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/test_shap.py", line 85, in <module>
test(args)
File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/test_shap.py", line 37, in test
print(explainer(img))
File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/dee_env/lib/python3.9/site-packages/shap/explainers/_permutation.py", line 82, in __call__
return super().__call__(
File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/dee_env/lib/python3.9/site-packages/shap/explainers/_explainer.py", line 266, in __call__
row_result = self.explain_row(
File "/home/jovyan/use-cases/Scene_understanding/Code_Woodscape/deeplab_v3+/dee_env/lib/python3.9/site-packages/shap/explainers/_permutation.py", line 164, in explain_row
raise ValueError(f"max_evals={max_evals} is too low for the Permutation explainer, it must be at least 2 * num_features + 1 = {2 * len(inds) + 1}!")
ValueError: max_evals=500 is too low for the Permutation explainer, it must be at least 2 * num_features + 1 = 1854721!
From the source code, it looks like it's because I don't give it enough arguments. I only have three images in my test_img/* folder; is that why?
I have the same issue. A possible solution I found, which seems to work in my case, is to replace this line
explainer = shap.Explainer(model)
with this line:
explainer = shap.explainers.Permutation(model, max_evals = 1854721)
shap.Explainer by default has algorithm='auto'. From the documentation for shap.Explainer:
By default the “auto” options attempts to make the best choice given
the passed model and masker, but this choice can always be overriden
by passing the name of a specific algorithm.
Since 'permutation' has been selected, you can directly use shap.explainers.Permutation and set max_evals to the value suggested in the error message above.
Given the high feature count of your use case, this might take a really long time. I would suggest using a simpler model just to test the above solution.
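For intuition about the number in the error message: it matches treating every pixel-channel of the 483 x 640 RGB input as a separate feature (a back-of-the-envelope check; the flattening is an assumption about how the masker sees the image):

# Assumption: each pixel-channel of the 483x640 RGB input counts as one feature.
num_features = 483 * 640 * 3      # h * w * channels = 927360
print(2 * num_features + 1)       # 1854721, the minimum required in the error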

Loading a pretrained model fails when multiple GPUs were used for training

I have trained a network model and saved its weights and architecture via the checkpoint = ModelCheckpoint(filepath='weights.hdf5') callback. During training, I use multiple GPUs by calling the function below:
# Imports assumed by this snippet (Keras 1-era API: merge, Model(input=..., output=...)):
import tensorflow as tf
from keras.layers import Lambda, merge
from keras.models import Model

def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
        stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
        start = stride * idx
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape,
                                     arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)
                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]
                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(merge(outputs, mode='concat', concat_axis=0))
        return Model(input=model.inputs, output=merged)
With the testing code:
from keras.models import Model, load_model
import numpy as np
import tensorflow as tf
model = load_model('cpm_log/deneme.hdf5')
x_test = np.random.randint(0, 255, (1, 368, 368, 3))
output = model.predict(x = x_test, batch_size=1)
print output[4].shape
I got the error below:
Traceback (most recent call last):
File "cpm_test.py", line 5, in <module>
model = load_model('cpm_log/Jun5_1000/deneme.hdf5')
File "/usr/local/lib/python2.7/dist-packages/keras/models.py", line 240, in load_model
model = model_from_config(model_config, custom_objects=custom_objects)
File "/usr/local/lib/python2.7/dist-packages/keras/models.py", line 301, in model_from_config
return layer_module.deserialize(config, custom_objects=custom_objects)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/__init__.py", line 46, in deserialize
printable_module_name='layer')
File "/usr/local/lib/python2.7/dist-packages/keras/utils/generic_utils.py", line 140, in deserialize_keras_object
list(custom_objects.items())))
File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 2378, in from_config
process_layer(layer_data)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 2373, in process_layer
layer(input_tensors[0], **kwargs)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 578, in __call__
output = self.call(inputs, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 659, in call
return self.function(inputs, **arguments)
File "/home/muhammed/DEV_LIBS/developments/mocap/pose_estimation/training/cpm/multi_gpu.py", line 12, in get_slice
def get_slice(data, idx, parts):
NameError: global name 'tf' is not defined
By inspecting the error output, I concluded that the problem is with the parallelization code. However, I can't resolve the issue.
You may need to use custom_objects to enable loading of the model. The Lambda layer serializes get_slice by its code; when Keras deserializes it, the function is rebuilt without your training module's globals, so the name tf is undefined unless you inject it via custom_objects:
import tensorflow as tf
from keras.models import load_model

model = load_model('model.h5', custom_objects={'tf': tf})
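For context, a hypothetical toy reproduction of the failure mode (not the OP's model; the layer and file names are made up):

import tensorflow as tf
from keras.models import Model, load_model
from keras.layers import Input, Lambda

# A Lambda whose function references the global name `tf`.
inp = Input(shape=(4,))
out = Lambda(lambda t: t * tf.constant(2.0))(inp)
Model(inp, out).save('toy.h5')

# Deserializing the Lambda rebuilds its function without this module's
# globals, so loading fails with NameError unless `tf` is re-injected:
model = load_model('toy.h5', custom_objects={'tf': tf})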

XGBoost: xgb.importance feature map

I am getting the error below when trying to use the following code.
******Code******
import operator

importance = bst.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=operator.itemgetter(1))
******Error******
File "scripts/xgboost_bnp.py", line 225, in <module>
importance = bst.get_fscore(fmap='xgb.fmap')
File "/usr/lib/python2.7/site-packages/xgboost/core.py", line 754, in get_fscore
trees = self.get_dump(fmap)
File "/usr/lib/python2.7/site-packages/xgboost/core.py", line 740, in get_dump
ctypes.byref(sarr)))
File "/usr/lib/python2.7/site-packages/xgboost/core.py", line 92, in _check_call
raise XGBoostError(_LIB.XGBGetLastError())
xgboost.core.XGBoostError: can not open file "xgb.fmap"
The error is raised because you are calling get_fscore with the optional parameter fmap, which tells XGBoost to fetch the importance of each feature from a feature map file called xgb.fmap. That file does not exist on your file system.
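If you do want to use an fmap, you have to create the file yourself first. A minimal sketch (create_feature_map is a hypothetical helper; the format is one index, name, type line per feature, with type q for quantitative or i for indicator features):

# Hypothetical helper: write a feature map file for get_fscore(fmap=...).
def create_feature_map(feature_names, path='xgb.fmap'):
    # One line per feature: <index>\t<name>\t<type>
    with open(path, 'w') as f:
        for i, name in enumerate(feature_names):
            f.write('{0}\t{1}\tq\n'.format(i, name))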
Here is a function returning sorted feature names and their importances:
import xgboost as xgb
import pandas as pd

def get_xgb_feat_importances(clf):
    if isinstance(clf, xgb.XGBModel):
        # clf has been created by calling
        # xgb.XGBClassifier.fit() or xgb.XGBRegressor().fit()
        fscore = clf.booster().get_fscore()
    else:
        # clf has been created by calling xgb.train.
        # Thus, clf is an instance of xgb.Booster.
        fscore = clf.get_fscore()
    feat_importances = []
    for ft, score in fscore.iteritems():
        feat_importances.append({'Feature': ft, 'Importance': score})
    feat_importances = pd.DataFrame(feat_importances)
    feat_importances = feat_importances.sort_values(
        by='Importance', ascending=False).reset_index(drop=True)
    # Divide the importances by the sum of all importances
    # to get relative importances. By using relative importances
    # the sum of all importances will equal 1, i.e.,
    # np.sum(feat_importances['Importance']) == 1
    feat_importances['Importance'] /= feat_importances['Importance'].sum()
    # Print the most important features and their importances
    print feat_importances.head()
    return feat_importances
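Usage would then look like this (a sketch on tiny synthetic data; X_train and y_train are hypothetical stand-ins for your training set):

import numpy as np
# Hypothetical data, just to exercise the function:
X_train = np.random.rand(100, 5)
y_train = np.random.randint(0, 2, 100)
clf = xgb.XGBClassifier().fit(X_train, y_train)
feat_importances = get_xgb_feat_importances(clf)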

Incompatible shapes on tensorflow.equal() op for correct predictions evaluation

Using the MNIST tutorial of TensorFlow, I am trying to make a convolutional network for face recognition with the "Database of Faces".
The image size is 112x92; I use 3 more convolutional layers to reduce it to 6 x 5, as advised here.
I'm very new to convolutional networks and most of my layer declarations were made by analogy to the TensorFlow MNIST tutorial, so it may be a bit clumsy; feel free to advise me on this.
x_image = tf.reshape(x, [-1, 112, 92, 1])
W_conv1 = weight_variable([5, 5, 1, 32])  # inferred from the 32 input channels of W_conv2
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_conv3 = weight_variable([5, 5, 64, 128])
b_conv3 = bias_variable([128])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = max_pool_2x2(h_conv3)
W_conv4 = weight_variable([5, 5, 128, 256])
b_conv4 = bias_variable([256])
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
h_pool4 = max_pool_2x2(h_conv4)
W_conv5 = weight_variable([5, 5, 256, 512])
b_conv5 = bias_variable([512])
h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)
h_pool5 = max_pool_2x2(h_conv5)
W_fc1 = weight_variable([6 * 5 * 512, 1024])
b_fc1 = bias_variable([1024])
h_pool5_flat = tf.reshape(h_pool5, [-1, 6 * 5 * 512])
h_fc1 = tf.nn.relu(tf.matmul(h_pool5_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
print orlfaces.train.num_classes # 40
W_fc2 = weight_variable([1024, orlfaces.train.num_classes])
b_fc2 = bias_variable([orlfaces.train.num_classes])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
My problem appears when the session runs the correct_prediction op, which is
tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
At least that's what I think, given the error message:
W tensorflow/core/common_runtime/executor.cc:1027] 0x19369d0 Compute status: Invalid argument: Incompatible shapes: [8] vs. [20]
[[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/cpu:0"](ArgMax, ArgMax_1)]]
Traceback (most recent call last):
File "./convolutional.py", line 133, in <module>
train_accuracy = accuracy.eval(feed_dict = {x: batch[0], y_: batch[1], keep_prob: 1.0})
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 405, in eval
return _eval_using_default_session(self, feed_dict, self.graph, session)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2728, in _eval_using_default_session
return session.run(tensors, feed_dict)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 345, in run
results = self._do_run(target_list, unique_fetch_targets, feed_dict_string)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 419, in _do_run
e.code)
tensorflow.python.framework.errors.InvalidArgumentError: Incompatible shapes: [8] vs. [20]
[[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/cpu:0"](ArgMax, ArgMax_1)]]
Caused by op u'Equal', defined at:
File "./convolutional.py", line 125, in <module>
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_math_ops.py", line 328, in equal
return _op_def_lib.apply_op("Equal", x=x, y=y, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 633, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1710, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 988, in __init__
self._traceback = _extract_stack()
It looks like y_conv outputs a matrix of shape 8 x batch_size instead of number_of_classes x batch_size.
If I change the batch size from 20 to 10, the error message stays the same, but instead of [8] vs. [20] I get [4] vs. [10]. From that I conclude that the problem may come from the y_conv declaration (the last line of the code above).
The loss function, optimizer, training, etc. declarations are the same as in the MNIST tutorial:
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.initialize_all_variables())
for i in xrange(1000):
    batch = orlfaces.train.next_batch(20)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict = {x: batch[0], y_: batch[1], keep_prob: 1.0})
        print "Step %d, training accuracy %g" % (i, train_accuracy)
    train_step.run(feed_dict = {x: batch[0], y_: batch[1], keep_prob: 0.5})
print "Test accuracy %g" % accuracy.eval(feed_dict = {x: orlfaces.test.images, y_: orlfaces.test.labels, keep_prob: 1.0})
Thanks for reading, have a good day
Well, after a lot of debugging, I found that my issue was due to a bad instantiation of the labels. Instead of creating one-hot arrays (all zeros with a single one), I filled them with random values! A stupid mistake. In case someone wonders what I did wrong and how I fixed it, here is the change I made.
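The actual diff was not included in the post; a minimal sketch of correct one-hot label construction (num_images, num_classes, and class_indices are hypothetical names) looks like:

import numpy as np

num_images, num_classes = 400, 40
class_indices = np.random.randint(0, num_classes, num_images)  # stand-in for the true labels

# All zeros, then a single 1 per row at the true class index,
# instead of filling the arrays with random values.
labels = np.zeros((num_images, num_classes), dtype=np.float32)
labels[np.arange(num_images), class_indices] = 1.0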
Anyway, during all the debugging I did to find this mistake, I came across some useful information for debugging this kind of problem:
For the cross entropy declaration, TensorFlow's MNIST tutorial uses a formula that can lead to NaN values.
This formula is:
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
Instead of this, I found two ways to declare it in a safer fashion:
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)))
or also:
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y_))
Note that for this second form, logits must be the pre-softmax activations (i.e., tf.matmul(h_fc1_drop, W_fc2) + b_fc2), not the output of tf.nn.softmax.
As mrry says, printing the shapes of the tensors can help detect shape anomalies.
To get the shape of a tensor, just call its get_shape() method, like this:
print "W shape:", W.get_shape()
user1111929 in this question uses a debug print that helped me figure out where the problem came from.

Plotting many datapoints with matplotlib in Python

I recently switched from MATLAB to Python for data analysis, and I use matplotlib to visualize the data. This works fine when the number of data points I would like to visualize is low. However, when I try to visualize, e.g.:
import random
import matplotlib.pyplot as plt

signal = [round(random.random() * 100) for i in xrange(0, 1000000)]
plt.plot(signal)
plt.show()
I am getting an error:
Exception in Tkinter callback
Traceback (most recent call last):
File "/usr/lib/python2.7/lib-tk/Tkinter.py", line 1489, in __call__
return self.func(*args)
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_tkagg.py", line 276, in resize
self.show()
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_tkagg.py", line 348, in draw
FigureCanvasAgg.draw(self)
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_agg.py", line 451, in draw
self.figure.draw(self.renderer)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/figure.py", line 1034, in draw
func(*args)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/axes.py", line 2086, in draw
a.draw(renderer)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/lines.py", line 562, in draw
drawFunc(renderer, gc, tpath, affine.frozen())
File "/usr/lib/pymodules/python2.7/matplotlib/lines.py", line 938, in _draw_lines
self._lineFunc(renderer, gc, path, trans)
File "/usr/lib/pymodules/python2.7/matplotlib/lines.py", line 978, in _draw_solid
renderer.draw_path(gc, path, trans)
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_agg.py", line 145, in draw_path
self._renderer.draw_path(gc, path, transform, rgbFace)
OverflowError: Allocated too many blocks
Can you give me advice on what you would do in this case? Do you downsample your data?
When I make the same plot in MATLAB, even with more data points, I have never had this problem.
Not sure exactly what you are trying to show, but you can change the plot marker to '.' and it will work:
import random
import pylab as plt
signal = [round(random.random() * 100) for i in xrange(0, 1000000)]
plt.plot(signal, '.')
plt.show()
You may be able to get what you want by customizing via the matplotlibrc file; the docs are here.
I think a scatter plot is faster:
import numpy as np
import matplotlib.pyplot as plt

x = np.random.normal(0, 1, 1000000)
y = np.random.normal(0, 1, 1000000)
plt.scatter(x, y)
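Another option, hinted at in the question itself, is to downsample before plotting (a sketch; the stride of 100 is arbitrary):

import random
import matplotlib.pyplot as plt

signal = [round(random.random() * 100) for i in xrange(0, 1000000)]
stride = 100
# Keep every 100th sample, passing the original indices as x-values so the
# axis still reflects positions in the full signal.
plt.plot(range(0, len(signal), stride), signal[::stride])
plt.show()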