I am trying to train a simple MLP model that maps input questions (using a 300D word embedding) and image features extracted using a pretrained VGG16 model to a feature vector of fixed length. However, I can't figure out how to fix the error mentioned below. Here is the code I'm trying to run at the moment:
parser = argparse.ArgumentParser()
parser.add_argument('-num_hidden_units', type=int, default=1024)
parser.add_argument('-num_hidden_layers', type=int, default=3)
parser.add_argument('-dropout', type=float, default=0.5)
parser.add_argument('-activation', type=str, default='tanh')
parser.add_argument('-language_only', type=bool, default= False)
parser.add_argument('-num_epochs', type=int, default=10) #default=100
parser.add_argument('-model_save_interval', type=int, default=10)
parser.add_argument('-batch_size', type=int, default=128)
args = parser.parse_args()
questions_train = open('data/qa/preprocess/questions_train2014.txt', 'r').read().splitlines()
answers_train = open('data/qa/preprocess/answers_train2014_modal.txt', 'r').read().splitlines()
images_train = open('data/qa/preprocess/images_train2014.txt', 'r').read().splitlines()
vgg_model_path = 'data/coco/vgg_feats.mat'
maxAnswers = 1000
questions_train, answers_train, images_train = selectFrequentAnswers(questions_train,answers_train,images_train, maxAnswers)
#encode the remaining answers
labelencoder = preprocessing.LabelEncoder()
labelencoder.fit(answers_train)
nb_classes = len(list(labelencoder.classes_))
joblib.dump(labelencoder,'models/labelencoder.pkl')
features_struct = scipy.io.loadmat(vgg_model_path)
VGGfeatures = features_struct['feats']
print ('loaded vgg features')
image_ids = open('data/coco/coco_vgg_IDMap.txt').read().splitlines()
id_map = {}
for ids in image_ids:
    id_split = ids.split()
    id_map[id_split[0]] = int(id_split[1])
nlp = English()
print ('loaded word2vec features...')
img_dim = 4096
word_vec_dim = 300
model = Sequential()
if args.language_only:
    model.add(Dense(args.num_hidden_units, input_dim=word_vec_dim, init='uniform'))
else:
    model.add(Dense(args.num_hidden_units, input_dim=img_dim+word_vec_dim, init='uniform'))
model.add(Activation(args.activation))
if args.dropout>0:
    model.add(Dropout(args.dropout))
for i in range(args.num_hidden_layers-1):
    model.add(Dense(args.num_hidden_units, init='uniform'))
    model.add(Activation(args.activation))
    if args.dropout>0:
        model.add(Dropout(args.dropout))
model.add(Dense(nb_classes, init='uniform'))
model.add(Activation('softmax'))
json_string = model.to_json()
if args.language_only:
    model_file_name = 'models/mlp_language_only_num_hidden_units_' + str(args.num_hidden_units) + '_num_hidden_layers_' + str(args.num_hidden_layers)
else:
    model_file_name = 'models/mlp_num_hidden_units_' + str(args.num_hidden_units) + '_num_hidden_layers_' + str(args.num_hidden_layers)
open(model_file_name + '.json', 'w').write(json_string)
print ('Compiling model...')
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
print ('Compilation done...')
print ('Training started...')
for k in range(args.num_epochs):
    # shuffle the data points before going through them
    index_shuf = list(range(len(questions_train)))
    shuffle(index_shuf)
    questions_train = [questions_train[i] for i in index_shuf]
    answers_train = [answers_train[i] for i in index_shuf]
    images_train = [images_train[i] for i in index_shuf]
    progbar = generic_utils.Progbar(len(questions_train))
    for qu_batch, an_batch, im_batch in zip(grouper(questions_train, args.batch_size, fillvalue=questions_train[-1]),
                                            grouper(answers_train, args.batch_size, fillvalue=answers_train[-1]),
                                            grouper(images_train, args.batch_size, fillvalue=images_train[-1])):
        X_q_batch = get_questions_matrix_sum(qu_batch, nlp)
        if args.language_only:
            X_batch = X_q_batch
        else:
            X_i_batch = get_images_matrix(im_batch, id_map, VGGfeatures)
            X_batch = np.hstack((X_q_batch, X_i_batch))
        Y_batch = get_answers_matrix(an_batch, labelencoder)
        loss = model.train_on_batch(X_batch, Y_batch)
        progbar.add(args.batch_size, values=[("train loss", loss)])
        # print type(loss)
    if k % args.model_save_interval == 0:
        model.save_weights(model_file_name + '_epoch_{:02d}.hdf5'.format(k))
model.save_weights(model_file_name + '_epoch_{:02d}.hdf5'.format(k))
And here is the error I get:
Keras: Error when checking input: expected dense_9_input to have shape
(4396,) but got array with shape (4096,)
I think the error comes from a mismatch between the input dimension you declare in the else branch of your first layer and the data you actually pass during training. In your first layer you specify:
model = Sequential()
if args.language_only:
    model.add(Dense(args.num_hidden_units, input_dim=word_vec_dim, init='uniform'))
else:
    model.add(Dense(args.num_hidden_units, input_dim=img_dim+word_vec_dim, init='uniform'))
You clearly pass input_dim = img_dim + word_vec_dim = 4096 + 300 = 4396. During training you pass:
X_q_batch = get_questions_matrix_sum(qu_batch, nlp)
if args.language_only:
    X_batch = X_q_batch
else:
    X_i_batch = get_images_matrix(im_batch, id_map, VGGfeatures)
    X_batch = np.hstack((X_q_batch, X_i_batch))
So, in the else branch, X_batch should end up with 4396 columns (300 question features plus 4096 image features), yet the array that actually reaches the model has only 4096 columns, which matches the image features alone. Check the shapes returned by get_questions_matrix_sum and get_images_matrix, and make sure the concatenated X_batch is really what you pass to train_on_batch.
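A quick way to confirm this is to check the batch shapes right before the training call. A hypothetical debugging sketch (the expected sizes come from img_dim and word_vec_dim above):

print(X_q_batch.shape)  # expected (batch_size, 300)
print(X_i_batch.shape)  # expected (batch_size, 4096)
print(X_batch.shape)    # should be (batch_size, 4396) in the else branch
assert X_batch.shape[1] == img_dim + word_vec_dim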
By the way, for debugging purposes, it would be easier to give your layers a name, e.g.
x = Dense(64, activation='relu', name="dense_one")
I hope this helps.
This is my first time creating a feed-forward network (FFN); I am training it to translate French to English using word prediction:
The inputs are two arrays: one of size 2 * window_size + 1 from the source language and one of size window_size from the target language, plus a label of size 1.
For example, for window_size = 2:
["je","mange", "la", "pomme","avec"]
and
["I", "eat"]
So the inputs have sizes [5] and [2], giving 7 after concatenation.
Label: "the" (referring to "la" in French)
The label is converted to a one-hot encoding before being compared with yHat.
I'm using a unique index for each word (1 to len(vocab)) and train on the indices (not the words).
The output of the FFN is a probability distribution over the target-language vocabulary.
The problem is that the FFN doesn't learn and the accuracy stays at 0.
When I print the sizes of y_final (the target probabilities) and yHat (the model output), they have different dimensions:
yHat.size()=[512, 7, 10212]
where 512 is the batch size, 7 is the concatenated input size, and 10212 is the target vocabulary size, while
y_final.size()= [512, 10212]
And throughout the forward method I get these sizes:
torch.Size([512, 5, 32])
torch.Size([512, 5, 64])
torch.Size([512, 5, 64])
torch.Size([512, 2, 256])
torch.Size([512, 2, 32])
torch.Size([512, 2, 64])
torch.Size([512, 2, 64])
torch.Size([512, 7, 64])
torch.Size([512, 7, 128])
torch.Size([512, 7, 10212])
Since the accuracy only increases when yHat matches y_final, I suspect that never happens because they don't even have the same shape (3D vs 2D). Is this the problem?
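For reference, a minimal, self-contained sketch (not my actual model) of the shapes nn.CrossEntropyLoss expects, which is where the 3D/2D mismatch shows up:

import torch
import torch.nn as nn

batch_size, vocab = 4, 10
lossfun = nn.CrossEntropyLoss()

# What the loss expects: logits of shape [N, C] and integer class indices of shape [N]
logits = torch.randn(batch_size, vocab)
targets = torch.randint(0, vocab, (batch_size,))  # no one-hot encoding needed
print(lossfun(logits, targets))

# A 3D output like [N, 7, C] does not fit this pattern: CrossEntropyLoss would read
# dimension 1 as the class dimension, so the model output has to be reduced to [N, C]
# (e.g. by pooling or flattening over the window dimension) before the loss is applied.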
Please refer to the code below, and if you need any other information please tell me.
The code itself runs without errors.
trainingData = TensorDataset(encoded_source_windows, encoded_target_windows, encoded_labels)
# print(trainingData)
batchsize = 512
trainingLoader = DataLoader(trainingData, batch_size=batchsize, drop_last=True)
def ffnModel(vocabSize1, vocabSize2, learningRate=0.01):
    class ffNetwork(nn.Module):
        def __init__(self):
            super().__init__()
            self.embeds_src = nn.Embedding(vocabSize1, 256)
            self.embeds_target = nn.Embedding(vocabSize2, 256)
            # input layer
            self.inputSource = nn.Linear(256, 32)
            self.inputTarget = nn.Linear(256, 32)
            # hidden layer 1
            self.fc1 = nn.Linear(32, 64)
            self.bnormS = nn.BatchNorm1d(5)
            self.bnormT = nn.BatchNorm1d(2)
            # Layer(s) after concatenation:
            self.fc2 = nn.Linear(64, 128)
            self.output = nn.Linear(128, vocabSize2)
            self.softmaaax = nn.Softmax(dim=0)

        # forward pass
        def forward(self, xSource, xTarget):
            xSource = self.embeds_src(xSource)
            xSource = F.relu(self.inputSource(xSource))
            xSource = F.relu(self.fc1(xSource))
            xSource = self.bnormS(xSource)
            xTarget = self.embeds_target(xTarget)
            xTarget = F.relu(self.inputTarget(xTarget))
            xTarget = F.relu(self.fc1(xTarget))
            xTarget = self.bnormT(xTarget)
            xCat = torch.cat((xSource, xTarget), dim=1)  # dim=128 or 1 ?
            xCat = F.relu(self.fc2(xCat))
            print(xCat.size())
            xCat = self.softmaaax(self.output(xCat))
            return xCat

    # creating an instance of the class
    net = ffNetwork()
    # loss function
    lossfun = nn.CrossEntropyLoss()
    # lossfun = nn.NLLLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=learningRate)
    return net, lossfun, optimizer
def trainModel(vocabSize1, vocabSize2, learningRate):
    # number of epochs
    numepochs = 64
    # create a new Model instance
    net, lossfun, optimizer = ffnModel(vocabSize1, vocabSize2, learningRate)
    # initialize losses
    losses = torch.zeros(numepochs)
    trainAcc = []
    # loop over training data batches
    batchAcc = []
    batchLoss = []
    for epochi in range(numepochs):
        # Switching on training mode
        net.train()
        # loop over training data batches
        batchAcc = []
        batchLoss = []
        for A, B, y in tqdm(trainingLoader):
            # forward pass and loss
            final_y = []
            for i in range(y.size(dim=0)):
                yy = [0] * target_vocab_length
                yy[y[i]] = 1
                final_y.append(yy)
            final_y = torch.tensor(final_y)
            yHat = net(A, B)
            loss = lossfun(yHat, final_y)
            ################
            print("\n yHat.size()")
            print(yHat.size())
            print("final_y.size()")
            print(final_y.size())
            # backprop
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # loss from this batch
            batchLoss.append(loss.item())
            print(f'batchLoss: {loss.item()}')
            # Accuracy calculator:
            matches = torch.argmax(yHat) == final_y         # booleans (false/true)
            matchesNumeric = matches.float()                # convert to numbers (0/1)
            accuracyPct = 100 * torch.mean(matchesNumeric)  # average and x100
            batchAcc.append(accuracyPct)                    # add to list of accuracies
            print(f'accuracyPct: {accuracyPct}')
        trainAcc.append(np.mean(batchAcc))
        losses[epochi] = np.mean(batchLoss)
    return trainAcc, losses, net
trainAcc,losses,net = trainModel(len(source_vocab),len(target_vocab), 0.01)
print(trainAcc)
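For comparison, a minimal sketch of a per-batch accuracy computation, assuming the model output yHat has shape [batch_size, vocab_size] and y holds integer class indices (the names follow the code above):

preds = torch.argmax(yHat, dim=1)                       # [batch_size]
accuracyPct = 100 * (preds == y).float().mean().item()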
I have a training function inside which there are two vectors:
d_labels_a = torch.zeros(128)
d_labels_b = torch.ones(128)
Then I have these features:
# Compute output
features_a = nets[0](input_a)
features_b = nets[1](input_b)
features_c = nets[2](inputs)
And then a domain classifier (nets[4]) makes predictions:
d_pred_a = torch.squeeze(nets[4](features_a))
d_pred_b = torch.squeeze(nets[4](features_b))
d_pred_a = d_pred_a.float()
d_pred_b = d_pred_b.float()
print(d_pred_a.shape)
The error is raised in the loss function:
pred_a = torch.squeeze(nets[3](features_a))
pred_b = torch.squeeze(nets[3](features_b))
pred_c = torch.squeeze(nets[3](features_c))
loss = criterion(pred_a, labels_a) + criterion(pred_b, labels_b) + criterion(pred_c, labels) + d_criterion(d_pred_a, d_labels_a) + d_criterion(d_pred_b, d_labels_b)
The problem is that the shape of d_pred_a/b differs from that of d_labels_a/b, but only after a certain point. Indeed, when I print the shape of d_pred_a/b it is torch.Size([128]), but then it changes to torch.Size([112]) independently.
It comes from here:
# Compute output
features_a = nets[0](input_a)
features_b = nets[1](input_b)
features_c = nets[2](inputs)
because if I print the shape of features_a, it is torch.Size([128, 2048]), but then it changes to torch.Size([112, 2048]).
nets[0] is a VGG, like this:
class VGG16(nn.Module):
    def __init__(self, input_size, batch_norm=False):
        super(VGG16, self).__init__()
        self.in_channels, self.in_width, self.in_height = input_size
        self.block_1 = VGGBlock(self.in_channels, 64, batch_norm=batch_norm)
        self.block_2 = VGGBlock(64, 128, batch_norm=batch_norm)
        self.block_3 = VGGBlock(128, 256, batch_norm=batch_norm)
        self.block_4 = VGGBlock(256, 512, batch_norm=batch_norm)

    @property
    def input_size(self):
        return self.in_channels, self.in_width, self.in_height

    def forward(self, x):
        x = self.block_1(x)
        x = self.block_2(x)
        x = self.block_3(x)
        x = self.block_4(x)
        # x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return x
I solved it. The problem was the last batch: I used drop_last=True in the DataLoader and it worked.
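For context, a minimal sketch of what drop_last does (the dataset and sizes here are made up for illustration):

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(1000, 2048), torch.zeros(1000))
# 1000 = 7 * 128 + 104, so the final batch would only contain 104 samples;
# drop_last=True discards it, so every batch matches fixed-size label
# tensors such as torch.zeros(128) / torch.ones(128).
loader = DataLoader(dataset, batch_size=128, shuffle=True, drop_last=True)
for xb, yb in loader:
    assert xb.shape[0] == 128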
I want to use a Keras LSTM to extract time-series features and then feed those features to K-means. But right now I cannot get the layer output values. How can I get them?
This is my LSTM network:
Layer (type) Output Shape Param #
lstm_66 (LSTM) (None, None, 50) 10400
lstm_67 (LSTM) (None, 100) 60400
dense_19 (Dense) (None, 1) 101
activation_19 (Activation) (None, 1) 0
I want to get the lstm_67 output values. My code is:
import keras.backend as K
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
sess = tf.Session()
sess.run(tf.global_variables_initializer())
import numpy as np
statesAll=[]
layers = model.layers
print layers[1].output,type(layers[1].output[1]),sess.run(layers[1].output)
and the result is:
Tensor("lstm_61/TensorArrayReadV3:0", shape=(?, 100), dtype=float32)
So, how can I get the layers output value?
Thanks!
But it does not work. My code is:
def load_data(file_name, sequence_length=10, split=0.8):
    df = pd.read_csv(file_name, sep=',', usecols=[1])
    data_all = np.array(df).astype(float)
    scaler = MinMaxScaler()
    data_all = scaler.fit_transform(data_all)
    data = []
    print len(data_all)
    for i in range(len(data_all) - sequence_length - 1):
        data.append(data_all[i: i + sequence_length + 1])
    reshaped_data = np.array(data).astype('float64')
    np.random.shuffle(reshaped_data)
    x = reshaped_data[:, :-1]
    y = reshaped_data[:, -1]
    split_boundary = int(reshaped_data.shape[0] * split)
    train_x = x[: split_boundary]
    test_x = x[split_boundary:]
    train_y = y[: split_boundary]
    test_y = y[split_boundary:]
    return train_x, train_y, test_x, test_y, scaler

def build_model(n_samples, time_steps, input_dim):
    model = Sequential()
    model.add(LSTM(input_dim=1, output_dim=50, return_sequences=True))
    model.add(LSTM(100, return_sequences=False))
    model.add(Dense(output_dim=1))
    model.add(Activation('linear'))
    model.compile(loss='mse', optimizer='rmsprop')
    print(model.layers)
    return model

def train_model(train_x, train_y, test_x, test_y):
    model = build_model()
    model.fit(train_x, train_y, batch_size=128, nb_epoch=30, validation_split=0.1)
    return model
train_x, train_y, test_x, test_y, scaler = load_data(file path)
train_x = np.reshape(train_x, (train_x.shape[0], train_x.shape[1], 1))
test_x = np.reshape(test_x, (test_x.shape[0], test_x.shape[1], 1))
model = train_model(train_x, train_y, test_x, test_y)
from keras import backend as K
layers = model.layers
K.eval(layers[1].output)
In TensorFlow 2.x, you can do it like this:
from tensorflow.python.keras import backend as K
model = build_model()
# lstm_67 is the second layer.
lstm = K.function([model.layers[0].input], [model.layers[1].output])
lstm_output = lstm([test_x])[0]
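As a follow-up, a sketch of how the extracted features can then go straight into scikit-learn's K-means (assuming test_x is shaped (n_samples, time_steps, 1) as in the code above; n_clusters=5 is an arbitrary choice):

from sklearn.cluster import KMeans

# lstm_output has shape (n_samples, 100): one 100-dimensional feature vector per sequence
kmeans = KMeans(n_clusters=5, random_state=0).fit(lstm_output)
print(kmeans.labels_)  # cluster assignment for each sequence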
keras.backend.eval() should do.
Look at the documentation here and here
First of all, this is a tensor; you need to use the tf.Print() method to see the actual values. If you use Spyder, you will not see this output in the console; you need to run the program from the command line.
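A minimal sketch of how tf.Print is typically wired in (TensorFlow 1.x; the constant here is just a stand-in for the layer output):

import tensorflow as tf  # TensorFlow 1.x

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# tf.Print returns a new tensor that prints the listed tensors whenever it is evaluated
x = tf.Print(x, [x], message="value of x: ")
with tf.Session() as sess:
    sess.run(x)  # the values are written to stderr, not to a notebook/Spyder console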
I want to split the data into labels and features, because tf.nn.softmax_cross_entropy_with_logits requires them separately.
queue = tf.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_after_dequeue,
dtypes=[tf.float32],
shapes=[[n_input+1]] #
)
I make the queue and enqueue the label together with the features.
After that I need to split the label from the features for the cost function, but how do I do that?
Thank you
import tensorflow as tf
import numpy as np
# Parameters
learning_rate = 0.003
training_epochs = 30
batch_size = 2
display_step = 1
min_after_dequeue = 5
capacity = 16246832
# Network Parameters
# feature size
n_input = 199
# 1st layer num features
n_hidden_1 = 150
# 2nd layer num features
n_hidden_2 = 100
# 3rd layer num features
n_hidden_3 = 50
# 4th layer num features
n_hidden_4 = 30
#class
n_classes = 3
#read csv, 0 index is label
filename_queue = tf.train.string_input_producer(["data.csv"])
record_default = [[0.0] for x in xrange(200)] # with a label and 199 features
#testfile
reader = tf.TextLineReader()
#file read
key, value = reader.read(filename_queue)
#decode
features = tf.decode_csv(value, record_defaults= record_default)
featurespack = tf.pack(features)
#xy = tf.map_fn(fn = lambda f: [f[1:],f[0]], elems=featurespack)
#for the batch
queue = tf.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_after_dequeue,
dtypes=[tf.float32],
shapes=[[n_input+1]]
)
#enqueue
enqueue_op = queue.enqueue(featurespack)
#dequeue
inputs = queue.dequeue_many(batch_size)
#threading
qr = tf.train.QueueRunner(queue, [enqueue_op] * 4)
#features n=199
x = tf.placeholder("float", [None, n_input])
# class 0,1,2
y = tf.placeholder("float", [None, n_classes])
#dropout
dropout_keep_prob = tf.placeholder("float")
# Create model
def multilayer_perceptron(_X, _weights, _biases, _keep_prob):
    layer_1 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])), _keep_prob)
    layer_2 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2'])), _keep_prob)
    layer_3 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(layer_2, _weights['h3']), _biases['b3'])), _keep_prob)
    layer_4 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(layer_3, _weights['h4']), _biases['b4'])), _keep_prob)
    return tf.sigmoid(tf.matmul(layer_4, _weights['out']) + _biases['out'])
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=0.1)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=0.1)),
'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], stddev=0.1)),
'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4], stddev=0.1)),
'out': tf.Variable(tf.random_normal([n_hidden_4, n_classes], stddev=0.1))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'b3': tf.Variable(tf.random_normal([n_hidden_3])),
'b4': tf.Variable(tf.random_normal([n_hidden_4])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases, dropout_keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
# optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.8).minimize(cost) # Adam Optimizer
# Initializing the variables
print "1"
with tf.Session() as sess:
    # init
    tf.initialize_all_variables().run()
    # coordinator for the queue threads
    coord = tf.train.Coordinator()
    # start the queue runners
    tf.train.start_queue_runners(coord=coord)
    # create threads for the custom queue runner
    enqueue_threads = qr.create_threads(sess, coord=coord, start=True)
    print sess.run(features)
    print sess.run(features)
    print sess.run(features)
    print sess.run(features)
    print sess.run(features)
    #print sess.run(feature)
    # Training cycle
    for epoch in range(training_epochs):
        print epoch
        avg_cost = 0.
        # Loop over all batches
        for i in range(10):
            print i
            if coord.should_stop():
                break
            # get inputs
            inputs_value = sess.run(inputs)
            # THIS DOES NOT WORK
            batch_xs = np.ndarray([x[1:] for x in inputs_value])
            batch_ys = np.ndarray([x[0] for x in inputs_value])
            print 'batch', len(batch_ys), len(batch_xs)
            #batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data: feed x and y to the optimizer
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, dropout_keep_prob: 0.5})
            # Compute average loss
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, dropout_keep_prob: 0.5})/batch_size
        # Display logs per epoch step
        if epoch % display_step == 0:
            print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    #print ("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels, dropout_keep_prob: 1.}))
    coord.request_stop()
    coord.join(enqueue_threads)
    print ("Optimization Finished!")
I have a model that has been trained on CIFAR-10, but I don't understand how to make a prediction in pycaffe.
I got an image from LMDB, but I don't know how to feed it to the net and get the predicted class.
My code:
net = caffe.Net('acc81/model.prototxt',
'acc81/cifar10_full_iter_70000.caffemodel.h5',
caffe.TEST)
lmdb_env = lmdb.open('cifar10_test_lmdb/')
lmdb_txn = lmdb_env.begin()
lmdb_cursor = lmdb_txn.cursor()
for key, value in lmdb_cursor:
    datum = caffe.proto.caffe_pb2.Datum()
    datum.ParseFromString(value)
    image = caffe.io.datum_to_array(datum)
    image = image.astype(np.uint8)
    # What's next with the image variable?
    # If I try:
    # out = net.forward_all(data=np.asarray([image]))
    # I get Exception: Input blob arguments do not match net inputs.
    print("Image class is " + label)
Use this Python script:
# Run the script with anaconda-python
# $ /home/<path to anaconda directory>/anaconda/bin/python LmdbClassification.py
import sys
import numpy as np
import lmdb
import caffe
from collections import defaultdict
caffe.set_mode_gpu()
# Modify the paths given below
deploy_prototxt_file_path = '/home/<username>/caffe/examples/cifar10/cifar10_deploy.prototxt' # Network definition file
caffe_model_file_path = '/home/<username>/caffe/examples/cifar10/cifar10_iter_5000.caffemodel' # Trained Caffe model file
test_lmdb_path = '/home/<username>/caffe/examples/cifar10/cifar10_test_lmdb/' # Test LMDB database path
mean_file_binaryproto = '/home/<username>/caffe/examples/cifar10/mean.binaryproto' # Mean image file
# Extract mean from the mean image file
mean_blobproto_new = caffe.proto.caffe_pb2.BlobProto()
f = open(mean_file_binaryproto, 'rb')
mean_blobproto_new.ParseFromString(f.read())
mean_image = caffe.io.blobproto_to_array(mean_blobproto_new)
f.close()
# CNN reconstruction and loading the trained weights
net = caffe.Net(deploy_prototxt_file_path, caffe_model_file_path, caffe.TEST)
count = 0
correct = 0
matrix = defaultdict(int) # (real,pred) -> int
labels_set = set()
lmdb_env = lmdb.open(test_lmdb_path)
lmdb_txn = lmdb_env.begin()
lmdb_cursor = lmdb_txn.cursor()
for key, value in lmdb_cursor:
    datum = caffe.proto.caffe_pb2.Datum()
    datum.ParseFromString(value)
    label = int(datum.label)
    image = caffe.io.datum_to_array(datum)
    image = image.astype(np.uint8)
    out = net.forward_all(data=np.asarray([image]) - mean_image)
    plabel = int(out['prob'][0].argmax(axis=0))
    count += 1
    iscorrect = label == plabel
    correct += (1 if iscorrect else 0)
    matrix[(label, plabel)] += 1
    labels_set.update([label, plabel])
    if not iscorrect:
        print("\rError: key = %s, expected %i but predicted %i" % (key, label, plabel))
    sys.stdout.write("\rAccuracy: %.1f%%" % (100. * correct / count))
    sys.stdout.flush()
print("\n" + str(correct) + " out of " + str(count) + " were classified correctly")
print("")
print("Confusion matrix:")
print("(r , p) | count")
for l in labels_set:
    for pl in labels_set:
        print("(%i , %i) | %i" % (l, pl, matrix[(l, pl)]))