How do I optimize the weights of the input layer using backward() for this simple neural network in PyTorch when .grad is None?

I defined the following simple neural network:
import torch
import torch.nn as nn

X = torch.tensor(([1, 2]), dtype=torch.float)
y = torch.tensor([1.])
learning_rate = 0.001

class Neural_Network(nn.Module):
    def __init__(self):
        super(Neural_Network, self).__init__()
        self.W1 = torch.nn.Parameter(torch.tensor(([1, 0], [2, 3]), dtype=torch.float, requires_grad=True))
        self.W2 = torch.nn.Parameter(torch.tensor(([2], [1]), dtype=torch.float, requires_grad=True))

    def forward(self, X):
        self.xW1 = torch.matmul(X, self.W1)
        self.h = torch.tensor([torch.tanh(self.xW1[0]), torch.tanh(self.xW1[1])])
        return torch.sigmoid(torch.matmul(self.h, self.W2))

net = Neural_Network()

for z in range(60):
    loss = (y - net(X))**2
    optim = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
    loss.backward()
    optim.step()
I can run it, and print(net.W1); print(net.W2) prints:
Parameter containing:
tensor([[1., 0.],
[2., 3.]], requires_grad=True)
Parameter containing:
tensor([[2.0078],
[1.0078]], requires_grad=True)
So my problem is that W1 does not seem to be updated.
When I call print(net.W1.grad) I get None for every iteration, which confuses me a lot.
I tried to define the function as one line like so:
loss = (y - torch.sigmoid(math.tanh(x[0] * W_1[0][0] + x[1] * W_1[1][0]) * W_2[0] + math.tanh(x[0] * W_1[0][1] + x[1] * W_1[1][1]) * W_2[1])) ** 2, but it did not help.
Of course I could hardcode the derivative, but that seems painful, and I thought .backward() could be used in this case.
How can I optimize W1 using backward()?

I suspect that the following line:
self.h = torch.tensor([torch.tanh(self.xW1[0]), torch.tanh(self.xW1[1])])
is the culprit.
Building a new tensor with torch.tensor() detaches it from the computation graph: self.h does not inherit the requires_grad attribute from self.xW1 and, by default, it is set to False.
You can instead call self.h = torch.tanh(self.xW1); the operation is applied element-wise to all the elements of self.xW1 and the graph stays intact.
In addition, I suggest you inspect your gradients using PyTorch hooks.
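For illustration, here is a minimal sketch of the fixed model (I also move the optimizer outside the loop and add zero_grad(), which the original snippet omitted); after backward(), net.W1.grad is now populated:

import torch
import torch.nn as nn

class Neural_Network(nn.Module):
    def __init__(self):
        super().__init__()
        self.W1 = nn.Parameter(torch.tensor([[1., 0.], [2., 3.]]))
        self.W2 = nn.Parameter(torch.tensor([[2.], [1.]]))

    def forward(self, X):
        xW1 = torch.matmul(X, self.W1)
        h = torch.tanh(xW1)  # element-wise tanh keeps the computation graph intact
        return torch.sigmoid(torch.matmul(h, self.W2))

X = torch.tensor([1., 2.])
y = torch.tensor([1.])
net = Neural_Network()
optim = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for _ in range(60):
    optim.zero_grad()
    loss = (y - net(X)) ** 2
    loss.backward()
    optim.step()

print(net.W1.grad)  # a tensor now, not None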

Related

Ratio Distribution - The integral is probably divergent, or slowly convergent or failed to converge after 100 iterations

import math

import numpy as np
from scipy.stats import norm, rv_continuous
from scipy.special import erf
import scipy.integrate as integrate

class normal_ratio_wiki(rv_continuous):
    def _pdf(self, z, mu_x, mu_y, sigma_x, sigma_y):
        a_z = np.sqrt(((1/(sigma_x**2))*(np.power(z,2))) + (1/(sigma_y**2)))
        b_z = ((mu_x/(sigma_x**2)) * z) + (mu_y/sigma_y**2)
        c = ((mu_x**2)/(sigma_x**2)) + ((mu_y**2)/(sigma_y**2))
        d_z = np.exp(((b_z**2)-((c*a_z**2))) / (2*(a_z**2)))
        pdf_z = ((b_z * d_z) / (a_z**3)) * (1/(np.sqrt(2*math.pi)*sigma_x*sigma_y)) * \
                (norm.cdf(b_z/a_z) - norm.cdf(-b_z/a_z)) + ((1/((a_z**2) * math.pi * sigma_x * sigma_y))*np.exp(-c/2))
        return pdf_z

    def _cdf(self, z, mu_x, mu_y, sigma_x, sigma_y):
        cdf_z = integrate.quad(self._pdf, -np.inf, np.inf, args=(mu_x, mu_y, sigma_x, sigma_y))[0]
        return cdf_z

rng1 = np.random.default_rng(99)
rng2 = np.random.default_rng(88)

# Sample Data 1
x = rng1.normal(141739.951, 1.223808e+06, 1000)
y = rng2.normal(333.91, 64.494571, 1000)

# Sample Data 2
# x = rng1.normal(500, 20, 1000)
# y = rng2.normal(400, 10, 1000)

z = x / y

# 1st approach with normal_ratio_wiki
mu_x = x.mean()
mu_y = y.mean()
sigma_x = x.std()
sigma_y = y.std()

rng3 = np.random.default_rng(11)
nr_wiki_inst = normal_ratio_wiki(name='normal_ratio_wiki', seed=rng3)
nr_wiki_vars = nr_wiki_inst.rvs(mu_x, mu_y, sigma_x, sigma_y, size=100)
nr_wiki_params = nr_wiki_vars.fit(nr_wiki_vars)
Hello, I am working on simulating the ratio distribution of two uncorrelated normal distributions by defining a custom distribution using scipy.
The approach is from here.
While calling scipy.dist.rvs or fit on the custom distribution defined above, I get the errors RuntimeError: Failed to converge after 100 iterations. and IntegrationWarning: The integral is probably divergent, or slowly convergent. respectively. If I comment out _cdf(...), then scipy.dist.rvs takes a very long time to run, but fit still fails. I tried different bounded intervals but had no success.
I believe implementing custom _rvs, _ppf and/or _fit methods may help resolve the issue. How should these be defined based on the _pdf and _cdf methods above? Please advise.
Note that integrate.quad(nr_wiki_inst.pdf, -np.inf, np.inf, args=(mu_x, mu_y, sigma_x, sigma_y)) works separately without any issues.
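One possible direction, as a rough sketch only (the class name, helper names and bracket bounds below are illustrative assumptions, not tested code): the posted _cdf integrates the pdf over the whole real line regardless of z, whereas a CDF should integrate up to z, and a custom _ppf can then be obtained by root-finding so that rvs no longer relies on SciPy's generic iterative inversion.

from scipy import integrate, optimize
import numpy as np

class normal_ratio_wiki2(normal_ratio_wiki):
    # reuses the closed-form _pdf from the class defined in the question

    def _cdf_scalar(self, z, mu_x, mu_y, sigma_x, sigma_y):
        # integrate the pdf up to z (the posted _cdf integrated over (-inf, inf))
        return integrate.quad(self._pdf, -np.inf, z,
                              args=(mu_x, mu_y, sigma_x, sigma_y))[0]

    def _cdf(self, z, mu_x, mu_y, sigma_x, sigma_y):
        return np.vectorize(self._cdf_scalar)(z, mu_x, mu_y, sigma_x, sigma_y)

    def _ppf_scalar(self, q, mu_x, mu_y, sigma_x, sigma_y):
        # invert the CDF by bracketed root-finding; the bracket is ad hoc and
        # would need tuning to the scale of the data
        return optimize.brentq(
            lambda z: self._cdf_scalar(z, mu_x, mu_y, sigma_x, sigma_y) - q,
            -1e4, 1e4)

    def _ppf(self, q, mu_x, mu_y, sigma_x, sigma_y):
        return np.vectorize(self._ppf_scalar)(q, mu_x, mu_y, sigma_x, sigma_y)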

Weights of network stays the same after optimizer step

My network just refuses to train. To make code reading less of a hassle, I abbreviate some complicated logic. Would update more if needed.
model = DistMultNN()
optimizer = optim.SGD(model.parameters(), lr=0.0001)

for t in range(500):
    e1_neg = sampling_logic()
    e2_neg = sampling_logic()
    e1_pos = sampling_logic()
    r = sampling_logic()
    e2_pos = sampling_logic()
    optimizer.zero_grad()
    y_pred = model(tuple(zip(e1_pos, r, e2_pos)), e1_neg, e2_neg)
    loss = model.loss(y_pred)
    loss.backward()
    optimizer.step()
I define my network as follows:
class DistMultNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.seed = 42
        self.entities_embedding = nn.init.xavier_uniform_(
            torch.zeros((self.NO_ENTITIES, self.ENCODING_DIM), requires_grad=True))
        self.relation_embedding = nn.init.xavier_uniform_(
            torch.zeros((self.NO_RELATIONSHIPS, self.ENCODING_DIM), requires_grad=True))
        self.W = torch.rand(self.ENCODING_DIM, self.ENCODING_DIM, requires_grad=True)  # W is symmetric, todo: requireGrad?
        self.W = (self.W + self.W.t()) / 2
        self.b = torch.rand(self.ENCODING_DIM, 1, requires_grad=True)
        self.lambda_ = 1.
        self.rnn = torch.nn.RNN(input_size=encoding_dim, hidden_size=1, num_layers=1, nonlinearity='relu')
        self.loss_func = torch.nn.LogSoftmax(dim=1)

    def loss(self, y_pred):
        softmax = -1 * self.loss_func(y_pred)
        result = torch.mean(softmax[:, 0])
        result.requires_grad = True
        return result

    def forward(self, samples, e1neg, e2neg):
        batch_size = len(samples)
        batch_result = np.zeros((batch_size, len(e1neg[0]) + 1))
        for datapoint_id in range(batch_size):
            entity_1 = entities_embed_lookup(datapoint_id[0])
            entity_2 = entities_embed_lookup(datapoint_id[2])
            r = relation_embed_lookup(datapoint_id[1])
            x = self.some_fourier_transform(entity_1, r, entity_2)
            batch_result[datapoint_id][0] = self.some_matmul(x)
            for negative_example_id in range(len(e1neg[0])):
                same_thing_with_negative_examples()
                batch_result[datapoint_id][negative_example_id + 1] = self.some_matmul(x)
        batch_result_tensor = torch.tensor(data=batch_result)
        return batch_result_tensor
I tried checking weights using e.g. print(model.rnn.all_weights) in the training loop but they do not change. What did I do wrong?
First of all, result.requires_grad = True should not be needed and should in fact throw an error, because result would normally not be a leaf variable.
At the end of your forward you create a new tensor out of a numpy array:
batch_result_tensor = torch.tensor(data=batch_result)
and out of this result you calculate the loss and want to backward it. This doesn't work, because batch_result_tensor is not part of any computation graph needed to calculate a gradient. You can't just mix numpy and torch this way.
The forward function has to consist of operations on torch tensors that require grad if you want to update and optimize them. In the default case you have layers with weight tensors that require grad; you pass an input through the layers, the computational graph is built, and all the operations are recorded in it.
So I would start by making batch_result a torch tensor and removing batch_result_tensor = torch.tensor(data=batch_result) and result.requires_grad = True. You might have to change more.
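As a rough sketch of that change (reusing the question's placeholder helpers such as some_matmul, so this is only illustrative), the per-example scores can be collected in Python lists and combined with torch.stack, which keeps the computation graph:

def forward(self, samples, e1neg, e2neg):
    batch_size = len(samples)
    rows = []
    for datapoint_id in range(batch_size):
        # ... same lookups and transforms as before, but keep every score as a
        # torch tensor produced by differentiable torch operations
        scores = [self.some_matmul(x)]              # positive example score
        for negative_example_id in range(len(e1neg[0])):
            # same_thing_with_negative_examples()
            scores.append(self.some_matmul(x))      # negative example scores
        rows.append(torch.stack(scores))
    # stacking instead of filling a numpy array keeps the graph; no torch.tensor() copy needed
    return torch.stack(rows)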

How to define a loss function in pytorch with dependency to partial derivatives of the model w.r.t input?

After reading about how to solve an ODE with neural networks, following the paper Neural Ordinary Differential Equations and the blog that uses the library JAX, I tried to do the same thing with "plain" PyTorch but found a point rather "obscure": how to properly use the partial derivative of a function (in this case the model) w.r.t. one of the input parameters.
To summarize the problem at hand, as shown in the blog, the aim is to solve the ODE y' = -2*x*y with the condition y(x=0) = 1 in the domain -2 <= x <= 2. Instead of using finite differences, the solution is replaced by a NN as y(x) = NN(x) with a single hidden layer with 10 nodes.
I managed to (more or less) replicate the blog with the following code
import torch
import torch.nn as nn
from torch import optim
import matplotlib.pyplot as plt
import numpy as np

# Define the NN model to solve the problem
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.lin1 = nn.Linear(1, 10)
        self.lin2 = nn.Linear(10, 1)

    def forward(self, x):
        x = torch.sigmoid(self.lin1(x))
        x = torch.sigmoid(self.lin2(x))
        return x

model = Model()

# Define loss_function from the Ordinary differential equation to solve
def ODE(x, y):
    dydx, = torch.autograd.grad(y, x,
                                grad_outputs=y.data.new(y.shape).fill_(1),
                                create_graph=True, retain_graph=True)
    eq = dydx + 2. * x * y                # y' = -2x*y
    ic = model(torch.tensor([0.])) - 1.   # y(x=0) = 1
    return torch.mean(eq**2) + ic**2

loss_func = ODE

# Define the optimization
# opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.99, nesterov=True)  # Equivalent to blog
opt = optim.Adam(model.parameters(), lr=0.1, amsgrad=True)  # Got faster convergence with Adam using amsgrad

# Define reference grid
x_data = torch.linspace(-2.0, 2.0, 401, requires_grad=True)
x_data = x_data.view(401, 1)  # reshaping the tensor

# Iterative learning
epochs = 1000
for epoch in range(epochs):
    opt.zero_grad()
    y_trial = model(x_data)
    loss = loss_func(x_data, y_trial)
    loss.backward()
    opt.step()
    if epoch % 100 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))

# Plot Results
plt.plot(x_data.data.numpy(), np.exp(-x_data.data.numpy()**2), label='exact')
plt.plot(x_data.data.numpy(), y_trial.data.numpy(), label='approx')
plt.legend()
plt.show()
From here I managed to get the results shown in the figure (a plot of the exact vs. approximate solution).
The problem is that, in the definition of the ODE functional, instead of passing (x, y) I would prefer to pass something like (x, fun) (where fun is my model), such that the partial derivative and specific evaluations of the model can be done with a call. So, something like
def ODE(x, fun):
    dydx, = "grad of fun w.r.t x as a function"
    eq = dydx(x) + 2. * x * fun(x)      # y' = -2x*y
    ic = fun(torch.tensor([0.])) - 1.   # y(x=0) = 1
    return torch.mean(eq**2) + ic**2
Any ideas? Thanks in advance
EDIT:
After some trials I found a way to pass the model as an input but found another strange behavior... The new problem is to solve the ODE y'' = -2 with the BC y(x=-2) = -1 and y(x=2) = 1, for which the analytical solution is y(x) = -x^2+x/2+4
Let's modify the previous code a bit:
import torch
import torch.nn as nn
from torch import optim
import matplotlib.pyplot as plt
import numpy as np

# Define the NN model to solve the equation
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.lin1 = nn.Linear(1, 10)
        self.lin2 = nn.Linear(10, 1)

    def forward(self, x):
        y = torch.sigmoid(self.lin1(x))
        z = torch.sigmoid(self.lin2(y))
        return z

model = Model()

# Define loss_function from the Ordinary differential equation to solve
def ODE(x, fun):
    y = fun(x)
    dydx = torch.autograd.grad(y, x,
                               grad_outputs=y.data.new(y.shape).fill_(1),
                               create_graph=True, retain_graph=True)[0]
    d2ydx2 = torch.autograd.grad(dydx, x,
                                 grad_outputs=dydx.data.new(dydx.shape).fill_(1),
                                 create_graph=True, retain_graph=True)[0]
    eq = d2ydx2 + torch.tensor([2.])                      # y'' = -2
    bc1 = fun(torch.tensor([-2.])) - torch.tensor([-1.])  # y(x=-2) = -1
    bc2 = fun(torch.tensor([ 2.])) - torch.tensor([ 1.])  # y(x= 2) =  1
    return torch.mean(eq**2) + bc1**2 + bc2**2

loss_func = ODE
loss_func = ODE
So here I passed the model as an argument and managed to differentiate twice... so far so good. BUT using the sigmoid function for this case is not only unnecessary, it also gives a result far from the analytical one.
If I change the NN for:
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.lin1 = nn.Linear(1, 1)
        self.lin2 = nn.Linear(1, 1)

    def forward(self, x):
        y = self.lin1(x)
        z = self.lin2(y)
        return z
In which case I would expect to optimize a double pass through two linear functions that would retrieve a 2nd order function ... I get the error:
RuntimeError: One of the differentiated Tensors appears to not have been used in the graph. Set allow_unused=True if this is the desired behavior.
Adding the option to the definition of dydx doesn't solve the problem, and adding it to d2ydx2 gives a NoneType definition.
Is there something wrong with the layers as they are?
Quick Solution:
add allow_unused=True to .grad functions. So, change
dydx = torch.autograd.grad(
    y, x,
    grad_outputs=y.data.new(y.shape).fill_(1),
    create_graph=True, retain_graph=True)[0]

d2ydx2 = torch.autograd.grad(dydx, x, grad_outputs=dydx.data.new(
    dydx.shape).fill_(1), create_graph=True, retain_graph=True)[0]
To
dydx = torch.autograd.grad(
    y, x,
    grad_outputs=y.data.new(y.shape).fill_(1),
    create_graph=True, retain_graph=True, allow_unused=True)[0]

d2ydx2 = torch.autograd.grad(dydx, x, grad_outputs=dydx.data.new(
    dydx.shape).fill_(1), create_graph=True, retain_graph=True, allow_unused=True)[0]
More explanation:
See what allow_unused does:
allow_unused (bool, optional): If ``False``, specifying inputs that were not
used when computing outputs (and therefore their grad is always zero)
is an error. Defaults to ``False``.
So if you try to differentiate w.r.t. a variable that is not being used to compute the value, it will give an error. Also note that the error only occurs when you use linear layers.
This is because when you use linear layers, you have y = W1*W2*x + b = W*x + b, and dy/dx is not a function of x; it is simply W. So when you try to differentiate dy/dx w.r.t. x it throws an error. This error goes away as soon as you use the sigmoid, because then dy/dx is a function of x. To avoid the error, either make sure dy/dx is a function of x or use allow_unused=True.
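A minimal way to see this (my own illustration, not part of the original answer): with a purely linear model, dydx has no dependence on x, so the second grad call has nothing to differentiate unless allow_unused=True is passed, in which case it returns None.

import torch

x = torch.linspace(-2., 2., 5, requires_grad=True).view(-1, 1)
lin = torch.nn.Linear(1, 1)

y = lin(x)  # y = w*x + b, so dy/dx = w (a constant w.r.t. x)
dydx, = torch.autograd.grad(y, x, grad_outputs=torch.ones_like(y),
                            create_graph=True)

# dydx does not depend on x: without allow_unused=True this raises
# "One of the differentiated Tensors appears to not have been used in the graph";
# with it, the gradient comes back as None.
d2ydx2, = torch.autograd.grad(dydx, x, grad_outputs=torch.ones_like(dydx),
                              allow_unused=True)
print(d2ydx2)  # None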

GP + Tensorflow training

I am trying to train a GPR model and a tensorflow model together. The training part has no issue. But for prediction using the trained model I receive a type error in a tf.placeholder op.
pred, uncp=sess.run([my, yv], feed_dict={X:xtr})
The code is similar to the 2nd example from https://gpflow.readthedocs.io/en/master/notebooks/advanced_usage.html
import numpy as np
import tensorflow as tf
import gpflow

float_type = gpflow.settings.float_type
gpflow.reset_default_graph_and_session()

def cnn_fn(x, output_dim):
    out = tf.layers.dense(inputs=x, units=output_dim, activation=tf.nn.relu)
    print(out)
    return out

N = 150
xtr = np.random.rand(N, 1)
ytr = np.sin(12*xtr) + 0.66*np.cos(25*xtr) + np.random.randn(N, 1)*0.1 + 3
xtr = np.random.rand(N, 28)
print(xtr.shape, ytr.shape)

nepoch = 50
gp_dim = xtr.shape[1]
print(gp_dim)
minibatch_size = 16

X = tf.placeholder(tf.float32, [None, gp_dim])
Y = tf.placeholder(tf.float32, [None, 1])

with tf.variable_scope('cnn'):
    f_X = tf.cast(cnn_fn(X, gp_dim), dtype=float_type)

k = gpflow.kernels.Matern52(gp_dim)
gp_model = gpflow.models.GPR(f_X, tf.cast(Y, dtype=float_type), k)

loss = -gp_model.likelihood_tensor
m, v = gp_model._build_predict(f_X)
my, yv = gp_model.likelihood.predict_mean_and_var(m, v)

with tf.variable_scope('adam'):
    opt_step = tf.train.AdamOptimizer(0.001).minimize(loss)

tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='adam')
tf_vars += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='cnn')

## initialize
sess = tf.Session()
sess.run(tf.variables_initializer(var_list=tf_vars))
gp_model.initialize(session=sess)

for i in range(nepoch):
    shind = np.array(range(len(xtr)))
    np.random.shuffle(shind)
    for j in range(int(len(xtr)/minibatch_size)):
        ind = shind[j*minibatch_size: (j+1)*minibatch_size]
        sess.run(opt_step, feed_dict={X: xtr[ind], Y: ytr[ind]})
Executing the code above runs fine. But adding the following line gives an error:
pred, uncp=sess.run([my, yv], feed_dict={X:xtr})
with the following error:
<ipython-input-25-269715087df2> in <module>
----> 1 pred, uncp=sess.run([my, yv], feed_dict={X:xtr})
[...]
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_1' with dtype float and shape [?,1]
[[node Placeholder_1 (defined at <ipython-input-24-39ccf45cd248>:2) = Placeholder[dtype=DT_FLOAT, shape=[?,1], _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
The reason your code fails is because you are not actually feeding in a value to one of the placeholders. This is easier to spot if you actually give them names:
X = tf.placeholder(tf.float32, [None, gp_dim], name='X')
Y = tf.placeholder(tf.float32, [None, 1], name='Y')
Tensorflow requires the entire compute graph to be well-defined, and the GPR model you are using depends on both X and Y. If you run the following line, it works fine:
pred, uncp = sess.run([my, yv], feed_dict={X: xtr, Y: ytr})
Update: as user1018464 pointed out in the comment, you are using the GPR model, in which the predictions directly depend on the training data (e.g. see equations (2.22) and (2.23) on page 16 of http://www.gaussianprocess.org/gpml/chapters/RW2.pdf). Hence you will need to pass in both xtr and ytr to make predictions.
Other models such as SVGP represent the function through "inducing features", commonly "pseudo input/output" pairs that summarise the data, in which case you won't need to feed in the original input values at all (I got it wrong when I first answered).
You could set up the model as follows:
gp_model = gpflow.models.SVGP(f_X, tf.cast(Y, dtype=float_type), k, gpflow.likelihoods.Gaussian(), xtr.copy(), num_data=N)
Then pred, uncp=sess.run([my, yv], feed_dict={X:xtr}) works as expected.
If you want to predict at different points Xtest, you need to set up a separate placeholder, and reuse the cnn (note the reuse=True in the variable_scope with the same name), as in example 2 of the notebook:
Xtest = tf.placeholder(tf.float32, [None, Mnist.input_dim], name='Xtest')
with tf.variable_scope('cnn', reuse=True):
    f_Xtest = tf.cast(cnn_fn(Xtest, gp_dim), dtype=float_type)
Set up the model as before using f_X, but use f_Xtest in the call to _build_predict:
m, v = gp_model._build_predict(f_Xtest)
my, yv = gp_model.likelihood.predict_mean_and_var(m, v)
Now you need to pass in both X, Y, and Xtest into the session's run():
pred, uncp = sess.run([my, yv], feed_dict={X: xtr, Y: ytr, Xtest: xtest})
where xtest is the numpy array with points at which you want to predict.
GPflow manages TensorFlow sessions for you, and you don't need to create your own TF session when you use GPflow alone. In your case, tf.layers.dense creates
new variables, which should be initialized, and I would advise using the session created by GPflow. Essentially, you need to replace these lines
## initialize
sess = tf.Session()
sess.run(tf.variables_initializer(var_list=tf_vars))
gp_model.initialize(session=sess)
with:
sess = gpflow.get_default_session()
sess.run(tf.variables_initializer(var_list=tf_vars))
or wrap your code in a default session context:
with tf.Session() as session:
    ... build and run

How to monitor tensor values in Theano/Keras?

I know this question has been asked in various forms, but I can't really find any answer I can understand and use. So forgive me if this is a basic question, because I'm a newbie to these tools (Theano/Keras).
Problem to Solve
Monitor variables in Neural Networks
(e.g. input/forget/output gate values in LSTM)
What I'm currently getting
No matter at which stage I grab those values, I get something like:
Elemwise{mul,no_inplace}.0
Elemwise{mul,no_inplace}.0
[for{cpu,scan_fn}.2, Subtensor{int64::}.0, Subtensor{int64::}.0]
[for{cpu,scan_fn}.2, Subtensor{int64::}.0, Subtensor{int64::}.0]
Subtensor{int64}.0
Subtensor{int64}.0
Is there any way I can monitor them (e.g. print to stdout, write to a file, etc.)?
Possible Solution
It seems like callbacks in Keras can do the job, but that doesn't work for me either. I'm getting the same thing as above.
My Guess
Seems like I'm making very simple mistakes.
Thank you very much in advance, everyone.
ADDED
Specifically, I'm trying to monitor input/forget/output gating values in LSTM.
I found that LSTM.step() is for computing those values:
def step(self, x, states):
    h_tm1 = states[0]  # hidden state of the previous time step
    c_tm1 = states[1]  # cell state from the previous time step
    B_U = states[2]    # dropout matrices for recurrent units?
    B_W = states[3]    # dropout matrices for input units?

    if self.consume_less == 'cpu':  # just cut x into 4 pieces in columns
        x_i = x[:, :self.output_dim]
        x_f = x[:, self.output_dim: 2 * self.output_dim]
        x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
        x_o = x[:, 3 * self.output_dim:]
    else:
        x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
        x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
        x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
        x_o = K.dot(x * B_W[3], self.W_o) + self.b_o

    i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
    f = self.inner_activation(x_f + K.dot(h_tm1 * B_U[1], self.U_f))
    c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * B_U[2], self.U_c))
    o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[3], self.U_o))

    with open("test_visualization.txt", "a") as myfile:
        myfile.write(str(i) + "\n")

    h = o * self.activation(c)
    return h, [h, c]
As in the code above, I tried to write the value of i to a file, but it only gave me values like:
Elemwise{mul,no_inplace}.0
[for{cpu,scan_fn}.2, Subtensor{int64::}.0, Subtensor{int64::}.0]
Subtensor{int64}.0
So I tried i.eval() or i.get_value(), but both failed to give me values.
.eval() gave me this:
theano.gof.fg.MissingInputError: An input of the graph, used to compute Subtensor{::, :int64:}(<TensorType(float32, matrix)>, Constant{10}), was not provided and not given a value.Use the Theano flag exception_verbosity='high',for more information on this error.
and .get_value() gave me this:
AttributeError: 'TensorVariable' object has no attribute 'get_value'
So I backtracked those call chains (which line calls which function) and tried to get values at every step I found, but in vain.
It feels like I'm stuck in some basic pitfall.
I use the solution described in the Keras FAQ:
http://keras.io/getting-started/faq/#how-can-i-visualize-the-output-of-an-intermediate-layer
In detail:
from keras import backend as K
intermediate_tensor_function = K.function([model.layers[0].input],[model.layers[layer_of_interest].output])
intermediate_tensor = intermediate_tensor_function([thisInput])[0]
yields:
array([[ 3., 17.]], dtype=float32)
However I'd like to use the functional API but I can't seem to get the actual tensor, only the symbolic representation. For example:
model.layers[1].output
yields:
<tf.Tensor 'add:0' shape=(?, 2) dtype=float32>
I'm missing something about the interaction of Keras and Tensorflow here but I'm not sure what. Any insight much appreciated.
One solution is to create a version of your network that is truncated at the LSTM layer whose gate values you want to monitor, and then replace the original layer with a custom layer in which the step function is modified to return not only the hidden layer values, but also the gate values.
For instance, say you want to access the gate values of a GRU. Create a custom layer GRU2 that inherits everything from the GRU class, but adapt the step function such that it returns a concatenation of the states you want to monitor, and then takes only the part containing the previous hidden layer activations when computing the next activations. I.e.:
def step(self, x, states):
    # get prev hidden layer from input that is concatenation of
    # prev hidden layer + reset gate + update gate
    x = x[:self.output_dim, :]

    ###############################################
    # This is the original code from the GRU layer
    #
    h_tm1 = states[0]  # previous memory
    B_U = states[1]    # dropout matrices for recurrent units
    B_W = states[2]

    if self.consume_less == 'gpu':
        matrix_x = K.dot(x * B_W[0], self.W) + self.b
        matrix_inner = K.dot(h_tm1 * B_U[0], self.U[:, :2 * self.output_dim])
        x_z = matrix_x[:, :self.output_dim]
        x_r = matrix_x[:, self.output_dim: 2 * self.output_dim]
        inner_z = matrix_inner[:, :self.output_dim]
        inner_r = matrix_inner[:, self.output_dim: 2 * self.output_dim]
        z = self.inner_activation(x_z + inner_z)
        r = self.inner_activation(x_r + inner_r)
        x_h = matrix_x[:, 2 * self.output_dim:]
        inner_h = K.dot(r * h_tm1 * B_U[0], self.U[:, 2 * self.output_dim:])
        hh = self.activation(x_h + inner_h)
    else:
        if self.consume_less == 'cpu':
            x_z = x[:, :self.output_dim]
            x_r = x[:, self.output_dim: 2 * self.output_dim]
            x_h = x[:, 2 * self.output_dim:]
        elif self.consume_less == 'mem':
            x_z = K.dot(x * B_W[0], self.W_z) + self.b_z
            x_r = K.dot(x * B_W[1], self.W_r) + self.b_r
            x_h = K.dot(x * B_W[2], self.W_h) + self.b_h
        else:
            raise Exception('Unknown `consume_less` mode.')
        z = self.inner_activation(x_z + K.dot(h_tm1 * B_U[0], self.U_z))
        r = self.inner_activation(x_r + K.dot(h_tm1 * B_U[1], self.U_r))
        hh = self.activation(x_h + K.dot(r * h_tm1 * B_U[2], self.U_h))

    h = z * h_tm1 + (1 - z) * hh
    #
    # End of original code
    ###########################################################

    # concatenate states you want to monitor, in this case the
    # hidden layer activations and gates z and r
    all = K.concatenate([h, z, r])

    # return everything
    return all, [h]
(Note that the only lines I added are at the beginning and end of the function).
If you then run your network with GRU2 as the last layer instead of GRU (with return_sequences=True for the GRU2 layer), you can just call predict on your network; this will give you all hidden layer and gate values.
The same thing should work for LSTM, although you might have to puzzle a bit to figure out how to store all the outputs you want in one vector and retrieve them again afterwards.
Hope that helps!
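For example, assuming the custom layer was built with return_sequences=True and its step returns the concatenation [h, z, r] of width 3 * output_dim, the prediction could be split back into the monitored parts roughly like this (model, x_input and output_dim stand for your own network, input array and layer size, so this is only a sketch):

out = model.predict(x_input)  # shape (batch, timesteps, 3 * output_dim)

h = out[:, :, :output_dim]                  # hidden activations
z = out[:, :, output_dim:2 * output_dim]    # update gate values
r = out[:, :, 2 * output_dim:]              # reset gate values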
You can use theano's printing module for printing during execution (and not during definition, which is what you're doing and the reason why you're not getting values, but their abstract definition).
Print
Just use the Print function. Don't forget to use the output of Print to continue your graph, otherwise the output will be disconnected and Print will most likely be removed during optimisation. And you will see nothing.
from keras import backend as K
from theano.printing import Print

def someLossFunction(x, ref):
    loss = K.square(x - ref)
    loss = Print('Loss tensor (before sum)')(loss)
    loss = K.sum(loss)
    loss = Print('Loss scalar (after sum)')(loss)
    return loss
Plot
A little bonus you might enjoy.
The Print class has a global_fn parameter to override the default callback used for printing. You can provide your own function and directly access the data, to build a plot for instance.
from keras import backend as K
from theano.printing import Print
import matplotlib.pyplot as plt

curve = []

# the callback function
def myPlottingFn(printObj, data):
    global curve
    # Store scalar data
    curve.append(data)
    # Plot it
    fig, ax = plt.subplots()
    ax.plot(curve, label=printObj.message)
    ax.legend(loc='best')
    plt.show()

def someLossFunction(x, ref):
    loss = K.sum(K.square(x - ref))
    # Callback is defined on the line below
    loss = Print('Loss scalar (after sum)', global_fn=myPlottingFn)(loss)
    return loss
BTW the string you passed to Print('...') is stored in the print object under the property name message (see the function myPlottingFn). This is useful for building multi-curve plots automatically.