Issue while solving Gym environment; ValueError: Weights for model sequential have not yet been created - tf.keras

I am new to Stack Overflow, so I apologize for any errors in asking a question. I am trying to solve the CartPole-v1 Gym environment using a DQN agent, and I am running into the following error: ValueError: Weights for model sequential have not yet been created. Weights are created when the Model is first called on inputs or build() is called with an input_shape. I've searched for how to fix this, but without success. My TensorFlow version is 2.8.0. My agent code is below; I believe the problem is most likely in my _build_model method and in the model.fit line.
import random
from collections import deque

import numpy as np
import tensorflow as tf


class DQNAgent0:
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=2000)
        self.gamma = 0.95           # discount factor
        self.epsilon = 1.0          # 100% exploration at the start
        self.epsilon_decay = 0.995
        self.epsilon_min = 0.01
        self.learning_rate = 0.001
        self.model = self._build_model()

    def _build_model(self):
        '''model = tf.keras.Sequential([
            tf.keras.layers.Dense(1),
            #tf.keras.Input((self.state_size,)),
            tf.keras.layers.Dense(24, activation="relu"),
            tf.keras.layers.Dense(24, activation="relu"),
            tf.keras.layers.Dense(self.action_size, activation="linear"),
        ])
        model.compile(loss=tf.keras.losses.mse,
                      optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate))'''
        #model = tf.keras.Sequential()
        model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
        model.add(tf.keras.Input(shape=self.state_size))
        model.add(tf.keras.layers.Dense(24, activation='relu'))
        model.add(tf.keras.layers.Dense(24, activation='relu'))
        model.add(tf.keras.layers.Dense(self.action_size, activation='linear'))
        #opt = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
        #model.compile(loss='mse', optimizer=opt)
        model.compile(loss=tf.keras.losses.mse,
                      optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate))
        return model

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)  # exploratory action
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])

    def replay(self, batch_size):
        # creating a random sample from our memory
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                target = (reward + self.gamma * np.amax(self.model.predict(next_state[0])))  # reward at current timestep + discounted future reward
            target_f = self.model.predict(state)
            target_f[0][action] = target  # mapping future reward to the current reward
            self.model.fit(tf.expand_dims(state, axis=-1), target_f, epochs=1, verbose=0)  # fitting the model with state as input x and target_f as y (predicted future reward)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def load(self, name):
        self.model.load_weights(name)

    def save(self, name):
        self.model.save_weights(name)
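For reference, a minimal sketch of a _build_model variant that declares the input shape up front and drops the leading Dense(1) layer; this is an assumption about the intended architecture, not a verified fix:

    def _build_model(self):
        # Sketch: explicit Input so Keras knows the input shape and can create the weights,
        # assuming the stray Dense(1) at the front was unintentional.
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(self.state_size,)),
            tf.keras.layers.Dense(24, activation='relu'),
            tf.keras.layers.Dense(24, activation='relu'),
            tf.keras.layers.Dense(self.action_size, activation='linear'),
        ])
        model.compile(loss=tf.keras.losses.mse,
                      optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate))
        return model

The predict/fit calls may also need the state reshaped to a batch of shape (1, state_size) rather than tf.expand_dims(state, axis=-1), though that is a separate guess.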


Test Custom CrossValidator with original CrossValidator of SparkML

I created a custom CrossValidator with SplitType as Random/Time-based/Stratified, based on this reference. When SplitType == Random, I use the original _kFold function of CrossValidator in pyspark.ml to test the result of my custom CrossValidator, with the _fit function as below.
I expected that when SplitType == Random the results would be close to what the original SparkML CrossValidator produces, but even after multiple runs, the original SparkML CrossValidator always gives better results. Can anyone help me understand the reasons, please?
def _fit(self, dataset: DataFrame):
    est = self.getOrDefault(self.estimator)
    epm = self.getOrDefault(self.estimatorParamMaps)
    numModels = len(epm)
    eva = self.getOrDefault(self.evaluator)
    nFolds = self.getOrDefault(self.numFolds)
    metrics_all = [[0.0] * numModels for i in range(nFolds)]
    SplitType = self.getOrDefault(self.SplitType)

    pool = ThreadPool(processes=min(self.getParallelism(), numModels))
    subModels = None
    collectSubModelsParam = self.getCollectSubModels()
    if collectSubModelsParam:
        subModels = [[None for j in range(numModels)] for i in range(nFolds)]

    if SplitType == 'Random':
        datasets = self._kFold(dataset)
        print(SplitType)
    elif SplitType == 'Stratified':
        datasets = self.stratify_data(dataset)
        print(SplitType)

    for i in range(nFolds):
        validation = datasets[i][1].cache()
        train = datasets[i][0].cache()

        tasks = map(
            inheritable_thread_target,
            _parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam),
        )
        for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
            metrics_all[i][j] = metric
            if collectSubModelsParam:
                assert subModels is not None
                subModels[i][j] = subModel

        validation.unpersist()
        train.unpersist()

    metrics, std_metrics = CrossValidator._gen_avg_and_std_metrics(metrics_all)

    if eva.isLargerBetter():
        bestIndex = np.argmax(metrics)
    else:
        bestIndex = np.argmin(metrics)
    bestModel = est.fit(dataset, epm[bestIndex])
    return self._copyValues(
        CrossValidatorModel(bestModel, metrics, cast(List[List[Model]], subModels), std_metrics)
    )
With the original CrossValidator, the result is always much more stable and better, even after multiple runs:
lr = LogisticRegression(featuresCol = 'features', labelCol = 'label')
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1, 5]).build()
evaluator = BinaryClassificationEvaluator()
scv = CustomCrossValidator(
    estimator=lr,
    estimatorParamMaps=grid,
    evaluator=evaluator,
    numFolds=4,
    SplitType='Random'
)
Origin CrossValidator:

avgMetrics   stdMetrics
0.5          0
0.8457       0.0425
0.8457       0.0425
My Custom CrossValidator with Random SplitType:

avgMetrics   stdMetrics
0.5          0
0.8278       0.0937
0.8278       0.0937
Can anyone help explain the reasons, please?
Many thanks.
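One check that might help isolate the cause (a sketch; it assumes CustomCrossValidator inherits the standard seed param from CrossValidator, which the post does not show): pin the same seed on both validators so _kFold generates identical random folds, then compare avgMetrics again.

    from pyspark.ml.tuning import CrossValidator

    # Same seed on both, so the random folds should be identical (assumption: the custom
    # class keeps CrossValidator's seed param and passes it through to _kFold).
    cv_original = CrossValidator(estimator=lr, estimatorParamMaps=grid,
                                 evaluator=evaluator, numFolds=4, seed=42)
    cv_custom = CustomCrossValidator(estimator=lr, estimatorParamMaps=grid,
                                     evaluator=evaluator, numFolds=4,
                                     SplitType='Random', seed=42)

If the gap in avgMetrics and stdMetrics persists with identical folds, the difference comes from the custom _fit logic rather than from fold randomness.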

How to use nn.MultiheadAttention together with nn.LSTM?

I'm trying to build a PyTorch network for image captioning.
Currently I have a working network of Encoder and Decoder, and I want to add an nn.MultiheadAttention layer to it (to be used as self-attention).
Currently my decoder looks like this:
class Decoder(nn.Module):
    def __init__(self, hidden_size, embed_dim, vocab_size, layers=1):
        super(Decoder, self).__init__()

        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.layers = layers
        self.hidden_size = hidden_size

        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.lstm = nn.LSTM(input_size=embed_dim, hidden_size=hidden_size, batch_first=True, num_layers=layers)
        #self.attention = nn.MultiheadAttention(hidden_size, num_heads=1, batch_first=True)
        self.fc = nn.Linear(hidden_size, self.vocab_size)

    def init_hidden(self, batch_size):
        h = torch.zeros(self.layers, batch_size, self.hidden_size).to(device)
        c = torch.zeros(self.layers, batch_size, self.hidden_size).to(device)
        return h, c

    def forward(self, features, caption):
        batch_size = caption.size(0)
        caption_size = caption.size(1)

        h, c = self.init_hidden(batch_size)
        embeddings = self.embedding(caption)

        lstm_input = torch.cat((features.unsqueeze(1), embeddings[:, :-1, :]), dim=1)
        output, (h, c) = self.lstm(lstm_input, (h, c))
        #output, _ = self.attention(output, output, output)

        output = self.fc(output)
        return output

    def generate_caption(self, features, max_caption_size=MAX_LEN):
        h, c = self.init_hidden(1)
        caption = ""
        embeddings = features.unsqueeze(1)

        for i in range(max_caption_size):
            output, (h, c) = self.lstm(embeddings, (h, c))
            #output, _ = self.attention(output, output, output)
            output = self.fc(output)
            _, word_index = torch.max(output, dim=2)  # take the word with highest probability
            if word_index == vocab.get_index(END_WORD):
                break
            caption += vocab.get_word(word_index) + " "
            embeddings = self.embedding(torch.LongTensor([word_index]).view(1, -1).to(device))

        return caption
and it gives relatively good results for image captioning.
I want to add the commented-out lines so the model will use attention. But when I do that, the model breaks: although the loss becomes extremely low (decreasing from 2.7 to 0.2 during training, instead of 2.7 to 1 without attention), caption generation does not really work (it predicts the same word over and over again).
My questions are:
Am I using nn.MultiheadAttention correctly? It seems odd to me that it should be used after the LSTM, but I saw this online, and it works from a dimension-size perspective.
Any idea why my model breaks when I use attention?
EDIT: I also tried to put the attention before the LSTM, and it didn't work either (the network predicted the same caption for every picture).
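One thing worth checking (an assumption about the cause, not a confirmed diagnosis): without a mask, self-attention over the whole caption lets position t look at the ground-truth words after t during training, which would be consistent with a very low training loss together with degenerate generation. A sketch of passing a causal mask inside forward, using the layer from the question:

    # Inside forward, after the LSTM. Assumes batch_first=True, so output is (batch, seq_len, hidden).
    seq_len = output.size(1)
    causal_mask = torch.triu(
        torch.ones(seq_len, seq_len, dtype=torch.bool, device=output.device),
        diagonal=1,
    )  # True entries are positions each step is NOT allowed to attend to
    output, _ = self.attention(output, output, output, attn_mask=causal_mask)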

Accuracy is not increasing, though loss is decreasing

I am feeding CNN features into a GPflow model. I am writing the relevant chunks of code from my program here. I am using tape.gradient with an Adam optimizer (scheduled learning rate). My accuracy gets stuck at 47% and, surprisingly, my loss still keeps decreasing. It's very weird. I have debugged the program: the CNN features are fine, but the GP model is not learning. Please can you check the training loop and let me know where I am wrong?
def optimization_step(gp_model: gpflow.models.SVGP, image_data, labels):
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(gp_model.trainable_variables)
        cnn_feat = cnn_model(image_data, training=False)
        cnn_feat = tf.cast(cnn_feat, dtype=default_float())
        labels = tf.cast(labels, dtype=np.int64)
        data = (cnn_feat, labels)
        loss = gp_model.training_loss(data)
        gp_grads = tape.gradient(loss, gp_model.trainable_variables)
    gp_optimizer.apply_gradients(zip(gp_grads, gp_model.trainable_variables))
    return loss, cnn_feat
The training loop is:
def simple_training_loop(gp_model: gpflow.models.SVGP, epochs: int = 3, logging_epoch_freq: int = 10):
    total_loss = []
    features = []
    tf_optimization_step = tf.function(optimization_step, autograph=False)
    for epoch in range(epochs):
        lr.assign(max(args.learning_rate_clip, args.learning_rate * (args.decay_rate ** epoch)))
        data_loader.shuffle_data(args.is_training)
        for b in range(data_loader.n_batches):
            batch_x, batch_y = data_loader.next_batch(b)
            batch_x = tf.convert_to_tensor(batch_x)
            batch_y = tf.convert_to_tensor(batch_y)
            loss, features_CNN = tf_optimization_step(gp_model, batch_x, batch_y)
I am restoring the CNN weights from checkpoints saved during transfer learning.
With more epochs, the loss continues to decrease, but accuracy starts decreasing as well.
The GP model declaration is as follows:
kernel = gpflow.kernels.Matern32() + gpflow.kernels.White(variance=0.01)
invlink = gpflow.likelihoods.RobustMax(C)
likelihood = gpflow.likelihoods.MultiClass(C, invlink=invlink)
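(For context, a minimal SVGP construction consistent with that kernel and likelihood would look roughly like the sketch below; the inducing points Z are an assumption, since the original post does not show this line.)

    # Assumed sketch of the model construction; Z is a (num_inducing, feature_dim) array.
    gp_model = gpflow.models.SVGP(kernel=kernel,
                                  likelihood=likelihood,
                                  inducing_variable=Z,
                                  num_latent_gps=C)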
The test function:
cnn_feat=cnn_model(test_x,training=False)
cnn_feat = tf.cast(cnn_feat, dtype=default_float())
mean, var = gp_model.predict_f(cnn_feat)
preds = np.argmax(mean, 1).reshape(test_labels.shape)
correct = (preds == test_labels.numpy().astype(int))
acc = np.average(correct.astype(float)) * 100
Can you please just check whether the training loop is correctly written?
The training loop looks fine. However, there are bits that should be modified for clarity and for optimisation's sake.
def simple_training_loop(gp_model: gpflow.models.SVGP, epochs: int = 3, logging_epoch_freq: int = 10):
    total_loss = []
    features = []

    @tf.function
    def compute_cnn_feat(x: tf.Tensor) -> tf.Tensor:
        return tf.cast(cnn_model(x, training=False), dtype=default_float())

    @tf.function
    def optimization_step(cnn_feat: tf.Tensor, labels: tf.Tensor):  # **Change 1.**
        with tf.GradientTape(watch_accessed_variables=False) as tape:
            tape.watch(gp_model.trainable_variables)
            data = (cnn_feat, labels)
            loss = gp_model.training_loss(data)
        gp_grads = tape.gradient(loss, gp_model.trainable_variables)  # **Change 2.**
        gp_optimizer.apply_gradients(zip(gp_grads, gp_model.trainable_variables))
        return loss

    for epoch in range(epochs):
        lr.assign(max(args.learning_rate_clip, args.learning_rate * (args.decay_rate ** epoch)))
        data_loader.shuffle_data(args.is_training)
        for b in range(data_loader.n_batches):
            batch_x, batch_y = data_loader.next_batch(b)
            batch_x = tf.convert_to_tensor(batch_x)
            batch_y = tf.convert_to_tensor(batch_y, dtype=default_float())
            cnn_feat = compute_cnn_feat(batch_x)  # **Change 3.**
            loss = optimization_step(cnn_feat, batch_y)
Change 1. Signature of a function that you wrap with tf.function should not have mutable objects.
Change 2. The gradient tape will track all computations inside the context manager, including the computation of the gradients i.e. tape.gradient(...). In turn, that means your code performs an unnecessary calculation.
Change 3. For the same reason as in "Change 2." I moved the CNN feature extraction outside of the gradient tape.
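As a tiny, generic illustration of Change 2 (not tied to the GPflow model): only the forward computation should be recorded by the tape, and tape.gradient is then called outside the with block.

    import tensorflow as tf

    x = tf.Variable(3.0)
    with tf.GradientTape() as tape:
        loss = x * x                  # forward pass recorded by the tape
    grad = tape.gradient(loss, x)     # gradient computed outside the tape's context
    print(grad)                       # tf.Tensor(6.0, shape=(), dtype=float32)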

Where do the weights get updated in this code?

I want to train a model on a distributed system. I found code on GitHub for distributed training, where the worker nodes send gradients to the parameter server and the parameter server sends the averaged gradients back to the workers. But in the client/worker-side code, I couldn't understand where the received gradients update the weights and biases.
Here is the client/worker-side code. It receives the initial variable values from the parameter server, then calculates the loss and gradients and sends the gradient values back to the server.
from __future__ import division
from __future__ import print_function

import numpy as np
import sys
import pickle as pickle
import socket
from datetime import datetime
import time

import tensorflow as tf
import cifar10

TCP_IP = 'some IP'
TCP_PORT = 5014
port = 0
port_main = 0
s = 0

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('train_dir', '/home/ubuntu/cifar10_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 5000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 10,
                            """How often to log results to the console.""")

#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.30)


def safe_recv(size, server_socket):
    data = ""
    temp = ""
    data = bytearray()
    recv_size = 0
    while 1:
        try:
            temp = server_socket.recv(size - len(data))
            data.extend(temp)
            recv_size = len(data)
            if recv_size >= size:
                break
        except:
            print("Error")
    data = bytes(data)
    return data


def train():
    """Train CIFAR-10 for a number of steps."""
    g1 = tf.Graph()
    with g1.as_default():
        global_step = tf.Variable(-1, name='global_step',
                                  trainable=False, dtype=tf.int32)
        increment_global_step_op = tf.assign(global_step, global_step + 1)

        # Get images and labels for CIFAR-10.
        images, labels = cifar10.distorted_inputs()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = cifar10.inference(images)

        # Calculate loss.
        loss = cifar10.loss(logits, labels)
        grads = cifar10.train_part1(loss, global_step)
        only_gradients = [g for g, _ in grads]

        class _LoggerHook(tf.train.SessionRunHook):
            """Logs loss and runtime."""

            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def before_run(self, run_context):
                self._step += 1
                return tf.train.SessionRunArgs(loss)  # Asks for loss value.

            def after_run(self, run_context, run_values):
                if self._step % FLAGS.log_frequency == 0:
                    current_time = time.time()
                    duration = current_time - self._start_time
                    self._start_time = current_time

                    loss_value = run_values.results
                    examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
                    sec_per_batch = float(duration / FLAGS.log_frequency)

                    format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                                  'sec/batch)')
                    print(format_str % (datetime.now(), self._step, loss_value,
                                        examples_per_sec, sec_per_batch))

        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=FLAGS.train_dir,
                hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
                       tf.train.NanTensorHook(loss),
                       _LoggerHook()],
                config=tf.ConfigProto(
                    # log_device_placement=FLAGS.log_device_placement, gpu_options=gpu_options)) as mon_sess:
                    log_device_placement=FLAGS.log_device_placement)) as mon_sess:
            global port
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((TCP_IP, port_main))
            recv_size = safe_recv(17, s)
            recv_size = pickle.loads(recv_size)
            recv_data = safe_recv(recv_size, s)
            var_vals = pickle.loads(recv_data)
            s.close()
            feed_dict = {}
            i = 0
            for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
                feed_dict[v] = var_vals[i]
                i = i + 1
            print("Received variable values from ps")
            # Opening the socket and connecting to server
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((TCP_IP, port))
            while not mon_sess.should_stop():
                gradients, step_val = mon_sess.run(
                    [only_gradients, increment_global_step_op], feed_dict=feed_dict)
                # sending the gradients
                send_data = pickle.dumps(gradients, pickle.HIGHEST_PROTOCOL)
                to_send_size = len(send_data)
                send_size = pickle.dumps(to_send_size, pickle.HIGHEST_PROTOCOL)
                s.sendall(send_size)
                s.sendall(send_data)
                # receiving the variable values
                recv_size = safe_recv(17, s)
                recv_size = pickle.loads(recv_size)
                recv_data = safe_recv(recv_size, s)
                var_vals = pickle.loads(recv_data)
                feed_dict = {}
                i = 0
                for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
                    feed_dict[v] = var_vals[i]
                    i = i + 1
            s.close()


def main(argv=None):  # pylint: disable=unused-argument
    global port
    global port_main
    global s
    if(len(sys.argv) != 3):
        print("<port> <worker-id> required")
        sys.exit()
    port = int(sys.argv[1]) + int(sys.argv[2])
    port_main = int(sys.argv[1])
    print("Connecting to port ", port)
    cifar10.maybe_download_and_extract()
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    total_start_time = time.time()
    train()
    print("--- %s seconds ---" % (time.time() - total_start_time))


if __name__ == '__main__':
    tf.app.run()
EDIT:
Here is the train_part1() code:
def train_part1(total_loss, global_step):
    """Train CIFAR-10 model.

    Create an optimizer and apply to all trainable variables. Add moving
    average for all trainable variables.

    Args:
      total_loss: Total loss from loss().
      global_step: Integer Variable counting the number of training steps
        processed.
    Returns:
      train_op: op for training.
    """
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    tf.summary.scalar('learning_rate', lr)

    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)

    # Compute gradients.
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)
    return grads
To me it seems that the line
gradients, step_val = mon_sess.run(
    [only_gradients, increment_global_step_op], feed_dict=feed_dict)
receives new values for the variables from feed_dict, assigns these values to the variables, and performs a training step during which it only calculates and returns the gradients that are later sent to the parameter server. I would expect cifar10.train_part1 (the one that returns only_gradients) to depend on the variable values and define the update.
Update: I looked into the code and changed my mind. I had to google around and found the following answer that shed some light on what is happening.
Gradients are actually not applied anywhere in this code. Instead, the gradients are sent to the parameter server, the parameter server averages the gradients and applies them to the weights, and it returns the weights to the local worker. The received weights are then used instead of the local weights during the session run, via feed_dict, i.e. the local weights are never actually updated and do not actually matter at all. The key is that feed_dict allows you to override any feedable tensor during a session run, and this code overrides the variables.
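A tiny TF1-style sketch of the feed_dict behaviour described above (a generic illustration, not part of the original code): feeding a variable overrides its value for that single session.run call only, and the variable itself is never changed.

    import tensorflow as tf  # assumes TensorFlow 1.x, like the code above

    w = tf.Variable(1.0)
    y = w * 3.0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(y))                       # 3.0  -> uses the stored value of w
        print(sess.run(y, feed_dict={w: 10.0}))  # 30.0 -> w overridden for this run only
        print(sess.run(w))                       # 1.0  -> the variable was never updated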

How can I measure Precision and Recall on Logistic Regression with PySpark?

I am using a Logistic Regression model on PySpark through Databricks, but I am not able to get my precision and recall. Everything works fine and I am able to get my ROC, but there is no attribute or library for precision and recall.
lrModel = LogisticRegression()
predictions = bestModel.transform(testData)
# Instantiate metrics object
results = predictions.select(['probability', 'label'])
results_collect = results.collect()
results_list = [(float(i[0][0]), 1.0-float(i[1])) for i in results_collect]
scoreAndLabels = sc.parallelize(results_list)
metrics = MulticlassMetrics(scoreAndLabels)
# Overall statistics
precision = metrics.precision()
recall = metrics.recall()
f1Score = metrics.fMeasure()
print("Summary Stats")
print("Precision = %s" % precision)
print("Recall = %s" % recall)
print("F1 Score = %s" % f1Score)
>>>Summary Stats
>>>Precision = 0.0
>>>Recall = 0.0
>>>F1 Score = 0.0
I was able to create my own function to do so. It returns everything and more. I am using MulticlassMetrics() from the mllib package. Since it is a multiclass metric, it calculates metrics for each label, so you have to specify which label you want to retrieve.
### Model Evaluator User Defined Functions
from pyspark.sql import functions as F
from pyspark.sql.types import DoubleType
from pyspark.mllib.evaluation import MulticlassMetrics

def udfModelEvaluator(dfPredictions, labelColumn='label'):
    colSelect = dfPredictions.select(
        [F.col('prediction').cast(DoubleType()),
         F.col(labelColumn).cast(DoubleType()).alias('label')])

    metrics = MulticlassMetrics(colSelect.rdd)
    mAccuracy = metrics.accuracy
    mPrecision = metrics.precision(1)
    mRecall = metrics.recall(1)
    mF1 = metrics.fMeasure(1.0, 1.0)
    mMatrix = metrics.confusionMatrix().toArray().astype(int)
    mTP = metrics.confusionMatrix().toArray()[1][1]
    mTN = metrics.confusionMatrix().toArray()[0][0]
    mFP = metrics.confusionMatrix().toArray()[0][1]
    mFN = metrics.confusionMatrix().toArray()[1][0]
    mResults = [mAccuracy, mPrecision, mRecall, mF1, mMatrix, mTP, mTN, mFP, mFN,
                "Return [[0]=Accuracy, [1]=Precision, [2]=Recall, [3]=F1, [4]=ConfusionMatrix, [5]=TP, [6]=TN, [7]=FP, [8]=FN]"]
    return mResults
To call the function:
metricsList = udfModelEvaluator(predictionsData, "label")
metricsList
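As a side note, a shorter route to similar numbers is the DataFrame-based evaluator from pyspark.ml (a sketch assuming the predictions DataFrame has the usual prediction and label columns):

    from pyspark.ml.evaluation import MulticlassClassificationEvaluator

    # Weighted precision/recall across all labels.
    precision = MulticlassClassificationEvaluator(
        labelCol='label', predictionCol='prediction', metricName='weightedPrecision'
    ).evaluate(predictions)
    recall = MulticlassClassificationEvaluator(
        labelCol='label', predictionCol='prediction', metricName='weightedRecall'
    ).evaluate(predictions)
    print("Precision = %s" % precision)
    print("Recall = %s" % recall)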