How to simplify DataLoader for Autoencoder in PyTorch

Is there an easier way to set up the DataLoader and load the data during training, given that the input and target are the same for an autoencoder? As it stands, the DataLoader always requires two inputs.
Currently I define my dataloader like this:
X_train = rnd.random((300,100))
X_val = rnd.random((75,100))
train = data_utils.TensorDataset(torch.from_numpy(X_train).float(), torch.from_numpy(X_train).float())
val = data_utils.TensorDataset(torch.from_numpy(X_val).float(), torch.from_numpy(X_val).float())
train_loader= data_utils.DataLoader(train, batch_size=1)
val_loader = data_utils.DataLoader(val, batch_size=1)
and train like this:
for epoch in range(50):
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = Variable(data), Variable(target).detach()
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)

Why not subclass TensorDataset to make it compatible with unlabeled data?
class UnlabeledTensorDataset(TensorDataset):
    """Dataset wrapping unlabeled data tensors.

    Each sample will be retrieved by indexing tensors along the first
    dimension.

    Arguments:
        data_tensor (Tensor): contains sample data.
    """
    def __init__(self, data_tensor):
        self.data_tensor = data_tensor

    def __len__(self):
        # defined explicitly so the DataLoader does not depend on the
        # parent class's internals
        return self.data_tensor.size(0)

    def __getitem__(self, index):
        return self.data_tensor[index]
And something along these lines for training your autoencoder
X_train = rnd.random((300,100))
train = UnlabeledTensorDataset(torch.from_numpy(X_train).float())
train_loader= data_utils.DataLoader(train, batch_size=1)
for epoch in range(50):
    for batch in train_loader:
        data = Variable(batch)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, data)
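As a side note, recent PyTorch versions let TensorDataset wrap a single tensor directly, so you may not need a subclass at all; each item just comes back as a 1-tuple. A minimal sketch, assuming such a version:
# one tensor in, 1-tuples out (no custom Dataset class needed)
train = data_utils.TensorDataset(torch.from_numpy(X_train).float())
train_loader = data_utils.DataLoader(train, batch_size=1)

for epoch in range(50):
    for (data,) in train_loader:          # unpack the single-element tuple
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, data)    # the target is the input itself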

I believe this is as simple as it gets. Other than that, I guess you will have to implement your own dataset. Sample code is below.
class ImageLoader(torch.utils.data.Dataset):
    def __init__(self, root, tform=None, imgloader=PIL.Image.open):
        super(ImageLoader, self).__init__()
        self.root = root
        self.filenames = sorted(glob(root))
        self.tform = tform
        self.imgloader = imgloader

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, i):
        out = self.imgloader(self.filenames[i])  # io.imread(self.filenames[i])
        if self.tform:
            out = self.tform(out)
        return out
You can then use it as follows.
source_dataset=ImageLoader(root='/dldata/denoise_ae/clean/*.png', tform=source_depth_transform)
target_dataset=ImageLoader(root='/dldata/denoise_ae/clean_cam_n9dmaps/*.png', tform=target_depth_transform)
source_dataloader=torch.utils.data.DataLoader(source_dataset, batch_size=32, shuffle=False, drop_last=True, num_workers=15)
target_dataloader=torch.utils.data.DataLoader(target_dataset, batch_size=32, shuffle=False, drop_last=True, num_workers=15)
To test the first batch, proceed as follows.
dataiter = iter(source_dataloader)
images = next(dataiter)
print(images.size())
Finally, you can enumerate over the loaded data in the batch training loop as follows.
for i, (source, target) in enumerate(zip(source_dataloader, target_dataloader), 0):
    source, target = Variable(source.float().cuda()), Variable(target.float().cuda())
Have fun.
PS: The code samples I shared do not load validation data.
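If you do need validation data, a minimal sketch along the same lines (the clean_val path is a hypothetical directory, not one from my setup) would be:
# hypothetical validation split, loaded exactly like the training data
val_dataset = ImageLoader(root='/dldata/denoise_ae/clean_val/*.png', tform=source_depth_transform)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=32, shuffle=False,
                                             drop_last=False, num_workers=15)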

IndexError target is out of bounds

I'm working on a custom dataset of images and using a neural net to classify them.
The dataset is about 6000 images across 58 classes, but during training I keep getting a "target is out of bounds" error.
I've double-checked the number of classes and the image size but still get the same error.
# hyperparams
learning_rate = 5e-4
# 3 for RGB values
in_channel = 3
# classes from data set
num_classes = 58
# arbitrary choice
batch_size = 32
# total number of epochs used to train the model
epochs = 3

traffic_dataset = TrafficSigns(csv_file='annotations.csv',
                               root_directory='/Users/*****/Desktop/images/',
                               transform=transforms.ToTensor())

train_size = int(0.8 * len(traffic_dataset))
test_size = len(traffic_dataset) - train_size
train, test = torch.utils.data.random_split(traffic_dataset,
                                            [train_size, test_size])

train_loader = torch.utils.data.DataLoader(train,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=4)
test_loader = torch.utils.data.DataLoader(test,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          num_workers=4)
# Create a fully connected NN
class Net(nn.Module):
    # the constructor takes the size of the data and the number of classes
    def __init__(self, input_size, num_classes):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, 60)
        self.fc2 = nn.Linear(60, num_classes)

    # define the forward step, with relu as the non-linearity
    # x is the data passed to the model
    def forward(self, x):
        x = f.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# sanity check
test = Net(2028, num_classes)
x = torch.randn(24, 2028)
print(test(x).shape)

# instantiate the NN class object
net = Net(2028, num_classes)
criterion = nn.CrossEntropyLoss()
nn_optimizer = optim.Adam(net.parameters(), lr=learning_rate)
# train over multiple epochs using the criterion and gradient descent algorithm established above
for epoch in range(1):
    for i, (data, target) in enumerate(tqdm.tqdm(train_loader)):
        data = data.reshape(data.shape[0], -1)
        # forward
        outputs = net(data)
        loss = criterion(outputs, target)
        # backward propagation
        nn_optimizer.zero_grad()
        loss.backward()
        # gradient descent step
        nn_optimizer.step()
I'm also using a custom dataset class to import the images and labels.
My first thought was that the class is not iterating over the CSV and images correctly, but I can't find where they might be mismatched.
class TrafficSigns(Dataset):
    # the constructor needs the CSV file of labels, the image directory, and the transform defined above
    def __init__(self, csv_file, root_directory, transform=None):
        self.labels = pd.read_csv(csv_file)
        self.root_directory = root_directory
        self.transform = transform

    # returns the length
    def __len__(self):
        return len(self.labels)

    # get a data item by index
    def __getitem__(self, i):
        image_path = os.path.join(self.root_directory, self.labels.iloc[i, 0])
        image = io.imread(image_path)
        y_label = torch.tensor(int(self.labels.iloc[i, 1]))
        # if statement needed since transform can be set to None
        if self.transform:
            image = self.transform(image)
        return (image, y_label)
Any help would be awesome, thank you.
Here is the full stacktrace error that's getting thrown.
IndexError Traceback (most recent call last)
/var/folders/t_/rcfcs8g56jn7trwnsvmdyh_r0000gn/T/ipykernel_34551/1839343274.py in <module>
11 #forward
12 outputs = net(data)
---> 13 loss = criterion(outputs, target)
14 #backward propigation
15 nn_optimizer.zero_grad()
~/Library/Python/3.8/lib/python/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
~/Library/Python/3.8/lib/python/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
1148
1149 def forward(self, input: Tensor, target: Tensor) -> Tensor:
-> 1150 return F.cross_entropy(input, target, weight=self.weight,
1151 ignore_index=self.ignore_index, reduction=self.reduction,
1152 label_smoothing=self.label_smoothing)
~/Library/Python/3.8/lib/python/site-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
2844 if size_average is not None or reduce is not None:
2845 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2846 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
2847
2848
IndexError: Target 125 is out of bounds.
I came across the same issue when I used a sequential model (LSTM) for next-sequence prediction. I checked the data loader and found that the labels contained -1, which is what made the cross-entropy loss throw this exception.
Check your data for null rows and remove them, or set the labels accordingly.
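More generally, a quick sanity check on the label column will show whether any target falls outside [0, num_classes - 1]. A hedged sketch, assuming the annotations.csv layout from the question (column 1 holds the class id):
import pandas as pd

labels = pd.read_csv('annotations.csv').iloc[:, 1]
print(labels.min(), labels.max())  # CrossEntropyLoss needs every label in [0, num_classes - 1]
print(labels.isna().sum())         # null rows also break the loss
print(labels.nunique())            # compare against num_classes = 58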

Embeddings in sentiment analysis task with PyTorch and Multilayer Perceptron

I have the body of the code, which should work, but I think something I'm messing up here, probably to do with the embedding, is breaking it.
import torch.nn as nn

class MultilayerPerceptron(nn.Module):
    def __init__(self, input_size, hidden_size):  # I removed output size
        # Call initializer function of the super class
        super(MultilayerPerceptron, self).__init__()
        self.embedding = nn.Embedding(INPUT_DIM, EMBEDDING_DIM)  # added this myself, maybe wrong
        # self.mlp = nn.MultilayerPerceptron(EMBEDDING_DIM, HIDDEN_DIM)  # also added
        self.INPUT_DIM = INPUT_DIM
        self.HIDDEN_DIM = HIDDEN_DIM
        self.OUTPUT_DIM = OUTPUT_DIM
        self.EMBEDDING_DIM = EMBEDDING_DIM
        # whenever this model is called, the layers in the sequential block
        # will be processed in the order given to the block
        self.model = nn.Sequential(
            # nn.Flatten(),  # adding this hopefully it works (it didn't)
            # embeds = embedded.mean(dim=1)  # god help
            nn.Linear(self.INPUT_DIM, self.HIDDEN_DIM),  # later on, multiply by embedding dimensionality  # I removed
            nn.ReLU(),
            nn.Linear(self.HIDDEN_DIM, self.OUTPUT_DIM),  # one-layer neural network
            nn.ReLU(),  # do I need this?
            nn.Sigmoid(),
        )

    def forward(self, x):
        embedded = self.embedding(x)
        # embedded = [sent len, batch size, emb dim]
        output, hidden = self.model(embedded)
        output = self.model(x)  # call the model defined above for forward propagation
        return output

INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100  # how do I fit this into the model??
HIDDEN_DIM = 256
OUTPUT_DIM = 1
model = MultilayerPerceptron(INPUT_DIM, HIDDEN_DIM)  # MLP instead
The error I get is "mat1 and mat2 shapes cannot be multiplied (50176x100 and 25002x256)".

Accuracy is not increasing, though loss is decreasing

I am feeding CNN features into a GPflow model. I am including the relevant chunks of code from my program here. I am using tape.gradient with the Adam optimizer (scheduled lr). My accuracy gets stuck at 47%, yet surprisingly my loss keeps decreasing, which is very weird. I have debugged the program: the CNN features are fine, but the GP model is not learning. Please check the training loop and let me know where I am wrong.
def optimization_step(gp_model: gpflow.models.SVGP, image_data, labels):
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(gp_model.trainable_variables)
        cnn_feat = cnn_model(image_data, training=False)
        cnn_feat = tf.cast(cnn_feat, dtype=default_float())
        labels = tf.cast(labels, dtype=np.int64)
        data = (cnn_feat, labels)
        loss = gp_model.training_loss(data)
        gp_grads = tape.gradient(loss, gp_model.trainable_variables)
    gp_optimizer.apply_gradients(zip(gp_grads, gp_model.trainable_variables))
    return loss, cnn_feat
The loop for training is:
def simple_training_loop(gp_model: gpflow.models.SVGP, epochs: int = 3, logging_epoch_freq: int = 10):
    total_loss = []
    features = []
    tf_optimization_step = tf.function(optimization_step, autograph=False)
    for epoch in range(epochs):
        lr.assign(max(args.learning_rate_clip, args.learning_rate * (args.decay_rate ** epoch)))
        data_loader.shuffle_data(args.is_training)
        for b in range(data_loader.n_batches):
            batch_x, batch_y = data_loader.next_batch(b)
            batch_x = tf.convert_to_tensor(batch_x)
            batch_y = tf.convert_to_tensor(batch_y)
            loss, features_CNN = tf_optimization_step(gp_model, batch_x, batch_y)
I am restoring the weights for the CNN from checkpoints saved during transfer learning.
With more epochs, the loss continues to decrease, but accuracy starts decreasing as well.
The GP model declaration is as follows:
kernel = gpflow.kernels.Matern32() + gpflow.kernels.White(variance=0.01)
invlink = gpflow.likelihoods.RobustMax(C)
likelihood = gpflow.likelihoods.MultiClass(C, invlink=invlink)
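For reference, the SVGP construction itself is not shown above; a hedged sketch of how it might be assembled from these pieces (the inducing points Z and feat_dim are assumptions) is:
import numpy as np
import gpflow

Z = np.random.randn(100, feat_dim)  # hypothetical inducing points in CNN-feature space
gp_model = gpflow.models.SVGP(kernel=kernel,
                              likelihood=likelihood,
                              inducing_variable=Z,
                              num_latent_gps=C)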
The test function:
cnn_feat=cnn_model(test_x,training=False)
cnn_feat = tf.cast(cnn_feat, dtype=default_float())
mean, var = gp_model.predict_f(cnn_feat)
preds = np.argmax(mean, 1).reshape(test_labels.shape)
correct = (preds == test_labels.numpy().astype(int))
acc = np.average(correct.astype(float)) * 100
Can you please check whether the training loop is correctly written?
The training loop looks fine. However, there are bits that should be modified for clarity and for optimisation's sake.
def simple_training_loop(gp_model: gpflow.models.SVGP, epochs: int = 3, logging_epoch_freq: int = 10):
    total_loss = []
    features = []

    @tf.function
    def compute_cnn_feat(x: tf.Tensor) -> tf.Tensor:
        return tf.cast(cnn_model(x, training=False), dtype=default_float())

    @tf.function
    def optimization_step(cnn_feat: tf.Tensor, labels: tf.Tensor):  # **Change 1.**
        with tf.GradientTape(watch_accessed_variables=False) as tape:
            tape.watch(gp_model.trainable_variables)
            data = (cnn_feat, labels)
            loss = gp_model.training_loss(data)
        gp_grads = tape.gradient(loss, gp_model.trainable_variables)  # **Change 2.**
        gp_optimizer.apply_gradients(zip(gp_grads, gp_model.trainable_variables))
        return loss

    for epoch in range(epochs):
        lr.assign(max(args.learning_rate_clip, args.learning_rate * (args.decay_rate ** epoch)))
        data_loader.shuffle_data(args.is_training)
        for b in range(data_loader.n_batches):
            batch_x, batch_y = data_loader.next_batch(b)
            batch_x = tf.convert_to_tensor(batch_x)
            batch_y = tf.convert_to_tensor(batch_y, dtype=default_float())
            cnn_feat = compute_cnn_feat(batch_x)  # **Change 3.**
            loss = optimization_step(cnn_feat, batch_y)
Change 1. The signature of a function that you wrap with tf.function should not have mutable objects.
Change 2. The gradient tape will track all computations inside the context manager, including the computation of the gradients i.e. tape.gradient(...). In turn, that means your code performs an unnecessary calculation.
Change 3. For the same reason as in "Change 2." I moved the CNN feature extraction outside of the gradient tape.

Correct data loading, splitting and augmentation in Pytorch

The tutorial doesn't seem to explain how we should load, split and do proper augmentation.
Let's have a dataset consisting of cats and dogs. The folder structure would be:
data
    cat
        0101.jpg
        0201.jpg
        ...
    dogs
        0101.jpg
        0201.jpg
        ...
At first, I loaded the dataset with the datasets.ImageFolder function. ImageFolder has a transform argument where we can set some augmentation, but we don't want to apply augmentation to the test dataset! So let's stay with transform=None.
data = datasets.ImageFolder(root='data')
Apparently, we don't have a train/test folder structure, so I assume a good approach would be to use the random_split function:
train_size = int(split * len(data))
test_size = len(data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(data, [train_size, test_size])
Now let's load the data the following way.
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=8,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=8,
                                          shuffle=True)
How can I apply transformations (data augmentation) to the "train_loader" images?
Basically I need to:
1. load data from the folder structure explained above
2. split the data into test/train parts
3. apply augmentations on the train part.
I am not sure if there is a recommended way of doing this, but this is how I would work around the problem:
Given that torch.utils.data.random_split() returns Subsets, we cannot exploit their inner datasets (I double-checked: we cannot), because they are the same and differ only in their indices. In this context, I would implement a simple class to apply transformations, something like this:
from torch.utils.data import Dataset

class ApplyTransform(Dataset):
    """
    Apply transformations to a Dataset

    Arguments:
        dataset (Dataset): A Dataset that returns (sample, target)
        transform (callable, optional): A function/transform to be applied on the sample
        target_transform (callable, optional): A function/transform to be applied on the target
    """
    def __init__(self, dataset, transform=None, target_transform=None):
        self.dataset = dataset
        self.transform = transform
        self.target_transform = target_transform
        # yes, you don't need these 2 lines below :(
        if transform is None and target_transform is None:
            print("Am I a joke to you? :)")

    def __getitem__(self, idx):
        sample, target = self.dataset[idx]
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target

    def __len__(self):
        return len(self.dataset)
And then use it before passing the dataset to the dataloader:
import torchvision.transforms as transforms

train_transform = transforms.Compose([
    transforms.ToTensor(),
    # ...
])
train_dataset = ApplyTransform(train_dataset, transform=train_transform)
# continue with DataLoaders...
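To finish that thought, a hedged continuation (reusing the names from the question; the plain ToTensor transform for the held-out split is an assumption) could be:
# apply only a plain ToTensor to the held-out split, then build the loaders
test_transform = transforms.Compose([transforms.ToTensor()])
test_dataset = ApplyTransform(test_dataset, transform=test_transform)

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=8, shuffle=False)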
I think you can look at this gist: https://gist.github.com/kevinzakka/d33bf8d6c7f06a9d8c76d97a7879f5cb
def get_train_valid_loader(data_dir,
                           batch_size,
                           augment,
                           random_seed,
                           valid_size=0.1,
                           shuffle=True,
                           show_sample=False,
                           num_workers=4,
                           pin_memory=False):
    """
    Utility function for loading and returning train and valid
    multi-process iterators over the CIFAR-10 dataset. A sample
    9x9 grid of the images can be optionally displayed.
    If using CUDA, num_workers should be set to 1 and pin_memory to True.
    Params
    ------
    - data_dir: path directory to the dataset.
    - batch_size: how many samples per batch to load.
    - augment: whether to apply the data augmentation scheme
      mentioned in the paper. Only applied on the train split.
    - random_seed: fix seed for reproducibility.
    - valid_size: percentage split of the training set used for
      the validation set. Should be a float in the range [0, 1].
    - shuffle: whether to shuffle the train/validation indices.
    - show_sample: plot 9x9 sample grid of the dataset.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
      True if using GPU.
    Returns
    -------
    - train_loader: training set iterator.
    - valid_loader: validation set iterator.
    """
    error_msg = "[!] valid_size should be in the range [0, 1]."
    assert ((valid_size >= 0) and (valid_size <= 1)), error_msg

    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )

    # define transforms
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    if augment:
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        train_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])

    # load the dataset
    train_dataset = datasets.CIFAR10(
        root=data_dir, train=True,
        download=True, transform=train_transform,
    )
    valid_dataset = datasets.CIFAR10(
        root=data_dir, train=True,
        download=True, transform=valid_transform,
    )

    num_train = len(train_dataset)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))

    if shuffle:
        np.random.seed(random_seed)
        np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, sampler=train_sampler,
        num_workers=num_workers, pin_memory=pin_memory,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=batch_size, sampler=valid_sampler,
        num_workers=num_workers, pin_memory=pin_memory,
    )

    # visualize some images
    if show_sample:
        sample_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=9, shuffle=shuffle,
            num_workers=num_workers, pin_memory=pin_memory,
        )
        data_iter = iter(sample_loader)
        images, labels = next(data_iter)
        X = images.numpy().transpose([0, 2, 3, 1])
        plot_images(X, labels)

    return (train_loader, valid_loader)
It seems that he uses sampler=train_sampler to do the split.
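For reference, a hedged usage sketch of that helper (the values below are just example choices; CIFAR-10 gets downloaded under data_dir):
train_loader, valid_loader = get_train_valid_loader(
    data_dir='./data',
    batch_size=64,
    augment=True,        # random crop + horizontal flip on the train split only
    random_seed=42,
    valid_size=0.1,
    shuffle=True,
)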

Using the deep neural network package "Chainer" to train a simple dataset

I'm trying to use the chainer package for a large project I'm working on. I have read through the tutorial on their website which gives an example of applying it to the MNIST dataset, but it doesn't seem to scale easily to other examples, and there's simply not enough documentation otherwise.
Their example code is as follows:
class MLP(Chain):
    def __init__(self, n_units, n_out):
        super(MLP, self).__init__(
            # the size of the inputs to each layer will be inferred
            l1=L.Linear(None, n_units),  # n_in -> n_units
            l2=L.Linear(None, n_units),  # n_units -> n_units
            l3=L.Linear(None, n_out),    # n_units -> n_out
        )

    def __call__(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        y = self.l3(h2)
        return y

train, test = datasets.get_mnist()
train_iter = iterators.SerialIterator(train, batch_size=5, shuffle=True)
test_iter = iterators.SerialIterator(test, batch_size=2, repeat=False, shuffle=False)

model = L.Classifier(MLP(100, 10))  # the input size, 784, is inferred
optimizer = optimizers.SGD()
optimizer.setup(model)

updater = training.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (4, 'epoch'), out='result')

trainer.extend(extensions.Evaluator(test_iter, model))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(['epoch', 'main/accuracy', 'validation/main/accuracy']))
trainer.extend(extensions.ProgressBar())

trainer.run()
Could someone point me in the direction of how to simply fit a straight line to a few data points in 2D? If I can understand a simple fit such as this, I should be able to scale appropriately.
Thanks for the help!
I pasted a simple regression model here.
You can use your original train data and test data as a tuple:
train = (data, label)
Here, data.shape = (number of samples, data dimension)
and label.shape = (number of samples,).
Both should have dtype numpy.float32.
import chainer
from chainer.functions import *
from chainer.links import *
from chainer.optimizers import *
from chainer import training
from chainer.training import extensions
from chainer import reporter
from chainer import datasets
import numpy


class MyNet(chainer.Chain):
    def __init__(self):
        super(MyNet, self).__init__(
            l0=Linear(None, 30, nobias=True),
            l1=Linear(None, 1, nobias=True),
        )

    def __call__(self, x, t):
        l0 = self.l0(x)
        f0 = relu(l0)
        l1 = self.l1(f0)
        f1 = flatten(l1)
        self.loss = mean_squared_error(f1, t)
        reporter.report({'loss': self.loss}, self)
        return self.loss


def get_optimizer():
    return Adam()


def training_main():
    model = MyNet()
    optimizer = get_optimizer()
    optimizer.setup(model)

    train, test = datasets.get_mnist(label_dtype=numpy.float32)

    train_iter = chainer.iterators.SerialIterator(train, 50)
    test_iter = chainer.iterators.SerialIterator(test, 50,
                                                 repeat=False,
                                                 shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (10, 'epoch'))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              'epoch'))
    trainer.run()


if __name__ == '__main__':
    training_main()
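For the straight-line case asked about above, you can swap get_mnist out for your own (data, label) pairs. A hedged sketch with my own toy values (fitting y = 2x + 1), using chainer's TupleDataset:
import numpy as np
from chainer.datasets import TupleDataset

x = np.linspace(-1.0, 1.0, 200, dtype=np.float32).reshape(-1, 1)  # data, shape (N, 1)
y = (2.0 * x + 1.0).ravel().astype(np.float32)                    # labels, shape (N,)
train = TupleDataset(x[:150], y[:150])
test = TupleDataset(x[150:], y[150:])
# pass these in place of datasets.get_mnist(...) inside training_main()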