Neural Network Is Taking 1000 Epochs Just To Solve XOR

My neural network takes 1000 epochs just to solve XOR, and when I increase the number of neurons the loss increases and it doesn't work.
Also, before, when I was using np.random.rand it didn't work with a learning rate less than 1.
import numpy as np
input = np.array([[0, 1], [1, 0], [1, 1], [0, 0]])
labels = np.array([[1], [1], [0], [0]])
np.random.seed(0)
class NN:
    def __init__(self):
        self.layers = []
    def add(self, layer):
        self.layers.append(layer)
    def loss(self, layer):
        self.loss = layer
    def predict(self, input):
        output = input
        for layer in self.layers:
            output = layer.forward(output)
        return output
    def train(self, input, labels):
        prediction = self.predict(input)
        loss = self.loss.forward(prediction, labels)
        gradient = self.loss.back() * 0.1
        for layer in self.layers[::-1]:
            gradient = layer.back(gradient)
        return loss
Dense Layer
class Dense:
    def __init__(self, input_size, output_size):
        self.weights = np.random.randn(input_size, output_size) - 0.5
        self.bias = np.random.randn(1, output_size) - 0.5
    def forward(self, input):
        self.input = input
        output = np.dot(input, self.weights) + self.bias
        return output
    def back(self, gradient):
        gradientW = np.dot(self.input.T, gradient)
        self.weights -= gradientW
        self.bias -= np.mean(gradient)
        return np.dot(gradient, self.weights.T)
Sigmoid
class Sigmoid:
    def forward(self, input):
        output = 1 / (1 + np.exp(-input))
        self.output = output
        return output
    def back(self, gradient):
        output = self.output * (1 - self.output)
        gradient *= output
        return gradient
I am using Mean Squared Error
class MSE:
    def forward(self, input, labels):
        self.input = input
        self.labels = labels
        return np.mean((labels - input)**2)
    def back(self):
        output = 2 * (self.input - self.labels)
        return output
Calling the Class
N = NN()
N.add(Dense(2, 10))
N.add(Sigmoid())
N.add(Dense(10, 1))
N.add(Sigmoid())
N.loss(MSE())
Training
for i in range(1000):
    loss = N.train(input, labels)
    if i % 99 == 0:
        print(loss)
print(N.predict(input))
Edit: I tried this and now it works with 100 epochs. Why did I need additional layers to increase the number of neurons? I also tried to train it on AND but it doesn't work.
N = NN()
N.add(Dense(2, 50))
N.add(Sigmoid())
N.add(Dense(50, 1000))
N.add(Sigmoid())
N.add(Dense(1000, 50))
N.add(Sigmoid())
N.add(Dense(50, 1))
N.add(Sigmoid())
N.loss(MSE())
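One thing that stands out in the code above: the learning rate is applied only to the loss gradient (gradient = self.loss.back() * 0.1), the weight update in Dense.back uses the raw, unscaled gradient, and the gradient passed to the previous layer is computed from the already-updated weights. Below is a minimal sketch of a more conventional Dense layer; the lr parameter and the weight initialisation scale are my own choices (lr here takes the place of the * 0.1 scaling in NN.train), not part of the original code.

import numpy as np

class Dense:
    def __init__(self, input_size, output_size, lr=0.1):
        # small, zero-mean random weights (note: randn - 0.5 above shifts the mean to -0.5)
        self.weights = np.random.randn(input_size, output_size) * 0.1
        self.bias = np.zeros((1, output_size))
        self.lr = lr
    def forward(self, input):
        self.input = input
        return np.dot(input, self.weights) + self.bias
    def back(self, gradient):
        # gradient w.r.t. this layer's input, computed before the weights are changed
        input_gradient = np.dot(gradient, self.weights.T)
        # parameter updates scaled by the learning rate
        self.weights -= self.lr * np.dot(self.input.T, gradient)
        self.bias -= self.lr * np.sum(gradient, axis=0, keepdims=True)
        return input_gradient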

Related

PyTorch mat1 and mat2 cannot be multiplied

I am getting the following error:
RuntimeError: mat1 and mat2 shapes cannot be multiplied (32x33856 and 640000x256)
I don't understand how I need to change the parameters of my net. I took the net created in this paper and tried to modify the parameters to meet my needs. This is the code; I changed the parameters of the first convolution but still get the error:
class ChordClassificationNetwork(nn.Module):
    def __init__(self, train_model=False):
        super(ChordClassificationNetwork, self).__init__()
        self.train_model = train_model
        self.flatten = nn.Flatten()
        self.firstConv = nn.Conv2d(3, 64, (3, 3))
        self.secondConv = nn.Conv2d(64, 64, (3, 3))
        self.pool = nn.MaxPool2d(2)
        self.drop = nn.Dropout(0.25)
        self.fc1 = nn.Linear(100*100*64, 256)
        self.fc2 = nn.Linear(256, 256)
        self.outLayer = nn.Linear(256, 7)
    def forward(self, x):
        x = self.firstConv(x)
        x = F.relu(x)
        x = self.pool(x)
        x = self.secondConv(x)
        x = F.relu(x)
        x = self.pool(x)
        x = self.drop(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.drop(x)
        x = self.outLayer(x)
        output = F.softmax(x, dim=1)
        return output
and this is the training file:
device = ("cuda" if torch.cuda.is_available() else "cpu")
transformations = transforms.Compose([
    transforms.Resize((100, 100))
])
num_epochs = 10
learning_rate = 0.001
train_CNN = False
batch_size = 32
shuffle = True
pin_memory = True
num_workers = 1
dataset = GuitarDataset("../chords_data/cropped_images/train", transform=transformations)
train_set, validation_set = torch.utils.data.random_split(dataset, [int(0.8 * len(dataset)), len(dataset) - int(0.8 * len(dataset))])
train_loader = DataLoader(dataset=train_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
                          pin_memory=pin_memory)
validation_loader = DataLoader(dataset=validation_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
                               pin_memory=pin_memory)
model = ChordClassificationNetwork().to(device)
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
def check_accuracy(loader, model):
    if loader == train_loader:
        print("Checking accuracy on training data")
    else:
        print("Checking accuracy on validation data")
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            scores = model(x)
            predictions = torch.tensor([1.0 if i >= 0.5 else 0.0 for i in scores]).to(device)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
    print(
        f"Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}"
    )
    return f"{float(num_correct) / float(num_samples) * 100:.2f}"
def train():
    model.train()
    for epoch in range(num_epochs):
        loop = tqdm(train_loader, total=len(train_loader), leave=True)
        if epoch % 2 == 0:
            loop.set_postfix(val_acc=check_accuracy(validation_loader, model))
        for imgs, labels in loop:
            imgs = imgs.to(device)
            labels = labels.to(device)
            outputs = model(imgs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loop.set_description(f"Epoch [{epoch}/{num_epochs}]")
            loop.set_postfix(loss=loss.item())
if __name__ == "__main__":
    train()
What am I doing wrong?
Look at the error message: the issue comes from the fc1 layer, which doesn't have the required number of input features. It is receiving a tensor of shape (batch_size, 33856) but expects (batch_size, 640000). The reduction in dimensionality is caused by the different layers you have applied to your input tensor before fc1.
You can fix this by defining fc1 with:
self.fc1 = nn.Linear(33856, 256)
Alternatively, you can use nn.LazyLinear which will initialize its weights with the appropriate number of neurons at runtime depending on the input it receives. But that's lazy:
self.fc1 = nn.LazyLinear(256)
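For reference, here is a quick way to see where 33856 comes from (a minimal sketch, assuming the 100x100 RGB input produced by the Resize transform above): each 3x3 convolution without padding removes 2 pixels per side and each 2x2 max-pool halves the spatial size, so 100 -> 98 -> 49 -> 47 -> 23, and 64 * 23 * 23 = 33856.

import torch
import torch.nn as nn

# Trace the shapes of the convolutional part on a dummy 100x100 RGB input.
features = nn.Sequential(
    nn.Conv2d(3, 64, (3, 3)), nn.ReLU(), nn.MaxPool2d(2),   # 100 -> 98 -> 49
    nn.Conv2d(64, 64, (3, 3)), nn.ReLU(), nn.MaxPool2d(2),  # 49 -> 47 -> 23
    nn.Flatten(),
)
with torch.no_grad():
    print(features(torch.zeros(1, 3, 100, 100)).shape)  # torch.Size([1, 33856])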

Constant loss through epochs

I wrote this neural network to do a Gaussian regression, but I don't understand why my loss doesn't change through the epochs. I set the learning rate to 1 to see the loss decrease, but it does not. I chose to take 2000 points to train my neural network. I have looked at several algorithms on this website and I don't really understand why my algorithm does not achieve what I expect.
I have already imported all libraries needed.
Thank you for your help
def f(x):
    return x * np.sin(x)  # function to predict
m = 2000
X_bis = np.zeros((1, m), dtype=float)
X_bis = np.random.random(m) * 10
## Create my training, validation and test set
X_train = X_bis[0:600]
X_val = X_bis[600:800]
X_test = X_bis[800:]
y_train = f(X_train)
y_val = f(X_val)
y_test = f(X_test)
mean_X_train = np.mean(X_train)
std_X_train = np.std(X_train)
mean_y_train = np.mean(y_train)
std_y_train = np.std(y_train)
class MyDataset(data.Dataset):
    def __init__(self, data_feature, data_target):
        self.data_feature = data_feature
        self.data_target = data_target
    def __len__(self):
        return len(self.data_feature)
    def __getitem__(self, index):
        X_train_normalized = (self.data_feature[index] - mean_X_train) / std_X_train
        y_train_normalized = (self.data_target[index] - mean_y_train) / std_y_train
        return torch.from_numpy(np.array(X_train_normalized, ndmin=1)).float(), torch.from_numpy(np.array(y_train_normalized, ndmin=1)).float()
training_set = MyDataset(X_train, y_train)
train_loading = torch.utils.data.DataLoader(training_set, batch_size=100)
val_set = MyDataset(X_val, y_val)
val_loading = torch.utils.data.DataLoader(val_set, batch_size=10)
test_set = MyDataset(X_test, y_test)
test_loading = torch.utils.data.DataLoader(test_set, batch_size=100)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.FC1 = nn.Linear(1, 10)
        self.FC2 = nn.Linear(10, 1)
    def forward(self, x):
        x = F.relu(self.FC1(x))
        x = self.FC2(x)
        return x
model = Net()
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(),
                            lr=1, weight_decay=0.01, momentum=0.9)
def train(net, train_loader, optimizer, epoch):
    net.train()
    total_loss = 0
    for idx, (data, target) in enumerate(train_loader, 0):
        outputs = net(data)
        loss = criterion(outputs, target)
        total_loss += loss.cpu().item()
        optimizer.step()
    print('Epoch:', epoch, 'average training loss', total_loss / len(train_loader))
def test(net, test_loader):
    net.eval()
    total_loss = 0
    for idx, (data, target) in enumerate(test_loader, 0):
        outputs = net(data)
        outputs = outputs * std_X_train + mean_X_train
        target = target * std_y_train + mean_y_train
        loss = criterion(outputs, target)
        total_loss += sqrt(loss.cpu().item())
    print('average testing loss', total_loss / len(test_loader))
for epoch in range(50):
    train(model, train_loading, optimizer, epoch)
    test(model, val_loading)
I'm wondering why you don't have loss.backward() after the line where you compute the loss (i.e., loss = criterion(outputs, target)) in your training snippet. That call backpropagates the gradients so that the network's parameters are actually updated by optimizer.step(). Also, try using lower learning rates, as lr=1 is normally too high for training such networks. Try learning rates between 0.001 and 0.01 to see whether your network learns the mapping between the input X and the target Y.
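A minimal sketch of the corrected training step, assuming the criterion and data loaders defined above (an optimizer.zero_grad() call is also added so gradients don't accumulate across batches):

def train(net, train_loader, optimizer, epoch):
    net.train()
    total_loss = 0
    for data, target in train_loader:
        optimizer.zero_grad()              # reset gradients from the previous batch
        outputs = net(data)
        loss = criterion(outputs, target)
        loss.backward()                    # backpropagate: compute the gradients
        optimizer.step()                   # update the parameters
        total_loss += loss.item()
    print('Epoch:', epoch, 'average training loss', total_loss / len(train_loader))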

Network Training is too slow for Custom Network even though network is not too large(Resnet + LSTM type network)

I have made a custom network for image data, but training on this network is very slow even though the network is not too huge.
When I train resnet150 with an LSTM, the training is quite fast, so I am not sure which operation is slowing my training down.
I have tried reducing batch_size, seq_dim and the network parameters. My guess is that some specific operation is reducing the speed (a profiling sketch follows the code below).
The dataset I am using consists of very small (96*96) grayscale images.
***** CODE *****
class Residual(nn.Module):
    def __init__(self, input_channels, num_channels,
                 use_1x1conv=True, strides=1, dilation=2, padding=1, kernel_size=5):
        super(Residual, self).__init__()
        self.conv1 = nn.Conv2d(input_channels, num_channels,
                               kernel_size=kernel_size, padding=padding, stride=strides, dilation=dilation)
        self.conv2 = nn.Conv2d(num_channels, num_channels,
                               kernel_size=kernel_size, padding=2 * padding, dilation=(2 * dilation))
        if use_1x1conv:
            self.conv3 = nn.Conv2d(input_channels, num_channels,
                                   kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        Y += X
        return F.relu(Y)
def weight_init(m):
    '''
    Usage:
        model = Model()
        model.apply(weight_init)
    '''
    if isinstance(m, nn.Conv1d):
        init.normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.Conv2d):
        init.xavier_normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.BatchNorm1d):
        init.normal_(m.weight.data, mean=1, std=0.02)
        init.constant_(m.bias.data, 0)
    elif isinstance(m, nn.BatchNorm2d):
        init.normal_(m.weight.data, mean=1, std=0.02)
        init.constant_(m.bias.data, 0)
    elif isinstance(m, nn.Linear):
        init.xavier_normal_(m.weight.data)
        init.normal_(m.bias.data)
    elif isinstance(m, nn.LSTM):
        for param in m.parameters():
            if len(param.shape) >= 2:
                init.orthogonal_(param.data)
            else:
                init.normal_(param.data)
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        num_channels = 64
        strides = 1
        dilation = 2
        padding = 4
        kernel_size = 5
        input_channel = 3
        self.fc = nn.Linear(config['hidden_dim'], config['output_dim'])
        self.lstm = None
        b1 = Residual(input_channels=input_channel, num_channels=num_channels,
                      use_1x1conv=True, strides=strides, dilation=dilation, padding=padding, kernel_size=kernel_size)
        b2 = Residual(input_channels=num_channels, num_channels=2 * num_channels,
                      use_1x1conv=True, strides=strides, dilation=2 * dilation, padding=2 * padding,
                      kernel_size=kernel_size)
        b3 = Residual(input_channels=2 * num_channels, num_channels=4 * num_channels,
                      use_1x1conv=True, strides=strides, dilation=4 * dilation, padding=4 * padding,
                      kernel_size=kernel_size)
        self.net = nn.Sequential(b1, b2, b3, nn.AdaptiveMaxPool2d((2, 2)))
        self.apply(weight_init)
    def forward(self, x):
        x = self.net(x)
        x = x.view(config['batch_size'], config['seq_dim'], -1)
        if self.lstm is None:
            self.lstm = nn.LSTM(x.size(2), config['hidden_dim'], 1, batch_first=True).to(self.device)
            for param in self.lstm.parameters():
                if len(param.shape) >= 2:
                    init.orthogonal_(param.data)
                else:
                    init.normal_(param.data)
        h0 = torch.zeros(config['layer_dim'], x.size(0), config['hidden_dim']).to(self.device)
        # Initialize cell state
        c0 = torch.zeros(config['layer_dim'], x.size(0), config['hidden_dim']).to(self.device)
        output, (hn, cn) = self.lstm(x, (h0, c0))
        output = output[:, :, :]
        output = self.fc(output)
        return output, (hn, cn)
for epoch in range(config['num_epochs']):
    print('epoch', epoch)
    running_loss = 0
    nb_classes = config['output_dim']
    confusion_matrix = torch.zeros(nb_classes, nb_classes)
    for i, (image, label) in enumerate(trainLoader):
        print('batch: ', i)
        image = image.float().to(device)
        label = label.to(device)
        optimizer.zero_grad()
        batch_size, timesteps, H, W, C = image.size()
        # Change image shape
        image = image.view(batch_size * timesteps, H, W, C)
        image = image.permute(0, 3, 1, 2)  # from NHWC to NCHW
        output, (hn, cn) = model(image)
        label = label.view(-1)
        output = output.view(-1, output.size(2))
        loss = criterion(output, label)
        loss *= config['seq_dim']
        loss.backward()  # Backward pass
        optimizer.step()  # Now we can do an optimizer step
        running_loss += loss.item()
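One way to find the slow operation is to profile a single forward/backward pass with torch.profiler. This is a minimal sketch assuming the model, device, trainLoader and the reshaping from the training loop above, and a CUDA machine (drop ProfilerActivity.CUDA when running on CPU only); the dummy output.sum() loss stands in for the real criterion just for profiling.

import torch
from torch.profiler import profile, ProfilerActivity

# Grab one batch and reshape it exactly as in the training loop above.
image, label = next(iter(trainLoader))
image = image.float().to(device)
batch_size, timesteps, H, W, C = image.size()
image = image.view(batch_size * timesteps, H, W, C).permute(0, 3, 1, 2)

# Profile one forward/backward pass and print the most expensive operators.
with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
    output, _ = model(image)
    output.sum().backward()
print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))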

L1 regulariser Pytorch acting opposite to what I expect

I'm trying to add an L1 penalty to a specific layer of a neural network, and I have the code below (in which I attempt to add an L1 penalty to the first layer). If I run it for lambda = 0 (i.e. no penalty), the output gets very close to the expected weights (those being [10, -12, 2, 11, -0.25]), and if I run it for enough epochs or reduce the batch size it will get them exactly, as in the output below:
mlp.0.weight
Parameter containing:
tensor([[ 9.8657, -11.8305, 2.0242, 10.8913, -0.1978]],
requires_grad=True)
Then, when I run it for a large lambda, say 1000, I would expect these weights to shrink towards zero, as there is a large penalty being added to the loss that we are trying to minimise. However, the opposite happens and the weights explode, as in the output below (for lam = 1000):
mlp.0.weight
Parameter containing:
tensor([[-13.9368, 9.9072, 2.2447, -11.6870, 26.7293]],
requires_grad=True)
If anyone could help me, that'd be great. I'm new to pytorch (but not the idea of regularisation), so I'm guessing it's something in my code that is the problem.
Thanks
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import numpy as np
from sklearn.linear_model import LinearRegression
class TrainDataset(Dataset):
    def __init__(self, data):
        self.data = data
    def __len__(self):
        return self.data.shape[0]
    def __getitem__(self, ind):
        x = self.data[ind][1:]
        y = self.data[ind][0]
        return x, y
class TestDataset(TrainDataset):
    def __getitem__(self, ind):
        x = self.data[ind]
        return x
torch.manual_seed(94)
x_train = np.random.rand(1000, 5)
y_train = x_train[:, 0] * 10 - x_train[:, 1] * 12 + x_train[:, 2] * 2 + x_train[:, 3] * 11 - x_train[:, 4] * 0.25
y_train = y_train.reshape(1000, 1)
x_train.shape
y_train.shape
train_data = np.concatenate((y_train, x_train), axis=1)
train_set = TrainDataset(train_data)
batch_size = 100
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.mlp = nn.Sequential(nn.Linear(5, 1, bias=False))
    def forward(self, x_mlp):
        out = self.mlp(x_mlp)
        return out
device = 'cpu'
model = MLP()
optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.82)
criterion = nn.MSELoss()
epochs = 5
lam = 0
model.train()
for epoch in range(epochs):
    losses = []
    for batch_num, input_data in enumerate(train_loader):
        optimizer.zero_grad()
        x, y = input_data
        x = x.to(device).float()
        y = y.reshape(batch_size, 1)
        y = y.to(device)
        output = model(x)
        for name, param in model.named_parameters():
            if name == 'mlp.0.weight':
                l1_norm = torch.norm(param, 1)
        loss = criterion(output, y) + lam * l1_norm
        loss.backward()
        optimizer.step()
        print('\tEpoch %d | Batch %d | Loss %6.2f' % (epoch, batch_num, loss.item()))
for name, param in model.named_parameters():
    if param.requires_grad:
        print(name)
        print(param)
I found that if I use Adagrad as the optimiser instead of SGD, it acts as expected. I will need to look into the difference between the two now, but this can be considered answered.
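A plausible reason (a sketch of my own reasoning, not a verified diagnosis): the subgradient of lam * torch.norm(param, 1) has constant magnitude lam per weight, so with lam = 1000 and lr = 0.02 a plain SGD step moves each weight by roughly 20 regardless of how small the weight already is. The weight overshoots zero on every step and cannot settle, and once the MSE term and momentum are added the updates can easily diverge. Adagrad divides the step by the accumulated gradient magnitude, so the effective step keeps shrinking. A toy illustration with the data term ignored:

# Toy illustration (not the original training loop): plain SGD on just the L1 term lam * |w|.
lam, lr, w = 1000.0, 0.02, 0.5
for step in range(6):
    grad = lam if w > 0 else -lam   # subgradient of lam * |w|
    w -= lr * grad                  # step of size lam * lr = 20, far larger than |w|
    print(step, round(w, 2))        # w bounces between 0.5 and -19.5, never settling near 0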

Initialising weights and bias with PyTorch - how to correct dimensions?

Using this model, I'm attempting to initialise my network with my predefined weights and bias:
dimensions_input = 10
hidden_layer_nodes = 5
output_dimension = 10
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(dimensions_input, hidden_layer_nodes)
        self.linear2 = torch.nn.Linear(hidden_layer_nodes, output_dimension)
        self.linear.weight = torch.nn.Parameter(torch.zeros(dimensions_input, hidden_layer_nodes))
        self.linear.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))
        self.linear2.weight = torch.nn.Parameter(torch.zeros(dimensions_input, hidden_layer_nodes))
        self.linear2.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))
    def forward(self, x):
        l_out1 = self.linear(x)
        y_pred = self.linear2(l_out1)
        return y_pred
model = Model()
criterion = torch.nn.MSELoss(size_average=False)
optim = torch.optim.SGD(model.parameters(), lr=0.00001)
def train_model():
    y_data = x_data.clone()
    for i in range(10000):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        if i % 5000 == 0:
            print(loss)
        optim.zero_grad()
        loss.backward()
        optim.step()
RuntimeError:
The expanded size of the tensor (10) must match the existing size (5)
at non-singleton dimension 1
My dimensions appear correct, as they match the corresponding linear layers?
The code provided doesn't run as posted because x_data isn't defined, so I can't be sure this is the issue, but one thing that stands out is that you should replace
self.linear2.weight = torch.nn.Parameter(torch.zeros(dimensions_input,hidden_layer_nodes))
self.linear2.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))
with
self.linear2.weight = torch.nn.Parameter(torch.zeros(output_dimension, hidden_layer_nodes))
self.linear2.bias = torch.nn.Parameter(torch.ones(output_dimension))
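The reason is that nn.Linear stores its weight with shape (out_features, in_features) and its bias with shape (out_features,). The first layer's custom weight has the same transposition problem; a sketch of a shape-consistent assignment for it (my addition, following that convention) would be:

# nn.Linear(dimensions_input, hidden_layer_nodes).weight has shape (hidden_layer_nodes, dimensions_input)
self.linear.weight = torch.nn.Parameter(torch.zeros(hidden_layer_nodes, dimensions_input))
self.linear.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))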