PyTorch mat1 and mat2 cannot be multiplied - neural-network

I am getting the following error:
RuntimeError: mat1 and mat2 shapes cannot be multiplied (32x33856 and 640000x256)
I don't understand how I need to change the parameters of my net. I took the network created in this paper and tried to modify its parameters to meet my needs. This is the code; I changed the parameters of the first convolution but still get the error:
class ChordClassificationNetwork(nn.Module):
    def __init__(self, train_model=False):
        super(ChordClassificationNetwork, self).__init__()
        self.train_model = train_model
        self.flatten = nn.Flatten()
        self.firstConv = nn.Conv2d(3, 64, (3, 3))
        self.secondConv = nn.Conv2d(64, 64, (3, 3))
        self.pool = nn.MaxPool2d(2)
        self.drop = nn.Dropout(0.25)
        self.fc1 = nn.Linear(100*100*64, 256)
        self.fc2 = nn.Linear(256, 256)
        self.outLayer = nn.Linear(256, 7)

    def forward(self, x):
        x = self.firstConv(x)
        x = F.relu(x)
        x = self.pool(x)
        x = self.secondConv(x)
        x = F.relu(x)
        x = self.pool(x)
        x = self.drop(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.drop(x)
        x = self.outLayer(x)
        output = F.softmax(x, dim=1)
        return output
and this is the training file:
device = ("cuda" if torch.cuda.is_available() else "cpu")

transformations = transforms.Compose([
    transforms.Resize((100, 100))
])

num_epochs = 10
learning_rate = 0.001
train_CNN = False
batch_size = 32
shuffle = True
pin_memory = True
num_workers = 1

dataset = GuitarDataset("../chords_data/cropped_images/train", transform=transformations)
train_set, validation_set = torch.utils.data.random_split(dataset, [int(0.8 * len(dataset)), len(dataset) - int(0.8 * len(dataset))])
train_loader = DataLoader(dataset=train_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
                          pin_memory=pin_memory)
validation_loader = DataLoader(dataset=validation_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
                               pin_memory=pin_memory)

model = ChordClassificationNetwork().to(device)
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)


def check_accuracy(loader, model):
    if loader == train_loader:
        print("Checking accuracy on training data")
    else:
        print("Checking accuracy on validation data")

    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            scores = model(x)
            predictions = torch.tensor([1.0 if i >= 0.5 else 0.0 for i in scores]).to(device)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
        print(
            f"Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}"
        )
    return f"{float(num_correct) / float(num_samples) * 100:.2f}"


def train():
    model.train()
    for epoch in range(num_epochs):
        loop = tqdm(train_loader, total=len(train_loader), leave=True)
        if epoch % 2 == 0:
            loop.set_postfix(val_acc=check_accuracy(validation_loader, model))
        for imgs, labels in loop:
            imgs = imgs.to(device)
            labels = labels.to(device)
            outputs = model(imgs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loop.set_description(f"Epoch [{epoch}/{num_epochs}]")
            loop.set_postfix(loss=loss.item())


if __name__ == "__main__":
    train()
What am I doing wrong?

Look at the error message: the issue comes from the fc1 layer, which doesn't have the required number of input features. It is receiving a tensor of shape (batch_size, 33856) but expects (batch_size, 640000). The reduction in dimensionality is caused by the different layers you have applied to your input tensor before fc1.
You can fix this by defining fc1 with:
self.fc1 = nn.Linear(33856, 256)
Alternatively, you can use nn.LazyLinear, which will initialize its weights with the appropriate number of input features at runtime, depending on the input it receives. But that's lazy:
self.fc1 = nn.LazyLinear(256)
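If you want to see where 33856 comes from, a quick sanity check (my sketch, assuming the 100x100 resize from your transforms) is to trace a dummy input through the same conv/pool stack:

import torch
import torch.nn as nn

# Dummy 3x100x100 image through the layers defined above:
x = torch.zeros(1, 3, 100, 100)
x = nn.MaxPool2d(2)(nn.Conv2d(3, 64, (3, 3))(x))   # -> (1, 64, 49, 49)
x = nn.MaxPool2d(2)(nn.Conv2d(64, 64, (3, 3))(x))  # -> (1, 64, 23, 23)
print(nn.Flatten()(x).shape)                        # torch.Size([1, 33856]) = 64 * 23 * 23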

Related

PyTorch GPU: what am I forgetting to move over to the GPU?

I'm getting this error. What am I leaving out? I feel that I have tried everything.
Also, is there not an easy way to just use the GPU and not the CPU? I feel like I have tried all those options as well, i.e. not calling .cuda() everywhere.
This is one of my first neural networks, so please go easy on me (most of it is from class).
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking arugment for argument target in method wrapper_nll_loss_forward)
import torch.cuda
import numpy as np
import time
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary

torch.cuda.set_device(0)


def load_data():
    num_workers = 0
    load_data.batch_size = 20
    transform = transforms.ToTensor()
    train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform)
    test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform)
    load_data.train_loader = torch.utils.data.DataLoader(train_data,
        batch_size=load_data.batch_size, num_workers=num_workers, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_data,
        batch_size=load_data.batch_size, num_workers=num_workers, pin_memory=True)


def visualize():
    dataiter = iter(load_data.train_loader)
    visualize.images, labels = dataiter.next()
    visualize.images = visualize.images.numpy()
    fig = plt.figure(figsize=(25, 4))
    for idx in np.arange(load_data.batch_size):
        ax = fig.add_subplot(2, load_data.batch_size/2, idx+1, xticks=[], yticks=[])
        ax.imshow(np.squeeze(visualize.images[idx]), cmap='gray')
        ax.set_title(str(labels[idx].item()))
    #plt.show()


def fig_values():
    img = np.squeeze(visualize.images[1])
    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(111)
    ax.imshow(img, cmap='gray')
    width, height = img.shape
    thresh = img.max()/2.5
    for x in range(width):
        for y in range(height):
            val = round(img[x][y], 2) if img[x][y] != 0 else 0
            ax.annotate(str(val), xy=(y, x),
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white' if img[x][y] < thresh else 'black')
    #plt.show()


load_data()
visualize()
fig_values()


class NeuralNet(nn.Module):
    def __init__(self, gpu=True):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 16).cuda()
        self.fc2 = nn.Linear(16, 10).cuda()

    def forward(self, x):
        x = x.view(-1, 28 * 28).cuda()
        x = F.relu(self.fc1(x)).cuda()
        x = self.fc2(x).cuda()
        return x


def training():
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    n_epochs = 100
    model.train().cuda()
    for epoch in range(n_epochs):
        train_loss = 0.0
        for data, target in load_data.train_loader:
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*data.size(0)
        train_loss = train_loss/len(load_data.train_loader.dataset)
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(
            epoch+1,
            train_loss
        ))


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet().to(device)
summary(model, input_size=(1, 28, 28))
training()
Your data and target are not on the GPU (considering the removal of the repeated cuda calls).
You are also making a lot of unnecessary cuda() calls that are not needed. Simply check where your data and model are: move the model to the GPU, move the data and labels to the GPU, and finally feed the data to the model.
Don't use .cuda(); use .to(device). It's safer in the long run and easily customizable in a multi-GPU setup.
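A minimal sketch of that pattern (illustrative only, assuming a single GPU):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = NeuralNet().to(device)              # move the model to the device once

for data, target in load_data.train_loader:
    data = data.to(device)                  # move the batch ...
    target = target.to(device)              # ... and its labels
    output = model(data)                    # everything now lives on the same device

Applied to your full code, the changed lines are marked with ### below: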
import torch.cuda
import numpy as np
import time
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary

torch.cuda.set_device(0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def load_data():
    num_workers = 0
    load_data.batch_size = 20
    transform = transforms.ToTensor()
    train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform)
    test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform)
    load_data.train_loader = torch.utils.data.DataLoader(train_data,
        batch_size=load_data.batch_size, num_workers=num_workers, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_data,
        batch_size=load_data.batch_size, num_workers=num_workers, pin_memory=True)


def visualize():
    dataiter = iter(load_data.train_loader)
    visualize.images, labels = dataiter.next()
    visualize.images = visualize.images.numpy()
    fig = plt.figure(figsize=(25, 4))
    for idx in np.arange(load_data.batch_size):
        ax = fig.add_subplot(2, load_data.batch_size/2, idx+1, xticks=[], yticks=[])
        ax.imshow(np.squeeze(visualize.images[idx]), cmap='gray')
        ax.set_title(str(labels[idx].item()))
    #plt.show()


def fig_values():
    img = np.squeeze(visualize.images[1])
    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(111)
    ax.imshow(img, cmap='gray')
    width, height = img.shape
    thresh = img.max()/2.5
    for x in range(width):
        for y in range(height):
            val = round(img[x][y], 2) if img[x][y] != 0 else 0
            ax.annotate(str(val), xy=(y, x),
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white' if img[x][y] < thresh else 'black')
    #plt.show()


load_data()
visualize()
fig_values()


class NeuralNet(nn.Module):
    def __init__(self, gpu=True):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 16)
        self.fc2 = nn.Linear(16, 10)

    def forward(self, x):
        x = x.view(-1, 28 * 28)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x


def training():
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    n_epochs = 100
    model.train()
    for epoch in range(n_epochs):
        train_loss = 0.0
        for data, target in load_data.train_loader:
            optimizer.zero_grad()
            ###################################
            data = data.to(device)
            target = target.to(device)
            ###################################
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*data.size(0)
        train_loss = train_loss/len(load_data.train_loader.dataset)
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(
            epoch+1,
            train_loss
        ))


model = NeuralNet().to(device)
summary(model, input_size=(1, 28, 28))
training()
Clearly your target variable is not on the GPU.
Also, it's a bad idea to call .cuda() inside forward():
def forward(self, x):
    x = x.view(-1, 28 * 28).cuda()   # BAD
    x = F.relu(self.fc1(x)).cuda()   # BAD
    x = self.fc2(x).cuda()           # BAD
    return x
Rather, remove all .cuda() calls inside forward and do this in the main loop:
for data, target in load_data.train_loader:
    data = data.cuda()
    target = target.cuda()

Network Training is too slow for Custom Network even though network is not too large (Resnet + LSTM type network)

I have made a custom network for image data, but training on this network is very slow even though the network is not too large.
When I train resnet150 with an LSTM, training is quite fast, so I'm not sure which operation is slowing my training down.
I have tried reducing batch_size, seq_dim and the network parameters. My guess is that some operation is reducing the speed.
The dataset I am using consists of images, and they are very small (96*96) grayscale images.
***** CODE *****
class Residual(nn.Module):
    def __init__(self, input_channels, num_channels,
                 use_1x1conv=True, strides=1, dilation=2, padding=1, kernel_size=5):
        super(Residual, self).__init__()
        self.conv1 = nn.Conv2d(input_channels, num_channels,
                               kernel_size=kernel_size, padding=padding, stride=strides, dilation=dilation)
        self.conv2 = nn.Conv2d(num_channels, num_channels,
                               kernel_size=kernel_size, padding=2 * padding, dilation=(2 * dilation))
        if use_1x1conv:
            self.conv3 = nn.Conv2d(input_channels, num_channels,
                                   kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        Y += X
        return F.relu(Y)


def weight_init(m):
    '''
    Usage:
        model = Model()
        model.apply(weight_init)
    '''
    if isinstance(m, nn.Conv1d):
        init.normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.Conv2d):
        init.xavier_normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.BatchNorm1d):
        init.normal_(m.weight.data, mean=1, std=0.02)
        init.constant_(m.bias.data, 0)
    elif isinstance(m, nn.BatchNorm2d):
        init.normal_(m.weight.data, mean=1, std=0.02)
        init.constant_(m.bias.data, 0)
    elif isinstance(m, nn.Linear):
        init.xavier_normal_(m.weight.data)
        init.normal_(m.bias.data)
    elif isinstance(m, nn.LSTM):
        for param in m.parameters():
            if len(param.shape) >= 2:
                init.orthogonal_(param.data)
            else:
                init.normal_(param.data)


class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        num_channels = 64
        strides = 1
        dilation = 2
        padding = 4
        kernel_size = 5
        input_channel = 3
        self.fc = nn.Linear(config['hidden_dim'], config['output_dim'])
        self.lstm = None
        b1 = Residual(input_channels=input_channel, num_channels=num_channels,
                      use_1x1conv=True, strides=strides, dilation=dilation, padding=padding, kernel_size=kernel_size)
        b2 = Residual(input_channels=num_channels, num_channels=2 * num_channels,
                      use_1x1conv=True, strides=strides, dilation=2 * dilation, padding=2 * padding,
                      kernel_size=kernel_size)
        b3 = Residual(input_channels=2 * num_channels, num_channels=4 * num_channels,
                      use_1x1conv=True, strides=strides, dilation=4 * dilation, padding=4 * padding,
                      kernel_size=kernel_size)
        self.net = nn.Sequential(b1, b2, b3, nn.AdaptiveMaxPool2d((2, 2)))
        self.apply(weight_init)

    def forward(self, x):
        x = self.net(x)
        x = x.view(config['batch_size'], config['seq_dim'], -1)
        if self.lstm is None:
            self.lstm = nn.LSTM(x.size(2), config['hidden_dim'], 1, batch_first=True).to(self.device)
            for param in self.lstm.parameters():
                if len(param.shape) >= 2:
                    init.orthogonal_(param.data)
                else:
                    init.normal_(param.data)
        h0 = torch.zeros(config['layer_dim'], x.size(0), config['hidden_dim']).to(self.device)
        # Initialize cell state
        c0 = torch.zeros(config['layer_dim'], x.size(0), config['hidden_dim']).to(self.device)
        output, (hn, cn) = self.lstm(x, (h0, c0))
        output = output[:, :, :]
        output = self.fc(output)
        return output, (hn, cn)


for epoch in range(config['num_epochs']):
    print('epoch', epoch)
    running_loss = 0
    nb_classes = config['output_dim']
    confusion_matrix = torch.zeros(nb_classes, nb_classes)
    for i, (image, label) in enumerate(trainLoader):
        print('batch: ', i)
        image = image.float().to(device)
        label = label.to(device)
        optimizer.zero_grad()
        batch_size, timesteps, H, W, C = image.size()
        # Change Image shape
        image = image.view(batch_size * timesteps, H, W, C)
        image = image.permute(0, 3, 1, 2)  # from NHWC to NCHW
        output, (hn, cn) = model(image)
        label = label.view(-1)
        output = output.view(-1, output.size(2))
        loss = criterion(output, label)
        loss *= config['seq_dim']
        loss.backward()   # Backward pass
        optimizer.step()  # Now we can do an optimizer step
        running_loss += loss.item()

InvalidType: Invalid operation is performed

I am trying to write a stacked autoencoder. Since this is a stacked autoencoder, we need to train the first autoencoder and pass its weights to the second autoencoder, so during training we need to define train_data_for_next_layer. Here I am getting the error:
InvalidType:
Invalid operation is performed in: LinearFunction (Forward)
Expect: x.shape[1] == W.shape[1]
Actual: 784 != 250
I am having an issue with the last line. Is this problem due to an incorrect model layer? I want to know what the issue is here. I have faced this problem several times before, and any detailed explanation is welcome. The code is as follows:
class AutoEncoder(chainer.Chain):
    def __init__(self, n_in, n_out, activation='relu', tied=True):
        if tied:
            super(AutoEncoder, self).__init__(
                l1=L.Linear(n_in, n_out)
            )
            self.add_param('decoder_bias', n_in)
            self.decoder_bias.data[...] = 0
        else:
            super(AutoEncoder, self).__init__(
                l1=L.Linear(n_in, n_out),
                l2=L.Linear(n_out, n_in)
            )
        self.tied = tied
        self.n_in = n_in
        self.n_out = n_out
        self.activation = {'relu': F.relu, 'sigmoid': F.sigmoid,
                           'identity': F.identity}[activation]

    def __call__(self, x, train=True):
        h1 = F.dropout(self.activation(self.l1(x)), train=train)
        if self.tied:
            return self.activation(F.linear(h1, F.transpose(self.l1.W),
                                            self.decoder_bias))
        else:
            return self.activation(self.l2(h1))

    def encode(self, x, train=True):
        return F.dropout(self.activation(self.l1(x)), train=train)

    def decode(self, x, train=True):
        if self.tied:
            return self.activation(F.linear(x, F.transpose(self.l1.W),
                                            self.decoder_bias))
        else:
            return self.activation(self.l2(x))


class StackedAutoEncoder(chainer.ChainList):
    def __init__(self, autoencoders):
        super(StackedAutoEncoder, self).__init__()
        for ae in autoencoders:
            self.add_link(ae)

    def __call__(self, x, train=True, depth=0):
        if depth == 0: depth = len(self)
        h = x
        for i in range(depth):
            h = self[i].encode(h, train=train)
        for i in range(depth):
            if i == depth-1:  # do not use dropout in the output layer
                train = False
            h = self[depth-1-i].decode(h, train=train)
        return h

    def encode(self, x, train=True, depth=0):
        if depth == 0: depth = len(self)
        h = x
        for i in range(depth):
            h = self[i].encode(h, train=train)
        return h

    def decode(self, x, train=True, depth=0):
        if depth == 0: depth = len(self)
        h = x
        for i in range(depth):
            if i == depth-1:  # do not use dropout in the output layer
                train = False
            h = self[depth-1-i].decode(h, train=train)
        return h


class Regression(chainer.Chain):
    def __init__(self, predictor):
        super(Regression, self).__init__(predictor=predictor)

    def __call__(self, x, t):
        y = self.predictor(x, True)
        self.loss = F.mean_squared_error(y, t)
        return self.loss

    def dump(self, x):
        return self.predictor(x, False)


initmodel = ''
resume = ''
gpu = -1
epoch_pre = 20
epoch_fine = 20
batchsize = 100
noise = 0
optimizer = 'adam'
learningrate = 0.01
alpha = 0.001
unit = '1000, 500, 250, 2'
activation = 'sigmoid'
untied = False

batchsize = batchsize
n_epoch = epoch_pre
n_epoch_fine = epoch_fine
n_units = list(map(int, unit.split(',')))
activation = activation

mnist = fetch_mldata('MNIST original', data_home='.')
perm = np.random.permutation(len(mnist.data))
mnist.data = mnist.data.astype(np.float32) / 255
train_data = mnist.data[perm][:60000]
test_data = mnist.data[perm][60000:]

# prepare layers
aes = []
for idx in range(len(n_units)):
    n_in = n_units[idx-1] if idx > 0 else 28*28
    n_out = n_units[idx]
    ae = AutoEncoder(n_in, n_out, activation, tied=True)
    aes.append(ae)

# prepare train data for next layer
x = chainer.Variable(np.array(train_data))
train_data_for_next_layer = cuda.to_cpu(ae.encode(x, train=False))
The InvalidType error indicates that the shape of the array given to F.linear is wrong.
Expect: x.shape[1] == W.shape[1]
Actual: 784 != 250
In this case, for the given input x and W, F.linear expects x.shape[1] to be the same as W.shape[1], but it is not.
For a more detailed description of the error message, see https://docs.chainer.org/en/stable/tips.html#how-do-i-fix-invalidtype-error, which explains how to interpret it.
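As a side note (my sketch, not part of the original answer): in a greedy layer-wise setup, each autoencoder is usually fed the encoding produced by the previously trained layers, so that x.shape[1] always matches the current layer's n_in. Roughly:

# Hypothetical loop, reusing the classes above: feed each autoencoder
# the previous layer's encoding so the feature dimensions line up.
h = chainer.Variable(np.asarray(train_data))   # shape (N, 784)
for ae in aes:
    # ... pre-train `ae` on `h` here ...
    h = ae.encode(h, train=False)              # shape (N, ae.n_out)
train_data_for_next_layer = cuda.to_cpu(h.data)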

BNN with regression using PyMC3

I'm trying to build a BNN for a regression task, and I get a result that doesn't seem right.
My code
First, build toy data
#Toy model
def build_toy_dataset(N=50, noise_std=0.2):
    x = np.linspace(-3, 3, num=N)
    y = np.cos(x) + np.random.normal(0, noise_std, size=N)
    x = x.reshape((N, 1))
    x = scale(x)
    x = x.astype(floatX)
    y = y.astype(floatX)
    return x, y

N = 50  # number of data points
D = 1   # number of features

X_train, Y_train = build_toy_dataset(N)
X_test, Y_test = build_toy_dataset(N)

fig, ax = plt.subplots()
ax.plot(X_test, Y_test, 'ro', X_train, Y_train, 'bx', alpha=0.2)
ax.legend(['Y_test', 'Y_train'])
ax.set(xlabel='X', ylabel='Y', title='Toy Regression data set');

X = scale(X)
X = X.astype(floatX)
Y = Y.astype(floatX)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)
Then, define the BNN with its output:
# 2 layers with 5 nodes each
def construct_nn_2Layers(ann_input, ann_output):
    n_hidden = 5
    n_features = ann_input.get_value().shape[1]

    # Initialize random weights between each layer
    init_1 = np.random.randn(n_features, n_hidden).astype(floatX)
    init_2 = np.random.randn(n_hidden, n_hidden).astype(floatX)
    init_out = np.random.randn(n_hidden).astype(floatX)

    # Initialize random biases in each layer
    init_b_1 = np.random.randn(n_hidden).astype(floatX)
    init_b_2 = np.random.randn(n_hidden).astype(floatX)
    init_b_out = np.random.randn(1).astype(floatX)

    with pm.Model() as neural_network:
        # Weights from input to hidden layer
        weights_in_1 = pm.Normal('w_in_1', 0, sd=1,
                                 shape=(n_features, n_hidden),
                                 testval=init_1)
        bias_1 = pm.Normal('b_1', mu=0, sd=1, shape=(n_hidden), testval=init_b_1)

        # Weights from 1st to 2nd layer
        weights_1_2 = pm.Normal('w_1_2', 0, sd=1,
                                shape=(n_hidden, n_hidden),
                                testval=init_2)
        bias_2 = pm.Normal('b_2', mu=0, sd=1, shape=(n_hidden), testval=init_b_2)

        # Weights from hidden layer to output
        weights_2_out = pm.Normal('w_2_out', 0, sd=1,
                                  shape=(n_hidden,),
                                  testval=init_out)
        bias_out = pm.Normal('b_out', mu=0, sd=1, shape=(1), testval=init_b_out)

        # Build neural network using tanh activation function
        act_1 = pm.math.tanh(pm.math.dot(ann_input, weights_in_1) + bias_1)
        act_2 = pm.math.tanh(pm.math.dot(act_1, weights_1_2) + bias_2)
        act_out = pm.math.dot(act_2, weights_2_out) + bias_out

        sd = pm.HalfNormal('sd', sd=1)
        out = pm.Normal('out', mu=act_out, sd=sd, observed=ann_output)

    return neural_network
Then construct:
ann_input = theano.shared(X_train)
ann_output = theano.shared(Y_train)
neural_network = construct_nn_2Layers(ann_input, ann_output)
run ADVI:
with neural_network:
    inference_no_s = pm.ADVI()
    # Checking convergence - Tracking parameters
    tracker = pm.callbacks.Tracker(
        mean=inference_no_s.approx.mean.eval,  # callable that returns mean
        std=inference_no_s.approx.std.eval     # callable that returns std
    )
    approx_no_s = pm.fit(n=30000, method=inference_no_s, callbacks=[tracker])
Predict in test:
ann_input.set_value(X_test)
ann_output.set_value(Y_test)

with neural_network:
    ppc = pm.sample_posterior_predictive(trace, samples=500, progressbar=False)
And this is what I get, which doesn't seem right. What am I doing wrong?

Initialising weights and bias with PyTorch - how to correct dimensions?

Using this model, I'm attempting to initialise my network with my predefined weights and biases:
dimensions_input = 10
hidden_layer_nodes = 5
output_dimension = 10


class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(dimensions_input, hidden_layer_nodes)
        self.linear2 = torch.nn.Linear(hidden_layer_nodes, output_dimension)
        self.linear.weight = torch.nn.Parameter(torch.zeros(dimensions_input, hidden_layer_nodes))
        self.linear.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))
        self.linear2.weight = torch.nn.Parameter(torch.zeros(dimensions_input, hidden_layer_nodes))
        self.linear2.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))

    def forward(self, x):
        l_out1 = self.linear(x)
        y_pred = self.linear2(l_out1)
        return y_pred


model = Model()
criterion = torch.nn.MSELoss(size_average=False)
optim = torch.optim.SGD(model.parameters(), lr=0.00001)


def train_model():
    y_data = x_data.clone()
    for i in range(10000):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        if i % 5000 == 0:
            print(loss)
        optim.zero_grad()
        loss.backward()
        optim.step()
RuntimeError:
The expanded size of the tensor (10) must match the existing size (5)
at non-singleton dimension 1
My dimensions appear correct, as they match the corresponding linear layers?
The code provided doesn't run because x_data isn't defined, so I can't be sure that this is the issue, but one thing that strikes me is that you should replace
self.linear2.weight = torch.nn.Parameter(torch.zeros(dimensions_input,hidden_layer_nodes))
self.linear2.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))
with
self.linear2.weight = torch.nn.Parameter(torch.zeros(hidden_layer_nodes, output_dimension))
self.linear2.bias = torch.nn.Parameter(torch.ones(output_dimension))
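One extra caveat (my note, not from the original answer): nn.Linear stores its weight with shape (out_features, in_features), which you can check directly, so any manually assigned Parameter has to follow that convention:

import torch

# Inspect the layout nn.Linear uses for its parameters.
layer = torch.nn.Linear(5, 10)    # in_features=5, out_features=10
print(layer.weight.shape)         # torch.Size([10, 5]) -> (out_features, in_features)
print(layer.bias.shape)           # torch.Size([10])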