Animated decision boundary visualization of a perceptron (neural network)

I built a perceptron class from scratch in Python, and now I'm trying to create an animated visualization of the decision boundary at every iteration of the learning process. The problem is that my code doesn't work: it looks like animation_func is never called, and I don't know why. Can you help me?
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation


class Perceptron:
    def __init__(self, X=None, y=None, lr=0.001, niter=1000):
        self.lr = lr
        self.niter = niter
        self.w = None
        self.b = None

    def fit(self, X, y):
        # one pass over the data with the perceptron update rule
        y_ = np.where(y > 0, 1, 0)
        for indice, X_i in enumerate(X):
            update = self.lr * (y_[indice] - self.predic(X_i))
            self.w += update * X_i
            self.b += update

    def animar_perceptron(self, X, y, niter):
        samples, features = X.shape
        self.w = np.zeros(features)
        self.b = 0

        x0_1 = np.amin(X[:, 0])
        x0_2 = np.amax(X[:, 0])
        ymin = np.amin(X[:, 1])
        ymax = np.amax(X[:, 1])

        fig, ax = plt.subplots()
        ax.scatter(X[:, 0], X[:, 1], marker='o', c=y)
        ax.set_ylim(ymin - 1, ymax + 1)
        boundary, = ax.plot([x0_1, x0_2], [0, 0], 'k')

        def animation_func(_):
            # one training pass per frame, then redraw the boundary
            self.fit(X, y)
            x1_1 = (-self.w[0] * x0_1 - self.b) / self.w[1]
            x1_2 = (-self.w[0] * x0_2 - self.b) / self.w[1]
            boundary.set_ydata([x1_1, x1_2])
            return boundary,

        return FuncAnimation(fig, func=animation_func,
                             frames=np.arange(1, niter), interval=50)

    def predic(self, X):
        y_hat = np.dot(X, self.w) + self.b
        return self.activate_fun(y_hat)

    def activate_fun(self, z):
        return np.where(z > 0, 1, 0)

    def score(self, y_true, y_pred):
        return np.sum(y_true == y_pred) / len(y_true)


p = Perceptron()
animacion = p.animar_perceptron(X, y, 1000)  # X, y are defined elsewhere
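The most likely reason animation_func never runs is that nothing ever renders the figure: FuncAnimation only calls its callback while the animation is actually being drawn or saved, so the object it returns has to be kept alive and the figure shown (plt.show()) or the animation written to a file. Also note that animar_perceptron is a method, so it has to be called on the instance. A minimal driving sketch, using make_blobs purely as stand-in data (the data set and niter value here are assumptions):

from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt

X, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=0)

p = Perceptron(lr=0.001)
animacion = p.animar_perceptron(X, y, niter=200)  # keep this reference alive

plt.show()                                        # drives the animation
# or: animacion.save("perceptron.gif", writer="pillow")

In a Jupyter notebook, displaying it with IPython.display.HTML(animacion.to_jshtml()) instead of plt.show() also works.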

Related

Global fit of a coupled ODE system with lmfit

I'm trying to do a global fit of multiple data sets with a system of 4 coupled ODEs.
I have working code that solves the system of 4 coupled ODEs for a single data set, and I have working code that does a global fit with an arbitrary function (but not using odeint).
My problem is that I'm not able to merge the two pieces of code...
Code for coupled ODEs
t =
data=
def gauss(x, amp, sigma, center):
    """Gaussian lineshape."""
    return amp * np.exp(-(x - center)**2 / (2. * sigma**2))

def f(xs, t, ps):
    """Right-hand side of the four coupled ODEs."""
    try:
        amp = ps['amp'].value
        center = ps['center'].value
        sigma = ps['sigma'].value
        T1 = ps['T1'].value
        Teq = ps['Teq'].value
    except Exception:
        amp, center, sigma, T1, Teq = ps
    s0, s1, s2, s3 = xs
    return [- gauss(t, amp, sigma, center) * (s0 - s1),
            gauss(t, amp, sigma, center) * (s0 - s1) - s1/T1,
            (s1/T1 - s2/Teq),
            (s2/Teq)]

def g(t, x0, ps):
    x = odeint(f, x0, t, args=(ps,))
    return x

def residual(ps, ts, data):
    x0 = ps['s0'].value, ps['s1'].value, ps['s2'].value, ps['s3'].value
    b = ps['b'].value
    model = (((g(ts, x0, ps)[:, 0] - g(ts, x0, ps)[:, 1] + g(ts, x0, ps)[:, 2] + b*g(ts, x0, ps)[:, 3]))**2) / ((g(ts, x0, ps)[0, 0]))**2
    return (model - data).ravel()

# set parameters including bounds
params = Parameters()
params.add('s0', value=1, vary=False)
params.add('s1', value=0, vary=False)
params.add('s2', value=0, vary=False)
params.add('s3', value=0, vary=False)
params.add('amp', value=0.02)
params.add('center', value=5)
params.add('sigma', value=0.1)
params.add('T1', value=0.3)
params.add('Teq', value=0.7)
params.add('b', value=-1)

# fit model and find predicted values
result = minimize(residual, params, args=(t, data), method='leastsq')
final = data + result.residual.reshape(data.shape)
Following the example here: https://lmfit.github.io/lmfit-py/examples/example_fit_multi_datasets.html
I've tried to write the global-fit code for this case myself:
def gauss(x, amp, sigma, center):
    """Gaussian lineshape."""
    return amp * np.exp(-(x - center)**2 / (2. * sigma**2))

def f(xs, t, ps):
    """Right-hand side of the four coupled ODEs."""
    try:
        amp = ps['amp'].value
        center = ps['center'].value
        sigma = ps['sigma'].value
        T1 = ps['T1'].value
        Teq = ps['Teq'].value
    except Exception:
        amp, center, sigma, T1, Teq = ps
    s0, s1, s2, s3 = xs
    return [- gauss(t, amp, sigma, center) * (s0 - s1),
            gauss(t, amp, sigma, center) * (s0 - s1) - s1/T1,
            (s1/T1 - s2/Teq),
            (s2/Teq)]

def g(t, x0, params):
    """Solution of the ODE system x'(t) = f(t, x, k) with initial condition x(0) = x0."""
    x = odeint(f, x0, t, args=(params,))
    return x

def testmodel(params, ts, data):
    x0 = params['s0'].value, params['s1'].value, params['s2'].value, params['s3'].value
    b = params['b'].value
    model = (((g(ts, x0, params)[:, 0] - g(ts, x0, params)[:, 1] + g(ts, x0, params)[:, 2] + b*g(ts, x0, params)[:, 3]))**2) / ((g(ts, x0, params)[0, 0]))**2
    return model

def testmodel_dataset(params, i, x):
    """Calculate the model curve from the parameters of data set i."""
    x0 = params[f's0_{i+1}'], params[f's1_{i+1}'], params[f's2_{i+1}'], params[f's3_{i+1}']
    amp = params[f'amp_{i+1}']
    center = params[f'center_{i+1}']
    sigma = params[f'sigma_{i+1}']
    T1 = params[f'T1_{i+1}']
    Teq = params[f'Teq_{i+1}']
    b = params[f'b_{i+1}']
    return testmodel(params, x, data)

def objective(params, x, data):
    """Calculate the total residual for fits to several data sets."""
    ndata, _ = data.shape
    resid = 0.0 * data[:]
    # make residual per data set
    for i in range(ndata):
        resid[i, :] = data[i, :] - testmodel_dataset(params, i, x)
    # now flatten this to a 1D array, as minimize() needs
    return resid.flatten()

fit_params = Parameters()
for iy, y in enumerate(data):
    fit_params.add(f's0_{iy+1}', value=1)
    fit_params.add(f's1_{iy+1}', value=0)
    fit_params.add(f's2_{iy+1}', value=0)
    fit_params.add(f's3_{iy+1}', value=0)
    fit_params.add(f'amp_{iy+1}', value=0.5)
    fit_params.add(f'center_{iy+1}', value=0.5)
    fit_params.add(f'sigma_{iy+1}', value=0.5)
    fit_params.add(f'T1_{iy+1}', value=0.5)
    fit_params.add(f'Teq_{iy+1}', value=0.4)
    fit_params.add(f'b_{iy+1}', value=0.3)

for iy in (2, 3, 4, 5, 6):
    fit_params[f'sigma_{iy}'].expr = 'sigma_1'

out = minimize(objective, fit_params, args=(x, data))
report_fit(out.params)
Result -> KeyError: 's0'
There is a problem with x0 and s0, s1, s2, s3, the populations of the four states.
I'm sorry if the question is very naive...
Thank you for your help.
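The KeyError comes from the last line of testmodel_dataset: it passes the full fit_params object on to testmodel(), which then looks up plain names like params['s0'], while fit_params only contains the suffixed names 's0_1', 's1_1', and so on (the per-dataset values extracted just above are never used). One way around this, sketched here under the assumption that the naming scheme above is kept, is to build a small per-dataset Parameters object with the unsuffixed names before calling testmodel():

from lmfit import Parameters

def params_for_dataset(params, i):
    """Collect the i-th data set's parameters under the plain names f() expects."""
    p = Parameters()
    for name in ('s0', 's1', 's2', 's3', 'amp', 'center', 'sigma', 'T1', 'Teq', 'b'):
        p.add(name, value=params[f'{name}_{i+1}'].value)
    return p

def testmodel_dataset(params, i, x):
    """Model curve for data set i, using that data set's own parameter values."""
    p_i = params_for_dataset(params, i)
    return testmodel(p_i, x, None)   # testmodel() never uses its data argument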

PyTorch: mat1 and mat2 shapes cannot be multiplied

I am getting the following error:
RuntimeError: mat1 and mat2 shapes cannot be multiplied (32x33856 and 640000x256)
I don't understand how I need to change the parameters of my net. I took the net created in this paper and tried to modify the parameters to meet my needs. This is the code; I changed the parameters of the first convolution but still get the error:
class ChordClassificationNetwork(nn.Module):
    def __init__(self, train_model=False):
        super(ChordClassificationNetwork, self).__init__()
        self.train_model = train_model
        self.flatten = nn.Flatten()
        self.firstConv = nn.Conv2d(3, 64, (3, 3))
        self.secondConv = nn.Conv2d(64, 64, (3, 3))
        self.pool = nn.MaxPool2d(2)
        self.drop = nn.Dropout(0.25)
        self.fc1 = nn.Linear(100*100*64, 256)
        self.fc2 = nn.Linear(256, 256)
        self.outLayer = nn.Linear(256, 7)

    def forward(self, x):
        x = self.firstConv(x)
        x = F.relu(x)
        x = self.pool(x)
        x = self.secondConv(x)
        x = F.relu(x)
        x = self.pool(x)
        x = self.drop(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.drop(x)
        x = self.outLayer(x)
        output = F.softmax(x, dim=1)
        return output
and this is the training file:
device = ("cuda" if torch.cuda.is_available() else "cpu")
transformations = transforms.Compose([
transforms.Resize((100, 100))
])
num_epochs = 10
learning_rate = 0.001
train_CNN = False
batch_size = 32
shuffle = True
pin_memory = True
num_workers = 1
dataset = GuitarDataset("../chords_data/cropped_images/train", transform=transformations)
train_set, validation_set = torch.utils.data.random_split(dataset, [int(0.8 * len(dataset)), len(dataset) - int(0.8*len(dataset))])
train_loader = DataLoader(dataset=train_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
pin_memory=pin_memory)
validation_loader = DataLoader(dataset=validation_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
pin_memory=pin_memory)
model = ChordClassificationNetwork().to(device)
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
def check_accuracy(loader, model):
if loader == train_loader:
print("Checking accuracy on training data")
else:
print("Checking accuracy on validation data")
num_correct = 0
num_samples = 0
model.eval()
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device)
scores = model(x)
predictions = torch.tensor([1.0 if i >= 0.5 else 0.0 for i in scores]).to(device)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
print(
f"Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}"
)
return f"{float(num_correct) / float(num_samples) * 100:.2f}"
def train():
model.train()
for epoch in range(num_epochs):
loop = tqdm(train_loader, total=len(train_loader), leave=True)
if epoch % 2 == 0:
loop.set_postfix(val_acc=check_accuracy(validation_loader, model))
for imgs, labels in loop:
imgs = imgs.to(device)
labels = labels.to(device)
outputs = model(imgs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loop.set_description(f"Epoch [{epoch}/{num_epochs}]")
loop.set_postfix(loss=loss.item())
if __name__ == "__main__":
train()
What am I doing wrong?
Look at the error message: the issue comes from the fc1 layer, which doesn't have the expected number of input features. It is receiving a tensor of shape (batch_size, 33856) but expects (batch_size, 640000). The reduction in dimensionality is caused by the layers applied to the input tensor before fc1: each convolution and pooling step shrinks the spatial size.
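For reference, with the 100x100 inputs produced by the Resize transform, the spatial size goes 100 -> 98 (3x3 conv) -> 49 (pool) -> 47 (3x3 conv) -> 23 (pool), so the flattened size is 64 * 23 * 23 = 33856. A quick way to check this without doing the arithmetic by hand is to push a dummy batch through the convolutional part; this standalone snippet just mirrors the layers defined above:

import torch
import torch.nn as nn

# same conv/pool stack as in ChordClassificationNetwork.forward
convs = nn.Sequential(
    nn.Conv2d(3, 64, (3, 3)), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(64, 64, (3, 3)), nn.ReLU(), nn.MaxPool2d(2),
)

with torch.no_grad():
    dummy = torch.zeros(1, 3, 100, 100)   # one fake 100x100 RGB image
    print(convs(dummy).flatten(1).shape)  # torch.Size([1, 33856])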
You can fix this by defining fc1 with:
self.fc1 = nn.Linear(33856, 256)
Alternatively, you can use nn.LazyLinear, which initializes its weight matrix at runtime with the appropriate input size, inferred from the first batch it receives:
self.fc1 = nn.LazyLinear(256)

Neural Network Is Taking 1000 Epochs Just To Solve XOR

My neural network takes 1000 epochs just to solve XOR, and when I increase the number of neurons the loss increases and it doesn't work.
Also, before, when I was using np.random.rand, it didn't work with a learning rate less than 1.
import numpy as np

input = np.array([[0, 1], [1, 0], [1, 1], [0, 0]])
labels = np.array([[1], [1], [0], [0]])

np.random.seed(0)

class NN:
    def __init__(self):
        self.layers = []

    def add(self, layer):
        self.layers.append(layer)

    def loss(self, layer):
        self.loss = layer

    def predict(self, input):
        output = input
        for layer in self.layers:
            output = layer.forward(output)
        return output

    def train(self, input, labels):
        prediction = self.predict(input)
        loss = self.loss.forward(prediction, labels)
        gradient = self.loss.back() * 0.1
        for layer in self.layers[::-1]:
            gradient = layer.back(gradient)
        return loss
Dense Layer
class Dense:
    def __init__(self, input_size, output_size):
        self.weights = np.random.randn(input_size, output_size) - 0.5
        self.bias = np.random.randn(1, output_size) - 0.5

    def forward(self, input):
        self.input = input
        output = np.dot(input, self.weights) + self.bias
        return output

    def back(self, gradient):
        gradientW = np.dot(self.input.T, gradient)
        self.weights -= gradientW
        self.bias -= np.mean(gradient)
        return np.dot(gradient, self.weights.T)
Sigmoid
class Sigmoid:
    def forward(self, input):
        output = 1 / (1 + np.exp(-input))
        self.output = output
        return output

    def back(self, gradient):
        output = self.output * (1 - self.output)
        gradient *= output
        return gradient
I am using Mean Squared Error:
class MSE:
    def forward(self, input, labels):
        self.input = input
        self.labels = labels
        return np.mean((labels - input)**2)

    def back(self):
        output = 2 * (self.input - self.labels)
        return output
Calling the Class
N = NN()
N.add(Dense(2, 10))
N.add(Sigmoid())
N.add(Dense(10, 1))
N.add(Sigmoid())
N.loss(MSE())
Training
for i in range(1000):
    loss = N.train(input, labels)
    if i % 99 == 0:
        print(loss)

print(N.predict(input))
Edit: I tried this and now it works within 100 epochs. Why did I need additional layers just to increase the number of neurons? I also tried to train it on AND, but that doesn't work.
N = NN()
N.add(Dense(2, 50))
N.add(Sigmoid())
N.add(Dense(50, 1000))
N.add(Sigmoid())
N.add(Dense(1000, 50))
N.add(Sigmoid())
N.add(Dense(50, 1))
N.add(Sigmoid())
N.loss(MSE())
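Part of the trouble is in the Dense layer itself rather than in the network size: the weights are drawn with np.random.randn(...) - 0.5, which shifts every weight towards negative values; the bias update self.bias -= np.mean(gradient) collapses the gradient to a single scalar instead of keeping one value per output unit; and the learning rate is applied only once, at the loss, via gradient = self.loss.back() * 0.1. Below is a minimal sketch of a more conventional Dense layer (keeping the NN, Sigmoid, and MSE classes above unchanged, and removing the * 0.1 in NN.train, since the layer now applies the learning rate itself):

class Dense:
    def __init__(self, input_size, output_size, lr=0.1):
        # zero-mean initialisation, no negative shift
        self.weights = np.random.randn(input_size, output_size) * 0.5
        self.bias = np.zeros((1, output_size))
        self.lr = lr

    def forward(self, input):
        self.input = input
        return np.dot(input, self.weights) + self.bias

    def back(self, gradient):
        # gradient: (batch, output_size); compute the input gradient
        # before the weights are updated
        grad_input = np.dot(gradient, self.weights.T)
        self.weights -= self.lr * np.dot(self.input.T, gradient)
        # sum over the batch, keeping one bias value per output unit
        self.bias -= self.lr * gradient.sum(axis=0, keepdims=True)
        return grad_input

With this, a small 2-4-1 network typically converges on XOR (and on AND) without needing the extra hidden layers, which in the edited version were mostly compensating for the skewed initialisation and the collapsed bias update.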

InvalidType: Invalid operation is performed

I am trying to write a stacked autoencoder. Since this is a stacked autoencoder, we need to train the first autoencoder and pass its weights to the second autoencoder. So during training we need to define train_data_for_next_layer. Here I am getting the error:
InvalidType:
Invalid operation is performed in: LinearFunction (Forward)
Expect: x.shape[1] == W.shape[1]
Actual: 784 != 250
I am having an issue with the last line. Is this problem due to an incorrect model layer? I want to know what the issue is here. I have faced this problem several times before, and any detailed explanation is welcome. The code is as follows:
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda
from sklearn.datasets import fetch_mldata

class AutoEncoder(chainer.Chain):
    def __init__(self, n_in, n_out, activation='relu', tied=True):
        if tied:
            super(AutoEncoder, self).__init__(
                l1=L.Linear(n_in, n_out)
            )
            self.add_param('decoder_bias', n_in)
            self.decoder_bias.data[...] = 0
        else:
            super(AutoEncoder, self).__init__(
                l1=L.Linear(n_in, n_out),
                l2=L.Linear(n_out, n_in)
            )
        self.tied = tied
        self.n_in = n_in
        self.n_out = n_out
        self.activation = {'relu': F.relu, 'sigmoid': F.sigmoid,
                           'identity': F.identity}[activation]

    def __call__(self, x, train=True):
        h1 = F.dropout(self.activation(self.l1(x)), train=train)
        if self.tied:
            return self.activation(F.linear(h1, F.transpose(self.l1.W),
                                            self.decoder_bias))
        else:
            return self.activation(self.l2(h1))

    def encode(self, x, train=True):
        return F.dropout(self.activation(self.l1(x)), train=train)

    def decode(self, x, train=True):
        if self.tied:
            return self.activation(F.linear(x, F.transpose(self.l1.W),
                                            self.decoder_bias))
        else:
            return self.activation(self.l2(x))

class StackedAutoEncoder(chainer.ChainList):
    def __init__(self, autoencoders):
        super(StackedAutoEncoder, self).__init__()
        for ae in autoencoders:
            self.add_link(ae)

    def __call__(self, x, train=True, depth=0):
        if depth == 0: depth = len(self)
        h = x
        for i in range(depth):
            h = self[i].encode(h, train=train)
        for i in range(depth):
            if i == depth - 1:  # do not use dropout in the output layer
                train = False
            h = self[depth - 1 - i].decode(h, train=train)
        return h

    def encode(self, x, train=True, depth=0):
        if depth == 0: depth = len(self)
        h = x
        for i in range(depth):
            h = self[i].encode(h, train=train)
        return h

    def decode(self, x, train=True, depth=0):
        if depth == 0: depth = len(self)
        h = x
        for i in range(depth):
            if i == depth - 1:  # do not use dropout in the output layer
                train = False
            h = self[depth - 1 - i].decode(h, train=train)
        return h

class Regression(chainer.Chain):
    def __init__(self, predictor):
        super(Regression, self).__init__(predictor=predictor)

    def __call__(self, x, t):
        y = self.predictor(x, True)
        self.loss = F.mean_squared_error(y, t)
        return self.loss

    def dump(self, x):
        return self.predictor(x, False)

initmodel = ''
resume = ''
gpu = -1
epoch_pre = 20
epoch_fine = 20
batchsize = 100
noise = 0
optimizer = 'adam'
learningrate = 0.01
alpha = 0.001
unit = '1000, 500, 250, 2'
activation = 'sigmoid'
untied = False

batchsize = batchsize
n_epoch = epoch_pre
n_epoch_fine = epoch_fine
n_units = list(map(int, unit.split(',')))
activation = activation

mnist = fetch_mldata('MNIST original', data_home='.')
perm = np.random.permutation(len(mnist.data))
mnist.data = mnist.data.astype(np.float32) / 255
train_data = mnist.data[perm][:60000]
test_data = mnist.data[perm][60000:]

# prepare layers
aes = []
for idx in range(len(n_units)):
    n_in = n_units[idx - 1] if idx > 0 else 28 * 28
    n_out = n_units[idx]
    ae = AutoEncoder(n_in, n_out, activation, tied=True)
    aes.append(ae)

# prepare train data for next layer
x = chainer.Variable(np.array(train_data))
train_data_for_next_layer = cuda.to_cpu(ae.encode(x, train=False))
The InvalidType error indicates that the shape of the array given to F.linear is wrong.
Expect: x.shape[1] == W.shape[1]
Actual: 784 != 250
In this case, for the given input x and weight W, F.linear expects x.shape[1] to equal W.shape[1], but it does not.
For a more detailed description of how to interpret this error message, see https://docs.chainer.org/en/stable/tips.html#how-do-i-fix-invalidtype-error.
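Concretely, in the snippet above the mismatch comes from the last two lines: after the loop, ae is still bound to the last autoencoder that was created (the 250 -> 2 layer, so its l1 expects 250 input features), but x holds the raw 784-dimensional MNIST vectors. If the intent is greedy layer-wise pretraining, the data for the next layer has to come from the first (784 -> 1000) autoencoder, or from encoding through the layers trained so far. A minimal sketch of that idea (assuming the layer-by-layer training loop itself lives elsewhere):

# encode the raw data with the first autoencoder, whose l1 expects 784 inputs
x = chainer.Variable(np.array(train_data))
h = aes[0].encode(x, train=False)
train_data_for_next_layer = cuda.to_cpu(h.data)

# ...train aes[1] on train_data_for_next_layer, then repeat:
# h = aes[1].encode(chainer.Variable(train_data_for_next_layer), train=False)
# train_data_for_next_layer = cuda.to_cpu(h.data)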

Initialising weights and bias with PyTorch - how to correct dimensions?

Using this model, I'm attempting to initialise my network with predefined weights and biases:
dimensions_input = 10
hidden_layer_nodes = 5
output_dimension = 10

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(dimensions_input, hidden_layer_nodes)
        self.linear2 = torch.nn.Linear(hidden_layer_nodes, output_dimension)
        self.linear.weight = torch.nn.Parameter(torch.zeros(dimensions_input, hidden_layer_nodes))
        self.linear.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))
        self.linear2.weight = torch.nn.Parameter(torch.zeros(dimensions_input, hidden_layer_nodes))
        self.linear2.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))

    def forward(self, x):
        l_out1 = self.linear(x)
        y_pred = self.linear2(l_out1)
        return y_pred

model = Model()

criterion = torch.nn.MSELoss(size_average=False)
optim = torch.optim.SGD(model.parameters(), lr=0.00001)

def train_model():
    y_data = x_data.clone()
    for i in range(10000):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        if i % 5000 == 0:
            print(loss)
        optim.zero_grad()
        loss.backward()
        optim.step()
RuntimeError:
The expanded size of the tensor (10) must match the existing size (5)
at non-singleton dimension 1
My dimensions appear correct, as they match the corresponding linear layers?
The code provided doesn't run because x_data isn't defined, so I can't be sure this covers everything, but one thing that stands out is the shape of the tensors you assign: nn.Linear stores its weight with shape (out_features, in_features) and its bias with shape (out_features,). Following that convention, the initialisations should be
self.linear.weight = torch.nn.Parameter(torch.zeros(hidden_layer_nodes, dimensions_input))
self.linear.bias = torch.nn.Parameter(torch.ones(hidden_layer_nodes))
self.linear2.weight = torch.nn.Parameter(torch.zeros(output_dimension, hidden_layer_nodes))
self.linear2.bias = torch.nn.Parameter(torch.ones(output_dimension))
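A quick standalone check of the shapes nn.Linear actually stores:

import torch

# weight is (out_features, in_features); bias is (out_features,)
print(torch.nn.Linear(10, 5).weight.shape)  # torch.Size([5, 10])
print(torch.nn.Linear(10, 5).bias.shape)    # torch.Size([5])
print(torch.nn.Linear(5, 10).weight.shape)  # torch.Size([10, 5])
print(torch.nn.Linear(5, 10).bias.shape)    # torch.Size([10])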