This is my first time creating an FFN, and I'm training it to translate French to English using next-word prediction:
The inputs are two arrays: one of size 2 * window_size + 1 from the source language and one of size window_size from the target language, plus a label of size 1.
For example, for window_size = 2:
["je","mange", "la", "pomme","avec"]
and
["I", "eat"]
So the inputs have sizes [5] and [2], i.e. 7 after concatenation.
Label: "the" (referring to "la" in French)
The label is converted to a one-hot encoding before being compared with yHat.
I'm using a unique index for each word (1 to len(vocab)) and training on the indices (not the words).
The output of the FFN is a probability distribution over the target-language vocabulary.
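To make the setup concrete, here is a minimal sketch of how one training example is built (the toy vocabularies below are made up for illustration; my real vocabularies come from the corpus):
import torch

# toy vocabularies; words are mapped to indices 1..len(vocab)
source_vocab = {"je": 1, "mange": 2, "la": 3, "pomme": 4, "avec": 5}
target_vocab = {"I": 1, "eat": 2, "the": 3, "apple": 4, "with": 5}

# one window pair and its label, as described above (window_size = 2)
src_window = torch.tensor([source_vocab[w] for w in ["je", "mange", "la", "pomme", "avec"]])  # size [5]
tgt_window = torch.tensor([target_vocab[w] for w in ["I", "eat"]])                            # size [2]
label_idx = target_vocab["the"]

# one-hot version of the label that gets compared with yHat
one_hot = torch.zeros(len(target_vocab) + 1)
one_hot[label_idx] = 1.0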
The problem is that the FFN doesn't learn and the accuracy stays at 0.
When I print the sizes of y_final (the target probabilities) and yHat (the model's hypothesis), they have different numbers of dimensions:
yHat.size()=[512, 7, 10212]
where 512 is the batch size, 7 is the concatenated input size and 10212 is the target vocabulary size, while
y_final.size()= [512, 10212]
And throughout the forward method I get these sizes:
torch.Size([512, 5, 32])
torch.Size([512, 5, 64])
torch.Size([512, 5, 64])
torch.Size([512, 2, 256])
torch.Size([512, 2, 32])
torch.Size([512, 2, 64])
torch.Size([512, 2, 64])
torch.Size([512, 7, 64])
torch.Size([512, 7, 128])
torch.Size([512, 7, 10212])
Since the accuracy only increases when yHat equals y_final, I suspect this never happens because they don't even have the same shape (3D vs 2D). Is this the problem?
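For reference, this standalone snippet shows the shapes nn.CrossEntropyLoss conventionally expects (2D logits and 1D class-index targets); the numbers are placeholders, not my actual data:
import torch
import torch.nn as nn

batch_size, vocab_size = 4, 10
lossfun = nn.CrossEntropyLoss()

logits = torch.randn(batch_size, vocab_size)            # [batch, vocab]
targets = torch.randint(0, vocab_size, (batch_size,))   # [batch], class indices rather than one-hot
print(lossfun(logits, targets).item())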
Please refer to the code below, and if you need any other info please tell me.
The code runs fine with no errors.
trainingData = TensorDataset(encoded_source_windows, encoded_target_windows, encoded_labels)
# print(trainingData)
batchsize = 512
trainingLoader = DataLoader(trainingData, batch_size=batchsize, drop_last=True)
def ffnModel(vocabSize1, vocabSize2, learningRate=0.01):
    class ffNetwork(nn.Module):
        def __init__(self):
            super().__init__()
            self.embeds_src = nn.Embedding(vocabSize1, 256)
            self.embeds_target = nn.Embedding(vocabSize2, 256)
            # input layer
            self.inputSource = nn.Linear(256, 32)
            self.inputTarget = nn.Linear(256, 32)
            # hidden layer 1
            self.fc1 = nn.Linear(32, 64)
            self.bnormS = nn.BatchNorm1d(5)
            self.bnormT = nn.BatchNorm1d(2)
            # layer(s) after concatenation:
            self.fc2 = nn.Linear(64, 128)
            self.output = nn.Linear(128, vocabSize2)
            self.softmaaax = nn.Softmax(dim=0)

        # forward pass
        def forward(self, xSource, xTarget):
            xSource = self.embeds_src(xSource)
            xSource = F.relu(self.inputSource(xSource))
            xSource = F.relu(self.fc1(xSource))
            xSource = self.bnormS(xSource)

            xTarget = self.embeds_target(xTarget)
            xTarget = F.relu(self.inputTarget(xTarget))
            xTarget = F.relu(self.fc1(xTarget))
            xTarget = self.bnormT(xTarget)

            xCat = torch.cat((xSource, xTarget), dim=1)  # dim=128 or 1 ?
            xCat = F.relu(self.fc2(xCat))
            print(xCat.size())
            xCat = self.softmaaax(self.output(xCat))
            return xCat

    # creating an instance of the class
    net = ffNetwork()
    # loss function
    lossfun = nn.CrossEntropyLoss()
    # lossfun = nn.NLLLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=learningRate)
    return net, lossfun, optimizer
def trainModel(vocabSize1, vocabSize2, learningRate):
    # number of epochs
    numepochs = 64
    # create a new Model instance
    net, lossfun, optimizer = ffnModel(vocabSize1, vocabSize2, learningRate)
    # initialize losses
    losses = torch.zeros(numepochs)
    trainAcc = []
    # loop over training data batches
    batchAcc = []
    batchLoss = []
    for epochi in range(numepochs):
        # switching on training mode
        net.train()
        # loop over training data batches
        batchAcc = []
        batchLoss = []
        for A, B, y in tqdm(trainingLoader):
            # forward pass and loss
            final_y = []
            for i in range(y.size(dim=0)):
                yy = [0] * target_vocab_length
                yy[y[i]] = 1
                final_y.append(yy)
            final_y = torch.tensor(final_y)
            yHat = net(A, B)
            loss = lossfun(yHat, final_y)
            ################
            print("\n yHat.size()")
            print(yHat.size())
            print("final_y.size()")
            print(final_y.size())
            # backprop
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # loss from this batch
            batchLoss.append(loss.item())
            print(f'batchLoss: {loss.item()}')
            # accuracy calculator:
            matches = torch.argmax(yHat) == final_y         # booleans (false/true)
            matchesNumeric = matches.float()                # convert to numbers (0/1)
            accuracyPct = 100 * torch.mean(matchesNumeric)  # average and x100
            batchAcc.append(accuracyPct)                    # add to list of accuracies
            print(f'accuracyPct: {accuracyPct}')
        trainAcc.append(np.mean(batchAcc))
        losses[epochi] = np.mean(batchLoss)
    return trainAcc, losses, net
trainAcc,losses,net = trainModel(len(source_vocab),len(target_vocab), 0.01)
print(trainAcc)
I have a Bool array, and I want the ones to appear only in certain places, like this:
[1,1,1,1,0,0,0,0,0,0] or [0,0,0,0,0,0,1,1,1,1]. Here I want the four ones to appear either at the array's head or at its tail, just those two placements. How can I do this?
model = cp_model.CpModel()
solver = cp_model.CpSolver()
shifts = {}
ones = {}
sequence = []
for i in range(10):
    shifts[(i)] = model.NewIntVar(0, 10, "shifts(%i)" % i)
    ones[(i)] = model.NewBoolVar('%i' % i)
for i in range(10):
    model.Add(shifts[(i)] == 8).OnlyEnforceIf(ones[(i)])
    model.Add(shifts[(i)] == 0).OnlyEnforceIf(ones[(i)].Not())
# I want the four 8s in the array to appear only at the head or the tail of the array, and not in other positions.
model.AddBoolAnd([ones[(0)], ones[(1)], ones[(2)], ones[(3)]])  # appear at head
# model.AddBoolAnd([ones[(6)], ones[(7)], ones[(8)], ones[(9)]])  # appear at tail -> error!
model.Add(sum(ones[(i)] for i in range(10)) == 4)
status = solver.Solve(model)
print("status:", status)
res = []
for i in range(10):
    res.append(solver.Value(shifts[(i)]))
print(res)
Try AddAllowedAssignments:
model = cp_model.CpModel()
ones = [model.NewBoolVar("") for _ in range(10)]
model.AddAllowedAssignments(
    ones, [[1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]]
)
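If it helps, here is a minimal end-to-end sketch of that approach (assuming the usual from ortools.sat.python import cp_model), including solving and reading the values back:
from ortools.sat.python import cp_model

model = cp_model.CpModel()
ones = [model.NewBoolVar("") for _ in range(10)]
# only these two patterns are feasible
model.AddAllowedAssignments(
    ones, [[1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]]
)

solver = cp_model.CpSolver()
status = solver.Solve(model)
if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    print([solver.Value(b) for b in ones])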
Edit: @Laurent's suggestion
model = cp_model.CpModel()
ones = [model.NewBoolVar("") for _ in range(10)]
head = model.NewBoolVar("head")
# HEAD
model.AddImplication(head, ones[0])
model.AddImplication(head, ones[1])
model.AddImplication(head, ones[2])
model.AddImplication(head, ones[3])
model.AddImplication(head, ones[4].Not())
model.AddImplication(head, ones[5].Not())
...
# TAIL
model.AddImplication(head.Not(), ones[0].Not())
model.AddImplication(head.Not(), ones[1].Not())
....
model.AddImplication(head.Not(), ones[6])
model.AddImplication(head.Not(), ones[7])
model.AddImplication(head.Not(), ones[8])
model.AddImplication(head.Not(), ones[9])
I don't know how to understand the origin in vtkImageData. The documentation says that the origin is the coordinate of the point (0,0,0) in the image. However, when I use vtkImageReslice to get two resliced images, the origins are different but the images are the same. My code is:
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
import vtk
import numpy as np

def vtkToNumpy(data):
    temp = vtk_to_numpy(data.GetPointData().GetScalars())
    dims = data.GetDimensions()
    numpy_data = temp.reshape(dims[2], dims[1], dims[0])
    numpy_data = numpy_data.transpose(2, 1, 0)
    return numpy_data

def numpyToVTK(data):
    flat_data_array = data.transpose(2, 1, 0).flatten()
    vtk_data = numpy_to_vtk(num_array=flat_data_array, deep=True, array_type=vtk.VTK_FLOAT)
    img = vtk.vtkImageData()
    img.GetPointData().SetScalars(vtk_data)
    img.SetDimensions(data.shape)
    return img
img = np.zeros(shape=[512,512,120])
img[0:300,0:100,:] = 1
vtkImg = numpyToVTK(img)
reslice = vtk.vtkImageReslice()
reslice.SetInputData(vtkImg)
reslice.SetAutoCropOutput(True)
reslice.SetOutputDimensionality(2)
reslice.SetInterpolationModeToCubic()
reslice.SetSlabNumberOfSlices(1)
reslice.SetOutputSpacing(1.0,1.0,1.0)
axialElement = [
    1, 0, 0, 256,
    0, 1, 0, 100,
    0, 0, 1, 100,
    0, 0, 0, 1
]
resliceAxes = vtk.vtkMatrix4x4()
resliceAxes.DeepCopy(axialElement)
reslice.SetResliceAxes(resliceAxes)
reslice.Update()
reslicedImg = reslice.GetOutput()
print('case 1', reslicedImg.GetOrigin())
reslicedNpImg = vtkToNumpy(reslicedImg)
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(reslicedNpImg[:,:,0])
plt.show()
For another axialElement:
axialElement = [
    1, 0, 0, 356,
    0, 1, 0, 100,
    0, 0, 1, 100,
    0, 0, 0, 1
]
The two different axialElement matrices generate the same image, but the origins of the images are different. So I am confused about the origin in vtkImageData.
When you read in the image, you are using the numpy-to-vtk functions to read and create a vtkImageData object. Do these functions set the origin to what you expect?
img.SetOrigin() needs to be called with the expected origin; otherwise the origin will be set to a default value (the same for your two images). Printing out the image data and checking that the spacing, origin, and direction are what you expect is important.
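For example, a minimal sketch of how the numpyToVTK helper could set these explicitly (the default origin and spacing values here are assumptions; use whatever your data actually requires):
import vtk
from vtk.util.numpy_support import numpy_to_vtk

def numpyToVTK(data, origin=(0.0, 0.0, 0.0), spacing=(1.0, 1.0, 1.0)):
    flat_data_array = data.transpose(2, 1, 0).flatten()
    vtk_data = numpy_to_vtk(num_array=flat_data_array, deep=True, array_type=vtk.VTK_FLOAT)
    img = vtk.vtkImageData()
    img.SetDimensions(data.shape)
    img.SetOrigin(origin)    # world coordinate of voxel (0, 0, 0)
    img.SetSpacing(spacing)  # voxel size along x, y, z
    img.GetPointData().SetScalars(vtk_data)
    # printing img afterwards shows dimensions, spacing and origin for a quick check
    return img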
I recently stumbled across variational autoencoders and tried to make them work on MNIST using Keras, following a tutorial I found on GitHub.
My question concerns the following lines of code:
# Build model
vae = Model(x, x_decoded_mean)
# Calculate custom loss
xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
# Compile
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
Why is add_loss used instead of specifying it as a compile option? Something like vae.compile(optimizer='rmsprop', loss=vae_loss) does not seem to work and throws the following error:
ValueError: The model cannot be compiled because it has no loss to optimize.
What is the difference between this function and a custom loss function that I can add as an argument for Model.fit()?
Thanks in advance!
P.S.: I know there are several issues concerning this on github, but most of them were open and uncommented. If this has been resolved already, please share the link!
Edit 1
I removed the line which adds the loss to the model and used the loss argument of the compile function. It looks like this now:
# Build model
vae = Model(x, x_decoded_mean)
# Calculate custom loss
xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
# Compile
vae.compile(optimizer='rmsprop', loss=vae_loss)
This throws a TypeError:
TypeError: Using a 'tf.Tensor' as a Python 'bool' is not allowed. Use 'if t is not None:' instead of 'if t:' to test if a tensor is defined, and use TensorFlow ops such as tf.cond to execute subgraphs conditioned on the value of a tensor.
Edit 2
Thanks to #MarioZ's efforts, I was able to figure out a workaround for this.
# Build model
vae = Model(x, x_decoded_mean)

# Calculate custom loss in separate function
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    vae_loss = K.mean(xent_loss + kl_loss)
    return vae_loss

# Compile
vae.compile(optimizer='rmsprop', loss=vae_loss)

...

vae.fit(x_train,
        x_train,  # <-- did not need this previously
        shuffle=True,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(x_test, x_test))  # <-- worked with (x_test, None) before
For some strange reason, I had to explicitly specify y and y_test while fitting the model. Originally, I didn't need to do this. The produced samples seem reasonable to me.
Although I could resolve this, I still don't know what the differences and disadvantages of these two methods are (other than needing a different syntax). Can someone give me more insight?
I'll try to answer the original question of why model.add_loss() is being used instead of specifying a custom loss function to model.compile(loss=...).
All loss functions in Keras always take two parameters, y_true and y_pred. Have a look at the definitions of the various standard loss functions available in Keras; they all have these two parameters. They are the 'targets' (the Y variable in many textbooks) and the actual output of the model. Most standard loss functions can be written as an expression of these two tensors, but some more complex losses cannot. For your VAE example this is the case because the loss function also depends on additional tensors, namely z_log_var and z_mean, which are not available to the loss function. Using model.add_loss() has no such restriction and allows you to write much more complex losses that depend on many other tensors, but it has the inconvenience of being more dependent on the model, whereas standard loss functions work with just any model.
(Note: The code proposed in other answers here is somewhat cheating inasmuch as it just uses global variables to sneak in the additional required dependencies. This makes the loss function not a true function in the mathematical sense. I consider this much less clean code and expect it to be more error-prone.)
JIH's answer is right of course but maybe it is useful to add:
model.add_loss() has no restrictions, but it also removes the comfort of, for example, using targets in model.fit().
If you have a loss that depends on additional parameters of the model, of other models, or of external variables, you can still use a Keras-style encapsulated loss function by writing a wrapper function that takes all the additional parameters:
def loss_carrier(extra_param1, extra_param2):
    def loss(y_true, y_pred):
        # x = complicated math involving extra_param1, extra_param2, y_true, y_pred
        # remember to use tensor ops, e.g. keras.backend.sum, keras.backend.square, keras.backend.mean
        # also remember that if extra_param1, extra_param2 are variable tensors instead of simple floats,
        # you need to have them defined as inputs=(main, extra_param1, extra_param2) in your keras.Model instantiation,
        # and have them defined as keras.Input or tf.placeholder with the right shape.
        return x
    return loss

model.compile(optimizer='adam', loss=loss_carrier(extra_param1, extra_param2))
The trick is the last line of loss_carrier, where you return a function, since Keras expects loss functions with just the two parameters y_true and y_pred.
This may look more complicated than the model.add_loss version, but the loss stays modular.
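As a hedged illustration of how the wrapper is then used (the parameter values and data names below are made up for the example):
# hypothetical extra parameters, e.g. regularization weights
model.compile(optimizer='adam', loss=loss_carrier(extra_param1=0.1, extra_param2=0.5))

# targets can still be passed to fit() the usual way
model.fit(x_train, y_train, epochs=10, batch_size=32)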
I was also wondering about the same question, and about related things such as how to add a loss function within intermediate layers. Here I'm sharing what I've found; I hope it helps others. It's true that standard Keras loss functions only take two arguments, y_true and y_pred. But in practice there can be cases where we need some external parameter or coefficient in addition to these two values. This can be needed at the last layer as usual, or somewhere in the middle of the model.
model.add_loss()
The accepted answer correctly describes model.add_loss(). It can depend on the layer inputs (tensors). According to the official docs, when writing the call method of a custom layer or a subclassed model, we may want to compute scalar quantities that we want to minimize during training (e.g. regularization losses). We can use the add_loss() layer method to keep track of such loss terms. For instance, activity-regularization losses depend on the inputs passed when calling a layer. Here's an example of a layer that adds a sparsity regularization loss based on the L2 norm of the inputs:
import tensorflow as tf
from tensorflow.keras.layers import Layer

class MyActivityRegularizer(Layer):
    """Layer that creates an activity sparsity regularization loss."""

    def __init__(self, rate=1e-2):
        super(MyActivityRegularizer, self).__init__()
        self.rate = rate

    def call(self, inputs):
        # We use `add_loss` to create a regularization loss
        # that depends on the inputs.
        self.add_loss(self.rate * tf.reduce_sum(tf.square(inputs)))
        return inputs
Loss values added via add_loss can be retrieved in the .losses list property of any Layer or Model (they are recursively retrieved from every underlying layer):
from tensorflow.keras import layers

class SparseMLP(Layer):
    """Stack of Linear layers with a sparsity regularization loss."""

    def __init__(self, output_dim):
        super(SparseMLP, self).__init__()
        self.dense_1 = layers.Dense(32, activation=tf.nn.relu)
        self.regularization = MyActivityRegularizer(1e-2)
        self.dense_2 = layers.Dense(output_dim)

    def call(self, inputs):
        x = self.dense_1(inputs)
        x = self.regularization(x)
        return self.dense_2(x)

mlp = SparseMLP(1)
y = mlp(tf.ones((10, 10)))
print(mlp.losses)  # List containing one float32 scalar
Also note, when using model.fit(), such loss terms are handled automatically. When writing a custom training loop, we should retrieve these terms by hand from model.losses, like this:
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()

# Iterate over the batches of a dataset.
for x, y in dataset:
    with tf.GradientTape() as tape:
        # Forward pass.
        logits = model(x)
        # Loss value for this batch.
        loss_value = loss_fn(y, logits)
        # Add extra loss terms to the loss value.
        loss_value += sum(model.losses)  # < ------------- HERE ---------
    # Update the weights of the model to minimize the loss value.
    gradients = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))
Custom losses
With model.add_loss() (AFAIK), we can add a loss somewhere in the middle of the network, and we are no longer bound to only two parameters (y_true, y_pred). But what if we also want to pass an external parameter or coefficient to the loss function at the last layer of the network? Nric's answer is correct, but it can also be implemented by subclassing the tf.keras.losses.Loss class and implementing the following two methods:
__init__(self): accept parameters to pass during the call of your loss function
call(self, y_true, y_pred): use the targets (y_true) and the model predictions (y_pred) to compute the model's loss
Here is an example of a custom MSE implemented by subclassing the tf.keras.losses.Loss class. Here, too, we are no longer bound to only the two parameters y_true and y_pred.
class CustomMSE(keras.losses.Loss):
    def __init__(self, regularization_factor=0.1, name="custom_mse"):
        super().__init__(name=name)
        self.regularization_factor = regularization_factor

    def call(self, y_true, y_pred):
        mse = tf.math.reduce_mean(tf.square(y_true - y_pred))
        reg = tf.math.reduce_mean(tf.square(0.5 - y_pred))
        return mse + reg * self.regularization_factor

model.compile(optimizer=..., loss=CustomMSE())
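For completeness, a minimal usage sketch under the same setup (the optimizer choice, regularization_factor value and data names are placeholders, not from the original post):
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3), loss=CustomMSE(regularization_factor=0.05))
model.fit(x_train, y_train, epochs=5, batch_size=64)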
Try this:
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
import seaborn as sns
from pylab import rcParams
from sklearn.model_selection import train_test_split
from keras.models import Model, load_model, Sequential
from keras.layers import Input, Lambda, Dense, Dropout, Layer, Bidirectional, Embedding, Lambda, LSTM, RepeatVector, TimeDistributed, BatchNormalization, Activation, Merge
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras import regularizers
from keras import backend as K
from keras import metrics
from scipy.stats import norm
from keras.utils import to_categorical
from keras import initializers
bias_initializer = 'zeros'
from keras import objectives
np.random.seed(22)
data1 = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0], dtype='int32')
data2 = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0], dtype='int32')
data3 = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0], dtype='int32')
#train = np.zeros(shape=(992,54))
#test = np.zeros(shape=(921,54))
train = np.zeros(shape=(300,54))
test = np.zeros(shape=(300,54))
for n, i in enumerate(train):
    if (n<=100):
        train[n] = data1
    elif (n>100 and n<=200):
        train[n] = data2
    elif(n>200):
        train[n] = data3

for n, i in enumerate(test):
    if (n<=100):
        test[n] = data1
    elif(n>100 and n<=200):
        test[n] = data2
    elif(n>200):
        test[n] = data3
batch_size = 5
original_dim = train.shape[1]
intermediate_dim45 = 45
intermediate_dim35 = 35
intermediate_dim25 = 25
intermediate_dim15 = 15
intermediate_dim10 = 10
intermediate_dim5 = 5
latent_dim = 3
epochs = 50
epsilon_std = 1.0
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
x = Input(shape=(original_dim,), name = 'first_input_mario')
h1 = Dense(intermediate_dim45, activation='relu', name='h1')(x)
hD = Dropout(0.5)(h1)
h2 = Dense(intermediate_dim25, activation='relu', name='h2')(hD)
h3 = Dense(intermediate_dim10, activation='relu', name='h3')(h2)
h = Dense(intermediate_dim5, activation='relu', name='h')(h3)  # it was relu
h = Dropout(0.1)(h)
z_mean = Dense(latent_dim, activation='relu')(h)
z_log_var = Dense(latent_dim, activation='relu')(h)
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
decoder_h = Dense(latent_dim, activation='relu')
decoder_h1 = Dense(intermediate_dim5, activation='relu')
decoder_h2 = Dense(intermediate_dim10, activation='relu')
decoder_h3 = Dense(intermediate_dim25, activation='relu')
decoder_h4 = Dense(intermediate_dim45, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
h_decoded1 = decoder_h1(h_decoded)
h_decoded2 = decoder_h2(h_decoded1)
h_decoded3 = decoder_h3(h_decoded2)
h_decoded4 = decoder_h4(h_decoded3)
x_decoded_mean = decoder_mean(h_decoded4)
vae = Model(x, x_decoded_mean)
def vae_loss(x, x_decoded_mean):
    xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var))
    loss = xent_loss + kl_loss
    return loss

vae.compile(optimizer='rmsprop', loss=vae_loss)
vae.fit(train, train, batch_size=batch_size, epochs=epochs, shuffle=True,
        validation_data=(test, test))
vae = Model(x, x_decoded_mean)
encoder = Model(x, z_mean)
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h (decoder_input)
_h_decoded1 = decoder_h1 (_h_decoded)
_h_decoded2 = decoder_h2 (_h_decoded1)
_h_decoded3 = decoder_h3 (_h_decoded2)
_h_decoded4 = decoder_h4 (_h_decoded3)
_x_decoded_mean = decoder_mean(_h_decoded4)
generator = Model(decoder_input, _x_decoded_mean)
generator.summary()
You need to change the compile line to
vae.compile(optimizer='rmsprop', loss=vae_loss)
I am new to WinBUGS and I could not get the code below to work. The model is syntactically correct (it is a hierarchical logit model with random effects), but when I click "load data" the error message that appears is "expected key word structure". What does that mean? Can anyone please help me get the code below working?
My data set is bigger, but to simplify the question I am only working here with n=2 (number of groups) and k=5 (number of subjects in each group).
model{
  for(i in 1:n){
    for(j in 1:k){
      yij[i,j] ~ dbern(p[i,j])
      logit(p[i,j]) <- alpha + beta*xij[i,j] + ui[i]
    }
    ui[i] ~ dnorm(0,tau)
  }
  alpha ~ dnorm(0,0.001)
  beta ~ dnorm(0,0.001)
  tau ~ dunif(0,100)
}
with data:
list(n=2, k=5, yij=structure(.Data=c(1, 1, 1, 1, 1, 0, 0, 0, 0, 0), .Dim=c(2,5)),
xij = c(0.0494063719, -0.078101264, 0.2748560749, 0.1245743393, -2.531242809, .6849338859, 0.5302062384, 0.7828560148, -0.012341452, 0.5128471157),
ui = c(0.5031197054, 0.5031197054, 0.5031197054, 0.5031197054, 0.5031197054, -2.13785637, -2.13785637, 2.13785637, -2.13785637, -2.13785637))
xij looks to be a matrix in the BUGS model, but you have it as a vector in the data. This should work:
list(n=2, k=5, yij=structure(.Data=c(1, 1, 1, 1, 1, 0, 0, 0, 0, 0), .Dim=c(2,5)),
xij = structure(.Data=c(0.0494063719, -0.078101264, 0.2748560749, 0.1245743393, -2.531242809, .6849338859, 0.5302062384, 0.7828560148, -0.012341452, 0.5128471157), .Dim=c(2,5)),
ui = c(0.5031197054, 0.5031197054, 0.5031197054, 0.5031197054, 0.5031197054, -2.13785637, -2.13785637, 2.13785637, -2.13785637, -2.13785637))