How to code a matrix in WinBUGS?

I am trying to code the 2x2 matrix sigma with its 4 elements, but I am not sure how to write this in WinBUGS. My goal is to get the posterior p's, their means and variances, and to create an ellipse region covered by the two posterior p's. Here's my code below:
model{
  # likelihood
  for (j in 1:Nf) {
    p1[j, 1:2] ~ dmnorm(gamma[1:2], T[1:2, 1:2])
    for (i in 1:2) {
      logit(p[j,i]) <- p1[j,i]
      Y[j,i] ~ dbin(p[j,i], n)
    }
    X_mu[j,1] <- p[j,1] - mean(p[,1])
    X_mu[j,2] <- p[j,2] - mean(p[,2])
    v1 <- sd(p[,1]) * sd(p[,1])
    v2 <- sd(p[,2]) * sd(p[,2])
    v12 <- (inprod(X_mu[j,1], X_mu[j,2])) / (sd(p[,1]) * sd(p[,2]))
    sigma[1,1] <- v1
    sigma[1,2] <- v12
    sigma[2,1] <- v12
    sigma[2,2] <- v2
    sigmaInv[1:2, 1:2] <- inverse(sigma[,])
    T1[j,1] <- inprod(sigmaInv[1,], X_mu[j,1])
    T1[j,2] <- inprod(sigmaInv[2,], X_mu[j,2])
    ell[j,1] <- inprod(X_mu[j,1], T1[j,1])
    ell[j,2] <- inprod(X_mu[j,2], T1[j,2])
  }
  # priors
  gamma[1:2] ~ dmnorm(mn[1:2], prec[1:2, 1:2])
  expit[1] <- exp(gamma[1]) / (1 + exp(gamma[1]))
  expit[2] <- exp(gamma[2]) / (1 + exp(gamma[2]))
  T[1:2, 1:2] ~ dwish(R[1:2, 1:2], 2)
  sigma2[1:2, 1:2] <- inverse(T[,])
  rho <- sigma2[1,2] / sqrt(sigma2[1,1] * sigma2[2,2])
}
# Data
list(Nf =20, mn=c(-0.69, -1.06), n=60,
prec = structure(.Data = c(.001, 0,
0, .001),.Dim = c(2, 2)),
R = structure(.Data = c(.001, 0,
0, .001),.Dim = c(2, 2)),
Y= structure(.Data=c(32,13,
32,12,
10,4,
28,11,
10,5,
25,10,
4,1,
16,5,
28,10,
21,7,
19,9,
18,12,
31,12,
13,3,
10,4,
18,7,
3,2,
27,5,
8,1,
8,4), .Dim = c(20, 2)))
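For the ellipse itself, one option is to build it outside BUGS from the monitored samples. Below is a minimal R sketch, assuming the posterior draws of p[,1] and p[,2] have been exported to R (e.g. via CODA) as two hypothetical vectors p1s and p2s; it draws the 95% ellipse implied by their sample mean and covariance.

# Sketch only: p1s, p2s are hypothetical vectors of posterior draws for p[,1] and p[,2]
mu_hat  <- c(mean(p1s), mean(p2s))        # posterior means
Sig_hat <- cov(cbind(p1s, p2s))           # posterior covariance of the two p's
r       <- sqrt(qchisq(0.95, df = 2))     # radius of a 95% coverage ellipse
theta   <- seq(0, 2 * pi, length.out = 200)
circle  <- cbind(cos(theta), sin(theta))  # unit circle, mapped onto the ellipse below
ell     <- sweep(r * circle %*% chol(Sig_hat), 2, mu_hat, "+")
plot(p1s, p2s, pch = 16, cex = 0.4, xlab = "p1", ylab = "p2")
lines(ell, col = "red", lwd = 2)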

You have to specify each element in turn. You can use the inverse function (rather than solve) to invert the matrix.
model{
  sigma[1,1] <- v1
  sigma[1,2] <- v12
  sigma[2,1] <- v21
  sigma[2,2] <- v2
  sigmaInv[1:2, 1:2] <- inverse(sigma[,])
}

Related

Multilevel data simulation using simglm: how do I simulate random effects at level 1?

I want to add random effects at level 1. Below is working code with only the level-2 random effect simulated.
I have two questions I hope folks can help with:
How do I get a reasonable estimate of the level-2 variance (assuming I have sample data)? Can I just square the between-person SD on the DV?
How do I simulate level-1 variance, and how do I determine a reasonable value at level 1? (See the sketch appended after the working code below.)
I've tried:
randomeffect = list(int_neighborhood = list(variance = 8, var_level = 2),
                    weight = list(variance = 8, var_level = 1))
but that kicks an error.
This code works without the level-1 term:
library(simglm)  # simulate_fixed(), simulate_randomeffect(), simulate_error(), generate_response()
library(nlme)    # lme(), lmeControl(), corAR1()
library(dplyr)   # %>%

ctrl <- lmeControl(opt = 'optim')
sim_arguments <- list(
  formula = y ~ 1 + weight + age + sex + (1 | neighborhood),
  reg_weights = c(4, -0.03, 0.2, 0.33),
  fixed = list(weight = list(var_type = 'continuous', mean = 180, sd = 30),
               age = list(var_type = 'ordinal', levels = 30:60),
               sex = list(var_type = 'factor', levels = c('male', 'female'))),
  randomeffect = list(int_neighborhood = list(variance = 8, var_level = 2)),
  sample_size = list(level1 = 62, level2 = 60)
)

nested_data <- sim_arguments %>%
  simulate_fixed(data = NULL, .) %>%
  simulate_randomeffect(sim_arguments) %>%
  simulate_error(sim_arguments) %>%
  generate_response(sim_arguments)

RandomIntercept <- lme(fixed = y ~ 1 + weight + age + sex,
                       random = ~ 1 | neighborhood,
                       correlation = corAR1(),
                       data = nested_data,
                       control = ctrl,
                       na.action = na.exclude)
summary(RandomIntercept)

RandomSlope <- lme(fixed = y ~ 1 + weight + age + sex,
                   random = ~ 1 + weight | neighborhood,
                   correlation = corAR1(),
                   data = nested_data,
                   control = ctrl,
                   na.action = na.exclude)
summary(RandomSlope)
anova(RandomIntercept, RandomSlope)
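A minimal sketch of what the level-1 part might look like, assuming (this is an assumption, not tested code) that simglm takes the level-1 residual variance through the error element of sim_arguments, which simulate_error() then uses, rather than through randomeffect; a random slope for weight would additionally require (1 + weight | neighborhood) in the formula. Names are the same as in the code above.

# Sketch (assumption): level-1 residual variance supplied via the 'error' element
sim_arguments_l1 <- list(
  formula = y ~ 1 + weight + age + sex + (1 | neighborhood),
  reg_weights = c(4, -0.03, 0.2, 0.33),
  fixed = list(weight = list(var_type = 'continuous', mean = 180, sd = 30),
               age = list(var_type = 'ordinal', levels = 30:60),
               sex = list(var_type = 'factor', levels = c('male', 'female'))),
  randomeffect = list(int_neighborhood = list(variance = 8, var_level = 2)),
  error = list(variance = 5),   # hypothetical value for the level-1 (residual) variance
  sample_size = list(level1 = 62, level2 = 60)
)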

Using the GPU with Lux and NeuralPDE in Julia

I am trying to run a model on the GPU; it runs fine on the CPU. I think using measured boundary conditions is somehow causing the issue, but I am not sure. I am following this example for the GPU: https://docs.sciml.ai/dev/modules/NeuralPDE/tutorials/gpu/, and this example for using measured boundary conditions: https://docs.sciml.ai/dev/modules/MethodOfLines/tutorials/icbc_sampled/
using Random
using NeuralPDE, Lux, CUDA, Random
using Optimization
using OptimizationOptimisers
using NNlib
import ModelingToolkit: Interval
using Interpolations
# Measured Boundary Conditions (Arbitrary For Example)
bc1 = 1.0:1:1001.0 .|> Float32
bc2 = 1.0:1:1001.0 .|> Float32
ic1 = zeros(101) .|> Float32
ic2 = zeros(101) .|> Float32;
# Interpolation Functions Registered as Symbolic
itp1 = interpolate(bc1, BSpline(Cubic(Line(OnGrid()))))
up_cond_1_f(t::Float32) = itp1(t)
@register_symbolic up_cond_1_f(t)
itp2 = interpolate(bc2, BSpline(Cubic(Line(OnGrid()))))
up_cond_2_f(t::Float32) = itp2(t)
@register_symbolic up_cond_2_f(t)
itp3 = interpolate(ic1, BSpline(Cubic(Line(OnGrid()))))
init_cond_1_f(x::Float32) = itp3(x)
@register_symbolic init_cond_1_f(x)
itp4 = interpolate(ic2, BSpline(Cubic(Line(OnGrid()))))
init_cond_2_f(x::Float32) = itp4(x)
@register_symbolic init_cond_2_f(x);
# Parameters and differentials
@parameters t, x
@variables u1(..), u2(..)
Dt = Differential(t)
Dx = Differential(x);
# Arbitrary Equations
eqs = [Dt(u1(t, x)) + Dx(u2(t, x)) ~ 0.,
Dt(u1(t, x)) * u1(t,x) + Dx(u2(t, x)) + 9.81 ~ 0.]
# Boundary Conditions with Measured Data
bcs = [
u1(t,1) ~ up_cond_1_f(t),
u2(t,1) ~ up_cond_2_f(t),
u1(1,x) ~ init_cond_1_f(x),
u2(1,x) ~ init_cond_2_f(x)
]
# Space and time domains
domains = [t ∈ Interval(1.0,1001.0),
x ∈ Interval(1.0,101.0)];
# Neural network
input_ = length(domains)
n = 10
chain = Chain(Dense(input_,n,NNlib.tanh_fast),Dense(n,n,NNlib.tanh_fast),Dense(n,4))
strategy = GridTraining(.25)
ps = Lux.setup(Random.default_rng(), chain)[1]
ps = ps |> Lux.ComponentArray |> gpu .|> Float32
discretization = PhysicsInformedNN(chain,
strategy,
init_params=ps)
# Model Setup
@named pdesystem = PDESystem(eqs, bcs, domains, [t, x], [u1(t, x), u2(t, x)])
prob = discretize(pdesystem,discretization);
sym_prob = symbolic_discretize(pdesystem,discretization);
# Losses and Callbacks
pde_inner_loss_functions = sym_prob.loss_functions.pde_loss_functions
bcs_inner_loss_functions = sym_prob.loss_functions.bc_loss_functions
callback = function (p, l)
println("loss: ", l)
println("pde_losses: ", map(l_ -> l_(p), pde_inner_loss_functions))
println("bcs_losses: ", map(l_ -> l_(p), bcs_inner_loss_functions))
return false
end;
# Train Model (Throws Error)
res = Optimization.solve(prob,Adam(0.01); callback = callback, maxiters=5000)
phi = discretization.phi;
I get the following error:
GPU broadcast resulted in non-concrete element type Union{}.
This probably means that the function you are broadcasting contains an error or type instability.
Please advise.

Simulating Ogata's Thinning Algorithm in R

I am trying to implement Ogata's thinning algorithm exactly as given in Algorithm 3 of https://www.math.fsu.edu/~ychen/research/Thinning%20algorithm.pdf, with the parameters they specify, to reproduce Figure 2.
Here is the code, which follows the algorithm verbatim.
# Simulation of a Univariate Hawkes Poisson with
# Exponential Kernel γ(u) = αe^(−βu), on [0, T].
# Based on: https://www.math.fsu.edu/~ychen/research/Thinning%20algorithm.pdf
library(tidyverse)
# Initialize parameters that remain the same for all realizations
mu <- 1.2
alpha <- 0.6
beta <- 0.8
T <- 10   # end of the observation window [0, T] (note: this masks the built-in T/TRUE)
# Initialize other variables that change through realizations
bigTau <- vector('numeric')
bigTau <- c(bigTau, 0)
s <- 0
n <- 0
lambda_vec_accepted <- c(mu)
# Begin loop
while (s < T) {
  # -------------------------------
  # Compute lambda_bar
  # -------------------------------
  sum_over_big_Tau <- 0
  for (i in c(1:length(bigTau))) {
    sum_over_big_Tau <- sum_over_big_Tau + alpha * exp(-beta * (s - bigTau[i]))
  }
  lambda_bar <- mu + sum_over_big_Tau
  u <- runif(1)
  # so that w ∼ exponential(λ_bar)
  w <- -log(u) / lambda_bar
  # so that s is the next candidate point
  s <- s + w
  # Generate D ∼ uniform(0,1)
  D <- runif(1)
  # -------------------------------
  # Compute lambda_s
  # -------------------------------
  sum_over_big_Tau <- 0
  for (i in c(1:length(bigTau))) {
    sum_over_big_Tau <- sum_over_big_Tau + alpha * exp(-beta * (s - bigTau[i]))
  }
  lambda_s <- mu + sum_over_big_Tau
  # -------------------------------
  # Accepting with prob. λ_s/λ_bar
  # -------------------------------
  if (D * lambda_bar <= lambda_s) {
    lambda_vec_accepted <- c(lambda_vec_accepted, lambda_s)
    # updating the number of points accepted
    n <- n + 1
    # naming it t_n
    t_n <- s
    # adding t_n to the ordered set bigTau
    bigTau <- c(bigTau, t_n)
  }
}
df <- data.frame(x = bigTau, y = lambda_vec_accepted)
ggplot(df) + geom_line(aes(x = x, y = y))
However, the plot of lambda vs. time that I get (running it several times) is nowhere near what they obtained in Figure 2 (exponentially decreasing).
I am not sure what mistake I am making. It would be great if anyone could help; this is needed for my research. I am from biology, so please excuse me if I am doing something silly. Thanks.
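For comparison with Figure 2, it may help to plot the conditional intensity on a fine time grid rather than only at the accepted points, since the exponential decay happens between events. A small sketch reusing mu, alpha, beta, T and bigTau from the code above (dropping the artificial starting point 0):

# Sketch: evaluate lambda(t) on a dense grid using the accepted event times
events <- bigTau[bigTau > 0]                      # accepted event times, without the initial 0
tgrid  <- seq(0, T, by = 0.01)
lambda_t <- sapply(tgrid, function(t) {
  past <- events[events <= t]
  mu + sum(alpha * exp(-beta * (t - past)))       # conditional intensity at time t
})
plot(tgrid, lambda_t, type = "l", xlab = "time", ylab = "lambda(t)")
points(events, rep(mu, length(events)), pch = 4)  # mark the event times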

SSP Algorithm minimal subset of length k

Suppose S is a set with t elements modulo n. There are 2^t subsets in total. I would like a PARI/GP program which finds the smallest subset U (in terms of length) of distinct elements such that the sum of all elements in U is 0 modulo n. It is easy to write a program which searches via brute force, but brute force is infeasible as t and n get larger, so I would appreciate help writing a program which doesn't use brute force to solve this instance of the subset-sum problem.
Dynamic Approach:
def isSubsetSum(st, n, sm):
    # subset[i][j] is True if some subset of st[0..i-1] sums to j
    # (rows must be independent lists, not n+1 references to the same list)
    subset = [[False] * (sm + 1) for _ in range(n + 1)]
    # A sum of 0 is always achievable (the empty subset)
    for i in range(n + 1):
        subset[i][0] = True
    # A non-zero sum is not achievable from the empty set
    for j in range(1, sm + 1):
        subset[0][j] = False
    # Fill the table bottom-up
    for i in range(1, n + 1):
        for j in range(1, sm + 1):
            if j < st[i - 1]:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - st[i - 1]]
    # Uncomment to print the table:
    # for i in range(n + 1):
    #     for j in range(sm + 1):
    #         print(subset[i][j], end=" ")
    #     print()
    return subset[n][sm]
I got this code from here; I don't know whether it actually works.
function getSummingItems(a,t){
return a.reduce((h,n) => Object.keys(h)
.reduceRight((m,k) => +k+n <= t ? (m[+k+n] = m[+k+n] ? m[+k+n].concat(m[k].map(sa => sa.concat(n)))
: m[k].map(sa => sa.concat(n)),m)
: m, h), {0:[[]]})[t];
}
var arr = Array(20).fill().map((_,i) => i+1), // [1,2,..,20]
tgt = 42,
res = [];
console.time("test");
res = getSummingItems(arr,tgt);
console.timeEnd("test");
console.log("found",res.length,"subsequences summing to",tgt);
console.log(JSON.stringify(res));
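For what it is worth, the usual way to avoid enumerating all 2^t subsets here is a dynamic program over residues modulo n rather than over raw sums. Below is a minimal sketch in R (not PARI/GP; the function name min_zero_subset_size is made up) that returns the size of the smallest non-empty subset of the input summing to 0 modulo n in O(t*n) time; recovering the subset itself would need extra bookkeeping.

# best[r + 1] = minimal size of a non-empty subset seen so far with sum congruent to r (mod n).
# Each element is used at most once: updates read from 'best' (the state before this element)
# and write to 'new_best'.
min_zero_subset_size <- function(elements, n) {
  best <- rep(Inf, n)
  for (x in elements) {
    new_best <- best
    xm <- x %% n
    new_best[xm + 1] <- min(new_best[xm + 1], 1)                # the singleton {x}
    for (r in 0:(n - 1)) {
      if (is.finite(best[r + 1])) {
        s <- (r + xm) %% n
        new_best[s + 1] <- min(new_best[s + 1], best[r + 1] + 1)
      }
    }
    best <- new_best
  }
  best[1]                                                        # residue 0; Inf if none exists
}
min_zero_subset_size(1:20, 42)   # 3, e.g. {3, 19, 20} sums to 42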

Different clusters with the same method

I am stuck on a problem with hierarchical clustering. I want to make a dendrogram and a heatmap, using correlation as the distance (d_mydata = dist(1 - cor(t(mydata)))) and ward.D2 as the clustering method.
As a convenience, the pheatmap package plots the dendrogram on the left side of the heatmap so you can visualize the clusters.
The pipeline of my analysis would be this:
create the dendrogram
test how many clusters would be optimal (k)
extract the subjects in each cluster
create a heatmap
To my surprise, the dendrogram plotted in the heatmap is not the same as the one plotted beforehand, even though the methods are the same.
So I decided to create a pheatmap coloured by the clusters obtained earlier with cutree, to test whether the colours correspond to the clusters in the dendrogram.
This is my code:
# Create test matrix
test = matrix(rnorm(200), 20, 10)
test[1:10, seq(1, 10, 2)] = test[1:10, seq(1, 10, 2)] + 3
test[11:20, seq(2, 10, 2)] = test[11:20, seq(2, 10, 2)] + 2
test[15:20, seq(2, 10, 2)] = test[15:20, seq(2, 10, 2)] + 4
colnames(test) = paste("Test", 1:10, sep = "")
rownames(test) = paste("Gene", 1:20, sep = "")
test<-as.data.frame(test)
# Create a dendrogram with this test matrix
dist_test<-dist(test)
hc=hclust(dist_test, method="ward.D2")
plot(hc)
dend<-as.dendrogram(hc, check=F, nodePar=list(cex = .000007),leaflab="none", cex.main=3, axes=F, adjust=F)
clus2 <- as.factor(cutree(hc, k=2)) # cut tree into 2 clusters
groups<-data.frame(clus2)
groups$id<-rownames(groups)
#-----------DATAFRAME WITH mydata AND THE CLASSIFICATION OF CLUSTERS AS FACTORS---------------------
test$id<-rownames(test)
clusters<-merge(groups, test, by.x="id")
rownames(clusters)<-clusters$id
clusters$clus2<-as.character(clusters$clus2)
clusters$clus2[clusters$clus2== "1"]= "cluster1"
clusters$clus2[clusters$clus2=="2"]<-"cluster2"
plot(dend,
main = "test",
horiz = TRUE, leaflab = "none")
d_clusters<-dist(1-cor(t(clusters[,7:10])))
hc_cl=hclust(d_clusters, method="ward.D2")
annotation_col = data.frame(
Path = factor(colnames(clusters[3:12]))
)
rownames(annotation_col) = colnames(clusters[3:12])
annotation_row = data.frame(
Group = factor(clusters$clus2)
)
rownames(annotation_row) = rownames(clusters)
# Specify colors
ann_colors = list(
Path= c(Test1="darkseagreen", Test2="lavenderblush2", Test3="lightcyan3", Test4="mediumpurple", Test5="red", Test6="blue", Test7="brown", Test8="pink", Test9="black", Test10="grey"),
Group = c(cluster1="yellow", cluster2="blue")
)
require(RColorBrewer)
library(RColorBrewer)
cols <- colorRampPalette(brewer.pal(10, "RdYlBu"))(20)
library(pheatmap)
pheatmap(clusters[ ,3:12], color = rev(cols),
scale = "column",
kmeans_k = NA,
show_rownames = F, show_colnames = T,
main = "Heatmap CK14, CK5/6, GATA3 and FOXA1 n=492 SCALE",
clustering_method = "ward.D2",
cluster_rows = TRUE, cluster_cols = TRUE,
clustering_distance_rows = "correlation",
clustering_distance_cols = "correlation",
annotation_row = annotation_row,
annotation_col = annotation_col,
annotation_colors=ann_colors
)
Has anyone had the same issue? Am I making a stupid mistake?
Thank you in advance.
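One detail visible in the code above: the dendrogram plotted beforehand is built from dist(test) (Euclidean distance), while pheatmap is asked for clustering_distance_rows = "correlation", so the two trees are not computed on the same distance. pheatmap also accepts a precomputed hclust object for cluster_rows, which guarantees the heatmap shows exactly the tree you inspected; a small sketch reusing the objects defined above:

# Sketch: build the row tree once and hand it to pheatmap via cluster_rows
d_rows  <- dist(1 - cor(t(clusters[, 3:12])))   # correlation-based distance on all 10 tests
hc_rows <- hclust(d_rows, method = "ward.D2")
plot(hc_rows)                                   # inspect / cutree() this same tree
pheatmap(clusters[, 3:12], color = rev(cols),
         cluster_rows = hc_rows,                # precomputed hclust object
         cluster_cols = TRUE,
         clustering_method = "ward.D2",
         clustering_distance_cols = "correlation",
         show_rownames = FALSE, show_colnames = TRUE,
         annotation_row = annotation_row,
         annotation_col = annotation_col,
         annotation_colors = ann_colors)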