Show R-squared, AIC, BIC, and deviance for multiple models using gtsummary

I'm looking to have the R-squared, AIC, BIC, and deviance values presented for each of the four models in the merged output below.
mod0 <- lm(surv_time ~ Age + Gender + Education + `Standardized MoCA`, data = surv_tbldata_converters)
mod1 <- lm(surv_time ~ Age + Gender + Education + `Global Composite`, data = surv_tbldata_converters)
mod2 <- lm(surv_time ~ Age + Gender + Education + `Standardized MoCA`, data = surv_tbldata)
mod3 <- lm(surv_time ~ Age + Gender + Education + `Global Composite`, data = surv_tbldata)

mod0_tbl <- mod0 %>%
  tbl_regression(exponentiate = FALSE) %>%
  add_glance_source_note(include = c(r.squared, AIC, BIC, deviance))
mod1_tbl <- mod1 %>%
  tbl_regression(exponentiate = FALSE) %>%
  add_glance_source_note(include = c(r.squared, AIC, BIC, deviance))
mod2_tbl <- mod2 %>%
  tbl_regression(exponentiate = FALSE) %>%
  add_glance_source_note(include = c(r.squared, AIC, BIC, deviance))
mod3_tbl <- mod3 %>%
  tbl_regression(exponentiate = FALSE) %>%
  add_glance_source_note(include = c(r.squared, AIC, BIC, deviance))

# merge tables
tbl_merge(
  tbls = list(mod2_tbl, mod3_tbl, mod0_tbl, mod1_tbl),
  tab_spanner = c("**MoCA Model (Full Sample)**",
                  "**Global Composite Model (Full Sample)**",
                  "**MoCA Model (Converters Only)**",
                  "**Global Composite Model (Converters Only)**")
) %>%
  modify_header(label = "**Predictor**") %>%
  modify_caption("**Table 3. Study One Model Comparisons**") %>%
  bold_labels()

The add_glance_source_note() function adds the statistics as a source note, and a table may only have one source note, so the four sets of statistics cannot survive a merge. Use the add_glance_table() function to add the statistics to the bottom of the table instead, and you'll be able to merge the tables without issue. Example below!
library(gtsummary)
packageVersion("gtsummary")
#> [1] '1.4.1'

lm1 <- lm(age ~ trt + grade, trial)
lm2 <- lm(marker ~ trt + grade, trial)

tbl1 <-
  tbl_regression(lm1) %>%
  add_glance_table(include = c(r.squared, AIC, BIC, deviance))
tbl2 <-
  tbl_regression(lm2) %>%
  add_glance_table(include = c(r.squared, AIC, BIC, deviance))

tbl_all <- tbl_merge(list(tbl1, tbl2))
Created on 2021-06-08 by the reprex package (v2.0.0)
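
Applied to the four models in the question, the fix is a one-function swap. A minimal sketch (assuming the model objects mod0 through mod3 and the spanner labels from the question) that builds all four tables with purrr::map() and merges them:

library(gtsummary)
library(purrr)

# build one regression table with the glance statistics in the table body
glance_tbl <- function(mod) {
  tbl_regression(mod, exponentiate = FALSE) %>%
    add_glance_table(include = c(r.squared, AIC, BIC, deviance))
}

# add_glance_table() places the statistics in the body, so tbl_merge() works
tbl_merge(
  tbls = map(list(mod2, mod3, mod0, mod1), glance_tbl),
  tab_spanner = c("**MoCA Model (Full Sample)**",
                  "**Global Composite Model (Full Sample)**",
                  "**MoCA Model (Converters Only)**",
                  "**Global Composite Model (Converters Only)**")
) %>%
  modify_header(label = "**Predictor**") %>%
  bold_labels()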

Related

Split frequency and percentage in tbl_summary into two separate columns

I would like to split the values of N and % into two separate columns, i.e., an N column and a % column:
library(gtsummary)
trial %>%
  select(response, grade) %>%
  tbl_summary()
The easiest way to do this is with tbl_merge(), constructing two tables (one with N, the other with %). Example below!
library(gtsummary)
#> #BlackLivesMatter
library(tidyverse)

tbl <-
  # iterate over these two statistics
  c("{n} / {N}", "{p}%") %>%
  # build a tbl_summary using each of the stats
  map(
    ~ trial %>%
      select(response, grade) %>%
      tbl_summary(
        statistic = all_categorical() ~ .x,
        missing = "no"
      )
  ) %>%
  # merge the two tables together
  tbl_merge() %>%
  # some formatting to make it cute
  modify_spanning_header(everything() ~ NA) %>%
  modify_footnote(everything() ~ NA) %>%
  modify_header(list(stat_0_1 ~ "**n / N**", stat_0_2 ~ "**percent**"))
Created on 2021-08-06 by the reprex package (v2.0.0)
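
If you're unsure which internal column names (stat_0_1, stat_0_2, ...) a merged table uses in modify_header(), gtsummary's show_header_names() prints them; for example, with the tbl object built above:

# print the internal column names and current headers of the merged table
show_header_names(tbl)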

tbl_summary (gtsummary) transpose with p-values

I was wondering if there is a way to compose such a table:
library(gtsummary)
data(trial)

add_p_ex1 <-
  trial[c("marker", "trt")] %>%
  tbl_summary(by = trt) %>%
  add_p()
add_p_ex2 <-
  trial[c("marker", "death")] %>%
  tbl_summary(by = death) %>%
  add_p()

as1 <- add_p_ex1$table_body
as2 <- add_p_ex2$table_body
write.csv(rbind(as1, as2), file = 'temp1.csv')
With an output that is a transposed version of as1 and as2, like this (and ideally for K continuous variables, e.g. marker 1, marker 2, ..., marker K, and P categorical variables):

             Marker Level (ng/mL)   p-value
trt
  drug A     0.84 (0.24, 1.57)      0.084746992242773
  drug B     0.52 (0.19, 1.20)
Death
  0          0.73 (0.23, 1.33)      0.605276987642371
  1          0.57 (0.20, 1.45)
It's possible to get your table like this, and I've provided a code example below. I am not sure if it's the easiest way to get what you're looking for, however.
library(gtsummary)
library(tidyverse)

# function to transpose a tbl_summary table
gtsummary_transpose <- function(data, con_var, cat_var) {
  tbl <- data[c(con_var, cat_var)] %>%
    tbl_summary(by = cat_var, missing = "no") %>%
    add_p() %>%
    modify_header(stat_by = "{level}",
                  p.value = "p.value",
                  label = "label") %>%
    as_tibble() %>%
    select(-label) %>%
    pivot_longer(cols = -p.value) %>%
    select(name, value, p.value)

  # add a header row with the grouping variable's label
  bind_rows(
    tibble(name = attr(data[[cat_var]], "label") %||% cat_var),  # %||% comes from rlang (re-exported by purrr)
    tbl
  ) %>%
    fill(p.value, .direction = "up") %>%
    mutate(p.value = ifelse(row_number() == 1, p.value, NA))
}

gtsummary_transpose(trial, "marker", "trt")

# transpose multiple tables, and stack them
tibble(variable = c("trt", "grade")) %>%
  mutate(
    tbl = map(variable, ~ gtsummary_transpose(trial, "marker", .x))
  ) %>%
  unnest(cols = tbl) %>%
  gt::gt() %>%
  gt::cols_hide(vars(variable)) %>%
  gt::cols_label(name = "Characteristic", value = "Marker Level", p.value = "p-value") %>%
  gt::fmt_missing(columns = everything(), missing_text = "")

Tidypredict does not incorporate Recipes

I fit a logistic regression model with tidymodels. The tidypredict_fit() function extracts the formula, but I notice that it does not incorporate the preprocessing from the recipe. How can I extract the model formula with the necessary preprocessing?
library(tidymodels)
library(tidypredict)

set.seed(123)
split <- initial_split(lepto, prop = 0.75, strata = 'composite_outcome')
lepto_train <- training(split)
lepto_test <- testing(split)

rec2 <- recipe(composite_outcome ~ letargy + creatinine + cholestatic_syndrome + pallor +
                 pulmonary_involvement + reduced_diuresis + heart_rate + ast + hypotension +
                 leucocytes, data = lepto_train) %>%
  step_medianimpute(all_numeric()) %>%
  step_BoxCox(all_numeric()) %>%
  step_normalize(all_numeric()) %>%
  step_corr(all_numeric(), threshold = 0.9) %>%
  step_unknown(all_nominal(), -all_outcomes(), new_level = 'without_information') %>%
  step_dummy(all_nominal(), -all_outcomes()) %>%
  step_lincomb(all_predictors()) %>%
  step_nzv(all_predictors())

## --- Model glm --------------------------------------- ##
model_glm <- logistic_reg(mode = 'classification') %>%
  set_engine('glm')

## --- Workflow ---------------------------------------- ##
work_glm <- workflow() %>%
  add_recipe(rec2) %>%
  add_model(model_glm)

## --- Fit --------------------------------------------- ##
test_glm <- work_glm %>%
  last_fit(split)
test_glm %>% collect_metrics()

final_glm <- work_glm %>%
  fit(lepto_train)

final_glm %>%
  pull_workflow_fit() %>%
  tidypredict_fit()

model_logistic <- final_glm %>%
  pull_workflow_fit()
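
One possible workaround (a sketch, not an answer from the original thread, and continuing the session above): tidypredict_fit() only sees the fitted parsnip model, so the recipe's preprocessing has to be applied to new data separately. Extract the prepped recipe from the fitted workflow, bake() the new data with it, and evaluate the extracted formula on the baked columns:

# the trained (prepped) recipe inside the fitted workflow
prepped_rec <- pull_workflow_prepped_recipe(final_glm)

# the prediction formula, expressed in terms of the baked column names
fit_formula <- tidypredict_fit(model_logistic)

# preprocess first, then evaluate the formula on the baked data
bake(prepped_rec, new_data = lepto_test) %>%
  dplyr::mutate(pred = !!fit_formula)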

R2WinBUGS error (no prior specified for this initial value)

The original WinBUGS code is as follows:
model {
  for (i in 1:n) {
    for (j in 1:J) {
      y[i,j] <- equals(D[i],j)
      D[i] ~ dcat(p[i,])
      p[i,j] <- phi[i,j] / sum(phi[i,])
      LL[i,j] <- y[i,j]*log(p[i,j])
    }
    for (j in 2:J) {
      S.ed[i,j] <- c1[j]*ed.c[i]+equals(degree,2)*c2[j]*ed.c[i]*ed.c[i]+inprod(delta.c[j,],Spline[i,])
      log(phi[i,j]) <- beta0[j] + beta1[j]*white[i] + beta2[j]*exp.c[i] + S.ed[i,j]
    }
    LLt[i] <- sum(LL[i,])
    phi[i,1] <- 1
    H[i] <- 1/exp(LLt[i])
    exp.c[i] <- exp[i]-mean(exp[])
    ed.c[i] <- ed[i]-mean(ed[])
  }
  # Priors
  beta0[1] <- 0; beta1[1] <- 0; beta2[1] <- 0; c1[1] <- 0; c2[1] <- 0
  for (j in 2:J) {
    beta1[j] ~ dnorm(0,0.0001)
    beta2[j] ~ dnorm(0,0.0001)
    c1[j] ~ dnorm(0,0.0001)
    c2[j] ~ dnorm(0,0.0001)
    beta0[j] ~ dnorm(0,0.0001)
  }
  Dv <- -2*sum(LLt[])
  for (k in 1:K) {
    knot.c[k] <- knot[k]-mean(ed[])
    # degree = 1 for linear spline, = 2 for quadratic spline
    for (i in 1:337) {
      S[i,k] <- (ed.c[i]-knot.c[k])*step(ed[i]-knot[k])
      Spline[i,k] <- pow(S[i,k],degree)
    }
  }
  # Random spline coefficients
  for (j in 2:J) {
    for (k in 1:K) {
      delta[j,k] ~ dnorm(0,tau[j])
      delta.c[j,k] <- delta[j,k]-mean(delta[j,])
    }
    # full conditionals for spline precision
    tau[j] ~ dgamma(As[j],Bs[j])
    As[j] <- 0.1 + K/2
    Bs[j] <- 0.1 + inprod(delta.c[j,],delta.c[j,])/2
  }
}
*INITS*
list(beta1=c(NA,0,0,0,0),beta2=c(NA,0,0,0,0),c1=c(NA,0,0,0,0),
c2=c(NA,0,0,0,0),tau=c(NA,1,1,1,1),
beta0=c(NA,0,0,0,0),delta=structure(.Data=c(NA,NA,NA,NA,NA,NA,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),.Dim=c(5,6)))
list(beta1=c(NA,-0.5,0.5,0,0),beta2=c(NA,-0.5,0.5,0,0),
c1=c(NA,-0.5,0.5,0,0),c2=c(NA,-0.5,0.5,0,0),tau=c(NA,1,1,1,1),
beta0=c(NA,-0.5,0.5,0,0),delta=structure(.Data=c(NA,NA,NA,NA,NA,NA,
-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,
0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,0,0),.Dim=c(5,6)))
*DATA*
My own R2WinBUGS code is like this:
rm(list = ls())
setwd("C:/Users/~~/BMCD")
Data <- read.table("model604data.txt")

D <- Data[,1]; exp <- Data[,2]; ed <- Data[,3]; white <- Data[,4]
J <- 5; K <- 6; n <- 337; knot <- c(9.6, 12, 13.6, 14, 16, 16.4); degree <- 2

data <- list(D=D, exp=exp, ed=ed, white=white, J=J, K=K, n=n, knot=knot, degree=degree)
parameters <- c("beta0", "beta1", "beta2")

inits <- list(
  list(beta1=c(NA,0,0,0,0), beta2=c(NA,0,0,0,0), c1=c(NA,0,0,0,0), c2=c(NA,0,0,0,0),
       tau=c(NA,1,1,1,1), beta0=c(NA,0,0,0,0),
       delta=structure(.Data=c(NA,NA,NA,NA,NA,NA,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), .Dim=c(5,6))),
  list(beta1=c(NA,-0.5,0.5,0,0), beta2=c(NA,-0.5,0.5,0,0), c1=c(NA,-0.5,0.5,0,0), c2=c(NA,-0.5,0.5,0,0),
       tau=c(NA,1,1,1,1), beta0=c(NA,-0.5,0.5,0,0),
       delta=structure(.Data=c(NA,NA,NA,NA,NA,NA,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,-0.5,0.5,0,0), .Dim=c(5,6)))
)

model604 <- bugs(data, inits, parameters,
                 model.file = "C:/Users/~~/model604.odc",
                 debug = TRUE, n.chains = 2, n.iter = 2000, n.burnin = 500,
                 bugs.directory = "C:/Program Files/WinBUGS14/")
How can I fix this error?
I tried changing the NA values in the inits to 0, but that didn't work and gave me another error message. The data follow a rectangular format, which should be fine, and I believe the syntax translation for the model and data is okay. Should I add a prior specification?

WinBUGS: array index is greater than array upper bound

I need help finding the error in my WinBUGS code (v. 1.4.3).
In the "Model Specification" step the model looks syntactically correct. However, when I attempt to load the data, I get this error:
array index is greater than array upper bound for phi3
Could someone please help me? My model is provided below:
model {
for(w in 1: W){
m[w] <- n[w]-y1[w]
h[w] <- n[w]-y1[w]-y2[w]
y1[w] ~ dbin(delta[w],n[w])
y2[w] ~ dbin(theta[w],m[w])
y3[w] ~ dbin(eta[w],h[w])
y4[w] <- n[w]-y1[w]-y2[w]-y3[w]
logit(delta[w]) <- mu1+theta1[a[w]]+phi1[p[w]]+psi1[c[w]]
logit(theta[w]) <- mu2+theta2[a[w]]+phi2[p[w]]+psi2[c[w]]
logit(eta[w]) <- mu3+theta3[a[w]]+phi3[p[w]]+psi3[c[w]]
}
## Autoregressive prior model for p effects
phi1mean[1] <- 0.0
phi1prec[1] <- tauphi1*1.0E-6
phi1mean[2] <- 0.0
phi1prec[2] <- tauphi1*1.0E-6
phi2mean[1] <- 0.0
phi2prec[1] <- tauphi2*1.0E-6
phi2mean[2] <- 0.0
phi2prec[2] <- tauphi2*1.0E-6
phi3mean[1] <- 0.0
phi3prec[1] <- tauphi3*1.0E-6
phi3mean[2] <- 0.0
phi3prec[2] <- tauphi3*1.0E-6
phi4mean[1] <- 0.0
phi4prec[1] <- tauphi4*1.0E-6
phi4mean[2] <- 0.0
phi4prec[2] <- tauphi4*1.0E-6
for (j in 3:JJ) {
phi1mean[j] <- 2*phi1[j-1]-phi1[j-2]
phi1prec[j] <- tauphi1
phi2mean[j] <- 2*phi2[j-1]-phi2[j-2]
phi2prec[j] <- tauphi2
phi3mean[j] <- 2*phi3[j-1]-phi3[j-2]
phi3prec[j] <- tauphi3
phi4mean[j] <- 2*phi4[j-1]-phi4[j-2]
phi4prec[j] <- tauphi4
}
# Sampling p effects and subtracting mean for observed p
for (j in 1:JJ) {
phi1[j] ~ dnorm(phi1mean[j],phi1prec[j])
phi2[j] ~ dnorm(phi2mean[j],phi2prec[j])
phi3[j] ~ dnorm(phi3mean[j],phi3prec[j])
phi4[j] ~ dnorm(phi4mean[j],phi4prec[j])
phi1c[j] <- phi1[j]-mean(phi1[1:J])
phi2c[j] <- phi2[j]-mean(phi2[1:J])
phi3c[j] <- phi3[j]-mean(phi3[1:J])
phi4c[j] <- phi4[j]-mean(phi4[1:J])
}
# Hyperpriors for the precision parameters
tauphi1 ~ dgamma(1.0E-1,1.0E-1)
tauphi2 ~ dgamma(1.0E-1,1.0E-1)
tauphi3 ~ dgamma(1.0E-1,1.0E-1)
tauphi4 ~ dgamma(1.0E-1,1.0E-1)
sigmaphi1 <- 1/sqrt(tauphi1)
sigmaphi2 <- 1/sqrt(tauphi2)
sigmaphi3 <- 1/sqrt(tauphi3)
sigmaphi4 <- 1/sqrt(tauphi4)
## Autoregressive prior model for c effects
psi1mean[1] <- 0.0
psi1prec[1] <- taupsi1*1.0E-6
psi1mean[2] <- 0.0
psi1prec[2] <- taupsi1*1.0E-6
psi2mean[1] <- 0.0
psi2prec[1] <- taupsi2*1.0E-6
psi2mean[2] <- 0.0
psi2prec[2] <- taupsi2*1.0E-6
psi3mean[1] <- 0.0
psi3prec[1] <- taupsi3*1.0E-6
psi3mean[2] <- 0.0
psi3prec[2] <- taupsi3*1.0E-6
psi4mean[1] <- 0.0
psi4prec[1] <- taupsi4*1.0E-6
psi4mean[2] <- 0.0
psi4prec[2] <- taupsi4*1.0E-6
for (l in 3:LL) {
psi1mean[l] <- 2*psi1[l-1]-psi1[l-2]
psi1prec[l] <- taupsi1
psi2mean[l] <- 2*psi2[l-1]-psi2[l-2]
psi2prec[l] <- taupsi2
psi3mean[l] <- 2*psi3[l-1]-psi3[l-2]
psi3prec[l] <- taupsi3
psi4mean[l] <- 2*psi4[l-1]-psi4[l-2]
psi4prec[l] <- taupsi4
}
# Sampling c effects and subtracting mean for observed c
for (l in 1:LL) {
psi1[l] ~ dnorm(psi1mean[l],psi1prec[l])
psi2[l] ~ dnorm(psi2mean[l],psi2prec[l])
psi3[l] ~ dnorm(psi3mean[l],psi3prec[l])
psi4[l] ~ dnorm(psi4mean[l],psi4prec[l])
psi1c[l] <- psi1[l]-mean(psi1[1:L])
psi2c[l] <- psi2[l]-mean(psi2[1:L])
psi3c[l] <- psi3[l]-mean(psi3[1:L])
psi4c[l] <- psi4[l]-mean(psi4[1:L])
}
# Hyperpriors for the precision parameters
taupsi1 ~ dgamma(1.0E-1,1.0E-1)
taupsi2 ~ dgamma(1.0E-1,1.0E-1)
taupsi3 ~ dgamma(1.0E-1,1.0E-1)
taupsi4 ~ dgamma(1.0E-1,1.0E-1)
sigmapsi1 <- 1/sqrt(taupsi1)
sigmapsi2 <- 1/sqrt(taupsi2)
sigmapsi3 <- 1/sqrt(taupsi3)
sigmapsi4 <- 1/sqrt(taupsi4)
## Autoregressive prior model for a effects
theta1mean[1] <- 0.0
theta1prec[1] <- tautheta1*1.0E-6
theta1mean[2] <- 0.0
theta1prec[2] <- tautheta1*1.0E-6
theta2mean[1] <- 0.0
theta2prec[1] <- tautheta2*1.0E-6
theta2mean[2] <- 0.0
theta2prec[2] <- tautheta2*1.0E-6
theta3mean[1] <- 0.0
theta3prec[1] <- tautheta3*1.0E-6
theta3mean[2] <- 0.0
theta3prec[2] <- tautheta3*1.0E-6
theta4mean[1] <- 0.0
theta4prec[1] <- tautheta4*1.0E-6
theta4mean[2] <- 0.0
theta4prec[2] <- tautheta4*1.0E-6
for (i in 3:I) {
theta1mean[i] <- 2*theta1[i-1]-theta1[i-2]
theta1prec[i] <- tautheta1
theta2mean[i] <- 2*theta2[i-1]-theta2[i-2]
theta2prec[i] <- tautheta2
theta3mean[i] <- 2*theta3[i-1]-theta3[i-2]
theta3prec[i] <- tautheta3
theta4mean[i] <- 2*theta4[i-1]-theta4[i-2]
theta4prec[i] <- tautheta4
}
# Sampling a effects
for (i in 1:I) {
theta1[i] ~ dnorm(theta1mean[i],theta1prec[i])
theta2[i] ~ dnorm(theta2mean[i],theta2prec[i])
theta3[i] ~ dnorm(theta3mean[i],theta3prec[i])
theta4[i] ~ dnorm(theta4mean[i],theta4prec[i])
}
# Hyperpriors for the precision parameters
tautheta1 ~ dgamma(1.0E-1,1.0E-1)
tautheta2 ~ dgamma(1.0E-1,1.0E-1)
tautheta3 ~ dgamma(1.0E-1,1.0E-1)
tautheta4 ~ dgamma(1.0E-1,1.0E-1)
sigmatheta1 <- 1/sqrt(tautheta1)
sigmatheta2 <- 1/sqrt(tautheta2)
sigmatheta3 <- 1/sqrt(tautheta3)
sigmatheta4 <- 1/sqrt(tautheta4)
# Removing linear trends from a
for (i in 1:I) {
ivec1[i] <- i-(I+1)/2
aivec1[i] <- ivec1[i]*theta1[i]
theta1c[i] <- theta1[i]-ivec1[i]*sum(aivec1[])/(I*(I+1)*(I-1)/12)
ivec2[i] <- i-(I+1)/2
aivec2[i] <- ivec2[i]*theta2[i]
theta2c[i] <- theta2[i]-ivec2[i]*sum(aivec2[])/(I*(I+1)*(I-1)/12)
ivec3[i] <- i-(I+1)/2
aivec3[i] <- ivec3[i]*theta3[i]
theta3c[i] <- theta3[i]-ivec3[i]*sum(aivec3[])/(I*(I+1)*(I-1)/12)
ivec4[i] <- i-(I+1)/2
aivec4[i] <- ivec4[i]*theta4[i]
theta4c[i] <- theta4[i]-ivec4[i]*sum(aivec4[])/(I*(I+1)*(I-1)/12)
}
## Computing fitted and projected probabilities
for (i in 1:I) {
for (j in 1:JJ) {
deltapred[i,j] <- exp(mu1+theta1[i]+phi1[j]+psi1[I+j-i])
thetapred[i,j] <- exp(mu2+theta2[i]+phi2[j]+psi2[I+j-i])
etapred[i,j] <- exp(mu3+theta3[i]+phi3[j]+psi3[I+j-i])
p1[i,j] <- deltapred[i,j]
p2[i,j] <- thetapred[i,j]*(1-deltapred[i,j])
p3[i,j] <- etapred[i,j]*(1-deltapred[i,j])*(1-thetapred[i,j])
p4[i,j] <- (1-deltapred[i,j])*(1-thetapred[i,j]-etapred[i,j]+(etapred[i,j]*thetapred[i,j]))
}
}
}
### Data
list(
y1=c(1538727,1444672,1206999,1002960,744597,390301,1640130,1472255,1383947,1109395,984775,697701,1769569,1573498,1489025,1351284,1111397,935166,1747764,1790841,1626852,1407388,1284583,995236,1676555,1787181,1655400,1527122,1421772,1309989,1561922,1643467,1598855,1570645,1495999,1319439,1456258,1561892,1567872,1555237,1551579,1532222,1243436,1387943,1436659,1511134,1549578,1539580),
y2=c(2634569,3031916,3138776,2875868,2495888,1886174,2148776,2567507,2747428,2696199,2593985,2138303,1662296,2224336,2489723,2698322,2655746,2450716,1304387,1734318,2180203,2396749,2629088,2555934,1087351,1380119,1616309,2109287,2408800,2369855,821642,1041702,1221283,1661647,2098345,2426842,708327,873092,952245,1237084,1628334,2123709,549763,666699,774205,981393,1243888,1538431),
y3=c(1245931,1664176,1659375,2313647,3850196,4254634,825634,1293382,1454776,1736181,2596719,3655532,554953,901957,1186747,1490664,2083400,2738988,335824,630232,847486,1239538,1702256,2296941,218213,373786,555286,907876,1397221,2005940,143202,237344,344229,594993,1012777,1510283,121187,151070,219731,351040,650930,1157146,87211,120279,140551,226530,393887,733699),
n=c(5862309,6673625,6534802,6942747,8329067,8152696,5049199,5913474,6268113,6253757,7298375,8260640,4319559,5245545,5840408,6306245,6785242,7492958,3588778,4553684,5259609,5813653,6517271,7001560,3105173,3797508,4271831,5180290,6086716,7002991,2591140,3063506,3428373,4305319,5326889,6217360,2329398,2661972,2886111,3418403,4327922,5565798,1906676,2224544,2444586,2864892,3473404,4362648),
a=c(1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,7,7,8,8,8,8,8,8),
p=c(9,10,11,12,13,14,9,10,11,12,13,14,9,10,11,12,13,14,9,10,11,12,13,14,9,10,11,12,13,14,9,10,11,12,13,14,9,10,11,12,13,14,9,10,11,12,13,14),
c=c(8,9,10,11,12,13,7,8,9,10,11,12,6,7,8,9,10,11,5,6,7,8,9,10,4,5,6,7,8,9,3,4,5,6,7,8,2,3,4,5,6,7,1,2,3,4,5,6),
W=48,
I=8,
J=6,
JJ=8,
L=13,
LL=15
)
# Inits
list(
tauphi1=1,
tauphi2=1,
tauphi3=1,
tauphi4=1,
taupsi1=1,
taupsi2=1,
taupsi3=1,
taupsi4=1,
tautheta1=1,
tautheta2=1,
tautheta3=1,
tautheta4=1,
theta1=c(0,0,0,0,0,0,0,0),
theta2=c(0,0,0,0,0,0,0,0),
theta3=c(0,0,0,0,0,0,0,0),
theta4=c(0,0,0,0,0,0,0,0),
phi1=c(0,0,0,0,0,0),
phi2=c(0,0,0,0,0,0),
phi3=c(0,0,0,0,0,0),
phi4=c(0,0,0,0,0,0),
psi1=c(0,0,0,0,0,0,0,0,0,0,0,0,0),
psi2=c(0,0,0,0,0,0,0,0,0,0,0,0,0),
psi3=c(0,0,0,0,0,0,0,0,0,0,0,0,0),
psi4=c(0,0,0,0,0,0,0,0,0,0,0,0,0)
)
In the definition of logit(eta[w]) you have used phi3[p[w]], and p[w] takes values from 9 to 14. But phi3[j] is only defined for j = 1 to JJ = 8. Hence "the array index (9 to 14) is greater than the array upper bound (8)".
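
One way to resolve this (a suggestion beyond the original answer, assuming the period effects are meant to be indexed from 1): recode the p vector in the data so the observed periods run 1 to 6, keeping phi1[p[w]], phi2[p[w]], and phi3[p[w]] within the JJ = 8 bound, e.g.

p=c(1,2,3,4,5,6,1,2,3,4,5,6,1,2,3,4,5,6,1,2,3,4,5,6,1,2,3,4,5,6,1,2,3,4,5,6,1,2,3,4,5,6,1,2,3,4,5,6),

Alternatively, extend the phi prior loops (and JJ) so that indices up to 14 are defined.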