Data Preparation
# Load the ESM dataset named in the document parameters (comma-separated CSV).
# Fixed: spell out TRUE instead of the reassignable shorthand T.
dataset <- read.csv(file = params$file, header = TRUE, sep = ",")
#run parallel cores
options(mc.cores = 8, brms.backend = "cmdstanr", brms.file_refit = "on_change")
#install.packages("loo")
#remotes::install_github("stan-dev/loo")
library(remotes)
library(loo)
library(psych)
library(relativeVariability)
library(brms)
library(cmdstanr)
library(data.table)
library(ggplot2)
library(dplyr)
library(haven)
#library(rstanarm)
library(knitr)
library(rstan)
library(shinystan)
Rescale Data
# Rescale 7-point items onto the 1-5 range used across datasets:
# (x - 1) * (4/6) + 1 maps [1, 7] linearly onto [1, 5].
# The repeated transform is factored into one helper so the formula
# cannot drift between variables.
rescale_7_to_5 <- function(x) (x - 1) * (4 / 6) + 1

dataset$negemo_full_m <- rescale_7_to_5(dataset$negemo_full_m)
dataset$posemo_full_m <- rescale_7_to_5(dataset$posemo_full_m)
dataset$neuro_t <- rescale_7_to_5(dataset$neuro_t)
hist(dataset$negemo_full_m)

Censoring Data
# Descriptive statistics after rescaling (printed values are from the
# rendered run on "Dataset 3 public.csv").
range(dataset$negemo_full_m, na.rm = TRUE)
## [1] 1 5
range(dataset$posemo_full_m, na.rm = TRUE)
## [1] 1 5
sd(dataset$negemo_full_m, na.rm = TRUE)
## [1] 0.7831137
mean(dataset$negemo_full_m, na.rm = TRUE)
## [1] 2.219312
sd(dataset$posemo_full_m, na.rm = TRUE)
## [1] 0.5487185
mean(dataset$posemo_full_m, na.rm = TRUE)
## [1] 1.210022
sd(dataset$neuro_t, na.rm = TRUE)
## [1] 1.108445
mean(dataset$neuro_t, na.rm = TRUE)
## [1] 2.797821
# Fixed: the original called qplot(dataset$negemo_full_, ...) and
# qplot(dataset$posemo_full_, ...) — column names missing the trailing
# "m". Those evaluate to NULL, so the histograms were empty.
qplot(dataset$negemo_full_m, binwidth = .1)

qplot(dataset$posemo_full_m, binwidth = .1)

# Flag scale-boundary responses for the censored models: values at the
# scale minimum (1) are treated as left-censored, at the maximum (5) as
# right-censored. NAs fall through to "none" because an NA comparison is
# not TRUE in case_when().
dataset$Acens <- case_when(dataset$negemo_full_m == 1 ~ "left",
dataset$negemo_full_m == 5 ~ "right",
TRUE ~ "none")
table(dataset$Acens)
##
## left none right
## 8 751 6
# Same censoring indicator for positive affect (much more left-censoring
# here: most momentary PA reports sit at the scale minimum).
dataset$Acens_p <- case_when(dataset$posemo_full_m == 1 ~ "left",
dataset$posemo_full_m == 5 ~ "right",
TRUE ~ "none")
table(dataset$Acens_p)
##
## left none right
## 601 158 6
BCLSM Negative Emotion
# BCLSM (model 3): Bayesian censored location-scale model for negative
# affect. Both the mean and the (log-linked) residual SD depend on trait
# neuroticism, with person random intercepts in each submodel.
# NOTE(review): paste() inserts spaces, so the cache path is literally
# "models/ <file> Kn_model_neuro3". It works, but paste0()/file.path()
# would give cleaner paths; left unchanged so cached fits still reload.
Kn_model_neuro3 <- brm(bf(negemo_full_m | cens(Acens) ~ neuro_t + (1|person_id),
sigma ~ neuro_t+ (1|person_id)), data = dataset,
iter = 7000, warmup = 2000, chains = 4,
control = list(adapt_delta = .99), init = 0.1,
file = paste("models/", params$file, "Kn_model_neuro3"))
print(Kn_model_neuro3)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m | cens(Acens) ~ neuro_t + (1 | person_id)
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 765)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.61 0.05 0.52 0.72 1.00 2978 5042
## sd(sigma_Intercept) 0.37 0.05 0.28 0.47 1.00 5607 10450
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.73 0.17 1.40 2.06 1.00 1416 3373
## sigma_Intercept -1.09 0.13 -1.35 -0.84 1.00 6833 10537
## neuro_t 0.17 0.06 0.06 0.28 1.00 1615 4024
## sigma_neuro_t 0.11 0.04 0.03 0.20 1.00 7049 11478
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(Kn_model_neuro3)


pp_check(Kn_model_neuro3)
## Using 10 posterior draws for ppc type 'dens_overlay' by default.
## Warning: Censored responses are not shown in 'pp_check'.

prior_summary(Kn_model_neuro3)
## prior class coef group resp dpar nlpar lb ub source
## (flat) b default
## (flat) b neuro_t (vectorized)
## (flat) b sigma default
## (flat) b neuro_t sigma (vectorized)
## student_t(3, 2, 2.5) Intercept default
## student_t(3, 0, 2.5) Intercept sigma default
## student_t(3, 0, 2.5) sd 0 default
## student_t(3, 0, 2.5) sd sigma 0 default
## student_t(3, 0, 2.5) sd person_id 0 (vectorized)
## student_t(3, 0, 2.5) sd Intercept person_id 0 (vectorized)
## student_t(3, 0, 2.5) sd person_id sigma 0 (vectorized)
## student_t(3, 0, 2.5) sd Intercept person_id sigma 0 (vectorized)
Model comparison
scale vs. no scale parameter
# Location-only reference model (no sigma submodel), used below for LOO
# comparison against the location-scale model Kn_model_neuro3.
# Fixed: 'inits' is deprecated in brms — renamed to 'init' (the rendered
# output itself warned about this).
# NOTE(review): the printed header below reports 8 chains / 32000 draws
# although this call requests chains = 4 — the cached fit on disk was
# apparently produced with different settings; confirm before reuse.
Kn_model_neuro2 <- brm(negemo_full_m | cens(Acens) ~ neuro_t + (1|person_id), data = dataset,
                       iter = 6000, warmup = 2000, chains = 4,
                       control = list(adapt_delta = .98), init = 0.1,
                       file = paste("models/", params$file, "Kn_model_neuro2"))
print(Kn_model_neuro2)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: negemo_full_m | cens(Acens) ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 765)
## Draws: 8 chains, each with iter = 6000; warmup = 2000; thin = 1;
## total post-warmup draws = 32000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.62 0.05 0.53 0.72 1.00 6600 12445
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.75 0.17 1.42 2.08 1.00 4809 9683
## neuro_t 0.17 0.06 0.06 0.28 1.00 4803 10156
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.52 0.01 0.49 0.55 1.00 41885 25715
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# LOO comparison: location-only (modelA) vs. location-scale (modelB).
modelA <- Kn_model_neuro2
modelB <- Kn_model_neuro3
modelA <- add_criterion(modelA, "loo")
modelB <- add_criterion(modelB, "loo")
# Fixed: the comparison table was stored in a variable named `loo`,
# which shadows loo::loo(); use a distinct name.
loo_tab <- loo_compare(modelA, modelB, criterion = "loo")
loo_tab <- as.data.frame(loo_tab)
loo_tab$Dataset <- params$file
loo_tab <- tibble::rownames_to_column(loo_tab, "model")
library("writexl")
write_xlsx(loo_tab, paste0("loo", params$file, ".xlsx"))
kable(loo_tab)
| model  | elpd_diff | se_diff  | elpd_loo  | se_elpd_loo | p_loo     | se_p_loo | looic    | se_looic | Dataset              |
|--------|-----------|----------|-----------|-------------|-----------|----------|----------|----------|----------------------|
| modelB | 0.00000   | 0.00000  | -596.5377 | 25.06314    | 163.20233 | 9.865290 | 1193.075 | 50.12628 | Dataset 3 public.csv |
| modelA | -40.51907 | 10.55582 | -637.0568 | 23.62287    | 97.39616  | 5.680872 | 1274.114 | 47.24574 | Dataset 3 public.csv |
censoring vs. no censoring
# Model 4: same location-scale structure but WITHOUT censoring, to test
# whether treating the scale-boundary responses as censored matters for
# the neuroticism effects.
Kn_model_neuro4 <- brm(bf(negemo_full_m ~ neuro_t + (1|person_id),
sigma ~ neuro_t+ (1|person_id)), data = dataset,
iter = 7000, warmup = 2000, chains = 4,
control = list(adapt_delta = .9999), init = 0,
file = paste("models/", params$file, "Kn_model_neuro4"))
print(Kn_model_neuro4)
## Warning: There were 4 divergent transitions after warmup. Increasing adapt_delta above 0.99 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m ~ neuro_t + (1 | person_id)
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 765)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.60 0.04 0.52 0.69 1.00 4707 8132
## sd(sigma_Intercept) 0.42 0.05 0.32 0.53 1.00 5332 9106
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.76 0.16 1.45 2.07 1.00 2930 5551
## sigma_Intercept -1.15 0.14 -1.42 -0.88 1.00 9526 12171
## neuro_t 0.16 0.05 0.06 0.27 1.00 3252 6114
## sigma_neuro_t 0.12 0.05 0.03 0.22 1.00 9986 12899
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
extract_param <- function(model, parameter) {
  # Format one posterior parameter as "estimate error [2.5%;97.5%]".
  s <- posterior_summary(model, variable = parameter)
  sprintf("%.2f %.2f [%.2f;%.2f]",
          s[, "Estimate"], s[, "Est.Error"], s[, "Q2.5"], s[, "Q97.5"])
}
# Results table comparing the censored (row 1) and uncensored (row 2)
# location-scale fits; negative-affect columns filled here, positive-
# affect columns later in the document.
results_Cens <- data.frame(matrix(nrow = 2,
ncol = 6+1))
names(results_Cens) <- c("model", "negemo_b_neuro", "negemo_b_neuro_sigma", "negemo_sigma",
"posemo_b_neuro", "posemo_b_neuro_sigma", "posemo_sigma"
)
results_Cens$model <- c("modelCensoring", "modelnoCensoring")
# Negative affect (NA)
results_Cens[1, "negemo_b_neuro"] <- extract_param(Kn_model_neuro3, "b_neuro_t")
results_Cens[1, "negemo_b_neuro_sigma"] <- extract_param(Kn_model_neuro3, "b_sigma_neuro_t")
results_Cens[1, "negemo_sigma"] <- extract_param(Kn_model_neuro3, "b_sigma_Intercept")
results_Cens[2, "negemo_b_neuro"] <- extract_param(Kn_model_neuro4, "b_neuro_t")
results_Cens[2, "negemo_b_neuro_sigma"] <- extract_param(Kn_model_neuro4, "b_sigma_neuro_t")
results_Cens[2, "negemo_sigma"] <- extract_param(Kn_model_neuro4, "b_sigma_Intercept")
BCLSM vs. model C (two-part model)
# Assign each person to a neuroticism quartile (computed on the distinct
# person-level values, then joined back to the observation level) for the
# two-part comparison model, which lets the random-intercept SD vary by
# quartile via gr(person_id, by = neuro_Q).
dataset <- dataset %>% left_join(dataset %>% distinct(person_id, neuro_t) %>% mutate(neuro_Q =Hmisc::cut2(neuro_t, g = 4)), by = c("person_id", "neuro_t"))
Kn_model_neuro_jinxed <- brm(bf(negemo_full_m | cens(Acens) ~ neuro_t + (1|gr(person_id, by = neuro_Q)),
sigma ~ neuro_t + (1|person_id)), data = dataset,
iter = 5000, warmup = 2000, chains = 4,
control = list(adapt_delta = .99), init = 0.1,
file = paste("models/", params$file, "Kn_model_neuro_jinxed"))
print(Kn_model_neuro_jinxed)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m | cens(Acens) ~ neuro_t + (1 | gr(person_id, by = neuro_Q))
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 765)
## Draws: 4 chains, each with iter = 5000; warmup = 2000; thin = 1;
## total post-warmup draws = 12000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept:neuro_Q[1.00,1.92)) 0.56 0.09 0.41 0.77 1.00 2232 4213
## sd(Intercept:neuro_Q[1.92,2.75)) 0.53 0.09 0.38 0.73 1.00 2157 4413
## sd(Intercept:neuro_Q[2.75,3.75)) 0.87 0.13 0.66 1.15 1.00 1459 2859
## sd(Intercept:neuro_Q[3.75,4.92]) 0.45 0.09 0.30 0.66 1.00 2702 5173
## sd(sigma_Intercept) 0.37 0.05 0.28 0.47 1.00 3271 5734
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.77 0.15 1.49 2.07 1.00 1200 2402
## sigma_Intercept -1.09 0.13 -1.34 -0.83 1.00 3781 5945
## neuro_t 0.15 0.05 0.06 0.25 1.00 1313 2697
## sigma_neuro_t 0.11 0.04 0.03 0.20 1.00 3822 6093
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# LOO comparison: BCLSM (modelB) vs. the two-part model (modelC), exported
# to Excel per dataset.
modelB <- Kn_model_neuro3
modelC <- Kn_model_neuro_jinxed
modelB <- add_criterion(modelB, "loo")
modelC <- add_criterion(modelC, "loo")
loo_c <- loo_compare(modelB,modelC, criterion = "loo")
loo_c <- as.data.frame(loo_c)
loo_c$Dataset <- params$file
loo_c <- tibble::rownames_to_column(loo_c, "model")
library("writexl")
write_xlsx(loo_c,paste0("loo_c", params$file, ".xlsx"))
kable(loo_c)
| model  | elpd_diff | se_diff  | elpd_loo  | se_elpd_loo | p_loo    | se_p_loo | looic    | se_looic | Dataset              |
|--------|-----------|----------|-----------|-------------|----------|----------|----------|----------|----------------------|
| modelC | 0.000000  | 0.000000 | -592.0251 | 24.63983    | 159.3673 | 9.11699  | 1184.050 | 49.27966 | Dataset 3 public.csv |
| modelB | -4.512554 | 2.388346 | -596.5377 | 25.06314    | 163.2023 | 9.86529  | 1193.075 | 50.12628 | Dataset 3 public.csv |
control for gender
# Control analysis: add gender to both the location and the scale submodel.
# Fixed: deprecated 'inits' argument renamed to 'init'.
dataset$gender <- as.factor(dataset$gender)
Kn_model_sex <- brm(bf(negemo_full_m | cens(Acens) ~ neuro_t + gender + (1|person_id),
                       sigma ~ neuro_t + gender), data = dataset,
                    iter = 9000, warmup = 2000, chains = 8,
                    control = list(adapt_delta = .99), init = 0.1,
                    file = paste("models/", params$file, "Kn_model_sex"))
print(Kn_model_sex)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m | cens(Acens) ~ neuro_t + gender + (1 | person_id)
## sigma ~ neuro_t + gender
## Data: dataset (Number of observations: 765)
## Draws: 8 chains, each with iter = 9000; warmup = 2000; thin = 1;
## total post-warmup draws = 56000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.62 0.05 0.53 0.72 1.00 11109 20495
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.81 0.19 1.43 2.19 1.00 6669 13725
## sigma_Intercept -0.76 0.09 -0.93 -0.59 1.00 50930 41704
## neuro_t 0.16 0.06 0.05 0.28 1.00 7446 14444
## gender1 -0.08 0.13 -0.33 0.17 1.00 6704 14069
## sigma_neuro_t 0.07 0.03 0.02 0.11 1.00 54723 42621
## sigma_gender1 -0.17 0.06 -0.28 -0.06 1.00 54219 42149
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
pp_check(Kn_model_sex)
## Using 10 posterior draws for ppc type 'dens_overlay' by default.
## Warning: Censored responses are not shown in 'pp_check'.

plot(Kn_model_sex)


BCLSM Positive Emotion
# BCLSM for positive affect: censored location-scale model, censoring on
# the PA indicator Acens_p.
# Fixed: deprecated 'inits' argument renamed to 'init'.
Kp_model_neuro3 <- brm(bf(posemo_full_m | cens(Acens_p) ~ neuro_t + (1|person_id),
                          sigma ~ neuro_t + (1|person_id)), data = dataset,
                       chains = 4,
                       control = list(adapt_delta = .95), init = 0.1,
                       iter = 7000, warmup = 2000,
                       file = paste("models/", params$file, "Kp_model_neuro3"))
print(Kp_model_neuro3)
## Warning: There were 1 divergent transitions after warmup. Increasing adapt_delta above 0.8 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: posemo_full_m | cens(Acens_p) ~ neuro_t + (1 | person_id)
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 765)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.34 0.20 0.99 1.77 1.00 4085 6585
## sd(sigma_Intercept) 0.42 0.11 0.21 0.64 1.00 3111 2789
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.04 0.49 -1.05 0.89 1.00 7685 11530
## sigma_Intercept 0.10 0.24 -0.38 0.57 1.00 13240 14867
## neuro_t -0.09 0.16 -0.41 0.23 1.00 7189 11520
## sigma_neuro_t -0.06 0.08 -0.22 0.10 1.00 12536 15026
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
pp_check(Kp_model_neuro3)
## Using 10 posterior draws for ppc type 'dens_overlay' by default.
## Warning: Censored responses are not shown in 'pp_check'.

plot(Kp_model_neuro3)


prior_summary(Kp_model_neuro3)
## prior class coef group resp dpar nlpar lb ub source
## (flat) b default
## (flat) b neuro_t (vectorized)
## (flat) b sigma default
## (flat) b neuro_t sigma (vectorized)
## student_t(3, 1, 2.5) Intercept default
## student_t(3, 0, 2.5) Intercept sigma default
## student_t(3, 0, 2.5) sd 0 default
## student_t(3, 0, 2.5) sd sigma 0 default
## student_t(3, 0, 2.5) sd person_id 0 (vectorized)
## student_t(3, 0, 2.5) sd Intercept person_id 0 (vectorized)
## student_t(3, 0, 2.5) sd person_id sigma 0 (vectorized)
## student_t(3, 0, 2.5) sd Intercept person_id sigma 0 (vectorized)
Model comparison
scale vs. no scale parameter
# Location-only reference model for positive affect (LOO comparison
# against the location-scale model Kp_model_neuro3).
# Fixed: deprecated 'inits' argument renamed to 'init'.
Kp_model_neuro2 <- brm(posemo_full_m | cens(Acens_p) ~ neuro_t + (1|person_id), data = dataset,
                       iter = 7000, warmup = 2000, chains = 4,
                       control = list(adapt_delta = .95), init = 0.1,
                       file = paste("models/", params$file, "Kp_model_neuro2"))
print(Kp_model_neuro2)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: posemo_full_m | cens(Acens_p) ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 765)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.44 0.18 1.13 1.81 1.00 7075 11464
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.03 0.46 -0.92 0.89 1.00 7676 11670
## neuro_t -0.14 0.15 -0.44 0.16 1.00 7544 12143
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 1.06 0.07 0.93 1.22 1.00 22369 15764
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
modelAp <- Kp_model_neuro2
modelBp <- Kp_model_neuro3
modelAp <- add_criterion(modelAp, "loo")
modelBp <- add_criterion(modelBp, "loo")
looP <- loo_compare(modelAp,modelBp, criterion = "loo")
looP <- as.data.frame(looP)
looP$Dataset <- params$file
looP <- tibble::rownames_to_column(looP, "model")
library("writexl")
write_xlsx(looP,paste0("looP", params$file, ".xlsx"))
kable(looP)
| model   | elpd_diff | se_diff  | elpd_loo  | se_elpd_loo | p_loo    | se_p_loo | looic    | se_looic | Dataset              |
|---------|-----------|----------|-----------|-------------|----------|----------|----------|----------|----------------------|
| modelBp | 0.000000  | 0.000000 | -412.6861 | 24.0345     | 86.52154 | 7.464080 | 825.3721 | 48.06901 | Dataset 3 public.csv |
| modelAp | -8.741227 | 4.464136 | -421.4273 | 24.4970     | 70.34715 | 7.020219 | 842.8546 | 48.99401 | Dataset 3 public.csv |
censoring vs. no censoring
# Uncensored counterpart for positive affect.
# Fixed: deprecated 'inits' argument renamed to 'init'.
# NOTE(review): the printed output below shows severe non-convergence
# (Rhat >> 1, thousands of divergent transitions) — this fit should be
# interpreted with caution.
Kp_model_neuro4 <- brm(bf(posemo_full_m ~ neuro_t + (1|person_id),
                          sigma ~ neuro_t + (1|person_id)), data = dataset,
                       chains = 4,
                       control = list(adapt_delta = .9999), init = 0,
                       iter = 7000, warmup = 2000,
                       file = paste("models/", params$file, "Kp_model_neuro4"))
print(Kp_model_neuro4)
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Warning: There were 2897 divergent transitions after warmup. Increasing adapt_delta above 0.9999 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: posemo_full_m ~ neuro_t + (1 | person_id)
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 765)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.18 0.12 0.02 0.32 3.75 4 12
## sd(sigma_Intercept) 11.17 2.08 8.23 14.96 3.17 4 11
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.05 0.03 1.00 1.08 3.53 4 9
## sigma_Intercept -2.00 0.91 -3.28 -0.98 4.11 4 14
## neuro_t -0.00 0.01 -0.01 0.00 3.66 4 13
## sigma_neuro_t -0.32 0.23 -0.63 -0.01 2.97 5 12
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Positive affect (PA): censored (row 1) vs. uncensored (row 2) estimates.
# NOTE(review): Kp_model_neuro4 did not converge (see Rhat warnings above),
# so the row-2 PA entries should be interpreted with caution.
results_Cens[1, "posemo_b_neuro"] <- extract_param(Kp_model_neuro3, "b_neuro_t")
results_Cens[1, "posemo_b_neuro_sigma"] <- extract_param(Kp_model_neuro3, "b_sigma_neuro_t")
results_Cens[1, "posemo_sigma"] <- extract_param(Kp_model_neuro3, "b_sigma_Intercept")
results_Cens[2, "posemo_b_neuro"] <- extract_param(Kp_model_neuro4, "b_neuro_t")
results_Cens[2, "posemo_b_neuro_sigma"] <- extract_param(Kp_model_neuro4, "b_sigma_neuro_t")
results_Cens[2, "posemo_sigma"] <- extract_param(Kp_model_neuro4, "b_sigma_Intercept")
BCLSM vs. model C (two-part model)
# Two-part comparison model ("model C") for positive affect.
# Fixed: the original censored on Acens — the NEGATIVE-affect censoring
# indicator — instead of Acens_p. That mismatch is what triggered the
# "Not all models have the same y variable" warning in the loo comparison
# below. With brms.file_refit = "on_change" the cached fit will be
# refit; the printed output below still reflects the mis-specified model.
Kp_model_neuro_jinxed <- brm(bf(posemo_full_m | cens(Acens_p) ~ neuro_t + (1|gr(person_id, by = neuro_Q)),
                                sigma ~ neuro_t + (1|person_id)), data = dataset,
                             iter = 5000, warmup = 2000, chains = 4,
                             control = list(adapt_delta = .99), init = 0.1,
                             file = paste("models/", params$file, "Kp_model_neuro_jinxed"))
print(Kp_model_neuro_jinxed)
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Warning: There were 839 divergent transitions after warmup. Increasing adapt_delta above 0.99 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: posemo_full_m | cens(Acens) ~ neuro_t + (1 | gr(person_id, by = neuro_Q))
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 765)
## Draws: 4 chains, each with iter = 5000; warmup = 2000; thin = 1;
## total post-warmup draws = 12000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept:neuro_Q[1.00,1.92)) 0.28 0.20 0.10 0.61 3.38 4 14
## sd(Intercept:neuro_Q[1.92,2.75)) 0.50 0.27 0.31 0.96 3.27 4 11
## sd(Intercept:neuro_Q[2.75,3.75)) 0.44 0.20 0.24 0.73 4.09 4 11
## sd(Intercept:neuro_Q[3.75,4.92]) 0.35 0.31 0.07 0.86 2.95 5 20
## sd(sigma_Intercept) 9.38 2.79 7.50 14.32 2.31 5 11
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.07 0.04 1.02 1.11 3.37 4 11
## sigma_Intercept -2.18 0.45 -2.94 -1.65 3.42 4 11
## neuro_t 0.00 0.01 -0.01 0.01 3.25 4 15
## sigma_neuro_t -0.33 0.18 -0.53 -0.07 3.39 4 11
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# LOO comparison for positive affect: BCLSM (modelB) vs. two-part (modelC).
modelB <- Kp_model_neuro3
modelC <- Kp_model_neuro_jinxed
modelB <- add_criterion(modelB, "loo")
modelC <- add_criterion(modelC, "loo")
loo_cP <- loo_compare(modelB, modelC, criterion = "loo")
## Warning: Not all models have the same y variable. ('yhash' attributes do not match)
loo_cP <- as.data.frame(loo_cP)
loo_cP$Dataset <- params$file
# Fixed: this step was commented out and referenced loo_c (the
# negative-affect table) instead of loo_cP; restored for consistency
# with the other model-comparison sections.
loo_cP <- tibble::rownames_to_column(loo_cP, "model")
library("writexl")
write_xlsx(loo_cP, paste0("loo_cP", params$file, ".xlsx"))
kable(loo_cP)
| model  | elpd_diff | se_diff  | elpd_loo   | se_elpd_loo | p_loo      | se_p_loo  | looic     | se_looic   | Dataset              |
|--------|-----------|----------|------------|-------------|------------|-----------|-----------|------------|----------------------|
| modelB | 0.000     | 0.0000   | -412.6861  | 24.0345     | 86.52154   | 7.46408   | 825.3721  | 48.06901   | Dataset 3 public.csv |
| modelC | -2201.829 | 762.0937 | -2614.5153 | 761.8813    | 5370.30633 | 785.31845 | 5229.0306 | 1523.76258 | Dataset 3 public.csv |
# NOTE(review): identical redefinition of extract_param() from earlier in
# the document; harmless (same body), but one definition would suffice.
# Formats one posterior parameter as "estimate error [2.5%;97.5%]".
extract_param <- function(model, parameter) {
ci <- posterior_summary(model, variable = parameter)
est <- sprintf("%.2f %.2f [%.2f;%.2f]", ci[,"Estimate"],ci[,"Est.Error"], ci[,"Q2.5"], ci[,"Q97.5"])
est
}
# Master results table: one row per variability model (BCLSM variants,
# RVI, weighted RVI, SD, gender control), filled column-by-column below
# and exported to Excel at the end of the section.
results_K <- data.frame(matrix(nrow = 7,
ncol = 8+1))
names(results_K) <- c("model", "negemo_b_neuro", "negemo_b_neuro_sigma", "negemo_sigma", "b_neg_sigma_sex",
"posemo_b_neuro", "posemo_b_neuro_sigma", "posemo_sigma", "b_pos_sigma_sex"
)
results_K$model <- c("model1", "model2", "model3",
"RSD", "RSD_weight", "SD", "gender")
# Negative affect (NA)
results_K[2, "negemo_b_neuro"] <- extract_param(Kn_model_neuro2, "b_neuro_t")
results_K[2, "negemo_sigma"] <- extract_param(Kn_model_neuro2, "sigma")
results_K[3, "negemo_b_neuro"] <- extract_param(Kn_model_neuro3, "b_neuro_t")
results_K[3, "negemo_b_neuro_sigma"] <- extract_param(Kn_model_neuro3, "b_sigma_neuro_t")
results_K[3, "negemo_sigma"] <- extract_param(Kn_model_neuro3, "b_sigma_Intercept")
# Gender control model
results_K[7, "negemo_b_neuro"] <- extract_param(Kn_model_sex, "b_neuro_t")
results_K[7, "negemo_b_neuro_sigma"] <- extract_param(Kn_model_sex, "b_sigma_neuro_t")
results_K[7, "negemo_sigma"] <- extract_param(Kn_model_sex, "b_sigma_Intercept")
results_K[7, "b_neg_sigma_sex"] <- extract_param(Kn_model_sex, "b_sigma_gender1")
# Positive affect (PA)
results_K[2, "posemo_b_neuro"] <- extract_param(Kp_model_neuro2, "b_neuro_t")
results_K[2, "posemo_sigma"] <- extract_param(Kp_model_neuro2, "sigma")
results_K[3, "posemo_b_neuro"] <- extract_param(Kp_model_neuro3, "b_neuro_t")
results_K[3, "posemo_b_neuro_sigma"] <- extract_param(Kp_model_neuro3, "b_sigma_neuro_t")
results_K[3, "posemo_sigma"] <- extract_param(Kp_model_neuro3, "b_sigma_Intercept")
RVI (Relative Variability Index)
# One row per person: columns 2:5 are assumed to be the person-level
# variables (id + trait scores) — TODO confirm against the codebook;
# selecting columns by name would be more robust to column reordering.
data_w <- unique(dataset[,2:5])
Unweighted RVI
# Person-level relative SD (RVI) of negative affect, bounded by the scale
# minimum (1) and maximum (5). seq_len() replaces 1:nrow() (safe for the
# empty case); TRUE replaces the shorthand T.
data_w$RSD_NA <- NA
for (i in seq_len(nrow(data_w))) {
  data_w$RSD_NA[i] <- relativeSD(dataset$negemo_full_m[dataset$person_id == data_w$person_id[i]],
                                 1, 5)
}
range(data_w$RSD_NA, na.rm = TRUE)
## [1] 0.06659452 1.00000000
mean(data_w$RSD_NA, na.rm = TRUE)
## [1] 0.2698548
sd(data_w$RSD_NA, na.rm = TRUE)
## [1] 0.1216304
# Log-transform so a Gaussian regression on neuroticism is reasonable.
data_w$logrsd_n <- log(data_w$RSD_NA)
#plot(data_w$logrsd_n)
m_rvi_na <- brm(logrsd_n ~ neuro_t, data = data_w,
                file = paste("models/", params$file, "Kn_model_logrsd_uw"))
print(m_rvi_na)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_n ~ neuro_t
## Data: data_w (Number of observations: 111)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -1.62 0.11 -1.83 -1.40 1.00 4386 3078
## neuro_t 0.08 0.04 0.01 0.15 1.00 4256 2870
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.42 0.03 0.37 0.48 1.00 4371 3175
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Store the unweighted RVI effect for negative affect.
# NOTE(review): column 3 is "negemo_b_neuro_sigma" while the PA value at
# the end of this block goes into column 6 ("posemo_b_neuro"); the numeric
# indices are kept as-is, but confirm the intended target columns — named
# indexing would make the intent explicit.
results_K[4,3] <- extract_param(m_rvi_na, "b_neuro_t")
# Person-level relative SD of positive affect (scale bounds 1-5).
data_w$RSD_PA <- NA
for (i in seq_len(nrow(data_w))) {
  data_w$RSD_PA[i] <- relativeSD(dataset$posemo_full_m[dataset$person_id == data_w$person_id[i]],
                                 1, 5)
}
# Fixed: without na.rm the range propagated NaN — persons whose mean sits
# at a scale boundary have an undefined relative SD — which is what the
# original output below showed.
range(data_w$RSD_PA, na.rm = TRUE)
## [1] NaN NaN
data_w$logrsd_p <- log(data_w$RSD_PA)
m_rvi_pa <- brm(logrsd_p ~ neuro_t, data = data_w,
                file = paste("models/", params$file, "Kp_model_logrsd_uw"))
print(m_rvi_pa)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_p ~ neuro_t
## Data: data_w (Number of observations: 59)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.42 0.18 -0.77 -0.06 1.00 3575 2785
## neuro_t -0.02 0.06 -0.14 0.11 1.00 3766 2909
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.48 0.05 0.40 0.58 1.00 3493 2837
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[4,6] <- extract_param(m_rvi_pa, "b_neuro_t")
Weighted RVI
# Person means of NA/PA, needed for the weighted RVI below.
data_w$mean_NA <- NA
for (i in seq_len(nrow(data_w))) {
  data_w$mean_NA[i] <- mean(dataset$negemo_full_m[dataset$person_id == data_w$person_id[i]],
                            na.rm = TRUE)
}
mean(data_w$mean_NA)
## [1] 2.231716
sd(data_w$mean_NA)
## [1] 0.6386588
data_w$mean_PA <- NA
for (i in seq_len(nrow(data_w))) {
  data_w$mean_PA[i] <- mean(dataset$posemo_full_m[dataset$person_id == data_w$person_id[i]],
                            na.rm = TRUE)
}
mean(data_w$mean_PA)
## [1] 1.214796
sd(data_w$mean_PA)
## [1] 0.4414877
# Weight = squared maximum possible SD given the person's mean and number
# of non-missing observations (w as reported in the paper).
data_w$weight_NA <- NA
for (i in seq_len(nrow(data_w))) {
  if (!is.na(data_w$mean_NA[i])) {
    data_w$weight_NA[i] <- maximumSD(data_w$mean_NA[i], # mean
                                     1, # minimum
                                     5, # maximum
                                     sum(!is.na(dataset$negemo_full_m[dataset$person_id == data_w$person_id[i]]))
    )
    # w as reported in the paper
    data_w$weight_NA[i] <- data_w$weight_NA[i]^2
  }
}
mean(data_w$weight_NA)
## [1] 3.084018
sd(data_w$weight_NA)
## [1] 1.0337
range(data_w$weight_NA)
## [1] 0.000000 4.821181
# Weighted RVI regression for negative affect; weights() applies the
# squared maximum-SD weights computed above.
m_rvi_na_w <- brm(logrsd_n| weights(weight_NA) ~ neuro_t, data= data_w,
file = paste("models/", params$file, "Kn_model_logrsd"))
## Warning: Rows containing NAs were excluded from the model.
print(m_rvi_na_w)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_n | weights(weight_NA) ~ neuro_t
## Data: data_w (Number of observations: 111)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -1.52 0.06 -1.63 -1.41 1.00 3501 2795
## neuro_t 0.06 0.02 0.02 0.10 1.00 3445 2588
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.37 0.01 0.34 0.40 1.00 3827 2670
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[5,3] <- extract_param(m_rvi_na_w, "b_neuro_t")
# Same maximum-SD weighting for positive affect.
data_w$weight_PA <- NA
for (i in seq_len(nrow(data_w))) {
  if (!is.na(data_w$mean_PA[i])) {
    data_w$weight_PA[i] <- maximumSD(data_w$mean_PA[i], # mean
                                     1, # minimum
                                     5, # maximum
                                     sum(!is.na(dataset$posemo_full_m[dataset$person_id == data_w$person_id[i]]))
    )
    # w as reported in the paper
    data_w$weight_PA[i] <- data_w$weight_PA[i]^2
  }
}
# Weighted RVI regression for positive affect.
m_rvi_pa_w <- brm(logrsd_p | weights(weight_PA) ~ neuro_t, data = data_w,
                  file = paste("models/", params$file, "Kp_model_logrsd"))
## Warning: Rows containing NAs were excluded from the model.
print(m_rvi_pa_w)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_p | weights(weight_PA) ~ neuro_t
## Data: data_w (Number of observations: 59)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.49 0.19 -0.87 -0.12 1.00 3783 2685
## neuro_t -0.13 0.07 -0.27 -0.00 1.00 3707 3022
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.47 0.05 0.39 0.57 1.00 3933 3128
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[5,6] <- extract_param(m_rvi_pa_w, "b_neuro_t")
SD
# Raw within-person SDs as the non-relative variability measure.
data_w$sd_NA <- NA
for (i in seq_len(nrow(data_w))) {
  data_w$sd_NA[i] <- sd(dataset$negemo_full_m[dataset$person_id == data_w$person_id[i]],
                        na.rm = TRUE)
}
data_w$sd_PA <- NA
for (i in seq_len(nrow(data_w))) {
  data_w$sd_PA[i] <- sd(dataset$posemo_full_m[dataset$person_id == data_w$person_id[i]],
                        na.rm = TRUE)
}
mean(data_w$sd_NA)
## [1] 0.4657867
mean(data_w$sd_PA)
## [1] 0.227631
# Zero SDs (constant responders) would give log(0) = -Inf; drop them.
data_w$sd_PA[data_w$sd_PA == 0] <- NA
data_w$sd_NA[data_w$sd_NA == 0] <- NA
data_w$logsd_NA <- log(data_w$sd_NA)
data_w$logsd_PA <- log(data_w$sd_PA)
m_sd_na <- brm(logsd_NA ~ neuro_t, data = data_w,
               file = paste("models/", params$file, "Kn_model_logsd"))
## Warning: Rows containing NAs were excluded from the model.
m_sd_na
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logsd_NA ~ neuro_t
## Data: data_w (Number of observations: 111)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -1.18 0.14 -1.45 -0.91 1.00 4025 2983
## neuro_t 0.11 0.05 0.02 0.19 1.00 3989 3027
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.53 0.04 0.46 0.61 1.00 4187 3174
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[6,3] <- extract_param(m_sd_na, "b_neuro_t")
# Same log-SD regression for positive affect. Only 59 observations survive:
# most persons hit the left-censoring floor (see the Acens_p table above),
# leaving many with zero within-person variance that was set to NA.
m_sd_pa <- brm(logsd_PA ~ neuro_t, data= data_w,
file = paste("models/", params$file, "Kp_model_logsd"))
## Warning: Rows containing NAs were excluded from the model.
m_sd_pa
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logsd_PA ~ neuro_t
## Data: data_w (Number of observations: 59)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.78 0.25 -1.28 -0.29 1.00 3853 2942
## neuro_t -0.09 0.09 -0.25 0.08 1.00 3849 3240
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.65 0.06 0.54 0.79 1.00 3281 2824
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Store the neuroticism slope for positive affect (row 6, column 6)
results_K[6,6] <- extract_param(m_sd_pa, "b_neuro_t")
# Export the aggregated results table to "<dataset>.xlsx"
library(writexl)
write_xlsx(results_K, paste0(params$file, ".xlsx"))
Incremental Validity of SD
# Unconditional (intercept-only) BCLSM for negative affect: random intercepts
# for both the mean and the log residual SD, no neuroticism predictor.
# The per-person location/scale estimates extracted below feed the
# incremental-validity analysis.
# FIX: the cache file was previously the bare string "na_noneurot", which
# (a) bypassed the models/ directory used by every other cached model in this
# script and (b) was shared across datasets, so a cached fit from one dataset
# could silently be reused for another (brms reloads an existing file).
# The file name now follows the script-wide "models/<dataset> <name>" pattern.
na_noneurot <- brm(bf(negemo_full_m | cens(Acens) ~ (1|person_id),
sigma ~ (1|person_id)), data = dataset,
iter = 7000, warmup = 2000, chains = 4,
control = list(adapt_delta = .99), init = 0.1,
file = paste("models/", params$file, "na_noneurot"))
print(na_noneurot)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m | cens(Acens) ~ (1 | person_id)
## sigma ~ (1 | person_id)
## Data: dataset (Number of observations: 765)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 112)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.64 0.05 0.55 0.75 1.00 3807 7154
## sd(sigma_Intercept) 0.38 0.05 0.29 0.48 1.00 5990 10572
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 2.21 0.06 2.09 2.34 1.00 2226 4780
## sigma_Intercept -0.77 0.05 -0.87 -0.68 1.00 8403 11523
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Extract per-person posterior summaries from the unconditional model:
# "Intercept" = person-level mean, "sigma_Intercept" = person-level
# log-scale (sigma uses a log link, see the model output above).
# FIX: TRUE instead of the reassignable alias T (same below for all = T).
rans <- coef(na_noneurot, summary = TRUE)
rans_i <- as.data.frame(rans$person_id[,,"Intercept"]) %>% tibble::rownames_to_column("person_id")
rans_s <- as.data.frame(rans$person_id[,,"sigma_Intercept"]) %>% tibble::rownames_to_column("person_id")
# Sanity check: all three tables cover the same 112 persons
nrow(rans_s)
## [1] 112
nrow(rans_i)
## [1] 112
nrow(data_w)
## [1] 112
dat <- merge(rans_s, rans_i, all = TRUE, by= "person_id")
dat <- merge(dat, data_w, all = TRUE, by= "person_id")
# Positional rename: the first merge suffixes the duplicated "Estimate"
# columns with .x/.y, so column 2 is the sigma_Intercept estimate (log scale)
# and column 6 the Intercept estimate. NOTE(review): fragile if the merge
# inputs ever change -- renaming by column name would be safer.
names(dat)[2] <- "Est.SD"
names(dat)[6] <- "Est.M"
# Incremental validity: does the person-level scale estimate (Est.SD)
# predict trait neuroticism over and above the person-level mean (Est.M)?
fit1 <- lm(neuro_t ~ Est.SD + Est.M , data=dat)
summary(fit1)
##
## Call:
## lm(formula = neuro_t ~ Est.SD + Est.M, data = dat)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.99126 -0.88123 -0.04674 0.80651 2.02984
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 2.3903 0.6637 3.601 0.000478 ***
## Est.SD 0.5601 0.4096 1.368 0.174222
## Est.M 0.3759 0.1953 1.925 0.056865 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 1.061 on 109 degrees of freedom
## Multiple R-squared: 0.09595, Adjusted R-squared: 0.07936
## F-statistic: 5.784 on 2 and 109 DF, p-value: 0.004097
# Baseline model for the comparison: neuroticism predicted from the
# person-level mean alone
fit1.2 <- lm(neuro_t ~ Est.M , data=dat)
summary(fit1.2)
##
## Call:
## lm(formula = neuro_t ~ Est.M, data = dat)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.78033 -0.91907 -0.06441 0.87554 2.05186
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 1.6467 0.3821 4.309 3.58e-05 ***
## Est.M 0.5168 0.1666 3.102 0.00244 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 1.065 on 110 degrees of freedom
## Multiple R-squared: 0.08044, Adjusted R-squared: 0.07208
## F-statistic: 9.622 on 1 and 110 DF, p-value: 0.002443
# Nested-model F test: does adding Est.SD significantly improve on the
# mean-only model? (Here it does not: p = .17)
aov <- anova(fit1.2, fit1)
aov
## Analysis of Variance Table
##
## Model 1: neuro_t ~ Est.M
## Model 2: neuro_t ~ Est.SD + Est.M
## Res.Df RSS Df Sum of Sq F Pr(>F)
## 1 110 124.77
## 2 109 122.66 1 2.105 1.8706 0.1742
# Increment in R^2 attributable to Est.SD
summary(fit1)$r.squared-summary(fit1.2)$r.squared
## [1] 0.01551462
# Collect the incremental-validity results into a one-row table and export it.
results_SDin <- data.frame(matrix(nrow = 1, ncol = 9))
names(results_SDin) <- c("Dataset","b_SD","Err.SD","p(b_SD)","b_M","Err.M","p(b_M)","ΔR²", "p")
# Full-model coefficient table: row 2 = Est.SD, row 3 = Est.M;
# column 2 = std. error, column 4 = p value
sum_full <- summary(fit1)
coef_full <- sum_full$coefficients
results_SDin$Dataset <- params$file
results_SDin$b_SD <- fit1$coefficients[2]
results_SDin$Err.SD <- coef_full[2,2]
results_SDin$`p(b_SD)` <- coef_full[2,4]
results_SDin$b_M <- fit1$coefficients[3]
results_SDin$Err.M <- coef_full[3,2]
results_SDin$`p(b_M)` <- coef_full[3,4]
# R^2 increment over the mean-only model, with the model-comparison p value
results_SDin$`ΔR²` <- sum_full$r.squared - summary(fit1.2)$r.squared
results_SDin$`p` <- aov$`Pr(>F)`[2]
library(writexl)
write_xlsx(results_SDin, paste0("SD", params$file, ".xlsx"))