Data Preparation
# Plot NA
ggplot(daily_w, aes(panas_n)) +
  geom_histogram(binwidth = 0.1) +
  ggtitle("Distribution of Negative Emotion Study 12") +
  xlab("Negative Emotion") + ylab("Frequency") +
  scale_x_continuous(breaks = seq(1, 5, 1)) +
  scale_y_continuous(breaks = seq(0, 4000, 500)) +
  theme(text = element_text(size = 10, family = "Times New Roman"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  coord_cartesian(xlim = c(1, 5), ylim = c(0, 4000))

ggsave("NAS12.png", width = 4, height = 3)
# Plot PA
ggplot(daily_w, aes(panas_p)) +
  geom_histogram(binwidth = 0.1) +
  ggtitle("Distribution of Positive Emotion Study 12") +
  xlab("Positive Emotion") + ylab("Frequency") +
  scale_x_continuous(breaks = seq(1, 5, 1)) +
  scale_y_continuous(breaks = seq(0, 1500, 200)) +
  theme(text = element_text(size = 10, family = "Times New Roman"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  coord_cartesian(xlim = c(1, 5), ylim = c(0, 1500))

ggsave("PAS12.png", width = 4, height = 3)
Censoring
daily_w$Acens <- case_when(daily_w$panas_n == 1 ~ "left",
                           daily_w$panas_n == 5 ~ "right",
                           TRUE ~ "none")
table(daily_w$Acens)
##
## left none right
## 3819 22870 1
daily_w$Acens_p <- case_when(daily_w$panas_p == 1 ~ "left",
                             daily_w$panas_p == 5 ~ "right",
                             TRUE ~ "none")
table(daily_w$Acens_p)
##
## left none right
## 265 26393 32
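As a quick descriptive check (not part of the original script), the censored shares can also be expressed as proportions using only base R:
# proportion of left-/right-censored negative and positive affect ratings
round(prop.table(table(daily_w$Acens)), 3)
round(prop.table(table(daily_w$Acens_p)), 3)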
mean(daily_w$panas_n)
## [1] 1.804633
mean(daily_w$panas_p)
## [1] 2.8509
mean(daily_w$neuro, na.rm=T)
## [1] 3.185307
sd(daily_w$neuro, na.rm=T)
## [1] 0.766614
range(daily_w$panas_p)
## [1] 1 5
BCLSM Negative Emotion
wn_model_neuro3 <- brm(bf(panas_n | cens(Acens) ~ neuro + (1|id),
                          sigma ~ neuro + (1|id)), data = daily_w,
                       iter = 7000, warmup = 2000, chains = 4, init = 0.1,
                       file = "wn_model_neuro3")
print(wn_model_neuro3)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: panas_n | cens(Acens) ~ neuro + (1 | id)
## sigma ~ neuro + (1 | id)
## Data: daily_w (Number of observations: 21548)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~id (Number of levels: 865)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.53 0.01 0.50 0.56 1.00 2808 5334
## sd(sigma_Intercept) 0.33 0.01 0.31 0.35 1.00 7232 11073
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.84 0.08 0.69 1.00 1.00 1415 3160
## sigma_Intercept -0.92 0.05 -1.03 -0.82 1.00 6896 11742
## neuro 0.27 0.02 0.23 0.32 1.00 1441 2912
## sigma_neuro 0.10 0.02 0.06 0.13 1.00 6500 11199
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
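Because sigma is modeled with a log link, the sigma_neuro coefficient of 0.10 corresponds to a multiplicative effect on the residual SD of roughly exp(0.10) ≈ 1.11 per one-unit increase in neuroticism. A one-line check (using brms::fixef; the value should match the summary above) would be:
exp(fixef(wn_model_neuro3)["sigma_neuro", "Estimate"]) # approximately 1.11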
plot(wn_model_neuro3)


prior_summary(wn_model_neuro3)
##                   prior     class      coef group resp  dpar nlpar lb ub       source
##                  (flat)         b                                               default
##                  (flat)         b     neuro                                (vectorized)
##                  (flat)         b                      sigma                    default
##                  (flat)         b     neuro            sigma               (vectorized)
##  student_t(3, 1.6, 2.5) Intercept                                               default
##    student_t(3, 0, 2.5) Intercept                      sigma                    default
##    student_t(3, 0, 2.5)        sd                                   0            default
##    student_t(3, 0, 2.5)        sd                      sigma        0            default
##    student_t(3, 0, 2.5)        sd              id                   0       (vectorized)
##    student_t(3, 0, 2.5)        sd Intercept    id                   0       (vectorized)
##    student_t(3, 0, 2.5)        sd              id     sigma         0       (vectorized)
##    student_t(3, 0, 2.5)        sd Intercept    id     sigma         0       (vectorized)
#posterior_summary(wn_model_neuro3)
Model comparison
scale vs. no scale parameter
wn_model_neuro2 <- brm(panas_n | cens(Acens) ~ neuro + (1|id), data = daily_w,
                       iter = 2000, warmup = 1000,
                       file = "wn_model_neuro2")
print(wn_model_neuro2)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: panas_n | cens(Acens) ~ neuro + (1 | id)
## Data: daily_w (Number of observations: 21548)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Group-Level Effects:
## ~id (Number of levels: 865)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.54 0.01 0.51 0.56 1.01 591 1228
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.80 0.08 0.64 0.95 1.01 398 791
## neuro 0.29 0.02 0.24 0.33 1.01 380 915
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.59 0.00 0.59 0.60 1.00 6381 2894
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(wn_model_neuro2)

modelA <- wn_model_neuro2
modelB <- wn_model_neuro3
modelA <- add_criterion(modelA, "loo")
modelB <- add_criterion(modelB, "loo")
loo <- loo_compare(modelA,modelB, criterion = "loo")
loo <- as.data.frame(loo)
loo$Dataset <- "12"
loo <- tibble::rownames_to_column(loo, "model")
library("writexl")
write_xlsx(loo,"~/looDataset12.xlsx")
kable(loo)
|model  | elpd_diff|  se_diff|  elpd_loo| se_elpd_loo|     p_loo|  se_p_loo|    looic| se_looic|Dataset |
|:------|---------:|--------:|---------:|-----------:|---------:|---------:|--------:|--------:|:-------|
|modelB |     0.000|  0.00000| -18194.94|    129.7586| 1664.4616| 32.437721| 36389.88| 259.5173|12      |
|modelA | -1196.815| 60.72739| -19391.76|    125.8291|  809.8647|  9.544642| 38783.51| 251.6582|12      |
censoring vs. no censoring
wn_model_neuro4 <- brm(bf(panas_n ~ neuro + (1|id),
                          sigma ~ neuro + (1|id)), data = daily_w,
                       iter = 7000, warmup = 2000, chains = 4, init = 0.1,
                       file = "wn_model_neuro4")
print(wn_model_neuro4)
## Warning: There were 1757 divergent transitions after
## warmup. Increasing adapt_delta above 0.8 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: panas_n ~ neuro + (1 | id)
## sigma ~ neuro + (1 | id)
## Data: daily_w (Number of observations: 21548)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~id (Number of levels: 865)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.46 0.01 0.44 0.49 1.00 1078 1991
## sd(sigma_Intercept) 0.46 0.01 0.44 0.49 1.00 2878 5336
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.01 0.07 0.88 1.14 1.00 498 1348
## sigma_Intercept -1.44 0.07 -1.58 -1.31 1.00 2063 3304
## neuro 0.24 0.02 0.20 0.28 1.00 578 1446
## sigma_neuro 0.21 0.02 0.16 0.25 1.00 1810 2755
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
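Given the divergent-transitions warning above, one option (a sketch only, not run here) would be to refit with a higher adapt_delta, as the warning itself suggests; brms::update() passes these arguments on to the sampler:
# possible refit with tighter sampler settings; the results above are from the original run
wn_model_neuro4_refit <- update(wn_model_neuro4,
                                control = list(adapt_delta = 0.99),
                                iter = 7000, warmup = 2000, chains = 4)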
# Helper: format a parameter's posterior mean, posterior SD, and 95% CI as a single string
extract_param <- function(model, parameter) {
  ci <- posterior_summary(model, variable = parameter)
  est <- sprintf("%.2f %.2f [%.2f;%.2f]", ci[,"Estimate"], ci[,"Est.Error"], ci[,"Q2.5"], ci[,"Q97.5"])
  est
}
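For instance, applied to the neuroticism slope of the location-scale model above, the helper returns a single formatted string whose values correspond to the summary printed earlier:
extract_param(wn_model_neuro3, "b_neuro") # approximately "0.27 0.02 [0.23;0.32]"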
results_Cens <- data.frame(matrix(nrow = 2,      # models (with vs. without censoring)
                                  ncol = 6 + 1)) # 2 affects x 3 columns each
names(results_Cens) <- c("model", "negemo_b_neuro", "negemo_b_neuro_sigma", "negemo_sigma",
                         "posemo_b_neuro", "posemo_b_neuro_sigma", "posemo_sigma")
results_Cens$model <- c("modelCensoring", "modelnoCensoring")
#NA
results_Cens[1, "negemo_b_neuro"] <- extract_param(wn_model_neuro3, "b_neuro")
results_Cens[1, "negemo_b_neuro_sigma"] <- extract_param(wn_model_neuro3, "b_sigma_neuro")
results_Cens[1, "negemo_sigma"] <- extract_param(wn_model_neuro3, "b_sigma_Intercept")
results_Cens[2, "negemo_b_neuro"] <- extract_param(wn_model_neuro4, "b_neuro")
results_Cens[2, "negemo_b_neuro_sigma"] <- extract_param(wn_model_neuro4, "b_sigma_neuro")
results_Cens[2, "negemo_sigma"] <- extract_param(wn_model_neuro4, "b_sigma_Intercept")
BCLSM vs. model C (two-part model)
daily_w <- daily_w %>%
  left_join(daily_w %>% distinct(id, neuro) %>% mutate(neuro_Q = Hmisc::cut2(neuro, g = 4)),
            by = c("id", "neuro"))
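A brief sanity check on the quartile grouping (not in the original script) could count persons per neuroticism quartile before fitting model C:
# persons per neuroticism quartile used for the by-group random intercepts
daily_w %>% distinct(id, neuro_Q) %>% count(neuro_Q)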
wn_model_neuro_modelC <- brm(bf(panas_n | cens(Acens) ~ neuro + (1|gr(id, by = neuro_Q)),
                                sigma ~ neuro + (1|id)), data = daily_w,
                             iter = 5000, warmup = 2000, chains = 4,
                             control = list(adapt_delta = .95), init = 0.1,
                             file = "wn_model_neuro_modelC")
print(wn_model_neuro_modelC)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: panas_p | cens(Acens) ~ neuro + (1 | gr(id, by = neuro_Q))
## sigma ~ neuro + (1 | id)
## Data: daily_w (Number of observations: 21548)
## Draws: 4 chains, each with iter = 5000; warmup = 2000; thin = 1;
## total post-warmup draws = 12000
##
## Group-Level Effects:
## ~id (Number of levels: 865)
## Estimate Est.Error l-95% CI u-95% CI Rhat
## sd(Intercept:neuro_Q[1.14,2.71)) 0.53 0.03 0.48 0.59 1.00
## sd(Intercept:neuro_Q[2.71,3.29)) 0.51 0.03 0.46 0.56 1.00
## sd(Intercept:neuro_Q[3.29,4.00)) 0.54 0.03 0.49 0.59 1.00
## sd(Intercept:neuro_Q[4.00,5.00]) 0.57 0.03 0.50 0.64 1.00
## sd(sigma_Intercept) 0.30 0.01 0.28 0.32 1.00
## Bulk_ESS Tail_ESS
## sd(Intercept:neuro_Q[1.14,2.71)) 2070 4042
## sd(Intercept:neuro_Q[2.71,3.29)) 1927 3484
## sd(Intercept:neuro_Q[3.29,4.00)) 1320 2144
## sd(Intercept:neuro_Q[4.00,5.00]) 1740 3208
## sd(sigma_Intercept) 4537 6908
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 3.13 0.08 2.97 3.28 1.00 1006 1868
## sigma_Intercept -0.58 0.05 -0.67 -0.48 1.00 4468 6123
## neuro -0.12 0.02 -0.17 -0.08 1.00 1020 1833
## sigma_neuro -0.01 0.02 -0.04 0.02 1.00 4740 6376
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
modelB <- wn_model_neuro3
modelC <- wn_model_neuro_modelC
modelB <- add_criterion(modelB, "loo")
modelC <- add_criterion(modelC, "loo")
loo_c <- loo_compare(modelB,modelC, criterion = "loo")
## Warning: Not all models have the same y variable. ('yhash' attributes do not
## match)
loo_c <- as.data.frame(loo_c)
loo_c$Dataset <- "12"
loo_c <- tibble::rownames_to_column(loo_c, "model")
library("writexl")
write_xlsx(loo_c,paste0("loo_c12",".xlsx"))
kable(loo_c)
|model  | elpd_diff|  se_diff|  elpd_loo| se_elpd_loo|    p_loo| se_p_loo|    looic| se_looic|Dataset |
|:------|---------:|--------:|---------:|-----------:|--------:|--------:|--------:|--------:|:-------|
|modelC |     0.000|   0.0000| -17029.02|    111.3319| 1406.648| 20.58909| 34058.05| 222.6639|12      |
|modelB | -1165.919| 160.8169| -18194.94|    129.7586| 1664.462| 32.43772| 36389.88| 259.5173|12      |
control for gender
daily_w$vp_female <- as.factor(daily_w$vp_female)
wn_model_sex <- brm(bf(panas_n | cens(Acens) ~ neuro + vp_female + (1|id),
                       sigma ~ neuro + vp_female + (1|id)), data = daily_w,
                    iter = 2000, warmup = 1000,
                    file = "wn_model_sex")
print(wn_model_sex)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: panas_n | cens(Acens) ~ neuro + vp_female + (1 | id)
## sigma ~ neuro + vp_female + (1 | id)
## Data: daily_w (Number of observations: 21523)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Group-Level Effects:
## ~id (Number of levels: 864)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.53 0.01 0.50 0.55 1.02 559 1079
## sd(sigma_Intercept) 0.33 0.01 0.31 0.35 1.00 1446 2548
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.91 0.09 0.74 1.08 1.01 241 437
## sigma_Intercept -1.01 0.06 -1.13 -0.89 1.00 1005 1992
## neuro 0.28 0.03 0.23 0.33 1.01 238 378
## vp_female1 -0.10 0.06 -0.22 0.01 1.02 222 426
## sigma_neuro 0.09 0.02 0.06 0.12 1.00 1059 1470
## sigma_vp_female1 0.13 0.04 0.06 0.21 1.01 1108 2205
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(wn_model_sex)


prior_summary(wn_model_sex)
##                   prior     class       coef group resp  dpar nlpar lb ub       source
##                  (flat)         b                                                default
##                  (flat)         b      neuro                                (vectorized)
##                  (flat)         b vp_female1                                (vectorized)
##                  (flat)         b                       sigma                    default
##                  (flat)         b      neuro             sigma              (vectorized)
##                  (flat)         b vp_female1             sigma              (vectorized)
##  student_t(3, 1.6, 2.5) Intercept                                                default
##    student_t(3, 0, 2.5) Intercept                       sigma                    default
##    student_t(3, 0, 2.5)        sd                                    0            default
##    student_t(3, 0, 2.5)        sd                       sigma        0            default
##    student_t(3, 0, 2.5)        sd               id                   0       (vectorized)
##    student_t(3, 0, 2.5)        sd  Intercept    id                   0       (vectorized)
##    student_t(3, 0, 2.5)        sd               id     sigma         0       (vectorized)
##    student_t(3, 0, 2.5)        sd  Intercept    id     sigma         0       (vectorized)
BCLSM Positive Emotion
wp_model_neuro3 <- brm(bf(panas_p | cens(Acens_p) ~ neuro + (1|id),
                          sigma ~ neuro + (1|id)), data = daily_w,
                       control = list(adapt_delta = .99),
                       iter = 2000, warmup = 1000,
                       file = "wp_model_neuro3")
print(wp_model_neuro3)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: panas_p | cens(Acens_p) ~ neuro + (1 | id)
## sigma ~ neuro + (1 | id)
## Data: daily_w (Number of observations: 21548)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Group-Level Effects:
## ~id (Number of levels: 865)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.51 0.01 0.49 0.54 1.01 334 739
## sd(sigma_Intercept) 0.30 0.01 0.28 0.32 1.00 1356 2496
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 3.48 0.07 3.33 3.61 1.01 212 466
## sigma_Intercept -0.66 0.05 -0.76 -0.57 1.00 862 1690
## neuro -0.20 0.02 -0.24 -0.15 1.01 204 483
## sigma_neuro 0.02 0.01 -0.01 0.05 1.00 869 1659
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(wp_model_neuro3)


prior_summary(wp_model_neuro3)
##                   prior     class      coef group resp  dpar nlpar lb ub       source
##                  (flat)         b                                               default
##                  (flat)         b     neuro                                (vectorized)
##                  (flat)         b                      sigma                    default
##                  (flat)         b     neuro            sigma               (vectorized)
##  student_t(3, 2.9, 2.5) Intercept                                               default
##    student_t(3, 0, 2.5) Intercept                      sigma                    default
##    student_t(3, 0, 2.5)        sd                                   0            default
##    student_t(3, 0, 2.5)        sd                      sigma        0            default
##    student_t(3, 0, 2.5)        sd              id                   0       (vectorized)
##    student_t(3, 0, 2.5)        sd Intercept    id                   0       (vectorized)
##    student_t(3, 0, 2.5)        sd              id     sigma         0       (vectorized)
##    student_t(3, 0, 2.5)        sd Intercept    id     sigma         0       (vectorized)
pp_check(wp_model_neuro3)
## Using 10 posterior draws for ppc type 'dens_overlay' by default.
## Warning: Censored responses are not shown in 'pp_check'.

Model comparison
scale vs. no scale parameter
wp_model_neuro2 <- brm(panas_p | cens(Acens_p) ~ neuro + (1|id), data = daily_w,
                       iter = 2000, warmup = 1000,
                       file = "wp_model_neuro2")
print(wp_model_neuro2)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: panas_p | cens(Acens_p) ~ neuro + (1 | id)
## Data: daily_w (Number of observations: 21548)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Group-Level Effects:
## ~id (Number of levels: 865)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.51 0.01 0.49 0.54 1.00 474 914
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 3.47 0.08 3.32 3.62 1.02 349 555
## neuro -0.19 0.02 -0.24 -0.15 1.01 353 520
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.59 0.00 0.58 0.59 1.00 3724 2887
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
modelAp <- wp_model_neuro2
modelBp <- wp_model_neuro3
modelAp <- add_criterion(modelAp, "loo")
modelBp <- add_criterion(modelBp, "loo")
looP <- loo_compare(modelAp,modelBp, criterion = "loo")
looP <- as.data.frame(looP)
looP$Dataset <- "12"
looP <- tibble::rownames_to_column(looP, "model")
library("writexl")
write_xlsx(looP,"~/looPDataset12.xlsx")
kable(looP)
|model   | elpd_diff| se_diff|  elpd_loo| se_elpd_loo|     p_loo|  se_p_loo|    looic| se_looic|Dataset |
|:-------|---------:|-------:|---------:|-----------:|---------:|---------:|--------:|--------:|:-------|
|modelBp |     0.000|  0.0000| -18298.93|    115.3239| 1453.2602| 20.852349| 36597.86| 230.6478|12      |
|modelAp | -1341.338| 53.4421| -19640.27|    119.0629|  812.4282|  8.641516| 39280.53| 238.1259|12      |
censoring vs. no censoring
wp_model_neuro4 <- brm(bf(panas_p ~ neuro + (1|id),
                          sigma ~ neuro + (1|id)), data = daily_w,
                       control = list(adapt_delta = .99),
                       iter = 2000, warmup = 1000,
                       file = "wp_model_neuro4")
print(wp_model_neuro4)
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: panas_p ~ neuro + (1 | id)
## sigma ~ neuro + (1 | id)
## Data: daily_w (Number of observations: 21548)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Group-Level Effects:
## ~id (Number of levels: 865)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.51 0.01 0.48 0.53 1.01 370 678
## sd(sigma_Intercept) 0.30 0.01 0.28 0.31 1.00 1296 1590
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 3.47 0.08 3.32 3.62 1.05 167 490
## sigma_Intercept -0.66 0.05 -0.75 -0.56 1.00 975 1364
## neuro -0.19 0.02 -0.24 -0.15 1.05 165 450
## sigma_neuro 0.01 0.01 -0.02 0.04 1.00 909 1387
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_Cens[1, "posemo_b_neuro"] <- extract_param(wp_model_neuro3, "b_neuro")
results_Cens[1, "posemo_b_neuro_sigma"] <- extract_param(wp_model_neuro3, "b_sigma_neuro")
results_Cens[1, "posemo_sigma"] <- extract_param(wp_model_neuro3, "b_sigma_Intercept")
results_Cens[2, "posemo_b_neuro"] <- extract_param(wp_model_neuro4, "b_neuro")
results_Cens[2, "posemo_b_neuro_sigma"] <- extract_param(wp_model_neuro4, "b_sigma_neuro")
results_Cens[2, "posemo_sigma"] <- extract_param(wp_model_neuro4, "b_sigma_Intercept")
BCLSM vs. model C (two-part model)
wp_model_neuro_modelC <- brm(bf(panas_p | cens(Acens) ~ neuro + (1|gr(id, by = neuro_Q)),
                                sigma ~ neuro + (1|id)), data = daily_w,
                             iter = 5000, warmup = 2000, chains = 4,
                             control = list(adapt_delta = .95), init = 0.1,
                             file = "wn_model_neuro_modelC")
print(wp_model_neuro_modelC)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: panas_p | cens(Acens) ~ neuro + (1 | gr(id, by = neuro_Q))
## sigma ~ neuro + (1 | id)
## Data: daily_w (Number of observations: 21548)
## Draws: 4 chains, each with iter = 5000; warmup = 2000; thin = 1;
## total post-warmup draws = 12000
##
## Group-Level Effects:
## ~id (Number of levels: 865)
## Estimate Est.Error l-95% CI u-95% CI Rhat
## sd(Intercept:neuro_Q[1.14,2.71)) 0.53 0.03 0.48 0.59 1.00
## sd(Intercept:neuro_Q[2.71,3.29)) 0.51 0.03 0.46 0.56 1.00
## sd(Intercept:neuro_Q[3.29,4.00)) 0.54 0.03 0.49 0.59 1.00
## sd(Intercept:neuro_Q[4.00,5.00]) 0.57 0.03 0.50 0.64 1.00
## sd(sigma_Intercept) 0.30 0.01 0.28 0.32 1.00
## Bulk_ESS Tail_ESS
## sd(Intercept:neuro_Q[1.14,2.71)) 2070 4042
## sd(Intercept:neuro_Q[2.71,3.29)) 1927 3484
## sd(Intercept:neuro_Q[3.29,4.00)) 1320 2144
## sd(Intercept:neuro_Q[4.00,5.00]) 1740 3208
## sd(sigma_Intercept) 4537 6908
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 3.13 0.08 2.97 3.28 1.00 1006 1868
## sigma_Intercept -0.58 0.05 -0.67 -0.48 1.00 4468 6123
## neuro -0.12 0.02 -0.17 -0.08 1.00 1020 1833
## sigma_neuro -0.01 0.02 -0.04 0.02 1.00 4740 6376
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
modelB <- wp_model_neuro3
modelC <- wp_model_neuro_modelC
modelB <- add_criterion(modelB, "loo")
modelC <- add_criterion(modelC, "loo")
loo_cP <- loo_compare(modelB,modelC, criterion = "loo")
## Warning: Not all models have the same y variable. ('yhash' attributes do not
## match)
loo_cP <- as.data.frame(loo_cP)
loo_cP$Dataset <- "12"
#loo_cP <- tibble::rownames_to_column(loo_c, "model")
library("writexl")
write_xlsx(loo_cP,paste0("loo_cP12", ".xlsx"))
kable(loo_cP)
|       | elpd_diff|  se_diff|  elpd_loo| se_elpd_loo|    p_loo| se_p_loo|    looic| se_looic|Dataset |
|:------|---------:|--------:|---------:|-----------:|--------:|--------:|--------:|--------:|:-------|
|modelC |     0.000|  0.00000| -17029.02|    111.3319| 1406.648| 20.58909| 34058.05| 222.6639|12      |
|modelB | -1269.905| 68.36656| -18298.93|    115.3239| 1453.260| 20.85235| 36597.86| 230.6478|12      |
extract_param <- function(model, parameter) {
  ci <- posterior_summary(model, variable = parameter)
  est <- sprintf("%.2f %.2f [%.2f;%.2f]", ci[,"Estimate"], ci[,"Est.Error"], ci[,"Q2.5"], ci[,"Q97.5"])
  est
}
results_wd <- data.frame(matrix(nrow = 7,      # models & RVI
                                ncol = 8 + 1)) # 2 affects x 4 columns each
names(results_wd) <- c("model", "negemo_b_neuro", "negemo_b_neuro_sigma", "negemo_sigma", "b_neg_sigma_sex",
                       "posemo_b_neuro", "posemo_b_neuro_sigma", "posemo_sigma", "b_pos_sigma_sex")
results_wd$model <- c("model1", "model2", "model3",
                      "RSD", "RSD_weight", "SD", "gender")
#NA
results_wd[2, "negemo_b_neuro"] <- extract_param(wn_model_neuro2, "b_neuro")
results_wd[2, "negemo_sigma"] <- extract_param(wn_model_neuro2, "sigma")
results_wd[3, "negemo_b_neuro"] <- extract_param(wn_model_neuro3, "b_neuro")
results_wd[3, "negemo_b_neuro_sigma"] <- extract_param(wn_model_neuro3, "b_sigma_neuro")
results_wd[3, "negemo_sigma"] <- extract_param(wn_model_neuro3, "b_sigma_Intercept")
#gender
results_wd[7, "negemo_b_neuro"] <- extract_param(wn_model_sex, "b_neuro")
results_wd[7, "negemo_b_neuro_sigma"] <- extract_param(wn_model_sex, "b_sigma_neuro")
results_wd[7, "negemo_sigma"] <- extract_param(wn_model_sex, "b_sigma_Intercept")
results_wd[7, "b_neg_sigma_sex"] <- extract_param(wn_model_sex, "b_sigma_vp_female1")
#pa
results_wd[2, "posemo_b_neuro"] <- extract_param(wp_model_neuro2, "b_neuro")
results_wd[2, "posemo_sigma"] <- extract_param(wp_model_neuro2, "sigma")
results_wd[3, "posemo_b_neuro"] <- extract_param(wp_model_neuro3, "b_neuro")
results_wd[3, "posemo_b_neuro_sigma"] <- extract_param(wp_model_neuro3, "b_sigma_neuro")
results_wd[3, "posemo_sigma"] <- extract_param(wp_model_neuro3, "b_sigma_Intercept")
RVI (Relative Variability Index)
Unweighted RVI
ffm$RSD_NA <- NA
for (i in 1:nrow(ffm)) {
  # current row number
  if (ffm$id[i] %in% daily_w$id) {
    if (sum(!is.na(daily_w$panas_n[daily_w$id == ffm$id[i]])) >= 5) {
      ffm$RSD_NA[i] <- relativeSD(daily_w$panas_n[daily_w$id == ffm$id[i]],
                                  1, 5)
    }
  }
}
## Warning in checkOutput(M, MIN, MAX): NaN returned. Data has a mean equal the
## minimum
range(ffm$RSD_NA, na.rm=T)
## [1] 0.05190888 0.73282811
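The warning above indicates that relativeSD() returns NaN when a person's mean equals the scale minimum; counting persons without a usable RSD value (not part of the original output) is straightforward:
sum(is.na(ffm$RSD_NA)) # includes NaN cases and persons with fewer than 5 valid ratings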
ffm$logrsd_n <- log(ffm$RSD_NA)
w_rvi_na <- brm(logrsd_n ~ neuro, data= ffm,
file = "wn_logrsd_uw")
results_wd[4,3] <- extract_param(w_rvi_na, "b_neuro")
ffm$RSD_PA <- NA
for (i in 1:nrow(ffm)) {
  # current row number
  if (ffm$id[i] %in% daily_w$id) {
    if (sum(!is.na(daily_w$panas_p[daily_w$id == ffm$id[i]])) >= 5) {
      ffm$RSD_PA[i] <- relativeSD(daily_w$panas_p[daily_w$id == ffm$id[i]],
                                  1, 5)
    }
  }
}
range(ffm$RSD_PA)
## [1] 0.06645009 0.70566286
ffm$logrsd_p <- log(ffm$RSD_PA)
w_rvi_pa <- brm(logrsd_p ~ neuro, data= ffm,
file = "wp_logrsd_uw")
print(w_rvi_pa)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_p ~ neuro
## Data: ffm (Number of observations: 865)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -1.38 0.05 -1.47 -1.28 1.00 4009 3216
## neuro 0.03 0.01 -0.00 0.06 1.00 3953 3112
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.33 0.01 0.31 0.35 1.00 3883 2883
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_wd[4,6] <- extract_param(w_rvi_pa, "b_neuro")
Weighted RVI
ffm$mean_NA <- NA
for (i in 1:nrow(ffm)) {
  if (ffm$id[i] %in% daily_w$id) {
    if (sum(!is.na(daily_w$panas_n[daily_w$id == ffm$id[i]])) >= 5) {
      ffm$mean_NA[i] <- mean(daily_w$panas_n[daily_w$id == ffm$id[i]],
                             na.rm = T)
    }
  }
}
ffm$mean_PA <- NA
for (i in 1:nrow(ffm)) {
  if (ffm$id[i] %in% daily_w$id) {
    if (sum(!is.na(daily_w$panas_p[daily_w$id == ffm$id[i]])) >= 5) {
      ffm$mean_PA[i] <- mean(daily_w$panas_p[daily_w$id == ffm$id[i]],
                             na.rm = T)
    }
  }
}
ffm$weight_NA <- NA
for (i in 1:nrow(ffm)) {
  if (ffm$id[i] %in% daily_w$id) {
    if (!is.na(ffm$mean_NA[i])) {
      # if the mean is not missing
      ffm$weight_NA[i] <- maximumSD(ffm$mean_NA[i], # mean
                                    1,              # minimum
                                    5,              # maximum
                                    sum(!is.na(daily_w$panas_n[daily_w$id == ffm$id[i]])))
      # W as reported in paper
      ffm$weight_NA[i] <- ffm$weight_NA[i]^2
    }
  }
}
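For orientation, maximumSD() (assumed here to come from the same relativeVariability package that provides relativeSD()) returns the largest SD attainable for a given mean, scale minimum/maximum, and number of observations; its square is then used as the regression weight. An illustrative call with hypothetical values:
# hypothetical example: maximum possible SD for a mean of 2 on a 1-5 scale with 30 daily ratings
maximumSD(2, 1, 5, 30)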
w_rvi_na_w <- brm(logrsd_n|weights(weight_NA) ~ neuro, data= ffm,
file = "wn_logrsd_w")
print(w_rvi_na_w)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_n | weights(weight_NA) ~ neuro
## Data: ffm (Number of observations: 864)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -1.16 0.03 -1.22 -1.10 1.00 3524 2823
## neuro 0.01 0.01 -0.00 0.03 1.00 3852 2891
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.30 0.00 0.29 0.31 1.00 3130 3038
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_wd[5,3] <- extract_param(w_rvi_na_w, "b_neuro")
ffm$weight_PA <- NA
for (i in 1:nrow(ffm)) {
  if (ffm$id[i] %in% daily_w$id) {
    if (!is.na(ffm$mean_PA[i])) {
      # if the mean is not missing
      ffm$weight_PA[i] <- maximumSD(ffm$mean_PA[i], # mean
                                    1,              # minimum
                                    5,              # maximum
                                    sum(!is.na(daily_w$panas_p[daily_w$id == ffm$id[i]])))
      # W as reported in paper
      ffm$weight_PA[i] <- ffm$weight_PA[i]^2
    }
  }
}
w_rvi_Pa_w <- brm(logrsd_p| weights(weight_PA) ~ neuro, data= ffm,
file = "wp_logrsd_w")
print(w_rvi_Pa_w)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_p | weights(weight_PA) ~ neuro
## Data: ffm (Number of observations: 865)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -1.38 0.03 -1.43 -1.34 1.00 4396 3255
## neuro 0.03 0.01 0.01 0.04 1.00 4362 3048
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.33 0.00 0.32 0.34 1.00 3679 2764
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_wd[5,6] <- extract_param(w_rvi_Pa_w, "b_neuro")
SD
ffm$sd_NA <- NA
for (i in 1:nrow(ffm)) {
  ffm$sd_NA[i] <- sd(daily_w$panas_n[daily_w$id == ffm$id[i]],
                     na.rm = T)
}
ffm$sd_PA <- NA
for (i in 1:nrow(ffm)) {
  ffm$sd_PA[i] <- sd(daily_w$panas_p[daily_w$id == ffm$id[i]],
                     na.rm = T)
}
# person-level SDs of exactly 0 would give log(0) = -Inf, so set them to NA before the log transform
ffm$sd_PA[ffm$sd_PA == 0] <- NA
ffm$sd_NA[ffm$sd_NA == 0] <- NA
ffm$logsd_NA <- log(ffm$sd_NA)
ffm$logsd_PA <- log(ffm$sd_PA)
m_sd_na <- brm(logsd_NA ~ neuro, data= ffm,
file = "wn_logsd")
m_sd_na
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logsd_NA ~ neuro
## Data: ffm (Number of observations: 864)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -1.44 0.07 -1.57 -1.31 1.00 4035 3029
## neuro 0.20 0.02 0.16 0.24 1.00 4080 2923
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.45 0.01 0.43 0.47 1.00 3752 2924
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_wd[6,3] <- extract_param(m_sd_na, "b_neuro")
m_sd_pa <- brm(logsd_PA ~ neuro, data= ffm,
file = "wp_logsd")
m_sd_pa
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logsd_PA ~ neuro
## Data: ffm (Number of observations: 865)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.68 0.05 -0.77 -0.58 1.00 4136 2873
## neuro 0.01 0.01 -0.02 0.04 1.00 3995 2882
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.33 0.01 0.32 0.35 1.00 3881 2812
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_wd[6,6] <- extract_param(m_sd_pa, "b_neuro")
Incremental Validity of SD
na_noneurot <- brm(bf(panas_n | cens(Acens) ~ (1|id),
                      sigma ~ (1|id)), data = daily_w,
                   iter = 7000, warmup = 2000, chains = 4,
                   control = list(adapt_delta = .99), init = 0.1,
                   file = "na_noneurot12")
print(na_noneurot)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: panas_n | cens(Acens) ~ (1 | id)
## sigma ~ (1 | id)
## Data: daily_w (Number of observations: 26690)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~id (Number of levels: 1390)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.58 0.01 0.56 0.61 1.00 3342 6453
## sd(sigma_Intercept) 0.34 0.01 0.32 0.36 1.00 7093 11541
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.77 0.02 1.74 1.80 1.00 1856 3412
## sigma_Intercept -0.59 0.01 -0.62 -0.57 1.00 8406 12305
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# person-specific estimates (random + fixed effects) of the location and log-scale intercepts
rans <- coef(na_noneurot, summary = T)
rans_i <- as.data.frame(rans$id[,,"Intercept"]) %>% tibble::rownames_to_column("id")
rans_s <- as.data.frame(rans$id[,,"sigma_Intercept"]) %>% tibble::rownames_to_column("id")
nrow(rans_s)
## [1] 1390
nrow(rans_i)
## [1] 1390
dat <- merge(rans_s, rans_i, all = T, by= "id")
dat <- merge(dat, daily_w, all = T, by= "id")
names(dat)[2] <- "Est.SD" # Estimate column of the sigma (log-scale) intercepts from rans_s
names(dat)[6] <- "Est.M"  # Estimate column of the location intercepts from rans_i
fit1 <- lm(neuro ~ Est.SD + Est.M , data=dat)
summary(fit1)
##
## Call:
## lm(formula = neuro ~ Est.SD + Est.M, data = dat)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.9515 -0.5133 0.0187 0.4969 1.9457
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 2.589686 0.020446 126.66 <2e-16 ***
## Est.SD 0.321215 0.016387 19.60 <2e-16 ***
## Est.M 0.462261 0.008882 52.05 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.7084 on 21545 degrees of freedom
## (5142 observations deleted due to missingness)
## Multiple R-squared: 0.1462, Adjusted R-squared: 0.1461
## F-statistic: 1845 on 2 and 21545 DF, p-value: < 2.2e-16
fit1.2 <- lm(neuro ~ Est.M , data=dat)
summary(fit1.2)
##
## Call:
## lm(formula = neuro ~ Est.M, data = dat)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.91510 -0.51110 0.01111 0.50103 1.88307
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 2.331113 0.015759 147.92 <2e-16 ***
## Est.M 0.499094 0.008758 56.99 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.7147 on 21546 degrees of freedom
## (5142 observations deleted due to missingness)
## Multiple R-squared: 0.131, Adjusted R-squared: 0.131
## F-statistic: 3248 on 1 and 21546 DF, p-value: < 2.2e-16
aov <- anova(fit1.2, fit1)
aov
## Analysis of Variance Table
##
## Model 1: neuro ~ Est.M
## Model 2: neuro ~ Est.SD + Est.M
## Res.Df RSS Df Sum of Sq F Pr(>F)
## 1 21546 11004
## 2 21545 10812 1 192.82 384.24 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(fit1)$r.squared-summary(fit1.2)$r.squared
## [1] 0.0152266
results_SDin <- data.frame(matrix(nrow = 1, ncol = 9))
names(results_SDin) <- c("Dataset","b_SD","Err.SD","p(b_SD)","b_M","Err.M","p(b_M)","ΔR²", "p")
results_SDin$Dataset <- "12"
results_SDin$`ΔR²` <- summary(fit1)$r.squared-summary(fit1.2)$r.squared
results_SDin$`p` <- aov$`Pr(>F)`[2]
results_SDin$Err.SD <- summary(fit1)$coefficients[2,2]
results_SDin$b_SD <- fit1$coefficients[2]
results_SDin$`p(b_SD)` <- summary(fit1)$coefficients[2,4]
results_SDin$b_M <- fit1$coefficients[3]
results_SDin$`p(b_M)` <- summary(fit1)$coefficients[3,4]
results_SDin$Err.M <- summary(fit1)$coefficients[3,2]
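As with the other result tables, the incremental-validity summary could be displayed in the same way (a hypothetical final step, output not shown here):
kable(results_SDin)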