Data Preparation
dataset <- read.csv(file = params$file, header = T, sep = ",")
# run chains on parallel cores, use the cmdstanr backend, and refit cached brms models only when they change
options(mc.cores = 8, brms.backend = "cmdstanr", brms.file_refit = "on_change")
#install.packages("loo")
#remotes::install_github("stan-dev/loo")
library(remotes)
library(loo)
## This is loo version 2.5.1
## - Online documentation and vignettes at mc-stan.org/loo
## - As of v2.0.0 loo defaults to 1 core but we recommend using as many as possible. Use the 'cores' argument or set options(mc.cores = NUM_CORES) for an entire session.
library(psych)
library(relativeVariability)
##
## Attaching package: 'relativeVariability'
## The following object is masked from 'package:stats':
##
## IQR
library(brms)
## Loading required package: Rcpp
## Loading 'brms' package (version 2.18.0). Useful instructions
## can be found by typing help('brms'). A more detailed introduction
## to the package is available through vignette('brms_overview').
##
## Attaching package: 'brms'
## The following object is masked from 'package:psych':
##
## cs
## The following object is masked from 'package:stats':
##
## ar
library(cmdstanr)
## This is cmdstanr version 0.5.3
## - CmdStanR documentation and vignettes: mc-stan.org/cmdstanr
## - CmdStan path: /home/sc.uni-leipzig.de/no628luhi/.cmdstan/cmdstan-2.29.2
## - CmdStan version: 2.29.2
##
## A newer version of CmdStan is available. See ?install_cmdstan() to install it.
## To disable this check set option or environment variable CMDSTANR_NO_VER_CHECK=TRUE.
library(data.table)
library(ggplot2)
##
## Attaching package: 'ggplot2'
## The following objects are masked from 'package:psych':
##
## %+%, alpha
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:data.table':
##
## between, first, last
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(haven)
#library(rstanarm)
library(knitr)
library(rstan)
## Loading required package: StanHeaders
##
## rstan version 2.26.13 (Stan version 2.26.1)
## For execution on a local, multicore CPU with excess RAM we recommend calling
## options(mc.cores = parallel::detectCores()).
## To avoid recompilation of unchanged Stan programs, we recommend calling
## rstan_options(auto_write = TRUE)
## For within-chain threading using `reduce_sum()` or `map_rect()` Stan functions,
## change `threads_per_chain` option:
## rstan_options(threads_per_chain = 1)
##
## Attaching package: 'rstan'
## The following object is masked from 'package:psych':
##
## lookup
library(shinystan)
## Loading required package: shiny
##
## This is shinystan version 2.6.0
Rescale Data
dataset$negemo_full_m <- (dataset$negemo_full_m -1)*(4/6)+1
dataset$posemo_full_m <- (dataset$posemo_full_m -1)*(4/6)+1
dataset$neuro_t <- (dataset$neuro_t -1)*(4/6)+1
hist(dataset$negemo_full_m)

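The factor 4/6 rescales the items linearly onto the 1-5 response range used below; a quick arithmetic check of the transform (this assumes the raw items ran from 1 to 7, which the factor implies):
(c(1, 7) - 1) * (4/6) + 1
## [1] 1 5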
Censoring Data
range(dataset$negemo_full_m, na.rm = T)
## [1] 1 5
range(dataset$posemo_full_m, na.rm = T)
## [1] 1 5
sd(dataset$negemo_full_m, na.rm = T)
## [1] 0.8981705
mean(dataset$negemo_full_m, na.rm = T)
## [1] 1.799744
sd(dataset$posemo_full_m, na.rm = T)
## [1] 1.125941
mean(dataset$posemo_full_m, na.rm = T)
## [1] 3.213341
sd(dataset$neuro_t, na.rm = T)
## [1] 0.7601907
mean(dataset$neuro_t, na.rm = T)
## [1] 3.323102
qplot(dataset$negemo_full_m, binwidth = .1)
## Warning: `qplot()` was deprecated in ggplot2 3.4.0.
## Warning: Removed 827 rows containing non-finite values (`stat_bin()`).

qplot(dataset$posemo_full_m, binwidth = .1)
## Warning: Removed 827 rows containing non-finite values (`stat_bin()`).

dataset$Acens <- case_when(dataset$negemo_full_m == 1 ~ "left",
                           dataset$negemo_full_m == 5 ~ "right",
                           TRUE ~ "none")
table(dataset$Acens)
##
## left none right
## 1385 7621 96
dataset$Acens_p <- case_when(dataset$posemo_full_m == 1 ~ "left",
                             dataset$posemo_full_m == 5 ~ "right",
                             TRUE ~ "none")
table(dataset$Acens_p)
##
## left none right
## 296 8501 305
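Negative emotion piles up at the lower bound, whereas only a small share of positive-emotion responses sits at the scale limits; to see this as proportions rather than counts, a quick check is:
round(prop.table(table(dataset$Acens)), 3)
round(prop.table(table(dataset$Acens_p)), 3)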
BCLSM Negative Emotion
Kn_model_neuro3 <- brm(bf(negemo_full_m | cens(Acens) ~ neuro_t + (1|person_id),
sigma ~ neuro_t+ (1|person_id)), data = dataset,
iter = 7000, warmup = 2000, chains = 4,
control = list(adapt_delta = .99), init = 0.1,
file = paste("models/", params$file, "Kn_model_neuro3"))
## Warning: Rows containing NAs were excluded from the model.
print(Kn_model_neuro3)
## Warning: There were 12243 divergent transitions after
## warmup. Increasing adapt_delta above 0.99 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m | cens(Acens) ~ neuro_t + (1 | person_id)
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 8275)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.84 0.07 0.72 0.98 1.01 858 1899
## sd(sigma_Intercept) 0.53 0.04 0.46 0.61 1.00 796 1719
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.89 0.38 0.15 1.62 1.00 407 762
## sigma_Intercept -0.40 0.24 -0.86 0.07 1.00 653 1278
## neuro_t 0.22 0.11 0.00 0.44 1.00 424 842
## sigma_neuro_t 0.03 0.07 -0.11 0.16 1.01 664 1161
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(Kn_model_neuro3)


pp_check(Kn_model_neuro3)
## Using 10 posterior draws for ppc type 'dens_overlay' by default.
## Warning: Censored responses are not shown in 'pp_check'.

prior_summary(Kn_model_neuro3)
## prior class coef group resp dpar nlpar lb ub
## (flat) b
## (flat) b neuro_t
## (flat) b sigma
## (flat) b neuro_t sigma
## student_t(3, 1.5, 2.5) Intercept
## student_t(3, 0, 2.5) Intercept sigma
## student_t(3, 0, 2.5) sd 0
## student_t(3, 0, 2.5) sd sigma 0
## student_t(3, 0, 2.5) sd person_id 0
## student_t(3, 0, 2.5) sd Intercept person_id 0
## student_t(3, 0, 2.5) sd person_id sigma 0
## student_t(3, 0, 2.5) sd Intercept person_id sigma 0
## source
## default
## (vectorized)
## default
## (vectorized)
## default
## default
## default
## default
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
Model comparison
scale vs. no scale parameter
Kn_model_neuro2 <- brm(negemo_full_m | cens(Acens) ~ neuro_t + (1|person_id), data = dataset,
iter = 6000, warmup = 2000, chains = 4,
control = list(adapt_delta = .98), inits = 0.1 ,
file = paste("models/", params$file, "Kn_model_neuro2"))
## Warning: Argument 'inits' is deprecated. Please use argument 'init' instead.
## Warning: Rows containing NAs were excluded from the model.
print(Kn_model_neuro2)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: negemo_full_m | cens(Acens) ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 8275)
## Draws: 4 chains, each with iter = 6000; warmup = 2000; thin = 1;
## total post-warmup draws = 16000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.70 0.05 0.61 0.81 1.00 1297 1834
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.97 0.32 0.34 1.59 1.01 692 1835
## neuro_t 0.21 0.09 0.03 0.40 1.01 685 1688
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.83 0.01 0.81 0.84 1.00 15268 11594
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
modelA <- Kn_model_neuro2
modelB <- Kn_model_neuro3
modelA <- add_criterion(modelA, "loo")
modelB <- add_criterion(modelB, "loo")
loo <- loo_compare(modelA,modelB, criterion = "loo")
loo <- as.data.frame(loo)
loo$Dataset <- params$file
loo <- tibble::rownames_to_column(loo, "model")
library("writexl")
write_xlsx(loo,paste0("loo", params$file, ".xlsx"))
kable(loo)
| model | elpd_diff | se_diff | elpd_loo | se_elpd_loo | p_loo | se_p_loo | looic | se_looic | Dataset |
|:------|----------:|--------:|---------:|------------:|------:|---------:|------:|---------:|:--------|
| modelB | 0.000 | 0.00000 | -8047.984 | 130.3005 | 383.514 | 35.786582 | 16095.97 | 260.601 | Dataset 11 public.csv |
| modelA | -1344.303 | 91.56663 | -9392.287 | 100.3165 | 105.499 | 3.106006 | 18784.57 | 200.633 | Dataset 11 public.csv |
censoring vs. no censoring
Kn_model_neuro4 <- brm(bf(negemo_full_m ~ neuro_t + (1|person_id),
sigma ~ neuro_t+ (1|person_id)), data = dataset,
iter = 7000, warmup = 2000, chains = 4,
control = list(adapt_delta = .9999), init = 0,
file = paste("models/", params$file, "Kn_model_neuro4"))
## Warning: Rows containing NAs were excluded from the model.
print(Kn_model_neuro4)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m ~ neuro_t + (1 | person_id)
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 8275)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.52 0.04 0.45 0.60 1.00 1372 2836
## sd(sigma_Intercept) 0.47 0.04 0.41 0.55 1.00 1394 2916
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.26 0.23 0.80 1.70 1.01 849 2010
## sigma_Intercept -0.81 0.21 -1.23 -0.39 1.00 936 1865
## neuro_t 0.16 0.07 0.03 0.29 1.01 828 1980
## sigma_neuro_t 0.09 0.06 -0.04 0.21 1.00 950 1620
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
extract_param <- function(model, parameter) {
  # return "estimate est.error [lower;upper]" for a single model parameter
  ci <- posterior_summary(model, variable = parameter)
  est <- sprintf("%.2f %.2f [%.2f;%.2f]", ci[,"Estimate"], ci[,"Est.Error"], ci[,"Q2.5"], ci[,"Q97.5"])
  est
}
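As a quick usage illustration: the helper returns one compact string with the posterior mean, its error, and the 95% interval, using brms' internal parameter names (e.g. b_neuro_t for the location effect and b_sigma_neuro_t for the scale effect):
extract_param(Kn_model_neuro3, "b_neuro_t")
## e.g. "0.22 0.11 [0.00;0.44]"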
results_Cens <- data.frame(matrix(nrow = 2,
ncol = 6+1))
names(results_Cens) <- c("model", "negemo_b_neuro", "negemo_b_neuro_sigma", "negemo_sigma",
"posemo_b_neuro", "posemo_b_neuro_sigma", "posemo_sigma"
)
results_Cens$model <- c("modelCensoring", "modelnoCensoring")
# negative affect (NA)
results_Cens[1, "negemo_b_neuro"] <- extract_param(Kn_model_neuro3, "b_neuro_t")
results_Cens[1, "negemo_b_neuro_sigma"] <- extract_param(Kn_model_neuro3, "b_sigma_neuro_t")
results_Cens[1, "negemo_sigma"] <- extract_param(Kn_model_neuro3, "b_sigma_Intercept")
results_Cens[2, "negemo_b_neuro"] <- extract_param(Kn_model_neuro4, "b_neuro_t")
results_Cens[2, "negemo_b_neuro_sigma"] <- extract_param(Kn_model_neuro4, "b_sigma_neuro_t")
results_Cens[2, "negemo_sigma"] <- extract_param(Kn_model_neuro4, "b_sigma_Intercept")
BCLSM vs. model C (two-part model)
dataset <- dataset %>%
  left_join(dataset %>%
              distinct(person_id, neuro_t) %>%
              mutate(neuro_Q = Hmisc::cut2(neuro_t, g = 4)),
            by = c("person_id", "neuro_t"))
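neuro_Q splits persons into neuroticism quartiles; the gr(person_id, by = neuro_Q) term in the model below then estimates a separate random-intercept SD per quartile. A quick check of the quartile sizes (a sketch; it assumes each person has a single neuro_t value):
dataset %>% distinct(person_id, neuro_Q) %>% count(neuro_Q)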
Kn_model_neuro_jinxed <- brm(bf(negemo_full_m | cens(Acens) ~ neuro_t + (1|gr(person_id, by = neuro_Q)),
sigma ~ neuro_t + (1|person_id)), data = dataset,
iter = 5000, warmup = 2000, chains = 4,
control = list(adapt_delta = .99), init = 0.1,
file = paste("models/", params$file, "Kn_model_neuro_jinxed"))
## Warning: Rows containing NAs were excluded from the model.
print(Kn_model_neuro_jinxed)
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Warning: There were 4572 divergent transitions after
## warmup. Increasing adapt_delta above 0.95 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m | cens(Acens) ~ neuro_t + (1 | gr(person_id, by = neuro_Q))
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 8275)
## Draws: 4 chains, each with iter = 5000; warmup = 2000; thin = 1;
## total post-warmup draws = 12000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat
## sd(Intercept:neuro_Q[1.38,2.88)) 0.67 0.11 0.49 0.91 1.02
## sd(Intercept:neuro_Q[2.88,3.50)) 0.66 0.10 0.49 0.88 1.08
## sd(Intercept:neuro_Q[3.50,4.00)) 1.50 0.26 1.09 2.12 1.01
## sd(Intercept:neuro_Q[4.00,5.00]) 0.56 0.10 0.41 0.80 1.02
## sd(sigma_Intercept) 0.53 0.04 0.46 0.61 1.01
## Bulk_ESS Tail_ESS
## sd(Intercept:neuro_Q[1.38,2.88)) 192 883
## sd(Intercept:neuro_Q[2.88,3.50)) 41 240
## sd(Intercept:neuro_Q[3.50,4.00)) 121 176
## sd(Intercept:neuro_Q[4.00,5.00]) 223 591
## sd(sigma_Intercept) 205 479
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.97 0.27 0.42 1.49 1.03 126 348
## sigma_Intercept -0.48 0.27 -1.08 0.02 1.17 16 19
## neuro_t 0.19 0.08 0.04 0.35 1.03 168 433
## sigma_neuro_t 0.05 0.08 -0.10 0.22 1.15 19 23
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
modelB <- Kn_model_neuro3
modelC <- Kn_model_neuro_jinxed
modelB <- add_criterion(modelB, "loo")
modelC <- add_criterion(modelC, "loo")
loo_c <- loo_compare(modelB,modelC, criterion = "loo")
loo_c <- as.data.frame(loo_c)
loo_c$Dataset <- params$file
loo_c <- tibble::rownames_to_column(loo_c, "model")
library("writexl")
write_xlsx(loo_c,paste0("loo_c", params$file, ".xlsx"))
kable(loo_c)
| model | elpd_diff | se_diff | elpd_loo | se_elpd_loo | p_loo | se_p_loo | looic | se_looic | Dataset |
|:------|----------:|--------:|---------:|------------:|------:|---------:|------:|---------:|:--------|
| modelC | 0.000000 | 0.000000 | -8046.438 | 130.2764 | 381.8441 | 35.45009 | 16092.88 | 260.5528 | Dataset 11 public.csv |
| modelB | -1.546163 | 2.571908 | -8047.984 | 130.3005 | 383.5140 | 35.78658 | 16095.97 | 260.6010 | Dataset 11 public.csv |
control for gender
dataset$gender <- as.factor(dataset$gender)
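A quick look at the gender coding before fitting (counts per factor level; the model below allows a separate residual SD per level):
table(dataset$gender, useNA = "ifany")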
Kn_model_sex <- brm(bf(negemo_full_m | cens(Acens) ~ neuro_t + gender + (1|person_id),
sigma ~ neuro_t + gender), data = dataset,
iter = 9000, warmup = 2000, chains = 8,
control = list(adapt_delta = .99), inits = 0.1,
file = paste("models/", params$file, "Kn_model_sex"))
## Warning: Argument 'inits' is deprecated. Please use argument 'init' instead.
## Warning: Rows containing NAs were excluded from the model.
print(Kn_model_sex)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m | cens(Acens) ~ neuro_t + gender + (1 | person_id)
## sigma ~ neuro_t + gender
## Data: dataset (Number of observations: 8275)
## Draws: 8 chains, each with iter = 9000; warmup = 2000; thin = 1;
## total post-warmup draws = 56000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.71 0.05 0.61 0.82 1.00 3244 6717
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.95 0.34 0.28 1.62 1.00 1671 3350
## sigma_Intercept -0.73 0.04 -0.82 -0.64 1.00 31619 37305
## neuro_t 0.21 0.10 0.02 0.41 1.00 1736 3493
## gender1 0.13 0.21 -0.28 0.56 1.00 2924 5955
## sigma_neuro_t 0.14 0.01 0.12 0.17 1.00 32990 37915
## sigma_gender1 0.38 0.03 0.33 0.44 1.00 31610 36360
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
pp_check(Kn_model_sex)
## Using 10 posterior draws for ppc type 'dens_overlay' by default.
## Warning: Censored responses are not shown in 'pp_check'.

plot(Kn_model_sex)


BCLSM Positive Emotion
Kp_model_neuro3 <- brm(bf(posemo_full_m | cens(Acens_p) ~ neuro_t + (1|person_id),
sigma ~ neuro_t + (1|person_id)), data = dataset,
chains = 4,
control = list(adapt_delta = .95), inits = 0.1,
iter = 7000, warmup = 2000,
file = paste("models/", params$file, "Kp_model_neuro3"))
## Warning: Argument 'inits' is deprecated. Please use argument 'init' instead.
## Warning: Rows containing NAs were excluded from the model.
print(Kp_model_neuro3)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: posemo_full_m | cens(Acens_p) ~ neuro_t + (1 | person_id)
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 8275)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.94 0.07 0.81 1.09 1.00 1316 2615
## sd(sigma_Intercept) 0.40 0.03 0.35 0.47 1.00 2148 3460
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 3.50 0.43 2.68 4.32 1.00 834 1781
## sigma_Intercept -0.13 0.18 -0.50 0.23 1.00 1402 2698
## neuro_t -0.09 0.13 -0.33 0.16 1.00 807 1454
## sigma_neuro_t -0.03 0.05 -0.14 0.08 1.00 1334 2906
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
pp_check(Kp_model_neuro3)
## Using 10 posterior draws for ppc type 'dens_overlay' by default.
## Warning: Censored responses are not shown in 'pp_check'.

plot(Kp_model_neuro3)


prior_summary(Kp_model_neuro3)
## prior class coef group resp dpar nlpar lb ub
## (flat) b
## (flat) b neuro_t
## (flat) b sigma
## (flat) b neuro_t sigma
## student_t(3, 3.4, 2.5) Intercept
## student_t(3, 0, 2.5) Intercept sigma
## student_t(3, 0, 2.5) sd 0
## student_t(3, 0, 2.5) sd sigma 0
## student_t(3, 0, 2.5) sd person_id 0
## student_t(3, 0, 2.5) sd Intercept person_id 0
## student_t(3, 0, 2.5) sd person_id sigma 0
## student_t(3, 0, 2.5) sd Intercept person_id sigma 0
## source
## default
## (vectorized)
## default
## (vectorized)
## default
## default
## default
## default
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
Model comparison
scale vs. no scale parameter
Kp_model_neuro2 <- brm(posemo_full_m | cens(Acens_p) ~ neuro_t + (1|person_id), data = dataset,
iter = 7000, warmup = 2000, chains = 4,
control = list(adapt_delta = .95), inits = 0.1,
file = paste("models/", params$file, "Kp_model_neuro2"))
## Warning: Argument 'inits' is deprecated. Please use argument 'init' instead.
## Warning: Rows containing NAs were excluded from the model.
print(Kp_model_neuro2)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: posemo_full_m | cens(Acens_p) ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 8275)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.87 0.06 0.76 1.01 1.01 733 1376
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 3.46 0.39 2.63 4.19 1.02 464 551
## neuro_t -0.07 0.11 -0.29 0.17 1.02 464 640
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.86 0.01 0.84 0.87 1.00 10138 12032
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
modelAp <- Kp_model_neuro2
modelBp <- Kp_model_neuro3
modelAp <- add_criterion(modelAp, "loo")
modelBp <- add_criterion(modelBp, "loo")
looP <- loo_compare(modelAp,modelBp, criterion = "loo")
looP <- as.data.frame(looP)
looP$Dataset <- params$file
looP <- tibble::rownames_to_column(looP, "model")
library("writexl")
write_xlsx(looP,paste0("looP", params$file, ".xlsx"))
kable(looP)
| model | elpd_diff | se_diff | elpd_loo | se_elpd_loo | p_loo | se_p_loo | looic | se_looic | Dataset |
|:------|----------:|--------:|---------:|------------:|------:|---------:|------:|---------:|:--------|
| modelBp | 0.0000 | 0.00000 | -9435.468 | 93.54916 | 264.9172 | 13.415805 | 18870.94 | 187.0983 | Dataset 11 public.csv |
| modelAp | -911.2286 | 56.17434 | -10346.696 | 87.70507 | 103.4572 | 2.291345 | 20693.39 | 175.4101 | Dataset 11 public.csv |
censoring vs. no censoring
Kp_model_neuro4 <- brm(bf(posemo_full_m ~ neuro_t + (1|person_id),
sigma ~ neuro_t + (1|person_id)), data = dataset,
chains = 4,
control = list(adapt_delta = .9999), inits = 0,
iter = 7000, warmup = 2000,
file = paste("models/", params$file, "Kp_model_neuro4"))
## Warning: Argument 'inits' is deprecated. Please use argument 'init' instead.
## Warning: Rows containing NAs were excluded from the model.
print(Kp_model_neuro4)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: posemo_full_m ~ neuro_t + (1 | person_id)
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 8275)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.80 0.06 0.69 0.92 1.00 1160 2610
## sd(sigma_Intercept) 0.33 0.02 0.28 0.38 1.00 3028 5809
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 3.43 0.35 2.73 4.13 1.00 966 1685
## sigma_Intercept -0.20 0.15 -0.49 0.10 1.00 1701 3593
## neuro_t -0.07 0.10 -0.27 0.14 1.01 986 1632
## sigma_neuro_t -0.03 0.04 -0.12 0.05 1.00 1837 3863
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# positive affect (PA)
results_Cens[1, "posemo_b_neuro"] <- extract_param(Kp_model_neuro3, "b_neuro_t")
results_Cens[1, "posemo_b_neuro_sigma"] <- extract_param(Kp_model_neuro3, "b_sigma_neuro_t")
results_Cens[1, "posemo_sigma"] <- extract_param(Kp_model_neuro3, "b_sigma_Intercept")
results_Cens[2, "posemo_b_neuro"] <- extract_param(Kp_model_neuro4, "b_neuro_t")
results_Cens[2, "posemo_b_neuro_sigma"] <- extract_param(Kp_model_neuro4, "b_sigma_neuro_t")
results_Cens[2, "posemo_sigma"] <- extract_param(Kp_model_neuro4, "b_sigma_Intercept")
BCLSM vs. model C (two-part model)
Kp_model_neuro_jinxed <- brm(bf(posemo_full_m | cens(Acens) ~ neuro_t + (1|gr(person_id, by = neuro_Q)),
sigma ~ neuro_t + (1|person_id)), data = dataset,
iter = 5000, warmup = 2000, chains = 4,
control = list(adapt_delta = .99), init = 0.1,
file = paste("models/", params$file, "Kp_model_neuro_jinxed"))
## Warning: Rows containing NAs were excluded from the model.
print(Kp_model_neuro_jinxed)
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: posemo_full_m | cens(Acens) ~ neuro_t + (1 | gr(person_id, by = neuro_Q))
## sigma ~ neuro_t + (1 | person_id)
## Data: dataset (Number of observations: 8275)
## Draws: 4 chains, each with iter = 5000; warmup = 2000; thin = 1;
## total post-warmup draws = 12000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat
## sd(Intercept:neuro_Q[1.38,2.88)) 0.90 0.14 0.67 1.22 1.00
## sd(Intercept:neuro_Q[2.88,3.50)) 0.96 0.13 0.74 1.27 1.01
## sd(Intercept:neuro_Q[3.50,4.00)) 0.85 0.14 0.62 1.17 1.00
## sd(Intercept:neuro_Q[4.00,5.00]) 0.90 0.14 0.67 1.23 1.00
## sd(sigma_Intercept) 0.34 0.03 0.29 0.39 1.00
## Bulk_ESS Tail_ESS
## sd(Intercept:neuro_Q[1.38,2.88)) 1492 2523
## sd(Intercept:neuro_Q[2.88,3.50)) 1221 2180
## sd(Intercept:neuro_Q[3.50,4.00)) 1683 2951
## sd(Intercept:neuro_Q[4.00,5.00]) 1764 3463
## sd(sigma_Intercept) 2166 3755
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 2.87 0.41 2.05 3.69 1.01 752 1695
## sigma_Intercept -0.15 0.16 -0.45 0.16 1.00 1519 2722
## neuro_t 0.04 0.12 -0.20 0.28 1.01 770 1602
## sigma_neuro_t -0.04 0.05 -0.13 0.05 1.00 1500 2814
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
modelB <- Kp_model_neuro3
modelC <- Kp_model_neuro_jinxed
modelB <- add_criterion(modelB, "loo")
modelC <- add_criterion(modelC, "loo")
loo_cP <- loo_compare(modelB,modelC, criterion = "loo")
## Warning: Not all models have the same y variable. ('yhash' attributes do not
## match)
loo_cP <- as.data.frame(loo_cP)
loo_cP$Dataset <- params$file
#loo_cP <- tibble::rownames_to_column(loo_c, "model")
library("writexl")
write_xlsx(loo_cP,paste0("loo_cP", params$file, ".xlsx"))
kable(loo_cP)
| model | elpd_diff | se_diff | elpd_loo | se_elpd_loo | p_loo | se_p_loo | looic | se_looic | Dataset |
|:------|----------:|--------:|---------:|------------:|------:|---------:|------:|---------:|:--------|
| modelC | 0.000 | 0.00000 | -8100.249 | 89.26077 | 242.5554 | 12.51193 | 16200.50 | 178.5215 | Dataset 11 public.csv |
| modelB | -1335.219 | 55.55877 | -9435.468 | 93.54916 | 264.9172 | 13.41581 | 18870.94 | 187.0983 | Dataset 11 public.csv |
extract_param <- function(model, parameter) {
  ci <- posterior_summary(model, variable = parameter)
  est <- sprintf("%.2f %.2f [%.2f;%.2f]", ci[,"Estimate"], ci[,"Est.Error"], ci[,"Q2.5"], ci[,"Q97.5"])
  est
}
results_K <- data.frame(matrix(nrow = 7,
ncol = 8+1))
names(results_K) <- c("model", "negemo_b_neuro", "negemo_b_neuro_sigma", "negemo_sigma", "b_neg_sigma_sex",
"posemo_b_neuro", "posemo_b_neuro_sigma", "posemo_sigma", "b_pos_sigma_sex"
)
results_K$model <- c("model1", "model2", "model3",
"RSD", "RSD_weight", "SD", "gender")
# negative affect (NA)
results_K[2, "negemo_b_neuro"] <- extract_param(Kn_model_neuro2, "b_neuro_t")
results_K[2, "negemo_sigma"] <- extract_param(Kn_model_neuro2, "sigma")
results_K[3, "negemo_b_neuro"] <- extract_param(Kn_model_neuro3, "b_neuro_t")
results_K[3, "negemo_b_neuro_sigma"] <- extract_param(Kn_model_neuro3, "b_sigma_neuro_t")
results_K[3, "negemo_sigma"] <- extract_param(Kn_model_neuro3, "b_sigma_Intercept")
#gender
results_K[7, "negemo_b_neuro"] <- extract_param(Kn_model_sex, "b_neuro_t")
results_K[7, "negemo_b_neuro_sigma"] <- extract_param(Kn_model_sex, "b_sigma_neuro_t")
results_K[7, "negemo_sigma"] <- extract_param(Kn_model_sex, "b_sigma_Intercept")
results_K[7, "b_neg_sigma_sex"] <- extract_param(Kn_model_sex, "b_sigma_gender1")
# positive affect (PA)
results_K[2, "posemo_b_neuro"] <- extract_param(Kp_model_neuro2, "b_neuro_t")
results_K[2, "posemo_sigma"] <- extract_param(Kp_model_neuro2, "sigma")
results_K[3, "posemo_b_neuro"] <- extract_param(Kp_model_neuro3, "b_neuro_t")
results_K[3, "posemo_b_neuro_sigma"] <- extract_param(Kp_model_neuro3, "b_sigma_neuro_t")
results_K[3, "posemo_sigma"] <- extract_param(Kp_model_neuro3, "b_sigma_Intercept")
RVI (Relative Variability Index)
data_w <- unique(dataset[,2:5])
Unweighted RVI
data_w$RSD_NA <- NA
for (i in 1:nrow(data_w)) {
data_w$RSD_NA[i] <- relativeSD(dataset$negemo_full_m[dataset$person_id == data_w$person_id[i]],
1, 5)
}
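An equivalent, loop-free way to get the same per-person relative SDs with dplyr (a sketch; it assumes person_id uniquely identifies the rows of data_w):
rsd_check <- dataset %>%
  group_by(person_id) %>%
  summarise(RSD_NA = relativeSD(negemo_full_m, 1, 5))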
range(data_w$RSD_NA, na.rm = T)
## [1] 0.144357 0.902849
mean(data_w$RSD_NA, na.rm = T)
## [1] 0.4529559
sd(data_w$RSD_NA, na.rm = T)
## [1] 0.1598396
data_w$logrsd_n <- log(data_w$RSD_NA)
#plot(data_w$logrsd_n)
m_rvi_na <- brm(logrsd_n ~ neuro_t, data= data_w,
file = paste("models/", params$file, "Kn_model_logrsd_uw"))
print(m_rvi_na)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_n ~ neuro_t
## Data: data_w (Number of observations: 101)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.82 0.17 -1.15 -0.50 1.00 3985 3068
## neuro_t -0.01 0.05 -0.11 0.09 1.00 4019 2927
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.38 0.03 0.33 0.43 1.00 3475 2854
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[4,3] <- extract_param(m_rvi_na, "b_neuro_t")
data_w$RSD_PA <- NA
for (i in 1:nrow(data_w)) {
data_w$RSD_PA[i] <- relativeSD(dataset$posemo_full_m[dataset$person_id == data_w$person_id[i]],
1, 5)
}
range(data_w$RSD_PA)
## [1] 0.1562052 0.9161443
data_w$logrsd_p <- log(data_w$RSD_PA)
m_rvi_pa <- brm(logrsd_p ~ neuro_t, data= data_w,
file = paste("models/", params$file, "Kp_model_logrsd_uw"))
print(m_rvi_pa)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_p ~ neuro_t
## Data: data_w (Number of observations: 101)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.81 0.16 -1.12 -0.51 1.00 3936 2899
## neuro_t -0.03 0.05 -0.12 0.06 1.00 3918 2850
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.35 0.03 0.30 0.40 1.00 3728 3099
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[4,6] <- extract_param(m_rvi_pa, "b_neuro_t")
Weighted RVI
data_w$mean_NA <- NA
for (i in 1:nrow(data_w)) {
data_w$mean_NA[i] <- mean(dataset$negemo_full_m[dataset$person_id == data_w$person_id[i]],
na.rm = T)
}
mean(data_w$mean_NA)
## [1] 1.79452
sd(data_w$mean_NA)
## [1] 0.539677
data_w$mean_PA <- NA
for (i in 1:nrow(data_w)) {
data_w$mean_PA[i] <- mean(dataset$posemo_full_m[dataset$person_id == data_w$person_id[i]],
na.rm = T)
}
mean(data_w$mean_PA)
## [1] 3.209256
sd(data_w$mean_PA)
## [1] 0.7905054
data_w$weight_NA <- NA
for (i in 1:nrow(data_w)) {
  if (!is.na(data_w$mean_NA[i])) {
    data_w$weight_NA[i] <- maximumSD(data_w$mean_NA[i], # mean
                                     1,                 # minimum
                                     5,                 # maximum
                                     sum(!is.na(dataset$negemo_full_m[dataset$person_id == data_w$person_id[i]]))
                                     )
    # W as reported in paper
    data_w$weight_NA[i] <- data_w$weight_NA[i]^2
  }
}
mean(data_w$weight_NA)
## [1] 2.254948
sd(data_w$weight_NA)
## [1] 1.029623
range(data_w$weight_NA)
## [1] 0.1264198 3.9996230
m_rvi_na_w <- brm(logrsd_n| weights(weight_NA) ~ neuro_t, data= data_w,
file = paste("models/", params$file, "Kn_model_logrsd"))
print(m_rvi_na_w)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_n | weights(weight_NA) ~ neuro_t
## Data: data_w (Number of observations: 101)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.99 0.12 -1.22 -0.75 1.00 4255 2839
## neuro_t 0.04 0.03 -0.03 0.11 1.00 4459 2801
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.38 0.02 0.34 0.41 1.00 3837 2962
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[5,3] <- extract_param(m_rvi_na_w, "b_neuro_t")
data_w$weight_PA <- NA
for (i in 1:nrow(data_w)) {
  if (!is.na(data_w$mean_PA[i])) {
    data_w$weight_PA[i] <- maximumSD(data_w$mean_PA[i], # mean
                                     1,                 # minimum
                                     5,                 # maximum
                                     sum(!is.na(dataset$posemo_full_m[dataset$person_id == data_w$person_id[i]]))
                                     )
    # W as reported in paper
    data_w$weight_PA[i] <- data_w$weight_PA[i]^2
  }
}
m_rvi_pa_w <- brm(logrsd_p| weights(weight_PA) ~ neuro_t, data= data_w,
file = paste("models/", params$file, "Kp_model_logrsd"))
print(m_rvi_pa_w)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logrsd_p | weights(weight_PA) ~ neuro_t
## Data: data_w (Number of observations: 101)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.82 0.08 -0.97 -0.67 1.00 3733 2869
## neuro_t -0.03 0.02 -0.07 0.02 1.00 3682 2744
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.33 0.01 0.30 0.36 1.00 3907 2563
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[5,6] <- extract_param(m_rvi_pa_w, "b_neuro_t")
SD
data_w$sd_NA <- NA
for (i in 1:nrow(data_w)) {
data_w$sd_NA[i] <- sd(dataset$negemo_full_m[dataset$person_id == data_w$person_id[i]],
na.rm = T)
}
data_w$sd_PA <- NA
for (i in 1:nrow(data_w)) {
data_w$sd_PA[i] <- sd(dataset$posemo_full_m[dataset$person_id == data_w$person_id[i]],
na.rm = T)
}
mean(data_w$sd_NA)
## [1] 0.6556998
mean(data_w$sd_PA)
## [1] 0.7716933
data_w$sd_PA[data_w$sd_PA == 0] <- NA
data_w$sd_NA[data_w$sd_NA == 0] <- NA
data_w$logsd_NA <- log(data_w$sd_NA)
data_w$logsd_PA <- log(data_w$sd_PA)
m_sd_na <- brm(logsd_NA ~ neuro_t, data= data_w,
file = paste("models/", params$file, "Kn_model_logsd"))
m_sd_na
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logsd_NA ~ neuro_t
## Data: data_w (Number of observations: 101)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.82 0.22 -1.25 -0.40 1.00 4230 2734
## neuro_t 0.09 0.06 -0.04 0.21 1.00 4284 2874
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.48 0.03 0.42 0.55 1.00 3953 2968
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[6,3] <- extract_param(m_sd_na, "b_neuro_t")
m_sd_pa <- brm(logsd_PA ~ neuro_t, data= data_w,
file = paste("models/", params$file, "Kp_model_logsd"))
m_sd_pa
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: logsd_PA ~ neuro_t
## Data: data_w (Number of observations: 101)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.20 0.15 -0.50 0.10 1.00 3109 2539
## neuro_t -0.03 0.04 -0.12 0.05 1.00 3109 2556
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 0.34 0.02 0.29 0.39 1.00 3283 2625
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
results_K[6,6] <- extract_param(m_sd_pa, "b_neuro_t")
library("writexl")
write_xlsx(results_K,paste0("", params$file, ".xlsx"))
Incremental Validity of SD
na_noneurot <- brm(bf(negemo_full_m | cens(Acens) ~ (1|person_id),
sigma ~ (1|person_id)), data = dataset,
iter = 7000, warmup = 2000,chains = 4,
control = list(adapt_delta = .99), init = 0.1,
file = "na_noneurot")
## Warning: Rows containing NAs were excluded from the model.
print(na_noneurot)
## Warning: There were 10290 divergent transitions after
## warmup. Increasing adapt_delta above 0.99 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: gaussian
## Links: mu = identity; sigma = log
## Formula: negemo_full_m | cens(Acens) ~ (1 | person_id)
## sigma ~ (1 | person_id)
## Data: dataset (Number of observations: 8275)
## Draws: 4 chains, each with iter = 7000; warmup = 2000; thin = 1;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~person_id (Number of levels: 101)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.86 0.07 0.73 1.00 1.00 1003 1926
## sd(sigma_Intercept) 0.53 0.04 0.46 0.61 1.00 1122 2234
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 1.62 0.08 1.45 1.78 1.01 215 383
## sigma_Intercept -0.32 0.05 -0.42 -0.21 1.00 449 1077
##
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
rans <- coef(na_noneurot, summary = T)
rans_i <- as.data.frame(rans$person_id[,,"Intercept"]) %>% tibble::rownames_to_column("person_id")
rans_s <- as.data.frame(rans$person_id[,,"sigma_Intercept"]) %>% tibble::rownames_to_column("person_id")
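coef() returns, for each grouping factor, a three-dimensional array (levels × summary statistic × parameter); the indexing above pulls the person-specific estimates for the mean intercept and for the log-sigma intercept. To inspect the structure directly:
str(rans$person_id)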
nrow(rans_s)
## [1] 101
nrow(rans_i)
## [1] 101
nrow(data_w)
## [1] 101
dat <- merge(rans_s, rans_i, all = T, by= "person_id")
dat <- merge(dat, data_w, all = T, by= "person_id")
names(dat)[2] <- "Est.SD"
names(dat)[6] <- "Est.M"
fit1 <- lm(neuro_t ~ Est.SD + Est.M , data=dat)
summary(fit1)
##
## Call:
## lm(formula = neuro_t ~ Est.SD + Est.M, data = dat)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.87229 -0.51974 -0.01075 0.66779 1.68795
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.05337 0.17018 17.942 <2e-16 ***
## Est.SD 0.07389 0.14754 0.501 0.6177
## Est.M 0.18093 0.09065 1.996 0.0487 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.7555 on 98 degrees of freedom
## Multiple R-squared: 0.04093, Adjusted R-squared: 0.02136
## F-statistic: 2.091 on 2 and 98 DF, p-value: 0.129
fit1.2 <- lm(neuro_t ~ Est.M , data=dat)
summary(fit1.2)
##
## Call:
## lm(formula = neuro_t ~ Est.M, data = dat)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.86755 -0.50160 0.03936 0.63829 1.67176
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.03216 0.16420 18.47 <2e-16 ***
## Est.M 0.17968 0.09027 1.99 0.0493 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.7526 on 99 degrees of freedom
## Multiple R-squared: 0.03848, Adjusted R-squared: 0.02877
## F-statistic: 3.962 on 1 and 99 DF, p-value: 0.0493
aov <- anova(fit1.2, fit1)
aov
## Analysis of Variance Table
##
## Model 1: neuro_t ~ Est.M
## Model 2: neuro_t ~ Est.SD + Est.M
## Res.Df RSS Df Sum of Sq F Pr(>F)
## 1 99 56.077
## 2 98 55.934 1 0.14313 0.2508 0.6177
summary(fit1)$r.squared-summary(fit1.2)$r.squared
## [1] 0.002454167
results_SDin <- data.frame(matrix(nrow = 1, ncol = 9))
names(results_SDin) <- c("Dataset","b_SD","Err.SD","p(b_SD)","b_M","Err.M","p(b_M)","ΔR²", "p")
results_SDin$Dataset <- params$file
results_SDin$`ΔR²` <- summary(fit1)$r.squared-summary(fit1.2)$r.squared
results_SDin$`p` <- aov$`Pr(>F)`[2]
results_SDin$Err.SD <- summary(fit1)$coefficients[2,2]
results_SDin$b_SD <- fit1$coefficients[2]
results_SDin$`p(b_SD)` <- summary(fit1)$coefficients[2,4]
results_SDin$b_M <- fit1$coefficients[3]
results_SDin$`p(b_M)` <- summary(fit1)$coefficients[3,4]
results_SDin$Err.M <- summary(fit1)$coefficients[3,2]
library("writexl")
write_xlsx(results_SDin,paste0("SD", params$file, ".xlsx"))