library(kirkegaard)
## Loading required package: tidyverse
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.5
## ✔ forcats 1.0.1 ✔ stringr 1.6.0
## ✔ ggplot2 4.0.1.9000 ✔ tibble 3.3.0
## ✔ lubridate 1.9.4 ✔ tidyr 1.3.1
## ✔ purrr 1.2.0
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
## Loading required package: magrittr
##
##
## Attaching package: 'magrittr'
##
##
## The following object is masked from 'package:purrr':
##
## set_names
##
##
## The following object is masked from 'package:tidyr':
##
## extract
##
##
## Loading required package: weights
##
## Loading required package: assertthat
##
##
## Attaching package: 'assertthat'
##
##
## The following object is masked from 'package:tibble':
##
## has_name
##
##
## Loading required package: psych
##
##
## Attaching package: 'psych'
##
##
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
##
##
## Loading required package: robustbase
##
##
## Attaching package: 'kirkegaard'
##
##
## The following object is masked from 'package:psych':
##
## rescale
##
##
## The following object is masked from 'package:assertthat':
##
## are_equal
##
##
## The following object is masked from 'package:purrr':
##
## is_logical
##
##
## The following object is masked from 'package:base':
##
## +
load_packages(
  googlesheets4,
  sf, rnaturalearth,
  fixest,
  patchwork,
  lme4,
  simex
)
## Linking to GEOS 3.10.2, GDAL 3.4.1, PROJ 8.2.1; sf_use_s2() is TRUE
## Loading required package: Matrix
##
## Attaching package: 'Matrix'
##
## The following objects are masked from 'package:tidyr':
##
## expand, pack, unpack
##
##
## Attaching package: 'simex'
##
## The following object is masked from 'package:lme4':
##
## refit
theme_set(theme_bw())
options(
digits = 3
)
#multithreading
#library(future)
#plan(multisession(workers = 8))
#helper: insert a row of values into a data frame at a given position
insert_row = function(df, values, at = 1) {
  assertthat::assert_that(ncol(df) == length(values))

  #rows that go before and after the inserted row
  #(at = nrow(df) appends the new row after the last row)
  if (at == 1) {
    idx_before = numeric()
    idx_after = 1:nrow(df)
  } else if (at == nrow(df)) {
    idx_before = 1:nrow(df)
    idx_after = numeric()
  } else {
    idx_before = 1:(at-1)
    idx_after = at:nrow(df)
  }

  bind_rows(
    df[idx_before, ],
    #turn the values into a 1-row tibble with the same column names as df
    matrix(values, nrow = 1) %>% {colnames(.) = colnames(df); .} %>% as_tibble(),
    df[idx_after, ]
  )
}
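#quick illustration of insert_row on a toy all-character tibble (made-up values,
#not part of the analysis); matrix() coerces the new row to a single type, so
#the helper is intended for character tables like the summarize_models() output
if (F) {
  insert_row(
    tibble(a = c("1", "2", "3"), b = c("x", "y", "z")),
    values = c("new a", "new b"),
    at = 2
  )
}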
summarize_fixest <- function(models) {
  library(dplyr)
  library(purrr)
  library(broom)

  # significance stars
  pstars <- function(p){
    ifelse(p < 0.001, "***",
           ifelse(p < 0.01, "**",
                  ifelse(p < 0.05, "*", "")))
  }

  # extract formatted coefficients
  coef_list <- map(models, function(m){
    tidy(m) %>%
      mutate(
        formatted = sprintf("%.2f (%.3f%s)", estimate, std.error, pstars(p.value))
      ) %>%
      select(term, formatted)
  })

  # all coefficient terms
  all_terms <- coef_list |> map(~.$term) |> unlist() |> unique()

  # coefficient table
  results <- tibble(`Predictor/Model` = all_terms)
  for (i in seq_along(coef_list)) {
    results <- results %>%
      left_join(coef_list[[i]], by = c("Predictor/Model" = "term")) %>%
      rename(!!as.character(i) := formatted)
  }

  # extract model stats safely
  # (fixest does not store adjusted R2 in the summary object, so query it directly)
  stat_list <- map(models, function(m){
    s <- summary(m)
    tibble(
      adj_r2 = tryCatch(unname(fixest::r2(m, type = "ar2")), error = function(e) NA_real_),
      n = s$nobs
    )
  })
  stats_df <- bind_rows(stat_list)

  # R2 row
  r2_row <- tibble(`Predictor/Model` = "R2 adj.")
  for (i in seq_len(nrow(stats_df))) {
    r2_row[[as.character(i)]] <-
      ifelse(is.na(stats_df$adj_r2[i]), NA_character_,
             sprintf("%.3f", stats_df$adj_r2[i]))
  }

  # N row: convert to character
  n_row <- tibble(`Predictor/Model` = "N")
  for (i in seq_len(nrow(stats_df))) {
    n_row[[as.character(i)]] <- as.character(stats_df$n[i])
  }

  # all columns character except first
  results <- results %>% mutate(across(-1, as.character))
  r2_row <- r2_row %>% mutate(across(-1, as.character))
  n_row <- n_row %>% mutate(across(-1, as.character))

  # stack all rows
  final <- bind_rows(results, r2_row, n_row)
  final
}
#NIQs
gs4_deauth()
d_NIQs = read_sheet("https://docs.google.com/spreadsheets/d/1cReoeIZLlbxOrN4_wnj52Q6UWFepXSJGi3Gj5m6ZAZg/edit?gid=1739623793#gid=1739623793", sheet = "Seb2024")
## ✔ Reading from "National IQ datasets".
## ✔ Range ''Seb2024''.
#SPI
d_spi = readxl::read_excel("data/2014-2019-SPI-Public.xlsx", sheet = 2) %>%
df_legalize_names() %>%
rename(ISO = Code)
#regional coding
d_regions = read_sheet("https://docs.google.com/spreadsheets/d/1ToJWNbwYY--w0_nh05slEv4ZCko4a-XZc1rAkymTxAA/edit?gid=0#gid=0") %>%
df_legalize_names() %>%
rename(ISO = ISO3)
## ✔ Reading from "Countries regional codings".
## ✔ Range 'Sheet1'.
#join
d = smart_join(
  d_NIQs,
  d_spi,
  id_col = "ISO"
) %>% smart_join(
  d_regions,
  id_col = "ISO"
)
#no dups
assert_that(!anyDuplicated(d$ISO))
## [1] TRUE
#world map
sp_world = ne_countries(scale = "medium", returnclass = "sf") %>%
mutate(
ISO = adm0_a3
)
#fix non-standard ISO codes in the Natural Earth data
sp_world$ISO = case_when(
  sp_world$ISO == "SDS" ~ "SSD", #South Sudan
  sp_world$ISO == "SAH" ~ "ESH", #Western Sahara
  sp_world$ISO == "PSX" ~ "PSE", #Palestine
  sp_world$ISO == "ALD" ~ "ALA", #Åland
  sp_world$ISO == "KOS" ~ "XKX", #Kosovo
  #otherwise leave as they are
  .default = sp_world$ISO
)
#join
d2 = smart_join(
  sp_world,
  d,
  id_col = "ISO"
) %>%
  filter(!is.na(geometry))
#which have missing regional codes?
d2 %>% filter(is.na(Region1)) %>% select(name, ISO, iso_a2, Region1, Region2, Region3) %>% st_drop_geometry()
#pseudo centroids
d2 %<>% mutate(
lon = label_x,
lat = label_y
)
#can also try largest cities
cities <- ne_download(
  scale = "medium",
  type = "populated_places",
  category = "cultural",
  returnclass = "sf"
)
## Reading 'ne_50m_populated_places.zip' from naturalearth...
largest_cities <- cities %>%
  filter(FEATURECLA == "Populated place") %>%
  group_by(ADM0NAME) %>%
  slice_max(POP_MAX, n = 1)
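#largest_cities is not used further below; if city-based anchor points were
#wanted instead of the label positions, a join along these lines should work
#(column names are the Natural Earth populated-places defaults, so treat this
#as a sketch rather than tested code)
if (F) {
  city_points = largest_cities %>%
    st_drop_geometry() %>%
    select(ISO = ADM0_A3, city_name = NAME, city_pop = POP_MAX,
           city_lon = LONGITUDE, city_lat = LATITUDE)
  d2_city = d2 %>% left_join(city_points, by = "ISO")
}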
#regional maps
p_r1 = d2 %>%
  ggplot() +
  geom_sf(aes(fill = Region1), color = NA) +
  theme_minimal() +
  guides(fill = guide_legend(
    keyheight = unit(3, "mm"),
    keywidth = unit(3, "mm"),
    title.position = "top",
    ncol = 1 # try 2–3 if many categories
  )) +
  theme(
    legend.title = element_text(size = 8),
    legend.text = element_text(size = 6),
    legend.key.size = unit(2, "mm"),
    legend.spacing.y = unit(1, "mm"),
    legend.box.spacing = unit(1, "mm")
  )
p_r2 = d2 %>%
  ggplot() +
  geom_sf(aes(fill = Region2), color = NA) +
  theme_minimal() +
  guides(fill = guide_legend(
    keyheight = unit(3, "mm"),
    keywidth = unit(3, "mm"),
    title.position = "top",
    ncol = 2 # try 2–3 if many categories
  )) +
  theme(
    legend.title = element_text(size = 8),
    legend.text = element_text(size = 6),
    legend.key.size = unit(2, "mm"),
    legend.spacing.y = unit(1, "mm"),
    legend.box.spacing = unit(1, "mm")
  )
p_r3 = d2 %>%
  ggplot() +
  geom_sf(aes(fill = Region3), color = NA) +
  theme_minimal() +
  guides(fill = guide_legend(
    keyheight = unit(3, "mm"),
    keywidth = unit(3, "mm"),
    title.position = "top",
    ncol = 2 # try 2–3 if many categories
  )) +
  theme(
    legend.title = element_text(size = 8),
    legend.text = element_text(size = 6),
    legend.key.size = unit(2, "mm"),
    legend.spacing.y = unit(1, "mm"),
    legend.box.spacing = unit(1, "mm")
  )
patchwork::wrap_plots(
  p_r1,
  p_r2,
  p_r3,
  ncol = 1
)
GG_save("figs/regions.png")
#country points / centroids
d2 %>%
ggplot() +
geom_sf(aes(fill = Region1), color = NA) +
geom_point(aes(lon, lat)) +
theme_minimal()
## Warning: Removed 20 rows containing missing values or values outside the scale range
## (`geom_point()`).
#region counts
d2$Region1 %>% table2(include_NA = F) %>% nrow()
## [1] 5
d2$Region2 %>% table2(include_NA = F) %>% nrow()
## [1] 17
d2$Region3 %>% table2(include_NA = F) %>% nrow()
## [1] 23
#SPI ~ NIQ + regions
mods1 = list(
  lm(Social_Progress_Index ~ NIQ, data = d2),
  lm(Social_Progress_Index ~ NIQ + Region1, data = d2),
  lm(Social_Progress_Index ~ NIQ + Region2, data = d2),
  lm(Social_Progress_Index ~ NIQ + Region3, data = d2)
)
summarize_models(mods1) %>% filter(!str_detect(`Predictor/Model`, "^Region")) %>%
  insert_row(at = 3, values = c("Regions", "None", "5", "17", "23")) %>%
  flextable::flextable()
| Predictor/Model | 1 | 2 | 3 | 4 |
|---|---|---|---|---|
| (Intercept) | -35.53 (5.213***) | -23.69 (7.280**) | -10.57 (14.152) | -15.87 (14.423) |
| NIQ | 1.21 (0.062***) | 1.03 (0.101***) | 1.01 (0.133***) | 1.06 (0.137***) |
| Regions | None | 5 | 17 | 23 |
| R2 adj. | 0.722 | 0.754 | 0.777 | 0.793 |
| N | 149 | 149 | 149 | 149 |
#std betas
mods1 %>% map_df(parameters::standardise_parameters) %>% filter(!str_detect(Parameter, "^Region"))
#true fixed effects
mods2 = list(
  fixest::feols(Social_Progress_Index ~ NIQ, data = d2),
  fixest::feols(Social_Progress_Index ~ NIQ | Region1, data = d2),
  fixest::feols(Social_Progress_Index ~ NIQ | Region2, data = d2),
  fixest::feols(Social_Progress_Index ~ NIQ | Region3, data = d2)
)
## NOTE: 113 observations removed because of NA values (LHS: 112, RHS: 64).
## NOTE: 113 observations removed because of NA values (LHS: 112, RHS: 64, Fixed-effects: 11).
## NOTE: 113 observations removed because of NA values (LHS: 112, RHS: 64, Fixed-effects: 11).
## NOTE: 113 observations removed because of NA values (LHS: 112, RHS: 64, Fixed-effects: 11).
map_df(mods2, broom::tidy)
mods2 %>% summarize_fixest()
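#cross-check: fixest's own table function should give the same coefficients and
#fit statistics as the custom summarize_fixest() above (not run here)
if (F) {
  fixest::etable(mods2[[1]], mods2[[2]], mods2[[3]], mods2[[4]])
}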
#random effects
mods3 <- list(
  lmer(Social_Progress_Index ~ NIQ + (NIQ | Region1), data = d2),
  lmer(Social_Progress_Index ~ NIQ + (NIQ | Region2), data = d2),
  lmer(Social_Progress_Index ~ NIQ + (NIQ | Region3), data = d2)
)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
mods3
## [[1]]
## Linear mixed model fit by REML ['lmerMod']
## Formula: Social_Progress_Index ~ NIQ + (NIQ | Region1)
## Data: d2
## REML criterion at convergence: 1044
## Random effects:
## Groups Name Std.Dev. Corr
## Region1 (Intercept) 7.736
## NIQ 0.136 -1.00
## Residual 7.845
## Number of obs: 149, groups: Region1, 5
## Fixed Effects:
## (Intercept) NIQ
## -27.83 1.12
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings
##
## [[2]]
## Linear mixed model fit by REML ['lmerMod']
## Formula: Social_Progress_Index ~ NIQ + (NIQ | Region2)
## Data: d2
## REML criterion at convergence: 1040
## Random effects:
## Groups Name Std.Dev. Corr
## Region2 (Intercept) 7.574
## NIQ 0.134 -1.00
## Residual 7.503
## Number of obs: 149, groups: Region2, 15
## Fixed Effects:
## (Intercept) NIQ
## -29.56 1.14
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings
##
## [[3]]
## Linear mixed model fit by REML ['lmerMod']
## Formula: Social_Progress_Index ~ NIQ + (NIQ | Region3)
## Data: d2
## REML criterion at convergence: 1037
## Random effects:
## Groups Name Std.Dev. Corr
## Region3 (Intercept) 4.35390
## NIQ 0.00192 0.98
## Residual 7.23578
## Number of obs: 149, groups: Region3, 20
## Fixed Effects:
## (Intercept) NIQ
## -30.98 1.16
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings
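#the random-slope fits above are singular (slope-intercept correlations near 1
#in absolute value), so the implied region-specific slopes are worth inspecting
#directly; coef() returns the fixed slope plus each region's random deviation
if (F) {
  coef(mods3[[1]])$Region1 #per-region intercepts and NIQ slopes
  coef(mods3[[3]])$Region3
}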
#plot slopes by region
p_r1r = d2 %>%
  ggplot(aes(NIQ, Social_Progress_Index, color = Region1)) +
  geom_point() +
  geom_smooth(method = "lm") +
  scale_color_discrete(guide = "none")

p_r2r = d2 %>%
  ggplot(aes(NIQ, Social_Progress_Index, color = Region2)) +
  geom_point() +
  geom_smooth(method = "lm") +
  scale_color_discrete(guide = "none")

p_r3r = d2 %>%
  ggplot(aes(NIQ, Social_Progress_Index, color = Region3)) +
  geom_point() +
  geom_smooth(method = "lm") +
  scale_color_discrete(guide = "none")

patchwork::wrap_plots(
  p_r1r,
  p_r2r,
  p_r3r,
  ncol = 1
)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 113 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning: Removed 113 rows containing missing values or values outside the scale range
## (`geom_point()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 113 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning: Removed 113 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 113 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning: Removed 113 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
GG_save("figs/SPI~NIQ, regions.png")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 113 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning: Removed 113 rows containing missing values or values outside the scale range
## (`geom_point()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 113 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning: Removed 113 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 113 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning in qt((1 - level)/2, df): NaNs produced
## Warning: Removed 113 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
#SIMEX: simulation-extrapolation correction for measurement error in NIQ,
#using the NIQ standard errors (SE_NIQ) as the measurement error SDs
#subset data to complete cases
d_cc = d %>% select(
  NIQ, Social_Progress_Index, SE_NIQ, Region1, Region2, Region3
) %>% na.omit()
set.seed(1)
simex_r0 = simex(
  lm(Social_Progress_Index ~ NIQ, data = d_cc, x = T),
  SIMEXvariable = "NIQ",
  measurement.error = matrix(d_cc$SE_NIQ, ncol = 1),
  asymptotic = F,
  B = 1000
)

simex_r1 = simex(
  lm(Social_Progress_Index ~ NIQ + Region1, data = d_cc, x = T),
  SIMEXvariable = "NIQ",
  measurement.error = matrix(d_cc$SE_NIQ, ncol = 1),
  asymptotic = F,
  B = 1000
)

simex_r2 = simex(
  lm(Social_Progress_Index ~ NIQ + Region2, data = d_cc, x = T),
  SIMEXvariable = "NIQ",
  measurement.error = matrix(d_cc$SE_NIQ, ncol = 1),
  asymptotic = F,
  B = 1000
)

simex_r3 = simex(
  lm(Social_Progress_Index ~ NIQ + Region3, data = d_cc, x = T),
  SIMEXvariable = "NIQ",
  measurement.error = matrix(d_cc$SE_NIQ, ncol = 1),
  asymptotic = F,
  B = 1000
)
#table of comparison
tibble(
  regions = c(0, 5, 17, 23),
  uncorrected = mods1 %>% map_df(broom::tidy) %>% filter(term == "NIQ") %>% pull(estimate),
  simex = list(simex_r0, simex_r1, simex_r2, simex_r3) %>% map_dbl(~coef(.)[2])
) %>%
  flextable::flextable()
| regions | uncorrected | simex |
|---|---|---|
| 0 | 1.21 | 1.29 |
| 5 | 1.03 | 1.23 |
| 17 | 1.01 | 1.33 |
| 23 | 1.06 | 1.44 |
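#the SIMEX-corrected slopes are consistently larger than the uncorrected ones,
#as expected when measurement error in the predictor attenuates the estimate;
#individual fits can be inspected with simex's standard methods (not run here)
if (F) {
  summary(simex_r1) #jackknife SEs for the corrected coefficients
  plot(simex_r1) #coefficient estimates across added-noise levels (lambda)
}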
#versions
write_sessioninfo()
## R version 4.5.2 (2025-10-31)
## Platform: x86_64-pc-linux-gnu
## Running under: Linux Mint 21.1
##
## Matrix products: default
## BLAS: /usr/lib/x86_64-linux-gnu/blas/libblas.so.3.10.0
## LAPACK: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3.10.0 LAPACK version 3.10.0
##
## locale:
## [1] LC_CTYPE=en_US.UTF-8 LC_NUMERIC=C
## [3] LC_TIME=en_US.UTF-8 LC_COLLATE=en_US.UTF-8
## [5] LC_MONETARY=en_DK.UTF-8 LC_MESSAGES=en_US.UTF-8
## [7] LC_PAPER=en_DK.UTF-8 LC_NAME=C
## [9] LC_ADDRESS=C LC_TELEPHONE=C
## [11] LC_MEASUREMENT=en_DK.UTF-8 LC_IDENTIFICATION=C
##
## time zone: Europe/Brussels
## tzcode source: system (glibc)
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] broom_1.0.11 simex_1.8 lme4_1.1-37
## [4] Matrix_1.7-4 patchwork_1.3.2.9000 fixest_0.13.2
## [7] rnaturalearth_1.1.0 sf_1.0-21 googlesheets4_1.1.2
## [10] kirkegaard_2025-11-19 robustbase_0.99-6 psych_2.5.6
## [13] assertthat_0.2.1 weights_1.1.2 magrittr_2.0.4
## [16] lubridate_1.9.4 forcats_1.0.1 stringr_1.6.0
## [19] dplyr_1.1.4 purrr_1.2.0 readr_2.1.5
## [22] tidyr_1.3.1 tibble_3.3.0 ggplot2_4.0.1.9000
## [25] tidyverse_2.0.0
##
## loaded via a namespace (and not attached):
## [1] RColorBrewer_1.1-3 rstudioapi_0.17.1 jsonlite_2.0.0
## [4] shape_1.4.6.1 datawizard_1.3.0 TH.data_1.1-4
## [7] estimability_1.5.1 jomo_2.7-6 farver_2.1.2
## [10] nloptr_2.2.1 rmarkdown_2.30 fs_1.6.6
## [13] ragg_1.5.0 vctrs_0.6.5 minqa_1.2.8
## [16] askpass_1.2.1 base64enc_0.1-3 htmltools_0.5.8.1
## [19] curl_7.0.0 cellranger_1.1.0 Formula_1.2-5
## [22] mitml_0.4-5 sass_0.4.10 KernSmooth_2.23-26
## [25] bslib_0.9.0 htmlwidgets_1.6.4 plyr_1.8.9
## [28] sandwich_3.1-1 emmeans_1.11.2-8 zoo_1.8-14
## [31] cachem_1.1.0 uuid_1.2-1 lifecycle_1.0.4
## [34] iterators_1.0.14 pkgconfig_2.0.3 R6_2.6.1
## [37] fastmap_1.2.0 rbibutils_2.3 digest_0.6.39
## [40] numDeriv_2016.8-1.1 colorspace_2.1-2 textshaping_1.0.4
## [43] Hmisc_5.2-4 labeling_0.4.3 timechange_0.3.0
## [46] gdata_3.0.1 mgcv_1.9-3 httr_1.4.7
## [49] compiler_4.5.2 gargle_1.6.0 proxy_0.4-27
## [52] fontquiver_0.2.1 withr_3.0.2 htmlTable_2.4.3
## [55] S7_0.2.1 backports_1.5.0 DBI_1.2.3
## [58] pan_1.9 MASS_7.3-65 openssl_2.3.4
## [61] classInt_0.4-11 gtools_3.9.5 tools_4.5.2
## [64] units_1.0-0 foreign_0.8-90 googledrive_2.1.2
## [67] zip_2.3.3 nnet_7.3-20 glue_1.8.0
## [70] rnaturalearthdata_1.0.0 nlme_3.1-168 stringmagic_1.2.0
## [73] grid_4.5.2 checkmate_2.3.3 cluster_2.1.8.1
## [76] generics_0.1.4 gtable_0.3.6 tzdb_0.5.0
## [79] class_7.3-23 data.table_1.17.8 hms_1.1.3
## [82] xml2_1.4.0 foreach_1.5.2 pillar_1.11.1
## [85] splines_4.5.2 lattice_0.22-7 survival_3.8-3
## [88] dreamerr_1.5.0 tidyselect_1.2.1 fontLiberation_0.1.0
## [91] knitr_1.50 fontBitstreamVera_0.1.1 reformulas_0.4.1
## [94] gridExtra_2.3 xfun_0.53 DEoptimR_1.1-4
## [97] stringi_1.8.7 yaml_2.3.10 boot_1.3-32
## [100] evaluate_1.0.5 codetools_0.2-20 officer_0.7.0
## [103] gdtools_0.4.4 cli_3.6.5 rpart_4.1.24
## [106] parameters_0.28.2 xtable_1.8-4 systemfonts_1.3.1
## [109] Rdpack_2.6.4 jquerylib_0.1.4 Rcpp_1.1.0
## [112] readxl_1.4.5 coda_0.19-4.1 parallel_4.5.2
## [115] bayestestR_0.17.0 glmnet_4.1-10 mvtnorm_1.3-3
## [118] scales_1.4.0 e1071_1.7-16 insight_1.4.2
## [121] flextable_0.9.10 rlang_1.1.6.9000 multcomp_1.4-28
## [124] mnormt_2.1.1 mice_3.18.0
#write data to file for reuse
d %>% write_rds("data/data_for_reuse.rds", compress = "xz")
#OSF
if (F) {
  library(osfr)

  #login
  osf_auth(readr::read_lines("~/.config/osf_token"))

  #the project we will use
  osf_proj = osf_retrieve_node("https://osf.io/XXX/")

  #upload all files in project
  #overwrite existing (versioning)
  osf_upload(
    osf_proj,
    path = c("data", "figures", "papers", "notebook.Rmd", "notebook.html", "sessions_info.txt"),
    conflicts = "overwrite"
  )
}