library(kirkegaard)
## Loading required package: tidyverse
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.2 ✔ readr 2.1.4
## ✔ forcats 1.0.0 ✔ stringr 1.5.0
## ✔ ggplot2 3.4.2 ✔ tibble 3.2.1
## ✔ lubridate 1.9.2 ✔ tidyr 1.3.0
## ✔ purrr 1.0.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
## Loading required package: magrittr
##
##
## Attaching package: 'magrittr'
##
##
## The following object is masked from 'package:purrr':
##
## set_names
##
##
## The following object is masked from 'package:tidyr':
##
## extract
##
##
## Loading required package: weights
##
## Loading required package: Hmisc
##
##
## Attaching package: 'Hmisc'
##
##
## The following objects are masked from 'package:dplyr':
##
## src, summarize
##
##
## The following objects are masked from 'package:base':
##
## format.pval, units
##
##
## Loading required package: assertthat
##
##
## Attaching package: 'assertthat'
##
##
## The following object is masked from 'package:tibble':
##
## has_name
##
##
## Loading required package: psych
##
##
## Attaching package: 'psych'
##
##
## The following object is masked from 'package:Hmisc':
##
## describe
##
##
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
##
##
##
## Attaching package: 'kirkegaard'
##
##
## The following object is masked from 'package:psych':
##
## rescale
##
##
## The following object is masked from 'package:assertthat':
##
## are_equal
##
##
## The following object is masked from 'package:purrr':
##
## is_logical
##
##
## The following object is masked from 'package:base':
##
## +
load_packages(
  rms,
  mirt,
  googlesheets4,
  patchwork
)
## Loading required package: stats4
## Loading required package: lattice
theme_set(theme_bw())
options(
  digits = 3,
  scipen = 999
)
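# Illustrative effect of these options (added here, not part of the original run):
# numbers print with 3 significant digits and in fixed rather than scientific notation.
print(0.000012345)  # 0.0000123 instead of 1.2345e-05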
# recode taboo Likert responses to numbers (1 = not at all taboo ... 5 = extremely taboo)
recode_likert_taboo = function(x) {
  mapvalues(x, from = c("Not at all taboo", "A little taboo", "Somewhat taboo", "Very taboo", "Extremely taboo"), to = 1:5) %>% as.numeric()
}

# recode revoked consent to NA
recode_nonconsent = function(x) {
  mapvalues(x, from = "CONSENT_REVOKED", to = NA, warn_missing = F)
}

# recode agreement Likert responses to numbers (1 = totally disagree ... 5 = totally agree)
recode_likert = function(x) {
  mapvalues(x, from = c("Totally disagree", "Somewhat disagree", "Neutral", "Somewhat agree", "Totally agree"), to = 1:5) %>% as.numeric()
}
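# Quick sanity check of the recoders (illustrative, not part of the original analysis):
recode_likert(c("Totally disagree", "Somewhat disagree", "Neutral", "Somewhat agree", "Totally agree"))  # 1 2 3 4 5
recode_nonconsent(c("CONSENT_REVOKED", "25"))  # NA "25"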
# Prolific participant metadata; df_legalize_names() converts the export's column
# names (e.g. "Participant id") into legal R names (e.g. Participant_id)
prolific_meta = read_csv("data/prolific_export_62bfea38712944a2d934e957.csv") %>%
  df_legalize_names()
## Rows: 528 Columns: 22
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (14): Submission id, Participant id, Status, Completion code, Fluent la...
## dbl (4): Time taken, Total approvals, Total rejections, Approval rate
## dttm (4): Started at, Completed at, Reviewed at, Archived at
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
# survey response data
prolific_data = read_csv("data/20220830035106-SurveyExport.csv") %>%
  df_legalize_names()
## New names:
## Rows: 572 Columns: 193
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr  (62): Status, Language, Referer, SessionID, User Agent, IP Address, Co...
## dbl (125): Response ID, Longitude, Latitude, How important do you think the...
## lgl   (4): Contact ID, Legacy Comments, Comments, Tags
## dttm  (2): Time Started, Date Submitted
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
## • `Physics` -> `Physics...32`
## • `History of Science` -> `History of Science...33`
## • `Mathematics` -> `Mathematics...34`
## • `Classics` -> `Classics...35`
## • `Philosophy` -> `Philosophy...36`
## • `Chemistry` -> `Chemistry...37`
## • `Economics` -> `Economics...38`
## • `Linguistics` -> `Linguistics...39`
## • `Statistics` -> `Statistics...40`
## • `Neuroscience` -> `Neuroscience...41`
## • `Engineering` -> `Engineering...42`
## • `Finance` -> `Finance...44`
## • `Psychology` -> `Psychology...45`
## • `Paleontology` -> `Paleontology...46`
## • `Genetics` -> `Genetics...47`
## • `Computer Science` -> `Computer Science...48`
## • `Music` -> `Music...49`
## • `International Relations` -> `International Relations...50`
## • `History` -> `History...51`
## • `Zoology` -> `Zoology...52`
## • `Creative Writing` -> `Creative Writing...53`
## • `Anthropology` -> `Anthropology...54`
## • `Political Science` -> `Political Science...55`
## • `Art history` -> `Art history...56`
## • `Biology` -> `Biology...57`
## • `Drama` -> `Drama...58`
## • `Education` -> `Education...59`
## • `Clinical Psychology` -> `Clinical Psychology...60`
## • `Sociology` -> `Sociology...61`
## • `Nursing` -> `Nursing...62`
## • `Criminology` -> `Criminology...63`
## • `Sports Science` -> `Sports Science...64`
## • `Gender studies` -> `Gender studies...65`
## • `Law` -> `Law...66`
## • `Agriculture, Natural Res. & Conservation` -> `Agriculture, Natural Res. &
## Conservation...67`
## • `Health & Medical Sciences` -> `Health & Medical Sciences...68`
## • `Earth, Atmospheric, & Marine Sciences` -> `Earth, Atmospheric, & Marine
## Sciences...69`
## • `Arts ─ Performance & Studio` -> `Arts ─ Performance & Studio...70`
## • `Foreign Languages & Literatures` -> `Foreign Languages & Literatures...71`
## • `Business Admin & Management` -> `Business Admin & Management...73`
## • `Banking & Finance` -> `Banking & Finance...74`
## • `Architecture & Environmental Design` -> `Architecture & Environmental
## Design...75`
## • `Communications & Journalism` -> `Communications & Journalism...76`
## • `Religion & Theology` -> `Religion & Theology...77`
## • `Library & Archival Sciences` -> `Library & Archival Sciences...78`
## • `Social Work` -> `Social Work...79`
## • `English Literature ` -> `English Literature ...80`
## • `Animal sciences` -> `Animal sciences...81`
## • `Marine biology` -> `Marine biology...82`
## • `Nutrition ` -> `Nutrition ...83`
## • `Environmental sciences` -> `Environmental sciences...84`
## • `Physical therapy` -> `Physical therapy...85`
## • `Public health` -> `Public health...86`
## • `Veterinary medicine` -> `Veterinary medicine...87`
## • `Marketing` -> `Marketing...88`
## • `Physics` -> `Physics...92`
## • `History of Science` -> `History of Science...93`
## • `Mathematics` -> `Mathematics...94`
## • `Classics` -> `Classics...95`
## • `Philosophy` -> `Philosophy...96`
## • `Chemistry` -> `Chemistry...97`
## • `Economics` -> `Economics...98`
## • `Linguistics` -> `Linguistics...99`
## • `Statistics` -> `Statistics...100`
## • `Neuroscience` -> `Neuroscience...101`
## • `Engineering` -> `Engineering...102`
## • `Finance` -> `Finance...103`
## • `Psychology` -> `Psychology...104`
## • `Paleontology` -> `Paleontology...105`
## • `Genetics` -> `Genetics...106`
## • `Computer Science` -> `Computer Science...107`
## • `Music` -> `Music...108`
## • `International Relations` -> `International Relations...109`
## • `History` -> `History...110`
## • `Zoology` -> `Zoology...112`
## • `Creative Writing` -> `Creative Writing...113`
## • `Anthropology` -> `Anthropology...114`
## • `Political Science` -> `Political Science...115`
## • `Art history` -> `Art history...116`
## • `Biology` -> `Biology...117`
## • `Drama` -> `Drama...118`
## • `Education` -> `Education...119`
## • `Clinical Psychology` -> `Clinical Psychology...120`
## • `Sociology` -> `Sociology...121`
## • `Nursing` -> `Nursing...122`
## • `Criminology` -> `Criminology...123`
## • `Sports Science` -> `Sports Science...124`
## • `Gender studies` -> `Gender studies...125`
## • `Law` -> `Law...126`
## • `Agriculture, Natural Res. & Conservation` -> `Agriculture, Natural Res. &
## Conservation...127`
## • `Health & Medical Sciences` -> `Health & Medical Sciences...128`
## • `Earth, Atmospheric, & Marine Sciences` -> `Earth, Atmospheric, & Marine
## Sciences...129`
## • `Arts ─ Performance & Studio` -> `Arts ─ Performance & Studio...130`
## • `Foreign Languages & Literatures` -> `Foreign Languages & Literatures...131`
## • `Business Admin & Management` -> `Business Admin & Management...133`
## • `Banking & Finance` -> `Banking & Finance...134`
## • `Architecture & Environmental Design` -> `Architecture & Environmental
## Design...135`
## • `Communications & Journalism` -> `Communications & Journalism...136`
## • `Religion & Theology` -> `Religion & Theology...137`
## • `Library & Archival Sciences` -> `Library & Archival Sciences...138`
## • `Social Work` -> `Social Work...139`
## • `English Literature ` -> `English Literature ...140`
## • `Animal sciences` -> `Animal sciences...141`
## • `Marine biology` -> `Marine biology...142`
## • `Nutrition ` -> `Nutrition ...143`
## • `Environmental sciences` -> `Environmental sciences...144`
## • `Physical therapy` -> `Physical therapy...145`
## • `Public health` -> `Public health...146`
## • `Veterinary medicine` -> `Veterinary medicine...147`
## • `Marketing` -> `Marketing...148`
# field-level data from the Google Sheet (GRE means, voting ratios, etc.)
# gs4_auth("the.dfx@gmail.com")
gs4_deauth()
fields_sheet = read_sheet("https://docs.google.com/spreadsheets/d/18ViBhE31cH6KnkJvIHRdG0RB9FTNp0MuYQMwA-Uzzk4/edit#gid=1822179028", sheet = "clean_data")
## ✔ Reading from "Scientific fields study".
## ✔ Range ''clean_data''.
# join Prolific metadata to completed survey responses on participant ID
# (this also drops test responses that lack Prolific metadata)
d = left_join(
  prolific_meta,
  prolific_data %>% filter(Status == "Complete"),
  by = c("Participant_id" = "Please_enter_your_Prolific_ID")
)
# no duplicate participants
assert_that(!anyDuplicated(d$Participant_id))
## [1] TRUE
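# How this idiom works (illustrative): anyDuplicated() returns 0 when all values
# are unique, so !anyDuplicated(x) is TRUE exactly then, and assert_that() stops
# the script with an error otherwise.
!anyDuplicated(c("a", "b", "c"))  # TRUE
!anyDuplicated(c("a", "a", "b"))  # FALSE -> assert_that() would error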
# attitude item: is science just one way of knowing?
d %<>% mutate(
  science_one_way_knowing = Science_is_just_one_way_of_knowing_no_more_valid_or_accurate_than_other_approaches_to_knowledge %>% recode_nonconsent() %>% recode_likert()
)
attention_check_vars = c("Set_the_slider_to_not_scientific",
                         "Set_the_slider_in_the_middle")

d %<>% mutate(
  # TRUE = failed the check
  attention_check_1 = !(Set_the_slider_to_not_scientific <= 10),
  attention_check_2 = !is_between(Set_the_slider_in_the_middle, a = 40, b = 60),
  attention_check_fails = attention_check_1 + attention_check_2
)
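# Worked example of the check logic (illustrative; assumes the sliders run 0-100):
# a respondent who sets the first slider to 5 and the second to 50 passes both checks.
!(5 <= 10)                       # FALSE = check 1 passed
!is_between(50, a = 40, b = 60)  # FALSE = check 2 passed
FALSE + FALSE                    # 0 failures, so the respondent is kept by the filter below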
# remove attention check failures (keep the full sample in d_all)
d_all = d
d = filter(d, attention_check_fails == 0)

# distribution of failures
d_all$attention_check_fails %>% table2()
d_all %>% select(contains("attention_check")) %>% describe2()
#ids of fails
# d_all %>% filter(attention_check_fails > 0) %>% select(Participant_id, Set_the_slider_to_not_scientific, Set_the_slider_in_the_middle, attention_check_1, attention_check_2, attention_check_fails) %>% View("Rejected IDs")
d$race = d$Ethnicity_simplified %>% fct_relevel("White") %>% recode_nonconsent()
d$race %>% table2()
d$Age %<>% as.numeric()
d$Age %>% describe2()
d$Sex %<>% recode_nonconsent()
d$Sex %>% table2()
d$vote_US_2020 = d$Who_did_you_vote_for_in_the_last_presidential_election_2020 %>% recode_nonconsent() %>% fct_relevel("Democrats (Biden)")
d$vote_US_2020 %>% table2()
#variable table
d_table = df_var_table(d)
# per-field rating columns: scientificness (first block) and perceived share of
# professors voting Republican (second block)
fields_science_ratings = d %>% select(Physics_32:Engineering_42, Finance_44:Marketing_88)
fields_politics_ratings = d %>% select(Physics_92:History_110, Zoology_112:Marketing_148)
# build the field-level dataset
fields = tibble(
  scientificness = fields_science_ratings %>% colMeans(),
  politics = fields_politics_ratings %>% colMeans() %>% divide_by(100),
  # field names recovered from the rating column names
  field = scientificness %>% names() %>% str_replace("\\d+", "") %>% str_clean() %>% str_trim() %>% str_to_title(),
  Republican_ratio = fields_sheet$`Voting D:R ratio`,
  # reciprocal of the D:R voting ratio, used as the measure of actual Republican voting
  Republican_vote = 1 / Republican_ratio,
  GRE_verbal = fields_sheet$`Mean Verbal`,
  GRE_verbal_z = standardize(GRE_verbal),
  GRE_math = fields_sheet$`Mean Quant`,
  GRE_math_z = standardize(GRE_math)
) %>% mutate(
  GRE_total_z = select(., GRE_verbal, GRE_math) %>% df_standardize() %>% rowMeans() %>% standardize(),
  GRE_math_tilt = GRE_math_z - GRE_verbal_z
)
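# Rough base R equivalent of the composite GRE columns above (illustrative only;
# assumes standardize()/df_standardize() amount to column-wise z-scoring): z-score
# verbal and math, average the two z-scores, re-standardize the average, and define
# the math tilt as the math z-score minus the verbal z-score.
gre_z = scale(cbind(verbal = fields$GRE_verbal, math = fields$GRE_math))
total_z = as.numeric(scale(rowMeans(gre_z)))     # ~ GRE_total_z
math_tilt = gre_z[, "math"] - gre_z[, "verbal"]  # ~ GRE_math_tilt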
# ratings split by moderator group
# political self-placement relative to the two parties (row 1 = Democrat-leaning,
# row 2 = Republican-leaning, as assumed by the indexing below)
tmp = score_by(fields_politics_ratings, d$Relative_to_the_Democratic_party_and_the_Republican_party_where_would_you_put_yourself)
fields$politics_repub_placement = tmp[2, -1] %>% as.numeric() %>% divide_by(100)
fields$politics_dem_placement = tmp[1, -1] %>% as.numeric() %>% divide_by(100)
tmp = score_by(fields_science_ratings, d$Relative_to_the_Democratic_party_and_the_Republican_party_where_would_you_put_yourself)
fields$scientificness_repub_placement = tmp[2, -1] %>% as.numeric()
fields$scientificness_dem_placement = tmp[1, -1] %>% as.numeric()
# the same split by 2020 presidential vote (row 1 = Democrat voters, row 5 =
# Republican voters, as assumed by the indexing below)
tmp = score_by(fields_politics_ratings, d$Who_did_you_vote_for_in_the_last_presidential_election_2020)
fields$politics_repub_vote = tmp[5, -1] %>% as.numeric() %>% divide_by(100)
fields$politics_dem_vote = tmp[1, -1] %>% as.numeric() %>% divide_by(100)
tmp = score_by(fields_science_ratings, d$Who_did_you_vote_for_in_the_last_presidential_election_2020)
fields$scientificness_repub_vote = tmp[5, -1] %>% as.numeric()
fields$scientificness_dem_vote = tmp[1, -1] %>% as.numeric()
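# The hard-coded row indices above depend on the order of the grouping levels that
# score_by uses. A quick, illustrative check of that order (assuming score_by
# returns one row per level in this order):
levels(factor(d$Who_did_you_vote_for_in_the_last_presidential_election_2020))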
# Engineering is an outlier, so also make a version excluding it
fields_no_engi = fields %>% filter(field != "Engineering")
#main fields variables
fields_vars = c("Republican_vote", "GRE_total_z", "GRE_math_tilt", "politics_repub_vote", "politics_dem_vote", "scientificness_repub_vote", "scientificness_dem_vote")
# correlations: upper triangle uses all fields, lower triangle excludes Engineering
combine_upperlower(
  .upper.tri = fields %>% select(all_of(fields_vars)) %>% cor_matrix(p_val = T, asterisks_only = F),
  .lower.tri = fields_no_engi %>% select(all_of(fields_vars)) %>% cor_matrix(p_val = T, asterisks_only = F)
)
## Republican_vote GRE_total_z GRE_math_tilt
## Republican_vote NA "0.31 [0.148]" "0.65 [<0.001***]"
## GRE_total_z "0.60 [0.003**]" NA "0.00 [1]"
## GRE_math_tilt "0.75 [<0.001***]" "-0.02 [0.895]" NA
## politics_repub_vote "0.69 [<0.001***]" "0.26 [0.059]" "0.56 [<0.001***]"
## politics_dem_vote "0.49 [0.022]" "0.23 [0.092]" "0.44 [<0.001***]"
## scientificness_repub_vote "0.49 [0.021]" "-0.02 [0.874]" "0.34 [0.011]"
## scientificness_dem_vote "0.49 [0.022]" "-0.09 [0.499]" "0.35 [0.008*]"
## politics_repub_vote politics_dem_vote
## Republican_vote "0.64 [0.001**]" "0.45 [0.031]"
## GRE_total_z "0.26 [0.048]" "0.24 [0.079]"
## GRE_math_tilt "0.58 [<0.001***]" "0.46 [<0.001***]"
## politics_repub_vote NA "0.91 [<0.001***]"
## politics_dem_vote "0.91 [<0.001***]" NA
## scientificness_repub_vote "0.26 [0.06]" "-0.09 [0.509]"
## scientificness_dem_vote "0.20 [0.148]" "-0.14 [0.321]"
## scientificness_repub_vote scientificness_dem_vote
## Republican_vote "0.41 [0.055]" "0.40 [0.055]"
## GRE_total_z "-0.01 [0.929]" "-0.08 [0.543]"
## GRE_math_tilt "0.36 [0.006*]" "0.37 [0.005**]"
## politics_repub_vote "0.28 [0.04]" "0.22 [0.105]"
## politics_dem_vote "-0.07 [0.599]" "-0.12 [0.39]"
## scientificness_repub_vote NA "0.98 [<0.001***]"
## scientificness_dem_vote "0.98 [<0.001***]" NA
# Plots
GG_scatter(fields, "politics", "scientificness", case_names = "field") +
geom_smooth()
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
GG_save("figs/perceived politics vs. scientificness.png")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
# perceived field politics: estimates from Republican voters vs. from respondents
# placing themselves nearer the Republicans
GG_scatter(fields, "politics_repub_vote", "politics_repub_placement", case_names = "field", repel_names = T) +
  geom_abline(slope = 1, linetype = "dotted") +
  geom_smooth()
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
## Warning: ggrepel: 25 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
# agreement across the political divide: perceived field politics as estimated by
# Republican vs. Democrat voters
GG_scatter(fields, "politics_repub_vote", "politics_dem_vote", case_names = "field", repel_names = T) +
  geom_abline(slope = 1, linetype = "dotted") +
  geom_smooth()
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
## Warning: ggrepel: 19 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
GG_save("figs/perceive politics by voting.png")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
## Warning: ggrepel: 10 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
# agreement across the political divide: scientificness as estimated by Republican
# vs. Democrat voters
GG_scatter(fields, "scientificness_repub_vote", "scientificness_dem_vote", case_names = "field") +
  geom_abline(slope = 1, linetype = "dotted") +
  geom_smooth()
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
GG_save("figs/scientificness by voting.png")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
R1 = GG_scatter(fields, "politics_repub_vote", "scientificness_repub_vote", case_names = "field") +
  geom_smooth() +
  scale_x_continuous("% of professors who vote Republican\nestimated by Republican voters", labels = scales::percent) +
  scale_y_continuous("Scientificness of field\nestimated by Republican voters")

D1 = GG_scatter(fields, "politics_dem_vote", "scientificness_dem_vote", case_names = "field") +
  scale_x_continuous("% of professors who vote Republican\nestimated by Democrat voters", labels = scales::percent) +
  scale_y_continuous("Scientificness of field\nestimated by Democrat voters") +
  geom_smooth()
R1 + D1
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
GG_save("figs/centrism and science by party.png")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
# stereotype accuracy: perceived share of Republican-voting professors (all
# respondents) vs. actual Republican voting
GG_scatter(fields, "politics", "Republican_vote", case_names = "field", repel_names = T) +
  geom_abline(slope = 1, linetype = "dotted") +
  geom_smooth() +
  scale_x_continuous(labels = scales::percent) +
  scale_y_continuous(labels = scales::percent)
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
## Warning: ggrepel: 6 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
GG_scatter(fields_no_engi, "politics", "Republican_vote", case_names = "field", repel_names = T) +
geom_abline(slope = 1, linetype = "dotted") +
geom_smooth() +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent)
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
## Warning: ggrepel: 1 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
GG_scatter(fields_no_engi, "politics_dem_vote", "Republican_vote", case_names = "field", repel_names = T) +
geom_abline(slope = 1, linetype = "dotted") +
geom_smooth() +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent)
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
GG_scatter(fields_no_engi, "politics_repub_vote", "Republican_vote", case_names = "field", repel_names = T) +
geom_abline(slope = 1, linetype = "dotted") +
geom_smooth() +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent)
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
write_sessioninfo()
## R version 4.3.2 (2023-10-31)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: Linux Mint 21.1
##
## Matrix products: default
## BLAS: /usr/lib/x86_64-linux-gnu/blas/libblas.so.3.10.0
## LAPACK: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3.10.0
##
## locale:
## [1] LC_CTYPE=en_DK.UTF-8 LC_NUMERIC=C
## [3] LC_TIME=en_DK.UTF-8 LC_COLLATE=en_DK.UTF-8
## [5] LC_MONETARY=en_DK.UTF-8 LC_MESSAGES=en_DK.UTF-8
## [7] LC_PAPER=en_DK.UTF-8 LC_NAME=C
## [9] LC_ADDRESS=C LC_TELEPHONE=C
## [11] LC_MEASUREMENT=en_DK.UTF-8 LC_IDENTIFICATION=C
##
## time zone: Europe/Berlin
## tzcode source: system (glibc)
##
## attached base packages:
## [1] stats4 stats graphics grDevices utils datasets methods
## [8] base
##
## other attached packages:
## [1] patchwork_1.1.2 googlesheets4_1.1.1 mirt_1.39
## [4] lattice_0.22-5 rms_6.7-0 kirkegaard_2023-04-30
## [7] psych_2.3.3 assertthat_0.2.1 weights_1.0.4
## [10] Hmisc_5.1-0 magrittr_2.0.3 lubridate_1.9.2
## [13] forcats_1.0.0 stringr_1.5.0 dplyr_1.1.2
## [16] purrr_1.0.1 readr_2.1.4 tidyr_1.3.0
## [19] tibble_3.2.1 ggplot2_3.4.2 tidyverse_2.0.0
##
## loaded via a namespace (and not attached):
## [1] mnormt_2.1.1 pbapply_1.7-0 gridExtra_2.3
## [4] permute_0.9-7 sandwich_3.0-2 rlang_1.1.1
## [7] multcomp_1.4-24 polspline_1.1.22 compiler_4.3.2
## [10] mgcv_1.9-1 gdata_2.19.0 systemfonts_1.0.4
## [13] vctrs_0.6.3 quantreg_5.95 crayon_1.5.2
## [16] pkgconfig_2.0.3 shape_1.4.6 fastmap_1.1.1
## [19] backports_1.4.1 labeling_0.4.2 utf8_1.2.3
## [22] rmarkdown_2.22 tzdb_0.4.0 nloptr_2.0.3
## [25] ragg_1.2.5 bit_4.0.5 MatrixModels_0.5-1
## [28] xfun_0.39 glmnet_4.1-7 jomo_2.7-6
## [31] cachem_1.0.8 jsonlite_1.8.5 highr_0.10
## [34] pan_1.6 Deriv_4.1.3 broom_1.0.5
## [37] parallel_4.3.2 cluster_2.1.6 R6_2.5.1
## [40] bslib_0.5.0 stringi_1.7.12 boot_1.3-28
## [43] rpart_4.1.23 cellranger_1.1.0 jquerylib_0.1.4
## [46] Rcpp_1.0.10 iterators_1.0.14 knitr_1.43
## [49] zoo_1.8-12 base64enc_0.1-3 Matrix_1.6-3
## [52] splines_4.3.2 nnet_7.3-19 timechange_0.2.0
## [55] tidyselect_1.2.0 rstudioapi_0.14 yaml_2.3.7
## [58] vegan_2.6-4 codetools_0.2-19 dcurver_0.9.2
## [61] curl_5.2.0 withr_2.5.0 evaluate_0.21
## [64] foreign_0.8-86 survival_3.5-7 pillar_1.9.0
## [67] mice_3.16.0 checkmate_2.2.0 foreach_1.5.2
## [70] generics_0.1.3 vroom_1.6.3 hms_1.1.3
## [73] munsell_0.5.0 scales_1.2.1 minqa_1.2.5
## [76] gtools_3.9.4 glue_1.6.2 tools_4.3.2
## [79] data.table_1.14.8 lme4_1.1-33 SparseM_1.81
## [82] fs_1.6.2 mvtnorm_1.2-2 grid_4.3.2
## [85] colorspace_2.1-0 nlme_3.1-163 googledrive_2.1.1
## [88] htmlTable_2.4.1 Formula_1.2-5 cli_3.6.1
## [91] textshaping_0.3.6 fansi_1.0.4 gargle_1.5.1
## [94] gtable_0.3.3 sass_0.4.6 digest_0.6.31
## [97] ggrepel_0.9.3 GPArotation_2023.3-1 TH.data_1.1-2
## [100] farver_2.1.1 htmlwidgets_1.6.2 htmltools_0.5.5
## [103] lifecycle_1.0.3 httr_1.4.6 mitml_0.4-5
## [106] bit64_4.0.5 MASS_7.3-60
# export the field-level dataset
fields %>% write_csv("data/fields.csv")