rawdata = read_sav("data/AccessDB_T0T4Merge_7.12.19.1.sav")
names(rawdata)[1] = "SubjectID"
D <- rawdata
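# `%!in%` is used below to exclude children who repeated a grade; it is assumed to be defined in
# the setup chunk. If it is not, this standard definition provides it:
`%!in%` <- Negate(`%in%`)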
###check the number of participants at each timepoint
D <- D %>%
filter(T0SY1Y2C_T1C == 1 | T0SY1Y2C_T2C == 1 | T0SY1Y2C_T3C == 1 | T0SY1Y2C_T4C == 1) ### keep participants who attended at least one time point
a <- nrow(D)
focal.var = c("T1AWMADMSS_N","T2AWMADMSS_N","T3AWMADMSS_N","T4AWMADMSS_N",
"T1AWMADMRS_N","T2AWMADMRS_N","T3AWMADMRS_N","T4AWMADMRS_N",
"T1GMPA", "T2GMPA", "T3GMPA", "T4GMPA",
"T1GPPA", "T2GPPA", "T3GPPA", "T4GPPA",
"T1PRWMPAE", "T2PRWMPAE", "T3PRWMPAE", "T4PRWMPAE",
"T1PMPA", "T2PMPA", "T3PMPA", "T4PMPA",
"T1NL10W_PAE","T2NL10W_PAE","T3NL10W_PAE","T4NL10W_PAE",
"T1NL100W_PAE", "T1NL1000W_PAE", "T2NL100W_PAE", "T2NL1000W_PAE",
"T3NL100W_PAE", "T3NL1000W_PAE", "T4NL100W_PAE", "T4NL1000W_PAE",
"T1PKC_PA", "T2PKC_PA", "T3PKC_PA", "T4PKC_PA",
"T1WJCW", "T2WJCW", "T3WJCW", "T4WJCW",
"T1WJCRS", "T2WJCRS", "T3WJCRS", "T4WJCRS",
"T1ASWmac", "T2ASWmac", "T3ASWmac", "T4ASWmac",
"T1LWIDWS", "T2LWIDWS", "T3LWIDWS", "T4LWIDWS")
D = D[rowSums(is.na(D[,focal.var])) != length(focal.var),] # drop rows missing every focal variable
D <- D %>%
dplyr::select("SubjectID", "T0SY1Y2G_Y1Grade", "T0SY1Y2G_Y2Grade",
"T0SY1Y2G_StartGrade", "T0SGENGender", "T0PDEMOMaxParentEducation",
"T0PDEMOIncomeCode", "T0PDEMOIncomeMid","T0PDEMORaceHispanic",
"T1AWMADMSS_N", "T2AWMADMSS_N", "T3AWMADMSS_N", "T4AWMADMSS_N",
"T1AWMADMRS_N","T2AWMADMRS_N","T3AWMADMRS_N","T4AWMADMRS_N",
"T1GMPA", "T2GMPA", "T3GMPA", "T4GMPA",
"T1GPPA", "T2GPPA", "T3GPPA", "T4GPPA",
"T1PRWMPAE", "T2PRWMPAE", "T3PRWMPAE", "T4PRWMPAE",
"T1PMPA", "T2PMPA", "T3PMPA", "T4PMPA",
"T1NL10W_PAE","T2NL10W_PAE","T3NL10W_PAE","T4NL10W_PAE",
"T1NL100W_PAE", "T1NL1000W_PAE", "T2NL100W_PAE", "T2NL1000W_PAE",
"T3NL100W_PAE", "T3NL1000W_PAE", "T4NL100W_PAE", "T4NL1000W_PAE",
"T1PKC_PA", "T2PKC_PA", "T3PKC_PA", "T4PKC_PA",
"T1WJCW", "T2WJCW", "T3WJCW", "T4WJCW",
"T1WJCRS", "T2WJCRS", "T3WJCRS", "T4WJCRS",
"T1ASWmac", "T2ASWmac", "T3ASWmac", "T4ASWmac",
"T1LWIDWS", "T2LWIDWS", "T3LWIDWS", "T4LWIDWS")
D[D >= (-999) & D <= (-50)] <- NA # recode missing-data codes (-999 through -50) as NA
##drop participants who repeated a grade
D1_dummy = D[c("SubjectID", "T0SY1Y2G_Y1Grade", "T0SY1Y2G_Y2Grade",
"T0SY1Y2G_StartGrade")]
D1_dummy$repeated = ifelse(D1_dummy$T0SY1Y2G_Y1Grade == D1_dummy$T0SY1Y2G_Y2Grade, 1,0)
repeatgrade = D1_dummy[which(D1_dummy$repeated==1),"SubjectID"]
D1 = D[which(D$SubjectID %!in% repeatgrade$SubjectID),] ## exclude 3 children who repeated a grade
table(D1$T0SY1Y2G_StartGrade)
##
## -1 0 1 2 3
## 103 149 131 130 99
D1$GMPA_na = rowSums(is.na(D1[c("T1GMPA","T2GMPA","T3GMPA","T4GMPA")]))
D1$GPPA_na = rowSums(is.na(D1[c("T1GPPA","T2GPPA","T3GPPA","T4GPPA")]))
D1$AWMADMSS_na = rowSums(is.na(D1[c("T1AWMADMSS_N","T2AWMADMSS_N","T3AWMADMSS_N","T4AWMADMSS_N")]))
D1$PRWM_na = rowSums(is.na(D1[c("T1PRWMPAE","T2PRWMPAE","T3PRWMPAE","T4PRWMPAE")]))
D1$PKC_na = rowSums(is.na(D1[c("T1PKC_PA", "T2PKC_PA", "T3PKC_PA", "T4PKC_PA")]))
D1$WJCW_na = rowSums(is.na(D1[c("T1WJCW", "T2WJCW", "T3WJCW", "T4WJCW")]))
D1$ASW_na = rowSums(is.na(D1[c("T1ASWmac", "T2ASWmac", "T3ASWmac", "T4ASWmac")]))
D1$PMPA_na = rowSums(is.na(D1[c("T1PMPA", "T2PMPA", "T3PMPA", "T4PMPA")]))
D1$LWID_na = rowSums(is.na(D1[c("T1LWIDWS", "T2LWIDWS", "T3LWIDWS", "T4LWIDWS")]))
participant_summary = D1[c("SubjectID","T0SY1Y2G_Y1Grade","T0SY1Y2G_StartGrade","T0SY1Y2G_Y2Grade","GMPA_na","GPPA_na","AWMADMSS_na","PRWM_na","PKC_na","WJCW_na","ASW_na","PMPA_na", "LWID_na")]
table(participant_summary$T0SY1Y2G_StartGrade, participant_summary$T0SY1Y2G_Y1Grade)
##
## -1 0 1 2 3
## -1 88 0 0 0 0
## 0 0 143 0 0 0
## 1 0 0 126 0 0
## 2 0 0 0 116 0
## 3 0 0 0 0 95
## Mental rotation
participant_summary$mr = participant_summary$GMPA_na + participant_summary$GPPA_na
## Exact calculation
participant_summary$ec = participant_summary$PKC_na + participant_summary$WJCW_na
nrow(participant_summary)
## [1] 614
###exclude participants who missed more than two time points of the focal variables
participant_summary_2 <- participant_summary
participant_summary_2 = subset(participant_summary, AWMADMSS_na <3)
participant_summary_2 = subset(participant_summary_2, mr <7)
participant_summary_2 = subset(participant_summary_2, PMPA_na <3)
#participant_summary_2 = subset(participant_summary_2, mt <7)
participant_summary_2 = subset(participant_summary_2, ec <7)
participant_summary_2 = subset(participant_summary_2, ASW_na <3)
participant_summary_2 = subset(participant_summary_2, LWID_na <3)
participant_summary_2$SubjectID = as.factor(as.character(participant_summary_2$SubjectID))
### spatial_proportional_data_nona_importantcols_names_alltimes_no20_noshortrt (read below, with PAE recalculated) is produced by "sf_recalculatepae.Rmd"
spatial_proportional_data_nona_importantcols_names_alltimes_no20_noshortrt<-read_csv("data/sf_proportionalreasoning_nltask_cleaned.csv")
names(spatial_proportional_data_nona_importantcols_names_alltimes_no20_noshortrt)[2] = "SubjectID"
spatial_proportional_data_nona_importantcols_names_alltimes_no20_noshortrt$SubjectID = as.factor(as.character(spatial_proportional_data_nona_importantcols_names_alltimes_no20_noshortrt$SubjectID))
participant_summary_2 = participant_summary_2 %>%
left_join(spatial_proportional_data_nona_importantcols_names_alltimes_no20_noshortrt[c("SubjectID","PRWMPAE_na")],"SubjectID")
participant_summary_2 = subset(participant_summary_2, PRWMPAE_na < 3)
nrow(participant_summary_2)
## [1] 421
#This is the amended proportional reasoning data (PAE was recalculated)
spatial_profiles_database = D1
spatial_profiles_database$SubjectID = as.factor(as.character(spatial_profiles_database$SubjectID))
spatial_profiles_database_atleast2tp = spatial_profiles_database[which(spatial_profiles_database$SubjectID %in% participant_summary_2$SubjectID),]
#This step adds the proportional reasoning data
spatial_profiles_database_atleast2tp = spatial_profiles_database_atleast2tp %>%
left_join(spatial_proportional_data_nona_importantcols_names_alltimes_no20_noshortrt[c(2:6)], by = "SubjectID")
spatial_profiles_database_atleast2tp$T1PRWMPAE.asin = 2*asin(sqrt(spatial_profiles_database_atleast2tp$t1))
spatial_profiles_database_atleast2tp$T2PRWMPAE.asin = 2*asin(sqrt(spatial_profiles_database_atleast2tp$t2))
spatial_profiles_database_atleast2tp$T3PRWMPAE.asin = 2*asin(sqrt(spatial_profiles_database_atleast2tp$t3))
spatial_profiles_database_atleast2tp$T4PRWMPAE.asin = 2*asin(sqrt(spatial_profiles_database_atleast2tp$t4))
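# The four lines above apply the arcsine square-root transform, 2*asin(sqrt(p)), a standard
# variance-stabilizing transform for proportion-scaled scores such as PAE (this assumes t1-t4
# are proportions in [0, 1]). The same operation as a small helper, shown for clarity only:
asin_sqrt <- function(p) 2 * asin(sqrt(p)) # e.g., asin_sqrt(0.25) = 2*asin(0.5) = pi/3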
nrow(spatial_profiles_database_atleast2tp)
## [1] 421
var_zy1 = c("T1AWMADMSS_N","T2AWMADMSS_N",
"T1GMPA", "T2GMPA",
"T1GPPA", "T2GPPA",
"t1", "t2",
"T1PRWMPAE.asin", "T2PRWMPAE.asin",
"T1PMPA","T2PMPA",
"T1WJCW", "T2WJCW",
"T1PKC_PA", "T2PKC_PA",
"T1ASWmac", "T2ASWmac",
"T1LWIDWS", "T2LWIDWS")
var_zy2 = c("T3AWMADMSS_N","T4AWMADMSS_N",
"T3GPPA", "T4GPPA",
"T3GMPA", "T4GMPA",
"t3", "t4",
"T3PRWMPAE.asin", "T4PRWMPAE.asin",
"T3PMPA", "T4PMPA",
"T3WJCW", "T4WJCW",
"T3PKC_PA", "T4PKC_PA",
"T3ASWmac", "T4ASWmac",
"T3LWIDWS", "T4LWIDWS")
for (v in var_zy1) {
vs <- str_c(v, ".s")
spatial_profiles_database_atleast2tp[[vs]] <- ave(spatial_profiles_database_atleast2tp[[v]], spatial_profiles_database_atleast2tp$T0SY1Y2G_StartGrade, FUN=scale)
}
for (v in var_zy2) {
vs <- str_c(v, ".s")
spatial_profiles_database_atleast2tp[[vs]] <- ave(spatial_profiles_database_atleast2tp[[v]], spatial_profiles_database_atleast2tp$T0SY1Y2G_Y2Grade, FUN=scale)
}
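# ave(x, group, FUN = scale) standardizes each measure within grade: T1/T2 scores are z-scored
# within T0SY1Y2G_StartGrade and T3/T4 scores within T0SY1Y2G_Y2Grade. A dplyr equivalent for a
# single variable would be (sketch, not run):
# spatial_profiles_database_atleast2tp %>%
#   group_by(T0SY1Y2G_StartGrade) %>%
#   mutate(T1GMPA.s = as.numeric(scale(T1GMPA))) %>%
#   ungroup()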
# reverse code the proportional reasoning (PAE) scores so that higher values indicate better performance
spatial_profiles_database_atleast2tp$T1PRWMPAE.asinR = -spatial_profiles_database_atleast2tp$T1PRWMPAE.asin
spatial_profiles_database_atleast2tp$T2PRWMPAE.asinR = -spatial_profiles_database_atleast2tp$T2PRWMPAE.asin
spatial_profiles_database_atleast2tp$T3PRWMPAE.asinR = -spatial_profiles_database_atleast2tp$T3PRWMPAE.asin
spatial_profiles_database_atleast2tp$T4PRWMPAE.asinR = -spatial_profiles_database_atleast2tp$T4PRWMPAE.asin
spatial_profiles_database_atleast2tp$T1PRWMPAE.asin.sR = - spatial_profiles_database_atleast2tp$T1PRWMPAE.asin.s
spatial_profiles_database_atleast2tp$T2PRWMPAE.asin.sR = - spatial_profiles_database_atleast2tp$T2PRWMPAE.asin.s
spatial_profiles_database_atleast2tp$T3PRWMPAE.asin.sR = - spatial_profiles_database_atleast2tp$T3PRWMPAE.asin.s
spatial_profiles_database_atleast2tp$T4PRWMPAE.asin.sR = - spatial_profiles_database_atleast2tp$T4PRWMPAE.asin.s
spatial_profiles_database_atleast2tp$T1MR.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_StartGrade <1, spatial_profiles_database_atleast2tp$T1GPPA.s,spatial_profiles_database_atleast2tp$T1GMPA.s)
spatial_profiles_database_atleast2tp$T2MR.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_StartGrade <1, spatial_profiles_database_atleast2tp$T2GPPA.s,spatial_profiles_database_atleast2tp$T2GMPA.s)
spatial_profiles_database_atleast2tp$T3MR.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_Y2Grade <1, spatial_profiles_database_atleast2tp$T3GPPA.s,spatial_profiles_database_atleast2tp$T3GMPA.s)
spatial_profiles_database_atleast2tp$T4MR.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_Y2Grade <1, spatial_profiles_database_atleast2tp$T4GPPA.s,spatial_profiles_database_atleast2tp$T4GMPA.s)
# spatial_profiles_database_atleast2tp$T1MT.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_StartGrade <1, spatial_profiles_database_atleast2tp$T1CMTTPA.s,spatial_profiles_database_atleast2tp$T1THPA.s)
#
# spatial_profiles_database_atleast2tp$T2MT.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_StartGrade <1, spatial_profiles_database_atleast2tp$T2CMTTPA.s,spatial_profiles_database_atleast2tp$T2THPA.s)
#
# spatial_profiles_database_atleast2tp$T3MT.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_Y2Grade <1, spatial_profiles_database_atleast2tp$T3CMTTPA.s,spatial_profiles_database_atleast2tp$T3THPA.s)
#
# spatial_profiles_database_atleast2tp$T4MT.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_Y2Grade <1, spatial_profiles_database_atleast2tp$T4CMTTPA.s,spatial_profiles_database_atleast2tp$T4THPA.s)
#Exact Calculations
spatial_profiles_database_atleast2tp$T1EC.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_StartGrade <1, spatial_profiles_database_atleast2tp$T1PKC_PA.s,spatial_profiles_database_atleast2tp$T1WJCW.s)
spatial_profiles_database_atleast2tp$T2EC.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_StartGrade <1, spatial_profiles_database_atleast2tp$T2PKC_PA.s,spatial_profiles_database_atleast2tp$T2WJCW.s)
spatial_profiles_database_atleast2tp$T3EC.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_Y2Grade <1, spatial_profiles_database_atleast2tp$T3PKC_PA.s,spatial_profiles_database_atleast2tp$T3WJCW.s)
spatial_profiles_database_atleast2tp$T4EC.s = ifelse(spatial_profiles_database_atleast2tp$T0SY1Y2G_Y2Grade <1, spatial_profiles_database_atleast2tp$T4PKC_PA.s,spatial_profiles_database_atleast2tp$T4WJCW.s)
nrow(spatial_profiles_database_atleast2tp)
## [1] 421
# write.csv(spatial_profiles_database_atleast2tp, "data/spatial_profiles_database_atleast2tp_062424_n421.csv")
#write.csv(spatial_profiles_database_atleast2tp, "data/spatial_profiles_database_atleast2tp_080524_n421.csv")
#table(spatial_profiles_database_atleast2tp$T0SY1Y2G_Y2Grade, spatial_profiles_database_atleast2tp$T0SY1Y2G_StartGrade)
D_info = read_sav("data/AccessDB_T0T4Merge_7.12.19.1.sav")
names(D_info)[1] = "SubjectID"
D_info <- D_info %>%
dplyr::select("SubjectID", "T0SY1Y2G_StartGrade", "T0SGENGender", "T0PDEMOMaxParentEducation", "T0PDEMOIncomeCode", "T0PDEMOIncomeMid", "T0PDEMORaceHispanic")
D_info = D_info[which(D_info$SubjectID %in% participant_summary_2$SubjectID),]
TAll_complete_info = D_info
nrow(TAll_complete_info)
## [1] 421
table(TAll_complete_info$T0SGENGender) #1=boys, 2=girls
##
## 1 2
## 230 190
table(TAll_complete_info$T0SY1Y2G_StartGrade)
##
## -1 0 1 2 3
## 63 105 90 96 67
table(TAll_complete_info$T0PDEMORaceHispanic)
##
## 1 2 3 4 5 6 7 10
## 1 13 185 1 82 1 39 53
#1 = American Indian/Alaskan Native
#2 = Asian/ Asian American,
#3 = Black/ African American,
#4 = Native Hawaiian/ Other Pacific Islander,
#5 = White,
#6 = Other,
#7 = Multirace,
#10 = Hispanic
#frequency of race
race <- TAll_complete_info %>%
dplyr::group_by(T0PDEMORaceHispanic)%>%
dplyr::summarise(cnt = n())%>%
dplyr::mutate(freq = round (cnt/sum(cnt),3))%>%
arrange(desc(freq))
race
## # A tibble: 9 × 3
## T0PDEMORaceHispanic cnt freq
## <dbl+lbl> <int> <dbl>
## 1 3 [Black/ African American] 185 0.439
## 2 5 [White] 82 0.195
## 3 10 [Hispanic] 53 0.126
## 4 NA 46 0.109
## 5 7 [Multirace] 39 0.093
## 6 2 [Asian/ Asian American] 13 0.031
## 7 1 [American Indian/Alaskan Native] 1 0.002
## 8 4 [Native Hawaiian/ Other Pacific Islander] 1 0.002
## 9 6 [Other] 1 0.002
###parent educational level###
table(TAll_complete_info$T0PDEMOMaxParentEducation)
##
## 10 12 13 14 16 17 18
## 17 53 65 57 64 41 80
psych::describe(TAll_complete_info$T0PDEMOMaxParentEducation)
## vars n mean sd median trimmed mad min max range skew kurtosis se
## X1 1 377 14.88 2.4 14 14.96 2.97 10 18 8 -0.16 -1.18 0.12
###family income###
table(TAll_complete_info$T0PDEMOIncomeMid)
##
## 7.5 25 42.5 62.5 87.5 100
## 49 90 61 58 33 66
des <- psych::describe(TAll_complete_info$T0PDEMOIncomeMid)
print(des, digits=3) # show the output, rounded to 3 digits
## vars n mean sd median trimmed mad min max range skew kurtosis se
## X1 1 357 51.32 32.15 42.5 50.73 29.65 7.5 100 92.5 0.291 -1.29 1.702
#spatial_profiles_database_atleast2tp <- read_csv("data/spatial_profiles_database_atleast2tp_062424_n421.csv")
spatial_profiles_database_atleast2tp <- read_csv("data/spatial_profiles_database_atleast2tp_080524_n421.csv")
nrow(spatial_profiles_database_atleast2tp)
## [1] 421
T1 <- spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1AWMADMSS_N.s",
"T1MR.s",
"T1PRWMPAE.asin.sR",
"T1PMPA.s","T1EC.s","T1ASWmac.s", "T1LWIDWS.s")
T1 = as.data.frame(T1)
hist(T1[c(3:7)], pch = 19)
ggpairs(T1[c(3:7)], pch = 19)
ggpairs(T1[c(3:6)], pch = 19)
T1 <- spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1AWMADMSS_N.s",
"T1MR.s",
"T1PRWMPAE.asin.sR",
"T1PMPA.s")
T1 = as.data.frame(T1)
T1_na = T1
T1_na$T1na = rowSums(is.na(T1_na[c( "T0SY1Y2G_StartGrade", "T1AWMADMSS_N.s","T1MR.s",
"T1PRWMPAE.asin.sR", "T1PMPA.s")]))
T1_complete = T1
T1_complete = subset(T1_complete, !is.na(T0SY1Y2G_StartGrade))
# keep rows with fewer than two missing values across the T1 measures
T1_complete = T1_complete[rowSums(is.na(T1_complete))<2,]
# ###346 rows in T1_complete
# write.csv(T1_complete, "data/T1_complete_062424_n346.csv")
T1_complete = read.csv("data/T1_complete_062424_n346.csv")
T1_complete_nomissing = T1_complete
set.seed(777)
T1_complete_nomissing[c(4:7)] = T1_complete_nomissing[c(4:7)] %>%
#dplyr::select(-c(X,SubjectID, T0SY1Y2G_StartGrade))%>%
single_imputation(method = "missForest")
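# single_imputation(method = "missForest") from tidyLPA fills the remaining missing values in
# the four T1 measures via random-forest imputation before the outlier screen below.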
mahal=mahalanobis(T1_complete_nomissing[c(4:7)], colMeans(T1_complete_nomissing[c(4:7)], na.rm=T),
cov(T1_complete_nomissing[c(4:7)], use="pairwise.complete.obs"))
cutoff=qchisq(1-.01, ncol(T1_complete_nomissing[c(4:7)]))
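# The cutoff is the 99th percentile of a chi-square distribution with df = number of screened
# variables (4 here), the conventional criterion for Mahalanobis-distance outliers:
# qchisq(.99, 4) is approximately 13.28, so cases with mahal above that value are dropped.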
mahal_df = as.data.frame(mahal)
T1_complete_nomissing=cbind(T1_complete_nomissing, mahal_df)
summary(mahal<cutoff)
## Mode FALSE TRUE
## logical 7 339
ncol(T1_complete_nomissing[c(4:7)])
## [1] 4
noout=subset(T1_complete_nomissing, mahal<cutoff)
ggpairs(noout[c(4:7)], pch = 19)
#T1_complete_nomissing = cbind()
set.seed(777)
vLPA_missing = noout %>%
dplyr::select(-c(X,SubjectID, T0SY1Y2G_StartGrade, mahal))%>%
#single_imputation(method = "missForest") %>%
estimate_profiles(1:10)
vLPA_missing
## tidyLPA analysis using mclust:
##
## Model Classes AIC BIC Entropy prob_min prob_max n_min n_max BLRT_p
## 1 1 3686.37 3716.98 1.00 1.00 1.00 1.00 1.00
## 1 2 3579.75 3629.49 0.74 0.92 0.92 0.46 0.54 0.01
## 1 3 3546.11 3614.97 0.72 0.74 0.94 0.13 0.47 0.01
## 1 4 3539.34 3627.33 0.76 0.59 0.92 0.04 0.43 0.04
## 1 5 3525.59 3632.72 0.75 0.69 0.92 0.04 0.38 0.02
## 1 6 3513.67 3639.93 0.75 0.69 0.93 0.05 0.37 0.01
## 1 7 3505.51 3650.90 0.77 0.67 0.93 0.04 0.36 0.04
## 1 8 3501.48 3666.00 0.78 0.56 0.92 0.02 0.35 0.07
## 1 9 3508.11 3691.76 0.72 0.37 0.86 0.03 0.27 0.72
## 1 10 3514.42 3717.19 0.76 0.23 0.91 0.02 0.32 0.81
compare_solutions(vLPA_missing, statistics = c("AIC","BIC"))
## Compare tidyLPA solutions:
##
## Model Classes AIC BIC
## 1 1 3686.369 3716.977
## 1 2 3579.749 3629.487
## 1 3 3546.107 3614.975
## 1 4 3539.337 3627.335
## 1 5 3525.593 3632.721
## 1 6 3513.669 3639.927
## 1 7 3505.510 3650.898
## 1 8 3501.481 3665.999
## 1 9 3508.110 3691.758
## 1 10 3514.415 3717.193
##
## Best model according to AIC is Model 1 with 8 classes.
## Best model according to BIC is Model 1 with 3 classes.
##
## An analytic hierarchy process, based on the fit indices AIC, AWE, BIC, CLC, and KIC (Akogul & Erisoglu, 2017), suggests the best solution is Model 1 with 3 classes.
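# Fit the selected solution: 3 profiles, the best model by BIC and by the analytic hierarchy
# process reported above.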
set.seed(777)
vLPA4 = noout %>%
dplyr::select(-c(X,SubjectID, T0SY1Y2G_StartGrade, mahal))%>%
#single_imputation(method = "missForest") %>%
estimate_profiles(3)
profile_data = as.data.frame(get_data(vLPA4))
profile_data = cbind(noout, profile_data)
table( profile_data$T0SY1Y2G_StartGrade,profile_data$Class)
##
## 1 2 3
## -1 21 4 13
## 0 35 16 38
## 1 28 9 37
## 2 35 10 37
## 3 17 6 33
table(profile_data$Class)
##
## 1 2 3
## 136 45 158
profile_data$SubjectID=as.factor(as.character(profile_data$SubjectID))
profile_data %>%
subset(., Class ==2)%>%
#nrow()
{ggpairs(.[c(4:7)], pch = 19)}
profile_data %>%
subset(., Class ==1)%>%
#nrow()
{ggpairs(.[c(4:7)], pch = 19)}
profile_data %>%
subset(., Class ==3)%>%
#nrow()
{ggpairs(.[c(4:7)], pch = 19)}
#
#
# nofactors=fa.parallel(T1_complete_nomissing[c(4:7)], fm="wls", fa="fa") ### <-- parallel analysis and scree plot
# sum(nofactors$fa.values>.7) ### Jollife criterion
# round3=fa(T1_complete_nomissing[c(4:7)], nfactors=3, rotate="oblimin",fm="wls") #Scree plot
# round3
# fa.diagram(round3, sort=TRUE, cut=.11, simple=F, errors=F, digits=2, e.size=.05, rsize=1)
## task Class N zscore sd se ci
## 1 T1AWMADMSS_N.s Low\nPerformance 37 -1.06676 0.5926 0.09742 0.19757
## 2 T1AWMADMSS_N.s Low\nMental Rotation 129 -0.13942 0.9035 0.07955 0.15741
## 3 T1AWMADMSS_N.s High\nPerformance 145 0.38348 0.9268 0.07696 0.15213
## 4 T1MR.s Low\nPerformance 44 -0.43454 0.6602 0.09953 0.20072
## 5 T1MR.s Low\nMental Rotation 134 -0.91826 0.4582 0.03958 0.07829
## 6 T1MR.s High\nPerformance 155 0.87058 0.5656 0.04543 0.08974
## 7 T1PMPA.s Low\nPerformance 39 -1.35610 0.7278 0.11655 0.23594
## 8 T1PMPA.s Low\nMental Rotation 119 0.07910 0.7053 0.06466 0.12804
## 9 T1PMPA.s High\nPerformance 132 0.42628 0.7322 0.06373 0.12608
## 10 T1PRWMPAE.asin.sR Low\nPerformance 42 -1.06627 0.6643 0.10250 0.20700
## 11 T1PRWMPAE.asin.sR Low\nMental Rotation 129 0.03053 0.8959 0.07888 0.15607
## 12 T1PRWMPAE.asin.sR High\nPerformance 147 0.25593 0.8363 0.06898 0.13633
TAll_complete_info$SubjectID = as.factor(as.character(TAll_complete_info$SubjectID))
# T1_complete_class$Class_num = as.factor(as.numeric(T1_complete_class$Class))
# T1_complete_class$Class = as.factor(as.numeric(T1_complete_class$Class))
# # T1_complete_class$Class = ifelse(T1_complete_class$Class==1, "lowmr",ifelse(T1_complete_class$Class==2, "low","high"))
# T1_complete_class$Class = as.factor(as.character(T1_complete_class$Class))
T1_complete_class$Class <- factor(T1_complete_class$Class,
levels = c(2,1,3))
levels(T1_complete_class$Class)[levels(T1_complete_class$Class ) == 2] <- "Low\nPerformance"
levels(T1_complete_class$Class)[levels(T1_complete_class$Class ) == 1] <- "Low\nMental Rotation"
levels(T1_complete_class$Class)[levels(T1_complete_class$Class ) == 3] <- "High\nPerformance"
T1_complete_class = T1_complete_class %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
spatial_profiles_database_atleast2tp$SubjectID = as.factor(as.character(spatial_profiles_database_atleast2tp$SubjectID))
T1_complete_class = T1_complete_class %>%
left_join(spatial_profiles_database_atleast2tp[c("SubjectID","T1LWIDWS")], by ="SubjectID")
class_gender = as.data.frame(table(T1_complete_class$Class, T1_complete_class$T0SGENGender))
class_gender$title = "Gender"
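# size_text (font size used in the plot themes below) is assumed to be set in the setup chunk,
# e.g. size_text <- 8 (illustrative value only).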
graph_gender = ggplot(class_gender, aes(x = Var2, y = Freq, fill = Var1)) +
geom_bar(stat = "identity") +
facet_grid(.~ title)+
theme_bw()+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"), name = "Spatial\nProfiles")+
scale_y_continuous(breaks = seq(0,200,50), limits = c(0,200))+
scale_x_discrete(labels = c("Boys","Girls"))+
theme(legend.position="none",
axis.title.x=element_blank(),
axis.text.x = element_text(size=7.5,color ="black"),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_gender
class_grade = as.data.frame(table(T1_complete_class$Class, T1_complete_class$T0SY1Y2G_StartGrade))
class_grade$title = "Grade"
graph_grade = ggplot(class_grade, aes(x = Var2, y = Freq, fill = Var1)) +
geom_bar(stat = "identity") +
facet_grid(.~ title)+
theme_bw()+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"), name = "Spatial\nProfiles")+
scale_x_discrete(labels = c("Pre-K","K","1st","2nd","3rd"))+
scale_y_continuous(breaks = seq(0,100,25), limits = c(0,100))+
#scale_x_discrete(labels = c("Boys","Girls"))+
theme(legend.position="none",
axis.title.x=element_blank(),
axis.text.x = element_text(size=7.5,color ="black"),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_grade
class_edu = as.data.frame(table(T1_complete_class$Class, T1_complete_class$T0PDEMOMaxParentEducation))
class_edu$title = "Years of Parental Education"
graph_edu = ggplot(class_edu, aes(x = Var2, y = Freq, fill = Var1)) +
geom_bar(stat = "identity") +
facet_grid(.~ title)+
theme_bw()+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"), name = "Spatial\nProfiles")+
#scale_x_discrete(labels = c("Pre-K","K","1st","2nd","3rd"))+
scale_y_continuous(breaks = seq(0,75,25), limits = c(0,75))+
#scale_x_discrete(labels = c("Boys","Girls"))+
theme(legend.position="none",
axis.title.x=element_blank(),
axis.text.x = element_text(size=7.5,color ="black"),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_edu
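# multiplot() is assumed to be the Cookbook for R helper defined earlier; an equivalent using a
# CRAN package would be gridExtra::grid.arrange(graph_grade, graph_gender, graph_edu, ncol = 3).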
multiplot(graph_grade, graph_gender, graph_edu, cols =3)
T1_complete_class_num = T1_complete_class
levels(T1_complete_class_num$Class)[levels(T1_complete_class_num$Class ) == "Low\nPerformance"] <- "low"
levels(T1_complete_class_num$Class)[levels(T1_complete_class_num$Class ) == "Low\nMental Rotation"] <- "lowmr"
levels(T1_complete_class_num$Class)[levels(T1_complete_class_num$Class ) == "High\nPerformance"] <- "high"
T1_complete_class_num$Class = relevel(T1_complete_class_num$Class, ref = "lowmr")
fit_reflowmr = multinom(Class ~ T0PDEMOMaxParentEducation
+ T0SGENGender
+ T0SY1Y2G_StartGrade
+ T1LWIDWS,
data = T1_complete_class_num)
## # weights: 18 (10 variable)
## initial value 307.611441
## iter 10 value 262.180221
## final value 258.856520
## converged
# Print the model summary
summary(fit_reflowmr)
## Call:
## multinom(formula = Class ~ T0PDEMOMaxParentEducation + T0SGENGender +
## T0SY1Y2G_StartGrade + T1LWIDWS, data = T1_complete_class_num)
##
## Coefficients:
## (Intercept) T0PDEMOMaxParentEducation T0SGENGender T0SY1Y2G_StartGrade
## low 2.601 -0.06912 0.9168 0.3973
## high -8.927 0.14478 0.2595 -0.4015
## T1LWIDWS
## low -0.01087
## high 0.01680
##
## Std. Errors:
## (Intercept) T0PDEMOMaxParentEducation T0SGENGender T0SY1Y2G_StartGrade
## low 0.7397 0.07658 0.3815 0.2185
## high 1.8674 0.05725 0.2745 0.2091
## T1LWIDWS
## low 0.003642
## high 0.004775
##
## Residual Deviance: 517.7
## AIC: 537.7
stargazer(fit_reflowmr, header=FALSE, type='text')
##
## ======================================================
## Dependent variable:
## ----------------------------
## low high
## (1) (2)
## ------------------------------------------------------
## T0PDEMOMaxParentEducation -0.069 0.145**
## (0.077) (0.057)
##
## T0SGENGender 0.917** 0.259
## (0.381) (0.275)
##
## T0SY1Y2G_StartGrade 0.397* -0.402*
## (0.218) (0.209)
##
## T1LWIDWS -0.011*** 0.017***
## (0.004) (0.005)
##
## Constant 2.601*** -8.927***
## (0.740) (1.867)
##
## ------------------------------------------------------
## Akaike Inf. Crit. 537.700 537.700
## ======================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
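# multinom() does not report p-values directly; stargazer's stars above are based on the
# coefficient/SE ratios. Explicit two-tailed Wald z-test p-values can be obtained like this
# (sketch, not part of the original output):
# z <- summary(fit_reflowmr)$coefficients / summary(fit_reflowmr)$standard.errors
# p <- 2 * (1 - pnorm(abs(z)))
# round(p, 3)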
T1_complete_class_num$Class = relevel(T1_complete_class_num$Class, ref = "low")
fit_reflow = multinom(Class ~ T0PDEMOMaxParentEducation
+ T0SGENGender
+ T0SY1Y2G_StartGrade
+ T1LWIDWS,
data = T1_complete_class_num)
## # weights: 18 (10 variable)
## initial value 307.611441
## iter 10 value 262.332017
## iter 20 value 258.856520
## iter 20 value 258.856520
## iter 20 value 258.856520
## final value 258.856520
## converged
# Print the model summary
summary(fit_reflow)
## Call:
## multinom(formula = Class ~ T0PDEMOMaxParentEducation + T0SGENGender +
## T0SY1Y2G_StartGrade + T1LWIDWS, data = T1_complete_class_num)
##
## Coefficients:
## (Intercept) T0PDEMOMaxParentEducation T0SGENGender T0SY1Y2G_StartGrade
## lowmr -2.601 0.06912 -0.9168 -0.3973
## high -11.528 0.21390 -0.6573 -0.7989
## T1LWIDWS
## lowmr 0.01087
## high 0.02767
##
## Std. Errors:
## (Intercept) T0PDEMOMaxParentEducation T0SGENGender T0SY1Y2G_StartGrade
## lowmr 1.126 0.07676 0.3815 0.2294
## high 1.072 0.07766 0.3863 0.2260
## T1LWIDWS
## lowmr 0.004148
## high 0.003988
##
## Residual Deviance: 517.7
## AIC: 537.7
stargazer(fit_reflow, header=FALSE, type='text')
##
## ======================================================
## Dependent variable:
## ----------------------------
## lowmr high
## (1) (2)
## ------------------------------------------------------
## T0PDEMOMaxParentEducation 0.069 0.214***
## (0.077) (0.078)
##
## T0SGENGender -0.917** -0.657*
## (0.381) (0.386)
##
## T0SY1Y2G_StartGrade -0.397* -0.799***
## (0.229) (0.226)
##
## T1LWIDWS 0.011*** 0.028***
## (0.004) (0.004)
##
## Constant -2.601** -11.530***
## (1.126) (1.072)
##
## ------------------------------------------------------
## Akaike Inf. Crit. 537.700 537.700
## ======================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
T1_complete_class_gather = T1_complete_class_gather %>%
left_join(spatial_profiles_database_atleast2tp[c("SubjectID","T1LWIDWS.s")], by ="SubjectID")
T1_complete_class_gather = T1_complete_class_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
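# Mixed model: task z-scores predicted by profile (Class) x task, adjusting for letter-word
# identification, gender, and parental education, with a random intercept per child.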
model_task_class <- lmer(zscore ~ Class * task + T1LWIDWS.s + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = T1_complete_class_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_task_class)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## zscore ~ Class * task + T1LWIDWS.s + T0SGENGender + T0PDEMOMaxParentEducation +
## (1 | SubjectID)
## Data: T1_complete_class_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 2399
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.418 -0.603 0.045 0.660 3.118
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.00806 0.0898
## Residual 0.56746 0.7533
## Number of obs: 1033, groups: SubjectID, 280
##
## Fixed effects:
## Estimate Std. Error df
## (Intercept) -1.4686 0.2087 534.1638
## ClassLow\nMental Rotation 1.0365 0.1558 1017.1626
## ClassHigh\nPerformance 1.4920 0.1545 1015.4861
## taskT1MR.s 0.7102 0.1825 809.0851
## taskT1PMPA.s -0.2818 0.1888 841.3512
## taskT1PRWMPAE.asin.sR 0.1798 0.1836 815.1354
## T1LWIDWS.s 0.0701 0.0265 283.2278
## T0SGENGender 0.0361 0.0492 282.4219
## T0PDEMOMaxParentEducation 0.0186 0.0102 283.6602
## ClassLow\nMental Rotation:taskT1MR.s -1.4877 0.2097 800.2649
## ClassHigh\nPerformance:taskT1MR.s -0.1926 0.2059 802.5563
## ClassLow\nMental Rotation:taskT1PMPA.s 0.4258 0.2171 831.7282
## ClassHigh\nPerformance:taskT1PMPA.s 0.3502 0.2131 834.1963
## ClassLow\nMental Rotation:taskT1PRWMPAE.asin.sR -0.0171 0.2113 806.9424
## ClassHigh\nPerformance:taskT1PRWMPAE.asin.sR -0.2864 0.2078 810.6349
## t value Pr(>|t|)
## (Intercept) -7.04 0.0000000000061
## ClassLow\nMental Rotation 6.65 0.0000000000467
## ClassHigh\nPerformance 9.66 < 0.0000000000000002
## taskT1MR.s 3.89 0.00011
## taskT1PMPA.s -1.49 0.13591
## taskT1PRWMPAE.asin.sR 0.98 0.32782
## T1LWIDWS.s 2.65 0.00855
## T0SGENGender 0.74 0.46289
## T0PDEMOMaxParentEducation 1.81 0.07090
## ClassLow\nMental Rotation:taskT1MR.s -7.09 0.0000000000029
## ClassHigh\nPerformance:taskT1MR.s -0.94 0.34984
## ClassLow\nMental Rotation:taskT1PMPA.s 1.96 0.05021
## ClassHigh\nPerformance:taskT1PMPA.s 1.64 0.10065
## ClassLow\nMental Rotation:taskT1PRWMPAE.asin.sR -0.08 0.93570
## ClassHigh\nPerformance:taskT1PRWMPAE.asin.sR -1.38 0.16858
##
## (Intercept) ***
## ClassLow\nMental Rotation ***
## ClassHigh\nPerformance ***
## taskT1MR.s ***
## taskT1PMPA.s
## taskT1PRWMPAE.asin.sR
## T1LWIDWS.s **
## T0SGENGender
## T0PDEMOMaxParentEducation .
## ClassLow\nMental Rotation:taskT1MR.s ***
## ClassHigh\nPerformance:taskT1MR.s
## ClassLow\nMental Rotation:taskT1PMPA.s .
## ClassHigh\nPerformance:taskT1PMPA.s
## ClassLow\nMental Rotation:taskT1PRWMPAE.asin.sR
## ClassHigh\nPerformance:taskT1PRWMPAE.asin.sR
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova(model_task_class, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: zscore
## F Df Df.res Pr(>F)
## (Intercept) 49.48 1 518 0.0000000000064 ***
## Class 47.61 2 1015 < 0.0000000000000002 ***
## task 10.98 3 795 0.0000004537906 ***
## T1LWIDWS.s 7.01 1 270 0.0086 **
## T0SGENGender 0.54 1 270 0.4630
## T0PDEMOMaxParentEducation 3.29 1 271 0.0710 .
## Class:task 32.45 6 785 < 0.0000000000000002 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
means_model_task_class = emmeans(model_task_class, pairwise ~ Class|task, mult.name = "task")
summary(means_model_task_class, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "bonferroni")
## $emmeans
## task = T1AWMADMSS_N.s:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -1.1386 0.1370 1017 -1.467 -0.8100 -8.309 <.0001
## Low\nMental Rotation -0.1021 0.0748 1016 -0.281 0.0774 -1.364 0.5186
## High\nPerformance 0.3534 0.0698 1016 0.186 0.5207 5.063 <.0001
##
## task = T1MR.s:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.4285 0.1237 1016 -0.725 -0.1318 -3.463 0.0017
## Low\nMental Rotation -0.8796 0.0732 1015 -1.055 -0.7042 -12.021 <.0001
## High\nPerformance 0.8709 0.0671 1015 0.710 1.0318 12.974 <.0001
##
## task = T1PMPA.s:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -1.4204 0.1326 1017 -1.738 -1.1025 -10.714 <.0001
## Low\nMental Rotation 0.0420 0.0786 1016 -0.147 0.2305 0.534 1.0000
## High\nPerformance 0.4218 0.0720 1016 0.249 0.5945 5.856 <.0001
##
## task = T1PRWMPAE.asin.sR:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.9588 0.1257 1016 -1.260 -0.6575 -7.630 <.0001
## Low\nMental Rotation 0.0607 0.0749 1016 -0.119 0.2402 0.810 1.0000
## High\nPerformance 0.2468 0.0700 1016 0.079 0.4146 3.526 0.0013
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
## Conf-level adjustment: bonferroni method for 3 estimates
## P value adjustment: bonferroni method for 3 tests
##
## $contrasts
## task = T1AWMADMSS_N.s:
## contrast estimate SE df lower.CL
## Low\nPerformance - Low\nMental Rotation -1.036 0.1558 1017 -1.410
## Low\nPerformance - High\nPerformance -1.492 0.1545 1015 -1.863
## Low\nMental Rotation - High\nPerformance -0.455 0.1027 1014 -0.702
## upper.CL t.ratio p.value
## -0.6628 -6.651 <.0001
## -1.1214 -9.654 <.0001
## -0.2091 -4.433 <.0001
##
## task = T1MR.s:
## contrast estimate SE df lower.CL
## Low\nPerformance - Low\nMental Rotation 0.451 0.1434 1016 0.107
## Low\nPerformance - High\nPerformance -1.299 0.1415 1014 -1.639
## Low\nMental Rotation - High\nPerformance -1.750 0.0997 1014 -1.990
## upper.CL t.ratio p.value
## 0.7950 3.147 0.0051
## -0.9599 -9.180 <.0001
## -1.5115 -17.559 <.0001
##
## task = T1PMPA.s:
## contrast estimate SE df lower.CL
## Low\nPerformance - Low\nMental Rotation -1.462 0.1539 1017 -1.831
## Low\nPerformance - High\nPerformance -1.842 0.1516 1015 -2.206
## Low\nMental Rotation - High\nPerformance -0.380 0.1070 1015 -0.636
## upper.CL t.ratio p.value
## -1.0933 -9.502 <.0001
## -1.4786 -12.153 <.0001
## -0.1232 -3.549 0.0012
##
## task = T1PRWMPAE.asin.sR:
## contrast estimate SE df lower.CL
## Low\nPerformance - Low\nMental Rotation -1.020 0.1460 1017 -1.370
## Low\nPerformance - High\nPerformance -1.206 0.1449 1014 -1.553
## Low\nMental Rotation - High\nPerformance -0.186 0.1029 1014 -0.433
## upper.CL t.ratio p.value
## -0.6693 -6.982 <.0001
## -0.8582 -8.322 <.0001
## 0.0606 -1.809 0.2122
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
## Conf-level adjustment: bonferroni method for 3 estimates
## P value adjustment: bonferroni method for 3 tests
T1_complete_class = T1_complete_class %>%
left_join(spatial_profiles_database_atleast2tp[c("SubjectID","T1LWIDWS.s")], by ="SubjectID")
T1_complete_class_lw = T1_complete_class[c("SubjectID","T1LWIDWS.s","Class")]
T1_complete_class_lw = T1_complete_class_lw[rowSums(is.na(T1_complete_class_lw))==0,]
ezANOVA(T1_complete_class_lw, dv = T1LWIDWS.s, between = Class, wid = SubjectID)
## $ANOVA
## Effect DFn DFd F p p<.05 ges
## 1 Class 2 311 13.71 0.000001962 * 0.08104
##
## $`Levene's Test for Homogeneity of Variance`
## DFn DFd SSn SSd F p p<.05
## 1 2 311 0.3479 115.6 0.4679 0.6267
summarySE(T1_complete_class_lw, "T1LWIDWS.s","Class", na.rm = T)
## Class N T1LWIDWS.s sd se ci
## 1 Low\nPerformance 44 -0.3508 0.9984 0.15052 0.3036
## 2 Low\nMental Rotation 128 -0.2607 0.9161 0.08097 0.1602
## 3 High\nPerformance 142 0.2840 0.9890 0.08300 0.1641
model_lw = lm(T1LWIDWS.s ~ Class , T1_complete_class_lw)
means_model_lw = emmeans(model_lw, pairwise ~ Class)
summary(means_model_lw, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.351 0.1449 311 -0.636 -0.0657 -2.421 0.0161
## Low\nMental Rotation -0.261 0.0850 311 -0.428 -0.0936 -3.069 0.0023
## High\nPerformance 0.284 0.0807 311 0.125 0.4427 3.521 0.0005
##
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.0901 0.168 311 -0.421 0.240
## Low\nPerformance - High\nPerformance -0.6348 0.166 311 -0.961 -0.308
## Low\nMental Rotation - High\nPerformance -0.5447 0.117 311 -0.775 -0.314
## t.ratio p.value
## -0.536 0.5923
## -3.828 0.0002
## -4.650 <.0001
##
## Confidence level used: 0.95
spatial_profiles_database_atleast2tp$SubjectID=as.factor(as.character(spatial_profiles_database_atleast2tp$SubjectID))
T1_complete_class = T1_complete_class %>%
left_join(spatial_profiles_database_atleast2tp[c("SubjectID","T1EC.s","T1ASWmac.s")], by ="SubjectID")
T1_complete_class_ec = T1_complete_class[c("SubjectID","T1EC.s","Class")]
T1_complete_class_ec = T1_complete_class_ec[rowSums(is.na(T1_complete_class_ec))==0,]
T1_complete_class_ec = T1_complete_class_ec %>%
left_join(spatial_profiles_database_atleast2tp[c("SubjectID","T1LWIDWS.s")], by ="SubjectID")
T1_complete_class_ec = subset(T1_complete_class_ec, !is.na(T1LWIDWS.s))
T1_complete_class_ec$Class = as.factor(T1_complete_class_ec$Class)
summary(lm(T1EC.s ~ Class + T1LWIDWS.s, T1_complete_class_ec))
##
## Call:
## lm(formula = T1EC.s ~ Class + T1LWIDWS.s, data = T1_complete_class_ec)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.6397 -0.4842 0.0276 0.4909 2.5344
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.7013 0.1343 -5.22 0.00000033970 ***
## ClassLow\nMental Rotation 0.5271 0.1544 3.41 0.00073 ***
## ClassHigh\nPerformance 0.9867 0.1556 6.34 0.00000000086 ***
## T1LWIDWS.s 0.2579 0.0513 5.03 0.00000087996 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.852 on 291 degrees of freedom
## Multiple R-squared: 0.244, Adjusted R-squared: 0.236
## F-statistic: 31.3 on 3 and 291 DF, p-value: <0.0000000000000002
Anova(lm(T1EC.s ~ Class + T1LWIDWS.s, T1_complete_class_ec), test.statistic = "F", type= "III")
## Anova Table (Type III tests)
##
## Response: T1EC.s
## Sum Sq Df F value Pr(>F)
## (Intercept) 19.8 1 27.3 0.0000003397 ***
## Class 32.0 2 22.0 0.0000000012 ***
## T1LWIDWS.s 18.3 1 25.2 0.0000008800 ***
## Residuals 211.2 291
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
model_ec = lm(T1EC.s ~ Class + T1LWIDWS.s, T1_complete_class_ec)
means_model_ec = emmeans(model_ec, pairwise ~ Class)
summary(means_model_ec, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.708 0.1341 291 -0.972 -0.444 -5.276 <.0001
## Low\nMental Rotation -0.181 0.0791 291 -0.336 -0.025 -2.284 0.0231
## High\nPerformance 0.279 0.0751 291 0.131 0.427 3.713 0.0002
##
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.527 0.154 291 -0.831 -0.223
## Low\nPerformance - High\nPerformance -0.987 0.156 291 -1.293 -0.680
## Low\nMental Rotation - High\nPerformance -0.460 0.111 291 -0.678 -0.241
## t.ratio p.value
## -3.415 0.0007
## -6.343 <.0001
## -4.143 <.0001
##
## Confidence level used: 0.95
summarySE(T1_complete_class_ec, "T1EC.s","Class", na.rm = T)
## Class N T1EC.s sd se ci
## 1 Low\nPerformance 41 -0.7938 0.9479 0.14804 0.2992
## 2 Low\nMental Rotation 119 -0.2438 0.8753 0.08024 0.1589
## 3 High\nPerformance 135 0.3607 0.8776 0.07553 0.1494
T1_complete_class_asw = T1_complete_class[c("SubjectID","T1ASWmac.s","Class")]
T1_complete_class_asw = T1_complete_class_asw[rowSums(is.na(T1_complete_class_asw))==0,]
T1_complete_class_asw = T1_complete_class_asw %>%
left_join(spatial_profiles_database_atleast2tp[c("SubjectID","T1LWIDWS.s")], by ="SubjectID")
T1_complete_class_asw = subset(T1_complete_class_asw, !is.na(T1LWIDWS.s))
T1_complete_class_asw$Class = as.factor(T1_complete_class_asw$Class)
summary(lm(T1ASWmac.s ~ Class + T1LWIDWS.s, T1_complete_class_asw))
##
## Call:
## lm(formula = T1ASWmac.s ~ Class + T1LWIDWS.s, data = T1_complete_class_asw)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.4053 -0.6684 0.0121 0.6841 2.1945
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.3014 0.1528 -1.97 0.0496 *
## ClassLow\nMental Rotation 0.1990 0.1748 1.14 0.2559
## ClassHigh\nPerformance 0.4968 0.1782 2.79 0.0057 **
## T1LWIDWS.s 0.1599 0.0603 2.65 0.0085 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.954 on 281 degrees of freedom
## Multiple R-squared: 0.0782, Adjusted R-squared: 0.0684
## F-statistic: 7.95 on 3 and 281 DF, p-value: 0.0000419
model_aswmac = lm(T1ASWmac.s ~ Class + T1LWIDWS.s, T1_complete_class_asw)
means_model_aswmac = emmeans(model_aswmac, pairwise ~ Class)
summary(means_model_aswmac, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.305 0.1526 281 -0.6057 -0.0050 -2.001 0.0463
## Low\nMental Rotation -0.106 0.0888 281 -0.2812 0.0686 -1.197 0.2324
## High\nPerformance 0.191 0.0869 281 0.0203 0.3626 2.202 0.0285
##
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.199 0.175 281 -0.543 0.1451
## Low\nPerformance - High\nPerformance -0.497 0.178 281 -0.848 -0.1460
## Low\nMental Rotation - High\nPerformance -0.298 0.126 281 -0.547 -0.0489
## t.ratio p.value
## -1.139 0.2559
## -2.788 0.0057
## -2.355 0.0192
##
## Confidence level used: 0.95
Anova(model_aswmac, test.statistic = "F", type= "II")
## Anova Table (Type II tests)
##
## Response: T1ASWmac.s
## Sum Sq Df F value Pr(>F)
## Class 8.9 2 4.86 0.0084 **
## T1LWIDWS.s 6.4 1 7.03 0.0085 **
## Residuals 255.8 281
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
T1_complete_class_lw$Class = as.factor(as.character(T1_complete_class_lw$Class))
levels(T1_complete_class_lw$Class)[levels(T1_complete_class_lw$Class ) == "2"] <- "Low\nPerformance"
levels(T1_complete_class_lw$Class)[levels(T1_complete_class_lw$Class ) == "1"] <- "Low\nMental Rotation"
levels(T1_complete_class_lw$Class)[levels(T1_complete_class_lw$Class ) == "3"] <- "High\nPerformance"
T1_complete_class_lw$title = "Letter-Word\nIdentification"
graph_t1_lw = ggplot(T1_complete_class_lw,aes(x = factor(Class, level=c("Low\nPerformance","Low\nMental Rotation","High\nPerformance")), y = T1LWIDWS.s)) +
geom_line(aes(group = interaction (SubjectID)),
alpha = .10,
size = .25)+
geom_dotplot(binaxis='y', stackdir='center', dotsize= 0.35, shape = 21, color = "#4d4d4d", fill = NA, alpha = .8)+
geom_bar(data = summarySE(T1_complete_class_lw, "T1LWIDWS.s", c("Class"), na.rm = T), stat = "identity", show.legend = FALSE, fill = NA, aes(color = Class), size = .75, width = .75) +
ylab("Standardized Scores\n(Z-scores)")+
scale_y_continuous(breaks=seq(-3.5, 3.5, .5), limits=c(-3.5,3.9))+
geom_hline(yintercept = 0, linetype = 2, size = .75, color = "black")+
stat_summary(fun.data = mean_cl_normal, geom = "errorbar",
position = position_dodge(width = 0.10), width = 0.001,color="black", size =.5)+
#stat_summary(fun.y = "mean", geom = "point", size = 1.5, shape = 23, aes(colour = Class, fill =Class))+
#scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_color_manual(values = c("#4d4d4d","#4575b4","#b2182b"))+
#scale_x_discrete(labels = c("Working\nMemory","AMS\nPrecision","Proportional\nReasoning","Mental\nRotation"))+
facet_grid(.~ title)+
theme_bw()+
theme(legend.position="none",
axis.title.x=element_blank(),
axis.text.x = element_text(size=7.5,color ="black"),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_t1_lw
T1_complete_class_ec$Class = as.factor(as.character(T1_complete_class_ec$Class))
levels(T1_complete_class_ec$Class)[levels(T1_complete_class_ec$Class ) == "2"] <- "Low\nPerformance"
levels(T1_complete_class_ec$Class)[levels(T1_complete_class_ec$Class ) == "1"] <- "Low\nMental Rotation"
levels(T1_complete_class_ec$Class)[levels(T1_complete_class_ec$Class ) == "3"] <- "High\nPerformance"
T1_complete_class_ec$title = "Exact\nCalculations"
graph_t1_ec = ggplot(T1_complete_class_ec,aes(x = factor(Class, level=c("Low\nPerformance","Low\nMental Rotation","High\nPerformance")), y = T1EC.s)) +
geom_line(aes(group = interaction (SubjectID)),
alpha = .10,
size = .25)+
geom_dotplot(binaxis='y', stackdir='center', dotsize= 0.35, shape = 21, color = "#4d4d4d", fill = NA, alpha = .8)+
geom_bar(data = summarySE(T1_complete_class_ec, "T1EC.s", c("Class"), na.rm = T), stat = "identity", show.legend = FALSE, fill = NA, aes(color = Class), size = .75, width = .75) +
ylab("Standardized Scores\n(Z-scores)")+
scale_y_continuous(breaks=seq(-3.5, 3.5, .5), limits=c(-3.5,3.9))+
geom_hline(yintercept = 0, linetype = 2, size = .75, color = "black")+
stat_summary(fun.data = mean_cl_normal, geom = "errorbar",
position = position_dodge(width = 0.10), width = 0.001,color="black", size =.5)+
#stat_summary(fun.y = "mean", geom = "point", size = 1.5, shape = 23, aes(colour = Class, fill =Class))+
#scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_color_manual(values = c("#4d4d4d","#4575b4","#b2182b"))+
#scale_x_discrete(labels = c("Working\nMemory","AMS\nPrecision","Proportional\nReasoning","Mental\nRotation"))+
facet_grid(.~ title)+
theme_bw()+
theme(legend.position="none",
axis.title.x=element_blank(),
axis.text.x = element_text(size=7.5,color ="black"),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_t1_ec
T1_complete_class_asw$Class = as.factor(as.character(T1_complete_class_asw$Class))
levels(T1_complete_class_asw$Class)[levels(T1_complete_class_asw$Class ) == "2"] <- "Low\nPerformance"
levels(T1_complete_class_asw$Class)[levels(T1_complete_class_asw$Class ) == "1"] <- "Low\nMental Rotation"
levels(T1_complete_class_asw$Class)[levels(T1_complete_class_asw$Class ) == "3"] <- "High\nPerformance"
T1_complete_class_asw$title = "Approximate\nCalculations"
graph_t1_ac = ggplot(T1_complete_class_asw,aes(x = factor(Class, level=c("Low\nPerformance","Low\nMental Rotation","High\nPerformance")), y = T1ASWmac.s)) +
geom_line(aes(group = interaction (SubjectID)),
alpha = .10,
size = .25)+
geom_dotplot(binaxis='y', stackdir='center', dotsize= 0.35, shape = 21, color = "#4d4d4d", fill = NA, alpha = .8)+
geom_bar(data = summarySE(T1_complete_class_asw, "T1ASWmac.s", c("Class"), na.rm = T), stat = "identity", show.legend = FALSE, fill = NA, aes(color = Class), size = .75, width = .75) +
ylab("Standardized Scores\n(Z-scores)")+
scale_y_continuous(breaks=seq(-3.5, 3.5, .5), limits=c(-3.5,3.9))+
geom_hline(yintercept = 0, linetype = 2, size = .75, color = "black")+
stat_summary(fun.data = mean_cl_normal, geom = "errorbar",
position = position_dodge(width = 0.10), width = 0.001,color="black", size =.5)+
#stat_summary(fun.y = "mean", geom = "point", size = 1.5, shape = 23, aes(colour = Class, fill =Class))+
#scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_color_manual(values = c("#4d4d4d","#4575b4","#b2182b"))+
#scale_x_discrete(labels = c("Working\nMemory","AMS\nPrecision","Proportional\nReasoning","Mental\nRotation"))+
facet_grid(.~ title)+
theme_bw()+
theme(legend.position="none",
axis.title.x=element_blank(),
axis.text.x = element_text(size=7.5,color ="black"),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_t1_ac
multiplot(graph_t1_ec,graph_t1_ac, graph_t1_lw, cols = 3)
panamath_longitudinal_nonz_gather = panamath_longitudinal_nonz_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
panamath_longitudinal_nonz_gather$T0SY1Y2G_StartGrade = as.factor(as.numeric(panamath_longitudinal_nonz_gather$T0SY1Y2G_StartGrade))
panamath_longitudinal_nonz_gather$time_num = as.numeric(as.factor(panamath_longitudinal_nonz_gather$time))
model_long_panamath <- lmer(PMPA ~ T0SY1Y2G_StartGrade*time_num + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = panamath_longitudinal_nonz_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_long_panamath)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## PMPA ~ T0SY1Y2G_StartGrade * time_num + T0SGENGender + T0PDEMOMaxParentEducation +
## (1 | SubjectID)
## Data: panamath_longitudinal_nonz_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: -1490
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.446 -0.447 0.124 0.538 2.774
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.00586 0.0766
## Residual 0.00840 0.0916
## Number of obs: 995, groups: SubjectID, 311
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.49706 0.04443 468.75345 11.19
## T0SY1Y2G_StartGrade0 0.18980 0.02983 931.05323 6.36
## T0SY1Y2G_StartGrade1 0.24113 0.03129 937.80293 7.71
## T0SY1Y2G_StartGrade2 0.31827 0.03081 939.50409 10.33
## T0SY1Y2G_StartGrade3 0.30202 0.03404 946.89484 8.87
## time_num 0.06266 0.00861 768.64065 7.28
## T0SGENGender -0.01688 0.01060 305.47312 -1.59
## T0PDEMOMaxParentEducation 0.00567 0.00223 308.50105 2.54
## T0SY1Y2G_StartGrade0:time_num -0.03570 0.00986 754.18586 -3.62
## T0SY1Y2G_StartGrade1:time_num -0.03816 0.01031 754.63886 -3.70
## T0SY1Y2G_StartGrade2:time_num -0.05907 0.01023 765.12757 -5.77
## T0SY1Y2G_StartGrade3:time_num -0.05215 0.01121 757.38759 -4.65
## Pr(>|t|)
## (Intercept) < 0.0000000000000002 ***
## T0SY1Y2G_StartGrade0 0.000000000311057 ***
## T0SY1Y2G_StartGrade1 0.000000000000033 ***
## T0SY1Y2G_StartGrade2 < 0.0000000000000002 ***
## T0SY1Y2G_StartGrade3 < 0.0000000000000002 ***
## time_num 0.000000000000854 ***
## T0SGENGender 0.11223
## T0PDEMOMaxParentEducation 0.01156 *
## T0SY1Y2G_StartGrade0:time_num 0.00031 ***
## T0SY1Y2G_StartGrade1:time_num 0.00023 ***
## T0SY1Y2G_StartGrade2:time_num 0.000000011273025 ***
## T0SY1Y2G_StartGrade3:time_num 0.000003915749444 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T0SY1Y2G_StG0 T0SY1Y2G_StG1 T0SY1Y2G_StG2 T0SY1Y2G_StG3
## T0SY1Y2G_StG0 -0.477
## T0SY1Y2G_StG1 -0.502 0.698
## T0SY1Y2G_StG2 -0.504 0.709 0.678
## T0SY1Y2G_StG3 -0.466 0.642 0.614 0.623
## time_num -0.452 0.672 0.640 0.650 0.589
## T0SGENGendr -0.317 0.032 0.027 0.012 0.039
## T0PDEMOMxPE -0.733 -0.035 0.031 0.031 0.027
## T0SY1Y2G_SG0: 0.394 -0.777 -0.559 -0.567 -0.514
## T0SY1Y2G_SG1: 0.382 -0.561 -0.782 -0.543 -0.492
## T0SY1Y2G_SG2: 0.372 -0.566 -0.538 -0.784 -0.495
## T0SY1Y2G_SG3: 0.344 -0.516 -0.491 -0.499 -0.789
## tim_nm T0SGEN T0PDEM T0SY1Y2G_SG0: T0SY1Y2G_SG1: T0SY1Y2G_SG2:
## T0SY1Y2G_StG0
## T0SY1Y2G_StG1
## T0SY1Y2G_StG2
## T0SY1Y2G_StG3
## time_num
## T0SGENGendr 0.021
## T0PDEMOMxPE -0.007 -0.063
## T0SY1Y2G_SG0: -0.874 -0.024 0.011
## T0SY1Y2G_SG1: -0.836 -0.019 0.002 0.730
## T0SY1Y2G_SG2: -0.842 -0.014 0.017 0.735 0.703
## T0SY1Y2G_SG3: -0.768 -0.021 0.013 0.671 0.642 0.647
Anova(model_long_panamath, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: PMPA
## F Df Df.res Pr(>F)
## (Intercept) 125.13 1 464 < 0.0000000000000002 ***
## T0SY1Y2G_StartGrade 31.07 4 936 < 0.0000000000000002 ***
## time_num 52.88 1 765 0.00000000000088 ***
## T0SGENGender 2.54 1 301 0.112
## T0PDEMOMaxParentEducation 6.45 1 304 0.012 *
## T0SY1Y2G_StartGrade:time_num 9.30 4 738 0.00000024763522 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
em=emtrends(model_long_panamath, pairwise ~ T0SY1Y2G_StartGrade, var="time_num", mult.name = "T0SY1Y2G_StartGrade")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## T0SY1Y2G_StartGrade time_num.trend SE df lower.CL upper.CL t.ratio
## -1 0.06266 0.00862 765 0.04574 0.0796 7.272
## 0 0.02695 0.00480 705 0.01754 0.0364 5.620
## 1 0.02449 0.00566 718 0.01338 0.0356 4.326
## 2 0.00359 0.00553 753 -0.00726 0.0144 0.649
## 3 0.01051 0.00718 738 -0.00359 0.0246 1.463
## p.value
## <.0001
## <.0001
## <.0001
## 0.5162
## 0.1439
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade0 0.03570 0.00986 751 0.01634
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade1 0.03816 0.01031 751 0.01792
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade2 0.05907 0.01023 762 0.03897
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade3 0.05215 0.01122 754 0.03012
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade1 0.00246 0.00742 713 -0.01211
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade2 0.02337 0.00732 733 0.00900
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade3 0.01644 0.00864 728 -0.00051
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade2 0.02090 0.00791 735 0.00537
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade3 0.01398 0.00915 731 -0.00397
## T0SY1Y2G_StartGrade2 - T0SY1Y2G_StartGrade3 -0.00692 0.00906 744 -0.02471
## upper.CL t.ratio p.value
## 0.0551 3.620 0.0003
## 0.0584 3.702 0.0002
## 0.0792 5.771 <.0001
## 0.0742 4.648 <.0001
## 0.0170 0.332 0.7402
## 0.0377 3.194 0.0015
## 0.0334 1.904 0.0573
## 0.0364 2.642 0.0084
## 0.0319 1.529 0.1268
## 0.0109 -0.764 0.4452
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
means_model_task_class = emmeans(model_task_class, pairwise ~ Class|task, mult.name = "task")
summary(means_model_task_class, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## task = T1AWMADMSS_N.s:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -1.1386 0.1370 1017 -1.4075 -0.8697 -8.309 <.0001
## Low\nMental Rotation -0.1021 0.0748 1016 -0.2489 0.0448 -1.364 0.1729
## High\nPerformance 0.3534 0.0698 1016 0.2164 0.4903 5.063 <.0001
##
## task = T1MR.s:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.4285 0.1237 1016 -0.6712 -0.1857 -3.463 0.0006
## Low\nMental Rotation -0.8796 0.0732 1015 -1.0232 -0.7360 -12.021 <.0001
## High\nPerformance 0.8709 0.0671 1015 0.7392 1.0026 12.974 <.0001
##
## task = T1PMPA.s:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -1.4204 0.1326 1017 -1.6805 -1.1602 -10.714 <.0001
## Low\nMental Rotation 0.0420 0.0786 1016 -0.1123 0.1963 0.534 0.5935
## High\nPerformance 0.4218 0.0720 1016 0.2804 0.5631 5.856 <.0001
##
## task = T1PRWMPAE.asin.sR:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.9588 0.1257 1016 -1.2054 -0.7122 -7.630 <.0001
## Low\nMental Rotation 0.0607 0.0749 1016 -0.0863 0.2076 0.810 0.4180
## High\nPerformance 0.2468 0.0700 1016 0.1095 0.3841 3.526 0.0004
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## task = T1AWMADMSS_N.s:
## contrast estimate SE df lower.CL
## Low\nPerformance - Low\nMental Rotation -1.036 0.1558 1017 -1.342
## Low\nPerformance - High\nPerformance -1.492 0.1545 1015 -1.795
## Low\nMental Rotation - High\nPerformance -0.455 0.1027 1014 -0.657
## upper.CL t.ratio p.value
## -0.7307 -6.651 <.0001
## -1.1887 -9.654 <.0001
## -0.2538 -4.433 <.0001
##
## task = T1MR.s:
## contrast estimate SE df lower.CL
## Low\nPerformance - Low\nMental Rotation 0.451 0.1434 1016 0.170
## Low\nPerformance - High\nPerformance -1.299 0.1415 1014 -1.577
## Low\nMental Rotation - High\nPerformance -1.750 0.0997 1014 -1.946
## upper.CL t.ratio p.value
## 0.7325 3.147 0.0017
## -1.0216 -9.180 <.0001
## -1.5549 -17.559 <.0001
##
## task = T1PMPA.s:
## contrast estimate SE df lower.CL
## Low\nPerformance - Low\nMental Rotation -1.462 0.1539 1017 -1.764
## Low\nPerformance - High\nPerformance -1.842 0.1516 1015 -2.140
## Low\nMental Rotation - High\nPerformance -0.380 0.1070 1015 -0.590
## upper.CL t.ratio p.value
## -1.1604 -9.502 <.0001
## -1.5447 -12.153 <.0001
## -0.1698 -3.549 0.0004
##
## task = T1PRWMPAE.asin.sR:
## contrast estimate SE df lower.CL
## Low\nPerformance - Low\nMental Rotation -1.020 0.1460 1017 -1.306
## Low\nPerformance - High\nPerformance -1.206 0.1449 1014 -1.490
## Low\nMental Rotation - High\nPerformance -0.186 0.1029 1014 -0.388
## upper.CL t.ratio p.value
## -0.7329 -6.982 <.0001
## -0.9213 -8.322 <.0001
## 0.0158 -1.809 0.0707
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
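### Longitudinal growth model: mental rotation (GMPA) predicted by starting grade, time, gender, and parent education, with random intercepts by subject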
mentalrotation_longitudinal_nonz_gather = mentalrotation_longitudinal_nonz_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
mentalrotation_longitudinal_nonz_gather$T0SY1Y2G_StartGrade = as.factor(as.numeric(mentalrotation_longitudinal_nonz_gather$T0SY1Y2G_StartGrade))
mentalrotation_longitudinal_nonz_gather$time_num = as.numeric(as.factor(mentalrotation_longitudinal_nonz_gather$time))
model_long_mr <- lmer(GMPA ~ T0SY1Y2G_StartGrade*time_num + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = mentalrotation_longitudinal_nonz_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_long_mr)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## GMPA ~ T0SY1Y2G_StartGrade * time_num + T0SGENGender + T0PDEMOMaxParentEducation +
## (1 | SubjectID)
## Data: mentalrotation_longitudinal_nonz_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: -93.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.7097 -0.5277 0.0489 0.5224 2.6031
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.0457 0.214
## Residual 0.0282 0.168
## Number of obs: 859, groups: SubjectID, 271
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -0.27444 0.14234 695.81779 -1.93
## T0SY1Y2G_StartGrade1 0.42549 0.10867 751.50631 3.92
## T0SY1Y2G_StartGrade2 0.50868 0.10789 743.87733 4.71
## T0SY1Y2G_StartGrade3 0.64654 0.11127 772.10346 5.81
## time_num 0.14849 0.02809 602.56497 5.29
## T0SGENGender 0.04912 0.02896 262.79858 1.70
## T0PDEMOMaxParentEducation 0.00956 0.00617 262.56117 1.55
## T0SY1Y2G_StartGrade1:time_num -0.06012 0.02965 601.03989 -2.03
## T0SY1Y2G_StartGrade2:time_num -0.08576 0.02948 601.70868 -2.91
## T0SY1Y2G_StartGrade3:time_num -0.09120 0.03034 600.84724 -3.01
## Pr(>|t|)
## (Intercept) 0.0543 .
## T0SY1Y2G_StartGrade1 0.0000983614 ***
## T0SY1Y2G_StartGrade2 0.0000028885 ***
## T0SY1Y2G_StartGrade3 0.0000000091 ***
## time_num 0.0000001757 ***
## T0SGENGender 0.0910 .
## T0PDEMOMaxParentEducation 0.1227
## T0SY1Y2G_StartGrade1:time_num 0.0430 *
## T0SY1Y2G_StartGrade2:time_num 0.0038 **
## T0SY1Y2G_StartGrade3:time_num 0.0028 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T0SY1Y2G_StG1 T0SY1Y2G_StG2 T0SY1Y2G_StG3 tim_nm T0SGEN
## T0SY1Y2G_StG1 -0.702
## T0SY1Y2G_StG2 -0.707 0.893
## T0SY1Y2G_StG3 -0.691 0.866 0.872
## time_num -0.692 0.905 0.911 0.884
## T0SGENGendr -0.227 0.008 -0.006 0.014 0.006
## T0PDEMOMxPE -0.627 0.036 0.043 0.041 -0.001 -0.115
## T0SY1Y2G_SG1: 0.657 -0.926 -0.864 -0.838 -0.948 -0.007
## T0SY1Y2G_SG2: 0.658 -0.862 -0.930 -0.842 -0.953 -0.002
## T0SY1Y2G_SG3: 0.639 -0.838 -0.844 -0.913 -0.926 -0.005
## T0PDEM T0SY1Y2G_SG1: T0SY1Y2G_SG2:
## T0SY1Y2G_StG1
## T0SY1Y2G_StG2
## T0SY1Y2G_StG3
## time_num
## T0SGENGendr
## T0PDEMOMxPE
## T0SY1Y2G_SG1: -0.001
## T0SY1Y2G_SG2: 0.002 0.903
## T0SY1Y2G_SG3: 0.003 0.877 0.882
Anova(model_long_mr, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: GMPA
## F Df Df.res Pr(>F)
## (Intercept) 3.72 1 697 0.0543 .
## T0SY1Y2G_StartGrade 13.05 3 715 0.000000026 ***
## time_num 27.93 1 604 0.000000176 ***
## T0SGENGender 2.88 1 264 0.0910 .
## T0PDEMOMaxParentEducation 2.40 1 264 0.1227
## T0SY1Y2G_StartGrade:time_num 4.35 3 596 0.0048 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
em=emtrends(model_long_mr, pairwise ~ T0SY1Y2G_StartGrade, var="time_num", mult.name = "T0SY1Y2G_StartGrade")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## T0SY1Y2G_StartGrade time_num.trend SE df lower.CL upper.CL t.ratio
## 0 0.1485 0.02810 604 0.0933 0.2037 5.285
## 1 0.0884 0.00947 589 0.0698 0.1070 9.334
## 2 0.0627 0.00893 594 0.0452 0.0803 7.021
## 3 0.0573 0.01146 592 0.0348 0.0798 4.999
## p.value
## <.0001
## <.0001
## <.0001
## <.0001
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade1 0.06012 0.0297 602 0.001885
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade2 0.08576 0.0295 603 0.027861
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade3 0.09120 0.0303 602 0.031600
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade2 0.02565 0.0130 592 0.000081
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade3 0.03108 0.0149 591 0.001884
## T0SY1Y2G_StartGrade2 - T0SY1Y2G_StartGrade3 0.00543 0.0145 593 -0.023106
## upper.CL t.ratio p.value
## 0.1183 2.027 0.0431
## 0.1437 2.909 0.0038
## 0.1508 3.005 0.0028
## 0.0512 1.970 0.0493
## 0.0603 2.091 0.0370
## 0.0340 0.374 0.7087
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
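### Longitudinal growth model: second mental rotation measure (GPPA), same model specification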
mentalrotation2_longitudinal_nonz_gather = mentalrotation2_longitudinal_nonz_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
mentalrotation2_longitudinal_nonz_gather$T0SY1Y2G_StartGrade = as.factor(as.numeric(mentalrotation2_longitudinal_nonz_gather$T0SY1Y2G_StartGrade))
mentalrotation2_longitudinal_nonz_gather$time_num = as.numeric(as.factor(mentalrotation2_longitudinal_nonz_gather$time))
model_long_mr2 <- lmer(GPPA ~ T0SY1Y2G_StartGrade*time_num + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = mentalrotation2_longitudinal_nonz_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_long_mr2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## GPPA ~ T0SY1Y2G_StartGrade * time_num + T0SGENGender + T0PDEMOMaxParentEducation +
## (1 | SubjectID)
## Data: mentalrotation2_longitudinal_nonz_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: -219.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.7856 -0.6321 0.0488 0.6292 3.0732
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.00785 0.0886
## Residual 0.01806 0.1344
## Number of obs: 287, groups: SubjectID, 118
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.15356 0.08621 131.43852 1.78
## T0SY1Y2G_StartGrade0 0.17949 0.04747 267.96725 3.78
## time_num 0.06270 0.01086 182.46448 5.77
## T0SGENGender 0.04158 0.02316 112.95406 1.80
## T0PDEMOMaxParentEducation 0.01643 0.00468 109.34072 3.51
## T0SY1Y2G_StartGrade0:time_num -0.03283 0.02382 176.84746 -1.38
## Pr(>|t|)
## (Intercept) 0.07717 .
## T0SY1Y2G_StartGrade0 0.00019 ***
## time_num 0.000000033 ***
## T0SGENGender 0.07521 .
## T0PDEMOMaxParentEducation 0.00065 ***
## T0SY1Y2G_StartGrade0:time_num 0.16978
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T0SY1Y2G_StG0 tim_nm T0SGEN T0PDEM
## T0SY1Y2G_StG0 -0.235
## time_num -0.319 0.541
## T0SGENGendr -0.428 0.010 0.002
## T0PDEMOMxPE -0.837 -0.027 0.025 0.035
## T0SY1Y2G_SG0: 0.144 -0.842 -0.456 0.007 -0.013
Anova(model_long_mr2, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: GPPA
## F Df Df.res Pr(>F)
## (Intercept) 3.17 1 129 0.07740 .
## T0SY1Y2G_StartGrade 14.29 1 268 0.00019 ***
## time_num 33.26 1 180 0.000000034 ***
## T0SGENGender 3.22 1 111 0.07548 .
## T0PDEMOMaxParentEducation 12.29 1 107 0.00067 ***
## T0SY1Y2G_StartGrade:time_num 1.90 1 175 0.16994
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
em=emtrends(model_long_mr2, pairwise ~ T0SY1Y2G_StartGrade, var="time_num", mult.name = "T0SY1Y2G_StartGrade")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## T0SY1Y2G_StartGrade time_num.trend SE df lower.CL upper.CL t.ratio
## -1 0.0627 0.0109 180 0.0413 0.0842 5.767
## 0 0.0299 0.0212 173 -0.0120 0.0717 1.409
## p.value
## <.0001
## 0.1606
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade0 0.0328 0.0238 175 -0.0142
## upper.CL t.ratio p.value
## 0.0799 1.378 0.1699
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
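### Longitudinal growth model: working memory (AWMADMRS_N), same model specification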
workingmemory_longitudinal_nonz_gather = workingmemory_longitudinal_nonz_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
workingmemory_longitudinal_nonz_gather$T0SY1Y2G_StartGrade = as.factor(as.numeric(workingmemory_longitudinal_nonz_gather$T0SY1Y2G_StartGrade))
workingmemory_longitudinal_nonz_gather$time_num = as.numeric(as.factor(workingmemory_longitudinal_nonz_gather$time))
model_long_wm <- lmer(AWMADMRS_N ~ T0SY1Y2G_StartGrade*time_num + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = workingmemory_longitudinal_nonz_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_long_wm)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: AWMADMRS_N ~ T0SY1Y2G_StartGrade * time_num + T0SGENGender +
## T0PDEMOMaxParentEducation + (1 | SubjectID)
## Data: workingmemory_longitudinal_nonz_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 6059
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.696 -0.573 0.039 0.562 2.680
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 7.71 2.78
## Residual 10.52 3.24
## Number of obs: 1092, groups: SubjectID, 311
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 2.3774 1.5378 438.3529 1.55
## T0SY1Y2G_StartGrade0 2.0542 1.0183 969.9144 2.02
## T0SY1Y2G_StartGrade1 5.5941 1.0530 969.7183 5.31
## T0SY1Y2G_StartGrade2 8.0849 1.0274 968.5050 7.87
## T0SY1Y2G_StartGrade3 11.2573 1.1265 972.3782 9.99
## time_num 1.5725 0.2689 817.9148 5.85
## T0SGENGender -0.1685 0.3753 301.8362 -0.45
## T0PDEMOMaxParentEducation 0.3499 0.0789 301.4215 4.44
## T0SY1Y2G_StartGrade0:time_num 0.1792 0.3203 816.0332 0.56
## T0SY1Y2G_StartGrade1:time_num -0.2161 0.3295 810.7887 -0.66
## T0SY1Y2G_StartGrade2:time_num -0.1977 0.3219 814.2002 -0.61
## T0SY1Y2G_StartGrade3:time_num -0.7027 0.3518 812.0247 -2.00
## Pr(>|t|)
## (Intercept) 0.123
## T0SY1Y2G_StartGrade0 0.044 *
## T0SY1Y2G_StartGrade1 0.0000001340539488 ***
## T0SY1Y2G_StartGrade2 0.0000000000000095 ***
## T0SY1Y2G_StartGrade3 < 0.0000000000000002 ***
## time_num 0.0000000071660447 ***
## T0SGENGender 0.654
## T0PDEMOMaxParentEducation 0.0000127782125618 ***
## T0SY1Y2G_StartGrade0:time_num 0.576
## T0SY1Y2G_StartGrade1:time_num 0.512
## T0SY1Y2G_StartGrade2:time_num 0.539
## T0SY1Y2G_StartGrade3:time_num 0.046 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T0SY1Y2G_StG0 T0SY1Y2G_StG1 T0SY1Y2G_StG2 T0SY1Y2G_StG3
## T0SY1Y2G_StG0 -0.443
## T0SY1Y2G_StG1 -0.482 0.671
## T0SY1Y2G_StG2 -0.493 0.688 0.668
## T0SY1Y2G_StG3 -0.455 0.627 0.610 0.625
## time_num -0.429 0.628 0.608 0.623 0.569
## T0SGENGendr -0.307 0.013 0.022 -0.005 0.016
## T0PDEMOMxPE -0.750 -0.028 0.037 0.049 0.042
## T0SY1Y2G_SG0: 0.352 -0.757 -0.510 -0.523 -0.477
## T0SY1Y2G_SG1: 0.354 -0.512 -0.758 -0.509 -0.464
## T0SY1Y2G_SG2: 0.353 -0.524 -0.508 -0.756 -0.475
## T0SY1Y2G_SG3: 0.320 -0.480 -0.464 -0.476 -0.758
## tim_nm T0SGEN T0PDEM T0SY1Y2G_SG0: T0SY1Y2G_SG1: T0SY1Y2G_SG2:
## T0SY1Y2G_StG0
## T0SY1Y2G_StG1
## T0SY1Y2G_StG2
## T0SY1Y2G_StG3
## time_num
## T0SGENGendr 0.001
## T0PDEMOMxPE 0.016 -0.075
## T0SY1Y2G_SG0: -0.839 -0.001 -0.003
## T0SY1Y2G_SG1: -0.816 -0.011 -0.013 0.685
## T0SY1Y2G_SG2: -0.835 0.004 -0.009 0.701 0.681
## T0SY1Y2G_SG3: -0.764 0.006 -0.005 0.641 0.623 0.638
Anova(model_long_wm, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: AWMADMRS_N
## F Df Df.res Pr(>F)
## (Intercept) 2.39 1 439 0.123
## T0SY1Y2G_StartGrade 39.76 4 976 < 0.0000000000000002 ***
## time_num 34.19 1 819 0.0000000072 ***
## T0SGENGender 0.20 1 303 0.654
## T0PDEMOMaxParentEducation 19.69 1 302 0.0000127757 ***
## T0SY1Y2G_StartGrade:time_num 2.50 4 808 0.041 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
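### Longitudinal growth model: proportional reasoning (PRWMPAE.asinR), same model specification, with simple slopes by starting grade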
prop_longitudinal_nonz_gather = prop_longitudinal_nonz_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
prop_longitudinal_nonz_gather$T0SY1Y2G_StartGrade = as.factor(as.numeric(prop_longitudinal_nonz_gather$T0SY1Y2G_StartGrade))
prop_longitudinal_nonz_gather$time_num = as.numeric(as.factor(prop_longitudinal_nonz_gather$time))
model_long_propreasoning <- lmer(PRWMPAE.asinR ~ T0SY1Y2G_StartGrade*time_num + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = prop_longitudinal_nonz_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_long_propreasoning)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: PRWMPAE.asinR ~ T0SY1Y2G_StartGrade * time_num + T0SGENGender +
## T0PDEMOMaxParentEducation + (1 | SubjectID)
## Data: prop_longitudinal_nonz_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: -284.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.219 -0.423 0.106 0.561 2.707
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.0218 0.148
## Residual 0.0299 0.173
## Number of obs: 1114, groups: SubjectID, 311
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -1.58523 0.08303 458.17760 -19.09
## T0SY1Y2G_StartGrade0 0.12969 0.05528 996.00294 2.35
## T0SY1Y2G_StartGrade1 0.19948 0.05699 993.31368 3.50
## T0SY1Y2G_StartGrade2 0.39792 0.05568 992.34524 7.15
## T0SY1Y2G_StartGrade3 0.49856 0.06011 983.40230 8.29
## time_num 0.09687 0.01498 863.25565 6.47
## T0SGENGender 0.00352 0.01991 298.72289 0.18
## T0PDEMOMaxParentEducation 0.01980 0.00420 301.67952 4.72
## T0SY1Y2G_StartGrade0:time_num -0.00987 0.01758 853.59774 -0.56
## T0SY1Y2G_StartGrade1:time_num 0.00128 0.01804 848.25646 0.07
## T0SY1Y2G_StartGrade2:time_num -0.03266 0.01763 852.14221 -1.85
## T0SY1Y2G_StartGrade3:time_num -0.06806 0.01892 842.73014 -3.60
## Pr(>|t|)
## (Intercept) < 0.0000000000000002 ***
## T0SY1Y2G_StartGrade0 0.01916 *
## T0SY1Y2G_StartGrade1 0.00049 ***
## T0SY1Y2G_StartGrade2 0.00000000000171915 ***
## T0SY1Y2G_StartGrade3 0.00000000000000036 ***
## time_num 0.00000000016596876 ***
## T0SGENGender 0.85957
## T0PDEMOMaxParentEducation 0.00000363212220121 ***
## T0SY1Y2G_StartGrade0:time_num 0.57453
## T0SY1Y2G_StartGrade1:time_num 0.94345
## T0SY1Y2G_StartGrade2:time_num 0.06427 .
## T0SY1Y2G_StartGrade3:time_num 0.00034 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T0SY1Y2G_StG0 T0SY1Y2G_StG1 T0SY1Y2G_StG2 T0SY1Y2G_StG3
## T0SY1Y2G_StG0 -0.463
## T0SY1Y2G_StG1 -0.498 0.688
## T0SY1Y2G_StG2 -0.514 0.704 0.687
## T0SY1Y2G_StG3 -0.483 0.652 0.636 0.652
## time_num -0.449 0.646 0.628 0.644 0.596
## T0SGENGendr -0.299 0.007 0.004 -0.014 0.017
## T0PDEMOMxPE -0.749 -0.016 0.049 0.065 0.054
## T0SY1Y2G_SG0: 0.377 -0.766 -0.535 -0.548 -0.507
## T0SY1Y2G_SG1: 0.371 -0.536 -0.765 -0.534 -0.494
## T0SY1Y2G_SG2: 0.380 -0.549 -0.534 -0.764 -0.506
## T0SY1Y2G_SG3: 0.352 -0.511 -0.497 -0.509 -0.760
## tim_nm T0SGEN T0PDEM T0SY1Y2G_SG0: T0SY1Y2G_SG1: T0SY1Y2G_SG2:
## T0SY1Y2G_StG0
## T0SY1Y2G_StG1
## T0SY1Y2G_StG2
## T0SY1Y2G_StG3
## time_num
## T0SGENGendr -0.009
## T0PDEMOMxPE 0.028 -0.071
## T0SY1Y2G_SG0: -0.852 0.004 -0.015
## T0SY1Y2G_SG1: -0.830 0.009 -0.022 0.707
## T0SY1Y2G_SG2: -0.850 0.014 -0.025 0.724 0.705
## T0SY1Y2G_SG3: -0.791 0.006 -0.017 0.674 0.657 0.672
Anova(model_long_propreasoning, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: PRWMPAE.asinR
## F Df Df.res Pr(>F)
## (Intercept) 364.49 1 463 < 0.0000000000000002 ***
## T0SY1Y2G_StartGrade 28.29 4 983 < 0.0000000000000002 ***
## time_num 41.81 1 866 0.00000000017 ***
## T0SGENGender 0.03 1 302 0.86
## T0PDEMOMaxParentEducation 22.27 1 305 0.00000361770 ***
## T0SY1Y2G_StartGrade:time_num 6.60 4 831 0.00003126854 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
em=emtrends(model_long_propreasoning, pairwise ~ T0SY1Y2G_StartGrade, var="time_num", mult.name = "T0SY1Y2G_StartGrade")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## T0SY1Y2G_StartGrade time_num.trend SE df lower.CL upper.CL t.ratio
## -1 0.0969 0.01498 866 0.06747 0.1263 6.466
## 0 0.0870 0.00921 830 0.06892 0.1051 9.445
## 1 0.0982 0.01006 818 0.07840 0.1179 9.755
## 2 0.0642 0.00930 827 0.04596 0.0825 6.905
## 3 0.0288 0.01157 811 0.00609 0.0515 2.489
## p.value
## <.0001
## <.0001
## <.0001
## <.0001
## 0.0130
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade0 0.00987 0.0176 857 -0.02464
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade1 -0.00128 0.0180 851 -0.03670
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade2 0.03266 0.0176 855 -0.00195
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade3 0.06806 0.0189 846 0.03091
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade1 -0.01115 0.0136 823 -0.03793
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade2 0.02279 0.0131 828 -0.00290
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade3 0.05819 0.0148 818 0.02916
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade2 0.03394 0.0137 822 0.00705
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade3 0.06934 0.0153 814 0.03924
## T0SY1Y2G_StartGrade2 - T0SY1Y2G_StartGrade3 0.03540 0.0148 817 0.00626
## upper.CL t.ratio p.value
## 0.0444 0.561 0.5746
## 0.0341 -0.071 0.9435
## 0.0673 1.852 0.0643
## 0.1052 3.596 0.0003
## 0.0156 -0.818 0.4139
## 0.0485 1.741 0.0820
## 0.0872 3.935 0.0001
## 0.0608 2.477 0.0134
## 0.0994 4.522 <.0001
## 0.0645 2.385 0.0173
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
multiplot(graph_workingmemory_longitudinal_nonz, graph_prop_longitudinal_nonz, graph_mentalrotation2_longitudinal_nonz, graph_panamath_longitudinal_nz, graph_mentalrotation_longitudinal_nonz, cols = 3)
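### Exact calculation for older children (WJCRS): trajectory plot by starting grade, then the longitudinal growth model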
EC_longitudinal_nonz = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
#"T1WJCW","T2WJCW","T3WJCW","T4WJCW")
"T1WJCRS","T2WJCRS","T3WJCRS","T4WJCRS")
EC_longitudinal_nonz = EC_longitudinal_nonz[which(EC_longitudinal_nonz$SubjectID %in% T1_complete_class$SubjectID),]
EC_longitudinal_nonz_gather = gather(EC_longitudinal_nonz, "time", "WJCRS", -c("SubjectID","T0SY1Y2G_StartGrade"))
EC_longitudinal_nonz_gather_summary = summarySE(EC_longitudinal_nonz_gather, "WJCRS", c("time","T0SY1Y2G_StartGrade"), na.rm = T)
EC_longitudinal_nonz_gather_summary$time = as.factor(EC_longitudinal_nonz_gather_summary$time)
EC_longitudinal_nonz_gather_summary$T0SY1Y2G_StartGrade = as.factor(EC_longitudinal_nonz_gather_summary$T0SY1Y2G_StartGrade)
EC_longitudinal_nonz_gather_summary$task = "Exact Calculation for Older Children"
graph_EC_longitudinal_nonz = ggplot(EC_longitudinal_nonz_gather_summary, aes(x = time, y = WJCRS, group = T0SY1Y2G_StartGrade)) +
#geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (WJCRS-se), ymax = (WJCRS+se), fill = T0SY1Y2G_StartGrade),
alpha = .15, color = NA)+
geom_line(aes(color = T0SY1Y2G_StartGrade)) +
scale_color_manual(values = c("#74a9cf","#3690c0","#0570b0","#045a8d","#023858"))+
scale_fill_manual(values = c("#74a9cf","#3690c0","#0570b0","#045a8d","#023858"))+
facet_grid(.~task)+
# scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Exact Calculation for Older Children")+
xlab("Time")+
#scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme(legend.position="bottom",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_EC_longitudinal_nonz
EC_longitudinal_nonz_gather = EC_longitudinal_nonz_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
EC_longitudinal_nonz_gather$T0SY1Y2G_StartGrade = as.factor(as.numeric(EC_longitudinal_nonz_gather$T0SY1Y2G_StartGrade))
EC_longitudinal_nonz_gather$time_num = as.numeric(as.factor(EC_longitudinal_nonz_gather$time))
model_long_ecolder <- lmer(WJCRS ~ T0SY1Y2G_StartGrade*time_num + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = EC_longitudinal_nonz_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_long_ecolder)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## WJCRS ~ T0SY1Y2G_StartGrade * time_num + T0SGENGender + T0PDEMOMaxParentEducation +
## (1 | SubjectID)
## Data: EC_longitudinal_nonz_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 3977
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -6.336 -0.473 0.017 0.543 2.401
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 7.60 2.76
## Residual 5.86 2.42
## Number of obs: 775, groups: SubjectID, 271
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -0.76118 2.06141 708.72964 -0.37
## T0SY1Y2G_StartGrade1 6.10779 1.69688 658.31422 3.60
## T0SY1Y2G_StartGrade2 10.51143 1.69248 652.69883 6.21
## T0SY1Y2G_StartGrade3 15.84014 1.73107 672.30390 9.15
## time_num 2.86043 0.45114 569.19170 6.34
## T0SGENGender 0.00705 0.38674 253.22165 0.02
## T0PDEMOMaxParentEducation 0.18666 0.08263 254.74192 2.26
## T0SY1Y2G_StartGrade1:time_num -0.13167 0.47312 563.51681 -0.28
## T0SY1Y2G_StartGrade2:time_num 0.12989 0.47188 564.25345 0.28
## T0SY1Y2G_StartGrade3:time_num -0.12253 0.48346 561.83598 -0.25
## Pr(>|t|)
## (Intercept) 0.71205
## T0SY1Y2G_StartGrade1 0.00034 ***
## T0SY1Y2G_StartGrade2 0.00000000094 ***
## T0SY1Y2G_StartGrade3 < 0.0000000000000002 ***
## time_num 0.00000000047 ***
## T0SGENGender 0.98546
## T0PDEMOMaxParentEducation 0.02473 *
## T0SY1Y2G_StartGrade1:time_num 0.78088
## T0SY1Y2G_StartGrade2:time_num 0.78322
## T0SY1Y2G_StartGrade3:time_num 0.80002
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T0SY1Y2G_StG1 T0SY1Y2G_StG2 T0SY1Y2G_StG3 tim_nm T0SGEN
## T0SY1Y2G_StG1 -0.751
## T0SY1Y2G_StG2 -0.751 0.913
## T0SY1Y2G_StG3 -0.742 0.892 0.895
## time_num -0.743 0.927 0.929 0.908
## T0SGENGendr -0.203 -0.004 -0.017 0.005 -0.009
## T0PDEMOMxPE -0.559 0.004 0.007 0.010 -0.030 -0.109
## T0SY1Y2G_SG1: 0.710 -0.945 -0.886 -0.866 -0.953 0.006
## T0SY1Y2G_SG2: 0.707 -0.886 -0.948 -0.868 -0.956 0.012
## T0SY1Y2G_SG3: 0.695 -0.865 -0.867 -0.935 -0.933 0.004
## T0PDEM T0SY1Y2G_SG1: T0SY1Y2G_SG2:
## T0SY1Y2G_StG1
## T0SY1Y2G_StG2
## T0SY1Y2G_StG3
## time_num
## T0SGENGendr
## T0PDEMOMxPE
## T0SY1Y2G_SG1: 0.027
## T0SY1Y2G_SG2: 0.032 0.912
## T0SY1Y2G_SG3: 0.027 0.890 0.892
Anova(model_long_ecolder, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: WJCRS
## F Df Df.res Pr(>F)
## (Intercept) 0.14 1 711 0.712
## T0SY1Y2G_StartGrade 62.79 3 701 < 0.0000000000000002 ***
## time_num 40.16 1 576 0.00000000047 ***
## T0SGENGender 0.00 1 262 0.985
## T0PDEMOMaxParentEducation 5.10 1 263 0.025 *
## T0SY1Y2G_StartGrade:time_num 0.71 3 537 0.546
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
### StartGrade x time interaction is not significant for WJCRS (p = 0.546), so simple slopes are not computed here
# em=emtrends(model_long_ecolder, pairwise ~ T0SY1Y2G_StartGrade, var="time_num", mult.name = "T0SY1Y2G_StartGrade")
# summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
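### Exact calculation for younger children (PKC_PA): trajectory plot by starting grade, then the longitudinal growth model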
EC2_longitudinal_nonz = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1PKC_PA","T2PKC_PA","T3PKC_PA","T4PKC_PA")
EC2_longitudinal_nonz = EC2_longitudinal_nonz[which(EC2_longitudinal_nonz$SubjectID %in% T1_complete_class$SubjectID),]
EC2_longitudinal_nonz_gather = gather(EC2_longitudinal_nonz, "time", "PKC_PA", -c("SubjectID","T0SY1Y2G_StartGrade"))
EC2_longitudinal_nonz_gather_summary = summarySE(EC2_longitudinal_nonz_gather, "PKC_PA", c("time","T0SY1Y2G_StartGrade"), na.rm = T)
EC2_longitudinal_nonz_gather_summary$time = as.factor(EC2_longitudinal_nonz_gather_summary$time)
EC2_longitudinal_nonz_gather_summary$T0SY1Y2G_StartGrade = as.factor(EC2_longitudinal_nonz_gather_summary$T0SY1Y2G_StartGrade)
EC2_longitudinal_nonz_gather_summary$task = "Exact Calculation for Younger Children"
graph_EC2_longitudinal_nonz = ggplot(EC2_longitudinal_nonz_gather_summary, aes(x = time, y = PKC_PA, group = T0SY1Y2G_StartGrade)) +
#geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (PKC_PA-se), ymax = (PKC_PA+se), fill = T0SY1Y2G_StartGrade),
alpha = .15, color = NA)+
geom_line(aes(color = T0SY1Y2G_StartGrade)) +
scale_color_manual(values = c("#74a9cf","#3690c0","#0570b0","#045a8d","#023858"))+
scale_fill_manual(values = c("#74a9cf","#3690c0","#0570b0","#045a8d","#023858"))+
facet_grid(.~task)+
scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Exact Calculation for Younger Children")+
xlab("Time")+
#scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme(legend.position="bottom",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_EC2_longitudinal_nonz
multiplot(graph_EC2_longitudinal_nonz, graph_EC_longitudinal_nonz, cols = 2)
EC2_longitudinal_nonz_gather = EC2_longitudinal_nonz_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
EC2_longitudinal_nonz_gather$T0SY1Y2G_StartGrade = as.factor(as.numeric(EC2_longitudinal_nonz_gather$T0SY1Y2G_StartGrade))
EC2_longitudinal_nonz_gather$time_num = as.numeric(as.factor(EC2_longitudinal_nonz_gather$time))
model_long_ecyounger <- lmer(PKC_PA ~ T0SY1Y2G_StartGrade*time_num + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = EC2_longitudinal_nonz_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_long_ecyounger)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## PKC_PA ~ T0SY1Y2G_StartGrade * time_num + T0SGENGender + T0PDEMOMaxParentEducation +
## (1 | SubjectID)
## Data: EC2_longitudinal_nonz_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: -11.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.2859 -0.5355 -0.0862 0.4835 2.8767
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.0477 0.218
## Residual 0.0247 0.157
## Number of obs: 272, groups: SubjectID, 118
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -0.33840 0.16367 114.41809 -2.07
## T0SY1Y2G_StartGrade0 0.15107 0.06919 257.49519 2.18
## time_num 0.07694 0.01344 153.66516 5.72
## T0SGENGender 0.00162 0.04506 108.37372 0.04
## T0PDEMOMaxParentEducation 0.03023 0.00915 106.44731 3.30
## T0SY1Y2G_StartGrade0:time_num 0.03883 0.02901 153.51151 1.34
## Pr(>|t|)
## (Intercept) 0.0409 *
## T0SY1Y2G_StartGrade0 0.0299 *
## time_num 0.000000053 ***
## T0SGENGender 0.9714
## T0PDEMOMaxParentEducation 0.0013 **
## T0SY1Y2G_StartGrade0:time_num 0.1827
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T0SY1Y2G_StG0 tim_nm T0SGEN T0PDEM
## T0SY1Y2G_StG0 -0.194
## time_num -0.199 0.456
## T0SGENGendr -0.441 0.021 0.011
## T0PDEMOMxPE -0.857 -0.051 0.002 0.035
## T0SY1Y2G_SG0: 0.087 -0.699 -0.463 -0.004 0.005
Anova(model_long_ecyounger, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: PKC_PA
## F Df Df.res Pr(>F)
## (Intercept) 4.27 1 119 0.0409 *
## T0SY1Y2G_StartGrade 4.77 1 258 0.0299 *
## time_num 32.73 1 158 0.000000051 ***
## T0SGENGender 0.00 1 113 0.9714
## T0PDEMOMaxParentEducation 10.92 1 111 0.0013 **
## T0SY1Y2G_StartGrade:time_num 1.79 1 158 0.1829
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
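### Approximate calculation (ASWmac): trajectory plot by starting grade, then the longitudinal growth model and simple slopes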
AC_longitudinal_nonz = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1ASWmac","T2ASWmac","T3ASWmac","T4ASWmac")
AC_longitudinal_nonz = AC_longitudinal_nonz[which(AC_longitudinal_nonz$SubjectID %in% T1_complete_class$SubjectID),]
AC_longitudinal_nonz_gather = gather(AC_longitudinal_nonz, "time", "ASWmac", -c("SubjectID","T0SY1Y2G_StartGrade"))
AC_longitudinal_nonz_gather_summary = summarySE(AC_longitudinal_nonz_gather, "ASWmac", c("time","T0SY1Y2G_StartGrade"), na.rm = T)
AC_longitudinal_nonz_gather_summary$time = as.factor(AC_longitudinal_nonz_gather_summary$time)
AC_longitudinal_nonz_gather_summary$T0SY1Y2G_StartGrade = as.factor(AC_longitudinal_nonz_gather_summary$T0SY1Y2G_StartGrade)
AC_longitudinal_nonz_gather_summary$task = "Approximate Calculation"
graph_AC_longitudinal_nonz = ggplot(AC_longitudinal_nonz_gather_summary, aes(x = time, y = ASWmac, group = T0SY1Y2G_StartGrade)) +
#geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (ASWmac-se), ymax = (ASWmac+se), fill = T0SY1Y2G_StartGrade),
alpha = .15, color = NA)+
geom_line(aes(color = T0SY1Y2G_StartGrade)) +
scale_color_manual(values = c("#74a9cf","#3690c0","#0570b0","#045a8d","#023858"))+
scale_fill_manual(values = c("#74a9cf","#3690c0","#0570b0","#045a8d","#023858"))+
facet_grid(.~task)+
# scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Approximate Calculation")+
xlab("Time")+
#scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme(legend.position="bottom",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_AC_longitudinal_nonz
AC_longitudinal_nonz_gather = AC_longitudinal_nonz_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
AC_longitudinal_nonz_gather$T0SY1Y2G_StartGrade = as.factor(as.numeric(AC_longitudinal_nonz_gather$T0SY1Y2G_StartGrade))
AC_longitudinal_nonz_gather$time_num = as.numeric(as.factor(AC_longitudinal_nonz_gather$time))
model_long_ac <- lmer(ASWmac ~ T0SY1Y2G_StartGrade*time_num + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = AC_longitudinal_nonz_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_long_ac)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## ASWmac ~ T0SY1Y2G_StartGrade * time_num + T0SGENGender + T0PDEMOMaxParentEducation +
## (1 | SubjectID)
## Data: AC_longitudinal_nonz_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: -1282
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.6508 -0.6112 0.0126 0.6269 2.7489
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.00503 0.071
## Residual 0.01280 0.113
## Number of obs: 1069, groups: SubjectID, 311
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.36127 0.04721 527.33765 7.65
## T0SY1Y2G_StartGrade0 0.12744 0.03465 1044.11352 3.68
## T0SY1Y2G_StartGrade1 0.07647 0.03536 1041.60629 2.16
## T0SY1Y2G_StartGrade2 0.20878 0.03479 1042.96511 6.00
## T0SY1Y2G_StartGrade3 0.16817 0.03789 1043.10016 4.44
## time_num 0.04089 0.01013 864.75824 4.04
## T0SGENGender 0.01738 0.01075 295.21608 1.62
## T0PDEMOMaxParentEducation 0.00858 0.00227 299.31535 3.78
## T0SY1Y2G_StartGrade0:time_num -0.02936 0.01190 848.89667 -2.47
## T0SY1Y2G_StartGrade1:time_num 0.00222 0.01207 838.91950 0.18
## T0SY1Y2G_StartGrade2:time_num -0.04938 0.01188 843.72200 -4.16
## T0SY1Y2G_StartGrade3:time_num -0.02850 0.01284 834.43334 -2.22
## Pr(>|t|)
## (Intercept) 0.000000000000094 ***
## T0SY1Y2G_StartGrade0 0.00025 ***
## T0SY1Y2G_StartGrade1 0.03082 *
## T0SY1Y2G_StartGrade2 0.000000002693389 ***
## T0SY1Y2G_StartGrade3 0.000010039430217 ***
## time_num 0.000058943166806 ***
## T0SGENGender 0.10714
## T0PDEMOMaxParentEducation 0.00019 ***
## T0SY1Y2G_StartGrade0:time_num 0.01383 *
## T0SY1Y2G_StartGrade1:time_num 0.85403
## T0SY1Y2G_StartGrade2:time_num 0.000035789501215 ***
## T0SY1Y2G_StartGrade3:time_num 0.02674 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T0SY1Y2G_StG0 T0SY1Y2G_StG1 T0SY1Y2G_StG2 T0SY1Y2G_StG3
## T0SY1Y2G_StG0 -0.516
## T0SY1Y2G_StG1 -0.549 0.705
## T0SY1Y2G_StG2 -0.555 0.717 0.705
## T0SY1Y2G_StG3 -0.512 0.658 0.647 0.658
## time_num -0.526 0.706 0.693 0.704 0.646
## T0SGENGendr -0.283 0.006 0.006 -0.009 0.007
## T0PDEMOMxPE -0.707 -0.021 0.039 0.042 0.035
## T0SY1Y2G_SG0: 0.442 -0.830 -0.589 -0.599 -0.550
## T0SY1Y2G_SG1: 0.446 -0.592 -0.827 -0.591 -0.542
## T0SY1Y2G_SG2: 0.443 -0.602 -0.590 -0.827 -0.551
## T0SY1Y2G_SG3: 0.405 -0.557 -0.546 -0.555 -0.828
## tim_nm T0SGEN T0PDEM T0SY1Y2G_SG0: T0SY1Y2G_SG1: T0SY1Y2G_SG2:
## T0SY1Y2G_StG0
## T0SY1Y2G_StG1
## T0SY1Y2G_StG2
## T0SY1Y2G_StG3
## time_num
## T0SGENGendr -0.007
## T0PDEMOMxPE 0.013 -0.070
## T0SY1Y2G_SG0: -0.851 0.001 -0.001
## T0SY1Y2G_SG1: -0.839 0.002 -0.017 0.714
## T0SY1Y2G_SG2: -0.852 0.008 -0.006 0.725 0.715
## T0SY1Y2G_SG3: -0.789 0.013 -0.001 0.671 0.661 0.672
Anova(model_long_ac, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: ASWmac
## F Df Df.res Pr(>F)
## (Intercept) 58.54 1 534 0.000000000000094 ***
## T0SY1Y2G_StartGrade 11.91 4 1041 0.000000001836696 ***
## time_num 16.28 1 868 0.000059434699282 ***
## T0SGENGender 2.61 1 300 0.10716
## T0PDEMOMaxParentEducation 14.30 1 304 0.00019 ***
## T0SY1Y2G_StartGrade:time_num 9.81 4 809 0.000000094180766 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
em=emtrends(model_long_ac, pairwise ~ T0SY1Y2G_StartGrade, var="time_num", mult.name = "T0SY1Y2G_StartGrade")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## T0SY1Y2G_StartGrade time_num.trend SE df lower.CL upper.CL t.ratio
## -1 0.04089 0.01013 868 0.021000 0.06078 4.035
## 0 0.01153 0.00626 810 -0.000756 0.02381 1.842
## 1 0.04311 0.00657 782 0.030212 0.05601 6.560
## 2 -0.00849 0.00622 791 -0.020702 0.00371 -1.366
## 3 0.01239 0.00790 788 -0.003117 0.02789 1.568
## p.value
## 0.0001
## 0.0658
## <.0001
## 0.1724
## 0.1172
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade0 0.02936 0.01191 853 0.00599
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade1 -0.00222 0.01208 843 -0.02593
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade2 0.04938 0.01189 848 0.02605
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade3 0.02850 0.01285 838 0.00328
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade1 -0.03159 0.00907 795 -0.04940
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade2 0.02002 0.00882 801 0.00270
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade3 -0.00086 0.01008 797 -0.02064
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade2 0.05161 0.00905 786 0.03384
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade3 0.03072 0.01028 785 0.01055
## T0SY1Y2G_StartGrade2 - T0SY1Y2G_StartGrade3 -0.02088 0.01005 790 -0.04061
## upper.CL t.ratio p.value
## 0.05274 2.466 0.0139
## 0.02149 -0.184 0.8541
## 0.07272 4.154 <.0001
## 0.05372 2.218 0.0268
## -0.01377 -3.480 0.0005
## 0.03734 2.270 0.0235
## 0.01892 -0.085 0.9319
## 0.06937 5.703 <.0001
## 0.05090 2.990 0.0029
## -0.00115 -2.077 0.0381
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
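### Letter Word ID (LWIDWS): trajectory plot by starting grade, then the longitudinal growth model and simple slopes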
LetterWordID_longitudinal_nonz = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1LWIDWS","T2LWIDWS","T3LWIDWS","T4LWIDWS")
LetterWordID_longitudinal_nonz = LetterWordID_longitudinal_nonz[which(LetterWordID_longitudinal_nonz$SubjectID %in% T1_complete_class$SubjectID),]
LetterWordID_longitudinal_nonz_gather = gather(LetterWordID_longitudinal_nonz, "time", "LWIDWS", -c("SubjectID","T0SY1Y2G_StartGrade"))
LetterWordID_longitudinal_nonz_gather_summary = summarySE(LetterWordID_longitudinal_nonz_gather, "LWIDWS", c("time","T0SY1Y2G_StartGrade"), na.rm = T)
LetterWordID_longitudinal_nonz_gather_summary$time = as.factor(LetterWordID_longitudinal_nonz_gather_summary$time)
LetterWordID_longitudinal_nonz_gather_summary$T0SY1Y2G_StartGrade = as.factor(LetterWordID_longitudinal_nonz_gather_summary$T0SY1Y2G_StartGrade)
LetterWordID_longitudinal_nonz_gather_summary$task = "Letter Word ID"
graph_LetterWordID_longitudinal_nonz = ggplot(LetterWordID_longitudinal_nonz_gather_summary, aes(x = time, y = LWIDWS, group = T0SY1Y2G_StartGrade)) +
#geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (LWIDWS-se), ymax = (LWIDWS+se), fill = T0SY1Y2G_StartGrade),
alpha = .15, color = NA)+
geom_line(aes(color = T0SY1Y2G_StartGrade)) +
scale_color_manual(values = c("#74a9cf","#3690c0","#0570b0","#045a8d","#023858"))+
scale_fill_manual(values = c("#74a9cf","#3690c0","#0570b0","#045a8d","#023858"))+
facet_grid(.~task)+
# scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Letter Word ID")+
xlab("Time")+
#scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme(legend.position="bottom",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_LetterWordID_longitudinal_nonz
LetterWordID_longitudinal_nonz_gather = LetterWordID_longitudinal_nonz_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
LetterWordID_longitudinal_nonz_gather$T0SY1Y2G_StartGrade = as.factor(as.numeric(LetterWordID_longitudinal_nonz_gather$T0SY1Y2G_StartGrade))
LetterWordID_longitudinal_nonz_gather$time_num = as.numeric(as.factor(LetterWordID_longitudinal_nonz_gather$time))
model_long_lwid <- lmer(LWIDWS ~ T0SY1Y2G_StartGrade*time_num + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = LetterWordID_longitudinal_nonz_gather, control = lmerControl(optimizer = "bobyqa"))
summary(model_long_lwid)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## LWIDWS ~ T0SY1Y2G_StartGrade * time_num + T0SGENGender + T0PDEMOMaxParentEducation +
## (1 | SubjectID)
## Data: LetterWordID_longitudinal_nonz_gather
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 9040
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.192 -0.505 -0.008 0.511 3.094
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 566 23.8
## Residual 135 11.6
## Number of obs: 1064, groups: SubjectID, 311
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 290.966 10.719 337.356 27.15
## T0SY1Y2G_StartGrade0 29.103 5.717 509.004 5.09
## T0SY1Y2G_StartGrade1 75.337 5.927 511.054 12.71
## T0SY1Y2G_StartGrade2 123.973 5.796 513.980 21.39
## T0SY1Y2G_StartGrade3 148.932 6.328 512.242 23.54
## time_num 13.599 1.001 762.255 13.59
## T0SGENGender 1.904 2.816 305.180 0.68
## T0PDEMOMaxParentEducation 2.121 0.592 304.929 3.58
## T0SY1Y2G_StartGrade0:time_num 6.961 1.181 760.951 5.89
## T0SY1Y2G_StartGrade1:time_num 3.310 1.217 760.184 2.72
## T0SY1Y2G_StartGrade2:time_num -4.939 1.200 761.384 -4.12
## T0SY1Y2G_StartGrade3:time_num -8.307 1.303 760.906 -6.38
## Pr(>|t|)
## (Intercept) < 0.0000000000000002 ***
## T0SY1Y2G_StartGrade0 0.00000050240 ***
## T0SY1Y2G_StartGrade1 < 0.0000000000000002 ***
## T0SY1Y2G_StartGrade2 < 0.0000000000000002 ***
## T0SY1Y2G_StartGrade3 < 0.0000000000000002 ***
## time_num < 0.0000000000000002 ***
## T0SGENGender 0.49944
## T0PDEMOMaxParentEducation 0.00039 ***
## T0SY1Y2G_StartGrade0:time_num 0.00000000573 ***
## T0SY1Y2G_StartGrade1:time_num 0.00666 **
## T0SY1Y2G_StartGrade2:time_num 0.00004281987 ***
## T0SY1Y2G_StartGrade3:time_num 0.00000000031 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T0SY1Y2G_StG0 T0SY1Y2G_StG1 T0SY1Y2G_StG2 T0SY1Y2G_StG3
## T0SY1Y2G_StG0 -0.351
## T0SY1Y2G_StG1 -0.410 0.678
## T0SY1Y2G_StG2 -0.419 0.693 0.674
## T0SY1Y2G_StG3 -0.396 0.635 0.618 0.631
## time_num -0.224 0.412 0.398 0.407 0.373
## T0SGENGendr -0.329 0.017 0.018 -0.007 0.027
## T0PDEMOMxPE -0.805 -0.038 0.048 0.061 0.054
## T0SY1Y2G_SG0: 0.187 -0.490 -0.337 -0.345 -0.316
## T0SY1Y2G_SG1: 0.189 -0.339 -0.493 -0.335 -0.307
## T0SY1Y2G_SG2: 0.185 -0.344 -0.332 -0.494 -0.311
## T0SY1Y2G_SG3: 0.169 -0.317 -0.306 -0.313 -0.492
## tim_nm T0SGEN T0PDEM T0SY1Y2G_SG0: T0SY1Y2G_SG1: T0SY1Y2G_SG2:
## T0SY1Y2G_StG0
## T0SY1Y2G_StG1
## T0SY1Y2G_StG2
## T0SY1Y2G_StG3
## time_num
## T0SGENGendr 0.001
## T0PDEMOMxPE 0.004 -0.073
## T0SY1Y2G_SG0: -0.847 -0.002 0.000
## T0SY1Y2G_SG1: -0.823 -0.006 -0.006 0.697
## T0SY1Y2G_SG2: -0.834 0.003 -0.003 0.707 0.686
## T0SY1Y2G_SG3: -0.768 -0.001 0.000 0.651 0.632 0.641
Anova(model_long_lwid, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: LWIDWS
## F Df Df.res Pr(>F)
## (Intercept) 736.86 1 336 < 0.0000000000000002 ***
## T0SY1Y2G_StartGrade 249.75 4 509 < 0.0000000000000002 ***
## time_num 184.55 1 761 < 0.0000000000000002 ***
## T0SGENGender 0.46 1 304 0.49945
## T0PDEMOMaxParentEducation 12.85 1 304 0.00039 ***
## T0SY1Y2G_StartGrade:time_num 75.15 4 758 < 0.0000000000000002 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
em=emtrends(model_long_lwid, pairwise ~ T0SY1Y2G_StartGrade, var="time_num", mult.name = "T0SY1Y2G_StartGrade")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## T0SY1Y2G_StartGrade time_num.trend SE df lower.CL upper.CL t.ratio p.value
## -1 13.60 1.001 761 11.63 15.56 13.590 <.0001
## 0 20.56 0.628 756 19.33 21.79 32.760 <.0001
## 1 16.91 0.692 755 15.55 18.27 24.450 <.0001
## 2 8.66 0.662 758 7.36 9.96 13.080 <.0001
## 3 5.29 0.834 758 3.65 6.93 6.340 <.0001
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade0 -6.96 1.181 760 -9.28
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade1 -3.31 1.217 759 -5.70
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade2 4.94 1.200 760 2.58
## (T0SY1Y2G_StartGrade-1) - T0SY1Y2G_StartGrade3 8.31 1.303 760 5.75
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade1 3.65 0.934 756 1.82
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade2 11.90 0.912 757 10.11
## T0SY1Y2G_StartGrade0 - T0SY1Y2G_StartGrade3 15.27 1.044 757 13.22
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade2 8.25 0.957 756 6.37
## T0SY1Y2G_StartGrade1 - T0SY1Y2G_StartGrade3 11.62 1.083 757 9.49
## T0SY1Y2G_StartGrade2 - T0SY1Y2G_StartGrade3 3.37 1.065 758 1.28
## upper.CL t.ratio p.value
## -4.641 -5.891 <.0001
## -0.922 -2.721 0.0067
## 7.295 4.115 <.0001
## 10.865 6.376 <.0001
## 5.484 3.909 0.0001
## 13.690 13.045 <.0001
## 17.317 14.628 <.0001
## 10.128 8.616 <.0001
## 13.744 10.723 <.0001
## 5.459 3.164 0.0016
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
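### Check available columns (including scaled and transformed scores) before the class-based trajectory analyses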
names(spatial_profiles_database_atleast2tp)
## [1] "...1" "...2"
## [3] "SubjectID" "T0SY1Y2G_Y1Grade"
## [5] "T0SY1Y2G_Y2Grade" "T0SY1Y2G_StartGrade"
## [7] "T0SGENGender" "T0PDEMOMaxParentEducation"
## [9] "T0PDEMOIncomeCode" "T0PDEMOIncomeMid"
## [11] "T0PDEMORaceHispanic" "T1AWMADMSS_N"
## [13] "T2AWMADMSS_N" "T3AWMADMSS_N"
## [15] "T4AWMADMSS_N" "T1AWMADMRS_N"
## [17] "T2AWMADMRS_N" "T3AWMADMRS_N"
## [19] "T4AWMADMRS_N" "T1GMPA"
## [21] "T2GMPA" "T3GMPA"
## [23] "T4GMPA" "T1GPPA"
## [25] "T2GPPA" "T3GPPA"
## [27] "T4GPPA" "T1PRWMPAE"
## [29] "T2PRWMPAE" "T3PRWMPAE"
## [31] "T4PRWMPAE" "T1PMPA"
## [33] "T2PMPA" "T3PMPA"
## [35] "T4PMPA" "T1NL10W_PAE"
## [37] "T2NL10W_PAE" "T3NL10W_PAE"
## [39] "T4NL10W_PAE" "T1NL100W_PAE"
## [41] "T1NL1000W_PAE" "T2NL100W_PAE"
## [43] "T2NL1000W_PAE" "T3NL100W_PAE"
## [45] "T3NL1000W_PAE" "T4NL100W_PAE"
## [47] "T4NL1000W_PAE" "T1PKC_PA"
## [49] "T2PKC_PA" "T3PKC_PA"
## [51] "T4PKC_PA" "T1WJCW"
## [53] "T2WJCW" "T3WJCW"
## [55] "T4WJCW" "T1WJCRS"
## [57] "T2WJCRS" "T3WJCRS"
## [59] "T4WJCRS" "T1ASWmac"
## [61] "T2ASWmac" "T3ASWmac"
## [63] "T4ASWmac" "T1LWIDWS"
## [65] "T2LWIDWS" "T3LWIDWS"
## [67] "T4LWIDWS" "GMPA_na"
## [69] "GPPA_na" "AWMADMSS_na"
## [71] "PRWM_na" "PKC_na"
## [73] "WJCW_na" "ASW_na"
## [75] "PMPA_na" "LWID_na"
## [77] "t1" "t2"
## [79] "t3" "t4"
## [81] "T1PRWMPAE.asin" "T2PRWMPAE.asin"
## [83] "T3PRWMPAE.asin" "T4PRWMPAE.asin"
## [85] "T1AWMADMSS_N.s" "T2AWMADMSS_N.s"
## [87] "T1GMPA.s" "T2GMPA.s"
## [89] "T1GPPA.s" "T2GPPA.s"
## [91] "t1.s" "t2.s"
## [93] "T1PRWMPAE.asin.s" "T2PRWMPAE.asin.s"
## [95] "T1PMPA.s" "T2PMPA.s"
## [97] "T1WJCW.s" "T2WJCW.s"
## [99] "T1PKC_PA.s" "T2PKC_PA.s"
## [101] "T1ASWmac.s" "T2ASWmac.s"
## [103] "T1LWIDWS.s" "T2LWIDWS.s"
## [105] "T3AWMADMSS_N.s" "T4AWMADMSS_N.s"
## [107] "T3GPPA.s" "T4GPPA.s"
## [109] "T3GMPA.s" "T4GMPA.s"
## [111] "t3.s" "t4.s"
## [113] "T3PRWMPAE.asin.s" "T4PRWMPAE.asin.s"
## [115] "T3PMPA.s" "T4PMPA.s"
## [117] "T3WJCW.s" "T4WJCW.s"
## [119] "T3PKC_PA.s" "T4PKC_PA.s"
## [121] "T3ASWmac.s" "T4ASWmac.s"
## [123] "T3LWIDWS.s" "T4LWIDWS.s"
## [125] "T1PRWMPAE.asinR" "T2PRWMPAE.asinR"
## [127] "T3PRWMPAE.asinR" "T4PRWMPAE.asinR"
## [129] "T1PRWMPAE.asin.sR" "T2PRWMPAE.asin.sR"
## [131] "T3PRWMPAE.asin.sR" "T4PRWMPAE.asin.sR"
## [133] "T1MR.s" "T2MR.s"
## [135] "T3MR.s" "T4MR.s"
## [137] "T1EC.s" "T2EC.s"
## [139] "T3EC.s" "T4EC.s"
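### Panamath (PMPA.s) trajectories by class (Low Performance, Low Mental Rotation, High Performance): growth model with random intercepts and slopes for time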
panamath_longitudinal_profiles = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1PMPA.s","T2PMPA.s","T3PMPA.s","T4PMPA.s")
panamath_longitudinal_profiles_gather = gather(panamath_longitudinal_profiles, "time", "PMPA.s", -c("SubjectID","T0SY1Y2G_StartGrade"))
panamath_longitudinal_profiles_gather$SubjectID = as.factor(as.character(panamath_longitudinal_profiles_gather$SubjectID))
panamath_longitudinal_profiles_gather = panamath_longitudinal_profiles_gather %>%
left_join(T1_complete_class[c("SubjectID","Class")], by ="SubjectID")
panamath_longitudinal_gather_summary = summarySE(panamath_longitudinal_profiles_gather, "PMPA.s", c("time","Class"), na.rm = T)
panamath_longitudinal_gather_summary$time = as.factor(panamath_longitudinal_gather_summary$time)
panamath_longitudinal_gather_summary$Class = as.factor(panamath_longitudinal_gather_summary$Class)
panamath_longitudinal_gather_summary$task = "Panamath"
panamath_longitudinal_gather1 = subset(panamath_longitudinal_profiles_gather, !is.na(PMPA.s))
panamath_longitudinal_gather1$timenum = as.numeric(as.factor(panamath_longitudinal_gather1$time))
panamath_longitudinal_gather1$Class = as.factor(panamath_longitudinal_gather1$Class)
panamath_longitudinal_gather1 = panamath_longitudinal_gather1 %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
panamath_model <- lmer(PMPA.s ~ Class * (timenum) + T0SGENGender + T0PDEMOMaxParentEducation + T0SY1Y2G_StartGrade +
(1 + timenum|SubjectID), data = subset(panamath_longitudinal_gather1, !is.na(Class)), control = lmerControl(optimizer = "bobyqa"))
summary(panamath_model)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## PMPA.s ~ Class * (timenum) + T0SGENGender + T0PDEMOMaxParentEducation +
## T0SY1Y2G_StartGrade + (1 + timenum | SubjectID)
## Data: subset(panamath_longitudinal_gather1, !is.na(Class))
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 2372
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.397 -0.472 0.113 0.579 2.458
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## SubjectID (Intercept) 0.1180 0.343
## timenum 0.0157 0.125 0.44
## Residual 0.4440 0.666
## Number of obs: 974, groups: SubjectID, 304
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -1.7046 0.2994 357.7728 -5.69
## ClassLow\nMental Rotation 1.5755 0.1785 271.0390 8.83
## ClassHigh\nPerformance 2.0778 0.1748 270.9161 11.89
## timenum 0.3408 0.0563 263.0555 6.05
## T0SGENGender -0.0582 0.0758 288.4794 -0.77
## T0PDEMOMaxParentEducation 0.0156 0.0160 289.5531 0.97
## T0SY1Y2G_StartGrade -0.0120 0.0304 294.9337 -0.39
## ClassLow\nMental Rotation:timenum -0.3333 0.0658 266.2229 -5.07
## ClassHigh\nPerformance:timenum -0.4308 0.0641 266.0611 -6.72
## Pr(>|t|)
## (Intercept) 0.00000002592 ***
## ClassLow\nMental Rotation < 0.0000000000000002 ***
## ClassHigh\nPerformance < 0.0000000000000002 ***
## timenum 0.00000000497 ***
## T0SGENGender 0.44
## T0PDEMOMaxParentEducation 0.33
## T0SY1Y2G_StartGrade 0.69
## ClassLow\nMental Rotation:timenum 0.00000075284 ***
## ClassHigh\nPerformance:timenum 0.00000000011 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) ClsLMR ClssHP timenm T0SGEN T0PDEM T0SY1Y ClLMR:
## ClssLwMntlR -0.441
## ClssHghPrfr -0.378 0.760
## timenum -0.381 0.638 0.652
## T0SGENGendr -0.359 0.068 0.038 -0.009
## T0PDEMOMxPE -0.758 -0.031 -0.105 0.007 -0.050
## T0SY1Y2G_SG -0.219 -0.019 -0.067 -0.020 0.042 0.158
## ClssLMRttn: 0.319 -0.741 -0.559 -0.857 0.017 -0.002 0.020
## ClssHPrfrm: 0.335 -0.561 -0.739 -0.878 0.001 -0.002 0.002 0.752
Anova(panamath_model, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: PMPA.s
## F Df Df.res Pr(>F)
## (Intercept) 32.23 1 368 0.00000002783 ***
## Class 70.59 2 271 < 0.0000000000000002 ***
## timenum 36.53 1 252 0.00000000538 ***
## T0SGENGender 0.58 1 294 0.45
## T0PDEMOMaxParentEducation 0.94 1 295 0.33
## T0SY1Y2G_StartGrade 0.15 1 300 0.69
## Class:timenum 22.51 2 259 0.00000000096 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot(ggpredict(panamath_model, terms=c("timenum","Class")))
emeans=emmeans(panamath_model, pairwise ~ Class|timenum, mult.name = "Class", at=list(timenum=c(1,2,3,4)))
summary(emeans, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## timenum = 1:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -1.2319 0.1184 278 -1.4649 -0.9988 -10.407 <.0001
## Low\nMental Rotation 0.0103 0.0698 286 -0.1271 0.1478 0.148 0.8824
## High\nPerformance 0.4152 0.0622 277 0.2926 0.5377 6.670 <.0001
##
## timenum = 2:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.8910 0.1034 288 -1.0945 -0.6876 -8.621 <.0001
## Low\nMental Rotation 0.0178 0.0614 297 -0.1030 0.1387 0.290 0.7719
## High\nPerformance 0.3252 0.0544 290 0.2182 0.4322 5.983 <.0001
##
## timenum = 3:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.5502 0.1171 281 -0.7807 -0.3197 -4.698 <.0001
## Low\nMental Rotation 0.0253 0.0705 301 -0.1135 0.1641 0.359 0.7200
## High\nPerformance 0.2352 0.0626 293 0.1119 0.3585 3.755 0.0002
##
## timenum = 4:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.2094 0.1520 272 -0.5087 0.0898 -1.378 0.1694
## Low\nMental Rotation 0.0328 0.0921 289 -0.1485 0.2141 0.356 0.7221
## High\nPerformance 0.1452 0.0824 282 -0.0169 0.3073 1.763 0.0790
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## timenum = 1:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -1.242 0.1372 279 -1.512 -0.9722
## Low\nPerformance - High\nPerformance -1.647 0.1346 279 -1.912 -1.3822
## Low\nMental Rotation - High\nPerformance -0.405 0.0940 282 -0.590 -0.2199
## t.ratio p.value
## -9.057 <.0001
## -12.241 <.0001
## -4.309 <.0001
##
## timenum = 2:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.909 0.1200 290 -1.145 -0.6728
## Low\nPerformance - High\nPerformance -1.216 0.1177 289 -1.448 -0.9846
## Low\nMental Rotation - High\nPerformance -0.307 0.0825 293 -0.470 -0.1449
## t.ratio p.value
## -7.576 <.0001
## -10.333 <.0001
## -3.724 0.0002
##
## timenum = 3:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.576 0.1365 286 -0.844 -0.3068
## Low\nPerformance - High\nPerformance -0.785 0.1336 285 -1.048 -0.5225
## Low\nMental Rotation - High\nPerformance -0.210 0.0949 300 -0.397 -0.0232
## t.ratio p.value
## -4.216 <.0001
## -5.880 <.0001
## -2.212 0.0277
##
## timenum = 4:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.242 0.1776 277 -0.592 0.1075
## Low\nPerformance - High\nPerformance -0.355 0.1734 276 -0.696 -0.0132
## Low\nMental Rotation - High\nPerformance -0.112 0.1240 289 -0.357 0.1317
## t.ratio p.value
## -1.363 0.1739
## -2.045 0.0418
## -0.906 0.3656
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
em=emtrends(panamath_model, pairwise ~ Class, var="timenum", mult.name = "Class")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## Class timenum.trend SE df lower.CL upper.CL t.ratio
## Low\nPerformance 0.3408 0.0564 252 0.2298 0.4519 6.044
## Low\nMental Rotation 0.0075 0.0340 264 -0.0594 0.0744 0.220
## High\nPerformance -0.0900 0.0307 265 -0.1505 -0.0295 -2.927
## p.value
## <.0001
## 0.8258
## 0.0037
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation 0.3333 0.0658 255 0.20368 0.463
## Low\nPerformance - High\nPerformance 0.4308 0.0642 255 0.30438 0.557
## Low\nMental Rotation - High\nPerformance 0.0975 0.0458 265 0.00725 0.188
## t.ratio p.value
## 5.063 <.0001
## 6.710 <.0001
## 2.127 0.0343
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
######
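### Model-predicted Panamath trajectories by class (ggpredict): relabel groups, then plot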
graph_panamath_model = ggpredict(panamath_model, terms=c("timenum","Class"))
levels(graph_panamath_model$group)[levels(graph_panamath_model$group ) == "2"] <- "Low\nPerformance"
levels(graph_panamath_model$group)[levels(graph_panamath_model$group ) == "1"] <- "Low\nMental Rotation"
levels(graph_panamath_model$group)[levels(graph_panamath_model$group ) == "3"] <- "High\nPerformance"
graph_panamath_model$group = factor(graph_panamath_model$group, levels=c("Low\nPerformance",
"Low\nMental Rotation", "High\nPerformance"))
graph_panamath_model$x = as.factor(as.numeric(graph_panamath_model$x))
graph_panamath_model$title = "AMS\nPrecision"
graph_panamath_growth = ggplot(graph_panamath_model, aes(x =as.numeric(x), y = predicted, colour = group)) +
geom_hline(yintercept = .0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = group),
alpha = .15, color = NA)+
geom_line() +
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Predicted (Standardized Scores)")+
xlab("Time")+
scale_color_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_x_continuous(breaks=c(1,2,3,4), labels = c("T1","T2","T3","T4"))+
scale_y_continuous(breaks = seq(-2.5,1.5,.5), limits = c(-2.7,1.5))+
facet_grid(.~title)+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_panamath_growth
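### Observed class means (+/- 1 SE) of Panamath by timepoint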
graph_panamath_longitudinal = ggplot(subset(panamath_longitudinal_gather_summary,!is.na(Class)), aes(x = time, y = PMPA.s, group = Class)) +
geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (PMPA.s-se), ymax = (PMPA.s+se), fill = Class),
alpha = .15, color = NA)+
scale_color_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
geom_line(aes(color = Class)) +
scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme_bw()+
ylab("Panamath")+
xlab("Time")+
facet_grid(.~task)+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_panamath_longitudinal
multiplot(graph_panamath_longitudinal,graph_panamath_growth, cols=2)
### Mental Rotation: reshape T1-T4 standardized scores (MR.s) to long format, attach class membership, and model growth by class
mentalrotation_longitudinal = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1MR.s","T2MR.s","T3MR.s","T4MR.s")
mentalrotation_longitudinal_gather = gather(mentalrotation_longitudinal, "time", "MR.s", -c("SubjectID","T0SY1Y2G_StartGrade"))
mentalrotation_longitudinal_gather$SubjectID = as.factor(mentalrotation_longitudinal_gather$SubjectID)
mentalrotation_longitudinal_gather = mentalrotation_longitudinal_gather %>%
left_join(T1_complete_class[c("SubjectID","Class")], by ="SubjectID")
mentalrotation_longitudinal_gather_summary = summarySE(mentalrotation_longitudinal_gather, "MR.s", c("time","Class"), na.rm = T)
mentalrotation_longitudinal_gather_summary$time = as.factor(mentalrotation_longitudinal_gather_summary$time)
mentalrotation_longitudinal_gather_summary$Class = as.factor(mentalrotation_longitudinal_gather_summary$Class)
mentalrotation_longitudinal_gather_summary$task = "Mental Rotation"
mentalrotation_longitudinal_gather1 = subset(mentalrotation_longitudinal_gather, !is.na(MR.s))
mentalrotation_longitudinal_gather1$timenum = as.numeric(as.factor(mentalrotation_longitudinal_gather1$time))
mentalrotation_longitudinal_gather1$Class = as.factor(mentalrotation_longitudinal_gather1$Class)
mentalrotation_longitudinal_gather1 = mentalrotation_longitudinal_gather1 %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
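### Mental Rotation growth model: Class x time; random intercept only (no random time slope)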
mentalrotation_model <- lmer(MR.s ~ Class * (timenum) + T0SGENGender + T0PDEMOMaxParentEducation + T0SY1Y2G_StartGrade +
(1|SubjectID), data = subset(mentalrotation_longitudinal_gather1, !is.na(Class)), control = lmerControl(optimizer = "bobyqa"))
summary(mentalrotation_model)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: MR.s ~ Class * (timenum) + T0SGENGender + T0PDEMOMaxParentEducation +
## T0SY1Y2G_StartGrade + (1 | SubjectID)
## Data: subset(mentalrotation_longitudinal_gather1, !is.na(Class))
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 2593
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.569 -0.575 0.080 0.580 3.902
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.262 0.512
## Residual 0.415 0.644
## Number of obs: 1122, groups: SubjectID, 304
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -0.79321 0.28436 417.89284 -2.79
## ClassLow\nMental Rotation -0.42321 0.17521 993.13164 -2.42
## ClassHigh\nPerformance 1.44322 0.17149 989.23130 8.42
## timenum 0.04295 0.04662 824.07560 0.92
## T0SGENGender 0.14576 0.07167 299.05485 2.03
## T0PDEMOMaxParentEducation 0.00821 0.01506 297.27995 0.54
## T0SY1Y2G_StartGrade -0.06087 0.02865 302.35492 -2.12
## ClassLow\nMental Rotation:timenum 0.13426 0.05424 826.65704 2.48
## ClassHigh\nPerformance:timenum -0.17538 0.05296 828.41898 -3.31
## Pr(>|t|)
## (Intercept) 0.00552 **
## ClassLow\nMental Rotation 0.01590 *
## ClassHigh\nPerformance < 0.0000000000000002 ***
## timenum 0.35721
## T0SGENGender 0.04284 *
## T0PDEMOMaxParentEducation 0.58637
## T0SY1Y2G_StartGrade 0.03441 *
## ClassLow\nMental Rotation:timenum 0.01352 *
## ClassHigh\nPerformance:timenum 0.00097 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) ClsLMR ClssHP timenm T0SGEN T0PDEM T0SY1Y ClLMR:
## ClssLwMntlR -0.464
## ClssHghPrfr -0.396 0.761
## timenum -0.409 0.661 0.675
## T0SGENGendr -0.352 0.078 0.042 -0.003
## T0PDEMOMxPE -0.743 -0.031 -0.108 0.003 -0.062
## T0SY1Y2G_SG -0.229 0.007 -0.051 0.003 0.033 0.159
## ClssLMRttn: 0.349 -0.765 -0.580 -0.860 0.009 -0.003 0.000
## ClssHPrfrm: 0.355 -0.582 -0.764 -0.880 0.000 0.006 -0.011 0.757
Anova(mentalrotation_model, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: MR.s
## F Df Df.res Pr(>F)
## (Intercept) 7.78 1 416 0.0055 **
## Class 127.66 2 989 < 0.0000000000000002 ***
## timenum 0.85 1 823 0.3572
## T0SGENGender 4.14 1 298 0.0429 *
## T0PDEMOMaxParentEducation 0.30 1 296 0.5864
## T0SY1Y2G_StartGrade 4.51 1 301 0.0344 *
## Class:timenum 34.48 2 831 0.0000000000000041 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot(ggpredict(mentalrotation_model, terms=c("timenum","Class")))
emeans=emmeans(mentalrotation_model, pairwise ~ Class|timenum, mult.name = "Class", at=list(timenum=c(1,2,3,4)))
summary(emeans, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## timenum = 1:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.474 0.1191 603 -0.708 -0.2404 -3.982 0.0001
## Low\nMental Rotation -0.763 0.0702 599 -0.901 -0.6253 -10.866 <.0001
## High\nPerformance 0.793 0.0629 608 0.670 0.9170 12.617 <.0001
##
## timenum = 2:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.431 0.0996 320 -0.627 -0.2355 -4.332 <.0001
## Low\nMental Rotation -0.586 0.0591 324 -0.702 -0.4698 -9.922 <.0001
## High\nPerformance 0.661 0.0527 327 0.557 0.7648 12.538 <.0001
##
## timenum = 3:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.388 0.0999 325 -0.585 -0.1918 -3.886 0.0001
## Low\nMental Rotation -0.409 0.0598 339 -0.526 -0.2911 -6.832 <.0001
## High\nPerformance 0.529 0.0536 343 0.423 0.6340 9.872 <.0001
##
## timenum = 4:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.345 0.1200 616 -0.581 -0.1097 -2.878 0.0041
## Low\nMental Rotation -0.232 0.0722 637 -0.373 -0.0899 -3.208 0.0014
## High\nPerformance 0.396 0.0649 649 0.269 0.5238 6.101 <.0001
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## timenum = 1:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation 0.2889 0.1382 602 0.0175 0.560
## Low\nPerformance - High\nPerformance -1.2678 0.1354 599 -1.5338 -1.002
## Low\nMental Rotation - High\nPerformance -1.5568 0.0948 598 -1.7429 -1.371
## t.ratio p.value
## 2.091 0.0370
## -9.361 <.0001
## -16.431 <.0001
##
## timenum = 2:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation 0.1547 0.1157 321 -0.0729 0.382
## Low\nPerformance - High\nPerformance -1.0925 0.1135 321 -1.3158 -0.869
## Low\nMental Rotation - High\nPerformance -1.2471 0.0798 325 -1.4040 -1.090
## t.ratio p.value
## 1.337 0.1821
## -9.622 <.0001
## -15.639 <.0001
##
## timenum = 3:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation 0.0204 0.1164 328 -0.2086 0.249
## Low\nPerformance - High\nPerformance -0.9171 0.1142 328 -1.1418 -0.692
## Low\nMental Rotation - High\nPerformance -0.9375 0.0809 340 -1.0966 -0.778
## t.ratio p.value
## 0.175 0.8609
## -8.029 <.0001
## -11.590 <.0001
##
## timenum = 4:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.1138 0.1400 622 -0.3888 0.161
## Low\nPerformance - High\nPerformance -0.7417 0.1371 618 -1.0111 -0.472
## Low\nMental Rotation - High\nPerformance -0.6279 0.0976 639 -0.8195 -0.436
## t.ratio p.value
## -0.813 0.4165
## -5.408 <.0001
## -6.433 <.0001
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
em=emtrends(mentalrotation_model, pairwise ~ Class, var="timenum", mult.name = "Class")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## Class timenum.trend SE df lower.CL upper.CL t.ratio
## Low\nPerformance 0.043 0.0466 823 -0.0486 0.1345 0.921
## Low\nMental Rotation 0.177 0.0277 833 0.1228 0.2316 6.391
## High\nPerformance -0.132 0.0251 842 -0.1817 -0.0831 -5.270
## p.value
## 0.3572
## <.0001
## <.0001
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.134 0.0542 826 -0.2407 -0.0278
## Low\nPerformance - High\nPerformance 0.175 0.0530 827 0.0714 0.2793
## Low\nMental Rotation - High\nPerformance 0.310 0.0374 837 0.2362 0.3831
## t.ratio p.value
## -2.475 0.0135
## 3.311 0.0010
## 8.274 <.0001
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
######
graph_mr_model = ggpredict(mentalrotation_model, terms=c("timenum","Class"))
levels(graph_mr_model$group)[levels(graph_mr_model$group ) == "2"] <- "Low\nPerformance"
levels(graph_mr_model$group)[levels(graph_mr_model$group ) == "1"] <- "Low\nMental Rotation"
levels(graph_mr_model$group)[levels(graph_mr_model$group ) == "3"] <- "High\nPerformance"
graph_mr_model$group = factor(graph_mr_model$group, levels=c("Low\nPerformance",
"Low\nMental Rotation", "High\nPerformance"))
graph_mr_model$x = as.factor(as.numeric(graph_mr_model$x))
graph_mr_model$title = "Mental\nRotation"
graph_mr_growth = ggplot(graph_mr_model, aes(x =as.numeric(x), y = predicted, colour = group)) +
geom_hline(yintercept = .0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = group),
alpha = .15, color = NA)+
geom_line() +
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Predicted (Standardized Scores)")+
xlab("Time")+
scale_color_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_x_continuous(breaks=c(1,2,3,4), labels = c("T1","T2","T3","T4"))+
scale_y_continuous(breaks = seq(-2.5,1.5,.5), limits = c(-2.7,1.5))+
facet_grid(.~title)+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_mr_growth
multiplot(graph_panamath_growth, graph_mr_growth, cols=2)
graph_mentalrotation_longitudinal = ggplot(subset(mentalrotation_longitudinal_gather_summary,!is.na(Class)), aes(x = time, y = MR.s, group = Class)) +
geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (MR.s-se), ymax = (MR.s+se), fill = Class),
alpha = .15, color = NA)+
geom_line(aes(color = Class)) +
scale_color_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
facet_grid(.~task)+
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Mental Rotation")+
xlab("Time")+
scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_mentalrotation_longitudinal
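### Working memory: reshape T1-T4 standardized scores (AWMADMSS_N.s) to long format, attach class membership, and model growth by class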
workingmemory_longitudinal = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1AWMADMSS_N.s","T2AWMADMSS_N.s","T3AWMADMSS_N.s","T4AWMADMSS_N.s")
workingmemory_longitudinal_gather = gather(workingmemory_longitudinal, "time", "AWMADMSS_N.s", -c("SubjectID","T0SY1Y2G_StartGrade"))
workingmemory_longitudinal_gather$SubjectID = as.factor(workingmemory_longitudinal_gather$SubjectID)
workingmemory_longitudinal_gather = workingmemory_longitudinal_gather %>%
left_join(T1_complete_class[c("SubjectID","Class")], by ="SubjectID")
workingmemory_longitudinal_gather_summary = summarySE(workingmemory_longitudinal_gather, "AWMADMSS_N.s", c("time","Class"), na.rm = T)
workingmemory_longitudinal_gather_summary$time = as.factor(workingmemory_longitudinal_gather_summary$time)
workingmemory_longitudinal_gather_summary$Class = as.factor(workingmemory_longitudinal_gather_summary$Class)
workingmemory_longitudinal_gather_summary$task = "Working Memory"
workingmemory_longitudinal_gather1 = subset(workingmemory_longitudinal_gather, !is.na(AWMADMSS_N.s))
workingmemory_longitudinal_gather1$timenum = as.numeric(as.factor(workingmemory_longitudinal_gather1$time))
workingmemory_longitudinal_gather1$Class = as.factor(workingmemory_longitudinal_gather1$Class)
workingmemory_longitudinal_gather1 = workingmemory_longitudinal_gather1 %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
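### Working memory growth model: Class x time; random intercept and time slope per child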
workingmemory_model <- lmer(AWMADMSS_N.s ~ Class * (timenum)+ T0SGENGender +T0SY1Y2G_StartGrade + T0PDEMOMaxParentEducation +
(1+timenum|SubjectID), data = subset(workingmemory_longitudinal_gather1, !is.na(Class)), control = lmerControl(optimizer = "bobyqa"))
summary(workingmemory_model)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## AWMADMSS_N.s ~ Class * (timenum) + T0SGENGender + T0SY1Y2G_StartGrade +
## T0PDEMOMaxParentEducation + (1 + timenum | SubjectID)
## Data: subset(workingmemory_longitudinal_gather1, !is.na(Class))
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 2767
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.6242 -0.5798 -0.0123 0.5804 2.7596
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## SubjectID (Intercept) 0.3679 0.607
## timenum 0.0175 0.132 -0.36
## Residual 0.5278 0.726
## Number of obs: 1068, groups: SubjectID, 304
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -1.7914 0.3297 396.6165 -5.43
## ClassLow\nMental Rotation 0.7448 0.2120 292.7273 3.51
## ClassHigh\nPerformance 1.2754 0.2080 296.6143 6.13
## timenum 0.1362 0.0596 287.3066 2.28
## T0SGENGender -0.0133 0.0811 292.9427 -0.16
## T0SY1Y2G_StartGrade -0.0192 0.0325 297.7092 -0.59
## T0PDEMOMaxParentEducation 0.0622 0.0171 292.4053 3.64
## ClassLow\nMental Rotation:timenum -0.1155 0.0689 284.9465 -1.68
## ClassHigh\nPerformance:timenum -0.1817 0.0674 286.2074 -2.69
## Pr(>|t|)
## (Intercept) 0.0000000971 ***
## ClassLow\nMental Rotation 0.00051 ***
## ClassHigh\nPerformance 0.0000000028 ***
## timenum 0.02310 *
## T0SGENGender 0.86963
## T0SY1Y2G_StartGrade 0.55413
## T0PDEMOMaxParentEducation 0.00032 ***
## ClassLow\nMental Rotation:timenum 0.09503 .
## ClassHigh\nPerformance:timenum 0.00746 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) ClsLMR ClssHP timenm T0SGEN T0SY1Y T0PDEM ClLMR:
## ClssLwMntlR -0.491
## ClssHghPrfr -0.428 0.775
## timenum -0.449 0.698 0.711
## T0SGENGendr -0.348 0.074 0.042 0.002
## T0SY1Y2G_SG -0.229 0.016 -0.045 0.010 0.033
## T0PDEMOMxPE -0.725 -0.033 -0.107 -0.001 -0.060 0.161
## ClssLMRttn: 0.385 -0.795 -0.615 -0.865 0.005 -0.009 0.002
## ClssHPrfrm: 0.390 -0.618 -0.795 -0.884 -0.004 -0.015 0.013 0.765
Anova(workingmemory_model, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: AWMADMSS_N.s
## F Df Df.res Pr(>F)
## (Intercept) 29.35 1 404 0.0000001038 ***
## Class 20.69 2 297 0.0000000038 ***
## timenum 5.21 1 284 0.02320 *
## T0SGENGender 0.03 1 296 0.87010
## T0SY1Y2G_StartGrade 0.35 1 301 0.55555
## T0PDEMOMaxParentEducation 13.17 1 295 0.00034 ***
## Class:timenum 3.80 2 280 0.02342 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot(ggpredict(workingmemory_model, terms=c("timenum","Class")))
emeans=emmeans(workingmemory_model, pairwise ~ Class|timenum, mult.name = "Class", at=list(timenum=c(1,2,3,4)))
summary(emeans, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## timenum = 1:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.7760 0.1416 313 -1.0546 -0.4975 -5.481 <.0001
## Low\nMental Rotation -0.1467 0.0804 290 -0.3048 0.0115 -1.825 0.0691
## High\nPerformance 0.3177 0.0722 294 0.1755 0.4598 4.398 <.0001
##
## timenum = 2:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.6399 0.1143 307 -0.8647 -0.4150 -5.599 <.0001
## Low\nMental Rotation -0.1259 0.0666 294 -0.2570 0.0051 -1.891 0.0596
## High\nPerformance 0.2722 0.0595 296 0.1552 0.3892 4.578 <.0001
##
## timenum = 3:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.5037 0.1149 278 -0.7298 -0.2776 -4.385 <.0001
## Low\nMental Rotation -0.1052 0.0694 298 -0.2417 0.0313 -1.517 0.1303
## High\nPerformance 0.2267 0.0619 296 0.1048 0.3486 3.660 0.0003
##
## timenum = 4:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.3675 0.1430 272 -0.6490 -0.0860 -2.570 0.0107
## Low\nMental Rotation -0.0845 0.0871 291 -0.2559 0.0868 -0.971 0.3325
## High\nPerformance 0.1812 0.0782 286 0.0272 0.3352 2.316 0.0213
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## timenum = 1:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.629 0.1627 307 -0.950 -0.3092
## Low\nPerformance - High\nPerformance -1.094 0.1598 311 -1.408 -0.7793
## Low\nMental Rotation - High\nPerformance -0.464 0.1087 295 -0.678 -0.2505
## t.ratio p.value
## -3.868 0.0001
## -6.845 <.0001
## -4.273 <.0001
##
## timenum = 2:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.514 0.1321 303 -0.774 -0.2539
## Low\nPerformance - High\nPerformance -0.912 0.1298 305 -1.167 -0.6566
## Low\nMental Rotation - High\nPerformance -0.398 0.0900 297 -0.575 -0.2209
## t.ratio p.value
## -3.889 0.0001
## -7.026 <.0001
## -4.422 <.0001
##
## timenum = 3:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.399 0.1340 283 -0.662 -0.1346
## Low\nPerformance - High\nPerformance -0.730 0.1314 282 -0.989 -0.4717
## Low\nMental Rotation - High\nPerformance -0.332 0.0937 299 -0.516 -0.1475
## t.ratio p.value
## -2.973 0.0032
## -5.557 <.0001
## -3.542 0.0005
##
## timenum = 4:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.283 0.1673 276 -0.612 0.0463
## Low\nPerformance - High\nPerformance -0.549 0.1637 277 -0.871 -0.2264
## Low\nMental Rotation - High\nPerformance -0.266 0.1176 292 -0.497 -0.0342
## t.ratio p.value
## -1.692 0.0918
## -3.352 0.0009
## -2.259 0.0246
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
em=emtrends(workingmemory_model, pairwise ~ Class, var="timenum", mult.name = "Class")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## Class timenum.trend SE df lower.CL upper.CL t.ratio
## Low\nPerformance 0.1362 0.0597 284 0.0187 0.2536 2.283
## Low\nMental Rotation 0.0207 0.0346 275 -0.0474 0.0888 0.598
## High\nPerformance -0.0455 0.0315 279 -0.1075 0.0165 -1.445
## p.value
## 0.0232
## 0.5501
## 0.1496
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation 0.1154 0.0690 282 -0.0203 0.251
## Low\nPerformance - High\nPerformance 0.1817 0.0675 283 0.0489 0.314
## Low\nMental Rotation - High\nPerformance 0.0662 0.0468 276 -0.0259 0.158
## t.ratio p.value
## 1.674 0.0952
## 2.693 0.0075
## 1.415 0.1582
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
######
graph_wm_model = ggpredict(workingmemory_model, terms=c("timenum","Class"))
levels(graph_wm_model$group)[levels(graph_wm_model$group ) == "2"] <- "Low\nPerformance"
levels(graph_wm_model$group)[levels(graph_wm_model$group ) == "1"] <- "Low\nMental Rotation"
levels(graph_wm_model$group)[levels(graph_wm_model$group ) == "3"] <- "High\nPerformance"
graph_wm_model$group = factor(graph_wm_model$group, levels=c("Low\nPerformance",
"Low\nMental Rotation", "High\nPerformance"))
graph_wm_model$x = as.factor(as.numeric(graph_wm_model$x))
graph_wm_model$title = "Working\nMemory"
graph_wm_growth = ggplot(graph_wm_model, aes(x =as.numeric(x), y = predicted, colour = group)) +
geom_hline(yintercept = .0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = group),
alpha = .15, color = NA)+
geom_line() +
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Predicted (Standardized Scores)")+
xlab("Time")+
scale_color_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_x_continuous(breaks=c(1,2,3,4), labels = c("T1","T2","T3","T4"))+
scale_y_continuous(breaks = seq(-2.5,1.5,.5), limits = c(-2.7,1.5))+
facet_grid(.~title)+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_wm_growth
multiplot(graph_wm_growth,graph_panamath_growth, graph_mr_growth, cols=3)
graph_workingmemory_longitudinal = ggplot(subset(workingmemory_longitudinal_gather_summary,!is.na(Class)), aes(x = time, y = AWMADMSS_N.s, group = Class)) +
geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (AWMADMSS_N.s-se), ymax = (AWMADMSS_N.s+se), fill = Class),
alpha = .15, color = NA)+
geom_line(aes(color = Class)) +
scale_color_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
facet_grid(.~task)+
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("working memory")+
xlab("Time")+
scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_workingmemory_longitudinal
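### Proportional reasoning: reshape T1-T4 scores (PRWMPAE.asin.sR) to long format, attach class membership, and model growth by class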
prop_longitudinal = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1PRWMPAE.asin.sR","T2PRWMPAE.asin.sR","T3PRWMPAE.asin.sR","T4PRWMPAE.asin.sR")
prop_longitudinal_gather = gather(prop_longitudinal, "time", "PRWMPAE.asin.sR", -c("SubjectID","T0SY1Y2G_StartGrade"))
prop_longitudinal_gather$SubjectID = as.factor(prop_longitudinal_gather$SubjectID)
prop_longitudinal_gather = prop_longitudinal_gather %>%
left_join(T1_complete_class[c("SubjectID","Class")], by ="SubjectID")
prop_longitudinal_gather_summary = summarySE(prop_longitudinal_gather, "PRWMPAE.asin.sR", c("time","Class"), na.rm = T)
prop_longitudinal_gather_summary$time = as.factor(prop_longitudinal_gather_summary$time)
prop_longitudinal_gather_summary$Class = as.factor(prop_longitudinal_gather_summary$Class)
prop_longitudinal_gather_summary$task = "Proportional Reasoning"
prop_longitudinal_gather1 = subset(prop_longitudinal_gather, !is.na(PRWMPAE.asin.sR))
prop_longitudinal_gather1$timenum = as.numeric(as.factor(prop_longitudinal_gather1$time))
prop_longitudinal_gather1$Class = as.factor(prop_longitudinal_gather1$Class)
prop_longitudinal_gather1 = prop_longitudinal_gather1 %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
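### Proportional reasoning growth model: Class x time; random intercept and time slope per child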
prop_model <- lmer(PRWMPAE.asin.sR ~ Class * (timenum)+T0SGENGender+T0PDEMOMaxParentEducation+ T0SY1Y2G_StartGrade+
(1+timenum|SubjectID), data = subset(prop_longitudinal_gather1, !is.na(Class)), control = lmerControl(optimizer = "bobyqa"))
summary(prop_model)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## PRWMPAE.asin.sR ~ Class * (timenum) + T0SGENGender + T0PDEMOMaxParentEducation +
## T0SY1Y2G_StartGrade + (1 + timenum | SubjectID)
## Data: subset(prop_longitudinal_gather1, !is.na(Class))
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 2697
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.314 -0.494 0.128 0.582 2.243
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## SubjectID (Intercept) 0.18261 0.4273
## timenum 0.00433 0.0658 0.80
## Residual 0.48294 0.6949
## Number of obs: 1088, groups: SubjectID, 304
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -1.82684 0.30763 376.87431 -5.94
## ClassLow\nMental Rotation 0.99808 0.18254 277.21722 5.47
## ClassHigh\nPerformance 1.21920 0.17883 280.99635 6.82
## timenum 0.13274 0.05189 263.28490 2.56
## T0SGENGender 0.04655 0.07815 294.66681 0.60
## T0PDEMOMaxParentEducation 0.05304 0.01653 299.02329 3.21
## T0SY1Y2G_StartGrade 0.00222 0.03119 296.77750 0.07
## ClassLow\nMental Rotation:timenum -0.14596 0.06066 266.63727 -2.41
## ClassHigh\nPerformance:timenum -0.13761 0.05922 267.78993 -2.32
## Pr(>|t|)
## (Intercept) 0.000000006542 ***
## ClassLow\nMental Rotation 0.000000101686 ***
## ClassHigh\nPerformance 0.000000000056 ***
## timenum 0.0111 *
## T0SGENGender 0.5519
## T0PDEMOMaxParentEducation 0.0015 **
## T0SY1Y2G_StartGrade 0.9433
## ClassLow\nMental Rotation:timenum 0.0168 *
## ClassHigh\nPerformance:timenum 0.0209 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) ClsLMR ClssHP timenm T0SGEN T0PDEM T0SY1Y ClLMR:
## ClssLwMntlR -0.441
## ClssHghPrfr -0.366 0.761
## timenum -0.375 0.635 0.648
## T0SGENGendr -0.367 0.098 0.058 0.012
## T0PDEMOMxPE -0.751 -0.046 -0.127 -0.008 -0.057
## T0SY1Y2G_SG -0.239 -0.003 -0.068 -0.003 0.032 0.173
## ClssLMRttn: 0.316 -0.736 -0.555 -0.855 -0.008 0.011 0.006
## ClssHPrfrm: 0.322 -0.557 -0.734 -0.876 -0.014 0.018 0.001 0.750
Anova(prop_model, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: PRWMPAE.asin.sR
## F Df Df.res Pr(>F)
## (Intercept) 35.07 1 379 0.00000000712 ***
## Class 23.32 2 290 0.00000000041 ***
## timenum 6.54 1 263 0.0111 *
## T0SGENGender 0.35 1 297 0.5533
## T0PDEMOMaxParentEducation 10.23 1 302 0.0015 **
## T0SY1Y2G_StartGrade 0.01 1 300 0.9435
## Class:timenum 3.20 2 272 0.0423 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
plot(ggpredict(prop_model, terms=c("timenum","Class")))
emeans=emmeans(prop_model, pairwise ~ Class|timenum, mult.name = "Class", at=list(timenum=c(1,2,3,4)))
summary(emeans, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## timenum = 1:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.8364 0.1242 292 -1.081 -0.592 -6.736 <.0001
## Low\nMental Rotation 0.0157 0.0729 295 -0.128 0.159 0.215 0.8296
## High\nPerformance 0.2452 0.0651 299 0.117 0.373 3.766 0.0002
##
## timenum = 2:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.7037 0.1071 294 -0.914 -0.493 -6.571 <.0001
## Low\nMental Rotation 0.0025 0.0633 295 -0.122 0.127 0.039 0.9686
## High\nPerformance 0.2403 0.0563 300 0.130 0.351 4.266 <.0001
##
## timenum = 3:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.5709 0.1136 279 -0.794 -0.347 -5.026 <.0001
## Low\nMental Rotation -0.0107 0.0684 297 -0.145 0.124 -0.157 0.8756
## High\nPerformance 0.2354 0.0611 299 0.115 0.356 3.852 0.0001
##
## timenum = 4:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.4382 0.1405 271 -0.715 -0.162 -3.120 0.0020
## Low\nMental Rotation -0.0239 0.0856 292 -0.193 0.145 -0.280 0.7800
## High\nPerformance 0.2306 0.0770 292 0.079 0.382 2.995 0.0030
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## timenum = 1:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.852 0.1439 293 -1.135 -0.5689
## Low\nPerformance - High\nPerformance -1.082 0.1412 297 -1.359 -0.8037
## Low\nMental Rotation - High\nPerformance -0.230 0.0983 299 -0.423 -0.0360
## t.ratio p.value
## -5.921 <.0001
## -7.659 <.0001
## -2.334 0.0203
##
## timenum = 2:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.706 0.1243 295 -0.951 -0.4616
## Low\nPerformance - High\nPerformance -0.944 0.1221 297 -1.184 -0.7037
## Low\nMental Rotation - High\nPerformance -0.238 0.0854 298 -0.406 -0.0697
## t.ratio p.value
## -5.683 <.0001
## -7.733 <.0001
## -2.783 0.0057
##
## timenum = 3:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.560 0.1324 284 -0.821 -0.2995
## Low\nPerformance - High\nPerformance -0.806 0.1299 284 -1.062 -0.5507
## Low\nMental Rotation - High\nPerformance -0.246 0.0924 300 -0.428 -0.0643
## t.ratio p.value
## -4.230 <.0001
## -6.207 <.0001
## -2.664 0.0081
##
## timenum = 4:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.414 0.1643 276 -0.738 -0.0908
## Low\nPerformance - High\nPerformance -0.669 0.1608 277 -0.985 -0.3521
## Low\nMental Rotation - High\nPerformance -0.255 0.1157 295 -0.482 -0.0268
## t.ratio p.value
## -2.521 0.0123
## -4.158 <.0001
## -2.200 0.0286
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
emeans=emmeans(prop_model, pairwise ~ Class, mult.name = "Class")
summary(emeans, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.6407 0.1071 288 -0.852 -0.430 -5.981 <.0001
## Low\nMental Rotation -0.0038 0.0639 297 -0.130 0.122 -0.059 0.9529
## High\nPerformance 0.2380 0.0569 300 0.126 0.350 4.183 <.0001
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.637 0.1246 290 -0.882 -0.392
## Low\nPerformance - High\nPerformance -0.879 0.1223 291 -1.119 -0.638
## Low\nMental Rotation - High\nPerformance -0.242 0.0862 299 -0.411 -0.072
## t.ratio p.value
## -5.113 <.0001
## -7.183 <.0001
## -2.803 0.0054
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
em=emtrends(prop_model, pairwise ~ Class, var="timenum", mult.name = "Class")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## Class timenum.trend SE df lower.CL upper.CL t.ratio
## Low\nPerformance 0.13274 0.0519 263 0.0305 0.2349 2.558
## Low\nMental Rotation -0.01322 0.0314 275 -0.0751 0.0487 -0.420
## High\nPerformance -0.00486 0.0285 282 -0.0610 0.0513 -0.170
## p.value
## 0.0111
## 0.6746
## 0.8648
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation 0.14596 0.0607 266 0.0265 0.2655
## Low\nPerformance - High\nPerformance 0.13761 0.0592 267 0.0210 0.2542
## Low\nMental Rotation - High\nPerformance -0.00835 0.0425 278 -0.0919 0.0752
## t.ratio p.value
## 2.405 0.0169
## 2.323 0.0209
## -0.197 0.8442
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
#######
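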
graph_prop_model = ggpredict(prop_model, terms=c("timenum","Class"))
levels(graph_prop_model$group)[levels(graph_prop_model$group ) == "2"] <- "Low\nPerformance"
levels(graph_prop_model$group)[levels(graph_prop_model$group ) == "1"] <- "Low\nMental Rotation"
levels(graph_prop_model$group)[levels(graph_prop_model$group ) == "3"] <- "High\nPerformance"
graph_prop_model$group = factor(graph_prop_model$group, levels=c("Low\nPerformance",
"Low\nMental Rotation", "High\nPerformance"))
graph_prop_model$x = as.factor(as.numeric(graph_prop_model$x))
graph_prop_model$title = "Proportional\nReasoning"
graph_prop_growth = ggplot(graph_prop_model, aes(x =as.numeric(x), y = predicted, colour = group)) +
geom_hline(yintercept = .0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = group),
alpha = .15, color = NA)+
geom_line() +
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Predicted (Standardized Scores)")+
xlab("Time")+
scale_color_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_x_continuous(breaks=c(1,2,3,4), labels = c("T1","T2","T3","T4"))+
scale_y_continuous(breaks = seq(-2.5,1.5,.5), limits = c(-2.7,1.5))+#scale_x_discrete(labels = c("T1","T2","T3","T4"))+
facet_grid(.~title)+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_prop_growth
multiplot(graph_wm_growth,graph_panamath_growth,graph_prop_growth, graph_mr_growth, cols=4)
graph_prop_longitudinal = ggplot(subset(prop_longitudinal_gather_summary,!is.na(Class)), aes(x = time, y = PRWMPAE.asin.sR, group = Class)) +
geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (PRWMPAE.asin.sR-se), ymax = (PRWMPAE.asin.sR+se), fill = Class),
alpha = .15, color = NA)+
geom_line(aes(color = Class)) +
scale_color_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
facet_grid(.~task)+
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Proportional Reasoning")+
xlab("Time")+
scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_prop_longitudinal
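### Exact calculation: reshape T1-T4 standardized scores (EC.s) to long format, attach class membership, and model growth by class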
exactcalculation_longitudinal = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1EC.s","T2EC.s","T3EC.s","T4EC.s")
exactcalculation_longitudinal_gather = gather(exactcalculation_longitudinal, "time", "EC.s", -c("SubjectID","T0SY1Y2G_StartGrade"))
exactcalculation_longitudinal_gather$SubjectID = as.factor(as.character(exactcalculation_longitudinal_gather$SubjectID))
exactcalculation_longitudinal_gather = exactcalculation_longitudinal_gather %>%
left_join(T1_complete_class[c("SubjectID","Class")], by ="SubjectID")
exactcalculation_longitudinal_gather_summary = summarySE(exactcalculation_longitudinal_gather, "EC.s", c("time","Class"), na.rm = T)
exactcalculation_longitudinal_gather_summary$time = as.factor(exactcalculation_longitudinal_gather_summary$time)
exactcalculation_longitudinal_gather_summary$Class = as.factor(exactcalculation_longitudinal_gather_summary$Class)
exactcalculation_longitudinal_gather_summary$task = "Exact Calculation"
exactcalculation_longitudinal_gather1 = subset(exactcalculation_longitudinal_gather, !is.na(EC.s))
exactcalculation_longitudinal_gather1$timenum = as.numeric(as.factor(exactcalculation_longitudinal_gather1$time))
exactcalculation_longitudinal_gather1$Class = as.factor(exactcalculation_longitudinal_gather1$Class)
exactcalculation_longitudinal_gather1 = exactcalculation_longitudinal_gather1 %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
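### Exact calculation growth model: Class x time; random intercept only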
exactcalculation_model <- lmer(EC.s ~ Class * (timenum) + T0SGENGender + T0PDEMOMaxParentEducation +T0SY1Y2G_StartGrade+
(1|SubjectID), data = subset(exactcalculation_longitudinal_gather1, !is.na(Class)), control = lmerControl(optimizer = "bobyqa"))
summary(exactcalculation_model)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: EC.s ~ Class * (timenum) + T0SGENGender + T0PDEMOMaxParentEducation +
## T0SY1Y2G_StartGrade + (1 | SubjectID)
## Data: subset(exactcalculation_longitudinal_gather1, !is.na(Class))
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 2519
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -6.225 -0.549 0.036 0.572 2.366
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.380 0.617
## Residual 0.446 0.668
## Number of obs: 1025, groups: SubjectID, 304
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -1.5786 0.3297 395.0494 -4.79
## ClassLow\nMental Rotation 0.8291 0.1964 886.5576 4.22
## ClassHigh\nPerformance 1.4239 0.1921 881.2726 7.41
## timenum 0.1516 0.0498 728.4695 3.04
## T0SGENGender -0.0346 0.0840 294.9705 -0.41
## T0PDEMOMaxParentEducation 0.0428 0.0177 293.7840 2.42
## T0SY1Y2G_StartGrade -0.0310 0.0335 294.9627 -0.93
## ClassLow\nMental Rotation:timenum -0.1378 0.0584 733.0494 -2.36
## ClassHigh\nPerformance:timenum -0.1965 0.0569 734.6949 -3.45
## Pr(>|t|)
## (Intercept) 0.00000238916514 ***
## ClassLow\nMental Rotation 0.00002685298235 ***
## ClassHigh\nPerformance 0.00000000000029 ***
## timenum 0.00242 **
## T0SGENGender 0.68092
## T0PDEMOMaxParentEducation 0.01592 *
## T0SY1Y2G_StartGrade 0.35551
## ClassLow\nMental Rotation:timenum 0.01865 *
## ClassHigh\nPerformance:timenum 0.00059 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) ClsLMR ClssHP timenm T0SGEN T0PDEM T0SY1Y ClLMR:
## ClssLwMntlR -0.447
## ClssHghPrfr -0.378 0.758
## timenum -0.378 0.635 0.650
## T0SGENGendr -0.366 0.089 0.057 0.008
## T0PDEMOMxPE -0.751 -0.034 -0.114 -0.003 -0.055
## T0SY1Y2G_SG -0.226 0.003 -0.061 -0.006 0.035 0.157
## ClssLMRttn: 0.321 -0.739 -0.553 -0.852 0.000 0.001 0.002
## ClssHPrfrm: 0.332 -0.557 -0.737 -0.875 -0.018 0.009 -0.004 0.746
Anova(exactcalculation_model, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: EC.s
## F Df Df.res Pr(>F)
## (Intercept) 22.92 1 398 0.0000023860931 ***
## Class 29.78 2 886 0.0000000000003 ***
## timenum 9.26 1 731 0.0024 **
## T0SGENGender 0.17 1 297 0.6809
## T0PDEMOMaxParentEducation 5.88 1 296 0.0159 *
## T0SY1Y2G_StartGrade 0.86 1 297 0.3555
## Class:timenum 6.00 2 743 0.0026 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
em=emtrends(exactcalculation_model, pairwise ~ Class, var="timenum", mult.name = "Class")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## Class timenum.trend SE df lower.CL upper.CL t.ratio
## Low\nPerformance 0.1516 0.0498 731 0.0538 0.24940 3.044
## Low\nMental Rotation 0.0138 0.0306 747 -0.0462 0.07385 0.453
## High\nPerformance -0.0449 0.0276 758 -0.0990 0.00932 -1.626
## p.value
## 0.0024
## 0.6509
## 0.1045
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation 0.1378 0.0584 735 0.0230 0.253
## Low\nPerformance - High\nPerformance 0.1965 0.0570 737 0.0847 0.308
## Low\nMental Rotation - High\nPerformance 0.0587 0.0412 752 -0.0222 0.140
## t.ratio p.value
## 2.358 0.0187
## 3.450 0.0006
## 1.425 0.1545
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
emeans=emmeans(exactcalculation_model, pairwise ~ Class|timenum, mult.name = "Class", at=list(timenum=c(1,2,3,4)))
summary(emeans, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## timenum = 1:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.877 0.1363 550 -1.1447 -0.6090 -6.431 <.0001
## Low\nMental Rotation -0.185 0.0806 552 -0.3438 -0.0271 -2.301 0.0218
## High\nPerformance 0.351 0.0717 551 0.2097 0.4915 4.887 <.0001
##
## timenum = 2:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.725 0.1166 317 -0.9547 -0.4958 -6.219 <.0001
## Low\nMental Rotation -0.172 0.0690 320 -0.3074 -0.0358 -2.486 0.0134
## High\nPerformance 0.306 0.0614 319 0.1849 0.4266 4.979 <.0001
##
## timenum = 3:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.574 0.1165 317 -0.8028 -0.3444 -4.924 <.0001
## Low\nMental Rotation -0.158 0.0700 335 -0.2955 -0.0201 -2.254 0.0249
## High\nPerformance 0.261 0.0626 340 0.1378 0.3840 4.168 <.0001
##
## timenum = 4:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.422 0.1360 550 -0.6892 -0.1548 -3.102 0.0020
## Low\nMental Rotation -0.144 0.0831 595 -0.3072 0.0193 -1.732 0.0838
## High\nPerformance 0.216 0.0747 606 0.0692 0.3628 2.890 0.0040
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## timenum = 1:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.691 0.1583 551 -1.002 -0.3805
## Low\nPerformance - High\nPerformance -1.227 0.1550 547 -1.532 -0.9230
## Low\nMental Rotation - High\nPerformance -0.536 0.1085 547 -0.749 -0.3230
## t.ratio p.value
## -4.369 <.0001
## -7.918 <.0001
## -4.943 <.0001
##
## timenum = 2:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.554 0.1354 318 -0.820 -0.2872
## Low\nPerformance - High\nPerformance -1.031 0.1328 317 -1.292 -0.7697
## Low\nMental Rotation - High\nPerformance -0.477 0.0931 319 -0.660 -0.2943
## t.ratio p.value
## -4.089 <.0001
## -7.762 <.0001
## -5.129 <.0001
##
## timenum = 3:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.416 0.1358 322 -0.683 -0.1486
## Low\nPerformance - High\nPerformance -0.835 0.1332 321 -1.097 -0.5725
## Low\nMental Rotation - High\nPerformance -0.419 0.0946 338 -0.605 -0.2325
## t.ratio p.value
## -3.062 0.0024
## -6.265 <.0001
## -4.424 <.0001
##
## timenum = 4:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.278 0.1593 562 -0.591 0.0349
## Low\nPerformance - High\nPerformance -0.638 0.1560 557 -0.944 -0.3317
## Low\nMental Rotation - High\nPerformance -0.360 0.1124 597 -0.581 -0.1391
## t.ratio p.value
## -1.745 0.0815
## -4.091 <.0001
## -3.201 0.0014
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
plot(ggpredict(exactcalculation_model, terms=c("timenum","Class")))
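### Follow-up among children who started in Grade 2: does T1 proportional reasoning predict exact-calculation level and growth?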
exactcalculation_longitudinal_gather1_prop = exactcalculation_longitudinal_gather1 %>%
left_join(profile_data[c("SubjectID","T1PRWMPAE.asin.sR")],by="SubjectID")
exactcalculation_longitudinal_gather1_prop = subset(exactcalculation_longitudinal_gather1_prop, T0SY1Y2G_StartGrade ==2 )
ec_propxtime_mod = lmer(EC.s ~ T1PRWMPAE.asin.sR * (timenum) + T0SGENGender + T0PDEMOMaxParentEducation +
(1|SubjectID), data = subset(exactcalculation_longitudinal_gather1_prop, !is.na(Class)), control = lmerControl(optimizer = "bobyqa"))
summary(ec_propxtime_mod)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## EC.s ~ T1PRWMPAE.asin.sR * (timenum) + T0SGENGender + T0PDEMOMaxParentEducation +
## (1 | SubjectID)
## Data: subset(exactcalculation_longitudinal_gather1_prop, !is.na(Class))
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 650.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.763 -0.467 0.081 0.614 1.984
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.338 0.581
## Residual 0.577 0.760
## Number of obs: 243, groups: SubjectID, 76
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -0.68334 0.55672 80.01810 -1.23 0.223
## T1PRWMPAE.asin.sR 0.26051 0.15441 226.82399 1.69 0.093 .
## timenum 0.00289 0.04378 176.06792 0.07 0.947
## T0SGENGender -0.03747 0.16823 71.65094 -0.22 0.824
## T0PDEMOMaxParentEducation 0.05066 0.03515 70.33132 1.44 0.154
## T1PRWMPAE.asin.sR:timenum -0.07871 0.04820 174.86799 -1.63 0.104
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) T1PRWMPAE.s.R timenm T0SGEN T0PDEM
## T1PRWMPAE.s.R 0.078
## timenum -0.236 0.025
## T0SGENGendr -0.328 -0.007 0.024
## T0PDEMOMxPE -0.860 -0.096 0.031 -0.133
## T1PRWMPAE..R: 0.002 -0.793 -0.040 0.015 -0.001
Anova(ec_propxtime_mod, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: EC.s
## F Df Df.res Pr(>F)
## (Intercept) 1.51 1 79.1 0.223
## T1PRWMPAE.asin.sR 2.84 1 226.7 0.093 .
## timenum 0.00 1 175.3 0.948
## T0SGENGender 0.05 1 70.8 0.824
## T0PDEMOMaxParentEducation 2.08 1 69.5 0.154
## T1PRWMPAE.asin.sR:timenum 2.66 1 174.1 0.105
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summarySE(exactcalculation_longitudinal_gather1,"EC.s",c("Class","T0SGENGender"))
## Class T0SGENGender N EC.s sd se ci
## 1 Low\nPerformance 1 67 -0.497068 0.9499 0.11605 0.2317
## 2 Low\nPerformance 2 93 -0.591024 1.1508 0.11933 0.2370
## 3 Low\nMental Rotation 1 287 -0.132708 0.8943 0.05279 0.1039
## 4 Low\nMental Rotation 2 172 -0.234565 0.9577 0.07303 0.1441
## 5 High\nPerformance 1 268 0.285553 0.9047 0.05526 0.1088
## 6 High\nPerformance 2 257 0.372036 0.8525 0.05317 0.1047
## 7 High\nPerformance NA 2 -0.865088 0.3695 0.26126 3.3197
## 8 <NA> 1 132 0.004027 1.1154 0.09709 0.1921
## 9 <NA> 2 114 -0.037210 1.0074 0.09435 0.1869
#######
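### Figure: model-predicted exact-calculation trajectories by class.
### ggpredict() codes the classes 1-3, so relabel and reorder the groups before plotting.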
graph_ec_model = ggpredict(exactcalculation_model, terms=c("timenum","Class"))
levels(graph_ec_model$group)[levels(graph_ec_model$group ) == "2"] <- "Low\nPerformance"
levels(graph_ec_model$group)[levels(graph_ec_model$group ) == "1"] <- "Low\nMental Rotation"
levels(graph_ec_model$group)[levels(graph_ec_model$group ) == "3"] <- "High\nPerformance"
graph_ec_model$group = factor(graph_ec_model$group, levels=c("Low\nPerformance",
"Low\nMental Rotation", "High\nPerformance"))
graph_ec_model$x = as.factor(as.numeric(graph_ec_model$x))
graph_ec_model$title = "Exact\nCalculations"
graph_ec_growth = ggplot(graph_ec_model, aes(x =as.numeric(x), y = predicted, colour = group)) +
geom_hline(yintercept = .0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = group),
alpha = .15, color = NA)+
geom_line() +
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Predicted (Standardized Scores)")+
xlab("Time")+
scale_color_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_x_continuous(breaks=c(1,2,3,4), labels = c("T1","T2","T3","T4"))+
scale_y_continuous(breaks = seq(-2.5,1.5,.5), limits = c(-2.7,1.5))+
#scale_x_discrete(labels = c("T1","T2","T3","T4"))+
facet_grid(.~title)+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_ec_growth
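### Observed class means (+/- 1 SE) of exact calculation at each timepoint,
### for comparison with the model-based predictions above.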
graph_exactcalculation_longitudinal = ggplot(subset(exactcalculation_longitudinal_gather_summary,!is.na(Class)), aes(x = time, y = EC.s, group = Class)) +
geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (EC.s-se), ymax = (EC.s+se), fill = Class),
alpha = .15, color = NA)+
geom_line(aes(color = Class)) +
scale_color_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
facet_grid(.~task)+
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Exact Calculation")+
xlab("Time")+
scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_exactcalculation_longitudinal
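### Approximate calculation (ASWmac): reshape to long format, merge class membership
### and covariates, and summarize by timepoint and class.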
approx_longitudinal = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1ASWmac.s","T2ASWmac.s","T3ASWmac.s","T4ASWmac.s")
approx_longitudinal_gather = gather(approx_longitudinal, "time", "ASWmac.s", -c("SubjectID","T0SY1Y2G_StartGrade"))
approx_longitudinal_gather$SubjectID = as.factor(as.character(approx_longitudinal_gather$SubjectID))
approx_longitudinal_gather = approx_longitudinal_gather %>%
left_join(T1_complete_class[c("SubjectID","Class")], by ="SubjectID")
approx_longitudinal_gather = approx_longitudinal_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
approx_longitudinal_gather_summary = summarySE(approx_longitudinal_gather, "ASWmac.s", c("time","Class"), na.rm = T)
approx_longitudinal_gather_summary$time = as.factor(approx_longitudinal_gather_summary$time)
#approx_longitudinal_gather_summary$Class = as.factor(approx_longitudinal_gather_summary$Class)
approx_longitudinal_gather_summary$task = "Approximate"
approx_longitudinal_gather$time_num = as.numeric(as.factor(approx_longitudinal_gather$time))
#approx_longitudinal_gather$Class = as.factor(as.character(approx_longitudinal_gather$Class))
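### Growth model: Class x time with gender, parent education, and starting grade
### as covariates, and a random intercept per child.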
approx_model <- lmer(ASWmac.s ~ Class * (time_num) + T0SGENGender + T0PDEMOMaxParentEducation +T0SY1Y2G_StartGrade+
(1|SubjectID), data = subset(approx_longitudinal_gather, !is.na(Class)), control = lmerControl(optimizer = "bobyqa"))
summary(approx_model)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## ASWmac.s ~ Class * (time_num) + T0SGENGender + T0PDEMOMaxParentEducation +
## T0SY1Y2G_StartGrade + (1 | SubjectID)
## Data: subset(approx_longitudinal_gather, !is.na(Class))
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 2787
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.7697 -0.6300 -0.0022 0.6844 2.9730
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.230 0.480
## Residual 0.651 0.807
## Number of obs: 1045, groups: SubjectID, 304
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -1.36272 0.31646 481.13139 -4.31
## ClassLow\nMental Rotation 0.40480 0.21172 1023.86924 1.91
## ClassHigh\nPerformance 0.81002 0.20708 1022.63769 3.91
## time_num 0.04396 0.06044 769.21805 0.73
## T0SGENGender 0.14472 0.07604 292.14934 1.90
## T0PDEMOMaxParentEducation 0.04311 0.01607 296.66801 2.68
## T0SY1Y2G_StartGrade -0.00368 0.03049 299.12485 -0.12
## ClassLow\nMental Rotation:time_num -0.03219 0.07054 772.67491 -0.46
## ClassHigh\nPerformance:time_num -0.07305 0.06898 774.47316 -1.06
## Pr(>|t|)
## (Intercept) 0.000020 ***
## ClassLow\nMental Rotation 0.0562 .
## ClassHigh\nPerformance 0.000098 ***
## time_num 0.4672
## T0SGENGender 0.0580 .
## T0PDEMOMaxParentEducation 0.0077 **
## T0SY1Y2G_StartGrade 0.9039
## ClassLow\nMental Rotation:time_num 0.6483
## ClassHigh\nPerformance:time_num 0.2899
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) ClsLMR ClssHP tim_nm T0SGEN T0PDEM T0SY1Y CLMR:_
## ClssLwMntlR -0.504
## ClssHghPrfr -0.450 0.759
## time_num -0.485 0.711 0.726
## T0SGENGendr -0.342 0.072 0.042 0.003
## T0PDEMOMxPE -0.717 -0.023 -0.092 0.011 -0.057
## T0SY1Y2G_SG -0.222 0.002 -0.042 0.000 0.038 0.159
## ClssLMRtt:_ 0.414 -0.826 -0.622 -0.857 0.001 -0.010 0.004
## ClssHPrfr:_ 0.422 -0.624 -0.824 -0.876 -0.009 0.000 -0.014 0.751
Anova(approx_model, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: ASWmac.s
## F Df Df.res Pr(>F)
## (Intercept) 18.54 1 484 0.00002 ***
## Class 8.96 2 1023 0.00014 ***
## time_num 0.53 1 771 0.46728
## T0SGENGender 3.62 1 294 0.05803 .
## T0PDEMOMaxParentEducation 7.20 1 299 0.00770 **
## T0SY1Y2G_StartGrade 0.01 1 301 0.90393
## Class:time_num 0.69 2 781 0.50104
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
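### Within-class time slopes and pairwise slope contrasts (Kenward-Roger df,
### unadjusted p-values).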
em=emtrends(approx_model, pairwise ~ Class, var="time_num", mult.name = "Class")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## Class time_num.trend SE df lower.CL upper.CL t.ratio
## Low\nPerformance 0.0440 0.0604 771 -0.0747 0.1626 0.727
## Low\nMental Rotation 0.0118 0.0364 784 -0.0597 0.0832 0.324
## High\nPerformance -0.0291 0.0333 793 -0.0944 0.0362 -0.874
## p.value
## 0.4673
## 0.7463
## 0.3822
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation 0.0322 0.0706 775 -0.1063 0.171
## Low\nPerformance - High\nPerformance 0.0731 0.0690 776 -0.0624 0.208
## Low\nMental Rotation - High\nPerformance 0.0409 0.0493 788 -0.0559 0.138
## t.ratio p.value
## 0.456 0.6484
## 1.059 0.2900
## 0.829 0.4075
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
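### Class contrasts in approximate calculation at each timepoint (time_num = 1-4),
### averaged over gender; pass adjust = "tukey" or "holm" to summary() if
### multiplicity-adjusted p-values are preferred.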
emeans=emmeans(approx_model, pairwise ~ Class|time_num, mult.name = "Class", at=list(time_num=c(1,2,3,4)))
summary(emeans, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## time_num = 1:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.4663 0.1366 723 -0.7345 -0.1980 -3.412 0.0007
## Low\nMental Rotation -0.0937 0.0807 715 -0.2521 0.0648 -1.160 0.2464
## High\nPerformance 0.2707 0.0722 735 0.1289 0.4125 3.747 0.0002
##
## time_num = 2:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.4223 0.1069 332 -0.6326 -0.2120 -3.949 0.0001
## Low\nMental Rotation -0.0819 0.0634 333 -0.2067 0.0429 -1.291 0.1977
## High\nPerformance 0.2416 0.0565 342 0.1305 0.3528 4.277 <.0001
##
## time_num = 3:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.3783 0.1073 332 -0.5893 -0.1673 -3.527 0.0005
## Low\nMental Rotation -0.0701 0.0646 352 -0.1972 0.0571 -1.084 0.2790
## High\nPerformance 0.2126 0.0581 370 0.0983 0.3268 3.657 0.0003
##
## time_num = 4:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.3344 0.1374 717 -0.6042 -0.0646 -2.433 0.0152
## Low\nMental Rotation -0.0583 0.0836 750 -0.2224 0.1057 -0.698 0.4854
## High\nPerformance 0.1835 0.0760 782 0.0343 0.3327 2.414 0.0160
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## time_num = 1:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.373 0.1586 724 -0.684 -0.0613
## Low\nPerformance - High\nPerformance -0.737 0.1553 720 -1.042 -0.4321
## Low\nMental Rotation - High\nPerformance -0.364 0.1087 718 -0.578 -0.1509
## t.ratio p.value
## -2.350 0.0190
## -4.746 <.0001
## -3.351 0.0008
##
## time_num = 2:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.340 0.1241 333 -0.585 -0.0963
## Low\nPerformance - High\nPerformance -0.664 0.1218 333 -0.904 -0.4242
## Low\nMental Rotation - High\nPerformance -0.324 0.0856 336 -0.492 -0.1552
## t.ratio p.value
## -2.743 0.0064
## -5.449 <.0001
## -3.781 0.0002
##
## time_num = 3:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.308 0.1250 337 -0.554 -0.0624
## Low\nPerformance - High\nPerformance -0.591 0.1229 339 -0.833 -0.3492
## Low\nMental Rotation - High\nPerformance -0.283 0.0876 360 -0.455 -0.1104
## t.ratio p.value
## -2.466 0.0141
## -4.808 <.0001
## -3.226 0.0014
##
## time_num = 4:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.276 0.1606 727 -0.591 0.0393
## Low\nPerformance - High\nPerformance -0.518 0.1577 727 -0.828 -0.2082
## Low\nMental Rotation - High\nPerformance -0.242 0.1135 761 -0.465 -0.0189
## t.ratio p.value
## -1.719 0.0861
## -3.283 0.0011
## -2.129 0.0336
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
plot(ggpredict(approx_model, terms=c("time_num","Class")))
#######
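### Figure: model-predicted approximate-calculation trajectories by class
### (same relabeling of the ggpredict() groups as for exact calculation).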
graph_ac_model = ggpredict(approx_model, terms=c("time_num","Class"))
levels(graph_ac_model$group)[levels(graph_ac_model$group ) == "2"] <- "Low\nPerformance"
levels(graph_ac_model$group)[levels(graph_ac_model$group ) == "1"] <- "Low\nMental Rotation"
levels(graph_ac_model$group)[levels(graph_ac_model$group ) == "3"] <- "High\nPerformance"
graph_ac_model$group = factor(graph_ac_model$group, levels=c("Low\nPerformance",
"Low\nMental Rotation", "High\nPerformance"))
graph_ac_model$x = as.factor(as.numeric(graph_ac_model$x))
graph_ac_model$title = "Approximate\nCalculations"
graph_ac_growth = ggplot(graph_ac_model, aes(x =as.numeric(x), y = predicted, colour = group)) +
geom_hline(yintercept = .0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = group),
alpha = .15, color = NA)+
geom_line() +
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Predicted (Standardized Scores)")+
xlab("Time")+
scale_color_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_x_continuous(breaks=c(1,2,3,4), labels = c("T1","T2","T3","T4"))+
scale_y_continuous(breaks = seq(-2.5,1.5,.5), limits = c(-2.7,1.5))+
#scale_x_discrete(labels = c("T1","T2","T3","T4"))+
facet_grid(.~title)+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_ac_growth
multiplot(graph_ec_growth, graph_ac_growth, cols=2)
graph_approx_longitudinal = ggplot(subset(approx_longitudinal_gather_summary,!is.na(Class)), aes(x = time, y = ASWmac.s, group = Class)) +
geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (ASWmac.s-se), ymax = (ASWmac.s+se), fill = Class),
alpha = .15, color = NA)+
geom_line(aes(color = Class)) +
scale_color_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
facet_grid(.~task)+
scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme_bw()+
ylab("Approximate")+
xlab("Time")+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_approx_longitudinal
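### Letter-Word Identification (LWIDWS): same long-format pipeline as the
### numerical outcomes above.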
lw_longitudinal = spatial_profiles_database_atleast2tp %>%
dplyr::select("SubjectID","T0SY1Y2G_StartGrade",
"T1LWIDWS.s","T2LWIDWS.s","T3LWIDWS.s","T4LWIDWS.s")
lw_longitudinal_gather = gather(lw_longitudinal, "time", "LWIDWS.s", -c("SubjectID","T0SY1Y2G_StartGrade"))
lw_longitudinal_gather$SubjectID = as.factor(as.character(lw_longitudinal_gather$SubjectID))
lw_longitudinal_gather = lw_longitudinal_gather %>%
left_join(T1_complete_class[c("SubjectID","Class")], by ="SubjectID")
lw_longitudinal_gather = lw_longitudinal_gather %>%
left_join(TAll_complete_info[c("SubjectID","T0SGENGender","T0PDEMOMaxParentEducation")], by ="SubjectID")
lw_longitudinal_gather_summary = summarySE(lw_longitudinal_gather, "LWIDWS.s", c("time","Class"), na.rm = T)
lw_longitudinal_gather_summary$time = as.factor(lw_longitudinal_gather_summary$time)
#lw_longitudinal_gather_summary$Class = as.factor(lw_longitudinal_gather_summary$Class)
lw_longitudinal_gather_summary$task = "LetterWordID"
lw_longitudinal_gather$time_num = as.numeric(as.factor(lw_longitudinal_gather$time))
#lw_longitudinal_gather$Class = as.factor(as.character(lw_longitudinal_gather$Class))
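### Growth model for Letter-Word ID: Class x time plus covariates, with a
### random intercept per child.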
lw_model <- lmer(LWIDWS.s ~ Class * (time_num) + T0SGENGender + T0PDEMOMaxParentEducation +T0SY1Y2G_StartGrade+
(1|SubjectID), data = subset(lw_longitudinal_gather, !is.na(Class)), control = lmerControl(optimizer = "bobyqa"))
summary(lw_model)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## LWIDWS.s ~ Class * (time_num) + T0SGENGender + T0PDEMOMaxParentEducation +
## T0SY1Y2G_StartGrade + (1 | SubjectID)
## Data: subset(lw_longitudinal_gather, !is.na(Class))
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 1993
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.037 -0.527 -0.013 0.507 3.743
##
## Random effects:
## Groups Name Variance Std.Dev.
## SubjectID (Intercept) 0.713 0.844
## Residual 0.176 0.419
## Number of obs: 1043, groups: SubjectID, 304
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -1.25285 0.37911 322.98906 -3.30
## ClassLow\nMental Rotation 0.05795 0.18492 487.82680 0.31
## ClassHigh\nPerformance 0.49943 0.18150 487.65554 2.75
## time_num -0.05881 0.03145 740.03321 -1.87
## T0SGENGender 0.09089 0.10213 298.07902 0.89
## T0PDEMOMaxParentEducation 0.05563 0.02150 297.47294 2.59
## T0SY1Y2G_StartGrade -0.00179 0.04073 299.24199 -0.04
## ClassLow\nMental Rotation:time_num 0.08471 0.03681 740.88317 2.30
## ClassHigh\nPerformance:time_num 0.06790 0.03593 741.78090 1.89
## Pr(>|t|)
## (Intercept) 0.0011 **
## ClassLow\nMental Rotation 0.7541
## ClassHigh\nPerformance 0.0061 **
## time_num 0.0619 .
## T0SGENGender 0.3742
## T0PDEMOMaxParentEducation 0.0101 *
## T0SY1Y2G_StartGrade 0.9650
## ClassLow\nMental Rotation:time_num 0.0217 *
## ClassHigh\nPerformance:time_num 0.0592 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) ClsLMR ClssHP tim_nm T0SGEN T0PDEM T0SY1Y CLMR:_
## ClssLwMntlR -0.371
## ClssHghPrfr -0.265 0.757
## time_num -0.202 0.416 0.424
## T0SGENGendr -0.380 0.113 0.060 0.000
## T0PDEMOMxPE -0.795 -0.044 -0.144 -0.001 -0.058
## T0SY1Y2G_SG -0.245 0.005 -0.077 -0.001 0.028 0.163
## ClssLMRtt:_ 0.174 -0.485 -0.362 -0.854 0.001 -0.001 0.003
## ClssHPrfr:_ 0.175 -0.365 -0.485 -0.875 -0.004 0.006 -0.004 0.748
Anova(lw_model, test.statistic = "F", type= "III")
## Analysis of Deviance Table (Type III Wald F tests with Kenward-Roger df)
##
## Response: LWIDWS.s
## F Df Df.res Pr(>F)
## (Intercept) 10.92 1 323 0.00106 **
## Class 7.46 2 491 0.00064 ***
## time_num 3.50 1 740 0.06191 .
## T0SGENGender 0.79 1 298 0.37424
## T0PDEMOMaxParentEducation 6.70 1 298 0.01014 *
## T0SY1Y2G_StartGrade 0.00 1 300 0.96499
## Class:time_num 2.68 2 743 0.06926 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
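### Within-class time slopes for Letter-Word ID and pairwise slope contrasts.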
em=emtrends(lw_model, pairwise ~ Class, var="time_num", mult.name = "Class")
summary(em, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emtrends
## Class time_num.trend SE df lower.CL upper.CL t.ratio
## Low\nPerformance -0.05881 0.0315 740 -0.1206 0.00294 -1.870
## Low\nMental Rotation 0.02589 0.0191 744 -0.0117 0.06345 1.354
## High\nPerformance 0.00909 0.0174 748 -0.0250 0.04317 0.523
## p.value
## 0.0619
## 0.1763
## 0.6008
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.0847 0.0368 741 -0.1570 -0.01243
## Low\nPerformance - High\nPerformance -0.0679 0.0359 742 -0.1384 0.00263
## Low\nMental Rotation - High\nPerformance 0.0168 0.0258 745 -0.0339 0.06752
## t.ratio p.value
## -2.301 0.0217
## -1.890 0.0592
## 0.650 0.5156
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
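### Class contrasts in Letter-Word ID at each timepoint (time_num = 1-4).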
emeans=emmeans(lw_model, pairwise ~ Class|time_num, mult.name = "Class", at=list(time_num=c(1,2,3,4)))
summary(emeans, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## time_num = 1:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.353 0.1466 361 -0.6414 -0.0650 -2.410 0.0165
## Low\nMental Rotation -0.210 0.0868 363 -0.3811 -0.0399 -2.426 0.0157
## High\nPerformance 0.214 0.0776 370 0.0616 0.3667 2.760 0.0061
##
## time_num = 2:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.412 0.1400 302 -0.6875 -0.1365 -2.943 0.0035
## Low\nMental Rotation -0.185 0.0828 304 -0.3477 -0.0216 -2.228 0.0266
## High\nPerformance 0.223 0.0738 305 0.0780 0.3684 3.026 0.0027
##
## time_num = 3:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.471 0.1404 305 -0.7471 -0.1946 -3.354 0.0009
## Low\nMental Rotation -0.159 0.0833 309 -0.3226 0.0051 -1.906 0.0575
## High\nPerformance 0.232 0.0740 308 0.0868 0.3779 3.141 0.0019
##
## time_num = 4:
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.530 0.1476 371 -0.8199 -0.2393 -3.588 0.0004
## Low\nMental Rotation -0.133 0.0879 382 -0.3057 0.0401 -1.511 0.1317
## High\nPerformance 0.241 0.0781 380 0.0878 0.3950 3.090 0.0022
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## time_num = 1:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.143 0.170 362 -0.477 0.1919
## Low\nPerformance - High\nPerformance -0.567 0.167 362 -0.896 -0.2388
## Low\nMental Rotation - High\nPerformance -0.425 0.117 365 -0.655 -0.1943
## t.ratio p.value
## -0.838 0.4023
## -3.396 0.0008
## -3.625 0.0003
##
## time_num = 2:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.227 0.163 303 -0.547 0.0925
## Low\nPerformance - High\nPerformance -0.635 0.160 303 -0.949 -0.3213
## Low\nMental Rotation - High\nPerformance -0.408 0.112 304 -0.628 -0.1879
## t.ratio p.value
## -1.399 0.1628
## -3.982 0.0001
## -3.649 0.0003
##
## time_num = 3:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.312 0.163 306 -0.633 0.0088
## Low\nPerformance - High\nPerformance -0.703 0.160 306 -1.018 -0.3885
## Low\nMental Rotation - High\nPerformance -0.391 0.112 309 -0.612 -0.1703
## t.ratio p.value
## -1.914 0.0566
## -4.397 <.0001
## -3.485 0.0006
##
## time_num = 4:
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.397 0.172 374 -0.734 -0.0592
## Low\nPerformance - High\nPerformance -0.771 0.168 371 -1.102 -0.4404
## Low\nMental Rotation - High\nPerformance -0.374 0.118 380 -0.607 -0.1414
## t.ratio p.value
## -2.311 0.0214
## -4.585 <.0001
## -3.160 0.0017
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
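### Marginal class differences in Letter-Word ID, evaluated at the mean of
### time_num and averaged over gender.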
emeans=emmeans(lw_model, pairwise ~ Class, mult.name = "Class")
summary(emeans, infer=c(TRUE,TRUE), null=0, type = "response", adjust = "none")
## $emmeans
## Class emmean SE df lower.CL upper.CL t.ratio p.value
## Low\nPerformance -0.439 0.1393 296 -0.713 -0.1646 -3.149 0.0018
## Low\nMental Rotation -0.173 0.0825 298 -0.335 -0.0105 -2.095 0.0370
## High\nPerformance 0.227 0.0734 298 0.083 0.3718 3.099 0.0021
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL
## Low\nPerformance - Low\nMental Rotation -0.266 0.162 297 -0.584 0.0524
## Low\nPerformance - High\nPerformance -0.666 0.159 296 -0.978 -0.3538
## Low\nMental Rotation - High\nPerformance -0.400 0.111 298 -0.619 -0.1813
## t.ratio p.value
## -1.644 0.1013
## -4.197 <.0001
## -3.598 0.0004
##
## Results are averaged over the levels of: T0SGENGender
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
plot(ggpredict(lw_model, terms=c("time_num","Class")))
#######
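### Figure: model-predicted Letter-Word ID trajectories by class.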
graph_lw_model = ggpredict(lw_model, terms=c("time_num","Class"))
levels(graph_lw_model$group)[levels(graph_lw_model$group ) == "2"] <- "Low\nPerformance"
levels(graph_lw_model$group)[levels(graph_lw_model$group ) == "1"] <- "Low\nMental Rotation"
levels(graph_lw_model$group)[levels(graph_lw_model$group ) == "3"] <- "High\nPerformance"
graph_lw_model$group = factor(graph_lw_model$group, levels=c("Low\nPerformance",
"Low\nMental Rotation", "High\nPerformance"))
graph_lw_model$x = as.factor(as.numeric(graph_lw_model$x))
graph_lw_model$title = "Letter-Word\nIdentification"
graph_lw_growth = ggplot(graph_lw_model, aes(x =as.numeric(x), y = predicted, colour = group)) +
geom_hline(yintercept = .0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = group),
alpha = .15, color = NA)+
geom_line() +
#scale_y_continuous(breaks = seq(0,1,.25), limits = c(0,1.1))+
theme_bw()+
ylab("Predicted (Standardized Scores)")+
xlab("Time")+
scale_color_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_fill_manual(values = c("#4575b4","#4d4d4d","#b2182b"))+
scale_x_continuous(breaks=c(1,2,3,4), labels = c("T1","T2","T3","T4"))+
scale_y_continuous(breaks = seq(-2.5,1.5,.5), limits = c(-2.7,1.5))+
#scale_x_discrete(labels = c("T1","T2","T3","T4"))+
facet_grid(.~title)+
theme(legend.position="none",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_lw_growth
multiplot(graph_ec_growth, graph_ac_growth,graph_lw_growth, cols=3)
graph_lw_longitudinal = ggplot(subset(lw_longitudinal_gather_summary,!is.na(Class)), aes(x = time, y = LWIDWS.s, group = Class)) +
geom_hline(yintercept = 0, linetype = "dashed")+
#scale_fill_manual(values = c("#000000","#228B22"))+
#scale_color_manual(values = c("#000000","#228B22"))+
geom_ribbon(aes(ymin = (LWIDWS.s-se), ymax = (LWIDWS.s+se), fill = Class),
alpha = .15, color = NA)+
geom_line(aes(color = Class)) +
scale_color_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
scale_fill_manual(values = c("#e41a1c","#377eb8","#4daf4a","#984ea3"))+
facet_grid(.~task)+
scale_y_continuous(breaks = seq(-2.5,1,.5), limits = c(-2.7,1.0))+
theme_bw()+
ylab("LetterWordID")+
xlab("Time")+
theme(legend.position="bottom",
axis.title.x = element_text(size=size_text),
axis.text.x = element_text(size=size_text),
#axis.title.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "white", colour = "grey50"),
strip.background =element_rect(fill="#f0f0f0"),
strip.text = element_text(size = size_text),
axis.text.y = element_text(size=size_text),
axis.title.y = element_text(size=size_text),
legend.text=element_text(size=size_text))
graph_lw_longitudinal
multiplot(graph_panamath_longitudinal,graph_mentalrotation_longitudinal,graph_prop_longitudinal,graph_exactcalculation_longitudinal,graph_workingmemory_longitudinal,graph_approx_longitudinal,graph_lw_longitudinal, cols = 4)
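### Note: multiplot() and summarySE() are assumed to be the Cookbook-for-R helper
### functions sourced earlier in this script. If multiplot() is not available, a
### comparable panel layout can be built with gridExtra -- a minimal sketch using
### the seven plot objects above:
# library(gridExtra)
# grid.arrange(graph_panamath_longitudinal, graph_mentalrotation_longitudinal,
#              graph_prop_longitudinal, graph_exactcalculation_longitudinal,
#              graph_workingmemory_longitudinal, graph_approx_longitudinal,
#              graph_lw_longitudinal, ncol = 4)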