Dwell Time by Learning Group and Shape
#Calculate Dwell Time (i.e., the sum of fixation durations within a trial)
data$block<-ifelse(data$trial<25,1,ifelse(data$trial<49,2,ifelse(data$trial<73,3,4)))
#create block variable (used for graphs)
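#sanity check (not part of the original script): the nested ifelse above is
#equivalent to integer division of trial numbers into blocks of 24 trials
stopifnot(all(data$block == (data$trial - 1) %/% 24 + 1))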
data_dw<-aggregate(CURRENT_FIX_DURATION ~ sbj+trial+group+cnd+block+CURRENT_FIX_INTEREST_AREA_LABEL+Response,data,sum,na.rm=T)
data_dw<-data_dw[order(data_dw$sbj, data_dw$trial,data_dw$CURRENT_FIX_INTEREST_AREA_LABEL),]
colnames(data_dw)[6]<-'IA_LABEL'
colnames(data_dw)[8]<-'DWELL_TIME'
data_dw$IA_LABEL<-as.factor(data_dw$IA_LABEL)
str(data_dw)
## 'data.frame': 2915 obs. of 8 variables:
## $ sbj : Factor w/ 48 levels "aggfyt95","agkart97",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ trial : int 1 1 2 2 6 6 11 11 17 17 ...
## $ group : Factor w/ 2 levels "CAT","PA": 1 1 1 1 1 1 1 1 1 1 ...
## $ cnd : Factor w/ 1 level "mixed": 1 1 1 1 1 1 1 1 1 1 ...
## $ block : num 1 1 1 1 1 1 1 1 1 1 ...
## $ IA_LABEL : Factor w/ 2 levels "ideog","label": 1 2 1 2 1 2 1 2 1 2 ...
## $ Response : Factor w/ 2 levels "Correct","Wrong": 1 1 1 1 1 1 1 1 1 1 ...
## $ DWELL_TIME: int 1423 947 917 345 586 1730 649 258 1178 221 ...
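#spot-check (not part of the original script): the aggregated dwell time in the
#first row of data_dw should equal the raw sum of fixation durations for that
#subject, trial, and interest area in the original data frame
s  <- as.character(data_dw$sbj[1])
tr <- data_dw$trial[1]
ia <- as.character(data_dw$IA_LABEL[1])
sum(data$CURRENT_FIX_DURATION[data$sbj == s & data$trial == tr &
                              data$CURRENT_FIX_INTEREST_AREA_LABEL == ia], na.rm = TRUE)
data_dw$DWELL_TIME[1] #should match the sum above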
#Calculate Dwell Time by Learning Group and Shape (coded in IA_LABEL: "label" vs. "ideog")
data_mean<-aggregate(DWELL_TIME~sbj+group+IA_LABEL,data_dw, mean,na.rm=T)
#Category Learning
#Dwell Time on label shapes
mean(data_mean[data_mean$group=='CAT'&data_mean$IA_LABEL=='label',]$DWELL_TIME)
## [1] 668.2807
sd(data_mean[data_mean$group=='CAT'&data_mean$IA_LABEL=='label',]$DWELL_TIME)
## [1] 213.0915
#Dwell Time on ideogram shapes
mean(data_mean[data_mean$group=='CAT'&data_mean$IA_LABEL=='ideog',]$DWELL_TIME)
## [1] 683.8226
sd(data_mean[data_mean$group=='CAT'&data_mean$IA_LABEL=='ideog',]$DWELL_TIME)
## [1] 220.485
#Paired-Associate Learning
#Dwell Time on label shapes
mean(data_mean[data_mean$group=='PA'&data_mean$IA_LABEL=='label',]$DWELL_TIME)
## [1] 726.596
sd(data_mean[data_mean$group=='PA'&data_mean$IA_LABEL=='label',]$DWELL_TIME)
## [1] 225.2125
#Dwell Time on ideogram shapes
mean(data_mean[data_mean$group=='PA'&data_mean$IA_LABEL=='ideog',]$DWELL_TIME)
## [1] 727.5203
sd(data_mean[data_mean$group=='PA'&data_mean$IA_LABEL=='ideog',]$DWELL_TIME)
## [1] 205.9584
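#the same descriptives in a single call (a sketch, not part of the original
#script): per-participant dwell-time means summarised by group and shape type
aggregate(DWELL_TIME ~ group + IA_LABEL, data_mean,
          function(x) c(mean = mean(x), sd = sd(x)))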
Main Analyses
#Test which distribution better fits the data
library(fitdistrplus)
descdist(data_dw$DWELL_TIME, boot = 1000)

## summary statistics
## ------
## min: 93 max: 2464
## median: 590
## mean: 706.9029
## estimated sd: 462.2503
## estimated skewness: 0.9268738
## estimated kurtosis: 3.361398
fg <- fitdist(data_dw$DWELL_TIME, "gamma")
plot(fg)

fn<-fitdist(data_dw$DWELL_TIME, "norm")
plot(fn)

#the gamma distribution appears to fit the data better than the normal
library(MASS) #for boxcox()
bx <- boxcox(lm(DWELL_TIME~1,data_dw))

(lambda <- bx$x[which.max(bx$y)]) # https://stackoverflow.com/questions/33999512/how-to-use-the-box-cox-power-transformation-in-r
## [1] 0.1818182
fl<-fitdist((data_dw$DWELL_TIME^lambda -1)/lambda, "norm")
plot(fl)

# the gamma fit still looks best, even against the Box-Cox-transformed data
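#a more formal complement to the visual comparison (a sketch, not part of the
#original script): gofstat() from fitdistrplus reports AIC/BIC and
#goodness-of-fit statistics for the gamma and normal fits side by side
gofstat(list(fg, fn), fitnames = c("gamma", "normal"))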
#Main model
library(lme4)
#recode group and IA_LABEL factors
data_dw$groupC <- ifelse(data_dw$group=="CAT",-0.5,+0.5)
data_dw$IA_LABELC <- ifelse(data_dw$IA_LABEL=="ideog",-0.5,+0.5)
# this matches contr.sdif(2), but is coded by hand so that the same coding also applies to the random effects
data_dw$trialC <- scale(data_dw$trial)[,1]
# trial is scaled to facilitate convergence
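#quick check of the coding (not part of the original script): each factor level
#should map onto -0.5/+0.5, and trialC should have mean 0 and SD 1
table(data_dw$group, data_dw$groupC)
table(data_dw$IA_LABEL, data_dw$IA_LABELC)
round(c(mean = mean(data_dw$trialC), sd = sd(data_dw$trialC)), 3)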
m8<-glmer(DWELL_TIME~ groupC*IA_LABELC*trialC + (1+IA_LABELC*trialC|sbj),
family ="Gamma"(link=identity),data =data_dw,
control=glmerControl(optCtrl=list(maxfun=3e5)))
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00555894 (tol = 0.002, component 1)
print(summary(m8),corr=F)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: Gamma ( identity )
## Formula: DWELL_TIME ~ groupC * IA_LABELC * trialC + (1 + IA_LABELC * trialC |
## sbj)
## Data: data_dw
## Control: glmerControl(optCtrl = list(maxfun = 3e+05))
##
## AIC BIC logLik deviance df.resid
## 42561.4 42675.0 -21261.7 42523.4 2896
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.4459 -0.7404 -0.1470 0.5528 4.8824
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sbj (Intercept) 1.865e+04 136.5712
## IA_LABELC 9.967e+01 9.9836 0.98
## trialC 2.742e+03 52.3648 0.12 0.32
## IA_LABELC:trialC 2.081e+03 45.6197 -0.15 -0.24 -0.50
## Residual 3.591e-01 0.5993
## Number of obs: 2915, groups: sbj, 48
##
## Fixed effects:
## Estimate Std. Error t value Pr(>|z|)
## (Intercept) 711.8686 7.8328 90.883 < 2e-16 ***
## groupC 55.3903 7.9748 6.946 3.77e-12 ***
## IA_LABELC -5.3668 7.0264 -0.764 0.44498
## trialC 5.3671 7.8818 0.681 0.49590
## groupC:IA_LABELC -0.1898 7.6526 -0.025 0.98021
## groupC:trialC -18.2894 6.6747 -2.740 0.00614 **
## IA_LABELC:trialC -22.9509 9.4362 -2.432 0.01501 *
## groupC:IA_LABELC:trialC 32.4847 7.9663 4.078 4.55e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## Model failed to converge with max|grad| = 0.00555894 (tol = 0.002, component 1)
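#the warning above reports a gradient only slightly above the tolerance; one
#common check (a sketch, not part of the original script) is to restart from
#the fitted values with a different optimizer and compare the estimates
ss <- getME(m8, c("theta", "fixef"))
m8_check <- update(m8, start = ss,
                   control = glmerControl(optimizer = "bobyqa",
                                          optCtrl = list(maxfun = 3e5)))
max(abs(fixef(m8) - fixef(m8_check))) #should be negligible if the fit is stable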
#There is a three-way interaction, so we analyse each group separately
Category-Learning Group
data_dw_CAT<-droplevels(data_dw[data_dw$group == "CAT",])
#str(data_dw_CAT)
##linear model
ml_CAT<-glmer(DWELL_TIME~ IA_LABELC*trialC + (1+IA_LABELC*trialC|sbj),
family ="Gamma"(link=identity),data =data_dw_CAT,
control=glmerControl(optCtrl=list(maxfun=3e5)))
summary(ml_CAT)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: Gamma ( identity )
## Formula: DWELL_TIME ~ IA_LABELC * trialC + (1 + IA_LABELC * trialC | sbj)
## Data: data_dw_CAT
## Control: glmerControl(optCtrl = list(maxfun = 3e+05))
##
## AIC BIC logLik deviance df.resid
## 20822.2 20901.3 -10396.1 20792.2 1419
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.4279 -0.7572 -0.1356 0.5479 4.4207
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sbj (Intercept) 1.916e+04 138.4209
## IA_LABELC 5.561e+00 2.3582 1.00
## trialC 4.094e+03 63.9826 0.21 0.21
## IA_LABELC:trialC 2.274e+03 47.6889 -0.39 -0.39 -0.45
## Residual 3.574e-01 0.5979
## Number of obs: 1434, groups: sbj, 24
##
## Fixed effects:
## Estimate Std. Error t value Pr(>|z|)
## (Intercept) 686.80 11.11 61.823 < 2e-16 ***
## IA_LABELC -13.73 10.84 -1.266 0.205347
## trialC 17.67 10.27 1.721 0.085252 .
## IA_LABELC:trialC -47.04 12.17 -3.864 0.000111 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) IA_LABELC trialC
## IA_LABELC 0.045
## trialC 0.022 0.081
## IA_LABELC:C -0.054 0.054 -0.012
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
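#the fit above is singular (the IA_LABELC slope variance is close to zero and
#its correlation with the intercept is 1.00); a common simplification (a
#sketch, not run in the original script) is to drop the random-effect
#correlations, as is done for the PA group below, and check that the fixed
#effects are essentially unchanged:
#ml_CAT2 <- glmer(DWELL_TIME~ IA_LABELC*trialC + (1|sbj) + (0+IA_LABELC*trialC||sbj),
#                 family ="Gamma"(link=identity), data = data_dw_CAT,
#                 control = glmerControl(optCtrl = list(maxfun = 3e5)))
#print(summary(ml_CAT2), corr = FALSE)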
## GAMMs, to identify the trials on which the label-ideogram difference is significant
library(parallel)
library(mgcv)
library(itsadug)
nc <- detectCores()
cl <- makeCluster(nc-1) # change to nc to use all the cores
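#note (not part of the original script): the cluster cl created above is never
#passed to the bam() calls below; if parallel fitting is wanted, mgcv::bam
#accepts a cluster argument, e.g.
#bam(DWELL_TIME~ 1 + s(trial) + s(trial, sbj, bs="fs", m=1),
#    data=data_dw_CAT, family="Gamma"(link=identity), cluster=cl)
#and the workers can be released afterwards with stopCluster(cl)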
mg0_CAT<- bam(DWELL_TIME~ 1 + s(trial) + s(trial, sbj, bs="fs", m=1), data=data_dw_CAT, family="Gamma"(link=identity))
## Warning in gam.side(sm, X, tol = .Machine$double.eps^0.5): model has repeated 1-
## d smooths of same variable.
#plot(mg0_CAT,pages=1)
gam.check(mg0_CAT)
##
## Method: fREML Optimizer: perf newton
## full convergence after 11 iterations.
## Gradient range [-6.239031e-06,7.211825e-06]
## (score 1333.574 & scale 0.3449663).
## Hessian positive definite, eigenvalue range [6.238844e-06,716.8771].
## Model rank = 226 / 226
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial) 9.0 1.0 1.03 0.99
## s(trial,sbj) 216.0 66.5 1.03 0.98
#seems ok
mg1_CAT<- bam(DWELL_TIME~ 1 + IA_LABEL + s(trial) + s(trial, sbj, bs="fs", m=1), data=data_dw_CAT, family="Gamma"(link=identity))
## Warning in gam.side(sm, X, tol = .Machine$double.eps^0.5): model has repeated 1-
## d smooths of same variable.
#plot(mg1_CAT,pages=1)
gam.check(mg1_CAT)
##
## Method: fREML Optimizer: perf newton
## full convergence after 11 iterations.
## Gradient range [-6.306982e-06,7.201564e-06]
## (score 1329.459 & scale 0.3451667).
## Hessian positive definite, eigenvalue range [6.306792e-06,716.3685].
## Model rank = 227 / 227
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial) 9.0 1.0 1.03 0.99
## s(trial,sbj) 216.0 66.2 1.03 0.95
#seems ok
compareML(mg0_CAT,mg1_CAT)
## mg0_CAT: DWELL_TIME ~ 1 + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## mg1_CAT: DWELL_TIME ~ 1 + IA_LABEL + s(trial) + s(trial, sbj, bs = "fs",
## m = 1)
##
## Chi-square test of fREML scores
## -----
## Model Score Edf Difference Df p.value Sig.
## 1 mg0_CAT 1333.574 5
## 2 mg1_CAT 1329.459 6 4.115 1.000 0.004 **
##
## AIC difference: -2.01, model mg0_CAT has lower AIC.
## Warning in compareML(mg0_CAT, mg1_CAT): Only small difference in fREML...
#mg1_CAT provides the better fit (by the fREML comparison)
mg2_CAT<- bam(DWELL_TIME~ 1 + IA_LABEL + s(trial, by=IA_LABEL) + s(trial, sbj, bs="fs", m=1), data=data_dw_CAT, family="Gamma"(link=identity))
#plot(mg2_CAT,pages=1)
gam.check(mg2_CAT)
##
## Method: fREML Optimizer: perf newton
## full convergence after 5 iterations.
## Gradient range [-1.462802e-08,6.594291e-09]
## (score 1321.335 & scale 0.3427116).
## Hessian positive definite, eigenvalue range [0.129848,715.8643].
## Model rank = 236 / 236
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial):IA_LABELideog 9.00 1.87 1.02 0.98
## s(trial):IA_LABELlabel 9.00 1.77 1.02 0.96
## s(trial,sbj) 216.00 66.00 1.02 0.98
#seems ok
compareML(mg1_CAT,mg2_CAT)
## mg1_CAT: DWELL_TIME ~ 1 + IA_LABEL + s(trial) + s(trial, sbj, bs = "fs",
## m = 1)
##
## mg2_CAT: DWELL_TIME ~ 1 + IA_LABEL + s(trial, by = IA_LABEL) + s(trial,
## sbj, bs = "fs", m = 1)
##
## Chi-square test of fREML scores
## -----
## Model Score Edf Difference Df p.value Sig.
## 1 mg1_CAT 1329.459 6
## 2 mg2_CAT 1321.335 8 8.124 2.000 2.963e-04 ***
##
## AIC difference: 0.16, model mg2_CAT has lower AIC.
#mg2_CAT provides better fit
fgm_CAT<-mg2_CAT
summary(fgm_CAT)
##
## Family: Gamma
## Link function: identity
##
## Formula:
## DWELL_TIME ~ 1 + IA_LABEL + s(trial, by = IA_LABEL) + s(trial,
## sbj, bs = "fs", m = 1)
##
## Parametric coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 659.299 44.642 14.77 <2e-16 ***
## IA_LABELlabel -8.248 16.820 -0.49 0.624
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Approximate significance of smooth terms:
## edf Ref.df F p-value
## s(trial):IA_LABELideog 1.867 2.259 0.885 0.363
## s(trial):IA_LABELlabel 1.769 2.134 0.736 0.551
## s(trial,sbj) 65.996 215.000 2.986 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## R-sq.(adj) = 0.234 Deviance explained = 30.1%
## fREML = 1321.3 Scale est. = 0.34271 n = 1434
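#one way to visualise the two estimated smooths on a common panel (a sketch,
#not part of the original script; the analogous call works for fgm_PA below)
plot_smooth(fgm_CAT, view="trial", plot_all="IA_LABEL", rm.ranef=TRUE,
            main="Category-Learning Group: fitted dwell time by shape type")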
Paired-Associate-Learning Group
data_dw_PA<-droplevels(data_dw[data_dw$group == "PA",])
#str(data_dw_PA)
##linear model
ml_PA<-glmer(DWELL_TIME~ IA_LABELC*trialC + (1|sbj) + (0+IA_LABELC*trialC||sbj),
family ="Gamma"(link=identity),data =data_dw_PA,
control=glmerControl(optCtrl=list(maxfun=3e5)))
print(summary(ml_PA),corr=F)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: Gamma ( identity )
## Formula: DWELL_TIME ~ IA_LABELC * trialC + (1 | sbj) + (0 + IA_LABELC *
## trialC || sbj)
## Data: data_dw_PA
## Control: glmerControl(optCtrl = list(maxfun = 3e+05))
##
## AIC BIC logLik deviance df.resid
## 21736.0 21783.7 -10859.0 21718.0 1472
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.4390 -0.7252 -0.1690 0.5559 5.0103
##
## Random effects:
## Groups Name Variance Std.Dev.
## sbj (Intercept) 1.759e+04 132.6231
## sbj.1 IA_LABELC 9.551e+03 97.7290
## sbj.2 trialC 1.277e+03 35.7320
## sbj.3 IA_LABELC:trialC 3.515e+03 59.2867
## Residual 3.567e-01 0.5973
## Number of obs: 1481, groups: sbj, 24
##
## Fixed effects:
## Estimate Std. Error t value Pr(>|z|)
## (Intercept) 737.416 12.757 57.803 <2e-16 ***
## IA_LABELC -4.037 13.239 -0.305 0.760
## trialC -3.071 10.577 -0.290 0.772
## IA_LABELC:trialC 3.046 10.240 0.298 0.766
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#the full random-effects structure (with correlations) showed convergence issues; dropping the random-effect correlations (the || syntax) resolved them
## GAMMs
mg0_PA<- bam(DWELL_TIME~ 1 + s(trial) + s(trial, sbj, bs="fs", m=1), data=data_dw_PA, family="Gamma"(link=identity))
## Warning in gam.side(sm, X, tol = .Machine$double.eps^0.5): model has repeated 1-
## d smooths of same variable.
#plot(mg0_PA,pages=1)
gam.check(mg0_PA)
##
## Method: fREML Optimizer: perf newton
## full convergence after 7 iterations.
## Gradient range [-1.123803e-06,1.601364e-06]
## (score 1368.529 & scale 0.3535621).
## Hessian positive definite, eigenvalue range [0.7672763,739.7192].
## Model rank = 226 / 226
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial) 9.0 4.9 1.1 1
## s(trial,sbj) 216.0 34.4 1.1 1
#seems ok
mg1_PA<- bam(DWELL_TIME~ 1 + IA_LABEL + s(trial) + s(trial, sbj, bs="fs", m=1), data=data_dw_PA, family="Gamma"(link=identity))
## Warning in gam.side(sm, X, tol = .Machine$double.eps^0.5): model has repeated 1-
## d smooths of same variable.
#plot(mg1_PA,pages=1)
gam.check(mg1_PA)
##
## Method: fREML Optimizer: perf newton
## full convergence after 7 iterations.
## Gradient range [-1.036692e-06,1.492673e-06]
## (score 1364.838 & scale 0.3538948).
## Hessian positive definite, eigenvalue range [0.748888,739.2196].
## Model rank = 227 / 227
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial) 9.00 4.87 1.1 1
## s(trial,sbj) 216.00 34.46 1.1 1
#seems ok
compareML(mg0_PA, mg1_PA)
## mg0_PA: DWELL_TIME ~ 1 + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## mg1_PA: DWELL_TIME ~ 1 + IA_LABEL + s(trial) + s(trial, sbj, bs = "fs",
## m = 1)
##
## Chi-square test of fREML scores
## -----
## Model Score Edf Difference Df p.value Sig.
## 1 mg0_PA 1368.529 5
## 2 mg1_PA 1364.838 6 3.690 1.000 0.007 **
##
## AIC difference: -1.90, model mg0_PA has lower AIC.
## Warning in compareML(mg0_PA, mg1_PA): Only small difference in fREML...
#mg1_PA is preferred
mg2_PA<-bam(DWELL_TIME~ 1 + IA_LABEL + s(trial, by=IA_LABEL) + s(trial, sbj, bs="fs", m=1), data=data_dw_PA, family="Gamma"(link=identity))
#plot(mg2_PA,pages=1)
gam.check(mg2_PA)
##
## Method: fREML Optimizer: perf newton
## full convergence after 12 iterations.
## Gradient range [-8.568301e-06,4.4604e-06]
## (score 1360.89 & scale 0.3546772).
## Hessian positive definite, eigenvalue range [8.568133e-06,738.716].
## Model rank = 236 / 236
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial):IA_LABELideog 9.00 4.42 1.09 1
## s(trial):IA_LABELlabel 9.00 1.00 1.09 1
## s(trial,sbj) 216.00 34.17 1.09 1
compareML(mg1_PA, mg2_PA)
## mg1_PA: DWELL_TIME ~ 1 + IA_LABEL + s(trial) + s(trial, sbj, bs = "fs",
## m = 1)
##
## mg2_PA: DWELL_TIME ~ 1 + IA_LABEL + s(trial, by = IA_LABEL) + s(trial,
## sbj, bs = "fs", m = 1)
##
## Chi-square test of fREML scores
## -----
## Model Score Edf Difference Df p.value Sig.
## 1 mg1_PA 1364.838 6
## 2 mg2_PA 1360.890 8 3.949 2.000 0.019 *
##
## AIC difference: -6.81, model mg1_PA has lower AIC.
## Warning in compareML(mg1_PA, mg2_PA): Only small difference in fREML...
#mg2_PA is preferred by the fREML comparison (note that AIC favours mg1_PA)
fgm_PA<-mg2_PA
summary(fgm_PA)
##
## Family: Gamma
## Link function: identity
##
## Formula:
## DWELL_TIME ~ 1 + IA_LABEL + s(trial, by = IA_LABEL) + s(trial,
## sbj, bs = "fs", m = 1)
##
## Parametric coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 720.773 43.041 16.746 <2e-16 ***
## IA_LABELlabel -9.611 19.336 -0.497 0.619
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Approximate significance of smooth terms:
## edf Ref.df F p-value
## s(trial):IA_LABELideog 4.417 5.427 1.492 0.141
## s(trial):IA_LABELlabel 1.000 1.000 0.180 0.671
## s(trial,sbj) 34.174 215.000 2.160 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## R-sq.(adj) = 0.17 Deviance explained = 21.1%
## fREML = 1360.9 Scale est. = 0.35468 n = 1481
Graphs
#get predicted values excluding random effects
p<-predict(m8, re.form = ~0, type="response")
#add predicted values to data frame
data_dw$pred<-p
#Calculation of 95% CIs for predictions
library(merTools)
CI<-predictInterval(m8, level=0.95, which="fixed")
## Warning in predictInterval(m8, level = 0.95, which = "fixed"): Prediction for
## NLMMs or GLMMs that are not mixed binomial regressions is not tested. Sigma set
## at 1.
## Warning: executing %dopar% sequentially: no parallel backend registered
#estimation of CIs for fixed factors only
#add CI to data frame
data_dw$lwr<-CI$lwr
data_dw$upr<-CI$upr
gr<-aggregate(pred~group+IA_LABEL+trial, data_dw, mean, na.rm=TRUE)
temp1<-aggregate(lwr~group+IA_LABEL+trial, data_dw, mean, na.rm=TRUE)
gr$lwr<-temp1$lwr
temp2<-aggregate(upr~group+IA_LABEL+trial, data_dw, mean, na.rm=TRUE)
gr$upr<-temp2$upr
#note on the CIs: the lme4 documentation for predict.merMod
#(https://rdrr.io/cran/lme4/man/predict.merMod.html) recommends bootMer() for
#prediction intervals; predictInterval() above is used as a faster approximation
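#a minimal sketch of the bootMer() approach mentioned above (not run in the
#original script; nsim kept small here purely for illustration):
#boo <- bootMer(m8, FUN=function(fit) predict(fit, re.form=~0, type="response"),
#               nsim=200)
#data_dw$lwr_boot <- apply(boo$t, 2, quantile, probs=0.025)
#data_dw$upr_boot <- apply(boo$t, 2, quantile, probs=0.975)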
#aggregate data by block, for plot
####################################
library(sciplot)
#per-participant means by block (this data frame was previously named "data_gr")
data_bl<-aggregate(DWELL_TIME~ sbj+block+IA_LABEL+group,data_dw,mean,na.rm=T)
data_bl<-data_bl[order(data_bl$sbj, data_bl$block),]
#means across participants by block (this data frame was previously named "data_gr2")
data_gr<-aggregate(DWELL_TIME~ block+IA_LABEL+group,data_bl,mean,na.rm=T)
temp<-aggregate(DWELL_TIME~ block+IA_LABEL+group,data_bl,se)
data_gr$se<-temp$DWELL_TIME
data_gr<-data_gr[order(data_gr$block),]
data_gr$x<-rep(c(12,36,60,84), each=4) #x positions: approximate midpoints of the four 24-trial blocks
##################
# Graph for Paper
##################
library(gplots)
par(mfrow=c(1,2), oma=c(0,0,2,0))
ylm<-c(400, 1000)
main="Category-Learning Group"
ylb<-"Dwell Time (msec)"
xlb<-"Trial"
col<-c("red" ,"blue")
colbnd<-c(adjustcolor("#D0A0A0",alpha.f=0.5),adjustcolor("#A0A0D0" ,alpha.f=0.5))
lintyp<-c("solid", "solid")
pchar=c(15,17)
ylg<-550
xlg<-30
offset<-0.2
#Category Learning Group
plot(gr[gr$group=='CAT'& gr$IA_LABEL=='ideog',]$pred, ylim=ylm, type="l", main = main, las=2, bty="n", xaxt="n", ylab=ylb, xlab=xlb, col=col[2], cex.axis=0.8, lwd=2)
polygon(c(gr[gr$group=='CAT'& gr$IA_LABEL=='ideog',]$trial,rev(gr[gr$group=='CAT'& gr$IA_LABEL=='ideog',]$trial)),c(gr[gr$group=='CAT'& gr$IA_LABEL=='ideog',]$lwr,rev(gr[gr$group=='CAT'& gr$IA_LABEL=='ideog',]$upr)),col = colbnd[2], border = FALSE)
plotCI(data_gr[data_gr$group=='CAT' & data_gr$IA_LABEL=='ideog',]$x, data_gr[data_gr$group=='CAT'& data_gr$IA_LABEL=='ideog',]$DWELL_TIME,uiw=data_gr[data_gr$group=='CAT' & data_gr$IA_LABEL=='ideog',]$se, add=TRUE, pch=pchar[2], col=col[2])
lines(gr[gr$group=='CAT'& gr$IA_LABEL=='label',]$pred, col=col[1], lwd=2)
polygon(c(gr[gr$group=='CAT'& gr$IA_LABEL=='label',]$trial,rev(gr[gr$group=='CAT'& gr$IA_LABEL=='label',]$trial)),c(gr[gr$group=='CAT'& gr$IA_LABEL=='label',]$lwr,rev(gr[gr$group=='CAT'& gr$IA_LABEL=='label',]$upr)),col = colbnd[1], border = FALSE)
plotCI(data_gr[data_gr$group=='CAT' & data_gr$IA_LABEL=='label',]$x+offset, data_gr[data_gr$group=='CAT'& data_gr$IA_LABEL=='label',]$DWELL_TIME,uiw=data_gr[data_gr$group=='CAT' & data_gr$IA_LABEL=='label',]$se, add=TRUE, pch=pchar[1], col=col[1])
axis(side = 1, at =c(0,24,48,72,96), labels =c(0,24,48,72,96), cex.axis = 0.8)
leg<-c("Label Shapes", "Ideogram Shapes")
legend(x=xlg, y=ylg, legend=leg, lty=lintyp, bty="n", col=col, lwd=2, seg.len=2.0)
legend(x=xlg-2, y=ylg, legend=c("", ""), bty="n", col=col, pch=pchar)
#PA Learning Group
main="Paired-Associate Group"
plot(gr[gr$group=='PA'& gr$IA_LABEL=='ideog',]$pred, ylim=ylm, type="l", main = main, las=2, bty="n", xaxt="n", ylab=ylb, xlab=xlb, col=col[2], cex.axis=0.8, lwd=2)
polygon(c(gr[gr$group=='PA'& gr$IA_LABEL=='ideog',]$trial,rev(gr[gr$group=='PA'& gr$IA_LABEL=='ideog',]$trial)),c(gr[gr$group=='PA'& gr$IA_LABEL=='ideog',]$lwr,rev(gr[gr$group=='PA'& gr$IA_LABEL=='ideog',]$upr)),col = colbnd[2], border = FALSE)
plotCI(data_gr[data_gr$group=='PA' & data_gr$IA_LABEL=='ideog',]$x, data_gr[data_gr$group=='PA'& data_gr$IA_LABEL=='ideog',]$DWELL_TIME,uiw=data_gr[data_gr$group=='PA' & data_gr$IA_LABEL=='ideog',]$se, add=TRUE, pch=pchar[2], col=col[2])
lines(gr[gr$group=='PA'& gr$IA_LABEL=='label',]$pred, col=col[1], lwd=2)
polygon(c(gr[gr$group=='PA'& gr$IA_LABEL=='label',]$trial,rev(gr[gr$group=='PA'& gr$IA_LABEL=='label',]$trial)),c(gr[gr$group=='PA'& gr$IA_LABEL=='label',]$lwr,rev(gr[gr$group=='PA'& gr$IA_LABEL=='label',]$upr)),col = colbnd[1], border = FALSE)
plotCI(data_gr[data_gr$group=='PA' & data_gr$IA_LABEL=='label',]$x+offset, data_gr[data_gr$group=='PA'& data_gr$IA_LABEL=='label',]$DWELL_TIME,uiw=data_gr[data_gr$group=='PA' & data_gr$IA_LABEL=='label',]$se, add=TRUE, pch=pchar[1], col=col[1])
axis(side = 1, at =c(0,24,48,72,96), labels =c(0,24,48,72,96), cex.axis = 0.8)
leg<-c("Label Shapes", "Ideogram Shapes")
legend(x=xlg, y=ylg, legend=leg, lty=lintyp, bty="n", col=col, lwd=2, seg.len=2.0)
legend(x=xlg-2, y=ylg, legend=c("", ""), bty="n", col=col, pch=pchar)
mtext('Discrimination Session, Average Dwell Time', outer = TRUE, font=2, cex=1.2)

par(mfrow=c(1,1), oma = c(0, 0, 0, 0))
###################
# Additional graph: difference in Dwell Time between Label and Ideogram Shapes,
# as estimated by the GAMMs
par(mfrow=c(2,1), oma=c(0,0,3,0))
mn1<-"Category-Learning Group"
mn2<-"Paired-Associate Group"
plot_diff(fgm_CAT, view="trial", comp=list(IA_LABEL=c("label", "ideog")),rm.ranef=TRUE, main=mn1)
## Summary:
## * trial : numeric predictor; with 100 values ranging from 1.000000 to 96.000000.
## * sbj : factor; set to the value(s): aggfyt95. (Might be canceled as random effect, check below.)
## * NOTE : The following random effects columns are canceled: s(trial,sbj)
##
##
## Difference is not significant.
plot_diff(fgm_PA, view="trial", comp=list(IA_LABEL=c("label", "ideog")),rm.ranef=TRUE, main=mn2)
## Summary:
## * trial : numeric predictor; with 100 values ranging from 1.000000 to 96.000000.
## * sbj : factor; set to the value(s): agkart97. (Might be canceled as random effect, check below.)
## * NOTE : The following random effects columns are canceled: s(trial,sbj)
##
##
## Difference is not significant.
mtext('Discrimination Session\n Difference in Dwell Time Between Label and Ideogram Shapes', outer = TRUE, font=2, cex=1.2)

par(mfrow=c(1,1), oma = c(0, 0, 0, 0))