Accuracy Analyses for Categorization Task
Libraries and Data Files
#load libraries
library(dplyr)
library(ez)
library(sciplot)
library(gplots)
library(plotrix)
## Warning: package 'plotrix' was built under R version 4.3.2
library(afex)
## Warning: package 'afex' was built under R version 4.3.3
library(emmeans)
library(mgcv)
library(itsadug)
## Warning: package 'itsadug' was built under R version 4.3.2
## Warning: package 'plotfunctions' was built under R version 4.3.2
library(boot)
library(effsize)
## Warning: package 'effsize' was built under R version 4.3.3
#load data
data<-read.csv("AllData_Categorization.csv", header = T)
Data Preprocessing
#rename useful variables
data$acc<-data$Accuracy
data$sbj<-data$participant
data$block<-data$block.thisN+1
#convert character columns to factors
data <- mutate_if(data, is.character, as.factor)
data$sbj<-as.factor(data$sbj)
#exclude one participant (20335) for not following instructions
data<-droplevels(data[data$sbj!="20335",])
str(data)
## 'data.frame': 21120 obs. of 37 variables:
## $ st_exp : int 4 30 48 18 9 55 24 62 12 34 ...
## $ p : int 48 21 35 13 6 40 17 45 50 25 ...
## $ hue : num -0.05 0.15 0.35 -0.25 -0.35 0.25 0.35 0.15 -0.05 -0.25 ...
## $ border : Factor w/ 2 levels "no","yes": 2 1 1 1 1 1 1 1 2 1 ...
## $ size : num 2.1 2.7 3.1 2.5 2.3 3.3 2.5 3.5 2.3 2.9 ...
## $ correctAns : Factor w/ 2 levels "a","b": 2 1 1 2 2 1 1 1 2 2 ...
## $ practice_trials.thisRepN : logi NA NA NA NA NA NA ...
## $ practice_trials.thisTrialN: logi NA NA NA NA NA NA ...
## $ practice_trials.thisN : logi NA NA NA NA NA NA ...
## $ practice_trials.thisIndex : logi NA NA NA NA NA NA ...
## $ block.thisRepN : int 0 0 0 0 0 0 0 0 0 0 ...
## $ block.thisTrialN : int 0 0 0 0 0 0 0 0 0 0 ...
## $ block.thisN : int 0 0 0 0 0 0 0 0 0 0 ...
## $ block.thisIndex : int 0 0 0 0 0 0 0 0 0 0 ...
## $ trials.thisRepN : int 0 0 0 0 0 0 0 0 0 0 ...
## $ trials.thisTrialN : int 0 1 2 3 4 5 6 7 8 9 ...
## $ trials.thisN : int 0 1 2 3 4 5 6 7 8 9 ...
## $ trials.thisIndex : int 60 36 29 5 34 9 23 33 50 19 ...
## $ thisRow.t : num 111 115 117 119 121 ...
## $ notes : logi NA NA NA NA NA NA ...
## $ Accuracy : int 0 0 1 1 1 1 1 1 1 1 ...
## $ RT : num 2.2247 0.3665 0.3849 0.4082 0.0632 ...
## $ participant : int 17202 17202 17202 17202 17202 17202 17202 17202 17202 17202 ...
## $ categories : Factor w/ 2 levels "AB","BA": 1 1 1 1 1 1 1 1 1 1 ...
## $ group : Factor w/ 4 levels "1100","1600",..: 3 3 3 3 3 3 3 3 3 3 ...
## $ date : Factor w/ 66 levels "2025-02-25_12h00.07.372",..: 2 2 2 2 2 2 2 2 2 2 ...
## $ expName : Factor w/ 1 level "Categorization": 1 1 1 1 1 1 1 1 1 1 ...
## $ psychopyVersion : Factor w/ 1 level "2023.2.3": 1 1 1 1 1 1 1 1 1 1 ...
## $ frameRate : num 60.1 60.1 60.1 60.1 60.1 ...
## $ expStart : Factor w/ 66 levels "2025-02-25 12h00.22.380300 +0200",..: 2 2 2 2 2 2 2 2 2 2 ...
## $ X : logi NA NA NA NA NA NA ...
## $ X.1 : logi NA NA NA NA NA NA ...
## $ X.2 : logi NA NA NA NA NA NA ...
## $ X.3 : logi NA NA NA NA NA NA ...
## $ acc : int 0 0 1 1 1 1 1 1 1 1 ...
## $ sbj : Factor w/ 66 levels "341","1003","1206",..: 10 10 10 10 10 10 10 10 10 10 ...
## $ block : num 1 1 1 1 1 1 1 1 1 1 ...
#calculate per-participant average accuracy
data_av<-aggregate(data$acc, list(data$sbj, data$group), mean)
colnames(data_av) <-c("sbj","group" ,"acc")
#check the learning criterion
#criterion: at least 22/32 correct on border trials (border == "yes") in one or more blocks
cr=22/32
dat_b<-droplevels(data[data$border=="yes",])
dat_b_av<- aggregate(dat_b$acc, list(dat_b$sbj, dat_b$group, dat_b$block), mean)
colnames(dat_b_av)<-c("sbj","group", "block", "acc")
dat_b_av<-dat_b_av[order(dat_b_av$sbj, dat_b_av$block),]
#new column, "yes" if acc>=cr
dat_b_av$cr<-ifelse(dat_b_av$acc>=cr,"yes", "no")
#list the sbj ids that never meet the learning criterion (a "no" in all 4 blocks)
only_no<- dat_b_av[dat_b_av$cr == "no", ]
no_counts<- table(only_no$sbj)
total_blocks <- table(dat_b_av$sbj)
only_no_sbj<- names(no_counts[no_counts == 4 & total_blocks[names(no_counts)] == 4])
only_no_sbj
## [1] "43423" "48888" "63018" "77337" "77623"
rm(only_no,no_counts, total_blocks, cr)
#also exclude participants who received "exclusion" instructions: five of them are added here;
#two others who received these instructions (77337 and 77623) are already in the learning-criterion list above
excl<-c(only_no_sbj, "17202","57393", "94372", "40242", "89443")
#save exclude_list, for use in similarity data analysis
saveRDS(excl, file = "exclude_list.rds")
#keep data only from those participants not in exclude list
data<-droplevels(data[!(data$sbj %in% excl), ])
#Check that there are 14 sbjs per group
data %>%
distinct(sbj, group) %>%
count(group)
## group n
## 1 1100 14
## 2 1600 14
## 3 600 14
## 4 RD 14
#recalculate per-participant average accuracy after exclusions
data_av<-aggregate(data$acc, list(data$sbj, data$group), mean)
colnames(data_av) <-c("sbj","group" ,"acc")
#write data frame, to use it in analysis examining correlation between learning accuracy and perceptual change
write.csv(data_av, "d_cat.csv")
Descriptive Statistics
#by-group accuracy
#"600"
mean(data_av[data_av$group=="600",]$acc)
## [1] 0.8455357
sd(data_av[data_av$group=="600",]$acc)
## [1] 0.03759432
#"1100"
mean(data_av[data_av$group=="1100",]$acc)
## [1] 0.8654018
sd(data_av[data_av$group=="1100",]$acc)
## [1] 0.0325329
"#1600"
## [1] "#1600"
mean(data_av[data_av$group=="1600",]$acc)
## [1] 0.8814732
sd(data_av[data_av$group=="1600",]$acc)
## [1] 0.02297915
#"RD"
mean(data_av[data_av$group=="RD",]$acc)
## [1] 0.8763393
sd(data_av[data_av$group=="RD",]$acc)
## [1] 0.0308073
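#the same by-group means and SDs can be computed in one call (sketch, output not shown):
data_av %>%
  group_by(group) %>%
  summarise(mean_acc = mean(acc), sd_acc = sd(acc), n = n())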
Inferential Statistics - ANOVA
######################################################
# one-way ANOVA (factor: group) on learning accuracy
######################################################
#one-way anova
ezANOVA(data=data_av, dv=acc, wid=sbj, between=group, type=3)
## Coefficient covariances computed by hccm()
## $ANOVA
## Effect DFn DFd F p p<.05 ges
## 2 group 3 52 3.591778 0.01957389 * 0.1716492
##
## $`Levene's Test for Homogeneity of Variance`
## DFn DFd SSn SSd F p p<.05
## 1 3 52 0.0002664621 0.02787528 0.1656907 0.9190173
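#optional cross-check with afex (loaded above but not otherwise used); this sketch
#should reproduce the type-3 ANOVA result (output not shown):
aov_ez(id = "sbj", dv = "acc", data = data_av, between = "group")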
#refit the same model with aov(), in a format suitable for post-hoc comparisons
model <- aov(acc ~ group, data = data_av)
#calculate estimated marginal means, for post-hoc comparisons.
emms <- emmeans::emmeans(model, ~ group)
#post-hoc comparisons
pairwise_comparisons <- contrast(emms, method = "pairwise", adjust = "bonferroni")
summary(pairwise_comparisons)
## contrast estimate SE df t.ratio p.value
## 1100 - 1600 -0.01607 0.0119 52 -1.353 1.0000
## 1100 - 600 0.01987 0.0119 52 1.673 0.6022
## 1100 - RD -0.01094 0.0119 52 -0.921 1.0000
## 1600 - 600 0.03594 0.0119 52 3.026 0.0231
## 1600 - RD 0.00513 0.0119 52 0.432 1.0000
## 600 - RD -0.03080 0.0119 52 -2.594 0.0738
##
## P value adjustment: bonferroni method for 6 tests
# Calculate Cohen's d for significant differences.
cohen.d(data_av$acc[data_av$group == "1600"],
data_av$acc[data_av$group == "600"],
pooled = TRUE)
##
## Cohen's d
##
## d estimate: 1.153475 (large)
## 95 percent confidence interval:
## lower upper
## 0.3144359 1.9925144
cohen.d(data_av$acc[data_av$group == "RD"],
data_av$acc[data_av$group == "600"],
pooled = TRUE)
##
## Cohen's d
##
## d estimate: 0.8962667 (large)
## 95 percent confidence interval:
## lower upper
## 0.08127669 1.71125673
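#alternative (sketch, output not shown): standardized pairwise differences straight from the
#ANOVA model via emmeans; these use the model RMSE rather than the two-group pooled SD,
#so the values can differ slightly from cohen.d() above
eff_size(emms, sigma = sigma(model), edf = df.residual(model))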
Learning-Curves Plot
# calculate average performance per sbj and block
data_bl<-aggregate(data$acc, list(data$sbj, data$group, data$block), mean)
colnames(data_bl) <-c("sbj","group", "block", "acc")
#calculate average accuracy and SE per group and block
data_gr<-aggregate(data_bl$acc, list(data_bl$group, data_bl$block), mean)
colnames(data_gr)<-c("group","block", "acc")
temp<- aggregate(data_bl$acc, list(data_bl$group, data_bl$block), se)
data_gr$se<-temp$x
rm(temp)
data_gr<-data_gr[order(data_gr$group, data_gr$block),]
#xlb="Block\n Error Bars: +/- 1 SE"
xlb="Block"
ylb="Proportion Correct"
mn="Categorization Accuracy"
off=0.05
pch=c(15,16,17,18)
lty=c(2,3,4,5)
col=c("grey70","grey60","grey50", "grey40" )
#"600"
plotCI(x=1:4, y=data_gr[data_gr$group=="600",]$acc, uiw=data_gr[data_gr$group=="600",]$se, bty="n", ylim=c(0.6,1),xlim=c(1, 4.5), xlab=xlb, ylab=ylb, main=mn, las=1, xaxt="n", yaxt="n", pch=pch[1], col=col[1], gap=0)
lines(x=1:4, y=data_gr[data_gr$group=="600",]$acc, lty=lty[1], col=col[1], lwd=2)
#"1100"
plotCI(x=1:4+off, y=data_gr[data_gr$group=="1100",]$acc, uiw=data_gr[data_gr$group=="1100",]$se, add=T, pch=pch[2], col=col[2], gap=0)
lines(x=1:4+off, y=data_gr[data_gr$group=="1100",]$acc, lty=lty[2], col=col[2], lwd=2)
#"1600"
plotCI(x=1:4+2*off, y=data_gr[data_gr$group=="1600",]$acc, uiw=data_gr[data_gr$group=="1600",]$se, add=T, pch=pch[3], col=col[3], gap=0)
lines(x=1:4+2*off, y=data_gr[data_gr$group=="1600",]$acc, lty=lty[3], col=col[3], lwd=2)
#"RD"
plotCI(x=1:4+3*off, y=data_gr[data_gr$group=="RD",]$acc, uiw=data_gr[data_gr$group=="RD",]$se, add=T, pch=pch[4], col=col[4], gap=0)
lines(x=1:4+3*off, y=data_gr[data_gr$group=="RD",]$acc, lty=lty[4], col=col[4], lwd=2)
axis(side=1, at=c(1,2,3,4))
axis(side=2, at=c(0.6, 0.7, 0.8, 0.9, 1.0),labels=c("0.0", "0.7", "0.8", "0.9", "1.0") , las=2)
axis.break(2, 0.65, style="slash")
legend(x=2.5, y=0.8, legend=c("600 ms", "1100 ms", "1600 ms", "RD"), col=col, lty=lty, pch=pch, bty="n", seg.len=4, lwd=2)

Inferential Statistics - GAM
#create the trial variable: 1:320 recycles across participants (rows are ordered by participant, 320 trials each)
tr<-1:320
data$trial<-tr
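#order-robust alternative (sketch): number trials within each participant and check that it
#matches the recycled 1:320 vector; the check fails if the ordering assumption does not hold
trial_chk <- ave(seq_along(data$sbj), data$sbj, FUN = seq_along)
stopifnot(all(trial_chk == data$trial))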
#null model (no group effect): overall smooth of trial plus per-participant random smooths
m0<-bam(acc~1 + s(trial) + s(trial, sbj, bs="fs", m=1), data=data, family=binomial)
## Warning in gam.side(sm, X, tol = .Machine$double.eps^0.5): model has repeated
## 1-d smooths of same variable.
gam.check(m0)

##
## Method: fREML Optimizer: perf newton
## full convergence after 6 iterations.
## Gradient range [-6.051167e-08,1.232092e-09]
## (score 25241.88 & scale 1).
## Hessian positive definite, eigenvalue range [2.471305,12.06464].
## Model rank = 514 / 514
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial) 9.00 7.95 0.94 0.055 .
## s(trial,sbj) 504.00 106.12 0.94 0.070 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#edf is not close to k', so we are ok. We do not need to increase k
#add a fixed effect of group
m1<- bam(acc~ 1 + group + s(trial) + s(trial, sbj, bs="fs", m=1), data=data, family=binomial )
## Warning in gam.side(sm, X, tol = .Machine$double.eps^0.5): model has repeated
## 1-d smooths of same variable.
gam.check(m1)

##
## Method: fREML Optimizer: perf newton
## full convergence after 6 iterations.
## Gradient range [-6.042705e-08,1.128063e-09]
## (score 25256.22 & scale 1).
## Hessian positive definite, eigenvalue range [2.472525,11.62358].
## Model rank = 517 / 517
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial) 9.00 7.95 0.94 0.21
## s(trial,sbj) 504.00 101.54 0.94 0.20
#edf is not close to k', so we are ok. We do not need to increase k
compareML(m0,m1)
## m0: acc ~ 1 + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## m1: acc ~ 1 + group + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## Model m0 preferred: lower fREML score (14.335), and lower df (3.000).
## -----
## Model Score Edf Difference Df
## 1 m1 25256.22 8
## 2 m0 25241.88 5 14.335 -3.000
##
## AIC difference: 1.18, model m1 has lower AIC.
#m1 preferred: the two models differ in fixed effects, so their fREML scores are not directly comparable; m1 has the lower AIC
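#the AIC comparison underlying this choice can also be obtained directly (sketch, output not shown):
AIC(m0, m1)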
#add group-specific smooths of trial (one per group level), on top of the overall smooth
m2<-bam(acc~ 1 + group + s(trial) + s(trial, by=group) +s(trial, sbj, bs="fs", m=1), data=data, family=binomial )
## Warning in gam.side(sm, X, tol = .Machine$double.eps^0.5): model has repeated
## 1-d smooths of same variable.
gam.check(m2)

##
## Method: fREML Optimizer: perf newton
## full convergence after 13 iterations.
## Gradient range [-8.083924e-05,1.590057e-05]
## (score 25263.29 & scale 1).
## Hessian positive definite, eigenvalue range [6.257526e-06,11.18759].
## Model rank = 552 / 553
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial) 9.00e+00 7.94e+00 0.95 0.15
## s(trial):group1100 9.00e+00 1.99e+00 0.95 0.20
## s(trial):group1600 9.00e+00 1.00e+00 0.95 0.14
## s(trial):group600 9.00e+00 1.00e+00 0.95 0.17
## s(trial):groupRD 9.00e+00 3.10e-04 0.95 0.14
## s(trial,sbj) 5.04e+02 9.97e+01 0.95 0.21
#edf is not close to k', so we are ok. We do not need to increase k
compareML(m2, m1)
## m2: acc ~ 1 + group + s(trial) + s(trial, by = group) + s(trial,
## sbj, bs = "fs", m = 1)
##
## m1: acc ~ 1 + group + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## Model m1 preferred: lower fREML score (7.075), and lower df (8.000).
## -----
## Model Score Edf Difference Df
## 1 m2 25263.29 16
## 2 m1 25256.22 8 -7.075 8.000
##
## AIC difference: 3.38, model m1 has lower AIC.
#m1 preferred
#replace the overall smooth of trial with group-specific smooths only
m3<-bam(acc~ 1 + group + s(trial, by=group) +s(trial, sbj, bs="fs", m=1), data=data, family=binomial )
gam.check(m3)

##
## Method: fREML Optimizer: perf newton
## full convergence after 7 iterations.
## Gradient range [-6.16458e-07,1.217533e-08]
## (score 25251.06 & scale 1).
## Hessian positive definite, eigenvalue range [0.397295,10.94414].
## Model rank = 544 / 544
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial):group1100 9.00 6.17 0.95 0.56
## s(trial):group1600 9.00 3.76 0.95 0.58
## s(trial):group600 9.00 4.90 0.95 0.61
## s(trial):groupRD 9.00 7.30 0.95 0.60
## s(trial,sbj) 504.00 98.80 0.95 0.59
compareML(m3, m1)
## m3: acc ~ 1 + group + s(trial, by = group) + s(trial, sbj, bs = "fs",
## m = 1)
##
## m1: acc ~ 1 + group + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## Chi-square test of fREML scores
## -----
## Model Score Edf Difference Df p.value Sig.
## 1 m1 25256.22 8
## 2 m3 25251.06 14 5.160 6.000 0.112
##
## AIC difference: 21.84, model m1 has lower AIC.
#m1 preferred
#also drop the parametric group term, keeping only the group-specific smooths of trial
m4<-bam(acc~ 1 + s(trial, by=group) +s(trial, sbj, bs="fs", m=1), data=data, family=binomial )
gam.check(m4)

##
## Method: fREML Optimizer: perf newton
## full convergence after 7 iterations.
## Gradient range [-8.847141e-07,1.763018e-08]
## (score 25236.33 & scale 1).
## Hessian positive definite, eigenvalue range [0.3740603,11.70784].
## Model rank = 541 / 541
##
## Basis dimension (k) checking results. Low p-value (k-index<1) may
## indicate that k is too low, especially if edf is close to k'.
##
## k' edf k-index p-value
## s(trial):group1100 9.00 6.17 0.94 0.22
## s(trial):group1600 9.00 3.68 0.94 0.26
## s(trial):group600 9.00 5.00 0.94 0.28
## s(trial):groupRD 9.00 7.26 0.94 0.26
## s(trial,sbj) 504.00 103.33 0.94 0.26
compareML(m4, m1)
## m4: acc ~ 1 + s(trial, by = group) + s(trial, sbj, bs = "fs", m = 1)
##
## m1: acc ~ 1 + group + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## Chi-square test of fREML scores
## -----
## Model Score Edf Difference Df p.value Sig.
## 1 m1 25256.22 8
## 2 m4 25236.33 11 19.890 3.000 1.187e-08 ***
##
## AIC difference: 23.31, model m1 has lower AIC.
#m1 preferred: again the models differ in fixed effects, so the fREML comparison is not decisive; m1 has the clearly lower AIC
#best-fit model
fm<-m1
summary(fm)
##
## Family: binomial
## Link function: logit
##
## Formula:
## acc ~ 1 + group + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## Parametric coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 1.90115 0.07090 26.813 <2e-16 ***
## group1600 0.13983 0.10124 1.381 0.167
## group600 -0.16031 0.09916 -1.617 0.106
## groupRD 0.09101 0.10083 0.903 0.367
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Approximate significance of smooth terms:
## edf Ref.df Chi.sq p-value
## s(trial) 7.949 8.692 132.3 <2e-16 ***
## s(trial,sbj) 101.537 500.000 217.6 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## R-sq.(adj) = 0.0276 Deviance explained = 3.59%
## fREML = 25256 Scale est. = 1 n = 17920
#to obtain the remaining pairwise group comparisons, re-level the group factor and refit the best model
levels(data$group)
## [1] "1100" "1600" "600" "RD"
data$group_1_<-relevel(data$group, ref="600")
levels(data$group_1_)
## [1] "600" "1100" "1600" "RD"
fm_1<-bam(acc~ 1 + group_1_ + s(trial) + s(trial, sbj, bs="fs", m=1), data=data, family=binomial )
## Warning in gam.side(sm, X, tol = .Machine$double.eps^0.5): model has repeated
## 1-d smooths of same variable.
summary(fm_1)
##
## Family: binomial
## Link function: logit
##
## Formula:
## acc ~ 1 + group_1_ + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## Parametric coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 1.74085 0.06941 25.079 < 2e-16 ***
## group_1_1100 0.16031 0.09916 1.617 0.10595
## group_1_1600 0.30013 0.10021 2.995 0.00275 **
## group_1_RD 0.25131 0.09980 2.518 0.01180 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Approximate significance of smooth terms:
## edf Ref.df Chi.sq p-value
## s(trial) 7.949 8.692 132.3 <2e-16 ***
## s(trial,sbj) 101.537 500.000 217.6 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## R-sq.(adj) = 0.0276 Deviance explained = 3.59%
## fREML = 25256 Scale est. = 1 n = 17920
data$group_2_<-relevel(data$group, ref="1600")
fm_2<-bam(acc~ 1 + group_2_ + s(trial) + s(trial, sbj, bs="fs", m=1), data=data, family=binomial )
## Warning in gam.side(sm, X, tol = .Machine$double.eps^0.5): model has repeated
## 1-d smooths of same variable.
summary(fm_2)
##
## Family: binomial
## Link function: logit
##
## Formula:
## acc ~ 1 + group_2_ + s(trial) + s(trial, sbj, bs = "fs", m = 1)
##
## Parametric coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 2.04098 0.07238 28.198 < 2e-16 ***
## group_2_1100 -0.13983 0.10124 -1.381 0.16724
## group_2_600 -0.30013 0.10021 -2.995 0.00275 **
## group_2_RD -0.04882 0.10187 -0.479 0.63180
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Approximate significance of smooth terms:
## edf Ref.df Chi.sq p-value
## s(trial) 7.949 8.692 132.3 <2e-16 ***
## s(trial,sbj) 101.537 500.000 217.6 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## R-sq.(adj) = 0.0276 Deviance explained = 3.59%
## fREML = 25256 Scale est. = 1 n = 17920
Model-Predictions Plot
labx="Trial"
#labx="Trial\n Error Bands: 95% CI"
main="Categorization Accuracy - Model Predictions"
col=c("grey70","grey60","grey50", "grey40" )
lty=c(2,3,4,5)
#"600"
plot_smooth(fm, view="trial", cond=list(group="600"), rm.ranef=c("s(trial,sbj)"), shade=T,se=1.96, print.summary=F, ylim=c(0.5,1), transform=inv.logit, xlab=labx, ylab="Proportion Correct", lwd=2, las=2, rug=F, col=col[1], lty=lty[1], hide.label=T, main=main, yaxs="i",xaxt="n", yaxt="n")
#"1100"
plot_smooth(fm, view="trial", cond=list(group="1100"), rm.ranef=c("s(trial,sbj)"), shade=T, se=1.96, print.summary=F,transform=inv.logit,lwd=2, rug=FALSE, col=col[2], lty=lty[2], add=T)
#"1600"
plot_smooth(fm, view="trial", cond=list(group="1600"), rm.ranef=c("s(trial,sbj)"), shade=T, se=1.96, print.summary=F,transform=inv.logit,lwd=2, rug=FALSE, col=col[3], lty=lty[3], add=T)
#"RD"
plot_smooth(fm, view="trial", cond=list(group="RD"), rm.ranef=c("s(trial,sbj)"), shade=T, se=1.96, print.summary=F,transform=inv.logit,lwd=2, rug=FALSE, col=col[4], lty=lty[4], add=T)
axis(side=1, at=c(0,50,100,150, 200, 250, 300, 320))
axis(side=2, at=c(0.5, 0.6, 0.7, 0.8, 0.9, 1.0),labels=c("0.0", "0.6", "0.7", "0.8", "0.9", "1.0") , las=2)
axis.break(2, 0.55, style="slash")
legend(x=200, y=0.75, legend=c("600 ms", "1100 ms", "1600 ms", "RD"), col=col, lty=lty, bty="n", seg.len=4, lwd=2)
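#the fitted group differences can also be inspected directly with itsadug::plot_diff;
#a sketch for the 1600 ms vs 600 ms contrast (difference shown on the logit scale,
#with random effects removed):
plot_diff(fm, view="trial", comp=list(group=c("1600", "600")), rm.ranef=TRUE)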

Reaction Times Analysis
rt_by_sbj <- aggregate(RT ~ sbj + group, data = data, FUN = function(x) mean(x, na.rm = TRUE))
mean_rt_by_group <- aggregate(RT ~ group, data = rt_by_sbj, mean)
sd_rt_by_group <- aggregate(RT ~ group, data = rt_by_sbj, sd)
n_by_group <- aggregate(RT ~ group, data = rt_by_sbj, function(x) length(x))
#
rt_summary <- data.frame(
group = mean_rt_by_group$group,
mean_rt = mean_rt_by_group$RT,
sd_rt = sd_rt_by_group$RT,
n = n_by_group$RT
)
rt_summary$se <- rt_summary$sd_rt / sqrt(rt_summary$n)
rt_summary$ci95 <- 1.96 * rt_summary$se
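#display the by-group RT summary (means, SDs, SEs, 95% CIs)
print(rt_summary, digits = 3)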
#one-sample t-test for the RD group, comparison value = 0.6 sec
rt_by_sbj_RD<-droplevels(rt_by_sbj[rt_by_sbj$group=="RD",])
t.test(rt_by_sbj_RD$RT , mu = 0.600)
##
## One Sample t-test
##
## data: rt_by_sbj_RD$RT
## t = 3.9435, df = 13, p-value = 0.001682
## alternative hypothesis: true mean is not equal to 0.6
## 95 percent confidence interval:
## 0.6965478 0.9304929
## sample estimates:
## mean of x
## 0.8135203
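#a standardized effect size for this one-sample comparison, computed as (mean - mu) / SD (sketch):
(mean(rt_by_sbj_RD$RT) - 0.600) / sd(rt_by_sbj_RD$RT)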
Session Information
sessionInfo()
## R version 4.3.1 (2023-06-16 ucrt)
## Platform: x86_64-w64-mingw32/x64 (64-bit)
## Running under: Windows 10 x64 (build 19045)
##
## Matrix products: default
##
##
## locale:
## [1] LC_COLLATE=English_United States.utf8
## [2] LC_CTYPE=English_United States.utf8
## [3] LC_MONETARY=English_United States.utf8
## [4] LC_NUMERIC=C
## [5] LC_TIME=English_United States.utf8
##
## time zone: Europe/Athens
## tzcode source: internal
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] effsize_0.8.1 boot_1.3-28.1 itsadug_2.4.1 plotfunctions_1.4
## [5] mgcv_1.8-42 nlme_3.1-162 emmeans_1.8.8 afex_1.4-1
## [9] lme4_1.1-34 Matrix_1.6-1 plotrix_3.8-4 gplots_3.1.3
## [13] sciplot_1.2-0 ez_4.4-0 dplyr_1.1.3
##
## loaded via a namespace (and not attached):
## [1] gtable_0.3.4 xfun_0.40 bslib_0.5.1
## [4] ggplot2_3.4.3 caTools_1.18.2 lattice_0.21-8
## [7] numDeriv_2016.8-1.1 vctrs_0.6.3 tools_4.3.1
## [10] bitops_1.0-7 generics_0.1.3 parallel_4.3.1
## [13] sandwich_3.0-2 tibble_3.2.1 fansi_1.0.4
## [16] pkgconfig_2.0.3 KernSmooth_2.23-21 lifecycle_1.0.3
## [19] compiler_4.3.1 stringr_1.5.0 munsell_0.5.0
## [22] codetools_0.2-19 lmerTest_3.1-3 carData_3.0-5
## [25] htmltools_0.5.6 sass_0.4.7 yaml_2.3.7
## [28] pillar_1.9.0 car_3.1-2 nloptr_2.0.3
## [31] jquerylib_0.1.4 MASS_7.3-60 cachem_1.0.8
## [34] abind_1.4-5 multcomp_1.4-25 gtools_3.9.4
## [37] tidyselect_1.2.0 digest_0.6.33 mvtnorm_1.2-3
## [40] stringi_1.7.12 reshape2_1.4.4 splines_4.3.1
## [43] fastmap_1.1.1 grid_4.3.1 colorspace_2.1-0
## [46] cli_3.6.1 magrittr_2.0.3 survival_3.5-5
## [49] utf8_1.2.3 TH.data_1.1-2 withr_2.5.0
## [52] scales_1.2.1 estimability_1.4.1 rmarkdown_2.24
## [55] zoo_1.8-12 evaluate_0.21 knitr_1.44
## [58] rlang_1.1.1 Rcpp_1.0.11 xtable_1.8-4
## [61] glue_1.6.2 rstudioapi_0.15.0 minqa_1.2.5
## [64] jsonlite_1.8.7 R6_2.5.1 plyr_1.8.8