Similarity Ratings Analyses
Libraries and Data Files
#load libraries
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(ez)
library(car)
## Loading required package: carData
##
## Attaching package: 'car'
## The following object is masked from 'package:dplyr':
##
## recode
library(blme)
## Warning: package 'blme' was built under R version 4.5.2
## Loading required package: lme4
## Loading required package: Matrix
library(parallel)
library(afex)
## Warning: package 'afex' was built under R version 4.5.2
## ************
## Welcome to afex. For support visit: http://afex.singmann.science/
## - Functions for ANOVAs: aov_car(), aov_ez(), and aov_4()
## - Methods for calculating p-values with mixed(): 'S', 'KR', 'LRT', and 'PB'
## - 'afex_aov' and 'mixed' objects can be passed to emmeans() for follow-up tests
## - Get and set global package options with: afex_options()
## - Set sum-to-zero contrasts globally: set_sum_contrasts()
## - For example analyses see: browseVignettes("afex")
## ************
##
## Attaching package: 'afex'
## The following object is masked from 'package:lme4':
##
## lmer
library(emmeans)
## Warning: package 'emmeans' was built under R version 4.5.2
## Welcome to emmeans.
## Caution: You lose important information if you filter this package's results.
## See '? untidy'
library(lmerTest)
## Warning: package 'lmerTest' was built under R version 4.5.2
##
## Attaching package: 'lmerTest'
##
## The following object is masked from 'package:lme4':
##
## lmer
## The following object is masked from 'package:stats':
##
## step
library(sciplot)
## Warning: package 'sciplot' was built under R version 4.5.2
library(gplots)
## Warning: package 'gplots' was built under R version 4.5.2
##
## Attaching package: 'gplots'
## The following object is masked from 'package:stats':
##
## lowess
#clear the workspace first, so the cluster handle created below is not deleted
rm(list=ls())
#use all cores minus 1
nc <- detectCores()
cl <- makeCluster(nc-1)
#load data
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
data<-read.csv("AllData_SimilarityRatings.csv", header = T)
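Note that the cluster handle cl is not referenced again in this script (packages such as afex can make use of a cluster, e.g. via the cl argument of mixed(), but mixed() is not called here). A minimal cleanup sketch, assuming the handle is kept in the workspace:
#release the worker processes once parallel computation is no longer needed
stopCluster(cl)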
Data Pre-processing
#rename useful variables
data$sbj<-data$participant
data$resp<-data$slider_exp.response
data$block<-data$blocks.thisN+1
#convert to factor
data <- mutate_if(data, is.character, as.factor)
data$sbj<-as.factor(data$sbj)
str(data)
## 'data.frame': 4032 obs. of 38 variables:
## $ trl : int 3 15 8 11 18 16 19 5 13 2 ...
## $ cnd : Factor w/ 2 levels "between","within": 2 2 2 2 2 2 2 2 2 2 ...
## $ dim : Factor w/ 2 levels "irrel","rel": 1 2 1 1 2 2 2 1 2 1 ...
## $ size1 : num 2.2 3 3 2.6 2.6 3.4 3 3 2.2 2.2 ...
## $ hue1 : num 0.1 -0.3 0.3 0.1 0.1 -0.3 0.1 -0.3 -0.3 -0.1 ...
## $ size2 : num 2.6 3 3.4 3 2.6 3.4 3 3.4 2.2 2.6 ...
## $ hue2 : num 0.1 -0.1 0.3 0.1 0.3 -0.1 0.3 -0.3 -0.1 -0.1 ...
## $ practice_trials.thisRepN : logi NA NA NA NA NA NA ...
## $ practice_trials.thisTrialN: logi NA NA NA NA NA NA ...
## $ practice_trials.thisN : logi NA NA NA NA NA NA ...
## $ practice_trials.thisIndex : logi NA NA NA NA NA NA ...
## $ blocks.thisRepN : int 0 0 0 0 0 0 0 0 0 0 ...
## $ blocks.thisTrialN : int 0 0 0 0 0 0 0 0 0 0 ...
## $ blocks.thisN : int 0 0 0 0 0 0 0 0 0 0 ...
## $ blocks.thisIndex : int 0 0 0 0 0 0 0 0 0 0 ...
## $ trials.thisRepN : int 0 0 0 0 0 0 0 0 0 0 ...
## $ trials.thisTrialN : int 0 1 2 3 4 5 6 7 8 9 ...
## $ trials.thisN : int 0 1 2 3 4 5 6 7 8 9 ...
## $ trials.thisIndex : int 2 14 7 10 17 15 18 4 12 1 ...
## $ break_loop.thisRepN : logi NA NA NA NA NA NA ...
## $ break_loop.thisTrialN : logi NA NA NA NA NA NA ...
## $ break_loop.thisN : logi NA NA NA NA NA NA ...
## $ break_loop.thisIndex : logi NA NA NA NA NA NA ...
## $ thisRow.t : num 59.7 63.5 66 68.5 70.4 ...
## $ notes : logi NA NA NA NA NA NA ...
## $ group : Factor w/ 2 levels "Control","Oversampling": 1 1 1 1 1 1 1 1 1 1 ...
## $ slider_exp.response : int 4 8 7 5 7 8 7 6 7 5 ...
## $ slider_exp.rt : num 3.51 2.26 2.24 1.57 1.48 ...
## $ participant : int 48370 48370 48370 48370 48370 48370 48370 48370 48370 48370 ...
## $ session : Factor w/ 2 levels "Post","Pre": 1 1 1 1 1 1 1 1 1 1 ...
## $ date : Factor w/ 56 levels "2025-02-27_12h14.31.370",..: 2 2 2 2 2 2 2 2 2 2 ...
## $ expName : Factor w/ 2 levels "SimilarityRatings_CON",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ psychopyVersion : Factor w/ 1 level "2023.2.3": 1 1 1 1 1 1 1 1 1 1 ...
## $ frameRate : num 60 60 60 60 60 ...
## $ expStart : Factor w/ 56 levels "2025-02-27 12h15.28.543857 +0200",..: 2 2 2 2 2 2 2 2 2 2 ...
## $ sbj : Factor w/ 28 levels "3375","5076",..: 12 12 12 12 12 12 12 12 12 12 ...
## $ resp : int 4 8 7 5 7 8 7 6 7 5 ...
## $ block : num 1 1 1 1 1 1 1 1 1 1 ...
#re-order the levels of the session factor
data$session <- factor(data$session, levels = c("Pre", "Post"))
#create a new factor, condition, for pair type
data$condition<-as.factor(ifelse(data$cnd=="between", "between", ifelse(data$dim=="rel","within_rel","within_irrel" )))
#calculate standardized score for the resp variable.
data <- data %>%
group_by(sbj) %>%
mutate(z_resp = (resp - mean(resp, na.rm = TRUE)) / sd(resp, na.rm = TRUE)) %>%
ungroup() #drop the grouping so later base-R subsetting and aggregation are unaffected
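As a quick sanity check on the standardization (a sketch, not part of the original script), each subject's z_resp should have a mean near 0 and an SD near 1:
#per-subject mean and SD of the standardized ratings
data %>%
group_by(sbj) %>%
summarise(mean_z = mean(z_resp, na.rm = TRUE), sd_z = sd(z_resp, na.rm = TRUE))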
###visualize data from all participants
boxplot(data=data, resp~session*sbj, las=2, col=c("grey25", "grey75"), ylim=c(1,9), ylab="Similarity Ratings", frame=F, yaxt="n" )
axis(side=2, at=1:9, las=2)

Descriptive Statistics
################################
#Descriptive Statistics
################################
################################
#Between-category differences
################################
data_btw<-droplevels(data[data$cnd=="between",])
data_btw_av<-aggregate(data_btw$z_resp, list(data_btw$sbj,data_btw$group, data_btw$session), mean)
colnames(data_btw_av)<-c("sbj", "group", "session", "z_resp")
data_btw_av<-data_btw_av[order(data_btw_av$sbj, data_btw_av$session),]
str(data_btw_av)
## 'data.frame': 56 obs. of 4 variables:
## $ sbj : Factor w/ 28 levels "3375","5076",..: 1 1 2 2 3 3 4 4 5 5 ...
## $ group : Factor w/ 2 levels "Control","Oversampling": 2 2 2 2 2 2 1 1 1 1 ...
## $ session: Factor w/ 2 levels "Pre","Post": 1 2 1 2 1 2 1 2 1 2 ...
## $ z_resp : num 0.327 0.213 0.433 -0.917 0.834 ...
#Oversampling
#Pre
mean(data_btw_av[data_btw_av$group=="Oversampling"& data_btw_av$session=="Pre",]$z_resp)
## [1] 0.529342
sd(data_btw_av[data_btw_av$group=="Oversampling"& data_btw_av$session=="Pre",]$z_resp)
## [1] 0.3795018
#Post
mean(data_btw_av[data_btw_av$group=="Oversampling"& data_btw_av$session=="Post",]$z_resp)
## [1] 0.1927432
sd(data_btw_av[data_btw_av$group=="Oversampling"& data_btw_av$session=="Post",]$z_resp)
## [1] 0.4956307
#Control
#Pre
mean(data_btw_av[data_btw_av$group=="Control"& data_btw_av$session=="Pre",]$z_resp)
## [1] 0.609059
sd(data_btw_av[data_btw_av$group=="Control"& data_btw_av$session=="Pre",]$z_resp)
## [1] 0.2760146
#Post
mean(data_btw_av[data_btw_av$group=="Control"& data_btw_av$session=="Post",]$z_resp)
## [1] 0.3094649
sd(data_btw_av[data_btw_av$group=="Control"& data_btw_av$session=="Post",]$z_resp)
## [1] 0.5983711
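The same cell means and SDs can be computed in one pass with the already-loaded dplyr (a sketch, not part of the original output):
#condensed version of the descriptives above
data_btw_av %>%
group_by(group, session) %>%
summarise(mean_z = mean(z_resp), sd_z = sd(z_resp), .groups = "drop")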
#graph
par(mfrow=c(1,2), oma = c(0, 0, 2, 0))
ylb="Standardized Similarity Ratings"
xlb="Session"
mn="Control Group"
clrs=c("grey85", "grey85")
boxplot(range=0, data=data_btw_av[data_btw_av$group=="Control",], z_resp~session, frame.plot=F, xlab=xlb, ylab=ylb, boxwex=0.5, col=clrs, main =mn, ylim=c(-1.5,1.5), las=1, lwd=2, whisklty=c(2,1))
#lines
sbjs <- unique(data_btw_av[data_btw_av$group=="Control",]$sbj)
for (s in sbjs) {
tmp <- data_btw_av[data_btw_av$sbj == s & data_btw_av$group=="Control", ]
# make sure rows are ordered by session: Pre, Post
tmp <- tmp[order(tmp$session), ]
# check that we have exactly two sessions
if (nrow(tmp) == 2 && all(c("Pre", "Post") %in% tmp$session)) {
# x positions: 1 = Pre, 2 = Post (as in the boxplot)
lines(x = c(1, 2),
y = tmp$z_resp,
col = "grey40", # line color for subjects
lwd = 1) # line width
}
}
mn="Oversampling Group"
clrs=c("grey35", "grey35")
boxplot(range=0, data=data_btw_av[data_btw_av$group=="Oversampling",], z_resp~session, frame.plot=F, xlab=xlb, ylab="", boxwex=0.5, col=clrs, main =mn, ylim=c(-1.5,1.5), las=1, lwd=2, whisklty=c(2,1))
#lines
sbjs <- unique(data_btw_av[data_btw_av$group=="Oversampling",]$sbj)
for (s in sbjs) {
tmp <- data_btw_av[data_btw_av$sbj == s & data_btw_av$group=="Oversampling", ]
# make sure rows are ordered by session: Pre, Post
tmp <- tmp[order(tmp$session), ]
# check that we have exactly two sessions
if (nrow(tmp) == 2 && all(c("Pre", "Post") %in% tmp$session)) {
# x positions: 1 = Pre, 2 = Post (as in the boxplot)
lines(x = c(1, 2),
y = tmp$z_resp,
col = "grey40", # line color for subjects
lwd = 1) # line width
}
}
mtext("Between-Category Pairs, Relevant Dimension", outer = TRUE, cex = 1.5, font = 2)

par(mfrow=c(1,1), oma = c(0, 0, 0, 0))
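The per-subject Pre-to-Post line loop above is repeated for every panel below; a hypothetical helper that factors it out could look like this (a sketch; add_subject_lines is not part of the original script):
#draw one Pre-Post line per subject of the requested group on the current boxplot
add_subject_lines <- function(av, grp) {
for (s in unique(av[av$group == grp, ]$sbj)) {
tmp <- av[av$sbj == s & av$group == grp, ]
tmp <- tmp[order(tmp$session), ] #Pre first, then Post
if (nrow(tmp) == 2 && all(c("Pre", "Post") %in% tmp$session)) {
lines(x = c(1, 2), y = tmp$z_resp, col = "grey40", lwd = 1)
}
}
}
#e.g., add_subject_lines(data_btw_av, "Control") after drawing the Control boxplot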
#################################################
#Within-category differences, Relevant Dimension,
#################################################
data_w_r<-droplevels(data[data$cnd=="within" & data$dim=="rel",])
data_w_r_av<-aggregate(data_w_r$z_resp, list(data_w_r$sbj, data_w_r$group, data_w_r$session), mean)
colnames(data_w_r_av)<-c("sbj", "group" ,"session", "z_resp")
#Oversampling
#Pre
mean(data_w_r_av[data_w_r_av$group=="Oversampling"& data_w_r_av$session=="Pre",]$z_resp)
## [1] 0.5404354
sd(data_w_r_av[data_w_r_av$group=="Oversampling"& data_w_r_av$session=="Pre",]$z_resp)
## [1] 0.3531334
#Post
mean(data_w_r_av[data_w_r_av$group=="Oversampling"& data_w_r_av$session=="Post",]$z_resp)
## [1] 0.2322841
sd(data_w_r_av[data_w_r_av$group=="Oversampling"& data_w_r_av$session=="Post",]$z_resp)
## [1] 0.4607606
#Control
#Pre
mean(data_w_r_av[data_w_r_av$group=="Control"& data_w_r_av$session=="Pre",]$z_resp)
## [1] 0.6671511
sd(data_w_r_av[data_w_r_av$group=="Control"& data_w_r_av$session=="Pre",]$z_resp)
## [1] 0.2670497
#Post
mean(data_w_r_av[data_w_r_av$group=="Control"& data_w_r_av$session=="Post",]$z_resp)
## [1] 0.4251857
sd(data_w_r_av[data_w_r_av$group=="Control"& data_w_r_av$session=="Post",]$z_resp)
## [1] 0.5111205
#Graph
par(mfrow=c(1,2), oma = c(0, 0, 2, 0))
ylb="Standardized Similarity Ratings"
xlb="Session"
mn="Control Group"
clrs=c("grey85", "grey85")
boxplot(range=0,data=data_w_r_av[data_w_r_av$group=="Control",], z_resp~session, frame.plot=F, ylim=c(-1.5,1.5), xlab=xlb, ylab=ylb, boxwex=0.5, col=clrs, main =mn, las=1, lwd=2, whisklty=c(2,1))
#lines
sbjs <- unique(data_w_r_av[data_w_r_av$group=="Control",]$sbj)
for (s in sbjs) {
tmp <- data_w_r_av[data_w_r_av$sbj == s & data_w_r_av$group=="Control", ]
# make sure rows are ordered by session: Pre, Post
tmp <- tmp[order(tmp$session), ]
# check that we have exactly two sessions
if (nrow(tmp) == 2 && all(c("Pre", "Post") %in% tmp$session)) {
# x positions: 1 = Pre, 2 = Post (as in the boxplot)
lines(x = c(1, 2),
y = tmp$z_resp,
col = "grey40", # line color for subjects
lwd = 1) # line width
}
}
mn="Oversampling Group"
clrs=c("grey35", "grey35")
boxplot(range=0, data=data_w_r_av[data_w_r_av$group=="Oversampling",], z_resp~session, frame.plot=F, ylim=c(-1.5,1.5), xlab=xlb, ylab="", boxwex=0.5, col=clrs, main =mn, las=1, lwd=2, whisklty=c(2,1))
#lines
sbjs <- unique(data_w_r_av[data_w_r_av$group=="Oversampling",]$sbj)
for (s in sbjs) {
tmp <- data_w_r_av[data_w_r_av$sbj == s & data_w_r_av$group=="Oversampling", ]
# make sure rows are ordered by session: Pre, Post
tmp <- tmp[order(tmp$session), ]
# check that we have exactly two sessions
if (nrow(tmp) == 2 && all(c("Pre", "Post") %in% tmp$session)) {
# x positions: 1 = Pre, 2 = Post (as in the boxplot)
lines(x = c(1, 2),
y = tmp$z_resp,
col = "grey40", # line color for subjects
lwd = 1) # line width
}
}
mtext("Within-Category Pairs, Relevant Dimension", outer = TRUE, cex = 1.5, font = 2)

par(mfrow=c(1,1), oma = c(0, 0, 0, 0))
#################################################
#Within-category differences, Irrelevant Dimension
#################################################
data_w_i<-droplevels(data[data$cnd=="within" & data$dim=="irrel",])
data_w_i_av<-aggregate(data_w_i$z_resp, list(data_w_i$sbj,data_w_i$group, data_w_i$session), mean)
colnames(data_w_i_av)<-c("sbj", "group","session", "z_resp")
#Oversampling
#Pre
mean(data_w_i_av[data_w_i_av$group=="Oversampling"& data_w_i_av$session=="Pre",]$z_resp)
## [1] -0.4301797
sd(data_w_i_av[data_w_i_av$group=="Oversampling"& data_w_i_av$session=="Pre",]$z_resp)
## [1] 0.2844851
#Post
mean(data_w_i_av[data_w_i_av$group=="Oversampling"& data_w_i_av$session=="Post",]$z_resp)
## [1] -0.3256617
sd(data_w_i_av[data_w_i_av$group=="Oversampling"& data_w_i_av$session=="Post",]$z_resp)
## [1] 0.5844494
#Control
#Pre
mean(data_w_i_av[data_w_i_av$group=="Control"& data_w_i_av$session=="Pre",]$z_resp)
## [1] -0.5841016
sd(data_w_i_av[data_w_i_av$group=="Control"& data_w_i_av$session=="Pre",]$z_resp)
## [1] 0.2780912
#Post
mean(data_w_i_av[data_w_i_av$group=="Control"& data_w_i_av$session=="Post",]$z_resp)
## [1] -0.4502975
sd(data_w_i_av[data_w_i_av$group=="Control"& data_w_i_av$session=="Post",]$z_resp)
## [1] 0.4926838
#Graph
par(mfrow=c(1,2), oma = c(0, 0, 2, 0))
ylb="Standardized Similarity Ratings"
xlb="Session"
mn="Control Group"
clrs=c("grey85", "grey85")
boxplot(range=0, data=data_w_i_av[data_w_i_av$group=="Control",], z_resp~session, frame.plot=F, ylim=c(-1.5, 1.5), xlab=xlb, ylab=ylb, boxwex=0.5, col=clrs, main =mn, las=1, lwd=2, whisklty=c(2,1))
#lines
sbjs <- unique(data_w_i_av[data_w_i_av$group=="Control",]$sbj)
for (s in sbjs) {
tmp <- data_w_i_av[data_w_i_av$sbj == s & data_w_i_av$group=="Control", ]
# make sure rows are ordered by session: Pre, Post
tmp <- tmp[order(tmp$session), ]
# check that we have exactly two sessions
if (nrow(tmp) == 2 && all(c("Pre", "Post") %in% tmp$session)) {
# x positions: 1 = Pre, 2 = Post (as in the boxplot)
lines(x = c(1, 2),
y = tmp$z_resp,
col = "grey40", # line color for subjects
lwd = 1) # line width
}
}
mn="Oversampling Group"
clrs=c("grey35", "grey35")
boxplot(range=0, data=data_w_i_av[data_w_i_av$group=="Oversampling",], z_resp~session, frame.plot=F, ylim=c(-1.5, 1.5), xlab=xlb, ylab="", boxwex=0.5, col=clrs, main =mn, las=1, lwd=2, whisklty=c(2,1))
#lines
sbjs <- unique(data_w_i_av[data_w_i_av$group=="Oversampling",]$sbj)
for (s in sbjs) {
tmp <- data_w_i_av[data_w_i_av$sbj == s & data_w_i_av$group=="Oversampling", ]
# make sure rows are ordered by session: Pre, Post
tmp <- tmp[order(tmp$session), ]
# check that we have exactly two sessions
if (nrow(tmp) == 2 && all(c("Pre", "Post") %in% tmp$session)) {
# x positions: 1 = Pre, 2 = Post (as in the boxplot)
lines(x = c(1, 2),
y = tmp$z_resp,
col = "grey40", # line color for subjects
lwd = 1) # line width
}
}
mtext("Within-Category Pairs, Irrelevant Dimension", outer = TRUE, cex = 1.5, font = 2)

par(mfrow=c(1,1), oma = c(0, 0, 0, 0))
Inferential Statistics
############
# ANOVA
############
ezANOVA(data=data, dv=z_resp, wid=sbj, within=.(session,condition), between=group, type=3)
## Warning: Collapsing data to cell means. *IF* the requested effects are a subset
## of the full design, you must use the "within_full" argument, else results may
## be inaccurate.
## $ANOVA
## Effect DFn DFd F p p<.05 ges
## 2 group 1 26 0.93473193 3.425441e-01 2.258709e-03
## 3 session 1 26 3.90488725 5.884658e-02 3.481276e-02
## 5 condition 2 52 51.90017700 4.067312e-13 * 5.026577e-01
## 4 group:session 1 26 0.07625752 7.846179e-01 7.038745e-04
## 6 group:condition 2 52 1.23371785 2.995896e-01 2.346137e-02
## 7 session:condition 2 52 7.63256922 1.240355e-03 * 5.297722e-02
## 8 group:session:condition 2 52 0.01244529 9.876348e-01 9.120597e-05
##
## $`Mauchly's Test for Sphericity`
## Effect W p p<.05
## 5 condition 0.2095955 3.290596e-09 *
## 6 group:condition 0.2095955 3.290596e-09 *
## 7 session:condition 0.4808579 1.059765e-04 *
## 8 group:session:condition 0.4808579 1.059765e-04 *
##
## $`Sphericity Corrections`
## Effect GGe p[GG] p[GG]<.05 HFe
## 5 condition 0.5585330 2.707022e-08 * 0.5658654
## 6 group:condition 0.5585330 2.817996e-01 0.5658654
## 7 session:condition 0.6582663 5.277252e-03 * 0.6795313
## 8 group:session:condition 0.6582663 9.535227e-01 0.6795313
## p[HF] p[HF]<.05
## 5 2.249075e-08 *
## 6 2.823515e-01
## 7 4.819659e-03 *
## 8 9.572773e-01
afex_options(type = 3, correction = "GG") # Type III, GG correction
fit <- aov_ez( id = "sbj", dv = "z_resp", data = data, within = c("session","condition"), between = "group", type = 3, return = "afex_aov")
## Warning: More than one observation per design cell, aggregating data using `fun_aggregate = mean`.
## To turn off this warning, pass `fun_aggregate = mean` explicitly.
## Contrasts set to contr.sum for the following variables: group
emm_sess_by_cond <- emmeans(fit, ~ session | condition)
prepost_tests <- contrast(emm_sess_by_cond, method = "pairwise", adjust = "holm")
print(prepost_tests)
## condition = between:
## contrast estimate SE df t.ratio p.value
## Pre - Post 0.318 0.1110 26 2.855 0.0083
##
## condition = within_irrel:
## contrast estimate SE df t.ratio p.value
## Pre - Post -0.119 0.1110 26 -1.072 0.2934
##
## condition = within_rel:
## contrast estimate SE df t.ratio p.value
## Pre - Post 0.275 0.0982 26 2.802 0.0095
##
## Results are averaged over the levels of: group
#for effect size calculations: d = t/sqrt(N), with t values transcribed from the contrasts above
t_vals <- c(between = 2.855,
within_irrel = -1.072,
within_rel = 2.802)
N <- 27
d_vals <- t_vals / sqrt(N)
results <- data.frame(
condition = names(t_vals),
t_value = round(t_vals, 3),
cohens_d = round(d_vals, 3)
)
print(results)
## condition t_value cohens_d
## between between 2.855 0.549
## within_irrel within_irrel -1.072 -0.206
## within_rel within_rel 2.802 0.539
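As an arithmetic check of the conversion above (a sketch, not in the original script):
#e.g., the between-category contrast, t = 2.855 with N = 27
2.855 / sqrt(27) #approximately 0.549, matching cohens_d above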
###########################
# Warping trajectories
###########################
#lmer models
data$block<-as.factor(data$block)
m0<-lmer(z_resp~ (0+condition|sbj), data=data)
## boundary (singular) fit: see help('isSingular')
m1<-lmer(z_resp~group + (1|sbj), data=data)
## boundary (singular) fit: see help('isSingular')
m2<-lmer(z_resp~group*session + (1|sbj), data=data)
## boundary (singular) fit: see help('isSingular')
m3<-lmer(z_resp~group*session*condition + (1|sbj), data=data)
## boundary (singular) fit: see help('isSingular')
m3.1<-lmer(z_resp~group+session+condition + (1|sbj), data=data)
## boundary (singular) fit: see help('isSingular')
m3.2<-lmer(z_resp~group*session+condition + (1|sbj), data=data)
## boundary (singular) fit: see help('isSingular')
m4<-lmer(z_resp~group*session*condition + (1+session|sbj), data=data)
## boundary (singular) fit: see help('isSingular')
m5<-lmer(z_resp~group*session*condition*block + (1|sbj), data=data)
## boundary (singular) fit: see help('isSingular')
#all models: singular fit
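A direct way to confirm these boundary fits (a sketch, not in the original script) is lme4::isSingular(), which flags variance components estimated at or near zero:
#TRUE indicates a singular random-effects covariance estimate
isSingular(m3)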
#Bayesian GLMM
##############################
#by block
##############################
#number of fixed-effect coefficients (used below to size the prior covariance matrix)
p<-length(fixef(lmer(z_resp ~ session * group * condition *block +(condition|sbj),
data = data)))
## boundary (singular) fit: see help('isSingular')
m1<- blmer(
z_resp ~ session * group * condition * block+(condition|sbj),
data = data,
control=lmerControl(optimizer="nlminbwrap",optCtrl=list(maxfun=1e5)),
contrasts=list(session=contr.sum, group=contr.sum, condition=contr.sum),
fixef.prior = normal(cov = diag(9,p)) #independent normal priors with variance 9 on each of the p fixed effects
)
m2<- blmer(
z_resp ~ session * group * condition * block+(session+condition|sbj),
data = data,
control=lmerControl(optimizer="nlminbwrap",optCtrl=list(maxfun=1e5)),
contrasts=list(session=contr.sum, group=contr.sum, condition=contr.sum),
fixef.prior = normal(cov = diag(9,p))
)
anova(m1,m2)
## refitting model(s) with ML (instead of REML)
## Data: data
## Models:
## m1: z_resp ~ session * group * condition * block + (condition | sbj)
## m2: z_resp ~ session * group * condition * block + (session + condition | sbj)
## npar AIC BIC logLik -2*log(L) Chisq Df Pr(>Chisq)
## m1 43 10058.0 10329 -4986.0 9972.0
## m2 47 9909.6 10206 -4907.8 9815.6 156.41 4 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#m2 preferred
m3<- blmer(
z_resp ~ session * group * condition * block+(session*condition|sbj),
data = data,
control=lmerControl(optimizer="nlminbwrap",optCtrl=list(maxfun=1e5)),
contrasts=list(session=contr.sum, group=contr.sum, condition=contr.sum),
fixef.prior = normal(cov = diag(9,p))
)
anova(m3,m2)
## refitting model(s) with ML (instead of REML)
## Data: data
## Models:
## m2: z_resp ~ session * group * condition * block + (session + condition | sbj)
## m3: z_resp ~ session * group * condition * block + (session * condition | sbj)
## npar AIC BIC logLik -2*log(L) Chisq Df Pr(>Chisq)
## m2 47 9909.6 10206 -4907.8 9815.6
## m3 58 9784.3 10150 -4834.2 9668.3 147.21 11 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#m3 preferred
m4<-blmer(
z_resp ~ session * group * condition * block+(block+ session*condition|sbj),
data = data,
control=lmerControl(optimizer="nlminbwrap",optCtrl=list(maxfun=1e5)),
contrasts=list(session=contr.sum, group=contr.sum, condition=contr.sum),
fixef.prior = normal(cov = diag(9,p))
)
## Warning in optwrap(optimizer, devfun, startingValues, lower = lowerBounds, :
## convergence code 1 from nlminbwrap: iteration limit reached without convergence
## (10)
#failed to converge
m5<-blmer(
z_resp ~ session * group * condition * block+(session*condition*block|sbj),
data = data,
control=lmerControl(optimizer="nlminbwrap",optCtrl=list(maxfun=1e5)),
contrasts=list(session=contr.sum, group=contr.sum, condition=contr.sum),
fixef.prior = normal(cov = diag(9,p))
)
## Warning in optwrap(optimizer, devfun, startingValues, lower = lowerBounds, :
## convergence code 1 from nlminbwrap: iteration limit reached without convergence
## (10)
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## unable to evaluate scaled gradient
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
#failed to converge
m6<-blmer(
z_resp ~ session * group * condition * block+(group+ session*condition|sbj),
data = data,
control=lmerControl(optimizer="nlminbwrap",optCtrl=list(maxfun=1e5)),
contrasts=list(session=contr.sum, group=contr.sum, condition=contr.sum),
fixef.prior = normal(cov = diag(9,p))
)
## Warning in optwrap(optimizer, devfun, startingValues, lower = lowerBounds, :
## convergence code 1 from nlminbwrap: iteration limit reached without convergence
## (10)
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.0119472 (tol = 0.002, component 1)
#failed to converge
m7<-blmer(
z_resp ~ session * group * condition * block+(group*session*condition|sbj),
data = data,
control=lmerControl(optimizer="nlminbwrap",optCtrl=list(maxfun=1e5)),
contrasts=list(session=contr.sum, group=contr.sum, condition=contr.sum),
fixef.prior = normal(cov = diag(9,p))
)
## Warning in optwrap(optimizer, devfun, startingValues, lower = lowerBounds, :
## convergence code 1 from nlminbwrap: iteration limit reached without convergence
## (10)
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.798446 (tol = 0.002, component 1)
#failed to converge
fm<-m3
print(anova(fm))
## Analysis of Variance Table
## npar Sum Sq Mean Sq F value
## session 1 0.002 0.002 0.0029
## group 1 0.000 0.000 0.0000
## condition 2 154.999 77.500 126.2808
## block 2 5.851 2.925 4.7667
## session:group 1 0.225 0.225 0.3664
## session:condition 2 5.083 2.542 4.1415
## group:condition 2 2.429 1.214 1.9788
## session:block 2 6.877 3.439 5.6030
## group:block 2 2.994 1.497 2.4389
## condition:block 4 6.005 1.501 2.4461
## session:group:condition 2 0.026 0.013 0.0208
## session:group:block 2 3.362 1.681 2.7391
## session:condition:block 4 1.757 0.439 0.7158
## group:condition:block 4 2.119 0.530 0.8631
## session:group:condition:block 4 0.594 0.149 0.2420
Anova(fm, type=3) #for statistical significance
## Analysis of Deviance Table (Type III Wald chisquare tests)
##
## Response: z_resp
## Chisq Df Pr(>Chisq)
## (Intercept) 31.0039 1 2.575e-08 ***
## session 9.6100 1 0.001935 **
## group 0.0433 1 0.835203
## condition 39.5810 2 2.542e-09 ***
## block 4.7040 2 0.095177 .
## session:group 0.6548 1 0.418387
## session:condition 8.5193 2 0.014127 *
## group:condition 2.9803 2 0.225341
## session:block 10.6309 2 0.004915 **
## group:block 3.5754 2 0.167341
## condition:block 9.7842 4 0.044224 *
## session:group:condition 0.1257 2 0.939104
## session:group:block 5.7092 2 0.057580 .
## session:condition:block 2.8632 4 0.580978
## group:condition:block 3.4522 4 0.485177
## session:group:condition:block 0.9679 4 0.914617
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
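Before the follow-up contrasts, the random-effect structure of the retained model can be inspected (a sketch, not part of the original output):
#random-effect standard deviations and correlations for the selected model
VarCorr(fm)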
#########################
# post-hoc comparisons
#########################
emm <- emmeans(m3, ~ session | group * block, pbkrtest.limit = 4032)
## NOTE: Results may be misleading due to involvement in interactions
session_contrasts <- contrast(emm, method = "pairwise", by = c("group", "block"), adjust = "bonferroni")
print(session_contrasts)
## group = Control, block = 1:
## contrast estimate SE df t.ratio p.value
## Pre - Post 0.2092 0.129 38.5 1.620 0.1135
##
## group = Oversampling, block = 1:
## contrast estimate SE df t.ratio p.value
## Pre - Post 0.3570 0.129 38.5 2.764 0.0087
##
## group = Control, block = 2:
## contrast estimate SE df t.ratio p.value
## Pre - Post 0.0219 0.129 38.5 0.170 0.8660
##
## group = Oversampling, block = 2:
## contrast estimate SE df t.ratio p.value
## Pre - Post 0.1462 0.129 38.5 1.132 0.2648
##
## group = Control, block = 3:
## contrast estimate SE df t.ratio p.value
## Pre - Post 0.1768 0.129 38.5 1.369 0.1790
##
## group = Oversampling, block = 3:
## contrast estimate SE df t.ratio p.value
## Pre - Post 0.0371 0.129 38.5 0.287 0.7757
##
## Results are averaged over the levels of: condition
## Degrees-of-freedom method: kenward-roger
means_df <- aggregate(z_resp ~ session + group + block, data = data, FUN = mean)
means_df$block <- as.numeric(as.character(means_df$block))
#graph
plot(
z_resp ~ block,
data = subset(means_df, session == "Post" & group == "Oversampling"),
type = "b", pch = 19, col = "blue",
ylim = range(means_df$z_resp),
xlab = "Block", ylab = "Mean z_resp",
main = "Mean z_resp by Session, Group, and Block"
)
lines(
z_resp ~ block,
data = subset(means_df, session == "Pre" & group == "Oversampling"),
type = "b", pch = 1, col = "blue"
)
# Post - Control
lines(
z_resp ~ block,
data = subset(means_df, session == "Post" & group == "Control"),
type = "b", pch = 19, col = "red"
)
# Pre - Control
lines(
z_resp ~ block,
data = subset(means_df, session == "Pre" & group == "Control"),
type = "b", pch = 1, col = "red"
)
legend("topright", legend = c("Oversampling - Post", "Oversampling - Pre", "Control - Post", "Control - Pre"),
col = c("blue", "blue", "red", "red"), pch = c(19, 1, 19, 1), lty = 1)

##################################
# show means from behavioral data
##################################
temp<-aggregate(z_resp~block+session+group, data=data, FUN=mean)
temp2<-aggregate(z_resp~block+session+group, data=data, FUN=se) #sciplot::se, standard error of the mean
temp$se<-1.96*temp2$z_resp #95% CI half-width (1.96 x SE)
xlab="Block"
ylab="Standardized Ratings"
col=c("black", "grey60")
cex=1.5
pch=c(0,15, 1,19)
offset=.03
#Oversampling Pre
plotCI(x=1:3, y=temp[temp$session=="Pre" & temp$group=="Oversampling",]$z_resp, uiw=temp[temp$session=="Pre" & temp$group=="Oversampling",]$se, bty="n", las=1, xaxt="n", xlab=xlab, ylab=ylab, col=col[1], ylim=c(-0.3, 0.3),xlim=c(1,3.3), cex=cex, lty=2, pch=pch[1])
lines(x=1:3, y=temp[temp$session=="Pre" & temp$group=="Oversampling",]$z_resp, col=col[1], lty=2, lwd=2)
axis(side=1, at=c(1,2,3))
#Oversampling Post
plotCI(x=1:3+offset, y=temp[temp$session=="Post" & temp$group=="Oversampling",]$z_resp, uiw=temp[temp$session=="Post" & temp$group=="Oversampling",]$se, pch=pch[2], col=col[1], add=T, cex=cex)
lines(x=1:3+offset, y=temp[temp$session=="Post" & temp$group=="Oversampling",]$z_resp, col=col[1], lty=1, lwd=2)
#Control Pre
plotCI(x=1:3+2*offset, y=temp[temp$session=="Pre" & temp$group=="Control",]$z_resp, uiw=temp[temp$session=="Pre" & temp$group=="Control",]$se, pch=pch[3], col=col[2], add=T, cex=cex, lty=2)
lines(x=1:3+2*offset, y=temp[temp$session=="Pre" & temp$group=="Control",]$z_resp, col=col[2], lty=2, lwd=2)
#Control Post
plotCI(x=1:3+3*offset, y=temp[temp$session=="Post" & temp$group=="Control",]$z_resp, uiw=temp[temp$session=="Post" & temp$group=="Control",]$se, pch=pch[4], col=col[2], add=T, cex=cex)
lines(x=1:3+3*offset, y=temp[temp$session=="Post" & temp$group=="Control",]$z_resp,col=col[2], lty=1, lwd=2)
legend(x=2.2, y=0.3, legend=c("Oversampling - Pre", "Oversampling - Post", "Control - Pre", "Control - Post"), col=c("black", "black", "grey60", "grey60"), lty=c(2,1,2,1), pch=pch, bty="n", seg.len=4, lwd=2)

##################################################################################
#check if the oversampling group gave overall higher ratings during the 1st block
##################################################################################
bl1<-data[data$block==1 & data$session=="Pre",]
d_bl1<-aggregate(z_resp~sbj+group, data=droplevels(bl1), FUN=mean)
leveneTest(z_resp~group, data=d_bl1, center=mean)
## Levene's Test for Homogeneity of Variance (center = mean)
## Df F value Pr(>F)
## group 1 1.1215 0.2993
## 26
t.test(z_resp~group, data=d_bl1, var.equal=T)
##
## Two Sample t-test
##
## data: z_resp by group
## t = -0.65752, df = 26, p-value = 0.5166
## alternative hypothesis: true difference in means between group Control and group Oversampling is not equal to 0
## 95 percent confidence interval:
## -0.4184056 0.2156012
## sample estimates:
## mean in group Control mean in group Oversampling
## 0.09464101 0.19604323
#no difference
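As a robustness sketch (not in the original analysis), the Welch version of the test, which does not assume equal group variances, would be:
#Welch two-sample t-test (var.equal defaults to FALSE)
t.test(z_resp~group, data=d_bl1)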
Correlation Analyses
#########################################################################
#########################################################################
#Correlation Analysis between cat average accuracy and similarity change
#########################################################################
#########################################################################
############################
# Average Similarity Change
############################
cat_av<-read.csv("cat_av_acc.csv", header=T)
#we average all sim ratings for specific pairs
dat<- aggregate(z_resp~sbj+ session + size1+hue1+size2+hue2+ group, data=data, FUN=mean)
#we transform data
wide_dat <- reshape(dat, timevar = "session", idvar = c("sbj", "size1","hue1", "size2", "hue2", "group"), direction = "wide")
#calculate diff between Post and Pre for each pair
wide_dat$diff <- wide_dat$z_resp.Post - wide_dat$z_resp.Pre
#average across pairs
sim_av<-aggregate(diff~sbj+group, data=wide_dat, FUN=mean)
#merge data
merged <- merge(cat_av, sim_av, by="sbj")
#correlation test
cor.test(merged$diff,merged$acc)
##
## Pearson's product-moment correlation
##
## data: merged$diff and merged$acc
## t = 1.1866, df = 24, p-value = 0.247
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1671957 0.5707141
## sample estimates:
## cor
## 0.2354017
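The same five steps (average by pair, reshape to wide, Post minus Pre, average over pairs, merge with accuracy and correlate) are repeated for each pair type below. A hypothetical wrapper could look like this (a sketch; sim_change_cor is not part of the original script):
#wrapper for the repeated correlation pipeline
sim_change_cor <- function(df, acc_df) {
av <- aggregate(z_resp ~ sbj + session + size1 + hue1 + size2 + hue2 + group, data = df, FUN = mean)
wide <- reshape(av, timevar = "session", idvar = c("sbj", "size1", "hue1", "size2", "hue2", "group"), direction = "wide")
wide$diff <- wide$z_resp.Post - wide$z_resp.Pre #Post minus Pre per pair
change <- aggregate(diff ~ sbj + group, data = wide, FUN = mean)
merged <- merge(acc_df, change, by = "sbj")
cor.test(merged$diff, merged$acc)
}
#e.g., sim_change_cor(data[data$condition == "between", ], cat_av)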
##################################################
#Repeat analysis with accuracy in boundary stimuli
##################################################
cat_av_b<-read.csv("cat_av_acc_b.csv", header=T)
merged_b <- merge(cat_av_b, sim_av, by="sbj")
cor.test(merged_b$diff,merged_b$acc)
##
## Pearson's product-moment correlation
##
## data: merged_b$diff and merged_b$acc
## t = 0.037028, df = 24, p-value = 0.9708
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3809090 0.3937573
## sample estimates:
## cor
## 0.007558119
###########################################################
# By pair type. Between-Category, Relevant Dimension Change
###########################################################
#subset, condition=="between"
dat<- aggregate(z_resp~sbj+ session + size1+hue1+size2+hue2+ group, data=data[data$condition=="between",], FUN=mean)
wide_dat <- reshape(dat, timevar = "session", idvar = c("sbj", "size1","hue1", "size2", "hue2", "group"), direction = "wide")
#calculate diff between Post and Pre for each pair
wide_dat$diff <- wide_dat$z_resp.Post - wide_dat$z_resp.Pre
#average across pairs
sim_av<-aggregate(diff~sbj+group, data=wide_dat, FUN=mean)
#merge data
merged <- merge(cat_av, sim_av, by="sbj")
#correlation test
cor.test(merged$diff,merged$acc)
##
## Pearson's product-moment correlation
##
## data: merged$diff and merged$acc
## t = 0.92104, df = 24, p-value = 0.3662
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2182000 0.5339085
## sample estimates:
## cor
## 0.1847686
##################################################
#Repeat analysis with accuracy in boundary stimuli
##################################################
cat_av_b<-read.csv("cat_av_acc_b.csv", header=T)
merged_b <- merge(cat_av_b, sim_av, by="sbj")
cor.test(merged_b$diff,merged_b$acc)
##
## Pearson's product-moment correlation
##
## data: merged_b$diff and merged_b$acc
## t = -0.32025, df = 24, p-value = 0.7515
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4414301 0.3304701
## sample estimates:
## cor
## -0.06523207
##########################################################
# By pair type. Within-Category, Relevant Dimension Change
##########################################################
#subset, condition=="within_rel"
dat<- aggregate(z_resp~sbj+ session + size1+hue1+size2+hue2+ group, data=data[data$condition=="within_rel",], FUN=mean)
wide_dat <- reshape(dat, timevar = "session", idvar = c("sbj", "size1","hue1", "size2", "hue2", "group"), direction = "wide")
# calculate diff between Post and Pre for each pair
wide_dat$diff <- wide_dat$z_resp.Post - wide_dat$z_resp.Pre
#average across pairs
sim_av<-aggregate(diff~sbj+group, data=wide_dat, FUN=mean)
#merge data
merged <- merge(cat_av, sim_av, by="sbj")
#correlation test
cor.test(merged$diff,merged$acc)
##
## Pearson's product-moment correlation
##
## data: merged$diff and merged$acc
## t = 1.1071, df = 24, p-value = 0.2792
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1825144 0.5599628
## sample estimates:
## cor
## 0.2204207
##################################################
#Repeat analysis with accuracy in boundary stimuli
##################################################
cat_av_b<-read.csv("cat_av_acc_b.csv", header=T)
merged_b <- merge(cat_av_b, sim_av, by="sbj")
cor.test(merged_b$diff,merged_b$acc)
##
## Pearson's product-moment correlation
##
## data: merged_b$diff and merged_b$acc
## t = 0.35567, df = 24, p-value = 0.7252
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3240299 0.4472187
## sample estimates:
## cor
## 0.07241049
############################################################
# By pair type. Within-Category, Irrelevant Dimension Change
############################################################
#subset, condition=="within_irrel"
dat<- aggregate(z_resp~sbj+ session + size1+hue1+size2+hue2+ group, data=data[data$condition=="within_irrel",], FUN=mean)
wide_dat <- reshape(dat, timevar = "session", idvar = c("sbj", "size1","hue1", "size2", "hue2", "group"), direction = "wide")
# calculate diff between Post and Pre for each pair
wide_dat$diff <- wide_dat$z_resp.Post - wide_dat$z_resp.Pre
#average across pairs
sim_av<-aggregate(diff~sbj+group, data=wide_dat, FUN=mean)
#merge data
merged <- merge(cat_av, sim_av, by="sbj")
#correlation test
cor.test(merged$diff,merged$acc)
##
## Pearson's product-moment correlation
##
## data: merged$diff and merged$acc
## t = 0.71252, df = 24, p-value = 0.483
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2577961 0.5032249
## sample estimates:
## cor
## 0.1439282
##################################################
#Repeat analysis with accuracy in boundary stimuli
##################################################
cat_av_b<-read.csv("cat_av_acc_b.csv", header=T)
merged_b <- merge(cat_av_b, sim_av, by="sbj")
cor.test(merged_b$diff,merged_b$acc)
##
## Pearson's product-moment correlation
##
## data: merged_b$diff and merged_b$acc
## t = -0.064563, df = 24, p-value = 0.9491
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3984956 0.3760939
## sample estimates:
## cor
## -0.01317776
##
##############################################################
#Consider only 1st block differences in similarity ratings
##############################################################
##
d<-data[data$block==1,]
############################
# Average Similarity Change
############################
cat_av<-read.csv("cat_av_acc.csv", header=T)
#we average all sim ratings for specific pairs
dat<- aggregate(z_resp~sbj+ session + size1+hue1+size2+hue2+ group, data=d, FUN=mean)
#we transform data
wide_dat <- reshape(dat, timevar = "session", idvar = c("sbj", "size1","hue1", "size2", "hue2", "group"), direction = "wide")
#calculate diff between Post and Pre for each pair
wide_dat$diff <- wide_dat$z_resp.Post - wide_dat$z_resp.Pre
#average across pairs
sim_av<-aggregate(diff~sbj+group, data=wide_dat, FUN=mean)
#merge data
merged <- merge(cat_av, sim_av, by="sbj")
#correlation test
cor.test(merged$diff,merged$acc)
##
## Pearson's product-moment correlation
##
## data: merged$diff and merged$acc
## t = 0.85706, df = 24, p-value = 0.3999
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2304026 0.5246591
## sample estimates:
## cor
## 0.1723292
##################################################
#Repeat analysis with accuracy in boundary stimuli
##################################################
cat_av_b<-read.csv("cat_av_acc_b.csv", header=T)
merged_b <- merge(cat_av_b, sim_av, by="sbj")
cor.test(merged_b$diff,merged_b$acc)
##
## Pearson's product-moment correlation
##
## data: merged_b$diff and merged_b$acc
## t = -0.14803, df = 24, p-value = 0.8836
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4127263 0.3613768
## sample estimates:
## cor
## -0.0302029
###########################################################
# By pair type. Between-Category, Relevant Dimension Change
###########################################################
#subset, condition=="between"
dat<- aggregate(z_resp~sbj+ session + size1+hue1+size2+hue2+ group, data=d[d$condition=="between",], FUN=mean)
wide_dat <- reshape(dat, timevar = "session", idvar = c("sbj", "size1","hue1", "size2", "hue2", "group"), direction = "wide")
#calculate diff between Post and Pre for each pair
wide_dat$diff <- wide_dat$z_resp.Post - wide_dat$z_resp.Pre
#average across pairs
sim_av<-aggregate(diff~sbj+group, data=wide_dat, FUN=mean)
#merge data
merged <- merge(cat_av, sim_av, by="sbj")
#correlation test
cor.test(merged$diff,merged$acc)
##
## Pearson's product-moment correlation
##
## data: merged$diff and merged$acc
## t = 1.4268, df = 24, p-value = 0.1665
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1208036 0.6018004
## sample estimates:
## cor
## 0.2796334
##################################################
#Repeat analysis with accuracy in boundary stimuli
##################################################
cat_av_b<-read.csv("cat_av_acc_b.csv", header=T)
merged_b <- merge(cat_av_b, sim_av, by="sbj")
cor.test(merged_b$diff,merged_b$acc)
##
## Pearson's product-moment correlation
##
## data: merged_b$diff and merged_b$acc
## t = 0.28086, df = 24, p-value = 0.7812
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3376011 0.4349446
## sample estimates:
## cor
## 0.05723551
##########################################################
# By pair type. Within-Category, Relevant Dimension Change
##########################################################
#subset, condition=="within_rel"
dat<- aggregate(z_resp~sbj+ session + size1+hue1+size2+hue2+ group, data=d[d$condition=="within_rel",], FUN=mean)
wide_dat <- reshape(dat, timevar = "session", idvar = c("sbj", "size1","hue1", "size2", "hue2", "group"), direction = "wide")
# calculate diff between Post and Pre for each pair
wide_dat$diff <- wide_dat$z_resp.Post - wide_dat$z_resp.Pre
#average across pairs
sim_av<-aggregate(diff~sbj+group, data=wide_dat, FUN=mean)
#merge data
merged <- merge(cat_av, sim_av, by="sbj")
#correlation test
cor.test(merged$diff,merged$acc)
##
## Pearson's product-moment correlation
##
## data: merged$diff and merged$acc
## t = 1.9275, df = 24, p-value = 0.06584
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.02473188 0.65989328
## sample estimates:
## cor
## 0.3661275
# p = .066
##################################################
#Repeat analysis with accuracy in boundary stimuli
##################################################
cat_av_b<-read.csv("cat_av_acc_b.csv", header=T)
merged_b <- merge(cat_av_b, sim_av, by="sbj")
cor.test(merged_b$diff,merged_b$acc)
##
## Pearson's product-moment correlation
##
## data: merged_b$diff and merged_b$acc
## t = 1.2281, df = 24, p-value = 0.2313
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1591774 0.5762425
## sample estimates:
## cor
## 0.2431677
############################################################
# By pair type. Within-Category, Irrelevant Dimension Change
############################################################
#subset, condition=="within_irrel"
dat<- aggregate(z_resp~sbj+ session + size1+hue1+size2+hue2+ group, data=d[d$condition=="within_irrel",], FUN=mean)
wide_dat <- reshape(dat, timevar = "session", idvar = c("sbj", "size1","hue1", "size2", "hue2", "group"), direction = "wide")
# calculate diff between Post and Pre for each pair
wide_dat$diff <- wide_dat$z_resp.Post - wide_dat$z_resp.Pre
#average across pairs
sim_av<-aggregate(diff~sbj+group, data=wide_dat, FUN=mean)
#merge data
merged <- merge(cat_av, sim_av, by="sbj")
#correlation test
cor.test(merged$diff,merged$acc)
##
## Pearson's product-moment correlation
##
## data: merged$diff and merged$acc
## t = -0.15342, df = 24, p-value = 0.8793
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4136387 0.3604199
## sample estimates:
## cor
## -0.03130213
##################################################
#Repeat analysis with accuracy in boundary stimuli
##################################################
cat_av_b<-read.csv("cat_av_acc_b.csv", header=T)
merged_b <- merge(cat_av_b, sim_av, by="sbj")
cor.test(merged_b$diff,merged_b$acc)
##
## Pearson's product-moment correlation
##
## data: merged_b$diff and merged_b$acc
## t = -0.8802, df = 24, p-value = 0.3875
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5280213 0.2259942
## sample estimates:
## cor
## -0.1768381
Session Information
sessionInfo()
## R version 4.5.1 (2025-06-13 ucrt)
## Platform: x86_64-w64-mingw32/x64
## Running under: Windows 11 x64 (build 26200)
##
## Matrix products: default
## LAPACK version 3.12.1
##
## locale:
## [1] LC_COLLATE=English_United States.utf8
## [2] LC_CTYPE=English_United States.utf8
## [3] LC_MONETARY=English_United States.utf8
## [4] LC_NUMERIC=C
## [5] LC_TIME=English_United States.utf8
##
## time zone: Europe/Athens
## tzcode source: internal
##
## attached base packages:
## [1] parallel stats graphics grDevices utils datasets methods
## [8] base
##
## other attached packages:
## [1] gplots_3.2.0 sciplot_1.2-0 lmerTest_3.1-3 emmeans_2.0.0 afex_1.5-0
## [6] blme_1.0-6 lme4_1.1-37 Matrix_1.7-3 car_3.1-3 carData_3.0-5
## [11] ez_4.4-0 dplyr_1.1.4
##
## loaded via a namespace (and not attached):
## [1] tidyselect_1.2.1 farver_2.1.2 S7_0.2.0
## [4] bitops_1.0-9 fastmap_1.2.0 TH.data_1.1-5
## [7] digest_0.6.37 estimability_1.5.1 lifecycle_1.0.4
## [10] survival_3.8-3 magrittr_2.0.4 compiler_4.5.1
## [13] rlang_1.1.6 sass_0.4.10 tools_4.5.1
## [16] yaml_2.3.10 knitr_1.50 plyr_1.8.9
## [19] RColorBrewer_1.1-3 multcomp_1.4-29 abind_1.4-8
## [22] KernSmooth_2.23-26 withr_3.0.2 purrr_1.1.0
## [25] numDeriv_2016.8-1.1 grid_4.5.1 caTools_1.18.3
## [28] xtable_1.8-4 ggplot2_4.0.0 scales_1.4.0
## [31] gtools_3.9.5 MASS_7.3-65 cli_3.6.5
## [34] mvtnorm_1.3-3 rmarkdown_2.30 reformulas_0.4.1
## [37] generics_0.1.4 rstudioapi_0.17.1 reshape2_1.4.4
## [40] minqa_1.2.8 cachem_1.1.0 stringr_1.5.2
## [43] splines_4.5.1 vctrs_0.6.5 boot_1.3-31
## [46] sandwich_3.1-1 jsonlite_2.0.0 pbkrtest_0.5.5
## [49] Formula_1.2-5 jquerylib_0.1.4 tidyr_1.3.1
## [52] glue_1.8.0 nloptr_2.2.1 codetools_0.2-20
## [55] stringi_1.8.7 gtable_0.3.6 tibble_3.3.0
## [58] pillar_1.11.1 htmltools_0.5.8.1 R6_2.6.1
## [61] Rdpack_2.6.4 evaluate_1.0.5 lattice_0.22-7
## [64] rbibutils_2.3 backports_1.5.0 broom_1.0.10
## [67] bslib_0.9.0 Rcpp_1.1.0 nlme_3.1-168
## [70] mgcv_1.9-3 xfun_0.53 zoo_1.8-14
## [73] pkgconfig_2.0.3