Exam 3: Where’s Waldo

The following data set comes from an experiment with 120 participants, each of whom completed 45 repeated measurements of a visual search task across a number of conditions. In this task, participants were shown a target and then asked to find it in a complex screen. Sometimes the target was present in that screen, and other times it was not (the ‘type’ column). Participants first determined whether the target was there, and then clicked on it. There were a number of different targets (probeFile) and different screens (baseimage). We also measured the overall size of the target (size) in pixels, and its centrality, in pixels from the center of the screen. We measured the overall time to find the target (responsetime) and whether the response was correct (corr) on each trial. For each of the factors, recode with polynomial or Helmert contrasts so that we can examine interactions.

##recode the factors that look like numbers
data <- read.csv("training-pooled.csv")
head(data)
data$subnum <- as.factor(data$subnum)
data$probeCond <- as.factor(data$probeCond)
data$probe <- as.factor(data$probe)
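# A minimal sketch of the contrast recoding the prompt asks for (my own
# illustration; these factors are converted again later where needed):
data$type      <- factor(data$type)
data$baseImage <- factor(data$baseImage)
contrasts(data$type)      <- contr.helmert(nlevels(data$type))   # 2 levels: ABSENT/PRESENT
contrasts(data$baseImage) <- contr.poly(nlevels(data$baseImage)) # 3 levels: i264, i294, i9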

1. Identifying outliers and influential points and transforming the dependent measure

To begin, we want to use response time as the dependent measure. However, we suspect that it will be non-normal, and might have some highly influential outliers. Examine the raw data and choose an appropriate transformation that will make it as normally distributed as possible. Then, examine whether there are any highly influential points that require removal. Discuss your rationale for removing any points, and demonstrate (using histograms, Q-Q plots, or violin plots) that the transformed data are reasonable, both overall and for each baseimage condition.

#install.packages("nortest")


defaultW <- getOption("warn") 

options(warn = -1)

library(rcompanion)
library(car)
library(nortest)
library(MASS)

#Let's first check if we have some NA values in our data

sum(is.na(data)) #we don't!
## [1] 0
#View(data)

#Checking normality for the variable "responsetime"

##visual checking
qqPlot(data$responsetime, main = 'Evidence of non-normally distributed data', ylab = 'sample quantiles') # normality assumption violated

## [1] 1171 4951
##more formal way
ad.test(data$responsetime) # strong evidence that the data are not normally distributed
## 
##  Anderson-Darling normality test
## 
## data:  data$responsetime
## A = 648.47, p-value < 2.2e-16
#Find the best power transform for this variable via Box-Cox

rt = boxcox(data$responsetime ~ 1)

Cox = data.frame(rt$x, rt$y)             # Collect the lambda grid and log-likelihoods
Cox2 = Cox[with(Cox, order(-rt.y)),]     # Order by decreasing log-likelihood
Cox2[1,]                                 # Display the lambda with the highest log-likelihood
lambda = Cox2[1, "rt.x"]
rt_transformed = (data$responsetime ^ lambda - 1)/lambda
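# Equivalent one-liner for extracting lambda from the boxcox output (a sketch;
# gives the same result as the data-frame approach above):
lambda_alt = rt$x[which.max(rt$y)]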

#Flag outliers more than 1.5 * IQR beyond the 25th and 75th percentiles of the transformed data

Q <- quantile(rt_transformed, probs=c(.25, .75), na.rm = FALSE)

iqr <- IQR(rt_transformed)
up <-  Q[2]+1.5*iqr # Upper Range  
low<- Q[1]-1.5*iqr # Lower Range

data$rt_transformed = rt_transformed
data2 = data[data$rt_transformed > low & data$rt_transformed < up,] # reuse the bounds computed above


#Trying another transform: Tukey's Ladder of Powers (its internal Shapiro-Wilk
#test accepts at most 5000 points, so we test it on a subsample)

try <- transformTukey(data$responsetime[1:5000], plotit = F) # suggested lambda = 0.05; if lambda > 0, TRANS = variable ^ lambda
## 
##     lambda      W Shapiro.p.value
## 403   0.05 0.9273       5.433e-44
## 
## if (lambda >  0){TRANS = x ^ lambda} 
## if (lambda == 0){TRANS = log(x)} 
## if (lambda <  0){TRANS = -1 * x ^ lambda}
diff_transform = data$responsetime ^ 0.05 # Taking the first 5000 points may not be the most robust criterion, but randomizing which points to include gave very similar results, so I stuck with the simplest option (see the sketch below).
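# Sketch of the randomized-subsample check mentioned above (the seed is my own
# arbitrary choice, for reproducibility only):
set.seed(123)
idx = sample(nrow(data), 5000)
transformTukey(data$responsetime[idx], plotit = F)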

#Similar procedure

Q2 <- quantile(diff_transform, probs=c(.25, .75), na.rm = FALSE)
iqr2 <- IQR(diff_transform)
up2 <-  Q2[2]+1.5*iqr2 # Upper Range  
low2<- Q2[1]-1.5*iqr2 # Lower Range


#Number of points retained by each of the two procedures

length(data2$rt_transformed) # the Box-Cox version retains more points: 5400 - 5131 = 269 removed
## [1] 5131
length(diff_transform[diff_transform > low2 & diff_transform < up2])# 5400 - 5104 = 296 points removed
## [1] 5104
ad.test(data2$rt_transformed) 
## 
##  Anderson-Darling normality test
## 
## data:  data2$rt_transformed
## A = 31.184, p-value < 2.2e-16
ad.test(diff_transform[diff_transform > low2 & diff_transform < up2])
## 
##  Anderson-Darling normality test
## 
## data:  diff_transform[diff_transform > low2 & diff_transform < up2]
## A = 35.874, p-value < 2.2e-16
#Both transforms still yield the same conclusion (normality rejected), but the first (Box-Cox) retains more points and has a slightly lower A statistic, so we will stick with it.

options(warn = defaultW)
defaultW <- getOption("warn") 
options(warn = -1)

library(vioplot)

#visualizing transformed data

v1 = data2$rt_transformed[data2$baseImage == 'i264']
v2 = data2$rt_transformed[data2$baseImage == 'i294']
v3 = data2$rt_transformed[data2$baseImage == 'i9']

par(mfrow = c(1,2))
vioplot(data2$rt_transformed, v1, v2, v3, col = c('purple', 'gold', 'gold', 'gold'),
        names = c('All conditions', 'i264', 'i294', 'i9'), ylim = c(5.4, 8))
title(main = "Distributions of transformed rt variable in different conditions",
      ylab = "(rt ^ (-0.02) - 1)/(-0.02)", xlab = "baseImage")

vioplot(data$responsetime, col = 'lightgreen', names = c('Raw data'), ylim = c(0, 26000))
title(main = "Distribution of raw rt data", ylab = "rt")

#Although the data still fail the formal normality test, the violin plots show that the distribution of the transformed response times looks far more normal than before, both overall and within each of the three baseImage conditions. A complementary Q-Q check is sketched below.
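# Optional complementary check (my own addition): Q-Q plots of the transformed
# RTs, overall and within each baseImage condition.
par(mfrow = c(2, 2))
qqPlot(data2$rt_transformed, main = 'All conditions', ylab = 'transformed RT')
for (img in c('i264', 'i294', 'i9')) {
  qqPlot(data2$rt_transformed[data2$baseImage == img], main = img, ylab = 'transformed RT')
}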

options(warn = defaultW)

2. ANOVA for base image condition

For this question, pretend we did not have repeated measures (ignore the subnum variable), and build an ANOVA model using your transformed RT as the dependent measure. Consider only the correct responses, so filter out any trials whose corr value is 0. Include baseimage, type, and a baseImage x type interaction, with size and centrality as numerical covariates. Use a type-II ANOVA, and determine which variables are significant. For each one, discuss in words what that means. Also conduct a type-III ANOVA, and discuss whether either main effect (type or baseimage) or the type x baseimage interaction changed. For the Type-II model, compute post-hoc tests for baseImage and the baseImage by type interaction to determine which levels differed from each other.
Report the post-hoc tests and the overall effect sizes, and discuss both the relative importance of these predictors and the overall proportion of variance predicted by this model. Is it good or bad?

defaultW <- getOption("warn") 
options(warn = -1)

library('car')
library(sjstats)

data3 = data2[data2$corr==1,] #filter for corr = 1

#Checking whether both covariates matter

summary(lm(rt_transformed ~ Centrality,data=data3)) #this matters (p<2e-16)
## 
## Call:
## lm(formula = rt_transformed ~ Centrality, data = data3)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.16243 -0.29243 -0.04911  0.24161  1.28480 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 6.509e+00  1.662e-02  391.68   <2e-16 ***
## Centrality  6.817e-04  5.935e-05   11.49   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4149 on 4627 degrees of freedom
## Multiple R-squared:  0.02772,    Adjusted R-squared:  0.02751 
## F-statistic: 131.9 on 1 and 4627 DF,  p-value: < 2.2e-16
summary(lm(rt_transformed ~ size,data=data3)) #this also matters (p<2e-16)
## 
## Call:
## lm(formula = rt_transformed ~ size, data = data3)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.17997 -0.29470 -0.04663  0.24609  1.32722 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 6.530e+00  1.715e-02  380.85   <2e-16 ***
## size        9.566e-04  9.792e-05    9.77   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4165 on 4627 degrees of freedom
## Multiple R-squared:  0.02021,    Adjusted R-squared:   0.02 
## F-statistic: 95.45 on 1 and 4627 DF,  p-value: < 2.2e-16
#Testing ANOVA type II

ts = aov(rt_transformed~baseImage+type+baseImage:type+size+Centrality,data=data3)

typeii = Anova(ts) 
typeii
# Only the effects of our covariate centrality and the interaction between baseImage x type are significant.

#Change the contrasts so the factor variables are orthogonal for the Anova type III

data3$type = factor(data3$type)
data3$baseImage = factor(data3$baseImage)

contrasts(data3$type) = contr.poly(2)
contrasts(data3$baseImage) = contr.poly(3)

#Testing ANOVA type III

typeiii = Anova(aov(rt_transformed~baseImage+type+baseImage:type+size+Centrality,data=data3), type = 'III')
# The result is the same here: even after accounting for the interaction, the main effects are still not significant.

typeiii
#Post-hoc tests for the Type-II Anova
contrasts(data3$type) = contr.treatment(2) # back to treatment contrasts for easier interpretation
contrasts(data3$baseImage) = contr.treatment(3)

TukeyHSD(ts, which = 'baseImage')
# Note that TukeyHSD operates on the sequential (Type-I) aov fit, so its results
# can differ from the Type-II table: under Type I (sequential sums of squares)
# baseImage is significant, whereas under Type II it is not, because Type II
# adjusts for the other main effects.
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = rt_transformed ~ baseImage + type + baseImage:type + size + Centrality, data = data3)
## 
## $baseImage
##                  diff         lwr         upr    p adj
## i294-i264  0.06781594  0.03247162  0.10316027 2.09e-05
## i9-i264   -0.08742835 -0.12171866 -0.05313805 0.00e+00
## i9-i294   -0.15524429 -0.19046357 -0.12002502 0.00e+00
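# An alternative post-hoc based on the marginal (Type II/III) means rather than
# the sequential fit (a sketch; assumes the emmeans package is installed):
library(emmeans)
emmeans(ts, pairwise ~ baseImage, adjust = "tukey")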
interaction = TukeyHSD(ts,which='baseImage:type')

significant_int = interaction$`baseImage:type`[,4][interaction$`baseImage:type`[,4]<0.05]
significant_int
##     i9:ABSENT-i264:ABSENT  i294:PRESENT-i264:ABSENT    i9:PRESENT-i264:ABSENT 
##              2.623766e-02              2.543295e-03              4.132882e-08 
##     i9:ABSENT-i294:ABSENT    i9:PRESENT-i294:ABSENT    i294:PRESENT-i9:ABSENT 
##              1.703856e-06              4.044535e-08              4.138418e-08 
##      i9:PRESENT-i9:ABSENT i294:PRESENT-i264:PRESENT   i9:PRESENT-i264:PRESENT 
##              3.001957e-03              7.821767e-04              5.374890e-07 
##   i9:PRESENT-i294:PRESENT 
##              4.044535e-08
#For baseImage, all three of its levels differ significantly from one another (p < 0.05). For the baseImage x type interaction, the table above shows which cell pairs differ significantly. To interpret a difference, take for example i294:PRESENT - i9:ABSENT, diff = 0.0788: response times are longer when the target is present in the i294 screen than when it is absent from the i9 screen, by 0.0788 on the transformed scale. Note that a difference on the Box-Cox scale does not correspond to a fixed number of ms; to express it in ms, back-transform the cell means with the inverse transform x = (lambda*y + 1)^(1/lambda), lambda = -0.0202, and take their difference (see the helper below).
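# Hypothetical helper (my own addition, not part of the assignment): invert the
# Box-Cox transform so cell means on the transformed scale can be reported in ms.
inv_boxcox = function(y, lambda) (lambda * y + 1)^(1 / lambda)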

effectsize::cohens_f(typeii) 
#The Cohen’s f effect sizes for the type II Anova are roughly 0.02, 0.03, 0.03, 0.09, and 0.05 for baseImage, type, size, Centrality, and baseImage:type respectively. Note that Cohen’s f is not a proportion of variance: it relates to (partial) eta-squared via eta^2 = f^2 / (1 + f^2), so by Cohen’s conventions all of these are small effects.

effectsize::cohens_f(ts)
#For comparison, the Cohen’s f values from the sequential (type I) fit are roughly 0.15, 0.03, 0.04, 0.09, and 0.05 for the same terms. The baseImage effect looks much larger there (f = 0.15), but the sequential fit does not adjust that term for the other predictors, so the type II values are the more defensible ones to report.

#Going by the type II effect sizes, the model explains only a small share of the variance, so it is not a great model, though whether that matters depends on the context.
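# Conversion sketch for the f values quoted above (eta^2 = f^2 / (1 + f^2)):
f_to_eta2 = function(f) f^2 / (1 + f^2)
f_to_eta2(c(0.02, 0.03, 0.03, 0.09, 0.05))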

options(warn = defaultW)

3. Repeated Measures

Now, incorporate subject number as a randomized factor, specifying error strata with Error(subnum/(baseImage*type)). Report the appropriate F test for the main effects of baseimage and type and their interaction (pulling them from the proper error strata of the output). Compute, report, and discuss the effect sizes eta^2 for the two main effects and the interaction.

defaultW <- getOption("warn") 
options(warn = -1)

model = aov(rt_transformed ~  baseImage * type + Error(subnum/(baseImage*type)),data=data3)
summary(model)
## 
## Error: subnum
##                 Df Sum Sq Mean Sq F value Pr(>F)
## baseImage        2   3.48  1.7414   1.321  0.271
## type             1   0.56  0.5566   0.422  0.517
## baseImage:type   2   0.27  0.1326   0.101  0.904
## Residuals      114 150.25  1.3180               
## 
## Error: subnum:baseImage
##                 Df Sum Sq Mean Sq F value Pr(>F)    
## baseImage        2  17.69   8.845  51.553 <2e-16 ***
## type             1   0.18   0.182   1.058  0.305    
## baseImage:type   2   0.31   0.156   0.907  0.405    
## Residuals      235  40.32   0.172                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: subnum:type
##                 Df Sum Sq Mean Sq F value Pr(>F)  
## type             1  0.565  0.5648   3.125 0.0797 .
## baseImage:type   2  0.477  0.2384   1.319 0.2713  
## Residuals      116 20.964  0.1807                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: subnum:baseImage:type
##                 Df Sum Sq Mean Sq F value   Pr(>F)    
## baseImage:type   2  2.612  1.3059   10.35 4.93e-05 ***
## Residuals      235 29.657  0.1262                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: Within
##             Df Sum Sq Mean Sq F value Pr(>F)
## Residuals 3913  551.8   0.141
# We can now generalize the effect of baseImage across subjects --> F(2,235) = 51.553, p < 2e-16
# the effect of type across subjects --> F(1,116) = 3.125, p = 0.0797 --> not significant
# the effect of their interaction across subjects --> F(2,235) =  10.35,p = 4.93e-05

library(sjstats)
eta_sq(model)
# The effect size for the screen variable (baseImage): eta^2 = 0.022
# The effect size for whether the target appears or not (type): eta^2 = 0.001
# The effect size for their interaction: eta^2 = 0.003
# These are all very small effect sizes.
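# eta_sq() has been deprecated in recent sjstats releases; an equivalent,
# assuming the effectsize package is installed:
effectsize::eta_squared(model, partial = FALSE)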

options(warn = defaultW)

4. EzANOVA

Use an ezANOVA to build the same model, excluding the size and centrality variables if the model won’t incorporate continuous predictors. Examine the sphericity test. If it is significant, report the adjusted F-test values using the GG or HF correction for baseimage, type, and baseimage:type; otherwise report the uncorrected numbers. The ezANOVA does not like unbalanced designs, so use the original data (with no rows removed). You will need to specify both baseImage and type as within variables, which you can do with the argument within=.(baseImage,type) inside the ezANOVA function. Note: be sure to print() the ezANOVA results if you want them to show up in your .Rmd output.

defaultW <- getOption("warn") 
options(warn = -1)

library(ez)

data$type = factor(data$type)
data$baseImage = factor(data$baseImage) # probably redundant, but it avoids spurious errors in ezANOVA

m1 = ezANOVA(data=data, dv = rt_transformed, wid = subnum, within = .(baseImage,type), type = 3, detailed = TRUE)
m1$ANOVA
print(m1)
## $ANOVA
##           Effect DFn DFd          SSn       SSd            F             p
## 1    (Intercept)   1 119 3.257650e+04 32.375973 1.197370e+05 1.507451e-180
## 2      baseImage   2 238 4.038319e+00  9.492719 5.062406e+01  4.798244e-19
## 3           type   1 119 1.170593e+00  6.392995 2.178956e+01  8.048638e-06
## 4 baseImage:type   2 238 1.157172e-01  8.423448 1.634763e+00  1.971804e-01
##   p<.05         ges
## 1     * 0.998262960
## 2     * 0.066503444
## 3     * 0.020232965
## 4       0.002037244
## 
## $`Mauchly's Test for Sphericity`
##           Effect         W          p p<.05
## 2      baseImage 0.9459549 0.03770125     *
## 4 baseImage:type 0.9948165 0.73592714      
## 
## $`Sphericity Corrections`
##           Effect       GGe        p[GG] p[GG]<.05       HFe        p[HF]
## 2      baseImage 0.9487260 3.370402e-18         * 0.9636607 1.910081e-18
## 4 baseImage:type 0.9948432 1.973282e-01           1.0117158 1.971804e-01
##   p[HF]<.05
## 2         *
## 4
#Mauchly's Test of Sphericity indicated that the sphericity assumption was violated for baseImage (W = 0.946, p = 0.038) but not for the baseImage x type interaction (W = 0.995, p = 0.736). Furthermore, the effect of the screen (baseImage) on response time remains significant even after the Greenhouse-Geisser and Huynh-Feldt corrections (p[GG] = 3.370402e-18, p[HF] = 1.910081e-18).

#I wasn't sure which data set the question refers to: data2 has rows removed, but unlike data3 it still contains incorrect trials (data3 keeps only corr == 1). For completeness I also report results for data2, i.e., the data with outliers removed.

m2 = ezANOVA(data=data2, dv = rt_transformed, wid = subnum, within = .(baseImage,type), type = 3, detailed = TRUE)
m2$ANOVA
print(m2)
## $ANOVA
##           Effect DFn DFd          SSn       SSd            F             p
## 1    (Intercept)   1 119 3.242637e+04 24.866171 1.551802e+05 3.047131e-187
## 2      baseImage   2 238 3.474094e+00  6.128100 6.746253e+01  6.161562e-24
## 3           type   1 119 5.100101e-01  3.363767 1.804263e+01  4.310660e-05
## 4 baseImage:type   2 238 3.012239e-01  4.405376 8.136795e+00  3.817760e-04
##   p<.05         ges
## 1     * 0.998805999
## 2     * 0.082251386
## 3     * 0.012986138
## 4     * 0.007710909
## 
## $`Mauchly's Test for Sphericity`
##           Effect         W         p p<.05
## 2      baseImage 0.9926617 0.6475526      
## 4 baseImage:type 0.9872320 0.4685252      
## 
## $`Sphericity Corrections`
##           Effect       GGe        p[GG] p[GG]<.05      HFe        p[HF]
## 2      baseImage 0.9927152 8.810263e-24         * 1.009497 6.161562e-24
## 4 baseImage:type 0.9873929 4.073871e-04         * 1.003947 3.817760e-04
##   p[HF]<.05
## 2         *
## 4         *
#For data2 the baseImage x type interaction is significant (p = 3.82e-04), and Mauchly's Test of Sphericity indicated that the sphericity assumption was not violated for either term, so there is no need to report the correction terms.

options(warn = defaultW)

5. (Extra Credit) Mixed-effects model

Use lme4 (lmer) or nlme (lme) to build the same model as a mixed-effects model, treating subject as a randomized factor. Again, report the significance of baseImage, type, and their interaction. Create a second model without the interaction term, and compare the two models with anova(). Calculate post-hoc pairwise contrasts or Tukey tests using the multcomp library. Discuss what you have found.

#creating mixed effects model

defaultW <- getOption("warn") 
options(warn = -1)

library(lme4)
library(multcomp)
library(lsmeans)
library(effects)

lme = lmer(data=data2, rt_transformed ~ baseImage * type + (1 | subnum))
lme2 = lmer(data=data2, rt_transformed ~ baseImage + type + (1 | subnum))

anova(lme,lme2)
# Adding the interaction term led to a significantly better fit than the model without it (p = 0.0008522). (anova() refits the REML fits with maximum likelihood before running the likelihood-ratio test.)

interaction_plot <- allEffects(lme)
plot(interaction_plot)

#post-hoc tests for best model

baseImage_posthoc = summary(glht(lme, mcp(baseImage = c("i9 - i294 = 0",
                                                        "i9 - i264 = 0",
                                                        "i294 - i264 = 0"))),
                            test = adjusted("holm"))
# Note: the test= argument belongs to summary(); if passed to glht() instead it
# is silently ignored and the single-step default is used (as in the output below).
type_posthoc = summary(glht(lme, mcp(type = "Tukey"))) # no need to adjust for a single comparison
interaction_posthoc = lsmeans(lme,
                              pairwise ~ baseImage:type,
                              adjust = "holm", pbkrtest.limit = 5131)

baseImage_posthoc;type_posthoc;interaction_posthoc
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: User-defined Contrasts
## 
## 
## Fit: lmer(formula = rt_transformed ~ baseImage * type + (1 | subnum), 
##     data = data2)
## 
## Linear Hypotheses:
##                  Estimate Std. Error z value Pr(>|z|)    
## i9 - i294 == 0   -0.11832    0.01727  -6.852  < 0.001 ***
## i9 - i264 == 0   -0.06132    0.01721  -3.563  0.00102 ** 
## i294 - i264 == 0  0.05700    0.01736   3.283  0.00296 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- single-step method)
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = rt_transformed ~ baseImage * type + (1 | subnum), 
##     data = data2)
## 
## Linear Hypotheses:
##                       Estimate Std. Error z value Pr(>|z|)  
## PRESENT - ABSENT == 0 -0.03960    0.01913   -2.07   0.0385 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- single-step method)
## $lsmeans
##  baseImage type    lsmean     SE  df lower.CL upper.CL
##  i264      ABSENT    6.74 0.0202 237     6.70     6.78
##  i294      ABSENT    6.80 0.0203 240     6.76     6.84
##  i9        ABSENT    6.68 0.0202 234     6.64     6.72
##  i264      PRESENT   6.70 0.0218 319     6.66     6.74
##  i294      PRESENT   6.78 0.0219 324     6.74     6.83
##  i9        PRESENT   6.57 0.0218 316     6.52     6.61
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## $contrasts
##  contrast                    estimate     SE   df t.ratio p.value
##  i264 ABSENT - i294 ABSENT    -0.0570 0.0174 5008 -3.283  0.0052 
##  i264 ABSENT - i9 ABSENT       0.0613 0.0172 5007  3.563  0.0022 
##  i264 ABSENT - i264 PRESENT    0.0396 0.0191 5007  2.070  0.1155 
##  i264 ABSENT - i294 PRESENT   -0.0459 0.0192 5007 -2.387  0.0681 
##  i264 ABSENT - i9 PRESENT      0.1711 0.0191 5007  8.970  <.0001 
##  i294 ABSENT - i9 ABSENT       0.1183 0.0173 5008  6.852  <.0001 
##  i294 ABSENT - i264 PRESENT    0.0966 0.0192 5008  5.036  <.0001 
##  i294 ABSENT - i294 PRESENT    0.0111 0.0193 5008  0.574  0.5659 
##  i294 ABSENT - i9 PRESENT      0.2281 0.0191 5007 11.925  <.0001 
##  i9 ABSENT - i264 PRESENT     -0.0217 0.0190 5007 -1.141  0.5081 
##  i9 ABSENT - i294 PRESENT     -0.1072 0.0192 5007 -5.599  <.0001 
##  i9 ABSENT - i9 PRESENT        0.1098 0.0190 5007  5.780  <.0001 
##  i264 PRESENT - i294 PRESENT  -0.0855 0.0209 5007 -4.092  0.0003 
##  i264 PRESENT - i9 PRESENT     0.1315 0.0207 5007  6.338  <.0001 
##  i294 PRESENT - i9 PRESENT     0.2170 0.0208 5007 10.411  <.0001 
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: holm method for 15 tests
#The results show that response times differ significantly between all three screens (p < 0.05). Similarly, whether the target is present or absent also makes a significant difference (p = 0.0385).

interaction_posthoc = as.data.frame(interaction_posthoc$contrasts)
sig = interaction_posthoc[interaction_posthoc$p.value < 0.05,]
sig
#This table lists all the significant interaction contrasts. To interpret it, take for example the pair i264 PRESENT - i9 PRESENT: when the target is present in the i264 screen, people react more slowly than when it is present in the i9 screen, by 0.1315 on the transformed scale. As in Part 2, this difference cannot be converted directly into a fixed number of ms; instead, back-transform the cell means with inv_boxcox() (defined above, lambda = -0.0202) and take their difference, as sketched below.
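# Illustrative back-transformation using the lsmeans reported above (assumes
# lambda = -0.0202 from Part 1 and the inv_boxcox() helper defined earlier):
inv_boxcox(6.70, -0.0202) - inv_boxcox(6.57, -0.0202) # ms difference between the i264:PRESENT and i9:PRESENT cell means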

options(warn = defaultW)