Load Packages and Helper Functions

rm(list=ls())
library(lattice)
suppressPackageStartupMessages(library(lme4))
library(knitr)
library(ggplot2)

Load Helper Functions

summarySE

This function can be found on the website “Cookbook for R”.

http://www.cookbook-r.com/Manipulating_data/Summarizing_data/

It summarizes data, giving count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).

  • data: a data frame
  • measurevar: the name of a column that contains the variable to be summarized
  • groupvars: a vector containing names of columns that contain grouping variables
  • na.rm: a boolean that indicates whether to ignore NA’s
  • conf.interval: the percent range of the confidence interval (default is 95%)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
    require(plyr)

    # New version of length which can handle NA's: if na.rm==T, don't count them
    length2 <- function (x, na.rm=FALSE) {
        if (na.rm) sum(!is.na(x))
        else       length(x)
    }

    # This does the summary. For each group's data frame, return a vector with
    # N, mean, and sd
    datac <- ddply(data, groupvars, .drop=.drop,
      .fun = function(xx, col) {
        c(N    = length2(xx[[col]], na.rm=na.rm),
          mean = mean   (xx[[col]], na.rm=na.rm),
          sd   = sd     (xx[[col]], na.rm=na.rm)
        )
      },
      measurevar
    )

    # Rename the "mean" column    
    datac <- rename(datac, c("mean" = measurevar))

    datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean

    # Confidence interval multiplier for standard error
    # Calculate t-statistic for confidence interval: 
    # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
    ciMult <- qt(conf.interval/2 + .5, datac$N-1)
    datac$ci <- datac$se * ciMult

    return(datac)
}
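
For illustration, a minimal call might look like the following (the data frame and column names are invented for this sketch, which is shown as comments rather than run as part of the analysis):

# d.example <- data.frame(condition = rep(c("A", "B"), each = 3),
#                         score     = c(1, 2, 3, 4, 5, 6))
# summarySE(d.example, measurevar = "score", groupvars = "condition")
# returns one row per condition with columns N, score (the mean), sd, se and ci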

summarySEwithin

This function can be found on the website “Cookbook for R”.

http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)/#Helper functions

From that website:

Summarizes data, handling within-subjects variables by removing inter-subject variability. It will still work if there are no within-S variables. Gives count, un-normed mean, normed mean (with same between-group mean), standard deviation, standard error of the mean, and confidence interval. If there are within-subject variables, calculate adjusted values using method from Morey (2008).

  • data: a data frame
  • measurevar: the name of a column that contains the variable to be summarized
  • betweenvars: a vector containing names of columns that are between-subjects variables
  • withinvars: a vector containing names of columns that are within-subjects variables
  • idvar: the name of a column that identifies each subject (or matched subjects)
  • na.rm: a boolean that indicates whether to ignore NA’s
  • conf.interval: the percent range of the confidence interval (default is 95%)
summarySEwithin <- function(data=NULL, measurevar, betweenvars=NULL, withinvars=NULL,
                            idvar=NULL, na.rm=FALSE, conf.interval=.95, .drop=TRUE) {

  # Ensure that the betweenvars and withinvars are factors
  factorvars <- vapply(data[, c(betweenvars, withinvars), drop=FALSE],
    FUN=is.factor, FUN.VALUE=logical(1))

  if (!all(factorvars)) {
    nonfactorvars <- names(factorvars)[!factorvars]
    message("Automatically converting the following non-factors to factors: ",
            paste(nonfactorvars, collapse = ", "))
    data[nonfactorvars] <- lapply(data[nonfactorvars], factor)
  }

  # Get the means from the un-normed data
  datac <- summarySE(data, measurevar, groupvars=c(betweenvars, withinvars),
                     na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)

  # Drop all the unused columns (these will be calculated with normed data)
  datac$sd <- NULL
  datac$se <- NULL
  datac$ci <- NULL

  # Norm each subject's data
  ndata <- normDataWithin(data, idvar, measurevar, betweenvars, na.rm, .drop=.drop)

  # This is the name of the new column
  measurevar_n <- paste(measurevar, "_norm", sep="")

  # Collapse the normed data - now we can treat between and within vars the same
  ndatac <- summarySE(ndata, measurevar_n, groupvars=c(betweenvars, withinvars),
                      na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)

  # Apply correction from Morey (2008) to the standard error and confidence interval
  #  Get the product of the number of conditions of within-S variables
  nWithinGroups    <- prod(vapply(ndatac[,withinvars, drop=FALSE], FUN=nlevels,
                           FUN.VALUE=numeric(1)))
  correctionFactor <- sqrt( nWithinGroups / (nWithinGroups-1) )

  # Apply the correction factor
  ndatac$sd <- ndatac$sd * correctionFactor
  ndatac$se <- ndatac$se * correctionFactor
  ndatac$ci <- ndatac$ci * correctionFactor

  # Combine the un-normed means with the normed results
  merge(datac, ndatac)
}

normDataWithin

This function is used by the summarySEwithin function above. It can be found on the website “Cookbook for R”.

http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)/#Helper functions

From that website: Norms the data within specified groups in a data frame; it normalizes each subject (identified by idvar) so that they have the same mean, within each group specified by betweenvars.

  • data: a data frame
  • idvar: the name of a column that identifies each subject (or matched subjects)
  • measurevar: the name of a column that contains the variable to be summarized
  • betweenvars: a vector containing names of columns that are between-subjects variables
  • na.rm: a boolean that indicates whether to ignore NA’s
normDataWithin <- function(data=NULL, idvar, measurevar, betweenvars=NULL,
                           na.rm=FALSE, .drop=TRUE) {
    #library(plyr)

    # Measure var on left, idvar + between vars on right of formula.
    data.subjMean <- ddply(data, c(idvar, betweenvars), .drop=.drop,
     .fun = function(xx, col, na.rm) {
        c(subjMean = mean(xx[,col], na.rm=na.rm))
      },
      measurevar,
      na.rm
    )

    # Put the subject means with original data
    data <- merge(data, data.subjMean)

    # Get the normalized data in a new column
    measureNormedVar <- paste(measurevar, "_norm", sep="")
    data[,measureNormedVar] <- data[,measurevar] - data[,"subjMean"] +
                               mean(data[,measurevar], na.rm=na.rm)

    # Remove this subject mean column
    data$subjMean <- NULL

    return(data)
}
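
For illustration, a sketch of a direct call on an invented data frame with one row per participant and condition (names are hypothetical; plyr must already be loaded, e.g. by a prior call to summarySE; shown as comments rather than run):

# d.example <- data.frame(participant = rep(1:4, each = 2),
#                         agegroup    = rep(c("child", "adult"), each = 4),
#                         condition   = rep(c("lv", "hv"), times = 4),
#                         score       = c(1, 2, 2, 3, 4, 5, 5, 6))
# normDataWithin(d.example, idvar = "participant", measurevar = "score",
#                betweenvars = "agegroup")
# returns d.example plus a "score_norm" column in which each participant's mean is
# equated (the subject mean is removed and the grand mean added back)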

myCenter

This function outputs the centered values of a variable, which can be a numeric variable, a factor, or a data frame. It was taken from Florian Jaeger's blog:

https://hlplab.wordpress.com/2009/04/27/centering-several-variables/.

From his blog:

  • If the input is a numeric variable, the output is the centered variable
  • If the input is a factor, the output is a numeric variable with centered factor level values. That is, the factor’s levels are converted into numerical values in their inherent order (if not specified otherwise, R defaults to alphanumerical order). More specifically, this centers any binary factor so that the value below 0 will be the 1st level of the original factor, and the value above 0 will be the 2nd level.
  • If the input is a data frame or matrix, the output is a new matrix of the same dimension and with the centered values and column names that correspond to the colnames() of the input preceded by “c” (e.g. “Variable1” will be “cVariable1”).
myCenter = function(x) {
    if (is.numeric(x)) { return(x - mean(x, na.rm=T)) }
    if (is.factor(x)) {
        x = as.numeric(x)
        return(x - mean(x, na.rm=T))
    }
    if (is.data.frame(x) || is.matrix(x)) {
        m = matrix(nrow=nrow(x), ncol=ncol(x))
        colnames(m) = paste("c", colnames(x), sep="")
        for (i in 1:ncol(x)) {
            m[,i] = myCenter(x[,i])
        }
        return(as.data.frame(m))
    }
}
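
For illustration, the return values for a numeric vector, a binary factor and a one-column data frame (invented inputs, shown as comments rather than run):

# myCenter(c(1, 2, 3))             # -1  0  1
# myCenter(factor(c("a", "b")))    # -0.5  0.5 (levels coded 1 and 2, then centered)
# myCenter(data.frame(x = 1:3))    # data frame with one centered column named "cx"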

lizCenter

This function provides a wrapper around myCenter allowing you to center a specific list of variables from a dataframe.

  • x: data frame
  • listfname: a list of the names of the variables to be centered (e.g. list("variable1", "variable2"))

The output is a copy of the data frame with a column (always a numeric variable) added for each of the centered variables. These columns are labelled with each column’s previous name, but with “.ct” appended (e.g., “variable1” will become “variable1.ct”).

lizCenter= function(x, listfname) 
{
    for (i in 1:length(listfname)) 
    {
        fname = as.character(listfname[i])
        x[paste(fname,".ct", sep="")] = myCenter(x[fname])
    }
        
    return(x)
}
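
For illustration, with a two-level factor coded in the same way as the var factor used below (invented data frame, shown as comments rather than run):

# d.example <- data.frame(var = factor(c("lv", "lv", "hv", "hv"), levels = c("lv", "hv")))
# d.example <- lizCenter(d.example, list("var"))
# d.example$var.ct   # -0.5 -0.5  0.5  0.5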

lizContrasts

This function can be used to create two centered dummy variables which stand in place of a three-level factor. This allows us to inspect each contrast separately, as well as their interactions with other factors. Other fixed effects in the model can be evaluated as the average effects across all levels of the factor.

  • d: dataframe
  • condition: a three-level factor
  • baselevel: name of the level of the factor that is to be used as the baseline for contrasts

The function takes a data frame (d), a factor from that data frame (condition), which must have three levels, and the name of the level of the factor which is to be used as the baseline for the contrasts (baselevel).

lizContrasts = function(d, condition, baselevel) 
{
    condition = factor(condition)
    condition = relevel(condition, baselevel)

    a = (contrasts(condition) - apply(contrasts(condition), 2, mean))
    d$dummy1[condition == rownames(a)[1]] <- a[1]
    d$dummy1[condition == rownames(a)[2]] <- a[2]
    d$dummy1[condition == rownames(a)[3]] <- a[3]
    d$dummy2[condition == rownames(a)[1]] <- a[4]
    d$dummy2[condition == rownames(a)[2]] <- a[5]
    d$dummy2[condition == rownames(a)[3]] <- a[6]

    name1 = paste(baselevel, rownames(a)[2], sep="_VS_")
    name2 = paste(baselevel, rownames(a)[3], sep="_VS_")

    d[name1] = d$dummy1
    d[name2] = d$dummy2

    d$dummy1 <- NULL
    d$dummy2 <- NULL

    return(d)
}
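
For illustration, with an invented three-level factor and "b" as the baseline (shown as comments rather than run):

# d.example <- data.frame(group = factor(rep(c("a", "b", "c"), each = 2)))
# d.example <- lizContrasts(d.example, d.example$group, "b")
# names(d.example)   # "group" "b_VS_a" "b_VS_c"
# the two new columns are centered dummy variables (taking values 2/3 and -1/3)
# comparing "a" and "c" respectively against the baseline "b"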

get_coeffs

This function allows us to inspect particular coefficients from the output of an lme model by putting them in a table.

  • x: the output returned when running lmer or glmer (i.e. an object of type lmerMod or glmerMod)
  • list: a list of names of the coefficients to be extracted (e.g. c(“variable1”,“variable1:variable2”))
get_coeffs <- function(x,list){(kable(as.data.frame(summary(x)$coefficients)[list,],digits=3))}
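
For illustration, given a fitted model such as one of the glmer models below, a call of the following form tabulates just the listed coefficients (shown as a comment rather than run):

# get_coeffs(final.train.mod, c("(Intercept)", "var.ct"))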

Bf

This function is equivalent to the Dienes (2008) calculator which can be found here: http://www.lifesci.sussex.ac.uk/home/Zoltan_Dienes/inference/Bayes.htm.

The code was provided by Baguley and Kaye (2010) and can be found here: http://www.academia.edu/427288/Review_of_Understanding_psychology_as_a_science_An_introduction_to_scientific_and_statistical_inference

Bf<-function(sd, obtained, uniform, lower=0, upper=1, meanoftheory=0,sdtheory=1, tail=2){
 area <- 0
 if(identical(uniform, 1)){
  theta <- lower
  range <- upper - lower
  incr <- range / 2000
  for (A in -1000:1000){
     theta <- theta + incr
     dist_theta <- 1 / range
     height <- dist_theta * dnorm(obtained, theta, sd)
     area <- area + height * incr
  }
 }else
   {theta <- meanoftheory - 5 * sdtheory
    incr <- sdtheory / 200
    for (A in -1000:1000){
      theta <- theta + incr
      dist_theta <- dnorm(theta, meanoftheory, sdtheory)
      if(identical(tail, 1)){
        if (theta <= 0){
          dist_theta <- 0
        } else {
          dist_theta <- dist_theta * 2
        }
      }
      height <- dist_theta * dnorm(obtained, theta, sd)
      area <- area + height * incr
    }
 }
 LikelihoodTheory <- area
 Likelihoodnull <- dnorm(obtained, 0, sd)
 BayesFactor <- LikelihoodTheory / Likelihoodnull
 ret <- list("LikelihoodTheory" = LikelihoodTheory,"Likelihoodnull" = Likelihoodnull, "BayesFactor" = BayesFactor)
 ret
} 
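
For illustration, a call using a half-normal model of H1 (tail = 1) with its SD set to a predicted effect of 1, for an obtained effect of 0.5 whose standard error is 0.3 (invented numbers, shown as comments rather than run):

# Bf(sd = 0.3, obtained = 0.5, uniform = 0, meanoftheory = 0, sdtheory = 1, tail = 1)
# sd is the standard error of the obtained estimate; the function returns a list
# with LikelihoodTheory, Likelihoodnull and BayesFactor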

Bf_powercalc

This works with the Bf function above. It requires the same values as that function (i.e. the obtained mean and SE for the current sample, and a value for the predicted mean, which is set to be sdtheory with meanoftheory = 0), plus the current number of participants N. However, rather than returning a BF for the current sample, it works out what the BF would be for a range of different subject numbers (assuming that the SE scales with sqrt(N)).

Bf_powercalc<-function(sd, obtained, uniform, lower=0, upper=1, meanoftheory=0, sdtheory=1, tail=2, N, min, max)
{
 
 x = c(0)
 y = c(0)
   # note: working out what the difference between N and df is (for the contrast between two groups, this is 2; for contrasts where there are 4 groups this will be 3, etc.)

 for(newN in min : max)
 {
  B = as.numeric(Bf(sd = sd*sqrt(N/newN), obtained, uniform, lower, upper, meanoftheory, sdtheory, tail)[3])
  x= append(x,newN)  
  y= append(y,B)
  output = cbind(x,y)
  
 } 
 output = output[-1,] 
 return(output) 
}
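
For illustration, projecting the Bayes factor across hypothetical sample sizes, assuming the current estimate (obtained = 0.5, SE = 0.3) came from N = 20 participants (invented numbers, shown as comments rather than run):

# Bf_powercalc(sd = 0.3, obtained = 0.5, uniform = 0, meanoftheory = 0,
#              sdtheory = 1, tail = 1, N = 20, min = 20, max = 60)
# returns a two-column matrix: x (the hypothetical N) and y (the corresponding BF)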

Bf_range

This works with the Bf function above. It requires some of the same values as that function (i.e. the obtained mean and SE for the current sample, and a value for the predicted mean, which is set to be sdtheory with meanoftheory = 0). However, rather than setting sdtheory to a single value, it is set to a vector of values (sdtheoryrange) covering the desired range to test over.

Bf_range<-function(sd, obtained, meanoftheory=0, sdtheoryrange, tail=2)
{
 
 x = c(0)
 y = c(0)

 for(sdi in sdtheoryrange)
 {
 B = as.numeric(Bf(sd, obtained, meanoftheory=0, uniform = 0, sdtheory=sdi, tail=tail)[3])
 
 x= append(x,sdi) 
 y= append(y,B)
 output = cbind(x,y)
 
 } 
 output = output[-1,] 
 colnames(output) = c("sdtheory", "BF")
 return(output) 
}
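
For illustration, this mirrors the robustness-region calls later in the document, but with invented numbers (shown as comments rather than run):

# Bf_range(sd = 0.3, obtained = 0.5, meanoftheory = 0,
#          sdtheoryrange = seq(0.5, 1.5, by = 0.1), tail = 1)
# returns a matrix with columns sdtheory and BF, one row per value in sdtheoryrange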

filter2

This is a function which filters a column of data, removing values which are some number of standard deviations above/below the mean for that participant, possibly in some condition/subcondition.

  • im: the input matrix (a data frame)
  • svn: a list of the names of factors to be grouped by (subject name + one or more conditions)
  • fn: the name of the column containing the data to be filtered
  • lim: how many standard deviations above/below the mean to filter

The function returns a data frame identical to the input matrix but with additional columns giving the group means and the “filtered” data.

filter2 = function(im, svn, fn, lim)
{
  ## work out means for each subject for each word

x = list()
y = ""

for(n in svn) x=append(im[n],x)
for(n in svn) y=paste(y,n,sep="_")

means = aggregate(im[fn], by = x, mean, na.rm=T)
head(means)
nocols = dim(means)[2]
colnames(means)[nocols] = "means"

sds = aggregate(im[fn], by = x, sd, na.rm=T)
head(sds)
nocols = dim(sds)[2]
colnames(sds)[nocols] = "sds"

gs = merge(means,sds)

## if there is just one value it doesn't have a standard deviation, and we don't want to just disregard all of these
gs$sds[is.na(gs$sds)] = 0 

gs$max = gs$means + lim*gs$sds
gs$min = gs$means- lim*gs$sds

im2 = merge(im,gs, sort=F)


im2[paste(fn,"filt",sep="")] = im2[fn]
cn= dim(im2)[2] ## get colnumber (last one added)

im2[,cn][im2[,fn]> im2$max] = ""

im2[,cn][im2[,fn]< im2$min] = ""

im2[,cn]= as.numeric(im2[,cn])

names(im2)[names(im2)=="means"] = paste("mean", y, sep="_") 
names(im2)[names(im2)=="sds"] = paste("sd", y, sep="_") 
names(im2)[names(im2)=="max"] = paste("max", y, sep="_") 
names(im2)[names(im2)=="min"] = paste("min", y, sep="_") 

return(im2)
}
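
For illustration, filtering an invented reaction time column at 2.5 SDs above/below each participant's mean per condition (all names hypothetical, shown as comments rather than run):

# filtered.example <- filter2(im = rt.data, svn = c("participant", "condition"),
#                             fn = "rt", lim = 2.5)
# the result contains the original columns plus group mean/sd/max/min columns and
# "rtfilt", in which values more than 2.5 SDs from the group mean are set to NA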

Training data

Load Data and Set Up Data Frame

Note that “a”, “year3” and “year6” are used as names for the age groups referred to as adults, 7 year olds and 10 year olds, respectively.

train = read.csv("trainingdata.csv")
train$agegroup <- factor(train$agegroup , levels = c("year3", "year6", "a"))
train$var <- factor(train$var, levels = c("lv", "hv"))

Check for data loss - we expect 96 trials * 96 participants = 9216

dim(train)
## [1] 9216   15

No data loss.

Figure 2

library(ggplot2)
aggregated.train = aggregate(correct ~ participant + var + agegroup , train, FUN = mean)
trainm<- summarySEwithin(aggregated.train, measurevar = "correct", betweenvars = c("agegroup"), withinvars = c("var"), idvar = "participant",  na.rm = FALSE, conf.interval = .95)
## Loading required package: plyr
agegroup_names = c('year3' = "7-8 Year Olds", 'year6' ="10-11 Year Olds", 'a' = "Adults")
agegroup_labeller <- function(variable,value){
  return(agegroup_names[value])
}


train_violin = ggplot(aggregated.train, aes(x=agegroup, y=correct,fill=var)) 
train_violin= train_violin  + geom_violin(colour="black", scale="count") 
train_violin = train_violin + scale_fill_manual(name = "Variability Condition", values = c("lv" = "grey", "hv" = "white"), labels = c("Low Variability", "High Variability"))
train_violin = train_violin + scale_x_discrete(labels = c('7-8 Year Olds','10-11 Year Olds','Adults'))
train_violin = train_violin + geom_errorbar(aes(ymin = correct-ci, ymax = correct+ci), width = .4, position = position_dodge(.9), data = trainm)

train_violin= train_violin + stat_summary(fun.y=mean, geom="point", shape=23, size=2,position = position_dodge(.9))
train_violin = train_violin + theme_bw()

train_violin

Get Means

aggregated.train.pt.age = aggregate(correct ~ participant + agegroup, train, FUN = mean)
aggregate(correct ~ agegroup, aggregated.train.pt.age, FUN = mean)
##   agegroup   correct
## 1    year3 0.8385417
## 2    year6 0.8857422
## 3        a 0.9352214
aggregate(correct ~ agegroup, aggregated.train.pt.age, FUN = sd)
##   agegroup    correct
## 1    year3 0.07672924
## 2    year6 0.05527736
## 3        a 0.04231141
aggregated.train.pt.var = aggregate(correct ~ participant + var, train, FUN = mean)
aggregate(correct ~ var, aggregated.train.pt.var, FUN = mean)
##   var   correct
## 1  lv 0.8971354
## 2  hv 0.8758681
aggregate(correct ~ var, aggregated.train.pt.var, FUN = sd)
##   var    correct
## 1  lv 0.06467788
## 2  hv 0.09252571
aggregate(correct ~ var + agegroup, aggregated.train, FUN = sd)
##   var agegroup    correct
## 1  lv    year3 0.06407281
## 2  hv    year3 0.10637569
## 3  lv    year6 0.06210322
## 4  hv    year6 0.07079775
## 5  lv        a 0.04130810
## 6  hv        a 0.05428399

Statistical Analyses

Find slope structure for lme model

# first set up data frame with centered factors and contrasts
levels(train$talker)
## [1] "F1" "F2" "F3" "F4" "M1" "M2" "M3" "M4"
train <- lizCenter(train, list("var", "trialno"))
train = lizContrasts(train, train$agegroup, "year6")

train$trialno.ct = scale(train$trialno.ct) ## note: variable scaled due to warning  

#mod.t.full= glmer(correct ~
 #                  + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
  #                  + (var.ct|participant)
   #                 + (var.ct * (year6_VS_a + year6_VS_year3)|word)
    #                + (var.ct * (year6_VS_a + year6_VS_year3)|talker),
     #               data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))



# doesn't converge

# mod.t.a = glmer(correct ~
 #                   + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
  #                  + (var.ct|participant)
   #                + (var.ct *  (year6_VS_a + year6_VS_year3)|word)
    #                 + (var.ct * (year6_VS_a + year6_VS_year3)||talker),
     #                data = train, family=binomial, control=glmerControl(optimizer = "bobyqa"))

#mod.t.b = glmer(correct ~
#                    + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
#                 + (var.ct|participant)
#                     + (var.ct * (year6_VS_a + year6_VS_year3)||word)
#                     + (var.ct * (year6_VS_a + year6_VS_year3)| talker),
#                     data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))

# doesn't converge

mod.t.c = glmer(correct ~
                    + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
                     + (var.ct|participant)
                     + (var.ct * (year6_VS_a + year6_VS_year3)||word)
                     + (var.ct * (year6_VS_a + year6_VS_year3)||talker),
                     data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))
 
 mod.t.d = glmer(correct ~
                     + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
                     + (var.ct|participant)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||word)
                     + (var.ct * (year6_VS_a + year6_VS_year3)||talker),  
                     data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))
 
anova(mod.t.c,mod.t.d)
## Data: train
## Models:
## mod.t.d: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.d:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.d:     word) + (var.ct * (year6_VS_a + year6_VS_year3) || talker)
## mod.t.c: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.c:     (var.ct | participant) + (var.ct * (year6_VS_a + year6_VS_year3) || 
## mod.t.c:     word) + (var.ct * (year6_VS_a + year6_VS_year3) || talker)
##         Df    AIC    BIC logLik deviance  Chisq Chi Df Pr(>Chisq)
## mod.t.d 25 5636.0 5814.2  -2793   5586.0                         
## mod.t.c 27 5639.9 5832.4  -2793   5585.9 0.0731      2     0.9641
## p > .2, so move to the simpler model

mod.t.e = glmer(correct ~
                     + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
                     + (var.ct|participant)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||word)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||talker),  
                     data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))


anova(mod.t.d, mod.t.e)
## Data: train
## Models:
## mod.t.e: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.e:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.e:     word) + (var.ct + (year6_VS_a + year6_VS_year3) || talker)
## mod.t.d: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.d:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.d:     word) + (var.ct * (year6_VS_a + year6_VS_year3) || talker)
##         Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)
## mod.t.e 23 5635.4 5799.4 -2794.7   5589.4                         
## mod.t.d 25 5636.0 5814.2 -2793.0   5586.0 3.3941      2     0.1832
# p < .2, can't remove slope for interaction var by talker 

mod.t.f= glmer(correct ~
                    + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
                    + (var.ct|participant)
                    + (var.ct ||word)
                     + (var.ct * (year6_VS_a + year6_VS_year3)||talker), 
                    data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(mod.t.d, mod.t.f)
## Data: train
## Models:
## mod.t.f: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.f:     (var.ct | participant) + (var.ct || word) + (var.ct * (year6_VS_a + 
## mod.t.f:     year6_VS_year3) || talker)
## mod.t.d: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.d:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.d:     word) + (var.ct * (year6_VS_a + year6_VS_year3) || talker)
##         Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)   
## mod.t.f 23 5643.3 5807.3 -2798.7   5597.3                            
## mod.t.d 25 5636.0 5814.2 -2793.0   5586.0 11.326      2   0.003472 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# p < .2, can't remove by-word slope for variability

mod.t.g = glmer(correct ~
                    + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
                    + (var.ct|participant)
                    + (agegroup||word)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||talker),   
                    data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))

# doesn't converge if slope is removed - so retain slope

final.train.mod = mod.t.e

Display coefficients

round(summary(final.train.mod)$coefficients,3)
##                                  Estimate Std. Error z value Pr(>|z|)
## (Intercept)                         2.702      0.224  12.058    0.000
## trialno.ct                          0.859      0.045  19.005    0.000
## var.ct                             -0.103      0.144  -0.719    0.472
## year6_VS_a                          1.140      0.210   5.422    0.000
## year6_VS_year3                     -0.481      0.201  -2.389    0.017
## trialno.ct:var.ct                   0.022      0.090   0.246    0.806
## trialno.ct:year6_VS_a               0.603      0.123   4.895    0.000
## trialno.ct:year6_VS_year3          -0.045      0.085  -0.530    0.596
## var.ct:year6_VS_a                   0.251      0.274   0.917    0.359
## var.ct:year6_VS_year3              -0.284      0.179  -1.591    0.112
## trialno.ct:var.ct:year6_VS_a        0.158      0.246   0.641    0.522
## trialno.ct:var.ct:year6_VS_year3   -0.183      0.171  -1.073    0.283
## turn the model round to get the missing contrast
train = lizContrasts(train, train$agegroup, "a")

final.train.mod.v2 = glmer(correct ~
                     + (trialno.ct * var.ct * (a_VS_year3  + a_VS_year6 )) 
                     + (var.ct|participant)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||word)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||talker),  
                     data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(final.train.mod, final.train.mod.v2) # check its the same model
## Data: train
## Models:
## final.train.mod: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## final.train.mod:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## final.train.mod:     word) + (var.ct + (year6_VS_a + year6_VS_year3) || talker)
## final.train.mod.v2: correct ~ +(trialno.ct * var.ct * (a_VS_year3 + a_VS_year6)) + 
## final.train.mod.v2:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## final.train.mod.v2:     word) + (var.ct + (year6_VS_a + year6_VS_year3) || talker)
##                    Df    AIC    BIC  logLik deviance Chisq Chi Df
## final.train.mod    23 5635.4 5799.4 -2794.7   5589.4             
## final.train.mod.v2 23 5635.4 5799.4 -2794.7   5589.4     0      0
##                    Pr(>Chisq)
## final.train.mod              
## final.train.mod.v2          1
round(summary(final.train.mod.v2)$coefficients,3)
##                              Estimate Std. Error z value Pr(>|z|)
## (Intercept)                     2.702      0.224  12.058    0.000
## trialno.ct                      0.859      0.045  19.005    0.000
## var.ct                         -0.103      0.144  -0.719    0.472
## a_VS_year3                     -1.621      0.239  -6.771    0.000
## a_VS_year6                     -1.140      0.210  -5.421    0.000
## trialno.ct:var.ct               0.022      0.090   0.246    0.806
## trialno.ct:a_VS_year3          -0.648      0.119  -5.449    0.000
## trialno.ct:a_VS_year6          -0.603      0.123  -4.895    0.000
## var.ct:a_VS_year3              -0.535      0.265  -2.023    0.043
## var.ct:a_VS_year6              -0.251      0.274  -0.917    0.359
## trialno.ct:var.ct:a_VS_year3   -0.341      0.238  -1.433    0.152
## trialno.ct:var.ct:a_VS_year6   -0.158      0.246  -0.641    0.522

Note: where the contrast is coded with the older group first, these are reported in the paper with beta * -1 (so that we consistently look at contrasts from younger -> older).

Fit separate slopes for different age groups and display coefficients

final.train.mod.sep_age = glmer(correct ~
                    + agegroup : var.ct 
                    + (trialno.ct * var.ct * (a_VS_year6 + a_VS_year3)) 
                    - var.ct:(a_VS_year6 + a_VS_year3)
                    - var.ct
                    + (var.ct|participant)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||word)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||talker),  
                    data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(final.train.mod.sep_age, final.train.mod)
## Data: train
## Models:
## final.train.mod.sep_age: correct ~ +agegroup:var.ct + (trialno.ct * var.ct * (a_VS_year6 + 
## final.train.mod.sep_age:     a_VS_year3)) - var.ct:(a_VS_year6 + a_VS_year3) - var.ct + 
## final.train.mod.sep_age:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## final.train.mod.sep_age:     word) + (var.ct + (year6_VS_a + year6_VS_year3) || talker)
## final.train.mod: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## final.train.mod:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## final.train.mod:     word) + (var.ct + (year6_VS_a + year6_VS_year3) || talker)
##                         Df    AIC    BIC  logLik deviance Chisq Chi Df
## final.train.mod.sep_age 23 5635.4 5799.4 -2794.7   5589.4             
## final.train.mod         23 5635.4 5799.4 -2794.7   5589.4     0      0
##                         Pr(>Chisq)
## final.train.mod.sep_age           
## final.train.mod                  1
round(summary(final.train.mod.sep_age)$coefficients,3)
##                              Estimate Std. Error z value Pr(>|z|)
## (Intercept)                     2.702      0.224  12.058    0.000
## trialno.ct                      0.859      0.045  19.005    0.000
## a_VS_year6                     -1.140      0.210  -5.422    0.000
## a_VS_year3                     -1.621      0.239  -6.772    0.000
## agegroupyear3:var.ct           -0.376      0.155  -2.434    0.015
## agegroupyear6:var.ct           -0.092      0.171  -0.540    0.589
## agegroupa:var.ct                0.159      0.260   0.611    0.542
## var.ct:trialno.ct               0.022      0.090   0.246    0.806
## trialno.ct:a_VS_year6          -0.603      0.123  -4.896    0.000
## trialno.ct:a_VS_year3          -0.648      0.119  -5.450    0.000
## var.ct:trialno.ct:a_VS_year6   -0.158      0.246  -0.641    0.522
## var.ct:trialno.ct:a_VS_year3   -0.341      0.238  -1.433    0.152

Bayes Factor Analysis

# note: the h1mean must be positive, so *-1 for both h1mean and meanBF
meanBF = summary(final.train.mod.sep_age)$coefficients["agegroupyear6:var.ct", "Estimate"]*-1
seBF = summary(final.train.mod.sep_age)$coefficients["agegroupyear6:var.ct", "Std. Error"]
h1mean =  summary(final.train.mod.sep_age)$coefficients["agegroupyear3:var.ct", "Estimate"]*-1
Bf(seBF, meanBF, uniform = 0, meanoftheory = 0, sdtheory = h1mean, tail = 1)
## $LikelihoodTheory
## [1] 1.300764
## 
## $Likelihoodnull
## [1] 2.022643
## 
## $BayesFactor
## [1] 0.643101
meanBF = summary(final.train.mod.sep_age)$coefficients["agegroupa:var.ct", "Estimate"]*-1
seBF = summary(final.train.mod.sep_age)$coefficients["agegroupa:var.ct", "Std. Error"]
h1mean =  summary(final.train.mod.sep_age)$coefficients["agegroupyear3:var.ct", "Estimate"]*-1
Bf(seBF, meanBF, uniform = 0, meanoftheory = 0, sdtheory = h1mean, tail = 1)
## $LikelihoodTheory
## [1] 0.5076577
## 
## $Likelihoodnull
## [1] 1.271465
## 
## $BayesFactor
## [1] 0.39927

Production Test

Load Data and Set Up Data Frame

Note that “a”, “year3” and “year6” are used as names for the age groups referred to as adults, 7 year olds and 10 year olds, respectively.

prod = read.csv("prod.csv")
prod$agegroup <- factor(prod$agegroup, levels = c("year3", "year6", "a"))
prod$var <- factor(prod$var, levels = c("lv", "hv"))

Check for data loss - we expect 96 participants * 24 trials = 2304

dim(prod)
## [1] 2304   12

No data loss.

Figure 3

aggregated.prod = aggregate(correct ~ participant + var + agegroup , data=prod, FUN = mean)


prodm<- summarySEwithin(aggregated.prod, measurevar = "correct", betweenvars = c("agegroup"), withinvars = c("var"), idvar = "participant",  na.rm = FALSE, conf.interval = .95)



prod_violin = ggplot(aggregated.prod, aes(x=agegroup, y=correct,fill=var)) 
prod_violin= prod_violin  + geom_violin(colour="black", scale="count") 
prod_violin = prod_violin + scale_fill_manual(name = "Variability Condition", values = c("lv" = "grey", "hv" = "white"), labels = c("Low Variability", "High Variability"))
prod_violin = prod_violin + scale_x_discrete(labels = c('7-8 Year Olds','10-11 Year Olds','Adults'))
prod_violin = prod_violin + geom_errorbar(aes(ymin = correct-ci, ymax = correct+ci), width = .4, position = position_dodge(.9),data= prodm)
prod_violin= prod_violin + stat_summary(fun.y=mean, geom="point", shape=23, size=2,position = position_dodge(.9))

prod_violin = prod_violin + theme_bw()

prod_violin

Get Means

aggregated.prod.pt.age = aggregate(correct ~ participant+ agegroup , prod, FUN = mean)
aggregated.prod.pt.var = aggregate(correct ~ participant+ var , prod, FUN = mean)

aggregate(correct ~ agegroup, aggregated.prod.pt.age, FUN = mean)
##   agegroup   correct
## 1    year3 0.1041667
## 2    year6 0.2070312
## 3        a 0.3828125
aggregate(correct ~ agegroup, aggregated.prod.pt.age, FUN = sd)
##   agegroup    correct
## 1    year3 0.09165445
## 2    year6 0.17373595
## 3        a 0.23076483
aggregate(correct ~ var, aggregated.prod.pt.var, FUN = mean)
##   var   correct
## 1  lv 0.2170139
## 2  hv 0.2456597
aggregate(correct ~ var, aggregated.prod.pt.var, FUN = sd)
##   var   correct
## 1  lv 0.2276498
## 2  hv 0.2443424
aggregate(correct ~ var + agegroup, aggregated.prod, FUN = mean)
##   var agegroup    correct
## 1  lv    year3 0.11979167
## 2  hv    year3 0.08854167
## 3  lv    year6 0.21614583
## 4  hv    year6 0.19791667
## 5  lv        a 0.31510417
## 6  hv        a 0.45052083
aggregate(correct ~ var + agegroup, aggregated.prod, FUN = sd)
##   var agegroup   correct
## 1  lv    year3 0.1320800
## 2  hv    year3 0.1098580
## 3  lv    year6 0.1926743
## 4  hv    year6 0.1866393
## 5  lv        a 0.2907893
## 6  hv        a 0.2547570

Statistical Analysis

Find slope structure for lme model

prod <- lizCenter(prod, list("var"))
prod = lizContrasts(prod, prod$agegroup, "a")

mod.p.full = glmer(correct ~
              + (var.ct * (a_VS_year3 + a_VS_year6)) 
              + (var.ct|participant)
              + ((var.ct * (a_VS_year3 + a_VS_year6))|item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))

# Converges

mod.p.b = glmer(correct ~
              + (var.ct * (a_VS_year3 + a_VS_year6)) 
              + (var.ct|participant)
              + ((var.ct * (a_VS_year3 + a_VS_year6))||item),
              data = prod, family=binomial, control=glmerControl(optimizer = "bobyqa"))

anova(mod.p.full, mod.p.b)
## Data: prod
## Models:
## mod.p.b: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.b:     ((var.ct * (a_VS_year3 + a_VS_year6)) || item)
## mod.p.full: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.full:     ((var.ct * (a_VS_year3 + a_VS_year6)) | item)
##            Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)
## mod.p.b    15 1866.6 1952.7 -918.30   1836.6                         
## mod.p.full 30 1880.9 2053.1 -910.43   1820.9 15.729     15     0.4003
#p > .2, so can remove correlations between slopes for items

mod.p.c = glmer(correct ~
              + (var.ct * (a_VS_year3 + a_VS_year6)) 
              + (var.ct|participant)
              + ((var.ct + (a_VS_year3 + a_VS_year6))||item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(mod.p.b, mod.p.c)
## Data: prod
## Models:
## mod.p.c: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.c:     ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
## mod.p.b: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.b:     ((var.ct * (a_VS_year3 + a_VS_year6)) || item)
##         Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)
## mod.p.c 13 1864.6 1939.2 -919.28   1838.6                         
## mod.p.b 15 1866.6 1952.7 -918.30   1836.6 1.9763      2     0.3723
# p > .2, so can remove slope for interaction

mod.p.d = glmer(correct ~
              + (var.ct * (a_VS_year3 + a_VS_year6)) 
              + (var.ct|participant)
              + (var.ct||item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(mod.p.c, mod.p.d)
## Data: prod
## Models:
## mod.p.d: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.d:     (var.ct || item)
## mod.p.c: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.c:     ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
##         Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)    
## mod.p.d 11 1875.6 1938.8 -926.80   1853.6                             
## mod.p.c 13 1864.6 1939.2 -919.28   1838.6 15.031      2  0.0005446 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# p < .2, so retain by-item slope for agegroup in the model

mod.p.e = glmer(correct ~
              + (var.ct * (a_VS_year3 + a_VS_year6)) 
              + (var.ct|participant)
              + ((a_VS_year3 + a_VS_year6)||item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(mod.p.c, mod.p.e)
## Data: prod
## Models:
## mod.p.e: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.e:     ((a_VS_year3 + a_VS_year6) || item)
## mod.p.c: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.c:     ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
##         Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)    
## mod.p.e 12 1873.5 1942.4 -924.76   1849.5                             
## mod.p.c 13 1864.6 1939.2 -919.28   1838.6 10.959      1  0.0009315 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# p < .2, so retain by-item slope for variability condition in the model

Display coefficients

final.mod.p = mod.p.c
round(summary(final.mod.p)$coefficients,3)
##                   Estimate Std. Error z value Pr(>|z|)
## (Intercept)         -2.089      0.373  -5.603    0.000
## var.ct               0.129      0.276   0.467    0.640
## a_VS_year3          -2.534      0.500  -5.070    0.000
## a_VS_year6          -1.397      0.425  -3.284    0.001
## var.ct:a_VS_year3   -1.533      0.467  -3.284    0.001
## var.ct:a_VS_year6   -1.082      0.418  -2.587    0.010
## turn the model round to get the missing contrast

prod = lizContrasts(prod, prod$agegroup, "year3")

final.mod.p.v2 = glmer(correct ~
              + (var.ct * (year3_VS_a + year3_VS_year6)) 
              + (var.ct|participant)
              + ((var.ct + (a_VS_year3 + a_VS_year6))||item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))


anova(final.mod.p.v2, final.mod.p)
## Data: prod
## Models:
## final.mod.p.v2: correct ~ +(var.ct * (year3_VS_a + year3_VS_year6)) + (var.ct | 
## final.mod.p.v2:     participant) + ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
## final.mod.p: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## final.mod.p:     ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
##                Df    AIC    BIC  logLik deviance Chisq Chi Df Pr(>Chisq)
## final.mod.p.v2 13 1864.6 1939.2 -919.28   1838.6                        
## final.mod.p    13 1864.6 1939.2 -919.28   1838.6     0      0  < 2.2e-16
##                   
## final.mod.p.v2    
## final.mod.p    ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
round(summary(final.mod.p.v2)$coefficients,3)
##                       Estimate Std. Error z value Pr(>|z|)
## (Intercept)             -2.089      0.373  -5.603    0.000
## var.ct                   0.129      0.276   0.467    0.640
## year3_VS_a               2.534      0.500   5.069    0.000
## year3_VS_year6           1.137      0.541   2.102    0.036
## var.ct:year3_VS_a        1.533      0.467   3.284    0.001
## var.ct:year3_VS_year6    0.451      0.464   0.971    0.331

Note: where the contrast is coded with the older group first, these are reported in the paper with beta * -1 (so that we consistently look at contrasts from younger -> older).

Fit separate slopes for different age groups and display coefficients

final.mod.p.sep_age = glmer(correct ~
              + agegroup : var.ct 
              + a_VS_year6 +  a_VS_year3
              + (var.ct|participant)
              + ((var.ct + (a_VS_year3 +  a_VS_year6))||item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(final.mod.p.sep_age, final.mod.p)
## Data: prod
## Models:
## final.mod.p.sep_age: correct ~ +agegroup:var.ct + a_VS_year6 + a_VS_year3 + (var.ct | 
## final.mod.p.sep_age:     participant) + ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
## final.mod.p: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## final.mod.p:     ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
##                     Df    AIC    BIC  logLik deviance Chisq Chi Df
## final.mod.p.sep_age 13 1864.6 1939.2 -919.28   1838.6             
## final.mod.p         13 1864.6 1939.2 -919.28   1838.6     0      0
##                     Pr(>Chisq)    
## final.mod.p.sep_age               
## final.mod.p          < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
round(summary(final.mod.p.sep_age)$coefficients,3)
##                      Estimate Std. Error z value Pr(>|z|)
## (Intercept)            -2.089      0.373  -5.603    0.000
## a_VS_year6             -1.397      0.425  -3.284    0.001
## a_VS_year3             -2.534      0.500  -5.069    0.000
## agegroupyear3:var.ct   -0.532      0.422  -1.262    0.207
## agegroupyear6:var.ct   -0.082      0.368  -0.222    0.825
## agegroupa:var.ct        1.001      0.344   2.911    0.004

Bayes Factor Analysis

7 year olds

adult_estimate = summary(final.mod.p.sep_age)$coefficients["agegroupa:var.ct", "Estimate"]

child7_estimate = summary(final.mod.p.sep_age)$coefficients["agegroupyear3:var.ct", "Estimate"]
child7_se = summary(final.mod.p.sep_age)$coefficients["agegroupyear3:var.ct", "Std. Error"]
Bf(child7_se, child7_estimate,  uniform = 0,meanoftheory = 0,sdtheory = adult_estimate, tail = 1)
## $LikelihoodTheory
## [1] 0.07886658
## 
## $Likelihoodnull
## [1] 0.4263713
## 
## $BayesFactor
## [1] 0.1849716

10 year olds

child10_estimate = summary(final.mod.p.sep_age)$coefficients["agegroupyear6:var.ct", "Estimate"]
child10_se = summary(final.mod.p.sep_age)$coefficients["agegroupyear6:var.ct", "Std. Error"]
Bf(child10_se, child10_estimate,  uniform = 0,meanoftheory = 0,sdtheory = adult_estimate, tail = 1)
## $LikelihoodTheory
## [1] 0.3094683
## 
## $Likelihoodnull
## [1] 1.057939
## 
## $BayesFactor
## [1] 0.2925198

For BOTH child age groups, there is more support for the null than for the hypothesis that they show an adult-like difference.

Find robustness regions (see footnote 4)

7 year olds

Bf_range(child7_se, child7_estimate,  meanoftheory = 0, sdtheoryrange = seq(0.5, 0.6, length.out = 10), tail = 1)
##        sdtheory        BF
##  [1,] 0.5000000 0.3416568
##  [2,] 0.5111111 0.3356604
##  [3,] 0.5222222 0.3298497
##  [4,] 0.5333333 0.3282067
##  [5,] 0.5444444 0.3227453
##  [6,] 0.5555556 0.3134585
##  [7,] 0.5666667 0.3083186
##  [8,] 0.5777778 0.3033299
##  [9,] 0.5888889 0.3024759
## [10,] 0.6000000 0.2977719
exp(0.52)
## [1] 1.682028
Bf_range(child10_se, child10_estimate,  meanoftheory = 0, sdtheoryrange = seq(0.8, 0.9, length.out = 10), tail = 1)
##        sdtheory        BF
##  [1,] 0.8000000 0.3603706
##  [2,] 0.8111111 0.3521906
##  [3,] 0.8222222 0.3480891
##  [4,] 0.8333333 0.3480635
##  [5,] 0.8444444 0.3441325
##  [6,] 0.8555556 0.3402832
##  [7,] 0.8666667 0.3325239
##  [8,] 0.8777778 0.3328205
##  [9,] 0.8888889 0.3252134
## [10,] 0.9000000 0.3256580
exp(0.88)
## [1] 2.4109

Comprehension Test

Load Data and Set Up Data Frame

Note that “a”, “year3” and “year6” are used as names for the age groups referred to as adults, 7 year olds and 10 year olds, respectively.

comp = read.csv("comprgriddata.csv")
comp = droplevels(comp)
comp$agegroup <- factor(comp$agegroup, levels = c("year3", "year6", "a"))
comp$var <- factor(comp$var, levels = c("lv", "hv"))

One participant had extra data (due to having to restart the computer). We remove trials where the participant received the same trial for a third or fourth time.

Remove the extra trials and check that we have the correct number of trials: 24 trials * 96 participants = 2304 trials.

comp = subset(comp, include == 1)
dim(comp)
## [1] 2304   11

Figure 4

aggregated.comp = aggregate(correct ~ participant + var + agegroup, comp, FUN = mean)
compm<- summarySEwithin(aggregated.comp, measurevar = "correct", betweenvars = c("agegroup"), withinvars = c("var"), idvar = "participant", na.rm = FALSE, conf.interval = .95)


comp_violin = ggplot(aggregated.comp, aes(x=agegroup, y=correct,fill=var)) 
comp_violin= comp_violin  + geom_violin(colour="black", scale="count") 
comp_violin = comp_violin + scale_fill_manual(name = "Variability Condition", values = c("lv" = "grey", "hv" = "white"), labels = c("Low Variability", "High Variability"))
comp_violin = comp_violin + scale_x_discrete(labels = c('7-8 Year Olds','10-11 Year Olds','Adults'))
comp_violin = comp_violin + geom_errorbar(aes(ymin = correct-ci, ymax = correct+ci), width = .4, position = position_dodge(.9), data = compm)
comp_violin= comp_violin + stat_summary(fun.y=mean, geom="point", shape=23, size=2,position = position_dodge(.9))
comp_violin = comp_violin + theme_bw()
comp_violin= comp_violin + geom_hline(yintercept = (1/12))

comp_violin

Get Means

aggregated.comp.pt.age = aggregate(correct ~ participant + agegroup, comp, FUN = mean)
aggregate(correct ~ agegroup, aggregated.comp.pt.age, FUN = mean)
##   agegroup   correct
## 1    year3 0.5208333
## 2    year6 0.6250000
## 3        a 0.8632812
aggregate(correct ~ agegroup, aggregated.comp.pt.age, FUN = sd)
##   agegroup   correct
## 1    year3 0.1676717
## 2    year6 0.2291972
## 3        a 0.1322852
aggregated.comp.pt.var = aggregate(correct ~ participant + var, comp, FUN = mean)
aggregate(correct ~ var, aggregated.comp.pt.var, FUN = mean)
##   var   correct
## 1  lv 0.6614583
## 2  hv 0.6779514
aggregate(correct ~ var, aggregated.comp.pt.var, FUN = sd)
##   var   correct
## 1  lv 0.2471512
## 2  hv 0.2600663
aggregate(correct ~ var + agegroup, aggregated.comp, FUN = mean)
##   var agegroup   correct
## 1  lv    year3 0.5364583
## 2  hv    year3 0.5052083
## 3  lv    year6 0.6015625
## 4  hv    year6 0.6484375
## 5  lv        a 0.8463542
## 6  hv        a 0.8802083
aggregate(correct ~ var + agegroup, aggregated.comp, FUN = sd)
##   var agegroup   correct
## 1  lv    year3 0.1795273
## 2  hv    year3 0.2318091
## 3  lv    year6 0.2564005
## 4  hv    year6 0.2429419
## 5  lv        a 0.1846596
## 6  hv        a 0.1434619

Statistical Analysis

Find Appropriate Slope Structure

comp <- lizCenter(comp, list("var"))
comp = lizContrasts(comp, comp$agegroup, "year3")

#mod.c.full = glmer(correct ~
 #             + (var.ct * (year3_VS_year6 + year3_VS_a)) 
  #            + (var.ct|participant)
   #           + ((var.ct * (year3_VS_year6 + year3_VS_a))|item),
    #          data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))

# doesn't converge 

mod.c.b = glmer(correct ~
              + (var.ct * (year3_VS_year6 + year3_VS_a)) 
              + (var.ct|participant)
              + ((var.ct * (year3_VS_year6 + year3_VS_a))||item),
              data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))

mod.c.c = glmer(correct ~
              + (var.ct * (year3_VS_year6 + year3_VS_a)) 
              + (var.ct|participant)
              + ((var.ct + (year3_VS_year6 + year3_VS_a))||item),
              data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(mod.c.b, mod.c.c)
## Data: comp
## Models:
## mod.c.c: correct ~ +(var.ct * (year3_VS_year6 + year3_VS_a)) + (var.ct | 
## mod.c.c:     participant) + ((var.ct + (year3_VS_year6 + year3_VS_a)) || 
## mod.c.c:     item)
## mod.c.b: correct ~ +(var.ct * (year3_VS_year6 + year3_VS_a)) + (var.ct | 
## mod.c.b:     participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## mod.c.b:     item)
##         Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)   
## mod.c.c 13 2344.5 2419.2 -1159.3   2318.5                            
## mod.c.b 15 2337.1 2423.3 -1153.6   2307.1 11.397      2   0.003351 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# p < .2, so retain model with slope for interaction

See if there is a role for talker as a fixed effect in the data

comp <- lizCenter(comp, list("talker"))

mod.c.talker = glmer(correct ~
              + talker.ct
              + (var.ct * (year3_VS_year6 + year3_VS_a)) 
              + (var.ct|participant)
              + ((var.ct * (year3_VS_year6 + year3_VS_a))||item),
              data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(mod.c.b, mod.c.talker)
## Data: comp
## Models:
## mod.c.b: correct ~ +(var.ct * (year3_VS_year6 + year3_VS_a)) + (var.ct | 
## mod.c.b:     participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## mod.c.b:     item)
## mod.c.talker: correct ~ +talker.ct + (var.ct * (year3_VS_year6 + year3_VS_a)) + 
## mod.c.talker:     (var.ct | participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## mod.c.talker:     item)
##              Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)
## mod.c.b      15 2337.1 2423.3 -1153.6   2307.1                         
## mod.c.talker 16 2338.9 2430.8 -1153.5   2306.9 0.1976      1     0.6567

p > .2; don’t include in model

Display coefficients for main model

final.mod.c = mod.c.b
comp = lizContrasts(comp, comp$agegroup, "year6")

final.mod.c.v2= glmer(correct ~
              + (var.ct * (year6_VS_a + year6_VS_year3)) 
              + (var.ct|participant)
              + ((var.ct * (year3_VS_year6 + year3_VS_a))||item),
              data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(final.mod.c, final.mod.c.v2)
## Data: comp
## Models:
## final.mod.c: correct ~ +(var.ct * (year3_VS_year6 + year3_VS_a)) + (var.ct | 
## final.mod.c:     participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## final.mod.c:     item)
## final.mod.c.v2: correct ~ +(var.ct * (year6_VS_a + year6_VS_year3)) + (var.ct | 
## final.mod.c.v2:     participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## final.mod.c.v2:     item)
##                Df    AIC    BIC  logLik deviance Chisq Chi Df Pr(>Chisq)
## final.mod.c    15 2337.1 2423.3 -1153.6   2307.1                        
## final.mod.c.v2 15 2337.1 2423.3 -1153.6   2307.1     0      0  < 2.2e-16
##                   
## final.mod.c       
## final.mod.c.v2 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
round(summary(final.mod.c)$coefficients,3)
##                       Estimate Std. Error z value Pr(>|z|)
## (Intercept)              1.215      0.301   4.037    0.000
## var.ct                   0.179      0.182   0.983    0.326
## year3_VS_year6           0.673      0.331   2.030    0.042
## year3_VS_a               2.608      0.395   6.602    0.000
## var.ct:year3_VS_year6    0.466      0.434   1.074    0.283
## var.ct:year3_VS_a        0.552      0.484   1.139    0.254
round(summary(final.mod.c.v2)$coefficients,3)
##                       Estimate Std. Error z value Pr(>|z|)
## (Intercept)              1.215      0.301   4.037    0.000
## var.ct                   0.179      0.182   0.983    0.326
## year6_VS_a               1.936      0.410   4.718    0.000
## year6_VS_year3          -0.673      0.331  -2.030    0.042
## var.ct:year6_VS_a        0.086      0.565   0.153    0.879
## var.ct:year6_VS_year3   -0.466      0.434  -1.074    0.283

Fit separate slopes for different age groups and display coefficients

comp = lizContrasts(comp, comp$agegroup, "year6")

final.mod.c.sep_age = glmer(correct ~
               + agegroup : var.ct 
              +(year3_VS_year6 + year3_VS_a)
              + (var.ct|participant)
              + ((var.ct * (year3_VS_year6 + year3_VS_a))||item),
              data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))



anova(final.mod.c, final.mod.c.sep_age)
## Data: comp
## Models:
## final.mod.c: correct ~ +(var.ct * (year3_VS_year6 + year3_VS_a)) + (var.ct | 
## final.mod.c:     participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## final.mod.c:     item)
## final.mod.c.sep_age: correct ~ +agegroup:var.ct + (year3_VS_year6 + year3_VS_a) + 
## final.mod.c.sep_age:     (var.ct | participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## final.mod.c.sep_age:     item)
##                     Df    AIC    BIC  logLik deviance Chisq Chi Df
## final.mod.c         15 2337.1 2423.3 -1153.6   2307.1             
## final.mod.c.sep_age 15 2337.1 2423.3 -1153.6   2307.1     0      0
##                     Pr(>Chisq)    
## final.mod.c                       
## final.mod.c.sep_age  < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
round(summary(final.mod.c.sep_age)$coefficients,3)
##                      Estimate Std. Error z value Pr(>|z|)
## (Intercept)             1.215      0.301   4.037    0.000
## year3_VS_year6          0.673      0.331   2.030    0.042
## year3_VS_a              2.608      0.395   6.602    0.000
## agegroupyear3:var.ct   -0.160      0.278  -0.575    0.565
## agegroupyear6:var.ct    0.305      0.335   0.913    0.361
## agegroupa:var.ct        0.392      0.397   0.988    0.323

Bayes Factor Analysis

7 year olds

adult_estimate_p = summary(final.mod.p.sep_age)$coefficients["agegroupa:var.ct", "Estimate"]

child7_estimate = summary(final.mod.c.sep_age)$coefficients["agegroupyear3:var.ct", "Estimate"]
child7_se = summary(final.mod.c.sep_age)$coefficients["agegroupyear3:var.ct", "Std. Error"]
Bf(child7_se, child7_estimate,  uniform = 0,meanoftheory = 0,sdtheory =(adult_estimate_p)    , tail = 1)
## $LikelihoodTheory
## [1] 0.2174108
## 
## $Likelihoodnull
## [1] 1.21405
## 
## $BayesFactor
## [1] 0.1790789

10 year olds

child10_estimate = summary(final.mod.c.sep_age)$coefficients["agegroupyear6:var.ct", "Estimate"]
child10_se = summary(final.mod.c.sep_age)$coefficients["agegroupyear6:var.ct", "Std. Error"]
Bf(child10_se, child10_estimate,  uniform = 0,meanoftheory = 0,sdtheory = adult_estimate_p, tail = 1)
## $LikelihoodTheory
## [1] 0.5832417
## 
## $Likelihoodnull
## [1] 0.7859103
## 
## $BayesFactor
## [1] 0.7421225

adults

adult_estimate = summary(final.mod.c.sep_age)$coefficients["agegroupa:var.ct", "Estimate"]
adult_se = summary(final.mod.c.sep_age)$coefficients["agegroupa:var.ct", "Std. Error"]

Bf(adult_se, adult_estimate,  uniform = 0,meanoftheory = 0,sdtheory = adult_estimate_p, tail = 1)
## $LikelihoodTheory
## [1] 0.5681368
## 
## $Likelihoodnull
## [1] 0.6175614
## 
## $BayesFactor
## [1] 0.919968

Find robustness regions for 7 year olds (see footnote 6)

Bf_range(child7_se, child7_estimate,  meanoftheory = 0, sdtheoryrange = seq(0.5, 0.56, length.out = 10), tail = 1)
##        sdtheory        BF
##  [1,] 0.5000000 0.3376115
##  [2,] 0.5066667 0.3378381
##  [3,] 0.5133333 0.3301614
##  [4,] 0.5200000 0.3305370
##  [5,] 0.5266667 0.3230053
##  [6,] 0.5333333 0.3235220
##  [7,] 0.5400000 0.3161277
##  [8,] 0.5466667 0.3167781
##  [9,] 0.5533333 0.3095140
## [10,] 0.5600000 0.3102913
exp(0.51)
## [1] 1.665291

Exploring Participant Variables

Gender

Training

train <- lizCenter(train, list("gender"))
mod.t.gender = glmer(correct ~
                     + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
                     + (var.ct|participant)
                     + gender.ct 
                     + (var.ct + (year6_VS_a + year6_VS_year3)||word)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||talker),  
                     data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))
mod.t.gender.v2 = glmer(correct ~
                     + (trialno.ct * var.ct * (a_VS_year3 + a_VS_year6)) 
                     + (var.ct|participant)
                     + gender.ct 
                    + (var.ct + (year6_VS_a + year6_VS_year3)||word)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||talker), 
                     data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(mod.t.gender.v2,mod.t.gender)
## Data: train
## Models:
## mod.t.gender.v2: correct ~ +(trialno.ct * var.ct * (a_VS_year3 + a_VS_year6)) + 
## mod.t.gender.v2:     (var.ct | participant) + gender.ct + (var.ct + (year6_VS_a + 
## mod.t.gender.v2:     year6_VS_year3) || word) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.gender.v2:     talker)
## mod.t.gender: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.gender:     (var.ct | participant) + gender.ct + (var.ct + (year6_VS_a + 
## mod.t.gender:     year6_VS_year3) || word) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.gender:     talker)
##                 Df    AIC    BIC  logLik deviance Chisq Chi Df Pr(>Chisq)
## mod.t.gender.v2 24 5637.3 5808.4 -2794.7   5589.3                        
## mod.t.gender    24 5637.3 5808.4 -2794.7   5589.3     0      0          1
anova(mod.t.gender,final.train.mod)
## Data: train
## Models:
## final.train.mod: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## final.train.mod:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## final.train.mod:     word) + (var.ct + (year6_VS_a + year6_VS_year3) || talker)
## mod.t.gender: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.gender:     (var.ct | participant) + gender.ct + (var.ct + (year6_VS_a + 
## mod.t.gender:     year6_VS_year3) || word) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.gender:     talker)
##                 Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)
## final.train.mod 23 5635.4 5799.4 -2794.7   5589.4                         
## mod.t.gender    24 5637.3 5808.4 -2794.7   5589.3 0.0647      1     0.7992
round(summary(mod.t.gender.v2)$coefficients,3)
##                              Estimate Std. Error z value Pr(>|z|)
## (Intercept)                     2.702      0.224  12.061    0.000
## trialno.ct                      0.859      0.045  19.006    0.000
## var.ct                         -0.104      0.144  -0.724    0.469
## a_VS_year3                     -1.627      0.240  -6.769    0.000
## a_VS_year6                     -1.150      0.214  -5.384    0.000
## gender.ct                      -0.038      0.148  -0.256    0.798
## trialno.ct:var.ct               0.022      0.090   0.245    0.806
## trialno.ct:a_VS_year3          -0.648      0.119  -5.450    0.000
## trialno.ct:a_VS_year6          -0.603      0.123  -4.896    0.000
## var.ct:a_VS_year3              -0.535      0.265  -2.021    0.043
## var.ct:a_VS_year6              -0.250      0.274  -0.913    0.361
## trialno.ct:var.ct:a_VS_year3   -0.340      0.238  -1.432    0.152
## trialno.ct:var.ct:a_VS_year6   -0.157      0.246  -0.639    0.523
round(summary(mod.t.gender)$coefficients,3)
##                                  Estimate Std. Error z value Pr(>|z|)
## (Intercept)                         2.702      0.224  12.060    0.000
## trialno.ct                          0.859      0.045  19.005    0.000
## var.ct                             -0.104      0.144  -0.724    0.469
## year6_VS_a                          1.150      0.214   5.383    0.000
## year6_VS_year3                     -0.477      0.202  -2.366    0.018
## gender.ct                          -0.038      0.148  -0.256    0.798
## trialno.ct:var.ct                   0.022      0.090   0.245    0.807
## trialno.ct:year6_VS_a               0.603      0.123   4.896    0.000
## trialno.ct:year6_VS_year3          -0.045      0.085  -0.530    0.596
## var.ct:year6_VS_a                   0.250      0.274   0.913    0.361
## var.ct:year6_VS_year3              -0.285      0.179  -1.593    0.111
## trialno.ct:var.ct:year6_VS_a        0.157      0.246   0.639    0.523
## trialno.ct:var.ct:year6_VS_year3   -0.183      0.171  -1.073    0.283
tapply(train$correct, train$gender, mean, na.rm=T)
##         0         1 
## 0.8816964 0.8884804

Gender: p > .2. The pattern of results with gender included in the model is the same as in the previous models.

production

prod <- lizCenter(prod, list("gender"))


mod.p.gender = glmer(correct ~
              + (var.ct * (a_VS_year3 + a_VS_year6)) 
              + (var.ct|participant)
              + gender.ct
              + ((var.ct + (a_VS_year3 + a_VS_year6))||item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))


mod.p.gender.v2 = glmer(correct ~
              + (var.ct * (year3_VS_a + year3_VS_year6)) 
              + (var.ct|participant)
              + gender.ct
              + ((var.ct + (a_VS_year3 + a_VS_year6))||item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))


anova(mod.p.gender.v2, mod.p.gender)
## Data: prod
## Models:
## mod.p.gender.v2: correct ~ +(var.ct * (year3_VS_a + year3_VS_year6)) + (var.ct | 
## mod.p.gender.v2:     participant) + gender.ct + ((var.ct + (a_VS_year3 + a_VS_year6)) || 
## mod.p.gender.v2:     item)
## mod.p.gender: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.gender:     gender.ct + ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
##                 Df    AIC    BIC  logLik deviance Chisq Chi Df Pr(>Chisq)
## mod.p.gender.v2 14 1859.5 1939.9 -915.76   1831.5                        
## mod.p.gender    14 1859.5 1939.9 -915.76   1831.5     0      0          1
anova(mod.p.gender, final.mod.p)
## Data: prod
## Models:
## final.mod.p: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## final.mod.p:     ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
## mod.p.gender: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.gender:     gender.ct + ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
##              Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)   
## final.mod.p  13 1864.6 1939.2 -919.28   1838.6                            
## mod.p.gender 14 1859.5 1939.9 -915.76   1831.5 7.0442      1   0.007952 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
round(summary(mod.p.gender.v2)$coefficients,3)
##                       Estimate Std. Error z value Pr(>|z|)
## (Intercept)             -2.084      0.370  -5.633    0.000
## var.ct                   0.124      0.275   0.451    0.652
## year3_VS_a               2.659      0.491   5.412    0.000
## year3_VS_year6           1.017      0.531   1.917    0.055
## gender.ct                0.914      0.335   2.727    0.006
## var.ct:year3_VS_a        1.539      0.467   3.299    0.001
## var.ct:year3_VS_year6    0.450      0.465   0.968    0.333
round(summary(mod.p.gender)$coefficients,3)
##                   Estimate Std. Error z value Pr(>|z|)
## (Intercept)         -2.084      0.370  -5.633    0.000
## var.ct               0.124      0.275   0.451    0.652
## a_VS_year3          -2.659      0.491  -5.412    0.000
## a_VS_year6          -1.642      0.423  -3.882    0.000
## gender.ct            0.914      0.335   2.727    0.006
## var.ct:a_VS_year3   -1.539      0.466  -3.299    0.001
## var.ct:a_VS_year6   -1.089      0.419  -2.598    0.009
tapply(prod$correct, prod$gender, mean, na.rm=T)
##         F         M 
## 0.2144608 0.2723214
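Since the raw means above pool children and adults, the same breakdown split by age group may be more informative. A quick sketch, assuming prod carries the agegroup factor from which the age contrasts were built (the column name is an assumption):

# raw proportion correct by gender within each age group
with(prod, tapply(correct, list(gender, agegroup), mean, na.rm = TRUE))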

Gender does improve the fit of the model (p < .01); however, the effect is in the direction of males outperforming females (i.e. not in line with previous literature). The pattern of results with gender included in the model is the same as in the previous models.
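One way to verify that the pattern of results is unchanged is to line up the fixed effects the two models share. A minimal sketch using lme4's fixef() and the model objects fitted above:

# compare the shared fixed-effect estimates with and without gender.ct;
# gender.ct is the only additional term in mod.p.gender
shared <- names(fixef(final.mod.p))
round(cbind(without_gender = fixef(final.mod.p)[shared],
            with_gender    = fixef(mod.p.gender)[shared]), 3)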

comprehension

comp = lizCenter(comp, c("gender"))
mod.c.gender = glmer(correct ~
               gender.ct
              + (var.ct * (year3_VS_year6 + year3_VS_a)) 
              + (var.ct|participant)
              + ((var.ct * (year3_VS_year6 + year3_VS_a))||item),
              data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))

mod.c.gender.v2= glmer(correct ~
               gender.ct
              + (var.ct * (year6_VS_a + year6_VS_year3)) 
              + (var.ct|participant)
              + ((var.ct * (year3_VS_year6 + year3_VS_a))||item),
              data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(mod.c.gender, mod.c.gender.v2 )
## Data: comp
## Models:
## mod.c.gender: correct ~ gender.ct + (var.ct * (year3_VS_year6 + year3_VS_a)) + 
## mod.c.gender:     (var.ct | participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## mod.c.gender:     item)
## mod.c.gender.v2: correct ~ gender.ct + (var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.c.gender.v2:     (var.ct | participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## mod.c.gender.v2:     item)
##                 Df    AIC    BIC  logLik deviance Chisq Chi Df Pr(>Chisq)
## mod.c.gender    16 2336.2 2428.1 -1152.1   2304.2                        
## mod.c.gender.v2 16 2336.2 2428.1 -1152.1   2304.2     0      0  < 2.2e-16
##                    
## mod.c.gender       
## mod.c.gender.v2 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
anova(final.mod.c, mod.c.gender)
## Data: comp
## Models:
## final.mod.c: correct ~ +(var.ct * (year3_VS_year6 + year3_VS_a)) + (var.ct | 
## final.mod.c:     participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## final.mod.c:     item)
## mod.c.gender: correct ~ gender.ct + (var.ct * (year3_VS_year6 + year3_VS_a)) + 
## mod.c.gender:     (var.ct | participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## mod.c.gender:     item)
##              Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)  
## final.mod.c  15 2337.1 2423.3 -1153.6   2307.1                           
## mod.c.gender 16 2336.2 2428.1 -1152.1   2304.2 2.9003      1    0.08856 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
round(summary(mod.c.gender)$coefficients,3)
##                       Estimate Std. Error z value Pr(>|z|)
## (Intercept)              1.218      0.300   4.055    0.000
## gender.ct                0.520      0.302   1.723    0.085
## var.ct                   0.200      0.181   1.101    0.271
## year3_VS_year6           0.625      0.329   1.900    0.057
## year3_VS_a               2.701      0.398   6.794    0.000
## var.ct:year3_VS_year6    0.481      0.433   1.111    0.267
## var.ct:year3_VS_a        0.600      0.488   1.228    0.220
round(summary(mod.c.gender.v2)$coefficients,3)
##                       Estimate Std. Error z value Pr(>|z|)
## (Intercept)              1.218      0.300   4.055    0.000
## gender.ct                0.520      0.302   1.723    0.085
## var.ct                   0.200      0.181   1.101    0.271
## year6_VS_a               2.076      0.417   4.978    0.000
## year6_VS_year3          -0.625      0.329  -1.900    0.057
## var.ct:year6_VS_a        0.119      0.568   0.209    0.834
## var.ct:year6_VS_year3   -0.481      0.433  -1.111    0.267
tapply(comp$correct, comp$gender, mean, na.rm=T)
##         F         M 
## 0.6709559 0.6666667

Gender marginally contributes to the model (p < .1), here in the direction of females outperforming males. However, the key results remain unchanged, except that the difference between the two age groups of children is now marginal.

bilingualism

training

train <- lizCenter(train, list("bilingual"))

mod.t.bilingual = glmer(correct ~
                     + (trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) 
                     + (var.ct|participant)
                     + bilingual.ct 
                     + (var.ct + (year6_VS_a + year6_VS_year3)||word)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||talker),  
                     data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))

mod.t.bilingual.v2 = glmer(correct ~
                    + (trialno.ct * var.ct * (a_VS_year3 + a_VS_year6)) 
                     + (var.ct|participant)
                     + bilingual.ct 
                     + (var.ct + (year6_VS_a + year6_VS_year3)||word)
                     + (var.ct + (year6_VS_a + year6_VS_year3)||talker),  
                     data = train, family = binomial, control = glmerControl(optimizer = "bobyqa"))



anova(mod.t.bilingual.v2,mod.t.bilingual)
## Data: train
## Models:
## mod.t.bilingual.v2: correct ~ +(trialno.ct * var.ct * (a_VS_year3 + a_VS_year6)) + 
## mod.t.bilingual.v2:     (var.ct | participant) + bilingual.ct + (var.ct + (year6_VS_a + 
## mod.t.bilingual.v2:     year6_VS_year3) || word) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.bilingual.v2:     talker)
## mod.t.bilingual: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.bilingual:     (var.ct | participant) + bilingual.ct + (var.ct + (year6_VS_a + 
## mod.t.bilingual:     year6_VS_year3) || word) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.bilingual:     talker)
##                    Df    AIC    BIC  logLik deviance Chisq Chi Df
## mod.t.bilingual.v2 24 5637.3 5808.3 -2794.6   5589.3             
## mod.t.bilingual    24 5637.3 5808.3 -2794.6   5589.3     0      0
##                    Pr(>Chisq)
## mod.t.bilingual.v2           
## mod.t.bilingual             1
anova(mod.t.bilingual,final.train.mod)
## Data: train
## Models:
## final.train.mod: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## final.train.mod:     (var.ct | participant) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## final.train.mod:     word) + (var.ct + (year6_VS_a + year6_VS_year3) || talker)
## mod.t.bilingual: correct ~ +(trialno.ct * var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.t.bilingual:     (var.ct | participant) + bilingual.ct + (var.ct + (year6_VS_a + 
## mod.t.bilingual:     year6_VS_year3) || word) + (var.ct + (year6_VS_a + year6_VS_year3) || 
## mod.t.bilingual:     talker)
##                 Df    AIC    BIC  logLik deviance Chisq Chi Df Pr(>Chisq)
## final.train.mod 23 5635.4 5799.4 -2794.7   5589.4                        
## mod.t.bilingual 24 5637.3 5808.3 -2794.6   5589.3 0.154      1     0.6947
round(summary(mod.t.bilingual)$coefficients,3)
##                                  Estimate Std. Error z value Pr(>|z|)
## (Intercept)                         2.702      0.224  12.058    0.000
## trialno.ct                          0.859      0.045  19.006    0.000
## var.ct                             -0.103      0.143  -0.718    0.473
## year6_VS_a                          1.125      0.214   5.268    0.000
## year6_VS_year3                     -0.493      0.204  -2.417    0.016
## bilingual.ct                        0.054      0.137   0.395    0.693
## trialno.ct:var.ct                   0.022      0.090   0.248    0.804
## trialno.ct:year6_VS_a               0.602      0.123   4.894    0.000
## trialno.ct:year6_VS_year3          -0.045      0.085  -0.529    0.597
## var.ct:year6_VS_a                   0.252      0.274   0.919    0.358
## var.ct:year6_VS_year3              -0.284      0.179  -1.587    0.112
## trialno.ct:var.ct:year6_VS_a        0.159      0.246   0.647    0.517
## trialno.ct:var.ct:year6_VS_year3   -0.182      0.171  -1.070    0.285
round(summary(mod.t.bilingual.v2)$coefficients,3)
##                              Estimate Std. Error z value Pr(>|z|)
## (Intercept)                     2.702      0.224  12.058    0.000
## trialno.ct                      0.859      0.045  19.006    0.000
## var.ct                         -0.103      0.143  -0.718    0.473
## a_VS_year3                     -1.618      0.240  -6.747    0.000
## a_VS_year6                     -1.125      0.214  -5.268    0.000
## bilingual.ct                    0.054      0.137   0.395    0.693
## trialno.ct:var.ct               0.022      0.090   0.248    0.804
## trialno.ct:a_VS_year3          -0.648      0.119  -5.447    0.000
## trialno.ct:a_VS_year6          -0.602      0.123  -4.894    0.000
## var.ct:a_VS_year3              -0.536      0.265  -2.024    0.043
## var.ct:a_VS_year6              -0.252      0.274  -0.920    0.358
## trialno.ct:var.ct:a_VS_year3   -0.342      0.238  -1.439    0.150
## trialno.ct:var.ct:a_VS_year6   -0.159      0.246  -0.647    0.517
tapply(train$correct, train$bilingual, mean, na.rm=T)
##         0         1 
## 0.8827614 0.8907407

Bilingualism: p > .2. The pattern of results with bilingualism included in the model is the same as in the previous models.

production

prod <- lizCenter(prod, list("bilingual"))


mod.p.bilingual = glmer(correct ~
              + (var.ct * (a_VS_year3 + a_VS_year6)) 
              + (var.ct|participant)
              + bilingual.ct
              + ((var.ct + (a_VS_year3 + a_VS_year6))||item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))


mod.p.bilingual.v2 = glmer(correct ~
              + (var.ct * (year3_VS_a + year3_VS_year6)) 
              + (var.ct|participant)
              + bilingual.ct
              + ((var.ct + (a_VS_year3 + a_VS_year6))||item),
              data = prod, family = binomial, control = glmerControl(optimizer = "bobyqa"))


anova(mod.p.bilingual.v2, mod.p.bilingual)
## Data: prod
## Models:
## mod.p.bilingual.v2: correct ~ +(var.ct * (year3_VS_a + year3_VS_year6)) + (var.ct | 
## mod.p.bilingual.v2:     participant) + bilingual.ct + ((var.ct + (a_VS_year3 + a_VS_year6)) || 
## mod.p.bilingual.v2:     item)
## mod.p.bilingual: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.bilingual:     bilingual.ct + ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
##                    Df    AIC    BIC  logLik deviance Chisq Chi Df
## mod.p.bilingual.v2 14 1866.3 1946.7 -919.13   1838.3             
## mod.p.bilingual    14 1866.3 1946.7 -919.13   1838.3     0      0
##                    Pr(>Chisq)    
## mod.p.bilingual.v2               
## mod.p.bilingual     < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
anova(mod.p.bilingual, final.mod.p)
## Data: prod
## Models:
## final.mod.p: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## final.mod.p:     ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
## mod.p.bilingual: correct ~ +(var.ct * (a_VS_year3 + a_VS_year6)) + (var.ct | participant) + 
## mod.p.bilingual:     bilingual.ct + ((var.ct + (a_VS_year3 + a_VS_year6)) || item)
##                 Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)
## final.mod.p     13 1864.6 1939.2 -919.28   1838.6                         
## mod.p.bilingual 14 1866.3 1946.7 -919.13   1838.3 0.3012      1     0.5831
round(summary(mod.p.bilingual.v2)$coefficients,3)
##                       Estimate Std. Error z value Pr(>|z|)
## (Intercept)             -2.090      0.373  -5.606    0.000
## var.ct                   0.129      0.276   0.466    0.641
## year3_VS_a               2.542      0.500   5.084    0.000
## year3_VS_year6           1.099      0.545   2.017    0.044
## bilingual.ct            -0.175      0.317  -0.553    0.581
## var.ct:year3_VS_a        1.535      0.467   3.285    0.001
## var.ct:year3_VS_year6    0.452      0.465   0.972    0.331
round(summary(mod.p.bilingual)$coefficients,3)
##                   Estimate Std. Error z value Pr(>|z|)
## (Intercept)         -2.090      0.373  -5.606    0.000
## var.ct               0.129      0.276   0.466    0.641
## a_VS_year3          -2.542      0.500  -5.084    0.000
## a_VS_year6          -1.442      0.433  -3.328    0.001
## bilingual.ct        -0.175      0.317  -0.553    0.581
## var.ct:a_VS_year3   -1.535      0.467  -3.285    0.001
## var.ct:a_VS_year6   -1.083      0.419  -2.586    0.010
tapply(prod$correct, prod$bilingual, mean, na.rm=T)
##         0         1 
## 0.2279412 0.2351852

Bilingualism: p > .2. The pattern of results with bilingualism included in the model is the same as in the previous models.

comprehension

comp = lizCenter(comp, c("bilingual"))
mod.c.bilingual = glmer(correct ~
               bilingual.ct
              + (var.ct * (year3_VS_year6 + year3_VS_a)) 
              + (var.ct|participant)
              + ((var.ct * (year3_VS_year6 + year3_VS_a))||item),
              data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))

mod.c.bilingual.v2= glmer(correct ~
               bilingual.ct
              + (var.ct * (year6_VS_a + year6_VS_year3)) 
              + (var.ct|participant)
              + ((var.ct * (year3_VS_year6 + year3_VS_a))||item),
              data = comp, family = binomial, control = glmerControl(optimizer = "bobyqa"))

anova(mod.c.bilingual, mod.c.bilingual.v2 )
## Data: comp
## Models:
## mod.c.bilingual: correct ~ bilingual.ct + (var.ct * (year3_VS_year6 + year3_VS_a)) + 
## mod.c.bilingual:     (var.ct | participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## mod.c.bilingual:     item)
## mod.c.bilingual.v2: correct ~ bilingual.ct + (var.ct * (year6_VS_a + year6_VS_year3)) + 
## mod.c.bilingual.v2:     (var.ct | participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## mod.c.bilingual.v2:     item)
##                    Df    AIC    BIC  logLik deviance Chisq Chi Df
## mod.c.bilingual    16 2338.8 2430.6 -1153.4   2306.8             
## mod.c.bilingual.v2 16 2338.8 2430.6 -1153.4   2306.8     0      0
##                    Pr(>Chisq)    
## mod.c.bilingual                  
## mod.c.bilingual.v2  < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
anova(final.mod.c, mod.c.bilingual)
## Data: comp
## Models:
## final.mod.c: correct ~ +(var.ct * (year3_VS_year6 + year3_VS_a)) + (var.ct | 
## final.mod.c:     participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## final.mod.c:     item)
## mod.c.bilingual: correct ~ bilingual.ct + (var.ct * (year3_VS_year6 + year3_VS_a)) + 
## mod.c.bilingual:     (var.ct | participant) + ((var.ct * (year3_VS_year6 + year3_VS_a)) || 
## mod.c.bilingual:     item)
##                 Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)
## final.mod.c     15 2337.1 2423.3 -1153.6   2307.1                         
## mod.c.bilingual 16 2338.8 2430.6 -1153.4   2306.8 0.3729      1     0.5414
round(summary(mod.c.bilingual)$coefficients,3)
##                       Estimate Std. Error z value Pr(>|z|)
## (Intercept)              1.214      0.301   4.037    0.000
## bilingual.ct             0.166      0.269   0.616    0.538
## var.ct                   0.181      0.182   0.995    0.320
## year3_VS_year6           0.709      0.336   2.110    0.035
## year3_VS_a               2.602      0.395   6.592    0.000
## var.ct:year3_VS_year6    0.468      0.434   1.078    0.281
## var.ct:year3_VS_a        0.556      0.484   1.149    0.250
round(summary(mod.c.bilingual.v2)$coefficients,3)
##                       Estimate Std. Error z value Pr(>|z|)
## (Intercept)              1.214      0.301   4.037    0.000
## bilingual.ct             0.166      0.269   0.616    0.538
## var.ct                   0.181      0.182   0.995    0.320
## year6_VS_a               1.893      0.415   4.559    0.000
## year6_VS_year3          -0.709      0.336  -2.110    0.035
## var.ct:year6_VS_a        0.089      0.564   0.157    0.875
## var.ct:year6_VS_year3   -0.468      0.434  -1.079    0.281
tapply(comp$correct, comp$bilingual, mean, na.rm=T)
##         0         1 
## 0.6478758 0.6944444

Bilingualism: p > .2. The pattern of results with bilingualism included in the model is the same as in the previous models.