pacman::p_load(plyr, ggplot2)
str(vor.data)
## 'data.frame': 18158 obs. of 7 variables:
## $ location : num 1 1 1 1 1 1 1 1 1 1 ...
## $ year : num 1 1 1 1 1 1 1 1 1 1 ...
## $ treatment: num 1 1 1 1 1 1 1 1 1 1 ...
## $ pasture : num 3 3 3 3 3 3 3 3 3 3 ...
## $ patch : num 3.1 3.1 3.1 3.1 3.1 3.1 3.1 3.1 3.1 3.1 ...
## $ point : num 1 2 3 4 5 6 7 8 9 10 ...
## $ vor : num 7.8 1 1 3.5 5 0.3 1.5 1.5 6 0.5 ...
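All of the grouping variables come in as numerics. For the (1|patch:year) term below to form an interaction of grouping factors (rather than ':' acting as R's sequence operator), patch and year need to be factors, so it is worth converting them first. A minimal sketch, assuming vor.data as read in above:
# convert the grouping variables to factors before modelling
vor.data <- within(vor.data, {
  location  <- factor(location)
  treatment <- factor(treatment)
  pasture   <- factor(pasture)
  patch     <- factor(patch)
  year      <- factor(year)
})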
The function, based on glmer:
RE.gam <- function(x) {
  require(lme4)
  # add a small constant so the Gamma family tolerates zero VOR readings
  var.mod <- glmer(vor + 0.001 ~ (1 | year) + (1 | patch:year),
                   family = Gamma(link = "identity"), data = x)
  # squared standard deviations = variance components
  patch.var <- (attr(VarCorr(var.mod)$'patch:year', "stddev"))^2
  year.var  <- (attr(VarCorr(var.mod)$'year', "stddev"))^2
  var.results <- array(NA, c(1, 2))
  colnames(var.results) <- c("patch.var", "year.var")
  var.results[1, ] <- round(c(patch.var, year.var), 4)
  return(var.results)
}
Apply the function to each location, treatment, and pasture combination with ddply:
all.var <- ddply(.data=vor.data,
.(location, treatment, pasture),
.fun=RE.gam)
Example output
## 'data.frame': 27 obs. of 5 variables:
## $ location : num 1 1 1 1 1 2 2 2 2 3 ...
## $ treatment: num 0 0 1 1 1 0 0 1 1 0 ...
## $ pasture : num 1 2 3 4 5 2 6 3 7 1 ...
## $ patch.var: num 8.57 1.64 29.8 48.52 54.83 ...
## $ year.var : num 41.054 69.731 11.348 0.278 0 ...
Example output of the glmer variance partitioning model.
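ggplot2 is loaded above but not used in this snippet; the original graph code is not shown, but a plot of the partitioned variances could be sketched roughly like this (the reshaping with reshape2::melt and the aesthetics are assumptions):
# a sketch, not the original plotting code: reshape to long form, then plot
library(reshape2)
var.long <- melt(all.var,
                 id.vars = c("location", "treatment", "pasture"),
                 measure.vars = c("patch.var", "year.var"),
                 variable.name = "component", value.name = "variance")
ggplot(var.long, aes(x = factor(pasture), y = variance, fill = component)) +
  geom_col(position = "dodge") +
  facet_wrap(~ location) +
  labs(x = "Pasture", y = "Estimated variance component")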
I found this script in folders from the 2012 and 2013 papers. I doubt it is the one that produced the graphs, but it is really close. It barfed on the interaction between the random effects because '*' between two factors is not a valid operation ('*' not meaningful for factors); inside a random-effect term the interaction of grouping factors has to be written with ':'.
var.comp <- function(dat, year, past, trt) {
  require(lme4)
  # interactions of grouping factors are written with ':' (not '*') inside (1 | ...)
  vc <- VarCorr(lmer(litter ~ (1 | pasture) +
                       (1 | pasture:patch) +
                       (1 | pasture:patch:point), data = dat))
  # standard deviation of each variance component
  vc.res <- attr(vc, "sc")                            # residual
  vc.1   <- attr(vc$'pasture:patch:point', "stddev")  # point within patch
  vc.2   <- attr(vc$'pasture:patch', "stddev")        # patch within pasture
  vc.3   <- attr(vc$pasture, "stddev")                # pasture
  vc.tot <- sum(c(vc.res, vc.1, vc.2, vc.3)^2)        # total variance
  lit.mat <- matrix(c(vc.res, vc.1, vc.2, vc.3, sqrt(vc.tot)),
                    dimnames = list(c("residual", "point",
                                      "patch", "pasture", "total")))
  # build the frame: squared sds are the variance components
  lit.dat <- as.data.frame(lit.mat^2)
  lit.dat[, "prop.var"]  <- lit.dat$V1 / vc.tot
  lit.dat[, "scale"]     <- 1:5
  lit.dat[, "treatment"] <- trt
  lit.dat[, "year"]      <- year
  lit.dat[, "pasture"]   <- past
  return(lit.dat)
}
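A hypothetical call, just to show how the arguments line up (litter.data and the subset values are made up):
# hypothetical usage on one year x pasture subset of a litter data frame
sub.dat <- subset(litter.data, year == 1 & pasture == 3)
var.comp(sub.dat, year = 1, past = 3, trt = 1)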
I couldn’t find the actual script I used to calculate Cohen’s d for the JAE paper, but this is the basic code I used.
# -- User-provided parameters ---
m1 <-  # mean group 1 (treatment)
m2 <-  # mean group 2 (control)
sd1 <- # sd group 1 (treatment)
sd2 <- # sd group 2 (control)
n1 <- 5 # sample size group 1
n2 <- 5 # sample size group 2
sp <- (sd1 + sd2)/2 # common sd for the simulation (simple average of the two sds)
trials <- 1000 # number of simulated trials
# ----- Program does the rest ------------
# storage vectors for the effect size, mean difference, and p-value of each trial
cohend <- rep(NA, trials)
xdiff  <- rep(NA, trials)
pVec   <- rep(NA, trials)
for (i in 1:trials) {
  x1 <- rnorm(n1, m1, sp)
  x2 <- rnorm(n2, m2, sp)
  xdiff[i] <- mean(x2) - mean(x1)
  spo <- sqrt(mean(c(var(x1), var(x2)))) # pooled sd of the two simulated samples
  cohend[i] <- xdiff[i] / spo
  pVec[i] <- t.test(x1, x2)$p.value
}
# 90% interval for d: the 5th and 95th percentiles of the simulated values
ciL <- sort(cohend)[round(0.05 * trials)]
ciU <- sort(cohend)[round(0.95 * trials)]
# simulated power: the proportion of trials significant at alpha = 0.05
spower <- mean(pVec <= 0.05)
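After the loop the quantities of interest can be pulled straight from the objects created above (a small follow-up sketch):
# summarise the simulation
mean(cohend) # average simulated Cohen's d
c(ciL, ciU)  # 90% interval for d
spower       # simulated power at alpha = 0.05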