This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see http://rmarkdown.rstudio.com.
When you click the Knit button, a document is generated that includes both the content and the output of any embedded R code chunks within the document. You can embed an R code chunk like this:
#################### Levene tests ##############
library(car)
library("httr")
library("RCurl")
library("foreign")
library("reshape2")
library("ez")
####### create an empty results data frame (ID, df1, df2, F, p); a dummy row is added and immediately dropped so the columns get the right names and types
lev_result<-data.frame(ID=1,DF1=1,DF2=1,F_value=1,P_value=1)
lev_result<-lev_result[-1,]
# define onlynumber: row index into lev_result for the next result
onlynumber<-1
# extract df1, df2, the F statistic, and the p value from a leveneTest() result
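# leveneTest() returns an ANOVA-style data frame with columns Df, `F value`, and
# `Pr(>F)`; the first row holds the group term (df1, F, p) and the second row the
# residuals (df2). The helper below pulls those four numbers out in that order.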
subset_lev<-function(data){
temp<-vector()
temp[1]<-data[1]$Df[1]
temp[2]<-data[1]$Df[2]
temp[3]<-data[2]$`F value`[1]
temp[4]<-data[3]$`Pr(>F)`[1]
return(temp)
}
####################row 110
Data <- read.spss("110_soc_lm_no.sav", to.data.frame=TRUE)
Data.1 <- melt(Data, measure.vars=c(22:23), variable.name="Condition")
result110<-leveneTest(value~Gender*Condition,data=Data.1)
lev_result[onlynumber,1]<-110
lev_result[onlynumber,2:5]<-subset_lev(result110)
onlynumber<-onlynumber+1
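# The same bookkeeping (write the dataset ID, write the four Levene statistics,
# advance the row counter) repeats verbatim for every dataset below. A helper of
# roughly this form could replace it; shown only as a sketch, it is not used in
# the rest of this script:
store_lev <- function(res, id) {
  lev_result[onlynumber, 1] <<- id
  lev_result[onlynumber, 2:5] <<- subset_lev(res)
  onlynumber <<- onlynumber + 1
}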
#################row24 https://osf.io/sqim7/
library("httr")
library("RCurl")
library("foreign")
library("reshape2")
bass.data <- read.spss("24_cog_anova_yes.sav", to.data.frame=TRUE)
bass.data <- bass.data[1:49,]
#Data in spss wide format - reshape
bass.long <- melt(bass.data,
id.vars="id",
measure.vars=c("avg_time_sum_ac",
"avg_time_neutral_ac",
"avg_time_sum_mu",
"avg_time_neutral_mu"),
variable.name="cond",
value.name="RT")
bass.long$prime <- 1
bass.long$prime[grep("mu", bass.long$cond)] <- 2
bass.long <- transform(bass.long, prime=factor(prime, labels=c("ac", "mu")))
bass.long$sum <- 1
bass.long$sum[grep("neu", bass.long$cond)] <- 2
bass.long <- transform(bass.long, sum=factor(sum, labels=c("sum", "neutral")))
bass.long <- transform(bass.long, id=factor(id))
result24<-leveneTest(data=bass.long,RT~prime*sum)
lev_result[onlynumber,1]<-24
lev_result[onlynumber,2:5]<-subset_lev(result24)
onlynumber<-onlynumber+1
##############row 4 https://osf.io/8j9cg/
library("httr")
library("RCurl")
library("reshape2")
library("foreign")
library("ez")
library("schoRsch")
data<-read.spss("4_cog_anova_no.sav",to.data.frame=T,use.value.labels=F)
cols<-c(1,10,12:17) #relevant variables
dat<-data[data$EXCLUDED==0,cols] #filter unwanted cases (variable provided by replicator), restrict to relevant data
colnames(dat)<-tolower(colnames(dat)) #all varnames lowercase
names<-colnames(dat) #store varnames
# reshape to long format
storm<-reshape(dat,varying=list(names[5:8]),idvar=names[1]
,timevar="type",v.names="propcorrect"
,direction="long",times=names[5:8])
#generate factors coding the repeated measures design
storm$nrp<-as.factor(as.numeric(substr(storm$type,1,2)=="nr"))
storm$nre<-as.factor(as.numeric(substr(storm$type,4,4)=="n"|substr(storm$type,5,5)=="n"))
#transforming categorical variables to factors
storm$test_postrelearning<-as.factor(storm$test_postrelearning)
idx<-storm$nre==0 #condition analysis on relearned words
datause<-storm[idx,]
datause1<-na.omit(datause)
result4<-leveneTest(data=datause1,propcorrect~as.factor(type)*as.factor(condition))
lev_result[onlynumber,1]<-4
lev_result[onlynumber,2:5]<-subset_lev(result4)
onlynumber<-onlynumber+1
#####################row72 https://osf.io/2gx4k/
library("httr")
library("RCurl")
library("foreign")
data<-read.spss("72_soc_anova_yes.sav",to.data.frame =TRUE)
result72<-leveneTest(data=data,Attitude~arg_qual*value_imp)
lev_result[onlynumber,1]<-72
lev_result[onlynumber,2:5]<-subset_lev(result72)
onlynumber<-onlynumber+1
###############row 148 https://osf.io/blcj6/
library("httr")
library("RCurl")
library("car")
library('foreign')
library('dplyr')
library('reshape2')
data <- read.spss("148_soc_anova_no.sav", to.data.frame=TRUE)
colnames(data)[1] <- "Participant"
data.glm <- data %>%
dplyr::select(one_of('Participant', 'conceptionrisk', 'haspartner', 'singleavg', 'attachedavg')) %>%
melt(
measure.vars=c('singleavg', 'attachedavg'),
value.name='attractiveness_rating',
variable.name='target_relationship_status'
)
data.glm$target_relationship_status <- car::recode(
data.glm$target_relationship_status,
"'singleavg'='Single'; 'attachedavg'='Attached'"
)
result148<-leveneTest(data=data.glm,attractiveness_rating~target_relationship_status*conceptionrisk*haspartner)
lev_result[onlynumber,1]<-148
lev_result[onlynumber,2:5]<-subset_lev(result148)
onlynumber<-onlynumber+1
#################row 58 https://osf.io/ke43j/
library("httr")
library("RCurl")
library("reshape2")
Data <- read.csv("58_soc_anova_no.csv")
cleanedData <- subset(Data, X.4 != 1 , select = cbind(Binder.Info, X.6,
Final.Quest..II..Partner.s.felt.and.e.pressed.happiness, X.21))
names(cleanedData) <- c("Subject", "Condition", "Expressed", "Felt") #Better headers
cleanedData <- cleanedData[-1, ] # Clean headers on first line
cleanedData$Felt <- as.numeric(as.character(cleanedData$Felt)) #To be able to detect NA
cleanedData$Expressed <- as.numeric(as.character(cleanedData$Expressed)) #To detect NA
cleanData <- na.omit(cleanedData) #Delete the NA
dataLong <- melt(cleanData, id = c("Subject", "Condition"),
measure = c("Expressed", "Felt"),
value.name = "Score",
variable.name = "Rating")
dataLong$Subject <- as.factor(dataLong$Subject) # Making sure subjects are factor
# It's a 2x2 analysis; everything that's not experimental is coded as control
dataLong$Condition[dataLong$Condition == "control"] <- "Control" #correcting typo
dataLong$Condition[dataLong$Condition == "Insensitive"] <- "Control"
dataLong$Condition<-as.character(dataLong$Condition)
dataLong$Condition<-as.factor(dataLong$Condition)
result58<-leveneTest(data=dataLong,Score~Condition*Rating)
lev_result[onlynumber,1]<-58
lev_result[onlynumber,2:5]<-subset_lev(result58)
onlynumber<-onlynumber+1
##############row 52 https://osf.io/l8srm/
library("car")
library("foreign")
library("heplots")
library("httr")
library("RCurl")
df <- read.spss("52_soc_anova_no.sav", to.data.frame = TRUE)
df <- df[ ,c(1,9,112:114)]
names(df) <- c("subject","aff_coh", "Story_1","Story_2","Story_3")
data_df<-melt(df, id = c("subject", "aff_coh"),
measure = c("Story_1", "Story_2","Story_3"),
value.name = "Story",
variable.name = "lov")
data_df<-data_df[data_df$Story!=0,]
result52<-leveneTest(data=data_df,Story~aff_coh*lov)
lev_result[onlynumber,1]<-52
lev_result[onlynumber,2:5]<-subset_lev(result52)
onlynumber<-onlynumber+1
##################row 68 https://osf.io/sg3su/
library("httr")
library("RCurl")
data <- read.csv("68_soc_anova_no.csv")
result68<-leveneTest(data=data,Likelihood~as.factor(Load)*as.factor(Had.read))
lev_result[onlynumber,1]<-68
lev_result[onlynumber,2:5]<-subset_lev(result68)
onlynumber<-onlynumber+1
##################ROW 118 https://osf.io/3h29d/
library("httr")
library("RCurl")
library(plyr)
library(dplyr)
library(heplots)
data<-data.frame(read.csv("118_soc_anova_no.csv",header=T))
sugar<-ifelse((data[,2]=="sugar"),"glucose","placebo")
data<-cbind(data[,1],sugar,data[,3:8])
colnames(data)<-c("sub","sugar","assignment","aptA","aptB","aptC","aptD","time")
data$time<-as.numeric(data$time)
result118<-leveneTest(data=data,time~sugar*assignment)
lev_result[onlynumber,1]<-118
lev_result[onlynumber,2:5]<-subset_lev(result118)
onlynumber<-onlynumber+1
##################row 56 https://osf.io/xtsq6/
library("httr")
library("RCurl")
library("foreign")
Data <- read.spss("56_soc_anova_no.sav", to.data.frame=TRUE)
Data$group <- as.factor(Data$group)
Data$emotion <- as.factor(Data$emotion)
Data$consequences <- as.factor(Data$consequences)
result56<-leveneTest(data=Data,offer~emotion*consequences)
lev_result[onlynumber,1]<-56
lev_result[onlynumber,2:5]<-subset_lev(result56)
onlynumber<-onlynumber+1
######################row 50 https://osf.io/rgm6p/
library("car")
library("httr")
library("lsr")
library("RCurl")
library("reshape2")
library("stringr")
library("xlsx")
dir <- "50_soc_anova_no"
unzip("50_soc_anova_no.zip", exdir = dir)
file <- paste0(dir,"/merged_data/merged_data.xlsx")
df <- read.xlsx(file, sheetIndex = 1,
startRow = 2,
colIndex = c(1, 2, 9, 12, 30, 38, 51, 59, 67))
names(df) <- c("condition", "id", "age", "thoughts_all", "sex",
"happy", "psych_student", "vegetarian", "angry")
df$vegetarian <- car::recode(df$vegetarian, "'n' = 0; 'j' = 1")
df$sex <- car::recode(df$sex, "'m' = 0; 'w' = 1")
df$psych_student <- car::recode(df$psych_student, "'n' = 0; 'j' = 1")
df$condition <- as.character(df$condition)
df <- df[seq(1,nrow(df), by=20), ]
row.names(df) <- c(1:nrow(df))
df <- df[order(df$condition, df$id), ]
df$thoughts_all <- gsub("xxx", "***", df$thoughts_all, fixed = TRUE)
df$thoughts_all <- gsub("+++", "***", df$thoughts_all, fixed = TRUE)
df <- df[which( # only include cases that:
grepl("*", df$thoughts_all, fixed = TRUE) | # have "*" in "thouhgts" OR
is.na(df$thoughts_all) # have NA in "thouhgts"
), ]
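# count the listed thoughts: split "thoughts_all" on "*", drop fragments that
# contain none of the letters a/e/i (empty strings and separator residue), and
# take the number of remaining fragments as the thought count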
x1 <- strsplit(as.character(df$thoughts_all), "*", fixed=T)
vowels <- "[aei]"
y1 <- list()
for (i in 1:length(x1)) {
ifelse (any(!str_detect(x1[[i]], vowels)),
y1[[i]] <- x1[[i]][-which(!str_detect(x1[[i]], vowels))],
y1[[i]] <- x1[[i]])
}
df$thoughts <- unlist(lapply(y1, function(x) length(x)))
df$task <- NULL
df$task[grep("PZ", df$condition)] <- 1
df$task[grep("PA", df$condition)] <- 2
df$prime <- rep(NA, nrow(df))
df$prime[grep("AW", df$condition)] <- 1
df$prime[grep("IW", df$condition)] <- 2
df$prime[grep("NW", df$condition)] <- 3
df$prime_bin <- car::recode(df$prime, "c(1,2)=1; 3=0")
## create "mood" and a binary "mood_bin" variables
df$mood <- df$happy-df$angry
df$mood_bin <- car::recode(df$mood, "0:hi = 1; lo:-1 = 0; else = NA")
## recode "condition" variable
df$condition[grep("AW_PA", df$condition)] <- 1
df$condition[grep("AW_PZ", df$condition)] <- 2
df$condition[grep("IW_PA", df$condition)] <- 3
df$condition[grep("IW_PZ", df$condition)] <- 4
df$condition[grep("NW_PA", df$condition)] <- 5
df$condition[grep("NW_PZ", df$condition)] <- 6
### convert appropriate variables into factors and assign labels for levels thereof
df$id <- factor(df$id)
df$prime_bin <- factor(df$prime_bin,
labels=c("no prime", "prime"))
df$vegetarian <- factor(df$vegetarian, labels=c("no", "yes"))
df$sex <- factor(df$sex, labels=c("male", "female"))
df$psych_student <- factor(df$psych_student, labels=c("no", "yes"))
df$mood_bin <- factor(df$mood_bin, labels=c("negative", "non-negative"))
df$prime <- factor(df$prime, labels=c("active", "inactive", "control"))
df$task <- factor(df$task, labels=c("active", "inactive"))
df$condition <- factor(df$condition, labels=c("action.p_inactive.t",
"action.p_active.t",
"inaction.p_inactive.t",
"inaction.p_active.t",
"control.p_inactive.t",
"control.p_active.t"))
# set contrasts
options(contrasts = c("contr.sum","contr.poly"))
result50<-leveneTest(data=df,thoughts~prime*task)
lev_result[onlynumber,1]<-50
lev_result[onlynumber,2:5]<-subset_lev(result50)
onlynumber<-onlynumber+1
#########################row 20 https://osf.io/bzdr2/
library("httr")
library("RCurl")
library("foreign")
library("reshape2")
saha.data <- read.dta("20_cog_anova_no.dta", convert.factors=TRUE)
saha.data$one2two = 0
saha.data$one2two[saha.data$subject < 31] <- 1
saha.data2 <- saha.data[saha.data$Memory==0,]
names(saha.data2)[grep("List_1_Si", names(saha.data2))] <- "lista1"
names(saha.data2)[grep("List_1_Mi", names(saha.data2) )] <- "lista2"
saha.data2$proportionList1 <- 100/16* saha.data2$List_1i
saha.data2$proportionList2 <- 100/16* saha.data2$List_2i
#Reshape to long format for ANOVA
saha.data.long <- melt(saha.data2,
id.vars=c("subject", "Treat", "Choice"),
measure.vars=c("lista1", "lista2"),
variable.name="a.name",
value.name = "lista")
saha.data.long$proportionListA = 100/8* saha.data.long$lista
saha.data.long$spaced = 1
saha.data.long$spaced[which(saha.data.long$a.name=="lista2")] <- 0
saha.data.long <- transform(saha.data.long, subject=factor(subject))
result20<-leveneTest(data=saha.data.long,proportionListA~as.factor(Treat)*as.factor(spaced))
lev_result[onlynumber,1]<-20
lev_result[onlynumber,2:5]<-subset_lev(result20)
onlynumber<-onlynumber+1
##################row 81 https://osf.io/xse7q/
library("httr")
library("RCurl")
library("foreign")
shn.data <- read.spss("81_soc_anova_no.sav", to.data.frame=TRUE)
shn.data <- transform(shn.data, ParticipantNumber=factor(ParticipantNumber))
result81<-leveneTest(data=shn.data,AvgWillingReconcileTime2~as.factor(VictimOrPerpV0P1)*as.factor(AcceptOrEmpowerA0E1))
lev_result[onlynumber,1]<-81
lev_result[onlynumber,2:5]<-subset_lev(result81)
onlynumber<-onlynumber+1
#####################row 97 https://osf.io/etg7c/
library("httr")
library("RCurl")
library(sas7bdat)
library(dplyr)
myData <- read.sas7bdat("97_soc_anova_no.sas7bdat")
myDataSub <- dplyr::select(myData, racecode, fair, trust) %>%
filter(racecode %in% c(1, 2))
myDataSub<-myDataSub[myDataSub$fair==1|myDataSub$fair==0,]
myDataSub$racecode <- as.factor(myDataSub$racecode)
myDataSub$fair <- as.factor(myDataSub$fair)
result97<-leveneTest(data=myDataSub,trust~racecode*fair)
lev_result[onlynumber,1]<-97
lev_result[onlynumber,2:5]<-subset_lev(result97)
onlynumber<-onlynumber+1
###############row 86 https://osf.io/j8bpa/
library("httr")
library("RCurl")
library("foreign")
Data <- read.spss("86_soc_anova_no.sav", to.data.frame = TRUE)
Data$condition <- as.factor(Data$condition)
result86<-leveneTest(data=Data,Zbias~condition)
lev_result[onlynumber,1]<-86
lev_result[onlynumber,2:5]<-subset_lev(result86)
onlynumber<-onlynumber+1
###############row 140 https://osf.io/vnsqg/
library("httr")
library("RCurl")
library("gdata")
library("xlsx")
myData <- read.xlsx("140_soc_anova_no.xlsx", sheetIndex=1, header=TRUE)
myData <- na.omit(myData)
myData$COND <- as.factor(myData$COND)
#myData$outcome <- rowMeans(myData[, c("SIB_BOND", "PAR_BOND", "HOME_BOND")], na.rm = TRUE)
#the_mod <- lm(outcome ~ COND, data = myData)
result140_1<-leveneTest(data=myData,SIB_BOND~COND)
result140_2<-leveneTest(data=myData,PAR_BOND~COND)
result140_3<-leveneTest(data=myData,HOME_BOND~COND)
lev_result[onlynumber,1]<-140
lev_result[onlynumber,2:5]<-subset_lev(result140_1)
onlynumber<-onlynumber+1
lev_result[onlynumber,1]<-140
lev_result[onlynumber,2:5]<-subset_lev(result140_2)
onlynumber<-onlynumber+1
lev_result[onlynumber,1]<-140
lev_result[onlynumber,2:5]<-subset_lev(result140_3)
onlynumber<-onlynumber+1
###################row 64 https://osf.io/mxryb/
library("httr")
library("RCurl")
library("car")
data <- read.csv("64_soc_anova_no.csv")
data2<- data[1:15,]
data2<- rbind(data2,data[17:22,])
data2<- rbind(data2,data[24:67,])
data2<- rbind(data2,data[69:74,])
data<-data2
result64<-leveneTest(data=data,aggressivenessScore~mapCondition*puzzleCondition)
lev_result[onlynumber,1]<-64
lev_result[onlynumber,2:5]<-subset_lev(result64)
onlynumber<-onlynumber+1
##################row 142 https://osf.io/k4y9i/
library("httr")
library("RCurl")
library("foreign")
library("stats")
library("reshape2")
data <- read.spss("142_soc_anova_yes.sav", to.data.frame = TRUE)
data <- data[data$Subject != 2, ]
data <- data[data$Subject != 23, ]
Data <- data.frame(as.factor(data$Subject))
colnames(Data) <- c("Subject")
Data$ConfrontationalAngryMusicScore <- apply(data.frame(data$Game1Angry1, data$Game1Angry2, data$Game1Angry3, data$Game2Angry1, data$Game2Angry2, data$Game2Angry3), 1, mean)
Data$ConfrontationalExcitingMusicScore <- apply(data.frame(data$Game1Exciting1, data$Game1Exciting2, data$Game1Exciting3, data$Game2Exciting1, data$Game2Exciting2, data$Game2Exciting3), 1, mean)
Data$ConfrontationalNeutralMusicScore <- apply(data.frame(data$Game1Neutral1, data$Game1Neutral2, data$Game1Neutral3, data$Game2Neutral1, data$Game2Neutral2, data$Game2Neutral3), 1, mean)
Data$ConfrontationalAngryRecallScore <- apply(data.frame(data$Game1AngryFriends, data$Game1AngryStrangers, data$Game2AngryFriends, data$Game2AngryStrangers), 1, mean)
Data$ConfrontationalExcitingRecallScore <- apply(data.frame(data$Game1ExcitedFriends, data$Game1ExcitedStrangers, data$Game2ExcitedFriends, data$Game2ExcitedStrangers), 1, mean)
Data$ConfrontationalNeutralRecallScore <- apply(data.frame(data$Game1CalmFriends, data$Game1CalmStrangers, data$Game2CalmFriends, data$Game2CalmStrangers), 1, mean)
Data$NonconfrontationalAngryMusicScore <- apply(data.frame(data$Game3Angry1, data$Game3Angry2, data$Game3Angry3, data$Game4Angry1, data$Game4Angry2, data$Game4Angry3), 1, mean)
Data$NonconfrontationalExcitingMusicScore <- apply(data.frame(data$Game3Exciting1, data$Game3Exciting2, data$Game3Exciting3, data$Game4Exciting1, data$Game4Exciting2, data$Game4Exciting3), 1, mean)
Data$NonconfrontationalNeutralMusicScore <- apply(data.frame(data$Game3Neutral1, data$Game3Neutral2, data$Game3Neutral3, data$Game4Neutral1, data$Game4Neutral2, data$Game4Neutral3), 1, mean)
Data$NonconfrontationalAngryRecallScore <- apply(data.frame(data$Game3AngryFriends, data$Game3AngryStrangers, data$Game4AngryFriends, data$Game4AngryStrangers), 1, mean)
Data$NonconfrontationalExcitingRecallScore <- apply(data.frame(data$Game3ExcitedFriends, data$Game3ExcitedStrangers, data$Game4ExcitedFriends, data$Game4ExcitedStrangers), 1, mean)
Data$NonconfrontationalNeutralRecallScore <- apply(data.frame(data$Game3CalmFriends, data$Game3CalmStrangers, data$Game4CalmFriends, data$Game4CalmStrangers), 1, mean)
# Reshape dataframe into long format
data_long <- melt(Data, id.vars = c("Subject"))
colnames(data_long)[2] <- "Groups"
colnames(data_long)[3] <- "Preference"
# Split grouping variable into separate columns
data_long$GameType <- as.factor(ifelse(grepl("Confrontation", data_long$Groups), "Confrontation", "Nonconfrontation"))
data_long$Emotion <- NA
for (i in 1:nrow(data_long)) {
  if (grepl("Angry", data_long$Groups[i])) {
    data_long$Emotion[i] <- "Angry"
  } else if (grepl("Exciting", data_long$Groups[i])) {
    data_long$Emotion[i] <- "Exciting"
  } else {
    data_long$Emotion[i] <- "Neutral"
  }
}
data_long$Emotion <- as.factor(data_long$Emotion)
data_long$ActivityType <- as.factor(ifelse(grepl("Music", data_long$Groups), "Music", "Recall"))
result142<-leveneTest(data=data_long,Preference~GameType*Emotion*ActivityType)
lev_result[onlynumber,1]<-142
lev_result[onlynumber,2:5]<-subset_lev(result142)
onlynumber<-onlynumber+1
#####################row 63 https://osf.io/fejxb/
library("httr")
library("RCurl")
library("foreign")
library("car")
dat <- read.spss("63_soc_anova_no.sav",use.value.labels = FALSE, to.data.frame=TRUE)
#Replicate condition recoding
dat$Cond_WIT<-car::recode(dat$Cond, "1=1;2=1;3=2;4=2;5=3;6=3", as.factor.result=FALSE)
#Select only data with errorRate less than .80 (I think this is all the data)
newdat<-subset(dat, dat$errorRate<.80)
#Create contrast codes for ANOVA -1, .5, and .5
newdat$Ccode<-car::recode(newdat$Cond_WIT, "1=-1;2=.5;3=.5", as.factor.result=TRUE)
result63<-leveneTest(data=newdat,PSDslope~Ccode)
lev_result[onlynumber,1]<-63
lev_result[onlynumber,2:5]<-subset_lev(result63)
onlynumber<-onlynumber+1
####################row 43 https://osf.io/pz0my/
library("httr")
library("RCurl")
data <- read.csv("43_soc_anova_no.csv")
attraction.composite <- rowMeans(data[, 18:20], na.rm = T)
result43<-leveneTest(attraction.composite~data$Condition)
lev_result[onlynumber,1]<-43
lev_result[onlynumber,2:5]<-subset_lev(result43)
onlynumber<-onlynumber+1
###############row 111 https://osf.io/aaudl/
library("httr") #for reading in data
library("RCurl") #for reading in data
library("dplyr") #To manipulate data
library("ez") #To analyse data
library("lsr") #To calculate effect size (eta squared)
data<-read.csv("111_cog_anova_yes.csv")
data.selected<-subset(data,DROP=="0")
d<-dplyr::select(data.selected, Participant, Anchortype, magnitude,mean2)
d$Participant<-as.factor(d$Participant)
d$Anchortype<-as.factor(d$Anchortype)
d$magnitude<-as.factor(d$magnitude)
result111<-leveneTest(data=d,mean2~Anchortype*magnitude)
lev_result[onlynumber,1]<-111
lev_result[onlynumber,2:5]<-subset_lev(result111)
onlynumber<-onlynumber+1
###################row 55 https://osf.io/su6bm/
library("httr")
library("RCurl")
library("xlsx") # read XLSX file
library("ez") # needed for repeated measures anova
library("car") # needed for recode function (data organization)
library("reshape2") # needed for data manipulation
library("schoRsch") # compute p-eta-sq automatically
library("stringr")
dat <- read.xlsx("55_soc_anova_no.xlsx", 1)
dat$Participant <- factor(dat$Participant)
names(dat)[grep("Rating2_Rejected.", names(dat))] <- "Rating2_Rejected"
table(dat$Condition..1.Neutral..2.Positive.Non.Action..3.Action.Oriented)
dat$Condition <- factor(recode(dat$Condition..1.Neutral..2.Positive.Non.Action..3.Action.Oriented, "'1'='Neutral'; '2'='PositiveNonAction'; '3'='ActionOriented';"))
datDrop <- dat[,-2]
# remove 13 excluded participants (bolded in the raw Excel file; a column called Marked_For_Removal was added that codes each participant as either 1/TRUE = remove or 0/FALSE = keep)
datClean <- subset(datDrop, Marked_For_Removal != TRUE, select=-Marked_For_Removal)
datCleanMelt <- melt(datClean, id=c("Participant", "Condition"))
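# the melted variable names have the form <Rating>_<ChosenOrRejected>
# (e.g. "Rating2_Rejected"); splitting on "_" recovers the two factors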
createFactors <- unlist(str_split(datCleanMelt$variable, "_"))
rating <- createFactors[seq(1, length(createFactors), by=2)]
chosenOrRejected <- createFactors[seq(2, length(createFactors), by=2)]
datCleanMelt$Rating <- factor(rating)
datCleanMelt$ChosenOrRejected <- factor(chosenOrRejected)
head(datCleanMelt)
datFinal <- datCleanMelt[,-3]
result55<-leveneTest(data=datFinal,value~Rating*ChosenOrRejected*Condition)
lev_result[onlynumber,1]<-55
lev_result[onlynumber,2:5]<-subset_lev(result55)
onlynumber<-onlynumber+1
####################row 143 https://osf.io/yuybh/
library("httr")
library("RCurl")
library(Hmisc)
library(dplyr)
library(reshape2)
myData <- spss.get("143.sav")
# Get columns and remove the three participants who need to be deleted
myDataSub <- dplyr::select(myData, Include.YesNo, ParticipantNo, Condition, Drawing.Total.Z, Belief.Avg.Z, Pet.Traits.AnthroSocial.Z) %>%
filter(Include.YesNo == "Yes")
names(myDataSub) <- c("toInclude", "ParticipantNo", "condition", "drawing_z", "belief_z", "pets_z")
dataLong <- melt(myDataSub[, -1], id.vars = c("ParticipantNo", "condition")) %>%
arrange(ParticipantNo)
result143<-leveneTest(data=dataLong,value~condition*variable)
lev_result[onlynumber,1]<-143
lev_result[onlynumber,2:5]<-subset_lev(result143)
onlynumber<-onlynumber+1
###################ROW 87 https://osf.io/abxcj/
library("dplyr")
library("httr")
library("RCurl")
library("foreign")
data <- read.spss("87.sav", to.data.frame=TRUE)
#code 'yes' and 'no' responses for LearnOutcomes
levels(data$Topic) <- c(levels(data$Topic), "1", "0")
data$Topic[data$Topic == "Yes"] <- "1"
data$Topic[data$Topic == "No"] <- "0"
#code 'yes' and 'no' responses for LearnOutcomes
levels(data$LearnOutcomes) <- c(levels(data$LearnOutcomes), "1", "0")
data$LearnOutcomes[data$LearnOutcomes == "Yes"] <- "1"
data$LearnOutcomes[data$LearnOutcomes == "No"] <- "0"
#filter cases "no" for suspicion and 'White (not hispanic)'
data.table <- as.data.frame(data, stringsAsFactors=TRUE)
data.filter <- subset(data.table, Suspicion == "No" & Ethnicity == "White (Not hispanic)")
data.filter$Topic<-as.numeric(data.filter$Topic)
data.filter$Topic<-as.factor(data.filter$Topic)
data.filter$LearnOutcomes<-as.numeric(data.filter$LearnOutcomes)
data.filter$LearnOutcomes<-as.factor(data.filter$LearnOutcomes)
result87<-leveneTest(data=data.filter,Distance~LearnOutcomes*Topic)
lev_result[onlynumber,1]<-87
lev_result[onlynumber,2:5]<-subset_lev(result87)
onlynumber<-onlynumber+1
#####################row 12
library("httr")
library("RCurl")
library("reshape2")
library("foreign")
dat <- read.spss("12.sav",use.value.labels = FALSE, to.data.frame=TRUE)
longdat <- melt(dat,
id.vars = c("NUMBER","AGE", "SEX", "LANG","TASK"), # Between subject variables
measure.vars = c("R_QUIET", "R_DISSIMILAR", "R_SIMILAR"), # Variables to gather
variable.name = "SOUND", # Name of within subjects variable
value.name = "RT") # Name of dependent variable)
longdat$NUMBER<-as.factor(longdat$NUMBER)
result12<-leveneTest(data=longdat,RT~TASK*SOUND)
lev_result[onlynumber,1]<-12
lev_result[onlynumber,2:5]<-subset_lev(result12)
onlynumber<-onlynumber+1
##########################row 161
library("httr")
library("RCurl")
library("foreign")
library("compute.es")
lobue.data <- read.spss("161.sav", to.data.frame=TRUE)
result161<-leveneTest(data=lobue.data,average~parentvschild*Stim_cond)
lev_result[onlynumber,1]<-161
lev_result[onlynumber,2:5]<-subset_lev(result161)
onlynumber<-onlynumber+1
#################row 80
library("httr")
library("RCurl")
Data <- read.delim("80.txt", header=TRUE, skip=3)
Data <- Data[Data$Include==1,] # Excluding cases
Data$Indecisive <- as.numeric(as.character(Data$Indecisive))
Data$Condition <- as.factor(Data$Condition)
Data$NotTorn <- Data$NotTorn*-1 # Reverse code item 2
Data$Indecisive <- Data$Indecisive-4 # Equating the scoring of item 3 (1 to 7) to
# the scoring of item 1 & 2 (-3 to 3)
#--------------Data$value <- (Data$StrongMixed + Data$NotTorn + Data$Indecisive)/3
# Creating the ambivalence score, based on the mean of item 1 to 3
#--------------Data[49,33] <- (Data[49,22]+Data[49,22])/2
Data$Indecisive[49]<-NA
Data$Indecisive<-as.numeric(Data$Indecisive)
# This participant did not provide an answer for item 3.
# The ambivalence score for this participant is calculated to be the mean of item 1 & 2.
result80_1<-leveneTest(data=Data,StrongMixed~Condition)
result80_2<-leveneTest(data=Data,NotTorn~Condition)
result80_3<-leveneTest(data=Data,Indecisive~Condition)
lev_result[onlynumber,1]<-80
lev_result[onlynumber,2:5]<-subset_lev(result80_1)
onlynumber<-onlynumber+1
lev_result[onlynumber,1]<-80
lev_result[onlynumber,2:5]<-subset_lev(result80_2)
onlynumber<-onlynumber+1
lev_result[onlynumber,1]<-80
lev_result[onlynumber,2:5]<-subset_lev(result80_3)
onlynumber<-onlynumber+1
#################row 65
library("httr")
library("RCurl")
library("car")
library("lsr")
data65 <-read.csv("65.csv", header = TRUE)
data65$sex = as.factor(data65$sex)
data65$condition = as.factor(data65$condition)
result65<-leveneTest(data=data65,vengefulness~sex*condition)
lev_result[onlynumber,1]<-65
lev_result[onlynumber,2:5]<-subset_lev(result65)
onlynumber<-onlynumber+1
##################row 151
library("httr")
library("RCurl")
library("foreign")
library("lsr")
Data <- read.spss("151.sav", to.data.frame=TRUE)
# Exclude participants who made up answers or due to experimenter error
Data$Exclude <- as.numeric(Data$Madeup_dummy)
Data[Data$Exclude==0,60] <- as.numeric(Data[Data$Exclude==0,]$Exp_Error)
result151<-leveneTest(data=Data[Data$Exclude==0,],mean_vignettes~as.factor(Condition))
lev_result[onlynumber,1]<-151
lev_result[onlynumber,2:5]<-subset_lev(result151)
onlynumber<-onlynumber+1
#########################row 158
library("httr")
library("RCurl")
library("reshape2")
library("foreign")
dat <- read.spss("158.sav",use.value.labels = FALSE, to.data.frame=TRUE)
longdat <- melt(dat,
id.vars = c("subject","gender", "age", "Ongoing_task"), # Between subject variables
measure.vars = c("PM_ErrAll_PMcuedimIrr_Incomp",
"PM_ErrAll_PMcuedimIrr_Comp",
"PM_ErrAll_PMcuedimRel_Incomp",
"PM_ErrAll_PMcuedimRel_Comp"), # Variables to gather
variable.name = "NAME", # Name of within subjects variable
value.name = "DV") # Name of dependent variable
longdat$subject<-as.factor(longdat$subject)
longdat$Ongoing_task<-as.factor(longdat$Ongoing_task)
# The group coding is spelled out explicitly here so that it is clear to later auditors
longdat$PMCUEDIM[longdat$NAME=="PM_ErrAll_PMcuedimIrr_Incomp"]<-1
longdat$PMCUEDIM[longdat$NAME=="PM_ErrAll_PMcuedimIrr_Comp"]<-1
longdat$PMCUEDIM[longdat$NAME=="PM_ErrAll_PMcuedimRel_Incomp"]<-2
longdat$PMCUEDIM[longdat$NAME=="PM_ErrAll_PMcuedimRel_Comp"]<-2
longdat$PMCUEDIM<-as.factor(longdat$PMCUEDIM)
longdat$COMPAT[longdat$NAME=="PM_ErrAll_PMcuedimIrr_Incomp"]<-1
longdat$COMPAT[longdat$NAME=="PM_ErrAll_PMcuedimIrr_Comp"]<-2
longdat$COMPAT[longdat$NAME=="PM_ErrAll_PMcuedimRel_Incomp"]<-1
longdat$COMPAT[longdat$NAME=="PM_ErrAll_PMcuedimRel_Comp"]<-2
longdat$COMPAT<-as.factor(longdat$COMPAT)
result158<-leveneTest(data=longdat,DV~Ongoing_task*PMCUEDIM*COMPAT)
lev_result[onlynumber,1]<-158
lev_result[onlynumber,2:5]<-subset_lev(result158)
onlynumber<-onlynumber+1
#########################row 8
library("httr")
library("RCurl")
library("foreign")
library("reshape2")
Data <- read.spss("8.sav", to.data.frame = TRUE)
Data <- Data[complete.cases(Data)==TRUE,]
# Cases with missing values are excluded before transforming the data into the long format
DataLong <- melt(Data, id = c("Subject","Condition"),
measure = c("PropR_StereoCor", "PropR_StereoInc", "PropR_CounterCor", "PropR_CounterInc"),
variable.name = "Within")
# The data is transformed into the long format, resulting in one outcome variable.
DataLong$Subject <- as.factor(DataLong$Subject)
DataLong$Accuracy <- 1
DataLong$Response <- 1
DataLong$Accuracy[DataLong$Within == "PropR_StereoInc"] <- 2
DataLong$Accuracy[DataLong$Within == "PropR_CounterInc"] <- 2
DataLong$Response[DataLong$Within == "PropR_CounterCor"] <- 2
DataLong$Response[DataLong$Within == "PropR_CounterInc"] <- 2
# The Within variable is recoded into 2 * 2 factors
result8<-leveneTest(data=DataLong,value~as.factor(Condition)*as.factor(Accuracy)*as.factor(Response))
lev_result[onlynumber,1]<-8
lev_result[onlynumber,2:5]<-subset_lev(result8)
onlynumber<-onlynumber+1
####################row 17
library("httr")
library("RCurl")
library("ez")
dats = read.csv("17.csv" ,sep=";")
datsExp1 = dats[dats$exp==1,]
subjNumb = unique(datsExp1[,1])
sampleDrawn = rep(NA, length(subjNumb))
for(loopSubjNumb in 1 : length(subjNumb)){
sampleDrawn[loopSubjNumb] = datsExp1[datsExp1$subj ==
subjNumb[loopSubjNumb] &
datsExp1$trial == 1,4]
}
estLab = paste("exp1_frequ", 1:12,sep="")
observed = datsExp1[,c("subj","dom","pro", "val")]
observFreqAll = numeric()
for(loopSubjNumb in 1 : length(subjNumb)){
subsetObsSubj = observed[observed$subj == subjNumb[loopSubjNumb],2:4]
observFreq = numeric()
for(loopDom in 1 : 2){
for(loopPro in 1 : 3){
for(loopVal in 1 : 2){
observFreq = c(observFreq,sum(subsetObsSubj[,1] == loopDom &
subsetObsSubj[,2] == loopPro &
subsetObsSubj[,3] == loopVal))
}}}
observFreqAll = rbind(observFreqAll,observFreq)
}
estimatedFrequency = numeric()
for(loopSubjNumb in 1 : length(subjNumb)){
estimatedFrequency = rbind(estimatedFrequency,
datsExp1[datsExp1$subj ==
subjNumb[loopSubjNumb],
estLab][1,])
}
## Set estimatedFrequency in correct order
## (i.e., remove counterbalancing)
estimatedFrequency[subjNumb%% 2 == 0,] =
estimatedFrequency[subjNumb%% 2 == 0,
c(5:6,3:4,1:2,11:12,9:10,7:8) ]
estimatedFrequenciesAll = matrix(rowSums(estimatedFrequency),ncol=12,
nrow=length(subjNumb))
relativeEstimatedFrequency = estimatedFrequency/
estimatedFrequenciesAll
observFreqAll[subjNumb%% 2 == 0,] =
observFreqAll[subjNumb%% 2 == 0,
c(5:6,3:4,1:2,11:12,9:10,7:8) ]
##
observedFrequenciesAll = matrix(rowSums(observFreqAll),ncol=12,
nrow=length(subjNumb))
relativeObservedFrequency = observFreqAll/observedFrequenciesAll
inaccuracyReplication = relativeEstimatedFrequency -
relativeObservedFrequency
dom = c(rep(1,6),rep(2,6)) # domain
val = rep(1:2,6) # valence
pro = rep(rep(1:3,each=2),2) # provider
part = rep(subjNumb,each=12) # participant
dat = cbind(c(t(inaccuracyReplication)),(dom),(val),
(pro),(part))
dat = data.frame(dat)
colnames(dat) = c("ina","dom","val","pro","part")
## indicating factors
dat$dom = as.factor(dat$dom)
dat$val = as.factor(dat$val)
dat$pro = as.factor(dat$pro)
dat$part = as.factor(dat$part)
result17<-leveneTest(data=dat,ina~dom*val*pro)
lev_result[onlynumber,1]<-17
lev_result[onlynumber,2:5]<-subset_lev(result17)
onlynumber<-onlynumber+1
############################row 32
library("httr")
library("RCurl")
library("xlsx")
library("reshape2")
Data <- read.xlsx("32.xlsx", sheetIndex=1, startRow=3, header=TRUE)
# Reshape into long format
Data <- melt(Data,
id.vars = c("Subject.ID", "Total.Time"),
measure.vars = c("KC", "TY", "OJ", "NO", "DD", "VY", "JC", "GP",
"EA", "VC", "WU", "HW"),
variable_name = "Chemical")
# Add first and second row of headers from the Excel file
Data <- cbind(Data[,1:2],
"A"=rep(c("A2", "A6"), each=228),
"B"=rep(c("B2", "B4", "B6"), each=76),
"Cause"=rep(c("Cause A", "Cause B"), each=38), Data[,3:4])
result32<-leveneTest(data=Data[Data$Cause=="Cause A",],value~A*B)
lev_result[onlynumber,1]<-32
lev_result[onlynumber,2:5]<-subset_lev(result32)
onlynumber<-onlynumber+1
##################row 13
library("httr")
library("RCurl")
library("foreign")
library("reshape2")
dat <- read.spss("13.sav", use.value.labels = FALSE,to.data.frame=TRUE)
names(dat)[1:6]<-c("ID", "Age","Sex", "Hand", "Lang", "Group")
# Get the data in long format
longdat <- melt(dat,
id.vars = c("ID","Age", "Sex", "Hand", "Lang", "Group"), # Between subject variables
measure.vars = c("Fact_RT",
"Mult_RT",
"New_RT"), # Variables to gather
variable.name = "ProbType", # Name of within subjects variable
value.name = "DV") # Name of dependent variable
# Create factors from ID, Problem Type, and Group
longdat$ID<-as.factor(longdat$ID)
longdat$ProbType<-as.factor(longdat$ProbType)
longdat$Group<-as.factor(longdat$Group)
result13<-leveneTest(data=longdat,DV~ProbType*Group)
lev_result[onlynumber,1]<-13
lev_result[onlynumber,2:5]<-subset_lev(result13)
onlynumber<-onlynumber+1
####################row 49
library("httr")
library("RCurl")
Data <- read.csv("49.csv")
result49<-leveneTest(data=Data,totalScore~condition)
lev_result[onlynumber,1]<-49
lev_result[onlynumber,2:5]<-subset_lev(result49)
onlynumber<-onlynumber+1
#######################row 124
library("httr")
library("RCurl")
library("foreign")
library("reshape2")
lau.data <- read.spss("124.sav", to.data.frame=TRUE)
#Data in spss wide format - reshape
lau.long <- melt(lau.data,
id.vars = c("VPNr","group"),
measure.vars = c("benevolent_mean", "non_benevolent_mean"),
variable.name="benevolence",
value.name="mean")
lau.long <- transform(lau.long, VPNr = factor(VPNr))
result124<-leveneTest(data=lau.long,mean~group*benevolence)
lev_result[onlynumber,1]<-124
lev_result[onlynumber,2:5]<-subset_lev(result124)
onlynumber<-onlynumber+1
##################row 5
library("httr")
library("RCurl")
library("foreign")
library("tidyr")
library("dplyr")
data<-read.spss("5.sav",to.data.frame=T)
#take necessary variables
d<-dplyr::select(data,PPNR,correct_intermixedAXBX_dif,correct_intermixedCXDY_dif,
correct_intermixedAXBX_same,correct_intermixedCXDY_same)
#Go to long data format
d.long<-gather(d, condition, score,correct_intermixedAXBX_dif:correct_intermixedCXDY_same )
#create factors
d.long$AX.BX_CX.DY<-as.factor(rep(c("axbx","cxdy"),each=48))
d.long$cond<-as.factor(rep(c("dif","same"),each=96))
d.long$PPNR<-as.factor(d.long$PPNR)
result5<-leveneTest(data=d.long,score~cond*AX.BX_CX.DY)
lev_result[onlynumber,1]<-5
lev_result[onlynumber,2:5]<-subset_lev(result5)
onlynumber<-onlynumber+1
#####################row 10
library("httr")
library("RCurl")
library("ez") # needed for repeated measures anova
library("car") # needed for recode function (data organization)
library("doBy") # needed for data manipulation
library("reshape2") # needed for data manipulation
dat <- read.csv("10_2.csv") # please upload and add Numerical_task.csv here
participantOrder <- read.csv("10_1.csv") # please upload and add Order.csv here
fullDat <- merge(dat, participantOrder, by="participant")
fullDat$participant <- factor(fullDat$participant)
fullDat$Notation <- factor(recode(fullDat$Notation, "'1'='Arabic'; '2'='Indian'; '3'='Mixed'"))
fullDat$Distance <- factor(recode(fullDat$Distance, "'1'='Small'; '2'='Large'; '0'='Same'"))
fullDat$Order <- factor(fullDat$Order)
# remove distance of "same" since we only want small/large
finalDat <- subset(fullDat, Distance != "Same")
# remove RT outliers mentioned in replication report
finalDat <- subset(finalDat, !(RT < 200) & !(RT > 2500))
# Filter= RT < 2500 & RT > 200 & participant ~= 24 & Distraction ~= 1 & Distance ~= 0 (FILTER)
# note that RT outlier removal and Distance removal (of "Same") was done on the prior two lines
finalDat <- subset(finalDat, participant != 24)
finalDat <- subset(finalDat, Distraction != 1)
finalDat$Participant <- factor(finalDat$participant)
finalDat$Distance <- factor(finalDat$Distance)
result10<-leveneTest(data=finalDat,RT~Order* Notation*Distance)
lev_result[onlynumber,1]<-10
lev_result[onlynumber,2:5]<-subset_lev(result10)
onlynumber<-onlynumber+1
#####################row 19
library("httr")
library("RCurl")
library("foreign")
library("ez")
library("reshape2")
data_wide <- read.spss("19.sav", to.data.frame = TRUE)
data <- melt(data_wide, id.vars = c("subject", "listtype"))
colnames(data) <- c("subject", "listtype", "condition", "reactiontime")
result19<-leveneTest(data=data,reactiontime~listtype*condition)
lev_result[onlynumber,1]<-19
lev_result[onlynumber,2:5]<-subset_lev(result19)
onlynumber<-onlynumber+1
######################row 27
library("httr") #to Read in data
library("RCurl")
library("xlsx")
library("gdata")
library("tidyr") #to manipulate data
d<-read.xlsx("27.xlsx",1)
#Go to long data
d.long<-gather(d,condition,score, Highfrequencyclear: Lowfrequencydegraded)
#Make factors
d.long$quality<-as.factor(rep(c("clear","degraded"),each=142))
d.long$frequency<-as.factor(rep(c("high","low"),each=71))
d.long$sub<-as.factor(rep(c(1:71)))
result27<-leveneTest(data=d.long,score~quality*frequency)
lev_result[onlynumber,1]<-27
lev_result[onlynumber,2:5]<-subset_lev(result27)
onlynumber<-onlynumber+1
####################row 28
library("httr")
library("RCurl")
library("car")
library("dplyr")
df = read.xlsx("28.xlsx", sheetName = "F1", header = TRUE)
data2<-data.frame(condition=names(df)[2],value=df[,2])
data3<-data.frame(condition=names(df)[3],value=df[,3])
data4<-data.frame(condition=names(df)[4],value=df[,4])
data5<-data.frame(condition=names(df)[5],value=df[,5])
temp1<-merge(data2,data3,all=TRUE)
temp2<-merge(data4,data5,all=TRUE)
data<-merge(temp1,temp2,all=TRUE)
result28<-leveneTest(data=data,value~condition)
lev_result[onlynumber,1]<-28
lev_result[onlynumber,2:5]<-subset_lev(result28)
onlynumber<-onlynumber+1
######################row 114
library("httr")
library("RCurl")
library("foreign")
o_data <-read.spss("114.sav", to.data.frame=T)
# Put data in data frame with easy overview and sensible names:
data <- matrix(ncol=5,nrow=0)
for (i in 1:nrow(o_data)){
for(j in c(14,16,18,20)){
newrow <- c(i,as.factor(o_data[i,8]),o_data[i,j],colnames(o_data[j]),0)
data <- rbind(data,newrow)
}
}
rownames(data) <- NULL
colnames(data) <- c("ID","Group","Score","Tense","YearWeek")
data[,4] <- rep(c("Past","Past","Future","Future"),8)
data[,5] <- rep(c("Week","Year"),16)
data <- as.data.frame(data)
data[,3] <- strtoi(data[,3])
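# (rbind() of mixed types above coerced every column to character;
#  strtoi() restores the Score column to integer before the test)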
result114<-leveneTest(data=data,Score~Group*Tense*YearWeek)
lev_result[onlynumber,1]<-114
lev_result[onlynumber,2:5]<-subset_lev(result114)
onlynumber<-onlynumber+1
#######################row 129
library("httr")
library("RCurl")
library('reshape2')
data <- read.table("129.csv", header = T, sep = ',')
# select subjects, motivation and 4 measures of the dependent variable
data <- data[c(1, 64, 67:70)]
# Transform to long format
DFlong <- melt(data,
id.vars = names(data)[1:2],
measure.vars = names(data)[3:6],
variable.name = 'condition')
# add the two factors to the data frame
expression <- as.factor(rep(1:2, each = nrow(data) * 2))
duration <- as.factor(rep(rep(1:2, each = nrow(data)), 2))
DFlong <- data.frame(DFlong, expression, duration)
result129<-leveneTest(data=DFlong,value~as.factor(ems_d)*as.factor(expression)*as.factor(duration))
lev_result[onlynumber,1]<-129
lev_result[onlynumber,2:5]<-subset_lev(result129)
onlynumber<-onlynumber+1
#####################row 3
library("car")
library("heplots")
library("httr")
library("lsr")
library("RCurl")
library("dplyr")
library("reshape2")
library("xlsx")
## read in the data
columns <- c("numeric", "Date", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric")
df <- read.xlsx("3_1.xlsx", sheetIndex = 1, startRow = 1, colClasses = columns)
df2 <- read.xlsx("3_2.xlsx", sheetIndex = 1, startRow = 1)
df3 <- read.xlsx("3_3.xlsx", sheetIndex = 1, startRow = 1)
# check sum score of Edi Handedness Inv
df.EHI <- df[,c(4:15)]
rowSums(df.EHI, na.rm=T) == as.numeric(df[ ,16])
### RECALL PERFORMANCE data manipulation
### using df3
# create switching list type variable
df3$list_type <- rep(NA, nrow(df3))
df3$list_type[which(df3$Switch_Condition_2Few5Many == 2 &
df3$Switch_DigitDegradation_0Normal1Degraded == 0)
] <- 1
df3$list_type[which(df3$Switch_Condition_2Few5Many == 2 &
df3$Switch_DigitDegradation_0Normal1Degraded == 1)
] <- 2
df3$list_type[which(df3$Switch_Condition_2Few5Many == 5 &
df3$Switch_DigitDegradation_0Normal1Degraded == 0)
] <- 3
# calculate hits
df3$hit <- rep(0, nrow(df3))
df3$hit[which(df3$WM_Target == df3$WM_Response)] <- 1
## average recall score
# aggregate number of hits per participant per list type
hit_sum <- aggregate(hit ~ SubjectID + list_type, data = df3, FUN = sum)
names(hit_sum) <- c("id", "list_type", "n_hits")
hit_sum$list_type <- factor(hit_sum$list_type, labels = c(
"low_switch/normal_stimuli", "low_switch/degraded_stimuli",
"high_switch/normal_stimuli"))
## absolute recall performance
# aggregate number of hits per participant, cycle,
# block, list type, and list length
hit_sum2 <- aggregate(hit ~ SubjectID + Cycle_1to4 + Block_1to9 +
list_type + WM_ListLength_468, data = df3, FUN = sum)
hit_sum2$recall <- hit_sum2$hit / hit_sum2$WM_ListLength_468
hit_sum2 <- aggregate(recall ~ SubjectID + list_type + WM_ListLength_468,
data = hit_sum2, FUN = mean)
# long to wide format
recall_wide <- dcast(hit_sum2, SubjectID ~ list_type + WM_ListLength_468)
names(recall_wide) <- c("id", "type1length4", "type1length6", "type1length8",
"type2length4", "type2length6", "type2length8",
"type3length4", "type3length6", "type3length8")
### DIGIT PROCESSING data manipulation
### using df2
## processing time
# create switching list type variable
df2$list_type <- rep(NA, nrow(df2))
df2$list_type[which(df2$Switch_Condition_2Few5Many == 2 &
df2$Switch_DigitDegradation_0Normal1Degraded == 0)
] <- 1
df2$list_type[which(df2$Switch_Condition_2Few5Many == 2 &
df2$Switch_DigitDegradation_0Normal1Degraded == 1)
] <- 2
df2$list_type[which(df2$Switch_Condition_2Few5Many == 5 &
df2$Switch_DigitDegradation_0Normal1Degraded == 0)
] <- 3
# aggregate reaction time per participant, cycle,
# block, list type, and list length
rt_sum <- aggregate(Switch_RT ~ SubjectID + Cycle_1to4 + Block_1to9 +
list_type + WM_ListLength_468, data = df2, FUN = sum)
# mean reaction time
rt_sum <- aggregate(Switch_RT ~ SubjectID + list_type + WM_ListLength_468,
data = rt_sum, FUN = mean)
# long to wide format
rt_wide <- dcast(rt_sum, SubjectID ~ list_type + WM_ListLength_468)
names(recall_wide) <- c("id", "type1length4", "type1length6", "type1length8",
"type2length4", "type2length6", "type2length8",
"type3length4", "type3length6", "type3length8")
## errors
# calculate switch filter accuracy
df2$accuracy <- rep(0, nrow(df2))
df2$accuracy[which(df2$Switch_Accuracy_1Hit2Error3Miss > 1)] <- 1
# aggregate accuracy per participant, cycle,
# block, list type, and list length
acc_sum <- aggregate(accuracy ~ SubjectID + Cycle_1to4 + Block_1to9 +
list_type + WM_ListLength_468, data = df2, FUN = sum)
acc_sum <- aggregate(accuracy ~ SubjectID + list_type + WM_ListLength_468,
data = acc_sum, FUN = mean)
acc_wide <- dcast(acc_sum, SubjectID ~ list_type + WM_ListLength_468)
names(acc_wide) <- c("id", "type1length4", "type1length6", "type1length8",
"type2length4", "type2length6", "type2length8",
"type3length4", "type3length6", "type3length8")
### MEMORY LOAD BY TASK SWITCHING data manipulation
### using df2
## response latency
# flag trials preceded by an error or a miss (prior-trial accuracy)
df2$prior_acc2 <- rep(0, nrow(df2))
for (i in 2:nrow(df2)) {
if(df2$Switch_Accuracy_1Hit2Error3Miss[i-1] > 1) {
df2$prior_acc2[i] <- 1
}
}
# calculate filter variable for filtering out trials with RTs < 100
df2$rt_filter <- rep(0, nrow(df2))
df2$rt_filter[which(df2$Switch_RT < 100)] <- 1
# combined filter variable
df2$filter_comb <- rep(0, nrow(df2))
df2$filter_comb[which(df2$accuracy == 1 |
df2$prior_acc2 == 1 |
df2$rt_filter == 1)] <- 1
# filter out first trials of each list
df.filter <- df2[which(df2$Switch_TrialType_0First1Repetition2Switch != 0), ]
# calculate percentage of trials to be excluded
length(which(df.filter$filter_comb==1))/nrow(df.filter)
# exclude trials
df.filter <- df.filter[which(df.filter$filter_comb!=1), ]
# aggregate latency per participant,
# list type, list length, and trial type
latency <- aggregate(Switch_RT ~ SubjectID + list_type + WM_ListLength_468 +
Switch_TrialType_0First1Repetition2Switch,
data = df.filter, FUN = mean)
latency_wide <- dcast(latency, SubjectID ~ list_type + WM_ListLength_468 +
Switch_TrialType_0First1Repetition2Switch)
names(latency_wide) <- c("id", "type1length4trial1", "type1length4trial2",
"type1length6trial1", "type1length6trial2",
"type1length8trial1", "type1length8trial2",
"type2length4trial1", "type2length4trial2",
"type2length6trial1", "type2length6trial2",
"type2length8trial1", "type2length8trial2",
"type3length4trial1", "type3length4trial2",
"type3length6trial1", "type3length6trial2",
"type3length8trial1", "type3length8trial2")
## error rates
# aggregate accuracy per participant,
# list type, list length, and trial type
error <- aggregate(accuracy ~ SubjectID + list_type + WM_ListLength_468 +
Switch_TrialType_0First1Repetition2Switch,
data = df2, FUN = sum)
error2 <- aggregate(accuracy ~ SubjectID + list_type + WM_ListLength_468 +
Switch_TrialType_0First1Repetition2Switch,
data = df2, FUN = length)
error$error_rate <- error$accuracy/error2$accuracy
error <- error[which(error$Switch_TrialType_0First1Repetition2Switch != 0),
-5] # delete "accuracy variable
error_wide <- dcast(error, SubjectID ~ list_type + WM_ListLength_468 +
Switch_TrialType_0First1Repetition2Switch)
names(error_wide) <- c("id", "type1length4trial1", "type1length4trial2",
"type1length6trial1", "type1length6trial2",
"type1length8trial1", "type1length8trial2",
"type2length4trial1", "type2length4trial2",
"type2length6trial1", "type2length6trial2",
"type2length8trial1", "type2length8trial2",
"type3length4trial1", "type3length4trial2",
"type3length6trial1", "type3length6trial2",
"type3length8trial1", "type3length8trial2")
# create dependent variable
hits_bind <- cbind(
hit_sum$n_hits[which(as.numeric(hit_sum$list_type)==1)],
hit_sum$n_hits[which(as.numeric(hit_sum$list_type)==2)],
hit_sum$n_hits[which(as.numeric(hit_sum$list_type)==3)]
)
recall_bind <- cbind(recall_wide[ ,2],recall_wide[ ,3],recall_wide[ ,4],
recall_wide[ ,5],recall_wide[ ,6],recall_wide[ ,7],
recall_wide[ ,8],recall_wide[ ,9],recall_wide[ ,10])
rt_bind <- cbind(rt_wide[ ,2],rt_wide[ ,3],rt_wide[ ,4],
rt_wide[ ,5],rt_wide[ ,6],rt_wide[ ,7],
rt_wide[ ,8],rt_wide[ ,9],rt_wide[ ,10])
acc_bind <- cbind(acc_wide[ ,2],acc_wide[ ,3],acc_wide[ ,4],
acc_wide[ ,5],acc_wide[ ,6],acc_wide[ ,7],
acc_wide[ ,8],acc_wide[ ,9],acc_wide[ ,10])
latency_bind <- cbind(latency_wide[ ,2],latency_wide[ ,3],latency_wide[ ,4],
latency_wide[ ,5],latency_wide[ ,6],latency_wide[ ,7],
latency_wide[ ,8],latency_wide[ ,9],latency_wide[ ,10],
latency_wide[ ,11],latency_wide[ ,12],latency_wide[ ,13],
latency_wide[ ,14],latency_wide[ ,15],latency_wide[ ,16],
latency_wide[ ,17],latency_wide[ ,18],latency_wide[ ,19])
error_bind <- cbind(error_wide[ ,2], error_wide[ ,3], error_wide[ ,4],
error_wide[ ,5], error_wide[ ,6], error_wide[ ,7],
error_wide[ ,8], error_wide[ ,9], error_wide[ ,10],
error_wide[ ,11], error_wide[ ,12], error_wide[ ,13],
error_wide[ ,14], error_wide[ ,15], error_wide[ ,16],
error_wide[ ,17], error_wide[ ,18], error_wide[ ,19])
#hits_bind
hits_bind<-as.data.frame(hits_bind)
data1<-data.frame(condition=1,value=hits_bind[,1])
data2<-data.frame(condition=2,value=hits_bind[,2])
data3<-data.frame(condition=3,value=hits_bind[,3])
temp1<-merge(data1,data2,all=TRUE)
data<-merge(temp1,data3,all=TRUE)
result3_1<-leveneTest(data=data,value~as.factor(condition))
lev_result[onlynumber,1]<-3
lev_result[onlynumber,2:5]<-subset_lev(result3_1)
onlynumber<-onlynumber+1
#recall_bind
recall_bind<-as.data.frame(recall_bind)
data1<-data.frame(condition=1,value=recall_bind[,1])
data2<-data.frame(condition=2,value=recall_bind[,2])
data3<-data.frame(condition=3,value=recall_bind[,3])
data4<-data.frame(condition=4,value=recall_bind[,4])
data5<-data.frame(condition=5,value=recall_bind[,5])
data6<-data.frame(condition=6,value=recall_bind[,6])
data7<-data.frame(condition=7,value=recall_bind[,7])
data8<-data.frame(condition=8,value=recall_bind[,8])
data9<-data.frame(condition=9,value=recall_bind[,9])
data<-bind_rows(data1,data2,data3,data4,data5,data6,data7,data8,data9)
result3_2<-leveneTest(data=data,value~as.factor(condition))
lev_result[onlynumber,1]<-3
lev_result[onlynumber,2:5]<-subset_lev(result3_2)
onlynumber<-onlynumber+1
#rt_bind
rt_bind<-as.data.frame(rt_bind)
data1<-data.frame(condition=1,value=rt_bind[,1])
data2<-data.frame(condition=2,value=rt_bind[,2])
data3<-data.frame(condition=3,value=rt_bind[,3])
data4<-data.frame(condition=4,value=rt_bind[,4])
data5<-data.frame(condition=5,value=rt_bind[,5])
data6<-data.frame(condition=6,value=rt_bind[,6])
data7<-data.frame(condition=7,value=rt_bind[,7])
data8<-data.frame(condition=8,value=rt_bind[,8])
data9<-data.frame(condition=9,value=rt_bind[,9])
data<-bind_rows(data1,data2,data3,data4,data5,data6,data7,data8,data9)
result3_3<-leveneTest(data=data,value~as.factor(condition))
lev_result[onlynumber,1]<-3
lev_result[onlynumber,2:5]<-subset_lev(result3_3)
onlynumber<-onlynumber+1
#acc_bind
acc_bind<-as.data.frame(acc_bind)
data1<-data.frame(condition=1,value=acc_bind[,1])
data2<-data.frame(condition=2,value=acc_bind[,2])
data3<-data.frame(condition=3,value=acc_bind[,3])
data4<-data.frame(condition=4,value=acc_bind[,4])
data5<-data.frame(condition=5,value=acc_bind[,5])
data6<-data.frame(condition=6,value=acc_bind[,6])
data7<-data.frame(condition=7,value=acc_bind[,7])
data8<-data.frame(condition=8,value=acc_bind[,8])
data9<-data.frame(condition=9,value=acc_bind[,9])
data<-bind_rows(data1,data2,data3,data4,data5,data6,data7,data8,data9)
result3_4<-leveneTest(data=data,value~as.factor(condition))
lev_result[onlynumber,1]<-3
lev_result[onlynumber,2:5]<-subset_lev(result3_4)
onlynumber<-onlynumber+1
#latency_bind
latency_bind<-as.data.frame(latency_bind)
data1<-data.frame(condition=1,value=latency_bind[,1])
data2<-data.frame(condition=2,value=latency_bind[,2])
data3<-data.frame(condition=3,value=latency_bind[,3])
data4<-data.frame(condition=4,value=latency_bind[,4])
data5<-data.frame(condition=5,value=latency_bind[,5])
data6<-data.frame(condition=6,value=latency_bind[,6])
data7<-data.frame(condition=7,value=latency_bind[,7])
data8<-data.frame(condition=8,value=latency_bind[,8])
data9<-data.frame(condition=9,value=latency_bind[,9])
data10<-data.frame(condition=10,value=latency_bind[,10])
data11<-data.frame(condition=11,value=latency_bind[,11])
data12<-data.frame(condition=12,value=latency_bind[,12])
data13<-data.frame(condition=13,value=latency_bind[,13])
data14<-data.frame(condition=14,value=latency_bind[,14])
data15<-data.frame(condition=15,value=latency_bind[,15])
data16<-data.frame(condition=16,value=latency_bind[,16])
data17<-data.frame(condition=17,value=latency_bind[,17])
data18<-data.frame(condition=18,value=latency_bind[,18])
data<-bind_rows(data1,data2,data3,data4,data5,data6,data7,data8,data9,
data10,data11,data12,data13,data14,data15,data16,data17,data18)
result3_5<-leveneTest(data=data,value~as.factor(condition))
lev_result[onlynumber,1]<-3
lev_result[onlynumber,2:5]<-subset_lev(result3_5)
onlynumber<-onlynumber+1
#error_bind
error_bind<-as.data.frame(error_bind)
data1<-data.frame(condition=1,value=error_bind[,1])
data2<-data.frame(condition=2,value=error_bind[,2])
data3<-data.frame(condition=3,value=error_bind[,3])
data4<-data.frame(condition=4,value=error_bind[,4])
data5<-data.frame(condition=5,value=error_bind[,5])
data6<-data.frame(condition=6,value=error_bind[,6])
data7<-data.frame(condition=7,value=error_bind[,7])
data8<-data.frame(condition=8,value=error_bind[,8])
data9<-data.frame(condition=9,value=error_bind[,9])
data10<-data.frame(condition=10,value=error_bind[,10])
data11<-data.frame(condition=11,value=error_bind[,11])
data12<-data.frame(condition=12,value=error_bind[,12])
data13<-data.frame(condition=13,value=error_bind[,13])
data14<-data.frame(condition=14,value=error_bind[,14])
data15<-data.frame(condition=15,value=error_bind[,15])
data16<-data.frame(condition=16,value=error_bind[,16])
data17<-data.frame(condition=17,value=error_bind[,17])
data18<-data.frame(condition=18,value=error_bind[,18])
data<-bind_rows(data1,data2,data3,data4,data5,data6,data7,data8,data9,
data10,data11,data12,data13,data14,data15,data16,data17,data18)
result3_6<-leveneTest(data=data,value~as.factor(condition))
lev_result[onlynumber,1]<-3
lev_result[onlynumber,2:5]<-subset_lev(result3_6)
onlynumber<-onlynumber+1
######################row 132
library("httr")
library("RCurl")
library("ez")
library("car")
Data <- read.table("132.txt", header=TRUE, fill=TRUE)
Data$Subject <- 1:30
Data$Subject <- as.factor(Data$Subject)
Data$Discussant <- as.factor(Data$Discussant)
Data$Condi <- as.factor(Data$Condi)
result132<-leveneTest(data=Data,Looktime~Discussant*Condi)
lev_result[onlynumber,1]<-132
lev_result[onlynumber,2:5]<-subset_lev(result132)
onlynumber<-onlynumber+1
#################row 1
library("httr")
library("RCurl")
library(foreign)
library(dplyr)
library(ez)
unzip("1.zip", exdir ="1")
# Read files:
file_list <- list.files("1//Data/Behavioual Data", pattern = "Roelofs_clean.txt", full.names = TRUE)
data <- do.call("rbind",lapply(file_list,FUN=function(files) {
read.table(files,header=T,sep="")
}))
data1 <- subset(data, AnyError==0) # The author did not include error responses
result1<-leveneTest(data=data1,VoiceTask.RT~TypeOfRelation*Task*Relatedness)
lev_result[onlynumber,1]<-1
lev_result[onlynumber,2:5]<-subset_lev(result1)
onlynumber<-onlynumber+1
##################row 2
library("httr")
library("RCurl")
library("tidyr")
library("magrittr")
library("ez")
library("schoRsch")
unzip("2.zip", exdir = "2")
Lines <- readLines(paste0("2","/SPSS/Syntax/CPR_byParticipant_1Way.sps"))
Begin <- grep("BEGIN DATA",Lines)
End <- grep("END DATA", Lines)
str <- paste(Lines[(Begin+1):(End-1)], collapse="\n")
Data <- read.table(text=str)
names(Data) <- c("id","Neighbor_Word","Neighbor_Nonword","Control_Word","Control_Nonword")
longData <- Data %>% gather("condition","value",c(Neighbor_Nonword,Control_Nonword))
result2<-leveneTest(data=longData,value~as.factor(condition))
lev_result[onlynumber,1]<-2
lev_result[onlynumber,2:5]<-subset_lev(result2)
onlynumber<-onlynumber+1
######################row 25
library("httr")
library("RCurl")
library(ez)
correct<-read.table("25_1.txt", head = T)
error<-read.table("25_2.txt", head = T,sep = "\t")
error_long <- data.frame(transposition = rep(1:16, 4),
prior_n = rep(1:4, each = 16),
proportion = unlist(error[, 2:5]))
error_long <- error_long[c(1:48, 50:64), ]
error_long$prior_n <- factor(error_long$prior_n)
result25<-leveneTest(data=error_long,proportion~prior_n)
lev_result[onlynumber,1]<-25
lev_result[onlynumber,2:5]<-subset_lev(result25)
onlynumber<-onlynumber+1
######################row 36
library("httr")
library("RCurl")
library('afex')
PP_data <- read.table("36.txt", header = T, sep="\t")
PP_data$targets <- ifelse(PP_data$targets == 29, c("029"), c("158"))
# Prepare aggregated dataset for confirmatory analysis
attach(PP_data)
PP_data_aggr_dependency <- aggregate(PP_data, by=list(Subject, digit_combination, targets, dependency), FUN=mean)
detach(PP_data)
PP_data_aggr_dependency <- PP_data_aggr_dependency[c(1:4,20)]
names(PP_data_aggr_dependency) <- c("Subject","digit_combination", "targets", "dependency", "Recognition.ACC" )
PP_data_aggr_dependency$Recognition.ACC <- PP_data_aggr_dependency$Recognition.ACC * 100
result36<-leveneTest(data=PP_data_aggr_dependency,Recognition.ACC~digit_combination*targets*dependency)
lev_result[onlynumber,1]<-36
lev_result[onlynumber,2:5]<-subset_lev(result36)
onlynumber<-onlynumber+1
##################row 133
library("httr") #To read in data
library("RCurl") #To read in data
library("xlsx") #To read in data
library("dplyr") #To organize data
library("ez") #To analyse
library("tidyr") #To organize data
require(schoRsch)
data<-read.xlsx("133.xls",1,header=TRUE)
#dependent= remembered words (proportion correct)
part.prop<-dplyr::select(data, Subject,Mean_RememberedWords_Survival,Mean_RememberedWords_Vacation)
part.rating<-dplyr::select(data, Subject,MeanRating_SurvivalWords,MeanRating_VacationWords)
#Go to long format
names<-colnames(part.prop)
part.prop[,1]<-1:38
part_prop_long<-reshape(part.prop,varying=list(names[2:3]),idvar=names[1]
,timevar="condition",v.names="score"
,direction="long",times=names[2:3])
part_prop_long$condition<-as.factor(part_prop_long$condition)
names<-colnames(part.rating)
part.rating[,1]<-1:38
part_rating_long<-reshape(part.rating,varying=list(names[2:3]),idvar=names[1]
,timevar="condition",v.names="score"
,direction="long",times=names[2:3])
part_rating_long$condition<-as.factor(part_rating_long$condition)
result133_1<-leveneTest(data=part_prop_long,score~condition)
result133_2<-leveneTest(data=part_rating_long,score~condition)
lev_result[onlynumber,1]<-133
lev_result[onlynumber,2:5]<-subset_lev(result133_1)
onlynumber<-onlynumber+1
lev_result[onlynumber,1]<-133
lev_result[onlynumber,2:5]<-subset_lev(result133_2)
onlynumber<-onlynumber+1
#######################row 11
library("httr")
library("RCurl")
library("foreign")
library("reshape2")
mirm.data <- read.spss("11.sav", to.data.frame=TRUE)
mirm.long <- melt(mirm.data,
id.vars="SUBJECT",
variable.name="cond",
value.name="RT")
mirm.long$nearness <- 1
mirm.long$nearness[mirm.long$cond == "COND1" | mirm.long$cond == "COND2"] <- 2
mirm.long$distance <- 1
mirm.long$distance[mirm.long$cond == "COND1" | mirm.long$cond == "COND3"] <- 2
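# The coding above maps the four conditions onto a 2x2 design:
# COND1 -> nearness 2 / distance 2, COND2 -> nearness 2 / distance 1,
# COND3 -> nearness 1 / distance 2, all remaining conditions -> nearness 1 / distance 1.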
mirm.long <- transform(mirm.long, distance=factor(distance))
mirm.long <- transform(mirm.long, nearness=factor(nearness))
result11<-leveneTest(data=mirm.long,RT~nearness*distance)
lev_result[onlynumber,1]<-11
lev_result[onlynumber,2:5]<-subset_lev(result11)
onlynumber<-onlynumber+1
####################row 22
library("httr")
library("RCurl")
library("foreign")
library("ez")
library("reshape2")
data <- read.spss("22.sav", to.data.frame = TRUE)
# Subset only the needed variables
data_wide <- data[,c(1,7,8,9,10,18)]
# Transform dataset to long format
data_long <- melt(data_wide, id.vars = c("Code", "Group"))
colnames(data_long) <- c("Code", "Group", "Lag", "Correct")
result22<-leveneTest(data=data_long,Correct~Group*Lag)
lev_result[onlynumber,1]<-22
lev_result[onlynumber,2:5]<-subset_lev(result22)
onlynumber<-onlynumber+1
####################row 167
library("httr")
library("RCurl")
library("foreign")
library("ez")
data<-read.spss("167.sav", to.data.frame =TRUE)
#Remove all subjects with a reaction time beyond 1000 ms
data$RT.cleaned<-data$RT
data$RT.cleaned[data$RT>1000]=NA
data.cleaned<-na.omit(data)
#error rate needs to be numeric to perform the analysis
data$errorate<-as.numeric(data$Correct)
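# Note: if read.spss() imported Correct as a labelled factor, as.numeric() returns the
# internal factor codes (e.g. 1/2) rather than the original 0/1 values; in that case
# as.numeric(as.character(data$Correct)) would be needed instead.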
result167_1<-leveneTest(data=data.cleaned,RT~typisch)
result167_2<-leveneTest(data=data,errorate~typisch)
lev_result[onlynumber,1]<-167
lev_result[onlynumber,2:5]<-subset_lev(result167_1)
onlynumber<-onlynumber+1
lev_result[onlynumber,1]<-167
lev_result[onlynumber,2:5]<-subset_lev(result167_2)
onlynumber<-onlynumber+1
####################row 26
library("httr")
library("RCurl")
library("foreign")
library("reshape2")
data <- read.spss("26.sav", to.data.frame = T)
# select the items, z-scores and wordclassifications
data <- data[,c(1:2, 5:6)]
# transform to long format
dataLong <- melt(data,
id.vars = c('stimulusitem3', 'wordclassification'),
measure.vars = c('z_noswitch_latency', 'z_switch_latency'),
variable.name = 'condition',
value.name = 'zscore')
# convert the between-subjects factor (wordclassification) to a factor
dataLong$wordclassification <- as.factor(dataLong$wordclassification)
result26<-leveneTest(data=dataLong,zscore~condition*wordclassification)
lev_result[onlynumber,1]<-26
lev_result[onlynumber,2:5]<-subset_lev(result26)
onlynumber<-onlynumber+1
#################row 150
library("httr")
library("RCurl")
library("readstata13") # read dta Stata 13 file
library("doBy") # organize data
library("plyr") # more data organization
library("schoRsch") # compute p-eta-sq more easily
library("ez") # required for ezANOVA repeated measures
library("lme4") # required to handle missing data repeated measures
dat <- read.dta13("150.dta")
dat$subject <- factor(dat$subject)
# code targetPosition variable based on screen position
dat$targetPosition <- NA
dat[which(dat$position == 5),]$targetPosition <- 1
dat[which(dat$position == 4),]$targetPosition <- 2
dat[which(dat$position == 3),]$targetPosition <- 3
dat[which(dat$position == 2),]$targetPosition <- 4
dat[which(dat$position == 1),]$targetPosition <- 5
dat[which(dat$position == 8),]$targetPosition <- 6
dat[which(dat$position == 7),]$targetPosition <- 7
dat[which(dat$position == 6),]$targetPosition <- 8
# drop trials missing trigger and those where trigger is less than 4000 (i.e., blank screens & fixation cross slides)
datClean <- subset(dat, !is.na(trigger) & trigger >= 4000)
# correct trials are those on which the target was eventually fixated upon; flag them here
datClean$AOITarget <- FALSE
datClean[which(datClean$targetPosition == datClean$AOI),]$AOITarget <- 1
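# (assigning 1 into the logical column coerces AOITarget to numeric 0/1, which is what
# the AOITarget.max != 1 comparison below relies on)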
# exclude all trials where subject did not fixate on target at least once
didFixate <- summaryBy(AOITarget ~ subject + trigger, FUN=c(max, length), data=datClean)
# the following function flags those trials for exclusion; it takes a while, so a progress bar is shown
datClean$exclusion <- FALSE
computeExclusion <- function (didFixate, datClean) {
pb <- txtProgressBar(style=3)
for (i in 1:nrow(didFixate)) {
if (didFixate[i,]$AOITarget.max != 1)
datClean[which(datClean$subject == didFixate$subject[i] & datClean$trigger == didFixate$trigger[i]),]$exclusion <- TRUE
setTxtProgressBar(pb, i/nrow(didFixate))
}
return(datClean)
}
datCleanExclusion <- computeExclusion(didFixate, datClean)
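# Illustrative alternative (not used further in this script): the same exclusions can be
# flagged without the loop by merging the per-(subject, trigger) maxima back onto the
# trial-level data; datCleanAlt is a hypothetical name introduced only for this sketch.
datCleanAlt <- merge(datClean, didFixate[, c("subject", "trigger", "AOITarget.max")],
by = c("subject", "trigger"))
datCleanAlt$exclusion <- datCleanAlt$AOITarget.max != 1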
# drop trials within the center left/right from the analysis
datCleanExclusionDrop <- subset(datCleanExclusion, targetPosition != 3 & targetPosition != 7)
# drop excluded trials
datCleanExclusionDropMore <- subset(datCleanExclusionDrop, exclusion != TRUE)
# drop trials where AOI equals 0
datCleanExclusionDropMoreAOI <- subset(datCleanExclusionDropMore, AOI != 0)
# sort data
datCleanExclusionDropMoreAOISorted <- datCleanExclusionDropMoreAOI[order(datCleanExclusionDropMoreAOI$subject, datCleanExclusionDropMoreAOI$trigger, datCleanExclusionDropMoreAOI$gazetime),]
# code trials by first fixation, using sort
datCleanExclusionDropMoreAOISorted$firstfix <- NA
firstfix <- ddply(datCleanExclusionDropMoreAOISorted,.(subject, trigger),.fun = function(x){
x$firstfix <- 1:nrow(x)
return(x)})
# keep only first fixation trials
firstfixOnly <- subset(firstfix, firstfix == 1)
# create coding for up factor
firstfixOnly$up <- 0
firstfixOnly[which(firstfixOnly$targetPosition == 1 | firstfixOnly$targetPosition == 2 | firstfixOnly$targetPosition == 8),]$up <- 1
# create coding for action factor
firstfixOnly$action <- 0
firstfixOnly[which(firstfixOnly$block == 1),]$action <- 1
# create coding for prototypical (proto) factor
firstfixOnly$prototypical <- firstfixOnly$target %% 2
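# (the modulo codes odd-numbered targets as 1 and even-numbered targets as 0)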
# compute means within factors of interest for within subjects ANOVA
firstfixOnlyMeans <- summaryBy(AOITarget ~ subject + prototypical + up + action, FUN=mean, data=firstfixOnly)
firstfixOnlyMeans$subject <- factor(firstfixOnlyMeans$subject)
firstfixOnlyMeans$prototypical <- factor(firstfixOnlyMeans$prototypical)
firstfixOnlyMeans$up <- factor(firstfixOnlyMeans$up)
firstfixOnlyMeans$action <- factor(firstfixOnlyMeans$action)
# create a separate dataset dropping subject 8
firstfixOnlyMeansFix <- subset(firstfixOnlyMeans, subject != 8)
firstfixOnlyMeansFix$subject <- factor(firstfixOnlyMeansFix$subject)
result150<-leveneTest(data=firstfixOnlyMeansFix,AOITarget.mean~up*prototypical*action)
lev_result[onlynumber,1]<-150
lev_result[onlynumber,2:5]<-subset_lev(result150)
onlynumber<-onlynumber+1
##################row 37
library("httr")
library("RCurl")
library("foreign")
library("reshape2")
data <- read.spss("37.sav", to.data.frame = T)
# select relevant variables
data <- data[,c(1:3, 8:9, 14:15, 20:21)]
# transform to long format
DFlong <- melt(data, id.vars = c('subject'))
# add the independent variables as factors
task <- as.factor(rep(1:2, each = nrow(DFlong) / 2))
cue <- as.factor(rep(rep(1:2, each = nrow(DFlong) / 4), 2))
size <- as.factor(rep(rep(1:2, each = nrow(data)), 4))
DF <- data.frame(DFlong, task, cue, size)
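# Note: the rep() calls above rely on melt() stacking the eight measured columns in their
# original order, with task varying slowest (first vs. last four columns), cue next
# (pairs of columns), and size varying fastest (alternating columns).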
result37<-leveneTest(data=DF,value~task*cue*size)
lev_result[onlynumber,1]<-37
lev_result[onlynumber,2:5]<-subset_lev(result37)
onlynumber<-onlynumber+1
#####################row 117
library("car")
library("heplots")
library("httr")
library("RCurl")
library("reshape2")
library("xlsx")
df <- read.xlsx("117.xlsx", 1, startRow = 5,
colIndex = c(7:11), colClasses = "numeric")
df2 <- read.xlsx("117.xlsx", 1, startRow = 5,
colIndex = c(1:5), colClasses = "numeric")
for (i in 2:ncol(df)) {
df[ ,i] <- factor(df[ ,i])
}
for (i in 2:ncol(df2)) {
df2[ ,i] <- factor(df2[ ,i])
}
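# Note: the same formula is fitted to both data frames below, so column blocks 7:11 and
# 1:5 of the spreadsheet are assumed to carry identically named variables
# (temporal.selection, efficacy, latency, precision).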
result117_1<-leveneTest(data=df,temporal.selection~efficacy*latency*precision)
result117_2<-leveneTest(data=df2,temporal.selection~efficacy*latency*precision)
lev_result[onlynumber,1]<-117
lev_result[onlynumber,2:5]<-subset_lev(result117_1)
onlynumber<-onlynumber+1
lev_result[onlynumber,1]<-117
lev_result[onlynumber,2:5]<-subset_lev(result117_2)
onlynumber<-onlynumber+1
#####################row 139
library("httr")
library("RCurl")
library("xlsx")
####subject 1
S1<-read.xlsx("139_1.xlsx",sheetIndex=1,header=TRUE,as.data.frame=TRUE)
S1<-S1[,-c(1,2,3,7,8,12,16,20,24,28,32,36,37,41,45,49,53)]
S1<-S1[1:16,]
####subject 2
S2<-read.xlsx("139_2.xlsx",sheetIndex=1,header=TRUE,as.data.frame=TRUE)
S2<-S2[,-c(1,2,3,4,8,12,16,20,24,28,32,36,40,44,48,52)]
S2<-S2[1:16,]
####subject 4
S4<-read.xlsx("139_4.xlsx",sheetIndex=1,header=TRUE,as.data.frame=TRUE)
S4<-S4[,-c(1,2,3,4)]
S4<-S4[1:16,]
####subject 6
S6<-read.xlsx("139_6.xlsx",sheetIndex=1,header=TRUE,as.data.frame=TRUE)
S6<-S6[,-c(1,2,3,4,8,12,16,20,24,28,32,36,40,44,48,52)]
S6<-S6[1:16,]
####subject 7
S7<-read.xlsx("139_7.xlsx",sheetIndex=1,header=TRUE,as.data.frame=TRUE)
S7<-S7[,-c(1,2,3,4,8,12,16,20,24,28,32,36,40,44,48,52,53)]
S7<-S7[1:16,]
### USEfile reshapes one subject's wide spreadsheet into long format (12 sessions x 16 blocks). Arguments: x ... data.frame with that subject's sheet, sub ... subject ID
USEfile<-function(x,sub){
library(stringr)
ISI<-list(NA)
N<-list(NA)
S1sumSUB<-x[,seq(1,36,3)] # every third of the 36 retained columns; their entries are split below into ISI and N codes
for (i in 1:12) {
help<-do.call(rbind,str_split(S1sumSUB[,i], "_"))
ISI1 <-do.call(rbind,str_split(help[,1]," "))
ISI1<-data.frame(ISI1[,1])
N1<-do.call(rbind,str_split(help[,2]," "))
N1<-data.frame(N1[,1])
ISI[[i]]<-ISI1
N[[i]]<-N1
}
S1ISI<-do.call(cbind,ISI)
S1N<-do.call(cbind,N)
x<-cbind(x,S1ISI,S1N)
x<-as.matrix(x)
x<-data.frame(rbind(x[,c(1:3,37,49)],x[,c(4:6,38,50)],x[,c(7:9,39,51)],x[,c(10:12,40,52)],
x[,c(13:15,41,53)],x[,c(16:18,42,54)],x[,c(19:21,43,55)],
x[,c(22:24,44,56)],x[,c(25:27,45,57)],x[,c(28:30,46,58)],
x[,c(31:33,47,59)],x[,c(34:36,48,60)]))
block=rep(1:16,12)
session=rep(1:12,each=16)
x<-cbind(sub,session,block,x)
return(x)
}
S1done<-USEfile(S1,1)
S2done<-USEfile(S2,2)
S4done<-USEfile(S4,4)
S6done<-USEfile(S6,6)
S7done<-USEfile(S7,7)
colnames(S1done)<-colnames(S2done)
data<-rbind(S1done,S2done,S4done,S6done,S7done)
data<-data[,-c(2:5)]
colnames(data)<-c("sub","d","ISI","N")
data$sub <- as.factor(data$sub)
data$ISI <- as.factor(data$ISI)
data$N <- as.factor(data$N)
data$d <- as.numeric(as.character(data$d))
result139<-leveneTest(data=data,d~ISI*N)
lev_result[onlynumber,1]<-139
lev_result[onlynumber,2:5]<-subset_lev(result139)
####################output###########################
write.table(lev_result,sep=",",file = "levenetest_result20170307_1.csv",row.names = FALSE,col.names = FALSE)
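# Illustrative read-back (not part of the original analysis): because col.names = FALSE,
# the CSV has no header row; the columns can be relabelled to match lev_result when the
# file is read back in. lev_check is a hypothetical name used only for this sketch.
lev_check <- read.csv("levenetest_result20170307_1.csv", header = FALSE,
col.names = c("ID", "DF1", "DF2", "F_value", "P_value"))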