Report Details

This reproducibility report attempts to reproduce the target results of Experiment 1 of Gino, Kouchaki, and Galinsky (2015), published in Psychological Science. The repository for this reproducibility project can be found on GitHub.

articleID <- "11-5-2015_CLASS" # insert the article ID code here e.g., "10-3-2015_PS"
reportType <- "final" # specify whether this is the 'pilot' report or 'final' report
pilotNames <- "Marianna Zhang"
copilotNames <- "Mackenzie Leake"
pilotTTC <- 300 # insert the pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
copilotTTC <- 20 # insert the co-pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
pilotStartDate <- as.Date("11/03/18", format="%m/%d/%y") # insert the pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
copilotStartDate <- as.Date("11/04/18", format="%m/%d/%y") # insert the co-pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
completionDate <- as.Date("11/04/18", format="%m/%d/%y") # copilot insert the date of final report completion (after any necessary rounds of author assistance) in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")

Methods summary:

Participants were randomly assigned to a 2x2 between-subjects design (behavior type x event type). For behavior type, those in the authentic condition were asked to write about a time they felt authentic, and those in the inauthentic condition were asked to write about a time they felt inauthentic. For event type, those in the general condition were given no restrictions, and those in the unrelated condition were asked to write about a time that was not related to lying or deceiving others.

Participants then completed measures of their moral self-regard and feelings of impurity, presented in random order. They then completed two manipulation checks: a measure of self-alienation, and a self-report of whether they had written about a time they felt authentic, inauthentic, or neutral.

Participants who did not write an essay, as well as those who incorrectly answered which kind of essay they had been asked to write, were excluded.

2x2 ANOVAs were used to analyze the participants’ average responses to the impurity and moral self-regard measures across the two sets of conditions.


Target outcomes:

Impurity and moral self-regard. Similar 2x2 ANOVAs using impurity and moral self-regard as dependent measures also revealed only a significant main effect of type of behavior. Participants in the inauthentic-behavior condition reported greater feelings of impurity (M = 3.56, SD = 1.86, 95% CI = [3.30, 3.85]) and lower moral self-regard (M = 2.90, SD = 1.50, 95% CI = [2.61, 3.16]) than did participants in the authentic-behavior condition (impurity: M = 1.51, SD = 1.29, 95% CI = [1.25, 1.78]; moral self-regard: M = 4.99, SD = 1.68, 95% CI = [4.72, 5.26]), F(1, 263) = 111.06, p < .001, \(\eta_p^2\) = .30, and F(1, 263) = 115.25, p < .001, \(\eta_p^2\) = .31, respectively. (from Gino, Kouchaki, & Galinsky, 2015, p. 986)


Step 1: Load packages and prepare report object

# load packages
library(tidyverse) # for data munging
library(knitr) # for kable table formatting
library(haven) # import and export 'SPSS', 'Stata' and 'SAS' Files
library(readxl) # import excel files
library(CARPSreports) # custom report functions
library(lsr) # partial eta squared from ANOVA
# Prepare report object. This will be updated automatically by the reproCheck function each time values are compared
reportObject <- data.frame(dummyRow = TRUE, reportedValue = NA, obtainedValue = NA, valueType = NA, percentageError = NA, comparisonOutcome = NA, eyeballCheck = NA)

Step 2: Load data

data <- read_sav("data/data_Experiment_1.sav")

Step 3: Tidy data

data_tidy <- data %>% 
  mutate(subject = row_number()) %>% # add subject number
  gather(question, response, moralSR_1:alienation_4) %>% # each question one row
  separate(question, c("question_type", "question_number"), "_") %>% # organize questions by question type
  separate(condition, c("behavior_type", "event_type"), "_") %>%  # separate out each IV
  mutate(event_type=ifelse(is.na(event_type), "unrelated", event_type)) # add unrelated condition marker

data_tidy$behavior_type <- as.factor(data_tidy$behavior_type)
data_tidy$event_type <- as.factor(data_tidy$event_type)
data_tidy$question_type <- as.factor(data_tidy$question_type)
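
As a quick sanity check (a sketch, not part of the original pipeline), the reshaped data can be inspected and the number of subjects per cell tallied; it reuses only the variables created in the tidying step above.

# inspect the reshaped data and count subjects per condition cell
glimpse(data_tidy)
data_tidy %>% 
  distinct(subject, behavior_type, event_type) %>% 
  count(behavior_type, event_type)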

Step 4: Run analysis

Pre-processing

data_tidy_filtered <- data_tidy %>% 
  filter(failed_essay == 0 & # exclude those who did not write an essay
           failed_MC == 0 & # exclude those who failed manipulation check
           question_type != "alienation") # exclude alienation questions, since not in target analysis

# group by subject
data_tidy_filtered_subject <- data_tidy_filtered %>% 
  group_by(subject, behavior_type, event_type, question_type) %>% 
  summarize(subject_avg = mean(response, na.rm=TRUE))
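
As a hedged check that the exclusions leave the sample size implied by the target F(1, 263) (a residual df of 263 corresponds to 267 subjects in the 2x2 design), the number of remaining subjects can be counted; this check is not part of the original script.

# expect 267 subjects if the reported residual df of 263 is to be reproduced
n_distinct(data_tidy_filtered_subject$subject)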

Descriptive statistics

Q <- qnorm(.975) # 95% defined for confidence interval

#### BY TRIAL (what the pilot tried first) ####
# summarize impurity, self-regard average across 2x2 conditions (by trial)
data_means <- data_tidy_filtered %>%
  group_by(behavior_type, event_type, question_type) %>% 
  summarize(avg = mean(response, na.rm=TRUE), 
            sd = sd(response, na.rm=TRUE),
            CI_95_lower = avg-Q*(sd/sqrt(n())),
            CI_95_upper = avg+Q*(sd/sqrt(n())))

# summarize impurity, self-regard average across behavior type conditions (by trial)
data_means_behavior <- data_tidy_filtered %>%
  group_by(behavior_type, question_type) %>% 
  summarize(avg = mean(response, na.rm=TRUE), 
            sd = sd(response, na.rm=TRUE), 
            CI_95_lower = avg-Q*(sd/sqrt(n())),
            CI_95_upper = avg+Q*(sd/sqrt(n())))



#### BY SUBJECT (what the original article did) ####

# summarize impurity, self-regard averages across the 2x2 conditions (by subject average)
data_means_subject <- data_tidy_filtered_subject %>%
  group_by(behavior_type, event_type, question_type) %>% 
  summarize(avg = mean(subject_avg, na.rm=TRUE), 
            sd = sd(subject_avg, na.rm=TRUE), 
            CI_95_lower = avg-Q*(sd/sqrt(n())),
            CI_95_upper = avg+Q*(sd/sqrt(n())))
View(data_means_subject) # interactive data viewer; produces no output in the knitted report

# summarize impurity, self-regard average across behavior type conditions (by subject average)
data_means_behavior_subject <- data_tidy_filtered_subject %>%
  group_by(behavior_type, question_type) %>% 
  summarize(avg = mean(subject_avg, na.rm=TRUE), 
            sd = sd(subject_avg, na.rm=TRUE), 
            CI_95_lower = avg-Q*(sd/sqrt(n())),
            CI_95_upper = avg+Q*(sd/sqrt(n())))
View(data_means_behavior_subject) # interactive data viewer; produces no output in the knitted report
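
The confidence intervals above use a normal critical value (qnorm(.975)). The original article does not state how its CIs were computed, so one possible check, sketched below under that assumption, is to recompute the behavior-type CIs with a t critical value instead; data_means_behavior_subject_t is a new name introduced here.

# recompute behavior-type CIs with a t critical value (a sketch, not the original authors' stated method)
data_means_behavior_subject_t <- data_tidy_filtered_subject %>%
  group_by(behavior_type, question_type) %>% 
  summarize(avg = mean(subject_avg, na.rm=TRUE), 
            sd = sd(subject_avg, na.rm=TRUE), 
            CI_95_lower = avg - qt(.975, df = n() - 1) * (sd/sqrt(n())),
            CI_95_upper = avg + qt(.975, df = n() - 1) * (sd/sqrt(n())))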




#### COMPARE VALUES TO ORIGINAL (*= error) ####

# inauthentic condition: impurity measure: average
reportObject <- reproCheck(reportedValue = "3.56", obtainedValue = round(data_means_behavior_subject$avg[3], 2), valueType = "mean")
## [1] "MATCH for mean. The reported value (3.56) and the obtained value (3.56) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# inauthentic condition: impurity measure: sd
reportObject <- reproCheck(reportedValue = "1.86", obtainedValue = round(data_means_behavior_subject$sd[3], 2), valueType = "sd")
## [1] "MATCH for sd. The reported value (1.86) and the obtained value (1.86) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# *inauthentic condition: impurity measure: CI lower bound
reportObject <- reproCheck(reportedValue = "3.30", obtainedValue = round(data_means_behavior_subject$CI_95_lower[3], 2), valueType = "other")
## [1] "MINOR_ERROR for other. The reported value (3.3) and the obtained value (3.24) differed by 1.82%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# *inauthentic condition: impurity measure: CI upper bound
reportObject <- reproCheck(reportedValue = "3.85", obtainedValue = round(data_means_behavior_subject$CI_95_upper[3], 2), valueType = "other")
## [1] "MINOR_ERROR for other. The reported value (3.85) and the obtained value (3.88) differed by 0.78%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# inauthentic condition: moral self-regard measure: average
reportObject <- reproCheck(reportedValue = "2.90", obtainedValue = round(data_means_behavior_subject$avg[4], 2), valueType = "mean")
## [1] "MATCH for mean. The reported value (2.9) and the obtained value (2.9) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# inauthentic condition: moral self-regard measure: sd
reportObject <- reproCheck(reportedValue = "1.50", obtainedValue = round(data_means_behavior_subject$sd[4], 2), valueType = "sd")
## [1] "MATCH for sd. The reported value (1.5) and the obtained value (1.5) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# *inauthentic condition: moral self-regard measure: CI lower bound
reportObject <- reproCheck(reportedValue = "2.61", obtainedValue = round(data_means_behavior_subject$CI_95_lower[4], 2), valueType = "other")
## [1] "MINOR_ERROR for other. The reported value (2.61) and the obtained value (2.64) differed by 1.15%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# *inauthentic condition: moral self-regard measure: CI upper bound
reportObject <- reproCheck(reportedValue = "3.16", obtainedValue = round(data_means_behavior_subject$CI_95_upper[4], 2), valueType = "other")
## [1] "MINOR_ERROR for other. The reported value (3.16) and the obtained value (3.15) differed by 0.32%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# authentic condition: impurity measure: average
reportObject <- reproCheck(reportedValue = "1.51", obtainedValue = round(data_means_behavior_subject$avg[1], 2), valueType = "mean")
## [1] "MATCH for mean. The reported value (1.51) and the obtained value (1.51) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# authentic condition: impurity measure: sd
reportObject <- reproCheck(reportedValue = "1.29", obtainedValue = round(data_means_behavior_subject$sd[1], 2), valueType = "sd")
## [1] "MATCH for sd. The reported value (1.29) and the obtained value (1.29) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# *authentic condition: impurity measure: CI lower bound
reportObject <- reproCheck(reportedValue = "1.25", obtainedValue = round(data_means_behavior_subject$CI_95_lower[1], 2), valueType = "other")
## [1] "MINOR_ERROR for other. The reported value (1.25) and the obtained value (1.3) differed by 4%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# *authentic condition: impurity measure: CI upper bound
reportObject <- reproCheck(reportedValue = "1.78", obtainedValue = round(data_means_behavior_subject$CI_95_upper[1], 2), valueType = "other")
## [1] "MINOR_ERROR for other. The reported value (1.78) and the obtained value (1.73) differed by 2.81%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# authentic condition: moral self-regard measure: average
reportObject <- reproCheck(reportedValue = "4.99", obtainedValue = round(data_means_behavior_subject$avg[2], 2), valueType = "mean")
## [1] "MATCH for mean. The reported value (4.99) and the obtained value (4.99) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# authentic condition: moral self-regard measure: sd
reportObject <- reproCheck(reportedValue = "1.68", obtainedValue = round(data_means_behavior_subject$sd[2], 2), valueType = "sd")
## [1] "MATCH for sd. The reported value (1.68) and the obtained value (1.68) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# *authentic condition: moral self-regard measure: CI lower bound
reportObject <- reproCheck(reportedValue = "4.72", obtainedValue = round(data_means_behavior_subject$CI_95_lower[2], 2), valueType = "other")
## [1] "MINOR_ERROR for other. The reported value (4.72) and the obtained value (4.71) differed by 0.21%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# *authentic condition: moral self-regard measure: CI upper bound
reportObject <- reproCheck(reportedValue = "5.26", obtainedValue = round(data_means_behavior_subject$CI_95_upper[2], 2), valueType = "other")
## [1] "MINOR_ERROR for other. The reported value (5.26) and the obtained value (5.27) differed by 0.19%. Note that the obtained value was rounded to 2 decimal places to match the reported value."

Inferential statistics

# 2x2 ANOVA on impurity
data_tidy_filtered_impurity <- data_tidy_filtered_subject %>%
  filter(question_type == "Impurity")

aov_impurity <- aov(subject_avg ~ behavior_type * event_type, data = data_tidy_filtered_impurity)
summary(aov_impurity)
##                           Df Sum Sq Mean Sq F value Pr(>F)    
## behavior_type              1  279.2  279.17 110.673 <2e-16 ***
## event_type                 1    0.0    0.01   0.004  0.949    
## behavior_type:event_type   1    4.8    4.83   1.914  0.168    
## Residuals                263  663.4    2.52                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
etaSquared(aov_impurity, type=2, anova=FALSE)
##                                eta.sq  eta.sq.part
## behavior_type            2.941225e-01 2.957931e-01
## event_type               1.086089e-05 1.551023e-05
## behavior_type:event_type 5.095838e-03 7.224806e-03
# 2x2 ANOVA on moral self-regard
data_tidy_filtered_moralSR <- data_tidy_filtered_subject %>%
  filter(question_type == "moralSR")
aov_moralSR <- aov(subject_avg ~ behavior_type * event_type, data = data_tidy_filtered_moralSR)
summary(aov_moralSR)
##                           Df Sum Sq Mean Sq F value Pr(>F)    
## behavior_type              1  292.3  292.29 114.400 <2e-16 ***
## event_type                 1    1.3    1.26   0.492  0.484    
## behavior_type:event_type   1    2.2    2.25   0.879  0.349    
## Residuals                263  672.0    2.56                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
etaSquared(aov_moralSR, type=2, anova=FALSE)
##                               eta.sq eta.sq.part
## behavior_type            0.303250766 0.303980231
## event_type               0.001299439 0.001867952
## behavior_type:event_type 0.002320884 0.003331395
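# Cross-check (a sketch, not part of the original script): partial eta squared for the
# behavior_type effect can also be recovered from the ANOVA sums of squares as
# SS_effect / (SS_effect + SS_residuals). From the tables above, 279.17 / (279.17 + 663.4)
# and 292.29 / (292.29 + 672.0) both come out to roughly .30, consistent with etaSquared().
ss_impurity <- summary(aov_impurity)[[1]][["Sum Sq"]] # rows: behavior_type, event_type, interaction, Residuals
ss_impurity[1] / (ss_impurity[1] + ss_impurity[4])
ss_moralSR <- summary(aov_moralSR)[[1]][["Sum Sq"]]
ss_moralSR[1] / (ss_moralSR[1] + ss_moralSR[4])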
#### COMPARE VALUES TO ORIGINAL (*=error) ####

# impurity measure between inauthentic and authentic conditions: numerator (effect) df
reportObject <- reproCheck(reportedValue = "1", obtainedValue = "1", valueType = "df")
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# impurity measure between inauthentic and authentic conditions: denominator (residual) df
reportObject <- reproCheck(reportedValue = "263", obtainedValue = "263", valueType = "df")
## [1] "MATCH for df. The reported value (263) and the obtained value (263) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# *impurity measure between inauthentic and authentic conditions: F value
reportObject <- reproCheck(reportedValue = "111.06", obtainedValue = "110.67", valueType = "F")
## [1] "MINOR_ERROR for F. The reported value (111.06) and the obtained value (110.67) differed by 0.35%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# impurity measure between inauthentic and authentic conditions: p value
reportObject <- reproCheck(reportedValue = "<.001", obtainedValue = "<2e-16", valueType = "p", eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
# impurity measure between inauthentic and authentic conditions: partial eta squared
reportObject <- reproCheck(reportedValue = ".30", obtainedValue = "2.957931e-01", valueType = "other")
## [1] "MATCH for other. The reported value (0.3) and the obtained value (0.3) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# moral self-regard measure between inauthentic and authentic conditions: numerator (effect) df
reportObject <- reproCheck(reportedValue = "1", obtainedValue = "1", valueType = "df")
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# moral self-regard measure between inauthentic and authentic conditions: denominator (residual) df
reportObject <- reproCheck(reportedValue = "263", obtainedValue = "263", valueType = "df")
## [1] "MATCH for df. The reported value (263) and the obtained value (263) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# *moral self-regard measure between inauthentic and authentic conditions: F value
reportObject <- reproCheck(reportedValue = "115.25", obtainedValue = "114.40", valueType = "F")
## [1] "MINOR_ERROR for F. The reported value (115.25) and the obtained value (114.4) differed by 0.74%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# moral self-regard measure between inauthentic and authentic conditions: p value
reportObject <- reproCheck(reportedValue = "<.001", obtainedValue = "<2e-16", valueType = "p", eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
# *moral self-regard measure between inauthentic and authentic conditions: partial eta squared (maybe a typo?)
reportObject <- reproCheck(reportedValue = ".31", obtainedValue = "0.303980231", valueType = "other")
## [1] "MINOR_ERROR for other. The reported value (0.31) and the obtained value (0.3) differed by 3.23%. Note that the obtained value was rounded to 2 decimal places to match the reported value."

Step 5: Conclusion

The reproducibility check was largely successful.

ANOVAs with impurity and moral self-regard as dependent measures revealed only a significant main effect of type of behavior, as in the original paper. Participants in the inauthentic-behavior condition reported greater feelings of impurity (M = 3.56, SD = 1.86, 95% CI = [3.24, 3.88]) and lower moral self-regard (M = 2.90, SD = 1.50, 95% CI = [2.64, 3.15]) than did participants in the authentic-behavior condition (impurity: M = 1.51, SD = 1.29, 95% CI = [1.30, 1.73]; moral self-regard: M = 4.99, SD = 1.68, 95% CI = [4.71, 5.27]; F(1, 263) = 110.67, p < .001, partial eta squared = .30; and F(1, 263) = 114.40, p < .001, partial eta squared = .30, respectively).

There are minor differences between the original paper and this report for the four confidence intervals, the two F values, and the moral self-regard partial eta squared. The difference in the moral self-regard partial eta squared (original = .31, reproducibility attempt = .30) may have been a typo, but the other differences have no clear causal locus (e.g., original F values of 111.06 for impurity and 115.25 for moral self-regard vs. 110.67 and 114.40 in this reproducibility attempt). These minor differences are unlikely to affect the conclusions of the original paper.

The pilot initially calculated the descriptive statistics over all trials rather than over per-subject averages, which cost some time; the final results reported above are calculated over per-subject averages.

Author_Assistance = FALSE

Insufficient_Information_Errors <- 0 # how many discrete insufficient information issues did you encounter?

# Assess the causal locus (discrete reproducibility issues) of any reproducibility errors. Note that there doesn't necessarily have to be a one-to-one correspondence between discrete reproducibility issues and reproducibility errors. For example, it could be that the original article neglects to mention that a Greenhouse-Geisser correction was applied to ANOVA outcomes. This might result in multiple reproducibility errors, but there is a single causal locus (discrete reproducibility issue).

locus_typo <- 1 # how many discrete issues did you encounter that related to typographical errors?
locus_specification <- 0 # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis <- 0 # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data <- 0 # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified <- 10 # how many discrete issues were there for which you could not identify the cause

# How many of the above issues were resolved through author assistance?
locus_typo_resolved <- NA # how many of the typographical issues were resolved through author assistance?
locus_specification_resolved <- NA # how many of the specification issues were resolved through author assistance?
locus_analysis_resolved <- NA # how many of the original-analysis issues were resolved through author assistance?
locus_data_resolved <- NA # how many of the data-file issues were resolved through author assistance?
locus_unidentified_resolved <- NA # how many of the unidentified issues were resolved through author assistance?

Affects_Conclusion <- FALSE # Do any reproducibility issues encountered appear to affect the conclusions made in the original article? TRUE, FALSE, or NA. This is a subjective judgement, but you should take into account multiple factors, such as the presence/absence of decision errors, the number of target outcomes that could not be reproduced, the type of outcomes that could or could not be reproduced, the difference in magnitude of effect sizes, and the predictions of the specific hypothesis under scrutiny.
reportObject <- reportObject %>%
  filter(dummyRow == FALSE) %>% # remove the dummy row
  select(-dummyRow) %>% # remove dummy row designation
  mutate(articleID = articleID) %>% # add variables to report 
  select(articleID, everything()) # make articleID first column

# decide on final outcome
if(any(reportObject$comparisonOutcome %in% c("MAJOR_ERROR", "DECISION_ERROR")) | Insufficient_Information_Errors > 0){
  finalOutcome <- "Failure without author assistance"
  if(Author_Assistance == T){
    finalOutcome <- "Failure despite author assistance"
  }
}else{
  finalOutcome <- "Success without author assistance"
  if(Author_Assistance == T){
    finalOutcome <- "Success with author assistance"
  }
}

# collate report extra details
reportExtras <- data.frame(articleID, pilotNames, copilotNames, pilotTTC, copilotTTC, pilotStartDate, copilotStartDate, completionDate, Author_Assistance, finalOutcome, Insufficient_Information_Errors, locus_typo, locus_specification, locus_analysis, locus_data, locus_unidentified, locus_typo_resolved, locus_specification_resolved, locus_analysis_resolved, locus_data_resolved, locus_unidentified_resolved)

# save report objects
if(reportType == "pilot"){
  write_csv(reportObject, "pilotReportDetailed.csv")
  write_csv(reportExtras, "pilotReportExtras.csv")
}

if(reportType == "final"){
  write_csv(reportObject, "finalReportDetailed.csv")
  write_csv(reportExtras, "finalReportExtras.csv")
}

Session information

devtools::session_info()
## - Session info ----------------------------------------------------------
##  setting  value                       
##  version  R version 3.5.0 (2018-04-23)
##  os       Windows 10 x64              
##  system   x86_64, mingw32             
##  ui       RTerm                       
##  language (EN)                        
##  collate  English_United States.1252  
##  ctype    English_United States.1252  
##  tz       America/Los_Angeles         
##  date     2018-11-04                  
## 
## - Packages --------------------------------------------------------------
##  package      * version date       lib
##  assertthat     0.2.0   2017-04-11 [1]
##  backports      1.1.2   2017-12-13 [1]
##  bindr          0.1.1   2018-03-13 [1]
##  bindrcpp     * 0.2.2   2018-03-29 [1]
##  broom          0.5.0   2018-07-17 [1]
##  callr          2.0.4   2018-05-15 [1]
##  CARPSreports * 0.1     2018-11-03 [1]
##  cellranger     1.1.0   2016-07-27 [1]
##  cli            1.0.0   2017-11-05 [1]
##  colorspace     1.3-2   2016-12-14 [1]
##  crayon         1.3.4   2017-09-16 [1]
##  desc           1.2.0   2018-05-01 [1]
##  devtools       2.0.1   2018-10-26 [1]
##  digest         0.6.15  2018-01-28 [1]
##  dplyr        * 0.7.4   2017-09-28 [1]
##  evaluate       0.11    2018-07-17 [1]
##  forcats      * 0.3.0   2018-02-19 [1]
##  fs             1.2.6   2018-08-23 [1]
##  ggplot2      * 3.0.0   2018-07-03 [1]
##  glue           1.2.0   2017-10-29 [1]
##  gtable         0.2.0   2016-02-26 [1]
##  haven        * 1.1.2   2018-06-27 [1]
##  hms            0.4.2   2018-03-10 [1]
##  htmltools      0.3.6   2017-04-28 [1]
##  httr           1.3.1   2017-08-20 [1]
##  jsonlite       1.5     2017-06-01 [1]
##  knitr        * 1.20    2018-02-20 [1]
##  lattice        0.20-35 2017-03-25 [2]
##  lazyeval       0.2.1   2017-10-29 [1]
##  lsr          * 0.5     2015-03-02 [1]
##  lubridate      1.7.4   2018-04-11 [1]
##  magrittr       1.5     2014-11-22 [1]
##  memoise        1.1.0   2017-04-21 [1]
##  modelr         0.1.2   2018-05-11 [1]
##  munsell        0.4.3   2016-02-13 [1]
##  nlme           3.1-137 2018-04-07 [1]
##  pillar         1.2.2   2018-04-26 [1]
##  pkgbuild       1.0.2   2018-10-16 [1]
##  pkgconfig      2.0.1   2017-03-21 [1]
##  pkgload        1.0.2   2018-10-29 [1]
##  plyr           1.8.4   2016-06-08 [1]
##  prettyunits    1.0.2   2015-07-13 [1]
##  processx       3.1.0   2018-05-15 [1]
##  purrr        * 0.2.5   2018-05-29 [1]
##  R6             2.2.2   2017-06-17 [1]
##  Rcpp           0.12.16 2018-03-13 [1]
##  readr        * 1.1.1   2017-05-16 [1]
##  readxl       * 1.1.0   2018-04-20 [1]
##  remotes        2.0.2   2018-10-30 [1]
##  rlang          0.2.0   2018-02-20 [1]
##  rmarkdown      1.8     2017-11-17 [1]
##  rprojroot      1.3-2   2018-01-03 [1]
##  rstudioapi     0.7     2017-09-07 [1]
##  rvest          0.3.2   2016-06-17 [1]
##  scales         0.5.0   2017-08-24 [1]
##  sessioninfo    1.1.0   2018-09-25 [1]
##  stringi        1.1.7   2018-03-12 [1]
##  stringr      * 1.3.1   2018-05-10 [1]
##  testthat       2.0.0   2017-12-13 [1]
##  tibble       * 1.4.2   2018-01-22 [1]
##  tidyr        * 0.8.1   2018-05-18 [1]
##  tidyselect     0.2.4   2018-02-26 [1]
##  tidyverse    * 1.2.1   2017-11-14 [1]
##  usethis        1.4.0   2018-08-14 [1]
##  withr          2.1.2   2018-03-15 [1]
##  xml2           1.2.0   2018-01-24 [1]
##  yaml           2.2.0   2018-07-25 [1]
##  source                                     
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  Github (METRICS-CARPS/CARPSreports@89db4a9)
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.0)                             
##  CRAN (R 3.5.1)                             
## 
## [1] C:/Users/Marianna/Documents/R/win-library/3.5
## [2] C:/Program Files/R/R-3.5.0/library