Import the libraries
library(ggplot2)
library(tidyr)
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(agricolae)
library(rstatix)
##
## Attaching package: 'rstatix'
## The following object is masked from 'package:stats':
##
## filter
library(gridExtra)
##
## Attaching package: 'gridExtra'
## The following object is masked from 'package:dplyr':
##
## combine
Import the data
data("mpg")
head(mpg, 3)
## # A tibble: 3 × 11
## manufacturer model displ year cyl trans drv cty hwy fl class
## <chr> <chr> <dbl> <int> <int> <chr> <chr> <int> <int> <chr> <chr>
## 1 audi a4 1.8 1999 4 auto(l5) f 18 29 p compa…
## 2 audi a4 1.8 1999 4 manual(m5) f 21 29 p compa…
## 3 audi a4 2 2008 4 manual(m6) f 20 31 p compa…
dim(mpg)
## [1] 234 11
str(mpg)
## tibble [234 × 11] (S3: tbl_df/tbl/data.frame)
## $ manufacturer: chr [1:234] "audi" "audi" "audi" "audi" ...
## $ model : chr [1:234] "a4" "a4" "a4" "a4" ...
## $ displ : num [1:234] 1.8 1.8 2 2 2.8 2.8 3.1 1.8 1.8 2 ...
## $ year : int [1:234] 1999 1999 2008 2008 1999 1999 2008 1999 1999 2008 ...
## $ cyl : int [1:234] 4 4 4 4 6 6 6 4 4 4 ...
## $ trans : chr [1:234] "auto(l5)" "manual(m5)" "manual(m6)" "auto(av)" ...
## $ drv : chr [1:234] "f" "f" "f" "f" ...
## $ cty : int [1:234] 18 21 20 21 16 18 18 18 16 20 ...
## $ hwy : int [1:234] 29 29 31 30 26 26 27 26 25 28 ...
## $ fl : chr [1:234] "p" "p" "p" "p" ...
## $ class : chr [1:234] "compact" "compact" "compact" "compact" ...
colnames(mpg)
## [1] "manufacturer" "model" "displ" "year" "cyl"
## [6] "trans" "drv" "cty" "hwy" "fl"
## [11] "class"
mpg$class <- as.factor(mpg$class)
str(mpg)
## tibble [234 × 11] (S3: tbl_df/tbl/data.frame)
## $ manufacturer: chr [1:234] "audi" "audi" "audi" "audi" ...
## $ model : chr [1:234] "a4" "a4" "a4" "a4" ...
## $ displ : num [1:234] 1.8 1.8 2 2 2.8 2.8 3.1 1.8 1.8 2 ...
## $ year : int [1:234] 1999 1999 2008 2008 1999 1999 2008 1999 1999 2008 ...
## $ cyl : int [1:234] 4 4 4 4 6 6 6 4 4 4 ...
## $ trans : chr [1:234] "auto(l5)" "manual(m5)" "manual(m6)" "auto(av)" ...
## $ drv : chr [1:234] "f" "f" "f" "f" ...
## $ cty : int [1:234] 18 21 20 21 16 18 18 18 16 20 ...
## $ hwy : int [1:234] 29 29 31 30 26 26 27 26 25 28 ...
## $ fl : chr [1:234] "p" "p" "p" "p" ...
## $ class : Factor w/ 7 levels "2seater","compact",..: 2 2 2 2 2 2 2 2 2 2 ...
dat <- mpg
dat[1:15, ]
## # A tibble: 15 × 11
## manufacturer model displ year cyl trans drv cty hwy fl class
## <chr> <chr> <dbl> <int> <int> <chr> <chr> <int> <int> <chr> <fct>
## 1 audi a4 1.8 1999 4 auto… f 18 29 p comp…
## 2 audi a4 1.8 1999 4 manu… f 21 29 p comp…
## 3 audi a4 2 2008 4 manu… f 20 31 p comp…
## 4 audi a4 2 2008 4 auto… f 21 30 p comp…
## 5 audi a4 2.8 1999 6 auto… f 16 26 p comp…
## 6 audi a4 2.8 1999 6 manu… f 18 26 p comp…
## 7 audi a4 3.1 2008 6 auto… f 18 27 p comp…
## 8 audi a4 quattro 1.8 1999 4 manu… 4 18 26 p comp…
## 9 audi a4 quattro 1.8 1999 4 auto… 4 16 25 p comp…
## 10 audi a4 quattro 2 2008 4 manu… 4 20 28 p comp…
## 11 audi a4 quattro 2 2008 4 auto… 4 19 27 p comp…
## 12 audi a4 quattro 2.8 1999 6 auto… 4 15 25 p comp…
## 13 audi a4 quattro 2.8 1999 6 manu… 4 17 25 p comp…
## 14 audi a4 quattro 3.1 2008 6 auto… 4 17 25 p comp…
## 15 audi a4 quattro 3.1 2008 6 manu… 4 15 25 p comp…
dat[, 1:3]
## # A tibble: 234 × 3
## manufacturer model displ
## <chr> <chr> <dbl>
## 1 audi a4 1.8
## 2 audi a4 1.8
## 3 audi a4 2
## 4 audi a4 2
## 5 audi a4 2.8
## 6 audi a4 2.8
## 7 audi a4 3.1
## 8 audi a4 quattro 1.8
## 9 audi a4 quattro 1.8
## 10 audi a4 quattro 2
## # ℹ 224 more rows
dat[, c(1,3, 6)]
## # A tibble: 234 × 3
## manufacturer displ trans
## <chr> <dbl> <chr>
## 1 audi 1.8 auto(l5)
## 2 audi 1.8 manual(m5)
## 3 audi 2 manual(m6)
## 4 audi 2 auto(av)
## 5 audi 2.8 auto(l5)
## 6 audi 2.8 manual(m5)
## 7 audi 3.1 auto(av)
## 8 audi 1.8 manual(m5)
## 9 audi 1.8 auto(l5)
## 10 audi 2 manual(m6)
## # ℹ 224 more rows
dat[c(1,3,5:7), c(2,5,8)]
## # A tibble: 5 × 3
## model cyl cty
## <chr> <int> <int>
## 1 a4 4 18
## 2 a4 4 20
## 3 a4 6 16
## 4 a4 6 18
## 5 a4 6 18
dat1 <- dat[c(1,3,5:7), c(2,5,8)]
colnames(dat)
## [1] "manufacturer" "model" "displ" "year" "cyl"
## [6] "trans" "drv" "cty" "hwy" "fl"
## [11] "class"
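The same subsetting can be written with dplyr verbs; an equivalent of dat[c(1,3,5:7), c(2,5,8)] is sketched below (an alternative, not part of the original code).
## dplyr equivalent of the bracket indexing above (alternative)
dat |>
dplyr::select(model, cyl, cty) |> ## columns 2, 5 and 8
dplyr::slice(c(1, 3, 5:7)) ## rows 1, 3 and 5-7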
One-sample t-test
dat |> t_test(hwy~1, mu=30)
## # A tibble: 1 × 7
## .y. group1 group2 n statistic df p
## * <chr> <chr> <chr> <int> <dbl> <dbl> <dbl>
## 1 hwy 1 null model 234 -16.9 233 3.34e-42
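For a cross-check, base R's t.test() runs the same one-sample test (an aside, not in the original).
## Base-R equivalent of the one-sample t-test above
t.test(dat$hwy, mu = 30)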
Calculate mean
mean(dat$hwy)
## [1] 23.44017
Check normality
dat |> shapiro_test(hwy)
## # A tibble: 1 × 3
## variable statistic p
## <chr> <dbl> <dbl>
## 1 hwy 0.959 0.00000300
## p-value < 0.05 indicates non-normality
hist(dat$hwy)
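A normal QQ plot is another quick visual check of normality (an extra check, not in the original).
## QQ plot of hwy against a normal distribution
qqnorm(dat$hwy)
qqline(dat$hwy, col = "red")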

Wilcoxon signed-rank test
dat |> wilcox_test(cyl~1, mu=4)
## # A tibble: 1 × 6
## .y. group1 group2 n statistic p
## * <chr> <chr> <chr> <int> <dbl> <dbl>
## 1 cyl 1 null model 234 11781 2.13e-28
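Base R's wilcox.test() gives the equivalent one-sample test (an aside).
## Base-R equivalent of the one-sample Wilcoxon test
wilcox.test(dat$cyl, mu = 4)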
Two-sample t-test
colnames(dat)
## [1] "manufacturer" "model" "displ" "year" "cyl"
## [6] "trans" "drv" "cty" "hwy" "fl"
## [11] "class"
Convert year to factor
dat$year <- as.factor(dat$year)
dat |> t_test(hwy~year)
## # A tibble: 1 × 8
## .y. group1 group2 n1 n2 statistic df p
## * <chr> <chr> <chr> <int> <int> <dbl> <dbl> <dbl>
## 1 hwy 1999 2008 117 117 -0.0329 232. 0.974
dat |> group_by(year) |>
shapiro_test(hwy)
## # A tibble: 2 × 4
## year variable statistic p
## <fct> <chr> <dbl> <dbl>
## 1 1999 hwy 0.910 0.000000822
## 2 2008 hwy 0.970 0.0109
dat |> t_test(hwy~year, var.equal = T)
## # A tibble: 1 × 8
## .y. group1 group2 n1 n2 statistic df p
## * <chr> <chr> <chr> <int> <int> <dbl> <dbl> <dbl>
## 1 hwy 1999 2008 117 117 -0.0329 232 0.974
dat |> t_test(hwy~year)
## # A tibble: 1 × 8
## .y. group1 group2 n1 n2 statistic df p
## * <chr> <chr> <chr> <int> <int> <dbl> <dbl> <dbl>
## 1 hwy 1999 2008 117 117 -0.0329 232. 0.974
dat |> wilcox_test(hwy~year)
## # A tibble: 1 × 7
## .y. group1 group2 n1 n2 statistic p
## * <chr> <chr> <chr> <int> <int> <dbl> <dbl>
## 1 hwy 1999 2008 117 117 6526 0.538
## Suppose we run a paired-sample t-test (for illustration only)
dat |> t_test(hwy~year, paired = T)
## # A tibble: 1 × 8
## .y. group1 group2 n1 n2 statistic df p
## * <chr> <chr> <chr> <int> <int> <dbl> <dbl> <dbl>
## 1 hwy 1999 2008 117 117 -0.0399 116 0.968
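A p-value alone says nothing about the size of the difference; rstatix also provides cohens_d() with the same formula interface (a supplementary step, not in the original).
## Effect size for the 1999 vs 2008 comparison (supplementary)
dat |> cohens_d(hwy ~ year)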
Import the built-in iris data from R
data("iris")
df <- iris
Show the first 3 rows
head(df, 3)
## Sepal.Length Sepal.Width Petal.Length Petal.Width Species
## 1 5.1 3.5 1.4 0.2 setosa
## 2 4.9 3.0 1.4 0.2 setosa
## 3 4.7 3.2 1.3 0.2 setosa
Count the number of rows and columns
dim(df)
## [1] 150 5
Check types of variables
str(df)
## 'data.frame': 150 obs. of 5 variables:
## $ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
## $ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
## $ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
## $ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
## $ Species : Factor w/ 3 levels "setosa","versicolor",..: 1 1 1 1 1 1 1 1 1 1 ...
Check column names
colnames(df)
## [1] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
Summarize the data
summary(df)
## Sepal.Length Sepal.Width Petal.Length Petal.Width
## Min. :4.300 Min. :2.000 Min. :1.000 Min. :0.100
## 1st Qu.:5.100 1st Qu.:2.800 1st Qu.:1.600 1st Qu.:0.300
## Median :5.800 Median :3.000 Median :4.350 Median :1.300
## Mean :5.843 Mean :3.057 Mean :3.758 Mean :1.199
## 3rd Qu.:6.400 3rd Qu.:3.300 3rd Qu.:5.100 3rd Qu.:1.800
## Max. :7.900 Max. :4.400 Max. :6.900 Max. :2.500
## Species
## setosa :50
## versicolor:50
## virginica :50
##
##
##
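rstatix can also summarize by group in a single call (shown as an aside).
## Group-wise mean and SD with rstatix (aside)
df %>% group_by(Species) %>%
get_summary_stats(Sepal.Length, type = "mean_sd")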
Convert the Species variable to character
df$Species <- as.character(df$Species)
Convert it back to factor
df$Species <- as.factor(df$Species)
Do ANOVA (general linear model)
colnames(df)
## [1] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
## option 1
ano <-lm(Sepal.Length~Species, df)
anova(ano) # Show the result
## Analysis of Variance Table
##
## Response: Sepal.Length
## Df Sum Sq Mean Sq F value Pr(>F)
## Species 2 63.212 31.606 119.26 < 2.2e-16 ***
## Residuals 147 38.956 0.265
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Do ANOVA (analysis of variance)
## option 2
ano1 <-aov(Sepal.Length~Species, df)
summary(ano1)
## Df Sum Sq Mean Sq F value Pr(>F)
## Species 2 63.21 31.606 119.3 <2e-16 ***
## Residuals 147 38.96 0.265
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
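rstatix offers anova_test() as a tidier wrapper around the same model; a third option, shown here only for reference.
## option 3 (rstatix wrapper, for reference)
df %>% anova_test(Sepal.Length ~ Species)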
Check the model diagnostics (residual plots)
library(mosaic)
## Registered S3 method overwritten by 'mosaic':
## method from
## fortify.SpatialPolygonsDataFrame ggplot2
##
## The 'mosaic' package masks several functions from core packages in order to add
## additional features. The original behavior of these functions should not be affected by this.
##
## Attaching package: 'mosaic'
## The following object is masked from 'package:Matrix':
##
## mean
## The following objects are masked from 'package:rstatix':
##
## cor_test, prop_test, t_test
## The following objects are masked from 'package:dplyr':
##
## count, do, tally
## The following object is masked from 'package:ggplot2':
##
## stat
## The following objects are masked from 'package:stats':
##
## binom.test, cor, cor.test, cov, fivenum, IQR, median, prop.test,
## quantile, sd, t.test, var
## The following objects are masked from 'package:base':
##
## max, mean, min, prod, range, sample, sum
mplot(ano, which=c(1:4))
## `geom_smooth()` using formula = 'y ~ x'
## (repeated loess warnings about pseudoinverse use, small neighborhood radius,
## and near singularities omitted for brevity)

Check normality
norm <- df %>% dplyr::group_by(Species) %>%
shapiro_test(Sepal.Length)
norm # Show the result
## # A tibble: 3 × 4
## Species variable statistic p
## <fct> <chr> <dbl> <dbl>
## 1 setosa Sepal.Length 0.978 0.460
## 2 versicolor Sepal.Length 0.978 0.465
## 3 virginica Sepal.Length 0.971 0.258
Check homogeneity
hom <- bartlett.test(Sepal.Length~Species, df)
hom # Show the result
##
## Bartlett test of homogeneity of variances
##
## data: Sepal.Length by Species
## Bartlett's K-squared = 16.006, df = 2, p-value = 0.0003345
## If p-value < 0.05, it means unequal variances
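Levene's test is a common alternative that is less sensitive to non-normality (added as an aside).
## Levene's test on the same data (aside)
df %>% levene_test(Sepal.Length ~ Species)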
One-way ANOVA (Welch's test)
## For unequal variances
onet<-oneway.test(Sepal.Length~Species,
df, var.equal = F)
print(onet)
##
## One-way analysis of means (not assuming equal variances)
##
## data: Sepal.Length and Species
## F = 138.91, num df = 2.000, denom df = 92.211, p-value < 2.2e-16
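When variances are unequal, the Games-Howell post-hoc test is a natural companion to Welch's ANOVA (a supplementary step, not in the original).
## Games-Howell post-hoc test (pairs with Welch's ANOVA)
df %>% games_howell_test(Sepal.Length ~ Species)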
Do post-hoc test
## A post-hoc test shows which groups differ when the ANOVA is significant
colnames(df)
## [1] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
lsd <-LSD.test(ano, "Species", p.adj = "bonferroni",
console = T)
##
## Study: ano ~ "Species"
##
## LSD t Test for Sepal.Length
## P value adjustment method: bonferroni
##
## Mean Square Error: 0.2650082
##
## Species, means and individual ( 95 %) CI
##
## Sepal.Length std r se LCL UCL Min Max Q25
## setosa 5.006 0.3524897 50 0.07280222 4.862126 5.149874 4.3 5.8 4.800
## versicolor 5.936 0.5161711 50 0.07280222 5.792126 6.079874 4.9 7.0 5.600
## virginica 6.588 0.6358796 50 0.07280222 6.444126 6.731874 4.9 7.9 6.225
## Q50 Q75
## setosa 5.0 5.2
## versicolor 5.9 6.3
## virginica 6.5 6.9
##
## Alpha: 0.05 ; DF Error: 147
## Critical Value of t: 2.421686
##
## Minimum Significant Difference: 0.2493317
##
## Treatments with the same letter are not significantly different.
##
## Sepal.Length groups
## virginica 6.588 a
## versicolor 5.936 b
## setosa 5.006 c
lsd
## $statistics
## MSerror Df Mean CV t.value MSD
## 0.2650082 147 5.843333 8.809859 2.421686 0.2493317
##
## $parameters
## test p.ajusted name.t ntr alpha
## Fisher-LSD bonferroni Species 3 0.05
##
## $means
## Sepal.Length std r se LCL UCL Min Max Q25
## setosa 5.006 0.3524897 50 0.07280222 4.862126 5.149874 4.3 5.8 4.800
## versicolor 5.936 0.5161711 50 0.07280222 5.792126 6.079874 4.9 7.0 5.600
## virginica 6.588 0.6358796 50 0.07280222 6.444126 6.731874 4.9 7.9 6.225
## Q50 Q75
## setosa 5.0 5.2
## versicolor 5.9 6.3
## virginica 6.5 6.9
##
## $comparison
## NULL
##
## $groups
## Sepal.Length groups
## virginica 6.588 a
## versicolor 5.936 b
## setosa 5.006 c
##
## attr(,"class")
## [1] "group"
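Tukey's HSD applied to the aov fit is a widely used alternative to Fisher's LSD (shown for comparison only).
## Tukey's HSD on the aov model (comparison with the LSD result)
TukeyHSD(ano1)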
Calculate group means
colnames(df)
## [1] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
mean1 <- df %>% dplyr::group_by(Species) %>%
summarise(Length =mean(Sepal.Length),
SD = sd(Sepal.Length),
SE = SD/sqrt(n()))
mean1 # Show the result
## # A tibble: 3 × 4
## Species Length SD SE
## <fct> <dbl> <dbl> <dbl>
## 1 setosa 5.01 0.352 0.0498
## 2 versicolor 5.94 0.516 0.0730
## 3 virginica 6.59 0.636 0.0899
Plot a barplot with error bars
ggplot(mean1, aes(Species, Length)) +
geom_col(aes(fill=Species)) +
geom_errorbar(aes(ymin=Length-SD,
ymax=Length+SD),
width=0.1, color="red")

Reshape the data to long format
head(df, 3)
## Sepal.Length Sepal.Width Petal.Length Petal.Width Species
## 1 5.1 3.5 1.4 0.2 setosa
## 2 4.9 3.0 1.4 0.2 setosa
## 3 4.7 3.2 1.3 0.2 setosa
df1 <- df %>% tidyr::gather(key="part",
value="value",
1:4)
Check the first 3 rows
head(df1, 3)
## Species part value
## 1 setosa Sepal.Length 5.1
## 2 setosa Sepal.Length 4.9
## 3 setosa Sepal.Length 4.7
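gather() is superseded in current tidyr; pivot_longer() produces the same long format (an equivalent, for reference).
## Equivalent reshape with the newer pivot_longer() (reference only)
df %>% tidyr::pivot_longer(cols = 1:4,
names_to = "part",
values_to = "value")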
Count the cases
df1 %>% freq_table(part)
## # A tibble: 4 × 3
## part n prop
## <chr> <int> <dbl>
## 1 Petal.Length 150 25
## 2 Petal.Width 150 25
## 3 Sepal.Length 150 25
## 4 Sepal.Width 150 25
Create jitter and box plots by part
p <- ggplot(df1, aes(Species, value)) +
geom_jitter(aes(color=Species,shape=Species), width = 0.1)+
facet_wrap(~part)+
theme(legend.position = "None")
p # Show the graph

p1 <-p +
scale_color_manual(values=c("#ff66ff",
"#00ff00", "#ff9933"))+
scale_shape_manual(values=c(3, 6, 10))
p1 + stat_summary(fun = "mean", pch = 3, color = "blue")
## Warning: Removed 3 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Removed 3 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Removed 3 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Removed 3 rows containing missing values or values outside the scale range
## (`geom_segment()`).

# Second option
ggplot(df1, aes(part, value)) +
geom_boxplot(aes(fill=Species))+
coord_flip()+ labs(y="Length (mm)", x="")

Boxplot with mean
colnames(df)
## [1] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
pp <-df %>% ggplot(aes(Species, Sepal.Length)) +
geom_boxplot(width=0.5) +
stat_summary(fun = "mean", pch = 3, color = "blue")
pp + annotate("text", 1, 7, label= "go")
## Warning: Removed 3 rows containing missing values or values outside the scale range
## (`geom_segment()`).

x=c("setosa", "versicolor", "virginica")
y=c(6, 7.1, 8.1)
da <-data.frame(x, y)
pp + geom_text(data=da,
aes(x=x, y=y,label=c("c", "b", "a")))
## Warning: Removed 3 rows containing missing values or values outside the scale range
## (`geom_segment()`).

Two-way ANOVA
## Import existing data
data("ToothGrowth")
newdata <- ToothGrowth
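The model object used in the post-hoc call below was not shown. An additive supp + dose fit with dose as a factor is consistent with the error df (56) and mean square error (16.67) reported in that output, so a plausible reconstruction is:
## Reconstructed model fit (inferred from the post-hoc output below)
newdata$dose <- as.factor(newdata$dose) ## dose as a 3-level factor
fit <- aov(len ~ supp + dose, data = newdata) ## additive two-way model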
Perform the post-hoc test
LSD.test(fit, c("supp", "dose"), p.adj = "bonferroni",
console = T)
##
## Study: fit ~ c("supp", "dose")
##
## LSD t Test for len
## P value adjustment method: bonferroni
##
## Mean Square Error: 16.67205
##
## supp:dose, means and individual ( 95 %) CI
##
## len std r se LCL UCL Min Max Q25 Q50
## OJ:0.5 13.23 4.459709 10 1.291203 10.64341 15.81659 8.2 21.5 9.700 12.25
## OJ:1 22.70 3.910953 10 1.291203 20.11341 25.28659 14.5 27.3 20.300 23.45
## OJ:2 26.06 2.655058 10 1.291203 23.47341 28.64659 22.4 30.9 24.575 25.95
## VC:0.5 7.98 2.746634 10 1.291203 5.39341 10.56659 4.2 11.5 5.950 7.15
## VC:1 16.77 2.515309 10 1.291203 14.18341 19.35659 13.6 22.5 15.275 16.50
## VC:2 26.14 4.797731 10 1.291203 23.55341 28.72659 18.5 33.9 23.375 25.95
## Q75
## OJ:0.5 16.175
## OJ:1 25.650
## OJ:2 27.075
## VC:0.5 10.900
## VC:1 17.300
## VC:2 28.800
##
## Alpha: 0.05 ; DF Error: 56
## Critical Value of t: 3.066341
##
## Minimum Significant Difference: 5.599252
##
## Treatments with the same letter are not significantly different.
##
## len groups
## VC:2 26.14 a
## OJ:2 26.06 a
## OJ:1 22.70 a
## VC:1 16.77 b
## OJ:0.5 13.23 bc
## VC:0.5 7.98 c
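An interaction plot gives a quick view of the supp-by-dose pattern behind these letter groupings (an optional visual, not in the original).
## Optional: visualize the supp x dose pattern
with(newdata, interaction.plot(dose, supp, len,
xlab = "Dose", ylab = "Tooth length"))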