DESIGNS OF EXPERIMENTS

Load the following library for the design of experiments analyses that follow.

library(doebioresearch)
## Warning: package 'doebioresearch' was built under R version 4.3.1

Completely Randomized Design

data1<-data.frame(Treatments=c("T1","T2","T3","T4","T5","T6","T7","T1","T2","T3",
                               "T4","T5","T6","T7","T1","T2","T3","T4","T5","T6","T7"),
                 yield=c(25,21,21,18,25,28,24,25,24,24,16,21,20,17,16,19,14,15,13,11,25),
                 height=c(130,120,125,135,139,140,145,136,129,135,150,152,
                          140,148,130,135,145,160,145,130,160))
head(data1,5)
  Treatments yield height
1         T1    25    130
2         T2    21    120
3         T3    21    125
4         T4    18    135
5         T5    25    139

CRD analysis with LSD test for yield only

crd(data1[2],data1$Treatments,1)
$yield
$yield[[1]]
$yield[[1]][[1]]
Analysis of Variance Table

Response: data2
          Df Sum Sq Mean Sq F value Pr(>F)
trt        6  70.48  11.746  0.4312 0.8461
Residuals 14 381.33  27.238               

$yield[[1]][[2]]
[1] "All the treatment means are same so dont go for any multiple comparison test"

$yield[[1]][[3]]
[1] "R Square 0.156"

$yield[[1]][[4]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.95693, p-value = 0.4565


$yield[[1]][[5]]
[1] "Normality assumption is not violated"

$yield[[1]][[6]]
[1] "SEm 3.0132 , SEd 4.2613"

$yield[[1]][[7]]
$yield[[1]][[7]][[1]]
  MSerror Df     Mean       CV  t.value      LSD
  27.2381 14 20.09524 25.97139 2.144787 9.139593

$yield[[1]][[7]][[2]]
      data2 groups
T1 22.00000      a
T7 22.00000      a
T2 21.33333      a
T3 19.66667      a
T5 19.66667      a
T6 19.66667      a
T4 16.33333      a
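
As a quick check, the SEm, SEd, and LSD reported above follow directly from the error mean square (MSerror = 27.2381), the number of replications per treatment (r = 3), and the tabled t value:

sqrt(27.2381 / 3)                    # SEm = sqrt(MSE / r), approx. 3.0132
sqrt(2 * 27.2381 / 3)                # SEd = sqrt(2 * MSE / r), approx. 4.2613
2.144787 * sqrt(2 * 27.2381 / 3)     # LSD = t(0.05, 14) * SEd, approx. 9.14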

CRD analysis with LSD test for both yield and height

crd(data1[2:3],data1$Treatments,1)
$yield
$yield[[1]]
$yield[[1]][[1]]
Analysis of Variance Table

Response: data2
          Df Sum Sq Mean Sq F value Pr(>F)
trt        6  70.48  11.746  0.4312 0.8461
Residuals 14 381.33  27.238               

$yield[[1]][[2]]
[1] "All the treatment means are same so dont go for any multiple comparison test"

$yield[[1]][[3]]
[1] "R Square 0.156"

$yield[[1]][[4]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.95693, p-value = 0.4565


$yield[[1]][[5]]
[1] "Normality assumption is not violated"

$yield[[1]][[6]]
[1] "SEm 3.0132 , SEd 4.2613"

$yield[[1]][[7]]
$yield[[1]][[7]][[1]]
  MSerror Df     Mean       CV  t.value      LSD
  27.2381 14 20.09524 25.97139 2.144787 9.139593

$yield[[1]][[7]][[2]]
      data2 groups
T1 22.00000      a
T7 22.00000      a
T2 21.33333      a
T3 19.66667      a
T5 19.66667      a
T6 19.66667      a
T4 16.33333      a




$height
$height[[1]]
$height[[1]][[1]]
Analysis of Variance Table

Response: data2
          Df Sum Sq Mean Sq F value Pr(>F)  
trt        6 1383.2 230.540   3.463  0.026 *
Residuals 14  932.0  66.571                 
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$height[[1]][[2]]
[1] "The treatment means of one or more treatments are not same, so go for multiple comparison test"

$height[[1]][[3]]
[1] "R Square 0.597"

$height[[1]][[4]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.98167, p-value = 0.9471


$height[[1]][[5]]
[1] "Normality assumption is not violated"

$height[[1]][[6]]
[1] "SEm 4.7107 , SEd 6.6619"

$height[[1]][[7]]
$height[[1]][[7]][[1]]
   MSerror Df     Mean       CV  t.value      LSD
  66.57143 14 139.4762 5.849838 2.144787 14.28836

$height[[1]][[7]][[2]]
      data2 groups
T7 151.0000      a
T4 148.3333     ab
T5 145.3333    abc
T6 136.6667    bcd
T3 135.0000    bcd
T1 132.0000     cd
T2 128.0000      d

Randomized Complete Block Design

data<-data.frame(GFY=c(16,13,14,16,16,17,16,17,16,16,17,16,15,15,15,13,15,14,
                       16,14,15,14,15,17,18,15,15,15,14,14,14,14,15,15,13,15,
                       14,14,13,13,13,12,15,12,15),
                 DMY=c(5,5,6,5,6,7,6,8,6,9,8,7,5,5,5,4,6,5,8,5,5,5,4,6,6,5,5,
                       6,6,6,5,5,5,5,5,6,5,5,5,4,5,4,5,5,5),
                 Rep=rep(c("R1","R2","R3"),each=15),
                 Trt=rep(c("T1","T2","T3","T4","T5","T6","T7","T8","T9","T10",
                           "T11","T12","T13","T14","T15"),3))
head(data,5)
  GFY DMY Rep Trt
1  16   5  R1  T1
2  13   5  R1  T2
3  14   6  R1  T3
4  16   5  R1  T4
5  16   6  R1  T5

RCBD analysis with Duncan's test for GFY only

rcbd(data[1],data$Trt,data$Rep,2)
$GFY
$GFY[[1]]
Analysis of Variance Table

Response: data2
            Df Sum Sq Mean Sq F value    Pr(>F)    
replication  2 26.533 13.2667  9.4122 0.0007475 ***
trt         14 17.200  1.2286  0.8716 0.5944508    
Residuals   28 39.467  1.4095                      
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$GFY[[2]]
[1] "R Square 0.526"

$GFY[[3]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.98487, p-value = 0.8148


$GFY[[4]]
[1] "Normality assumption is not violated"

$GFY[[5]]
[1] "SEm 0.6854 , SEd 0.9694"

$GFY[[6]]
[1] "All the treatment means are same so dont go for any multiple comparison test"

$GFY[[7]]
$GFY[[7]][[1]]
   MSerror Df Mean       CV
  1.409524 28 14.8 8.021849

$GFY[[7]][[2]]
      Table CriticalRange
2  2.896885      1.985669
3  3.043847      2.086404
4  3.138859      2.151530
5  3.206478      2.197879
6  3.257369      2.232763
7  3.297090      2.259989
8  3.328885      2.281783
9  3.354805      2.299550
10 3.376223      2.314231
11 3.394100      2.326485
12 3.409132      2.336788
13 3.421839      2.345499
14 3.432619      2.352887
15 3.441780      2.359167

$GFY[[7]][[3]]
       data2 groups
T10 15.66667      a
T4  15.66667      a
T6  15.66667      a
T8  15.33333      a
T9  15.33333      a
T11 15.00000      a
T13 15.00000      a
T15 14.66667      a
T7  14.66667      a
T1  14.33333      a
T12 14.33333      a
T3  14.33333      a
T5  14.33333      a
T2  14.00000      a
T14 13.66667      a
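
The Duncan critical ranges above are the tabled studentized values multiplied by the standard error of a treatment mean, sqrt(MSerror / r) with r = 3 replications. A quick check against the table:

sqrt(1.409524 / 3)                # SE of a treatment mean, approx. 0.6854 (the SEm above)
2.896885 * sqrt(1.409524 / 3)     # critical range for 2 means, approx. 1.986
3.441780 * sqrt(1.409524 / 3)     # critical range for 15 means, approx. 2.359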

RCBD analysis with Duncan's test for both GFY and DMY

rcbd(data[1:2],data$Trt,data$Rep,2)
$GFY
$GFY[[1]]
Analysis of Variance Table

Response: data2
            Df Sum Sq Mean Sq F value    Pr(>F)    
replication  2 26.533 13.2667  9.4122 0.0007475 ***
trt         14 17.200  1.2286  0.8716 0.5944508    
Residuals   28 39.467  1.4095                      
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$GFY[[2]]
[1] "R Square 0.526"

$GFY[[3]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.98487, p-value = 0.8148


$GFY[[4]]
[1] "Normality assumption is not violated"

$GFY[[5]]
[1] "SEm 0.6854 , SEd 0.9694"

$GFY[[6]]
[1] "All the treatment means are same so dont go for any multiple comparison test"

$GFY[[7]]
$GFY[[7]][[1]]
   MSerror Df Mean       CV
  1.409524 28 14.8 8.021849

$GFY[[7]][[2]]
      Table CriticalRange
2  2.896885      1.985669
3  3.043847      2.086404
4  3.138859      2.151530
5  3.206478      2.197879
6  3.257369      2.232763
7  3.297090      2.259989
8  3.328885      2.281783
9  3.354805      2.299550
10 3.376223      2.314231
11 3.394100      2.326485
12 3.409132      2.336788
13 3.421839      2.345499
14 3.432619      2.352887
15 3.441780      2.359167

$GFY[[7]][[3]]
       data2 groups
T10 15.66667      a
T4  15.66667      a
T6  15.66667      a
T8  15.33333      a
T9  15.33333      a
T11 15.00000      a
T13 15.00000      a
T15 14.66667      a
T7  14.66667      a
T1  14.33333      a
T12 14.33333      a
T3  14.33333      a
T5  14.33333      a
T2  14.00000      a
T14 13.66667      a



$DMY
$DMY[[1]]
Analysis of Variance Table

Response: data2
            Df Sum Sq Mean Sq F value  Pr(>F)  
replication  2 12.133  6.0667  5.0157 0.01375 *
trt         14  7.200  0.5143  0.4252 0.95260  
Residuals   28 33.867  1.2095                  
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$DMY[[2]]
[1] "R Square 0.363"

$DMY[[3]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.97393, p-value = 0.3985


$DMY[[4]]
[1] "Normality assumption is not violated"

$DMY[[5]]
[1] "SEm 0.635 , SEd 0.898"

$DMY[[6]]
[1] "All the treatment means are same so dont go for any multiple comparison test"

$DMY[[7]]
$DMY[[7]][[1]]
   MSerror Df     Mean       CV
  1.209524 28 5.533333 19.87561

$DMY[[7]][[2]]
      Table CriticalRange
2  2.896885      1.839407
3  3.043847      1.932722
4  3.138859      1.993051
5  3.206478      2.035986
6  3.257369      2.068300
7  3.297090      2.093521
8  3.328885      2.113710
9  3.354805      2.130168
10 3.376223      2.143768
11 3.394100      2.155119
12 3.409132      2.164663
13 3.421839      2.172732
14 3.432619      2.179577
15 3.441780      2.185394

$DMY[[7]][[3]]
       data2 groups
T10 6.333333      a
T11 6.000000      a
T4  6.000000      a
T6  6.000000      a
T8  5.666667      a
T9  5.666667      a
T12 5.333333      a
T13 5.333333      a
T14 5.333333      a
T15 5.333333      a
T2  5.333333      a
T3  5.333333      a
T5  5.333333      a
T7  5.333333      a
T1  4.666667      a

Complete Factorial Experiment

data(factorialdata)
head(factorialdata,10)
   Replication Nitrogen Phosphorus Potassium Yield Plant_Height
1            1       n0         p0        k0   107           10
2            1       n0         p0        k1   120           12
3            1       n0         p1        k0   117           11
4            1       n0         p1        k1   112           16
5            1       n1         p0        k0   112           10
6            1       n1         p0        k1   132           15
7            1       n1         p1        k0   120           10
8            1       n1         p1        k1   115           12
9            2       n0         p0        k0   110            8
10           2       n0         p0        k1   125            9

Analysis of a Factorial Completely Randomized Design with Duncan's test for Yield only

fcrd2fact(factorialdata[5],factorialdata$Nitrogen,factorialdata$Phosphorus,2)
$Yield
$Yield[[1]]
Analysis of Variance Table

Response: dependent.var
              Df Sum Sq Mean Sq F value Pr(>F)
fact.A         1   24.0  24.000  0.1386 0.7136
fact.B         1  112.7 112.667  0.6505 0.4294
fact.A:fact.B  1   42.7  42.667  0.2463 0.6251
Residuals     20 3464.0 173.200               

$Yield[[2]]
[1] "R Square 0.049"

$Yield[[3]]
[1] "SEm of A: 3.799 , SEd of A: 5.373 , SEm of B: 3.799 , SEd of B: 5.373 , SEm of AB: 5.373 , SEd of AB 7.598"

$Yield[[4]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.85265, p-value = 0.002449


$Yield[[5]]
[1] "Normality assumption is violated"

$Yield[[6]]
[1] "All the first factor level means are same so dont go for any multiple comparison test"

$Yield[[7]]
$Yield[[7]][[1]]
  MSerror Df     Mean       CV
    173.2 20 122.6667 10.72871

$Yield[[7]][[2]]
     Table CriticalRange
2 2.949998       11.2074

$Yield[[7]][[3]]
   dependent.var groups
n1      123.6667      a
n0      121.6667      a


$Yield[[8]]
[1] "All the second factor level means are same so dont go for any multiple comparison test"

$Yield[[9]]
$Yield[[9]][[1]]
  MSerror Df     Mean       CV
    173.2 20 122.6667 10.72871

$Yield[[9]][[2]]
     Table CriticalRange
2 2.949998       11.2074

$Yield[[9]][[3]]
   dependent.var groups
p0      124.8333      a
p1      120.5000      a


$Yield[[10]]
[1] "The means of levels of interaction between two factors are same so dont go for any multiple comparison test"

$Yield[[11]]
$Yield[[11]][[1]]
  MSerror Df     Mean       CV
    173.2 20 122.6667 10.72871

$Yield[[11]][[2]]
     Table CriticalRange
2 2.949998      15.84966
3 3.096506      16.63682
4 3.189616      17.13708

$Yield[[11]][[3]]
      dependent.var groups
n1:p0      127.1667      a
n0:p0      122.5000      a
n0:p1      120.8333      a
n1:p1      120.1667      a

Analysis of a Factorial Completely Randomized Design with Duncan's test for Yield & Plant Height

fcrd2fact(factorialdata[5:6],factorialdata$Nitrogen,factorialdata$Phosphorus,2)
$Yield
$Yield[[1]]
Analysis of Variance Table

Response: dependent.var
              Df Sum Sq Mean Sq F value Pr(>F)
fact.A         1   24.0  24.000  0.1386 0.7136
fact.B         1  112.7 112.667  0.6505 0.4294
fact.A:fact.B  1   42.7  42.667  0.2463 0.6251
Residuals     20 3464.0 173.200               

$Yield[[2]]
[1] "R Square 0.049"

$Yield[[3]]
[1] "SEm of A: 3.799 , SEd of A: 5.373 , SEm of B: 3.799 , SEd of B: 5.373 , SEm of AB: 5.373 , SEd of AB 7.598"

$Yield[[4]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.85265, p-value = 0.002449


$Yield[[5]]
[1] "Normality assumption is violated"

$Yield[[6]]
[1] "All the first factor level means are same so dont go for any multiple comparison test"

$Yield[[7]]
$Yield[[7]][[1]]
  MSerror Df     Mean       CV
    173.2 20 122.6667 10.72871

$Yield[[7]][[2]]
     Table CriticalRange
2 2.949998       11.2074

$Yield[[7]][[3]]
   dependent.var groups
n1      123.6667      a
n0      121.6667      a


$Yield[[8]]
[1] "All the second factor level means are same so dont go for any multiple comparison test"

$Yield[[9]]
$Yield[[9]][[1]]
  MSerror Df     Mean       CV
    173.2 20 122.6667 10.72871

$Yield[[9]][[2]]
     Table CriticalRange
2 2.949998       11.2074

$Yield[[9]][[3]]
   dependent.var groups
p0      124.8333      a
p1      120.5000      a


$Yield[[10]]
[1] "The means of levels of interaction between two factors are same so dont go for any multiple comparison test"

$Yield[[11]]
$Yield[[11]][[1]]
  MSerror Df     Mean       CV
    173.2 20 122.6667 10.72871

$Yield[[11]][[2]]
     Table CriticalRange
2 2.949998      15.84966
3 3.096506      16.63682
4 3.189616      17.13708

$Yield[[11]][[3]]
      dependent.var groups
n1:p0      127.1667      a
n0:p0      122.5000      a
n0:p1      120.8333      a
n1:p1      120.1667      a



$Plant_Height
$Plant_Height[[1]]
Analysis of Variance Table

Response: dependent.var
              Df  Sum Sq Mean Sq F value Pr(>F)  
fact.A         1  10.667 10.6667  1.2673 0.2736  
fact.B         1   2.667  2.6667  0.3168 0.5798  
fact.A:fact.B  1  28.167 28.1667  3.3465 0.0823 .
Residuals     20 168.333  8.4167                 
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$Plant_Height[[2]]
[1] "R Square 0.198"

$Plant_Height[[3]]
[1] "SEm of A: 0.837 , SEd of A: 1.184 , SEm of B: 0.837 , SEd of B: 1.184 , SEm of AB: 1.184 , SEd of AB 1.675"

$Plant_Height[[4]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.9604, p-value = 0.4465


$Plant_Height[[5]]
[1] "Normality assumption is not violated"

$Plant_Height[[6]]
[1] "All the first factor level means are same so dont go for any multiple comparison test"

$Plant_Height[[7]]
$Plant_Height[[7]][[1]]
   MSerror Df     Mean       CV
  8.416667 20 12.41667 23.36496

$Plant_Height[[7]][[2]]
     Table CriticalRange
2 2.949998      2.470592

$Plant_Height[[7]][[3]]
   dependent.var groups
n1      13.08333      a
n0      11.75000      a


$Plant_Height[[8]]
[1] "All the second factor level means are same so dont go for any multiple comparison test"

$Plant_Height[[9]]
$Plant_Height[[9]][[1]]
   MSerror Df     Mean       CV
  8.416667 20 12.41667 23.36496

$Plant_Height[[9]][[2]]
     Table CriticalRange
2 2.949998      2.470592

$Plant_Height[[9]][[3]]
   dependent.var groups
p0      12.75000      a
p1      12.08333      a


$Plant_Height[[10]]
[1] "The means of levels of interaction between two factors are same so dont go for any multiple comparison test"

$Plant_Height[[11]]
$Plant_Height[[11]][[1]]
   MSerror Df     Mean       CV
  8.416667 20 12.41667 23.36496

$Plant_Height[[11]][[2]]
     Table CriticalRange
2 2.949998      3.493945
3 3.096506      3.667469
4 3.189616      3.777747

$Plant_Height[[11]][[3]]
      dependent.var groups
n1:p0      14.50000      a
n0:p1      12.50000      a
n1:p1      11.66667      a
n0:p0      11.00000      a

Partially Confounded Blocked Factorial Design

One of the main purposes of running a factorial experiment with few factors is to estimate all interactions. Yet if the design is confounded into blocks, the experimenter loses the ability to estimate some interactions. However, including a few additional blocks in the design allows estimation of all main effects and interactions using the method of partial confounding. This method consists of confounding one or more effects or interactions in one set of blocks, and confounding different effects or interactions in additional sets of blocks (or replicates). By combining all the replicates, all effects and interactions become estimable, although they are not orthogonal to blocks.

For example, suppose an experiment is performed to study the effect of two levels of A = calcium supplements and two levels of B = potassium supplements upon the blood pressure of hypertensive subjects; a 2^2 factorial experiment would be run in order to estimate the two main effects and the interaction. However, if the experiment were blocked into two blocks of two experimental units (e.g., two pairs of identical twins) by confounding AB, this interaction would be lost. One way to remedy the problem is to include four additional blocks, confounding main effect A with the difference in two additional blocks, and main effect B with the difference in two more blocks. Combining the six blocks, both main effects and their interaction become estimable. This design is shown in Table 7.10, and this general class of designs is called partially confounded blocked factorials or PCBF. The model Block + A + B + A:B could then be fit to the data from the combined set of blocks. Type III sums of squares and least squares means are used to analyze the data, since the effects are not completely orthogonal to blocks.

If each effect and interaction in the model is confounded an equal number of times in different sets of blocks, as in the example above, Butler (2006) shows the design will have favorable properties and the maximum treatment D-efficiency for estimating the factorial effects. Therefore, a design like this can be created using Cook and Nachtsheim’s algorithm or the similar algorithm available in the optBlock function of the AlgDesign package. The examples below show how this can be done.
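
As a first illustration, the six-block layout just described can be written out directly in base R. This is only a sketch of the idea (levels coded -1/+1), not a reproduction of Table 7.10: each pair of blocks splits the four treatment combinations on a different effect, so that effect is confounded with blocks in that pair.

d <- expand.grid(A = c(-1, 1), B = c(-1, 1))
list(AB_confounded = split(d, d$A * d$B),   # blocks 1-2: AB confounded with blocks
     A_confounded  = split(d, d$A),         # blocks 3-4: A confounded with blocks
     B_confounded  = split(d, d$B))         # blocks 5-6: B confounded with blocks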

Using Cook and Nachtsheim’s Algorithm to Create a Partially Confounded Factorial Design

library(AlgDesign)
Blockdes <- gen.factorial(2, nVars = 2, factors = "all",
                          varNames = c("A","B"))
Blockdes <- optBlock( ~ A + B + A:B, withinData = Blockdes,
                      blocksizes = c(rep(2,6)), criterion = "D")
head(Blockdes,5)
$D
[1] 0.1049934

$diagonality
[1] 0.63

$Blocks
$Blocks$B1
  A B
2 2 1
4 2 2

$Blocks$B2
  A B
1 1 1
3 1 2

$Blocks$B3
  A B
1 1 1
2 2 1

$Blocks$B4
  A B
3 1 2
4 2 2

$Blocks$B5
  A B
1 1 1
4 2 2

$Blocks$B6
  A B
2 2 1
3 1 2


$design
   A B
2  2 1
4  2 2
1  1 1
3  1 2
11 1 1
21 2 1
31 1 2
41 2 2
12 1 1
42 2 2
22 2 1
32 1 2

$rows
 [1] 2 4 1 3 1 2 3 4 1 4 2 3

For mixed level factorial plans, Das (1960) has provided a method for constructing balanced confounded designs where (1) the information recovered for each degree of freedom for any partially confounded interaction is the same, and (2) any contrast of a partially confounded interaction is estimable independently of any contrast of another partially confounded interaction.

Constructing a design using Das’s method consists of converting the asymmetrical factorial into a fraction of a symmetrical factorial. The partial confounding is performed in replicates of the symmetrical factorial, and then each replicate is reduced by fractionation. Lawson et al. (2009) show how Das’s method can be implemented in SAS with proc plan and data step programming. They also provide a SAS macro for generating Das’s balanced confounded designs. Creating designs by this method will allow all interactions in the model to be estimated and will result in a design with equal precision for each single degree of freedom of partially confounded effects and interactions.

The optBlock function can also be used to find a partially confounded mixed level factorial that will allow estimation of all interactions, and there is no restriction on the block size. Using this method can sometimes produce a balanced confounded design like Das’s method, and in other cases it will find an approximately balanced design with more choices for the block size and total number of runs. Lawson et al. (2009) compare the properties of designs created by this method to designs created by Das’s method. The example below shows the use of the AlgDesign functions gen.factorial and optBlock to construct a partially confounded 3 × 2^2 factorial blocked in 12 blocks of 4.

Design Created with optBlock (Analogous to Das’s Method)

desf <- gen.factorial(c(2, 2, 3), factors = "all",
                      varNames = c("A", "B", "C"))
Blockdes <- optBlock( ~ A*B*C, withinData = desf,
                      blocksizes = c(rep(4, 12)),criterion = "D")
head(desf,5)
  A B C
1 1 1 1
2 2 1 1
3 1 2 1
4 2 2 1
5 1 1 2
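
The blocked arrangement itself is stored in the Blocks component of the optBlock result (the same list structure shown for the 2^2 example above). For instance, the first two blocks of four runs can be inspected with:

Blockdes$Blocks[1:2]   # each element lists the four factor combinations assigned to that block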

As an example of the design and analysis of a partially confounded factorial, consider an experiment performed by Dossett et al. (2007). They were investigating methods of storing apple slices in brown bag lunches. The problem was that the apple slices in a sack lunch turn brown and look unappetizing before lunchtime. They wanted to compare the effects of dipping the slices in different treatment solutions prior to storage and to compare the effects of storing them in different storage containers to see if they could find conditions to reduce the amount of browning. Table 7.11 shows the factors and levels. They thought that different varieties of apples might brown at different rates and therefore wanted to block by apple variety. Their apple slicer cut the apples into six slices, therefore all 4×3 = 12 treatment combinations could not be tested within each block or apple. Since they were interested in the possible interaction of their treatment factors, they ran a partially confounded design.

The R code below can be used to create a 4×3 factorial design in 4 blocks of size 6 using gen.factorial and optBlock. The blocks represented four different varieties of apples; namely Fuji, Braeburn, Red Delicious, and Granny Smith. The experimental units would be the six slices from each apple, and these could be randomly assigned to one of the treatment combinations designated for each block.

Using gen.factorial and optBlock to Create the Design

des23 <- gen.factorial(c(4, 3), factors = "all",
                       varNames = c("A", "B"))
Blockdes <- optBlock( ~ A*B, withinData = des23,
                      blocksizes = c(rep(6, 4)),criterion = "D")

head(des23,5)
  A B
1 1 1
2 2 1
3 3 1
4 4 1
5 1 2

Dossett et al. (2007) actually used a slightly different procedure to generate the design, but their design had similar properties. After storing their treated apple slices for an hour and forty-five minutes, each slice was compared to photographs of an apple slice at various stages of browning and assigned a rating between 1 and 11. The lowest rating was for the least amount of browning and the highest was for the most. All three team members independently rated the slices, and the response was the average rating. Table 7.12 shows the results.

knitr::include_graphics("confounding.png")

This design and the results are stored in the data frame apple in the daewr package. The example below shows how to access and analyze this data.

library(daewr)
head(apple,10)
   Block A B rating
1      1 0 0   7.33
2      1 2 1   1.67
3      1 0 2   6.67
4      1 1 1   1.33
5      1 2 0   1.67
6      1 3 0   8.00
7      2 2 0   1.00
8      2 1 0   3.33
9      2 2 2   1.00
10     2 0 1   8.67

Estimate the Partially Confounded Blocked Factorial

library(daewr)
library(car)
modf <- lm(rating ~ Block + A + B + A:B, data = apple,
           contrasts = list(A = contr.sum, B = contr.sum,
                            Block = contr.sum))
Anova(modf,type="III")
Anova Table (Type III tests)

Response: rating
            Sum Sq Df F value    Pr(>F)    
(Intercept) 522.48  1 72.7428 1.323e-05 ***
Block         3.01  3  0.1396    0.9338    
A           145.96  3  6.7740    0.0110 *  
B             2.21  2  0.1535    0.8599    
A:B           7.73  6  0.1795    0.9755    
Residuals    64.64  9                      
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

It can be seen that the only significant effect is factor A, the treatment solution. The least squares means for the different treatment solutions can be found as shown below.

Tukey Honestly Significant Difference

library(lsmeans)
lsmeans(modf, pairwise ~ A, adjust = "tukey")
$lsmeans
 A lsmean   SE df lower.CL upper.CL
 0   6.97 1.12  9    4.445     9.50
 1   2.97 1.12  9    0.444     5.50
 2   1.53 1.12  9   -0.998     4.05
 3   7.19 1.12  9    4.668     9.72

Results are averaged over the levels of: Block, B 
Confidence level used: 0.95 

$contrasts
 contrast estimate   SE df t.ratio p.value
 A0 - A1     4.001 1.61  9   2.484  0.1295
 A0 - A2     5.443 1.55  9   3.518  0.0276
 A0 - A3    -0.222 1.61  9  -0.138  0.9990
 A1 - A2     1.442 1.61  9   0.896  0.8074
 A1 - A3    -4.223 1.55  9  -2.729  0.0901
 A2 - A3    -5.666 1.61  9  -3.518  0.0276

Results are averaged over the levels of: Block, B 
P value adjustment: tukey method for comparing a family of 4 estimates 

Using Tukey's HSD procedure as described in Section 2.8.2, it was found that dipping the apple slices in salt water reduced browning the most, but the amount of browning for slices dipped in lemon juice was not significantly worse. The results are summarized by the underlines in Table 7.13. The experimenters recommended further studies varying the

knitr::include_graphics("tukey.png")

Review of Important Concepts

When experimental units are heterogeneous and can be grouped into smaller blocks of more homogeneous experimental units, a blocked design should be used. When the number of experimental units per block or block size is smaller than the number of levels of the treatment factor or combinations of levels of treatment factors in a factorial design, an incomplete block design should be used.

When there is only one treatment factor, there is a choice between two types of incomplete block designs. Balanced incomplete block (BIB) designs require that every treatment level occurs in a block an equal number of times with every other treatment level. BIB designs can be created with the optBlock function in the AlgDesign package. The advantage of these designs is that the precision (or standard error) of the difference in every possible pair of treatment means will be equal. The disadvantage is that many blocks and experimental units may be required to achieve the balance. The other alternative design for one treatment factor is the partially balanced incomplete block (PBIB) design.
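
For instance, a minimal sketch with illustrative numbers (not taken from the text above): search for a BIB-type arrangement of 4 treatments in 6 blocks of size 2, so that every pair of treatments can occur together in a block exactly once. The D-criterion search in optBlock usually recovers the balanced arrangement for a problem this small, but balance should still be verified on the result.

library(AlgDesign)
cand <- data.frame(Trt = factor(paste0("T", 1:4)))   # the four treatment levels
bibDes <- optBlock(~ Trt, withinData = cand, blocksizes = rep(2, 6), criterion = "D")
bibDes$Blocks   # check that each pair of treatments appears together exactly once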

The advantage of PBIB designs is that the total number of blocks and experimental units required can be reduced. The disadvantage is that the standard error of differences in pairs of treatment means will not be constant. There are many different methods of creating PBIB designs, and some of the more useful designs have been tabled. One type of PBIB called a BTIB (balanced with respect to test treatments) can be easily created from a BIB design, as sketched below. This design is useful when there is more interest in comparing one treatment level (such as a control) to all other treatment levels than there is in comparisons among the other treatment levels. Another class of PBIB designs, which can be easily created using the design.cyclic function in the agricolae package, is called generalized cyclic designs. Latin-square designs introduced in Chapter 4 have two orthogonal blocking factors, and contain a complete block design in both the row blocks and column blocks. If an incomplete block design is required in either the row or column blocks, a row-column design (RCD) can be utilized. When experimenting with multiple factors and the block size is not large enough to accommodate all possible treatment combinations, there are two alternative methods for creating an incomplete block design. The first method is to completely confound some interactions with blocks in a completely confounded blocked factorial (CCBF) design, or a completely confounded blocked fractional factorial (CCBFF) design. The advantage of these designs is that the total number of blocks and experimental units can be minimized. The disadvantage is that some interactions will be completely confounded with blocks and will be inestimable. The other method is to use a partially confounded blocked factorial (PCBF) design.
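
A minimal sketch of the BTIB idea mentioned above, in plain R (hypothetical treatment labels): start from a BIB for three test treatments in blocks of size 2 (every pair together once) and append the control to every block, so the control meets each test treatment equally often.

bib_blocks  <- list(c("T1", "T2"), c("T1", "T3"), c("T2", "T3"))   # BIB in the test treatments
btib_blocks <- lapply(bib_blocks, function(b) c("T0", b))          # add the control T0 to each block
btib_blocks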

When each factor has only two or three levels, there is a classical method for creating completely confounded factorial designs, and tables are available for block defining contrasts that will minimize the number of low-order interactions confounded. For factorials where all factors have p (a prime number) levels, or where the experiment can be broken down into sub-experiments where all factors have the same prime number of levels, the classical method for creating a completely confounded design can also be used. A D-optimal search can also be used to create a confounded design to estimate all effects and interactions of interest and confound others with blocks. The optBlock function in the AlgDesign package can be used to find these designs. The advantage of this approach is that it has a higher possibility of finding a design capable of estimating a specified list of effects and interactions, and it is more flexible in terms of the block sizes available. The disadvantage of designs created with a D-optimal search is that treatment levels are not orthogonal to blocks, and they are estimated less efficiently than they would be in a design created with the classical method. Partially confounded blocked factorials are used in factorials where there are only a few factors, like two or three, and there is a need to estimate all interactions. These designs require more blocks and experimental units than completely confounded designs, but do allow estimation of all interactions. These designs can be created by confounding all estimable effects an equal number of times in different replicates of blocks or by using a D-optimal search.
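
A minimal sketch of the classical method referred to above: a 2^3 factorial split into two blocks of four by confounding the three-factor interaction ABC with blocks, using the defining contrast L = x1 + x2 + x3 (mod 2) on 0/1-coded levels.

d <- expand.grid(A = 0:1, B = 0:1, C = 0:1)
d$Block <- (d$A + d$B + d$C) %% 2 + 1   # L = 0 gives the principal block (1), L = 1 gives block 2
d[order(d$Block), ]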

The analysis of incomplete block designs and partially confounded factorial designs is similar to the analysis of complete block designs, except care must be taken to always use the type III sums of squares and least squares means. For completely confounded factorial designs, the interactions that are confounded with blocks must be left out of the model, and the significant effects can be identified by graphical techniques such as a half-normal plot of effects.
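
A minimal base-R sketch of the half-normal plot mentioned above, assuming a hypothetical model object modc fit to a completely confounded design (e.g., modc <- lm(y ~ Block + A * B * C, data = dat)): drop the intercept and block coefficients, then plot the remaining absolute effect estimates against half-normal quantiles; effects standing well above the near-zero cluster are the likely active ones.

effs <- coef(modc)[-grep("Intercept|Block", names(coef(modc)))]   # keep only the factorial effects
ord  <- order(abs(effs))
q    <- qnorm(0.5 + 0.5 * ppoints(length(effs)))                  # half-normal quantiles
plot(q, abs(effs)[ord], xlab = "Half-normal quantile", ylab = "|effect estimate|")
text(q, abs(effs)[ord], labels = names(effs)[ord], pos = 2, cex = 0.7)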

Additional Factorial Design of Experiments Example

Import the data

fill_height <- read.csv("fill height.csv")
head(fill_height,10)
   Replication  A  B   C Fill.Height
1            1 10 25 200          -3
2            1 10 25 250           1
3            1 12 25 200           0
4            1 12 25 250           2
5            1 14 25 200           5
6            1 14 25 250           7
7            1 10 30 200          -1
8            1 10 30 250           1
9            1 12 30 200           2
10           1 12 30 250           6
attach(fill_height)

Create Factors

fill_height$Replication <- as.factor(fill_height$Replication)
fill_height$A <- as.factor(fill_height$A)
fill_height$B <- as.factor(fill_height$B)
fill_height$C <- as.factor(fill_height$C)
fill_height$Fill.Height <- as.numeric(fill_height$Fill.Height)
head(fill_height,10)
   Replication  A  B   C Fill.Height
1            1 10 25 200          -3
2            1 10 25 250           1
3            1 12 25 200           0
4            1 12 25 250           2
5            1 14 25 200           5
6            1 14 25 250           7
7            1 10 30 200          -1
8            1 10 30 250           1
9            1 12 30 200           2
10           1 12 30 250           6

Carry Out Factorial Experiment Without Confounding

Fact <- aov(Fill.Height ~ Replication+A*B*C, data = fill_height)
summary(Fact)
            Df Sum Sq Mean Sq F value   Pr(>F)    
Replication  1   0.37    0.37   0.508   0.4910    
A            2 238.58  119.29 161.503 7.03e-09 ***
B            1  40.04   40.04  54.210 1.43e-05 ***
C            1  26.04   26.04  35.256 9.76e-05 ***
A:B          2   8.08    4.04   5.472   0.0224 *  
A:C          2   0.08    0.04   0.056   0.9454    
B:C          1   0.37    0.37   0.508   0.4910    
A:B:C        2   2.25    1.13   1.523   0.2607    
Residuals   11   8.13    0.74                     
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Interaction Plot of A and B

library(gvlma)
library(daewr)
library(agricolae)
with(fill_height, (interaction.plot(A, B, Fill.Height, type = "b",
                             pch = c(18,24,22), leg.bty = "o",col = "blue",
            main = "Interaction Plot of Factor A and Factor B",
            xlab = "Factor A", 
            ylab = "Average Fill Height")))

NULL

Interaction Plot of A and C

with(fill_height, (interaction.plot(A, C, Fill.Height, type = "b",
                             pch = c(18,24,22), leg.bty = "o",col = "blue",
            main = "Interaction Plot of Factor A and Factor C",
            xlab = "Factor A", 
            ylab = "Average Fill Height")))

NULL

Interaction Plot of B and C

with(fill_height, (interaction.plot(B, C, Fill.Height, type = "b",
                             pch = c(18,24,22), leg.bty = "o",col = "blue",
            main = "Interaction Plot of Factor B and Factor C",
            xlab = "Factor B", 
            ylab = "Average Fill Height")))

NULL

Partial Confounding Class Work

Load the data

library(daewr)
partial <- read.csv("partial confounding.csv")
head(partial,10)
   Replication  A  B   C Filling
1            1 12 30 200      -4
2            1 12 30 250      -2
3            1 14 30 200       1
4            1 14 30 250       3
5            1 12 35 200      -2
6            1 12 35 250       2
7            1 14 35 200       3
8            1 14 35 250       7
9            2 12 30 200      -2
10           2 12 30 250       1
attach(partial)

Assigning the Factors

partial$Replication <- as.factor(partial$Replication)
partial$A <- as.factor(partial$A)
partial$B <- as.factor(partial$B)
partial$C <- as.factor(partial$C)
head(partial,10)
   Replication  A  B   C Filling
1            1 12 30 200      -4
2            1 12 30 250      -2
3            1 14 30 200       1
4            1 14 30 250       3
5            1 12 35 200      -2
6            1 12 35 250       2
7            1 14 35 200       3
8            1 14 35 250       7
9            2 12 30 200      -2
10           2 12 30 250       1

Normal Factorial Design

library(car)
modf1 <- aov(Filling ~ Replication + A*B*C, data = partial)
summary(modf1)
            Df Sum Sq Mean Sq F value   Pr(>F)    
Replication  1   4.00    4.00   3.111 0.121122    
A            1  64.00   64.00  49.778 0.000201 ***
B            1  30.25   30.25  23.528 0.001855 ** 
C            1  20.25   20.25  15.750 0.005402 ** 
A:B          1   0.25    0.25   0.194 0.672541    
A:C          1   0.25    0.25   0.194 0.672541    
B:C          1   1.00    1.00   0.778 0.407084    
A:B:C        1   1.00    1.00   0.778 0.407084    
Residuals    7   9.00    1.29                     
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Partial Confounding

To be continued.

Split Plot Design

Sample Data

data2<-data.frame(GFY=c(16,13,14,16,16,17,16,17,16,16,17,16,15,15,15,13,15,14,
                        16,14,15,14,15,17,18,15,15,15,14,14,14,14,15,15,13,15,
                        14,14,13,13,13,12,15,12,15),
                  DMY=c(5,5,6,5,6,7,6,8,6,9,8,7,5,5,5,4,6,5,8,5,5,5,4,6,6,5,5,6,
                        6,6,5,5,5,5,5,6,5,5,5,4,5,4,5,5,5),
                  Rep=rep(c("R1","R2","R3"),each=15),
                  Trt=rep(c("T1","T2","T3","T4","T5","T6","T7","T8","T9","T10",
                            "T11","T12","T13","T14","T15"),3))
head(data2,5)
  GFY DMY Rep Trt
1  16   5  R1  T1
2  13   5  R1  T2
3  14   6  R1  T3
4  16   5  R1  T4
5  16   6  R1  T5

Carrying Out a Split Plot Design Using In-Built Data

data("splitdata")
head(splitdata,10)
   Replication Date_of_Sowing Varities Yield Plant_Height
1            1              1        1  4800           98
2            1              1        2  4600          104
3            1              1        3  5000          106
4            1              1        4  5120          110
5            1              1        5  5130          120
6            1              1        6  5140          125
7            1              2        1  4700           98
8            1              2        2  4969           96
9            1              2        3  5013           99
10           1              2        4  4890          100

Using Date of Sowing as the main-plot factor and Varieties as the sub-plot factor, with the LSD test

Split plot analysis with LSD test for Yield

splitplot(splitdata[4],splitdata$Replication,splitdata$Date_of_Sowing,splitdata$Varities,1)
$Yield
$Yield[[1]]
$Yield[[1]][[1]]
Analysis of Variance Table

Response: dependent.var
                   Df Sum Sq Mean Sq F value  Pr(>F)  
block               2  61557   30779  0.1876 0.84205  
main.plot           1    187     187  0.0011 0.97615  
Ea                  2 328176  164088                  
sub.plot            5 510277  102055  2.9915 0.03556 *
main.plot:sub.plot  5 125447   25089  0.7354 0.60557  
Eb                 20 682307   34115                  
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$Yield[[1]][[2]]
[1] "CV(a): 8.063 , CV(b) : 3.677"

$Yield[[1]][[3]]
[1] "R Square 0.601"

$Yield[[1]][[4]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.94087, p-value = 0.05406


$Yield[[1]][[5]]
[1] "Normality assumption is not violated"

$Yield[[1]][[6]]
[1] "All the main plot factor level means are same so dont go for any multiple comparison test"

$Yield[[1]][[7]]
$Yield[[1]][[7]][[1]]
   MSerror Df     Mean       CV  t.value      LSD
  164087.9  2 5023.611 8.063474 4.302653 580.9694

$Yield[[1]][[7]][[2]]
  dependent.var groups
1      5025.889      a
2      5021.333      a


$Yield[[1]][[8]]
[1] "The means of one or more levels of sub plot factor are not same, so go for multiple comparison test"

$Yield[[1]][[9]]
$Yield[[1]][[9]][[1]]
   MSerror Df     Mean       CV  t.value      LSD
  34115.36 20 5023.611 3.676707 2.085963 222.4442

$Yield[[1]][[9]][[2]]
  dependent.var groups
6      5154.333      a
5      5075.833      a
4      5070.000      a
3      5053.833      a
2      5013.167      a
1      4774.500      b


$Yield[[1]][[10]]
[1] "All the interaction level means are same so dont go for any multiple comparison test"

$Yield[[1]][[11]]
$Yield[[1]][[11]][[1]]
   MSerror Df     Mean       CV  t.value      LSD
  34115.36 20 5023.611 3.676707 2.085963 314.5836

$Yield[[1]][[11]][[2]]
    dependent.var groups
2:6      5173.333      a
1:6      5135.333      a
2:5      5095.000      a
2:3      5074.333      a
1:4      5070.000      a
2:4      5070.000      a
2:2      5069.667      a
1:5      5056.667      a
1:3      5033.333      a
1:2      4956.667     ab
1:1      4903.333     ab
2:1      4645.667      b
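
The two LSD values above use different error terms: Ea (2 df) for comparing main-plot (sowing date) means and Eb (20 df) for comparing sub-plot (variety) means, with r = 3 replications, a = 2 sowing dates, and b = 6 varieties. A quick check of both:

qt(0.975, 2)  * sqrt(2 * 164087.9 / (3 * 6))    # main-plot LSD, approx. 581 (580.97 above)
qt(0.975, 20) * sqrt(2 * 34115.36 / (3 * 2))    # sub-plot LSD, approx. 222.4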

Split plot analysis with LSD test for both Yield and Plant Height

splitplot(splitdata[4:5],splitdata$Replication,splitdata$Date_of_Sowing,splitdata$Varities,1)
$Yield
$Yield[[1]]
$Yield[[1]][[1]]
Analysis of Variance Table

Response: dependent.var
                   Df Sum Sq Mean Sq F value  Pr(>F)  
block               2  61557   30779  0.1876 0.84205  
main.plot           1    187     187  0.0011 0.97615  
Ea                  2 328176  164088                  
sub.plot            5 510277  102055  2.9915 0.03556 *
main.plot:sub.plot  5 125447   25089  0.7354 0.60557  
Eb                 20 682307   34115                  
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$Yield[[1]][[2]]
[1] "CV(a): 8.063 , CV(b) : 3.677"

$Yield[[1]][[3]]
[1] "R Square 0.601"

$Yield[[1]][[4]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.94087, p-value = 0.05406


$Yield[[1]][[5]]
[1] "Normality assumption is not violated"

$Yield[[1]][[6]]
[1] "All the main plot factor level means are same so dont go for any multiple comparison test"

$Yield[[1]][[7]]
$Yield[[1]][[7]][[1]]
   MSerror Df     Mean       CV  t.value      LSD
  164087.9  2 5023.611 8.063474 4.302653 580.9694

$Yield[[1]][[7]][[2]]
  dependent.var groups
1      5025.889      a
2      5021.333      a


$Yield[[1]][[8]]
[1] "The means of one or more levels of sub plot factor are not same, so go for multiple comparison test"

$Yield[[1]][[9]]
$Yield[[1]][[9]][[1]]
   MSerror Df     Mean       CV  t.value      LSD
  34115.36 20 5023.611 3.676707 2.085963 222.4442

$Yield[[1]][[9]][[2]]
  dependent.var groups
6      5154.333      a
5      5075.833      a
4      5070.000      a
3      5053.833      a
2      5013.167      a
1      4774.500      b


$Yield[[1]][[10]]
[1] "All the interaction level means are same so dont go for any multiple comparison test"

$Yield[[1]][[11]]
$Yield[[1]][[11]][[1]]
   MSerror Df     Mean       CV  t.value      LSD
  34115.36 20 5023.611 3.676707 2.085963 314.5836

$Yield[[1]][[11]][[2]]
    dependent.var groups
2:6      5173.333      a
1:6      5135.333      a
2:5      5095.000      a
2:3      5074.333      a
1:4      5070.000      a
2:4      5070.000      a
2:2      5069.667      a
1:5      5056.667      a
1:3      5033.333      a
1:2      4956.667     ab
1:1      4903.333     ab
2:1      4645.667      b




$Plant_Height
$Plant_Height[[1]]
$Plant_Height[[1]][[1]]
Analysis of Variance Table

Response: dependent.var
                   Df  Sum Sq Mean Sq F value Pr(>F)
block               2 3022.06 1511.03  6.7149 0.1296
main.plot           1    0.03    0.03  0.0001 0.9921
Ea                  2  450.06  225.03               
sub.plot            5 1167.14  233.43  1.7106 0.1782
main.plot:sub.plot  5  292.47   58.49  0.4287 0.8232
Eb                 20 2729.22  136.46               

$Plant_Height[[1]][[2]]
[1] "CV(a): 13.104 , CV(b) : 10.205"

$Plant_Height[[1]][[3]]
[1] "R Square 0.644"

$Plant_Height[[1]][[4]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.96789, p-value = 0.3707


$Plant_Height[[1]][[5]]
[1] "Normality assumption is not violated"

$Plant_Height[[1]][[6]]
[1] "All the main plot factor level means are same so dont go for any multiple comparison test"

$Plant_Height[[1]][[7]]
$Plant_Height[[1]][[7]][[1]]
   MSerror Df     Mean       CV  t.value      LSD
  225.0278  2 114.4722 13.10442 4.302653 21.51459

$Plant_Height[[1]][[7]][[2]]
  dependent.var groups
2      114.5000      a
1      114.4444      a


$Plant_Height[[1]][[8]]
[1] "All the sub plot factor factor level means are same so dont go for any multiple comparison test"

$Plant_Height[[1]][[9]]
$Plant_Height[[1]][[9]][[1]]
   MSerror Df     Mean      CV  t.value      LSD
  136.4611 20 114.4722 10.2048 2.085963 14.06859

$Plant_Height[[1]][[9]][[2]]
  dependent.var groups
6      122.8333      a
5      121.6667     ab
1      112.5000     ab
2      112.1667     ab
4      109.0000     ab
3      108.6667      b


$Plant_Height[[1]][[10]]
[1] "All the interaction level means are same so dont go for any multiple comparison test"

$Plant_Height[[1]][[11]]
$Plant_Height[[1]][[11]][[1]]
   MSerror Df     Mean      CV  t.value      LSD
  136.4611 20 114.4722 10.2048 2.085963 19.89599

$Plant_Height[[1]][[11]][[2]]
    dependent.var groups
2:6      123.6667      a
1:5      123.0000      a
1:6      122.0000      a
2:5      120.3333      a
2:2      118.0000      a
1:1      115.6667      a
1:3      110.0000      a
1:4      109.6667      a
2:1      109.3333      a
2:4      108.3333      a
2:3      107.3333      a
1:2      106.3333      a

Latin Square Design

Latin Square

A Latin square design is ideal for any experiment in which it is possible to measure each subject under every treatment and, in addition, it is necessary to control for changing conditions over the course of the experiment. A Latin square is a design in which each treatment is assigned to each time period the same number of times and to each subject the same number of times (see Dean and Voss 1999, chap. 12). If there are t treatments, t time periods, and mt subjects, then m Latin squares (each with t treatment sequences) would be used.

Carry-over effects are controlled by using Latin squares that are ‘counterbalanced’ (Cotton 1993). This means that, looking at the sequences of treatments assigned to all the subjects taken together, every treatment is preceded by every other treatment for the same number of subjects. Counterbalanced Latin squares exist for any even number of treatments and for some odd numbers of treatments (e.g., t = 9, 15, 21, 27; see Jones and Kenward 1989, Sect. 5.2.2, for references). For other odd numbers, a pair of Latin squares can be used which between them give a set of 2t counterbalanced sequences.

If a carry-over effect is expected to persist for more than one time period, then the counterbalancing must be extended to treatments occurring more than one time period prior to the current treatment.
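
A minimal base-R sketch of the standard (Williams) construction of a counterbalanced Latin square for an even number of treatments: the base sequence 0, 1, t-1, 2, t-2, ... is shifted cyclically, which makes every treatment immediately preceded by every other treatment exactly once across the subject sequences.

ntrt <- 4                                                                   # number of treatments (even)
base <- c(0, as.vector(rbind(1:(ntrt / 2), ntrt - 1:(ntrt / 2))))[1:ntrt]   # 0 1 3 2 when ntrt = 4
williams <- t(sapply(0:(ntrt - 1), function(j) (base + j) %% ntrt)) + 1
williams   # rows = subject sequences, columns = time periods, entries = treatment numbers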

Latin Square Design

The Latin Square Design is the second experimental design that addresses sources of systematic variation other than the intended treatment. It assumes that one can characterize treatments, whether intended or otherwise, as belonging clearly to separate sets. These categories are arranged into two blocking dimensions: rows and columns. An example would be a developmental toxicity study in which the rows give the source litter of the test animal (with the first litter as row 1, the next as row 2, etc.), while the columns define a secondary category like the ages of the test animals (with 6–8 weeks as column 1, 8–10 weeks as column 2, and so on). Experimental units are then assigned so that each major treatment, such as control, low dose, intermediate dose, etc., appears once and only once in each row and each column. If one denotes the test groups as A (control), B (low), C (intermediate), and D (high), such an assignment would resemble that shown in Table 30.10. Here it can be seen that randomized experimental units are assigned to a treatment group, and then randomly assigned to a place in the Latin square.
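
A randomized layout of this kind can be generated in R; a minimal sketch, assuming agricolae's design.lsd takes the treatment labels as its first argument (the seed shown is arbitrary):

library(agricolae)
trt <- c("A", "B", "C", "D")        # control, low, intermediate, high
lsq <- design.lsd(trt, seed = 123)  # randomized Latin square layout
lsq$sketch                          # the row-by-column arrangement; lsq$book gives the field book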

Load the data

data(lsddata)
head(lsddata,5)
  Row Column Treatment Yield Plant_Height
1   1      1         D  39.0           12
2   1      2         A  24.1           15
3   1      3         E  26.1           12
4   1      4         B  37.0           14
5   1      5         C  42.0           18

Latin square design analysis with the LSD test for Yield only

lsd(lsddata[4],lsddata$Treatment,lsddata$Row,lsddata$Column,1)
$Yield
$Yield[[1]]
Analysis of Variance Table

Response: data2
          Df Sum Sq Mean Sq F value   Pr(>F)   
row        4  66.76  16.689  0.6374 0.645733   
column     4  42.33  10.582  0.4041 0.802178   
trt        4 957.97 239.491  9.1460 0.001253 **
Residuals 12 314.22  26.185                    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$Yield[[2]]
[1] "R Square 0.773"

$Yield[[3]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.98241, p-value = 0.9283


$Yield[[4]]
[1] "Normality assumption is not violated"

$Yield[[5]]
[1] "SEm 2.2885 , SEd 3.2364"

$Yield[[6]]
[1] "The treatment means of one or more treatments are not same, so go for multiple comparison test"

$Yield[[7]]
$Yield[[7]][[1]]
  MSerror Df   Mean       CV  t.value      LSD
  26.1854 12 33.536 15.25873 2.178813 7.051468

$Yield[[7]][[2]]
  data2 groups
B 40.40      a
C 37.72      a
D 37.42      a
E 26.52      b
A 25.62      b

The Latin square design is a type of experimental design used to control for variation in two blocking dimensions (here rows and columns) at once. Let’s break down the interpretation of the output:

Df (Degrees of Freedom)

This column shows the degrees of freedom associated with each source of variation. In this case, there are four degrees of freedom for each of the “row,” “column,” and “trt” (treatment) factors, as well as 12 degrees of freedom for the “Residuals” or error term.

Sum Sq (Sum of Squares)

This column represents the sum of squared deviations of the response variable from its mean, attributed to each source of variation. It indicates how much variability in the response variable can be explained by each factor.

Mean Sq (Mean Sum of Squares)

This is the sum of squares divided by the corresponding degrees of freedom. It represents the average variability explained by each factor.

F value

The F statistic is a ratio of mean squares, which helps determine whether the variability explained by a factor is significantly larger than the variability due to random chance (residual variability). It’s used to assess the significance of each factor.

Pr(>F) (p-value)

This is the p-value associated with the F statistic. It indicates the probability of observing an F statistic as extreme as the one calculated, assuming the null hypothesis (no effect of the factor) is true. A smaller p-value suggests stronger evidence against the null hypothesis.
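
For the Yield table above, these quantities can be reproduced directly: the mean square is Sum Sq / Df, the F statistic for treatments is MS(trt) / MS(Residuals), and its p-value comes from the F distribution with (4, 12) degrees of freedom.

957.97 / 4                                         # Mean Sq for trt, approx. 239.49
239.491 / 26.185                                   # F value for trt, approx. 9.146
pf(9.146, df1 = 4, df2 = 12, lower.tail = FALSE)   # Pr(>F), approx. 0.00125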

Now let’s interpret the results for each factor

row

The factor “row” has not shown a significant effect on the “Yield” response variable. The p-value (0.645733) is larger than the common significance threshold of 0.05, indicating that there’s not enough evidence to conclude that “row” has a significant impact on “Yield.”

column

Similar to “row,” the factor “column” also doesn’t seem to have a significant effect on “Yield.” The p-value (0.802178) is much larger than 0.05.

trt (treatment)

The factor “trt” (treatment) shows a significant effect on the “Yield” response variable. The p-value (0.001253) is much smaller than 0.05, suggesting that there is strong evidence to reject the null hypothesis and conclude that the “trt” factor significantly affects the “Yield.”

Residuals

This represents the unexplained variability in the response variable “Yield” that cannot be attributed to the factors “row,” “column,” or “trt.” It’s the variability that remains after accounting for the effects of these factors.

In summary, the Latin square design ANOVA indicates that the “trt” (treatment) factor has a significant effect on the “Yield” response variable, while the “row” and “column” factors do not. The “Residuals” term accounts for the variability that is not explained by any of the considered factors.

Latin square design analysis with the LSD test for both Yield and Plant Height

lsd(lsddata[4:5],lsddata$Treatment,lsddata$Row,lsddata$Column,1)
$Yield
$Yield[[1]]
Analysis of Variance Table

Response: data2
          Df Sum Sq Mean Sq F value   Pr(>F)   
row        4  66.76  16.689  0.6374 0.645733   
column     4  42.33  10.582  0.4041 0.802178   
trt        4 957.97 239.491  9.1460 0.001253 **
Residuals 12 314.22  26.185                    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$Yield[[2]]
[1] "R Square 0.773"

$Yield[[3]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.98241, p-value = 0.9283


$Yield[[4]]
[1] "Normality assumption is not violated"

$Yield[[5]]
[1] "SEm 2.2885 , SEd 3.2364"

$Yield[[6]]
[1] "The treatment means of one or more treatments are not same, so go for multiple comparison test"

$Yield[[7]]
$Yield[[7]][[1]]
  MSerror Df   Mean       CV  t.value      LSD
  26.1854 12 33.536 15.25873 2.178813 7.051468

$Yield[[7]][[2]]
  data2 groups
B 40.40      a
C 37.72      a
D 37.42      a
E 26.52      b
A 25.62      b



$Plant_Height
$Plant_Height[[1]]
Analysis of Variance Table

Response: data2
          Df Sum Sq Mean Sq F value  Pr(>F)  
row        4 306.16  76.540  5.0666 0.01262 *
column     4  28.56   7.140  0.4726 0.75515  
trt        4  23.76   5.940  0.3932 0.80967  
Residuals 12 181.28  15.107                  
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$Plant_Height[[2]]
[1] "R Square 0.664"

$Plant_Height[[3]]

    Shapiro-Wilk normality test

data:  model$residuals
W = 0.96767, p-value = 0.5866


$Plant_Height[[4]]
[1] "Normality assumption is not violated"

$Plant_Height[[5]]
[1] "SEm 1.7382 , SEd 2.4582"

$Plant_Height[[6]]
[1] "All the treatment means are same so dont go for any multiple comparison test"

$Plant_Height[[7]]
$Plant_Height[[7]][[1]]
   MSerror Df  Mean       CV  t.value      LSD
  15.10667 12 18.36 21.16955 2.178813 5.355922

$Plant_Height[[7]][[2]]
  data2 groups
A  19.8      a
D  18.8      a
B  18.6      a
E  17.6      a
C  17.0      a

Latin Square Design (Ordinary Way, Using lm and anova)

Data Entry

Yield <- c(257,230,279,287,202,
           245,283,245,280,260,
           182,252,280,246,250,
           203,294,227,193,259,
           231,271,266,334,338
           )

Row<- factor(x = rep(x = 1:5, each = 5))
Col <- factor(x = rep(x = 1:5, times = 5))
Trt <- c("B","E","A","C","D",
         "D","A","E","B","C",
         "E","B","C","D","A",
         "A","C","D","E","B",
         "C","D","B","A","E"
         )

Create a data frame from the data entered above

df <- data.frame(Row,Col, Trt, Yield)
head(df,5)
  Row Col Trt Yield
1   1   1   B   257
2   1   2   E   230
3   1   3   A   279
4   1   4   C   287
5   1   5   D   202

Make a Plot

library(ggplot2)
ggplot(data=df, mapping = aes(x = Trt, y = Yield))+
  geom_point()+
  geom_boxplot()+
  labs(
    title = "A Box Plot of Yields Across Treatments",
    x = "Treatments",
    y = "Yields"
  )

Fit the Model

fit.model <- lm(formula = Yield~Row+Col+Trt, data = df)
summary(fit.model )

Call:
lm(formula = Yield ~ Row + Col + Trt, data = df)

Residuals:
   Min     1Q Median     3Q    Max 
-39.48 -13.08  -3.08  10.72  62.12 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)    
(Intercept)   232.88      25.40   9.170 9.06e-07 ***
Row2           11.60      22.27   0.521   0.6120    
Row3           -9.00      22.27  -0.404   0.6933    
Row4          -15.80      22.27  -0.709   0.4917    
Row5           37.00      22.27   1.661   0.1226    
Col2           42.40      22.27   1.904   0.0812 .  
Col3           35.80      22.27   1.607   0.1340    
Col4           44.40      22.27   1.993   0.0695 .  
Col5           38.20      22.27   1.715   0.1120    
TrtB           -7.00      22.27  -0.314   0.7587    
TrtC            0.60      22.27   0.027   0.9790    
TrtD          -31.60      22.27  -1.419   0.1814    
TrtE          -32.20      22.27  -1.446   0.1739    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 35.22 on 12 degrees of freedom
Multiple R-squared:  0.5828,    Adjusted R-squared:  0.1656 
F-statistic: 1.397 on 12 and 12 DF,  p-value: 0.2858

Convert the Linear Model to an ANOVA Table for the Latin Square Design

anova(fit.model)
Analysis of Variance Table

Response: Yield
          Df  Sum Sq Mean Sq F value Pr(>F)
Row        4  8604.6  2151.1  1.7342 0.2071
Col        4  6693.4  1673.3  1.3490 0.3083
Trt        4  5495.8  1373.9  1.1077 0.3973
Residuals 12 14884.9  1240.4               

Pairwise Comparison

library(agricolae)
out <- LSD.test(fit.model, "Trt", p.adj = "bonferroni")
out
$statistics
   MSerror Df   Mean       CV  t.value     MSD
  1240.407 12 255.76 13.77049 3.428444 76.3676

$parameters
        test  p.ajusted name.t ntr alpha
  Fisher-LSD bonferroni    Trt   5  0.05

$means
  Yield      std r      LCL      UCL Min Max Q25 Q50 Q75
A 269.8 48.04893 5 235.4824 304.1176 203 334 250 279 283
B 262.8 10.84896 5 228.4824 297.1176 252 280 257 259 266
C 270.4 25.42243 5 236.0824 304.7176 231 294 260 280 287
D 238.2 25.58711 5 203.8824 272.5176 202 271 227 245 246
E 237.6 61.79239 5 203.2824 271.9176 182 338 193 230 245

$comparison
NULL

$groups
  Yield groups
C 270.4      a
A 269.8      a
B 262.8      a
D 238.2      a
E 237.6      a

attr(,"class")
[1] "group"

Alternative Pairwise Comparison

out2<- HSD.test(fit.model, "Trt", group = TRUE, console = TRUE, main = "Yield")

Study: Yield

HSD Test for Yield 

Mean Square Error:  1240.407 

Trt,  means

  Yield      std r Min Max
A 269.8 48.04893 5 203 334
B 262.8 10.84896 5 252 280
C 270.4 25.42243 5 231 294
D 238.2 25.58711 5 202 271
E 237.6 61.79239 5 182 338

Alpha: 0.05 ; DF Error: 12 
Critical Value of Studentized Range: 4.50771 

Minimun Significant Difference: 70.99913 

Treatments with the same letter are not significantly different.

  Yield groups
C 270.4      a
A 269.8      a
B 262.8      a
D 238.2      a
E 237.6      a
out2
$statistics
   MSerror Df   Mean       CV      MSD
  1240.407 12 255.76 13.77049 70.99913

$parameters
   test name.t ntr StudentizedRange alpha
  Tukey    Trt   5          4.50771  0.05

$means
  Yield      std r Min Max Q25 Q50 Q75
A 269.8 48.04893 5 203 334 250 279 283
B 262.8 10.84896 5 252 280 257 259 266
C 270.4 25.42243 5 231 294 260 280 287
D 238.2 25.58711 5 202 271 227 245 246
E 237.6 61.79239 5 182 338 193 230 245

$comparison
NULL

$groups
  Yield groups
C 270.4      a
A 269.8      a
B 262.8      a
D 238.2      a
E 237.6      a

attr(,"class")
[1] "group"

Additional Illustration of Latin Square Design

Consider the data below

knitr::include_graphics("lsqdata.png")

The data are arranged in a 4 x 4 Latin square design and are entered as shown below

Yield_t_ha <- c(1.640,1.210,1.410,1.345,
           1.475,1.185,1.400,1.290,
           1.670,0.710,1.665,1.180,
           1.565,1.290,1.655,0.660
           )

Rows<- factor(x = rep(x = 1:4, each = 4))
Columns <- factor(x = rep(x = 1:4, times = 4))
Variety <- c("B","D","C","A",
         "C","A","D","B",
         "A","C","B","D",
         "D","B","A","C"
         )

Data Frame

df2 <- data.frame(Rows, Columns, Variety, Yield_t_ha)
head(df2,10)
   Rows Columns Variety Yield_t_ha
1     1       1       B      1.640
2     1       2       D      1.210
3     1       3       C      1.410
4     1       4       A      1.345
5     2       1       C      1.475
6     2       2       A      1.185
7     2       3       D      1.400
8     2       4       B      1.290
9     3       1       A      1.670
10    3       2       C      0.710

Check the structure of the data set

str(df2)
'data.frame':   16 obs. of  4 variables:
 $ Rows      : Factor w/ 4 levels "1","2","3","4": 1 1 1 1 2 2 2 2 3 3 ...
 $ Columns   : Factor w/ 4 levels "1","2","3","4": 1 2 3 4 1 2 3 4 1 2 ...
 $ Variety   : chr  "B" "D" "C" "A" ...
 $ Yield_t_ha: num  1.64 1.21 1.41 1.34 1.48 ...

Change varieties to factors

df2$Variety <- as.factor(df2$Variety)

Check the structure and header

head(df2,5)
  Rows Columns Variety Yield_t_ha
1    1       1       B      1.640
2    1       2       D      1.210
3    1       3       C      1.410
4    1       4       A      1.345
5    2       1       C      1.475
str(df2)
'data.frame':   16 obs. of  4 variables:
 $ Rows      : Factor w/ 4 levels "1","2","3","4": 1 1 1 1 2 2 2 2 3 3 ...
 $ Columns   : Factor w/ 4 levels "1","2","3","4": 1 2 3 4 1 2 3 4 1 2 ...
 $ Variety   : Factor w/ 4 levels "A","B","C","D": 2 4 3 1 3 1 4 2 1 3 ...
 $ Yield_t_ha: num  1.64 1.21 1.41 1.34 1.48 ...

Display the Matrix

library(reshape2)
data_matrix <- dcast(df2, Rows ~ Columns, value.var = "Yield_t_ha")

# Print the resulting matrix
print(data_matrix)
  Rows     1     2     3     4
1    1 1.640 1.210 1.410 1.345
2    2 1.475 1.185 1.400 1.290
3    3 1.670 0.710 1.665 1.180
4    4 1.565 1.290 1.655 0.660

Display the Matrix with the Treatment Label in Brackets

# Create a function to format the matrix cells
format_cell <- function(Variety, Yield_t_ha) {
  paste0("[", Variety, "]", Yield_t_ha)
}

# Apply the format function to each cell
df2$FormattedCell <- mapply(format_cell, df2$Variety, df2$Yield_t_ha)

# Reshape the data to create a matrix
data_matrix <- dcast(df2, Rows ~ Columns, value.var = "FormattedCell")

# Print the resulting matrix
print(data_matrix)
  Rows        1        2        3        4
1    1  [B]1.64  [D]1.21  [C]1.41 [A]1.345
2    2 [C]1.475 [A]1.185   [D]1.4  [B]1.29
3    3  [A]1.67  [C]0.71 [B]1.665  [D]1.18
4    4 [D]1.565  [B]1.29 [A]1.655  [C]0.66

Estimate the Model

analysis <- lm(formula = Yield_t_ha~Rows+Columns+Variety, data = df2)
anova(analysis)
Analysis of Variance Table

Response: Yield_t_ha
          Df  Sum Sq  Mean Sq F value   Pr(>F)   
Rows       3 0.02811 0.009369  0.4424 0.731298   
Columns    3 0.82136 0.273785 12.9284 0.004988 **
Variety    3 0.43492 0.144973  6.8457 0.023032 * 
Residuals  6 0.12706 0.021177                    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Mean Comparison Test

Use the Bonferroni adjustment

library(agricolae)
rslt <- LSD.test(analysis, "Variety", p.adj = "bonferroni")
rslt
$statistics
     MSerror Df     Mean       CV  t.value       MSD
  0.02117708  6 1.334375 10.90574 3.862991 0.3975042

$parameters
        test  p.ajusted  name.t ntr alpha
  Fisher-LSD bonferroni Variety   4  0.05

$means
  Yield_t_ha       std r       LCL      UCL   Min   Max    Q25   Q50     Q75
A    1.46375 0.2386900 4 1.2857084 1.641792 1.185 1.670 1.3050 1.500 1.65875
B    1.47125 0.2095382 4 1.2932084 1.649292 1.290 1.665 1.2900 1.465 1.64625
C    1.06375 0.4386224 4 0.8857084 1.241792 0.660 1.475 0.6975 1.060 1.42625
D    1.33875 0.1795538 4 1.1607084 1.516792 1.180 1.565 1.2025 1.305 1.44125

$comparison
NULL

$groups
  Yield_t_ha groups
B    1.47125      a
A    1.46375      a
D    1.33875     ab
C    1.06375      b

attr(,"class")
[1] "group"

Mean Differences Across Treatments

rslt1<- HSD.test(analysis, "Variety", group = TRUE, console = TRUE, main = "Yield_t_ha")

Study: Yield_t_ha

HSD Test for Yield_t_ha 

Mean Square Error:  0.02117708 

Variety,  means

  Yield_t_ha       std r   Min   Max
A    1.46375 0.2386900 4 1.185 1.670
B    1.47125 0.2095382 4 1.290 1.665
C    1.06375 0.4386224 4 0.660 1.475
D    1.33875 0.1795538 4 1.180 1.565

Alpha: 0.05 ; DF Error: 6 
Critical Value of Studentized Range: 4.895599 

Minimun Significant Difference: 0.3562123 

Treatments with the same letter are not significantly different.

  Yield_t_ha groups
B    1.47125      a
A    1.46375      a
D    1.33875     ab
C    1.06375      b
rslt1
$statistics
     MSerror Df     Mean       CV       MSD
  0.02117708  6 1.334375 10.90574 0.3562123

$parameters
   test  name.t ntr StudentizedRange alpha
  Tukey Variety   4         4.895599  0.05

$means
  Yield_t_ha       std r   Min   Max    Q25   Q50     Q75
A    1.46375 0.2386900 4 1.185 1.670 1.3050 1.500 1.65875
B    1.47125 0.2095382 4 1.290 1.665 1.2900 1.465 1.64625
C    1.06375 0.4386224 4 0.660 1.475 0.6975 1.060 1.42625
D    1.33875 0.1795538 4 1.180 1.565 1.2025 1.305 1.44125

$comparison
NULL

$groups
  Yield_t_ha groups
B    1.47125      a
A    1.46375      a
D    1.33875     ab
C    1.06375      b

attr(,"class")
[1] "group"

Mean Differences Across Columns

rslt2<- HSD.test(analysis, "Columns", group = TRUE, console = TRUE, main = "Yield_t_ha")

Study: Yield_t_ha

HSD Test for Yield_t_ha 

Mean Square Error:  0.02117708 

Columns,  means

  Yield_t_ha        std r   Min   Max
1    1.58750 0.08703448 4 1.475 1.670
2    1.09875 0.26300745 4 0.710 1.290
3    1.53250 0.14733748 4 1.400 1.665
4    1.11875 0.31343194 4 0.660 1.345

Alpha: 0.05 ; DF Error: 6 
Critical Value of Studentized Range: 4.895599 

Minimun Significant Difference: 0.3562123 

Treatments with the same letter are not significantly different.

  Yield_t_ha groups
1    1.58750      a
3    1.53250      a
4    1.11875      b
2    1.09875      b
rslt2
$statistics
     MSerror Df     Mean       CV       MSD
  0.02117708  6 1.334375 10.90574 0.3562123

$parameters
   test  name.t ntr StudentizedRange alpha
  Tukey Columns   4         4.895599  0.05

$means
  Yield_t_ha        std r   Min   Max     Q25    Q50     Q75
1    1.58750 0.08703448 4 1.475 1.670 1.54250 1.6025 1.64750
2    1.09875 0.26300745 4 0.710 1.290 1.06625 1.1975 1.23000
3    1.53250 0.14733748 4 1.400 1.665 1.40750 1.5325 1.65750
4    1.11875 0.31343194 4 0.660 1.345 1.05000 1.2350 1.30375

$comparison
NULL

$groups
  Yield_t_ha groups
1    1.58750      a
3    1.53250      a
4    1.11875      b
2    1.09875      b

attr(,"class")
[1] "group"

Mean Difference Across Rows

rslt3 <- HSD.test(analysis, "Rows", group = TRUE, console = TRUE, main = "Yield_t_ha")

Study: Yield_t_ha

HSD Test for Yield_t_ha 

Mean Square Error:  0.02117708 

Rows,  means

  Yield_t_ha       std r   Min   Max
1    1.40125 0.1796466 4 1.210 1.640
2    1.33750 0.1269186 4 1.185 1.475
3    1.30625 0.4591546 4 0.710 1.670
4    1.29250 0.4493421 4 0.660 1.655

Alpha: 0.05 ; DF Error: 6 
Critical Value of Studentized Range: 4.895599 

Minimun Significant Difference: 0.3562123 

Treatments with the same letter are not significantly different.

  Yield_t_ha groups
1    1.40125      a
2    1.33750      a
3    1.30625      a
4    1.29250      a
rslt3
$statistics
     MSerror Df     Mean       CV       MSD
  0.02117708  6 1.334375 10.90574 0.3562123

$parameters
   test name.t ntr StudentizedRange alpha
  Tukey   Rows   4         4.895599  0.05

$means
  Yield_t_ha       std r   Min   Max     Q25    Q50     Q75
1    1.40125 0.1796466 4 1.210 1.640 1.31125 1.3775 1.46750
2    1.33750 0.1269186 4 1.185 1.475 1.26375 1.3450 1.41875
3    1.30625 0.4591546 4 0.710 1.670 1.06250 1.4225 1.66625
4    1.29250 0.4493421 4 0.660 1.655 1.13250 1.4275 1.58750

$comparison
NULL

$groups
  Yield_t_ha groups
1    1.40125      a
2    1.33750      a
3    1.30625      a
4    1.29250      a

attr(,"class")
[1] "group"

Lattice Square Design

A balanced square lattice design is similar to a balanced incomplete block design with k^2 treatments arranged in k(k + 1) blocks with k runs per block and r = k + 1 replications. So, each replication has k blocks and contains every treatment.

Balanced incomplete block (BIB) designs are the most efficient designs in the class of binary incomplete block designs, but they usually require a large number of replications and are not available for all combinations of parameter values. To overcome these difficulties, Yates (1936) introduced a class of designs known as quasi-factorials or lattice designs. The characteristic features of these designs are that the number of treatments is a perfect square and the block size is the square root of this number. Moreover, the incomplete blocks are combined in groups to form separate replications. The number of replications is flexible, which makes these designs useful when a large number of treatments is to be tested. If the design has two replications of the treatments it is called a simple lattice; if it has three replications it is called a triple lattice, and so on. In general, if the number of replications is m, it is called an m-ple lattice. Square lattice designs can be constructed as follows:

Method of Construction

Let there be v = s^2 treatments, numbered 1, 2, …, s^2. Arrange these treatment numbers in the form of an s x s square, which we call the standard array. The contents of each of the s rows of this array are taken to form a block, giving s blocks each of size s. Similarly, the contents of the columns of this array are taken to form blocks, giving another set of s blocks that make up another complete replication. Next, an s x s Latin square is taken and superimposed on the standard array of treatment numbers. The treatment numbers that fall on a particular symbol of the Latin square are taken to form a block, so we obtain s blocks corresponding to the s symbols of the Latin square. Another Latin square, orthogonal to the previous one, is then taken, and a further set of s blocks is obtained from it in the same manner. The process is repeated to obtain further replications, continuing until m - 2 (at most s - 1, if s is a prime or a power of a prime) mutually orthogonal Latin squares have been used. When the complete set of (s - 1) mutually orthogonal Latin squares (if such a set exists) is used, the design becomes a balanced (s + 1)-lattice. A balanced lattice is a BIB design.
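
As an illustration of this construction (a sketch only, not part of the analysis that follows; the object names trt and lay are ours), the design.lattice() function in agricolae generates a randomized square lattice layout for a perfect-square number of treatments, with r = 2 for a simple lattice or r = 3 for a triple lattice:

library(agricolae)
trt <- paste0("T", 1:9)                        # k^2 = 9 treatments, so k = 3
lay <- design.lattice(trt, r = 3, serie = 2, seed = 123)
lay$sketch                                     # blocks within each replication
head(lay$book)                                 # field book: plot, replication, block, treatment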

Partially Balanced Lattice Square Design

Load the data

ALdata<-read.csv(file='https://gist.githubusercontent.com/ikmalmal/1819fc03dd0fb9219c3f42d62693be35/raw/e3f9120f2629d28d68fcb9a600eae29a46a1d288/ALdata.csv')
head(ALdata,10)
   SAMPLE Genotype BLOCK QTLclasses ENV REP       GY
1     112   GEN112     1          B  NS   1  8213.54
2     112   GEN112     1          B  NS   2  8762.89
3     113   GEN113     1          E  NS   1  8770.67
4     113   GEN113     1          E  NS   2 10724.80
5     114   GEN114     1          E  NS   1 12199.36
6     114   GEN114     1          E  NS   2 12127.68
7     116   GEN116     1          F  NS   1  7601.45
8     116   GEN116     1          F  NS   2  6410.28
9     117   GEN117     1          E  NS   1  4814.72
10    117   GEN117     1          E  NS   2  5195.84

Check the structure of the data set

str(ALdata)
'data.frame':   182 obs. of  7 variables:
 $ SAMPLE    : int  112 112 113 113 114 114 116 116 117 117 ...
 $ Genotype  : chr  "GEN112" "GEN112" "GEN113" "GEN113" ...
 $ BLOCK     : int  1 1 1 1 1 1 1 1 1 1 ...
 $ QTLclasses: chr  "B" "B" "E" "E" ...
 $ ENV       : chr  "NS" "NS" "NS" "NS" ...
 $ REP       : int  1 2 1 2 1 2 1 2 1 2 ...
 $ GY        : num  8214 8763 8771 10725 12199 ...

Default command line for AL design analysis

PBIB.test(block, trt, replication, y, k, method = c("REML", "ML", "VC"), test = c("lsd", "tukey"), alpha = 0.05, console = TRUE, group = TRUE)

Here REML is restricted maximum likelihood, ML is maximum likelihood, and VC is the variance-components method. block is the block, trt is the genotype/treatment, replication is the replication, y is the response variable/trait, and k is the block size. BLOCK, Genotype and REP need to be converted to factors first.

Convert the Predictor Variables to Factors

ALdata$REP<-as.factor(ALdata$REP)
ALdata$BLOCK<-as.factor(ALdata$BLOCK)
ALdata$Genotype<-as.factor(ALdata$Genotype)

Attach the data

attach(ALdata)
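
As an alternative to attach() (a sketch only; the analysis below uses the attached columns, and the name AL1_alt is just for illustration), the columns can be passed to PBIB.test() explicitly so the search path is left untouched:

AL1_alt <- PBIB.test(ALdata$BLOCK, ALdata$Genotype, ALdata$REP, ALdata$GY, 7,
                     method = "REML", test = "lsd", alpha = 0.05,
                     console = FALSE, group = TRUE)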

Fit the model by REML, using the data above and the command below

library(agricolae)
AL1 <- PBIB.test(BLOCK, Genotype, REP, GY, 7, method = "REML", test = "lsd", alpha = 0.05, console = TRUE, group = TRUE)

ANALYSIS PBIB:  GY 

Class level information
BLOCK : 26 
Genotype : 91

Number of observations:  182 

Estimation Method:  Residual (restricted) maximum likelihood 

Parameter Estimates
              Variance
BLOCK:REP 9.526432e-04
REP       2.809439e-03
Residual  7.604984e+05

                      Fit Statistics
AIC                        1741.6206
BIC                        1977.6414
-2 Res Log Likelihood      -776.8103

Analysis of Variance Table

Response: GY
          Df    Sum Sq  Mean Sq F value    Pr(>F)    
Genotype  90 977038152 10855979  14.275 < 2.2e-16 ***
Residuals 84  63881866   760498                      
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Coefficient of variation: 10.9 %
GY Means: 7967.263 

Parameters PBIB
            .
Genotype   91
BLOCK size  7
BLOCK/REP  13
REP         2

Efficiency factor 0.7894737 

Comparison test lsd 

Treatments with the same letter are not significantly different.

             GY.adj         groups
GEN206    13892.800              a
GEN249    13190.615             ab
GEN230    13015.465            abc
GEN234    12910.130            abc
GEN191    12732.800           abcd
GEN114    12163.520          abcde
GEN269    12059.415           bcde
GEN157    11958.080           bcde
GEN190    11442.775           cdef
GEN124    11155.255           defg
GEN154    11101.120           defg
GEN239    11083.575           defg
GEN128    11061.705           defg
GEN266    10747.625            efg
GEN192    10607.470           efgh
GEN213    10201.070           fghi
GEN251    10113.280          fghij
GEN229     9761.065         fghijk
NMR152     9756.000         fghijk
GEN113     9747.735         fghijk
UKM5       9625.600         ghijkl
GEN159     9617.035         ghijkl
GEN210     9545.865        ghijklm
GEN219     8996.335        hijklmn
GEN176     8948.160       hijklmno
GEN207     8861.335        ijklmno
GEN150     8657.070       ijklmnop
GEN133     8645.200       ijklmnop
GEN163     8630.745       ijklmnop
GEN188     8579.465       ijklmnop
GEN263     8540.845       ijklmnop
GEN132     8509.120       ijklmnop
GEN112     8488.215       ijklmnop
GEN250     8442.135        jklmnop
GEN168     8436.615        jklmnop
GEN118     8381.440       jklmnopq
GEN122     8361.865        klmnopq
GEN262     8182.880       klmnopqr
GEN247     8131.520       klmnopqr
GEN172     8077.760       klmnopqr
GEN268     8059.155       klmnopqr
GEN261     7956.960       lmnopqrs
GEN164     7926.745       lmnopqrs
GEN240     7869.760       mnopqrst
GEN231     7849.200       mnopqrst
GEN197     7747.945       nopqrstu
GEN165     7720.935       nopqrstu
GEN218     7709.065       nopqrstu
GEN193     7617.335      nopqrstuv
GEN140     7582.200      nopqrstuv
GEN147     7565.385      nopqrstuv
GEN158     7495.725     nopqrstuvw
GEN216     7384.265    nopqrstuvwx
GEN227     7219.945    opqrstuvwxy
GEN228     7104.400    pqrstuvwxyz
GEN167     7102.135    pqrstuvwxyz
GEN152     7073.065   pqrstuvwxyzA
GEN155     7061.515   pqrstuvwxyzA
GEN243     7028.800   pqrstuvwxyzA
GEN116     7005.865  pqrstuvwxyzAB
IR64-Sub1  6938.880 pqrstuvwxyzABC
GEN134     6682.400 qrstuvwxyzABCD
GEN238     6675.520 qrstuvwxyzABCD
GEN209     6658.135 qrstuvwxyzABCD
GEN236     6517.600 rstuvwxyzABCDE
GEN267     6275.200  stuvwxyzABCDE
GEN258     6153.335  tuvwxyzABCDEF
GEN166     6136.410  tuvwxyzABCDEF
GEN170     6037.120  uvwxyzABCDEFG
MR219      5959.815   vwxyzABCDEFG
GEN223     5946.560   vwxyzABCDEFG
GEN160     5817.095    wxyzABCDEFG
GEN148     5781.920    wxyzABCDEFG
GEN264     5778.745    wxyzABCDEFG
GEN146     5686.775     xyzABCDEFG
GEN204     5544.535      yzABCDEFG
GEN127     5521.580      yzABCDEFG
GEN222     5509.865      yzABCDEFG
GEN187     5451.465       zABCDEFG
GEN270     5355.015        ABCDEFG
GEN125     5294.080        BCDEFGH
GEN226     5232.000         CDEFGH
GEN138     5219.095         CDEFGH
GEN242     5060.960          DEFGH
GEN117     5005.280          DEFGH
GEN220     4865.600           EFGH
GEN129     4535.305            FGH
IR84984    4519.040            FGH
GEN185     4457.600            FGH
IR81896    4346.400             GH
GEN215     3616.535              H

<<< to see the objects: means, comparison and groups. >>>

Use VC and Tukey test

AL2 <- PBIB.test(BLOCK, Genotype, REP, GY, 7, method = "VC", test = "tukey", alpha = 0.05, console = TRUE, group = TRUE)

ANALYSIS PBIB:  GY 

Class level information
BLOCK : 26 
Genotype : 91

Number of observations:  182 

Estimation Method:  Variances component model 

    Fit Statistics
AIC       3040.512
BIC       3357.708

Analysis of Variance Table

Response: GY
               Df    Sum Sq  Mean Sq F value Pr(>F)    
REP             1    232147   232147  0.3017 0.5843    
Genotype.unadj 90 977038217 10855980 14.1077 <2e-16 ***
BLOCK/REP       6   4334433   722406  0.9388 0.4718    
Residual       84  64638779   769509                   
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Coefficient of variation: 11 %
GY Means: 7967.263 

Parameters PBIB
            .
Genotype   91
BLOCK size  7
BLOCK/REP  13
REP         2

Efficiency factor 0.7894737 

Comparison test tukey 

Treatments with the same letter are not significantly different.

             GY.adj           groups
GEN206    13892.800                a
GEN249    13190.615               ab
GEN230    13015.465              abc
GEN234    12910.130             abcd
GEN191    12732.800            abcde
GEN114    12163.520           abcdef
GEN269    12059.415          abcdefg
GEN157    11958.080         abcdefgh
GEN190    11442.775        abcdefghi
GEN124    11155.255       abcdefghij
GEN154    11101.120      abcdefghijk
GEN239    11083.575      abcdefghijk
GEN128    11061.705      abcdefghijk
GEN266    10747.625     abcdefghijkl
GEN192    10607.470    abcdefghijklm
GEN213    10201.070   abcdefghijklmn
GEN251    10113.280   abcdefghijklmn
GEN229     9761.065  abcdefghijklmno
NMR152     9756.000  abcdefghijklmno
GEN113     9747.735  abcdefghijklmno
UKM5       9625.600  abcdefghijklmno
GEN159     9617.035  abcdefghijklmno
GEN210     9545.865  abcdefghijklmno
GEN219     8996.335 abcdefghijklmnop
GEN176     8948.160 abcdefghijklmnop
GEN207     8861.335 abcdefghijklmnop
GEN150     8657.070 abcdefghijklmnop
GEN133     8645.200 abcdefghijklmnop
GEN163     8630.745 abcdefghijklmnop
GEN188     8579.465 abcdefghijklmnop
GEN263     8540.845 abcdefghijklmnop
GEN132     8509.120 abcdefghijklmnop
GEN112     8488.215 abcdefghijklmnop
GEN250     8442.135  bcdefghijklmnop
GEN168     8436.615  bcdefghijklmnop
GEN118     8381.440  bcdefghijklmnop
GEN122     8361.865  bcdefghijklmnop
GEN262     8182.880  bcdefghijklmnop
GEN247     8131.520  bcdefghijklmnop
GEN172     8077.760  bcdefghijklmnop
GEN268     8059.155  bcdefghijklmnop
GEN261     7956.960  bcdefghijklmnop
GEN164     7926.745  bcdefghijklmnop
GEN240     7869.760  bcdefghijklmnop
GEN231     7849.200  bcdefghijklmnop
GEN197     7747.945  bcdefghijklmnop
GEN165     7720.935   cdefghijklmnop
GEN218     7709.065   cdefghijklmnop
GEN193     7617.335   cdefghijklmnop
GEN140     7582.200   cdefghijklmnop
GEN147     7565.385    defghijklmnop
GEN158     7495.725    defghijklmnop
GEN216     7384.265     efghijklmnop
GEN227     7219.945      fghijklmnop
GEN228     7104.400      fghijklmnop
GEN167     7102.135      fghijklmnop
GEN152     7073.065      fghijklmnop
GEN155     7061.515      fghijklmnop
GEN243     7028.800      fghijklmnop
GEN116     7005.865      fghijklmnop
IR64-Sub1  6938.880      fghijklmnop
GEN134     6682.400       ghijklmnop
GEN238     6675.520       ghijklmnop
GEN209     6658.135       ghijklmnop
GEN236     6517.600        hijklmnop
GEN267     6275.200         ijklmnop
GEN258     6153.335         ijklmnop
GEN166     6136.410         ijklmnop
GEN170     6037.120         ijklmnop
MR219      5959.815          jklmnop
GEN223     5946.560          jklmnop
GEN160     5817.095          jklmnop
GEN148     5781.920          jklmnop
GEN264     5778.745          jklmnop
GEN146     5686.775           klmnop
GEN204     5544.535            lmnop
GEN127     5521.580            lmnop
GEN222     5509.865            lmnop
GEN187     5451.465            lmnop
GEN270     5355.015            lmnop
GEN125     5294.080             mnop
GEN226     5232.000             mnop
GEN138     5219.095             mnop
GEN242     5060.960              nop
GEN117     5005.280              nop
GEN220     4865.600              nop
GEN129     4535.305               op
IR84984    4519.040               op
GEN185     4457.600               op
IR81896    4346.400               op
GEN215     3616.535                p

<<< to see the objects: means, comparison and groups. >>>

Extract Specific Components from the Estimated Model

Extract the Analysis of Variance

AL2$ANOVA
Analysis of Variance Table

Response: GY
               Df    Sum Sq  Mean Sq F value Pr(>F)    
REP             1    232147   232147  0.3017 0.5843    
Genotype.unadj 90 977038217 10855980 14.1077 <2e-16 ***
BLOCK/REP       6   4334433   722406  0.9388 0.4718    
Residual       84  64638779   769509                   
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Extract the BIC and AIC

AL2$Fstat
    Fit Statistics
AIC       3040.512
BIC       3357.708

Extract the Statistical Summaries of the Study Variable

AL2$means
                 GY    GY.adj      SE r         std      Min      Max       Q25
GEN112     8488.215  8488.215 612.082 2  388.449110  8213.54  8762.89  8350.878
GEN113     9747.735  9747.735 612.082 2 1381.778574  8770.67 10724.80  9259.202
GEN114    12163.520 12163.520 612.082 2   50.685414 12127.68 12199.36 12145.600
GEN116     7005.865  7005.865 612.082 2  842.284385  6410.28  7601.45  6708.073
GEN117     5005.280  5005.280 612.082 2  269.492536  4814.72  5195.84  4910.000
GEN118     8381.440  8381.440 612.082 2 1256.726740  7492.80  9270.08  7937.120
GEN122     8361.865  8361.865 612.082 2  491.036162  8014.65  8709.08  8188.257
GEN124    11155.255 11155.255 612.082 2 2635.266765  9291.84 13018.67 10223.548
GEN125     5294.080  5294.080 612.082 2  150.670313  5187.54  5400.62  5240.810
GEN127     5521.580  5521.580 612.082 2  247.699505  5346.43  5696.73  5434.005
GEN128    11061.705 11061.705 612.082 2 1165.050346 10237.89 11885.52 10649.798
GEN129     4535.305  4535.305 612.082 2  450.815928  4216.53  4854.08  4375.917
GEN132     8509.120  8509.120 612.082 2 1735.070335  7282.24  9736.00  7895.680
GEN133     8645.200  8645.200 612.082 2  537.401154  8265.20  9025.20  8455.200
GEN134     6682.400  6682.400 612.082 2  827.032091  6097.60  7267.20  6390.000
GEN138     5219.095  5219.095 612.082 2 1113.869957  4431.47  6006.72  4825.283
GEN140     7582.200  7582.200 612.082 2  552.391817  7191.60  7972.80  7386.900
GEN146     5686.775  5686.775 612.082 2  487.444060  5342.10  6031.45  5514.438
GEN147     7565.385  7565.385 612.082 2  391.532096  7288.53  7842.24  7426.958
GEN148     5781.920  5781.920 612.082 2  846.944218  5183.04  6380.80  5482.480
GEN150     8657.070  8657.070 612.082 2 2120.188973  7157.87 10156.27  7907.470
GEN152     7073.065  7073.065 612.082 2  165.936748  6955.73  7190.40  7014.397
GEN154    11101.120 11101.120 612.082 2  309.543065 10882.24 11320.00 10991.680
GEN155     7061.515  7061.515 612.082 2  240.324382  6891.58  7231.45  6976.547
GEN157    11958.080 11958.080 612.082 2 1840.061550 10656.96 13259.20 11307.520
GEN158     7495.725  7495.725 612.082 2  796.421439  6932.57  8058.88  7214.148
GEN159     9617.035  9617.035 612.082 2  383.739779  9345.69  9888.38  9481.362
GEN160     5817.095  5817.095 612.082 2  760.019582  5279.68  6354.51  5548.388
GEN163     8630.745  8630.745 612.082 2 1815.588585  7346.93  9914.56  7988.837
GEN164     7926.745  7926.745 612.082 2  480.570982  7586.93  8266.56  7756.837
GEN165     7720.935  7720.935 612.082 2 2213.901834  6155.47  9286.40  6938.202
GEN166     6136.410  6136.410 612.082 2  521.462967  5767.68  6505.14  5952.045
GEN167     7102.135  7102.135 612.082 2 1867.518507  5781.60  8422.67  6441.868
GEN168     8436.615  8436.615 612.082 2  737.010327  7915.47  8957.76  8176.042
GEN170     6037.120  6037.120 612.082 2   70.144993  5987.52  6086.72  6012.320
GEN172     8077.760  8077.760 612.082 2 2605.773341  6235.20  9920.32  7156.480
GEN176     8948.160  8948.160 612.082 2 2447.381423  7217.60 10678.72  8082.880
GEN185     4457.600  4457.600 612.082 2  255.237264  4277.12  4638.08  4367.360
GEN187     5451.465  5451.465 612.082 2  486.355115  5107.56  5795.37  5279.512
GEN188     8579.465  8579.465 612.082 2  364.294343  8321.87  8837.06  8450.667
GEN190    11442.775 11442.775 612.082 2  802.502557 10875.32 12010.23 11159.048
GEN191    12732.800 12732.800 612.082 2   70.894526 12682.67 12782.93 12707.735
GEN192    10607.470 10607.470 612.082 2  221.974961 10450.51 10764.43 10528.990
GEN193     7617.335  7617.335 612.082 2  418.289016  7321.56  7913.11  7469.448
GEN197     7747.945  7747.945 612.082 2  368.395562  7487.45  8008.44  7617.697
GEN204     5544.535  5544.535 612.082 2  473.669620  5209.60  5879.47  5377.068
GEN206    13892.800 13892.800 612.082 2  156.129177 13782.40 14003.20 13837.600
GEN207     8861.335  8861.335 612.082 2  144.073007  8759.46  8963.21  8810.397
GEN209     6658.135  6658.135 612.082 2    6.625591  6653.45  6662.82  6655.792
GEN210     9545.865  9545.865 612.082 2    5.239661  9542.16  9549.57  9544.013
GEN213    10201.070 10201.070 612.082 2  738.756881  9678.69 10723.45  9939.880
GEN215     3616.535  3616.535 612.082 2   69.388388  3567.47  3665.60  3592.003
GEN216     7384.265  7384.265 612.082 2  216.141330  7231.43  7537.10  7307.847
GEN218     7709.065  7709.065 612.082 2  705.600644  7210.13  8208.00  7459.597
GEN219     8996.335  8996.335 612.082 2   28.121637  8976.45  9016.22  8986.392
GEN220     4865.600  4865.600 612.082 2 1226.406001  3998.40  5732.80  4432.000
GEN222     5509.865  5509.865 612.082 2 1496.428868  4451.73  6568.00  4980.797
GEN223     5946.560  5946.560 612.082 2  389.375420  5671.23  6221.89  5808.895
GEN226     5232.000  5232.000 612.082 2  132.752227  5138.13  5325.87  5185.065
GEN227     7219.945  7219.945 612.082 2   16.270527  7208.44  7231.45  7214.193
GEN228     7104.400  7104.400 612.082 2  183.777052  6974.45  7234.35  7039.425
GEN229     9761.065  9761.065 612.082 2  127.484282  9670.92  9851.21  9715.993
GEN230    13015.465 13015.465 612.082 2  912.641509 12370.13 13660.80 12692.798
GEN231     7849.200  7849.200 612.082 2  303.051824  7634.91  8063.49  7742.055
GEN234    12910.130 12910.130 612.082 2  390.322943 12634.13 13186.13 12772.130
GEN236     6517.600  6517.600 612.082 2 1553.372177  5419.20  7616.00  5968.400
GEN238     6675.520  6675.520 612.082 2  453.623142  6354.76  6996.28  6515.140
GEN239    11083.575 11083.575 612.082 2  151.214785 10976.65 11190.50 11030.112
GEN240     7869.760  7869.760 612.082 2   33.120882  7846.34  7893.18  7858.050
GEN242     5060.960  5060.960 612.082 2   95.841253  4993.19  5128.73  5027.075
GEN243     7028.800  7028.800 612.082 2  295.330218  6819.97  7237.63  6924.385
GEN247     8131.520  8131.520 612.082 2  143.005275  8030.40  8232.64  8080.960
GEN249    13190.615 13190.615 612.082 2  303.030611 12976.34 13404.89 13083.478
GEN250     8442.135  8442.135 612.082 2  163.603296  8326.45  8557.82  8384.292
GEN251    10113.280 10113.280 612.082 2  180.934483  9985.34 10241.22 10049.310
GEN258     6153.335  6153.335 612.082 2  237.750513  5985.22  6321.45  6069.278
GEN261     7956.960  7956.960 612.082 2  427.997593  7654.32  8259.60  7805.640
GEN262     8182.880  8182.880 612.082 2  708.464426  7681.92  8683.84  7932.400
GEN263     8540.845  8540.845 612.082 2  308.121780  8322.97  8758.72  8431.907
GEN264     5778.745  5778.745 612.082 2   91.153135  5714.29  5843.20  5746.517
GEN266    10747.625 10747.625 612.082 2  133.183562 10653.45 10841.80 10700.538
GEN267     6275.200  6275.200 612.082 2  224.011428  6116.80  6433.60  6196.000
GEN268     8059.155  8059.155 612.082 2   92.383501  7993.83  8124.48  8026.492
GEN269    12059.415 12059.415 612.082 2  533.801980 11681.96 12436.87 11870.688
GEN270     5355.015  5355.015 612.082 2 1040.147004  4619.52  6090.51  4987.267
IR64-Sub1  6938.880  6938.880 612.082 2  217.223203  6785.28  7092.48  6862.080
IR81896    4346.400  4346.400 612.082 2   59.962655  4304.00  4388.80  4325.200
IR84984    4519.040  4519.040 612.082 2   90.283394  4455.20  4582.88  4487.120
MR219      5959.815  5959.815 612.082 2 1008.539331  5246.67  6672.96  5603.243
NMR152     9756.000  9756.000 612.082 2  406.162135  9468.80 10043.20  9612.400
UKM5       9625.600  9625.600 612.082 2  295.061518  9416.96  9834.24  9521.280
                Q50       Q75
GEN112     8488.215  8625.552
GEN113     9747.735 10236.267
GEN114    12163.520 12181.440
GEN116     7005.865  7303.657
GEN117     5005.280  5100.560
GEN118     8381.440  8825.760
GEN122     8361.865  8535.472
GEN124    11155.255 12086.963
GEN125     5294.080  5347.350
GEN127     5521.580  5609.155
GEN128    11061.705 11473.612
GEN129     4535.305  4694.693
GEN132     8509.120  9122.560
GEN133     8645.200  8835.200
GEN134     6682.400  6974.800
GEN138     5219.095  5612.908
GEN140     7582.200  7777.500
GEN146     5686.775  5859.112
GEN147     7565.385  7703.812
GEN148     5781.920  6081.360
GEN150     8657.070  9406.670
GEN152     7073.065  7131.732
GEN154    11101.120 11210.560
GEN155     7061.515  7146.483
GEN157    11958.080 12608.640
GEN158     7495.725  7777.302
GEN159     9617.035  9752.708
GEN160     5817.095  6085.802
GEN163     8630.745  9272.653
GEN164     7926.745  8096.653
GEN165     7720.935  8503.667
GEN166     6136.410  6320.775
GEN167     7102.135  7762.403
GEN168     8436.615  8697.188
GEN170     6037.120  6061.920
GEN172     8077.760  8999.040
GEN176     8948.160  9813.440
GEN185     4457.600  4547.840
GEN187     5451.465  5623.418
GEN188     8579.465  8708.263
GEN190    11442.775 11726.503
GEN191    12732.800 12757.865
GEN192    10607.470 10685.950
GEN193     7617.335  7765.222
GEN197     7747.945  7878.193
GEN204     5544.535  5712.003
GEN206    13892.800 13948.000
GEN207     8861.335  8912.272
GEN209     6658.135  6660.477
GEN210     9545.865  9547.717
GEN213    10201.070 10462.260
GEN215     3616.535  3641.067
GEN216     7384.265  7460.683
GEN218     7709.065  7958.533
GEN219     8996.335  9006.278
GEN220     4865.600  5299.200
GEN222     5509.865  6038.932
GEN223     5946.560  6084.225
GEN226     5232.000  5278.935
GEN227     7219.945  7225.697
GEN228     7104.400  7169.375
GEN229     9761.065  9806.137
GEN230    13015.465 13338.132
GEN231     7849.200  7956.345
GEN234    12910.130 13048.130
GEN236     6517.600  7066.800
GEN238     6675.520  6835.900
GEN239    11083.575 11137.038
GEN240     7869.760  7881.470
GEN242     5060.960  5094.845
GEN243     7028.800  7133.215
GEN247     8131.520  8182.080
GEN249    13190.615 13297.752
GEN250     8442.135  8499.978
GEN251    10113.280 10177.250
GEN258     6153.335  6237.392
GEN261     7956.960  8108.280
GEN262     8182.880  8433.360
GEN263     8540.845  8649.782
GEN264     5778.745  5810.972
GEN266    10747.625 10794.712
GEN267     6275.200  6354.400
GEN268     8059.155  8091.817
GEN269    12059.415 12248.142
GEN270     5355.015  5722.762
IR64-Sub1  6938.880  7015.680
IR81896    4346.400  4367.600
IR84984    4519.040  4550.960
MR219      5959.815  6316.388
NMR152     9756.000  9899.600
UKM5       9625.600  9729.920

Extract the Estimation Method (REML, VC or ML)

AL2$method
[1] "Variances component model"

Extract the Design Parameters

AL2$parameters
        test   name.t treatments blockSize blocks r alpha
  PBIB-tukey Genotype         91         7     13 2  0.05

Extract the Variance Covariance Matrix

VCM <- AL2$vartau
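
The diagonal of this matrix holds the variances of the adjusted genotype means and the off-diagonal entries hold their covariances, so approximate standard errors can be recovered from it. A quick sketch, assuming the VCM object created above (se.mean is just an illustrative name):

dim(VCM)                      # one row and one column per genotype
se.mean <- sqrt(diag(VCM))    # standard error of each adjusted mean
head(se.mean)

# Standard error of the difference between the first two genotypes
sqrt(VCM[1, 1] + VCM[2, 2] - 2 * VCM[1, 2])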

Extract Groups

AL2$groups
             GY.adj           groups
GEN206    13892.800                a
GEN249    13190.615               ab
GEN230    13015.465              abc
GEN234    12910.130             abcd
GEN191    12732.800            abcde
GEN114    12163.520           abcdef
GEN269    12059.415          abcdefg
GEN157    11958.080         abcdefgh
GEN190    11442.775        abcdefghi
GEN124    11155.255       abcdefghij
GEN154    11101.120      abcdefghijk
GEN239    11083.575      abcdefghijk
GEN128    11061.705      abcdefghijk
GEN266    10747.625     abcdefghijkl
GEN192    10607.470    abcdefghijklm
GEN213    10201.070   abcdefghijklmn
GEN251    10113.280   abcdefghijklmn
GEN229     9761.065  abcdefghijklmno
NMR152     9756.000  abcdefghijklmno
GEN113     9747.735  abcdefghijklmno
UKM5       9625.600  abcdefghijklmno
GEN159     9617.035  abcdefghijklmno
GEN210     9545.865  abcdefghijklmno
GEN219     8996.335 abcdefghijklmnop
GEN176     8948.160 abcdefghijklmnop
GEN207     8861.335 abcdefghijklmnop
GEN150     8657.070 abcdefghijklmnop
GEN133     8645.200 abcdefghijklmnop
GEN163     8630.745 abcdefghijklmnop
GEN188     8579.465 abcdefghijklmnop
GEN263     8540.845 abcdefghijklmnop
GEN132     8509.120 abcdefghijklmnop
GEN112     8488.215 abcdefghijklmnop
GEN250     8442.135  bcdefghijklmnop
GEN168     8436.615  bcdefghijklmnop
GEN118     8381.440  bcdefghijklmnop
GEN122     8361.865  bcdefghijklmnop
GEN262     8182.880  bcdefghijklmnop
GEN247     8131.520  bcdefghijklmnop
GEN172     8077.760  bcdefghijklmnop
GEN268     8059.155  bcdefghijklmnop
GEN261     7956.960  bcdefghijklmnop
GEN164     7926.745  bcdefghijklmnop
GEN240     7869.760  bcdefghijklmnop
GEN231     7849.200  bcdefghijklmnop
GEN197     7747.945  bcdefghijklmnop
GEN165     7720.935   cdefghijklmnop
GEN218     7709.065   cdefghijklmnop
GEN193     7617.335   cdefghijklmnop
GEN140     7582.200   cdefghijklmnop
GEN147     7565.385    defghijklmnop
GEN158     7495.725    defghijklmnop
GEN216     7384.265     efghijklmnop
GEN227     7219.945      fghijklmnop
GEN228     7104.400      fghijklmnop
GEN167     7102.135      fghijklmnop
GEN152     7073.065      fghijklmnop
GEN155     7061.515      fghijklmnop
GEN243     7028.800      fghijklmnop
GEN116     7005.865      fghijklmnop
IR64-Sub1  6938.880      fghijklmnop
GEN134     6682.400       ghijklmnop
GEN238     6675.520       ghijklmnop
GEN209     6658.135       ghijklmnop
GEN236     6517.600        hijklmnop
GEN267     6275.200         ijklmnop
GEN258     6153.335         ijklmnop
GEN166     6136.410         ijklmnop
GEN170     6037.120         ijklmnop
MR219      5959.815          jklmnop
GEN223     5946.560          jklmnop
GEN160     5817.095          jklmnop
GEN148     5781.920          jklmnop
GEN264     5778.745          jklmnop
GEN146     5686.775           klmnop
GEN204     5544.535            lmnop
GEN127     5521.580            lmnop
GEN222     5509.865            lmnop
GEN187     5451.465            lmnop
GEN270     5355.015            lmnop
GEN125     5294.080             mnop
GEN226     5232.000             mnop
GEN138     5219.095             mnop
GEN242     5060.960              nop
GEN117     5005.280              nop
GEN220     4865.600              nop
GEN129     4535.305               op
IR84984    4519.040               op
GEN185     4457.600               op
IR81896    4346.400               op
GEN215     3616.535                p

Balanced Lattice Square Design

Lattice designs are a type of incomplete block design. There are several kinds, including balanced and partially balanced lattice designs. In these square lattice designs, the number of units in each block is the square root of the number of treatments, and the incomplete blocks are combined into groups that form separate, complete replications of all treatments.

Analysis

Assume that the design is for k^2 treatments with k + 1 replicates, and that each replication contains k blocks of k units each. The linear model that describes an observation from a balanced lattice design, and on which the analysis is based, is given below.

knitr::include_graphics("lattice.png")

The Analysis of Variance

The analysis of variance table for a balanced lattice design, and the sums of squares that it contains, are computed using the following summary of notation and equations.

knitr::include_graphics("anova.png")

Practical Example

Below is an example of a balanced lattice design with nine treatments: number of treatments (t) = 9, block size (k) = 3, number of replications (r) = 4, and number of blocks (b) = 12.

knitr::include_graphics("lattice data.png")

Data Entry

### Create the replicate factor
rep <- rep(1:4, each = 9)
### Create a block vector
block <- rep(1:12, each = 3)
### Create treatment vector
trt <- c(1,2,3,4,5,6,7,8,9,
         1,4,7,2,5,8,3,6,9,
         1,5,9,2,6,7,3,4,8,
         1,6,8,2,4,9,3,5,7)

### Create the Response Variable
yield <- c(2.20,1.82,2.18,2.05,0.85,1.86,0.73,1.60,1.76,
           1.19,1.20,1.15,2.26,1.07,1.45,2.12,2.03,1.63,
           1.81,1.16,1.11,1.76,2.16,1.80,1.71,1.57,1.13,
           1.77,1.57,1.43,1.50,1.60,1.42,2.04,0.93,1.78)
bld <-data.frame(rep,block,trt,yield)
head(bld,10)
   rep block trt yield
1    1     1   1  2.20
2    1     1   2  1.82
3    1     1   3  2.18
4    1     2   4  2.05
5    1     2   5  0.85
6    1     2   6  1.86
7    1     3   7  0.73
8    1     3   8  1.60
9    1     3   9  1.76
10   2     4   1  1.19

Pass in factors

bld$rep <- as.factor(bld$rep)
bld$block <- as.factor(bld$block)
bld$trt <- as.factor(bld$trt)
head(bld,10)
   rep block trt yield
1    1     1   1  2.20
2    1     1   2  1.82
3    1     1   3  2.18
4    1     2   4  2.05
5    1     2   5  0.85
6    1     2   6  1.86
7    1     3   7  0.73
8    1     3   8  1.60
9    1     3   9  1.76
10   2     4   1  1.19

Specify the value of k and estimate the Lattice Design

model <- PBIB.test(block, trt, rep, yield, k = 3, method = "VC")

<<< to see the objects: means, comparison and groups. >>>
model
$ANOVA
Analysis of Variance Table

Response: yield
          Df Sum Sq Mean Sq F value   Pr(>F)   
rep        3 0.0742 0.02475  0.3191 0.811442   
trt.unadj  8 3.2164 0.40205  5.1837 0.002549 **
block/rep  8 1.4199 0.17749  2.2883 0.075551 . 
Residual  16 1.2410 0.07756                    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

$method
[1] "Variances component model"

$parameters
      test name.t treatments blockSize blocks r alpha
  PBIB-lsd    trt          9         3      3 4  0.05

$statistics
  Efficiency     Mean       CV
        0.75 1.594444 17.46672

$model

Call:
lm(formula = modelo)

Coefficients:
             (Intercept)              replication2              replication3  
                 2.09741                  -0.06593                  -0.51111  
            replication4                  trt.adj2                  trt.adj3  
                -0.06926                  -0.16889                   0.07667  
                trt.adj4                  trt.adj5                  trt.adj6  
                -0.03222                  -0.96222                  -0.05444  
                trt.adj7                  trt.adj8                  trt.adj9  
                -0.44889                  -0.39333                  -0.33667  
 replication1:block.adj2   replication2:block.adj2   replication3:block.adj2  
                -0.16111                        NA                        NA  
 replication4:block.adj2   replication1:block.adj3   replication2:block.adj3  
                      NA                  -0.34111                        NA  
 replication3:block.adj3   replication4:block.adj3   replication1:block.adj4  
                      NA                        NA                        NA  
 replication2:block.adj4   replication3:block.adj4   replication4:block.adj4  
                -0.69111                        NA                        NA  
 replication1:block.adj5   replication2:block.adj5   replication3:block.adj5  
                      NA                   0.07000                        NA  
 replication4:block.adj5   replication1:block.adj6   replication2:block.adj6  
                      NA                        NA                        NA  
 replication3:block.adj6   replication4:block.adj6   replication1:block.adj7  
                      NA                        NA                        NA  
 replication2:block.adj7   replication3:block.adj7   replication4:block.adj7  
                      NA                   0.20667                        NA  
 replication1:block.adj8   replication2:block.adj8   replication3:block.adj8  
                      NA                        NA                   0.54444  
 replication4:block.adj8   replication1:block.adj9   replication2:block.adj9  
                      NA                        NA                        NA  
 replication3:block.adj9   replication4:block.adj9  replication1:block.adj10  
                      NA                        NA                        NA  
replication2:block.adj10  replication3:block.adj10  replication4:block.adj10  
                      NA                        NA                  -0.28889  
replication1:block.adj11  replication2:block.adj11  replication3:block.adj11  
                      NA                        NA                        NA  
replication4:block.adj11  replication1:block.adj12  replication2:block.adj12  
                -0.34222                        NA                        NA  
replication3:block.adj12  replication4:block.adj12  
                      NA                        NA  


$Fstat
    Fit Statistics
AIC       22.92906
BIC       56.18296

$comparison
       Difference    stderr pvalue
1 - 2  0.05466280 0.2146116 0.8022
1 - 3 -0.16115270 0.2146116 0.4636
1 - 4  0.07822827 0.2146116 0.7202
1 - 5  0.86511184 0.2146116 0.0010
1 - 6 -0.04035957 0.2146116 0.8532
1 - 7  0.41769218 0.2146116 0.0694
1 - 8  0.37002684 0.2146116 0.1040
1 - 9  0.30425608 0.2146116 0.1754
2 - 3 -0.21581550 0.2146116 0.3296
2 - 4  0.02356547 0.2146116 0.9140
2 - 5  0.81044904 0.2146116 0.0016
2 - 6 -0.09502237 0.2146116 0.6638
2 - 7  0.36302938 0.2146116 0.1102
2 - 8  0.31536404 0.2146116 0.1610
2 - 9  0.24959328 0.2146116 0.2618
3 - 4  0.23938097 0.2146116 0.2812
3 - 5  1.02626454 0.2146116 0.0002
3 - 6  0.12079313 0.2146116 0.5814
3 - 7  0.57884488 0.2146116 0.0158
3 - 8  0.53117954 0.2146116 0.0248
3 - 9  0.46540878 0.2146116 0.0456
4 - 5  0.78688357 0.2146116 0.0020
4 - 6 -0.11858784 0.2146116 0.5882
4 - 7  0.33946391 0.2146116 0.1332
4 - 8  0.29179857 0.2146116 0.1928
4 - 9  0.22602781 0.2146116 0.3078
5 - 6 -0.90547141 0.2146116 0.0006
5 - 7 -0.44741966 0.2146116 0.0534
5 - 8 -0.49508500 0.2146116 0.0348
5 - 9 -0.56085576 0.2146116 0.0188
6 - 7  0.45805175 0.2146116 0.0486
6 - 8  0.41038641 0.2146116 0.0740
6 - 9  0.34461564 0.2146116 0.1278
7 - 8 -0.04766534 0.2146116 0.8270
7 - 9 -0.11343610 0.2146116 0.6044
8 - 9 -0.06577077 0.2146116 0.7632

$means
   yield yield.adj        SE r       std  Min  Max    Q25   Q50    Q75
1 1.7425 1.8042740 0.1556986 4 0.4162832 1.19 2.20 1.6250 1.790 1.9075
2 1.8350 1.7496112 0.1556986 4 0.3155419 1.50 2.26 1.6950 1.790 1.9300
3 2.0125 1.9654267 0.1556986 4 0.2096624 1.71 2.18 1.9575 2.080 2.1350
4 1.6050 1.7260457 0.1556986 4 0.3479943 1.20 2.05 1.4775 1.585 1.7125
5 1.0025 0.9391621 0.1556986 4 0.1388944 0.85 1.16 0.9100 1.000 1.0925
6 1.9050 1.8446335 0.1556986 4 0.2548856 1.57 2.16 1.7875 1.945 2.0625
7 1.3650 1.3865818 0.1556986 4 0.5199038 0.73 1.80 1.0450 1.465 1.7850
8 1.4025 1.4342471 0.1556986 4 0.1968714 1.13 1.60 1.3550 1.440 1.4875
9 1.4800 1.5000179 0.1556986 4 0.2836665 1.11 1.76 1.3425 1.525 1.6625

$groups
  yield.adj groups
3 1.9654267      a
6 1.8446335     ab
1 1.8042740    abc
2 1.7496112    abc
4 1.7260457    abc
9 1.5000179     bc
8 1.4342471     bc
7 1.3865818     cd
5 0.9391621      d

$vartau
             [,1]        [,2]        [,3]        [,4]        [,5]        [,6]
 [1,] 0.024242040 0.001212969 0.001212969 0.001212969 0.001212969 0.001212969
 [2,] 0.001212969 0.024242040 0.001212969 0.001212969 0.001212969 0.001212969
 [3,] 0.001212969 0.001212969 0.024242040 0.001212969 0.001212969 0.001212969
 [4,] 0.001212969 0.001212969 0.001212969 0.024242040 0.001212969 0.001212969
 [5,] 0.001212969 0.001212969 0.001212969 0.001212969 0.024242040 0.001212969
 [6,] 0.001212969 0.001212969 0.001212969 0.001212969 0.001212969 0.024242040
 [7,] 0.001212969 0.001212969 0.001212969 0.001212969 0.001212969 0.001212969
 [8,] 0.001212969 0.001212969 0.001212969 0.001212969 0.001212969 0.001212969
 [9,] 0.001212969 0.001212969 0.001212969 0.001212969 0.001212969 0.001212969
             [,7]        [,8]        [,9]
 [1,] 0.001212969 0.001212969 0.001212969
 [2,] 0.001212969 0.001212969 0.001212969
 [3,] 0.001212969 0.001212969 0.001212969
 [4,] 0.001212969 0.001212969 0.001212969
 [5,] 0.001212969 0.001212969 0.001212969
 [6,] 0.001212969 0.001212969 0.001212969
 [7,] 0.024242040 0.001212969 0.001212969
 [8,] 0.001212969 0.024242040 0.001212969
 [9,] 0.001212969 0.001212969 0.024242040

attr(,"class")
[1] "group"