Question 3.7c

Test Hypothesis

Ho: \(\mu_1 = \mu_2 = \mu_3 = \mu_4\) - Null Hypothesis

Ha: At least one of the means differs - Alternative Hypothesis

Using Fisher LSD method
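
For reference, with a balanced design the Fisher LSD procedure declares two treatment means significantly different when their difference exceeds

\[ \text{LSD} = t_{\alpha/2,\,N-a}\sqrt{\frac{2\,MS_E}{n}} \]

Here the critical t value is Bonferroni-adjusted (via the p.adj="bonferroni" argument), and the resulting threshold appears as the MSD value in the output below.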

t1<-data.frame("ten_strength"=c(3129,3000,2865,2890))
t2<-data.frame("ten_strength"=c(3200,3300,2975,3150))
t3<-data.frame("ten_strength"=c(2800,2900,2985,3050))
t4<-data.frame("ten_strength"=c(2600,2700,2600,2765))

Mixgroup<-factor(rep(c("mixtr1", "mixtr2", "mixtr3", "mixtr4"), each = 4))

TensileS<-rbind(t1,t2,t3,t4)

TensileS$Mixgroup<-Mixgroup
#str(TensileS)

# First find the analysis of variance model of the experiment
TensileS_model<-aov(ten_strength~Mixgroup, data = TensileS)
#summary(TensileS_model)

# Using Fisher LSD method to make comparisons between the pairs of means
library(agricolae)
fishertest<-LSD.test(TensileS_model, "Mixgroup", p.adj="bonferroni")
fishertest
## $statistics
##    MSerror Df     Mean       CV  t.value      MSD
##   12825.69 12 2931.812 3.862817 3.152681 252.4675
## 
## $parameters
##         test  p.ajusted   name.t ntr alpha
##   Fisher-LSD bonferroni Mixgroup   4  0.05
## 
## $means
##        ten_strength       std r      LCL      UCL  Min  Max     Q25    Q50
## mixtr1      2971.00 120.55704 4 2847.624 3094.376 2865 3129 2883.75 2945.0
## mixtr2      3156.25 135.97641 4 3032.874 3279.626 2975 3300 3106.25 3175.0
## mixtr3      2933.75 108.27242 4 2810.374 3057.126 2800 3050 2875.00 2942.5
## mixtr4      2666.25  80.97067 4 2542.874 2789.626 2600 2765 2600.00 2650.0
##            Q75
## mixtr1 3032.25
## mixtr2 3225.00
## mixtr3 3001.25
## mixtr4 2716.25
## 
## $comparison
## NULL
## 
## $groups
##        ten_strength groups
## mixtr2      3156.25      a
## mixtr1      2971.00      a
## mixtr3      2933.75      a
## mixtr4      2666.25      b
## 
## attr(,"class")
## [1] "group"

From the results we can tell which pairs of means are significantly different and which are not. However, we can also plot the test result for better visualization.

plot(fishertest)

Conclusion

From the plot above, treatments sharing the same letter are not significantly different, but mixture treatment 4, which represents \(\mu_4\), clearly deviates from the rest. Hence mixture treatment 4 differs significantly from the other mixture treatments in the experiment.

Q3.7d

Normality test

plot(TensileS_model, 2)

Conclusion

The normality assumption appears valid: the normal probability plot of the residuals shows nothing unusual, so the model is adequate in this respect.
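
If a formal check is desired in addition to the visual assessment, a Shapiro-Wilk test on the model residuals is one option (a sketch, not part of the original analysis):

# Optional formal normality check on the residuals (base R)
shapiro.test(residuals(TensileS_model))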

Q3.7e

#plot(TensileS_model$fitted.values, TensileS_model$residuals, xlab = "Predicted Tensile Strength", ylab = "Residuals", main = "Residuals vs Tensile strength")
plot(TensileS_model, 1)

Conclusion

From the plot, the residuals appear evenly spread across the treatment groups. However, to formally test for equal variances we might want to run Levene's test, especially if the data contained a larger number of observations.
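
As a sketch (assuming the car package is installed), Levene's test for equal variances across the mixture treatment groups could be run as follows:

# Levene's test for homogeneity of variance across the mixture treatment groups
library(car)
leveneTest(ten_strength ~ Mixgroup, data = TensileS)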

Q3.7f

Scatterplot

#library(dplyr)
#group_by(TensileS, Mixgroup) %>% summarise(count=n(), mean=mean(ten_strength, na.rm=TRUE), sd=sd(ten_strength, na.rm=TRUE))
#install.packages("ggpubr")
library("ggpubr")
## Loading required package: ggplot2
ggline(TensileS, y="ten_strength", x="Mixgroup", add = c("mean_se", "jitter"), order = c("mixtr1", "mixtr2", "mixtr3", "mixtr4"), color = "red")


Question 3.10b

Test Hypothesis

Ho: \(\mu_1 = \mu_2 = \mu_3 = \mu_4 = \mu_5\) - Null Hypothesis

Ha: At least one of the means differs - Alternative Hypothesis

w1<-data.frame("cottonstrength"=c(7,7,15,11,9))
w2<-data.frame("cottonstrength"=c(12,17,12,18,18))
w3<-data.frame("cottonstrength"=c(14,19,19,18,18))
w4<-data.frame("cottonstrength"=c(19,25,22,19,23))
w5<-data.frame("cottonstrength"=c(7,10,11,15,11))
  
cottonlv<-factor(rep(c("lv15%","lv20%","lv25%","lv30%","lv35%"), each=5))

wstrength<-rbind(w1,w2,w3,w4,w5)

wstrength$cottonlv<-cottonlv
#str(wstrength)

wstrength_model<-aov(cottonstrength~cottonlv, data=wstrength)
#summary(wstrength_model)

FishT<-LSD.test(wstrength_model, "cottonlv", p.adj="bonferroni")
FishT
## $statistics
##   MSerror Df  Mean       CV  t.value      MSD
##      8.06 20 15.04 18.87642 3.153401 5.662089
## 
## $parameters
##         test  p.ajusted   name.t ntr alpha
##   Fisher-LSD bonferroni cottonlv   5  0.05
## 
## $means
##       cottonstrength      std r       LCL      UCL Min Max Q25 Q50 Q75
## lv15%            9.8 3.346640 5  7.151566 12.44843   7  15   7   9  11
## lv20%           15.4 3.130495 5 12.751566 18.04843  12  18  12  17  18
## lv25%           17.6 2.073644 5 14.951566 20.24843  14  19  18  18  19
## lv30%           21.6 2.607681 5 18.951566 24.24843  19  25  19  22  23
## lv35%           10.8 2.863564 5  8.151566 13.44843   7  15  10  11  11
## 
## $comparison
## NULL
## 
## $groups
##       cottonstrength groups
## lv30%           21.6      a
## lv25%           17.6     ab
## lv20%           15.4     bc
## lv35%           10.8      c
## lv15%            9.8      c
## 
## attr(,"class")
## [1] "group"

From the results we can tell which pairs of means are significantly different and which are not. However, we can also plot the test result for better visualization.

plot(FishT)

Conclusion

From the plot, treatments sharing the same letter are not significantly different. The 30% cotton weight level gives the highest strength and differs significantly from the 20%, 35%, and 15% levels, while the 25% level differs significantly from the 35% and 15% levels. The 35% and 15% levels, that is \(\mu_5\) and \(\mu_1\), show no significant difference from each other.

Q3.10c

plot(wstrength_model)

Conclusion

From the residual plots, the normal probability plot appears approximately normal, with a slight left skew and a few possible outliers. The residuals-versus-fitted plot shows the residuals spread fairly evenly across the fitted values. Overall, the model appears reasonably adequate.


Question 3.44

Ho: \(\mu_1 = \mu_2 = \mu_3 = \mu_4\) - Null Hypothesis

Ha: At least one of the means differs - Alternative Hypothesis

library(pwr)
x=c(50,60,50,60)
#mean(x)
#The deviation of each treatment mean from the grand mean (55) is 5

pwr.anova.test(k=4,n=NULL,f=sqrt((5)^2/25),sig.level=0.05,power=.9)
## 
##      Balanced one-way analysis of variance power calculation 
## 
##               k = 4
##               n = 4.658119
##               f = 1
##       sig.level = 0.05
##           power = 0.9
## 
## NOTE: n is number in each group

We would require a sample size of 5 per group (rounding n = 4.66 up) to reject the null hypothesis Ho with a power of at least 0.90.
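
For clarity, the effect size f = 1 used above can be computed directly from the hypothesized treatment means, assuming \(\sigma^2 = 25\) as implied by the call to pwr.anova.test:

# Cohen's f: root mean squared deviation of the treatment means, divided by sigma
mu <- c(50, 60, 50, 60)   # hypothesized treatment means
sigma2 <- 25              # assumed error variance
f <- sqrt(sum((mu - mean(mu))^2) / length(mu) / sigma2)
f                         # equals 1, the value passed to pwr.anova.test above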


Question 3.45a

Ho: \(\mu_1 = \mu_2 = \mu_3 = \mu_4\) - Null Hypothesis

Ha: At least one of the means differs - Alternative Hypothesis

\(\sigma^2\) = 36

pwr.anova.test(k=4,n=NULL,f=sqrt((5)^2/36),sig.level=0.05,power=.9)
## 
##      Balanced one-way analysis of variance power calculation 
## 
##               k = 4
##               n = 6.180857
##               f = 0.8333333
##       sig.level = 0.05
##           power = 0.9
## 
## NOTE: n is number in each group

With a reasonable estimate of the experimental error variance \(\sigma^2 = 36\), we would require a sample size of 7 per group (rounding n = 6.18 up) to reject the null hypothesis Ho with a power of at least 0.90.

Question 3.45b

\(\sigma^2\) = 49

pwr.anova.test(k=4,n=NULL,f=sqrt((5)^2/49),sig.level=0.05,power=.9)
## 
##      Balanced one-way analysis of variance power calculation 
## 
##               k = 4
##               n = 7.998751
##               f = 0.7142857
##       sig.level = 0.05
##           power = 0.9
## 
## NOTE: n is number in each group

With a reasonable estimate of the experimental error variance \(\sigma^2 = 49\), we would require a sample size of 8 per group to reject the null hypothesis Ho with a power of at least 0.90.

Question 3.45c

The required sample size is positively related to the estimate of the error variance when the power is held fixed: as we increased the estimate of \(\sigma^2\) in the power calculation, the per-group sample size n needed to retain a power of at least 0.90 also increased.
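
A short sketch, reusing the same pwr.anova.test call as above, makes the relationship between the variance estimate and the required sample size explicit:

# Required per-group n for the three error variance estimates considered above
library(pwr)
sapply(c(25, 36, 49), function(s2) {
  ceiling(pwr.anova.test(k = 4, f = sqrt(25/s2), sig.level = 0.05, power = 0.9)$n)
})
# gives 5, 7, and 8 observations per group, respectively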

Question 3.45d

In practice, the choice of sample size n depends on the size of the effect the experimenter wants to detect. Detecting a small effect requires a large sample size, whereas a large effect can be detected with a small sample size.
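
A brief sketch with pwr.anova.test illustrates this trade-off; the f values below are arbitrary illustrations rather than values from the problem:

# Smaller detectable effect sizes demand larger per-group sample sizes
library(pwr)
sapply(c(0.25, 0.5, 1), function(f) {
  ceiling(pwr.anova.test(k = 4, f = f, sig.level = 0.05, power = 0.9)$n)
})
# the required n per group increases sharply as f decreases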

