library(pacman)
p_load(readxl, dplyr, tidyverse, plyr, magrittr, agricolae,
ggplot2, tidyr, rsample, forestmangr, broom)
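# Note: p_load() attaches plyr after dplyr here, so several dplyr verbs (e.g. summarise, mutate)
# get masked; the explicit dplyr:: prefixes used throughout the script are what keep the pipeline
# working as intended.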
Agraz_complete <- read_excel("C:/Users/pedro/OneDrive/Escritorio/Sustentacion final/Agraz.complete revision 22.11.22.xlsx",
sheet = "Sheet 1", range = "A1:W460")
tail(Agraz_complete)
## # A tibble: 6 × 23
## Ens SDT TIP N TTO P B B2 PL EDAD PSH PSTALLO
## <chr> <dbl> <chr> <chr> <chr> <chr> <dbl> <dbl> <chr> <dbl> <dbl> <dbl>
## 1 Ensayo 2 24 D 1.8N 1.8N/0… 0.5P 3 2 292 26 2.68 2.35
## 2 Ensayo 2 24 D 1.8N 1.8N/0… 0.5P 2 2 185 26 3.72 1.02
## 3 Ensayo 2 24 D 1.8N 1.8N/0… 0.5P 1 2 88 26 4.40 1.70
## 4 Ensayo 2 24 D 1.8N 1.8N/0… 0.5P 3 3 288 26 4.88 3.15
## 5 Ensayo 2 24 D 1.8N 1.8N/0… 0.5P 1 3 89 26 5.71 3.42
## 6 Ensayo 2 24 D 1.8N 1.8N/0… 0.5P 1 3 83 26 7.51 3.26
## # ℹ 11 more variables: PSR <dbl>, PST <dbl>, AF <dbl>, NH <dbl>, BR <dbl>,
## # R1 <dbl>, R2 <dbl>, R3 <dbl>, ALT <dbl>, LONG <dbl>, FCT <dbl>
tasa <- Agraz_complete %>% dplyr::arrange(., Ens, SDT, TTO, PST)
head(tasa)
## # A tibble: 6 × 23
## Ens SDT TIP N TTO P B B2 PL EDAD PSH PSTALLO
## <chr> <dbl> <chr> <chr> <chr> <chr> <dbl> <dbl> <chr> <dbl> <dbl> <dbl>
## 1 Ensayo 1 7 D 0.0N 0.0N/0… 0.0P 2 1 107 14 0.258 0.13
## 2 Ensayo 1 7 D 0.0N 0.0N/0… 0.0P 3 1 208 14 0.301 0.134
## 3 Ensayo 1 7 D 0.0N 0.0N/0… 0.0P 2 2 108 14 0.434 0.193
## 4 Ensayo 1 7 D 0.0N 0.0N/0… 0.0P 1 2 1 14 0.515 0.193
## 5 Ensayo 1 7 D 0.0N 0.0N/0… 0.0P 3 3 201 14 0.528 0.221
## 6 Ensayo 1 7 D 0.0N 0.0N/0… 0.0P 1 3 5 14 0.775 0.305
## # ℹ 11 more variables: PSR <dbl>, PST <dbl>, AF <dbl>, NH <dbl>, BR <dbl>,
## # R1 <dbl>, R2 <dbl>, R3 <dbl>, ALT <dbl>, LONG <dbl>, FCT <dbl>
str(tasa)
## tibble [459 × 23] (S3: tbl_df/tbl/data.frame)
## $ Ens : chr [1:459] "Ensayo 1" "Ensayo 1" "Ensayo 1" "Ensayo 1" ...
## $ SDT : num [1:459] 7 7 7 7 7 7 7 7 7 7 ...
## $ TIP : chr [1:459] "D" "D" "D" "D" ...
## $ N : chr [1:459] "0.0N" "0.0N" "0.0N" "0.0N" ...
## $ TTO : chr [1:459] "0.0N/0.0P" "0.0N/0.0P" "0.0N/0.0P" "0.0N/0.0P" ...
## $ P : chr [1:459] "0.0P" "0.0P" "0.0P" "0.0P" ...
## $ B : num [1:459] 2 3 2 1 3 1 3 2 2 1 ...
## $ B2 : num [1:459] 1 1 2 2 3 3 1 1 2 2 ...
## $ PL : chr [1:459] "107" "208" "108" "1" ...
## $ EDAD : num [1:459] 14 14 14 14 14 14 14 14 14 14 ...
## $ PSH : num [1:459] 0.258 0.301 0.434 0.515 0.528 0.775 0.294 0.389 0.613 0.429 ...
## $ PSTALLO: num [1:459] 0.13 0.134 0.193 0.193 0.221 0.305 0.144 0.254 0.251 0.263 ...
## $ PSR : num [1:459] 0.155 0.185 0.141 0.202 0.33 0.436 0.154 0.192 0.192 0.398 ...
## $ PST : num [1:459] 0.543 0.62 0.768 0.91 1.079 ...
## $ AF : num [1:459] 26.4 50.5 45.8 57.4 29.4 ...
## $ NH : num [1:459] 35 45 63 46 40 92 90 77 84 70 ...
## $ BR : num [1:459] 4 2 2 2 2 3 2 7 4 3 ...
## $ R1 : num [1:459] 1 2 2 2 2 2 2 2 1 1 ...
## $ R2 : num [1:459] 2 0 5 2 1 6 0 7 2 5 ...
## $ R3 : num [1:459] 0 0 0 0 0 0 0 1 6 1 ...
## $ ALT : num [1:459] 15.3 10 12.5 14 13 16 9 13.4 13 14 ...
## $ LONG : num [1:459] 15.5 10.7 12.8 18 12.8 17 18 16.2 16.5 20.2 ...
## $ FCT : num [1:459] 0.828 0.739 0.89 0.826 0.777 ...
library(forestmangr)
lm_table(tasa, model=PSH~SDT, "TTO")
## TTO b0 b1 Rsqr Rsqr_adj Std.Error
## 1 0.0N/0.0P 0.50225162 0.07953886 0.3128179 0.2968369 0.8881808
## 2 0.0N/0.25P 0.33016981 0.08167595 0.3431805 0.3279056 0.8513102
## 3 0.0N/0.5P 0.69660806 0.05647840 0.2220178 0.2039252 0.7965355
## 4 1.1N/0.0P 0.80157129 0.07126032 0.1787273 0.1596280 1.1508741
## 5 1.1N/0.25P 0.37313854 0.13024810 0.3505700 0.3354670 1.3356173
## 6 1.1N/0.5P -0.31349017 0.18486953 0.3701681 0.3555208 1.8168122
## 7 1.8N/0.0P 0.68827906 0.07447584 0.2831401 0.2664690 0.8928196
## 8 1.8N/0.25P 0.15524081 0.14091562 0.2908713 0.2743799 1.6576893
## 9 1.8N/0.5P 0.40790954 0.09616962 0.2218726 0.2037766 1.3568857
## 10 2.5N/0.0P 0.16853704 0.03095741 0.2481069 0.2011135 0.4200455
## 11 2.5N/0.25P 0.04861389 0.05208333 0.4206974 0.3844910 0.4763649
## 12 2.5N/0.5P 0.30774074 0.04237037 0.2919138 0.2476584 0.5143410
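# Quick cross-check (not part of the original workflow): lm_table() fits one ordinary least-squares
# model per group, so any row above should be reproducible with a plain lm() call. For example, the
# first row ("0.0N/0.0P", pooling both trials, as lm_table() did here) should return the same b0 and b1:
coef(lm(PSH ~ SDT, data = dplyr::filter(tasa, TTO == "0.0N/0.0P")))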
# Per-group regressions by nesting with broom (not used in the final analysis)
# It was not possible to extract the slopes this way (see the sketch after the nested example below)
# Useful when predicted values need to be estimated
# Fuente: https://drsimonj.svbtle.com/running-a-model-on-separate-groups
tasa.anid <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
nest(., -TTO) %>%
mutate(fit = map(data, ~ lm(PSH ~ SDT, data = .)),
results = map(fit, glance)) %>% unnest(results)
tasa.anid
## # A tibble: 9 × 15
## TTO data fit r.squared adj.r.squared sigma statistic p.value df
## <chr> <list> <lis> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 0.0N/0.0P <tibble> <lm> 0.442 0.420 0.825 19.8 1.55e-4 1
## 2 0.0N/0.2… <tibble> <lm> 0.433 0.410 0.894 19.1 1.91e-4 1
## 3 0.0N/0.5P <tibble> <lm> 0.384 0.360 0.694 15.6 5.61e-4 1
## 4 1.1N/0.0P <tibble> <lm> 0.160 0.127 1.20 4.77 3.85e-2 1
## 5 1.1N/0.2… <tibble> <lm> 0.505 0.486 1.30 25.5 3.24e-5 1
## 6 1.1N/0.5P <tibble> <lm> 0.654 0.641 1.46 47.3 3.28e-7 1
## 7 1.8N/0.0P <tibble> <lm> 0.421 0.398 0.771 18.2 2.49e-4 1
## 8 1.8N/0.2… <tibble> <lm> 0.649 0.634 1.20 46.1 4.06e-7 1
## 9 1.8N/0.5P <tibble> <lm> 0.334 0.307 1.38 12.5 1.60e-3 1
## # ℹ 6 more variables: logLik <dbl>, AIC <dbl>, BIC <dbl>, deviance <dbl>,
## # df.residual <int>, nobs <int>
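# The note above says the slopes could not be extracted from the nested fits; a minimal sketch of one
# way to do it with broom::tidy() (same nesting as above, keeping only the SDT term, i.e. the slope):
tasa %>%
  filter(Ens == "Ensayo 2") %>%
  nest(data = -TTO) %>%
  mutate(fit = map(data, ~ lm(PSH ~ SDT, data = .x)),
         coefs = map(fit, tidy)) %>%
  unnest(coefs) %>%
  filter(term == "SDT") %>%
  dplyr::select(TTO, estimate, std.error, p.value)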
## Exploratory plot
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(PSH~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
## Estimation of the linear models and their slopes, trial 1
PSH.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(PSH~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
dplyr::mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
PSH.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.040861111 0.7965620 0.0N 0.0P
## 2 2 0.0N/0.0P 0.068655556 0.8956473 0.0N 0.0P
## 3 3 0.0N/0.0P 0.080000000 0.8884603 0.0N 0.0P
## 4 1 0.0N/0.25P 0.041361111 0.6677532 0.0N 0.25P
## 5 2 0.0N/0.25P 0.065027778 0.8379364 0.0N 0.25P
## 6 3 0.0N/0.25P 0.065861111 0.8301753 0.0N 0.25P
## 7 1 0.0N/0.5P 0.035194444 0.8050745 0.0N 0.5P
## 8 2 0.0N/0.5P 0.038694444 0.8009514 0.0N 0.5P
## 9 3 0.0N/0.5P 0.051422222 0.6085400 0.0N 0.5P
## 10 1 1.1N/0.0P 0.032722222 0.4814582 1.1N 0.0P
## 11 2 1.1N/0.0P 0.071583333 0.8041732 1.1N 0.0P
## 12 3 1.1N/0.0P 0.141972222 0.8578195 1.1N 0.0P
## 13 1 1.1N/0.25P 0.071716667 0.8528410 1.1N 0.25P
## 14 2 1.1N/0.25P 0.062888889 0.8768121 1.1N 0.25P
## 15 3 1.1N/0.25P 0.105611111 0.8561131 1.1N 0.25P
## 16 1 1.1N/0.5P 0.053916667 0.8780379 1.1N 0.5P
## 17 2 1.1N/0.5P 0.078611111 0.9610788 1.1N 0.5P
## 18 3 1.1N/0.5P 0.127222222 0.9084963 1.1N 0.5P
## 19 1 1.8N/0.0P 0.027055556 0.6059050 1.8N 0.0P
## 20 2 1.8N/0.0P 0.072638889 0.9659103 1.8N 0.0P
## 21 3 1.8N/0.0P 0.094361111 0.7204203 1.8N 0.0P
## 22 1 1.8N/0.25P 0.029305556 0.4659884 1.8N 0.25P
## 23 2 1.8N/0.25P 0.064694444 0.9073213 1.8N 0.25P
## 24 3 1.8N/0.25P 0.053416667 0.7797068 1.8N 0.25P
## 25 1 1.8N/0.5P 0.039583333 0.9861983 1.8N 0.5P
## 26 2 1.8N/0.5P 0.058944444 0.9307888 1.8N 0.5P
## 27 3 1.8N/0.5P 0.079138889 0.6411745 1.8N 0.5P
## 28 1 2.5N/0.0P 0.001583333 0.0192272 2.5N 0.0P
## 29 2 2.5N/0.0P 0.029805556 0.7944960 2.5N 0.0P
## 30 3 2.5N/0.0P 0.061483333 0.7734411 2.5N 0.0P
## 31 1 2.5N/0.25P 0.027777778 0.3337555 2.5N 0.25P
## 32 2 2.5N/0.25P 0.042361111 0.9575149 2.5N 0.25P
## 33 3 2.5N/0.25P 0.086111111 0.9411973 2.5N 0.25P
## 34 1 2.5N/0.5P 0.008527778 0.6113502 2.5N 0.5P
## 35 2 2.5N/0.5P 0.037472222 0.6591024 2.5N 0.5P
## 36 3 2.5N/0.5P 0.081111111 0.9436401 2.5N 0.5P
## Analysis of variance among the slopes
library(car)
PSH.AOV.E1=aov(b1~N*P, data = PSH.tasa.E1) # AOV model | Type I -> balanced design
summary(PSH.AOV.E1) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.008039 0.0026795 3.008 0.0501 .
## P 2 0.000050 0.0000252 0.028 0.9721
## N:P 6 0.001796 0.0002994 0.336 0.9110
## Residuals 24 0.021382 0.0008909
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(PSH.AOV.E1) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(PSH.tasa.E1$b1) # Exploratory look at the distribution of the variable
ks.test(res_estan, "pnorm") # Normality test | p > 0.05 means the residuals are normal
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.070306, p-value = 0.9887
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | p > 0.05 means the residuals are normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.9892, p-value = 0.9743
bartlett.test(res_estan~PSH.tasa.E1$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSH.tasa.E1$N
## Bartlett's K-squared = 7.1298, df = 3, p-value = 0.06787
bartlett.test(res_estan~PSH.tasa.E1$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSH.tasa.E1$P
## Bartlett's K-squared = 2.858, df = 2, p-value = 0.2395
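# car is attached above but not otherwise used; as an alternative check of homogeneity of variances
# (a sketch, not part of the original analysis), Levene's test can be run on the full N x P layout:
leveneTest(b1 ~ factor(N) * factor(P), data = PSH.tasa.E1)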
library(agricolae)
HSD.test(PSH.AOV.E1, "N", console=T)
##
## Study: PSH.AOV.E1 ~ "N"
##
## HSD Test for b1
##
## Mean Square Error: 0.0008909236
##
## N, means
##
## b1 std r se Min Max Q25
## 0.0N 0.05411975 0.01612405 9 0.009949448 0.035194444 0.08000000 0.04086111
## 1.1N 0.08291605 0.03535462 9 0.009949448 0.032722222 0.14197222 0.06288889
## 1.8N 0.05768210 0.02282868 9 0.009949448 0.027055556 0.09436111 0.03958333
## 2.5N 0.04180370 0.02953859 9 0.009949448 0.001583333 0.08611111 0.02777778
## Q50 Q75
## 0.0N 0.05142222 0.06586111
## 1.1N 0.07171667 0.10561111
## 1.8N 0.05894444 0.07263889
## 2.5N 0.03747222 0.06148333
##
## Alpha: 0.05 ; DF Error: 24
## Critical Value of Studentized Range: 3.901262
##
## Minimun Significant Difference: 0.0388154
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 1.1N 0.08291605 a
## 1.8N 0.05768210 ab
## 0.0N 0.05411975 ab
## 2.5N 0.04180370 b
## Plot of the simple nitrogen factor, trial 1
GRAF.PSH.E1.N <- PSH.tasa.E1 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("ab","a","ab", "b"))
GRAF.PSH.E1.N
## # A tibble: 4 × 8
## N n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 9 0.0541 0.0161 0.792 0.00537 0.0124 ab
## 2 1.1N 9 0.0829 0.0354 0.831 0.0118 0.0272 a
## 3 1.8N 9 0.0577 0.0228 0.778 0.00761 0.0175 ab
## 4 2.5N 9 0.0418 0.0295 0.670 0.00985 0.0227 b
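# The Tukey letters above are typed in by hand, which is easy to get out of order; a sketch of how
# they could instead be pulled straight from the HSD.test() result and joined onto the summary table
# (tukey_letters and GRAF.PSH.E1.N.auto are illustrative names, not objects from the original script):
tukey_letters <- HSD.test(PSH.AOV.E1, "N", console = FALSE)$groups %>%
  tibble::rownames_to_column("N") %>%
  dplyr::select(N, groups)
GRAF.PSH.E1.N.auto <- GRAF.PSH.E1.N %>%
  dplyr::select(-Tukey) %>%
  dplyr::left_join(tukey_letters, by = "N")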
ggplot(GRAF.PSH.E1.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_errorbar(ymin=GRAF.PSH.E1.N$media-GRAF.PSH.E1.N$se,
ymax=GRAF.PSH.E1.N$media+GRAF.PSH.E1.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=N, y=media+se+0.01 ),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=N, y=media-se-0.01 ),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa de crecimiento de hojas \n(g por semana)")+
theme_classic()+
ylim(0, 0.15)
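# The same figure can be written with the error-bar limits mapped inside aes(), which avoids repeating
# the GRAF.PSH.E1.N$ references (an equivalent sketch of the plot above):
ggplot(GRAF.PSH.E1.N, aes(x=N, y=media))+
  geom_point(size=3)+
  geom_errorbar(aes(ymin=media-se, ymax=media+se),
                alpha=0.7, width=.3, color="darkgray")+
  geom_text(aes(label=Tukey, y=media+se+0.01), color="red", size=4)+
  geom_text(aes(label=round(R2, digits=2), y=media-se-0.01), color="blue", size=3)+
  labs(x = "Tratamiento", y = "Tasa de crecimiento de hojas \n(g por semana)")+
  theme_classic()+
  ylim(0, 0.15)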
## Estimation of the linear models and their slopes, trial 2
PSH.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(PSH~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
PSH.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.06605191 0.6362850 0.0N 0.0P
## 2 2 0.0N/0.0P 0.10796585 0.7585410 0.0N 0.0P
## 3 3 0.0N/0.0P 0.11388934 0.7866649 0.0N 0.0P
## 4 1 0.0N/0.25P 0.05028142 0.6439566 0.0N 0.25P
## 5 2 0.0N/0.25P 0.10764071 0.6509908 0.0N 0.25P
## 6 3 0.0N/0.25P 0.14839290 0.7643691 0.0N 0.25P
## 7 1 0.0N/0.5P 0.04862978 0.4892808 0.0N 0.5P
## 8 2 0.0N/0.5P 0.08397951 0.7399593 0.0N 0.5P
## 9 3 0.0N/0.5P 0.08235027 0.4571036 0.0N 0.5P
## 10 1 1.1N/0.0P 0.01737773 0.0732083 1.1N 0.0P
## 11 2 1.1N/0.0P 0.08119536 0.6488253 1.1N 0.0P
## 12 3 1.1N/0.0P 0.10743238 0.3896517 1.1N 0.0P
## 13 1 1.1N/0.25P 0.10080915 0.7417001 1.1N 0.25P
## 14 2 1.1N/0.25P 0.16349590 0.6735259 1.1N 0.25P
## 15 3 1.1N/0.25P 0.24919536 0.9041145 1.1N 0.25P
## 16 1 1.1N/0.5P 0.31152322 0.7467100 1.1N 0.5P
## 17 2 1.1N/0.5P 0.25747439 0.6430034 1.1N 0.5P
## 18 3 1.1N/0.5P 0.21830205 0.7834260 1.1N 0.5P
## 19 1 1.8N/0.0P 0.06129372 0.6909911 1.8N 0.0P
## 20 2 1.8N/0.0P 0.06796585 0.8573982 1.8N 0.0P
## 21 3 1.8N/0.0P 0.12872514 0.8100282 1.8N 0.0P
## 22 1 1.8N/0.25P 0.21868661 0.7431877 1.8N 0.25P
## 23 2 1.8N/0.25P 0.22910628 0.8555349 1.8N 0.25P
## 24 3 1.8N/0.25P 0.19320738 0.7006975 1.8N 0.25P
## 25 1 1.8N/0.5P 0.04379372 0.4630757 1.8N 0.5P
## 26 2 1.8N/0.5P 0.12905055 0.5858477 1.8N 0.5P
## 27 3 1.8N/0.5P 0.21131148 0.7895798 1.8N 0.5P
## AOV model | Type I -> balanced design
library(car)
PSH.AOV.E2=aov(b1~N*P, data = PSH.tasa.E2) # AOV model | Type I -> balanced design
summary(PSH.AOV.E2) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.02820 0.014099 5.734 0.01184 *
## P 2 0.03373 0.016865 6.859 0.00611 **
## N:P 4 0.04961 0.012401 5.043 0.00663 **
## Residuals 18 0.04426 0.002459
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(PSH.AOV.E2) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(PSH.tasa.E2$b1) # Exploratory look at the distribution of the variable
ks.test(res_estan, "pnorm") # Normality test | p > 0.05 means the residuals are normal
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.1098, p-value = 0.8658
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | p > 0.05 means the residuals are normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.98215, p-value = 0.9074
bartlett.test(res_estan~PSH.tasa.E2$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSH.tasa.E2$N
## Bartlett's K-squared = 2.1327, df = 2, p-value = 0.3443
bartlett.test(res_estan~PSH.tasa.E2$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSH.tasa.E2$P
## Bartlett's K-squared = 1.3466, df = 2, p-value = 0.51
# Analyzing the interaction
PSH.AOV.E2.1=aov(b1~TTO, data = PSH.tasa.E2) # AOV model for the interaction
summary(PSH.AOV.E2.1) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## TTO 8 0.11153 0.013942 5.67 0.0011 **
## Residuals 18 0.04426 0.002459
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
HSD.test(PSH.AOV.E2.1, "TTO", console=T, alpha = 0.06) # Tukey test for the interaction
##
## Study: PSH.AOV.E2.1 ~ "TTO"
##
## HSD Test for b1
##
## Mean Square Error: 0.002458947
##
## TTO, means
##
## b1 std r se Min Max Q25
## 0.0N/0.0P 0.09596903 0.02607772 3 0.02862951 0.06605191 0.11388934 0.08700888
## 0.0N/0.25P 0.10210501 0.04928943 3 0.02862951 0.05028142 0.14839290 0.07896107
## 0.0N/0.5P 0.07165319 0.01995549 3 0.02862951 0.04862978 0.08397951 0.06549003
## 1.1N/0.0P 0.06866849 0.04631578 3 0.02862951 0.01737773 0.10743238 0.04928654
## 1.1N/0.25P 0.17116680 0.07448992 3 0.02862951 0.10080915 0.24919536 0.13215253
## 1.1N/0.5P 0.26243322 0.04680801 3 0.02862951 0.21830205 0.31152322 0.23788822
## 1.8N/0.0P 0.08599490 0.03715554 3 0.02862951 0.06129372 0.12872514 0.06462978
## 1.8N/0.25P 0.21366676 0.01846841 3 0.02862951 0.19320738 0.22910628 0.20594699
## 1.8N/0.5P 0.12805191 0.08376334 3 0.02862951 0.04379372 0.21131148 0.08642213
## Q50 Q75
## 0.0N/0.0P 0.10796585 0.11092760
## 0.0N/0.25P 0.10764071 0.12801680
## 0.0N/0.5P 0.08235027 0.08316489
## 1.1N/0.0P 0.08119536 0.09431387
## 1.1N/0.25P 0.16349590 0.20634563
## 1.1N/0.5P 0.25747439 0.28449880
## 1.8N/0.0P 0.06796585 0.09834549
## 1.8N/0.25P 0.21868661 0.22389645
## 1.8N/0.5P 0.12905055 0.17018101
##
## Alpha: 0.06 ; DF Error: 18
## Critical Value of Studentized Range: 4.822424
##
## Minimun Significant Difference: 0.1380636
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 1.1N/0.5P 0.26243322 a
## 1.8N/0.25P 0.21366676 ab
## 1.1N/0.25P 0.17116680 abc
## 1.8N/0.5P 0.12805191 abc
## 0.0N/0.25P 0.10210501 bc
## 0.0N/0.0P 0.09596903 bc
## 1.8N/0.0P 0.08599490 bc
## 0.0N/0.5P 0.07165319 c
## 1.1N/0.0P 0.06866849 c
## Interaction plot, trial 2
GRAF.PSH.E2.TTO <- PSH.tasa.E2 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., TTO) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("bc","bc","c","c","abc","a","bc","ab","abc"))
GRAF.PSH.E2.TTO
## # A tibble: 9 × 8
## TTO n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N/0.0P 3 0.0960 0.0261 0.727 0.0151 0.0648 bc
## 2 0.0N/0.25P 3 0.102 0.0493 0.686 0.0285 0.122 bc
## 3 0.0N/0.5P 3 0.0717 0.0200 0.562 0.0115 0.0496 c
## 4 1.1N/0.0P 3 0.0687 0.0463 0.371 0.0267 0.115 c
## 5 1.1N/0.25P 3 0.171 0.0745 0.773 0.0430 0.185 abc
## 6 1.1N/0.5P 3 0.262 0.0468 0.724 0.0270 0.116 a
## 7 1.8N/0.0P 3 0.0860 0.0372 0.786 0.0215 0.0923 bc
## 8 1.8N/0.25P 3 0.214 0.0185 0.766 0.0107 0.0459 ab
## 9 1.8N/0.5P 3 0.128 0.0838 0.613 0.0484 0.208 abc
ggplot(GRAF.PSH.E2.TTO, aes(x=TTO, y=media))+
geom_point(size=2, position=position_dodge(.3))+
geom_errorbar(ymin=GRAF.PSH.E2.TTO$media-GRAF.PSH.E2.TTO$se,
ymax=GRAF.PSH.E2.TTO$media+GRAF.PSH.E2.TTO$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=TTO, y=media+se+0.02),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=TTO, y=media-se-0.02),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa de crecimiento de hojas \n(g por semana)")+
theme_classic()+
theme(axis.text = element_text(angle = 90))+
ylim(0, 0.35)
## Stem dry weight - PSTALLO ### TRIAL 1
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(PSTALLO~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Stem growth rate (g por semana)")
PSTALLO.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(PSTALLO~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
PSTALLO.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.027083333 0.97174159 0.0N 0.0P
## 2 2 0.0N/0.0P 0.052388889 0.91059020 0.0N 0.0P
## 3 3 0.0N/0.0P 0.068861111 0.95821092 0.0N 0.0P
## 4 1 0.0N/0.25P 0.012833333 0.30263925 0.0N 0.25P
## 5 2 0.0N/0.25P 0.050288889 0.98458217 0.0N 0.25P
## 6 3 0.0N/0.25P 0.054638889 0.85903367 0.0N 0.25P
## 7 1 0.0N/0.5P 0.029444444 0.89459501 0.0N 0.5P
## 8 2 0.0N/0.5P 0.023777778 0.76288734 0.0N 0.5P
## 9 3 0.0N/0.5P 0.051633333 0.79981451 0.0N 0.5P
## 10 1 1.1N/0.0P 0.013611111 0.14520219 1.1N 0.0P
## 11 2 1.1N/0.0P 0.037527778 0.89973164 1.1N 0.0P
## 12 3 1.1N/0.0P 0.082083333 0.93322387 1.1N 0.0P
## 13 1 1.1N/0.25P 0.040177778 0.86982813 1.1N 0.25P
## 14 2 1.1N/0.25P 0.051138889 0.96588230 1.1N 0.25P
## 15 3 1.1N/0.25P 0.079138889 0.92069534 1.1N 0.25P
## 16 1 1.1N/0.5P 0.029472222 0.92069108 1.1N 0.5P
## 17 2 1.1N/0.5P 0.047527778 0.88309975 1.1N 0.5P
## 18 3 1.1N/0.5P 0.085638889 0.85043188 1.1N 0.5P
## 19 1 1.8N/0.0P 0.022388889 0.73929302 1.8N 0.0P
## 20 2 1.8N/0.0P 0.028416667 0.91596385 1.8N 0.0P
## 21 3 1.8N/0.0P 0.064083333 0.91115585 1.8N 0.0P
## 22 1 1.8N/0.25P 0.021583333 0.88455178 1.8N 0.25P
## 23 2 1.8N/0.25P 0.033000000 0.81206185 1.8N 0.25P
## 24 3 1.8N/0.25P 0.035861111 0.79739805 1.8N 0.25P
## 25 1 1.8N/0.5P 0.016083333 0.73106486 1.8N 0.5P
## 26 2 1.8N/0.5P 0.035166667 0.93403893 1.8N 0.5P
## 27 3 1.8N/0.5P 0.052516667 0.51577028 1.8N 0.5P
## 28 1 2.5N/0.0P 0.001666667 0.02300447 2.5N 0.0P
## 29 2 2.5N/0.0P 0.045166667 0.75431038 2.5N 0.0P
## 30 3 2.5N/0.0P 0.047116667 0.92039943 2.5N 0.0P
## 31 1 2.5N/0.25P 0.014583333 0.26062154 2.5N 0.25P
## 32 2 2.5N/0.25P 0.056861111 0.68144324 2.5N 0.25P
## 33 3 2.5N/0.25P 0.076361111 0.87023970 2.5N 0.25P
## 34 1 2.5N/0.5P 0.017611111 0.95339401 2.5N 0.5P
## 35 2 2.5N/0.5P 0.035305556 0.90638335 2.5N 0.5P
## 36 3 2.5N/0.5P 0.040055556 0.59397808 2.5N 0.5P
library(car)
PSTALLO.AOV.E1=aov(b1~N*P, data = PSTALLO.tasa.E1)
summary(PSTALLO.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.001581 0.0005271 1.001 0.409
## P 2 0.000163 0.0000814 0.155 0.858
## N:P 6 0.001183 0.0001972 0.375 0.888
## Residuals 24 0.012632 0.0005263
Residuales=residuals(PSTALLO.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(PSTALLO.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.086276, p-value = 0.9304
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.97939, p-value = 0.7247
bartlett.test(res_estan~PSTALLO.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSTALLO.tasa.E1$N
## Bartlett's K-squared = 2.1893, df = 3, p-value = 0.5341
bartlett.test(res_estan~PSTALLO.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSTALLO.tasa.E1$P
## Bartlett's K-squared = 1.0372, df = 2, p-value = 0.5954
PSTALLO.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(PSTALLO~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
tidyr::separate(., col=TTO1, into=c("N","P"), sep="/")
PSTALLO.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.02384290 0.6383045 0.0N 0.0P
## 2 2 0.0N/0.0P 0.04850820 0.6365610 0.0N 0.0P
## 3 3 0.0N/0.0P 0.04767486 0.6024554 0.0N 0.0P
## 4 1 0.0N/0.25P 0.02024180 0.5618520 0.0N 0.25P
## 5 2 0.0N/0.25P 0.03362978 0.5393378 0.0N 0.25P
## 6 3 0.0N/0.25P 0.05835929 0.4224614 0.0N 0.25P
## 7 1 0.0N/0.5P 0.03224317 0.5582140 0.0N 0.5P
## 8 2 0.0N/0.5P 0.01634358 0.3374451 0.0N 0.5P
## 9 3 0.0N/0.5P 0.04696639 0.5344415 0.0N 0.5P
## 10 1 1.1N/0.0P 0.02237964 0.3897380 1.1N 0.0P
## 11 2 1.1N/0.0P 0.04394809 0.5853810 1.1N 0.0P
## 12 3 1.1N/0.0P 0.06361407 0.3501962 1.1N 0.0P
## 13 1 1.1N/0.25P 0.04907691 0.6244463 1.1N 0.25P
## 14 2 1.1N/0.25P 0.06158470 0.4205139 1.1N 0.25P
## 15 3 1.1N/0.25P 0.13246858 0.6333537 1.1N 0.25P
## 16 1 1.1N/0.5P 0.11976776 0.7908665 1.1N 0.5P
## 17 2 1.1N/0.5P 0.13103245 0.8149119 1.1N 0.5P
## 18 3 1.1N/0.5P 0.13674947 0.8027086 1.1N 0.5P
## 19 1 1.8N/0.0P 0.03653689 0.8276451 1.8N 0.0P
## 20 2 1.8N/0.0P 0.04459563 0.6162199 1.8N 0.0P
## 21 3 1.8N/0.0P 0.09935109 0.7688120 1.8N 0.0P
## 22 1 1.8N/0.25P 0.09836749 0.8251635 1.8N 0.25P
## 23 2 1.8N/0.25P 0.08729071 0.7280926 1.8N 0.25P
## 24 3 1.8N/0.25P 0.09711776 0.6385191 1.8N 0.25P
## 25 1 1.8N/0.5P 0.02605738 0.6495979 1.8N 0.5P
## 26 2 1.8N/0.5P 0.05239071 0.5650702 1.8N 0.5P
## 27 3 1.8N/0.5P 0.11228005 0.8009778 1.8N 0.5P
library(car)
PSTALLO.AOV.E2=aov(b1~N*P, data = PSTALLO.tasa.E2)
summary(PSTALLO.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.011300 0.005650 7.841 0.00355 **
## P 2 0.003839 0.001919 2.664 0.09697 .
## N:P 4 0.009496 0.002374 3.295 0.03422 *
## Residuals 18 0.012969 0.000721
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(PSTALLO.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(PSTALLO.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.12954, p-value = 0.7076
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.9471, p-value = 0.1822
bartlett.test(res_estan~PSTALLO.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSTALLO.tasa.E2$N
## Bartlett's K-squared = 3.4465, df = 2, p-value = 0.1785
bartlett.test(res_estan~PSTALLO.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSTALLO.tasa.E2$P
## Bartlett's K-squared = 0.19014, df = 2, p-value = 0.9093
PSTALLO.AOV.E2.1=aov(b1~TTO, data = PSTALLO.tasa.E2)
summary(PSTALLO.AOV.E2.1)
## Df Sum Sq Mean Sq F value Pr(>F)
## TTO 8 0.02463 0.0030792 4.274 0.00501 **
## Residuals 18 0.01297 0.0007205
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
library(agricolae)
HSD.test(PSTALLO.AOV.E2.1, "TTO", console=T)
##
## Study: PSTALLO.AOV.E2.1 ~ "TTO"
##
## HSD Test for b1
##
## Mean Square Error: 0.0007205092
##
## TTO, means
##
## b1 std r se Min Max Q25
## 0.0N/0.0P 0.04000865 0.014006154 3 0.01549741 0.02384290 0.04850820 0.03575888
## 0.0N/0.25P 0.03741029 0.019337913 3 0.01549741 0.02024180 0.05835929 0.02693579
## 0.0N/0.5P 0.03185105 0.015315172 3 0.01549741 0.01634358 0.04696639 0.02429337
## 1.1N/0.0P 0.04331393 0.020624526 3 0.01549741 0.02237964 0.06361407 0.03316387
## 1.1N/0.25P 0.08104340 0.044972472 3 0.01549741 0.04907691 0.13246858 0.05533081
## 1.1N/0.5P 0.12918322 0.008640562 3 0.01549741 0.11976776 0.13674947 0.12540010
## 1.8N/0.0P 0.06016120 0.034177792 3 0.01549741 0.03653689 0.09935109 0.04056626
## 1.8N/0.25P 0.09425865 0.006066681 3 0.01549741 0.08729071 0.09836749 0.09220423
## 1.8N/0.5P 0.06357605 0.044186212 3 0.01549741 0.02605738 0.11228005 0.03922404
## Q50 Q75
## 0.0N/0.0P 0.04767486 0.04809153
## 0.0N/0.25P 0.03362978 0.04599454
## 0.0N/0.5P 0.03224317 0.03960478
## 1.1N/0.0P 0.04394809 0.05378108
## 1.1N/0.25P 0.06158470 0.09702664
## 1.1N/0.5P 0.13103245 0.13389096
## 1.8N/0.0P 0.04459563 0.07197336
## 1.8N/0.25P 0.09711776 0.09774262
## 1.8N/0.5P 0.05239071 0.08233538
##
## Alpha: 0.05 ; DF Error: 18
## Critical Value of Studentized Range: 4.955209
##
## Minimun Significant Difference: 0.07679291
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 1.1N/0.5P 0.12918322 a
## 1.8N/0.25P 0.09425865 ab
## 1.1N/0.25P 0.08104340 ab
## 1.8N/0.5P 0.06357605 ab
## 1.8N/0.0P 0.06016120 ab
## 1.1N/0.0P 0.04331393 b
## 0.0N/0.0P 0.04000865 b
## 0.0N/0.25P 0.03741029 b
## 0.0N/0.5P 0.03185105 b
GRAF.PSTALLO.E2.TTO <- PSTALLO.tasa.E2 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., TTO) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("b","b","b","b","ab","a","ab","ab","ab"))
GRAF.PSTALLO.E2.TTO
## # A tibble: 9 × 8
## TTO n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N/0.0P 3 0.0400 0.0140 0.626 0.00809 0.0348 b
## 2 0.0N/0.25P 3 0.0374 0.0193 0.508 0.0112 0.0480 b
## 3 0.0N/0.5P 3 0.0319 0.0153 0.477 0.00884 0.0380 b
## 4 1.1N/0.0P 3 0.0433 0.0206 0.442 0.0119 0.0512 b
## 5 1.1N/0.25P 3 0.0810 0.0450 0.559 0.0260 0.112 ab
## 6 1.1N/0.5P 3 0.129 0.00864 0.803 0.00499 0.0215 a
## 7 1.8N/0.0P 3 0.0602 0.0342 0.738 0.0197 0.0849 ab
## 8 1.8N/0.25P 3 0.0943 0.00607 0.731 0.00350 0.0151 ab
## 9 1.8N/0.5P 3 0.0636 0.0442 0.672 0.0255 0.110 ab
ggplot(GRAF.PSTALLO.E2.TTO, aes(x=TTO, y=media))+
geom_point(size=2, position=position_dodge(.3))+
geom_errorbar(ymin=GRAF.PSTALLO.E2.TTO$media-GRAF.PSTALLO.E2.TTO$se,
ymax=GRAF.PSTALLO.E2.TTO$media+GRAF.PSTALLO.E2.TTO$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=TTO, y=media+se+0.02),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=TTO, y=media-se-0.02),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa de crecimiento de tallo \n(g por semana)")+
theme_classic()+
theme(axis.text = element_text(angle = 90))+
ylim(-0.08, 0.25)
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(PSR~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
PSR.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(PSR~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
PSR.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.0306666667 0.86999913 0.0N 0.0P
## 2 2 0.0N/0.0P 0.0302111111 0.64102499 0.0N 0.0P
## 3 3 0.0N/0.0P 0.0422500000 0.63456296 0.0N 0.0P
## 4 1 0.0N/0.25P 0.0137777778 0.22079304 0.0N 0.25P
## 5 2 0.0N/0.25P 0.0421166667 0.83117462 0.0N 0.25P
## 6 3 0.0N/0.25P 0.0425000000 0.80865493 0.0N 0.25P
## 7 1 0.0N/0.5P 0.0325277778 0.90867108 0.0N 0.5P
## 8 2 0.0N/0.5P 0.0288333333 0.84993016 0.0N 0.5P
## 9 3 0.0N/0.5P 0.0370555556 0.53793624 0.0N 0.5P
## 10 1 1.1N/0.0P 0.0074166667 0.37069204 1.1N 0.0P
## 11 2 1.1N/0.0P 0.0134444444 0.56778872 1.1N 0.0P
## 12 3 1.1N/0.0P -0.0044722222 0.05485694 1.1N 0.0P
## 13 1 1.1N/0.25P 0.0037833333 0.07851439 1.1N 0.25P
## 14 2 1.1N/0.25P 0.0124444444 0.46684034 1.1N 0.25P
## 15 3 1.1N/0.25P 0.0103888889 0.36332014 1.1N 0.25P
## 16 1 1.1N/0.5P 0.0082500000 0.67240670 1.1N 0.5P
## 17 2 1.1N/0.5P -0.0034722222 0.28399108 1.1N 0.5P
## 18 3 1.1N/0.5P 0.0188611111 0.49652713 1.1N 0.5P
## 19 1 1.8N/0.0P 0.0028333333 0.07686435 1.8N 0.0P
## 20 2 1.8N/0.0P 0.0037222222 0.22051022 1.8N 0.0P
## 21 3 1.8N/0.0P 0.0145555556 0.25658351 1.8N 0.0P
## 22 1 1.8N/0.25P 0.0024722222 0.02582575 1.8N 0.25P
## 23 2 1.8N/0.25P -0.0010277778 0.00380080 1.8N 0.25P
## 24 3 1.8N/0.25P -0.0048888889 0.08305983 1.8N 0.25P
## 25 1 1.8N/0.5P 0.0016944444 0.03728158 1.8N 0.5P
## 26 2 1.8N/0.5P 0.0007777778 0.01191429 1.8N 0.5P
## 27 3 1.8N/0.5P -0.0087166667 0.10830735 1.8N 0.5P
## 28 1 2.5N/0.0P 0.0044166667 0.52262266 2.5N 0.0P
## 29 2 2.5N/0.0P -0.0023611111 0.15922514 2.5N 0.0P
## 30 3 2.5N/0.0P -0.0018722222 0.01454081 2.5N 0.0P
## 31 1 2.5N/0.25P -0.0010277778 0.03227759 2.5N 0.25P
## 32 2 2.5N/0.25P -0.0055555556 0.26596570 2.5N 0.25P
## 33 3 2.5N/0.25P 0.0110277778 0.37622262 2.5N 0.25P
## 34 1 2.5N/0.5P 0.0015277778 0.26137673 2.5N 0.5P
## 35 2 2.5N/0.5P 0.0026111111 0.11575849 2.5N 0.5P
## 36 3 2.5N/0.5P 0.0020555556 0.03862431 2.5N 0.5P
library(car)
PSR.AOV.E1=aov(b1~N*P, data = PSR.tasa.E1)
summary(PSR.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.006317 0.0021058 34.115 8.08e-09 ***
## P 2 0.000016 0.0000082 0.132 0.877
## N:P 6 0.000164 0.0000274 0.444 0.842
## Residuals 24 0.001481 0.0000617
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(PSR.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(PSR.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.092427, p-value = 0.8904
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.96528, p-value = 0.3109
bartlett.test(res_estan~PSR.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSR.tasa.E1$N
## Bartlett's K-squared = 5.0912, df = 3, p-value = 0.1652
bartlett.test(res_estan~PSR.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSR.tasa.E1$P
## Bartlett's K-squared = 2.0584, df = 2, p-value = 0.3573
library(agricolae)
HSD.test(PSR.AOV.E1, "N", console=T)
##
## Study: PSR.AOV.E1 ~ "N"
##
## HSD Test for b1
##
## Mean Square Error: 6.172548e-05
##
## N, means
##
## b1 std r se Min Max Q25
## 0.0N 0.033326543 0.009183642 9 0.002618852 0.013777778 0.04250000 0.030211111
## 1.1N 0.007404938 0.007707383 9 0.002618852 -0.004472222 0.01886111 0.003783333
## 1.8N 0.001269136 0.006420440 9 0.002618852 -0.008716667 0.01455556 -0.001027778
## 2.5N 0.001202469 0.004776025 9 0.002618852 -0.005555556 0.01102778 -0.001872222
## Q50 Q75
## 0.0N 0.032527778 0.042116667
## 1.1N 0.008250000 0.012444444
## 1.8N 0.001694444 0.002833333
## 2.5N 0.001527778 0.002611111
##
## Alpha: 0.05 ; DF Error: 24
## Critical Value of Studentized Range: 3.901262
##
## Minimun Significant Difference: 0.01021683
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 0.0N 0.033326543 a
## 1.1N 0.007404938 b
## 1.8N 0.001269136 b
## 2.5N 0.001202469 b
## Plot of the simple nitrogen factor, trial 1
GRAF.PSR.E1.N <- PSR.tasa.E1 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("a","b","b","b"))
GRAF.PSR.E1.N
## # A tibble: 4 × 8
## N n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 9 0.0333 0.00918 0.700 0.00306 0.00706 a
## 2 1.1N 9 0.00740 0.00771 0.373 0.00257 0.00592 b
## 3 1.8N 9 0.00127 0.00642 0.0916 0.00214 0.00494 b
## 4 2.5N 9 0.00120 0.00478 0.199 0.00159 0.00367 b
ggplot(GRAF.PSR.E1.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_errorbar(ymin=GRAF.PSR.E1.N$media-GRAF.PSR.E1.N$se,
ymax=GRAF.PSR.E1.N$media+GRAF.PSR.E1.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=N, y=media+se+0.01 ),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=N, y=media-se-0.01 ),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa de crecimiento de raíz \n(g por semana)")+
theme_classic()+
ylim(-0.04, 0.08)
### TRIAL 2
PSR.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(PSR~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
tidyr::separate(., col=TTO1, into=c("N","P"), sep="/")
PSR.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.020901639 0.44490134 0.0N 0.0P
## 2 2 0.0N/0.0P 0.051040984 0.51309644 0.0N 0.0P
## 3 3 0.0N/0.0P 0.067819672 0.48916539 0.0N 0.0P
## 4 1 0.0N/0.25P 0.016939891 0.27309746 0.0N 0.25P
## 5 2 0.0N/0.25P 0.064039617 0.65910650 0.0N 0.25P
## 6 3 0.0N/0.25P 0.089332787 0.55239881 0.0N 0.25P
## 7 1 0.0N/0.5P 0.014132514 0.51619431 0.0N 0.5P
## 8 2 0.0N/0.5P 0.042178962 0.60122040 0.0N 0.5P
## 9 3 0.0N/0.5P 0.062866393 0.45640023 0.0N 0.5P
## 10 1 1.1N/0.0P 0.011085383 0.54516623 1.1N 0.0P
## 11 2 1.1N/0.0P 0.014072404 0.52902862 1.1N 0.0P
## 12 3 1.1N/0.0P 0.018844945 0.41440698 1.1N 0.0P
## 13 1 1.1N/0.25P 0.008384836 0.21039460 1.1N 0.25P
## 14 2 1.1N/0.25P 0.017990437 0.69937221 1.1N 0.25P
## 15 3 1.1N/0.25P 0.048983607 0.65450057 1.1N 0.25P
## 16 1 1.1N/0.5P 0.056704918 0.66083681 1.1N 0.5P
## 17 2 1.1N/0.5P 0.051896858 0.49042461 1.1N 0.5P
## 18 3 1.1N/0.5P 0.048743921 0.40937071 1.1N 0.5P
## 19 1 1.8N/0.0P 0.006830601 0.37286433 1.8N 0.0P
## 20 2 1.8N/0.0P 0.014703552 0.29855740 1.8N 0.0P
## 21 3 1.8N/0.0P 0.007203552 0.02941547 1.8N 0.0P
## 22 1 1.8N/0.25P 0.037740710 0.76047780 1.8N 0.25P
## 23 2 1.8N/0.25P 0.037849727 0.33370313 1.8N 0.25P
## 24 3 1.8N/0.25P 0.040210929 0.66366553 1.8N 0.25P
## 25 1 1.8N/0.5P 0.018519126 0.38995789 1.8N 0.5P
## 26 2 1.8N/0.5P 0.034913934 0.30467952 1.8N 0.5P
## 27 3 1.8N/0.5P 0.044385246 0.90547156 1.8N 0.5P
library(car)
PSR.AOV.E2=aov(b1~N*P, data = PSR.tasa.E2)
summary(PSR.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.002199 0.0010996 3.105 0.0694 .
## P 2 0.001798 0.0008991 2.538 0.1069
## N:P 4 0.002335 0.0005838 1.648 0.2057
## Residuals 18 0.006376 0.0003542
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(PSR.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(PSR.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.18427, p-value = 0.2822
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.94431, p-value = 0.1556
bartlett.test(res_estan~PSR.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSR.tasa.E2$N
## Bartlett's K-squared = 12.404, df = 2, p-value = 0.002025
bartlett.test(res_estan~PSR.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSR.tasa.E2$P
## Bartlett's K-squared = 2.6148, df = 2, p-value = 0.2705
library(MASS)
boxcox(PSR.AOV.E2, lambda = seq(-0.5, 0.6, by = 0.1))
hist(PSR.tasa.E2$b1^-0.3)
PSR.AOV2.E2=aov( (b1^0.2)~N*P, data = PSR.tasa.E2)
summary(PSR.AOV2.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.01791 0.008956 2.812 0.0865 .
## P 2 0.02597 0.012983 4.077 0.0346 *
## N:P 4 0.03122 0.007804 2.451 0.0834 .
## Residuals 18 0.05732 0.003185
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
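# The 0.2 exponent above appears to be read off the Box-Cox plot by eye; the lambda that maximizes
# the profile log-likelihood can also be taken from the boxcox object directly (a small sketch; bc
# and lambda_opt are illustrative names):
bc <- boxcox(PSR.AOV.E2, lambda = seq(-0.5, 0.6, by = 0.1), plotit = FALSE)
lambda_opt <- bc$x[which.max(bc$y)]  # lambda with the highest log-likelihood over the grid
lambda_opt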
Residuales=residuals(PSR.AOV2.E2)
res_estan=Residuales/sd(Residuales)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.13613, p-value = 0.6496
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.96765, p-value = 0.5411
bartlett.test(res_estan~PSR.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSR.tasa.E2$N
## Bartlett's K-squared = 5.566, df = 2, p-value = 0.06185
bartlett.test(res_estan~PSR.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PSR.tasa.E2$P
## Bartlett's K-squared = 1.9189, df = 2, p-value = 0.3831
library(agricolae)
HSD.test(PSR.AOV2.E2, "N", console=T, alpha = 0.1)
##
## Study: PSR.AOV2.E2 ~ "N"
##
## HSD Test for (b1^0.2)
##
## Mean Square Error: 0.003184525
##
## N, means
##
## X.b1.0.2. std r se Min Max Q25 Q50
## 0.0N 0.5295184 0.06913368 9 0.01881053 0.4266226 0.6168821 0.4613559 0.5515486
## 1.1N 0.4807540 0.07116958 9 0.01881053 0.3843252 0.5632798 0.4262591 0.4518965
## 1.8N 0.4704707 0.06684712 9 0.01881053 0.3688857 0.5363493 0.4300158 0.5112105
## Q75
## 0.0N 0.5771514
## 1.1N 0.5470287
## 1.8N 0.5195323
##
## Alpha: 0.1 ; DF Error: 18
## Critical Value of Studentized Range: 3.097996
##
## Minimun Significant Difference: 0.05827496
##
## Treatments with the same letter are not significantly different.
##
## (b1^0.2) groups
## 0.0N 0.5295184 a
## 1.1N 0.4807540 ab
## 1.8N 0.4704707 b
## Plot of the simple nitrogen factor, trial 2
GRAF.PSR.E2.N <- PSR.tasa.E2 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("a","ab","b"))
GRAF.PSR.E2.N
## # A tibble: 3 × 8
## N n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 9 0.0477 0.0261 0.501 0.00871 0.0201 a
## 2 1.1N 9 0.0307 0.0201 0.513 0.00672 0.0155 ab
## 3 1.8N 9 0.0269 0.0150 0.451 0.00499 0.0115 b
ggplot(GRAF.PSR.E2.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_errorbar(ymin=GRAF.PSR.E2.N$media-GRAF.PSR.E2.N$se,
ymax=GRAF.PSR.E2.N$media+GRAF.PSR.E2.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=N, y=media+se+0.01 ),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=N, y=media-se-0.01 ),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa de crecimiento de raíz \n(g por semana)")+
theme_classic()+
ylim(0.0, 0.08)
library(agricolae)
HSD.test(PSR.AOV2.E2, "P", console=T, alpha = 0.05)
##
## Study: PSR.AOV2.E2 ~ "P"
##
## HSD Test for (b1^0.2)
##
## Mean Square Error: 0.003184525
##
## P, means
##
## X.b1.0.2. std r se Min Max Q25 Q50
## 0.0P 0.4503329 0.07394918 9 0.01881053 0.3688857 0.5838095 0.4063967 0.4300158
## 0.25P 0.5088999 0.07242922 9 0.01881053 0.3843252 0.6168821 0.4477219 0.5195323
## 0.5P 0.5215103 0.05090942 9 0.01881053 0.4266226 0.5750210 0.5112105 0.5363493
## Q75
## 0.0P 0.4613559
## 0.25P 0.5470287
## 0.5P 0.5533861
##
## Alpha: 0.05 ; DF Error: 18
## Critical Value of Studentized Range: 3.609304
##
## Minimun Significant Difference: 0.06789293
##
## Treatments with the same letter are not significantly different.
##
## (b1^0.2) groups
## 0.5P 0.5215103 a
## 0.25P 0.5088999 ab
## 0.0P 0.4503329 b
## Plot of the simple phosphorus factor, trial 2
GRAF.PSR.E2.P <- PSR.tasa.E2 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., P) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("b","a","a"))
GRAF.PSR.E2.P
## # A tibble: 3 × 8
## P n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0P 9 0.0236 0.0213 0.404 0.00709 0.0163 b
## 2 0.25P 9 0.0402 0.0253 0.534 0.00843 0.0194 a
## 3 0.5P 9 0.0416 0.0165 0.526 0.00550 0.0127 a
ggplot(GRAF.PSR.E2.P, aes(x=P, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_errorbar(ymin=GRAF.PSR.E2.P$media-GRAF.PSR.E2.P$se,
ymax=GRAF.PSR.E2.P$media+GRAF.PSR.E2.P$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=P, y=media+se+0.01 ),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=P, y=media-se-0.01 ),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa de crecimiento de raíz \n(g por semana)")+
theme_classic()+
ylim(0.0, 0.08)
# N x P interaction # shows no differences
PSR.AOV.E2.1=aov(b1~TTO, data = PSR.tasa.E2)
summary(PSR.AOV.E2.1)
## Df Sum Sq Mean Sq F value Pr(>F)
## TTO 8 0.006332 0.0007916 2.235 0.0747 .
## Residuals 18 0.006376 0.0003542
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
library(agricolae)
HSD.test(PSR.AOV.E2.1, "TTO", console=T, alpha = 0.1)
##
## Study: PSR.AOV.E2.1 ~ "TTO"
##
## HSD Test for b1
##
## Mean Square Error: 0.000354198
##
## TTO, means
##
## b1 std r se Min Max
## 0.0N/0.0P 0.046587432 0.023773957 3 0.01086582 0.020901639 0.06781967
## 0.0N/0.25P 0.056770765 0.036739761 3 0.01086582 0.016939891 0.08933279
## 0.0N/0.5P 0.039725956 0.024459368 3 0.01086582 0.014132514 0.06286639
## 1.1N/0.0P 0.014667577 0.003913870 3 0.01086582 0.011085383 0.01884495
## 1.1N/0.25P 0.025119627 0.021217541 3 0.01086582 0.008384836 0.04898361
## 1.1N/0.5P 0.052448566 0.004009072 3 0.01086582 0.048743921 0.05670492
## 1.8N/0.0P 0.009579235 0.004441705 3 0.01086582 0.006830601 0.01470355
## 1.8N/0.25P 0.038600455 0.001395776 3 0.01086582 0.037740710 0.04021093
## 1.8N/0.5P 0.032606102 0.013086581 3 0.01086582 0.018519126 0.04438525
## Q25 Q50 Q75
## 0.0N/0.0P 0.035971311 0.051040984 0.05943033
## 0.0N/0.25P 0.040489754 0.064039617 0.07668620
## 0.0N/0.5P 0.028155738 0.042178962 0.05252268
## 1.1N/0.0P 0.012578893 0.014072404 0.01645867
## 1.1N/0.25P 0.013187637 0.017990437 0.03348702
## 1.1N/0.5P 0.050320389 0.051896858 0.05430089
## 1.8N/0.0P 0.007017077 0.007203552 0.01095355
## 1.8N/0.25P 0.037795219 0.037849727 0.03903033
## 1.8N/0.5P 0.026716530 0.034913934 0.03964959
##
## Alpha: 0.1 ; DF Error: 18
## Critical Value of Studentized Range: 4.439598
##
## Minimun Significant Difference: 0.04823987
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 0.0N/0.25P 0.056770765 a
## 1.1N/0.5P 0.052448566 a
## 0.0N/0.0P 0.046587432 a
## 0.0N/0.5P 0.039725956 a
## 1.8N/0.25P 0.038600455 a
## 1.8N/0.5P 0.032606102 a
## 1.1N/0.25P 0.025119627 a
## 1.1N/0.0P 0.014667577 a
## 1.8N/0.0P 0.009579235 a
GRAF.PSR.E2.TTO <- PSR.tasa.E2 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., TTO) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("b","b","b","b","ab","a","ab","ab","ab"))
GRAF.PSR.E2.TTO
## # A tibble: 9 × 8
## TTO n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N/0.0P 3 0.0466 0.0238 0.482 0.0137 0.0591 b
## 2 0.0N/0.25P 3 0.0568 0.0367 0.495 0.0212 0.0913 b
## 3 0.0N/0.5P 3 0.0397 0.0245 0.525 0.0141 0.0608 b
## 4 1.1N/0.0P 3 0.0147 0.00391 0.496 0.00226 0.00972 b
## 5 1.1N/0.25P 3 0.0251 0.0212 0.521 0.0122 0.0527 ab
## 6 1.1N/0.5P 3 0.0524 0.00401 0.520 0.00231 0.00996 a
## 7 1.8N/0.0P 3 0.00958 0.00444 0.234 0.00256 0.0110 ab
## 8 1.8N/0.25P 3 0.0386 0.00140 0.586 0.000806 0.00347 ab
## 9 1.8N/0.5P 3 0.0326 0.0131 0.533 0.00756 0.0325 ab
#ggplot(GRAF.PSR.E2.TTO, aes(x=TTO, y=media))+
# geom_point(size=2, position=position_dodge(.3))+
# geom_errorbar(ymin=GRAF.PSR.E2.TTO$media-GRAF.PSR.E2.TTO$se,
# ymax=GRAF.PSR.E2.TTO$media+GRAF.PSR.E2.TTO$se,
# alpha=0.7, width=.3, color="darkgray",
# position=position_dodge(.3) )+
# geom_text(aes(label=Tukey, x=TTO, y=media+se+0.02),
# color="red", size=4)+
# geom_text(aes(label=round(R2, digits=2), x=TTO, y=media-se-0.02),
# color="blue", size=3)+
# labs(x = "Tratamiento",y = "Tasa de crecimiento de raíz \n(g por semana)")+
# theme_classic()+
# theme(axis.text = element_text(angle = 90))+
# ylim(-0.08, 0.15)
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(PST~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Tasa de crecimiento planta \n(g por semana)")
PST.tasa.E1 <- tasa %>%
dplyr::filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(PST~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
dplyr::mutate(., TTO1=TTO) %>%
tidyr::separate(., col=TTO1, into=c("N","P"), sep="/")
PST.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.098611111 0.9215918 0.0N 0.0P
## 2 2 0.0N/0.0P 0.151255556 0.8840571 0.0N 0.0P
## 3 3 0.0N/0.0P 0.191111111 0.9229230 0.0N 0.0P
## 4 1 0.0N/0.25P 0.067972222 0.4963118 0.0N 0.25P
## 5 2 0.0N/0.25P 0.157433333 0.9917190 0.0N 0.25P
## 6 3 0.0N/0.25P 0.163000000 0.9341481 0.0N 0.25P
## 7 1 0.0N/0.5P 0.097166667 0.9520145 0.0N 0.5P
## 8 2 0.0N/0.5P 0.091305556 0.8944999 0.0N 0.5P
## 9 3 0.0N/0.5P 0.140111111 0.6938877 0.0N 0.5P
## 10 1 1.1N/0.0P 0.053750000 0.3434415 1.1N 0.0P
## 11 2 1.1N/0.0P 0.122555556 0.8223241 1.1N 0.0P
## 12 3 1.1N/0.0P 0.219583333 0.8572718 1.1N 0.0P
## 13 1 1.1N/0.25P 0.115677778 0.8746793 1.1N 0.25P
## 14 2 1.1N/0.25P 0.126472222 0.9800676 1.1N 0.25P
## 15 3 1.1N/0.25P 0.195138889 0.9126572 1.1N 0.25P
## 16 1 1.1N/0.5P 0.091638889 0.9559999 1.1N 0.5P
## 17 2 1.1N/0.5P 0.122666667 0.9500021 1.1N 0.5P
## 18 3 1.1N/0.5P 0.231722222 0.8618978 1.1N 0.5P
## 19 1 1.8N/0.0P 0.052277778 0.6229590 1.8N 0.0P
## 20 2 1.8N/0.0P 0.104777778 0.9892801 1.8N 0.0P
## 21 3 1.8N/0.0P 0.173000000 0.8426718 1.8N 0.0P
## 22 1 1.8N/0.25P 0.053361111 0.5625925 1.8N 0.25P
## 23 2 1.8N/0.25P 0.096666667 0.9054794 1.8N 0.25P
## 24 3 1.8N/0.25P 0.084388889 0.8916065 1.8N 0.25P
## 25 1 1.8N/0.5P 0.057361111 0.9342131 1.8N 0.5P
## 26 2 1.8N/0.5P 0.094888889 0.9215193 1.8N 0.5P
## 27 3 1.8N/0.5P 0.122938889 0.4687226 1.8N 0.5P
## 28 1 2.5N/0.0P 0.007666667 0.1389755 2.5N 0.0P
## 29 2 2.5N/0.0P 0.072611111 0.8679277 2.5N 0.0P
## 30 3 2.5N/0.0P 0.106727778 0.7961168 2.5N 0.0P
## 31 1 2.5N/0.25P 0.041333333 0.2686940 2.5N 0.25P
## 32 2 2.5N/0.25P 0.093666667 0.8500370 2.5N 0.25P
## 33 3 2.5N/0.25P 0.173500000 0.9028592 2.5N 0.25P
## 34 1 2.5N/0.5P 0.027666667 0.8586108 2.5N 0.5P
## 35 2 2.5N/0.5P 0.075388889 0.7739279 2.5N 0.5P
## 36 3 2.5N/0.5P 0.123222222 0.8512010 2.5N 0.5P
library(car)
PST.AOV.E1=aov(b1~N*P, data = PST.tasa.E1)
summary(PST.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.02289 0.007630 2.661 0.071 .
## P 2 0.00041 0.000206 0.072 0.931
## N:P 6 0.00627 0.001045 0.365 0.894
## Residuals 24 0.06882 0.002867
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(PST.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(PST.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.081495, p-value = 0.9548
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.97197, p-value = 0.4818
bartlett.test(res_estan~PST.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PST.tasa.E1$N
## Bartlett's K-squared = 2.5169, df = 3, p-value = 0.4722
bartlett.test(res_estan~PST.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PST.tasa.E1$P
## Bartlett's K-squared = 0.81335, df = 2, p-value = 0.6659
library(agricolae)
HSD.test(PST.AOV.E1, "N", console=T, alpha = 0.06)
##
## Study: PST.AOV.E1 ~ "N"
##
## HSD Test for b1
##
## Mean Square Error: 0.002867438
##
## N, means
##
## b1 std r se Min Max Q25
## 0.0N 0.12866296 0.04111279 9 0.01784949 0.067972222 0.1911111 0.09716667
## 1.1N 0.14213395 0.06010716 9 0.01784949 0.053750000 0.2317222 0.11567778
## 1.8N 0.09329568 0.03877032 9 0.01784949 0.052277778 0.1730000 0.05736111
## 2.5N 0.08019815 0.05129945 9 0.01784949 0.007666667 0.1735000 0.04133333
## Q50 Q75
## 0.0N 0.14011111 0.1574333
## 1.1N 0.12266667 0.1951389
## 1.8N 0.09488889 0.1047778
## 2.5N 0.07538889 0.1067278
##
## Alpha: 0.06 ; DF Error: 24
## Critical Value of Studentized Range: 3.779244
##
## Minimun Significant Difference: 0.06745757
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 1.1N 0.14213395 a
## 0.0N 0.12866296 a
## 1.8N 0.09329568 a
## 2.5N 0.08019815 a
## Plot of the simple nitrogen factor, trial 1
GRAF.PST.E1.N <- PST.tasa.E1 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("ab","a","ab", "b"))
GRAF.PST.E1.N
## # A tibble: 4 × 8
## N n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 9 0.129 0.0411 0.855 0.0137 0.0316 ab
## 2 1.1N 9 0.142 0.0601 0.840 0.0200 0.0462 a
## 3 1.8N 9 0.0933 0.0388 0.793 0.0129 0.0298 ab
## 4 2.5N 9 0.0802 0.0513 0.701 0.0171 0.0394 b
ggplot(GRAF.PST.E1.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_errorbar(ymin=GRAF.PST.E1.N$media-GRAF.PST.E1.N$se,
ymax=GRAF.PST.E1.N$media+GRAF.PST.E1.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=N, y=media+ic+0.01 ),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=N, y=media-ic-0.01 ),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa de crecimiento planta \n(g por semana)")+
theme_classic()+
ylim(0, 0.25)
### TRIAL 2
PST.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(PST~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
PST.tasa.E2
library(car)
PST.AOV.E2=aov(b1~N*P, data = PST.tasa.E2)
summary(PST.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.05427 0.02714 3.586 0.0489 *
## P 2 0.08212 0.04106 5.426 0.0143 *
## N:P 4 0.12974 0.03243 4.286 0.0131 *
## Residuals 18 0.13621 0.00757
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(PST.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(PST.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.097752, p-value = 0.9364
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.98136, p-value = 0.8917
bartlett.test(res_estan~PST.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PST.tasa.E2$N
## Bartlett's K-squared = 0.36418, df = 2, p-value = 0.8335
bartlett.test(res_estan~PST.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by PST.tasa.E2$P
## Bartlett's K-squared = 1.1774, df = 2, p-value = 0.555
PST.AOV.E2.1=aov(b1~TTO, data = PST.tasa.E2)
summary(PST.AOV.E2.1)
## Df Sum Sq Mean Sq F value Pr(>F)
## TTO 8 0.2661 0.03327 4.396 0.00435 **
## Residuals 18 0.1362 0.00757
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
library(agricolae)
HSD.test(PST.AOV.E2.1, "TTO", console=T, alpha = 0.07)
##
## Study: PST.AOV.E2.1 ~ "TTO"
##
## HSD Test for b1
##
## Mean Square Error: 0.007567149
##
## TTO, means
##
## b1 std r se Min Max Q25
## 0.0N/0.0P 0.1825651 0.06310799 3 0.05022333 0.11079645 0.2293839 0.1591557
## 0.0N/0.25P 0.1962861 0.10460327 3 0.05022333 0.08746311 0.2960850 0.1463866
## 0.0N/0.5P 0.1432302 0.04859289 3 0.05022333 0.09500546 0.1921831 0.1187538
## 1.1N/0.0P 0.1266500 0.07037085 3 0.05022333 0.05084276 0.1898914 0.0950293
## 1.1N/0.25P 0.2773298 0.13938259 3 0.05022333 0.15827090 0.4306475 0.2006710
## 1.1N/0.5P 0.4440650 0.04221947 3 0.05022333 0.40379544 0.4879959 0.4220996
## 1.8N/0.0P 0.1557353 0.06980847 3 0.05022333 0.10466120 0.2352798 0.1159631
## 1.8N/0.25P 0.3465259 0.01385028 3 0.05022333 0.33053607 0.3547948 0.3423914
## 1.8N/0.5P 0.2242341 0.13996969 3 0.05022333 0.08837022 0.3679768 0.1523627
## Q50 Q75
## 0.0N/0.0P 0.2075150 0.2184495
## 0.0N/0.25P 0.2053101 0.2506975
## 0.0N/0.5P 0.1425020 0.1673426
## 1.1N/0.0P 0.1392158 0.1645536
## 1.1N/0.25P 0.2430710 0.3368593
## 1.1N/0.5P 0.4404037 0.4641998
## 1.8N/0.0P 0.1272650 0.1812724
## 1.8N/0.25P 0.3542467 0.3545208
## 1.8N/0.5P 0.2163552 0.2921660
##
## Alpha: 0.07 ; DF Error: 18
## Critical Value of Studentized Range: 4.708743
##
## Minimun Significant Difference: 0.2364888
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 1.1N/0.5P 0.4440650 a
## 1.8N/0.25P 0.3465259 ab
## 1.1N/0.25P 0.2773298 ab
## 1.8N/0.5P 0.2242341 ab
## 0.0N/0.25P 0.1962861 b
## 0.0N/0.0P 0.1825651 b
## 1.8N/0.0P 0.1557353 b
## 0.0N/0.5P 0.1432302 b
## 1.1N/0.0P 0.1266500 b
GRAF.PST.E2.TTO <- PST.tasa.E2 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., TTO) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("b","b","b","b","ab","a","b","ab","ab"))
GRAF.PST.E2.TTO
## # A tibble: 9 × 8
## TTO n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N/0.0P 3 0.183 0.0631 0.779 0.0364 0.157 b
## 2 0.0N/0.25P 3 0.196 0.105 0.673 0.0604 0.260 b
## 3 0.0N/0.5P 3 0.143 0.0486 0.687 0.0281 0.121 b
## 4 1.1N/0.0P 3 0.127 0.0704 0.523 0.0406 0.175 b
## 5 1.1N/0.25P 3 0.277 0.139 0.718 0.0805 0.346 ab
## 6 1.1N/0.5P 3 0.444 0.0422 0.774 0.0244 0.105 a
## 7 1.8N/0.0P 3 0.156 0.0698 0.866 0.0403 0.173 b
## 8 1.8N/0.25P 3 0.347 0.0139 0.794 0.00800 0.0344 ab
## 9 1.8N/0.5P 3 0.224 0.140 0.752 0.0808 0.348 ab
ggplot(GRAF.PST.E2.TTO, aes(x=TTO, y=media))+
geom_point(size=2, position=position_dodge(.3))+
geom_errorbar(ymin=GRAF.PST.E2.TTO$media-GRAF.PST.E2.TTO$sd,
ymax=GRAF.PST.E2.TTO$media+GRAF.PST.E2.TTO$sd,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=TTO, y=media+sd+0.05),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=TTO, y=media-sd-0.05),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa de crecimiento planta \n(g por semana)")+
theme_classic()+
theme(axis.text = element_text(angle = 90))+
ylim(-0.15, 0.6)
## Leaf area - AF ### TRIAL 1
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(AF~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
AF.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(AF~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
AF.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 1.85366694 0.5549877369 0.0N 0.0P
## 2 2 0.0N/0.0P 6.02559336 0.9351577039 0.0N 0.0P
## 3 3 0.0N/0.0P 4.17690306 0.4788373392 0.0N 0.0P
## 4 1 0.0N/0.25P 1.14944778 0.0934432390 0.0N 0.25P
## 5 2 0.0N/0.25P 6.22367768 0.6521640583 0.0N 0.25P
## 6 3 0.0N/0.25P 4.43193472 0.4216091903 0.0N 0.25P
## 7 1 0.0N/0.5P 1.43429667 0.5643572420 0.0N 0.5P
## 8 2 0.0N/0.5P 1.67948139 0.2538768601 0.0N 0.5P
## 9 3 0.0N/0.5P 3.63773528 0.3929549751 0.0N 0.5P
## 10 1 1.1N/0.0P 2.91560278 0.5836414728 1.1N 0.0P
## 11 2 1.1N/0.0P 4.70006492 0.7165443349 1.1N 0.0P
## 12 3 1.1N/0.0P 9.79480714 0.7883476053 1.1N 0.0P
## 13 1 1.1N/0.25P 6.69742989 0.7228355477 1.1N 0.25P
## 14 2 1.1N/0.25P 4.21128472 0.6004975951 1.1N 0.25P
## 15 3 1.1N/0.25P 11.17894250 0.9372334014 1.1N 0.25P
## 16 1 1.1N/0.5P 3.75264417 0.6070292726 1.1N 0.5P
## 17 2 1.1N/0.5P 6.94737119 0.8035912986 1.1N 0.5P
## 18 3 1.1N/0.5P 7.12386786 0.8692011092 1.1N 0.5P
## 19 1 1.8N/0.0P 2.01914306 0.3924003579 1.8N 0.0P
## 20 2 1.8N/0.0P 6.85382750 0.8921656799 1.8N 0.0P
## 21 3 1.8N/0.0P 9.60157933 0.6338175229 1.8N 0.0P
## 22 1 1.8N/0.25P 1.42907942 0.0866096778 1.8N 0.25P
## 23 2 1.8N/0.25P 7.33051916 0.8051977685 1.8N 0.25P
## 24 3 1.8N/0.25P 2.24118661 0.5887462885 1.8N 0.25P
## 25 1 1.8N/0.5P 3.55038694 0.9259844766 1.8N 0.5P
## 26 2 1.8N/0.5P 4.46404250 0.9195324727 1.8N 0.5P
## 27 3 1.8N/0.5P 7.98613275 0.7300031485 1.8N 0.5P
## 28 1 2.5N/0.0P 0.30747456 0.0651548819 2.5N 0.0P
## 29 2 2.5N/0.0P 0.34868639 0.0160722723 2.5N 0.0P
## 30 3 2.5N/0.0P 2.20162850 0.2139249308 2.5N 0.0P
## 31 1 2.5N/0.25P 0.08132611 0.0002563398 2.5N 0.25P
## 32 2 2.5N/0.25P 2.51969757 0.9801374834 2.5N 0.25P
## 33 3 2.5N/0.25P 8.96187269 0.9772271442 2.5N 0.25P
## 34 1 2.5N/0.5P 0.87837744 0.3942760987 2.5N 0.5P
## 35 2 2.5N/0.5P 3.36897972 0.6377322740 2.5N 0.5P
## 36 3 2.5N/0.5P 6.02254797 0.8524673286 2.5N 0.5P
library(car)
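# Each B2 block contributes one slope per treatment, so the N x P factorial
# ANOVA below uses the three block-level slopes as replicates (24 residual df).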
AF.AOV.E1=aov(b1~N*P, data = AF.tasa.E1)
summary(AF.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 72.40 24.135 2.880 0.0569 .
## P 2 1.76 0.882 0.105 0.9005
## N:P 6 33.05 5.509 0.657 0.6842
## Residuals 24 201.12 8.380
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(AF.AOV.E1)
res_estan=Residuales/sd(Residuales)
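# Dividing by sd() standardizes the residuals so that ks.test() can compare
# them with a standard normal ("pnorm"); since the sd is estimated from the
# same residuals, the KS p-value is approximate (the Shapiro-Wilk test below
# does not have this problem).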
hist(AF.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.12192, p-value = 0.6148
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.9719, p-value = 0.4796
bartlett.test(res_estan~AF.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by AF.tasa.E1$N
## Bartlett's K-squared = 1.8035, df = 3, p-value = 0.6142
bartlett.test(res_estan~AF.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by AF.tasa.E1$P
## Bartlett's K-squared = 2.9287, df = 2, p-value = 0.2312
# Ensayo 2
AF.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(AF~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
AF.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 5.364906 0.5759293 0.0N 0.0P
## 2 2 0.0N/0.0P 10.836096 0.7954327 0.0N 0.0P
## 3 3 0.0N/0.0P 10.323614 0.7622405 0.0N 0.0P
## 4 1 0.0N/0.25P 3.427498 0.5218335 0.0N 0.25P
## 5 2 0.0N/0.25P 10.775283 0.4651304 0.0N 0.25P
## 6 3 0.0N/0.25P 18.393868 0.6436960 0.0N 0.25P
## 7 1 0.0N/0.5P 2.339322 0.2274092 0.0N 0.5P
## 8 2 0.0N/0.5P 7.814618 0.8035953 0.0N 0.5P
## 9 3 0.0N/0.5P 6.283919 0.5185098 0.0N 0.5P
## 10 1 1.1N/0.0P 3.238677 0.2328672 1.1N 0.0P
## 11 2 1.1N/0.0P 7.322146 0.4810360 1.1N 0.0P
## 12 3 1.1N/0.0P 13.799084 0.6645060 1.1N 0.0P
## 13 1 1.1N/0.25P 7.184296 0.4570721 1.1N 0.25P
## 14 2 1.1N/0.25P 14.967865 0.5044575 1.1N 0.25P
## 15 3 1.1N/0.25P 20.065929 0.8252324 1.1N 0.25P
## 16 1 1.1N/0.5P 30.234876 0.7661495 1.1N 0.5P
## 17 2 1.1N/0.5P 27.559034 0.8036919 1.1N 0.5P
## 18 3 1.1N/0.5P 26.885453 0.8570724 1.1N 0.5P
## 19 1 1.8N/0.0P 5.602786 0.8188450 1.8N 0.0P
## 20 2 1.8N/0.0P 5.072382 0.8103356 1.8N 0.0P
## 21 3 1.8N/0.0P 10.453232 0.7861522 1.8N 0.0P
## 22 1 1.8N/0.25P 17.630261 0.7691996 1.8N 0.25P
## 23 2 1.8N/0.25P 17.184086 0.7824289 1.8N 0.25P
## 24 3 1.8N/0.25P 20.661330 0.7915682 1.8N 0.25P
## 25 1 1.8N/0.5P 4.347204 0.4892661 1.8N 0.5P
## 26 2 1.8N/0.5P 10.314120 0.8286625 1.8N 0.5P
## 27 3 1.8N/0.5P 21.254354 0.8410228 1.8N 0.5P
library(car)
AF.AOV.E2=aov(b1~N*P, data = AF.tasa.E2)
summary(AF.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 318.4 159.20 6.160 0.00916 **
## P 2 284.0 142.02 5.495 0.01371 *
## N:P 4 598.3 149.57 5.788 0.00355 **
## Residuals 18 465.2 25.84
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(AF.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(AF.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.10525, p-value = 0.8955
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.97173, p-value = 0.6479
bartlett.test(res_estan~AF.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by AF.tasa.E2$N
## Bartlett's K-squared = 0.064147, df = 2, p-value = 0.9684
bartlett.test(res_estan~AF.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by AF.tasa.E2$P
## Bartlett's K-squared = 1.1908, df = 2, p-value = 0.5513
AF.AOV.E2.1=aov(b1~TTO, data = AF.tasa.E2)
summary(AF.AOV.E2.1)
## Df Sum Sq Mean Sq F value Pr(>F)
## TTO 8 1200.7 150.09 5.808 0.000956 ***
## Residuals 18 465.2 25.84
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
HSD.test(AF.AOV.E2.1, "TTO", console=T, alpha=0.06)
##
## Study: AF.AOV.E2.1 ~ "TTO"
##
## HSD Test for b1
##
## Mean Square Error: 25.84383
##
## TTO, means
##
## b1 std r se Min Max Q25
## 0.0N/0.0P 8.841539 3.021736 3 2.935066 5.364906 10.836096 7.844260
## 0.0N/0.25P 10.865549 7.483594 3 2.935066 3.427498 18.393868 7.101390
## 0.0N/0.5P 5.479286 2.824941 3 2.935066 2.339322 7.814618 4.311621
## 1.1N/0.0P 8.119969 5.325218 3 2.935066 3.238677 13.799084 5.280411
## 1.1N/0.25P 14.072697 6.487304 3 2.935066 7.184296 20.065929 11.076081
## 1.1N/0.5P 28.226454 1.771651 3 2.935066 26.885453 30.234876 27.222243
## 1.8N/0.0P 7.042800 2.965403 3 2.935066 5.072382 10.453232 5.337584
## 1.8N/0.25P 18.491892 1.891986 3 2.935066 17.184086 20.661330 17.407174
## 1.8N/0.5P 11.971893 8.574619 3 2.935066 4.347204 21.254354 7.330662
## Q50 Q75
## 0.0N/0.0P 10.323614 10.579855
## 0.0N/0.25P 10.775283 14.584575
## 0.0N/0.5P 6.283919 7.049268
## 1.1N/0.0P 7.322146 10.560615
## 1.1N/0.25P 14.967865 17.516897
## 1.1N/0.5P 27.559034 28.896955
## 1.8N/0.0P 5.602786 8.028009
## 1.8N/0.25P 17.630261 19.145795
## 1.8N/0.5P 10.314120 15.784237
##
## Alpha: 0.06 ; DF Error: 18
## Critical Value of Studentized Range: 4.822424
##
## Minimun Significant Difference: 14.15413
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 1.1N/0.5P 28.226454 a
## 1.8N/0.25P 18.491892 ab
## 1.1N/0.25P 14.072697 b
## 1.8N/0.5P 11.971893 b
## 0.0N/0.25P 10.865549 b
## 0.0N/0.0P 8.841539 b
## 1.1N/0.0P 8.119969 b
## 1.8N/0.0P 7.042800 b
## 0.0N/0.5P 5.479286 b
library(agricolae)
GRAF.AF.E2.TTO <- AF.tasa.E2 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., TTO) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("b","b","b","b","ab","a","b","ab","ab"))
GRAF.AF.E2.TTO
## # A tibble: 9 × 8
## TTO n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N/0.0P 3 8.84 3.02 0.711 1.74 7.51 b
## 2 0.0N/0.25P 3 10.9 7.48 0.544 4.32 18.6 b
## 3 0.0N/0.5P 3 5.48 2.82 0.517 1.63 7.02 b
## 4 1.1N/0.0P 3 8.12 5.33 0.459 3.07 13.2 b
## 5 1.1N/0.25P 3 14.1 6.49 0.596 3.75 16.1 ab
## 6 1.1N/0.5P 3 28.2 1.77 0.809 1.02 4.40 a
## 7 1.8N/0.0P 3 7.04 2.97 0.805 1.71 7.37 b
## 8 1.8N/0.25P 3 18.5 1.89 0.781 1.09 4.70 ab
## 9 1.8N/0.5P 3 12.0 8.57 0.720 4.95 21.3 ab
ggplot(GRAF.AF.E2.TTO, aes(x=TTO, y=media))+
geom_point(size=2, position=position_dodge(.3))+
geom_errorbar(aes(ymin=media-se, ymax=media+se),
              alpha=0.7, width=.3, color="darkgray",
              position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=TTO, y=media+se+3.1),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=TTO, y=media-se-3.1),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa crecimiento AF (mm"^{2}~"/sem)")+
theme_classic()+
theme(axis.text = element_text(angle = 90))+
ylim(0, 33)
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(NH~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
NH.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(NH~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
NH.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 1.6944444 0.232863282 0.0N 0.0P
## 2 2 0.0N/0.0P 6.6111111 0.855064005 0.0N 0.0P
## 3 3 0.0N/0.0P 2.7500000 0.160516001 0.0N 0.0P
## 4 1 0.0N/0.25P 0.1666667 0.008200456 0.0N 0.25P
## 5 2 0.0N/0.25P 4.9166667 0.670033934 0.0N 0.25P
## 6 3 0.0N/0.25P 1.4444444 0.047552054 0.0N 0.25P
## 7 1 0.0N/0.5P 0.7777778 0.262734584 0.0N 0.5P
## 8 2 0.0N/0.5P 1.8333333 0.249923501 0.0N 0.5P
## 9 3 0.0N/0.5P 2.4611111 0.191851133 0.0N 0.5P
## 10 1 1.1N/0.0P 1.9166667 0.586089454 1.1N 0.0P
## 11 2 1.1N/0.0P 3.0555556 0.295790486 1.1N 0.0P
## 12 3 1.1N/0.0P 6.0833333 0.455153106 1.1N 0.0P
## 13 1 1.1N/0.25P 3.7777778 0.584035029 1.1N 0.25P
## 14 2 1.1N/0.25P 0.7222222 0.027973957 1.1N 0.25P
## 15 3 1.1N/0.25P 7.0277778 0.360436743 1.1N 0.25P
## 16 1 1.1N/0.5P 1.0277778 0.053503035 1.1N 0.5P
## 17 2 1.1N/0.5P 6.7500000 0.933846786 1.1N 0.5P
## 18 3 1.1N/0.5P 4.2500000 0.552576914 1.1N 0.5P
## 19 1 1.8N/0.0P 2.0555556 0.404690348 1.8N 0.0P
## 20 2 1.8N/0.0P 3.8611111 0.290815405 1.8N 0.0P
## 21 3 1.8N/0.0P 6.3333333 0.352220576 1.8N 0.0P
## 22 1 1.8N/0.25P 0.7777778 0.016432614 1.8N 0.25P
## 23 2 1.8N/0.25P 5.9166667 0.596557588 1.8N 0.25P
## 24 3 1.8N/0.25P 0.4444444 0.065206317 1.8N 0.25P
## 25 1 1.8N/0.5P 3.8888889 0.705899301 1.8N 0.5P
## 26 2 1.8N/0.5P 2.9722222 0.895012508 1.8N 0.5P
## 27 3 1.8N/0.5P 7.5777778 0.907079673 1.8N 0.5P
## 28 1 2.5N/0.0P 0.8333333 0.099874232 2.5N 0.0P
## 29 2 2.5N/0.0P 0.6388889 0.014220430 2.5N 0.0P
## 30 3 2.5N/0.0P -0.5416667 0.010245046 2.5N 0.0P
## 31 1 2.5N/0.25P -0.2500000 0.002057334 2.5N 0.25P
## 32 2 2.5N/0.25P 2.1666667 0.455116697 2.5N 0.25P
## 33 3 2.5N/0.25P 6.3388889 0.584894943 2.5N 0.25P
## 34 1 2.5N/0.5P 0.5277778 0.069104135 2.5N 0.5P
## 35 2 2.5N/0.5P 0.7777778 0.111691519 2.5N 0.5P
## 36 3 2.5N/0.5P 6.5555556 0.621681153 2.5N 0.5P
library(car)
NH.AOV.E1=aov(b1~N*P, data = NH.tasa.E1)
summary(NH.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 24.72 8.240 1.243 0.316
## P 2 1.55 0.773 0.117 0.890
## N:P 6 25.77 4.296 0.648 0.691
## Residuals 24 159.07 6.628
Residuales=residuals(NH.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(NH.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.13593, p-value = 0.4778
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.91518, p-value = 0.009111
bartlett.test(res_estan~NH.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by NH.tasa.E1$N
## Bartlett's K-squared = 0.68507, df = 3, p-value = 0.8767
bartlett.test(res_estan~NH.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by NH.tasa.E1$P
## Bartlett's K-squared = 1.6242, df = 2, p-value = 0.4439
NH.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(NH~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
NH.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 2.7349727 0.25412283 0.0N 0.0P
## 2 2 0.0N/0.0P 8.1092896 0.53438759 0.0N 0.0P
## 3 3 0.0N/0.0P 5.6516393 0.48561347 0.0N 0.0P
## 4 1 0.0N/0.25P 3.2295082 0.41897472 0.0N 0.25P
## 5 2 0.0N/0.25P 9.7144809 0.65112786 0.0N 0.25P
## 6 3 0.0N/0.25P 14.0076503 0.44267313 0.0N 0.25P
## 7 1 0.0N/0.5P 0.6844262 0.03543062 0.0N 0.5P
## 8 2 0.0N/0.5P 5.8142077 0.69124078 0.0N 0.5P
## 9 3 0.0N/0.5P 2.9685792 0.14721179 0.0N 0.5P
## 10 1 1.1N/0.0P 4.5691257 0.53274107 1.1N 0.0P
## 11 2 1.1N/0.0P 5.9685792 0.63079688 1.1N 0.0P
## 12 3 1.1N/0.0P 10.2377049 0.60594211 1.1N 0.0P
## 13 1 1.1N/0.25P 6.7950820 0.35380605 1.1N 0.25P
## 14 2 1.1N/0.25P 10.3811475 0.66634856 1.1N 0.25P
## 15 3 1.1N/0.25P 16.0232240 0.82128068 1.1N 0.25P
## 16 1 1.1N/0.5P 21.7486339 0.77939049 1.1N 0.5P
## 17 2 1.1N/0.5P 21.3688525 0.85186553 1.1N 0.5P
## 18 3 1.1N/0.5P 22.5047814 0.71646633 1.1N 0.5P
## 19 1 1.8N/0.0P 5.6926230 0.72302974 1.8N 0.0P
## 20 2 1.8N/0.0P 6.0232240 0.72038180 1.8N 0.0P
## 21 3 1.8N/0.0P 7.1072404 0.69885152 1.8N 0.0P
## 22 1 1.8N/0.25P 12.4180328 0.71188420 1.8N 0.25P
## 23 2 1.8N/0.25P 5.8540984 0.33295138 1.8N 0.25P
## 24 3 1.8N/0.25P 14.5382514 0.84655170 1.8N 0.25P
## 25 1 1.8N/0.5P 5.2390710 0.56483228 1.8N 0.5P
## 26 2 1.8N/0.5P 9.1803279 0.72042932 1.8N 0.5P
## 27 3 1.8N/0.5P 19.4275956 0.91612821 1.8N 0.5P
library(car)
NH.AOV.E2=aov(b1~N*P, data = NH.tasa.E2)
summary(NH.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 247.1 123.54 7.479 0.00432 **
## P 2 163.2 81.60 4.940 0.01949 *
## N:P 4 292.8 73.19 4.431 0.01144 *
## Residuals 18 297.3 16.52
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(NH.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(NH.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.10643, p-value = 0.8881
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.97723, p-value = 0.7952
bartlett.test(res_estan~NH.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by NH.tasa.E2$N
## Bartlett's K-squared = 1.5467, df = 2, p-value = 0.4615
bartlett.test(res_estan~NH.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by NH.tasa.E2$P
## Bartlett's K-squared = 4.023, df = 2, p-value = 0.1338
NH.AOV.E2.1=aov(b1~TTO, data = NH.tasa.E2)
summary(NH.AOV.E2.1)
## Df Sum Sq Mean Sq F value Pr(>F)
## TTO 8 703.0 87.88 5.32 0.00157 **
## Residuals 18 297.3 16.52
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
library(agricolae)
HSD.test(NH.AOV.E2.1, "TTO", console=T, alpha=0.05)
##
## Study: NH.AOV.E2.1 ~ "TTO"
##
## HSD Test for b1
##
## Mean Square Error: 16.51896
##
## TTO, means
##
## b1 std r se Min Max Q25
## 0.0N/0.0P 5.498634 2.6904235 3 2.346555 2.7349727 8.109290 4.193306
## 0.0N/0.25P 8.983880 5.4260870 3 2.346555 3.2295082 14.007650 6.471995
## 0.0N/0.5P 3.155738 2.5700069 3 2.346555 0.6844262 5.814208 1.826503
## 1.1N/0.0P 6.925137 2.9528713 3 2.346555 4.5691257 10.237705 5.268852
## 1.1N/0.25P 11.066485 4.6520873 3 2.346555 6.7950820 16.023224 8.588115
## 1.1N/0.5P 21.874089 0.5782629 3 2.346555 21.3688525 22.504781 21.558743
## 1.8N/0.0P 6.274362 0.7399923 3 2.346555 5.6926230 7.107240 5.857923
## 1.8N/0.25P 10.936794 4.5276019 3 2.346555 5.8540984 14.538251 9.136066
## 1.8N/0.5P 11.282332 7.3240953 3 2.346555 5.2390710 19.427596 7.209699
## Q50 Q75
## 0.0N/0.0P 5.651639 6.880464
## 0.0N/0.25P 9.714481 11.861066
## 0.0N/0.5P 2.968579 4.391393
## 1.1N/0.0P 5.968579 8.103142
## 1.1N/0.25P 10.381148 13.202186
## 1.1N/0.5P 21.748634 22.126708
## 1.8N/0.0P 6.023224 6.565232
## 1.8N/0.25P 12.418033 13.478142
## 1.8N/0.5P 9.180328 14.303962
##
## Alpha: 0.05 ; DF Error: 18
## Critical Value of Studentized Range: 4.955209
##
## Minimun Significant Difference: 11.62767
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 1.1N/0.5P 21.874089 a
## 1.8N/0.5P 11.282332 ab
## 1.1N/0.25P 11.066485 ab
## 1.8N/0.25P 10.936794 ab
## 0.0N/0.25P 8.983880 b
## 1.1N/0.0P 6.925137 b
## 1.8N/0.0P 6.274362 b
## 0.0N/0.0P 5.498634 b
## 0.0N/0.5P 3.155738 b
GRAF.NH.E2.TTO <- NH.tasa.E2 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., TTO) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("b","b","b","b","ab","a","b","ab","ab"))
GRAF.NH.E2.TTO
## # A tibble: 9 × 8
## TTO n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N/0.0P 3 5.50 2.69 0.425 1.55 6.68 b
## 2 0.0N/0.25P 3 8.98 5.43 0.504 3.13 13.5 b
## 3 0.0N/0.5P 3 3.16 2.57 0.291 1.48 6.38 b
## 4 1.1N/0.0P 3 6.93 2.95 0.590 1.70 7.34 b
## 5 1.1N/0.25P 3 11.1 4.65 0.614 2.69 11.6 ab
## 6 1.1N/0.5P 3 21.9 0.578 0.783 0.334 1.44 a
## 7 1.8N/0.0P 3 6.27 0.740 0.714 0.427 1.84 b
## 8 1.8N/0.25P 3 10.9 4.53 0.630 2.61 11.2 ab
## 9 1.8N/0.5P 3 11.3 7.32 0.734 4.23 18.2 ab
ggplot(GRAF.NH.E2.TTO, aes(x=TTO, y=media))+
geom_point(size=2, position=position_dodge(.3))+
geom_errorbar(aes(ymin=media-se, ymax=media+se),
              alpha=0.7, width=.3, color="darkgray",
              position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=TTO, y=media+se+2),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=TTO, y=media-se-2),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa emisión hojas \n(por semana)")+
theme_classic()+
theme(axis.text = element_text(angle = 90))+
ylim(0, 25)
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(BR~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
BR.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(BR~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
BR.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P -2.777778e-02 7.500000e-02 0.0N 0.0P
## 2 2 0.0N/0.0P 1.666667e-01 8.307692e-01 0.0N 0.0P
## 3 3 0.0N/0.0P -2.777778e-02 1.250000e-01 0.0N 0.0P
## 4 1 0.0N/0.25P -1.666667e-01 2.142857e-01 0.0N 0.25P
## 5 2 0.0N/0.25P -2.777778e-02 1.685393e-02 0.0N 0.25P
## 6 3 0.0N/0.25P -1.388889e-01 6.578947e-01 0.0N 0.25P
## 7 1 0.0N/0.5P -8.333333e-02 2.368421e-01 0.0N 0.5P
## 8 2 0.0N/0.5P 2.777778e-02 3.658537e-02 0.0N 0.5P
## 9 3 0.0N/0.5P -2.222222e-01 9.230769e-01 0.0N 0.5P
## 10 1 1.1N/0.0P -2.777778e-02 1.851852e-02 1.1N 0.0P
## 11 2 1.1N/0.0P -2.777778e-02 2.830189e-02 1.1N 0.0P
## 12 3 1.1N/0.0P 5.555556e-02 1.162791e-02 1.1N 0.0P
## 13 1 1.1N/0.25P 5.555556e-02 3.846154e-02 1.1N 0.25P
## 14 2 1.1N/0.25P -2.222222e-01 7.007299e-01 1.1N 0.25P
## 15 3 1.1N/0.25P -2.777778e-02 5.208333e-03 1.1N 0.25P
## 16 1 1.1N/0.5P 1.111111e-01 1.276596e-01 1.1N 0.5P
## 17 2 1.1N/0.5P 1.666667e-01 6.666667e-01 1.1N 0.5P
## 18 3 1.1N/0.5P -8.635068e-17 1.089874e-31 1.1N 0.5P
## 19 1 1.8N/0.0P 2.777778e-02 1.612903e-02 1.8N 0.0P
## 20 2 1.8N/0.0P 2.777778e-02 2.027027e-03 1.8N 0.0P
## 21 3 1.8N/0.0P -5.555556e-02 1.336303e-02 1.8N 0.0P
## 22 1 1.8N/0.25P -5.555556e-02 1.875000e-01 1.8N 0.25P
## 23 2 1.8N/0.25P -2.777778e-02 2.173913e-02 1.8N 0.25P
## 24 3 1.8N/0.25P 2.777778e-01 6.666667e-01 1.8N 0.25P
## 25 1 1.8N/0.5P 1.944444e-01 7.000000e-01 1.8N 0.5P
## 26 2 1.8N/0.5P 2.777778e-02 7.500000e-02 1.8N 0.5P
## 27 3 1.8N/0.5P 1.666667e-01 3.068182e-01 1.8N 0.5P
## 28 1 2.5N/0.0P -5.551115e-17 9.102241e-32 2.5N 0.0P
## 29 2 2.5N/0.0P -5.555556e-02 1.463415e-01 2.5N 0.0P
## 30 3 2.5N/0.0P -1.666667e-01 1.921708e-01 2.5N 0.0P
## 31 1 2.5N/0.25P 2.777778e-02 2.631579e-02 2.5N 0.25P
## 32 2 2.5N/0.25P 2.777778e-02 3.125000e-02 2.5N 0.25P
## 33 3 2.5N/0.25P 2.777778e-02 3.333333e-02 2.5N 0.25P
## 34 1 2.5N/0.5P -4.317534e-17 1.792866e-31 2.5N 0.5P
## 35 2 2.5N/0.5P -2.777778e-02 8.823529e-02 2.5N 0.5P
## 36 3 2.5N/0.5P -8.333333e-02 1.406250e-01 2.5N 0.5P
library(car)
BR.AOV.E1=aov(b1~N*P, data = BR.tasa.E1)
summary(BR.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.07311 0.024370 2.499 0.0838 .
## P 2 0.01247 0.006237 0.640 0.5363
## N:P 6 0.10532 0.017554 1.800 0.1417
## Residuals 24 0.23405 0.009752
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(BR.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(BR.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.083333, p-value = 0.9639
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.98446, p-value = 0.8826
bartlett.test(res_estan~BR.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by BR.tasa.E1$N
## Bartlett's K-squared = 4.5516, df = 3, p-value = 0.2077
bartlett.test(res_estan~BR.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by BR.tasa.E1$P
## Bartlett's K-squared = 2.2832, df = 2, p-value = 0.3193
BR.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(BR~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
BR.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.04234973 0.012583146 0.0N 0.0P
## 2 2 0.0N/0.0P 0.12295082 0.047902917 0.0N 0.0P
## 3 3 0.0N/0.0P -0.05054645 0.009020347 0.0N 0.0P
## 4 1 0.0N/0.25P 0.19535519 0.057678856 0.0N 0.25P
## 5 2 0.0N/0.25P 0.31693989 0.111691222 0.0N 0.25P
## 6 3 0.0N/0.25P 0.39344262 0.129795504 0.0N 0.25P
## 7 1 0.0N/0.5P -0.21448087 0.325872554 0.0N 0.5P
## 8 2 0.0N/0.5P -0.16803279 0.208768008 0.0N 0.5P
## 9 3 0.0N/0.5P 0.01366120 0.010245902 0.0N 0.5P
## 10 1 1.1N/0.0P 0.22677596 0.150981853 1.1N 0.0P
## 11 2 1.1N/0.0P -0.22814208 0.296880988 1.1N 0.0P
## 12 3 1.1N/0.0P 0.04371585 0.008564737 1.1N 0.0P
## 13 1 1.1N/0.25P 0.04508197 0.005834137 1.1N 0.25P
## 14 2 1.1N/0.25P 0.26229508 0.229957332 1.1N 0.25P
## 15 3 1.1N/0.25P 0.27595628 0.093791087 1.1N 0.25P
## 16 1 1.1N/0.5P 0.66256831 0.371784375 1.1N 0.5P
## 17 2 1.1N/0.5P 0.51639344 0.650655738 1.1N 0.5P
## 18 3 1.1N/0.5P -0.21721311 0.138702350 1.1N 0.5P
## 19 1 1.8N/0.0P 0.09562842 0.038619168 1.8N 0.0P
## 20 2 1.8N/0.0P 0.13114754 0.155434123 1.8N 0.0P
## 21 3 1.8N/0.0P 0.37295082 0.188547359 1.8N 0.0P
## 22 1 1.8N/0.25P 0.02595628 0.002390159 1.8N 0.25P
## 23 2 1.8N/0.25P -0.35519126 0.310593251 1.8N 0.25P
## 24 3 1.8N/0.25P 0.25000000 0.125000000 1.8N 0.25P
## 25 1 1.8N/0.5P 0.04918033 0.029508197 1.8N 0.5P
## 26 2 1.8N/0.5P 0.43032787 0.389520916 1.8N 0.5P
## 27 3 1.8N/0.5P 0.63114754 0.506232923 1.8N 0.5P
library(car)
BR.AOV.E2=aov(b1~N*P, data = BR.tasa.E2)
summary(BR.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.0680 0.03402 0.581 0.5697
## P 2 0.0522 0.02609 0.445 0.6475
## N:P 4 0.6037 0.15093 2.576 0.0727 .
## Residuals 18 1.0545 0.05858
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(BR.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(BR.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.11318, p-value = 0.8417
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.96388, p-value = 0.4508
bartlett.test(res_estan~BR.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by BR.tasa.E2$N
## Bartlett's K-squared = 7.9429, df = 2, p-value = 0.01885
bartlett.test(res_estan~BR.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by BR.tasa.E2$P
## Bartlett's K-squared = 3.9413, df = 2, p-value = 0.1394
library(MASS)
#boxcox(BR.AOV.E2, lambda = seq(-2, 2, 0.1)) # cannot be run as-is because some slopes (b1) are negative
#hist(BR.tasa.E2$b1)
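# A sketch (not evaluated here): shifting b1 to be strictly positive would let
# boxcox() run; the small constant added is an arbitrary choice for illustration.
# bc <- MASS::boxcox(lm(b1 - min(b1) + 0.01 ~ N * P, data = BR.tasa.E2),
#                    lambda = seq(-2, 2, 0.1))
# lambda.opt <- bc$x[which.max(bc$y)]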
BR.AOV2.E2=aov( (b1^0.5)~N*P, data = BR.tasa.E2)
summary(BR.AOV2.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.0402 0.02010 0.576 0.577
## P 2 0.1093 0.05467 1.566 0.249
## N:P 4 0.3380 0.08449 2.421 0.106
## Residuals 12 0.4189 0.03491
## 6 observations deleted due to missingness
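# The 6 deleted observations are the groups with negative b1, for which
# b1^0.5 is NaN, so the square-root transform discards part of the data.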
Residuales=residuals(BR.AOV2.E2)
res_estan=Residuales/sd(Residuales)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.12019, p-value = 0.887
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.97324, p-value = 0.8033
#bartlett.test(res_estan~BR.tasa.E2$N)
#bartlett.test(res_estan~BR.tasa.E2$P)
#### NON-PARAMETRIC
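# A sketch (not evaluated here) of a rank-based alternative for the Ensayo 2
# branching slopes, given the heterogeneous variances across N (Bartlett
# p = 0.019):
# kw.BR.E2 <- kruskal.test(b1 ~ TTO, data = BR.tasa.E2)
# kw.BR.E2.grp <- agricolae::kruskal(BR.tasa.E2$b1, BR.tasa.E2$TTO,
#                                    group = TRUE, console = TRUE)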
## Exploratory plot
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(R1~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
## Estimation of the linear models and their slopes, Ensayo 1
R1.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(R1~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
dplyr::mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
R1.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P -2.777778e-02 3.000000e-01 0.0N 0.0P
## 2 2 0.0N/0.0P -5.555556e-02 7.500000e-01 0.0N 0.0P
## 3 3 0.0N/0.0P -2.777778e-02 1.666667e-01 0.0N 0.0P
## 4 1 0.0N/0.25P -5.555556e-02 7.500000e-01 0.0N 0.25P
## 5 2 0.0N/0.25P 2.777778e-02 3.000000e-01 0.0N 0.25P
## 6 3 0.0N/0.25P -2.777778e-02 3.000000e-01 0.0N 0.25P
## 7 1 0.0N/0.5P -1.510822e-17 4.814815e-01 0.0N 0.5P
## 8 2 0.0N/0.5P -2.777778e-02 3.000000e-01 0.0N 0.5P
## 9 3 0.0N/0.5P -5.555556e-02 7.500000e-01 0.0N 0.5P
## 10 1 1.1N/0.0P -2.777778e-02 3.000000e-01 1.1N 0.0P
## 11 2 1.1N/0.0P -2.777778e-02 3.000000e-01 1.1N 0.0P
## 12 3 1.1N/0.0P -1.850372e-17 1.972152e-31 1.1N 0.0P
## 13 1 1.1N/0.25P -2.777778e-02 3.000000e-01 1.1N 0.25P
## 14 2 1.1N/0.25P -5.555556e-02 7.500000e-01 1.1N 0.25P
## 15 3 1.1N/0.25P -2.777778e-02 1.875000e-01 1.1N 0.25P
## 16 1 1.1N/0.5P -2.777778e-02 7.142857e-02 1.1N 0.5P
## 17 2 1.1N/0.5P -2.777778e-02 3.000000e-01 1.1N 0.5P
## 18 3 1.1N/0.5P -2.777778e-02 1.875000e-01 1.1N 0.5P
## 19 1 1.8N/0.0P -2.777778e-02 3.000000e-01 1.8N 0.0P
## 20 2 1.8N/0.0P -1.510822e-17 4.814815e-01 1.8N 0.0P
## 21 3 1.8N/0.0P -2.777778e-02 3.000000e-01 1.8N 0.0P
## 22 1 1.8N/0.25P -1.510822e-17 4.814815e-01 1.8N 0.25P
## 23 2 1.8N/0.25P -2.777778e-02 3.000000e-01 1.8N 0.25P
## 24 3 1.8N/0.25P -5.555556e-02 3.000000e-01 1.8N 0.25P
## 25 1 1.8N/0.5P -1.510822e-17 4.814815e-01 1.8N 0.5P
## 26 2 1.8N/0.5P -2.777778e-02 3.000000e-01 1.8N 0.5P
## 27 3 1.8N/0.5P -1.111111e-01 7.500000e-01 1.8N 0.5P
## 28 1 2.5N/0.0P -2.777778e-02 3.000000e-01 2.5N 0.0P
## 29 2 2.5N/0.0P -5.555556e-02 7.500000e-01 2.5N 0.0P
## 30 3 2.5N/0.0P -5.555556e-02 6.666667e-01 2.5N 0.0P
## 31 1 2.5N/0.25P -1.510822e-17 4.814815e-01 2.5N 0.25P
## 32 2 2.5N/0.25P -2.777778e-02 3.000000e-01 2.5N 0.25P
## 33 3 2.5N/0.25P -3.700743e-17 1.035380e-31 2.5N 0.25P
## 34 1 2.5N/0.5P -2.777778e-02 3.000000e-01 2.5N 0.5P
## 35 2 2.5N/0.5P -2.777778e-02 3.000000e-01 2.5N 0.5P
## 36 3 2.5N/0.5P 2.777778e-02 3.000000e-01 2.5N 0.5P
## Analysis of variance on the slopes
library(car)
R1.AOV.E1=aov(b1~N*P, data = R1.tasa.E1)
summary(R1.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.000407 0.0001357 0.176 0.912
## P 2 0.000300 0.0001500 0.194 0.825
## N:P 6 0.004672 0.0007787 1.009 0.443
## Residuals 24 0.018519 0.0007716
Residuales=residuals(R1.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(R1.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.1008, p-value = 0.8579
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.96934, p-value = 0.4077
bartlett.test(res_estan~R1.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R1.tasa.E1$N
## Bartlett's K-squared = 8.1582, df = 3, p-value = 0.04285
bartlett.test(res_estan~R1.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R1.tasa.E1$P
## Bartlett's K-squared = 6.2146, df = 2, p-value = 0.04472
## Estimation of the linear models and their slopes, Ensayo 2
R1.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(R1~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
R1.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P -1.912568e-02 4.227783e-02 0.0N 0.0P
## 2 2 0.0N/0.0P 0.000000e+00 NaN 0.0N 0.0P
## 3 3 0.0N/0.0P 0.000000e+00 NaN 0.0N 0.0P
## 4 1 0.0N/0.25P 1.366120e-03 1.024590e-03 0.0N 0.25P
## 5 2 0.0N/0.25P 1.912568e-02 1.147541e-01 0.0N 0.25P
## 6 3 0.0N/0.25P -5.601093e-02 3.625971e-01 0.0N 0.25P
## 7 1 0.0N/0.5P -3.688525e-02 3.319672e-01 0.0N 0.5P
## 8 2 0.0N/0.5P 3.688525e-02 3.319672e-01 0.0N 0.5P
## 9 3 0.0N/0.5P -2.732240e-03 1.639344e-03 0.0N 0.5P
## 10 1 1.1N/0.0P 3.551913e-02 3.957845e-01 1.1N 0.0P
## 11 2 1.1N/0.0P -1.912568e-02 2.008197e-01 1.1N 0.0P
## 12 3 1.1N/0.0P -3.825137e-02 4.590164e-01 1.1N 0.0P
## 13 1 1.1N/0.25P 1.912568e-02 1.147541e-01 1.1N 0.25P
## 14 2 1.1N/0.25P -1.366120e-03 5.854801e-04 1.1N 0.25P
## 15 3 1.1N/0.25P 0.000000e+00 NaN 1.1N 0.25P
## 16 1 1.1N/0.5P -1.912568e-02 2.008197e-01 1.1N 0.5P
## 17 2 1.1N/0.5P -3.825137e-02 4.590164e-01 1.1N 0.5P
## 18 3 1.1N/0.5P 1.005149e-17 2.465190e-32 1.1N 0.5P
## 19 1 1.8N/0.0P 1.005149e-17 2.465190e-32 1.8N 0.0P
## 20 2 1.8N/0.0P 5.464481e-02 3.451251e-01 1.8N 0.0P
## 21 3 1.8N/0.0P 2.049180e-02 1.024590e-01 1.8N 0.0P
## 22 1 1.8N/0.25P 0.000000e+00 NaN 1.8N 0.25P
## 23 2 1.8N/0.25P -3.688525e-02 1.659836e-01 1.8N 0.25P
## 24 3 1.8N/0.25P 1.775956e-02 1.731557e-01 1.8N 0.25P
## 25 1 1.8N/0.5P 1.366120e-03 4.098361e-04 1.8N 0.5P
## 26 2 1.8N/0.5P -1.366120e-03 4.098361e-04 1.8N 0.5P
## 27 3 1.8N/0.5P 1.366120e-03 4.098361e-04 1.8N 0.5P
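# The NaN values of Rsqr correspond to groups where R1 never changed, so the
# fitted slope is 0 and R-squared is undefined (0/0).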
library(car)
R1.AOV.E2=aov(b1~N*P, data = R1.tasa.E2)
summary(R1.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.001012 0.0005058 0.697 0.511
## P 2 0.000524 0.0002621 0.361 0.702
## N:P 4 0.002235 0.0005588 0.770 0.559
## Residuals 18 0.013063 0.0007257
Residuales=residuals(R1.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(R1.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.091747, p-value = 0.9612
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.98174, p-value = 0.8993
bartlett.test(res_estan~R1.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R1.tasa.E2$N
## Bartlett's K-squared = 0.9041, df = 2, p-value = 0.6363
bartlett.test(res_estan~R1.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R1.tasa.E2$P
## Bartlett's K-squared = 0.26236, df = 2, p-value = 0.8771
## Exploratory plot
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(R2~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
## Estimation of the linear models and their slopes, Ensayo 1
R2.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(R2~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
dplyr::mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
R2.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 2.777778e-02 2.830189e-02 0.0N 0.0P
## 2 2 0.0N/0.0P 2.777778e-02 1.250000e-02 0.0N 0.0P
## 3 3 0.0N/0.0P -2.777778e-02 1.785714e-02 0.0N 0.0P
## 4 1 0.0N/0.25P -8.333333e-02 8.181818e-02 0.0N 0.25P
## 5 2 0.0N/0.25P -5.555556e-02 1.463415e-01 0.0N 0.25P
## 6 3 0.0N/0.25P -5.555556e-02 4.255319e-02 0.0N 0.25P
## 7 1 0.0N/0.5P -5.555556e-02 3.000000e-01 0.0N 0.5P
## 8 2 0.0N/0.5P -2.777778e-02 3.125000e-02 0.0N 0.5P
## 9 3 0.0N/0.5P -1.666667e-01 1.956522e-01 0.0N 0.5P
## 10 1 1.1N/0.0P -2.775558e-17 9.466331e-31 1.1N 0.0P
## 11 2 1.1N/0.0P -5.555556e-02 7.500000e-01 1.1N 0.0P
## 12 3 1.1N/0.0P 8.333333e-02 9.574468e-02 1.1N 0.0P
## 13 1 1.1N/0.25P 8.333333e-02 7.941176e-01 1.1N 0.25P
## 14 2 1.1N/0.25P -2.777778e-01 6.329114e-01 1.1N 0.25P
## 15 3 1.1N/0.25P -5.555556e-02 1.875000e-01 1.1N 0.25P
## 16 1 1.1N/0.5P -3.700743e-17 2.588450e-31 1.1N 0.5P
## 17 2 1.1N/0.5P 1.111111e-01 3.000000e-01 1.1N 0.5P
## 18 3 1.1N/0.5P 1.111111e-01 1.363636e-01 1.1N 0.5P
## 19 1 1.8N/0.0P 5.555556e-02 3.000000e-01 1.8N 0.0P
## 20 2 1.8N/0.0P -5.555556e-02 3.000000e-01 1.8N 0.0P
## 21 3 1.8N/0.0P -3.333333e-01 3.776224e-01 1.8N 0.0P
## 22 1 1.8N/0.25P -1.233581e-17 1.035380e-30 1.8N 0.25P
## 23 2 1.8N/0.25P 8.333333e-02 3.292683e-01 1.8N 0.25P
## 24 3 1.8N/0.25P -1.388889e-01 2.516779e-01 1.8N 0.25P
## 25 1 1.8N/0.5P 2.777778e-02 3.000000e-01 1.8N 0.5P
## 26 2 1.8N/0.5P -5.555556e-02 7.142857e-02 1.8N 0.5P
## 27 3 1.8N/0.5P -2.467162e-17 2.232625e-31 1.8N 0.5P
## 28 1 2.5N/0.0P 2.777778e-02 2.830189e-02 2.5N 0.0P
## 29 2 2.5N/0.0P -5.555556e-02 1.250000e-01 2.5N 0.0P
## 30 3 2.5N/0.0P -2.222222e-01 3.416370e-01 2.5N 0.0P
## 31 1 2.5N/0.25P 2.777778e-02 3.658537e-02 2.5N 0.25P
## 32 2 2.5N/0.25P -2.777778e-02 2.830189e-02 2.5N 0.25P
## 33 3 2.5N/0.25P 8.333333e-02 4.655172e-01 2.5N 0.25P
## 34 1 2.5N/0.5P 2.777778e-02 5.172414e-02 2.5N 0.5P
## 35 2 2.5N/0.5P 8.333333e-02 4.218750e-01 2.5N 0.5P
## 36 3 2.5N/0.5P -2.777778e-02 2.307692e-02 2.5N 0.5P
## Analysis of variance on the slopes
library(car)
R2.AOV.E1=aov(b1~N*P, data = R2.tasa.E1)
summary(R2.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.01601 0.005337 0.509 0.680
## P 2 0.01440 0.007202 0.687 0.513
## N:P 6 0.08128 0.013546 1.292 0.298
## Residuals 24 0.25154 0.010481
Residuales=residuals(R2.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(R2.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.13889, p-value = 0.491
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.95417, p-value = 0.1414
bartlett.test(res_estan~R2.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R2.tasa.E1$N
## Bartlett's K-squared = 7.9371, df = 3, p-value = 0.04733
bartlett.test(res_estan~R2.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R2.tasa.E1$P
## Bartlett's K-squared = 5.4417, df = 2, p-value = 0.06582
## Estimation of the linear models and their slopes, Ensayo 2
R2.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(R2~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
R2.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.05737705 0.047251688 0.0N 0.0P
## 2 2 0.0N/0.0P -0.03278689 0.037470726 0.0N 0.0P
## 3 3 0.0N/0.0P 0.10245902 0.106728142 0.0N 0.0P
## 4 1 0.0N/0.25P -0.01775956 0.006184133 0.0N 0.25P
## 5 2 0.0N/0.25P 0.12158470 0.168202667 0.0N 0.25P
## 6 3 0.0N/0.25P 0.10382514 0.028248367 0.0N 0.25P
## 7 1 0.0N/0.5P -0.15437158 0.393473438 0.0N 0.5P
## 8 2 0.0N/0.5P 0.12431694 0.399276760 0.0N 0.5P
## 9 3 0.0N/0.5P 0.21721311 0.319785974 0.0N 0.5P
## 10 1 1.1N/0.0P 0.13251366 0.245614493 1.1N 0.0P
## 11 2 1.1N/0.0P 0.05054645 0.030998098 1.1N 0.0P
## 12 3 1.1N/0.0P 0.20901639 0.333119877 1.1N 0.0P
## 13 1 1.1N/0.25P -0.06693989 0.012455904 1.1N 0.25P
## 14 2 1.1N/0.25P 0.39480874 0.371257245 1.1N 0.25P
## 15 3 1.1N/0.25P -0.02595628 0.005918033 1.1N 0.25P
## 16 1 1.1N/0.5P 0.04234973 0.015206659 1.1N 0.5P
## 17 2 1.1N/0.5P 0.17759563 0.128026423 1.1N 0.5P
## 18 3 1.1N/0.5P 0.05191257 0.007606726 1.1N 0.5P
## 19 1 1.8N/0.0P -0.05327869 0.026639344 1.8N 0.0P
## 20 2 1.8N/0.0P 0.17759563 0.287395415 1.8N 0.0P
## 21 3 1.8N/0.0P 0.24180328 0.250287604 1.8N 0.0P
## 22 1 1.8N/0.25P 0.23497268 0.452410081 1.8N 0.25P
## 23 2 1.8N/0.25P 0.17486339 0.159495347 1.8N 0.25P
## 24 3 1.8N/0.25P 0.28278689 0.250157629 1.8N 0.25P
## 25 1 1.8N/0.5P 0.01775956 0.001515586 1.8N 0.5P
## 26 2 1.8N/0.5P -0.06967213 0.028200625 1.8N 0.5P
## 27 3 1.8N/0.5P 0.34836066 0.429139938 1.8N 0.5P
library(car)
R2.AOV.E2=aov(b1~N*P, data = R2.tasa.E2)
summary(R2.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.0386 0.019318 0.866 0.438
## P 2 0.0117 0.005868 0.263 0.772
## N:P 4 0.0219 0.005467 0.245 0.909
## Residuals 18 0.4016 0.022313
Residuales=residuals(R2.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(R2.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.093876, p-value = 0.9533
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.9694, p-value = 0.586
bartlett.test(res_estan~R2.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R2.tasa.E2$N
## Bartlett's K-squared = 0.52237, df = 2, p-value = 0.7701
bartlett.test(res_estan~R2.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R2.tasa.E2$P
## Bartlett's K-squared = 1.745, df = 2, p-value = 0.4179
## Exploratory plot
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(R3~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
## Estimation of the linear models and their slopes, Ensayo 1
R3.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(R3~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
dplyr::mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
R3.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 5.555556e-02 1.941748e-02 0.0N 0.0P
## 2 2 0.0N/0.0P 2.222222e-01 5.000000e-01 0.0N 0.0P
## 3 3 0.0N/0.0P 1.666667e-01 2.857143e-01 0.0N 0.0P
## 4 1 0.0N/0.25P 2.500000e-01 6.462766e-01 0.0N 0.25P
## 5 2 0.0N/0.25P 1.111111e-01 1.200000e-01 0.0N 0.25P
## 6 3 0.0N/0.25P 3.055556e-01 7.062257e-01 0.0N 0.25P
## 7 1 0.0N/0.5P 1.388889e-01 2.027027e-01 0.0N 0.5P
## 8 2 0.0N/0.5P -2.777778e-02 1.388889e-02 0.0N 0.5P
## 9 3 0.0N/0.5P 2.777778e-01 7.614213e-01 0.0N 0.5P
## 10 1 1.1N/0.0P 1.944444e-01 1.044034e-01 1.1N 0.0P
## 11 2 1.1N/0.0P 1.388889e-01 1.903553e-01 1.1N 0.0P
## 12 3 1.1N/0.0P 1.388889e-01 5.850234e-02 1.1N 0.0P
## 13 1 1.1N/0.25P 2.500000e-01 5.062500e-01 1.1N 0.25P
## 14 2 1.1N/0.25P 1.666667e-01 1.824324e-01 1.1N 0.25P
## 15 3 1.1N/0.25P -1.388889e-01 7.545272e-02 1.1N 0.25P
## 16 1 1.1N/0.5P 1.388889e-01 3.472222e-01 1.1N 0.5P
## 17 2 1.1N/0.5P 2.777778e-01 8.333333e-01 1.1N 0.5P
## 18 3 1.1N/0.5P 2.500000e-01 5.148305e-01 1.1N 0.5P
## 19 1 1.8N/0.0P 2.222222e-01 6.857143e-01 1.8N 0.0P
## 20 2 1.8N/0.0P -2.777778e-02 5.338078e-03 1.8N 0.0P
## 21 3 1.8N/0.0P 4.166667e-01 3.729282e-01 1.8N 0.0P
## 22 1 1.8N/0.25P 3.611111e-01 4.980354e-01 1.8N 0.25P
## 23 2 1.8N/0.25P 1.111111e-01 3.529412e-01 1.8N 0.25P
## 24 3 1.8N/0.25P 1.944444e-01 2.702206e-01 1.8N 0.25P
## 25 1 1.8N/0.5P -9.868649e-17 1.479114e-31 1.8N 0.5P
## 26 2 1.8N/0.5P -2.777778e-02 4.687500e-03 1.8N 0.5P
## 27 3 1.8N/0.5P 4.722222e-01 8.865031e-01 1.8N 0.5P
## 28 1 2.5N/0.0P 1.666667e-01 3.624161e-01 2.5N 0.0P
## 29 2 2.5N/0.0P 3.333333e-01 9.270386e-01 2.5N 0.0P
## 30 3 2.5N/0.0P 3.055556e-01 3.258528e-01 2.5N 0.0P
## 31 1 2.5N/0.25P 1.111111e-01 1.610738e-01 2.5N 0.25P
## 32 2 2.5N/0.25P -2.777778e-02 1.327434e-02 2.5N 0.25P
## 33 3 2.5N/0.25P 8.333333e-02 1.336634e-01 2.5N 0.25P
## 34 1 2.5N/0.5P 5.555556e-02 4.651163e-02 2.5N 0.5P
## 35 2 2.5N/0.5P 1.666667e-01 5.346535e-01 2.5N 0.5P
## 36 3 2.5N/0.5P 2.777778e-02 5.338078e-03 2.5N 0.5P
## Analysis of variance on the slopes
library(car)
R3.AOV.E1=aov(b1~N*P, data = R3.tasa.E1)
summary(R3.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.0143 0.004765 0.226 0.877
## P 2 0.0180 0.009023 0.429 0.656
## N:P 6 0.1109 0.018483 0.878 0.526
## Residuals 24 0.5051 0.021048
Residuales=residuals(R3.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(R3.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.10506, p-value = 0.7833
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.97817, p-value = 0.6833
bartlett.test(res_estan~R3.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R3.tasa.E1$N
## Bartlett's K-squared = 8.2914, df = 3, p-value = 0.04036
bartlett.test(res_estan~R3.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R3.tasa.E1$P
## Bartlett's K-squared = 0.89862, df = 2, p-value = 0.6381
## Estimation of the linear models and their slopes, Ensayo 2
R3.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(R3~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
R3.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.00000000 1.792866e-32 0.0N 0.0P
## 2 2 0.0N/0.0P 0.20765027 2.249134e-01 0.0N 0.0P
## 3 3 0.0N/0.0P -0.05737705 1.544767e-02 0.0N 0.0P
## 4 1 0.0N/0.25P 0.09699454 1.666116e-01 0.0N 0.25P
## 5 2 0.0N/0.25P 0.40983607 5.322546e-01 0.0N 0.25P
## 6 3 0.0N/0.25P 0.30464481 2.101107e-01 0.0N 0.25P
## 7 1 0.0N/0.5P 0.19398907 2.801334e-01 0.0N 0.5P
## 8 2 0.0N/0.5P 0.33469945 2.989114e-01 0.0N 0.5P
## 9 3 0.0N/0.5P 0.22267760 1.717498e-01 0.0N 0.5P
## 10 1 1.1N/0.0P 0.39480874 2.808033e-01 1.1N 0.0P
## 11 2 1.1N/0.0P 0.21038251 1.097028e-01 1.1N 0.0P
## 12 3 1.1N/0.0P 0.18579235 1.826585e-01 1.1N 0.0P
## 13 1 1.1N/0.25P 0.43032787 5.443907e-01 1.1N 0.25P
## 14 2 1.1N/0.25P 0.05601093 7.329090e-03 1.1N 0.25P
## 15 3 1.1N/0.25P 0.16393443 1.285760e-01 1.1N 0.25P
## 16 1 1.1N/0.5P 0.90846995 7.189201e-01 1.1N 0.5P
## 17 2 1.1N/0.5P 0.69398907 8.006354e-01 1.1N 0.5P
## 18 3 1.1N/0.5P 0.67076503 6.608942e-01 1.1N 0.5P
## 19 1 1.8N/0.0P 0.39071038 4.981122e-01 1.8N 0.0P
## 20 2 1.8N/0.0P 0.14890710 1.360129e-01 1.8N 0.0P
## 21 3 1.8N/0.0P 0.17213115 2.259221e-01 1.8N 0.0P
## 22 1 1.8N/0.25P 0.34289617 1.959035e-01 1.8N 0.25P
## 23 2 1.8N/0.25P 0.55464481 6.337311e-01 1.8N 0.25P
## 24 3 1.8N/0.25P 0.53415301 2.662820e-01 1.8N 0.25P
## 25 1 1.8N/0.5P 0.52049180 4.558790e-01 1.8N 0.5P
## 26 2 1.8N/0.5P 0.33606557 2.417314e-01 1.8N 0.5P
## 27 3 1.8N/0.5P 0.93306011 6.533972e-01 1.8N 0.5P
library(car)
R3.AOV.E2=aov(b1~N*P, data = R3.tasa.E2)
summary(R3.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.3326 0.16632 6.172 0.009095 **
## P 2 0.5637 0.28187 10.460 0.000968 ***
## N:P 4 0.2653 0.06632 2.461 0.082467 .
## Residuals 18 0.4851 0.02695
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(R3.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(R3.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.19865, p-value = 0.2074
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.95431, p-value = 0.2722
bartlett.test(res_estan~R3.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R3.tasa.E2$N
## Bartlett's K-squared = 1.6838, df = 2, p-value = 0.4309
bartlett.test(res_estan~R3.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by R3.tasa.E2$P
## Bartlett's K-squared = 1.3103, df = 2, p-value = 0.5194
R3.AOV.E2.1=aov(b1~TTO, data = R3.tasa.E2)
library(agricolae)
summary(R3.AOV.E2.1)
## Df Sum Sq Mean Sq F value Pr(>F)
## TTO 8 1.1617 0.14521 5.388 0.00146 **
## Residuals 18 0.4851 0.02695
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
HSD.test(R3.AOV.E2.1, "TTO", console=T, alpha=0.06)
##
## Study: R3.AOV.E2.1 ~ "TTO"
##
## HSD Test for b1
##
## Mean Square Error: 0.02694818
##
## TTO, means
##
## b1 std r se Min Max Q25
## 0.0N/0.0P 0.05009107 0.13943352 3 0.09477725 -0.05737705 0.2076503 -0.02868852
## 0.0N/0.25P 0.27049180 0.15919258 3 0.09477725 0.09699454 0.4098361 0.20081967
## 0.0N/0.5P 0.25045537 0.07435426 3 0.09477725 0.19398907 0.3346995 0.20833333
## 1.1N/0.0P 0.26366120 0.11424065 3 0.09477725 0.18579235 0.3948087 0.19808743
## 1.1N/0.25P 0.21675774 0.19266816 3 0.09477725 0.05601093 0.4303279 0.10997268
## 1.1N/0.5P 0.75774135 0.13105026 3 0.09477725 0.67076503 0.9084699 0.68237705
## 1.8N/0.0P 0.23724954 0.13340731 3 0.09477725 0.14890710 0.3907104 0.16051913
## 1.8N/0.25P 0.47723133 0.11678797 3 0.09477725 0.34289617 0.5546448 0.43852459
## 1.8N/0.5P 0.59653916 0.30567633 3 0.09477725 0.33606557 0.9330601 0.42827869
## Q50 Q75
## 0.0N/0.0P 0.0000000 0.1038251
## 0.0N/0.25P 0.3046448 0.3572404
## 0.0N/0.5P 0.2226776 0.2786885
## 1.1N/0.0P 0.2103825 0.3025956
## 1.1N/0.25P 0.1639344 0.2971311
## 1.1N/0.5P 0.6939891 0.8012295
## 1.8N/0.0P 0.1721311 0.2814208
## 1.8N/0.25P 0.5341530 0.5443989
## 1.8N/0.5P 0.5204918 0.7267760
##
## Alpha: 0.06 ; DF Error: 18
## Critical Value of Studentized Range: 4.822424
##
## Minimun Significant Difference: 0.4570561
##
## Treatments with the same letter are not significantly different.
##
## b1 groups
## 1.1N/0.5P 0.75774135 a
## 1.8N/0.5P 0.59653916 ab
## 1.8N/0.25P 0.47723133 abc
## 0.0N/0.25P 0.27049180 bc
## 1.1N/0.0P 0.26366120 bc
## 0.0N/0.5P 0.25045537 bc
## 1.8N/0.0P 0.23724954 bc
## 1.1N/0.25P 0.21675774 bc
## 0.0N/0.0P 0.05009107 c
GRAF.R3.E2.TTO <- R3.tasa.E2 %>%
dplyr::select(., -B2) %>%
dplyr::group_by(., TTO) %>%
dplyr::summarise( n=n(), media=mean(b1), sd=sd(b1),
R2=mean(Rsqr)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("c","bc","bc","bc","bc","a","bc","abc","ab"))
GRAF.R3.E2.TTO
## # A tibble: 9 × 8
## TTO n media sd R2 se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N/0.0P 3 0.0501 0.139 0.0801 0.0805 0.346 c
## 2 0.0N/0.25P 3 0.270 0.159 0.303 0.0919 0.395 bc
## 3 0.0N/0.5P 3 0.250 0.0744 0.250 0.0429 0.185 bc
## 4 1.1N/0.0P 3 0.264 0.114 0.191 0.0660 0.284 bc
## 5 1.1N/0.25P 3 0.217 0.193 0.227 0.111 0.479 bc
## 6 1.1N/0.5P 3 0.758 0.131 0.727 0.0757 0.326 a
## 7 1.8N/0.0P 3 0.237 0.133 0.287 0.0770 0.331 bc
## 8 1.8N/0.25P 3 0.477 0.117 0.365 0.0674 0.290 abc
## 9 1.8N/0.5P 3 0.597 0.306 0.450 0.176 0.759 ab
ggplot(GRAF.R3.E2.TTO, aes(x=TTO, y=media))+
geom_point(size=2, position=position_dodge(.3))+
geom_errorbar(aes(ymin=media-se, ymax=media+se),
              alpha=0.7, width=.3, color="darkgray",
              position=position_dodge(.3) )+
geom_text(aes(label=Tukey, x=TTO, y=media+se+0.1),
color="red", size=4)+
geom_text(aes(label=round(R2, digits=2), x=TTO, y=media-se-0.1),
color="blue", size=3)+
labs(x = "Tratamiento",y = "Tasa emisión de ramas \n(por semana)")+
theme_classic()+
theme(axis.text = element_text(angle = 90))+
ylim(-0.3, 1.2)
## Exploratory plot
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(ALT~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
## Estimation of the linear models and their slopes, Ensayo 1
ALT.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(ALT~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
dplyr::mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
ALT.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.23055556 0.2807329729 0.0N 0.0P
## 2 2 0.0N/0.0P 0.50555556 0.7199617458 0.0N 0.0P
## 3 3 0.0N/0.0P 0.05555556 0.0444444444 0.0N 0.0P
## 4 1 0.0N/0.25P 0.19166667 0.2410388821 0.0N 0.25P
## 5 2 0.0N/0.25P -0.03055556 0.0068027976 0.0N 0.25P
## 6 3 0.0N/0.25P 0.14444444 0.0896849088 0.0N 0.25P
## 7 1 0.0N/0.5P 0.14444444 0.1626237921 0.0N 0.5P
## 8 2 0.0N/0.5P 0.01666667 0.0025547618 0.0N 0.5P
## 9 3 0.0N/0.5P 0.32222222 0.3642729520 0.0N 0.5P
## 10 1 1.1N/0.0P 0.45833333 0.7458495425 1.1N 0.0P
## 11 2 1.1N/0.0P 0.15833333 0.1581227085 1.1N 0.0P
## 12 3 1.1N/0.0P 0.25000000 0.2642453241 1.1N 0.0P
## 13 1 1.1N/0.25P 0.13611111 0.1538313685 1.1N 0.25P
## 14 2 1.1N/0.25P -0.05555556 0.0072996253 1.1N 0.25P
## 15 3 1.1N/0.25P 0.04166667 0.0087469224 1.1N 0.25P
## 16 1 1.1N/0.5P -0.01944444 0.0011157834 1.1N 0.5P
## 17 2 1.1N/0.5P 0.37777778 0.4048327788 1.1N 0.5P
## 18 3 1.1N/0.5P 0.54166667 0.5478054168 1.1N 0.5P
## 19 1 1.8N/0.0P 0.17500000 0.1630693802 1.8N 0.0P
## 20 2 1.8N/0.0P 0.15555556 0.2315872391 1.8N 0.0P
## 21 3 1.8N/0.0P -0.16666667 0.0802997859 1.8N 0.0P
## 22 1 1.8N/0.25P 0.28333333 0.2687723891 1.8N 0.25P
## 23 2 1.8N/0.25P -0.05833333 0.0079503389 1.8N 0.25P
## 24 3 1.8N/0.25P 0.22777778 0.5498255560 1.8N 0.25P
## 25 1 1.8N/0.5P 0.16944444 0.2852798364 1.8N 0.5P
## 26 2 1.8N/0.5P 0.22222222 0.3513394818 1.8N 0.5P
## 27 3 1.8N/0.5P 0.23333333 0.3401793463 1.8N 0.5P
## 28 1 2.5N/0.0P -0.02777778 0.0136549841 2.5N 0.0P
## 29 2 2.5N/0.0P 0.03888889 0.0625398851 2.5N 0.0P
## 30 3 2.5N/0.0P 0.18055556 0.1376849378 2.5N 0.0P
## 31 1 2.5N/0.25P 0.07777778 0.0872662511 2.5N 0.25P
## 32 2 2.5N/0.25P 0.38888889 0.4230215827 2.5N 0.25P
## 33 3 2.5N/0.25P 0.19722222 0.1963974962 2.5N 0.25P
## 34 1 2.5N/0.5P 0.14444444 0.1664409701 2.5N 0.5P
## 35 2 2.5N/0.5P -0.01388889 0.0008456421 2.5N 0.5P
## 36 3 2.5N/0.5P 0.11111111 0.1215497594 2.5N 0.5P
## Analysis of variance on the slopes
library(car)
ALT.AOV.E1=aov(b1~N*P, data = ALT.tasa.E1)
summary(ALT.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.0419 0.01398 0.525 0.669
## P 2 0.0215 0.01075 0.404 0.672
## N:P 6 0.2288 0.03813 1.433 0.243
## Residuals 24 0.6386 0.02661
Residuales=residuals(ALT.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(ALT.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.081156, p-value = 0.9563
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.98129, p-value = 0.7879
bartlett.test(res_estan~ALT.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by ALT.tasa.E1$N
## Bartlett's K-squared = 1.896, df = 3, p-value = 0.5943
bartlett.test(res_estan~ALT.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by ALT.tasa.E1$P
## Bartlett's K-squared = 0.50772, df = 2, p-value = 0.7758
## Estimation of the linear models and their slopes, Ensayo 2
ALT.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(ALT~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
ALT.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.023087432 0.0017833157 0.0N 0.0P
## 2 2 0.0N/0.0P 0.454371585 0.3850456217 0.0N 0.0P
## 3 3 0.0N/0.0P 0.050546448 0.0037127155 0.0N 0.0P
## 4 1 0.0N/0.25P 0.490710383 0.4460852946 0.0N 0.25P
## 5 2 0.0N/0.25P -0.135382514 0.0674383347 0.0N 0.25P
## 6 3 0.0N/0.25P 0.199726776 0.0414547779 0.0N 0.25P
## 7 1 0.0N/0.5P 0.233060109 0.1319908863 0.0N 0.5P
## 8 2 0.0N/0.5P 0.017144809 0.0028752875 0.0N 0.5P
## 9 3 0.0N/0.5P 0.143715847 0.0886878130 0.0N 0.5P
## 10 1 1.1N/0.0P -0.015710383 0.0005043344 1.1N 0.0P
## 11 2 1.1N/0.0P 0.314617486 0.1787002699 1.1N 0.0P
## 12 3 1.1N/0.0P 0.103142077 0.0250608028 1.1N 0.0P
## 13 1 1.1N/0.25P -0.231010929 0.3928262135 1.1N 0.25P
## 14 2 1.1N/0.25P -0.489617486 0.6794836095 1.1N 0.25P
## 15 3 1.1N/0.25P -0.389344262 0.3614198253 1.1N 0.25P
## 16 1 1.1N/0.5P 0.318852459 0.2378857049 1.1N 0.5P
## 17 2 1.1N/0.5P -0.170560109 0.1916417220 1.1N 0.5P
## 18 3 1.1N/0.5P -0.477459016 0.3155433141 1.1N 0.5P
## 19 1 1.8N/0.0P -0.326502732 0.3375130967 1.8N 0.0P
## 20 2 1.8N/0.0P -0.009016393 0.0006259409 1.8N 0.0P
## 21 3 1.8N/0.0P 0.186748634 0.0785654645 1.8N 0.0P
## 22 1 1.8N/0.25P 0.186202186 0.1711660307 1.8N 0.25P
## 23 2 1.8N/0.25P 0.128825137 0.0407708232 1.8N 0.25P
## 24 3 1.8N/0.25P -0.338251366 0.3201573877 1.8N 0.25P
## 25 1 1.8N/0.5P 0.250683060 0.2091843758 1.8N 0.5P
## 26 2 1.8N/0.5P 0.096584699 0.0481312315 1.8N 0.5P
## 27 3 1.8N/0.5P 0.017486339 0.0015925703 1.8N 0.5P
library(car)
ALT.AOV.E2=aov(b1~N*P, data = ALT.tasa.E2)
summary(ALT.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.3512 0.17560 2.935 0.0788 .
## P 2 0.1106 0.05532 0.925 0.4147
## N:P 4 0.3233 0.08082 1.351 0.2899
## Residuals 18 1.0769 0.05983
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(ALT.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(ALT.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.078068, p-value = 0.9921
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.98126, p-value = 0.8895
bartlett.test(res_estan~ALT.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by ALT.tasa.E2$N
## Bartlett's K-squared = 0.12285, df = 2, p-value = 0.9404
bartlett.test(res_estan~ALT.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by ALT.tasa.E2$P
## Bartlett's K-squared = 0.13291, df = 2, p-value = 0.9357
## Exploratory plot
tasa %>%
dplyr::group_by(., Ens, B2, TTO) %>%
lm_table(LONG~SDT) %>% dplyr::select(., Ens, B2, TTO, b1) %>%
ggplot(., aes(x=TTO, y=b1))+
geom_point()+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))+
labs(x = "Tratamiento" , y = "Growth rate (g por semana)")
## Estimation of the linear models and their slopes, Ensayo 1
LONG.tasa.E1 <- tasa %>%
filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(LONG~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
dplyr::mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
LONG.tasa.E1
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.92500000 0.72209343 0.0N 0.0P
## 2 2 0.0N/0.0P 0.59444444 0.56737671 0.0N 0.0P
## 3 3 0.0N/0.0P 0.41111111 0.51171194 0.0N 0.0P
## 4 1 0.0N/0.25P 0.12500000 0.26383219 0.0N 0.25P
## 5 2 0.0N/0.25P 0.36666667 0.37528000 0.0N 0.25P
## 6 3 0.0N/0.25P 0.14444444 0.17136339 0.0N 0.25P
## 7 1 0.0N/0.5P 0.33611111 0.82487605 0.0N 0.5P
## 8 2 0.0N/0.5P 0.75000000 0.53733324 0.0N 0.5P
## 9 3 0.0N/0.5P 0.60000000 0.84039375 0.0N 0.5P
## 10 1 1.1N/0.0P 0.65555556 0.77417920 1.1N 0.0P
## 11 2 1.1N/0.0P 0.47500000 0.38175955 1.1N 0.0P
## 12 3 1.1N/0.0P 0.51388889 0.60268722 1.1N 0.0P
## 13 1 1.1N/0.25P 0.48333333 0.24805549 1.1N 0.25P
## 14 2 1.1N/0.25P 0.84722222 0.48490087 1.1N 0.25P
## 15 3 1.1N/0.25P 0.36944444 0.31978475 1.1N 0.25P
## 16 1 1.1N/0.5P 0.32500000 0.40397214 1.1N 0.5P
## 17 2 1.1N/0.5P 0.70277778 0.75548832 1.1N 0.5P
## 18 3 1.1N/0.5P 0.80000000 0.71088358 1.1N 0.5P
## 19 1 1.8N/0.0P 0.32500000 0.25807202 1.8N 0.0P
## 20 2 1.8N/0.0P 0.45555556 0.76297823 1.8N 0.0P
## 21 3 1.8N/0.0P 0.45833333 0.52341677 1.8N 0.0P
## 22 1 1.8N/0.25P 0.63055556 0.92601445 1.8N 0.25P
## 23 2 1.8N/0.25P 0.27777778 0.08329631 1.8N 0.25P
## 24 3 1.8N/0.25P 0.48333333 0.44677711 1.8N 0.25P
## 25 1 1.8N/0.5P 0.45277778 0.51747040 1.8N 0.5P
## 26 2 1.8N/0.5P 0.40277778 0.80207274 1.8N 0.5P
## 27 3 1.8N/0.5P 0.57777778 0.27865277 1.8N 0.5P
## 28 1 2.5N/0.0P 0.06388889 0.10141871 2.5N 0.0P
## 29 2 2.5N/0.0P 0.41111111 0.77270055 2.5N 0.0P
## 30 3 2.5N/0.0P 0.55555556 0.63559322 2.5N 0.0P
## 31 1 2.5N/0.25P 0.23333333 0.36636782 2.5N 0.25P
## 32 2 2.5N/0.25P 0.85000000 0.67633916 2.5N 0.25P
## 33 3 2.5N/0.25P 0.70555556 0.91906625 2.5N 0.25P
## 34 1 2.5N/0.5P 0.35000000 0.71960838 2.5N 0.5P
## 35 2 2.5N/0.5P 0.27222222 0.68210227 2.5N 0.5P
## 36 3 2.5N/0.5P 0.29444444 0.53652946 2.5N 0.5P
## Analysis of variance on the slopes
library(car)
LONG.AOV.E1=aov(b1~N*P, data = LONG.tasa.E1)
summary(LONG.AOV.E1)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.1263 0.04209 1.055 0.387
## P 2 0.0063 0.00317 0.079 0.924
## N:P 6 0.4717 0.07862 1.970 0.110
## Residuals 24 0.9577 0.03990
Residuales=residuals(LONG.AOV.E1)
res_estan=Residuales/sd(Residuales)
hist(LONG.tasa.E1$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.103, p-value = 0.8025
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.97569, p-value = 0.5996
bartlett.test(res_estan~LONG.tasa.E1$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LONG.tasa.E1$N
## Bartlett's K-squared = 3.2554, df = 3, p-value = 0.3539
bartlett.test(res_estan~LONG.tasa.E1$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LONG.tasa.E1$P
## Bartlett's K-squared = 1.04, df = 2, p-value = 0.5945
## Linear model estimation and slope extraction, Ensayo 2
LONG.tasa.E2 <- tasa %>%
filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., B2, TTO) %>%
lm_table(LONG~SDT) %>%
dplyr::select(., B2, TTO, b1, Rsqr) %>%
mutate(., TTO1=TTO) %>%
separate(., col=TTO1, into=c("N","P"), sep="/")
LONG.tasa.E2
## B2 TTO b1 Rsqr N P
## 1 1 0.0N/0.0P 0.3493169 0.35747679 0.0N 0.0P
## 2 2 0.0N/0.0P 0.8234973 0.57176025 0.0N 0.0P
## 3 3 0.0N/0.0P 0.6579235 0.31970667 0.0N 0.0P
## 4 1 0.0N/0.25P 0.6525956 0.73732846 0.0N 0.25P
## 5 2 0.0N/0.25P 0.2181694 0.07509409 0.0N 0.25P
## 6 3 0.0N/0.25P 0.8948087 0.52315230 0.0N 0.25P
## 7 1 0.0N/0.5P 0.4198087 0.24586699 0.0N 0.5P
## 8 2 0.0N/0.5P 0.1260929 0.09000831 0.0N 0.5P
## 9 3 0.0N/0.5P 0.5913934 0.72283647 0.0N 0.5P
## 10 1 1.1N/0.0P 0.2984973 0.12706507 1.1N 0.0P
## 11 2 1.1N/0.0P 0.4137978 0.48320192 1.1N 0.0P
## 12 3 1.1N/0.0P 1.1825137 0.72678960 1.1N 0.0P
## 13 1 1.1N/0.25P 0.2756831 0.19888524 1.1N 0.25P
## 14 2 1.1N/0.25P 0.5409836 0.29029569 1.1N 0.25P
## 15 3 1.1N/0.25P 0.5500000 0.33155152 1.1N 0.25P
## 16 1 1.1N/0.5P 0.8240437 0.74879962 1.1N 0.5P
## 17 2 1.1N/0.5P 0.7234290 0.88676435 1.1N 0.5P
## 18 3 1.1N/0.5P 0.6306011 0.30141079 1.1N 0.5P
## 19 1 1.8N/0.0P 0.1512295 0.23164669 1.8N 0.0P
## 20 2 1.8N/0.0P 0.2730874 0.34631113 1.8N 0.0P
## 21 3 1.8N/0.0P 0.9353825 0.67292975 1.8N 0.0P
## 22 1 1.8N/0.25P 0.6009563 0.30641157 1.8N 0.25P
## 23 2 1.8N/0.25P 0.8449454 0.63874618 1.8N 0.25P
## 24 3 1.8N/0.25P 0.1618852 0.02344872 1.8N 0.25P
## 25 1 1.8N/0.5P 0.5062842 0.53182294 1.8N 0.5P
## 26 2 1.8N/0.5P 0.6110656 0.36451241 1.8N 0.5P
## 27 3 1.8N/0.5P 0.9684426 0.60385516 1.8N 0.5P
library(car)
LONG.AOV.E2=aov(b1~N*P, data = LONG.tasa.E2)
summary(LONG.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.0278 0.01388 0.147 0.865
## P 2 0.0243 0.01215 0.128 0.880
## N:P 4 0.2773 0.06933 0.732 0.582
## Residuals 18 1.7040 0.09467
Residuales=residuals(LONG.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(LONG.tasa.E2$b1)
ks.test(res_estan, "pnorm")
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.12922, p-value = 0.7104
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.95833, p-value = 0.3383
bartlett.test(res_estan~LONG.tasa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LONG.tasa.E2$N
## Bartlett's K-squared = 0.38283, df = 2, p-value = 0.8258
bartlett.test(res_estan~LONG.tasa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LONG.tasa.E2$P
## Bartlett's K-squared = 3.1683, df = 2, p-value = 0.2051
Indices.simples <- tasa %>% mutate(.,
SLA= (AF/PSH), # specific leaf area (SLA)
SLW= (PSH/AF), # specific leaf weight (SLW)
LAR= (AF/PST), # leaf area ratio (LAR)
LWR= (PSH/PST), # leaf weight to total weight ratio, LWR (% leaves)
RRpa=(PSR/PST), # root weight to total weight ratio, RRpa (% root)
) %>%
dplyr::filter(., SDT>=24) %>%
dplyr::select(., Ens, N, P, TTO, SLA, SLW, LAR, LWR, RRpa)
Indices.simples
## # A tibble: 153 × 9
## Ens N P TTO SLA SLW LAR LWR RRpa
## <chr> <chr> <chr> <chr> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 Ensayo 1 0.0N 0.0P 0.0N/0.0P 79.9 0.0125 33.8 0.423 0.306
## 2 Ensayo 1 0.0N 0.0P 0.0N/0.0P 63.5 0.0158 27.8 0.437 0.307
## 3 Ensayo 1 0.0N 0.0P 0.0N/0.0P 91.6 0.0109 46.6 0.509 0.187
## 4 Ensayo 1 0.0N 0.0P 0.0N/0.0P 95.3 0.0105 43.7 0.459 0.211
## 5 Ensayo 1 0.0N 0.0P 0.0N/0.0P 67.2 0.0149 28.2 0.419 0.262
## 6 Ensayo 1 0.0N 0.0P 0.0N/0.0P 57.2 0.0175 26.4 0.462 0.222
## 7 Ensayo 1 0.0N 0.25P 0.0N/0.25P 61.4 0.0163 32.6 0.531 0.226
## 8 Ensayo 1 0.0N 0.25P 0.0N/0.25P 65.9 0.0152 38.6 0.585 0.211
## 9 Ensayo 1 0.0N 0.25P 0.0N/0.25P 76.5 0.0131 29.8 0.390 0.317
## 10 Ensayo 1 0.0N 0.25P 0.0N/0.25P 108. 0.00927 51.0 0.473 0.226
## # ℹ 143 more rows
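# Quick sanity check on the new columns (sketch): SLW is defined as the reciprocal
# of SLA, so their product should be 1 for every row up to floating-point error.
Indices.simples %>%
  dplyr::summarise(., max_abs_dev = max(abs(SLA * SLW - 1)))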
# Exploratory plot
ggplot(Indices.simples, aes(x=TTO, y=SLA) )+
geom_point(alpha=0.1)+
geom_boxplot(alpha=0.2)+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))
# Ensayo 1
SLA.E1 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 1") %>%
dplyr::select(., Ens, N, P, TTO, SLA)
SLA.E1
## # A tibble: 72 × 5
## Ens N P TTO SLA
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 1 0.0N 0.0P 0.0N/0.0P 79.9
## 2 Ensayo 1 0.0N 0.0P 0.0N/0.0P 63.5
## 3 Ensayo 1 0.0N 0.0P 0.0N/0.0P 91.6
## 4 Ensayo 1 0.0N 0.0P 0.0N/0.0P 95.3
## 5 Ensayo 1 0.0N 0.0P 0.0N/0.0P 67.2
## 6 Ensayo 1 0.0N 0.0P 0.0N/0.0P 57.2
## 7 Ensayo 1 0.0N 0.25P 0.0N/0.25P 61.4
## 8 Ensayo 1 0.0N 0.25P 0.0N/0.25P 65.9
## 9 Ensayo 1 0.0N 0.25P 0.0N/0.25P 76.5
## 10 Ensayo 1 0.0N 0.25P 0.0N/0.25P 108.
## # ℹ 62 more rows
## Analysis of variance
library(car)
SLA.AOV.E1=aov(SLA~N*P, data = SLA.E1) # AOV model | Type I SS -> balanced design
summary(SLA.AOV.E1) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 2583 861.0 4.353 0.00769 **
## P 2 29 14.4 0.073 0.92997
## N:P 6 273 45.5 0.230 0.96533
## Residuals 60 11867 197.8
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(SLA.AOV.E1) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(SLA.E1$SLA) # Exploratory look at the variable's distribution
ks.test(res_estan, "pnorm") # Normality test | >0.05 means normal
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.11904, p-value = 0.2396
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | >0.05 means normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.96751, p-value = 0.05947
bartlett.test(res_estan~SLA.E1$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLA.E1$N
## Bartlett's K-squared = 6.0829, df = 3, p-value = 0.1076
bartlett.test(res_estan~SLA.E1$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLA.E1$P
## Bartlett's K-squared = 1.2306, df = 2, p-value = 0.5405
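# The residual-diagnostics block above (standardize, KS, Shapiro, Bartlett by N and
# by P) is repeated verbatim for every model below. A minimal helper that bundles it;
# check_aov is a hypothetical name, not part of the original workflow.
check_aov <- function(model, data, factors = c("N", "P")) {
  z <- residuals(model)/sd(residuals(model))          # standardized residuals
  list(
    ks       = ks.test(z, "pnorm"),                   # normality (Kolmogorov-Smirnov)
    shapiro  = shapiro.test(z),                       # normality (Shapiro-Wilk)
    bartlett = lapply(setNames(factors, factors),     # homoscedasticity per factor
                      function(f) bartlett.test(z, g = factor(data[[f]])))
  )
}
# e.g. check_aov(SLA.AOV.E1, SLA.E1)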
library(agricolae)
HSD.test(SLA.AOV.E1, "N", console=T, alpha=0.05)
##
## Study: SLA.AOV.E1 ~ "N"
##
## HSD Test for SLA
##
## Mean Square Error: 197.7762
##
## N, means
##
## SLA std r se Min Max Q25 Q50
## 0.0N 77.24680 14.508462 18 3.31475 57.19452 107.8590 66.63105 72.52271
## 1.1N 87.12793 9.329516 18 3.31475 70.80687 101.8816 79.32477 89.87034
## 1.8N 92.81993 12.277245 18 3.31475 71.49959 108.7916 81.23248 97.00807
## 2.5N 90.80516 16.355625 18 3.31475 66.59197 120.6337 80.38450 84.60723
## Q75
## 0.0N 87.38023
## 1.1N 93.46346
## 1.8N 102.48044
## 2.5N 103.66043
##
## Alpha: 0.05 ; DF Error: 60
## Critical Value of Studentized Range: 3.737089
##
## Minimun Significant Difference: 12.38752
##
## Treatments with the same letter are not significantly different.
##
## SLA groups
## 1.8N 92.81993 a
## 2.5N 90.80516 a
## 1.1N 87.12793 ab
## 0.0N 77.24680 b
## Single-factor plot for nitrogen, Ensayo 1
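# Note: the summary below groups Indices.simples without filtering to Ensayo 1, so the
# means pool both trials (n = 45 for the shared N levels, 18 for 2.5N), while the Tukey
# letters come from the Ensayo 1 model above.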
GRAF.SLA.E1.N <- Indices.simples %>% dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(SLA), sd=sd(SLA)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("b","a","ab", "a"))
GRAF.SLA.E1.N
## # A tibble: 4 × 7
## N n media sd se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 45 82.7 15.9 2.38 4.79 b
## 2 1.1N 45 91.6 13.2 1.97 3.98 a
## 3 1.8N 45 89.4 10.9 1.62 3.27 ab
## 4 2.5N 18 90.8 16.4 3.86 8.13 a
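# Sketch: the Tukey letters are typed by hand above; they can instead be pulled
# straight from HSD.test() so they cannot drift out of sync with the ANOVA.
# hsd_letras is a hypothetical object name.
hsd_letras <- HSD.test(SLA.AOV.E1, "N", alpha = 0.05)$groups %>%
  tibble::rownames_to_column("N") %>%
  dplyr::select(., N, Tukey = groups)
# e.g. GRAF.SLA.E1.N %>% dplyr::select(., -Tukey) %>% dplyr::left_join(., hsd_letras, by = "N")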
ggplot(GRAF.SLA.E1.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_text(aes(label=Tukey, x=N, y=media+se+1.5 ),
color="red", size=4)+
geom_errorbar(ymin=GRAF.SLA.E1.N$media-GRAF.SLA.E1.N$se,
ymax=GRAF.SLA.E1.N$media+GRAF.SLA.E1.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
labs(x = "Tratamiento",y = "AFE (cm"^{2}~".g"^{-1}~")")+
theme_classic()+
ylim(75, 100)
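# A slightly more idiomatic version of the figure above (sketch): map the error-bar
# limits inside aes() so they stay tied to the data, and build the axis label with
# expression() instead of the formula trick.
ggplot(GRAF.SLA.E1.N, aes(x = N, y = media)) +
  geom_errorbar(aes(ymin = media - se, ymax = media + se),
                width = .3, color = "darkgray", alpha = 0.7) +
  geom_point(size = 3) +
  geom_text(aes(label = Tukey, y = media + se + 1.5), color = "red", size = 4) +
  labs(x = "Tratamiento", y = expression(AFE~(cm^{2}~g^{-1}))) +
  theme_classic() +
  ylim(75, 100)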
SLA.E2 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 2") %>%
dplyr::select(., Ens, N, P, TTO, SLA)
SLA.E2
## # A tibble: 81 × 5
## Ens N P TTO SLA
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 2 0.0N 0.0P 0.0N/0.0P 61.4
## 2 Ensayo 2 0.0N 0.0P 0.0N/0.0P 81.5
## 3 Ensayo 2 0.0N 0.0P 0.0N/0.0P 87.9
## 4 Ensayo 2 0.0N 0.0P 0.0N/0.0P 102.
## 5 Ensayo 2 0.0N 0.0P 0.0N/0.0P 105.
## 6 Ensayo 2 0.0N 0.0P 0.0N/0.0P 81.5
## 7 Ensayo 2 0.0N 0.0P 0.0N/0.0P 74.2
## 8 Ensayo 2 0.0N 0.0P 0.0N/0.0P 95.0
## 9 Ensayo 2 0.0N 0.0P 0.0N/0.0P 70.0
## 10 Ensayo 2 0.0N 0.25P 0.0N/0.25P 80.6
## # ℹ 71 more rows
library(car)
SLA.AOV.E2=aov(SLA~N*P, data = SLA.E2)
summary(SLA.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 1084 542.2 3.170 0.0479 *
## P 2 103 51.4 0.301 0.7412
## N:P 4 2227 556.7 3.255 0.0164 *
## Residuals 72 12315 171.0
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(SLA.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(SLA.E2$SLA)
ks.test(res_estan, "pnorm")
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.067869, p-value = 0.8496
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.98921, p-value = 0.7376
bartlett.test(res_estan~SLA.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLA.E2$N
## Bartlett's K-squared = 5.4609, df = 2, p-value = 0.06519
bartlett.test(res_estan~SLA.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLA.E2$P
## Bartlett's K-squared = 5.7604, df = 2, p-value = 0.05612
library(MASS)
boxcox(SLA.AOV.E2, lambda = seq(-2, 2, 0.1))
hist(SLA.E2$SLA^-0.4)
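# The exponents used here (-0.4 in the histogram above, 1.1 in the transformed model
# below) appear to be read off the Box-Cox plot; a sketch for extracting the
# likelihood-maximizing lambda programmatically instead.
bc <- boxcox(SLA.AOV.E2, lambda = seq(-2, 2, 0.1), plotit = FALSE)
bc$x[which.max(bc$y)]   # lambda with the highest profile log-likelihood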
SLA.AOV2.E2=aov( (SLA^1.1)~N*P, data = SLA.E2)
summary(SLA.AOV2.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 3241 1620.5 3.168 0.0480 *
## P 2 313 156.4 0.306 0.7375
## N:P 4 6700 1675.0 3.274 0.0159 *
## Residuals 72 36831 511.5
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(SLA.AOV2.E2)
res_estan=Residuales/sd(Residuales)
ks.test(res_estan, "pnorm")
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.069024, p-value = 0.835
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.9884, p-value = 0.6836
bartlett.test(res_estan~SLA.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLA.E2$N
## Bartlett's K-squared = 5.5548, df = 2, p-value = 0.0622
bartlett.test(res_estan~SLA.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLA.E2$P
## Bartlett's K-squared = 5.7928, df = 2, p-value = 0.05522
## NON-PARAMETRIC
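# The non-parametric route flagged above is not shown in this document; a minimal
# sketch, assuming a rank-based comparison of the N levels of SLA in Ensayo 2 is what
# is intended (the same pattern would apply at the later NON-PARAMETRIC note for SLW).
kruskal.test(SLA ~ as.factor(N), data = SLA.E2)
library(agricolae)
kruskal(SLA.E2$SLA, SLA.E2$N, alpha = 0.05, console = TRUE)   # also returns grouping letters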
# Exploratory plot
ggplot(Indices.simples, aes(x=TTO, y=SLW) )+
geom_point(alpha=0.1)+
geom_boxplot(alpha=0.2)+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))
# Ensayo 1
SLW.E1 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 1") %>%
dplyr::select(., Ens, N, P, TTO, SLW)
SLW.E1
## # A tibble: 72 × 5
## Ens N P TTO SLW
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.0125
## 2 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.0158
## 3 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.0109
## 4 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.0105
## 5 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.0149
## 6 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.0175
## 7 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.0163
## 8 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.0152
## 9 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.0131
## 10 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.00927
## # ℹ 62 more rows
## Analysis of variance
library(car)
SLW.AOV.E1=aov(SLW~N*P, data= SLW.E1) # AOV model | Type I SS -> balanced design
summary(SLW.AOV.E1) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 6.026e-05 2.009e-05 5.591 0.00189 **
## P 2 1.300e-06 6.510e-07 0.181 0.83483
## N:P 6 5.960e-06 9.940e-07 0.277 0.94579
## Residuals 60 2.156e-04 3.593e-06
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(SLW.AOV.E1) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(SLW.E1$SLW) # Exploratory look at the variable's distribution
ks.test(res_estan, "pnorm") # Normality test | >0.05 means normal
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.061544, p-value = 0.9323
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | >0.05 means normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.98931, p-value = 0.8054
bartlett.test(res_estan~SLW.E1$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLW.E1$N
## Bartlett's K-squared = 7.3674, df = 3, p-value = 0.06107
bartlett.test(res_estan~SLW.E1$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLW.E1$P
## Bartlett's K-squared = 1.7279, df = 2, p-value = 0.4215
library(agricolae)
HSD.test(SLW.AOV.E1, "N", console=T, alpha=0.05)
##
## Study: SLW.AOV.E1 ~ "N"
##
## HSD Test for SLW
##
## Mean Square Error: 3.592862e-06
##
## N, means
##
## SLW std r se Min Max Q25
## 0.0N 0.01334978 0.002314797 18 0.00044677 0.009271364 0.01748419 0.011473857
## 1.1N 0.01160459 0.001263565 18 0.00044677 0.009815313 0.01412292 0.010700632
## 1.8N 0.01096615 0.001548956 18 0.00044677 0.009191888 0.01398609 0.009757961
## 2.5N 0.01133787 0.001937501 18 0.00044677 0.008289557 0.01501682 0.009651374
## Q50 Q75
## 0.0N 0.01378972 0.01500833
## 1.1N 0.01112952 0.01260747
## 1.8N 0.01031059 0.01231155
## 2.5N 0.01182143 0.01244100
##
## Alpha: 0.05 ; DF Error: 60
## Critical Value of Studentized Range: 3.737089
##
## Minimun Significant Difference: 0.001669619
##
## Treatments with the same letter are not significantly different.
##
## SLW groups
## 0.0N 0.01334978 a
## 1.1N 0.01160459 b
## 2.5N 0.01133787 b
## 1.8N 0.01096615 b
## Single-factor plot for nitrogen, Ensayo 1
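# Note: as with the SLA summary, the summary below groups Indices.simples without
# filtering to Ensayo 1 (n = 45 for the shared N levels), while the Tukey letters come
# from the Ensayo 1 model above.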
GRAF.SLW.E1.N <- Indices.simples %>% dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(SLW), sd=sd(SLW)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("a","b","b","b"))
GRAF.SLW.E1.N
## # A tibble: 4 × 7
## N n media sd se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 45 0.0125 0.00225 0.000335 0.000675 a
## 2 1.1N 45 0.0111 0.00159 0.000236 0.000476 b
## 3 1.8N 45 0.0113 0.00139 0.000207 0.000418 b
## 4 2.5N 18 0.0113 0.00194 0.000457 0.000963 b
ggplot(GRAF.SLW.E1.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_text(aes(label=Tukey, x=N, y=media+se+0.001 ),
color="red", size=4)+
geom_errorbar(ymin=GRAF.SLW.E1.N$media-GRAF.SLW.E1.N$se,
ymax=GRAF.SLW.E1.N$media+GRAF.SLW.E1.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
labs(x = "Tratamiento",y = "PFE (g.cm"^{-2}~")")+
theme_classic()+
ylim(0.007, 0.015)
SLW.E2 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 2") %>%
dplyr::select(., Ens, N, P, TTO, SLW)
SLW.E2
## # A tibble: 81 × 5
## Ens N P TTO SLW
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.0163
## 2 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.0123
## 3 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.0114
## 4 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.00980
## 5 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.00954
## 6 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.0123
## 7 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.0135
## 8 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.0105
## 9 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.0143
## 10 Ensayo 2 0.0N 0.25P 0.0N/0.25P 0.0124
## # ℹ 71 more rows
## Analysis of variance
library(car)
SLW.AOV.E2=aov(SLW~N*P, data = SLW.E2)
summary(SLW.AOV.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 1.711e-05 8.553e-06 3.155 0.0486 *
## P 2 1.120e-06 5.600e-07 0.206 0.8139
## N:P 4 2.991e-05 7.478e-06 2.759 0.0341 *
## Residuals 72 1.952e-04 2.711e-06
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(SLW.AOV.E2)
res_estan=Residuales/sd(Residuales)
hist(SLW.E2$SLW)
ks.test(res_estan, "pnorm")
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.074328, p-value = 0.7621
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.9857, p-value = 0.5074
bartlett.test(res_estan~SLW.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLW.E2$N
## Bartlett's K-squared = 5.3534, df = 2, p-value = 0.06879
bartlett.test(res_estan~SLW.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLW.E2$P
## Bartlett's K-squared = 5.7642, df = 2, p-value = 0.05602
library(MASS)
boxcox(SLW.AOV.E2, lambda = seq(-2, 2, 0.1))
hist(SLW.E2$SLW^0.17)
SLW.AOV2.E2=aov( (SLW^1.9)~N*P, data = SLW.E2)
summary(SLW.AOV2.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 2.054e-08 1.027e-08 3.134 0.0495 *
## P 2 1.150e-09 5.760e-10 0.176 0.8392
## N:P 4 3.279e-08 8.198e-09 2.502 0.0497 *
## Residuals 72 2.359e-07 3.276e-09
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(SLW.AOV2.E2)
res_estan=Residuales/sd(Residuales)
ks.test(res_estan, "pnorm")
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.084107, p-value = 0.6155
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.97528, p-value = 0.1187
bartlett.test(res_estan~SLW.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLW.E2$N
## Bartlett's K-squared = 6.2683, df = 2, p-value = 0.04354
bartlett.test(res_estan~SLW.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by SLW.E2$P
## Bartlett's K-squared = 6.1634, df = 2, p-value = 0.04588
# NON-PARAMETRIC
# Exploratory plot
ggplot(Indices.simples, aes(x=TTO, y=LAR) )+
geom_point(alpha=0.1)+
geom_boxplot(alpha=0.2)+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))
# Ensayo 1
LAR.E1 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 1") %>%
dplyr::select(., Ens, N, P, TTO, LAR)
LAR.E1
## # A tibble: 72 × 5
## Ens N P TTO LAR
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 1 0.0N 0.0P 0.0N/0.0P 33.8
## 2 Ensayo 1 0.0N 0.0P 0.0N/0.0P 27.8
## 3 Ensayo 1 0.0N 0.0P 0.0N/0.0P 46.6
## 4 Ensayo 1 0.0N 0.0P 0.0N/0.0P 43.7
## 5 Ensayo 1 0.0N 0.0P 0.0N/0.0P 28.2
## 6 Ensayo 1 0.0N 0.0P 0.0N/0.0P 26.4
## 7 Ensayo 1 0.0N 0.25P 0.0N/0.25P 32.6
## 8 Ensayo 1 0.0N 0.25P 0.0N/0.25P 38.6
## 9 Ensayo 1 0.0N 0.25P 0.0N/0.25P 29.8
## 10 Ensayo 1 0.0N 0.25P 0.0N/0.25P 51.0
## # ℹ 62 more rows
## Analysis of variance
library(car)
LAR.AOV.E1=aov(LAR~N*P, data= LAR.E1) # AOV model | Type I SS -> balanced design
summary(LAR.AOV.E1) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 3428 1142.5 14.046 4.82e-07 ***
## P 2 28 13.9 0.171 0.843
## N:P 6 309 51.5 0.634 0.703
## Residuals 60 4880 81.3
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(LAR.AOV.E1) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(LAR.E1$LAR) # Exploratory look at the variable's distribution
ks.test(res_estan, "pnorm") # Normality test | >0.05 means normal
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.083526, p-value = 0.6654
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | >0.05 means normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.98558, p-value = 0.5834
bartlett.test(res_estan~LAR.E1$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LAR.E1$N
## Bartlett's K-squared = 3.1045, df = 3, p-value = 0.3758
bartlett.test(res_estan~LAR.E1$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LAR.E1$P
## Bartlett's K-squared = 0.39281, df = 2, p-value = 0.8217
library(agricolae)
HSD.test(LAR.AOV.E1, "N", console=T, alpha=0.05)
##
## Study: LAR.AOV.E1 ~ "N"
##
## HSD Test for LAR
##
## Mean Square Error: 81.34032
##
## N, means
##
## LAR std r se Min Max Q25 Q50
## 0.0N 34.51453 8.016346 18 2.125772 24.45760 50.98131 28.27735 31.63367
## 1.1N 48.53120 6.914894 18 2.125772 35.82048 57.69827 43.33731 48.71325
## 1.8N 53.25947 9.561806 18 2.125772 36.76902 70.74988 48.18605 53.55847
## 2.5N 46.12155 10.168677 18 2.125772 26.30658 63.32547 40.02486 46.47435
## Q75
## 0.0N 39.82952
## 1.1N 54.50906
## 1.8N 61.15729
## 2.5N 52.81663
##
## Alpha: 0.05 ; DF Error: 60
## Critical Value of Studentized Range: 3.737089
##
## Minimun Significant Difference: 7.9442
##
## Treatments with the same letter are not significantly different.
##
## LAR groups
## 1.8N 53.25947 a
## 1.1N 48.53120 a
## 2.5N 46.12155 a
## 0.0N 34.51453 b
## Single-factor plot for nitrogen, Ensayo 1
GRAF.LAR.E1.N <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(LAR), sd=sd(LAR)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("b","a","a","a"))
GRAF.LAR.E1.N
## # A tibble: 4 × 7
## N n media sd se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 18 34.5 8.02 1.89 3.99 b
## 2 1.1N 18 48.5 6.91 1.63 3.44 a
## 3 1.8N 18 53.3 9.56 2.25 4.75 a
## 4 2.5N 18 46.1 10.2 2.40 5.06 a
ggplot(GRAF.LAR.E1.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_text(aes(label=Tukey, x=N, y=media+se+5 ),
color="red", size=4)+
geom_errorbar(ymin=GRAF.LAR.E1.N$media-GRAF.LAR.E1.N$se,
ymax=GRAF.LAR.E1.N$media+GRAF.LAR.E1.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
labs(x = "Tratamiento",y = "RAF (cm"^{2}~".g"^{-1}~")")+
theme_classic()+
ylim(0, 65)
# Ensayo 2
LAR.E2 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 2") %>%
dplyr::select(., Ens, N, P, TTO, LAR)
LAR.E2
## # A tibble: 81 × 5
## Ens N P TTO LAR
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 2 0.0N 0.0P 0.0N/0.0P 32.9
## 2 Ensayo 2 0.0N 0.0P 0.0N/0.0P 47.9
## 3 Ensayo 2 0.0N 0.0P 0.0N/0.0P 47.9
## 4 Ensayo 2 0.0N 0.0P 0.0N/0.0P 54.0
## 5 Ensayo 2 0.0N 0.0P 0.0N/0.0P 45.8
## 6 Ensayo 2 0.0N 0.0P 0.0N/0.0P 49.3
## 7 Ensayo 2 0.0N 0.0P 0.0N/0.0P 43.9
## 8 Ensayo 2 0.0N 0.0P 0.0N/0.0P 53.6
## 9 Ensayo 2 0.0N 0.0P 0.0N/0.0P 30.9
## 10 Ensayo 2 0.0N 0.25P 0.0N/0.25P 44.7
## # ℹ 71 more rows
library(car)
LAR.AOV.E2=aov(LAR~N*P, data= LAR.E2) # AOV model | Type I SS -> balanced design
summary(LAR.AOV.E2) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 852 425.9 9.149 0.000288 ***
## P 2 26 13.2 0.285 0.753201
## N:P 4 552 138.0 2.965 0.025164 *
## Residuals 72 3352 46.6
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(LAR.AOV.E2) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(LAR.E2$LAR) # Exploratory look at the variable's distribution
ks.test(res_estan, "pnorm") # Normality test | >0.05 means normal
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.048008, p-value = 0.9922
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | >0.05 means normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.9876, p-value = 0.6299
bartlett.test(res_estan~LAR.E2$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LAR.E2$N
## Bartlett's K-squared = 6.3048, df = 2, p-value = 0.04275
bartlett.test(res_estan~LAR.E2$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LAR.E2$P
## Bartlett's K-squared = 1.1023, df = 2, p-value = 0.5763
library(agricolae)
HSD.test(LAR.AOV.E2, "N", console=T, alpha=0.07)
##
## Study: LAR.AOV.E2 ~ "N"
##
## HSD Test for LAR
##
## Mean Square Error: 46.55096
##
## N, means
##
## LAR std r se Min Max Q25 Q50 Q75
## 0.0N 45.18910 8.340534 27 1.313054 30.88752 59.50765 39.61037 45.76491 51.29996
## 1.1N 53.12057 7.667458 27 1.313054 41.00300 65.09301 46.19673 52.03402 59.27012
## 1.8N 48.78253 4.775818 27 1.313054 40.41975 57.23064 45.68735 47.65786 52.12770
##
## Alpha: 0.07 ; DF Error: 72
## Critical Value of Studentized Range: 3.179391
##
## Minimun Significant Difference: 4.174711
##
## Treatments with the same letter are not significantly different.
##
## LAR groups
## 1.1N 53.12057 a
## 1.8N 48.78253 b
## 0.0N 45.18910 b
## Single-factor plot for nitrogen, Ensayo 2
GRAF.LAR.E2.N <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(LAR), sd=sd(LAR)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("c","a","b"))
GRAF.LAR.E2.N
## # A tibble: 3 × 7
## N n media sd se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 27 45.2 8.34 1.61 3.30 c
## 2 1.1N 27 53.1 7.67 1.48 3.03 a
## 3 1.8N 27 48.8 4.78 0.919 1.89 b
ggplot(GRAF.LAR.E2.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_text(aes(label=Tukey, x=N, y=media+se+5 ),
color="red", size=4)+
geom_errorbar(ymin=GRAF.LAR.E2.N$media-GRAF.LAR.E2.N$se,
ymax=GRAF.LAR.E2.N$media+GRAF.LAR.E2.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
labs(x = "Tratamiento",y = "RAF (cm"^{2}~".g"^{-1}~")")+
theme_classic()+
ylim(0, 65)
# Exploratory plot
ggplot(Indices.simples, aes(x=TTO, y=LWR) )+
geom_point(alpha=0.1)+
geom_boxplot(alpha=0.2)+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))
# Ensayo 1
LWR.E1 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 1") %>%
dplyr::select(., Ens, N, P, TTO, LWR)
LWR.E1
## # A tibble: 72 × 5
## Ens N P TTO LWR
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.423
## 2 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.437
## 3 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.509
## 4 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.459
## 5 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.419
## 6 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.462
## 7 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.531
## 8 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.585
## 9 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.390
## 10 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.473
## # ℹ 62 more rows
## Analysis of variance
library(car)
LWR.AOV.E1=aov(LWR~N*P, data= LWR.E1) # AOV model | Type I SS -> balanced design
summary(LWR.AOV.E1) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.17197 0.05732 15.541 1.36e-07 ***
## P 2 0.00104 0.00052 0.140 0.869
## N:P 6 0.02386 0.00398 1.078 0.386
## Residuals 60 0.22132 0.00369
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(LWR.AOV.E1) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(LWR.E1$LWR) # Exploratory look at the variable's distribution
ks.test(res_estan, "pnorm") # Normality test | >0.05 means normal
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.05272, p-value = 0.982
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | >0.05 means normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.99295, p-value = 0.9615
bartlett.test(res_estan~LWR.E1$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LWR.E1$N
## Bartlett's K-squared = 7.857, df = 3, p-value = 0.04906
bartlett.test(res_estan~LWR.E1$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LWR.E1$P
## Bartlett's K-squared = 1.3497, df = 2, p-value = 0.5092
library(agricolae)
HSD.test(LWR.AOV.E1, "N", console=T, alpha=0.05)
##
## Study: LWR.AOV.E1 ~ "N"
##
## HSD Test for LWR
##
## Mean Square Error: 0.003688613
##
## N, means
##
## LWR std r se Min Max Q25 Q50
## 0.0N 0.4467718 0.06040390 18 0.01431513 0.3400402 0.5848613 0.4022323 0.4327741
## 1.1N 0.5558201 0.03900167 18 0.01431513 0.4738114 0.6033134 0.5392932 0.5649187
## 1.8N 0.5727239 0.06101950 18 0.01431513 0.4704069 0.6902473 0.5229478 0.5721687
## 2.5N 0.5085005 0.07476709 18 0.01431513 0.3681755 0.6627258 0.4820506 0.5128616
## Q75
## 0.0N 0.4889592
## 1.1N 0.5743867
## 1.8N 0.6083568
## 2.5N 0.5577934
##
## Alpha: 0.05 ; DF Error: 60
## Critical Value of Studentized Range: 3.737089
##
## Minimun Significant Difference: 0.05349692
##
## Treatments with the same letter are not significantly different.
##
## LWR groups
## 1.8N 0.5727239 a
## 1.1N 0.5558201 ab
## 2.5N 0.5085005 b
## 0.0N 0.4467718 c
## Single-factor plot for nitrogen, Ensayo 1
GRAF.LWR.E1.N <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 1") %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(LWR), sd=sd(LWR)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("c","ab","a","b"))
GRAF.LWR.E1.N
## # A tibble: 4 × 7
## N n media sd se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 18 0.447 0.0604 0.0142 0.0300 c
## 2 1.1N 18 0.556 0.0390 0.00919 0.0194 ab
## 3 1.8N 18 0.573 0.0610 0.0144 0.0303 a
## 4 2.5N 18 0.509 0.0748 0.0176 0.0372 b
ggplot(GRAF.LWR.E1.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_text(aes(label=Tukey, x=N, y=media+se+0.05 ),
color="red", size=4)+
geom_errorbar(ymin=GRAF.LWR.E1.N$media-GRAF.LWR.E1.N$se,
ymax=GRAF.LWR.E1.N$media+GRAF.LWR.E1.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
labs(x = "Tratamiento",y = "RPF (%)")+
theme_classic()+
ylim(0.3, 0.8)
# Ensayo 2
LWR.E2 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 2") %>%
dplyr::select(., Ens, N, P, TTO, LWR)
LWR.E2
## # A tibble: 81 × 5
## Ens N P TTO LWR
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.536
## 2 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.588
## 3 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.545
## 4 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.529
## 5 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.436
## 6 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.605
## 7 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.592
## 8 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.565
## 9 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.441
## 10 Ensayo 2 0.0N 0.25P 0.0N/0.25P 0.554
## # ℹ 71 more rows
## Analysis of variance
library(car)
LWR.AOV.E2=aov(LWR~N*P, data= LWR.E2) # AOV model | Type I SS -> balanced design
summary(LWR.AOV.E2) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.02812 0.014061 4.223 0.0184 *
## P 2 0.01194 0.005970 1.793 0.1738
## N:P 4 0.01718 0.004294 1.290 0.2821
## Residuals 72 0.23972 0.003329
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(LWR.AOV.E2) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(LWR.E2$LWR) # Exploratory look at the variable's distribution
ks.test(res_estan, "pnorm") # Normality test | >0.05 means normal
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.11153, p-value = 0.266
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | >0.05 means normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.94988, p-value = 0.003152
bartlett.test(res_estan~LWR.E2$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LWR.E2$N
## Bartlett's K-squared = 0.78053, df = 2, p-value = 0.6769
bartlett.test(res_estan~LWR.E2$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by LWR.E2$P
## Bartlett's K-squared = 2.6423, df = 2, p-value = 0.2668
library(agricolae)
HSD.test(LWR.AOV.E2, "N", console=T, alpha=0.05)
##
## Study: LWR.AOV.E2 ~ "N"
##
## HSD Test for LWR
##
## Mean Square Error: 0.003329383
##
## N, means
##
## LWR std r se Min Max Q25 Q50
## 0.0N 0.5252732 0.05656465 27 0.01110452 0.4082511 0.6049613 0.5021769 0.5364238
## 1.1N 0.5658813 0.05741578 27 0.01110452 0.4145516 0.6407748 0.5373651 0.5699340
## 1.8N 0.5636211 0.06199647 27 0.01110452 0.4232600 0.6590983 0.5280809 0.5747945
## Q75
## 0.0N 0.5621140
## 1.1N 0.6082227
## 1.8N 0.6128749
##
## Alpha: 0.05 ; DF Error: 72
## Critical Value of Studentized Range: 3.384388
##
## Minimun Significant Difference: 0.03758202
##
## Treatments with the same letter are not significantly different.
##
## LWR groups
## 1.1N 0.5658813 a
## 1.8N 0.5636211 a
## 0.0N 0.5252732 b
## Single-factor plot for nitrogen, Ensayo 2
GRAF.LWR.E2.N <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 2") %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(LWR), sd=sd(LWR)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("b","a","a"))
GRAF.LWR.E2.N
## # A tibble: 3 × 7
## N n media sd se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 27 0.525 0.0566 0.0109 0.0224 b
## 2 1.1N 27 0.566 0.0574 0.0110 0.0227 a
## 3 1.8N 27 0.564 0.0620 0.0119 0.0245 a
ggplot(GRAF.LWR.E2.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_text(aes(label=Tukey, x=N, y=media+se+0.05 ),
color="red", size=4)+
geom_errorbar(ymin=GRAF.LWR.E2.N$media-GRAF.LWR.E2.N$se,
ymax=GRAF.LWR.E2.N$media+GRAF.LWR.E2.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
labs(x = "Tratamiento",y = "RPF (%)")+
theme_classic()+
ylim(0.3, 0.8)
# Exploratory plot
ggplot(Indices.simples, aes(x=TTO, y=RRpa) )+
geom_point(alpha=0.4)+
geom_boxplot(alpha=0.1)+
facet_wrap(~Ens, scales="free_x" )+
theme_bw()+
theme(axis.text = element_text(angle = 90))
# Ensayo 1
RRpa.E1 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 1") %>%
dplyr::select(., Ens, N, P, TTO, RRpa)
RRpa.E1
## # A tibble: 72 × 5
## Ens N P TTO RRpa
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.306
## 2 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.307
## 3 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.187
## 4 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.211
## 5 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.262
## 6 Ensayo 1 0.0N 0.0P 0.0N/0.0P 0.222
## 7 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.226
## 8 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.211
## 9 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.317
## 10 Ensayo 1 0.0N 0.25P 0.0N/0.25P 0.226
## # ℹ 62 more rows
## Analysis of variance
library(car)
RRpa.AOV.E1=aov(RRpa~N*P, data = RRpa.E1) # AOV model | Type I SS -> balanced design
summary(RRpa.AOV.E1) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 3 0.27527 0.09176 49.044 3.82e-16 ***
## P 2 0.00096 0.00048 0.255 0.776
## N:P 6 0.00942 0.00157 0.839 0.545
## Residuals 60 0.11226 0.00187
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(RRpa.AOV.E1) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(RRpa.E1$RRpa) # Exploratory look at the variable's distribution
ks.test(res_estan, "pnorm") # Normality test | >0.05 means normal
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.087559, p-value = 0.6076
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | >0.05 means normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.9716, p-value = 0.1027
bartlett.test(res_estan~RRpa.E1$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by RRpa.E1$N
## Bartlett's K-squared = 4.8605, df = 3, p-value = 0.1823
bartlett.test(res_estan~RRpa.E1$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by RRpa.E1$P
## Bartlett's K-squared = 6.1763, df = 2, p-value = 0.04559
library(agricolae)
HSD.test(RRpa.AOV.E1, "N", console=T, alpha=0.05)
##
## Study: RRpa.AOV.E1 ~ "N"
##
## HSD Test for RRpa
##
## Mean Square Error: 0.001870927
##
## N, means
##
## RRpa std r se Min Max Q25
## 0.0N 0.2614938 0.04694304 18 0.01019512 0.18667979 0.3558851 0.22559076
## 1.1N 0.1271459 0.03535664 18 0.01019512 0.07003546 0.1887755 0.10024310
## 1.8N 0.1108731 0.03450840 18 0.01019512 0.05494505 0.1711779 0.09074869
## 2.5N 0.1199435 0.05068270 18 0.01019512 0.05341953 0.2487624 0.08709808
## Q50 Q75
## 0.0N 0.2577395 0.3066771
## 1.1N 0.1259700 0.1526843
## 1.8N 0.1015799 0.1412130
## 2.5N 0.1089180 0.1362826
##
## Alpha: 0.05 ; DF Error: 60
## Critical Value of Studentized Range: 3.737089
##
## Minimun Significant Difference: 0.03810006
##
## Treatments with the same letter are not significantly different.
##
## RRpa groups
## 0.0N 0.2614938 a
## 1.1N 0.1271459 b
## 2.5N 0.1199435 b
## 1.8N 0.1108731 b
## Single-factor plot for nitrogen, Ensayo 1
GRAF.RRpa.E1.N <- RRpa.E1 %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(RRpa), sd=sd(RRpa)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("a","b","b","b"))
GRAF.RRpa.E1.N
## # A tibble: 4 × 7
## N n media sd se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 18 0.261 0.0469 0.0111 0.0233 a
## 2 1.1N 18 0.127 0.0354 0.00833 0.0176 b
## 3 1.8N 18 0.111 0.0345 0.00813 0.0172 b
## 4 2.5N 18 0.120 0.0507 0.0119 0.0252 b
ggplot(GRAF.RRpa.E1.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_text(aes(label=Tukey, x=N, y=media+se+0.05 ),
color="red", size=4)+
geom_errorbar(ymin=GRAF.RRpa.E1.N$media-GRAF.RRpa.E1.N$se,
ymax=GRAF.RRpa.E1.N$media+GRAF.RRpa.E1.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
labs(x = "Tratamiento",y = "RPR (%)")+
theme_classic()+
ylim(0.00, 0.38)
RRpa.E2 <- Indices.simples %>%
dplyr::filter(., Ens=="Ensayo 2") %>%
dplyr::select(., Ens, N, P, TTO, RRpa)
RRpa.E2
## # A tibble: 81 × 5
## Ens N P TTO RRpa
## <chr> <chr> <chr> <chr> <dbl>
## 1 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.167
## 2 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.201
## 3 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.216
## 4 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.150
## 5 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.315
## 6 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.142
## 7 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.200
## 8 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.161
## 9 Ensayo 2 0.0N 0.0P 0.0N/0.0P 0.324
## 10 Ensayo 2 0.0N 0.25P 0.0N/0.25P 0.156
## # ℹ 71 more rows
## Analysis of variance
library(car)
RRpa.AOV.E2=aov(RRpa~N*P, data = RRpa.E2) # AOV model | Type I SS -> balanced design
summary(RRpa.AOV.E2) # AOV model summary
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.14931 0.07465 30.315 2.81e-10 ***
## P 2 0.00049 0.00024 0.099 0.906
## N:P 4 0.00747 0.00187 0.758 0.556
## Residuals 72 0.17730 0.00246
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(RRpa.AOV.E2) # Extract the residuals
res_estan=Residuales/sd(Residuales) # Standardize the residuals
hist(RRpa.E2$RRpa) # Exploratory look at the variable's distribution
ks.test(res_estan, "pnorm") # Normality test | >0.05 means normal
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.068884, p-value = 0.8368
## alternative hypothesis: two-sided
shapiro.test(res_estan) # Normality test | >0.05 means normal
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.97258, p-value = 0.07937
bartlett.test(res_estan~RRpa.E2$N) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by RRpa.E2$N
## Bartlett's K-squared = 6.5505, df = 2, p-value = 0.03781
bartlett.test(res_estan~RRpa.E2$P) # Homoscedasticity test
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by RRpa.E2$P
## Bartlett's K-squared = 1.1188, df = 2, p-value = 0.5716
library(MASS)
boxcox(RRpa.AOV.E2, lambda = seq(-2, 2, 0.1))
hist(RRpa.E2$RRpa^0.2)
RRpa.AOV2.E2=aov( (RRpa^0.2)~N*P, data = RRpa.E2)
summary(RRpa.AOV2.E2)
## Df Sum Sq Mean Sq F value Pr(>F)
## N 2 0.10568 0.05284 27.637 1.24e-09 ***
## P 2 0.00038 0.00019 0.100 0.905
## N:P 4 0.00429 0.00107 0.561 0.691
## Residuals 72 0.13766 0.00191
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residuales=residuals(RRpa.AOV2.E2)
res_estan=Residuales/sd(Residuales)
ks.test(res_estan, "pnorm")
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: res_estan
## D = 0.071169, p-value = 0.8065
## alternative hypothesis: two-sided
shapiro.test(res_estan)
##
## Shapiro-Wilk normality test
##
## data: res_estan
## W = 0.9754, p-value = 0.1208
bartlett.test(res_estan~RRpa.E2$N)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by RRpa.E2$N
## Bartlett's K-squared = 0.41152, df = 2, p-value = 0.814
bartlett.test(res_estan~RRpa.E2$P)
##
## Bartlett test of homogeneity of variances
##
## data: res_estan by RRpa.E2$P
## Bartlett's K-squared = 1.1518, df = 2, p-value = 0.5622
library(agricolae)
HSD.test(RRpa.AOV.E2, "N", console=T, alpha=0.05)
##
## Study: RRpa.AOV.E2 ~ "N"
##
## HSD Test for RRpa
##
## Mean Square Error: 0.002462566
##
## N, means
##
## RRpa std r se Min Max Q25
## 0.0N 0.2183160 0.06116082 27 0.009550191 0.13831152 0.3243938 0.16428704
## 1.1N 0.1289317 0.03798841 27 0.009550191 0.06345476 0.2022562 0.09785547
## 1.8N 0.1256380 0.04406491 27 0.009550191 0.06260136 0.2201330 0.09594862
## Q50 Q75
## 0.0N 0.2042922 0.2640859
## 1.1N 0.1377374 0.1509570
## 1.8N 0.1194500 0.1509051
##
## Alpha: 0.05 ; DF Error: 72
## Critical Value of Studentized Range: 3.384388
##
## Minimun Significant Difference: 0.03232155
##
## Treatments with the same letter are not significantly different.
##
## RRpa groups
## 0.0N 0.2183160 a
## 1.1N 0.1289317 b
## 1.8N 0.1256380 b
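# Note: the Tukey comparison above is run on the untransformed model (RRpa.AOV.E2),
# although it was the Box-Cox-transformed fit (RRpa.AOV2.E2) that passed the
# homogeneity checks. A sketch of the same comparison on the transformed scale
# (means would then be on the RRpa^0.2 scale):
HSD.test(RRpa.AOV2.E2, "N", console = T, alpha = 0.05)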
## Single-factor plot for nitrogen, Ensayo 2
GRAF.RRpa.E2.N <- RRpa.E2 %>%
dplyr::group_by(., N) %>%
dplyr::summarise( n=n(), media=mean(RRpa), sd=sd(RRpa)) %>%
dplyr::mutate( se=sd/sqrt(n)) %>%
dplyr::mutate( ic=se * qt((1-0.05)/2 + .5, n-1)) %>%
dplyr::mutate(., Tukey=c("a","b","b"))
GRAF.RRpa.E2.N
## # A tibble: 3 × 7
## N n media sd se ic Tukey
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 0.0N 27 0.218 0.0612 0.0118 0.0242 a
## 2 1.1N 27 0.129 0.0380 0.00731 0.0150 b
## 3 1.8N 27 0.126 0.0441 0.00848 0.0174 b
ggplot(GRAF.RRpa.E2.N, aes(x=N, y=media))+
geom_point(size=3, position=position_dodge(.3))+
geom_text(aes(label=Tukey, x=N, y=media+se+0.04 ),
color="red", size=4)+
geom_errorbar(ymin=GRAF.RRpa.E2.N$media-GRAF.RRpa.E2.N$se,
ymax=GRAF.RRpa.E2.N$media+GRAF.RRpa.E2.N$se,
alpha=0.7, width=.3, color="darkgray",
position=position_dodge(.3) )+
labs(x = "Tratamiento",y = "RPR (%)")+
theme_classic()+
ylim(0.00, 0.28)