\(x\) = [25 24 22 25 19 19 22 32 18 20 25 22 29 18]
\(H_0:µ≤19\)
\(H_1:µ>19\)
\(α=0.05\)
mu=19
x=c(25, 24, 22, 25, 19, 19, 22, 32, 18, 20, 25, 22, 29, 18)
tt=t.test(x,mu=mu,alternative="greater")
tt
##
## One Sample t-test
##
## data: x
## t = 3.479, df = 13, p-value = 0.002037
## alternative hypothesis: true mean is greater than 19
## 95 percent confidence interval:
## 20.89372 Inf
## sample estimates:
## mean of x
## 22.85714
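As a cross-check (not part of the original output), the t statistic can be recomputed by hand from \(t = (\bar{x} - µ_0)/(s/\sqrt{n})\); it should reproduce the t = 3.479 reported above.
# manual t statistic, should match t = 3.479 above
(mean(x) - mu) / (sd(x) / sqrt(length(x)))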
\(x_1\) = [67 69 62 64 70 60 61 63]
\(x_2\) = [71 65 76 69 65 75 69 65 74 65 60 64]
\(H_0:µ_1=µ_2\)
\(H_1:µ_1≠µ_2\)
\(α=0.1\)
x1=c(67, 69, 62, 64, 70, 60, 61, 63)
x2 = c(71, 65, 76, 69, 65, 75, 69, 65, 74, 65, 60, 64)
tt = t.test(x1, x2, alternative = "two.sided", var.equal = TRUE)
tt
##
## Two Sample t-test
##
## data: x1 and x2
## t = -1.7631, df = 18, p-value = 0.09485
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -8.0357995 0.7024661
## sample estimates:
## mean of x mean of y
## 64.50000 68.16667
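Two remarks on this call: var.equal = TRUE pools the two sample variances, an assumption one would normally check first with an F test (as later exercises do), and the interval printed above is a 95% one because conf.level was left at its default even though α = 0.1. A quick homogeneity check, not part of the original output:
# F test for equality of variances, run before pooling with var.equal = TRUE
var.test(x1, x2)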
\(x_1\) = [108 105 112 112 100 118 109 114 102 109]
\(x_2\) = [103 108 113 99 101 116 104 104 114 113]
\(H_0:(x_1-x_2)≤0\)
\(H_1:(x_1-x_2)>0\)
\(α=0.05\)
x1 =c (108, 105, 112, 112, 100, 118, 109, 114, 102, 109)
x2 = c (103, 108, 113, 99, 101, 116, 104, 104, 114, 113)
tt = t.test(x1, x2, paired = TRUE, alternative = "greater")
tt
##
## Paired t-test
##
## data: x1 and x2
## t = 0.60979, df = 9, p-value = 0.2785
## alternative hypothesis: true difference in means is greater than 0
## 95 percent confidence interval:
## -2.808626 Inf
## sample estimates:
## mean of the differences
## 1.4
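Equivalently (not shown in the original), the paired test is just a one-sample t-test on the differences and should give the same t = 0.60979 and p-value = 0.2785:
# the paired test expressed as a one-sample t-test on the differences
t.test(x1 - x2, mu = 0, alternative = "greater")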
\(x_1\) = [162 146 150 141 201 174 71 125 156 161 146 102 154 140]
\(x_2\) = [123 119 155 134 142 126 150 138 135 119 116 137]
\(H_0:σ_1 = σ_2\)
\(H_1:σ_1 ≠ σ_2\)
\(α=0.05\)
x1 = c(162, 146, 150, 141, 201, 174, 71, 125, 156, 161, 146, 102, 154, 140)
x2 = c(123, 119, 155, 134, 142, 126, 150, 138, 135, 119, 116, 137)
shapiro.test(x1)
##
## Shapiro-Wilk normality test
##
## data: x1
## W = 0.92448, p-value = 0.2549
shapiro.test(x2)
##
## Shapiro-Wilk normality test
##
## data: x2
## W = 0.94756, p-value = 0.6016
tt = var.test(x1, x2)
tt
##
## F test to compare two variances
##
## data: x1 and x2
## F = 6.1233, num df = 13, denom df = 11, p-value = 0.004897
## alternative hypothesis: true ratio of variances is not equal to 1
## 95 percent confidence interval:
## 1.805359 19.579190
## sample estimates:
## ratio of variances
## 6.123288
dados = c(x1, x2)
dados
## [1] 162 146 150 141 201 174 71 125 156 161 146 102 154 140 123 119 155
## [18] 134 142 126 150 138 135 119 116 137
grupos = as.factor(c(rep(1, length(x1)), rep(2, length(x2))))
grupos
## [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2
## Levels: 1 2
bartlett.test(dados, grupos)
##
## Bartlett test of homogeneity of variances
##
## data: dados and grupos
## Bartlett's K-squared = 7.9891, df = 1, p-value = 0.004706
library(car)
## Loading required package: carData
leveneTest(dados, grupos) # Levene's test
## Levene's Test for Homogeneity of Variance (center = median)
## Df F value Pr(>F)
## group 1 2.333 0.1397
## 24
fligner.test(dados, grupos) # Fligner-Killeen test
##
## Fligner-Killeen test of homogeneity of variances
##
## data: dados and grupos
## Fligner-Killeen:med chi-squared = 1.1069, df = 1, p-value = 0.2928
\(x_1\) = [15 10 18 20 33 45 78 92 88 77 72]
\(H_0 : σ≤23\)
\(H_1 : σ>23\)
\(α = 0.05\)
x1 = c(15, 10, 18, 20, 33, 45, 78, 92, 88, 77, 72)
sigma = 23
alfa=0.05
s = sd(x1)
n = length(x1)
chi2 = (n-1)*s^2/sigma^2
1-pchisq(chi2, df = n-1)
## [1] 0.03535909
# tt = var.1.test(x = x1, sigma2 = sigma^2, alfa = alfa, alternative = "greater")
# var.1.test() is a custom helper that is not available here; the manual
# chi-squared computation above already gives the right-tailed p-value (0.0354).
\(H_0 : π ≥ 0.6\)
\(H_1 : π < 0.6\)
\(α = 0.05\)
n = 323
x = 175
p0 = 0.6
C = 0.95
alfa = 1-C
p = x/n
z = (p-p0)/sqrt(p0*(1-p0)/n)  # z statistic (negative, since p < p0)
p.value = pnorm(z)            # lower-tail p-value for the "less" alternative
p.value
## [1] 0.01636985
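Squaring z should reproduce the X-squared statistic that prop.test reports just below (X-squared = 4.5593), since for this test \(X^2 = z^2\); this check is not part of the original output.
z^2  # should match the X-squared value reported by prop.test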
prop.test(175, 323, p = 0.6, correct = FALSE, alternative = "less")
##
## 1-sample proportions test without continuity correction
##
## data: 175 out of 323, null probability 0.6
## X-squared = 4.5593, df = 1, p-value = 0.01637
## alternative hypothesis: true p is less than 0.6
## 95 percent confidence interval:
## 0.0000000 0.5868609
## sample estimates:
## p
## 0.5417957
# Soft drinks Sweets Toys
# Neighborhood 1 40 35 28
# Neighborhood 2 65 32 60
# Neighborhood 3 66 44 70
dados = rbind(
c(40, 35, 28),
c(65, 32, 60),
c(66, 44, 70)
)
C = 0.9
alfa = 1-C
tt = chisq.test(dados, correct = FALSE)
tt
##
## Pearson's Chi-squared test
##
## data: dados
## X-squared = 7.978, df = 4, p-value = 0.09239
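The expected counts under independence can be inspected from the returned object (not shown in the original output):
tt$expected  # expected frequencies under H0 (independence)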
# Neighborhood 1 Neighborhood 2 Neighborhood 3
# Happy 40 35 28
# Total 50 50 50
dados2 = rbind(
c(40,35,28),
c(50,50,50)
)
tt = prop.test(dados2[1,], dados2[2,], correct=FALSE)
tt
##
## 3-sample test for equality of proportions without continuity
## correction
##
## data: dados2[1, ] out of dados2[2, ]
## X-squared = 6.7548, df = 2, p-value = 0.03414
## alternative hypothesis: two.sided
## sample estimates:
## prop 1 prop 2 prop 3
## 0.80 0.70 0.56
# or, equivalently
tt = chisq.test(rbind(dados2[1,], dados2[2,]-dados2[1,]),correct = FALSE)
tt
##
## Pearson's Chi-squared test
##
## data: rbind(dados2[1, ], dados2[2, ] - dados2[1, ])
## X-squared = 6.7548, df = 2, p-value = 0.03414
\(a\) = [9 13 9 18 13 9 14 15 14 11 18 14 10 3]
a = c(9, 13, 9, 18, 13, 9, 14, 15, 14, 11, 18, 14, 10, 3)
alfa = 0.05
t.test(a, mu = 10, alternative = "greater")
##
## One Sample t-test
##
## data: a
## t = 2.0058, df = 13, p-value = 0.03307
## alternative hypothesis: true mean is greater than 10
## 95 percent confidence interval:
## 10.25095 Inf
## sample estimates:
## mean of x
## 12.14286
\(a\) = [19 22 15 15 28 18 29 25]
\(b\) = [18 16 16 17 15 17 16 18 18 20]
a = c(19, 22, 15, 15, 28, 18, 29, 25)
b = c(18, 16, 16, 17, 15, 17, 16, 18, 18, 20)
alfa = 0.05
var.test(a, b)
##
## F test to compare two variances
##
## data: a and b
## F = 14.549, num df = 7, denom df = 9, p-value = 0.0006045
## alternative hypothesis: true ratio of variances is not equal to 1
## 95 percent confidence interval:
## 3.466561 70.174527
## sample estimates:
## ratio of variances
## 14.54932
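# Pool the variances only if the F test above does not reject equality at the 5%
# level; here it rejects (p = 0.0006), so var.equal is FALSE and a Welch test is run.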
t.test(a, b, alternative = "two.sided", var.equal = var.test(a, b)$p.value>0.05)
##
## Welch Two Sample t-test
##
## data: a and b
## t = 2.1297, df = 7.7727, p-value = 0.06682
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.3774792 8.9274792
## sample estimates:
## mean of x mean of y
## 21.375 17.100
\(a\) = [18 20 16 16 23 17 24 22 20 17 18]
a = c(18, 20, 16, 16, 23, 17, 24, 22, 20, 17, 18)
sigma = 2
alfa = 0.05
s = sd(a)
n = length(a)
# Formula from p. 324
chisq2 = (n-1)*s^2/sigma^2
# Right-tailed (upper-tail) test statistic
p = 1-pchisq(chisq2, n-1)
p
## [1] 0.03012436
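Equivalently (a check not in the original), the decision can be made by comparing the statistic with the upper critical value:
chisq2 > qchisq(1 - alfa, df = n - 1)  # TRUE means reject H0 at level alfa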
\(a\) = [18 20 16 16 23 17 24 22 20 17 18]
a = c(18, 20, 16, 16, 23, 17, 24, 22, 20, 17, 18)
sigma = 2
alfa = 0.05
s = sd(a)
n = length(a)
# Formula from p. 324
chisq2 = (n-1)*s^2/sigma^2
# Two-tailed test: double the smaller tail probability (here the upper tail)
p = 2*(1-pchisq(chisq2, n-1))
p
## [1] 0.06024871
\(a\) = [19 20 18 19 21 19 21 21 20 19 19]
a = c(19, 20, 18, 19, 21, 19, 21, 21, 20, 19, 19)
sigma = 2
alfa = 0.05
s = sd(a)
n = length(a)
# Formula from p. 324
chisq2 = (n-1)*s^2/sigma^2
# Left-tailed (lower-tail) test statistic
p = pchisq(chisq2, n-1)
p
## [1] 0.01126391
p = 0.38
alfa = 0.05
x = 25
n = 45
prop.test(x, n, p, alternative = "two.sided")
##
## 1-sample proportions test with continuity correction
##
## data: x out of n, null probability p
## X-squared = 5.1651, df = 1, p-value = 0.02305
## alternative hypothesis: true p is not equal to 0.38
## 95 percent confidence interval:
## 0.4012357 0.7004945
## sample estimates:
## p
## 0.5555556
\(H_0 : p_1 ≤ p_2\)
\(H_1 : p_1 > p_2\)
alfa = 0.01
x1 = 56
n1 = 80
x2 = 38
n2 = 80
prop.test(c(x1, x2), c(n1, n2), alternative = "greater", correct = FALSE)
##
## 2-sample test for equality of proportions without continuity
## correction
##
## data: c(x1, x2) out of c(n1, n2)
## X-squared = 8.3559, df = 1, p-value = 0.001922
## alternative hypothesis: greater
## 95 percent confidence interval:
## 0.1003576 1.0000000
## sample estimates:
## prop 1 prop 2
## 0.700 0.475
# Single Married Divorced/Widowed
# Friends and social life 41 49 42
# Job or main activity 27 50 33
# Health and physical fitness 12 21 25
alfa = 0.01
dados = rbind(
c(41, 49, 42),
c(27, 50, 33),
c(12, 21, 25)
)
chisq.test(dados)
##
## Pearson's Chi-squared test
##
## data: dados
## X-squared = 5.3371, df = 4, p-value = 0.2544
# In favor Against
# Union 1 74 26
# Union 2 81 19
# Union 3 69 31
# Union 4 75 25
# Union 5 91 9
alfa = 0.01
dados = rbind(
c(74, 26),
c(81, 19),
c(69, 31),
c(75, 25),
c(91, 9)
)
# Note: this compares only row 1 against the element-wise sum of rows 1 and 2
# (see the corrected sketch below for all five unions)
prop.test(dados[1,], dados[1,]+dados[2,])
##
## 2-sample test for equality of proportions with continuity
## correction
##
## data: dados[1, ] out of dados[1, ] + dados[2, ]
## X-squared = 1.0323, df = 1, p-value = 0.3096
## alternative hypothesis: two.sided
## 95 percent confidence interval:
## -0.27903734 0.07832049
## sample estimates:
## prop 1 prop 2
## 0.4774194 0.5777778
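The call above uses only the first two rows of the table. To compare the proportion in favor across all five unions at once, the columns would be passed instead; a corrected sketch whose output is not reproduced here:
# proportion in favor for each of the five unions, each out of 100 respondents
prop.test(dados[, 1], dados[, 1] + dados[, 2])
# equivalently, a chi-squared test on the full 5 x 2 table
chisq.test(dados)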
\(H_0 : obs = exp\)
\(H_1 : obs ≠ exp\)
# Soft drinks Sweets Toys
# Neighborhood 1 40 35 20
# Neighborhood 2 65 37 60
# Neighborhood 3 66 48 70
a = rbind(
c(40, 35, 20),
c(65, 37, 60),
c(66, 48, 70)
)
C = 0.99
alfa = 1-C
tt = chisq.test(a, correct = FALSE)
tt
##
## Pearson's Chi-squared test
##
## data: a
## X-squared = 11.216, df = 4, p-value = 0.02425
\(x_1\) = [152 144 190 145 205 170 179 172 124 134 168 190 135]
\(x_2\) = [122 155 99 94 156 170 144 138 138 168 125 125]
\(H_0 : σ_1 = σ_2\)
\(H_1 : σ_1 ≠ σ_2\)
\(α = 0.05\)
x1 = c(152, 144, 190, 145, 205, 170, 179, 172, 124, 134, 168, 190, 135)
x2 = c(122, 155, 99, 94, 156, 170, 144, 138, 138, 168, 125, 125)
shapiro.test(x1)
##
## Shapiro-Wilk normality test
##
## data: x1
## W = 0.95825, p-value = 0.7265
shapiro.test(x2)
##
## Shapiro-Wilk normality test
##
## data: x2
## W = 0.94913, p-value = 0.6242
tt = var.test(x1, x2)
tt
##
## F test to compare two variances
##
## data: x1 and x2
## F = 1.0548, num df = 12, denom df = 11, p-value = 0.9365
## alternative hypothesis: true ratio of variances is not equal to 1
## 95 percent confidence interval:
## 0.3075536 3.5034647
## sample estimates:
## ratio of variances
## 1.05479
dados = c(x1, x2)
dados
## [1] 152 144 190 145 205 170 179 172 124 134 168 190 135 122 155 99 94
## [18] 156 170 144 138 138 168 125 125
grupos = as.factor(c(rep(1, length(x1)), rep(2, length(x2))))
grupos
## [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2
## Levels: 1 2
bartlett.test(dados, grupos)
##
## Bartlett test of homogeneity of variances
##
## data: dados and grupos
## Bartlett's K-squared = 0.0078168, df = 1, p-value = 0.9295
library(car)
leveneTest(dados, grupos)
## Levene's Test for Homogeneity of Variance (center = median)
## Df F value Pr(>F)
## group 1 0.1149 0.7377
## 23
fligner.test(dados, grupos)
##
## Fligner-Killeen test of homogeneity of variances
##
## data: dados and grupos
## Fligner-Killeen:med chi-squared = 0.15328, df = 1, p-value =
## 0.6954
\(H_0 : (x_1-x_2) ≥ 0\)
\(H_1 : (x_1-x_2) < 0\)
\(α = 0.05\)
\(x_1\) = [76 82 71 85 75 71 72 89 82 88]
\(x_2\) = [82 79 84 76 82 83 78 86 90 93]
x1 = c(76, 82, 71, 85, 75, 71, 72, 89, 82, 88)
x2 = c(82, 79, 84, 76, 82, 83, 78, 86, 90, 93)
tt = t.test(x1, x2, paired = TRUE, alternative = "less")
tt
##
## Paired t-test
##
## data: x1 and x2
## t = -1.8875, df = 9, p-value = 0.04585
## alternative hypothesis: true difference in means is less than 0
## 95 percent confidence interval:
## -Inf -0.1211235
## sample estimates:
## mean of the differences
## -4.2
\(H_0 : p_i = p_j ∀ i, j\)
\(H_1 : ∃ i ≠ j | p_i ≠ p_j\)
\(α = 0.05\)
# Neighborhood 1 Neighborhood 2 Neighborhood 3
# Happy 40 48 39
# Total 50 50 50
a = rbind(
c(40,48,39),
c(50,50,50)
)
tt = prop.test(a[1,], a[2,],correct = FALSE)
tt
##
## 3-sample test for equality of proportions without continuity
## correction
##
## data: a[1, ] out of a[2, ]
## X-squared = 7.4974, df = 2, p-value = 0.02355
## alternative hypothesis: two.sided
## sample estimates:
## prop 1 prop 2 prop 3
## 0.80 0.96 0.78
# or, equivalently
tt = chisq.test(rbind(a[1,], a[2,]-a[1,]),correct = FALSE)
tt
##
## Pearson's Chi-squared test
##
## data: rbind(a[1, ], a[2, ] - a[1, ])
## X-squared = 7.4974, df = 2, p-value = 0.02355
\(H_0 : π ≥ 0.6\)
\(H_1 : π < 0.6\)
\(α = 0.05\)
n = 40
x = 21
p0 = 0.6
C = 0.95
alfa = 1-C
p = x/n
z = (p-p0)/sqrt(p0*(1-p0)/n)  # z statistic (negative, since p < p0)
p.value = pnorm(z)            # lower-tail p-value for the "less" alternative
p.value
## [1] 0.1664608
prop.test(x, n, p = 0.6, correct = FALSE, alternative = "less")
##
## 1-sample proportions test without continuity correction
##
## data: x out of n, null probability 0.6
## X-squared = 0.9375, df = 1, p-value = 0.1665
## alternative hypothesis: true p is less than 0.6
## 95 percent confidence interval:
## 0.0000000 0.6491193
## sample estimates:
## p
## 0.525
\(x\) = [19 25 16 14 25 18 30 21 24 21 24 16 16 19 18]
\(H_0 : µ = 18.6\)
\(H_1 : µ ≠ 18.6\)
\(α = 0.1\)
x = c(19, 25, 16, 14, 25, 18, 30,21, 24, 21, 24, 16, 16, 19, 18)
mu = 18.6
tt = t.test(x, mu = mu, alternative = "two.sided")
tt
##
## One Sample t-test
##
## data: x
## t = 1.5712, df = 14, p-value = 0.1384
## alternative hypothesis: true mean is not equal to 18.6
## 95 percent confidence interval:
## 17.94295 22.85705
## sample estimates:
## mean of x
## 20.4
\(H_0 : σ ≤ 23\)
\(H_1 : σ > 23\)
\(α = 0.1\)
\(x_1\) = [15 22 18 20 33 45 78 92 88 77 72]
x1 = c(15, 22, 18, 20, 33, 45, 78, 92, 88, 77, 72)
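The original stops after defining the data. Assuming the parameter of interest is the standard deviation σ, as in the earlier exercise with nearly the same data, a sketch of the corresponding right-tailed chi-squared test (output not reproduced):
sigma = 23
alfa = 0.1
n = length(x1)
chi2 = (n - 1) * var(x1) / sigma^2   # test statistic
1 - pchisq(chi2, df = n - 1)         # right-tailed p-value; reject H0 if below alfa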
\(H_0 : µ_1 ≥ µ_2\)
\(H_1 : µ_1 < µ_2\)
\(α = 0.05\)
\(x_1\) = [69 67 62 67 64 67 62 60 61 60 69 69 68 64 63]
\(x_2\) = [59 64 63 73 77 58 64 79 74 65 64 60]
x1 = c(69, 67, 62, 67, 64, 67, 62, 60, 61, 60, 69, 69, 68, 64, 63)
x2 = c(59, 64, 63, 73, 77, 58, 64, 79, 74, 65, 64, 60)
tt = t.test(x1, x2, alternative = "less", var.equal = TRUE)
tt
##
## Two Sample t-test
##
## data: x1 and x2
## t = -0.89373, df = 25, p-value = 0.19
## alternative hypothesis: true difference in means is less than 0
## 95 percent confidence interval:
## -Inf 1.701017
## sample estimates:
## mean of x mean of y
## 64.80000 66.66667
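As a cross-check (not in the original), the pooled two-sample t statistic can be recomputed by hand and should match the t = -0.89373 reported above.
# pooled variance and manual t statistic for the two-sample test
sp2 = ((length(x1) - 1) * var(x1) + (length(x2) - 1) * var(x2)) /
      (length(x1) + length(x2) - 2)
(mean(x1) - mean(x2)) / sqrt(sp2 * (1/length(x1) + 1/length(x2)))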