The objective of this problem set is to orient you to a number of activities in R and to conduct a thoughtful exercise in appreciating the importance of data visualization. For each question, create a code chunk or text response that completes or answers the activity or question requested. Finally, upon completion, name your final output .html file as: YourName_ANLY512-Section-Year-Semester.html and upload it to the “Problem Set 2” assignment on Moodle.
Load the anscombe data that is part of library(datasets) in R, and assign that data to a new object called data.

data <- anscombe
data
## x1 x2 x3 x4 y1 y2 y3 y4
## 1 10 10 10 8 8.04 9.14 7.46 6.58
## 2 8 8 8 8 6.95 8.14 6.77 5.76
## 3 13 13 13 8 7.58 8.74 12.74 7.71
## 4 9 9 9 8 8.81 8.77 7.11 8.84
## 5 11 11 11 8 8.33 9.26 7.81 8.47
## 6 14 14 14 8 9.96 8.10 8.84 7.04
## 7 6 6 6 8 7.24 6.13 6.08 5.25
## 8 4 4 4 19 4.26 3.10 5.39 12.50
## 9 12 12 12 8 10.84 9.13 8.15 5.56
## 10 7 7 7 8 4.82 7.26 6.42 7.91
## 11 5 5 5 8 5.68 4.74 5.73 6.89
Calculate the mean and variance of each variable in the data set (hint: use the fBasics() package!).

library(fBasics)
## Loading required package: timeDate
## Loading required package: timeSeries
##
## Rmetrics Package fBasics
## Analysing Markets and calculating Basic Statistics
## Copyright (C) 2005-2014 Rmetrics Association Zurich
## Educational Software for Financial Engineering and Computational Science
## Rmetrics is free software and comes with ABSOLUTELY NO WARRANTY.
## https://www.rmetrics.org --- Mail to: info@rmetrics.org
# Mean and variance of each column, computed with fBasics::colStats()
x1_mean <- colStats(data$x1, mean)
x1_var  <- colStats(data$x1, var)
x2_mean <- colStats(data$x2, mean)
x2_var  <- colStats(data$x2, var)
x3_mean <- colStats(data$x3, mean)
x3_var  <- colStats(data$x3, var)
x4_mean <- colStats(data$x4, mean)
x4_var  <- colStats(data$x4, var)
y1_mean <- colStats(data$y1, mean)
y1_var  <- colStats(data$y1, var)
y2_mean <- colStats(data$y2, mean)
y2_var  <- colStats(data$y2, var)
y3_mean <- colStats(data$y3, mean)
y3_var  <- colStats(data$y3, var)
y4_mean <- colStats(data$y4, mean)
y4_var  <- colStats(data$y4, var)
cat("X1 Mean =", x1_mean[1], "X1 Var =", x1_var[1],"\n")
## X1 Mean = 9 X1 Var = 11
cat("X2 Mean =", x2_mean[1], "X2 Var =", x2_var[1],"\n")
## X2 Mean = 9 X2 Var = 11
cat("X3 Mean =", x3_mean[1], "X3 Var =", x3_var[1],"\n")
## X3 Mean = 9 X3 Var = 11
cat("X4 Mean =", x4_mean[1], "X4 Var =", x4_var[1],"\n")
## X4 Mean = 9 X4 Var = 11
cat("Y1 Mean =", y1_mean[1], "Y1 Var =", y1_var[1],"\n")
## Y1 Mean = 7.500909 Y1 Var = 4.127269
cat("Y2 Mean =", y2_mean[1], "Y2 Var =", y2_var[1],"\n")
## Y2 Mean = 7.500909 Y2 Var = 4.127629
cat("Y3 Mean =", y3_mean[1], "Y3 Var =", y3_var[1],"\n")
## Y3 Mean = 7.5 Y3 Var = 4.12262
cat("Y4 Mean =", y4_mean[1], "Y4 Var =", y4_var[1],"\n")
## Y4 Mean = 7.500909 Y4 Var = 4.123249
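The same summaries can also be computed more compactly in base R. The sketch below (an equivalent alternative, not part of the fBasics approach above) applies mean() and var() across every column of the data frame at once.

# Equivalent base-R one-liners: apply mean() and var() over all columns
sapply(data, mean)
sapply(data, var)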
cat("\n Correlation between X1 & Y1 \n")
##
## Correlation between X1 & Y1
cor.test(data$x1, data$y1, method = "pearson")
##
## Pearson's product-moment correlation
##
## data: data$x1 and data$y1
## t = 4.2415, df = 9, p-value = 0.00217
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.4243912 0.9506933
## sample estimates:
## cor
## 0.8164205
cat("\n Correlation between X2 & Y2 \n")
##
## Correlation between X2 & Y2
cor.test(data$x2, data$y2, method = "pearson")
##
## Pearson's product-moment correlation
##
## data: data$x2 and data$y2
## t = 4.2386, df = 9, p-value = 0.002179
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.4239389 0.9506402
## sample estimates:
## cor
## 0.8162365
cat("\n Correlation between X3 & Y3 \n")
##
## Correlation between X3 & Y3
cor.test(data$x3, data$y3, method = "pearson")
##
## Pearson's product-moment correlation
##
## data: data$x3 and data$y3
## t = 4.2394, df = 9, p-value = 0.002176
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.4240623 0.9506547
## sample estimates:
## cor
## 0.8162867
cat("\n Correlation between X4 & Y4 \n")
##
## Correlation between X4 & Y4
cor.test(data$x4, data$y4, method = "pearson")
##
## Pearson's product-moment correlation
##
## data: data$x4 and data$y4
## t = 4.243, df = 9, p-value = 0.002165
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.4246394 0.9507224
## sample estimates:
## cor
## 0.8165214
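All four tests use the Pearson method and return nearly identical coefficients (about 0.816). As a compact check, the sketch below collects the four Pearson correlations in a single vector; it assumes only the x1..x4 / y1..y4 column-naming pattern of anscombe.

# Pearson correlation for each of the four x/y pairs
sapply(1:4, function(i) cor(data[[paste0("x", i)]], data[[paste0("y", i)]]))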
library(ggplot2)
x1y1 <- ggplot(data, aes(x = x1, y = y1)) +
  geom_point() +
  xlab("X1") +
  ylab("Y1")
x1y1 + labs(title = "Relationship between X1 & Y1")
x2y2 <- ggplot(data, aes(x = x2, y = y2)) +
  geom_point() +
  xlab("X2") +
  ylab("Y2")
x2y2 + labs(title = "Relationship between X2 & Y2")
x3y3 <- ggplot(data, aes(x = x3, y = y3)) +
  geom_point() +
  xlab("X3") +
  ylab("Y3")
x3y3 + labs(title = "Relationship between X3 & Y3")
x4y4 <- ggplot(data, aes(x = x4, y = y4)) +
  geom_point() +
  xlab("X4") +
  ylab("Y4")
x4y4 + labs(title = "Relationship between X4 & Y4")
library(gridExtra)
p1 <- ggplot(data, aes(x1, y1)) + geom_point()
p2 <- ggplot(data, aes(x2, y2)) + geom_point()
p3 <- ggplot(data, aes(x3, y3)) + geom_point()
p4 <- ggplot(data, aes(x4, y4)) + geom_point()
grid.arrange(p1,p2,p3,p4, ncol = 2)
Fit a linear model to each data set pair using the lm() function.

lm1 <- lm(data$y1 ~ data$x1)
cat("Data Set X1 & Y1")
## Data Set X1 & Y1
lm1
##
## Call:
## lm(formula = data$y1 ~ data$x1)
##
## Coefficients:
## (Intercept) data$x1
## 3.0001 0.5001
lm2 <- lm(data$y2 ~ data$x2)
cat("Data Set X2 & Y2")
## Data Set X2 & Y2
lm2
##
## Call:
## lm(formula = data$y2 ~ data$x2)
##
## Coefficients:
## (Intercept) data$x2
## 3.001 0.500
lm3 <- lm(data$y3 ~ data$x3)
cat("Data Set X3 & Y3")
## Data Set X3 & Y3
lm3
##
## Call:
## lm(formula = data$y3 ~ data$x3)
##
## Coefficients:
## (Intercept) data$x3
## 3.0025 0.4997
lm4 <- lm(data$y4 ~ data$x4)
cat("Data Set X4 & Y4")
## Data Set X4 & Y4
lm4
##
## Call:
## lm(formula = data$y4 ~ data$x4)
##
## Coefficients:
## (Intercept) data$x4
## 3.0017 0.4999
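All four fits land on essentially the same line, with an intercept near 3.00 and a slope near 0.50. A quick sketch to place the coefficients side by side (assuming the lm1 through lm4 objects fitted above):

# Collect intercept and slope from each model into a 2 x 4 matrix
sapply(list(lm1, lm2, lm3, lm4), coef)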
p1 <- ggplot(data, aes(x1, y1)) + geom_point() +
  stat_smooth(method = "lm", col = "blue")
p1
p2 <- ggplot(data, aes(x2, y2)) + geom_point() +
  stat_smooth(method = "lm", col = "blue")
p2
p3 <- ggplot(data, aes(x3, y3)) + geom_point() +
  stat_smooth(method = "lm", col = "blue")
p3
p4 <- ggplot(data, aes(x4, y4)) + geom_point() +
  stat_smooth(method = "lm", col = "blue")
p4
grid.arrange(p1,p2,p3,p4, ncol = 2)
anova(lm1)
## Analysis of Variance Table
##
## Response: data$y1
##           Df Sum Sq Mean Sq F value  Pr(>F)
## data$x1    1 27.510 27.5100   17.99 0.00217 **
## Residuals  9 13.763  1.5292
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
anova(lm2)
## Analysis of Variance Table
##
## Response: data$y2
##           Df Sum Sq Mean Sq F value   Pr(>F)
## data$x2    1 27.500 27.5000  17.966 0.002179 **
## Residuals  9 13.776  1.5307
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
anova(lm3)
## Analysis of Variance Table
##
## Response: data$y3
##           Df Sum Sq Mean Sq F value   Pr(>F)
## data$x3    1 27.470 27.4700  17.972 0.002176 **
## Residuals  9 13.756  1.5285
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
anova(lm4)
## Analysis of Variance Table
##
## Response: data$y4
##           Df Sum Sq Mean Sq F value   Pr(>F)
## data$x4    1 27.490 27.4900  18.003 0.002165 **
## Residuals  9 13.742  1.5269
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
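The four ANOVA tables are almost indistinguishable as well. As a final sketch (again assuming lm1 through lm4 from above), the R-squared of each model can be extracted programmatically; all come out to roughly 0.67.

# R-squared for each of the four regressions
sapply(list(lm1, lm2, lm3, lm4), function(m) summary(m)$r.squared)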
Anscombe’s quartet is a good example of the value of data visualization. Judging only by the summary statistics, the four data sets look very similar: nearly identical means, variances, correlations, regression coefficients, and ANOVA tables. But once each x/y pair is plotted, it becomes clear how different they really are; one is roughly linear, one is curved, and two are dominated by a single outlier.