Part 1: Linear Regression
Understanding regression
getwd()
## [1] "C:/Users/n0910803/Documents"
## Example: Space Shuttle Launch Data
launch <- read.csv("challenger.csv")
# estimate beta manually
b <- cov(launch$temperature, launch$distress_ct) / var(launch$temperature)
b
## [1] -0.04753968
# estimate alpha manually
a <- mean(launch$distress_ct) - b * mean(launch$temperature)
a
## [1] 3.698413
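# a quick sanity check (not in the original script): using the manual estimates a and b
# to predict the expected number of distress events at a hypothetical launch temperature
# of 31 degrees F
a + b * 31   # about 2.22 expected O-ring distress events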
# calculate the correlation of launch data
r <- cov(launch$temperature, launch$distress_ct) /
(sd(launch$temperature) * sd(launch$distress_ct))
r
## [1] -0.5111264
#A correlation of about -0.51 indicates a moderate negative linear relationship: distress events tend to decrease as temperature rises. Note that r itself is not the share of variance explained; that is r^2, roughly 26%, as computed below.
cor(launch$temperature, launch$distress_ct)
## [1] -0.5111264
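# squaring the correlation gives the proportion of variance in distress counts explained
# by temperature (a quick check, not in the original script)
cor(launch$temperature, launch$distress_ct)^2   # about 0.26, matching the Multiple R-squared from summary(model) below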
# computing the slope using correlation
r * (sd(launch$distress_ct) / sd(launch$temperature))
## [1] -0.04753968
# confirming the regression line using the lm function (not in text)
model <- lm(distress_ct ~ temperature, data = launch)
model
##
## Call:
## lm(formula = distress_ct ~ temperature, data = launch)
##
## Coefficients:
## (Intercept) temperature
## 3.69841 -0.04754
summary(model)
##
## Call:
## lm(formula = distress_ct ~ temperature, data = launch)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.5608 -0.3944 -0.0854 0.1056 1.8671
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.69841 1.21951 3.033 0.00633 **
## temperature -0.04754 0.01744 -2.725 0.01268 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.5774 on 21 degrees of freedom
## Multiple R-squared: 0.2613, Adjusted R-squared: 0.2261
## F-statistic: 7.426 on 1 and 21 DF, p-value: 0.01268
# creating a simple multiple regression function
reg <- function(y, x) {
  x <- as.matrix(x)
  x <- cbind(Intercept = 1, x)            # prepend a column of 1s for the intercept
  b <- solve(t(x) %*% x) %*% t(x) %*% y   # normal equations: b = (X'X)^-1 X'y
  colnames(b) <- "estimate"
  print(b)
}
# examine the launch data
str(launch)
## 'data.frame': 23 obs. of 4 variables:
## $ distress_ct : int 0 1 0 0 0 0 0 0 1 1 ...
## $ temperature : int 66 70 69 68 67 72 73 70 57 63 ...
## $ field_check_pressure: int 50 50 50 50 50 50 100 100 200 200 ...
## $ flight_num : int 1 2 3 4 5 6 7 8 9 10 ...
# test regression model with simple linear regression
reg(y = launch$distress_ct, x = launch[2])
## estimate
## Intercept 3.69841270
## temperature -0.04753968
# use regression model with multiple regression
reg(y = launch$distress_ct, x = launch[2:4])
## estimate
## Intercept 3.527093383
## temperature -0.051385940
## field_check_pressure 0.001757009
## flight_num 0.014292843
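# a sketch (not in the original script): fitted distress counts from the manual estimates,
# using the same design matrix that reg() builds internally; X and b_hat are new names
# introduced here for illustration
X <- cbind(Intercept = 1, as.matrix(launch[2:4]))
b_hat <- solve(t(X) %*% X) %*% t(X) %*% launch$distress_ct
head(X %*% b_hat)   # first few fitted values (output not shown)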
# confirming the multiple regression result using the lm function (not in text)
model <- lm(distress_ct ~ temperature + field_check_pressure + flight_num, data = launch)
model
##
## Call:
## lm(formula = distress_ct ~ temperature + field_check_pressure +
## flight_num, data = launch)
##
## Coefficients:
## (Intercept) temperature field_check_pressure
## 3.527093 -0.051386 0.001757
## flight_num
## 0.014293
Predicting Medical Expenses
## Step 2: Exploring and preparing the data ----
insurance <- read.csv("insurance.csv", stringsAsFactors = TRUE)
str(insurance)
## 'data.frame': 1338 obs. of 7 variables:
## $ age : int 19 18 28 33 32 31 46 37 37 60 ...
## $ sex : Factor w/ 2 levels "female","male": 1 2 2 2 2 1 1 1 2 1 ...
## $ bmi : num 27.9 33.8 33 22.7 28.9 25.7 33.4 27.7 29.8 25.8 ...
## $ children: int 0 1 3 0 0 0 1 3 2 0 ...
## $ smoker : Factor w/ 2 levels "no","yes": 2 1 1 1 1 1 1 1 1 1 ...
## $ region : Factor w/ 4 levels "northeast","northwest",..: 4 3 3 2 2 3 3 2 1 2 ...
## $ expenses: num 16885 1726 4449 21984 3867 ...
# summarize the expenses variable
summary(insurance$expenses)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1122 4740 9382 13270 16640 63770
# histogram of insurance expenses
hist(insurance$expenses)

# table of region
table(insurance$region)
##
## northeast northwest southeast southwest
## 324 325 364 325
# exploring relationships among features: correlation matrix
cor(insurance[c("age", "bmi", "children", "expenses")])
## age bmi children expenses
## age 1.0000000 0.10934101 0.04246900 0.29900819
## bmi 0.1093410 1.00000000 0.01264471 0.19857626
## children 0.0424690 0.01264471 1.00000000 0.06799823
## expenses 0.2990082 0.19857626 0.06799823 1.00000000
#Example: age has the strongest correlation with expenses (r = 0.30), but that corresponds to only about 9% of the variance explained (r^2 of roughly 0.09); none of these pairwise correlations are strong.
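# a quick check (not in the original script): squaring the age-expenses correlation gives
# the share of variance in expenses explained by age alone
cor(insurance$age, insurance$expenses)^2   # about 0.09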
# visualizing relationships among features: scatterplot matrix
pairs(insurance[c("age", "bmi", "children", "expenses")])

## Step 3: Training a model on the data ----
ins_model <- lm(expenses ~ age + children + bmi + sex + smoker + region,
data = insurance)
ins_model <- lm(expenses ~ ., data = insurance) # this is equivalent to above
# see the estimated beta coefficients
ins_model
##
## Call:
## lm(formula = expenses ~ ., data = insurance)
##
## Coefficients:
## (Intercept) age sexmale bmi
## -11941.6 256.8 -131.4 339.3
## children smokeryes regionnorthwest regionsoutheast
## 475.7 23847.5 -352.8 -1035.6
## regionsouthwest
## -959.3
#the '.' in the formula stands for all remaining variables in the data frame. We also avoid attach()-ing the data, since attaching can mask other objects and lead to hard-to-trace errors.
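# before improving the model (Step 4 of this workflow would evaluate it), the baseline fit
# can be inspected; output not shown here
summary(ins_model)   # reports the baseline R-squared to compare against ins_model2 below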
Step 5: Improving model performance
# add a higher-order "age" term
insurance$age2 <- insurance$age^2
#we create a new variable, age2 (age squared), to allow for a non-linear effect of age and try to improve the model.
# add an indicator for BMI >= 30
insurance$bmi30 <- ifelse(insurance$bmi >= 30, 1, 0)
#instead of using bmi only as a numeric variable, we add a binary indicator: a BMI of 30 or above flags the higher-risk (obese) group.
#bmi30 is therefore categorical: 1 = BMI greater than or equal to 30 (high risk), 0 = BMI below 30.
# create final model
ins_model2 <- lm(expenses ~ age + age2 + children + bmi + sex +
bmi30*smoker + region, data = insurance)
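# note (not in the original script): bmi30*smoker expands to bmi30 + smoker + bmi30:smoker,
# i.e. both main effects plus their interaction; an explicit, equivalent formula would be
ins_model2b <- lm(expenses ~ age + age2 + children + bmi + sex +
                    bmi30 + smoker + bmi30:smoker + region, data = insurance)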
summary(ins_model2)
##
## Call:
## lm(formula = expenses ~ age + age2 + children + bmi + sex + bmi30 *
## smoker + region, data = insurance)
##
## Residuals:
## Min 1Q Median 3Q Max
## -17297.1 -1656.0 -1262.7 -727.8 24161.6
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 139.0053 1363.1359 0.102 0.918792
## age -32.6181 59.8250 -0.545 0.585690
## age2 3.7307 0.7463 4.999 6.54e-07 ***
## children 678.6017 105.8855 6.409 2.03e-10 ***
## bmi 119.7715 34.2796 3.494 0.000492 ***
## sexmale -496.7690 244.3713 -2.033 0.042267 *
## bmi30 -997.9355 422.9607 -2.359 0.018449 *
## smokeryes 13404.5952 439.9591 30.468 < 2e-16 ***
## regionnorthwest -279.1661 349.2826 -0.799 0.424285
## regionsoutheast -828.0345 351.6484 -2.355 0.018682 *
## regionsouthwest -1222.1619 350.5314 -3.487 0.000505 ***
## bmi30:smokeryes 19810.1534 604.6769 32.762 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 4445 on 1326 degrees of freedom
## Multiple R-squared: 0.8664, Adjusted R-squared: 0.8653
## F-statistic: 781.7 on 11 and 1326 DF, p-value: < 2.2e-16
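# reading the interaction (a quick check, not in the original script): for an obese smoker
# the bmi30, smokeryes, and bmi30:smokeryes terms all apply, adding roughly
# -998 + 13405 + 19810, about 32,217, to the predicted expenses
sum(coef(ins_model2)[c("bmi30", "smokeryes", "bmi30:smokeryes")])   # about 32217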
# making predictions with the regression model
insurance$pred <- predict(ins_model2, insurance)
cor(insurance$pred, insurance$expenses)
## [1] 0.9307999
plot(insurance$pred, insurance$expenses)
abline(a = 0, b = 1, col = "red", lwd = 3, lty = 2)
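# simple training-set error summaries (not in the original script); output not shown
mean(abs(insurance$pred - insurance$expenses))        # mean absolute error
sqrt(mean((insurance$pred - insurance$expenses)^2))   # root mean squared error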

predict(ins_model2,
data.frame(age = 30, age2 = 30^2, children = 2,
bmi = 30, sex = "male", bmi30 = 1,
smoker = "no", region = "northeast"))
## 1
## 5973.774
predict(ins_model2,
data.frame(age = 30, age2 = 30^2, children = 2,
bmi = 30, sex = "female", bmi30 = 1,
smoker = "no", region = "northeast"))
## 1
## 6470.543
predict(ins_model2,
data.frame(age = 30, age2 = 30^2, children = 0,
bmi = 30, sex = "female", bmi30 = 1,
smoker = "no", region = "northeast"))
## 1
## 5113.34
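# quick checks (not in the original script): the gap between the female and male predictions
# above (6470.54 - 5973.77, about 496.77) matches the magnitude of the sexmale coefficient,
# and dropping from two children to none lowers the prediction by 2 * 678.60, about 1357.20
coef(ins_model2)["sexmale"]        # about -496.77
2 * coef(ins_model2)["children"]   # about 1357.20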
#testing for myself
predict(ins_model2,
data.frame(age = 29, age2 = 29^2, children = 0,
bmi = 19, sex = "male", bmi30 = 0,
smoker = "no", region = "southeast"))
## 1
## 3281.49
#testing for myself as a smoker (and with two children this time)
predict(ins_model2,
data.frame(age = 29, age2 = 29^2, children = 2,
bmi = 19, sex = "male", bmi30 = 0,
smoker = "yes", region = "southeast"))
## 1
## 18043.29
#switching from non-smoker to smoker (and adding two children) raises the predicted expenses dramatically; smoking alone accounts for most of the jump, as the check below shows
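# isolating the smoking effect (a sketch, not in the original script): the same profile with
# only smoker toggled; since bmi30 = 0 the interaction drops out and the difference equals
# the smokeryes coefficient. me and me_smoker are new names introduced for illustration.
me <- data.frame(age = 29, age2 = 29^2, children = 0, bmi = 19,
                 sex = "male", bmi30 = 0, smoker = "no", region = "southeast")
me_smoker <- me
me_smoker$smoker <- "yes"
predict(ins_model2, me_smoker) - predict(ins_model2, me)   # about 13404.6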
#testing for my mom
predict(ins_model2,
data.frame(age = 65, age2 = 65^2, children = 2,
bmi = 28, sex = "female", bmi30 = 0,
smoker = "no", region = "southeast"))
## 1
## 17663.99
#testing for my mom in another region
predict(ins_model2,
data.frame(age = 65, age2 = 65^2, children = 2,
bmi = 28, sex = "female", bmi30 = 0,
smoker = "no", region = "southwest"))
## 1
## 17269.86
# Case 1
predict(ins_model2,
data.frame(age = 22, age2 = 22^2, children = 3,
bmi = 24, sex = "female", bmi30 = 0,
smoker = "no", region = "northeast"))
## 1
## 6137.407
# Case 2
predict(ins_model2,
data.frame(age = 22, age2 = 22^2, children = 1,
bmi = 27, sex = "male", bmi30 = 0,
smoker = "yes", region = "southeast"))
## 1
## 17219.31