library(tidyverse)
## -- Attaching packages --------------------------------------- tidyverse 1.3.0 --
## v ggplot2 3.3.0 v purrr 0.3.4
## v tibble 3.0.1 v dplyr 0.8.5
## v tidyr 1.0.2 v stringr 1.4.0
## v readr 1.3.1 v forcats 0.5.0
## -- Conflicts ------------------------------------------ tidyverse_conflicts() --
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
# Set seed
set.seed(42)
# Set number of draws, maximum for the uniform draws, mean and standard deviation for the normal draws
draws<-10000
N<-7
avg<-(N+1)/2
stdev<-(N+1)/2
# Generate random uniform variable
X<-runif(draws,1,N)
length(X)
## [1] 10000
# Generate random normal variable
Y<-rnorm(draws,mean=avg,sd=stdev)
(x<-median(X))
## [1] 3.999024
(y<-quantile(Y,0.25))
## 25%
## 1.261755
Combine the variables into a data frame and add logical (boolean) columns that store the results of the greater-than and less-than comparisons.
df<-data.frame(X,Y)
df$Xgtx<-(X>x)
df$Xgty<-(X>y)
df$Xltx<-(X<x)
df$Ygty<-(Y>y)
df$Xlty<-(X<y)
df$Ylty<-(Y<y)
df$Yltx<-(Y<x)
df$Ygtx<-(Y>x)
dim(df)
## [1] 10000 10
head(df)
## X Y Xgtx Xgty Xltx Ygty Xlty Ylty Yltx Ygtx
## 1 6.488836 4.284890 TRUE TRUE FALSE TRUE FALSE FALSE FALSE TRUE
## 2 6.622452 7.881160 TRUE TRUE FALSE TRUE FALSE FALSE FALSE TRUE
## 3 2.716837 5.240141 FALSE TRUE TRUE TRUE FALSE FALSE FALSE TRUE
## 4 5.982686 3.441806 TRUE TRUE FALSE TRUE FALSE FALSE TRUE FALSE
## 5 4.850473 2.694755 TRUE TRUE FALSE TRUE FALSE FALSE TRUE FALSE
## 6 4.114576 3.524762 TRUE TRUE FALSE TRUE FALSE FALSE TRUE FALSE
From Bayes' theorem (equivalently, the definition of conditional probability), P(X>x | X>y) = P(X>x and X>y) / P(X>y).
(p.Xgtx.given.Xgty<-sum(X>x&X>y)/sum(X>y))
## [1] 0.5244389
For P(X>x, Y>y): since X and Y are independent, this is the product of the marginal probabilities P(X>x) and P(Y>y).
(sum(X>x)/length(X))*(sum(Y>y)/length(Y))
## [1] 0.375
For P(X<x|X>y)
(p.Xltx.given.Xgty<-sum(X<x&X>y)/sum(X>y))
## [1] 0.4755611
# This is the complement of the event in part (a) above, so it should equal 1 minus the probability computed there
(1-p.Xgtx.given.Xgty)
## [1] 0.4755611
(sum(X>x&Y>y)/length(X))
## [1] 0.3732
#length(df$Xgtx)
#length(df$Xgty)
tb<-table(df$Xgtx, df$Ygty, dnn=c('Xgtx','Ygty'))
tb
## Ygty
## Xgtx FALSE TRUE
## FALSE 1232 3768
## TRUE 1268 3732
prop.tb<-prop.table(tb)
prop.tb
## Ygty
## Xgtx FALSE TRUE
## FALSE 0.1232 0.3768
## TRUE 0.1268 0.3732
prob.Xgtx.and.Ygty<-prop.tb[2,2] # Joint probability
prob.Xgtx<-prop.tb[2,1]+prop.tb[2,2] # Marginal probability
prob.Ygty<-prop.tb[1,2]+prop.tb[2,2] # Marginal probability
#(prop.tb[2,1]+prop.tb[2,2])*(prop.tb[1,2]+prop.tb[2,2])
(prob.Xgtx.and.Ygty)
## [1] 0.3732
(prob.Xgtx*prob.Ygty)
## [1] 0.375
This is a check for the independence of the two events. From the above, the joint probability of X greater than x and Y greater than y, 0.3732, is close to but not exactly equal to the product of the two marginal probabilities, 0.375. The two events are independent by construction, so the small discrepancy simply reflects sampling variation in the random draws.
# Fisher's test
fisher.test(tb)
##
## Fisher's Exact Test for Count Data
##
## data: tb
## p-value = 0.4189
## alternative hypothesis: true odds ratio is not equal to 1
## 95 percent confidence interval:
## 0.8780932 1.0546192
## sample estimates:
## odds ratio
## 0.9623146
#Chi square test
chisq.test(tb)
##
## Pearson's Chi-squared test with Yates' continuity correction
##
## data: tb
## X-squared = 0.65333, df = 1, p-value = 0.4189
Both tests give a p-value of 0.4189, well above 0.05, so the null hypothesis that the two events are independent cannot be rejected, as expected. Fisher's exact test is appropriate for small sample sizes. In this case the Chi-squared test is the more appropriate choice because the sample size is large: the smallest marginal probability is 0.25, corresponding to 2,500 observations with Y less than y, so every expected cell count is far above the usual minimum of 5 required for the Chi-squared approximation.
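The rule of thumb about expected counts can be checked directly from the test object; a quick check using the tb table from above:
# Inspect the expected cell counts under independence; all should comfortably exceed 5
chisq.test(tb)$expected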
#Load packages
library(tidyverse)
library(corrplot)
## corrplot 0.84 loaded
library(MASS)
##
## Attaching package: 'MASS'
## The following object is masked from 'package:dplyr':
##
## select
library(gmodels)
library(psych)
##
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
library(matrixcalc)
library(Hmisc)
## Loading required package: lattice
## Loading required package: survival
## Loading required package: Formula
##
## Attaching package: 'Hmisc'
## The following object is masked from 'package:psych':
##
## describe
## The following objects are masked from 'package:dplyr':
##
## src, summarize
## The following objects are masked from 'package:base':
##
## format.pval, units
library(PerformanceAnalytics)
## Loading required package: xts
## Loading required package: zoo
##
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
##
## Attaching package: 'xts'
## The following objects are masked from 'package:dplyr':
##
## first, last
##
## Attaching package: 'PerformanceAnalytics'
## The following object is masked from 'package:graphics':
##
## legend
library(gmodels)
library(pracma)
##
## Attaching package: 'pracma'
## The following object is masked from 'package:Hmisc':
##
## ceil
## The following objects are masked from 'package:psych':
##
## logit, polar
## The following object is masked from 'package:purrr':
##
## cross
#Load the training dataset from Kaggle
train<-read.csv("C:/Jagdish/MastersPrograms/CUNY/DS605 Computational Mathematics/Final Project/train.csv")
# Conduct basic checks on the size and structure of the data
dim(train)
## [1] 1460 81
str(train)
## 'data.frame': 1460 obs. of 81 variables:
## $ Id : int 1 2 3 4 5 6 7 8 9 10 ...
## $ MSSubClass : int 60 20 60 70 60 50 20 60 50 190 ...
## $ MSZoning : Factor w/ 5 levels "C (all)","FV",..: 4 4 4 4 4 4 4 4 5 4 ...
## $ LotFrontage : int 65 80 68 60 84 85 75 NA 51 50 ...
## $ LotArea : int 8450 9600 11250 9550 14260 14115 10084 10382 6120 7420 ...
## $ Street : Factor w/ 2 levels "Grvl","Pave": 2 2 2 2 2 2 2 2 2 2 ...
## $ Alley : Factor w/ 2 levels "Grvl","Pave": NA NA NA NA NA NA NA NA NA NA ...
## $ LotShape : Factor w/ 4 levels "IR1","IR2","IR3",..: 4 4 1 1 1 1 4 1 4 4 ...
## $ LandContour : Factor w/ 4 levels "Bnk","HLS","Low",..: 4 4 4 4 4 4 4 4 4 4 ...
## $ Utilities : Factor w/ 2 levels "AllPub","NoSeWa": 1 1 1 1 1 1 1 1 1 1 ...
## $ LotConfig : Factor w/ 5 levels "Corner","CulDSac",..: 5 3 5 1 3 5 5 1 5 1 ...
## $ LandSlope : Factor w/ 3 levels "Gtl","Mod","Sev": 1 1 1 1 1 1 1 1 1 1 ...
## $ Neighborhood : Factor w/ 25 levels "Blmngtn","Blueste",..: 6 25 6 7 14 12 21 17 18 4 ...
## $ Condition1 : Factor w/ 9 levels "Artery","Feedr",..: 3 2 3 3 3 3 3 5 1 1 ...
## $ Condition2 : Factor w/ 8 levels "Artery","Feedr",..: 3 3 3 3 3 3 3 3 3 1 ...
## $ BldgType : Factor w/ 5 levels "1Fam","2fmCon",..: 1 1 1 1 1 1 1 1 1 2 ...
## $ HouseStyle : Factor w/ 8 levels "1.5Fin","1.5Unf",..: 6 3 6 6 6 1 3 6 1 2 ...
## $ OverallQual : int 7 6 7 7 8 5 8 7 7 5 ...
## $ OverallCond : int 5 8 5 5 5 5 5 6 5 6 ...
## $ YearBuilt : int 2003 1976 2001 1915 2000 1993 2004 1973 1931 1939 ...
## $ YearRemodAdd : int 2003 1976 2002 1970 2000 1995 2005 1973 1950 1950 ...
## $ RoofStyle : Factor w/ 6 levels "Flat","Gable",..: 2 2 2 2 2 2 2 2 2 2 ...
## $ RoofMatl : Factor w/ 8 levels "ClyTile","CompShg",..: 2 2 2 2 2 2 2 2 2 2 ...
## $ Exterior1st : Factor w/ 15 levels "AsbShng","AsphShn",..: 13 9 13 14 13 13 13 7 4 9 ...
## $ Exterior2nd : Factor w/ 16 levels "AsbShng","AsphShn",..: 14 9 14 16 14 14 14 7 16 9 ...
## $ MasVnrType : Factor w/ 4 levels "BrkCmn","BrkFace",..: 2 3 2 3 2 3 4 4 3 3 ...
## $ MasVnrArea : int 196 0 162 0 350 0 186 240 0 0 ...
## $ ExterQual : Factor w/ 4 levels "Ex","Fa","Gd",..: 3 4 3 4 3 4 3 4 4 4 ...
## $ ExterCond : Factor w/ 5 levels "Ex","Fa","Gd",..: 5 5 5 5 5 5 5 5 5 5 ...
## $ Foundation : Factor w/ 6 levels "BrkTil","CBlock",..: 3 2 3 1 3 6 3 2 1 1 ...
## $ BsmtQual : Factor w/ 4 levels "Ex","Fa","Gd",..: 3 3 3 4 3 3 1 3 4 4 ...
## $ BsmtCond : Factor w/ 4 levels "Fa","Gd","Po",..: 4 4 4 2 4 4 4 4 4 4 ...
## $ BsmtExposure : Factor w/ 4 levels "Av","Gd","Mn",..: 4 2 3 4 1 4 1 3 4 4 ...
## $ BsmtFinType1 : Factor w/ 6 levels "ALQ","BLQ","GLQ",..: 3 1 3 1 3 3 3 1 6 3 ...
## $ BsmtFinSF1 : int 706 978 486 216 655 732 1369 859 0 851 ...
## $ BsmtFinType2 : Factor w/ 6 levels "ALQ","BLQ","GLQ",..: 6 6 6 6 6 6 6 2 6 6 ...
## $ BsmtFinSF2 : int 0 0 0 0 0 0 0 32 0 0 ...
## $ BsmtUnfSF : int 150 284 434 540 490 64 317 216 952 140 ...
## $ TotalBsmtSF : int 856 1262 920 756 1145 796 1686 1107 952 991 ...
## $ Heating : Factor w/ 6 levels "Floor","GasA",..: 2 2 2 2 2 2 2 2 2 2 ...
## $ HeatingQC : Factor w/ 5 levels "Ex","Fa","Gd",..: 1 1 1 3 1 1 1 1 3 1 ...
## $ CentralAir : Factor w/ 2 levels "N","Y": 2 2 2 2 2 2 2 2 2 2 ...
## $ Electrical : Factor w/ 5 levels "FuseA","FuseF",..: 5 5 5 5 5 5 5 5 2 5 ...
## $ X1stFlrSF : int 856 1262 920 961 1145 796 1694 1107 1022 1077 ...
## $ X2ndFlrSF : int 854 0 866 756 1053 566 0 983 752 0 ...
## $ LowQualFinSF : int 0 0 0 0 0 0 0 0 0 0 ...
## $ GrLivArea : int 1710 1262 1786 1717 2198 1362 1694 2090 1774 1077 ...
## $ BsmtFullBath : int 1 0 1 1 1 1 1 1 0 1 ...
## $ BsmtHalfBath : int 0 1 0 0 0 0 0 0 0 0 ...
## $ FullBath : int 2 2 2 1 2 1 2 2 2 1 ...
## $ HalfBath : int 1 0 1 0 1 1 0 1 0 0 ...
## $ BedroomAbvGr : int 3 3 3 3 4 1 3 3 2 2 ...
## $ KitchenAbvGr : int 1 1 1 1 1 1 1 1 2 2 ...
## $ KitchenQual : Factor w/ 4 levels "Ex","Fa","Gd",..: 3 4 3 3 3 4 3 4 4 4 ...
## $ TotRmsAbvGrd : int 8 6 6 7 9 5 7 7 8 5 ...
## $ Functional : Factor w/ 7 levels "Maj1","Maj2",..: 7 7 7 7 7 7 7 7 3 7 ...
## $ Fireplaces : int 0 1 1 1 1 0 1 2 2 2 ...
## $ FireplaceQu : Factor w/ 5 levels "Ex","Fa","Gd",..: NA 5 5 3 5 NA 3 5 5 5 ...
## $ GarageType : Factor w/ 6 levels "2Types","Attchd",..: 2 2 2 6 2 2 2 2 6 2 ...
## $ GarageYrBlt : int 2003 1976 2001 1998 2000 1993 2004 1973 1931 1939 ...
## $ GarageFinish : Factor w/ 3 levels "Fin","RFn","Unf": 2 2 2 3 2 3 2 2 3 2 ...
## $ GarageCars : int 2 2 2 3 3 2 2 2 2 1 ...
## $ GarageArea : int 548 460 608 642 836 480 636 484 468 205 ...
## $ GarageQual : Factor w/ 5 levels "Ex","Fa","Gd",..: 5 5 5 5 5 5 5 5 2 3 ...
## $ GarageCond : Factor w/ 5 levels "Ex","Fa","Gd",..: 5 5 5 5 5 5 5 5 5 5 ...
## $ PavedDrive : Factor w/ 3 levels "N","P","Y": 3 3 3 3 3 3 3 3 3 3 ...
## $ WoodDeckSF : int 0 298 0 0 192 40 255 235 90 0 ...
## $ OpenPorchSF : int 61 0 42 35 84 30 57 204 0 4 ...
## $ EnclosedPorch: int 0 0 0 272 0 0 0 228 205 0 ...
## $ X3SsnPorch : int 0 0 0 0 0 320 0 0 0 0 ...
## $ ScreenPorch : int 0 0 0 0 0 0 0 0 0 0 ...
## $ PoolArea : int 0 0 0 0 0 0 0 0 0 0 ...
## $ PoolQC : Factor w/ 3 levels "Ex","Fa","Gd": NA NA NA NA NA NA NA NA NA NA ...
## $ Fence : Factor w/ 4 levels "GdPrv","GdWo",..: NA NA NA NA NA 3 NA NA NA NA ...
## $ MiscFeature : Factor w/ 4 levels "Gar2","Othr",..: NA NA NA NA NA 3 NA 3 NA NA ...
## $ MiscVal : int 0 0 0 0 0 700 0 350 0 0 ...
## $ MoSold : int 2 5 9 2 12 10 8 11 4 1 ...
## $ YrSold : int 2008 2007 2008 2006 2008 2009 2007 2009 2008 2008 ...
## $ SaleType : Factor w/ 9 levels "COD","Con","ConLD",..: 9 9 9 9 9 9 9 9 9 9 ...
## $ SaleCondition: Factor w/ 6 levels "Abnorml","AdjLand",..: 5 5 5 1 5 5 5 5 1 5 ...
## $ SalePrice : int 208500 181500 223500 140000 250000 143000 307000 200000 129900 118000 ...
summary(train)
## Id MSSubClass MSZoning LotFrontage
## Min. : 1.0 Min. : 20.0 C (all): 10 Min. : 21.00
## 1st Qu.: 365.8 1st Qu.: 20.0 FV : 65 1st Qu.: 59.00
## Median : 730.5 Median : 50.0 RH : 16 Median : 69.00
## Mean : 730.5 Mean : 56.9 RL :1151 Mean : 70.05
## 3rd Qu.:1095.2 3rd Qu.: 70.0 RM : 218 3rd Qu.: 80.00
## Max. :1460.0 Max. :190.0 Max. :313.00
## NA's :259
## LotArea Street Alley LotShape LandContour
## Min. : 1300 Grvl: 6 Grvl: 50 IR1:484 Bnk: 63
## 1st Qu.: 7554 Pave:1454 Pave: 41 IR2: 41 HLS: 50
## Median : 9478 NA's:1369 IR3: 10 Low: 36
## Mean : 10517 Reg:925 Lvl:1311
## 3rd Qu.: 11602
## Max. :215245
##
## Utilities LotConfig LandSlope Neighborhood Condition1
## AllPub:1459 Corner : 263 Gtl:1382 NAmes :225 Norm :1260
## NoSeWa: 1 CulDSac: 94 Mod: 65 CollgCr:150 Feedr : 81
## FR2 : 47 Sev: 13 OldTown:113 Artery : 48
## FR3 : 4 Edwards:100 RRAn : 26
## Inside :1052 Somerst: 86 PosN : 19
## Gilbert: 79 RRAe : 11
## (Other):707 (Other): 15
## Condition2 BldgType HouseStyle OverallQual
## Norm :1445 1Fam :1220 1Story :726 Min. : 1.000
## Feedr : 6 2fmCon: 31 2Story :445 1st Qu.: 5.000
## Artery : 2 Duplex: 52 1.5Fin :154 Median : 6.000
## PosN : 2 Twnhs : 43 SLvl : 65 Mean : 6.099
## RRNn : 2 TwnhsE: 114 SFoyer : 37 3rd Qu.: 7.000
## PosA : 1 1.5Unf : 14 Max. :10.000
## (Other): 2 (Other): 19
## OverallCond YearBuilt YearRemodAdd RoofStyle
## Min. :1.000 Min. :1872 Min. :1950 Flat : 13
## 1st Qu.:5.000 1st Qu.:1954 1st Qu.:1967 Gable :1141
## Median :5.000 Median :1973 Median :1994 Gambrel: 11
## Mean :5.575 Mean :1971 Mean :1985 Hip : 286
## 3rd Qu.:6.000 3rd Qu.:2000 3rd Qu.:2004 Mansard: 7
## Max. :9.000 Max. :2010 Max. :2010 Shed : 2
##
## RoofMatl Exterior1st Exterior2nd MasVnrType MasVnrArea
## CompShg:1434 VinylSd:515 VinylSd:504 BrkCmn : 15 Min. : 0.0
## Tar&Grv: 11 HdBoard:222 MetalSd:214 BrkFace:445 1st Qu.: 0.0
## WdShngl: 6 MetalSd:220 HdBoard:207 None :864 Median : 0.0
## WdShake: 5 Wd Sdng:206 Wd Sdng:197 Stone :128 Mean : 103.7
## ClyTile: 1 Plywood:108 Plywood:142 NA's : 8 3rd Qu.: 166.0
## Membran: 1 CemntBd: 61 CmentBd: 60 Max. :1600.0
## (Other): 2 (Other):128 (Other):136 NA's :8
## ExterQual ExterCond Foundation BsmtQual BsmtCond BsmtExposure
## Ex: 52 Ex: 3 BrkTil:146 Ex :121 Fa : 45 Av :221
## Fa: 14 Fa: 28 CBlock:634 Fa : 35 Gd : 65 Gd :134
## Gd:488 Gd: 146 PConc :647 Gd :618 Po : 2 Mn :114
## TA:906 Po: 1 Slab : 24 TA :649 TA :1311 No :953
## TA:1282 Stone : 6 NA's: 37 NA's: 37 NA's: 38
## Wood : 3
##
## BsmtFinType1 BsmtFinSF1 BsmtFinType2 BsmtFinSF2
## ALQ :220 Min. : 0.0 ALQ : 19 Min. : 0.00
## BLQ :148 1st Qu.: 0.0 BLQ : 33 1st Qu.: 0.00
## GLQ :418 Median : 383.5 GLQ : 14 Median : 0.00
## LwQ : 74 Mean : 443.6 LwQ : 46 Mean : 46.55
## Rec :133 3rd Qu.: 712.2 Rec : 54 3rd Qu.: 0.00
## Unf :430 Max. :5644.0 Unf :1256 Max. :1474.00
## NA's: 37 NA's: 38
## BsmtUnfSF TotalBsmtSF Heating HeatingQC CentralAir
## Min. : 0.0 Min. : 0.0 Floor: 1 Ex:741 N: 95
## 1st Qu.: 223.0 1st Qu.: 795.8 GasA :1428 Fa: 49 Y:1365
## Median : 477.5 Median : 991.5 GasW : 18 Gd:241
## Mean : 567.2 Mean :1057.4 Grav : 7 Po: 1
## 3rd Qu.: 808.0 3rd Qu.:1298.2 OthW : 2 TA:428
## Max. :2336.0 Max. :6110.0 Wall : 4
##
## Electrical X1stFlrSF X2ndFlrSF LowQualFinSF
## FuseA: 94 Min. : 334 Min. : 0 Min. : 0.000
## FuseF: 27 1st Qu.: 882 1st Qu.: 0 1st Qu.: 0.000
## FuseP: 3 Median :1087 Median : 0 Median : 0.000
## Mix : 1 Mean :1163 Mean : 347 Mean : 5.845
## SBrkr:1334 3rd Qu.:1391 3rd Qu.: 728 3rd Qu.: 0.000
## NA's : 1 Max. :4692 Max. :2065 Max. :572.000
##
## GrLivArea BsmtFullBath BsmtHalfBath FullBath
## Min. : 334 Min. :0.0000 Min. :0.00000 Min. :0.000
## 1st Qu.:1130 1st Qu.:0.0000 1st Qu.:0.00000 1st Qu.:1.000
## Median :1464 Median :0.0000 Median :0.00000 Median :2.000
## Mean :1515 Mean :0.4253 Mean :0.05753 Mean :1.565
## 3rd Qu.:1777 3rd Qu.:1.0000 3rd Qu.:0.00000 3rd Qu.:2.000
## Max. :5642 Max. :3.0000 Max. :2.00000 Max. :3.000
##
## HalfBath BedroomAbvGr KitchenAbvGr KitchenQual
## Min. :0.0000 Min. :0.000 Min. :0.000 Ex:100
## 1st Qu.:0.0000 1st Qu.:2.000 1st Qu.:1.000 Fa: 39
## Median :0.0000 Median :3.000 Median :1.000 Gd:586
## Mean :0.3829 Mean :2.866 Mean :1.047 TA:735
## 3rd Qu.:1.0000 3rd Qu.:3.000 3rd Qu.:1.000
## Max. :2.0000 Max. :8.000 Max. :3.000
##
## TotRmsAbvGrd Functional Fireplaces FireplaceQu GarageType
## Min. : 2.000 Maj1: 14 Min. :0.000 Ex : 24 2Types : 6
## 1st Qu.: 5.000 Maj2: 5 1st Qu.:0.000 Fa : 33 Attchd :870
## Median : 6.000 Min1: 31 Median :1.000 Gd :380 Basment: 19
## Mean : 6.518 Min2: 34 Mean :0.613 Po : 20 BuiltIn: 88
## 3rd Qu.: 7.000 Mod : 15 3rd Qu.:1.000 TA :313 CarPort: 9
## Max. :14.000 Sev : 1 Max. :3.000 NA's:690 Detchd :387
## Typ :1360 NA's : 81
## GarageYrBlt GarageFinish GarageCars GarageArea GarageQual
## Min. :1900 Fin :352 Min. :0.000 Min. : 0.0 Ex : 3
## 1st Qu.:1961 RFn :422 1st Qu.:1.000 1st Qu.: 334.5 Fa : 48
## Median :1980 Unf :605 Median :2.000 Median : 480.0 Gd : 14
## Mean :1979 NA's: 81 Mean :1.767 Mean : 473.0 Po : 3
## 3rd Qu.:2002 3rd Qu.:2.000 3rd Qu.: 576.0 TA :1311
## Max. :2010 Max. :4.000 Max. :1418.0 NA's: 81
## NA's :81
## GarageCond PavedDrive WoodDeckSF OpenPorchSF EnclosedPorch
## Ex : 2 N: 90 Min. : 0.00 Min. : 0.00 Min. : 0.00
## Fa : 35 P: 30 1st Qu.: 0.00 1st Qu.: 0.00 1st Qu.: 0.00
## Gd : 9 Y:1340 Median : 0.00 Median : 25.00 Median : 0.00
## Po : 7 Mean : 94.24 Mean : 46.66 Mean : 21.95
## TA :1326 3rd Qu.:168.00 3rd Qu.: 68.00 3rd Qu.: 0.00
## NA's: 81 Max. :857.00 Max. :547.00 Max. :552.00
##
## X3SsnPorch ScreenPorch PoolArea PoolQC
## Min. : 0.00 Min. : 0.00 Min. : 0.000 Ex : 2
## 1st Qu.: 0.00 1st Qu.: 0.00 1st Qu.: 0.000 Fa : 2
## Median : 0.00 Median : 0.00 Median : 0.000 Gd : 3
## Mean : 3.41 Mean : 15.06 Mean : 2.759 NA's:1453
## 3rd Qu.: 0.00 3rd Qu.: 0.00 3rd Qu.: 0.000
## Max. :508.00 Max. :480.00 Max. :738.000
##
## Fence MiscFeature MiscVal MoSold
## GdPrv: 59 Gar2: 2 Min. : 0.00 Min. : 1.000
## GdWo : 54 Othr: 2 1st Qu.: 0.00 1st Qu.: 5.000
## MnPrv: 157 Shed: 49 Median : 0.00 Median : 6.000
## MnWw : 11 TenC: 1 Mean : 43.49 Mean : 6.322
## NA's :1179 NA's:1406 3rd Qu.: 0.00 3rd Qu.: 8.000
## Max. :15500.00 Max. :12.000
##
## YrSold SaleType SaleCondition SalePrice
## Min. :2006 WD :1267 Abnorml: 101 Min. : 34900
## 1st Qu.:2007 New : 122 AdjLand: 4 1st Qu.:129975
## Median :2008 COD : 43 Alloca : 12 Median :163000
## Mean :2008 ConLD : 9 Family : 20 Mean :180921
## 3rd Qu.:2009 ConLI : 5 Normal :1198 3rd Qu.:214000
## Max. :2010 ConLw : 5 Partial: 125 Max. :755000
## (Other): 9
The training dataset has a mix of numeric (integer) and factor variables. There are a total of 1,460 observations comprising 80 independent variables and 1 dependent variable, SalePrice.
(num.records<-nrow(train))
## [1] 1460
# Check which columns of the dataframe have missing values
sapply(X = train, FUN = function(x) sum(is.na(x)))
## Id MSSubClass MSZoning LotFrontage LotArea
## 0 0 0 259 0
## Street Alley LotShape LandContour Utilities
## 0 1369 0 0 0
## LotConfig LandSlope Neighborhood Condition1 Condition2
## 0 0 0 0 0
## BldgType HouseStyle OverallQual OverallCond YearBuilt
## 0 0 0 0 0
## YearRemodAdd RoofStyle RoofMatl Exterior1st Exterior2nd
## 0 0 0 0 0
## MasVnrType MasVnrArea ExterQual ExterCond Foundation
## 8 8 0 0 0
## BsmtQual BsmtCond BsmtExposure BsmtFinType1 BsmtFinSF1
## 37 37 38 37 0
## BsmtFinType2 BsmtFinSF2 BsmtUnfSF TotalBsmtSF Heating
## 38 0 0 0 0
## HeatingQC CentralAir Electrical X1stFlrSF X2ndFlrSF
## 0 0 1 0 0
## LowQualFinSF GrLivArea BsmtFullBath BsmtHalfBath FullBath
## 0 0 0 0 0
## HalfBath BedroomAbvGr KitchenAbvGr KitchenQual TotRmsAbvGrd
## 0 0 0 0 0
## Functional Fireplaces FireplaceQu GarageType GarageYrBlt
## 0 0 690 81 81
## GarageFinish GarageCars GarageArea GarageQual GarageCond
## 81 0 0 81 81
## PavedDrive WoodDeckSF OpenPorchSF EnclosedPorch X3SsnPorch
## 0 0 0 0 0
## ScreenPorch PoolArea PoolQC Fence MiscFeature
## 0 0 1453 1179 1406
## MiscVal MoSold YrSold SaleType SaleCondition
## 0 0 0 0 0
## SalePrice
## 0
Rather than trying to impute the missing values, for now I'll avoid using the variables with missing data (such as LotFrontage, Alley and BsmtCond) in the linear model.
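As a quick way to identify which variables to exclude, the columns containing at least one missing value can be listed directly; a small sketch based on the counts computed above:
# Names of columns with at least one NA (candidates to exclude from the initial model)
missing.cols<-names(which(colSums(is.na(train))>0))
missing.cols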
The response (or dependent) variable is the SalePrice. I’ll check its summary statistics.
fivenum(train$SalePrice, na.rm = TRUE)
## [1] 34900 129950 163000 214000 755000
summary(train$SalePrice)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 34900 129975 163000 180921 214000 755000
The variable shows a fairly wide range. The mean is higher than the median, indicating some right skew.
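To quantify the skew, the sample skewness can be computed with the psych package loaded earlier; a positive value would confirm the right skew:
# Sample skewness of the sale price (psych::skew); positive means right-skewed
psych::skew(train$SalePrice)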
# Graph the sale price
options(scipen = 4)
ggplot(train, aes(x = SalePrice)) + geom_histogram(fill="green", binwidth = 5000) +
scale_x_continuous(breaks = seq(0, 1000000, by = 100000))
As expected, the graph shows that the sale price is skewed to the right: the price cannot be negative, and there will always be some very high-value transactions. The histogram shows a few outliers above $700,000.
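A common remedy for this kind of right skew is to work with the log of the sale price; a quick sketch of the log-transformed histogram (bin count chosen only for illustration):
# Histogram of log(SalePrice); the log transform should make the distribution more symmetric
ggplot(train, aes(x = log(SalePrice))) + geom_histogram(fill="green", bins = 40)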
# Renaming columns that read.csv prefixed with 'X' (column names in R cannot start with a digit)
names(train)[names(train)=='X1stFlrSF']<-'FirstFlrSF'
names(train)[names(train)=='X2ndFlrSF']<-'SecondFlrSF'
names(train)[names(train)=='1stFlrSF']<-'FirstFlrSF'
names(train)[names(train)=='2ndFlrSF']<-'SecondFlrSF'
names(train)
## [1] "Id" "MSSubClass" "MSZoning" "LotFrontage"
## [5] "LotArea" "Street" "Alley" "LotShape"
## [9] "LandContour" "Utilities" "LotConfig" "LandSlope"
## [13] "Neighborhood" "Condition1" "Condition2" "BldgType"
## [17] "HouseStyle" "OverallQual" "OverallCond" "YearBuilt"
## [21] "YearRemodAdd" "RoofStyle" "RoofMatl" "Exterior1st"
## [25] "Exterior2nd" "MasVnrType" "MasVnrArea" "ExterQual"
## [29] "ExterCond" "Foundation" "BsmtQual" "BsmtCond"
## [33] "BsmtExposure" "BsmtFinType1" "BsmtFinSF1" "BsmtFinType2"
## [37] "BsmtFinSF2" "BsmtUnfSF" "TotalBsmtSF" "Heating"
## [41] "HeatingQC" "CentralAir" "Electrical" "FirstFlrSF"
## [45] "SecondFlrSF" "LowQualFinSF" "GrLivArea" "BsmtFullBath"
## [49] "BsmtHalfBath" "FullBath" "HalfBath" "BedroomAbvGr"
## [53] "KitchenAbvGr" "KitchenQual" "TotRmsAbvGrd" "Functional"
## [57] "Fireplaces" "FireplaceQu" "GarageType" "GarageYrBlt"
## [61] "GarageFinish" "GarageCars" "GarageArea" "GarageQual"
## [65] "GarageCond" "PavedDrive" "WoodDeckSF" "OpenPorchSF"
## [69] "EnclosedPorch" "X3SsnPorch" "ScreenPorch" "PoolArea"
## [73] "PoolQC" "Fence" "MiscFeature" "MiscVal"
## [77] "MoSold" "YrSold" "SaleType" "SaleCondition"
## [81] "SalePrice"
# Calculating a derived variable called Age and adding it to the training dataset
CurrYear<-as.integer(format(Sys.Date(),"%Y"))
train$Age<-CurrYear-train$YearBuilt
#head(train)
Since location plays an important role in the sale price, I calculate the average sale price per neighborhood and arrange it from highest to lowest. Based on this, the North Ridge (NoRidge) neighborhood has the highest average sale price.
# Group by neighorhood and calculate average sale price
train%>%group_by(Neighborhood)%>%summarise(avg.price=mean(SalePrice))%>%arrange(desc(avg.price))
## # A tibble: 25 x 2
## Neighborhood avg.price
## <fct> <dbl>
## 1 NoRidge 335295.
## 2 NridgHt 316271.
## 3 StoneBr 310499
## 4 Timber 242247.
## 5 Veenker 238773.
## 6 Somerst 225380.
## 7 ClearCr 212565.
## 8 Crawfor 210625.
## 9 CollgCr 197966.
## 10 Blmngtn 194871.
## # ... with 15 more rows
Next, I draw box plots of the sale price by neighborhood. These show the spread of sale prices within each neighborhood along with any outliers.
ggplot(train, aes(x=reorder(Neighborhood, SalePrice, FUN=median), y=SalePrice) ) + geom_boxplot() + coord_flip()
The box plots confirm that the median sale price is highest in Northridge Heights (NridgHt), followed by North Ridge (NoRidge), the reverse of their ordering by mean sale price.
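To make the mean-versus-median comparison explicit, both statistics can be computed per neighborhood; a small sketch for the top neighborhoods:
# Compare mean and median sale price for the highest-priced neighborhoods
train%>%group_by(Neighborhood)%>%summarise(mean.price=mean(SalePrice), median.price=median(SalePrice))%>%arrange(desc(median.price))%>%head(5)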
# Initial selection of predictor variables
columns<-c('SalePrice', 'LotArea', 'MSSubClass', 'Neighborhood', 'BldgType', 'FirstFlrSF', 'SecondFlrSF', 'GrLivArea', 'SaleCondition', 'OverallQual', 'OverallCond', 'CentralAir', 'TotRmsAbvGrd', 'Utilities','TotalBsmtSF', 'Age')
# Initial selection of numeric predictor variables
num_columns<-c('SalePrice','LotArea', 'FirstFlrSF', 'SecondFlrSF', 'GrLivArea', 'TotalBsmtSF', 'Age')
# Taking a subset of columns to work with
sub.train<-subset(train, select=columns)
dim(sub.train)
## [1] 1460 16
small.train<-train[,num_columns]
dim(small.train)
## [1] 1460 7
typeof(small.train)
## [1] "list"
names(sub.train)
## [1] "SalePrice" "LotArea" "MSSubClass" "Neighborhood"
## [5] "BldgType" "FirstFlrSF" "SecondFlrSF" "GrLivArea"
## [9] "SaleCondition" "OverallQual" "OverallCond" "CentralAir"
## [13] "TotRmsAbvGrd" "Utilities" "TotalBsmtSF" "Age"
# Calculate correlations between the predictor variables as well between predictor variable and response variable
#pairs(small.train, gap=0.5) #pairs.panels(small.train, method = "pearson")
# Examine correlations between the different numeric columns selected
(corr_data <- rcorr(as.matrix(small.train)))
## SalePrice LotArea FirstFlrSF SecondFlrSF GrLivArea TotalBsmtSF
## SalePrice 1.00 0.26 0.61 0.32 0.71 0.61
## LotArea 0.26 1.00 0.30 0.05 0.26 0.26
## FirstFlrSF 0.61 0.30 1.00 -0.20 0.57 0.82
## SecondFlrSF 0.32 0.05 -0.20 1.00 0.69 -0.17
## GrLivArea 0.71 0.26 0.57 0.69 1.00 0.45
## TotalBsmtSF 0.61 0.26 0.82 -0.17 0.45 1.00
## Age -0.52 -0.01 -0.28 -0.01 -0.20 -0.39
## Age
## SalePrice -0.52
## LotArea -0.01
## FirstFlrSF -0.28
## SecondFlrSF -0.01
## GrLivArea -0.20
## TotalBsmtSF -0.39
## Age 1.00
##
## n= 1460
##
##
## P
## SalePrice LotArea FirstFlrSF SecondFlrSF GrLivArea TotalBsmtSF
## SalePrice 0.0000 0.0000 0.0000 0.0000 0.0000
## LotArea 0.0000 0.0000 0.0514 0.0000 0.0000
## FirstFlrSF 0.0000 0.0000 0.0000 0.0000 0.0000
## SecondFlrSF 0.0000 0.0514 0.0000 0.0000 0.0000
## GrLivArea 0.0000 0.0000 0.0000 0.0000 0.0000
## TotalBsmtSF 0.0000 0.0000 0.0000 0.0000 0.0000
## Age 0.0000 0.5870 0.0000 0.6939 0.0000 0.0000
## Age
## SalePrice 0.0000
## LotArea 0.5870
## FirstFlrSF 0.0000
## SecondFlrSF 0.6939
## GrLivArea 0.0000
## TotalBsmtSF 0.0000
## Age
The above shows a strong positive correlation between the sale price and the size of the house (above-grade living area, first-floor area and total basement area) and a strong negative correlation between the sale price and the age of the house. Both relationships make intuitive sense.
The numeric data above can also be examined visually using the following graph:
# plot the correlation matrix
corrplot(cor(small.train),type = "upper")
The legend on the right indicates the direction of each correlation, and the size of the circle indicates its strength.
# Create scatterplots of variables with high correlation with the Sale Price
ggplot(small.train, aes(GrLivArea, SalePrice) ) + geom_point()
These two variables show a positive, roughly linear relationship, apart from a few outliers.
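Overlaying a simple linear fit makes the trend easier to see; an illustrative sketch:
# Scatterplot with a fitted least-squares line (illustration only)
ggplot(small.train, aes(GrLivArea, SalePrice)) + geom_point(alpha=0.5) + geom_smooth(method="lm", se=FALSE)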
# Select 3 quantitative predictor variables
q_cols<-c('SalePrice','LotArea', 'GrLivArea')
# Taking a subset of columns to work with
q.vars<-train[,q_cols]
dim(q.vars)
## [1] 1460 3
# Plot the 3 selected numeric variables
plot(q.vars)
# Calculate correlation between these 3 variables
(q.cor = cor(q.vars))
## SalePrice LotArea GrLivArea
## SalePrice 1.0000000 0.2638434 0.7086245
## LotArea 0.2638434 1.0000000 0.2631162
## GrLivArea 0.7086245 0.2631162 1.0000000
# Check significance of the sample correlations, using Pearson's test
(q.cor1 = cor.test(q.vars$SalePrice,q.vars$GrLivArea,method="pearson",conf.level = 0.80))
##
## Pearson's product-moment correlation
##
## data: q.vars$SalePrice and q.vars$GrLivArea
## t = 38.348, df = 1458, p-value < 2.2e-16
## alternative hypothesis: true correlation is not equal to 0
## 80 percent confidence interval:
## 0.6915087 0.7249450
## sample estimates:
## cor
## 0.7086245
(q.cor1 = cor.test(q.vars$SalePrice,q.vars$LotArea,method="pearson",conf.level = 0.80))
##
## Pearson's product-moment correlation
##
## data: q.vars$SalePrice and q.vars$LotArea
## t = 10.445, df = 1458, p-value < 2.2e-16
## alternative hypothesis: true correlation is not equal to 0
## 80 percent confidence interval:
## 0.2323391 0.2947946
## sample estimates:
## cor
## 0.2638434
(q.cor1 = cor.test(q.vars$LotArea,q.vars$GrLivArea,method="pearson",conf.level = 0.80))
##
## Pearson's product-moment correlation
##
## data: q.vars$LotArea and q.vars$GrLivArea
## t = 10.414, df = 1458, p-value < 2.2e-16
## alternative hypothesis: true correlation is not equal to 0
## 80 percent confidence interval:
## 0.2315997 0.2940809
## sample estimates:
## cor
## 0.2631162
Based on the tiny p-values above, the null hypothesis that the pairwise correlation between each pair of these variables is 0 can be rejected. The 80% confidence interval for each pairwise correlation is shown above.
In statistics, the family-wise error rate (FWER) is the probability of making one or more false discoveries (Type I errors) when performing multiple hypothesis tests. A family is a set of related hypotheses that must be jointly accurate.
In this context a Type I error means declaring a correlation significant when the true correlation is zero, i.e. a false positive. When the analysis is extended to a family of tests, i.e. multiple pairs of variables, the chance of at least one such error compounds. The probability of at least one false positive can be estimated as:
FWER = 1 - (1 - alpha)^n, where alpha = 0.05 and n is the number of pairwise correlations tested (the number of hypotheses).
alpha<-0.05
n<-3 # for 3 pairs of variables
(fwer<-1-(1-alpha)^n)
## [1] 0.142625
So the probability of at least one false positive here is about 14% for these three pairwise tests. I would be worried about family-wise error, and several corrections (such as Bonferroni or Holm) could be applied in this case, as sketched below.
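As a sketch of such a correction, the three pairwise p-values can be adjusted with p.adjust (Bonferroni and Holm shown; the pair labels are mine, for readability only):
# Collect the three pairwise p-values and adjust them for multiple testing
p.vals<-c(Sale.GrLiv=cor.test(q.vars$SalePrice,q.vars$GrLivArea)$p.value,
          Sale.Lot=cor.test(q.vars$SalePrice,q.vars$LotArea)$p.value,
          Lot.GrLiv=cor.test(q.vars$LotArea,q.vars$GrLivArea)$p.value)
p.adjust(p.vals, method="bonferroni")
p.adjust(p.vals, method="holm")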
# Calculate the inverse of the correlation matrix from above
round(prec.matrix<-solve(q.cor),3)
## SalePrice LotArea GrLivArea
## SalePrice 2.035 -0.169 -1.397
## LotArea -0.169 1.088 -0.166
## GrLivArea -1.397 -0.166 2.034
# Can also be done using a different function
round(precision_matrix<-inv(q.cor),3)
## SalePrice LotArea GrLivArea
## SalePrice 2.035 -0.169 -1.397
## LotArea -0.169 1.088 -0.166
## GrLivArea -1.397 -0.166 2.034
# Multiply the correlation matrix by the precision matrix (Right-multiplication)
round(q.cor %*% prec.matrix, 3)
## SalePrice LotArea GrLivArea
## SalePrice 1 0 0
## LotArea 0 1 0
## GrLivArea 0 0 1
#Multiply precision matrix by correlation matrix (Left-multiplication)
round(prec.matrix %*% q.cor, 3)
## SalePrice LotArea GrLivArea
## SalePrice 1 0 0
## LotArea 0 1 0
## GrLivArea 0 0 1
As expected, both the matrix multiplications result in the identity matrix.
# Conduct the LU decomposition
(lu<-lu.decomposition(q.cor))
## $L
## [,1] [,2] [,3]
## [1,] 1.0000000 0.00000000 0
## [2,] 0.2638434 1.00000000 0
## [3,] 0.7086245 0.08184802 1
##
## $U
## [,1] [,2] [,3]
## [1,] 1 0.2638434 0.70862448
## [2,] 0 0.9303867 0.07615031
## [3,] 0 0.0000000 0.49161860
# Same result via a different function
(lu1<-lu(q.cor))
## $L
## SalePrice LotArea GrLivArea
## SalePrice 1.0000000 0.00000000 0
## LotArea 0.2638434 1.00000000 0
## GrLivArea 0.7086245 0.08184802 1
##
## $U
## SalePrice LotArea GrLivArea
## SalePrice 1 0.2638434 0.70862448
## LotArea 0 0.9303867 0.07615031
## GrLivArea 0 0.0000000 0.49161860
# Assign the Upper and Lower diagonal matrices
L<-lu$L
U<-lu$U
lu1$L%*%lu1$U
## SalePrice LotArea GrLivArea
## SalePrice 1.0000000 0.2638434 0.7086245
## LotArea 0.2638434 1.0000000 0.2631162
## GrLivArea 0.7086245 0.2631162 1.0000000
# Verify if L*U gives the original correlation matrix
(L%*%U)==q.cor
## SalePrice LotArea GrLivArea
## SalePrice TRUE TRUE TRUE
## LotArea TRUE TRUE TRUE
## GrLivArea TRUE TRUE TRUE
It does return the original correlation matrix.
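Note that an exact == comparison on floating-point matrices can be fragile; a tolerance-based check is safer. A small sketch using the L and U factors from above:
# Tolerance-based check that L %*% U reproduces the correlation matrix
all.equal(L%*%U, q.cor, check.attributes=FALSE)
max(abs(L%*%U - q.cor))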
# Select a right-skewed variable
#dens<-density(small.train$TotalBsmtSF)
#hist(small.train$TotalBsmtSF, breaks = 30)
#plot(dens, type="l", col="green", xlab="TotalBasmtSF", main="Basement Area",lwd=2)
#+geom_text(aes(x=mean.val+600,label=paste0("Mean\n",mean.val),y=1.9))
mean.val<-round(mean(small.train$TotalBsmtSF),0)
median.val<-round(median(small.train$TotalBsmtSF),0)
ggplot(small.train,aes(TotalBsmtSF))+geom_histogram(aes(y=..density..), fill="lightblue", bins=30)+ geom_density(color="red", alpha=0.8, size=0.8)+geom_vline(xintercept=mean.val, size=0.5, color="red")+ geom_vline(xintercept=median.val, size=0.5, color="blue")
# Fit the selected variable to the exponential distribution
fit.var<-fitdistr(small.train$TotalBsmtSF, densfun="exponential")
# Check estimated lambda
(lambda<-fit.var$estimate)
## rate
## 0.0009456896
The estimated rate parameter (lambda) of the fitted exponential distribution is approximately 0.000946.
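For the exponential distribution, the maximum-likelihood estimate of the rate is simply the reciprocal of the sample mean, so the fitted value can be sanity-checked directly:
# MLE of the exponential rate equals 1 / sample mean; should match the fitted lambda
1/mean(small.train$TotalBsmtSF)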
# Take 1000 samples of the exponential distribution fitted above
set.seed(42)
exp.sample<-rexp(1000,lambda)
# Plot histogram
ggplot(as.data.frame(exp.sample),aes(exp.sample))+geom_histogram(binwidth=200)
# Find 5th and 95th percentiles of the exponential distribution
(pct5<-qexp(0.05,rate=lambda))
## [1] 54.23904
(pct95<-qexp(0.95,rate=lambda))
## [1] 3167.776
# Calculate 5% and 95% quantiles from emperical data
(emp.quantiles<-quantile(small.train$TotalBsmtSF, c(0.05, 0.95)))
## 5% 95%
## 519.3 1753.0
Based on the lambda parameter of the fitted exponential distribution, the 5th and 95th percentiles are far more widely dispersed than the corresponding empirical quantiles. So the exponential distribution does not seem to be a good candidate for modelling this variable.
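The mismatch across the whole distribution can also be seen with a quantile-quantile comparison against the fitted exponential; a sketch using the lambda estimated above:
# Q-Q plot of the data against the fitted exponential; points far from the line indicate poor fit
qqplot(qexp(ppoints(nrow(small.train)), rate=lambda), small.train$TotalBsmtSF,
       xlab="Theoretical exponential quantiles", ylab="Observed TotalBsmtSF")
abline(0, 1, col="red")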
# Generate a 95% confidence interval from the empirical data, assuming normality
(ci(small.train$TotalBsmtSF, confidence=0.95))
## Warning in ci.numeric(small.train$TotalBsmtSF, confidence = 0.95): No class
## or unkown class. Using default calcuation.
## Estimate CI lower CI upper Std. Error
## 1057.42945 1034.90755 1079.95135 11.48144
(mu<-mean(small.train$TotalBsmtSF))
## [1] 1057.429
(sd<-sd(small.train$TotalBsmtSF))
## [1] 438.7053
(lower<-mu-1.96*sd/sqrt(num.records))
## [1] 1034.926
(upper<-mu+1.96*sd/sqrt(num.records))
## [1] 1079.933
Assuming normality, the 95% confidence interval for the mean of this variable, based on the estimated mean and standard deviation, is approximately (1034.9, 1079.9). Note that this interval describes uncertainty about the mean, not the spread of individual observations, which is why it is so much tighter than the empirical 5%/95% quantiles. Given the clear right skew, the mean is not the best measure of centrality for this variable, and a symmetric normal distribution is unlikely to be a good model for it either.
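For judging distributional fit, an interval for individual observations under normality (mean plus or minus 1.96 standard deviations) is the more relevant comparison with the empirical quantiles; a small sketch using mu and sd from above:
# Normal-based interval for individual observations, to compare with the empirical 5%/95% quantiles
c(lower=mu-1.96*sd, upper=mu+1.96*sd)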
# Fit a multiple regression model using a selected set of independent variables
model1=lm(SalePrice~LotArea + MSSubClass + OverallQual + OverallCond + TotalBsmtSF + FirstFlrSF + SecondFlrSF + Age, data=sub.train)
#model2=lm(SalePrice~LotArea + MSSubClass + OverallQual + OverallCond + TotalBsmtSF + Age, data=sub.train)
#model3=lm(SalePrice~GrLivArea + CentralAir + TotRmsAbvGrd + Utilities + FirstFlrSF + SecondFlrSF + + TotalBsmtSF + Age, data=sub.train)
# Check the model output
summary(model1)
##
## Call:
## lm(formula = SalePrice ~ LotArea + MSSubClass + OverallQual +
## OverallCond + TotalBsmtSF + FirstFlrSF + SecondFlrSF + Age,
## data = sub.train)
##
## Residuals:
## Min 1Q Median 3Q Max
## -505757 -18493 -2362 14200 275355
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -71794.5437 8136.1941 -8.824 < 2e-16 ***
## LotArea 0.5906 0.1053 5.610 2.41e-08 ***
## MSSubClass -145.3752 25.3150 -5.743 1.13e-08 ***
## OverallQual 21978.3193 1142.9454 19.230 < 2e-16 ***
## OverallCond 6015.9364 979.0506 6.145 1.03e-09 ***
## TotalBsmtSF 17.0363 4.2185 4.038 5.66e-05 ***
## FirstFlrSF 65.7950 4.6490 14.153 < 2e-16 ***
## SecondFlrSF 55.1523 2.7777 19.855 < 2e-16 ***
## Age -543.0638 44.6907 -12.152 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 37480 on 1451 degrees of freedom
## Multiple R-squared: 0.7786, Adjusted R-squared: 0.7774
## F-statistic: 637.9 on 8 and 1451 DF, p-value: < 2.2e-16
The adjusted R-squared value is 0.7774, indicating that this model (using this selected set of predictor variables) explains about 78% of the variability in the sale price.
# Plot the model output - fitted values versus the residuals
plot(fitted(model1), resid(model1), xlab = "Fitted Sale Price", ylab = "Residuals", main = "Residuals vs Fitted Sale Price")
abline(h = 0)
# Create a Quantile-Quantile Plot
qqnorm(model1$residuals)
qqline(model1$residuals)
The above plots show that the residuals are not evenly distributed: at higher fitted sale prices the residual variance increases substantially. This indicates scope for improving the model through feature engineering, transforming the response, and iterating through different combinations of predictor variables to mitigate collinearity and retain only the most useful independent variables.
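One way to address the increasing residual variance is to model the log of the sale price instead; a rough sketch of such a refit (same predictors, not tuned here):
# Refit on log(SalePrice) to stabilize the residual variance (sketch only)
model.log<-lm(log(SalePrice)~LotArea + MSSubClass + OverallQual + OverallCond + TotalBsmtSF + FirstFlrSF + SecondFlrSF + Age, data=sub.train)
summary(model.log)$adj.r.squared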
# Load the test dataset
test<-read.csv("C:/Jagdish/MastersPrograms/CUNY/DS605 Computational Mathematics/Final Project/test.csv")
#sub.test<-subset(test, select=columns)
#dim(sub.test)
# Calculating a derived variable called Age and adding it to the test dataset
CurrYear<-as.integer(format(Sys.Date(),"%Y"))
test$Age<-CurrYear-test$YearBuilt
# Renaming columns that read.csv prefixed with 'X' (column names in R cannot start with a digit)
names(test)[names(test)=='X1stFlrSF']<-'FirstFlrSF'
names(test)[names(test)=='X2ndFlrSF']<-'SecondFlrSF'
names(test)[names(test)=='1stFlrSF']<-'FirstFlrSF'
names(test)[names(test)=='2ndFlrSF']<-'SecondFlrSF'
names(test)
## [1] "Id" "MSSubClass" "MSZoning" "LotFrontage"
## [5] "LotArea" "Street" "Alley" "LotShape"
## [9] "LandContour" "Utilities" "LotConfig" "LandSlope"
## [13] "Neighborhood" "Condition1" "Condition2" "BldgType"
## [17] "HouseStyle" "OverallQual" "OverallCond" "YearBuilt"
## [21] "YearRemodAdd" "RoofStyle" "RoofMatl" "Exterior1st"
## [25] "Exterior2nd" "MasVnrType" "MasVnrArea" "ExterQual"
## [29] "ExterCond" "Foundation" "BsmtQual" "BsmtCond"
## [33] "BsmtExposure" "BsmtFinType1" "BsmtFinSF1" "BsmtFinType2"
## [37] "BsmtFinSF2" "BsmtUnfSF" "TotalBsmtSF" "Heating"
## [41] "HeatingQC" "CentralAir" "Electrical" "FirstFlrSF"
## [45] "SecondFlrSF" "LowQualFinSF" "GrLivArea" "BsmtFullBath"
## [49] "BsmtHalfBath" "FullBath" "HalfBath" "BedroomAbvGr"
## [53] "KitchenAbvGr" "KitchenQual" "TotRmsAbvGrd" "Functional"
## [57] "Fireplaces" "FireplaceQu" "GarageType" "GarageYrBlt"
## [61] "GarageFinish" "GarageCars" "GarageArea" "GarageQual"
## [65] "GarageCond" "PavedDrive" "WoodDeckSF" "OpenPorchSF"
## [69] "EnclosedPorch" "X3SsnPorch" "ScreenPorch" "PoolArea"
## [73] "PoolQC" "Fence" "MiscFeature" "MiscVal"
## [77] "MoSold" "YrSold" "SaleType" "SaleCondition"
## [81] "Age"
# Predict the response variable for the test data
test.pred<-round(predict(model1, test),0)
# Create a data frame with just the Id and the predicted sale price
test.predicted.price<-data.frame(cbind(Id=test$Id,SalePrice=test.pred))
# Replace NA values with mean of predicted sale price
#nrow(test.predicted.price)
#test.predicted.price<-test.predicted.price%>%drop_na()
#nrow(test.predicted.price)
#test.predicted.price%>%filter(is.na(SalePrice))
avg.price<-round(mean(test.predicted.price$SalePrice,na.rm=TRUE),0)
test.predicted.price$SalePrice[is.na(test.predicted.price$SalePrice)]<-avg.price
test.predicted.price%>%filter(is.na(SalePrice))
## [1] Id SalePrice
## <0 rows> (or 0-length row.names)
# Inspect the summary of the predicted sale price for the test dataset
summary(test.predicted.price)
## Id SalePrice
## Min. :1461 Min. :-21345
## 1st Qu.:1826 1st Qu.:129878
## Median :2190 Median :170661
## Mean :2190 Mean :178212
## 3rd Qu.:2554 3rd Qu.:222637
## Max. :2919 Max. :613874
Clearly, the model needs improvement, since it predicts a negative sale price in some cases.
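As a stopgap (not a substitute for improving the model), negative predictions could be clamped to the minimum sale price seen in the training data; an illustrative sketch that leaves the submitted predictions untouched:
# Illustration only: clamp negative predictions at the minimum training sale price
clamped.pred<-pmax(test.predicted.price$SalePrice, min(train$SalePrice))
summary(clamped.pred)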
# Write the predicted values to a csv file for upload to kaggle
write.csv(test.predicted.price, file="C:/Jagdish/MastersPrograms/CUNY/DS605 Computational Mathematics/Final Project/test_predictions.csv",row.names=FALSE)