This loan data set lists the outcomes of 5611 loans. The following models attempt to predict whether a loan will be good or bad.

library(car)
set.seed(1)
ld<-read.csv("c:/users/abbey/Desktop/Data Mining/LoanData.csv")
ld[1:3,]
##    Status Credit.Grade Amount Age Borrower.Rate Debt.To.Income.Ratio
## 1 Current            C   5000   4         0.150                 0.04
## 2 Current           HR   1900   6         0.265                 0.02
## 3 Current           HR   1000   3         0.150                 0.02
ld$Status = recode(ld$Status, "'Current'=1; else=0" )
ld$Status = as.numeric(levels(ld$Status)[ld$Status])
table(ld$Status)
## 
##    0    1 
##  425 5186
response=ld$Status
hist(response)

MeanRes=mean(response)
MeanRes
## [1] 0.9242559
n=length(ld$Status)
n
## [1] 5611
n1=floor(n*(0.6))
n1
## [1] 3366
n2=n-n1
n2
## [1] 2245
train=sample(1:n,n1)
xloan<-model.matrix(Status~., data=ld)[,-1]
xloan[1:3,]
##   Credit.GradeAA Credit.GradeB Credit.GradeC Credit.GradeD Credit.GradeE
## 1              0             0             1             0             0
## 2              0             0             0             0             0
## 3              0             0             0             0             0
##   Credit.GradeHR Credit.GradeNC Amount Age Borrower.Rate
## 1              0              0   5000   4         0.150
## 2              1              0   1900   6         0.265
## 3              1              0   1000   3         0.150
##   Debt.To.Income.Ratio
## 1                 0.04
## 2                 0.02
## 3                 0.02

Status is the dependent variable in this data set. Loans with status Current are good (recoded above as 1), while late and defaulted loans are bad (recoded as 0). The model.matrix call builds the design matrix, expanding the Credit.Grade factor into indicator columns; the code below constructs the same indicators by hand.
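
The same recoding could also be done without car::recode; a minimal equivalent sketch, assuming Status is still the raw factor read from the CSV:

ld$Status <- ifelse(ld$Status == "Current", 1, 0)   # 1 = good (Current), 0 = bad (late or default)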

v1=rep(1,dim(ld)[1])
v2=rep(0,dim(ld)[1])
ld$Credit.GradeAA=ifelse(ld$Credit.Grade=="AA",v1,v2)
ld$Credit.GradeA=ifelse(ld$Credit.Grade=="A",v1,v2)
ld$Credit.GradeB=ifelse(ld$Credit.Grade=="B",v1,v2)
ld$Credit.GradeC=ifelse(ld$Credit.Grade=="C",v1,v2)
ld$Credit.GradeD=ifelse(ld$Credit.Grade=="D",v1,v2)
ld$Credit.GradeE=ifelse(ld$Credit.Grade=="E",v1,v2)
ld$Credit.GradeHR=ifelse(ld$Credit.Grade=="HR",v1,v2)
ldx=cbind(response,Amount=ld$Amount,Age=ld$Age,BorrowerRate=ld$Borrower.Rate,DebtIncomeRatio=ld$Debt.To.Income.Ratio,
         CreditGradeAA=ld$Credit.GradeAA,CreditGradeA=ld$Credit.GradeA,CreditGradeB=ld$Credit.GradeB,CreditGradeC=ld$Credit.GradeC,
         CreditGradeD=ld$Credit.GradeD,CreditGradeE=ld$Credit.GradeE,CreditGradeHR=ld$Credit.GradeHR)
ldx[1:3,]
##      response Amount Age BorrowerRate DebtIncomeRatio CreditGradeAA
## [1,]        1   5000   4        0.150            0.04             0
## [2,]        1   1900   6        0.265            0.02             0
## [3,]        1   1000   3        0.150            0.02             0
##      CreditGradeA CreditGradeB CreditGradeC CreditGradeD CreditGradeE
## [1,]            0            0            1            0            0
## [2,]            0            0            0            0            0
## [3,]            0            0            0            0            0
##      CreditGradeHR
## [1,]             0
## [2,]             1
## [3,]             1
m1=glm(response~.,family=binomial,data=data.frame(ldx))
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
summary(m1)
## 
## Call:
## glm(formula = response ~ ., family = binomial, data = data.frame(ldx))
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -3.2427   0.1372   0.2384   0.3949   2.1454  
## 
## Coefficients:
##                   Estimate Std. Error z value Pr(>|z|)    
## (Intercept)      5.965e+00  4.773e-01  12.497  < 2e-16 ***
## Amount          -4.575e-05  1.585e-05  -2.887 0.003888 ** 
## Age             -3.651e-01  2.212e-02 -16.507  < 2e-16 ***
## BorrowerRate    -1.114e+01  1.276e+00  -8.728  < 2e-16 ***
## DebtIncomeRatio  1.917e-01  2.495e-01   0.768 0.442242    
## CreditGradeAA    2.277e+00  6.053e-01   3.762 0.000169 ***
## CreditGradeA     1.662e+00  5.041e-01   3.297 0.000979 ***
## CreditGradeB     1.369e+00  4.326e-01   3.165 0.001550 ** 
## CreditGradeC     1.699e+00  3.982e-01   4.267 1.98e-05 ***
## CreditGradeD     1.628e+00  3.750e-01   4.341 1.42e-05 ***
## CreditGradeE     1.043e+00  3.525e-01   2.958 0.003101 ** 
## CreditGradeHR    5.177e-01  3.473e-01   1.491 0.136006    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 3010.3  on 5610  degrees of freedom
## Residual deviance: 2371.0  on 5599  degrees of freedom
## AIC: 2395
## 
## Number of Fisher Scoring iterations: 16

Next, a logistic regression model is fit to the training data.
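
The same model can also be fit in one step with glm's subset argument; a sketch using the ldx matrix built above, before its response column is dropped below (m2alt is only an illustrative name):

m2alt <- glm(response ~ ., family = binomial, data = data.frame(ldx), subset = train)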

ldx=ldx[,-1]
xtrain <-ldx[train,]
xnew <-ldx[-train,]
ytrain<-response[train]
ynew<-response[-train]
m2=glm(response~., family=binomial, data=data.frame(response=ytrain,xtrain))
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
summary(m2)
## 
## Call:
## glm(formula = response ~ ., family = binomial, data = data.frame(response = ytrain, 
##     xtrain))
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -3.2961   0.1453   0.2465   0.3981   2.0036  
## 
## Coefficients:
##                   Estimate Std. Error z value Pr(>|z|)    
## (Intercept)      6.517e+00  6.565e-01   9.927  < 2e-16 ***
## Amount          -3.427e-05  2.072e-05  -1.654   0.0981 .  
## Age             -3.481e-01  2.815e-02 -12.365  < 2e-16 ***
## BorrowerRate    -1.199e+01  1.636e+00  -7.331 2.28e-13 ***
## DebtIncomeRatio  1.724e-03  3.958e-02   0.044   0.9653    
## CreditGradeAA    1.647e+00  8.077e-01   2.040   0.0414 *  
## CreditGradeA     7.227e-01  6.522e-01   1.108   0.2678    
## CreditGradeB     6.974e-01  5.892e-01   1.184   0.2365    
## CreditGradeC     1.271e+00  5.634e-01   2.257   0.0240 *  
## CreditGradeD     1.123e+00  5.267e-01   2.132   0.0330 *  
## CreditGradeE     7.294e-01  5.022e-01   1.452   0.1464    
## CreditGradeHR    6.914e-02  4.958e-01   0.139   0.8891    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 1806.1  on 3365  degrees of freedom
## Residual deviance: 1439.8  on 3354  degrees of freedom
## AIC: 1463.8
## 
## Number of Fisher Scoring iterations: 14

The following shows the predicted probabilities of a good loan for the cases in the test set.

ptest <- predict(m2,newdata=data.frame(xnew),type="response")
data.frame(ynew,ptest)[1:10,]
##    ynew     ptest
## 1     1 0.9881787
## 2     1 0.9340965
## 3     1 0.7077864
## 4     1 0.9846826
## 5     1 0.8992271
## 6     1 0.8699288
## 7     0 0.7148193
## 8     1 0.7831417
## 9     1 0.8274410
## 10    1 0.9954536
hist(ptest)

plot(ynew~ptest)

The histogram and plot show that most predicted probabilities are above 0.5, so most test cases are classified as good loans (coded 1).

gg1=floor(ptest+0.5)
ttt=table(ynew,gg1)
ttt
##     gg1
## ynew    0    1
##    0   14  156
##    1    9 2066
error=(ttt[1,2]+ttt[2,1])/n2
error
## [1] 0.07349666

This model is terrible at identifying bad loans: only 14 of the 170 bad loans in the test set are classified correctly.
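
The class-specific rates can be read directly from the confusion table; a short check using the ttt table above:

ttt[1,1]/sum(ttt[1,])   # bad loans correctly flagged: 14/170, about 8%
ttt[2,2]/sum(ttt[2,])   # good loans correctly classified: 2066/2075, over 99%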

Next, the cutoff is lowered so that any case with a predicted probability of 0.3 or higher is classified as a good loan.

gg2=floor(ptest+0.7)
ttt=table(ynew,gg2)
ttt
##     gg2
## ynew    0    1
##    0    9  161
##    1    2 2073
error=(ttt[1,2]+ttt[2,1])/n2
error
## [1] 0.07260579

With the lower cutoff the model is even worse at identifying bad loans (only 9 of the 170 are flagged), although it now misclassifies only 2 of the 2075 good loans.
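
To see the trade-off more directly, a small sketch that sweeps several cutoffs (using ptest, ynew, and n2 as defined above):

for (cut in c(0.3, 0.5, 0.7, 0.9)) {
  pred <- ifelse(ptest >= cut, 1, 0)
  tab <- table(factor(ynew, levels=0:1), factor(pred, levels=0:1))
  cat("cutoff", cut, "error", round((tab[1,2]+tab[2,1])/n2, 4),
      "bad loans flagged", tab[1,1], "of", sum(tab[1,]), "\n")
}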

bb=cbind(ptest,ynew)
head(bb)
##       ptest ynew
## 1 0.9881787    1
## 2 0.9340965    1
## 3 0.7077864    1
## 4 0.9846826    1
## 5 0.8992271    1
## 6 0.8699288    1
bb1=bb[order(ptest,decreasing=TRUE),]
head(bb1)
##      ptest ynew
## 2242     1    1
## 2243     1    1
## 2244     1    1
## 2245     1    1
## 2240     1    1
## 2241     1    1

The overall success probability in the test data is computed next and used as the baseline for a lift chart.

xbar=mean(ynew)
xbar
## [1] 0.9242762
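# Lift chart inputs: ay accumulates successes with cases sorted by predicted probability;
# ax is the expected cumulative success count at the overall success rate xbar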
axis=dim(n2)
ax=dim(n2)
ay=dim(n2)
axis[1]=1
ax[1]=xbar
ay[1]=bb1[1,2]
for (i in 2:n2) {
  axis[i]=i
  ax[i]=xbar*i
  ay[i]=ay[i-1]+bb1[i,2]
}

aaa=cbind(bb1[,1],bb1[,2],ay,ax)
aaa[1:5,]
##          ay        ax
## 2242 1 1  1 0.9242762
## 2243 1 1  2 1.8485523
## 2244 1 1  3 2.7728285
## 2245 1 1  4 3.6971047
## 2240 1 1  5 4.6213808
plot(axis,ay,xlab="number of cases",ylab="number of successes",main="Lift: Cum successes sorted by pred val/success prob")
points(axis,ax,type="l")

The cumulative-success curve lies only slightly above the baseline, indicating the model provides little lift over simply using the overall success rate.
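
The lift chart can be complemented with a conventional ROC area under the curve; a sketch assuming the pROC package is installed:

library(pROC)
auc(roc(ynew, ptest))   # AUC of the test-set predictions from m2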

This data set lists characteristics of 1043 passengers who were on the Titanic and whether they survived the sinking. The following models attempt to predict whether a passenger survived, with survival coded as 1.

Tt<-read.csv("c:/users/abbey/Desktop/Data Mining/Titanicdata.csv")
head(Tt)
##   Class Survived Gender     Age NumbSibOrSpsAbd NumbParOrChildAbd     Fare
## 1     1        1 female 29.0000               0                 0 211.3375
## 2     1        1   male  0.9167               1                 2 151.5500
## 3     1        0 female  2.0000               1                 2 151.5500
## 4     1        0   male 30.0000               1                 2 151.5500
## 5     1        0 female 25.0000               1                 2 151.5500
## 6     1        1   male 48.0000               0                 0  26.5500
##   EmbarkedAt              HomeAndDestination         Home KnownDestination
## 1          S                    St Louis, MO St Louis, MO                 
## 2          S Montreal, PQ / Chesterville, ON Montreal, PQ Chesterville, ON
## 3          S Montreal, PQ / Chesterville, ON Montreal, PQ Chesterville, ON
## 4          S Montreal, PQ / Chesterville, ON Montreal, PQ Chesterville, ON
## 5          S Montreal, PQ / Chesterville, ON Montreal, PQ Chesterville, ON
## 6          S                    New York, NY New York, NY
Tt=Tt[,c(-9,-10,-11)]
Tt [1:3,]
##   Class Survived Gender     Age NumbSibOrSpsAbd NumbParOrChildAbd     Fare
## 1     1        1 female 29.0000               0                 0 211.3375
## 2     1        1   male  0.9167               1                 2 151.5500
## 3     1        0 female  2.0000               1                 2 151.5500
##   EmbarkedAt
## 1          S
## 2          S
## 3          S
response=Tt$Survived
n=length(Tt$Survived)
n
## [1] 1043
n1=floor(n*(0.6))
n1
## [1] 625
n2=n-n1
n2
## [1] 418
train=sample(1:n,n1)
xTt<-model.matrix(Survived~.,data=Tt)[,-1]
xTt[1:3,]
##   Class Gendermale     Age NumbSibOrSpsAbd NumbParOrChildAbd     Fare
## 1     1          0 29.0000               0                 0 211.3375
## 2     1          1  0.9167               1                 2 151.5500
## 3     1          0  2.0000               1                 2 151.5500
##   EmbarkedAtQ EmbarkedAtS
## 1           0           1
## 2           0           1
## 3           0           1

The predictors are class, gender, age, number of siblings or spouse aboard, number of parents or children aboard, fare, and port of embarkation. The design matrix converts the categorical variables (gender and port of embarkation) into indicator columns.
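
As a quick check on the dummy coding, a sketch (model.matrix uses treatment coding, so the first level of each factor is the baseline):

levels(factor(Tt$Gender))       # "female" is the baseline, so Gendermale is the male indicator
levels(factor(Tt$EmbarkedAt))   # the first port is the baseline; Q and S get indicator columns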

xtrain <- xTt[train,]
xnew <- xTt[-train,]
ytrain <- Tt$Survived[train]
ynew <- Tt$Survived[-train]
m1=glm(Survived~.,family=binomial,data=data.frame(Survived=ytrain,xtrain))
summary(m1)
## 
## Call:
## glm(formula = Survived ~ ., family = binomial, data = data.frame(Survived = ytrain, 
##     xtrain))
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -2.4294  -0.6797  -0.4042   0.6056   2.6323  
## 
## Coefficients:
##                     Estimate Std. Error z value Pr(>|z|)    
## (Intercept)        4.8671157  0.6506118   7.481 7.39e-14 ***
## Class             -1.0598986  0.1740407  -6.090 1.13e-09 ***
## Gendermale        -2.7624796  0.2449004 -11.280  < 2e-16 ***
## Age               -0.0258180  0.0084922  -3.040  0.00236 ** 
## NumbSibOrSpsAbd   -0.3452512  0.1487750  -2.321  0.02031 *  
## NumbParOrChildAbd -0.1282399  0.1505283  -0.852  0.39425    
## Fare               0.0006144  0.0025721   0.239  0.81119    
## EmbarkedAtQ       -1.6296583  0.6136211  -2.656  0.00791 ** 
## EmbarkedAtS       -0.4584157  0.2747837  -1.668  0.09526 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 836.16  on 624  degrees of freedom
## Residual deviance: 552.75  on 616  degrees of freedom
## AIC: 570.75
## 
## Number of Fisher Scoring iterations: 5
ptest <- predict(m1,newdata=data.frame(xnew),type="response")
data.frame(ynew,ptest)[1:10,]
##    ynew     ptest
## 2     1 0.5135071
## 8     0 0.3963783
## 24    1 0.9391378
## 28    1 0.9261165
## 32    1 0.9355692
## 33    1 0.8661888
## 40    1 0.7625426
## 45    1 0.9209794
## 51    1 0.4438197
## 54    1 0.8689101

Cases with a predicted probability of 0.5 or higher are classified as survivors:

gg1=floor(ptest+0.5)
ttt=table(ynew,gg1)
ttt
##     gg1
## ynew   0   1
##    0 208  29
##    1  62 119

At this cutoff the model performs reasonably well, correctly classifying 208 of the 237 non-survivors and 119 of the 181 survivors.

error=(ttt[1,2]+ttt[2,1])/n2
error
## [1] 0.2177033

Cases with a predicted probability of 0.3 or higher are classified as survivors:

gg2=floor(ptest+0.7)
ttt=table(ynew,gg2)
ttt
##     gg2
## ynew   0   1
##    0 172  65
##    1  38 143
error=(ttt[1,2]+ttt[2,1])/n2
error
## [1] 0.2464115

Lowering the cutoff identifies more survivors (143 of 181, versus 119 at the 0.5 cutoff), but at the cost of misclassifying more non-survivors, so the overall error rate rises.
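
The survivor detection rate at each cutoff makes this concrete (ttt here is the 0.3-cutoff table above):

ttt[2,2]/sum(ttt[2,])   # 143 of 181 survivors identified, versus 119 of 181 at the 0.5 cutoff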

cb=cbind(ptest,ynew)
cb[1:10,]
##        ptest ynew
## 2  0.5135071    1
## 8  0.3963783    0
## 24 0.9391378    1
## 28 0.9261165    1
## 32 0.9355692    1
## 33 0.8661888    1
## 40 0.7625426    1
## 45 0.9209794    1
## 51 0.4438197    1
## 54 0.8689101    1
bb1=cb[order(ptest,decreasing=TRUE),]
head(bb1)
##         ptest ynew
## 136 0.9622643    1
## 264 0.9615202    1
## 94  0.9583639    1
## 160 0.9568131    1
## 226 0.9548570    1
## 109 0.9531650    1

The overall success probability for the test data is again used as the lift-chart baseline.

xbar=mean(ynew)
xbar
## [1] 0.4330144
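# As before, ay accumulates successes in order of decreasing predicted probability;
# ax is the baseline expected count at the overall survival rate xbar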
axis=dim(n2)
ax=dim(n2)
ay=dim(n2)
axis[1]=1
ax[1]=xbar
ay[1]=bb1[1,2]
for (i in 2:n2) {
  axis[i]=i
  ax[i]=xbar*i
  ay[i]=ay[i-1]+bb1[i,2]
}

aaa=cbind(bb1[,1],bb1[,2],ay,ax)
aaa[1:10,]
##                 ay        ax
## 136 0.9622643 1  1 0.4330144
## 264 0.9615202 1  2 0.8660287
## 94  0.9583639 1  3 1.2990431
## 160 0.9568131 1  4 1.7320574
## 226 0.9548570 1  5 2.1650718
## 109 0.9531650 1  6 2.5980861
## 172 0.9520684 1  7 3.0311005
## 95  0.9510112 1  8 3.4641148
## 189 0.9496815 1  9 3.8971292
## 164 0.9443641 1 10 4.3301435
plot(axis,ay,xlab="# of cases",ylab="# of successes",main="Lift: Cum successes sorted by pred val/success prob")
points(axis,ax,type="l")

The gap between the cumulative-success curve and the baseline is larger than it was for the loan data; this model provides reasonable, though not outstanding, lift.
