# Radhe Radhe
library(randomForest)
library(dplyr)
library(caTools)
## randomForest 4.6-14
## Type rfNews() to see new features/changes/bug fixes.
##
## Attaching package: 'dplyr'
## The following object is masked from 'package:randomForest':
##
## combine
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
# Importing the dataset and recoding the class attribute into 0 and 1
dataset = read.csv('diab_1.csv', stringsAsFactors = FALSE)
df <- dataset$class
df[df == "tested_positive"] <- "1"
df[df == "tested_negative"] <- "0"
dataset$class <- as.numeric(df)
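# Quick sanity check on the recode (a small addition, not in the original
# run): the class column should now contain only 0 and 1.
table(dataset$class)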
# Splitting the dataset into the Training set and Test set
#install.packages('caTools')
set.seed(789) # fix the random seed for reproducibility
split = sample.split(dataset$class, SplitRatio = 0.76)
tran_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
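# Optional check (an addition, not in the original run): sample.split
# preserves the relative label ratios, so the positive rate should be
# similar in both subsets.
prop.table(table(tran_set$class))
prop.table(table(test_set$class))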
# Fitting the random forest
rf_pima <- randomForest(class ~ ., data = tran_set, mtry = 8, ntree = 171, importance = TRUE)
## Warning in randomForest.default(m, y, ...): The response has five or fewer
## unique values. Are you sure you want to do regression?
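# The warning above appears because class is numeric, so randomForest grows a
# regression forest; its continuous predictions are thresholded at 0.5 below.
# A minimal sketch of the classification alternative (illustrative only, with
# made-up names tran_cls/rf_cls; the results recorded in this document come
# from the regression fit above):
tran_cls <- tran_set
tran_cls$class <- as.factor(tran_cls$class)
rf_cls <- randomForest(class ~ ., data = tran_cls, mtry = 8, ntree = 171, importance = TRUE)
rf_cls_pred <- predict(rf_cls, newdata = test_set)  # returns factor labels directly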
# Testing the Model
#install.packages("caret")
library(caret)
## Loading required package: lattice
## Loading required package: ggplot2
##
## Attaching package: 'ggplot2'
## The following object is masked from 'package:randomForest':
##
## margin
rf_probs <- predict(rf_pima, newdata = test_set)  # continuous scores from the regression forest
rf_pred <- ifelse(rf_probs > 0.5, 1, 0)           # threshold at 0.5 to recover class labels
confusionMatrix(as.factor(rf_pred), as.factor(test_set$class))
## Confusion Matrix and Statistics
##
##           Reference
## Prediction  0  1
##          0 99 23
##          1 21 41
##
## Accuracy : 0.7609
## 95% CI : (0.6926, 0.8206)
## No Information Rate : 0.6522
## P-Value [Acc > NIR] : 0.0009686
##
## Kappa : 0.469
##
## Mcnemar's Test P-Value : 0.8801685
##
## Sensitivity : 0.8250
## Specificity : 0.6406
## Pos Pred Value : 0.8115
## Neg Pred Value : 0.6613
## Prevalence : 0.6522
## Detection Rate : 0.5380
## Detection Prevalence : 0.6630
## Balanced Accuracy : 0.7328
##
## 'Positive' Class : 0
##
ACC_RandomForest <- confusionMatrix(as.factor(rf_pred), as.factor(test_set$class))$overall['Accuracy']
# Random forest graphs
par(mfrow = c(1, 2))
varImpPlot(rf_pima, type = 2, main = "Variable Importance", col = 'black')
plot(rf_pima, main = "Error vs no. of trees grown")
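# The numeric scores behind the importance plot can be read off directly
# (a small addition, not in the original run); type = 2 is the node-impurity
# measure plotted above.
importance(rf_pima, type = 2)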

# Logistic regression model
set.seed(123)
split = sample.split(dataset$class, SplitRatio = 0.75)
Traindata = subset(dataset, split == TRUE)
Testdata = subset(dataset, split == FALSE)
dataset$class <- as.factor(dataset$class) # note: affects the full dataset only; Traindata/Testdata keep the numeric 0/1 coding, which glm(family = binomial) accepts
# Training The Model
glm_Model1 <- glm(class ~ ., data = Traindata, family = binomial)
summary(glm_Model1)
##
## Call:
## glm(formula = class ~ ., family = binomial, data = Traindata)
##
## Deviance Residuals:
##     Min       1Q   Median       3Q      Max
## -2.5786  -0.7009  -0.4046   0.6694   2.8366
##
## Coefficients:
##              Estimate Std. Error z value Pr(>|z|)
## (Intercept) -8.597820   0.836126 -10.283  < 2e-16 ***
## preg         0.107268   0.038914   2.756  0.00584 **
## plas         0.040055   0.004599   8.709  < 2e-16 ***
## pres        -0.018938   0.006538  -2.897  0.00377 **
## skin         0.008982   0.008373   1.073  0.28342
## insu        -0.003051   0.001179  -2.588  0.00966 **
## mass         0.088903   0.017876   4.973 6.58e-07 ***
## pedi         0.794833   0.364194   2.182  0.02908 *
## age          0.020087   0.011341   1.771  0.07652 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 745.11 on 575 degrees of freedom
## Residual deviance: 527.03 on 567 degrees of freedom
## AIC: 545.03
##
## Number of Fisher Scoring iterations: 5
# Dropping the weaker predictors (skin, insu, age) and refitting the model
glm_Model2 <- update(glm_Model1, ~. - skin - insu - age )
summary(glm_Model2)
##
## Call:
## glm(formula = class ~ preg + plas + pres + mass + pedi, family = binomial,
## data = Traindata)
##
## Deviance Residuals:
##     Min       1Q   Median       3Q      Max
## -2.7239  -0.7192  -0.4141   0.6964   2.8894
##
## Coefficients:
##              Estimate Std. Error z value Pr(>|z|)
## (Intercept) -7.821602   0.770056 -10.157  < 2e-16 ***
## preg         0.151722   0.033154   4.576 4.73e-06 ***
## plas         0.036884   0.004024   9.165  < 2e-16 ***
## pres        -0.015398   0.006118  -2.517   0.0118 *
## mass         0.084551   0.016569   5.103 3.35e-07 ***
## pedi         0.690636   0.354260   1.950   0.0512 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 745.11 on 575 degrees of freedom
## Residual deviance: 538.28 on 570 degrees of freedom
## AIC: 550.28
##
## Number of Fisher Scoring iterations: 5
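# A likelihood-ratio test between the nested models (an addition, not in the
# original run) checks whether dropping skin, insu, and age significantly
# worsens the fit.
anova(glm_Model2, glm_Model1, test = "Chisq")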
# Testing the Model
glm_probs <- predict(glm_Model2, newdata = Testdata, type = "response")  # predicted probabilities
glm_pred <- ifelse(glm_probs > 0.5, 1, 0)                                # threshold at 0.5
#print("Confusion Matrix for logistic regression");
table(Predicted = glm_pred, Actual = Testdata$class)
##          Actual
## Predicted   0   1
##         0 102  29
##         1  23  38
confusionMatrix(as.factor(glm_pred), as.factor(Testdata$class)) # confusion matrix for logistic regression
## Confusion Matrix and Statistics
##
##           Reference
## Prediction   0   1
##          0 102  29
##          1  23  38
##
## Accuracy : 0.7292
## 95% CI : (0.6605, 0.7906)
## No Information Rate : 0.651
## P-Value [Acc > NIR] : 0.01287
##
## Kappa : 0.3913
##
## Mcnemar's Test P-Value : 0.48807
##
## Sensitivity : 0.8160
## Specificity : 0.5672
## Pos Pred Value : 0.7786
## Neg Pred Value : 0.6230
## Prevalence : 0.6510
## Detection Rate : 0.5312
## Detection Prevalence : 0.6823
## Balanced Accuracy : 0.6916
##
## 'Positive' Class : 0
##
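# The 0.5 cutoff is just one operating point; an ROC curve summarises all
# cutoffs. A sketch assuming the pROC package is available (not part of the
# original run; roc_glm is an illustrative name):
# install.packages("pROC")
library(pROC)
roc_glm <- roc(Testdata$class, glm_probs)
plot(roc_glm, main = "ROC - logistic regression")
auc(roc_glm)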
# Accuracy of the GLM
Accur_GLM <- confusionMatrix(as.factor(glm_pred), as.factor(Testdata$class))$overall['Accuracy']
Accur_GLM
## Accuracy
## 0.7291667
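# Side-by-side test accuracies (an addition, not in the original run). Note
# that the two models were evaluated on different random splits (seeds 789
# and 123, ratios 0.76 and 0.75), so the comparison is only indicative.
data.frame(Model = c("Random forest", "Logistic regression"),
           Accuracy = c(ACC_RandomForest, Accur_GLM))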