Section 16 Bayes Theorem
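
A brief reminder of the result the classifier below relies on: Bayes' theorem, combined with the "naive" assumption that the features are conditionally independent given the class,

$$P(y \mid x_1, \ldots, x_n) \;=\; \frac{P(y)\,\prod_{i=1}^{n} P(x_i \mid y)}{P(x_1, \ldots, x_n)}$$

The predicted class is the value of y that maximizes the numerator; the denominator is the same for every class and can be ignored.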

Set the working directory if needed (setwd()), then read the dataset

df <-  read.csv("G:\\RStudio\\udemy\\ml\\Machine Learning AZ\\Part 3 - Classification\\Section 18 - Naive Bayes\\Naive_Bayes\\Social_Network_Ads.csv")
head(df)

Select the fields that we will be working with

df <- df[,3:5]
head(df)
# encode the outcome as a factor for naive Bayes
df$Purchased <-  factor(df$Purchased, levels = c(0,1))
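
An optional check, using only base R, to confirm the encoding and the class balance:

str(df$Purchased)    # should show a factor with levels "0" and "1"
table(df$Purchased)  # class counts for the outcome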

Split dataset into training and test set (300 training, 100 test)

library(caTools)
set.seed(1234)
split <- sample.split(df$Purchased, SplitRatio = 0.75)
training_set <- subset(df, split == TRUE)
test_set <- subset(df, split == FALSE)
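
As a quick sanity check, the 0.75 split ratio should give 300 training and 100 test rows:

nrow(training_set)  # expected: 300
nrow(test_set)      # expected: 100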

For classification, it is better to apply feature scaling (standardization)

# Feature scaling: column 1 is Age, column 2 is EstimatedSalary
training_set[,1:2] <-  scale(training_set[,1:2])
test_set[,1:2] <-  scale(test_set[,1:2])
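
Note that scale() standardizes each column to mean 0 and standard deviation 1, and that here the test set is scaled with its own statistics. A common stricter alternative (a sketch of what could replace the chunk above, not what this notebook does) reuses the training set's centers and scales on the test set:

# Alternative to the chunk above: standardize the test set with training-set parameters
sc <- scale(training_set[,1:2])
training_set[,1:2] <- sc
test_set[,1:2] <- scale(test_set[,1:2],
                        center = attr(sc, "scaled:center"),
                        scale  = attr(sc, "scaled:scale"))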

Fitting Classifier to the Training Set

# Create the classifier here
# install.packages("e1071")
library(e1071)
# remove the last column since that is the dependent variable [-3]
classifier <- naiveBayes(x = training_set[-3],
                         y = training_set$Purchased)
summary(classifier)
        Length Class  Mode     
apriori 2      table  numeric  
tables  2      -none- list     
levels  2      -none- character
call    3      -none- call     
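
The apriori component holds the class counts used as priors, and tables holds, for each feature, the per-class mean and standard deviation of the (scaled) values; inspecting them shows what the model actually learned:

classifier$apriori       # class counts for Purchased = 0 and 1
classifier$tables$Age    # per-class mean and sd of scaled Age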

Predicting the test set results

y_pred <- predict(classifier, newdata = test_set[-3])
# predict() on a naiveBayes model returns a factor of class labels (levels 0 and 1)
y_pred
  [1] 0 0 1 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0
 [39] 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 0 1 1 0 1 0 0 0 1 1 0 1 1 1 0 0 0 1 0 1 1
 [77] 0 1 0 1 0 1 1 0 0 1 0 1 0 0 1 0 1 1 1 0 0 1 1 1
Levels: 0 1
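
Instead of hard labels, predict() can also return the class posterior probabilities (type = "raw"), which is useful for seeing how confident the model is about each observation:

# Posterior probabilities for the first few test-set observations
prob_pred <- predict(classifier, newdata = test_set[-3], type = "raw")
head(prob_pred)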

Evaluate the predictions using a confusion matrix.

# Making the confusion matrix
# column 3 of test_set is the outcome (Purchased)
cm <- table(test_set[,3], y_pred)
cm
   y_pred
     0  1
  0 59  5
  1  6 30
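
From this matrix the overall accuracy is (59 + 30) / 100 = 0.89; the same number can be computed directly:

accuracy <- sum(diag(cm)) / sum(cm)
accuracy  # 0.89 for the matrix above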

Plot the training set results

# install.packages("ElemStatLearn")
library(ElemStatLearn)
package 'ElemStatLearn' was built under R version 3.3.3
set <- training_set
X1 <- seq(min(set[,1]) - 1, max(set[,1]) + 1, by = 0.01)
X2 <- seq(min(set[,2]) - 1, max(set[,2]) + 1, by = 0.01)
grid_set <- expand.grid(X1, X2)
colnames(grid_set) = c('Age','EstimatedSalary')
y_grid = predict(classifier,  newdata = grid_set)
plot(set[,-3],
     main = 'Classifier Model (Training Set)',
     xlab =  'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg= ifelse(set[,3] == 1,'green4', 'red3'))

Plot description

The red region is the area the classifier predicts as "Don't buy".
The green region is the area the classifier predicts as "Buy".
The red dots are the people who actually did not buy.
The green dots are the people who actually bought.
The contour line is the prediction boundary.

Now we see the result for the test set

set <- test_set
X1 <- seq(min(set[,1]) - 1, max(set[,1]) + 1, by = 0.01)
X2 <- seq(min(set[,2]) - 1, max(set[,2]) + 1, by = 0.01)
grid_set <- expand.grid(X1, X2)
colnames(grid_set) = c('Age','EstimatedSalary')
y_grid = predict(classifier,  newdata = grid_set)
plot(set[,-3],
     main = 'Classifier Model (Test Set)',
     xlab =  'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg= ifelse(set[,3] == 1,'green4', 'red3'))
