# Brett-Lantz Machine Learning with R
# Chapter 3 k-NN algorithm
# Example - diagnosing breast cancer with
# the k-NN algorithm
# We will investigate the utility of machine learning for detecting cancer by applying
# the k-NN algorithm to measurements of biopsied cells from women with abnormal
# breast masses.
# Step 1 - collecting data
# We will utilize the Wisconsin Breast Cancer Diagnostic dataset from the UCI
# Machine Learning Repository at http://archive.ics.uci.edu/ml
# The breast cancer data includes 569 examples of cancer biopsies, each with
# 32 features. One feature is an identification number, another is the cancer diagnosis,
# and 30 are numeric-valued laboratory measurements. The diagnosis is coded as
# "M" to indicate malignant or "B" to indicate benign.
# Step 2 - exploring and preparing the data
setwd("C:\\Users\\Luis\\Desktop\\Brett-Lantz\\chapter_3")
wbcd <- read.csv("C:\\Users\\Luis\\Desktop\\Brett-Lantz\\chapter_3\\wisc_bc_data.csv",
                 stringsAsFactors = FALSE)
names(wbcd)
##  [1] "id"                "diagnosis"         "radius_mean"      
##  [4] "texture_mean"      "perimeter_mean"    "area_mean"        
##  [7] "smoothness_mean"   "compactness_mean"  "concavity_mean"   
## [10] "points_mean"       "symmetry_mean"     "dimension_mean"   
## [13] "radius_se"         "texture_se"        "perimeter_se"     
## [16] "area_se"           "smoothness_se"     "compactness_se"   
## [19] "concavity_se"      "points_se"         "symmetry_se"      
## [22] "dimension_se"      "radius_worst"      "texture_worst"    
## [25] "perimeter_worst"   "area_worst"        "smoothness_worst" 
## [28] "compactness_worst" "concavity_worst"   "points_worst"     
## [31] "symmetry_worst"    "dimension_worst"
str(wbcd)
## 'data.frame':    569 obs. of  32 variables:
##  $ id               : int  87139402 8910251 905520 868871 9012568 906539 925291 87880 862989 89827 ...
##  $ diagnosis        : chr  "B" "B" "B" "B" ...
##  $ radius_mean      : num  12.3 10.6 11 11.3 15.2 ...
##  $ texture_mean     : num  12.4 18.9 16.8 13.4 13.2 ...
##  $ perimeter_mean   : num  78.8 69.3 70.9 73 97.7 ...
##  $ area_mean        : num  464 346 373 385 712 ...
##  $ smoothness_mean  : num  0.1028 0.0969 0.1077 0.1164 0.0796 ...
##  $ compactness_mean : num  0.0698 0.1147 0.078 0.1136 0.0693 ...
##  $ concavity_mean   : num  0.0399 0.0639 0.0305 0.0464 0.0339 ...
##  $ points_mean      : num  0.037 0.0264 0.0248 0.048 0.0266 ...
##  $ symmetry_mean    : num  0.196 0.192 0.171 0.177 0.172 ...
##  $ dimension_mean   : num  0.0595 0.0649 0.0634 0.0607 0.0554 ...
##  $ radius_se        : num  0.236 0.451 0.197 0.338 0.178 ...
##  $ texture_se       : num  0.666 1.197 1.387 1.343 0.412 ...
##  $ perimeter_se     : num  1.67 3.43 1.34 1.85 1.34 ...
##  $ area_se          : num  17.4 27.1 13.5 26.3 17.7 ...
##  $ smoothness_se    : num  0.00805 0.00747 0.00516 0.01127 0.00501 ...
##  $ compactness_se   : num  0.0118 0.03581 0.00936 0.03498 0.01485 ...
##  $ concavity_se     : num  0.0168 0.0335 0.0106 0.0219 0.0155 ...
##  $ points_se        : num  0.01241 0.01365 0.00748 0.01965 0.00915 ...
##  $ symmetry_se      : num  0.0192 0.035 0.0172 0.0158 0.0165 ...
##  $ dimension_se     : num  0.00225 0.00332 0.0022 0.00344 0.00177 ...
##  $ radius_worst     : num  13.5 11.9 12.4 11.9 16.2 ...
##  $ texture_worst    : num  15.6 22.9 26.4 15.8 15.7 ...
##  $ perimeter_worst  : num  87 78.3 79.9 76.5 104.5 ...
##  $ area_worst       : num  549 425 471 434 819 ...
##  $ smoothness_worst : num  0.139 0.121 0.137 0.137 0.113 ...
##  $ compactness_worst: num  0.127 0.252 0.148 0.182 0.174 ...
##  $ concavity_worst  : num  0.1242 0.1916 0.1067 0.0867 0.1362 ...
##  $ points_worst     : num  0.0939 0.0793 0.0743 0.0861 0.0818 ...
##  $ symmetry_worst   : num  0.283 0.294 0.3 0.21 0.249 ...
##  $ dimension_worst  : num  0.0677 0.0759 0.0788 0.0678 0.0677 ...
# The first variable is an integer variable named id. As this is simply a unique
# identifier (ID) for each patient in the data, it does not provide useful information,
# and we will need to exclude it from the model.
wbcd <- wbcd[-1]
# The next variable, diagnosis, is of particular interest as it is the outcome we
# hope to predict. This feature indicates whether the example is from a benign
# or malignant mass. The table() output indicates that 357 masses are benign
# while 212 are malignant:
table(wbcd$diagnosis)
## 
##   B   M 
## 357 212
# Many R machine learning classifiers require that the target feature is coded as a
# factor, so we will need to recode the diagnosis variable. We will also take this
# opportunity to give the "B" and "M" values more informative labels using the
# labels parameter:
wbcd$diagnosis <- factor(wbcd$diagnosis, levels = c("B", "M"),
                         labels = c("Benign", "Malignant"))
round(prop.table(table(wbcd$diagnosis))*100,digits = 1)
## 
##    Benign Malignant 
##      62.7      37.3 
# summary(wbcd[c("radius_mean", "area_mean", "smoothness_mean")])
summary(wbcd)
##      diagnosis    radius_mean      texture_mean   perimeter_mean  
##  Benign   :357   Min.   : 6.981   Min.   : 9.71   Min.   : 43.79  
##  Malignant:212   1st Qu.:11.700   1st Qu.:16.17   1st Qu.: 75.17  
##                  Median :13.370   Median :18.84   Median : 86.24  
##                  Mean   :14.127   Mean   :19.29   Mean   : 91.97  
##                  3rd Qu.:15.780   3rd Qu.:21.80   3rd Qu.:104.10  
##                  Max.   :28.110   Max.   :39.28   Max.   :188.50  
##    area_mean      smoothness_mean   compactness_mean  concavity_mean   
##  Min.   : 143.5   Min.   :0.05263   Min.   :0.01938   Min.   :0.00000  
##  1st Qu.: 420.3   1st Qu.:0.08637   1st Qu.:0.06492   1st Qu.:0.02956  
##  Median : 551.1   Median :0.09587   Median :0.09263   Median :0.06154  
##  Mean   : 654.9   Mean   :0.09636   Mean   :0.10434   Mean   :0.08880  
##  3rd Qu.: 782.7   3rd Qu.:0.10530   3rd Qu.:0.13040   3rd Qu.:0.13070  
##  Max.   :2501.0   Max.   :0.16340   Max.   :0.34540   Max.   :0.42680  
##   points_mean      symmetry_mean    dimension_mean      radius_se     
##  Min.   :0.00000   Min.   :0.1060   Min.   :0.04996   Min.   :0.1115  
##  1st Qu.:0.02031   1st Qu.:0.1619   1st Qu.:0.05770   1st Qu.:0.2324  
##  Median :0.03350   Median :0.1792   Median :0.06154   Median :0.3242  
##  Mean   :0.04892   Mean   :0.1812   Mean   :0.06280   Mean   :0.4052  
##  3rd Qu.:0.07400   3rd Qu.:0.1957   3rd Qu.:0.06612   3rd Qu.:0.4789  
##  Max.   :0.20120   Max.   :0.3040   Max.   :0.09744   Max.   :2.8730  
##    texture_se      perimeter_se       area_se        smoothness_se     
##  Min.   :0.3602   Min.   : 0.757   Min.   :  6.802   Min.   :0.001713  
##  1st Qu.:0.8339   1st Qu.: 1.606   1st Qu.: 17.850   1st Qu.:0.005169  
##  Median :1.1080   Median : 2.287   Median : 24.530   Median :0.006380  
##  Mean   :1.2169   Mean   : 2.866   Mean   : 40.337   Mean   :0.007041  
##  3rd Qu.:1.4740   3rd Qu.: 3.357   3rd Qu.: 45.190   3rd Qu.:0.008146  
##  Max.   :4.8850   Max.   :21.980   Max.   :542.200   Max.   :0.031130  
##  compactness_se      concavity_se       points_se       
##  Min.   :0.002252   Min.   :0.00000   Min.   :0.000000  
##  1st Qu.:0.013080   1st Qu.:0.01509   1st Qu.:0.007638  
##  Median :0.020450   Median :0.02589   Median :0.010930  
##  Mean   :0.025478   Mean   :0.03189   Mean   :0.011796  
##  3rd Qu.:0.032450   3rd Qu.:0.04205   3rd Qu.:0.014710  
##  Max.   :0.135400   Max.   :0.39600   Max.   :0.052790  
##   symmetry_se        dimension_se        radius_worst   texture_worst  
##  Min.   :0.007882   Min.   :0.0008948   Min.   : 7.93   Min.   :12.02  
##  1st Qu.:0.015160   1st Qu.:0.0022480   1st Qu.:13.01   1st Qu.:21.08  
##  Median :0.018730   Median :0.0031870   Median :14.97   Median :25.41  
##  Mean   :0.020542   Mean   :0.0037949   Mean   :16.27   Mean   :25.68  
##  3rd Qu.:0.023480   3rd Qu.:0.0045580   3rd Qu.:18.79   3rd Qu.:29.72  
##  Max.   :0.078950   Max.   :0.0298400   Max.   :36.04   Max.   :49.54  
##  perimeter_worst    area_worst     smoothness_worst  compactness_worst
##  Min.   : 50.41   Min.   : 185.2   Min.   :0.07117   Min.   :0.02729  
##  1st Qu.: 84.11   1st Qu.: 515.3   1st Qu.:0.11660   1st Qu.:0.14720  
##  Median : 97.66   Median : 686.5   Median :0.13130   Median :0.21190  
##  Mean   :107.26   Mean   : 880.6   Mean   :0.13237   Mean   :0.25427  
##  3rd Qu.:125.40   3rd Qu.:1084.0   3rd Qu.:0.14600   3rd Qu.:0.33910  
##  Max.   :251.20   Max.   :4254.0   Max.   :0.22260   Max.   :1.05800  
##  concavity_worst   points_worst     symmetry_worst   dimension_worst  
##  Min.   :0.0000   Min.   :0.00000   Min.   :0.1565   Min.   :0.05504  
##  1st Qu.:0.1145   1st Qu.:0.06493   1st Qu.:0.2504   1st Qu.:0.07146  
##  Median :0.2267   Median :0.09993   Median :0.2822   Median :0.08004  
##  Mean   :0.2722   Mean   :0.11461   Mean   :0.2901   Mean   :0.08395  
##  3rd Qu.:0.3829   3rd Qu.:0.16140   3rd Qu.:0.3179   3rd Qu.:0.09208  
##  Max.   :1.2520   Max.   :0.29100   Max.   :0.6638   Max.   :0.20750
summary(wbcd$radius_mean)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##   6.981  11.700  13.370  14.130  15.780  28.110
summary(wbcd$area_mean)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##   143.5   420.3   551.1   654.9   782.7  2501.0
summary(wbcd$smoothness_mean)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
## 0.05263 0.08637 0.09587 0.09636 0.10530 0.16340
# Looking at the features side-by-side, do you notice anything problematic about the
# values? Recall that the distance calculation for k-NN is heavily dependent upon
# the measurement scale of the input features. Since smoothness ranges from 0.05 to
# 0.16 and area ranges from 143.5 to 2501.0, the impact of area is going to be much
# larger than that of smoothness in the distance calculation. This could potentially cause
# problems for our classifier, so let's apply normalization to rescale the features to a
# standard range of values.
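# To see the problem concretely (a hypothetical illustration, not taken from the
# dataset): if two masses differ by 0.01 in smoothness but by 100 in area, the
# Euclidean distance is dominated almost entirely by area.
sqrt(0.01^2 + 100^2)   # ~100.0000005: the smoothness difference is effectively invisible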
# Transformation - normalizing numeric data
# To normalize these features, we need to create a normalize() function in R. This
# function takes a vector x of numeric values, and for each value in x, subtracts the
# minimum value in x and divides by the range of values in x. Finally, the resulting
# vector is returned.
normalize <- function(x){
  return((x-min(x))/(max(x)-min(x)))
}
# After executing the preceding code, the normalize() function is available for use in
# R. Let's test the function on a couple of vectors:
normalize(c(1,2,3,4,5))
## [1] 0.00 0.25 0.50 0.75 1.00
normalize(c(10,20,30,40,100))
## [1] 0.0000000 0.1111111 0.2222222 0.3333333 1.0000000
# The lapply() function takes a list and applies a specified function to each list
# element. As a data frame is a list of equal-length vectors, we can use lapply() to
# apply normalize() to each feature in the data frame. The final step is to convert the
# list returned by lapply() to a data frame, using the as.data.frame() function. The
# full process looks like this:
wbcd_n <- as.data.frame(lapply(wbcd[2:31], normalize))
# To confirm that the transformation was applied correctly, let's look at one variable's
# summary statistics:
summary(wbcd_n$area_mean)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##  0.0000  0.1174  0.1729  0.2169  0.2711  1.0000
# As expected, the area_mean variable, which originally ranged from 143.5 to 2501.0,
# now ranges from 0 to 1.
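# As an extra sanity check (not part of the original walkthrough), we can verify
# that every normalized feature now spans exactly 0 to 1:
all(sapply(wbcd_n, min) == 0 & sapply(wbcd_n, max) == 1)   # should return TRUE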
# Data preparation - creating training and test datasets
# We will divide our data into two portions: a training dataset that will be used to
# build the k-NN model and a test dataset that will be used to estimate the predictive
# accuracy of the model.
wbcd_train <- wbcd_n[1:469,]
wbcd_test <- wbcd_n[470:569,]
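# This simple index-based split relies on the records not being stored in any
# meaningful order. If they were, we could instead draw a random split; a minimal
# sketch, left commented out so the results below still match (the label vectors in
# the next step would need to use the same indices):
# set.seed(123)
# train_idx  <- sample(nrow(wbcd_n), 469)
# wbcd_train <- wbcd_n[train_idx, ]
# wbcd_test  <- wbcd_n[-train_idx, ]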
# When we constructed our normalized training and test datasets, we excluded the
# target variable, diagnosis. For training the k-NN model, we will need to store
# these class labels in factor vectors, split between the training and test datasets:
wbcd_train_labels <- wbcd[1:469,1]
wbcd_test_labels <- wbcd[470:569,1]
# This code takes the diagnosis factor in the first column of the wbcd data frame, and
# creates the vectors wbcd_train_labels and wbcd_test_labels. We will use these
# in the next steps of training and evaluating our classifier.
# Step 3 - training a model on the data
# To classify our test instances, we will use a k-NN implementation from the class
# package, which provides a set of basic R functions for classification. If this package
# is not already installed on your system, you can install it by typing
# install.packages("class") and then load it with the library() function:
library(class)
# The knn() function in the class package provides a standard, classic
# implementation of the k-NN algorithm. For each instance in the test data, the
# function will identify the k-Nearest Neighbors, using Euclidean distance, where k is
# a user-specified number. The test instance is classified by taking a "vote" among the
# k-Nearest Neighbors-specifically, this involves assigning the class of the majority of
# the k neighbors. A tie vote is broken at random.
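# Here we use k = 21, roughly the square root of the 469 training examples (a common
# rule of thumb) and an odd number, which avoids tied votes between the two classes:
sqrt(nrow(wbcd_train))   # about 21.7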
wbcd_test_pred <- knn(train = wbcd_train,test = wbcd_test,
                      cl=wbcd_train_labels,k=21)
# The knn() function returns a factor vector of predicted labels for each of the
# examples in the test dataset, which we have assigned to wbcd_test_pred.
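# A quick peek at the predictions before the formal evaluation (the counts here
# simply restate the column totals of the cross-tabulation in the next step):
table(wbcd_test_pred)
##    Benign Malignant 
##        63        37 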
# Step 4 - evaluating model performance
# The next step of the process is to evaluate how well the predicted classes in the
# wbcd_test_pred vector match up with the known values in the wbcd_test_labels vector.
# To do this, we can use the CrossTable() function in the gmodels package:
library(gmodels)
CrossTable(x=wbcd_test_labels,y=wbcd_test_pred,
           prop.chisq = FALSE)
## 
##  
##    Cell Contents
## |-------------------------|
## |                       N |
## |           N / Row Total |
## |           N / Col Total |
## |         N / Table Total |
## |-------------------------|
## 
##  
## Total Observations in Table:  100 
## 
##  
##                  | wbcd_test_pred 
## wbcd_test_labels |    Benign | Malignant | Row Total | 
## -----------------|-----------|-----------|-----------|
##           Benign |        61 |         0 |        61 | 
##                  |     1.000 |     0.000 |     0.610 | 
##                  |     0.968 |     0.000 |           | 
##                  |     0.610 |     0.000 |           | 
## -----------------|-----------|-----------|-----------|
##        Malignant |         2 |        37 |        39 | 
##                  |     0.051 |     0.949 |     0.390 | 
##                  |     0.032 |     1.000 |           | 
##                  |     0.020 |     0.370 |           | 
## -----------------|-----------|-----------|-----------|
##     Column Total |        63 |        37 |       100 | 
##                  |     0.630 |     0.370 |           | 
## -----------------|-----------|-----------|-----------|
## 
## 
# The cell percentages in the table indicate the proportion of values that fall into four
# categories. The top-left cell indicates the true negative results. These 61 of 100 values
# are cases where the mass was benign and the k-NN algorithm correctly identified it
# as such. The bottom-right cell indicates the true positive results, where the classifier
# and the clinically determined label agree that the mass is malignant. A total of 37 of
# 100 predictions were true positives.
# The cells falling on the other diagonal contain counts of examples where the k-NN
# approach disagreed with the true label. The two examples in the lower-left cell are
# false negative results; in this case, the predicted value was benign, but the tumor
# was actually malignant. Errors in this direction could be extremely costly as they
# might lead a patient to believe that she is cancer-free, but in reality, the disease may
# continue to spread. The top-right cell would contain the false positive results, if
# there were any. These values occur when the model classifies a mass as malignant,
# but in reality, it was benign. Although such errors are less dangerous than a false
# negative result, they should also be avoided as they could lead to additional financial
# burden on the health care system or additional stress for the patient as additional
# tests or treatment may have to be provided.
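# As a supplementary one-line summary (not part of the original walkthrough), the
# overall accuracy can be computed directly: (61 + 37) / 100 = 0.98.
mean(wbcd_test_pred == wbcd_test_labels)   # 0.98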
# Step 5 - improving model performance
# Transformation - z-score standardization
# Although normalization is traditionally used for k-NN classification, it may
# not always be the most appropriate way to rescale features. Since the z-score
# standardized values have no predefined minimum and maximum, extreme values
# are not compressed towards the center. One might suspect that with a malignant
# tumor, we might see some very extreme outliers as the tumors grow uncontrollably.
# It might, therefore, be reasonable to allow the outliers to be weighted more heavily in
# the distance calculation. Let's see whether z-score standardization can improve our
# predictive accuracy.
# To standardize a vector, we can use R's built-in scale() function, which, by
# default, rescales values using the z-score standardization. The scale() function
# offers the additional benefit that it can be applied directly to a data frame, so we can
# avoid the use of the lapply() function. To create a z-score standardized version of
# the wbcd data, we can use the following command:
wbcd_z <- as.data.frame(scale(wbcd[-1]))
summary(wbcd_z$area_mean)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
## -1.4530 -0.6666 -0.2949  0.0000  0.3632  5.2460
# The mean of a z-score standardized variable should always be zero, and the range
# should be fairly compact. A z-score greater than 3 or less than -3 indicates an
# extremely rare value. With this in mind, the transformation seems to have worked.
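# For reference, scale() with its default arguments is equivalent to subtracting each
# column's mean and dividing by its standard deviation; a hand-rolled sketch (the
# names standardize and wbcd_z2 are illustrative, not from the original script):
standardize <- function(x) {
  (x - mean(x)) / sd(x)
}
# wbcd_z2 <- as.data.frame(lapply(wbcd[-1], standardize))  # same result as scale()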
# As we had done earlier, we need to divide the data into training and test sets, and
# then classify the test instances using the knn() function. We'll then compare the
# predicted labels to the actual labels using CrossTable():
wbcd_train <- wbcd_z[1:469,]
wbcd_test <- wbcd_z[470:569,]
wbcd_train_labels <- wbcd[1:469,1]
wbcd_test_labels <- wbcd[470:569,1]
wbcd_test_pred <- knn(train = wbcd_train,test = wbcd_test,
                      cl=wbcd_train_labels,k=21)
CrossTable(x=wbcd_test_labels,y=wbcd_test_pred,
           prop.chisq = FALSE)
## 
##  
##    Cell Contents
## |-------------------------|
## |                       N |
## |           N / Row Total |
## |           N / Col Total |
## |         N / Table Total |
## |-------------------------|
## 
##  
## Total Observations in Table:  100 
## 
##  
##                  | wbcd_test_pred 
## wbcd_test_labels |    Benign | Malignant | Row Total | 
## -----------------|-----------|-----------|-----------|
##           Benign |        61 |         0 |        61 | 
##                  |     1.000 |     0.000 |     0.610 | 
##                  |     0.924 |     0.000 |           | 
##                  |     0.610 |     0.000 |           | 
## -----------------|-----------|-----------|-----------|
##        Malignant |         5 |        34 |        39 | 
##                  |     0.128 |     0.872 |     0.390 | 
##                  |     0.076 |     1.000 |           | 
##                  |     0.050 |     0.340 |           | 
## -----------------|-----------|-----------|-----------|
##     Column Total |        66 |        34 |       100 | 
##                  |     0.660 |     0.340 |           | 
## -----------------|-----------|-----------|-----------|
## 
## 
# Unfortunately, the results of our new transformation show a slight decline in
# accuracy. Where we had previously classified 98 percent of the examples correctly,
# we classified only 95 percent correctly this time. Making matters worse, the number
# of dangerous false negatives increased: five malignant masses were misclassified as
# benign, compared with two before.
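# A natural next step (a sketch, not part of the output above) is to return to the
# normalized data and try several alternative values of k, comparing accuracy and,
# more importantly, the number of false negatives; exact counts will vary slightly
# because tied votes are broken at random:
wbcd_train_n <- wbcd_n[1:469, ]
wbcd_test_n  <- wbcd_n[470:569, ]
for (k_val in c(1, 5, 11, 15, 21, 27)) {
  pred <- knn(train = wbcd_train_n, test = wbcd_test_n,
              cl = wbcd_train_labels, k = k_val)
  cat("k =", k_val,
      "| accuracy =", mean(pred == wbcd_test_labels),
      "| false negatives =", sum(pred == "Benign" & wbcd_test_labels == "Malignant"),
      "\n")
}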