# load required packages: keras for the CNN, dplyr for data wrangling,
# caret for the confusion matrix
library(keras)
library(dplyr)
library(caret)
# number of output classes (i.e. flower species)
output_n <- 3
# image size to scale down to (original images are 120 x 120 px)
img_width <- 120
img_height <- 120
target_size <- c(img_width, img_height)
# RGB = 3 channels
channels <- 3
1- Download the image dataset (Iris_Imgs.7z) from Canvas.
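One way to unpack the archive from R, as a minimal sketch: it assumes the 7-Zip command-line tool (`7z`) is installed and that the archive was saved to ~/Downloads (adjust the paths to your setup).
# assumed location of the downloaded archive; extraction requires the 7z CLI
archive_path <- path.expand("~/Downloads/Iris_Imgs.7z")
out_dir      <- path.expand("~/Downloads")
system2("7z", args = c("x", archive_path, paste0("-o", out_dir)))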
# Test data settings
test_datagen <- image_data_generator(rescale = 1/255, shear_range = 0.2,
                                     zoom_range = 0.2, horizontal_flip = TRUE)
dir <- "~/Downloads/Iris_Imgs/"
test_generator <- flow_images_from_directory(directory = dir,
                                             generator = test_datagen,
                                             target_size = target_size,
                                             shuffle = TRUE,
                                             batch_size = 50,
                                             class_mode = "sparse")
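The next step also pulls a batch from a train_generator that is not defined in this excerpt. A minimal sketch of what it presumably looks like, mirroring the test settings above; reading from the same directory is an assumption, and in practice you would point it at a separate training folder with one subfolder per class.
# assumed training generator (not shown in the original excerpt):
# same rescaling and augmentation settings as the test generator
train_datagen <- image_data_generator(rescale = 1/255, shear_range = 0.2,
                                      zoom_range = 0.2, horizontal_flip = TRUE)
train_generator <- flow_images_from_directory(directory = dir,
                                              generator = train_datagen,
                                              target_size = target_size,
                                              shuffle = TRUE,
                                              batch_size = 50,
                                              class_mode = "sparse")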
# Extract one batch of images and labels from each generator
Training_set <- generator_next(generator = train_generator)
Test_set <- generator_next(generator = test_generator)
# Split into images and labels
Train_images <- Training_set[[1]]
Train_labels <- Training_set[[2]]
Test_images <- Test_set[[1]]
Test_labels <- Test_set[[2]]
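An optional sanity check before training, to confirm the batch shape and the sparse label encoding:
# optional check: image tensor dimensions and label distribution
dim(Train_images)    # e.g. batch x 120 x 120 x 3
table(Train_labels)  # sparse labels 0, 1, 2 for the three species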
# define batch size and number of epochs (values used in the fit() call below)
batch_size <- 15
epochs <- 5
2- Write a program using Python Keras, MATLAB, or any programming language of your choice to classify the images into 3 classes (setosa, versicolor, virginica) using a CNN.
# initialise model
model <- keras_model_sequential()
# add layers
model %>%
  # First convolutional layer
  layer_conv_2d(filters = 30, kernel_size = c(5,5), padding = "same",
                input_shape = c(img_width, img_height, channels)) %>%
  layer_activation("relu") %>%
  # Second convolutional layer
  layer_conv_2d(filters = 15, kernel_size = c(3,3), padding = "same") %>%
  layer_activation("relu") %>%
  layer_batch_normalization() %>%
  # Use max pooling
  layer_max_pooling_2d(pool_size = c(2,2)) %>%
  layer_dropout(0.25) %>%
  # Flatten the max-pooled output into a feature vector
  # and feed it into a dense layer
  layer_flatten() %>%
  layer_dense(100) %>%
  layer_activation("relu") %>%
  layer_dropout(0.5) %>%
  # Outputs from the dense layer are projected onto the output layer
  layer_dense(output_n) %>%
  layer_activation("softmax")
# compile
model %>% compile(
  loss = "sparse_categorical_crossentropy",
  optimizer = optimizer_rmsprop(lr = 0.001),
  metrics = "accuracy"
)
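The sparse_categorical_crossentropy loss matches the integer labels produced by class_mode = "sparse". If the labels were one-hot encoded instead, the equivalent setup would look like the sketch below (not used in this report).
# alternative (not used here): one-hot labels with categorical_crossentropy
# Train_labels_onehot <- to_categorical(Train_labels, num_classes = output_n)
# model %>% compile(loss = "categorical_crossentropy",
#                   optimizer = optimizer_rmsprop(lr = 0.001),
#                   metrics = "accuracy")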
# The model summary
summary(model)
## Model: "sequential"
## ___________________________________________________________________________
## Layer (type) Output Shape Param #
## ===========================================================================
## conv2d (Conv2D) (None, 120, 120, 30) 2280
## ___________________________________________________________________________
## activation (Activation) (None, 120, 120, 30) 0
## ___________________________________________________________________________
## conv2d_1 (Conv2D) (None, 120, 120, 15) 4065
## ___________________________________________________________________________
## activation_1 (Activation) (None, 120, 120, 15) 0
## ___________________________________________________________________________
## batch_normalization (BatchNormal (None, 120, 120, 15) 60
## ___________________________________________________________________________
## max_pooling2d (MaxPooling2D) (None, 60, 60, 15) 0
## ___________________________________________________________________________
## dropout (Dropout) (None, 60, 60, 15) 0
## ___________________________________________________________________________
## flatten (Flatten) (None, 54000) 0
## ___________________________________________________________________________
## dense (Dense) (None, 100) 5400100
## ___________________________________________________________________________
## activation_2 (Activation) (None, 100) 0
## ___________________________________________________________________________
## dropout_1 (Dropout) (None, 100) 0
## ___________________________________________________________________________
## dense_1 (Dense) (None, 3) 303
## ___________________________________________________________________________
## activation_3 (Activation) (None, 3) 0
## ===========================================================================
## Total params: 5,406,808
## Trainable params: 5,406,778
## Non-trainable params: 30
## ___________________________________________________________________________
# Fit the model on the training batch
model %>% fit(Train_images, Train_labels, epochs = epochs, batch_size = batch_size)
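If you want to monitor training, fit() returns a history object that can be plotted; a small variation on the call above (the validation_split value is an arbitrary choice for illustration):
# variant: keep the training history and hold out part of the batch for validation
history <- model %>% fit(Train_images, Train_labels,
                         epochs = epochs, batch_size = batch_size,
                         validation_split = 0.2)
plot(history)  # loss and accuracy curves per epoch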
3- Print the configurations of all the layers in your CNN.
# Get the full model configuration
config <- get_config(model)
# print(config)   # uncomment to see the full nested configuration list
# Print the configuration of every layer
for (i in seq_along(model$layers)) {
  print(get_layer(model, index = i))
}
## <tensorflow.python.keras.layers.convolutional.Conv2D>
## <tensorflow.python.keras.layers.core.Activation>
## <tensorflow.python.keras.layers.convolutional.Conv2D>
## <tensorflow.python.keras.layers.core.Activation>
## <tensorflow.python.keras.layers.normalization.BatchNormalization>
## <tensorflow.python.keras.layers.pooling.MaxPooling2D>
## <tensorflow.python.keras.layers.core.Dropout>
## <tensorflow.python.keras.layers.core.Flatten>
## <tensorflow.python.keras.layers.core.Dense>
## <tensorflow.python.keras.layers.core.Activation>
## <tensorflow.python.keras.layers.core.Dropout>
## <tensorflow.python.keras.layers.core.Dense>
## <tensorflow.python.keras.layers.core.Activation>
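A compact alternative, if you only need the layer names rather than the full printout; model$layers exposes the underlying layer objects directly.
# list layer names and count them via the layer objects
sapply(model$layers, function(l) l$name)
length(model$layers)  # 13 layers in this model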
4- Print the confusion matrix of your classification result. What is the accuracy of the classification result? The accuracy can be obtained from the training data, the test data, or both. Please specify how the accuracy is calculated.
#Evaluate the model performance
model %>% evaluate(Test_images, Test_labels)
## $loss
## [1] 0.4372805
##
## $accuracy
## [1] 0.84
# Predict the class of each test image
Predicted <- model %>% predict_classes(Test_images)
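predict_classes() was removed in later versions of Keras; if it is unavailable in your installation, an equivalent is predict() followed by a row-wise argmax, as sketched below.
# equivalent without predict_classes(): take the argmax of the softmax output
# probs <- model %>% predict(Test_images)
# Predicted <- max.col(probs) - 1   # convert 1-based column index to class 0/1/2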
# Store both actuals and predictions in a data frame
Actual_Predict <- as.data.frame(cbind(Test_labels, Predicted))
# Replace numeric classes with their species names
Actual_Predict <- Actual_Predict %>%
  mutate(Test_labels = replace(Test_labels, Test_labels == "1", "Versicolor")) %>%
  mutate(Test_labels = replace(Test_labels, Test_labels == "2", "Virginica")) %>%
  mutate(Test_labels = replace(Test_labels, Test_labels == "0", "Setosa")) %>%
  mutate(Predicted = replace(Predicted, Predicted == "1", "Versicolor")) %>%
  mutate(Predicted = replace(Predicted, Predicted == "2", "Virginica")) %>%
  mutate(Predicted = replace(Predicted, Predicted == "0", "Setosa"))
# Convert actuals and predictions to factors
Actual_Predict$Test_labels <- as.factor(Actual_Predict$Test_labels)
Actual_Predict$Predicted <- as.factor(Actual_Predict$Predicted)
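An equivalent, more compact way to do the relabelling and factor conversion in one step is shown below (commented out, since the dplyr version above already produces the same columns).
# equivalent one-step relabelling using factor levels
# class_names <- c("Setosa", "Versicolor", "Virginica")
# Actual_Predict$Test_labels <- factor(Test_labels, levels = 0:2, labels = class_names)
# Actual_Predict$Predicted   <- factor(Predicted,   levels = 0:2, labels = class_names)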
# Getting the confusion matrix
# (caret's confusionMatrix() expects the predictions first and the true labels as the reference)
confusionMatrix(data = Actual_Predict$Predicted, reference = Actual_Predict$Test_labels)
## Confusion Matrix and Statistics
##
##             Reference
## Prediction   Setosa Versicolor Virginica
##   Setosa         30          0         0
##   Versicolor      6         22         0
##   Virginica       0         10        32
##
## Overall Statistics
##
##                Accuracy : 0.84
##                  95% CI : (0.7532, 0.9057)
##     No Information Rate : 0.36
##     P-Value [Acc > NIR] : < 2.2e-16
##
##                   Kappa : 0.7605
##
##  Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
##                      Class: Setosa Class: Versicolor Class: Virginica
## Sensitivity                 0.8333            0.6875           1.0000
## Specificity                 1.0000            0.9118           0.8529
## Pos Pred Value              1.0000            0.7857           0.7619
## Neg Pred Value              0.9143            0.8611           1.0000
## Prevalence                  0.3600            0.3200           0.3200
## Detection Rate              0.3000            0.2200           0.3200
## Detection Prevalence        0.3000            0.2800           0.4200
## Balanced Accuracy           0.9167            0.7996           0.9265
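How the accuracy is calculated: the 0.84 reported by both evaluate() and confusionMatrix() is test-set accuracy, i.e. the fraction of test images whose predicted class matches the true label. It can be reproduced directly from the stored predictions:
# test accuracy = correctly classified test images / total test images
mean(as.character(Actual_Predict$Predicted) == as.character(Actual_Predict$Test_labels))
## [1] 0.84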