# If the virtual environment does not exist, create it
if (!reticulate::virtualenv_exists('new-env')) {
  reticulate::virtualenv_create('new-env')
}
# Ensure that TensorFlow is installed
tensorflow::install_tensorflow(envname = 'new-env')
## Using virtual environment "new-env" ...
## + "C:/Users/10207/OneDrive/文档/.virtualenvs/new-env/Scripts/python.exe" -m pip install --upgrade --no-user "tensorflow==2.16.*"
##
## Installation complete.
# Activate the virtual environment
reticulate::use_virtualenv('new-env', required = TRUE)
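As an optional sanity check (a small sketch, not part of the original setup), the interpreter and TensorFlow build that reticulate and the tensorflow package picked up can be inspected; both should point at the 'new-env' environment created above.
# Optional check: which Python interpreter and TensorFlow installation are in use?
reticulate::py_config()
tensorflow::tf_config()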
# Load required libraries
suppressPackageStartupMessages({
  library(tensorflow)
  library(keras3)
  library(ggplot2)
})
library(magrittr)
# Load the Fashion MNIST data
mnist <- dataset_fashion_mnist()
# create a 60,000x28x28 tensor for the training images
train_images <- mnist$train$x
# create a 60,000-element vector for the training labels
train_labels <- mnist$train$y
# create a 10,000x28x28 tensor for the test images
test_images <- mnist$test$x
# create a 10,000-element vector for the test labels
test_labels <- mnist$test$y
# select the 17th training image
digit <- train_images[17,,]
# plot it!
plot(as.raster(digit, max = 255))
### 3. Model 1: RMSprop Optimiser
# Defining the model
model1 <- keras_model_sequential() %>%
  layer_dense(units = 512, activation = "relu", input_shape = c(28 * 28)) %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 256, activation = "relu") %>%
  layer_dropout(rate = 0.3) %>%
  layer_dense(units = 128, activation = "relu") %>%
  layer_dense(units = 10, activation = "softmax")
model1 %>% compile(
  optimizer = optimizer_rmsprop(),
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)
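Optionally, the architecture can be inspected before training; this is a small sketch, not part of the original script. Because the input shape is given in the first layer, keras can report per-layer parameter counts here; if the model were still unbuilt, summary() would simply note that instead.
# Inspect layers and parameter counts of model 1 (optional)
summary(model1)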
# Reshape and normalize images
train_images <- array_reshape(train_images, c(60000, 28 * 28)) / 255
test_images <- array_reshape(test_images, c(10000, 28 * 28)) / 255
# One-hot encoding of labels
train_labels <- to_categorical(train_labels)
test_labels <- to_categorical(test_labels)
# Defining class labels
class_labels <- c('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                  'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')
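As a quick sketch (not part of the original pipeline), the class name of the training image plotted earlier can be looked up from the raw integer labels, and the reshaped arrays can be checked for the expected dimensions. The raw labels in mnist$train$y run from 0 to 9, hence the +1 for R's 1-based indexing.
# Class name of the 17th training image (raw labels are 0-9, hence the +1)
class_labels[mnist$train$y[17] + 1]
# Expected shapes: 60000 x 784 image matrix and 60000 x 10 one-hot label matrix
dim(train_images)
dim(train_labels)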
# Training the model
history1 <- model1 %>% fit(
  train_images, train_labels,
  epochs = 20, batch_size = 128,
  validation_split = 0.3
)
## Epoch 1/20
## 329/329 - 2s - 5ms/step - accuracy: 0.7518 - loss: 0.6785 - val_accuracy: 0.7665 - val_loss: 0.5971
## Epoch 2/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8278 - loss: 0.4722 - val_accuracy: 0.8507 - val_loss: 0.4212
## Epoch 3/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8426 - loss: 0.4317 - val_accuracy: 0.8303 - val_loss: 0.4531
## Epoch 4/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8537 - loss: 0.4036 - val_accuracy: 0.8501 - val_loss: 0.4178
## Epoch 5/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8612 - loss: 0.3821 - val_accuracy: 0.8683 - val_loss: 0.3591
## Epoch 6/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8650 - loss: 0.3688 - val_accuracy: 0.8617 - val_loss: 0.3787
## Epoch 7/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8688 - loss: 0.3561 - val_accuracy: 0.8692 - val_loss: 0.3535
## Epoch 8/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8734 - loss: 0.3444 - val_accuracy: 0.8592 - val_loss: 0.3960
## Epoch 9/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8765 - loss: 0.3385 - val_accuracy: 0.8641 - val_loss: 0.3676
## Epoch 10/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8788 - loss: 0.3330 - val_accuracy: 0.8720 - val_loss: 0.3747
## Epoch 11/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8817 - loss: 0.3253 - val_accuracy: 0.8801 - val_loss: 0.3380
## Epoch 12/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8810 - loss: 0.3207 - val_accuracy: 0.8771 - val_loss: 0.3411
## Epoch 13/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8844 - loss: 0.3132 - val_accuracy: 0.8836 - val_loss: 0.3342
## Epoch 14/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8853 - loss: 0.3126 - val_accuracy: 0.8811 - val_loss: 0.3331
## Epoch 15/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8873 - loss: 0.3054 - val_accuracy: 0.8805 - val_loss: 0.3324
## Epoch 16/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8885 - loss: 0.3059 - val_accuracy: 0.8852 - val_loss: 0.3233
## Epoch 17/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8901 - loss: 0.2999 - val_accuracy: 0.8869 - val_loss: 0.3363
## Epoch 18/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8901 - loss: 0.2975 - val_accuracy: 0.8765 - val_loss: 0.3442
## Epoch 19/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8923 - loss: 0.2916 - val_accuracy: 0.8727 - val_loss: 0.3646
## Epoch 20/20
## 329/329 - 1s - 4ms/step - accuracy: 0.8956 - loss: 0.2842 - val_accuracy: 0.8888 - val_loss: 0.3264
# Evaluating the model performance
evaluation1 <- model1 %>% evaluate(test_images, test_labels)
## 313/313 - 0s - 811us/step - accuracy: 0.8818 - loss: 0.3572
# Index the named result so the accuracy (not the loss) is reported
cat("Model 1 Accuracy:", evaluation1$accuracy, "\n")
## Model 1 Accuracy: 0.8818
# Plot the training history
plot(history1) + theme_bw()
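Beyond the overall accuracy, per-class behaviour can be examined. The following is a minimal sketch (not part of the original analysis) that derives predicted classes for the test set from model 1 and tabulates them against the true classes.
# Sketch: confusion matrix for model 1 on the test set
probs1 <- predict(model1, test_images)                # 10000 x 10 class probabilities
pred_classes1 <- apply(probs1, 1, which.max) - 1      # predicted labels, back to 0-9
true_classes1 <- apply(test_labels, 1, which.max) - 1 # recover labels from one-hot rows
table(predicted = class_labels[pred_classes1 + 1],
      actual    = class_labels[true_classes1 + 1])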
### 4. Model 2: Nadam Optimiser
# Define the second model
model_two <- keras_model_sequential() %>%
  layer_dense(units = 1024, activation = "relu", input_shape = c(28 * 28)) %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 512, activation = "relu") %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 256, activation = "relu") %>%
  layer_dense(units = 10, activation = "softmax")
model_two %>% compile(
  optimizer = optimizer_nadam(),
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)
# Train model 2
history_2 <- model_two %>% fit(
  x = train_images,   # pre-processed (reshaped and normalised) images
  y = train_labels,   # one-hot encoded labels
  epochs = 20,
  batch_size = 128,
  validation_split = 0.2
)
## Epoch 1/20
## 375/375 - 4s - 10ms/step - accuracy: 0.7801 - loss: 0.6035 - val_accuracy: 0.8503 - val_loss: 0.4081
## Epoch 2/20
## 375/375 - 3s - 7ms/step - accuracy: 0.8382 - loss: 0.4464 - val_accuracy: 0.8598 - val_loss: 0.3877
## Epoch 3/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8486 - loss: 0.4105 - val_accuracy: 0.8677 - val_loss: 0.3610
## Epoch 4/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8581 - loss: 0.3915 - val_accuracy: 0.8747 - val_loss: 0.3479
## Epoch 5/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8633 - loss: 0.3729 - val_accuracy: 0.8720 - val_loss: 0.3478
## Epoch 6/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8674 - loss: 0.3618 - val_accuracy: 0.8823 - val_loss: 0.3313
## Epoch 7/20
## 375/375 - 3s - 7ms/step - accuracy: 0.8701 - loss: 0.3510 - val_accuracy: 0.8779 - val_loss: 0.3320
## Epoch 8/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8731 - loss: 0.3411 - val_accuracy: 0.8822 - val_loss: 0.3302
## Epoch 9/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8756 - loss: 0.3367 - val_accuracy: 0.8805 - val_loss: 0.3288
## Epoch 10/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8778 - loss: 0.3290 - val_accuracy: 0.8834 - val_loss: 0.3215
## Epoch 11/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8794 - loss: 0.3214 - val_accuracy: 0.8844 - val_loss: 0.3310
## Epoch 12/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8830 - loss: 0.3144 - val_accuracy: 0.8798 - val_loss: 0.3216
## Epoch 13/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8842 - loss: 0.3081 - val_accuracy: 0.8867 - val_loss: 0.3085
## Epoch 14/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8864 - loss: 0.3052 - val_accuracy: 0.8880 - val_loss: 0.3129
## Epoch 15/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8885 - loss: 0.3005 - val_accuracy: 0.8873 - val_loss: 0.3103
## Epoch 16/20
## 375/375 - 3s - 7ms/step - accuracy: 0.8894 - loss: 0.2961 - val_accuracy: 0.8911 - val_loss: 0.3019
## Epoch 17/20
## 375/375 - 3s - 7ms/step - accuracy: 0.8914 - loss: 0.2904 - val_accuracy: 0.8876 - val_loss: 0.3043
## Epoch 18/20
## 375/375 - 3s - 7ms/step - accuracy: 0.8919 - loss: 0.2887 - val_accuracy: 0.8926 - val_loss: 0.3000
## Epoch 19/20
## 375/375 - 3s - 7ms/step - accuracy: 0.8923 - loss: 0.2856 - val_accuracy: 0.8902 - val_loss: 0.3029
## Epoch 20/20
## 375/375 - 3s - 8ms/step - accuracy: 0.8914 - loss: 0.2879 - val_accuracy: 0.8900 - val_loss: 0.3070
# Evaluate model 2
results_two <- model_two %>% evaluate(test_images, test_labels)
## 313/313 - 0s - 1ms/step - accuracy: 0.8821 - loss: 0.3309
cat("Accuracy of model 2:", results_two$accuracy, "\n")
## Accuracy of model 2: 0.8821
# Show training history
plot(history_2) + ggtitle("Model 2 Training History")
### 5. Model 3: Adamax Optimiser
# Define the third model
model_three <- keras_model_sequential() %>%
  layer_dense(units = 2048, activation = "relu", input_shape = c(28 * 28)) %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 1024, activation = "relu") %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 512, activation = "relu") %>%
  layer_dense(units = 10, activation = "softmax")
model_three %>% compile(
  optimizer = optimizer_adamax(),
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)
# Train model 3
history_3 <- model_three %>% fit(
  x = train_images,
  y = train_labels,
  epochs = 20,
  batch_size = 128,
  validation_split = 0.2
)
## Epoch 1/20
## 375/375 - 5s - 14ms/step - accuracy: 0.7874 - loss: 0.5921 - val_accuracy: 0.8482 - val_loss: 0.4235
## Epoch 2/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8415 - loss: 0.4338 - val_accuracy: 0.8608 - val_loss: 0.3857
## Epoch 3/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8567 - loss: 0.3926 - val_accuracy: 0.8579 - val_loss: 0.3805
## Epoch 4/20
## 375/375 - 5s - 12ms/step - accuracy: 0.8651 - loss: 0.3672 - val_accuracy: 0.8763 - val_loss: 0.3464
## Epoch 5/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8714 - loss: 0.3499 - val_accuracy: 0.8789 - val_loss: 0.3301
## Epoch 6/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8746 - loss: 0.3377 - val_accuracy: 0.8780 - val_loss: 0.3392
## Epoch 7/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8791 - loss: 0.3255 - val_accuracy: 0.8786 - val_loss: 0.3266
## Epoch 8/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8832 - loss: 0.3138 - val_accuracy: 0.8819 - val_loss: 0.3258
## Epoch 9/20
## 375/375 - 5s - 12ms/step - accuracy: 0.8864 - loss: 0.3043 - val_accuracy: 0.8803 - val_loss: 0.3345
## Epoch 10/20
## 375/375 - 5s - 12ms/step - accuracy: 0.8896 - loss: 0.2960 - val_accuracy: 0.8867 - val_loss: 0.3171
## Epoch 11/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8913 - loss: 0.2899 - val_accuracy: 0.8905 - val_loss: 0.3017
## Epoch 12/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8949 - loss: 0.2797 - val_accuracy: 0.8886 - val_loss: 0.3093
## Epoch 13/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8953 - loss: 0.2760 - val_accuracy: 0.8891 - val_loss: 0.3107
## Epoch 14/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8983 - loss: 0.2699 - val_accuracy: 0.8916 - val_loss: 0.2989
## Epoch 15/20
## 375/375 - 5s - 13ms/step - accuracy: 0.8999 - loss: 0.2630 - val_accuracy: 0.8917 - val_loss: 0.3046
## Epoch 16/20
## 375/375 - 5s - 13ms/step - accuracy: 0.9019 - loss: 0.2581 - val_accuracy: 0.8923 - val_loss: 0.3065
## Epoch 17/20
## 375/375 - 5s - 13ms/step - accuracy: 0.9035 - loss: 0.2535 - val_accuracy: 0.8882 - val_loss: 0.3062
## Epoch 18/20
## 375/375 - 5s - 12ms/step - accuracy: 0.9055 - loss: 0.2487 - val_accuracy: 0.8917 - val_loss: 0.3000
## Epoch 19/20
## 375/375 - 5s - 12ms/step - accuracy: 0.9087 - loss: 0.2436 - val_accuracy: 0.8957 - val_loss: 0.2903
## Epoch 20/20
## 375/375 - 5s - 12ms/step - accuracy: 0.9086 - loss: 0.2407 - val_accuracy: 0.8979 - val_loss: 0.2867
# Evaluate model 3
results_three <- model_three %>% evaluate(test_images, test_labels)
## 313/313 - 1s - 2ms/step - accuracy: 0.8887 - loss: 0.3117
cat("Accuracy of model 3:", results_three$accuracy, "\n")
## Accuracy of model 3: 0.8887
# Show training history
plot(history_3) + ggtitle("Model 3 Training History")
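Finally, the held-out accuracies of the three optimisers can be collected into one table and plotted side by side. This is a small sketch assuming, as above, that evaluate() returned named elements loss and accuracy.
# Compare test-set accuracy across the three optimisers
comparison <- data.frame(
  model    = c("Model 1 (RMSprop)", "Model 2 (Nadam)", "Model 3 (Adamax)"),
  accuracy = c(evaluation1$accuracy, results_two$accuracy, results_three$accuracy)
)
ggplot(comparison, aes(x = model, y = accuracy)) +
  geom_col() +
  labs(title = "Test accuracy by optimiser", x = NULL, y = "Accuracy") +
  theme_bw()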