To train a convolutional network, the input data has to be stored as an array rather than a matrix. Accordingly, we also have to set the input_shape argument of the first layer. The training data currently has the following dimensions (samples x pixels):
## [1] 4208 1568
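The 1568 columns correspond to 28 x 56 pixel images. A minimal sketch of the reshaping step, assuming the matrix is the x_train_cnn object used in the fit call below (the exact preprocessing is not shown in the original):
library(keras)
# assumption: x_train_cnn starts as a 4208 x 1568 matrix of pixel values
# reshape it into a 4-D array (samples, height, width, channels) = (4208, 28, 56, 1)
x_train_cnn <- array_reshape(x_train_cnn, c(nrow(x_train_cnn), 28, 28*2, 1))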
Training the network…
model_cnn <- keras_model_sequential() %>%
  # two convolutional blocks followed by max pooling and dropout
  layer_conv_2d(
    filters = 32,
    kernel_size = c(3, 3),
    activation = 'relu',
    input_shape = c(28, 28*2, 1)) %>%
  layer_conv_2d(
    filters = 64,
    kernel_size = c(3, 3),
    activation = 'relu') %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(rate = 0.25) %>%
  layer_flatten() %>%
  # these are the embeddings (activations) we are going to visualize
  layer_dense(units = 784*4, activation = 'relu', name = 'features1') %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 784*2, activation = 'relu', name = 'features2') %>%
  layer_dropout(rate = 0.2) %>%
  layer_dense(units = 784, activation = 'relu', name = 'features3') %>%
  layer_dropout(rate = 0.1) %>%
  # 10-class output layer
  layer_dense(units = 10, activation = 'softmax')

# Compile the model with a categorical loss, the Adadelta optimizer and accuracy as the metric
model_cnn %>% compile(
  loss = "categorical_crossentropy",
  optimizer = optimizer_adadelta(),
  metrics = c('accuracy')
)

# Fit for 2 epochs, holding out 20% of the training data for validation
history <- model_cnn %>% fit(
  x_train_cnn, rbind(y_train, y_train, y_train),
  epochs = 2, batch_size = 128,
  validation_split = 0.2
)
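To check how the loss and accuracy evolve over the two epochs, the history object returned by fit can be plotted directly (this plot is not part of the original output):
plot(history)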
Testing…
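The metrics below come from evaluating the fitted model on held-out data; a minimal sketch of that call, assuming hypothetical test objects x_test_cnn and y_test prepared the same way as the training data:
# assumption: x_test_cnn and y_test are placeholder names for the test set
model_cnn %>% evaluate(x_test_cnn, y_test, verbose = 0)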
## $loss
## [1] 0.04116193
##
## $acc
## [1] 0.9888308