# Libraries
library(nnet)
library(plotly)
library(Metrics)
library(monmlp)
# Apply the target function element-wise over paired (x1, x2) inputs
get_y_or_z <- function(x1_list, x2_list, func) {
  mapply(func, x1_list, x2_list)
}
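As a quick sanity check of the helper (illustrative inputs only), pairs are taken positionally:

# Sketch: inputs are paired by position, so this returns 1 1
get_y_or_z(c(0, 1), c(1, 0), function(a, b) a + b)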
# Split a data frame into 75% train / 25% test
get_train_test <- function(df){
  smp_size <- floor(0.75 * nrow(df))
  # Set the seed so the partition is reproducible
  set.seed(123)
  train_ind <- sample(seq_len(nrow(df)), size = smp_size)
  train <- df[train_ind, ]
  test <- df[-train_ind, ]
  return(list(train = train, test = test))
}
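A minimal usage sketch (toy values only): the helper returns a named list, so the two partitions can be pulled out by name.

# Illustrative usage of the splitter on a toy data frame
toy <- data.frame(a = 1:8, b = 8:1)
parts <- get_train_test(toy)
nrow(parts$train) # 6 rows (75%)
nrow(parts$test)  # 2 rows (25%)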
Approximate the following functions using an MLP:
# Function to approximate: the max of three Gaussian bumps
fx <- function(x1, x2){
  return(max(exp(-(x1^2)), exp(-2 * (x2^2)), 2 * exp(-0.5 * ((x1^2) + (x2^2)))))
}
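Since plotly is already loaded, the target surface can be inspected before training. A minimal sketch, assuming the same [0, 10] x [0, 10] domain sampled below; the grid step of 0.1 is an arbitrary choice, and Vectorize is needed because fx calls max on scalars.

# Sketch: visualize the target surface on an arbitrary 0.1-step grid
x_seq <- seq(0, 10, by = 0.1)
z_mat <- outer(x_seq, x_seq, Vectorize(fx)) # z_mat[i, j] = fx(x_seq[i], x_seq[j])
plot_ly(x = x_seq, y = x_seq, z = t(z_mat), type = "surface") # plotly expects z[y, x]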
# Training dataset: 10,000 points sampled uniformly from [0, 10] x [0, 10]
x1 <- 10 * runif(10000)
x2 <- 10 * runif(10000)
grid <- data.frame(x1, x2)
train_test <- get_train_test(grid)
train <- train_test$train
test <- train_test$test
x1 <- train$x1
x2 <- train$x2
df_train <- data.frame(x1, x2)
y <- get_y_or_z(x1, x2, fx)
# Test dataset
x11 <- test$x1
x22 <- test$x2
df_test <- data.frame(x11, x22)
y_test <- get_y_or_z(x11, x22, fx)
# Neural network: MLP with 3 hidden units, an ensemble of 15 bagged networks
r <- monmlp.fit(as.matrix(df_train), as.matrix(y), hidden1 = 3, n.ensemble = 15, monotone = 0, bag = TRUE)
## ** Ensemble 1
## ** Bagging on
## 0.003477047
## ** 0.003477047
##
## ** Ensemble 2
## ** Bagging on
## 0.004286808
## ** 0.004286808
##
## ** Ensemble 3
## ** Bagging on
## 0.004017378
## ** 0.004017378
##
## ** Ensemble 4
## ** Bagging on
## 0.003880898
## ** 0.003880898
##
## ** Ensemble 5
## ** Bagging on
## 0.004250739
## ** 0.004250739
##
## ** Ensemble 6
## ** Bagging on
## 0.003837411
## ** 0.003837411
##
## ** Ensemble 7
## ** Bagging on
## 0.003955111
## ** 0.003955111
##
## ** Ensemble 8
## ** Bagging on
## 0.002913768
## ** 0.002913768
##
## ** Ensemble 9
## ** Bagging on
## 0.004178194
## ** 0.004178194
##
## ** Ensemble 10
## ** Bagging on
## 0.003623224
## ** 0.003623224
##
## ** Ensemble 11
## ** Bagging on
## 0.002778102
## ** 0.002778102
##
## ** Ensemble 12
## ** Bagging on
## 0.004100403
## ** 0.004100403
##
## ** Ensemble 13
## ** Bagging on
## 0.00368251
## ** 0.00368251
##
## ** Ensemble 14
## ** Bagging on
## 0.00291258
## ** 0.00291258
##
## ** Ensemble 15
## ** Bagging on
## 0.003612996
## ** 0.003612996
y_pred <- monmlp.predict(x = as.matrix(df_test), weights = r)
# Test dataset + predictions
df <- data.frame(x11, x22, y_test, y_pred)
# Metrics
rmse_metric <- rmse(y_test, y_pred)
mae_metric <- mae(y_test, y_pred)
print(rmse_metric)
## [1] 0.01746495
print(mae_metric)
## [1] 0.006213957
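A minimal visualization sketch of the fit on the test set (marker size is an arbitrary choice; the `%>%` pipe is re-exported by plotly):

# Sketch: overlay test targets and ensemble predictions in 3-D
plot_ly(df) %>%
  add_markers(x = ~x11, y = ~x22, z = ~y_test, name = "true", marker = list(size = 2)) %>%
  add_markers(x = ~x11, y = ~x22, z = ~y_pred, name = "predicted", marker = list(size = 2))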
# Function to approximate
fx <- function(x, y) {
  return(0.5 + 0.1 * (x^2) * cos(y + 3) + 0.4 * x * y * exp(1 - (y^2)))
}
# Training dataset: 10,000 points sampled uniformly from [0, 10] x [0, 10]
x1 <- 10 * runif(10000)
x2 <- 10 * runif(10000)
grid <- data.frame(x1, x2)
train_test <- get_train_test(grid)
train <- train_test$train
test <- train_test$test
x1 <- train$x1
x2 <- train$x2
df_train <- data.frame(x1, x2)
y <- get_y_or_z(x1, x2, fx)
# Test dataset
x11 <- test$x1
x22 <- test$x2
df_test <- data.frame(x11, x22)
y_test <- get_y_or_z(x11, x22, fx)
# Neural network: MLP with 5 hidden units, an ensemble of 25 bagged networks
r <- monmlp.fit(as.matrix(df_train), as.matrix(y), hidden1 = 5, n.ensemble = 25, monotone = 0, bag = TRUE)
## ** Ensemble 1
## ** Bagging on
## 0.04997785
## ** 0.04997785
##
## ** Ensemble 2
## ** Bagging on
## 0.01346042
## ** 0.01346042
##
## ** Ensemble 3
## ** Bagging on
## 0.01317408
## ** 0.01317408
##
## ** Ensemble 4
## ** Bagging on
## 0.0484075
## ** 0.0484075
##
## ** Ensemble 5
## ** Bagging on
## 0.01374496
## ** 0.01374496
##
## ** Ensemble 6
## ** Bagging on
## 0.03589462
## ** 0.03589462
##
## ** Ensemble 7
## ** Bagging on
## 0.01287737
## ** 0.01287737
##
## ** Ensemble 8
## ** Bagging on
## 0.03795712
## ** 0.03795712
##
## ** Ensemble 9
## ** Bagging on
## 0.03683936
## ** 0.03683936
##
## ** Ensemble 10
## ** Bagging on
## 0.03583151
## ** 0.03583151
##
## ** Ensemble 11
## ** Bagging on
## 0.03570127
## ** 0.03570127
##
## ** Ensemble 12
## ** Bagging on
## 0.04978094
## ** 0.04978094
##
## ** Ensemble 13
## ** Bagging on
## 0.01320595
## ** 0.01320595
##
## ** Ensemble 14
## ** Bagging on
## 0.01107943
## ** 0.01107943
##
## ** Ensemble 15
## ** Bagging on
## 0.01293787
## ** 0.01293787
##
## ** Ensemble 16
## ** Bagging on
## 0.02008592
## ** 0.02008592
##
## ** Ensemble 17
## ** Bagging on
## 0.0136541
## ** 0.0136541
##
## ** Ensemble 18
## ** Bagging on
## 0.04990499
## ** 0.04990499
##
## ** Ensemble 19
## ** Bagging on
## 0.02542437
## ** 0.02542437
##
## ** Ensemble 20
## ** Bagging on
## 0.03671539
## ** 0.03671539
##
## ** Ensemble 21
## ** Bagging on
## 0.020355
## ** 0.020355
##
## ** Ensemble 22
## ** Bagging on
## 0.01379815
## ** 0.01379815
##
## ** Ensemble 23
## ** Bagging on
## 0.01273198
## ** 0.01273198
##
## ** Ensemble 24
## ** Bagging on
## 0.01287902
## ** 0.01287902
##
## ** Ensemble 25
## ** Bagging on
## 0.04851978
## ** 0.04851978
y_pred <- monmlp.predict(x = as.matrix(df_test), weights = r)
# Test dataset + predictions
df <- data.frame(x11, x22, y_test, y_pred)
# Metrics
rmse_metric <- rmse(y_test, y_pred)
mae_metric <- mae(y_test, y_pred)
print(rmse_metric)
## [1] 0.3748403
print(mae_metric)
## [1] 0.2559508
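The test RMSE here is roughly 20x worse than for the first function. Before tuning hidden1 or iter.max, a quick look at the residual distribution is a cheap first diagnostic (sketch; the bin count is arbitrary):

# Sketch: distribution of test residuals for the second function
res <- y_test - as.numeric(y_pred)
summary(res)
hist(res, breaks = 50, main = "Test residuals", xlab = "y_test - y_pred")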
# Function to approximate: the 2-D sinc, sin(r)/r with r = sqrt(x1^2 + x2^2)
fx <- function(x1, x2) {
  return(sin(sqrt(x1^2 + x2^2)) / sqrt(x1^2 + x2^2))
}
# Training dataset: an 11 x 11 integer grid on [-5, 5]^2
x1 <- -5:5
x2 <- x1
grid <- expand.grid(x1, x2)
# Drop the origin, where the expression is 0/0 (the limit of sin(r)/r there is 1)
grid <- subset(grid, Var1 != 0 | Var2 != 0)
train_test <- get_train_test(grid)
train <- train_test$train
test <- train_test$test
x1 <- train$Var1
x2 <- train$Var2
df_train <- data.frame(x1, x2)
y <- get_y_or_z(x1, x2, fx)
# Test dataset
x11 <- test$Var1
x22 <- test$Var2
df_test <- data.frame(x11, x22)
y_test <- get_y_or_z(x11, x22, fx)
# Neural network: MLP with 3 hidden units, an ensemble of 15 bagged networks
r <- monmlp.fit(as.matrix(df_train), as.matrix(y), hidden1 = 3, n.ensemble = 15, monotone = 0, bag = TRUE)
## ** Ensemble 1
## ** Bagging on
## 0.1526337
## ** 0.1526337
##
## ** Ensemble 2
## ** Bagging on
## 0.1404104
## ** 0.1404104
##
## ** Ensemble 3
## ** Bagging on
## 0.1158616
## ** 0.1158616
##
## ** Ensemble 4
## ** Bagging on
## 0.1424572
## ** 0.1424572
##
## ** Ensemble 5
## ** Bagging on
## 0.138396
## ** 0.138396
##
## ** Ensemble 6
## ** Bagging on
## 0.419832
## ** 0.419832
##
## ** Ensemble 7
## ** Bagging on
## 0.1309755
## ** 0.1309755
##
## ** Ensemble 8
## ** Bagging on
## 0.2816474
## ** 0.2816474
##
## ** Ensemble 9
## ** Bagging on
## 0.2197813
## ** 0.2197813
##
## ** Ensemble 10
## ** Bagging on
## 0.1062563
## ** 0.1062563
##
## ** Ensemble 11
## ** Bagging on
## 0.09890283
## ** 0.09890283
##
## ** Ensemble 12
## ** Bagging on
## 0.140569
## ** 0.140569
##
## ** Ensemble 13
## ** Bagging on
## 0.270762
## ** 0.270762
##
## ** Ensemble 14
## ** Bagging on
## 0.1552032
## ** 0.1552032
##
## ** Ensemble 15
## ** Bagging on
## 0.1706113
## ** 0.1706113
y_pred <- monmlp.predict(x = as.matrix(df_test), weights = r)
# Test dataset + predictions
df <- data.frame(x11, x22, y_test, y_pred)
# Metrics
rmse_metric <- rmse(y_test, y_pred)
mae_metric <- mae(y_test, y_pred)
print(rmse_metric)
## [1] 0.1611288
print(mae_metric)
## [1] 0.1310891
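With only 90 training points, it is worth checking how the ensemble interpolates between grid nodes. A minimal sketch evaluating the fitted ensemble on a denser grid (the 0.25 step is an arbitrary choice):

# Sketch: predict on a finer grid to inspect interpolation between training nodes
x_fine <- seq(-5, 5, by = 0.25)
grid_fine <- expand.grid(x1 = x_fine, x2 = x_fine)
z_fine <- monmlp.predict(x = as.matrix(grid_fine), weights = r)
z_mat <- matrix(z_fine, nrow = length(x_fine)) # x1 varies fastest in expand.grid
plot_ly(x = x_fine, y = x_fine, z = t(z_mat), type = "surface")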
# Function to approximate (written vectorized: sin and cos operate element-wise)
fx <- function(x) {
  return(sqrt(2) * (sin(x) + cos(x) - sin(3 * x) + cos(3 * x)))
}
# Training dataset: 10,000 points sampled uniformly from [0, 10]
x1 <- 10 * runif(10000)
df <- data.frame(x1)
train_test <- get_train_test(df)
# With a single-column data frame, indexing inside the helper drops to plain vectors
train <- train_test$train
test <- train_test$test
x1 <- train
y <- fx(x1)
df_train <- data.frame(x1)
# Test dataset
x11 <- test
df_test <- data.frame(x11)
y_test <- fx(x11)
# Neural network: MLP with 5 hidden units, an ensemble of 25 bagged networks
r <- monmlp.fit(as.matrix(df_train), as.matrix(y), hidden1 = 5, n.ensemble = 25, monotone = 0, bag = TRUE)
## ** Ensemble 1
## ** Bagging on
## 0.2065846
## ** 0.2065846
##
## ** Ensemble 2
## ** Bagging on
## 0.1053744
## ** 0.1053744
##
## ** Ensemble 3
## ** Bagging on
## 0.1624286
## ** 0.1624286
##
## ** Ensemble 4
## ** Bagging on
## 0.2361368
## ** 0.2361368
##
## ** Ensemble 5
## ** Bagging on
## 0.2605914
## ** 0.2605914
##
## ** Ensemble 6
## ** Bagging on
## 0.1074285
## ** 0.1074285
##
## ** Ensemble 7
## ** Bagging on
## 0.1583939
## ** 0.1583939
##
## ** Ensemble 8
## ** Bagging on
## 0.2621213
## ** 0.2621213
##
## ** Ensemble 9
## ** Bagging on
## 0.2033255
## ** 0.2033255
##
## ** Ensemble 10
## ** Bagging on
## 0.2326534
## ** 0.2326534
##
## ** Ensemble 11
## ** Bagging on
## 0.3088893
## ** 0.3088893
##
## ** Ensemble 12
## ** Bagging on
## 0.05318509
## ** 0.05318509
##
## ** Ensemble 13
## ** Bagging on
## 0.2058333
## ** 0.2058333
##
## ** Ensemble 14
## ** Bagging on
## 0.2079866
## ** 0.2079866
##
## ** Ensemble 15
## ** Bagging on
## 0.2757625
## ** 0.2757625
##
## ** Ensemble 16
## ** Bagging on
## 0.1341503
## ** 0.1341503
##
## ** Ensemble 17
## ** Bagging on
## 0.106889
## ** 0.106889
##
## ** Ensemble 18
## ** Bagging on
## 0.1535288
## ** 0.1535288
##
## ** Ensemble 19
## ** Bagging on
## 0.2078046
## ** 0.2078046
##
## ** Ensemble 20
## ** Bagging on
## 0.1969404
## ** 0.1969404
##
## ** Ensemble 21
## ** Bagging on
## 0.05212821
## ** 0.05212821
##
## ** Ensemble 22
## ** Bagging on
## 0.2514673
## ** 0.2514673
##
## ** Ensemble 23
## ** Bagging on
## 0.1842491
## ** 0.1842491
##
## ** Ensemble 24
## ** Bagging on
## 0.05144907
## ** 0.05144907
##
## ** Ensemble 25
## ** Bagging on
## 0.2580254
## ** 0.2580254
y_pred <- monmlp.predict(x = as.matrix(df_test), weights = r)
# Test dataset + predictions
df <- data.frame(x11, y_test, y_pred)
# Metrics
rmse_metric <- rmse(y_test, y_pred)
mae_metric <- mae(y_test, y_pred)
print(rmse_metric)
## [1] 0.6885447
print(mae_metric)
## [1] 0.5815287
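For a one-dimensional target the fit is easiest to judge visually. A minimal sketch with base graphics (colors and legend placement are arbitrary choices):

# Sketch: true curve vs ensemble prediction on the test points, ordered by x
ord <- order(x11)
plot(x11[ord], y_test[ord], type = "l", xlab = "x", ylab = "f(x)")
lines(x11[ord], y_pred[ord], col = "red")
legend("topright", legend = c("true", "predicted"), col = c("black", "red"), lty = 1)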