The Code

The code below accompanies the article "Shiny AdaBoosting: An interactive dashboard to adaptive boosting algorithm" and is entirely authored by the authors. It was developed to participate in the IV International Seminar on Statistics with R.

ShinyApp Code

#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(mlbench)
library(tidyverse)
library(class)
library(rpart)
library(rpart.plot)
library(caret)
library(kernlab)
library(shiny)
library(shinydashboard)
library(gganimate)
library(png)
library(gifski)
library(bsplus)
#setwd("D:/my_computer/Est_ML_2019/adaboost_implementation/shiny/adaboost_project/adaBoosting")

source("adaBoosting_functions_opt.R",local=TRUE)
#====Reading all the data=====
Circles<-read_csv("Circles.csv")
Circles$cluster<-Circles$cluster %>% as.factor 
levels(Circles$cluster)<-c('-1','1')
Circles<-Circles %>% as.data.frame()
names(Circles)<-c("x.1","x.2","classes")

Circles_Noisy<- read_csv("circles_noise_15.csv")
Circles_Noisy$X1<-as.factor(Circles_Noisy$X1)
levels(Circles_Noisy$X1)<-c("-1","1")
colnames(Circles_Noisy)<-c("classes","x.1","x.2")
Circles_Noisy<-data.frame(x.1=Circles_Noisy$x.1,
                          x.2=Circles_Noisy$x.2,
                          classes=Circles_Noisy$classes)

Moons<- read_csv("moons.csv")
Moons$X1<-as.factor(Moons$X1)
levels(Moons$X1)<-c("-1","1")
colnames(Moons)<-c("classes","x.1","x.2")
Moons<-data.frame(x.1=Moons$x.1,
                  x.2=Moons$x.2,
                  classes=Moons$classes)

Moons_noisy<- read_csv("moons_noisy.csv")
Moons_noisy$X1<-as.factor(Moons_noisy$X1)
levels(Moons_noisy$X1)<-c("-1","1")
colnames(Moons_noisy)<-c("classes","x.1","x.2")
Moons_noisy<-data.frame(x.1=Moons_noisy$x.1,
                        x.2=Moons_noisy$x.2,
                        classes=Moons_noisy$classes)
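
# Note: the CSV files above are assumed to sit in the app directory.
# If they were unavailable, comparable two-class data could be simulated
# with mlbench (a sketch, not the authors' original data), e.g.:
# Circles_sim <- mlbench.circle(1000, 2) %>% as.data.frame()
# levels(Circles_sim$classes) <- c('-1', '1')
# names(Circles_sim) <- c("x.1", "x.2", "classes")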


Spirals_Noisy<-mlbench.spirals(1000,3,sd = 0.05) %>% as.data.frame()
levels(Spirals_Noisy$classes)<-c('-1','1')

Spirals<-mlbench.spirals(1000,3,sd = 0) %>% as.data.frame()
levels(Spirals$classes)<-c('-1','1')



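# Named list so the selectInput choice can index the datasets directly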
all_database<-list(Circles=Circles,Circles_Noisy=Circles_Noisy,
                   Moons=Moons,Moons_Noisy=Moons_noisy,
                   Spirals=Spirals,Spirals_Noisy=Spirals_Noisy)

ajust_height<-function(x){
  return(30*x)
}


header<-dashboardHeader(title="Learning AdaBoosting")
   
   
   # Sidebar with the menu and the model controls
sidebar<-dashboardSidebar(
      sidebarMenu(id='tabs',
          menuItem("Overview",
                   tabName = 'overview',selected = TRUE),
          menuItem("Model Parameters",
                   tabName = "parameters",
                   icon = icon("cogs"),
                                selectInput(inputId="database",
                                            label="Database",
                                            choices=c("Circles","Circles_Noisy","Moons","Moons_Noisy","Spirals","Spirals_Noisy"),
                                            selected = "Circles"),
                                
                                sliderInput(inputId="training_prop",
                                            label="Training Ratio",
                                            min=0.5,max=1,
                                            value=0.7,step=0.05),
                                
                                selectInput(inputId='n_models',
                                            label="Number of Models",
                                            choices=c(10,25,50,100,150,250,500,750,1000),
                                            selected=10),
                                
                                radioButtons(inputId="tree_size",
                                              label="Choose the tree model:",
                                              choices=c("Stumps","Complete Tree") ),
                   
                                checkboxInput(inputId="animate",
                                           label="Animated Model"),             
                                br(),
                   
                                actionButton(inputId = "run_model",
                                             label="Run AdaBoost",icon=icon("play")),
                                
                                br()
          ),
        menuItem("Data Visualization", tabName = "data_view"),
        menuItem("All Models", tabName = "all_models"),
        menuItem("Final Model",tabName="final_model"),
        menuItem("Animation",tabName ='animation')#,
        #menuItem("About me",tabName = 'about_me',icon=icon('user-alt'))
      )
)
      # Body with the dashboard tabs
body<-dashboardBody(
       tags$head(tags$style(HTML('
                                  /* body */
                                .content-wrapper, .right-side {
                                 background-color: #ffffff;
                                 font-family: "Roboto Condensed", sans-serif;

                                }
                                '))),
        tabItems(
            tabItem("parameters"),
            #Fjalla One
            #Montserrat
            
            tabItem("overview",
                    withMathJax(),
                    includeCSS('www/mycss.css'),
                    
                    h1("AdaBoosting: The wisdom a weighted crowd of experts ",align='center'),
                    br(),
                    p("Boosting methods have been first proposed by Schapire and Freund's AdaBoost Algorithm as a version of an ensemble method
                      based on the idea that would be easier and better to use the combination of weaker classifiers (error closes 0.5), than just use a single strong classifier. The perfomance of those weaker classifier is ",em('boosted'),"by combing them using a majority vote for the classification, weighted on their respective accuracy."),
                    p('Along these years, some others derivantions from AdaBoosting were presented as the Gradient Boosting, and, the two currently state of the art boosting methods: eXtreme Gradient Boosting (XGBoost), and Light Gradient Boosting. 
                      Despite the infatuation around neural networks and deep learning,', strong('boosting algorithms gained the reputation of "Competition Winner" due to his achievements in several competitions. ')),
                    p(" Essentially, as said before, boosting consists of repeatedly using a base weak learning algorithm, on differently weighted versions of the training data,
                      yielding a sequence of weak classifiers that are combined in a addiction function. The weighting of each model depends on the accuracy of the previous, in order to increase the importance of classify correctly
                      wrong predicted observations from the last model. The ensemble prediction function of AdaBoost \\(H: X \\rightarrow \\{-1,1\\} \\)  is given by"),
                    uiOutput('equation'),
                    p("where \\( \\alpha_{1},...,\\alpha_{M} \\) is a set of weights from the respective \\( h_{1},...,h_{M} \\) set of models"),
                    p("The of this application it's to show graphically and iteratively how the base AdaBoost works, and how each model is built in order to get the
                      final classification. The right side panel of ",strong("Model Parameters")," the user can choose:"),
                    tags$li("The database which the AdaBoost will be applied, emphasizing that the observations of each class from all datasets are balanced."),
                    tags$li("The proportion of training set and the test of classication model."),
                    tags$li("The number of classifiers that AdaBoost will use (represented by \\(M\\) in AdaBoost function).",strong("Take care when choose this parameter, if M it's greater then 100 the results can take a great time to show up.")),
                    tags$li("The type of model \\(h_{i}\\) that will be used."),
                    br(),
                    p("After that, it's just need to run the model and navigated through the tabs to see how the model works. To learn more about AdaBoosting, and 
                      other ensemble models, you can check the reference.",a("[1]",href="https://link.springer.com/chapter/10.1007/978-1-4419-9326-7_2")
                      ),
                    
                    h3("Enjoy the dashboard, and keep learning!",align='center',icon=icon("book-reader")),
                    fluidRow(align='center',icon("book-reader"))
                    
                    ),
            tabItem("data_view",
              h1('The Data'),
              h4("Here it's presented the selected database, which is divided into the Training Data, which will be submited to them
                 AdaBoosting model, and the Test Data, which will validate the model."),
              br(),
                    
              fluidRow(
                column(width=4,align="center",plotOutput("originalData")),
                column(width=4,plotOutput("trainingData")),
                column(width=4,plotOutput("testData"))
                )
            ),
            
            tabItem("all_models",
            h1('All models'),
            withMathJax(),
            h4("Here it's presented each model that was calculated to compose the final AdaBoosting function. The relative size
               of each observation represent the weight that was given to it, and each colour grid represent the decision boundary of
               that model. The voting power \\( \\alpha \\) and the model number \\( n \\) are respectively represented in the plot's title.  "),
            br(),      
              uiOutput("each_models_ui")
            
            ),
           
            tabItem("final_model",
                    h1('The AdaBoosting Model'),
                    withMathJax(),
                    h4("Here it's presented the classification model \\( H\\mathbf{(x)} \\), and the prediction to the Test Dataset.
                       The Coloured background represents the decision boundary. Also, at the right side it's the presented the error rate, for each
                      the training and test set, varying with the number of models."),
                    br(),                         
                    fluidRow(
                    column(width=6,align="center",
                    plotOutput("test_predict")),
                    column(width=6,align='center',
                    plotOutput('error_plot')),
                    br()
                    ),br(),
                    fluidRow(column(width=4),
                      column(width=6,
                             valueBoxOutput('acc'),
                             valueBoxOutput('time'))
                      
                    )
                    
          ),
          
          
        tabItem("animation",
                h1('Why not animate?'),
                withMathJax(),
                h4(strong("Who doesn't love animations?"),"Just a animated perspective of the construction of the AdaBoosting model classfier."),
                h5("Obs:Wait until the animation appears... If you choose a great number of models, it may take a while to load."),
                br(),       
                fluidRow(
                column(width = 6,align="center",
                imageOutput('animated')),
                column(width=6,align="center",
                imageOutput('error_anim'))
                
        ))
        
  )
)
ui <- dashboardPage(header=header,
                    sidebar=sidebar,
                    body=body)     


# Define the server logic
server <- function(input, output,session) {
  
 
  #---------Reading the data-------------------
  data <- reactive({  all_database[[input$database]]
    })
  stump_check<-reactive({
    input$tree_size=="Stumps"
  })
  training_index<-reactive({
    sample(1:nrow(data()),round(input$training_prop*nrow( data() )))
  })
  
  training_data<-reactive({ data()[training_index(),]
  })
  
  test_data<-reactive({ data()[-training_index(),]
  })
  
  observeEvent(input$run_model,{
    updateTabItems(session,'tabs',"final_model")
  })
  
  number_models<-eventReactive(input$run_model,{as.numeric(input$n_models)})
  
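  # Fit the AdaBoost ensemble only when the user clicks 'Run AdaBoost'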
  model_x<-eventReactive(input$run_model,{
    adaBoost_tree(formula = classes~.,data=training_data(),n_models=number_models(),stump = stump_check())
  })
  
  acc_model_x<-reactive({
    y_pred<-predict_test_adaBost_tree(test_data = isolate(test_data()),models_selected = model_x()$models,alpha = model_x()$alpha)
    # Overall accuracy: proportion of the confusion matrix on the diagonal
    accuracy<-sum(diag(table(y_pred,isolate(test_data())$classes)))/sum(table(y_pred,isolate(test_data())$classes))
    round(accuracy,3)
  })
  
  time_model<-eventReactive(input$run_model,{
    # Refits the ensemble once more just to measure the elapsed fitting time
    start<-Sys.time()
    adaBoost_tree(formula = classes~.,data=training_data(),n_models=number_models(),stump = stump_check())
    end<-Sys.time()
    round(end-start,3)
  })
  
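  # Refit AdaBoost with 1, 2, ..., M models; each fit becomes one frame of the animation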
  all_model_x<-eventReactive(input$run_model,{
    map(1:number_models(),~adaBoost_tree(formula=classes~.,data=training_data(),
                                        n_models=.x,stump = stump_check()))
  })
  
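   # Train and test error curves as a function of the number of models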
   error_animation<-eventReactive(input$run_model,{
     y_pred_test<-map(all_model_x(),~predict_test_adaBost_tree(test_data = test_data(),models_selected = .x$models,
                                                                                             alpha = .x$alpha))
     error_test<-1-map_dbl(y_pred_test,~sum(diag(table(.x,test_data()$classes)))/sum(table(.x,test_data()$classes)))
     
     y_pred_train<-map(all_model_x(),~predict_test_adaBost_tree(test_data = training_data(),models_selected = .x$models,
                                                                 alpha = .x$alpha))
     error_train<-1-map_dbl(y_pred_train,~sum(diag(table(.x,training_data()$classes)))/sum(table(.x,training_data()$classes)))
     
     n_model<-1:length(error_test)
     
     error_train<-data.frame(error=error_train,
                             type_data=as.factor("train"),
                             n_model=n_model)
     
     error_test<-data.frame(error=error_test,
                            type_data=as.factor("test"),
                            n_model=n_model)
     
     rbind(error_train,error_test)
     
   })
  
  
  
  output$originalData <- renderPlot({
      # Scatter plot of the complete selected dataset
      ggplot(data())+
       geom_point(mapping=aes(x=x.1,y=x.2,fill=classes),col='black',pch=21,size=1.4,show.legend = FALSE)+
       ggtitle(paste("Complete Dataset:",input$database))+
       scale_fill_manual(values = c("1"='#0050FF',"-1"= '#FF9000'))+
       scale_color_manual(values=c("1"='#8CB0FF',"-1"='#FFC06D'))+
       xlab("X1")+
       ylab("X2")+      
       scale_size_area()+
       theme_bw()
   })
   
   output$trainingData <- renderPlot({
     # Scatter plot of the training split
     ggplot(training_data())+
       geom_point(mapping=aes(x=x.1,y=x.2,fill=classes),col='black',pch=21,size=1.5,show.legend = FALSE)+
       ggtitle(paste("Training Set:",input$database,"(Training Ratio =",input$training_prop,")"))+
       scale_fill_manual(values = c("1"='#0050FF',"-1"= '#FF9000'))+
       scale_color_manual(values=c("1"='#8CB0FF',"-1"='#FFC06D'))+
       xlab("X1")+
       ylab("X2")+
       scale_size_area()+
       theme_bw()
   })
   
   output$testData <- renderPlot({
     if(input$training_prop!=1){
     # Scatter plot of the test split (empty when all data is used for training)
     ggplot(test_data())+
       geom_point(mapping=aes(x=x.1,y=x.2,fill=classes),col='black',pch=21,size=1.5,show.legend = FALSE)+
         ggtitle(paste("Test Set:",input$database,"(Test Ratio =",1-input$training_prop,")"))+
         scale_fill_manual(values = c("1"='#0050FF',"-1"= '#FF9000'))+
         scale_color_manual(values=c("1"='#8CB0FF',"-1"='#FFC06D'))+
         xlab("X1")+
         ylab("X2")+
         scale_size_area()+
       theme_bw()
     }
   })
   
   output$each_models_ui<-renderUI({
     plotOutput("each_models",height = number_models()*40)
   })
   
   output$each_models<-renderPlot({
       plott<-each_model_plot(model = model_x(), numero_modelo= number_models())
       plot(plott) })
   
   output$test_predict<-renderPlot({
     plot_adaBoost(data=isolate(test_data()),model=model_x())
   })
   
   output$animated<-renderImage({
     # A temp file to save the animated gif;
     # it will be removed by renderImage afterwards (deleteFile = TRUE)
     outfile <- tempfile(fileext='.gif')
     
     req(input$animate)
     anim_save(filename = outfile,animate(animate_adaBoost(test_data(),all_model_x()),duration=10))
     list(src = outfile,
          contentType = 'image/gif'
     )}, deleteFile = TRUE)
   
   
   output$error_anim<-renderImage({
     # A temp file to save the animated gif;
     # it will be removed by renderImage afterwards (deleteFile = TRUE)
     outfile <- tempfile(fileext='.gif')
     
     req(input$animate)
     anim_save(filename = outfile,animate(plot_error_animate(error_animation())))
     list(src = outfile,
          contentType = 'image/gif'
     )}, deleteFile = TRUE)
    
    output$error_plot<-renderPlot({
      ggplot(error_animation())+
        geom_line(mapping = aes(x=n_model,y=error,col=type_data),lwd=1)+
        scale_y_continuous(limits = c(0.0,max(0.5,error_animation()$error)),expand=c(0,0))+
        xlab("Number of Models")+
        ylab("% Error")+
        ggtitle("AdaBoosting Error")+
        scale_color_manual(values=c("train"='#33658A',"test"='#CC6C35'),labels = c("train"="Training Error","test"="Test Error"))+
        guides(col=guide_legend(title = NULL,keywidth = 2,keyheight = 1.5))+
        theme_bw()+
        theme(legend.justification=c(0.8,0.8), legend.position=c(0.9,0.9),
              legend.text = element_text(size=13),
              legend.background = element_rect(colour = 'black',
                                               linetype = 'solid'))
    })
    
    output$equation <- renderUI({
      withMathJax(
        '
               $$H(\\mathbf{x})=sign \\left(\\sum_{m=1}^{M} \\alpha_{m}h_{m}(\\mathbf{x}) \\right)$$')
    })
    
    output$acc<-renderValueBox({
      valueBox(value=acc_model_x(),
               subtitle ="Accuracy in the Test Data",
               icon=icon("bullseye"),
               color ='blue')
    })
    
    output$time<-renderValueBox({
      valueBox(value=time_model(),
               subtitle ="Elapsed Model Time (s)",
               icon=icon("hourglass"),
               color ='light-blue')
    })
   
}
  

# Run the application 
shinyApp(ui = ui, server = server)
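
To run the dashboard locally, the app file must sit in one directory together with adaBoosting_functions_opt.R and the CSV files it reads. A minimal launch sketch, assuming that directory is called "adaBoosting":

shiny::runApp("adaBoosting")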

Functions created for use in the ShinyApp

adaBoost_tree<-function(formula,data=data,n_models,stump=TRUE){ 
      
      dp_name<-colnames(data)[ncol(data)] # Name of the response variable
      alpha<-rep(NA,n_models)             # "Voting power" coefficient of each model
      weights_new<-numeric(nrow(data))    # Vector of observation weights
      selected_models<-list(n_models)
      stump_models<-rep(list(NULL),n_models)
      plot<-rep(list(NA),n_models)
      pred_ada<-rep(list(NULL),n_models)
      new_data<-rep(list(NULL),n_models)      
      plot_grid<-rep(list(NULL),n_models)
      
      n_iter2=0
      error=numeric(n_models)
      weights_old<-rep(1/nrow(data),nrow(data)) # Initial uniform weights
      weights_new<-weights_old
      y_true<-data[,dp_name] %>% as.vector() %>% as.factor # Storing the true labels
      
      while(n_iter2<n_models){
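            # One boosting iteration: resample with the current weights, fit a
            # weak learner, compute its voting power from the weighted error,
            # and then reweight the misclassified observations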
            
            n_iter2<-n_iter2+1
            # Weighted bootstrap: observations with larger weights are more
            # likely to be drawn into this iteration's training sample
            boots_data_index<-sample(1:nrow(data),nrow(data),replace = TRUE,prob=weights_old)
            boots_data<-data[boots_data_index,]
            new_data[[n_iter2]]<-boots_data
            
            y_true_boots<-boots_data[,dp_name] %>% as.vector() %>% as.factor # True labels of the bootstrap sample
            
            if(length(unique(weights_old))==1){ # First iteration: uniform weights, so use the original data
                  new_data[[n_iter2]]<-data
            }
            
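            # Weak learner: a decision stump (maxdepth = 1) or, alternatively,
            # a full rpart classification tree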
            if(stump){
                  stump_models[[n_iter2]]<-rpart(formula=formula,
                                                 method='class',
                                                 data=new_data[[n_iter2]],
                                                 parm=list(split='information'),
                                                 control=rpart.control(maxdepth=1,minsplit=2))
            }else{
                  stump_models[[n_iter2]]<-rpart(formula=formula,
                                                 method='class',
                                                 data=new_data[[n_iter2]],
                                                 parm=list(split='information'))
            }
            
            predict_new_data<-predict(stump_models[[n_iter2]],newdata=new_data[[n_iter2]],type='class')
            new_data[[n_iter2]]$cluster<-predict_new_data
            
            
            #index_model_selected<-which.min(error_resample)
            selected_models[[n_iter2]]<-stump_models[[n_iter2]]
            
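            # Evaluate the current model on a 100 x 100 grid over the feature
            # space, to draw its decision boundary later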
            x1_range<-c(min(new_data[[n_iter2]][,1]),max(new_data[[n_iter2]][,1]))
            x2_range<-c(min(new_data[[n_iter2]][,2]),max(new_data[[n_iter2]][,2]))
            x1<-seq(x1_range[1],x1_range[2],length.out =100)
            x2<-seq(x2_range[1],x2_range[2],length.out = 100)
            all_grid<-expand.grid(x1,x2)
            names(all_grid)<-names(new_data[[n_iter2]])[1:2]
            pred_grid<-predict(stump_models[[n_iter2]],newdata=all_grid,type='class')
            plot_grid[[n_iter2]]<-cbind(all_grid,pred_grid)
            
            pred<-predict(selected_models[[n_iter2]],newdata=data,type='class')
            
            wrong_index<-which(y_true!=pred)
            
            # Weighted training error of the current model
            error<-sum(weights_old[wrong_index])
            
            # If the accuracy is inverted, just flip the sign of alpha
            
            # Voting power: the log-odds of the weighted error
            alpha[n_iter2]<-log((1-error)/error)
            
            
            #-------Updating the weights-------------
            
            weights_new[wrong_index]<-weights_old[wrong_index]*exp(alpha[n_iter2])
            weights_new<-weights_new/sum(weights_new) # Normalize so the weights sum to one
            cat("Calculating model number:", n_iter2,"\n")
            weights_old<-weights_new # Updating the weights for the next iteration
      }
      pred_ada<-compact(pred_ada)
      plot<-compact(plot)
      return(list(weights=weights_new,alpha=alpha,models=selected_models,n_iter=n_iter2,erro=error,
                  graficos=plot,predicted_ada=pred_ada,boot_sample=new_data,decision_boundary=plot_grid))
}
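
# Minimal usage sketch (assumes the Circles data frame built in the app code above):
# fit <- adaBoost_tree(formula = classes ~ ., data = Circles, n_models = 10, stump = TRUE)
# fit$alpha  # voting power of each weak learner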



#=======Function to predict using the models generated by adaBoost_tree==========

predict_test_adaBost_tree<-function(test_data,models_selected,alpha){
      # Weighted vote: predict with every model, map the labels to {-1, 1},
      # scale each prediction by its alpha, sum over the models and take the sign
      final_model<-map(models_selected,~predict(.x,newdata=test_data,type='class'))%>%
            map(~ifelse(.x==1,1,-1)) %>% 
            map2(alpha,~.x*.y) %>%
            unlist %>% 
            matrix(ncol=nrow(test_data),byrow = TRUE)%>%
            apply(2,sum) %>% 
            sign %>% 
            as.factor()
      return(final_model)
}
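
# Usage sketch with a hypothetical 'fit' returned by adaBoost_tree above:
# y_hat <- predict_test_adaBost_tree(test_data = Circles, models_selected = fit$models, alpha = fit$alpha)
# mean(y_hat == Circles$classes)  # overall accuracy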

#==============Each Model plots function==========================

each_model_plot<-function(model,numero_modelo){
      each_iteration<-map(model$decision_boundary,~ggplot()+
                                geom_point(mapping = aes(x=x.1,y=x.2,fill=pred_grid,col=pred_grid),
                                           show.legend = FALSE,data=.x)+
                                scale_x_continuous(limits = c(min(.x[,1]),max(.x[,1])),expand=c(0,0))+
                                scale_y_continuous(limits = c(min(.x[,2]),max(.x[,2])),expand=c(0,0))+
                                scale_fill_manual(values = c("1"='#0050FF',"-1"= '#FF9000'))+
                                scale_color_manual(values=c("1"='#577EC1',"-1"='#EF9F4F'))+
                                scale_size_continuous(range=c(0,3))+
                                scale_alpha(range=c(1,2))+
                                theme_bw()+
                                theme(plot.title = element_text(size=13,face='bold'),
                                      axis.title.x=element_blank(),
                                      axis.title.y=element_blank(),
                                      axis.text.x=element_blank(),
                                      axis.text.y=element_blank(),
                                      axis.ticks.x=element_blank(),
                                      axis.ticks.y=element_blank()))
      
      titles<-map2(seq(1,numero_modelo,by=1),model$alpha,~substitute(paste("Model n=",n," with ",alpha," = ",alfa),list(n=.x,alfa=round(.y,3))))

      final_models<-map2(each_iteration,model$boot_sample,~.x+geom_count(mapping = aes(x=x.1,y=x.2,fill=cluster),col='black',pch=21,
                                                                         show.legend = FALSE,data=.y)) %>%
            map2(titles,~.x+ggtitle(.y))

      all_plot<-cowplot::plot_grid(plotlist = final_models,ncol=5)
      return(all_plot)
      
}

plot_adaBoost<-function(data,model){
  pred_test<-predict_test_adaBost_tree(test_data = data,models_selected = model$models,
                                       alpha = model$alpha)
  
  pred_data<-cbind(data,pred_test)
  
  grid_boundary<-model$decision_boundary[[1]]
  
  decision_grid<-predict_test_adaBost_tree(test_data = grid_boundary ,models_selected = model$models,
                                           alpha = model$alpha)
  
  decision_boundary<-cbind(grid_boundary,decision_grid)
  
  final_plot<-ggplot()+
    geom_point(mapping=aes(x=x.1,y=x.2,fill=decision_grid,col=decision_grid),size=4.2,data=decision_boundary,show.legend = FALSE)+
    geom_point(mapping=aes(x=x.1,y=x.2,fill=pred_test),col='black',pch=21,size=3.5,show.legend = FALSE,
               data=pred_data)+
    scale_x_continuous(limits = range(decision_boundary[,1]),expand=c(0,0))+
    scale_y_continuous(limits = range(decision_boundary[,2]),expand=c(0,0))+
    xlab("X1")+
    ylab("X2")+
    scale_fill_manual(values = c("1"='#0050FF',"-1"= '#FF9000'))+
    scale_color_manual(values=c("1"='#577EC1',"-1"='#EF9F4F'))+
    ggtitle(paste0('AdaBoosting Weighting: Final model (n=',length(model$alpha),')'))+
    theme_bw() 
  
  return(final_plot)
}


animate_adaBoost_data<-function(data,model){
  # Predicting the test data labels
  pred_test<-predict_test_adaBost_tree(test_data = data,models_selected = model$models,
                                       alpha = model$alpha)
  #Joining X and Y
  pred_data<-cbind(data,pred_test)
  
  #Generating Decision Boundaries
  grid_boundary<-model$decision_boundary[[1]]
  decision_grid<-predict_test_adaBost_tree(test_data = grid_boundary ,models_selected = model$models,
                                           alpha = model$alpha)
  decision_boundary<-cbind(grid_boundary,decision_grid)
  
  #Associating the model number
  pred_data<-pred_data %>% 
            mutate(model_number=length(model$models))
  
  decision_boundary<-decision_boundary %>% 
                mutate(model_number=length(model$models))
  
  return(list(predicted_data=pred_data,decision_boundary=decision_boundary))
}


animate_adaBoost<-function(test_data,all_models){
  # The input here is the AdaBoost result for each different number of models
  n<-length(all_models)
  all_data<-map(all_models,~animate_adaBoost_data(data = test_data,model =.x ))
  
  pred_datas<-map_df(all_data,~.x$predicted_data)
  
  decision_boundarys<-map_df(all_data,~.x$decision_boundary)
  
  
  final_plot<-ggplot()+
    
    scale_x_continuous(limits = range(decision_boundarys[,1]),expand=c(0,0))+
    scale_y_continuous(limits = range(decision_boundarys[,2]),expand=c(0,0))+
    xlab("X1")+
    ylab("X2")+
    scale_fill_manual(values = c("1"='#0050FF',"-1"= '#FF9000'))+
    scale_color_manual(values=c("1"='#577EC1',"-1"='#EF9F4F'))+
    theme_bw()
  
  main_animation<-final_plot+
    geom_point(mapping=aes(x=x.1,y=x.2,fill=decision_grid,col=decision_grid),size=4.2,data=decision_boundarys,show.legend = FALSE)+
    geom_point(mapping=aes(x=x.1,y=x.2,fill=pred_test),col='black',pch=21,size=2,show.legend = FALSE,
               data=pred_datas)+
    ggtitle('AdaBoosting Weighting: Final model (n={frame})')+
    transition_manual(frames = model_number)
   
  # animation_gif<-animate(main_animation,fps=round(0.25*n),end_pause = 5)
  # The data has already been generated; only the animation itself remains
  # return(animation_gif)
  return(main_animation)
  
}


plot_error_animate<-function(error_data){
    error_plot<-ggplot(error_data)+
    geom_line(mapping = aes(x=n_model,y=error,col=type_data),lwd=1)+
    scale_y_continuous(limits = c(0.0,max(0.5,error_data$error)),expand=c(0,0))+
    xlab("Number of Models")+
    ylab("% Error")+
    ggtitle("AdaBoosting Error")+
    scale_color_manual(values=c("train"='#33658A',"test"='#CC6C35'),labels = c("train"="Training Error","test"="Test Error"))+
    guides(col=guide_legend(title = NULL,keywidth = 2,keyheight = 1.5))+
    theme_bw()+
    theme(legend.justification=c(0.8,0.8), legend.position=c(0.9,0.9),
          legend.text = element_text(size=13),
          legend.background = element_rect(colour = 'black',
                                           linetype = 'solid'))+
    transition_reveal(n_model)
    
    return(error_plot)
}