In this video I explain the whole script

library(vembedr)
embed_url("https://www.youtube.com/watch?v=k8jDhBi33XU")

Method 1: Stepwise

What is stepwise regression? Stepwise regression is the iterative, step-by-step construction of a regression model that involves selecting the independent variables to be used in a final model. It works by adding or removing potential explanatory variables in succession and testing for statistical significance (e.g., F-tests, t-tests) after each iteration.

Types of stepwise regression

Forward selection starts with no variables in the model, tests each variable as it is added, and keeps those deemed most statistically significant, repeating the process until the results are optimal.

Backward elimination starts with a full set of independent variables, removing one at a time and then testing whether the removed variable was statistically significant.

Bidirectional elimination (both) is a combination of the first two methods, testing at each step which variables should be included or excluded (see the sketch below).
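
A minimal sketch of the three directions with stats::step(), using hypothetical empty (m0) and full (m_full) models on the built-in trees data that this post uses later:

m0     <- lm(Volume ~ 1, data = trees)                     # intercept-only model
m_full <- lm(Volume ~ Girth + Height, data = trees)        # all candidate terms
step(m0, scope = formula(m_full), direction = "forward")   # only adds terms
step(m_full, direction = "backward")                       # only drops terms
step(m0, scope = formula(m_full), direction = "both")      # adds or drops terms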

Libraries

library(car)            # regression tools (e.g., outlierTest)
library(MASS)           # support functions
library(ISLR)           # provides "Wage" dataset
library(tictoc)         # checks running time
library(sjPlot)         # visualizes model results
library(glmulti)        # finds the BEST model
library(tidyverse)      # provides a lot of useful stuff !!! 
library(performance)    # checks and compares quality of models
library(tidyr)          # Tools to help to create tidy data
library(boot)           # Functions and datasets for bootstrapping
library(vembedr)        # to embed hosted video in your R Markdown
library(DT)             # Data objects in R can be rendered as HTML
library(effects)        # graphical and tabular effect displays
library(report)         # converts statistical models and data frames into text reports
library(equatiomatic)   # takes a model and returns its 'LaTeX' equation
library(visreg)         # to visualize the fit of regression models
library(ggiraphExtra)   # to enhance 'ggplot2' and 'ggiraph'
library(leaps)          # Regression subset selection
library(relaimpo)       # assessing relative importance in linear models
library(caret)          # functions to streamline the model training process
library(ggplot2)        # creating graphics

Data

Datos<-trees                    # the data object (built-in trees dataset)
Datos<-Datos %>% drop_na()      # drop rows containing NA
attach(Datos)                   # crucial: makes the columns directly accessible
head(Datos)
##   Girth Height Volume
## 1   8.3     70   10.3
## 2   8.6     65   10.3
## 3   8.8     63   10.2
## 4  10.5     72   16.4
## 5  10.7     81   18.8
## 6  10.8     83   19.7
datatable(Datos, extensions = "Buttons", 
          options = list(dom = "Bfrtip",
          buttons = c("copy", "csv", "excel", "pdf")))
Datos$D<-(Girth*2.54)       # convert Girth from inches to cm and call it D (diameter)
Datos$H<-(Height*0.3048)    # convert Height from ft to m and call it H (height)
Datos$V<-(Volume*0.0283168) # convert Volume from cubic ft to m3 and call it V (volume)
attach(Datos)

Variables

Datos$DH     <- D * H
Datos$D2     <- D * D
Datos$H2     <- H * H
Datos$D2H    <- Datos$D2 * H
Datos$D2H2   <- Datos$D2 * Datos$H2
Datos$lnDH   <- log(Datos$DH)
Datos$lnD2H  <- log(Datos$D2H)
Datos$lnD2H2 <- log(Datos$D2H2)
Datos$lnD    <- log(D)
Datos$lnH    <- log(H)
Datos$lnV    <- log(V)
View(Datos)
attach(Datos)   # a single attach once all derived variables exist
names(Datos)
##  [1] "Girth"  "Height" "Volume" "D"      "H"      "V"      "DH"     "D2"    
##  [9] "H2"     "D2H"    "D2H2"   "lnDH"   "lnD2H"  "lnD2H2" "lnD"    "lnH"   
## [17] "lnV"
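
A hedged alternative to the block above, assuming the tidyverse is loaded: build every derived variable in a single mutate() call, which also makes attach() unnecessary.

Datos <- Datos %>%
  mutate(DH = D * H, D2 = D^2, H2 = H^2,
         D2H = D2 * H, D2H2 = D2 * H2,              # later terms can use earlier ones
         lnDH = log(DH), lnD2H = log(D2H), lnD2H2 = log(D2H2),
         lnD = log(D), lnH = log(H), lnV = log(V))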

The set

Set1<-data.frame(V, H, D, DH, D2, H2, D2H, D2H2, lnDH, lnD2H, lnD2H2, lnD, lnH)
head(Set1)  # view the data
##           V       H      D       DH       D2       H2       D2H     D2H2
## 1 0.2916630 21.3360 21.082 449.8056 444.4507 455.2249  9482.801 202325.0
## 2 0.2916630 19.8120 21.844 432.7733 477.1603 392.5153  9453.501 187292.8
## 3 0.2888314 19.2024 22.352 429.2120 499.6119 368.7322  9593.748 184223.0
## 4 0.4643955 21.9456 26.670 585.2892 711.2889 481.6094 15609.662 342563.4
## 5 0.5323558 24.6888 27.178 670.9922 738.6437 609.5368 18236.226 450230.5
## 6 0.5578410 25.2984 27.432 693.9857 752.5146 640.0090 19037.416 481616.2
##       lnDH    lnD2H   lnD2H2      lnD      lnH
## 1 6.108815 9.157235 12.21763 3.048420 3.060396
## 2 6.070214 9.154140 12.14043 3.083926 2.986288
## 3 6.061951 9.168867 12.12390 3.106916 2.955035
## 4 6.372106 9.655645 12.74421 3.283539 3.088567
## 5 6.508758 9.811165 13.01752 3.302408 3.206350
## 6 6.542451 9.854162 13.08490 3.311710 3.230741
datatable(Set1, extensions = "Buttons", 
          options = list(dom = "Bfrtip",
          buttons = c("copy", "csv", "excel", "pdf")))

Generate the model

# Fit an empty (intercept-only) regression
regvacia<-lm(formula = V~1, Set1) # V is the dependent variable
summary(regvacia)
## 
## Call:
## lm(formula = V ~ 1, data = Set1)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -0.5655 -0.3050 -0.1691  0.2019  1.3260 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)   0.8544     0.0836   10.22 2.75e-11 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4655 on 30 degrees of freedom
# Fit a regression with all the variables
regcompleta<-lm(formula = V~(.), Set1);  # regression on every candidate predictor
summary(regcompleta)                     # inspect the regression
## 
## Call:
## lm(formula = V ~ (.), data = Set1)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.107646 -0.030060 -0.000258  0.039214  0.128357 
## 
## Coefficients: (3 not defined because of singularities)
##               Estimate Std. Error t value Pr(>|t|)
## (Intercept)  1.321e+02  1.382e+02   0.956    0.350
## H            7.024e+00  8.123e+00   0.865    0.397
## D            7.217e-01  1.331e+00   0.542    0.593
## DH          -1.381e-02  3.566e-02  -0.387    0.703
## D2          -8.975e-03  2.626e-02  -0.342    0.736
## H2          -7.485e-02  9.844e-02  -0.760    0.455
## D2H          3.158e-04  1.470e-03   0.215    0.832
## D2H2        -1.341e-06  2.117e-05  -0.063    0.950
## lnDH        -1.476e+02  1.755e+02  -0.841    0.410
## lnD2H        7.061e+01  8.833e+01   0.799    0.433
## lnD2H2              NA         NA      NA       NA
## lnD                 NA         NA      NA       NA
## lnH                 NA         NA      NA       NA
## 
## Residual standard error: 0.07936 on 21 degrees of freedom
## Multiple R-squared:  0.9797, Adjusted R-squared:  0.9709 
## F-statistic: 112.3 on 9 and 21 DF,  p-value: 1.024e-15

The three NA coefficients arise because lnD2H2, lnD, and lnH are exact linear combinations of terms already in the model (for example, lnD2H2 = 2*lnDH), so lm() drops them as singular.

Final model

# Generate the model by stepwise search
tic()
modelo<-step(regvacia, scope = list(lower=regvacia, upper=regcompleta),
              direction = "both")           # backward, both, forward
## Start:  AIC=-46.43
## V ~ 1
## 
##          Df Sum of Sq    RSS      AIC
## + D2H     1    6.3553 0.1445 -162.418
## + D2H2    1    6.2982 0.2016 -152.100
## + D2      1    6.2357 0.2641 -143.732
## + DH      1    6.2009 0.2989 -139.895
## + D       1    6.0794 0.4204 -129.316
## + lnD2H   1    5.8205 0.6793 -114.442
## + lnD     1    5.7440 0.7558 -111.135
## + lnD2H2  1    5.7143 0.7855 -109.939
## + lnDH    1    5.7143 0.7855 -109.939
## + H2      1    2.3619 4.1378  -58.428
## + H       1    2.3263 4.1735  -58.162
## + lnH     1    2.2803 4.2195  -57.823
## <none>                6.4998  -46.429
## 
## Step:  AIC=-162.42
## V ~ D2H
## 
##          Df Sum of Sq    RSS      AIC
## <none>                0.1445 -162.418
## + lnH     1    0.0008 0.1437 -160.590
## + H       1    0.0008 0.1438 -160.580
## + H2      1    0.0007 0.1438 -160.566
## + D2      1    0.0006 0.1439 -160.551
## + DH      1    0.0004 0.1442 -160.497
## + D       1    0.0004 0.1442 -160.493
## + D2H2    1    0.0003 0.1442 -160.492
## + lnD     1    0.0003 0.1442 -160.484
## + lnDH    1    0.0001 0.1445 -160.431
## + lnD2H2  1    0.0001 0.1445 -160.431
## + lnD2H   1    0.0000 0.1445 -160.421
## - D2H     1    6.3553 6.4998  -46.429
summary(modelo)
## 
## Call:
## lm(formula = V ~ D2H, data = Set1)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.130808 -0.031153 -0.004689  0.049416  0.118861 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -8.429e-03  2.728e-02  -0.309     0.76    
## D2H          3.059e-05  8.566e-07  35.711   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.07059 on 29 degrees of freedom
## Multiple R-squared:  0.9778, Adjusted R-squared:  0.977 
## F-statistic:  1275 on 1 and 29 DF,  p-value: < 2.2e-16
toc()
## 0.05 sec elapsed
plot(predictorEffects(modelo))

extract_eq(modelo)                          # the final model

\[ \operatorname{V} = \alpha + \beta_{1}(\operatorname{D2H}) + \epsilon \]

extract_eq(modelo, wrap = TRUE, use_coefs = TRUE) # the final model with its coefficients

\[ \begin{aligned} \operatorname{\widehat{V}} &= -0.01 + 0(\operatorname{D2H}) \end{aligned} \]

The D2H slope prints as 0 only because of rounding; the estimate is 3.059e-05, as the summary above shows.

Check the assumptions

outlierTest(modelo, cutoff=Inf, n.max=5)  # Bonferroni test to detect outliers
##     rstudent unadjusted p-value Bonferroni p
## 18 -1.976136           0.058067           NA
## 26  1.833561           0.077370           NA
## 28  1.826281           0.078489           NA
## 17  1.704249           0.099409           NA
## 30 -1.646565           0.110830           NA
check_heteroskedasticity(modelo)          # errors should show homogeneous variance
## OK: Error variance appears to be homoscedastic (p = 0.062).
check_autocorrelation(modelo)             # residuals should be uncorrelated
## OK: Residuals appear to be independent and not autocorrelated (p = 0.898).
check_collinearity(modelo)                # independent (x) variables should not be correlated
## Warning: Not enough model terms in the conditional part of the model to check for
##   multicollinearity.
## NULL
check_normality(modelo)                   # normality of the residuals (errors)
## OK: residuals appear as normally distributed (p = 0.322).

With only one predictor in the final model there is nothing for check_collinearity() to test, hence the warning and NULL result above.
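
The performance package also offers check_model(), which combines these diagnostics into a single panel of plots; a minimal sketch:

check_model(modelo)   # residuals, homoscedasticity, influential points, etc. in one figure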

Method 2: Genetic algorithm

Genetic algorithms (GA) are a mathematical model inspired by Charles Darwin's famous idea of natural selection.

Natural selection preserves only the fittest individuals across generations. Imagine a population of 100 rabbits in 1900: if we look at the population today, we will see rabbits that are faster and more skilled at finding food than their ancestors.

Genetic Algorithms in Machine Learning

In machine learning, one use of genetic algorithms is to pick the right set of variables for building a predictive model.

Picking the right subset of variables is a combinatorial optimization problem.

The advantage of this technique over others is that it allows the best solution to emerge from the best previous solutions: an evolutionary algorithm that improves the selection over time.

The idea of a GA is to combine the different solutions generation after generation, extracting the best genes (variables) from each of them and thereby creating new, fitter individuals; the toy sketch below illustrates one generation.
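
As an illustration only (not glmulti's actual internals), here is a toy sketch of one GA generation for variable selection; each individual is a logical gene vector saying which candidate variables enter the model, and it assumes the Set1 data frame built above:

set.seed(1)
vars <- c("H", "D", "DH", "D2", "H2", "D2H")
fitness <- function(genes) {                         # lower AIC = fitter individual
  if (!any(genes)) return(Inf)                       # an empty model is unfit
  AIC(lm(reformulate(vars[genes], response = "V"), data = Set1))
}
pop     <- replicate(20, runif(length(vars)) > 0.5)  # random initial population
fit     <- apply(pop, 2, fitness)                    # evaluate every individual
parents <- pop[, order(fit)[1:2]]                    # select the two fittest
cross   <- ifelse(runif(length(vars)) > 0.5,         # crossover: mix parent genes
                  parents[, 1], parents[, 2])
child   <- xor(cross, runif(length(vars)) < 0.1)     # mutation: flip ~10% of genes
vars[child]                                          # variables carried by the offspring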

glmulti finds the n best models among all possible models (the candidate set, as specified by the user). Models are fitted with the specified fitting function (the default is glm) and ranked with the specified information criterion (the default is aicc). The best models are found either by exhaustive screening of the candidates or by using a genetic algorithm, which makes very large candidate sets tractable. The output can be used for model selection, variable selection, and multimodel inference.

Exhaustive method and genetic algorithm

el_modelo<-V ~ H + D + DH + D2 + H2 + D2H + D2H2 + lnDH + lnD2H + lnD2H2 + lnD + lnH
# el_modelo<-V ~ log(H) + log(D) + log(DH) + log(D2) + log(H2) + log(D2H) + log(D2H2)
# el_modelo<-V ~ sqrt(H) + sqrt(D) + sqrt(DH) + sqrt(D2) + sqrt(H2) + sqrt(D2H) + sqrt(D2H2)

tic()
modelo_m2 <- glmulti(el_modelo,       # (y ~ x)
                  data   = Datos, 
                  method = "h",       # "h" exhahustivo, "g" genetico, "l", "d" un resumen simple
                  crit   = aic,       # AICC corrected AIC for small samples
                  level  = 1,         # 2 interacciones, 1 sin interaccion
                  family = gaussian,
                  fitfunction = glm,  # Type of model (LM, GLM, GLMER etc.)
                  confsetsize = 100,  # Keep 100 best models
                  includeobjects = TRUE)
## Initialization...
## TASK: Exhaustive screening of candidate set.
## Fitting...
## 
## After 50 models:
## Best model: V~1+D2H
## Crit= -72.4437461210502
## Mean crit= -54.9643138236148
## 
## ... (progress printed every 50 models up to 4250 is omitted here; the best
## model never changes, only the mean criterion slowly improves) ...
## 
## After 4250 models:
## Best model: V~1+D2H
## Crit= -72.4437461210502
## Mean crit= -68.9447430938997
## Completed.
toc() 
## 14.42 sec elapsed
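
For much larger candidate sets, the same call can be switched to the genetic algorithm; a sketch assuming all the remaining arguments stay as above:

modelo_ga <- glmulti(el_modelo,
                     data   = Datos,
                     method = "g",        # genetic algorithm instead of exhaustive screening
                     crit   = aic,
                     level  = 1,
                     family = gaussian,
                     fitfunction = glm,
                     confsetsize = 100,
                     includeobjects = TRUE)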

Fit the model

lm(V~1+D2H)
## 
## Call:
## lm(formula = V ~ 1 + D2H)
## 
## Coefficients:
## (Intercept)          D2H  
##  -8.429e-03    3.059e-05
lm(V~D2H)
## 
## Call:
## lm(formula = V ~ D2H)
## 
## Coefficients:
## (Intercept)          D2H  
##  -8.429e-03    3.059e-05
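
Because includeobjects = TRUE, glmulti also keeps the whole ranked confidence set; a sketch of how to inspect it, assuming weightable() and the objects slot behave as documented:

top_models <- weightable(modelo_m2)       # candidate models with IC values and weights
head(top_models)
summary(modelo_m2@objects[[1]])           # the top-ranked fitted model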

Variable importance

plot(modelo_m2, type="s", col="deepskyblue1")

Method 3: Machine learning

Machine learning (ML) is a subfield of artificial intelligence that refers to the process by which computers develop pattern recognition, or the ability to learn continuously and make predictions from data, and then adjust themselves without having been specifically programmed to do so.

The method

set.seed(123)
# Set up 10-fold cross-validation
train.control <- trainControl(method = "cv", number = 10)
# Train the model
step.model <- train(V~(.), data = Set1,
                    method = "leapBackward", 
                    tuneGrid = data.frame(nvmax = 1:7), # consider subsets of up to 7 predictors
                    trControl = train.control
)
## Warning in leaps.setup(x, y, wt = weights, nbest = nbest, nvmax = nvmax, : 3
## linear dependencies found
## (the same warning appears once per cross-validation resample; the three
## dependencies are the log-scale identities among the columns noted earlier)
summary(step.model$finalModel)
## Subset selection object
## 12 Variables  (and intercept)
##        Forced in Forced out
## H          FALSE      FALSE
## D          FALSE      FALSE
## DH         FALSE      FALSE
## D2         FALSE      FALSE
## H2         FALSE      FALSE
## D2H        FALSE      FALSE
## D2H2       FALSE      FALSE
## lnDH       FALSE      FALSE
## lnD2H      FALSE      FALSE
## lnD2H2     FALSE      FALSE
## lnD        FALSE      FALSE
## lnH        FALSE      FALSE
## 1 subsets of each size up to 3
## Selection Algorithm: backward
##          H   D   DH  D2  H2  D2H D2H2 lnDH lnD2H lnD2H2 lnD lnH
## 1  ( 1 ) " " " " " " " " " " "*" " "  " "  " "   " "    " " " "
## 2  ( 1 ) "*" " " " " " " " " "*" " "  " "  " "   " "    " " " "
## 3  ( 1 ) "*" " " " " " " "*" "*" " "  " "  " "   " "    " " " "
# plot(step.model)
# RocImp2 <- varImp(step.model, scale = TRUE); RocImp2
# plot(RocImp2, col="red")

RMSE by number of predictors

jorge<-as.data.frame(step.model$results)
step.model$bestTune
##   nvmax
## 3     3
datatable(jorge, extensions = "Buttons", 
          options = list(dom = "Bfrtip",
          buttons = c("copy", "csv", "excel", "pdf")))
plot(step.model)
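
To read the coefficients of the cross-validated best subset straight from caret, the regsubsets object stored in finalModel can be queried with coef() and the tuned size; a sketch:

coef(step.model$finalModel, id = step.model$bestTune$nvmax)  # best subset of the chosen size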

Fit the model

subset1<-lm(V~D2H); summary(subset1)
## 
## Call:
## lm(formula = V ~ D2H)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.130808 -0.031153 -0.004689  0.049416  0.118861 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -8.429e-03  2.728e-02  -0.309     0.76    
## D2H          3.059e-05  8.566e-07  35.711   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.07059 on 29 degrees of freedom
## Multiple R-squared:  0.9778, Adjusted R-squared:  0.977 
## F-statistic:  1275 on 1 and 29 DF,  p-value: < 2.2e-16

Variable importance

library(viridis)
library(ggplot2)
RocImp2 <- varImp(step.model, scale = TRUE); RocImp2
## loess r-squared variable importance
## 
##         Overall
## D2H    100.0000
## lnD2H   99.9267
## D2H2    99.1081
## lnD2H2  99.0929
## lnDH    99.0929
## DH      99.0907
## lnD     97.4532
## D       97.4181
## D2      97.3940
## H2       1.9175
## H        0.9615
## lnH      0.0000
plot(RocImp2, col="red")

# imp <- RocImp2$importance                        # importance scores as a data frame
# imp$Variable <- rownames(imp)
# p <- ggplot(imp, aes(x = reorder(Variable, Overall), y = Overall)) +
#   geom_col(fill = turbo(nrow(imp))) +            # one color per variable
#   coord_flip() + theme_minimal(); p

The Course: follow the full course here

library(vembedr)
embed_url("https://www.youtube.com/watch?v=Ke3vfYpKmtU") # lecccion 1
embed_url("https://www.youtube.com/watch?v=Yk8QOjhUvBY") # lecccion 2
embed_url("https://www.youtube.com/watch?v=qFIXTihSZ2w") # lecccion 3
embed_url("https://www.youtube.com/watch?v=KBoTaKgGtg8") # lecccion 4
embed_url("https://www.youtube.com/watch?v=5cDtzvCDzy0") # lecccion 5
embed_url("https://www.youtube.com/watch?v=cpLoEcJC1m4") # lecccion 6