Packages

invisible(lapply(c("tidyverse", "rvest", "kableExtra"), library, character.only = TRUE))
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.2 ──
## ✔ ggplot2 3.4.2     ✔ purrr   1.0.1
## ✔ tibble  3.2.1     ✔ dplyr   1.1.3
## ✔ tidyr   1.3.0     ✔ stringr 1.5.0
## ✔ readr   2.1.3     ✔ forcats 0.5.2
## Warning: package 'ggplot2' was built under R version 4.2.3
## Warning: package 'tibble' was built under R version 4.2.3
## Warning: package 'tidyr' was built under R version 4.2.3
## Warning: package 'purrr' was built under R version 4.2.3
## Warning: package 'dplyr' was built under R version 4.2.3
## Warning: package 'stringr' was built under R version 4.2.3
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## 
## Attaching package: 'rvest'
## 
## 
## The following object is masked from 'package:readr':
## 
##     guess_encoding
## Warning: package 'kableExtra' was built under R version 4.2.3
## 
## Attaching package: 'kableExtra'
## 
## The following object is masked from 'package:dplyr':
## 
##     group_rows
library(glmnet)
## Warning: package 'glmnet' was built under R version 4.2.3
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## 
## The following objects are masked from 'package:tidyr':
## 
##     expand, pack, unpack
## 
## Loaded glmnet 4.1-8
library(imputeTS)
## Warning: package 'imputeTS' was built under R version 4.2.3
## Registered S3 method overwritten by 'quantmod':
##   method            from
##   as.zoo.data.frame zoo 
## 
## Attaching package: 'imputeTS'
## 
## The following object is masked from 'package:glmnet':
## 
##     na.replace
library(corrplot)
## Warning: package 'corrplot' was built under R version 4.2.3
## corrplot 0.92 loaded
library(randtests)
library(lmridge)
library(lares)
## Warning: package 'lares' was built under R version 4.2.3
library(MASS)
## Warning: package 'MASS' was built under R version 4.2.3
## 
## Attaching package: 'MASS'
## 
## The following object is masked from 'package:dplyr':
## 
##     select
library(elasticnet)
## Loading required package: lars
## Loaded lars 1.3
library(caret)
## Warning: package 'caret' was built under R version 4.2.3
## Loading required package: lattice
## 
## Attaching package: 'caret'
## 
## The following object is masked from 'package:purrr':
## 
##     lift

Input Data

datapsdind <- rio::import("https://raw.githubusercontent.com/afhkaniase/praktikum-psd/main/Tugas%20Individu/Data%20World%20Happiness%20Report%202023.csv")
str(datapsdind)
## 'data.frame':    133 obs. of  7 variables:
##  $ Y : num  7.8 7.59 7.53 7.47 7.4 ...
##  $ X1: num  10.8 11 10.9 10.6 10.9 ...
##  $ X2: num  71.2 71.2 72 72.7 71.5 ...
##  $ X3: num  0.969 0.954 0.983 0.943 0.93 0.939 0.943 0.92 0.879 0.952 ...
##  $ X4: num  0.961 0.934 0.936 0.809 0.887 0.948 0.947 0.891 0.915 0.887 ...
##  $ X5: num  -0.019 0.134 0.211 -0.023 0.213 0.165 0.141 0.027 0.024 0.175 ...
##  $ X6: num  0.182 0.196 0.668 0.708 0.379 0.202 0.283 0.266 0.345 0.271 ...
head(datapsdind)
##       Y     X1     X2    X3    X4     X5    X6
## 1 7.804 10.792 71.150 0.969 0.961 -0.019 0.182
## 2 7.586 10.962 71.250 0.954 0.934  0.134 0.196
## 3 7.530 10.896 72.050 0.983 0.936  0.211 0.668
## 4 7.473 10.639 72.697 0.943 0.809 -0.023 0.708
## 5 7.403 10.942 71.550 0.930 0.887  0.213 0.379
## 6 7.395 10.883 72.150 0.939 0.948  0.165 0.202

Variable Description

This analysis uses secondary data from kaggle.com, drawn from the World Happiness Report 2023. The report contains annual data on the happiness scores of various countries, published by the United Nations (UN). The aim is to examine the relationship between the happiness score and several variables: GDP per capita, healthy life expectancy, social support, freedom to make life choices, generosity, and perceptions of corruption (Helliwell et al. 2023). The analysis uses seven variables in total, one response variable and six explanatory variables, listed below.

\[ \begin{aligned} Y &= \text{Happiness score} \\ X_1 &= \text{GDP per capita} \\ X_2 &= \text{Healthy life expectancy} \\ X_3 &= \text{Social support} \\ X_4 &= \text{Freedom to make life choices} \\ X_5 &= \text{Generosity} \\ X_6 &= \text{Perceptions of corruption} \end{aligned} \]

Data Cleaning

# Count missing (NA) values
sum(is.na(datapsdind))
## [1] 2

Since the data contain 2 missing (NA) values, they are imputed by spline interpolation.
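Before imputing, it can help to locate the missing entries. A quick check along these lines (a sketch, not part of the original output) shows which variables and rows contain the NAs:

colSums(is.na(datapsdind))                # NA count per variable
which(is.na(datapsdind), arr.ind = TRUE)  # row/column positions of the NAs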

datapsdindnew <- na_interpolation(datapsdind, option="spline")
head(datapsdindnew)
##       Y     X1     X2    X3    X4     X5    X6
## 1 7.804 10.792 71.150 0.969 0.961 -0.019 0.182
## 2 7.586 10.962 71.250 0.954 0.934  0.134 0.196
## 3 7.530 10.896 72.050 0.983 0.936  0.211 0.668
## 4 7.473 10.639 72.697 0.943 0.809 -0.023 0.708
## 5 7.403 10.942 71.550 0.930 0.887  0.213 0.379
## 6 7.395 10.883 72.150 0.939 0.948  0.165 0.202
# Re-check for NA values
sum(is.na(datapsdindnew))
## [1] 0

After interpolation, no NA values remain.

y <- datapsdindnew$Y
x <- data.matrix(datapsdindnew[, c('X1', 'X2', 'X3', 'X4', 'X5', 'X6')])

Data Exploration

# Distribution of Y (happiness score)
hist(datapsdindnew$Y, col = "skyblue")

df_numeric <- datapsdindnew[sapply(datapsdindnew, is.numeric)]
cor_matrix <- cor(df_numeric, use = "complete.obs")
corrplot(cor_matrix, method = "number")

Classical Regression Model

Regression Model

model_datapsdind <- lm(Y ~ X1 + X2 + X3 + X4 + X5 + X6, data = datapsdindnew)
summary(model_datapsdind)
## 
## Call:
## lm(formula = Y ~ X1 + X2 + X3 + X4 + X5 + X6, data = datapsdindnew)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.59510 -0.23555  0.05019  0.28241  0.98657 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -1.73785    0.71044  -2.446 0.015819 *  
## X1           0.23455    0.06846   3.426 0.000828 ***
## X2           0.02042    0.01370   1.490 0.138603    
## X3           3.47071    0.56699   6.121 1.09e-08 ***
## X4           1.97481    0.46707   4.228 4.49e-05 ***
## X5           0.15356    0.30913   0.497 0.620233    
## X6          -0.78788    0.27208  -2.896 0.004461 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4656 on 126 degrees of freedom
## Multiple R-squared:  0.8063, Adjusted R-squared:  0.7971 
## F-statistic: 87.42 on 6 and 126 DF,  p-value: < 2.2e-16

The fitted regression model is:

\[ \hat{Y}=-1.73785+0.23455X_1+0.02042X_2+3.47071X_3+1.97481X_4+0.15356X_5-0.78788X_6 \]
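Each coefficient is the expected change in the happiness score for a one-unit increase in that predictor, holding the others fixed; for example, a one-unit increase in X1 (GDP per capita) raises the predicted score by about 0.235 points.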

Multicollinearity

car::vif(model_datapsdind)
##       X1       X2       X3       X4       X5       X6 
## 4.020223 3.576918 2.829921 1.422315 1.182618 1.432604

No explanatory variable has \(VIF > 10\), indicating that there is no serious multicollinearity among the predictors.
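For reference, the VIF of predictor \(j\) is computed from the \(R_j^2\) obtained by regressing \(X_j\) on the remaining predictors:

\[ VIF_j = \frac{1}{1 - R_j^2} \]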

Assumption Testing

Normality Test

# Kolmogorov-Smirnov test
ks.test(model_datapsdind$residuals, "pnorm", mean=mean(model_datapsdind$residuals), sd=sd(model_datapsdind$residuals))
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  model_datapsdind$residuals
## D = 0.085963, p-value = 0.2794
## alternative hypothesis: two-sided

The Kolmogorov-Smirnov test yields \(p-value > 0.05\), so at the 95% confidence level there is no evidence against the assumption that the model residuals are normally distributed.
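As a complementary visual check (a sketch, not part of the original output), a normal Q-Q plot of the residuals should fall close to the reference line:

# Q-Q plot: systematic departures from the line suggest non-normal residuals
qqnorm(model_datapsdind$residuals)
qqline(model_datapsdind$residuals, col = "red")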

Homoscedasticity Test (Gauss-Markov)

lmtest::bptest(model_datapsdind)
## 
##  studentized Breusch-Pagan test
## 
## data:  model_datapsdind
## BP = 8.9765, df = 6, p-value = 0.1749

Since \(p-value > 0.05\), there is no evidence of heteroscedasticity in the model residuals at the 95% confidence level.
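A residuals-versus-fitted plot (again a sketch, not in the original output) provides the same check visually; the vertical spread should be roughly constant across fitted values:

# Homoscedasticity check: look for constant vertical spread around zero
plot(fitted(model_datapsdind), residuals(model_datapsdind),
     xlab = "Fitted values", ylab = "Residuals")
abline(h = 0, lty = 2)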

Residual Independence Test (Gauss-Markov)

runs.test(model_datapsdind$residuals)
## 
##  Runs Test
## 
## data:  model_datapsdind$residuals
## statistic = -0.87373, runs = 62, n1 = 66, n2 = 66, n = 132, p-value =
## 0.3823
## alternative hypothesis: nonrandomness

Since \(p-value > 0.05\), there is no evidence of autocorrelation; the residuals can be treated as mutually independent at the 95% confidence level.
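An alternative check for first-order autocorrelation is the Durbin-Watson test from the lmtest package (a sketch, called via :: as with bptest above), where statistics near 2 indicate independent residuals:

# Durbin-Watson test on the fitted model
lmtest::dwtest(model_datapsdind)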

Zero-Mean Residual Test

t.test(model_datapsdind$residuals,
       mu = 0,
       conf.level = 0.95)
## 
##  One Sample t-test
## 
## data:  model_datapsdind$residuals
## t = 4.4485e-16, df = 132, p-value = 1
## alternative hypothesis: true mean is not equal to 0
## 95 percent confidence interval:
##  -0.07802827  0.07802827
## sample estimates:
##    mean of x 
## 1.754777e-17

Since \(p-value > 0.05\), there is no evidence that the expected value of the residuals differs from zero at the 95% confidence level.
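Note that for an OLS fit that includes an intercept, the residuals sum to zero by construction, so the sample mean here is zero up to floating-point error and this test is effectively guaranteed not to reject.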

AIC

AICklasik <- AIC(model_datapsdind)
AICklasik
## [1] 182.9201
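For reference, for a model with \(k\) estimated parameters and maximized likelihood \(\hat{L}\),

\[ AIC = 2k - 2\ln(\hat{L}) \]

so smaller values indicate a better trade-off between goodness of fit and model complexity.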

Variable Selection

Best Subset

library(leaps)
regfit.full=regsubsets(Y ~ X1 + X2 + X3 + X4 + X5 + X6, data = datapsdindnew, nvmax=64)
reg.summary=summary(regfit.full)
reg.summary
## Subset selection object
## Call: regsubsets.formula(Y ~ X1 + X2 + X3 + X4 + X5 + X6, data = datapsdindnew, 
##     nvmax = 64)
## 6 Variables  (and intercept)
##    Forced in Forced out
## X1     FALSE      FALSE
## X2     FALSE      FALSE
## X3     FALSE      FALSE
## X4     FALSE      FALSE
## X5     FALSE      FALSE
## X6     FALSE      FALSE
## 1 subsets of each size up to 6
## Selection Algorithm: exhaustive
##          X1  X2  X3  X4  X5  X6 
## 1  ( 1 ) " " " " "*" " " " " " "
## 2  ( 1 ) "*" " " "*" " " " " " "
## 3  ( 1 ) "*" " " "*" "*" " " " "
## 4  ( 1 ) "*" " " "*" "*" " " "*"
## 5  ( 1 ) "*" "*" "*" "*" " " "*"
## 6  ( 1 ) "*" "*" "*" "*" "*" "*"
reg.summary$adjr2
## [1] 0.6496334 0.7377931 0.7810391 0.7965730 0.7982921 0.7970887
which.max(reg.summary$adjr2)
## [1] 5
coef(regfit.full, 6)  # full six-predictor model (the best subset by adjusted R-squared is model 5)
## (Intercept)          X1          X2          X3          X4          X5 
## -1.73785346  0.23455475  0.02042251  3.47071075  1.97481447  0.15356057 
##          X6 
## -0.78787568
olsrr::ols_step_best_subset(model_datapsdind)
##     Best Subsets Regression     
## --------------------------------
## Model Index    Predictors
## --------------------------------
##      1         X3                
##      2         X1 X3             
##      3         X1 X3 X4          
##      4         X1 X3 X4 X6       
##      5         X1 X2 X3 X4 X6    
##      6         X1 X2 X3 X4 X5 X6 
## --------------------------------
## 
##                                                     Subsets Regression Summary                                                    
## ----------------------------------------------------------------------------------------------------------------------------------
##                        Adj.        Pred                                                                                            
## Model    R-Square    R-Square    R-Square     C(p)        AIC         SBIC         SBC        MSEP       FPE       HSP       APC  
## ----------------------------------------------------------------------------------------------------------------------------------
##   1        0.6523      0.6496      0.6404    97.1974    250.7419    -128.6833    259.4130    49.7888    0.3800    0.0029    0.3583 
##   2        0.7418      0.7378      0.7265    40.9891    213.1732    -165.6009    224.7346    37.2631    0.2865    0.0022    0.2702 
##   3        0.7860      0.7810      0.7676    14.2035    190.1742    -187.6239    204.6259    31.1191    0.2410    0.0018    0.2273 
##   4        0.8027      0.7966      0.7842     5.3253    181.3523    -195.7238    198.6943    28.9132    0.2255    0.0017    0.2127 
##   5        0.8059      0.7983      0.7806     5.2468    181.1803    -195.6211    201.4128    28.6706    0.2252    0.0017    0.2124 
##   6        0.8063      0.7971      0.7763     7.0000    182.9201    -193.7459    206.0429    28.8435    0.2282    0.0017    0.2152 
## ----------------------------------------------------------------------------------------------------------------------------------
## AIC: Akaike Information Criteria 
##  SBIC: Sawa's Bayesian Information Criteria 
##  SBC: Schwarz Bayesian Criteria 
##  MSEP: Estimated error of prediction, assuming multivariate normality 
##  FPE: Final Prediction Error 
##  HSP: Hocking's Sp 
##  APC: Amemiya Prediction Criteria

In best subset selection, the lowest AIC and the highest adjusted R-squared belong to model 5, which leaves X5 (generosity) out of the model. Its adjusted R-squared of 0.7983 indicates that the five selected predictors describe the variation in happiness scores well.
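The coefficients of that five-predictor model can be pulled directly from the regsubsets fit (a sketch; output omitted, but it matches the forward-selection model reported below):

coef(regfit.full, 5)  # best five-predictor subset: X1, X2, X3, X4, X6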

Forward Selection

fmodelselect <- step(lm(y ~ 1, datapsdindnew), direction="forward", scope=formula(model_datapsdind), trace=1)
## Start:  AIC=9.8
## y ~ 1
## 
##        Df Sum of Sq     RSS      AIC
## + X3    1    91.996  49.040 -128.696
## + X1    1    90.039  50.998 -123.490
## + X2    1    80.258  60.778 -100.155
## + X4    1    49.531  91.505  -45.736
## + X6    1    31.653 109.383  -22.000
## <none>              141.036    9.803
## + X5    1     0.006 141.030   11.797
## 
## Step:  AIC=-128.7
## y ~ X3
## 
##        Df Sum of Sq    RSS     AIC
## + X1    1   12.6197 36.420 -166.26
## + X6    1   11.1334 37.907 -160.94
## + X4    1    9.0586 39.981 -153.86
## + X2    1    8.2303 40.810 -151.13
## <none>              49.040 -128.70
## + X5    1    0.0000 49.040 -126.70
## 
## Step:  AIC=-166.26
## y ~ X3 + X1
## 
##        Df Sum of Sq    RSS     AIC
## + X4    1    6.2408 30.180 -189.26
## + X6    1    4.6116 31.809 -182.27
## + X5    1    0.9123 35.508 -167.64
## + X2    1    0.7304 35.690 -166.96
## <none>              36.420 -166.26
## 
## Step:  AIC=-189.26
## y ~ X3 + X1 + X4
## 
##        Df Sum of Sq    RSS     AIC
## + X6    1   2.35840 27.821 -198.09
## + X2    1   0.76348 29.416 -190.67
## <none>              30.180 -189.26
## + X5    1   0.20339 29.976 -188.16
## 
## Step:  AIC=-198.09
## y ~ X3 + X1 + X4 + X6
## 
##        Df Sum of Sq    RSS     AIC
## + X2    1   0.45064 27.370 -198.26
## <none>              27.821 -198.09
## + X5    1   0.02252 27.799 -196.19
## 
## Step:  AIC=-198.26
## y ~ X3 + X1 + X4 + X6 + X2
## 
##        Df Sum of Sq    RSS     AIC
## <none>              27.370 -198.26
## + X5    1  0.053497 27.317 -196.52
summary(fmodelselect)
## 
## Call:
## lm(formula = y ~ X3 + X1 + X4 + X6 + X2, data = datapsdindnew)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.63112 -0.24498  0.06092  0.28883  0.97970 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -1.66396    0.69263  -2.402 0.017736 *  
## X3           3.53208    0.55172   6.402 2.71e-09 ***
## X1           0.22684    0.06648   3.412 0.000865 ***
## X4           2.00886    0.46064   4.361 2.65e-05 ***
## X6          -0.81662    0.26507  -3.081 0.002531 ** 
## X2           0.01962    0.01357   1.446 0.150636    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4642 on 127 degrees of freedom
## Multiple R-squared:  0.8059, Adjusted R-squared:  0.7983 
## F-statistic: 105.5 on 5 and 127 DF,  p-value: < 2.2e-16
olsrr::ols_step_forward_p(model_datapsdind)
## 
##                             Selection Summary                             
## -------------------------------------------------------------------------
##         Variable                  Adj.                                       
## Step    Entered     R-Square    R-Square     C(p)        AIC        RMSE     
## -------------------------------------------------------------------------
##    1    X3            0.6523      0.6496    97.1974    250.7419    0.6118    
##    2    X1            0.7418      0.7378    40.9891    213.1732    0.5293    
##    3    X4            0.7860      0.7810    14.2035    190.1742    0.4837    
##    4    X6            0.8027      0.7966     5.3253    181.3523    0.4662    
##    5    X2            0.8059      0.7983     5.2468    181.1803    0.4642    
## -------------------------------------------------------------------------

Forward selection gives the same result. The optimal model is reached at step 5, before X5 (generosity) enters the model; this step is chosen because it has the smallest AIC, 181.1803, with an adjusted R-squared of 0.7983.

Backward Elimination

bmodelselect <- step(model_datapsdind, direction="backward", scope=formula(lm(Y ~ X1 + X2 + X3 + X4 + X5 + X6, data = datapsdindnew)), trace=1)
## Start:  AIC=-196.52
## Y ~ X1 + X2 + X3 + X4 + X5 + X6
## 
##        Df Sum of Sq    RSS     AIC
## - X5    1    0.0535 27.371 -198.26
## <none>              27.317 -196.52
## - X2    1    0.4816 27.799 -196.19
## - X6    1    1.8179 29.135 -189.95
## - X1    1    2.5446 29.862 -186.67
## - X4    1    3.8757 31.193 -180.87
## - X3    1    8.1237 35.441 -163.89
## 
## Step:  AIC=-198.26
## Y ~ X1 + X2 + X3 + X4 + X6
## 
##        Df Sum of Sq    RSS     AIC
## <none>              27.371 -198.26
## - X2    1    0.4506 27.821 -198.09
## - X6    1    2.0456 29.416 -190.67
## - X1    1    2.5092 29.880 -188.59
## - X4    1    4.0988 31.469 -181.70
## - X3    1    8.8329 36.203 -163.06
summary(bmodelselect)
## 
## Call:
## lm(formula = Y ~ X1 + X2 + X3 + X4 + X6, data = datapsdindnew)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.63112 -0.24498  0.06092  0.28883  0.97970 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -1.66396    0.69263  -2.402 0.017736 *  
## X1           0.22684    0.06648   3.412 0.000865 ***
## X2           0.01962    0.01357   1.446 0.150636    
## X3           3.53208    0.55172   6.402 2.71e-09 ***
## X4           2.00886    0.46064   4.361 2.65e-05 ***
## X6          -0.81662    0.26507  -3.081 0.002531 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4642 on 127 degrees of freedom
## Multiple R-squared:  0.8059, Adjusted R-squared:  0.7983 
## F-statistic: 105.5 on 5 and 127 DF,  p-value: < 2.2e-16
olsrr::ols_step_backward_p(model_datapsdind)
## 
## 
##                           Elimination Summary                            
## ------------------------------------------------------------------------
##         Variable                  Adj.                                      
## Step    Removed     R-Square    R-Square     C(p)       AIC        RMSE     
## ------------------------------------------------------------------------
##    1    X5            0.8059      0.7983    5.2468    181.1803    0.4642    
## ------------------------------------------------------------------------

Backward elimination removes X5 (generosity) from the model. Best subset, forward selection, and backward elimination therefore all reach the same conclusion: the best model is obtained by excluding X5 (generosity).

Stepwise Selection (Both Directions)

smodelselect <- step(lm(y ~ 1, datapsdindnew), direction="both", scope=formula(model_datapsdind), trace=1)
## Start:  AIC=9.8
## y ~ 1
## 
##        Df Sum of Sq     RSS      AIC
## + X3    1    91.996  49.040 -128.696
## + X1    1    90.039  50.998 -123.490
## + X2    1    80.258  60.778 -100.155
## + X4    1    49.531  91.505  -45.736
## + X6    1    31.653 109.383  -22.000
## <none>              141.036    9.803
## + X5    1     0.006 141.030   11.797
## 
## Step:  AIC=-128.7
## y ~ X3
## 
##        Df Sum of Sq     RSS      AIC
## + X1    1    12.620  36.420 -166.264
## + X6    1    11.133  37.907 -160.945
## + X4    1     9.059  39.981 -153.857
## + X2    1     8.230  40.810 -151.130
## <none>               49.040 -128.696
## + X5    1     0.000  49.040 -126.696
## - X3    1    91.996 141.036    9.803
## 
## Step:  AIC=-166.26
## y ~ X3 + X1
## 
##        Df Sum of Sq    RSS     AIC
## + X4    1    6.2408 30.180 -189.26
## + X6    1    4.6116 31.809 -182.27
## + X5    1    0.9123 35.508 -167.64
## + X2    1    0.7304 35.690 -166.96
## <none>              36.420 -166.26
## - X1    1   12.6197 49.040 -128.70
## - X3    1   14.5774 50.998 -123.49
## 
## Step:  AIC=-189.26
## y ~ X3 + X1 + X4
## 
##        Df Sum of Sq    RSS     AIC
## + X6    1    2.3584 27.821 -198.09
## + X2    1    0.7635 29.416 -190.67
## <none>              30.180 -189.26
## + X5    1    0.2034 29.976 -188.16
## - X4    1    6.2408 36.420 -166.26
## - X1    1    9.8018 39.981 -153.86
## - X3    1    9.9370 40.117 -153.41
## 
## Step:  AIC=-198.09
## y ~ X3 + X1 + X4 + X6
## 
##        Df Sum of Sq    RSS     AIC
## + X2    1    0.4506 27.371 -198.26
## <none>              27.821 -198.09
## + X5    1    0.0225 27.799 -196.19
## - X6    1    2.3584 30.180 -189.26
## - X4    1    3.9876 31.809 -182.27
## - X1    1    5.6767 33.498 -175.39
## - X3    1   11.3932 39.214 -154.43
## 
## Step:  AIC=-198.26
## y ~ X3 + X1 + X4 + X6 + X2
## 
##        Df Sum of Sq    RSS     AIC
## <none>              27.371 -198.26
## - X2    1    0.4506 27.821 -198.09
## + X5    1    0.0535 27.317 -196.52
## - X6    1    2.0456 29.416 -190.67
## - X1    1    2.5092 29.880 -188.59
## - X4    1    4.0988 31.469 -181.70
## - X3    1    8.8329 36.203 -163.06
summary(smodelselect)
## 
## Call:
## lm(formula = y ~ X3 + X1 + X4 + X6 + X2, data = datapsdindnew)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.63112 -0.24498  0.06092  0.28883  0.97970 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -1.66396    0.69263  -2.402 0.017736 *  
## X3           3.53208    0.55172   6.402 2.71e-09 ***
## X1           0.22684    0.06648   3.412 0.000865 ***
## X4           2.00886    0.46064   4.361 2.65e-05 ***
## X6          -0.81662    0.26507  -3.081 0.002531 ** 
## X2           0.01962    0.01357   1.446 0.150636    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4642 on 127 degrees of freedom
## Multiple R-squared:  0.8059, Adjusted R-squared:  0.7983 
## F-statistic: 105.5 on 5 and 127 DF,  p-value: < 2.2e-16
olsrr::ols_step_both_p(model_datapsdind)
## 
##                              Stepwise Selection Summary                               
## -------------------------------------------------------------------------------------
##                      Added/                   Adj.                                       
## Step    Variable    Removed     R-Square    R-Square     C(p)        AIC        RMSE     
## -------------------------------------------------------------------------------------
##    1       X3       addition       0.652       0.650    97.1970    250.7419    0.6118    
##    2       X1       addition       0.742       0.738    40.9890    213.1732    0.5293    
##    3       X4       addition       0.786       0.781    14.2030    190.1742    0.4837    
##    4       X6       addition       0.803       0.797     5.3250    181.3523    0.4662    
## -------------------------------------------------------------------------------------
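The bidirectional search again ends at the five-predictor model without X5 (generosity). The p-value-based olsrr procedure stops one step earlier, after adding X6, because X2 does not meet its entry threshold, whereas the AIC-based step() run above also admits X2.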

Ridge Regression

y <- datapsdindnew$Y
x <- data.matrix(datapsdindnew[, c('X1', 'X2', 'X3', 'X4', 'X5', 'X6')])

The glmnet Function

cv.r <- cv.glmnet(x,y,alpha=0)
plot(cv.r)
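Note that cv.glmnet assigns cross-validation folds at random, so lambda.min, and with it the coefficients reported below, can vary slightly between runs. Fixing a seed beforehand (the value 123 is an arbitrary choice) makes the knitted results reproducible:

set.seed(123)  # arbitrary seed so the CV folds, and hence lambda.min, are reproducible
cv.r <- cv.glmnet(x, y, alpha = 0)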

Ridge Regression Results

library(lmridge)
model.ridge<-lmridge(Y~., data=datapsdindnew)
summary(model.ridge)
## 
## Call:
## lmridge.default(formula = Y ~ ., data = datapsdindnew)
## 
## 
## Coefficients: for Ridge parameter K= 0 
##            Estimate Estimate (Sc) StdErr (Sc) t-value (Sc) Pr(>|t|)    
## Intercept   -1.7378     -114.8945     57.8734      -1.9853   0.0493 *  
## X1           0.2346        3.1984      0.9299       3.4395   0.0008 ***
## X2           0.0204        1.3125      0.8771       1.4964   0.1371    
## X3           3.4707        4.7947      0.7802       6.1456   <2e-16 ***
## X4           1.9748        2.3479      0.5531       4.2449   <2e-16 ***
## X5           0.1536        0.2515      0.5044       0.4987   0.6189    
## X6          -0.7879       -1.6138      0.5551      -2.9072   0.0043 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Ridge Summary
##         R2     adj-R2   DF ridge          F        AIC        BIC 
##    0.80630    0.79870    5.99993   88.11567 -198.51767  469.24066 
## Ridge minimum MSE= 3.111256 at K= 0 
## P-value for F-test ( 5.99993 , 127.0002 ) = 7.450831e-43 
## -------------------------------------------------------------------
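For reference, ridge regression estimates the coefficients by adding an L2 penalty to the least-squares criterion, shrinking them toward zero without setting any of them exactly to zero:

\[ \hat{\beta}^{ridge} = \underset{\beta}{\operatorname{arg\,min}} \left\{ \sum_{i=1}^{n} \Big( y_i - \beta_0 - \sum_{j=1}^{6} x_{ij}\beta_j \Big)^2 + \lambda \sum_{j=1}^{6} \beta_j^2 \right\} \]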

Ridge Coefficients

best_lambda <- cv.r$lambda.min
best_ridge <- glmnet(x, y, alpha = 0, lambda = best_lambda)
coef(best_ridge)
## 7 x 1 sparse Matrix of class "dgCMatrix"
##                     s0
## (Intercept) -1.6865257
## X1           0.2155973
## X2           0.0293849
## X3           2.8998954
## X4           1.9223654
## X5           0.1716632
## X6          -0.7237225

Lasso Regression

y <- matrix(datapsdindnew$Y)
x <- data.matrix(datapsdindnew[, c('X1', 'X2', 'X3', 'X4', 'X5', 'X6')])

The glmnet Function

cv.l<-cv.glmnet(x,y,alpha=1)
plot(cv.l)
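The lasso replaces the L2 penalty with an L1 penalty, which can shrink coefficients exactly to zero and therefore performs variable selection as well as shrinkage:

\[ \hat{\beta}^{lasso} = \underset{\beta}{\operatorname{arg\,min}} \left\{ \sum_{i=1}^{n} \Big( y_i - \beta_0 - \sum_{j=1}^{6} x_{ij}\beta_j \Big)^2 + \lambda \sum_{j=1}^{6} |\beta_j| \right\} \]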

Lasso Regression Results

model.lasso<-lasso_vars(datapsdindnew,Y) 
## >>> Searching for optimal lambda with CV...
## Warning in doTryCatch(return(expr), name, parentenv, handler): Reached maximum
## number of iterations 47!
## Found best lambda: 0.012593
## >>> Fetching most relevant variables...
## >>> Generating plots for Y...
## Warning in .font_global(font, quiet = FALSE): Font 'Arial Narrow' is not
## installed, has other name, or can't be found
## Elapsed time: 14.3s
model.lasso
## $coef
## # A tibble: 6 × 6
##   names coefficients standardized_coefficients     abs     prc coef    
##   <chr>        <dbl>                     <dbl>   <dbl>   <dbl> <chr>   
## 1 X3          3.46                     0.416   0.416   0.364   positive
## 2 X1          0.230                    0.273   0.273   0.239   positive
## 3 X4          1.94                     0.201   0.201   0.176   positive
## 4 X6         -0.761                   -0.136   0.136   0.119   negative
## 5 X2          0.0194                   0.108   0.108   0.0948  positive
## 6 X5          0.0600                   0.00855 0.00855 0.00748 positive
## 
## $metrics
## # A tibble: 1 × 7
##    rmse   mae  mape   mse   rsq  rsqa bestlambda
##   <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>      <dbl>
## 1 0.454 0.341  72.3 0.206 0.806 0.805     0.0126
## 
## $model
## Model Details:
## ==============
## 
## H2ORegressionModel: glm
## Model ID:  GLM_model_R_1696142689398_6 
## GLM Model: summary
##     family     link            regularization number_of_predictors_total
## 1 gaussian identity Lasso (lambda = 0.01259 )                          6
##   number_of_active_predictors number_of_iterations  training_frame
## 1                           6                    1 temp_sid_8475_3
## 
## Coefficients: glm coefficients
##       names coefficients standardized_coefficients
## 1 Intercept    -7.230283                 -0.000000
## 2        X1     0.229933                  0.272902
## 3        X2     0.019365                  0.108325
## 4        X3     3.459808                  0.416018
## 5        X4     1.938114                  0.200559
## 6        X5     0.058917                  0.008400
## 7        X6    -0.761800                 -0.135816
## 
## H2ORegressionMetrics: glm
## ** Reported on training data. **
## 
## MSE:  0.2058288
## RMSE:  0.4536835
## MAE:  0.3411151
## RMSLE:  NaN
## Mean Residual Deviance :  0.2058288
## R^2 :  0.8058994
## Null Deviance :141.0362
## Null D.o.F. :132
## Residual Deviance :27.37522
## Residual D.o.F. :126
## AIC :183.2031
## 
## 
## 
## 
## 
## $plot

Lasso Coefficients

best.ll<-cv.l$lambda.min
bestlasso<-glmnet(x,y,alpha=1,lambda=best.ll)
coef(bestlasso)
## 7 x 1 sparse Matrix of class "dgCMatrix"
##                     s0
## (Intercept) -1.4576501
## X1           0.2268118
## X2           0.0183375
## X3           3.4210967
## X4           1.8756365
## X5           .        
## X6          -0.7159286
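In the sparse coefficient matrix, the "." entry for X5 means its coefficient has been shrunk exactly to zero: at the optimal lambda, the lasso drops X5 (generosity) from the model.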

Lasso Adjusted R-Squared

n <- length(datapsdindnew$Y)
p <- ncol(datapsdindnew) - 1  # number of predictors (exclude the response Y)
r_squared_lasso <- 0.8058

adjusted_r_squared <- 1 - ((1 - r_squared_lasso) * (n - 1) / (n - p - 1))
adjusted_r_squared
## [1] 0.7965524
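The calculation follows the usual adjusted R-squared formula, with \(n = 133\) observations and \(p = 6\) predictors:

\[ R^2_{adj} = 1 - \frac{(1 - R^2)(n - 1)}{n - p - 1} \]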

Comparison of the Classical, Ridge, and Lasso Regression Models

comparison_table <- data.frame(
  Method = c("Klasik", "Best Subset", "Ridge", "Lasso"),
  AIC = c(AICklasik, 181.1803, -198.51767, 183.2031),
  Adj.R2 = c(0.7971, 0.7983, 0.7987, 0.7966),
  R2 = c(0.8063, 0.8063, 0.8063, 0.8058)
) %>%
  arrange(desc(R2))

print(comparison_table)
##        Method       AIC Adj.R2     R2
## 1      Klasik  182.9201 0.7971 0.8063
## 2 Best Subset  181.1803 0.7983 0.8063
## 3       Ridge -198.5177 0.7987 0.8063
## 4       Lasso  183.2031 0.7966 0.8058

Judged by R-squared, the Classical, Best Subset, and Ridge models are equally good, sharing the best value. Judged by AIC, the Ridge model appears far better than the other three; note, however, that lmridge reports AIC on a different scale than stats::AIC, so the size of this gap should not be over-interpreted. Because the R-squared values of the four models are nearly identical, the Ridge model is selected as the best model for these data.

Influential Variables

The Classical, Best Subset, Ridge, and Lasso models give consistent results. The explanatory variables retained in the model are X1, X2, X3, X4, and X6, while X5 (generosity) has no significant effect on the response. The lasso shrinks the X5 coefficient exactly to zero, whereas the ridge model retains it with a small, non-significant coefficient (ridge shrinkage never zeroes coefficients out). The model has an R-squared of about 0.806, meaning it explains 80.6% of the variation in the data. The fitted model is shown below.

Interpretation of the Best Model

coef(best_ridge)
## 7 x 1 sparse Matrix of class "dgCMatrix"
##                     s0
## (Intercept) -1.6865257
## X1           0.2155973
## X2           0.0293849
## X3           2.8998954
## X4           1.9223654
## X5           0.1716632
## X6          -0.7237225

\[ \hat{Y}=-1.68653+0.21560X_1+0.02938X_2+2.89990X_3+1.92237X_4+0.17166X_5-0.72372X_6 \]

Explanatory variables with a significant effect on the response: X1 (GDP per capita), X3 (social support), X4 (freedom to make life choices), and X6 (perceptions of corruption).

Explanatory variables with no significant effect on the response: X2 (healthy life expectancy) and X5 (generosity).