library(ISLR)
library(leaps)
Warning: package ‘leaps’ was built under R version 4.0.5
library(glmnet)
Warning: package ‘glmnet’ was built under R version 4.0.5
Loaded glmnet 4.1-4
library(pls)

Attaching package: ‘pls’

The following object is masked from ‘package:stats’:

    loadings

2a. 3 is correct: the lasso is less flexible than least squares, so it improves prediction accuracy when the small increase in bias is more than offset by the larger reduction in variance. It can also remove non-essential predictors entirely.

2b. 3 is correct: ridge regression behaves much like the lasso here (less flexible, better prediction when the bias increase is outweighed by the variance reduction), but unlike the lasso it cannot remove a predictor entirely; it only shrinks the coefficient to a very small value, as the sketch after 2c illustrates.

2c. 2 is correct: non-linear methods are more flexible than least squares and therefore have lower bias; they improve prediction accuracy when the decrease in bias outweighs the accompanying increase in variance.
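A minimal sketch on simulated data (not part of the assignment; sim.x and sim.y are illustrative names) showing the contrast behind 2a/2b: at the same penalty, the lasso drives the uninformative coefficients exactly to zero, while ridge regression merely shrinks them.

set.seed(1)
sim.x <- matrix(rnorm(100 * 10), 100, 10)             # 10 predictors, only 2 informative
sim.y <- 3 * sim.x[, 1] + 2 * sim.x[, 2] + rnorm(100)

coef(glmnet(sim.x, sim.y, alpha = 1, lambda = 0.5))   # lasso: exact zeros
coef(glmnet(sim.x, sim.y, alpha = 0, lambda = 0.5))   # ridge: small but nonzero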

9.

attach(College)
x <- model.matrix(Apps ~ ., College)[, -1]
y <- Apps

set.seed(1)

train <- sample(c(TRUE, FALSE), nrow(College), replace = TRUE)
test <- (!train)

y.test <- y[test]
linear <- lm(Apps ~ ., data = College[train, ])
linear.pred <- predict(linear, College[test, ])
mean((linear.pred - College[test, ]$Apps)^2)
[1] 1048053

train.x <- model.matrix(Apps ~ ., data = College[train, ])[, -1]
train.y <- College[train, ]$Apps
test.x <- model.matrix(Apps ~ ., data = College[test, ])[, -1]
test.y <- College[test, ]$Apps

# Ridge regression (alpha = 0), lambda chosen by 10-fold cross-validation
ridge.fit <- cv.glmnet(train.x, train.y, alpha = 0)
lam.ridge <- ridge.fit$lambda.min

ridge.pred <- predict(ridge.fit, s = lam.ridge, newx = test.x)

mean((ridge.pred-test.y)^2)
[1] 1087614
lasso.fit <- cv.glmnet(train.x, train.y, alpha = 1)

lam.lasso <- lasso.fit$lambda.min

lasso.pred <- predict(lasso.fit, s = lam.lasso, newx = test.x)

mean((lasso.pred-test.y)^2)
[1] 1325859
pcr.fit <- pcr(Apps~., data = College[train,], scale = TRUE, validation = "CV")
summary(pcr.fit)
Data:   X dimension: 390 17 
    Y dimension: 390 1
Fit method: svdpc
Number of components considered: 17

VALIDATION: RMSEP
Cross-validated using 10 random segments.
       (Intercept)  1 comps  2 comps  3 comps  4 comps  5 comps  6 comps  7 comps
CV            4199     4125     2354     2346     1932     1864     1868     1868
adjCV         4199     4124     2350     2423     1914     1853     1860     1852
       8 comps  9 comps  10 comps  11 comps  12 comps  13 comps  14 comps  15 comps
CV        1852     1773      1753      1788      1790      1796      1795      1771
adjCV     1839     1762      1742      1777      1780      1786      1811      1747
       16 comps  17 comps
CV         1408      1352
adjCV      1389      1335

TRAINING: % variance explained
      1 comps  2 comps  3 comps  4 comps  5 comps  6 comps  7 comps  8 comps  9 comps
X      31.458    56.66    63.24    69.60    75.22    80.12    83.82    87.41    90.37
Apps    3.856    70.36    70.57    81.38    82.73    82.82    83.50    83.76    84.62
      10 comps  11 comps  12 comps  13 comps  14 comps  15 comps  16 comps  17 comps
X        92.82     94.99     96.93     97.98     98.71     99.34     99.82    100.00
Apps     85.23     85.29     85.38     85.40     85.42     90.56     93.25     93.63

xcol <- 1:17

mse_pcr_comps <- numeric(length = length(xcol))

# Test MSE on the held-out set for each number of principal components
for (i in seq_along(xcol)) {
    mse_pcr_comps[i] <- mean(
        (predict(pcr.fit, College[test, ], ncomp = i) - College[test, ]$Apps)^2)
}

mse_pcr_comps
 [1] 11717019  3104455  3095180  2093298  1965883  1974813  1908495  1794200
 [9]  1754765  1778336  1803771  1828637  1829753  1833692  1419045  1086308
[17]  1048053
pcr.pred <- predict(pcr.fit, College[test, ], ncomp = 16)
mean((pcr.pred - y.test)^2)
[1] 1086308
pls.fit <- plsr(Apps ~ ., data = College[train, ], scale = TRUE, validation = "CV")
summary(pls.fit)
Data:   X dimension: 390 17 
    Y dimension: 390 1
Fit method: kernelpls
Number of components considered: 17

VALIDATION: RMSEP
Cross-validated using 10 random segments.
       (Intercept)  1 comps  2 comps  3 comps  4 comps  5 comps  6 comps  7 comps
CV            4199     2123     1825     1680     1670     1571     1448     1417
adjCV         4199     2116     1818     1671     1646     1538     1425     1396
       8 comps  9 comps  10 comps  11 comps  12 comps  13 comps  14 comps  15 comps
CV        1399     1385      1386      1388      1391      1390      1389      1390
adjCV     1379     1366      1367      1369      1371      1370      1370      1370
       16 comps  17 comps
CV         1390      1390
adjCV      1371      1371

TRAINING: % variance explained
      1 comps  2 comps  3 comps  4 comps  5 comps  6 comps  7 comps  8 comps
X       25.50    42.05    62.57    65.26    67.81    71.99    75.39    79.20
Apps    76.53    83.93    87.06    90.72    92.98    93.41    93.49    93.53
      9 comps  10 comps  11 comps  12 comps  13 comps  14 comps  15 comps  16 comps
X       82.55     85.58     89.18     90.83     92.57     95.11     97.12     99.13
Apps    93.58     93.61     93.62     93.63     93.63     93.63     93.63     93.63
      17 comps
X       100.00
Apps     93.63
xcol <- 1:17

mse_pls_comps <- numeric(length = length(xcol))

# Test MSE on the held-out set for each number of PLS components
for (i in seq_along(xcol)) {
    mse_pls_comps[i] <- mean(
        (predict(pls.fit, College[test, ], ncomp = i) - College[test, ]$Apps)^2)
}

mse_pls_comps
 [1] 2529127 1968498 1595733 1269633 1140182 1075574 1061787 1049708 1056804 1051164
[11] 1057288 1045069 1049615 1048361 1048134 1048165 1048053
pls.pred <- predict(pls.fit, College[test, ], ncomp = 8)
mean((pls.pred - y.test)^2)
[1] 1049708

G. The least-squares linear model performed best (test MSE ≈ 1,048,053), but PLS with 8 components was within a hair of it (≈ 1,049,708); the numbers are collected below.
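For reference, a small sketch (assuming the predictions computed above are still in scope) that collects the five test MSEs into one table:

data.frame(model = c("least squares", "ridge", "lasso", "PCR (16)", "PLS (8)"),
           test.mse = c(mean((linear.pred - College[test, ]$Apps)^2),
                        mean((ridge.pred - test.y)^2),
                        mean((lasso.pred - test.y)^2),
                        mean((pcr.pred - y.test)^2),
                        mean((pls.pred - y.test)^2)))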

11.

library(MASS)
set.seed(1)


train <- sample(c(TRUE, FALSE), nrow(Boston), replace = TRUE)
test <- (!train)

btrain <-Boston[train,]
btest <- Boston[test,]

y.test <- btest$crim
linear <- lm(crim~., data = btrain)

boston_pred <- predict(linear, btest)
mean((boston_pred - btest$crim)^2)
[1] 50.65678
forward <- regsubsets(crim~., data = btrain, nvmax = 13, method = "forward")
forward_summary <- summary(forward)
plot(forward, scale = "adjr2")

Based on the adjusted R² plot, the optimal model appears to use around 7 predictors.
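That reading of the plot can be double-checked programmatically (a small follow-up sketch using the forward and forward_summary objects already created):

which.max(forward_summary$adjr2)                 # size of the best model by adjusted R^2
coef(forward, which.max(forward_summary$adjr2))  # its coefficients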

RIDGE

train.x <- model.matrix(crim ~ ., data = btrain)[, -1]
train.y <- btrain$crim

test.x <- model.matrix(crim ~ ., data = btest)[, -1]
test.y <- btest$crim

# Ridge regression (alpha = 0), lambda chosen by 10-fold cross-validation
ridge.fit <- cv.glmnet(train.x, train.y, alpha = 0)

lam.ridge <- ridge.fit$lambda.min
lam.ridge
[1] 0.5240686
ridge.pred <- predict(ridge.fit, s = lam.ridge, newx = test.x)
mean((ridge.pred-test.y)^2)
[1] 51.46284

LASSO

lasso.fit <- cv.glmnet(train.x, train.y, alpha = 1)

lam.lasso <- lasso.fit$lambda.min
lam.lasso
[1] 0.01973085
lasso.pred <- predict(lasso.fit, s = lam.lasso, newx = test.x)
mean((lasso.pred-test.y)^2)
[1] 50.7379
pcr.fit <- pcr(crim ~ ., data = btrain, scale = TRUE, validation = "CV")
summary(pcr.fit)
Data:   X dimension: 262 13 
    Y dimension: 262 1
Fit method: svdpc
Number of components considered: 13

VALIDATION: RMSEP
Cross-validated using 10 random segments.
       (Intercept)  1 comps  2 comps  3 comps  4 comps  5 comps  6 comps  7 comps
CV           8.065    6.649    6.651    6.188    6.168    6.163    6.193    6.178
adjCV        8.065    6.642    6.646    6.177    6.158    6.151    6.179    6.163
       8 comps  9 comps  10 comps  11 comps  12 comps  13 comps
CV       6.068    6.048     6.053     6.061     6.018     5.972
adjCV    6.053    6.023     6.037     6.045     6.001     5.955

TRAINING: % variance explained
      1 comps  2 comps  3 comps  4 comps  5 comps  6 comps  7 comps  8 comps
X       44.58    58.02    68.58    75.64    82.03    87.37    90.66    93.02
crim    33.90    34.29    44.32    44.91    45.57    45.62    46.13    48.13
      9 comps  10 comps  11 comps  12 comps  13 comps
X       95.01     96.87      98.4     99.54    100.00
crim    48.80     48.96      49.0     49.91     50.71

xcol <- 1:13
mse_pcr_comps <- numeric(length = length(xcol))

# Test MSE on the held-out set for each number of principal components
for (i in seq_along(xcol)) {
    mse_pcr_comps[i] <- mean((predict(pcr.fit, btest, ncomp = i) - btest$crim)^2)
}

mse_pcr <- data.frame(ncomps = 1:13, mse = mse_pcr_comps)

mse_pcr

B. PCR with ncomp = 13 has the lowest test mean squared error, as the check after C confirms.

C. Yes, the chosen model includes all of the features: the full 13-component fit produced the lowest mean squared error, and PCR with all components is equivalent to the least-squares fit.
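A quick programmatic check of B (a small sketch, assuming the mse_pcr data frame above is still in scope):

mse_pcr[which.min(mse_pcr$mse), ]   # component count with the lowest test MSE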
