library(AppliedPredictiveModeling)
library(tidyverse)
library(caret)
library(tidymodels)
library(plsmod)
library(pls)
data(permeability)
fingerprints_data <- as.data.frame(fingerprints)
# Drop the near-zero-variance fingerprints, then put the response in column 1
my_data <- fingerprints_data %>% select(-nearZeroVar(fingerprints))
my_data <- cbind(permeability, my_data)
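nearZeroVar() returns the column indices of the sparse fingerprints, so a quick sanity-check sketch is to count how many predictors the filter drops:
# Count the fingerprints flagged as near-zero variance
length(nearZeroVar(fingerprints))
dim(my_data)  # permeability response plus the surviving predictors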
splits <- my_data %>% initial_split(prop = 0.8)
datatest <- splits %>% testing()
datatrain <- splits %>% training()
cbind(nrow(datatrain), ncol(datatrain))
## [,1] [,2]
## [1,] 133 389
# Tune PLS over 1-20 components with 10-fold CV, centering and scaling first
pls_model <- train(x=datatrain[,2:389],
y=datatrain[,1],
method='pls',
tuneLength=20,
trControl=trainControl(method='cv'),
preProcess=c('center', 'scale')
)
pls_model
## Partial Least Squares
##
## 133 samples
## 388 predictors
##
## Pre-processing: centered (388), scaled (388)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 118, 121, 120, 120, 119, 120, ...
## Resampling results across tuning parameters:
##
## ncomp RMSE Rsquared MAE
## 1 12.06986 0.3995930 9.418347
## 2 10.50379 0.5202863 7.805603
## 3 10.65271 0.5061727 7.990961
## 4 10.80183 0.5186580 8.166903
## 5 10.53182 0.5311303 7.826180
## 6 10.68120 0.5169627 7.768699
## 7 10.85336 0.5189327 8.218415
## 8 11.12495 0.5103056 8.303141
## 9 10.70186 0.5370399 7.836649
## 10 10.81793 0.5305239 8.071035
## 11 10.93651 0.5284338 8.153787
## 12 10.94083 0.5336146 8.189468
## 13 10.99322 0.5307262 8.322787
## 14 11.38261 0.5146162 8.664630
## 15 11.34855 0.5140142 8.619342
## 16 11.63730 0.4991656 8.850037
## 17 11.96477 0.4866832 9.045843
## 18 12.34475 0.4695736 9.384107
## 19 12.51723 0.4659004 9.631618
## 20 12.88829 0.4482917 9.972382
##
## RMSE was used to select the optimal model using the smallest value.
## The final value used for the model was ncomp = 2.
plot(pls_model)
The lowest RMSE is at 2 components, while the highest R^2 is at 9 components. The difference in R^2 is only about 0.017, so I'm going to go with the 2-component model that caret selected.
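The selected tune and the resampling extremes can also be read straight off the train object; a minimal sketch using caret's standard accessors:
# Component count selected by minimum CV RMSE
pls_model$bestTune
# Rows of the resampling table with the lowest RMSE and the highest R^2
pls_model$results[which.min(pls_model$results$RMSE), ]
pls_model$results[which.max(pls_model$results$Rsquared), ]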
pls_pred <- predict(pls_model, newdata=datatest[,2:389])
postResample(pred=pls_pred, obs=datatest[,1])
## RMSE Rsquared MAE
## 13.9198431 0.3525446 9.0589279
# Tune the lasso shrinkage fraction over [0, 0.5] with 10-fold CV
lasso_model <- train(x=datatrain[,2:389],
y=datatrain[,1],
method='lasso',
tuneGrid=data.frame(.fraction = seq(0, 0.5, by=0.05)),
trControl=trainControl(method='cv'),
preProcess=c('center','scale')
)
## Warning in nominalTrainWorkflow(x = x, y = y, wts = weights, info =
## trainInfo, : There were missing values in resampled performance measures.
lasso_model
## The lasso
##
## 133 samples
## 388 predictors
##
## Pre-processing: centered (388), scaled (388)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 118, 119, 119, 120, 120, 119, ...
## Resampling results across tuning parameters:
##
## fraction RMSE Rsquared MAE
## 0.00 15.00060 NaN 11.768061
## 0.05 12.50992 0.5106694 9.123686
## 0.10 12.14390 0.5145069 8.483762
## 0.15 12.07474 0.5098650 8.424482
## 0.20 12.31298 0.5090802 8.679018
## 0.25 12.72681 0.5113428 8.979664
## 0.30 13.09305 0.5211504 9.326423
## 0.35 13.71681 0.5199429 9.831085
## 0.40 14.50312 0.5187682 10.357824
## 0.45 15.33773 0.5153948 10.873020
## 0.50 16.11875 0.5091791 11.351193
##
## RMSE was used to select the optimal model using the smallest value.
## The final value used for the model was fraction = 0.15.
lasso_pred <- predict(lasso_model, newdata=datatest[,2:389])
postResample(pred=lasso_pred, obs=datatest[,1])
## RMSE Rsquared MAE
## 13.370229 0.389442 9.838752
Although the lasso's test-set R^2 of 0.39 is slightly higher than the PLS model's 0.35, the PLS model had the lower cross-validated RMSE (10.50 vs. 12.07), so I would still choose the PLS model; a side-by-side resampling comparison is sketched below.
No, with held-out R^2 values of only 0.35-0.39, neither model explains enough of the variance in permeability for me to trust it as a replacement for the laboratory experiment.
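For a comparison that doesn't rest on a single test split, caret's resamples() lines up the cross-validation distributions of both models; a minimal sketch, assuming both train objects are still in memory:
# Side-by-side summary of the CV performance of the two models
summary(resamples(list(PLS = pls_model, Lasso = lasso_model)))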
The matrix processPredictors contains the 57 predictors (12 describing the input biological material and 45 describing the process predictors) for the 176 manufacturing runs. yield contains the percent yield for each run.
library(AppliedPredictiveModeling)
data("ChemicalManufacturingProcess")
cmp_data <- as.data.frame(ChemicalManufacturingProcess)
# Impute the missing predictor values with bagged trees, then extract the data
cmp <- recipe(Yield~., data=cmp_data) %>%
step_bagimpute(all_predictors()) %>%
prep() %>%
juice()
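ChemicalManufacturingProcess has missing predictor values, which is why the recipe includes a bagged-tree imputation step; a quick sketch of a sanity check that the juiced data are complete:
sum(is.na(cmp_data))  # missing cells in the raw data
sum(is.na(cmp))       # should be 0 after step_bagimpute()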
set.seed(123)
splits <- cmp %>% initial_split(prop = 0.8)
datatest <- splits %>% testing()
datatrain <- splits %>% training()
cbind(nrow(datatrain), ncol(datatrain))
## [,1] [,2]
## [1,] 141 58
splits
## <Training/Validation/Total>
## <141/35/176>
# Fit PLS with the pls package; no ncomp or resampling is specified here
# (see the cross-validation sketch below)
pls_model <- plsr(Yield~., data = datatrain)
pls_model
## Partial least squares regression , fitted with the kernel algorithm.
## Call:
## plsr(formula = Yield ~ ., data = datatrain)
plot(pls_model)
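The fit above accepts plsr()'s default number of components without any resampling. A sketch of how the component count could be tuned instead, using the pls package's built-in cross-validation (pls_cv is an illustrative object, not part of the original analysis):
# Tune the component count with CV inside plsr()
pls_cv <- plsr(Yield ~ ., data = datatrain, validation = "CV")
validationplot(pls_cv, val.type = "RMSEP")  # RMSEP vs. number of components
selectNcomp(pls_cv, method = "onesigma")    # one-sigma selection heuristic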
pls_pred <- predict(pls_model, newdata = datatest)
predResult <- postResample(pred=pls_pred, obs=datatest$Yield)
predResult
## RMSE Rsquared MAE
## NA 0.2105003 NA
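The NA values for RMSE and MAE most likely arise because predict() on a pls::mvr object returns an n x 1 x ncomp array, one slice per component count, which postResample() cannot line up against a single observation vector. A sketch of the fix, pinning ncomp (the value 2 is illustrative, not a tuned choice):
# Request predictions for one component count so a plain vector comes back
pls_pred_2 <- drop(predict(pls_model, newdata = datatest, ncomp = 2))
postResample(pred = pls_pred_2, obs = datatest$Yield)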
pls_imp <- varImp(pls_model)
# Dot chart of the predictors sorted by importance
dotchart(sort(pls_imp$Overall), labels = rownames(pls_imp)[order(pls_imp$Overall)])
plot(ChemicalManufacturingProcess$ManufacturingProcess36, ChemicalManufacturingProcess$Yield,
     xlab = 'ManufacturingProcess36', ylab = 'Yield')
Since only one predictor (ManufacturingProcess36) showed any substantial variable importance, we should try other models to see whether more information can be extracted before drawing conclusions; one possibility is sketched below.
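One hypothetical follow-up, as a sketch: drop an elastic net into the same caret workflow (the method and tuneLength here are illustrative assumptions, not part of the original analysis).
# Illustrative alternative model on the same split
enet_model <- train(Yield ~ ., data = datatrain,
                    method = 'glmnet',
                    tuneLength = 10,
                    trControl = trainControl(method = 'cv'),
                    preProcess = c('center', 'scale'))
postResample(pred = predict(enet_model, newdata = datatest), obs = datatest$Yield)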