6.2 Developing a model to predict permeability (see Sect. 1.4) could save significant resources for a pharmaceutical company, while at the same time more rapidly identifying molecules that have sufficient permeability to become a drug:

(a) Start R and use these commands to load the data:

library(AppliedPredictiveModeling)
data(permeability)

The matrix fingerprints contains the 1,107 binary molecular predictors for the 165 compounds, while permeability contains the permeability response.

(b) The fingerprint predictors indicate the presence or absence of substructures of a molecule and are often sparse, meaning that relatively few of the molecules contain each substructure. Filter out the predictors that have low frequencies using the nearZeroVar function from the caret package. How many predictors are left for modeling?

Answer: 388 predictors are left after filtering.

(c) Split the data into a training and a test set, pre-process the data, and tune a PLS model. How many latent variables are optimal, and what is the corresponding resampled estimate of R2?

Answer: 8 latent variables are optimal, with a resampled R2 estimate of 0.506.

(d) Predict the response for the test set. What is the test set estimate of R2?

Answer: the test set R2 is 0.568, compared with the resampled estimate of 0.506.

(e) Try building other models discussed in this chapter. Do any have better predictive performance?

Answer: of the models tried below, PLS had the best predictive performance on the test set.

(f) Would you recommend any of your models to replace the permeability laboratory experiment?
library(AppliedPredictiveModeling)
library(caret)
## Warning: package 'caret' was built under R version 4.4.1
## Loading required package: ggplot2
## Loading required package: lattice
library(glmnet)
## Loading required package: Matrix
## Loaded glmnet 4.1-8
library(MASS)
library(dplyr)
##
## Attaching package: 'dplyr'
## The following object is masked from 'package:MASS':
##
## select
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(tidyr)
##
## Attaching package: 'tidyr'
## The following objects are masked from 'package:Matrix':
##
## expand, pack, unpack
library(tidyverse)
## Warning: package 'lubridate' was built under R version 4.4.1
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ forcats 1.0.0 ✔ readr 2.1.5
## ✔ lubridate 1.9.4 ✔ stringr 1.5.1
## ✔ purrr 1.0.2 ✔ tibble 3.2.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ tidyr::expand() masks Matrix::expand()
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ✖ purrr::lift() masks caret::lift()
## ✖ tidyr::pack() masks Matrix::pack()
## ✖ dplyr::select() masks MASS::select()
## ✖ tidyr::unpack() masks Matrix::unpack()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(corrplot)
## Warning: package 'corrplot' was built under R version 4.4.1
## corrplot 0.95 loaded
data(permeability)
str(permeability)
## num [1:165, 1] 12.52 1.12 19.41 1.73 1.68 ...
## - attr(*, "dimnames")=List of 2
## ..$ : chr [1:165] "1" "2" "3" "4" ...
## ..$ : chr "permeability"
#B Filter out near-zero-variance fingerprint predictors
dim(fingerprints)
## [1] 165 1107
fp_filtered <- fingerprints[, -nearZeroVar(fingerprints)]
dim(fp_filtered)
## [1] 165 388
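As a sanity check on this filter, nearZeroVar can also return the diagnostics it bases its decision on (by caret's documented defaults, a predictor is flagged when its frequency ratio exceeds 95/5 and its percentage of unique values is below 10). A minimal sketch (nzv_stats is a hypothetical name):

# Sketch: inspect the near-zero-variance diagnostics behind the filter
nzv_stats <- nearZeroVar(fingerprints, saveMetrics = TRUE)
head(nzv_stats)
# number of fingerprint predictors flagged for removal
sum(nzv_stats$nzv)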
#C Split the data, pre-process, and tune a PLS model
set.seed(180)
trainIndex <- createDataPartition(permeability, p = 0.8, list = FALSE)
train_data <- fp_filtered[trainIndex, ]
train_response <- permeability[trainIndex]
test_data <- fp_filtered[-trainIndex, ]
test_response <- permeability[-trainIndex]
prePr <- preProcess(train_data, method = c("center", "scale"))
trainDataPreprocessed <- predict(prePr, train_data)
testDataPreprocessed <- predict(prePr, test_data)
# the data are already centered and scaled above, so the preProc argument
# below is redundant (though harmless)
plsr_model <- train(trainDataPreprocessed, train_response, method = "pls",
                    tuneLength = 10, trControl = trainControl(method = "cv", number = 10),
                    preProc = c("center", "scale"))
plot(plsr_model)
plsr_model$bestTune
## ncomp
## 8 8
plsr_model$results
## ncomp RMSE Rsquared MAE RMSESD RsquaredSD MAESD
## 1 1 13.12548 0.2772122 9.861808 2.048212 0.1627481 1.4831977
## 2 2 12.29770 0.3857903 8.743888 1.451267 0.1666073 1.2161872
## 3 3 12.19910 0.3897928 8.905749 1.241121 0.1947474 0.8544517
## 4 4 12.26380 0.3889972 9.130671 1.778023 0.2149767 1.3209687
## 5 5 11.72386 0.4272479 8.687815 2.051509 0.2331108 1.4141318
## 6 6 11.66674 0.4274702 8.604156 1.985863 0.2219827 1.2953716
## 7 7 11.24967 0.4712011 8.413484 2.181251 0.2111359 1.6134834
## 8 8 10.85772 0.5062896 8.196506 2.444207 0.2197076 1.8535029
## 9 9 10.88403 0.5114626 8.254461 2.394169 0.2110533 1.8104032
## 10 10 11.04298 0.5118142 8.321354 2.561473 0.2167356 1.8179403
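Note that the resampled R2 still creeps up slightly at ncomp = 9 and 10 even though RMSE bottoms out at 8; train() selects on the smallest RMSE by default. If a more parsimonious fit were preferred, caret's one-standard-error rule could be used instead. A sketch reusing the objects above (ctrl_oneSE and plsr_oneSE are hypothetical names):

# Sketch: re-tune with the one-standard-error selection rule, which picks
# the simplest candidate within one SE of the best resampled RMSE
ctrl_oneSE <- trainControl(method = "cv", number = 10, selectionFunction = "oneSE")
plsr_oneSE <- train(trainDataPreprocessed, train_response, method = "pls",
                    tuneLength = 10, trControl = ctrl_oneSE)
plsr_oneSE$bestTune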
#D Test set prediction
predictions <- predict(plsr_model, testDataPreprocessed)
#R^2 for the test set
pls_r2 <- cor(predictions, test_response)^2
head(predictions)
## [1] -6.1883669 39.7666289 -5.8253140 -4.5900196 0.8390047 -1.1905517
pls_r2
## [1] 0.5677073
plsr_model$results |>
  inner_join(plsr_model$bestTune)
## Joining with `by = join_by(ncomp)`
## ncomp RMSE Rsquared MAE RMSESD RsquaredSD MAESD
## 1 8 10.85772 0.5062896 8.196506 2.444207 0.2197076 1.853503
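caret's postResample gives the same test-set R2, along with RMSE and MAE, in a single call; a minimal sketch using the objects above:

# Sketch: RMSE, R^2, and MAE on the test set in one call
postResample(pred = predictions, obs = test_response)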
#E Try other models
# Train a PCR model
pcr_model <- train(trainDataPreprocessed, train_response, method = "pcr",
                   tuneLength = 10, trControl = trainControl(method = "cv", number = 10),
                   preProc = c("center", "scale"))
# Evaluate performance on the test set
predictions3<-predict(pcr_model, testDataPreprocessed)
head(predictions3)
## [1] 1.7322320 22.9300878 0.3699488 12.7237369 8.7233511 4.5926569
#R^2 for the test set
pcr_r2 <- cor(predictions3, test_response)^2
pcr_model$results |>
  inner_join(pcr_model$bestTune)
## Joining with `by = join_by(ncomp)`
## ncomp RMSE Rsquared MAE RMSESD RsquaredSD MAESD
## 1 8 12.5299 0.4096674 8.932555 2.67576 0.1907393 1.561996
# Elastic net
enetGrid <- expand.grid(.lambda = c(0, 0.01, 0.1), .fraction = seq(0.05, 1, length = 20))
# tuneLength is omitted because the explicit tuneGrid takes precedence
enetTune <- train(trainDataPreprocessed, train_response, method = "enet",
                  tuneGrid = enetGrid, trControl = trainControl(method = "cv", number = 10),
                  preProc = c("center", "scale"))
## Warning: model fit failed for Fold03: lambda=0.00, fraction=1 Error in if (zmin < gamhat) { : missing value where TRUE/FALSE needed
## Warning: model fit failed for Fold08: lambda=0.00, fraction=1 Error in if (zmin < gamhat) { : missing value where TRUE/FALSE needed
## Warning in nominalTrainWorkflow(x = x, y = y, wts = weights, info = trainInfo,
## : There were missing values in resampled performance measures.
# Evaluate performance on the test set
predictions4<-predict(enetTune, testDataPreprocessed)
head(predictions4)
## 5 7 18 29 30 34
## 2.273284 34.014080 2.273284 6.647267 2.479791 2.273284
#R^2 for the test set
enet_r2 <- cor(predictions4, test_response)^2
enetTune$results |>
  inner_join(enetTune$bestTune)
## Joining with `by = join_by(lambda, fraction)`
## lambda fraction RMSE Rsquared MAE RMSESD RsquaredSD MAESD
## 1 0.1 0.1 11.80355 0.4557321 8.254648 2.639242 0.2334376 1.841055
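The model-fit warnings above appear to come from the lars-based enet implementation failing on two folds at lambda = 0, fraction = 1. Since glmnet is already loaded, an elastic net fit via method = "glmnet" is a more numerically robust alternative; a sketch with an illustrative alpha/lambda grid (glmnetGrid, glmnet_model, and glmnet_r2 are hypothetical names):

# Sketch: elastic net via glmnet instead of enet (illustrative tuning grid)
glmnetGrid <- expand.grid(alpha = c(0.1, 0.5, 0.9),
                          lambda = 10^seq(-3, 0, length = 10))
glmnet_model <- train(trainDataPreprocessed, train_response, method = "glmnet",
                      tuneGrid = glmnetGrid,
                      trControl = trainControl(method = "cv", number = 10))
# test-set R^2 for comparison with the other models
glmnet_r2 <- cor(predict(glmnet_model, testDataPreprocessed), test_response)^2
glmnet_r2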
# Ridge regression
ridgeGrid <- data.frame(.lambda = seq(0.001, 0.1, length = 15))
set.seed(100)
ridge_model <- train(trainDataPreprocessed, train_response, method = "ridge",
                     tuneGrid = ridgeGrid, trControl = trainControl(method = "cv", number = 10),
                     preProc = c("center", "scale"))
predictions2<-predict(ridge_model, testDataPreprocessed)
head(predictions2)
## 5 7 18 29 30 34
## -2.3364276 41.7846917 -5.3233414 -16.1899172 -1.2048183 0.5620621
#R^2 for the test set
ridge_r2 <- cor(predictions2, test_response)^2
ridge_model$results |>
  inner_join(ridge_model$bestTune)
## Joining with `by = join_by(lambda)`
## lambda RMSE Rsquared MAE RMSESD RsquaredSD MAESD
## 1 0.1 12.24951 0.4780697 9.065775 2.548992 0.1821331 1.739534
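Collecting the test-set R2 values computed above makes the comparison behind the answer to (e) explicit; a short sketch:

# Sketch: side-by-side test-set R^2 for the four models fitted above
data.frame(model = c("PLS", "PCR", "enet", "ridge"),
           test_R2 = c(pls_r2, pcr_r2, enet_r2, ridge_r2))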
6.3 A chemical manufacturing process for a pharmaceutical product was discussed in Sect. 1.4. In this problem, the objective is to understand the relationship between biological measurements of the raw materials (predictors), measurements of the manufacturing process (predictors), and the response of product yield. Biological predictors cannot be changed but can be used to assess the quality of the raw material before processing. On the other hand, manufacturing process predictors can be changed in the manufacturing process. Improving product yield by 1% will boost revenue by approximately one hundred thousand dollars per batch:

(a) Start R and use these commands to load the data:

library(AppliedPredictiveModeling)
data(ChemicalManufacturingProcess)

The matrix processPredictors contains the 57 predictors (12 describing the input biological material and 45 describing the process predictors) for the 176 manufacturing runs. yield contains the percent yield for each run.

(b) A small percentage of cells in the predictor set contain missing values. Use an imputation function to fill in these missing values (e.g., see Sect. 3.8).

(c) Split the data into a training and a test set, pre-process the data, and tune a model of your choice from this chapter. What is the optimal value of the performance metric?

(d) Predict the response for the test set. What is the value of the performance metric, and how does this compare with the resampled performance metric on the training set?

(e) Which predictors are most important in the model you have trained? Do either the biological or process predictors dominate the list?

(f) Explore the relationships between each of the top predictors and the response. How could this information be helpful in improving yield in future runs of the manufacturing process?
library(RANN)
## Warning: package 'RANN' was built under R version 4.4.1
library(AppliedPredictiveModeling)
data(ChemicalManufacturingProcess)
#B Impute missing values with k-nearest neighbors
# note: caret's knnImpute also centers and scales every column (including
# Yield), so all subsequent results are on the standardized scale
Prepro <- preProcess(ChemicalManufacturingProcess, method = "knnImpute")
ProcessPredictorImputed <- predict(Prepro, ChemicalManufacturingProcess)
ProcessPredictorImputed <- as.data.frame(ProcessPredictorImputed)
#check
sum(is.na(ProcessPredictorImputed))
## [1] 0
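Before imputing, it is worth seeing where the gaps actually are; a minimal sketch counting missing cells per column of the raw data (na_counts is a hypothetical name):

# Sketch: missing-value counts per column, before imputation
na_counts <- colSums(is.na(ChemicalManufacturingProcess))
sort(na_counts[na_counts > 0], decreasing = TRUE)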
#c Split the data, pre-process, and tune a PLS model
set.seed(123)
ProcessPredictor <- select(ProcessPredictorImputed, -Yield)
Yield_df <- ProcessPredictorImputed$Yield
ProcessPredictorf <- ProcessPredictor[, -nearZeroVar(ProcessPredictor)]
trainIndex2 <- createDataPartition(Yield_df, p = 0.8, list = FALSE)
train_data2 <- ProcessPredictorf[trainIndex2, ]
train_response2 <- Yield_df[trainIndex2]
test_data2 <- ProcessPredictorf[-trainIndex2, ]
test_response2 <- Yield_df[-trainIndex2]
prePr2 <- preProcess(train_data2, method = c("center", "scale"))
trainDataPreprocessed2 <- predict(prePr2, train_data2)
testDataPreprocessed2 <- predict(prePr2, test_data2)
pls2_model <- train(trainDataPreprocessed2, train_response2, method = "pls",
                    tuneLength = 10, trControl = trainControl(method = "cv", number = 10),
                    preProc = c("center", "scale"))
plot(pls2_model)
pls2_model$bestTune
## ncomp
## 3 3
pls2_model$results
## ncomp RMSE Rsquared MAE RMSESD RsquaredSD MAESD
## 1 1 0.7754057 0.4551107 0.6289419 0.2064820 0.2080301 0.1677273
## 2 2 1.0635795 0.4668878 0.6697724 0.8321828 0.2676093 0.2979132
## 3 3 0.6606301 0.6025962 0.5369693 0.1960452 0.1861347 0.1430776
## 4 4 0.8085067 0.5711244 0.5714930 0.5623388 0.2232811 0.1673670
## 5 5 1.0981659 0.5278682 0.6569408 1.2414162 0.2592831 0.3470588
## 6 6 1.1435873 0.5123137 0.6780413 1.3033184 0.2693839 0.3669206
## 7 7 1.3703141 0.4899506 0.7582540 1.8495620 0.2793116 0.5077298
## 8 8 1.5909598 0.4825770 0.8308428 2.4304549 0.2850208 0.6751223
## 9 9 1.8112121 0.4621626 0.8994732 2.9010201 0.2898199 0.8044436
## 10 10 2.1241168 0.4461615 0.9920989 3.6488540 0.3000363 1.0088230
pls2_model
## Partial Least Squares
##
## 144 samples
## 56 predictor
##
## Pre-processing: centered (56), scaled (56)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 131, 130, 130, 129, 131, 129, ...
## Resampling results across tuning parameters:
##
## ncomp RMSE Rsquared MAE
## 1 0.7754057 0.4551107 0.6289419
## 2 1.0635795 0.4668878 0.6697724
## 3 0.6606301 0.6025962 0.5369693
## 4 0.8085067 0.5711244 0.5714930
## 5 1.0981659 0.5278682 0.6569408
## 6 1.1435873 0.5123137 0.6780413
## 7 1.3703141 0.4899506 0.7582540
## 8 1.5909598 0.4825770 0.8308428
## 9 1.8112121 0.4621626 0.8994732
## 10 2.1241168 0.4461615 0.9920989
##
## RMSE was used to select the optimal model using the smallest value.
## The final value used for the model was ncomp = 3.
The optimal (lowest) RMSE occurred at ncomp = 3, with a resampled R2 of 0.603.
#D Test set prediction and R^2
predictionsChem <- predict(pls2_model, testDataPreprocessed2)
head(predictionsChem)
## [1] -0.30638113 -1.86907124 0.01655051 0.42538731 0.19283560 1.79226866
#R^2 for the test set
pls2_r2 <- cor(predictionsChem, test_response2)^2
pls2_r2
## [1] 0.4690064
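Because knnImpute standardized every column, including Yield, these predictions are on the z-score scale (hence the negative values); R2 is unaffected by the rescaling. To report predictions in the original percent-yield units, the standardization can be inverted with Yield's sample mean and standard deviation; a sketch, assuming Yield itself had no missing cells (yield_mean, yield_sd, and predictions_yield are hypothetical names):

# Sketch: map standardized predictions back to percent yield
# (inverts the center/scale step that knnImpute applied to Yield)
yield_mean <- mean(ChemicalManufacturingProcess$Yield, na.rm = TRUE)
yield_sd <- sd(ChemicalManufacturingProcess$Yield, na.rm = TRUE)
predictions_yield <- predictionsChem * yield_sd + yield_mean
head(predictions_yield)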
trainR2<-pls2_model$results$Rsquared[pls2_model$results$ncomp==pls2_model$bestTune$ncomp]
trainR2
## [1] 0.6025962
The R2 from resampled cross-validation is 0.603 and the R2 on the test set is 0.470; the test-set value is somewhat lower but not drastically so, suggesting the resampled training estimate was not far off.
#E
#Important Variable for the PLS model
varImp(pls2_model)
## Warning: package 'pls' was built under R version 4.4.1
##
## Attaching package: 'pls'
## The following object is masked from 'package:corrplot':
##
## corrplot
## The following object is masked from 'package:caret':
##
## R2
## The following object is masked from 'package:stats':
##
## loadings
## pls variable importance
##
## only 20 most important variables shown (out of 56)
##
## Overall
## ManufacturingProcess32 100.00
## ManufacturingProcess17 87.98
## ManufacturingProcess13 86.30
## ManufacturingProcess09 86.04
## ManufacturingProcess36 84.38
## ManufacturingProcess06 69.07
## ManufacturingProcess33 64.57
## BiologicalMaterial06 62.12
## BiologicalMaterial03 61.41
## BiologicalMaterial08 60.68
## BiologicalMaterial02 60.41
## ManufacturingProcess11 59.19
## BiologicalMaterial12 55.63
## BiologicalMaterial11 55.40
## BiologicalMaterial01 52.35
## BiologicalMaterial04 50.99
## ManufacturingProcess28 48.07
## ManufacturingProcess12 46.64
## ManufacturingProcess37 45.59
## BiologicalMaterial10 42.33
The process predictors dominate the PLS model: seven of the top ten variables (led by ManufacturingProcess32) come from the ManufacturingProcess group.
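A quick tabulation makes that claim concrete; a sketch over the 20 top-ranked variables (imp and top20 are hypothetical names):

# Sketch: count biological vs. process predictors among the top 20
imp <- varImp(pls2_model)$importance
top20 <- rownames(imp)[order(-imp$Overall)][1:20]
table(ifelse(grepl("^Manufacturing", top20), "Process", "Biological"))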
#f Explore the relationships between the top predictors and the response
# variable correlations
Top_var <- as.data.frame(varImp(pls2_model)$importance)
# get the top 10 variables
Top_var <- Top_var |>
  arrange(desc(Overall)) |>
  head(10)
# get the variable names
Top_names <- rownames(Top_var)
# subset them from the original training data
Top_var <- train_data2[, Top_names]
Top_var$Yield <- ProcessPredictorImputed$Yield[trainIndex2]
# call corrplot::corrplot with an explicit namespace: loading pls (via
# varImp above) masked it with pls::corrplot, which does not understand
# the method argument and emitted "not a graphical parameter" warnings
corrplot::corrplot(cor(Top_var), method = "number")
Examining the correlations between the top predictors and the yield shows how each important predictor relates to the response, which points to where the manufacturing process could be adjusted to optimize yield. For example, ManufacturingProcess36 has a negative correlation with yield, meaning that higher values of ManufacturingProcess36 are associated with lower yield.
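To inspect one of these relationships directly, a scatter plot of yield against the top-ranked predictor is a simple follow-up; a sketch using the Top_var frame built above (ggplot2 is already loaded via the tidyverse):

# Sketch: yield versus the top-ranked predictor, with a linear trend line
ggplot(Top_var, aes(x = ManufacturingProcess32, y = Yield)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE)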