Project 1

6.2

Developing a model to predict permeability (see Sect. 1.4) could save significant resources for a pharmaceutical company, while at the same time more rapidly identifying molecules that have a sufficient permeability to become a drug:

A

  1. Start R and use these commands to load the data:

library("AppliedPredictiveModeling")
data(permeability)

The matrix fingerprints contains the 1,107 binary molecular predictors for the 165 compounds, while permeability contains the permeability response.

B

  1. The fingerprint predictors indicate the presence or absence of substructures of a molecule and are often sparse, meaning that relatively few of the molecules contain each substructure. Filter out the predictors that have low frequencies using the nearZeroVar function from the caret package. How many predictors are left for modeling?
# nearZeroVar() returns the column indices of the near-zero-variance predictors,
# so the number remaining is the total minus the number flagged for removal
predictors <- ncol(fingerprints) - length(caret::nearZeroVar(fingerprints))

paste0("There are ", predictors, " predictors left for modeling")
## [1] "There are 388 predictors left for modeling"

C

  1. Split the data into a training and a test set, pre-process the data, and tune a PLS model. How many latent variables are optimal and what is the corresponding re-sampled estimate of R2?
# Drop (note the minus sign) the near-zero-variance columns, then attach the response
df <- as.data.frame(fingerprints[, -nearZeroVar(fingerprints)]) %>%
  mutate(y = permeability)

set.seed(42)


split_train <- caret::createDataPartition(df$y, times = 1, p = 0.8, list = FALSE)
train_fingers <- df[split_train, ]
test_fingers <- df[-split_train, ]
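As a sanity check on the split (a minimal sketch; with 165 compounds and p = 0.8 we expect roughly 133 training and 32 test rows):

# createDataPartition() rounds the training fraction up, so the split may not be exactly 80/20
nrow(train_fingers)
nrow(test_fingers)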

pls_model <- caret::train(
  y ~ ., data = train_fingers, method = "pls",
  # preProcess supports many predictor transformations; here we center
  preProc = "center",
  # Resample with 10-fold cross-validation
  trControl = trainControl("cv", number = 10),
  # tuneLength sets how many candidate values of the tuning parameter
  # (here, the number of components) train should evaluate
  tuneLength = 20
)

# Plot model RMSE vs different values of components
title <- paste("Training Set RMSE Minimized at", pls_model$bestTune$ncomp, "Components")
plot(pls_model, main = title)

pls_model$results %>%
  filter(ncomp == pls_model$bestTune$ncomp) %>%
  select(ncomp, RMSE, Rsquared)
##   ncomp     RMSE Rsquared
## 1     5 14.29229 0.263486
paste0("We capture ", round(pls_model$results %>% filter(ncomp == pls_model$bestTune$ncomp) %>% select(Rsquared)*100,2),"% of the variation")
## [1] "We capture 26.35% of the variation"

D

  1. Predict the response for the test set. What is the test set estimate of R2?
pls_predictions <- stats::predict(pls_model, test_fingers)


results <- data.frame(Model = "PLS",
                      RMSE = caret::RMSE(pls_predictions, test_fingers$y),
                      Rsquared = caret::R2(pls_predictions, test_fingers$y))
results
##              Model     RMSE  Rsquared
## permeability   PLS 13.23639 0.3016825
paste0("the r2 value is ", round(results$Rsquared,3))
## [1] "the r2 value is 0.302"

E

  1. Try building other models discussed in this chapter. Do any have better predictive performance?

PCR

pcr_model <- train(
  y ~ ., data = train_fingers, method = "pcr",
  preProc = "center",
  trControl = trainControl("cv", number = 10),
  tuneLength = 20
)

title <- paste("Training Set RMSE Minimized at", pcr_model$bestTune$ncomp, "Components")
plot(pcr_model, main = title)

pcr_predict <- predict(pcr_model, test_fingers)
# Model performance metrics
pcr_results <- data.frame(Model = "PCR",
                          RMSE = caret::RMSE(pcr_predict, test_fingers$y),
                          Rsquared = caret::R2(pcr_predict, test_fingers$y))
pcr_results
##              Model     RMSE   Rsquared
## permeability   PCR 15.40959 0.07299689
paste0("the r2 value is ", round(pcr_results$Rsquared,3))
## [1] "the r2 value is 0.073"

Ridge Regression

# y is already in the data frame, so model.matrix() can build the design matrix from the formula
x <- model.matrix(y ~ ., data = train_fingers)

x_test <- model.matrix(y ~ ., data = test_fingers)
# Choose optimal lambda
rr_cv <- glmnet::cv.glmnet(x, train_fingers$y, alpha = 0)
# Create model
rr_model <- glmnet::glmnet(x, train_fingers$y, alpha = 0, lambda = rr_cv$lambda.min)
# Run predictions
rr_predictions <- as.vector(predict(rr_model, x_test))
# Look at results
rr_results <- data.frame(Model = "Ridge Regression",
                         RMSE = caret::RMSE(rr_predictions, test_fingers$y),
                         Rsquared = caret::R2(rr_predictions, test_fingers$y))
rr_results
##                         Model     RMSE  Rsquared
## permeability Ridge Regression 14.59042 0.1426659

Lasso Regression

lr_cv <- glmnet::cv.glmnet(x, train_fingers$y, alpha = 1)

lr_model <- glmnet::glmnet(x, train_fingers$y, alpha = 1, lambda = lr_cv$lambda.min)

lr_predictions <- as.vector(predict(lr_model, x_test))

lr_results <- data.frame(Model = "Lasso Regression",
                         RMSE = caret::RMSE(lr_predictions, test_fingers$y),
                         Rsquared = caret::R2(lr_predictions, test_fingers$y))
lr_results
##                         Model     RMSE  Rsquared
## permeability Lasso Regression 13.98552 0.2417885
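One appeal of the lasso is that it zeroes out predictors entirely; a quick sketch to count how many fingerprints keep nonzero coefficients (the exact count varies with the cross-validation folds):

# coef() returns a sparse matrix; drop the intercept from the count
sum(as.vector(coef(lr_model)) != 0) - 1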

Elastic Net Regression

en_model <- train(y ~ ., data = train_fingers, method = "glmnet",
  trControl = trainControl("cv", number = 10),
  tuneLength = 20
)
## Warning in nominalTrainWorkflow(x = x, y = y, wts = weights, info = trainInfo,
## : There were missing values in resampled performance measures.
# Best tuning parameters
en_model$bestTune
##         alpha   lambda
## 330 0.8578947 1.472981
# predict() on a train object expects the original predictor columns, so pass the test data frame
en_predictions <- predict(en_model, test_fingers)

en_results <- data.frame(Model = "Elastic Net Regression",
                         RMSE = caret::RMSE(en_predictions, test_fingers$y),
                         Rsquared = caret::R2(en_predictions, test_fingers$y))
en_results
##                               Model     RMSE  Rsquared
## permeability Elastic Net Regression 14.11518 0.1991924
# Note: the PLS row carries the resampled (CV) estimates; the other rows are test-set results
pls_model$results %>%
  filter(ncomp == pls_model$bestTune$ncomp) %>%
  mutate("Model" = "PLS") %>%
  select(Model, RMSE, Rsquared) %>%
  bind_rows(pcr_results) %>%
  bind_rows(rr_results) %>%
  bind_rows(lr_results) %>%
  bind_rows(en_results) %>%
  select(Model, RMSE, Rsquared)
##                                   Model     RMSE   Rsquared
## ...1                                PLS 14.29229 0.26348596
## permeability...2                    PCR 15.40959 0.07299689
## permeability...3       Ridge Regression 14.59042 0.14266587
## permeability...4       Lasso Regression 13.98552 0.24178847
## permeability...5 Elastic Net Regression 14.11518 0.19919242

Note that the PLS row above is the resampled training estimate, while the other rows are test-set results; using the PLS test-set numbers from part D (RMSE 13.24, \(R^2\) 0.302), PLS has both the lowest RMSE and the highest \(R^2\), with the lasso close behind. Overall, though, the low \(R^2\) values mean none of these models explains permeability particularly well.

F

  1. Would you recommend any of your models to replace the permeability laboratory experiment?

No. With test-set \(R^2\) values of roughly 0.3 or lower, none of these models predicts permeability reliably enough to replace the laboratory experiment; at best they could serve as a cheap pre-screen, with promising compounds still confirmed in the lab.

6.3

A chemical manufacturing process for a pharmaceutical product was discussed in Sect. 1.4. In this problem, the objective is to understand the relationship between biological measurements of the raw materials (predictors), measurements of the manufacturing process (predictors), and the response of product yield. Biological predictors cannot be changed but can be used to assess the quality of the raw material before processing. On the other hand, manufacturing process predictors can be changed in the manufacturing process. Improving product yield by 1% will boost revenue by approximately one hundred thousand dollars per batch:

A

  1. Start R and use these commands to load the data:
library("AppliedPredictiveModeling")
data("ChemicalManufacturingProcess")

The matrix processPredictors contains the 57 predictors (12 describing the input biological material and 45 describing the process predictors) for the 176 manufacturing runs. yield contains the percent yield for each run.

B

  1. A small percentage of cells in the predictor set contain missing values. Use an imputation function to fill in these missing values (e.g., see Sect. 3.8).
library("RANN")
impute <- preProcess(ChemicalManufacturingProcess, "knnImpute")

chem_data <- predict(impute, ChemicalManufacturingProcess)
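A quick check, as a sketch, that the imputation left no missing cells:

# Should return 0 if knnImpute filled every missing value
sum(is.na(chem_data))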

C

  1. Split the data into a training and a test set, pre-process the data, and tune a model of your choice from this chapter. What is the optimal value of the performance metric?
set.seed(27)
# Filter out the near-zero-variance predictors, as in 6.2
chem_data_f <- chem_data %>% select(!nearZeroVar(.))

# Split into train and test
train_split <- createDataPartition(chem_data_f$Yield, p = 0.8, list = FALSE)

train_chem <- chem_data_f[train_split, ]
test_chem <- chem_data_f[-train_split, ]

pls_model_chem <- train(Yield ~ ., data = train_chem, method = "pls",
  trControl = trainControl("cv", number = 10),
  tuneLength = 20
)
title <- paste("Training Set RMSE Minimized at", pls_model_chem$bestTune$ncomp, "Components")

pls_model_chem$results %>% filter(ncomp == pls_model_chem$bestTune$ncomp)
##   ncomp      RMSE  Rsquared       MAE    RMSESD RsquaredSD      MAESD
## 1     5 0.6210586 0.5998084 0.5024646 0.1124813  0.1619742 0.09422711
plot(pls_model_chem, main = title)

We can see that 5 latent variables is optimal, giving a resampled \(R^2\) of about 0.60 and an RMSE of about 0.62. (Recall the data were standardized during imputation, so RMSE is in standard-deviation units of Yield.)

D

  1. Predict the response for the test set. What is the value of the performance metric and how does this compare with the re-sampled performance metric on the training set?
postResample(pred = predict(pls_model_chem,test_chem), obs = test_chem$Yield)
##      RMSE  Rsquared       MAE 
## 0.7354498 0.4978613 0.6285760

The test-set \(R^2\) of about 0.50 (RMSE 0.74) is somewhat worse than the resampled training estimate (\(R^2\) 0.60, RMSE 0.62), a modest drop suggesting the model generalizes reasonably but still leaves much of the variation in yield unexplained.

E

  1. Which predictors are most important in the model you have trained? Do either the biological or process predictors dominate the list?
plot(varImp(pls_model_chem), top = 10)
The variable importance plot shows that the manufacturing process predictors dominate the top of the list; the top-10 suffixes used in part F below are mostly ManufacturingProcess variables.
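To read the ranking without the plot, a small sketch tabulating the top-10 importance scores:

# varImp() stores the scores in a data frame keyed by row name
varImp(pls_model_chem)$importance %>%
  tibble::rownames_to_column("Predictor") %>%
  arrange(desc(Overall)) %>%
  head(10)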

F

  1. Explore the relationships between each of the top predictors and the response. How could this information be helpful in improving yield in future runs of the manufacturing process?
# Grab Yield plus the top predictors by their numeric suffixes (note: a suffix
# like "09" also matches the corresponding BiologicalMaterial column)
chem_cor <- chem_data %>% select(Yield, ends_with(c("32", "13", "17", "09", "36", "33", "11", "08")))
corr <- round(cor(chem_cor), 1)

ggcorrplot::ggcorrplot(corr, hc.order = TRUE, lab = TRUE, show.legend = TRUE,  legend.title = "Corr Plot for top 10 Manufacturing")

The correlation plot shows which of the top predictors move with Yield and with each other: process variables positively correlated with Yield are candidates to increase in future runs, those negatively correlated are candidates to reduce, and the inter-predictor correlations warn us which settings cannot be adjusted independently.
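To go beyond pairwise correlations, a minimal sketch plotting two of the top predictors directly against Yield (assuming, per the importance plot above, that suffixes 32 and 13 refer to ManufacturingProcess variables):

# Scatterplots of individual top predictors vs. the standardized response
caret::featurePlot(
  x = chem_data[, c("ManufacturingProcess32", "ManufacturingProcess13")],
  y = chem_data$Yield,
  plot = "scatter"
)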