Assignment 7:

In Kuhn and Johnson, do problems 6.2 and 6.3. There are only two, but they consist of many parts.
Please submit a link to your RPubs page and the .rmd file as well.

6.2

Developing a model to predict permeability (see Sect. 1.4) could save significant resources for a pharmaceutical company, while at the same time more rapidly identifying molecules that have a sufficient permeability to become a drug:

a.

Start R and use these commands to load the data:

library(AppliedPredictiveModeling)
data(permeability)

The matrix fingerprints contains the 1,107 binary molecular predictors for the 165 compounds, while permeability contains the permeability response.

b.

The fingerprint predictors indicate the presence or absence of substructures of a molecule and are often sparse, meaning that relatively few of the molecules contain each substructure. Filter out the predictors that have low frequencies using the nearZeroVar function from the caret package. How many predictors are left for modeling?

fingerprints_adjusted <- fingerprints[, -nearZeroVar(fingerprints)]
print(paste("Non-Sparse predictors:", ncol(fingerprints_adjusted)))
## [1] "Non-Sparse predictors: 388"

c.

Split the data into a training and a test set, pre-process the data, and tune a PLS model. How many latent variables are optimal and what is the corresponding re-sampled estimate of R2?

set.seed(42)

# keep only the filtered (non-sparse) predictors and attach the response;
# note the minus sign: nearZeroVar returns the columns to drop
df <- as.data.frame(fingerprints[, -nearZeroVar(fingerprints)]) %>% mutate(permeability = permeability)

split_data <- createDataPartition(df$permeability, p=.75, list = FALSE)
x_train <- fingerprints_adjusted[split_data,]
x_test <- fingerprints_adjusted[-split_data,]
y_train <- permeability[split_data,]
y_test <- permeability[-split_data,]

train_df <- df[split_data,]
test_df <- df[-split_data,]

plsfit <- train(x=x_train, y=y_train,
  method = "pls",
  tuneGrid = expand.grid(ncomp = 1:10),
  trControl = trainControl(method = "cv", number = 10)
)

title <- paste("Training set RMSE minimized at",
               plsfit$bestTune$ncomp, 
               "Components")

plsfit_graph <- data.frame(ncomp = plsfit$results$ncomp, RMSE = plsfit$results$RMSE)

ggplot(plsfit_graph, aes(x = ncomp, y = RMSE)) +
  geom_line() +
  geom_point() +
  xlab("Number of Components") +
  ggtitle(title)

plsfit$results
##    ncomp     RMSE  Rsquared       MAE   RMSESD RsquaredSD    MAESD
## 1      1 13.54107 0.2770095 10.105690 1.684024  0.2406596 1.240018
## 2      2 12.44804 0.3748665  8.686197 1.639971  0.2577112 1.460193
## 3      3 12.26612 0.3990032  9.105250 1.568333  0.2063813 1.585459
## 4      4 12.29700 0.4228856  9.390639 1.929871  0.1970819 1.357606
## 5      5 12.18617 0.4346136  9.131199 1.901818  0.2090920 1.439857
## 6      6 11.84291 0.4586880  8.839472 1.833305  0.1916259 1.147759
## 7      7 11.68123 0.4557405  9.068093 1.440496  0.2051092 1.302198
## 8      8 11.97498 0.4339375  9.394588 1.492779  0.1885532 1.411384
## 9      9 12.02858 0.4268795  9.533655 1.379109  0.1822796 1.183181
## 10    10 12.28244 0.4186498  9.562619 1.530888  0.1866089 1.379722
plsfit$results %>%
  filter(ncomp == plsfit$bestTune$ncomp) %>%
  select(ncomp, RMSE, Rsquared)
##   ncomp     RMSE  Rsquared
## 1     7 11.68123 0.4557405
  • After splitting the data 75/25, the PLS model is tuned with 10-fold cross-validation.
  • The optimal number of latent variables is 7, with a re-sampled R2 of ~0.46 (about 46% of the variance explained).

d.

Predict the response for the test set. What is the test set estimate of R2?

# Re-tune a PLS fit on the training frame; tuneLength = 25 widens the ncomp search
pls_model <- train(
  permeability ~ ., data = train_df, method = "pls",
  center = TRUE,
  trControl = trainControl("cv", number = 10),
  tuneLength = 25
)

result <- pls_model$results %>%
  filter(ncomp == pls_model$bestTune$ncomp) %>%
  select(ncomp, RMSE, Rsquared)

pls_results <- data.frame(
  Model = "PLS",
  RMSE = result$RMSE,
  Rsquared = result$Rsquared
)

result
##   ncomp     RMSE  Rsquared
## 1     2 13.19697 0.2622712
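
The numbers above are still a re-sampled estimate for the re-tuned model, not the test-set answer the question asks for. A minimal sketch of the test-set evaluation itself, assuming the pls_model and test_df objects defined above and caret's RMSE/R2 helpers used elsewhere in this document:

# Predict permeability for the held-out test set and score the predictions
pls_pred <- predict(pls_model, test_df)

pls_test_results <- data.frame(
  Model = "PLS",
  RMSE = RMSE(pls_pred, test_df$permeability),
  Rsquared = R2(pls_pred, test_df$permeability)
)
pls_test_results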
  • Scoring the held-out test set (see the sketch above) gives:
    • R squared: 0.557
    • RMSE: 11.183

e.

Try building other models discussed in this chapter. Do any have better predictive performance?

pcr_model <- train(
  permeability ~ ., data = train_df, method = "pcr",
  center = TRUE,
  trControl = trainControl("cv", number = 10),
  tuneLength = 25
)

pcr_pred <- predict(pcr_model, test_df)

pcr_results <- data.frame(
  Model = "PCR",
  RMSE = RMSE(pcr_pred, test_df$permeability),
  Rsquared = R2(pcr_pred, test_df$permeability)
)
library(glmnet)

# Design matrices for glmnet
r <- model.matrix(permeability ~ ., data = train_df)
r_test <- model.matrix(permeability ~ ., data = test_df)

# Ridge regression: alpha = 0, with lambda chosen by cross-validation
ridge_cv <- cv.glmnet(r, train_df$permeability, alpha = 0)
ridge_model <- glmnet(r, train_df$permeability, alpha = 0, lambda = ridge_cv$lambda.min)

ridge_pred <- as.vector(predict(ridge_model, r_test))

ridge_results <- data.frame(
  Model = "RIDGE",
  RMSE = RMSE(ridge_pred, test_df$permeability),
  Rsquared = R2(ridge_pred, test_df$permeability)
)
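
The same glmnet scaffolding covers the lasso (alpha = 1), another model from this chapter. A minimal sketch reusing r and r_test from above (the lasso_* names are my own):

# Lasso: identical workflow to ridge, but alpha = 1
lasso_cv <- cv.glmnet(r, train_df$permeability, alpha = 1)
lasso_model <- glmnet(r, train_df$permeability, alpha = 1, lambda = lasso_cv$lambda.min)
lasso_pred <- as.vector(predict(lasso_model, r_test))

lasso_results <- data.frame(
  Model = "LASSO",
  RMSE = RMSE(lasso_pred, test_df$permeability),
  Rsquared = R2(lasso_pred, test_df$permeability)
)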
fractionTune <- seq(.3, .8, .1)
lambdaTune <- seq(.05, .3, .1)

elastic_model <- train(x = x_train, y = y_train, 
                  method = "enet", 
                  trControl = trainControl(method = "repeatedcv", repeats = 5), 
                  preProcess = c("center", "scale"), 
                  tuneGrid = expand.grid(lambda = lambdaTune,
                                         fraction = fractionTune)
                )

elastic_pred <- elastic_model %>%
  predict(x_test)

elastic_results <- data.frame(
  Model = "ElasticNet",
  RMSE = RMSE(elastic_pred, test_df$permeability),
  Rsquared = R2(elastic_pred, test_df$permeability)
 )
results <- rbind(pcr_results, ridge_results, elastic_results, pls_results)

kable(results, row.names = FALSE)

|Model      |     RMSE|  Rsquared|
|:----------|--------:|---------:|
|PCR        | 15.65137| 0.1442742|
|RIDGE      | 15.19384| 0.2120270|
|ElasticNet | 11.40204| 0.5407578|
|PLS        | 13.19697| 0.2622712|

Note that the PLS row carries the re-sampled estimate from part (d), while the other rows are test-set values, so the rows are not strictly like-for-like.

f.

Would you recommend any of your models to replace the permeability laboratory experiment?

  • ElasticNet has the lowest RMSE and the highest R squared of the models compared.
  • It would be the best candidate, though an R squared of ~0.54 still leaves substantial unexplained variance, so it should supplement rather than fully replace the laboratory experiment.

6.3

A chemical manufacturing process for a pharmaceutical product was discussed in Sect. 1.4. In this problem, the objective is to understand the relationship between biological measurements of the raw materials (predictors), measurements of the manufacturing process (predictors), and the response of product yield. Biological predictors cannot be changed but can be used to assess the quality of the raw material before processing. On the other hand, manufacturing process predictors can be changed in the manufacturing process. Improving product yield by 1% will boost revenue by approximately one hundred thousand dollars per batch:

a.

Start R and use these commands to load the data:

library(AppliedPredictiveModeling)
library(RANN)
data(ChemicalManufacturingProcess)

The matrix processPredictors contains the 57 predictors (12 describing the input biological material and 45 describing the process predictors) for the 176 manufacturing runs. yield contains the percent yield for each run.

b.

A small percentage of cells in the predictor set contain missing values. Use an imputation function to fill in these missing values (e.g., see Sect. 3.8).

# k-nearest-neighbor imputation; preProcess automatically centers and
# scales the data when "knnImpute" is requested
knn_model <- preProcess(ChemicalManufacturingProcess, "knnImpute")
CMP_predict <- predict(knn_model, ChemicalManufacturingProcess)
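
A quick sanity check that the imputation filled every cell:

# Should print 0 after imputation
sum(is.na(CMP_predict))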

c.

Split the data into a training and a test set, pre-process the data, and tune a model of your choice from this chapter. What is the optimal value of the performance metric?

df <- CMP_predict %>%
  select_at(vars(-one_of(nearZeroVar(.,names = TRUE))))

split_data <- createDataPartition(df$Yield, p=.75, list = FALSE)

train_df <- df[split_data,]
test_df <- df[-split_data,]

# tuneLength is omitted here: it is ignored when an explicit tuneGrid is supplied
plsfit <- train(Yield ~ .,
  data = train_df,
  method = "pls",
  tuneGrid = expand.grid(ncomp = 1:10),
  trControl = trainControl(method = "cv", number = 10)
)

title <- paste("Training set RMSE minimized at",
               plsfit$bestTune$ncomp, 
               "Components")

plsfit_graph <- data.frame(ncomp = plsfit$results$ncomp, RMSE = plsfit$results$RMSE)

ggplot(plsfit_graph, aes(x = ncomp, y = RMSE)) +
  geom_line() +
  geom_point() +
  xlab("Number of Components") +
  ggtitle(title)

plsfit$results
##    ncomp      RMSE  Rsquared       MAE    RMSESD RsquaredSD      MAESD
## 1      1 0.7472099 0.4262670 0.6236362 0.1367575  0.2596593 0.13257445
## 2      2 0.6626974 0.5306621 0.5448321 0.1042993  0.2458386 0.11149077
## 3      3 0.6293885 0.5698569 0.5206214 0.0991379  0.2262120 0.10617586
## 4      4 0.6144691 0.5929567 0.5030554 0.1121245  0.2343502 0.11119358
## 5      5 0.6139138 0.6046150 0.5034673 0.1143169  0.2380311 0.09547191
## 6      6 0.6235815 0.6032130 0.5166274 0.1162388  0.2291827 0.09428940
## 7      7 0.6451769 0.5849935 0.5389323 0.1280646  0.2352423 0.10850624
## 8      8 0.6682045 0.5717118 0.5642601 0.1272223  0.2208726 0.11311625
## 9      9 0.6946711 0.5589944 0.5789888 0.1451478  0.2245601 0.13269763
## 10    10 0.7123591 0.5463201 0.5834654 0.1440022  0.2113789 0.13066141
plsfit$results %>%
  filter(ncomp == plsfit$bestTune$ncomp) %>%
  select(ncomp, RMSE, Rsquared)
##   ncomp      RMSE Rsquared
## 1     5 0.6139138 0.604615
  • After splitting the data 75/25, the PLS model is tuned with 10-fold cross-validation.
  • The optimal number of latent variables is 5 (where the re-sampled RMSE of 0.614 is minimized), with an R2 of ~0.60.

d.

Predict the response for the test set. What is the value of the performance metric and how does this compare with the re-sampled performance metric on the training set?

pls_pred <- predict(plsfit, test_df)

pls_results <- data.frame(
  Model = "PLS",
  RMSE = RMSE(pls_pred, test_df$Yield),
  Rsquared = R2(pls_pred, test_df$Yield)
 )
pls_results
##   Model      RMSE Rsquared
## 1   PLS 0.7143706 0.519878
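
For a side-by-side view of re-sampled versus test-set performance, a minimal sketch (my own addition) combining the two, assuming the plsfit object from part (c):

# Re-sampled (CV) metrics at the chosen ncomp vs. test-set metrics
resampled <- plsfit$results %>%
  filter(ncomp == plsfit$bestTune$ncomp) %>%
  select(RMSE, Rsquared)

rbind(Resampled = resampled, Test = pls_results[, c("RMSE", "Rsquared")])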
  • The test-set RMSE (0.714) is higher and the R squared (0.520) lower than the re-sampled training estimates (0.614 and 0.605).
  • The drop-off is modest, so the model appears to generalize reasonably well.

e.

Which predictors are most important in the model you have trained? Do either the biological or process predictors dominate the list?

pls_important <- varImp(plsfit)$importance %>%
  as.data.frame() %>%
  rownames_to_column("Variable") %>%
  filter(Overall >= 50) %>%
  arrange(desc(Overall)) %>%
  mutate(importance = row_number())

pls_important %>%
  mutate(Variable = fct_reorder(Variable, Overall)) %>%
  ggplot(aes(x = Overall, y = Variable)) +
  geom_col()

  • Manufacturing process predictors dominate the list; the tally below confirms it.
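
A minimal sketch of that tally, assuming the pls_important data frame built above:

# Count process vs. biological predictors among those with Overall >= 50
pls_important %>%
  mutate(type = ifelse(grepl("^Manufacturing", Variable), "Process", "Biological")) %>%
  count(type)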

f.

Explore the relationships between each of the top predictors and the response. How could this information be helpful in improving yield in future runs of the manufacturing process?

pls_top <- varImp(plsfit, scale = FALSE)

# Pair each predictor with its PLS regression coefficient
coefs <- coef(plsfit$finalModel)
coef_order <- tibble(
  plsfit_coef_name = rownames(coefs),
  plsfit_coef = as.vector(coefs)
)

# Note: this ranks by signed coefficient, so strongly negative
# coefficients (which also matter) drop off the bottom of the list
top15 <- coef_order %>%
  arrange(desc(plsfit_coef)) %>%
  slice_head(n = 15)

top15
## # A tibble: 15 x 2
##    plsfit_coef_name       plsfit_coef
##    <chr>                        <dbl>
##  1 ManufacturingProcess32      0.278 
##  2 ManufacturingProcess34      0.163 
##  3 ManufacturingProcess04      0.157 
##  4 ManufacturingProcess09      0.150 
##  5 ManufacturingProcess06      0.127 
##  6 ManufacturingProcess33      0.106 
##  7 BiologicalMaterial02        0.0758
##  8 ManufacturingProcess11      0.0721
##  9 BiologicalMaterial05        0.0670
## 10 ManufacturingProcess45      0.0646
## 11 ManufacturingProcess39      0.0581
## 12 BiologicalMaterial04        0.0557
## 13 BiologicalMaterial03        0.0431
## 14 ManufacturingProcess42      0.0430
## 15 ManufacturingProcess01      0.0412
library(gridExtra)
p1 <- qplot(data = df, Yield, ManufacturingProcess32)
p2 <- qplot(data = df, Yield, ManufacturingProcess34)
p3 <- qplot(data = df, Yield, ManufacturingProcess09)
p4 <- qplot(data = df, Yield, ManufacturingProcess06)
p5 <- qplot(data = df, Yield, ManufacturingProcess04)
p6 <- qplot(data = df, Yield, BiologicalMaterial03)
p7 <- qplot(data = df, Yield, ManufacturingProcess33)
p8 <- qplot(data = df, Yield, ManufacturingProcess19)
p9 <- qplot(data = df, Yield, ManufacturingProcess39)
p10 <- qplot(data = df, Yield, ManufacturingProcess45)
p11 <- qplot(data = df, Yield, ManufacturingProcess11)
p12 <- qplot(data = df, Yield, ManufacturingProcess15)
p13 <- qplot(data = df, Yield, BiologicalMaterial06)
p14 <- qplot(data = df, Yield, ManufacturingProcess10)
p15 <- qplot(data = df, Yield, ManufacturingProcess44)

grid.arrange(p1, p2, p3, p4, nrow = 2, ncol=2)

grid.arrange(p5, p6, p7, p8, nrow = 2, ncol=2)

grid.arrange(p9, p10, p11, p12, nrow = 2, ncol=2)

grid.arrange(p13, p14, p15, nrow = 2, ncol=2)

  • I had hoped to see a noticeably weaker relationship with Yield as the coefficients dropped.
  • The scatter plots do not show a clear enough trend to report as a finding.
df_correlation <- df[, top15$plsfit_coef_name]
df_correlation <- cbind(df_correlation, Yield = df$Yield)

df_corr <- cor(df_correlation)
corrplot::corrplot(df_corr,method = "circle", order = "alphabet", type = "lower", tl.srt = 45)
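
To read the Yield relationships off directly rather than eyeballing the circles, pull the Yield row of the correlation matrix; a minimal sketch using df_corr from above:

# Correlation of each top-15 predictor with Yield, strongest positive first
yield_cor <- df_corr["Yield", setdiff(colnames(df_corr), "Yield")]
sort(yield_cor, decreasing = TRUE)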

  • At a glance, I would recommend de-emphasizing ManufacturingProcess04: it is among the top five coefficient values, yet it has the strongest negative correlation with Yield.
  • I would recommend focusing on ManufacturingProcess32: it has the largest coefficient and the strongest positive correlation with Yield.