1.1 Sample Data
The Solubility dataset from the AppliedPredictiveModeling package was
used for this illustrated example. The original numeric response was
transformed to simulate a dichotomous categorical variable. The other
original predictors were removed from the dataset, leaving only a
subset of numeric predictors for use in the analysis.
Preliminary dataset assessment:
[A] 951 rows (observations)
    [A.1] Train Set = 951 observations
[B] 5 columns (variables)
    [B.1] 1/5 response = Log_Solubility_Class variable (factor)
        [B.1.1] Levels = Log_Solubility_Class=Low < Log_Solubility_Class=High
    [B.2] 4/5 predictors = All remaining variables (0/4 factor + 4/4 numeric)
##################################
# Loading R libraries
##################################
library(AppliedPredictiveModeling)
library(caret)
library(rpart)
library(lattice)
library(dplyr)
library(tidyr)
library(moments)
library(skimr)
library(RANN)
library(pls)
library(corrplot)
library(tidyverse)
library(lares)
library(DMwR)
library(gridExtra)
library(rattle)
library(rpart.plot)
library(RColorBrewer)
library(stats)
library(nnet)
library(elasticnet)
library(earth)
library(party)
library(kernlab)
library(randomForest)
library(Cubist)
library(pROC)
library(mda)
library(klaR)
library(pamr)
library(OptimalCutpoints)
library(broom)
library(PRROC)
##################################
# Loading source and
# formulating the train set
##################################
data(solubility)
Solubility_Train <- as.data.frame(cbind(solTrainY,solTrainX))
##################################
# Applying dichotomization and
# defining the response variable
##################################
Solubility_Train$Log_Solubility_Class <- ifelse(Solubility_Train$solTrainY<mean(Solubility_Train$solTrainY),
"Low","High")
Solubility_Train$Log_Solubility_Class <- factor(Solubility_Train$Log_Solubility_Class,
levels = c("Low","High"))
Solubility_Train$solTrainY <- NULL
##################################
# Filtering in a subset of variables
# for the analysis
##################################
Solubility_Train <- Solubility_Train[,c("MolWeight",
"NumCarbon",
"NumHalogen",
"HydrophilicFactor",
"Log_Solubility_Class")]
##################################
# Performing a general exploration of the train set
##################################
dim(Solubility_Train)
## [1] 951 5
str(Solubility_Train)
## 'data.frame':    951 obs. of  5 variables:
## $ MolWeight : num 208 366 206 136 230 ...
## $ NumCarbon : int 14 21 13 10 9 10 17 12 22 14 ...
## $ NumHalogen : int 0 0 0 0 1 2 2 0 1 0 ...
## $ HydrophilicFactor : num -0.856 -0.37 -0.33 -0.96 -0.069 -0.651 -0.729 -0.835 0.194 0.353 ...
## $ Log_Solubility_Class: Factor w/ 2 levels "Low","High": 1 1 1 1 1 1 1 1 1 1 ...
summary(Solubility_Train)
## MolWeight NumCarbon NumHalogen HydrophilicFactor
## Min. : 46.09 Min. : 1.000 Min. : 0.0000 Min. :-0.98500
## 1st Qu.:122.61 1st Qu.: 6.000 1st Qu.: 0.0000 1st Qu.:-0.76300
## Median :179.23 Median : 9.000 Median : 0.0000 Median :-0.31400
## Mean :201.65 Mean : 9.893 Mean : 0.6982 Mean :-0.02059
## 3rd Qu.:264.34 3rd Qu.:12.000 3rd Qu.: 1.0000 3rd Qu.: 0.31300
## Max. :665.81 Max. :33.000 Max. :10.0000 Max. :13.48300
## Log_Solubility_Class
## Low :427
## High:524
##
##
##
##
##################################
# Formulating a data type assessment summary
##################################
PDA <- Solubility_Train
(PDA.Summary <- data.frame(
Column.Index=c(1:length(names(PDA))),
Column.Name= names(PDA),
Column.Type=sapply(PDA, function(x) class(x)),
row.names=NULL)
)
## Column.Index Column.Name Column.Type
## 1 1 MolWeight numeric
## 2 2 NumCarbon integer
## 3 3 NumHalogen integer
## 4 4 HydrophilicFactor numeric
## 5 5 Log_Solubility_Class factor
1.2 Data Quality Assessment
Data quality assessment:
[A] No missing observations noted for any variable.
[B] Low variance observed for 1 variable with First.Second.Mode.Ratio>5:
    [B.1] NumHalogen variable (numeric)
[C] No low variance observed for any variable with Unique.Count.Ratio<0.01.
[D] High skewness observed for 1 variable with Skewness>3 or Skewness<(-3):
    [D.1] HydrophilicFactor variable (numeric)
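The same thresholds can also be approximated in a single call with the nearZeroVar function from the caret package, as sketched below. The freqCut and uniqueCut settings are assumptions chosen here to mirror the First.Second.Mode.Ratio and Unique.Count.Ratio criteria above (uniqueCut is expressed as a percentage), and the call is provided for reference only rather than as part of the original assessment.
##################################
# Sketch: reporting caret::nearZeroVar metrics
# for comparison against the criteria above
# (freqCut=5 mirrors First.Second.Mode.Ratio>5;
#  uniqueCut=1 mirrors Unique.Count.Ratio<0.01)
##################################
nearZeroVar(Solubility_Train[,c("MolWeight",
                                "NumCarbon",
                                "NumHalogen",
                                "HydrophilicFactor")],
            freqCut = 5,
            uniqueCut = 1,
            saveMetrics = TRUE)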
##################################
# Loading dataset
##################################
DQA <- Solubility_Train
##################################
# Formulating an overall data quality assessment summary
##################################
(DQA.Summary <- data.frame(
Column.Index=c(1:length(names(DQA))),
Column.Name= names(DQA),
Column.Type=sapply(DQA, function(x) class(x)),
Row.Count=sapply(DQA, function(x) nrow(DQA)),
NA.Count=sapply(DQA,function(x)sum(is.na(x))),
Fill.Rate=sapply(DQA,function(x)format(round((sum(!is.na(x))/nrow(DQA)),3),nsmall=3)),
row.names=NULL)
)
## Column.Index Column.Name Column.Type Row.Count NA.Count Fill.Rate
## 1 1 MolWeight numeric 951 0 1.000
## 2 2 NumCarbon integer 951 0 1.000
## 3 3 NumHalogen integer 951 0 1.000
## 4 4 HydrophilicFactor numeric 951 0 1.000
## 5 5 Log_Solubility_Class factor 951 0 1.000
##################################
# Listing all predictors
##################################
DQA.Predictors <- DQA[,!names(DQA) %in% c("Log_Solubility_Class")]
##################################
# Listing all numeric predictors
##################################
DQA.Predictors.Numeric <- DQA.Predictors[,sapply(DQA.Predictors, is.numeric)]
if (length(names(DQA.Predictors.Numeric))>0) {
print(paste0("There are ",
(length(names(DQA.Predictors.Numeric))),
" numeric predictor variable(s)."))
} else {
print("There are no numeric predictor variables.")
}
## [1] "There are 4 numeric predictor variable(s)."
##################################
# Listing all factor predictors
##################################
DQA.Predictors.Factor <- DQA.Predictors[,sapply(DQA.Predictors, is.factor)]
if (length(names(DQA.Predictors.Factor))>0) {
print(paste0("There are ",
(length(names(DQA.Predictors.Factor))),
" factor predictor variable(s)."))
} else {
print("There are no factor predictor variables.")
}
## [1] "There are no factor predictor variables."
##################################
# Formulating a data quality assessment summary for factor predictors
##################################
if (length(names(DQA.Predictors.Factor))>0) {
##################################
# Formulating a function to determine the first mode
##################################
FirstModes <- function(x) {
ux <- unique(na.omit(x))
tab <- tabulate(match(x, ux))
ux[tab == max(tab)]
}
##################################
# Formulating a function to determine the second mode
##################################
SecondModes <- function(x) {
ux <- unique(na.omit(x))
tab <- tabulate(match(x, ux))
fm = ux[tab == max(tab)]
sm = x[!(x %in% fm)]
usm <- unique(sm)
tabsm <- tabulate(match(sm, usm))
ifelse(is.na(usm[tabsm == max(tabsm)])==TRUE,
return("x"),
return(usm[tabsm == max(tabsm)]))
}
(DQA.Predictors.Factor.Summary <- data.frame(
Column.Name= names(DQA.Predictors.Factor),
Column.Type=sapply(DQA.Predictors.Factor, function(x) class(x)),
Unique.Count=sapply(DQA.Predictors.Factor, function(x) length(unique(x))),
First.Mode.Value=sapply(DQA.Predictors.Factor, function(x) as.character(FirstModes(x)[1])),
Second.Mode.Value=sapply(DQA.Predictors.Factor, function(x) as.character(SecondModes(x)[1])),
First.Mode.Count=sapply(DQA.Predictors.Factor, function(x) sum(na.omit(x) == FirstModes(x)[1])),
Second.Mode.Count=sapply(DQA.Predictors.Factor, function(x) sum(na.omit(x) == SecondModes(x)[1])),
Unique.Count.Ratio=sapply(DQA.Predictors.Factor, function(x) format(round((length(unique(x))/nrow(DQA.Predictors.Factor)),3), nsmall=3)),
First.Second.Mode.Ratio=sapply(DQA.Predictors.Factor, function(x) format(round((sum(na.omit(x) == FirstModes(x)[1])/sum(na.omit(x) == SecondModes(x)[1])),3), nsmall=3)),
row.names=NULL)
)
}
##################################
# Formulating a data quality assessment summary for numeric predictors
##################################
if (length(names(DQA.Predictors.Numeric))>0) {
##################################
# Formulating a function to determine the first mode
##################################
FirstModes <- function(x) {
ux <- unique(na.omit(x))
tab <- tabulate(match(x, ux))
ux[tab == max(tab)]
}
##################################
# Formulating a function to determine the second mode
##################################
SecondModes <- function(x) {
ux <- unique(na.omit(x))
tab <- tabulate(match(x, ux))
fm = ux[tab == max(tab)]
sm = na.omit(x)[!(na.omit(x) %in% fm)]
usm <- unique(sm)
tabsm <- tabulate(match(sm, usm))
ifelse(is.na(usm[tabsm == max(tabsm)])==TRUE,
return(0.00001),
return(usm[tabsm == max(tabsm)]))
}
(DQA.Predictors.Numeric.Summary <- data.frame(
Column.Name= names(DQA.Predictors.Numeric),
Column.Type=sapply(DQA.Predictors.Numeric, function(x) class(x)),
Unique.Count=sapply(DQA.Predictors.Numeric, function(x) length(unique(x))),
Unique.Count.Ratio=sapply(DQA.Predictors.Numeric, function(x) format(round((length(unique(x))/nrow(DQA.Predictors.Numeric)),3), nsmall=3)),
First.Mode.Value=sapply(DQA.Predictors.Numeric, function(x) format(round((FirstModes(x)[1]),3),nsmall=3)),
Second.Mode.Value=sapply(DQA.Predictors.Numeric, function(x) format(round((SecondModes(x)[1]),3),nsmall=3)),
First.Mode.Count=sapply(DQA.Predictors.Numeric, function(x) sum(na.omit(x) == FirstModes(x)[1])),
Second.Mode.Count=sapply(DQA.Predictors.Numeric, function(x) sum(na.omit(x) == SecondModes(x)[1])),
First.Second.Mode.Ratio=sapply(DQA.Predictors.Numeric, function(x) format(round((sum(na.omit(x) == FirstModes(x)[1])/sum(na.omit(x) == SecondModes(x)[1])),3), nsmall=3)),
Minimum=sapply(DQA.Predictors.Numeric, function(x) format(round(min(x,na.rm = TRUE),3), nsmall=3)),
Mean=sapply(DQA.Predictors.Numeric, function(x) format(round(mean(x,na.rm = TRUE),3), nsmall=3)),
Median=sapply(DQA.Predictors.Numeric, function(x) format(round(median(x,na.rm = TRUE),3), nsmall=3)),
Maximum=sapply(DQA.Predictors.Numeric, function(x) format(round(max(x,na.rm = TRUE),3), nsmall=3)),
Skewness=sapply(DQA.Predictors.Numeric, function(x) format(round(skewness(x,na.rm = TRUE),3), nsmall=3)),
Kurtosis=sapply(DQA.Predictors.Numeric, function(x) format(round(kurtosis(x,na.rm = TRUE),3), nsmall=3)),
Percentile25th=sapply(DQA.Predictors.Numeric, function(x) format(round(quantile(x,probs=0.25,na.rm = TRUE),3), nsmall=3)),
Percentile75th=sapply(DQA.Predictors.Numeric, function(x) format(round(quantile(x,probs=0.75,na.rm = TRUE),3), nsmall=3)),
row.names=NULL)
)
}
## Column.Name Column.Type Unique.Count Unique.Count.Ratio
## 1 MolWeight numeric 646 0.679
## 2 NumCarbon integer 28 0.029
## 3 NumHalogen integer 11 0.012
## 4 HydrophilicFactor numeric 369 0.388
## First.Mode.Value Second.Mode.Value First.Mode.Count Second.Mode.Count
## 1 102.200 116.230 16 14
## 2 6.000 7.000 105 97
## 3 0.000 1.000 685 107
## 4 -0.828 -0.158 21 20
## First.Second.Mode.Ratio Minimum Mean Median Maximum Skewness Kurtosis
## 1 1.143 46.090 201.654 179.230 665.810 0.988 3.945
## 2 1.082 1.000 9.893 9.000 33.000 0.927 3.616
## 3 6.402 0.000 0.698 0.000 10.000 2.691 10.808
## 4 1.050 -0.985 -0.021 -0.314 13.483 3.404 27.504
## Percentile25th Percentile75th
## 1 122.605 264.340
## 2 6.000 12.000
## 3 0.000 1.000
## 4 -0.763 0.313
##################################
# Identifying potential data quality issues
##################################
##################################
# Checking for missing observations
##################################
if ((nrow(DQA.Summary[DQA.Summary$NA.Count>0,]))>0){
print(paste0("Missing observations noted for ",
(nrow(DQA.Summary[DQA.Summary$NA.Count>0,])),
" variable(s) with NA.Count>0 and Fill.Rate<1.0."))
DQA.Summary[DQA.Summary$NA.Count>0,]
} else {
print("No missing observations noted.")
}
## [1] "No missing observations noted."
##################################
# Checking for zero or near-zero variance predictors
##################################
if (length(names(DQA.Predictors.Factor))==0) {
print("No factor predictors noted.")
} else if (nrow(DQA.Predictors.Factor.Summary[as.numeric(as.character(DQA.Predictors.Factor.Summary$First.Second.Mode.Ratio))>5,])>0){
print(paste0("Low variance observed for ",
(nrow(DQA.Predictors.Factor.Summary[as.numeric(as.character(DQA.Predictors.Factor.Summary$First.Second.Mode.Ratio))>5,])),
" factor variable(s) with First.Second.Mode.Ratio>5."))
DQA.Predictors.Factor.Summary[as.numeric(as.character(DQA.Predictors.Factor.Summary$First.Second.Mode.Ratio))>5,]
} else {
print("No low variance factor predictors due to high first-second mode ratio noted.")
}
## [1] "No factor predictors noted."
if (length(names(DQA.Predictors.Numeric))==0) {
print("No numeric predictors noted.")
} else if (nrow(DQA.Predictors.Numeric.Summary[as.numeric(as.character(DQA.Predictors.Numeric.Summary$First.Second.Mode.Ratio))>5,])>0){
print(paste0("Low variance observed for ",
(nrow(DQA.Predictors.Numeric.Summary[as.numeric(as.character(DQA.Predictors.Numeric.Summary$First.Second.Mode.Ratio))>5,])),
" numeric variable(s) with First.Second.Mode.Ratio>5."))
DQA.Predictors.Numeric.Summary[as.numeric(as.character(DQA.Predictors.Numeric.Summary$First.Second.Mode.Ratio))>5,]
} else {
print("No low variance numeric predictors due to high first-second mode ratio noted.")
}
## [1] "Low variance observed for 1 numeric variable(s) with First.Second.Mode.Ratio>5."
## Column.Name Column.Type Unique.Count Unique.Count.Ratio First.Mode.Value
## 3 NumHalogen integer 11 0.012 0.000
## Second.Mode.Value First.Mode.Count Second.Mode.Count First.Second.Mode.Ratio
## 3 1.000 685 107 6.402
## Minimum Mean Median Maximum Skewness Kurtosis Percentile25th Percentile75th
## 3 0.000 0.698 0.000 10.000 2.691 10.808 0.000 1.000
if (length(names(DQA.Predictors.Numeric))==0) {
print("No numeric predictors noted.")
} else if (nrow(DQA.Predictors.Numeric.Summary[as.numeric(as.character(DQA.Predictors.Numeric.Summary$Unique.Count.Ratio))<0.01,])>0){
print(paste0("Low variance observed for ",
(nrow(DQA.Predictors.Numeric.Summary[as.numeric(as.character(DQA.Predictors.Numeric.Summary$Unique.Count.Ratio))<0.01,])),
" numeric variable(s) with Unique.Count.Ratio<0.01."))
DQA.Predictors.Numeric.Summary[as.numeric(as.character(DQA.Predictors.Numeric.Summary$Unique.Count.Ratio))<0.01,]
} else {
print("No low variance numeric predictors due to low unique count ratio noted.")
}
## [1] "No low variance numeric predictors due to low unique count ratio noted."
##################################
# Checking for skewed predictors
##################################
if (length(names(DQA.Predictors.Numeric))==0) {
print("No numeric predictors noted.")
} else if (nrow(DQA.Predictors.Numeric.Summary[as.numeric(as.character(DQA.Predictors.Numeric.Summary$Skewness))>3 |
as.numeric(as.character(DQA.Predictors.Numeric.Summary$Skewness))<(-3),])>0){
print(paste0("High skewness observed for ",
(nrow(DQA.Predictors.Numeric.Summary[as.numeric(as.character(DQA.Predictors.Numeric.Summary$Skewness))>3 |
as.numeric(as.character(DQA.Predictors.Numeric.Summary$Skewness))<(-3),])),
" numeric variable(s) with Skewness>3 or Skewness<(-3)."))
DQA.Predictors.Numeric.Summary[as.numeric(as.character(DQA.Predictors.Numeric.Summary$Skewness))>3 |
as.numeric(as.character(DQA.Predictors.Numeric.Summary$Skewness))<(-3),]
} else {
print("No skewed numeric predictors noted.")
}
## [1] "High skewness observed for 1 numeric variable(s) with Skewness>3 or Skewness<(-3)."
## Column.Name Column.Type Unique.Count Unique.Count.Ratio
## 4 HydrophilicFactor numeric 369 0.388
## First.Mode.Value Second.Mode.Value First.Mode.Count Second.Mode.Count
## 4 -0.828 -0.158 21 20
## First.Second.Mode.Ratio Minimum Mean Median Maximum Skewness Kurtosis
## 4 1.050 -0.985 -0.021 -0.314 13.483 3.404 27.504
## Percentile25th Percentile75th
## 4 -0.763 0.313
1.5 Predictive Model Development and Dichotomization Thresholding
1.5.1 Logistic Regression Model Index and Probability Curve Estimation
Logistic Regression models the probability of an event (between two
outcome levels) by expressing the log-odds of the event as a linear
combination of a set of predictors weighted by their respective
parameter estimates. The parameters are estimated via maximum
likelihood: for a candidate set of parameters, the conditional
probability of each observed outcome is computed, logged, and summed
to form the log-likelihood, and an iterative procedure (Fisher scoring)
searches for the parameter values that maximize this function. Given
the fitted parameters, a predicted probability is obtained for each
observation by transforming its linear predictor (log-odds) back to
the probability scale.
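As a minimal numerical illustration of this estimation idea (using simulated data and generic variable names rather than the solubility predictors), the maximized log-likelihood reported by a fitted glm object can be reproduced by summing the logged conditional probabilities of the observed outcomes:
##################################
# Sketch: reproducing the maximized log-likelihood
# of a logistic regression fit on simulated data
# (illustrative only; x and y are generic variables)
##################################
set.seed(12345678)
x <- rnorm(100)
y <- rbinom(100, size = 1, prob = plogis(-0.5 + 1.5*x))
Sketch_Fit <- glm(y ~ x, family = binomial)
p <- fitted(Sketch_Fit)
# Summing the logged conditional probabilities of the observed outcomes
sum(y*log(p) + (1 - y)*log(1 - p))
# Matches the maximized log-likelihood reported by the fitted model
logLik(Sketch_Fit)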
[A] The logistic regression model from both the stats and caret
packages was implemented. The Log_Solubility_Class response was
regressed against the MolWeight, NumCarbon, NumHalogen and
HydrophilicFactor predictors.
[B] The logistic curve was formulated by plotting the predicted
probabilities against the classification index (the logit values).
[C] Discrimination power of the model was quantified using the area
under the receiver operating characteristic (ROC) curve, formulated
for both the predicted probabilities and the classification index.
However, the subsequent exercise on exploring dichotomization
thresholds was applied only to the predicted probabilities.
[D] The performance of the model is summarized as follows:
    [D.1] Final model configuration is fixed due to the absence of a hyperparameter
    [D.2] Apparent ROC Curve AUC = 0.91432
    [D.3] Cross-Validated ROC Curve AUC = 0.91229 (a resampling sketch is shown after this list)
[E] The model allows for ranking of predictors in terms of variable
importance. The top-performing predictors in the model are as follows:
    [E.1] HydrophilicFactor variable (numeric)
    [E.2] NumCarbon variable (numeric)
    [E.3] NumHalogen variable (numeric)
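For reference, a cross-validated ROC AUC of this form can be obtained through the train function from the caret package, as sketched below. The 10-fold resampling scheme and random seed are illustrative assumptions only; the resampling configuration actually used to produce the reported estimate is not shown in this section.
##################################
# Sketch: estimating a cross-validated ROC AUC
# for the logistic regression model with caret
# (10-fold CV and the seed are assumptions;
#  twoClassSummary treats the first factor level,
#  Low, as the event class)
##################################
set.seed(12345678)
LR_CV_Sketch <- train(Log_Solubility_Class ~ MolWeight +
                                             NumCarbon +
                                             NumHalogen +
                                             HydrophilicFactor,
                      data = Solubility_Train,
                      method = "glm",
                      family = binomial,
                      metric = "ROC",
                      trControl = trainControl(method = "cv",
                                               number = 10,
                                               classProbs = TRUE,
                                               summaryFunction = twoClassSummary))
LR_CV_Sketch$results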
##################################
# Creating a local object
# for the train set
##################################
PMA_PreModelling_Train_LR <- PMA_PreModelling_Train
##################################
# Formulating the Logistic Regression model
##################################
LR_Model <- PMA_PreModelling_Train_LR %>%
mutate(Log_Solubility_Class = as.factor(Log_Solubility_Class)) %>%
glm(formula = Log_Solubility_Class ~ MolWeight +
NumCarbon +
NumHalogen +
HydrophilicFactor,
family = binomial)
##################################
# Consolidating the model results
##################################
summary(LR_Model)
##
## Call:
## glm(formula = Log_Solubility_Class ~ MolWeight + NumCarbon +
## NumHalogen + HydrophilicFactor, family = binomial, data = .)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.2886 -0.4547 0.1621 0.5964 3.4332
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 5.432065 0.361092 15.043 < 2e-16 ***
## MolWeight -0.007881 0.002473 -3.187 0.00144 **
## NumCarbon -0.319809 0.044961 -7.113 1.14e-12 ***
## NumHalogen -0.767695 0.130296 -5.892 3.82e-09 ***
## HydrophilicFactor 1.092854 0.145385 7.517 5.61e-14 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 1308.45 on 950 degrees of freedom
## Residual deviance: 709.09 on 946 degrees of freedom
## AIC: 719.09
##
## Number of Fisher Scoring iterations: 6
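As a quick illustrative check (not part of the original workflow), a single fitted probability can be reproduced directly from the coefficient estimates by transforming the linear predictor back to the probability scale:
##################################
# Sketch: recovering the fitted probability of the
# first training observation from the coefficients
# (glm models the probability of the second factor
#  level, i.e. Log_Solubility_Class = High)
##################################
first_obs <- PMA_PreModelling_Train_LR[1, c("MolWeight",
                                            "NumCarbon",
                                            "NumHalogen",
                                            "HydrophilicFactor")]
linear_predictor <- coef(LR_Model)["(Intercept)"] +
  sum(coef(LR_Model)[c("MolWeight",
                       "NumCarbon",
                       "NumHalogen",
                       "HydrophilicFactor")] * unlist(first_obs))
# plogis() maps the log-odds back to a probability
plogis(linear_predictor)
# Should match the first fitted probability from the model
# (same training row order assumed)
fitted(LR_Model)[1]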
##################################
# Computing the model predictions
##################################
LR_Model_Predictions <- augment(LR_Model,
type.predict = 'response',
newdata = PMA_PreModelling_Train_LR,
se_fit = TRUE) %>%
mutate(lower = .fitted - 1.96*.se.fit,
upper = .fitted + 1.96*.se.fit) %>%
mutate_if(is.numeric, ~ round(.,5))
LR_Model_Predictions <- as.data.frame(LR_Model_Predictions)
LR_Model_Predictions$LR_Prob <- LR_Model_Predictions$.fitted
LR_Model_Predictions$LR_SE <- LR_Model_Predictions$.se.fit
LR_Model_Predictions$LR_LCL <- LR_Model_Predictions$lower
LR_Model_Predictions$LR_UCL <- LR_Model_Predictions$upper
##################################
# Creating a classification index
# based on the model predictions
##################################
LR_Model_Predictions$LR_LP <- log(LR_Model_Predictions$LR_Prob/(1-LR_Model_Predictions$LR_Prob))
(LR_Model_Predictions <- LR_Model_Predictions[,!names(LR_Model_Predictions) %in%
c(".fitted",
".se.fit",
"lower",
"upper")])
## .rownames MolWeight NumCarbon NumHalogen HydrophilicFactor
## 1 661 208.28 14 0 -0.856
## 2 662 365.54 21 0 -0.370
## 3 663 206.31 13 0 -0.330
## 4 665 136.26 10 0 -0.960
## 5 668 229.75 9 1 -0.069
## 6 669 270.25 10 2 -0.651
## 7 670 343.23 17 2 -0.729
## 8 671 200.36 12 0 -0.835
## 9 672 376.51 22 1 0.194
## 10 673 232.36 14 0 0.353
## 11 674 230.28 14 0 -0.807
## 12 676 288.47 19 0 -0.432
## 13 677 156.24 12 0 -0.969
## 14 678 162.62 10 1 -0.877
## 15 679 148.27 11 0 -0.965
## 16 682 230.28 14 0 -0.314
## 17 683 316.48 13 0 -0.151
## 18 684 404.02 21 1 -0.346
## 19 685 290.49 19 0 -0.432
## 20 686 156.24 12 0 -0.969
## 21 688 274.71 15 1 -0.301
## 22 689 280.45 19 0 -0.891
## 23 690 311.89 17 1 -0.797
## 24 691 434.98 10 6 0.617
## 25 692 397.57 14 0 -0.155
## 26 693 296.43 14 0 0.410
## 27 695 328.52 19 0 -0.815
## 28 696 194.25 13 0 0.366
## 29 698 263.42 16 0 -0.353
## 30 699 172.01 7 2 -0.672
## 31 700 124.25 9 0 -0.954
## 32 702 100.23 7 0 -0.936
## 33 703 112.24 8 0 -0.946
## 34 704 156.24 12 0 -0.969
## 35 706 244.31 15 0 -0.818
## 36 708 154.22 12 0 -0.969
## 37 709 112.24 8 0 -0.946
## 38 711 408.64 24 0 1.561
## 39 712 268.38 18 0 0.213
## 40 713 281.25 14 3 0.410
## 41 714 402.53 23 0 0.194
## 42 715 258.11 11 2 -0.168
## 43 717 207.07 10 1 -0.877
## 44 718 373.99 20 1 -0.790
## 45 721 229.75 9 1 0.626
## 46 722 329.34 19 3 -0.782
## 47 723 112.24 8 0 -0.946
## 48 724 182.24 12 0 -0.835
## 49 726 289.54 12 3 -0.198
## 50 728 244.28 15 1 -0.334
## 51 729 314.79 6 3 -0.636
## 52 731 100.23 7 0 -0.936
## 53 732 188.66 12 1 -0.897
## 54 733 201.69 7 1 0.754
## 55 734 215.88 6 4 -0.576
## 56 735 298.46 20 0 -0.444
## 57 736 235.36 14 0 0.353
## 58 737 185.26 11 0 -0.822
## 59 739 352.79 17 1 -0.699
## 60 740 357.81 19 1 -0.317
## 61 741 200.36 12 0 -0.305
## 62 742 186.33 11 0 -0.822
## 63 743 215.88 6 4 -0.576
## 64 744 154.22 12 0 -0.969
## 65 746 278.38 16 0 -0.787
## 66 747 186.38 12 0 -0.352
## 67 749 446.06 23 1 -0.759
## 68 752 476.64 27 1 0.147
## 69 753 112.24 8 0 -0.946
## 70 754 310.50 19 0 -0.851
## 71 755 156.24 12 0 -0.969
## 72 757 226.12 7 1 -0.828
## 73 758 198.28 14 0 -0.912
## 74 759 296.60 10 0 -0.612
## 75 760 188.66 12 1 -0.897
## 76 761 434.55 24 1 0.193
## 77 762 166.23 13 0 -0.972
## 78 763 253.14 12 2 2.137
## 79 764 170.27 13 0 -0.972
## 80 765 428.13 17 2 -0.310
## 81 767 402.95 23 1 -0.813
## 82 768 167.22 12 0 -0.352
## 83 770 318.90 17 1 -0.797
## 84 771 318.54 12 3 -0.146
## 85 772 314.79 6 3 -0.636
## 86 773 126.27 9 0 -0.954
## 87 774 193.15 8 1 -0.848
## 88 775 254.30 16 0 -0.353
## 89 776 494.06 23 1 0.892
## 90 777 314.57 12 0 -0.692
## 91 778 465.70 26 0 2.281
## 92 779 193.26 14 0 0.301
## 93 780 272.75 5 6 -0.439
## 94 781 310.53 19 0 -0.851
## 95 782 208.22 14 0 -0.856
## 96 784 650.98 15 3 1.908
## 97 786 114.26 8 0 -0.946
## 98 787 223.10 12 2 -0.835
## 99 788 340.55 23 0 -0.909
## 100 789 223.10 12 2 -0.835
## 101 791 223.10 12 2 -0.835
## 102 792 352.46 18 3 -0.740
## 103 794 228.42 14 0 -0.352
## 104 798 256.32 18 0 -0.886
## 105 799 349.07 10 4 -0.545
## 106 800 306.56 19 0 -0.432
## 107 804 293.45 19 0 -0.851
## 108 805 460.67 27 0 0.132
## 109 807 384.53 9 0 -0.466
## 110 808 349.41 21 0 -0.370
## 111 809 312.39 19 0 -0.815
## 112 810 338.86 10 6 -0.612
## 113 813 284.54 18 0 -0.419
## 114 814 345.25 15 2 -0.672
## 115 818 257.54 12 3 -0.781
## 116 819 310.70 14 3 0.425
## 117 820 214.44 14 0 -0.395
## 118 821 257.54 12 3 -0.781
## 119 822 202.26 16 0 -0.979
## 120 823 243.32 18 0 0.186
## 121 827 380.90 12 6 -0.621
## 122 828 257.54 12 3 -0.781
## 123 829 393.88 14 1 -0.601
## 124 831 318.02 14 4 -0.763
## 125 832 223.10 12 2 -0.835
## 126 833 291.98 12 4 -0.734
## 127 834 320.04 14 4 -0.763
## 128 835 223.10 12 2 -0.835
## 129 836 291.98 12 4 -0.734
## 130 839 216.29 17 0 -0.980
## 131 840 228.30 18 0 -0.982
## 132 841 291.98 12 4 -0.734
## 133 842 206.30 16 0 -0.979
## 134 843 318.02 14 4 -0.763
## 135 846 230.32 18 0 -0.982
## 136 848 326.42 12 5 -0.692
## 137 849 291.98 12 4 -0.734
## 138 851 414.69 27 0 -0.481
## 139 854 291.98 12 4 -0.734
## 140 855 267.34 20 0 -0.479
## 141 856 360.86 12 6 -0.654
## 142 857 326.42 12 5 -0.692
## 143 858 360.86 12 6 -0.654
## 144 859 360.86 12 6 -0.654
## 145 860 291.98 12 4 -0.734
## 146 862 326.42 12 5 -0.692
## 147 863 395.30 12 7 -0.621
## 148 864 268.37 21 0 -0.985
## 149 865 252.32 20 0 -0.984
## 150 866 360.86 12 6 -0.654
## 151 867 252.32 20 0 -0.984
## 152 869 379.41 22 3 -0.778
## 153 870 360.86 12 6 -0.654
## 154 871 360.86 12 6 -0.654
## 155 872 360.86 12 6 -0.654
## 156 873 252.32 20 0 -0.984
## 157 875 429.74 12 8 -0.590
## 158 876 429.74 12 8 -0.590
## 159 877 498.62 12 10 -0.538
## 160 1190 170.22 12 0 -0.897
## 161 1191 134.67 7 1 -0.828
## 162 1192 302.50 20 0 -0.444
## 163 1193 148.27 11 0 -0.965
## 164 1194 402.53 23 0 -0.371
## 165 1195 181.44 6 3 -0.636
## 166 1197 134.24 10 0 -0.960
## 167 1198 235.90 6 2 -0.710
## 168 1199 241.41 10 0 0.573
## 169 1200 260.42 7 0 -0.523
## 170 1201 383.43 21 0 -0.742
## 171 1202 478.56 25 1 0.206
## 172 1203 112.24 8 0 -0.946
## 173 1204 331.38 15 0 0.407
## 174 1205 282.24 13 3 0.459
## 175 1206 226.30 4 5 -0.424
## 176 1207 284.46 17 0 -0.836
## 177 1208 416.63 24 0 -0.819
## 178 1209 266.32 6 5 0.079
## 179 1210 392.64 24 0 0.811
## 180 1212 296.44 20 0 0.169
## 181 1213 284.46 17 0 -0.836
## 182 1215 434.55 24 1 0.193
## 183 1216 184.27 12 0 -0.897
## 184 1217 331.21 17 2 -0.310
## 185 1219 270.40 18 0 0.213
## 186 1220 375.90 21 2 -0.370
## 187 1221 179.12 7 1 -0.828
## 188 1222 277.44 20 0 -0.938
## 189 1226 407.55 21 3 -0.742
## 190 1228 278.36 10 0 -0.612
## 191 1229 444.62 26 0 0.146
## 192 1230 223.24 14 0 -0.807
## 193 1231 182.28 14 0 -0.975
## 194 1233 316.53 21 0 -0.455
## 195 1234 156.24 12 0 -0.969
## 196 1236 340.50 22 0 -0.869
## 197 1237 322.54 7 3 -0.428
## 198 1239 304.69 10 3 -0.612
## 199 1242 260.74 4 6 -0.384
## 200 1244 188.66 12 1 -0.897
## 201 1245 162.30 12 0 -0.969
## 202 1246 266.36 18 0 -0.419
## 203 1247 350.49 23 0 -0.874
## 204 1249 362.79 14 1 -0.627
## 205 1250 354.86 10 6 -0.083
## 206 1251 265.90 8 4 -0.557
## 207 1253 342.90 11 1 -0.600
## 208 1254 223.10 12 2 -0.835
## 209 1255 192.27 15 0 -0.977
## 210 1256 208.32 16 0 -0.979
## 211 1257 257.54 12 3 -0.781
## 212 1259 380.90 12 6 -0.621
## 213 1260 257.54 12 3 -0.781
## 214 1262 391.31 21 2 -0.798
## 215 1264 373.30 10 7 -0.577
## 216 1265 291.98 12 4 -0.734
## 217 1266 354.48 14 5 -0.724
## 218 1267 416.39 21 0 0.926
## 219 1268 256.48 16 0 -0.389
## 220 1273 291.98 12 4 -0.734
## 221 1274 242.50 16 0 -0.429
## 222 1275 364.90 12 6 -0.654
## 223 1276 252.32 20 0 -0.984
## 224 1277 326.42 12 5 -0.692
## 225 1278 216.29 17 0 -0.980
## 226 1279 228.30 18 0 -0.982
## 227 1281 360.86 12 6 -0.654
## 228 1282 360.86 12 6 -0.654
## 229 1283 252.32 20 0 -0.984
## 230 1284 228.30 18 0 -0.982
## 231 1285 360.86 12 6 -0.654
## 232 1288 464.18 12 9 -0.563
## 233 1299 252.29 15 0 0.341
## 234 1301 288.47 19 0 -0.432
## 235 1302 290.82 6 6 -0.484
## 236 1307 350.60 9 3 -0.491
## 237 1309 354.48 14 5 -0.724
## 238 1310 409.76 10 8 -0.545
## 239 447 221.04 8 2 -0.053
## 240 448 464.89 21 1 4.124
## 241 451 70.15 5 0 -0.898
## 242 452 144.58 7 2 -0.742
## 243 453 145.17 9 0 -0.206
## 244 454 294.37 12 0 1.251
## 245 455 214.66 10 1 -0.166
## 246 456 217.29 13 0 -0.291
## 247 458 280.34 11 0 1.303
## 248 459 142.27 9 0 -0.864
## 249 460 82.16 6 0 -0.921
## 250 461 156.57 7 1 -0.065
## 251 462 112.21 6 0 -0.802
## 252 463 265.39 17 0 -0.370
## 253 464 202.28 2 5 -0.267
## 254 465 256.33 15 0 -0.301
## 255 466 243.68 9 1 -0.069
## 256 468 159.26 8 0 0.660
## 257 471 149.21 9 0 0.573
## 258 472 106.61 5 1 -0.767
## 259 473 70.15 5 0 -0.898
## 260 476 186.22 12 0 -0.305
## 261 477 240.31 10 0 -0.107
## 262 478 82.16 6 0 -0.921
## 263 479 70.15 5 0 -0.898
## 264 480 180.22 12 0 -0.835
## 265 482 141.05 5 2 -0.668
## 266 483 127.02 4 2 -0.614
## 267 484 184.26 12 0 2.173
## 268 486 158.27 9 0 -0.790
## 269 487 265.39 15 0 -0.776
## 270 488 194.25 11 0 -0.235
## 271 489 248.02 7 1 -0.065
## 272 490 283.83 15 1 -0.776
## 273 491 106.61 5 1 -0.767
## 274 492 236.30 12 0 -0.198
## 275 493 158.27 9 0 -0.206
## 276 494 241.46 6 3 1.688
## 277 495 324.46 20 0 -0.383
## 278 496 164.21 6 0 0.057
## 279 497 179.23 13 0 -0.905
## 280 498 156.62 8 1 -0.161
## 281 499 106.18 8 0 -0.946
## 282 501 261.07 9 2 -0.550
## 283 502 106.18 8 0 -0.946
## 284 503 326.48 20 0 0.214
## 285 505 169.62 8 1 -0.119
## 286 506 264.34 11 0 1.306
## 287 507 290.36 14 0 1.968
## 288 508 432.99 9 2 2.350
## 289 509 444.48 22 0 4.093
## 290 510 221.66 10 1 0.561
## 291 513 151.06 5 1 -0.767
## 292 514 267.34 11 0 1.306
## 293 515 160.18 10 0 0.509
## 294 516 148.22 10 0 -0.877
## 295 518 279.38 14 0 0.410
## 296 521 310.37 12 0 1.250
## 297 523 176.23 11 0 -0.822
## 298 524 218.09 9 2 -0.127
## 299 525 250.33 13 0 0.429
## 300 526 144.29 9 0 -0.257
## 301 530 149.26 10 0 -0.877
## 302 531 221.33 13 0 0.390
## 303 532 227.38 9 0 0.626
## 304 533 295.75 7 1 1.539
## 305 534 256.36 11 0 0.526
## 306 535 147.00 6 2 -0.710
## 307 536 294.43 19 0 -0.399
## 308 538 302.25 15 0 2.779
## 309 539 294.43 19 0 -0.399
## 310 542 360.49 21 0 0.213
## 311 543 120.64 6 1 -0.802
## 312 544 444.48 22 0 4.093
## 313 545 185.40 12 0 -0.897
## 314 546 170.92 2 6 -0.237
## 315 548 148.22 10 0 -0.877
## 316 549 222.00 6 2 -0.710
## 317 551 331.61 1 4 -0.180
## 318 552 413.46 22 0 -0.726
## 319 553 235.33 12 0 -0.229
## 320 554 240.73 9 1 0.634
## 321 556 214.23 13 0 -0.291
## 322 557 299.84 14 1 1.152
## 323 558 167.27 7 0 -0.065
## 324 559 72.17 5 0 -0.898
## 325 560 120.21 9 0 -0.954
## 326 561 191.45 6 2 -0.710
## 327 562 120.21 9 0 -0.954
## 328 563 212.26 14 0 -0.352
## 329 565 84.18 6 0 -0.921
## 330 566 162.02 6 2 0.843
## 331 567 310.33 11 3 0.557
## 332 568 261.07 8 1 -0.119
## 333 569 269.80 14 1 -0.763
## 334 571 220.06 8 2 0.679
## 335 572 250.38 14 0 1.142
## 336 574 96.19 7 0 -0.936
## 337 576 280.34 11 0 1.303
## 338 577 201.24 12 0 -0.265
## 339 579 201.24 12 0 -0.265
## 340 580 389.91 7 2 0.752
## 341 582 156.30 10 0 -0.877
## 342 583 120.21 9 0 -0.954
## 343 584 334.45 21 0 -0.830
## 344 586 299.31 12 0 -0.621
## 345 587 179.23 13 0 -0.905
## 346 588 242.38 11 0 0.512
## 347 589 408.64 24 0 1.561
## 348 591 324.46 20 0 -0.383
## 349 592 120.21 9 0 -0.954
## 350 593 301.32 16 2 -0.265
## 351 594 158.27 9 0 -0.790
## 352 595 223.82 8 1 -0.646
## 353 596 270.39 12 0 0.483
## 354 597 279.41 19 0 -0.891
## 355 598 184.31 11 0 -0.277
## 356 599 120.21 9 0 -0.954
## 357 600 286.38 13 0 -0.672
## 358 603 172.30 10 0 -0.244
## 359 604 254.39 12 0 0.468
## 360 605 300.43 19 0 -0.851
## 361 606 178.25 11 0 -0.822
## 362 608 235.90 6 2 -0.710
## 363 609 250.31 10 0 1.366
## 364 611 98.21 7 0 -0.936
## 365 612 249.11 9 2 -0.069
## 366 613 171.04 7 1 -0.828
## 367 614 126.59 7 1 -0.828
## 368 616 86.20 6 0 -0.921
## 369 617 271.39 17 0 -0.836
## 370 619 421.46 15 3 1.881
## 371 620 181.44 6 3 -0.636
## 372 621 213.25 13 0 0.390
## 373 622 128.18 10 0 -0.960
## 374 623 321.17 15 2 0.378
## 375 625 293.78 14 1 -0.689
## 376 628 392.51 22 1 0.882
## 377 629 392.64 24 0 0.811
## 378 630 266.42 18 0 -0.419
## 379 631 110.22 8 0 -0.946
## 380 632 236.72 2 6 -0.237
## 381 633 249.10 10 2 -0.134
## 382 635 311.45 14 0 0.425
## 383 636 394.48 21 1 1.662
## 384 637 286.45 19 0 -0.891
## 385 638 254.30 16 0 -0.353
## 386 639 250.31 10 0 1.366
## 387 641 324.41 19 0 -0.342
## 388 648 354.84 15 1 1.908
## 389 650 258.35 9 0 0.634
## 390 651 308.41 19 0 -0.815
## 391 653 392.64 24 0 0.811
## 392 654 293.45 17 0 -0.797
## 393 655 134.24 10 0 -0.960
## 394 656 162.62 10 1 -0.877
## 395 657 221.33 13 0 0.390
## 396 1082 106.61 5 1 -0.767
## 397 1083 198.28 14 0 -0.395
## 398 1084 156.30 10 0 -0.294
## 399 1086 157.01 6 1 -0.802
## 400 1088 214.67 9 1 -0.096
## 401 1089 188.30 12 0 1.244
## 402 1090 181.21 9 0 2.417
## 403 1091 194.30 12 0 0.408
## 404 1092 142.27 9 0 -0.864
## 405 1093 197.03 5 2 -0.439
## 406 1094 136.16 8 0 -0.161
## 407 1095 147.14 8 0 -0.119
## 408 1097 106.61 5 1 -0.767
## 409 1098 262.34 14 0 0.393
## 410 1101 380.68 8 3 2.272
## 411 1103 300.28 16 0 1.870
## 412 1104 249.32 11 0 1.309
## 413 1105 164.21 6 0 0.057
## 414 1106 129.28 8 0 0.630
## 415 1108 106.18 8 0 -0.946
## 416 1110 204.01 6 1 -0.802
## 417 1112 264.34 11 0 1.306
## 418 1113 228.31 15 0 0.295
## 419 1115 212.26 14 0 -0.352
## 420 1116 196.70 10 1 -0.748
## 421 1117 151.18 8 0 0.660
## 422 1119 255.48 8 3 -0.027
## 423 1120 362.51 21 0 0.898
## 424 1121 303.36 14 0 0.438
## 425 1122 374.52 22 0 0.872
## 426 1124 172.19 11 0 -0.822
## 427 1125 187.37 2 6 -0.237
## 428 1126 147.00 6 2 -0.710
## 429 1127 118.19 9 0 -0.954
## 430 1128 214.07 8 1 -0.119
## 431 1129 126.59 7 1 -0.828
## 432 1130 239.48 8 3 -0.053
## 433 1131 84.18 6 0 -0.921
## 434 1133 266.11 10 1 0.561
## 435 1135 142.27 9 0 -0.864
## 436 1136 171.04 7 1 -0.828
## 437 1138 665.81 33 0 4.421
## 438 1139 360.49 21 0 0.898
## 439 1141 84.18 6 0 -0.921
## 440 1142 147.00 6 2 -0.710
## 441 1143 322.39 19 0 -0.782
## 442 1144 285.37 17 0 0.284
## 443 1145 289.77 10 1 2.214
## 444 1146 173.29 9 0 0.590
## 445 1147 203.39 10 0 -0.748
## 446 1149 213.68 10 1 -0.166
## 447 1150 172.30 10 0 -0.807
## 448 1151 215.40 11 0 -0.766
## 449 1152 285.37 17 0 -0.797
## 450 1153 570.66 4 4 0.211
## 451 1156 186.33 11 0 -0.277
## 452 1158 288.27 15 0 1.921
## 453 1159 216.20 12 0 -0.734
## 454 1160 408.38 22 0 0.243
## 455 1161 330.77 12 1 2.062
## 456 1163 179.23 13 0 -0.905
## 457 1165 217.42 11 0 -0.766
## 458 1167 240.35 14 0 0.353
## 459 1168 343.52 20 0 -0.356
## 460 1169 142.21 11 0 -0.965
## 461 1170 312.46 12 0 1.251
## 462 1173 134.24 10 0 -0.960
## 463 1174 392.51 22 1 0.882
## 464 1178 302.50 20 0 -0.444
## 465 1180 98.21 7 0 -0.936
## 466 1181 86.20 6 0 -0.921
## 467 1182 360.49 21 0 0.213
## 468 1184 295.32 16 0 0.348
## 469 1186 240.48 6 0 -0.484
## 470 1187 236.72 12 1 -0.229
## 471 1188 271.36 15 0 -0.301
## 472 1295 215.72 8 1 0.686
## 473 1297 233.11 9 2 -0.096
## 474 1300 284.76 16 1 -0.787
## 475 1304 304.39 12 0 -0.621
## 476 1306 330.40 10 0 -0.517
## 477 218 122.18 8 0 -0.213
## 478 219 116.18 6 0 -0.710
## 479 221 127.58 6 1 0.846
## 480 223 116.23 7 0 -0.158
## 481 225 122.18 8 0 -0.213
## 482 226 236.35 13 0 0.411
## 483 227 169.62 8 1 -0.119
## 484 228 116.23 7 0 -0.158
## 485 229 150.19 9 0 -0.206
## 486 230 114.21 7 0 -0.828
## 487 232 122.18 8 0 -0.213
## 488 233 129.28 8 0 -0.213
## 489 234 84.15 4 0 -0.719
## 490 235 129.17 9 0 -0.864
## 491 238 133.40 2 3 -0.359
## 492 239 136.16 8 0 -0.768
## 493 241 117.13 3 0 4.626
## 494 242 117.16 8 0 -0.213
## 495 244 137.15 7 0 1.690
## 496 245 299.40 18 0 -0.354
## 497 246 107.17 7 0 0.728
## 498 247 156.20 10 0 -0.807
## 499 249 116.23 7 0 -0.158
## 500 250 122.13 7 0 -0.107
## 501 252 178.24 7 0 -0.565
## 502 253 163.00 6 2 -0.001
## 503 254 116.23 7 0 -0.158
## 504 255 142.22 8 0 -0.768
## 505 257 182.16 6 0 1.688
## 506 259 223.25 11 0 -0.168
## 507 260 163.19 9 0 -0.164
## 508 261 151.18 8 0 0.660
## 509 263 112.99 3 2 -0.539
## 510 264 142.22 8 0 -0.768
## 511 265 279.37 15 0 -0.738
## 512 267 116.23 7 0 -0.158
## 513 268 164.23 9 0 -0.164
## 514 269 214.27 8 0 1.522
## 515 270 265.34 14 0 0.393
## 516 271 218.36 9 0 -0.096
## 517 273 112.99 3 2 -0.539
## 518 274 102.20 6 0 -0.802
## 519 275 150.14 8 0 -0.702
## 520 276 78.12 6 0 -0.921
## 521 277 96.94 2 2 -0.431
## 522 278 183.20 7 0 -0.002
## 523 279 154.28 10 0 -0.877
## 524 281 127.58 6 1 0.846
## 525 282 218.29 9 0 2.350
## 526 285 187.86 2 2 -0.431
## 527 288 114.21 7 0 -0.828
## 528 291 130.26 8 0 -0.213
## 529 292 130.26 8 0 -0.213
## 530 293 218.22 12 0 1.250
## 531 294 123.00 3 1 -0.646
## 532 296 167.84 2 4 -0.307
## 533 297 171.24 7 0 0.748
## 534 299 178.24 7 0 -0.565
## 535 300 137.15 7 0 1.690
## 536 301 234.38 14 0 -0.314
## 537 302 112.14 6 0 -0.039
## 538 303 152.24 7 0 1.690
## 539 304 133.21 3 0 0.313
## 540 307 153.17 8 1 -0.119
## 541 309 165.21 9 0 -0.164
## 542 310 96.11 6 1 -0.802
## 543 311 116.23 7 0 -0.158
## 544 313 134.19 9 0 -0.864
## 545 314 236.31 5 0 0.895
## 546 315 346.41 19 0 0.966
## 547 316 130.26 8 0 -0.848
## 548 317 108.15 7 0 -0.828
## 549 318 144.24 8 0 -0.161
## 550 319 299.40 18 0 -0.354
## 551 321 234.43 15 0 -0.865
## 552 322 130.21 7 0 -0.742
## 553 323 247.37 15 0 -0.818
## 554 324 194.20 10 0 -0.696
## 555 326 208.27 1 3 -0.215
## 556 327 252.72 1 3 -0.215
## 557 328 289.41 17 0 -0.338
## 558 329 193.22 10 0 -0.166
## 559 330 130.21 7 0 -0.742
## 560 331 210.20 10 0 -0.134
## 561 334 149.21 9 0 -0.206
## 562 336 156.11 5 0 1.837
## 563 337 214.28 14 0 0.329
## 564 338 284.40 10 0 1.361
## 565 340 238.33 11 0 -0.634
## 566 342 138.14 6 0 0.839
## 567 343 92.58 4 1 -0.719
## 568 345 80.14 6 0 -0.921
## 569 348 171.24 7 0 0.748
## 570 349 92.58 4 1 -0.719
## 571 351 209.27 11 0 -0.200
## 572 352 324.44 15 0 0.393
## 573 354 150.24 10 0 -0.877
## 574 355 136.16 8 0 -0.161
## 575 356 204.25 11 0 0.496
## 576 357 175.20 10 0 0.530
## 577 359 149.21 9 0 -0.206
## 578 360 245.31 13 0 -0.709
## 579 361 170.00 3 1 -0.646
## 580 362 68.13 5 0 -0.898
## 581 363 130.26 8 0 -0.213
## 582 364 394.51 23 0 -0.785
## 583 366 130.26 8 0 -0.213
## 584 368 289.41 17 0 -0.338
## 585 369 110.19 6 0 -0.088
## 586 371 162.31 5 0 -0.530
## 587 372 136.16 8 0 -0.161
## 588 373 145.17 9 0 -0.206
## 589 374 185.26 8 0 0.679
## 590 375 92.15 7 0 -0.936
## 591 376 186.60 8 1 -0.083
## 592 377 145.17 9 0 -0.206
## 593 378 236.30 12 0 0.468
## 594 380 244.27 13 0 0.429
## 595 381 271.38 11 0 2.159
## 596 382 167.84 2 4 -0.307
## 597 385 198.25 9 0 0.616
## 598 387 228.26 14 0 0.353
## 599 388 144.18 10 0 -0.294
## 600 389 150.24 10 0 -0.294
## 601 390 200.63 9 1 -0.127
## 602 392 198.25 9 0 -0.096
## 603 393 200.63 9 1 -0.127
## 604 395 278.37 12 0 1.251
## 605 397 128.17 4 0 1.083
## 606 398 144.18 10 0 -0.294
## 607 401 144.24 8 0 -0.161
## 608 402 308.39 11 0 -0.600
## 609 403 150.08 6 4 -0.576
## 610 406 154.28 10 0 -0.294
## 611 407 314.40 15 0 1.114
## 612 408 179.24 10 0 0.530
## 613 409 151.19 7 0 2.675
## 614 410 152.16 8 0 -0.119
## 615 411 197.44 6 3 0.031
## 616 412 217.29 13 0 -0.291
## 617 414 222.26 12 0 -0.734
## 618 416 128.24 8 0 -0.848
## 619 419 178.24 7 0 -0.565
## 620 420 164.21 6 0 0.057
## 621 421 179.24 10 0 -0.202
## 622 422 137.03 4 1 -0.719
## 623 424 236.33 3 3 -0.460
## 624 425 128.14 8 0 -0.768
## 625 426 136.13 5 0 0.935
## 626 427 126.59 7 1 -0.828
## 627 428 170.22 12 0 -0.352
## 628 429 124.22 7 0 -0.828
## 629 430 127.02 4 2 -0.614
## 630 431 166.19 9 0 -0.727
## 631 432 270.37 9 0 1.424
## 632 435 137.03 4 1 -0.719
## 633 436 142.20 5 0 0.949
## 634 438 267.33 12 0 -0.171
## 635 441 142.20 5 0 0.949
## 636 442 235.07 9 2 -0.096
## 637 443 206.03 7 2 1.636
## 638 444 216.69 9 1 -0.096
## 639 445 148.17 9 0 -0.206
## 640 446 211.71 11 1 -0.766
## 641 981 76.53 3 1 -0.646
## 642 982 172.23 6 0 2.763
## 643 985 78.55 3 1 -0.646
## 644 987 200.26 10 0 0.547
## 645 988 527.63 27 0 3.025
## 646 990 128.19 7 0 -0.107
## 647 992 131.20 6 0 0.843
## 648 993 161.14 7 3 0.748
## 649 994 116.23 7 0 -0.158
## 650 995 112.10 4 0 1.083
## 651 998 127.26 8 0 -0.213
## 652 1001 163.82 1 3 -0.215
## 653 1002 157.21 6 0 0.839
## 654 1004 158.14 4 0 4.032
## 655 1005 155.97 2 1 -0.528
## 656 1006 130.26 8 0 -0.213
## 657 1007 197.21 9 0 3.384
## 658 1010 68.13 5 0 -0.898
## 659 1011 230.27 8 0 1.508
## 660 1012 194.20 10 0 -0.696
## 661 1013 179.19 9 0 0.605
## 662 1016 309.54 8 3 1.482
## 663 1017 115.25 7 0 0.728
## 664 1018 136.16 8 0 -0.768
## 665 1019 152.26 10 0 -0.877
## 666 1020 157.44 3 2 -0.539
## 667 1021 54.10 4 0 -0.861
## 668 1022 146.15 9 0 -0.790
## 669 1023 156.57 7 1 -0.065
## 670 1025 147.43 3 3 -0.460
## 671 1026 143.20 10 0 0.485
## 672 1027 157.23 11 0 -0.888
## 673 1028 266.29 11 0 3.047
## 674 1029 132.18 8 0 -0.161
## 675 1030 124.15 7 0 -0.107
## 676 1031 131.38 2 3 -0.359
## 677 1032 114.10 6 2 -0.710
## 678 1033 154.28 10 0 -0.294
## 679 1034 214.28 7 0 4.587
## 680 1035 152.26 10 0 -0.877
## 681 1036 130.21 7 0 -0.742
## 682 1037 92.58 4 1 -0.719
## 683 1038 114.10 6 2 -0.710
## 684 1039 68.13 5 0 -0.898
## 685 1041 152.26 10 0 -0.877
## 686 1044 201.89 3 2 -0.539
## 687 1045 241.70 10 1 -0.107
## 688 1046 225.34 11 0 -0.200
## 689 1047 334.42 16 0 1.860
## 690 1048 68.13 5 0 -0.898
## 691 1051 201.89 3 2 -0.539
## 692 1053 152.26 10 0 -0.877
## 693 1054 90.21 4 0 0.132
## 694 1055 197.44 6 3 0.031
## 695 1057 303.39 17 0 -0.762
## 696 1058 225.26 14 0 -0.314
## 697 1059 278.37 12 0 1.251
## 698 1060 143.31 9 0 -0.864
## 699 1061 242.24 14 0 0.374
## 700 1062 186.60 8 1 -0.083
## 701 1063 136.13 5 0 0.935
## 702 1064 163.07 6 1 -0.802
## 703 1065 198.29 13 0 2.089
## 704 1066 144.24 8 0 -0.768
## 705 1068 122.18 8 0 -0.848
## 706 1069 221.29 11 0 -0.168
## 707 1071 222.28 4 0 1.837
## 708 1075 130.26 8 0 -0.213
## 709 1076 260.26 14 0 -0.724
## 710 1077 150.24 10 0 -0.294
## 711 1078 145.17 9 0 -0.206
## 712 1292 180.17 9 0 -0.127
## 713 1293 180.19 7 0 0.022
## 714 1296 232.26 12 0 0.468
## 715 1 59.08 2 0 1.769
## 716 2 46.09 1 0 4.025
## 717 3 60.06 2 0 0.538
## 718 4 71.14 4 0 0.132
## 719 5 76.07 1 0 4.466
## 720 6 92.11 3 0 2.492
## 721 7 87.14 4 0 -0.614
## 722 8 80.10 4 0 -0.614
## 723 9 80.10 4 0 -0.614
## 724 10 182.20 6 0 4.881
## 725 11 103.14 4 0 2.214
## 726 12 86.16 4 0 1.164
## 727 13 85.17 5 0 0.004
## 728 14 95.11 5 0 0.046
## 729 15 95.11 5 0 0.046
## 730 16 132.13 5 0 0.949
## 731 17 101.17 5 0 -0.668
## 732 18 75.08 2 0 1.590
## 733 19 109.14 6 0 -0.710
## 734 22 155.22 8 0 -0.702
## 735 24 79.11 5 0 -0.767
## 736 26 180.18 6 0 3.757
## 737 27 110.12 6 0 0.846
## 738 29 96.10 4 0 0.180
## 739 30 58.09 3 0 -0.646
## 740 35 114.22 6 0 0.846
## 741 36 76.11 3 0 -0.539
## 742 39 150.15 5 0 2.934
## 743 40 107.17 7 0 -0.828
## 744 41 107.17 7 0 -0.828
## 745 43 180.18 6 0 4.881
## 746 44 70.10 4 0 -0.719
## 747 45 103.14 4 0 2.214
## 748 46 99.10 4 0 0.180
## 749 47 88.12 4 0 0.158
## 750 48 99.20 6 0 -0.802
## 751 50 74.09 3 0 -0.539
## 752 51 53.07 3 0 -0.646
## 753 52 86.15 5 0 -0.767
## 754 53 117.17 5 0 0.964
## 755 56 74.14 4 0 0.132
## 756 57 122.21 4 0 -0.535
## 757 59 137.16 6 0 1.786
## 758 62 185.25 9 0 0.605
## 759 63 86.15 5 0 -0.767
## 760 64 128.15 5 0 0.949
## 761 66 119.14 4 0 3.268
## 762 67 74.14 4 0 -0.719
## 763 68 96.09 5 0 -0.668
## 764 70 149.24 5 0 3.032
## 765 71 101.22 6 0 -0.802
## 766 72 67.10 4 0 0.132
## 767 74 254.28 10 0 0.592
## 768 75 114.17 5 0 -0.591
## 769 76 86.15 5 0 -0.767
## 770 77 88.12 4 0 0.158
## 771 78 160.20 6 0 0.836
## 772 79 118.10 4 0 1.083
## 773 80 88.17 5 0 0.004
## 774 81 86.10 4 0 -0.614
## 775 82 88.17 5 0 0.004
## 776 83 186.18 8 0 0.679
## 777 84 88.17 5 0 0.004
## 778 85 218.23 9 0 1.443
## 779 87 124.15 7 0 0.736
## 780 88 117.17 5 0 0.964
## 781 89 84.10 2 0 3.947
## 782 92 74.14 4 0 -0.719
## 783 94 108.15 7 0 -0.158
## 784 95 149.24 5 0 1.936
## 785 97 165.26 10 0 0.509
## 786 99 141.24 8 0 -0.161
## 787 101 137.15 7 0 -0.672
## 788 102 146.17 7 0 -0.614
## 789 103 137.20 8 0 1.594
## 790 105 88.12 4 0 -0.614
## 791 106 585.70 22 0 13.483
## 792 107 102.20 6 0 -0.088
## 793 108 155.18 6 0 2.763
## 794 109 146.26 8 0 0.647
## 795 110 146.17 5 0 4.068
## 796 111 58.09 3 0 -0.646
## 797 113 218.23 9 0 -0.586
## 798 114 102.20 6 0 -0.088
## 799 116 84.93 1 2 -0.264
## 800 117 117.17 5 0 0.964
## 801 118 88.17 5 0 -0.767
## 802 120 108.15 7 0 -0.158
## 803 121 121.15 7 0 -0.107
## 804 122 128.56 6 1 -0.039
## 805 123 128.56 6 1 -0.039
## 806 124 108.15 7 0 -0.158
## 807 125 102.20 6 0 -0.088
## 808 126 138.18 8 0 -0.161
## 809 127 102.20 6 0 -0.088
## 810 129 116.23 7 0 -0.158
## 811 131 181.26 10 0 0.530
## 812 132 100.13 5 0 -0.668
## 813 134 109.14 6 0 0.846
## 814 135 132.14 4 0 4.320
## 815 136 457.48 20 0 4.155
## 816 137 118.20 6 0 -0.710
## 817 138 119.14 6 0 -0.001
## 818 140 126.13 5 0 0.105
## 819 141 102.20 6 0 -0.088
## 820 142 100.13 5 0 -0.668
## 821 144 160.19 7 0 -0.614
## 822 147 162.30 6 0 -0.636
## 823 148 98.16 6 0 -0.802
## 824 151 103.10 2 0 1.385
## 825 153 107.17 7 0 0.728
## 826 154 116.23 7 0 -0.158
## 827 155 286.31 13 0 2.917
## 828 156 122.13 7 0 -0.107
## 829 158 114.21 7 0 -0.158
## 830 159 142.22 8 0 -0.768
## 831 160 129.38 1 2 -0.264
## 832 163 162.17 7 0 -0.565
## 833 164 122.18 8 0 -0.213
## 834 165 116.23 7 0 -0.158
## 835 168 121.15 7 0 0.736
## 836 169 122.13 7 0 -0.107
## 837 170 194.22 8 0 -0.557
## 838 171 150.20 8 0 1.574
## 839 172 108.10 6 0 -0.710
## 840 174 141.94 1 1 -0.315
## 841 175 116.23 7 0 -0.158
## 842 177 151.18 8 0 0.660
## 843 178 119.18 8 0 -0.213
## 844 179 130.09 4 1 1.055
## 845 180 116.23 7 0 -0.158
## 846 181 173.01 6 1 -0.039
## 847 183 102.20 6 0 -0.802
## 848 184 152.16 8 0 -0.119
## 849 185 162.17 7 0 -0.565
## 850 186 162.17 7 0 -0.565
## 851 188 281.39 15 0 0.361
## 852 189 102.20 6 0 -0.088
## 853 191 119.13 7 0 -0.742
## 854 192 175.09 4 2 -0.473
## 855 193 102.20 6 0 -0.088
## 856 196 106.13 7 0 -0.828
## 857 197 122.18 8 0 -0.213
## 858 198 210.25 11 0 -0.716
## 859 199 128.19 7 0 -0.742
## 860 200 239.35 13 0 2.063
## 861 202 204.25 11 0 2.219
## 862 203 214.27 8 0 1.522
## 863 204 116.18 6 0 -0.710
## 864 205 102.20 6 0 -0.088
## 865 206 142.08 4 0 1.031
## 866 209 135.18 8 0 0.647
## 867 210 116.18 6 0 -0.710
## 868 211 98.96 2 2 -0.431
## 869 212 165.21 9 0 -0.164
## 870 213 129.17 9 0 -0.864
## 871 214 100.18 6 0 -0.802
## 872 216 135.18 8 0 -0.161
## 873 217 114.21 7 0 -0.828
## 874 878 74.10 2 0 2.870
## 875 880 85.12 4 0 0.158
## 876 881 87.14 4 0 0.158
## 877 882 88.12 4 0 0.158
## 878 883 99.15 5 0 -0.668
## 879 884 109.14 6 0 -0.039
## 880 885 60.07 1 0 5.000
## 881 886 71.09 3 0 1.410
## 882 887 116.19 5 0 -0.591
## 883 888 59.13 3 0 -0.646
## 884 889 162.26 10 0 -0.807
## 885 890 180.18 6 0 3.757
## 886 891 122.14 4 0 3.268
## 887 892 74.09 3 0 -0.539
## 888 893 192.14 6 0 2.641
## 889 894 182.18 6 0 -0.526
## 890 896 90.04 2 0 1.471
## 891 897 107.17 7 0 -0.828
## 892 898 107.17 7 0 -0.828
## 893 900 114.08 3 3 0.313
## 894 901 146.17 7 0 -0.614
## 895 902 86.10 4 0 0.158
## 896 904 93.52 2 1 1.590
## 897 905 88.12 4 0 -0.614
## 898 906 74.14 4 0 -0.719
## 899 907 128.99 3 2 0.311
## 900 908 88.12 4 0 -0.614
## 901 909 110.12 6 0 0.846
## 902 910 257.44 4 3 0.240
## 903 911 88.17 5 0 -0.767
## 904 912 151.18 8 0 0.660
## 905 913 100.16 3 0 1.317
## 906 914 100.09 3 0 1.251
## 907 915 114.07 3 0 1.201
## 908 916 88.17 5 0 0.004
## 909 918 93.14 6 0 0.850
## 910 919 102.20 6 0 -0.088
## 911 920 118.20 6 0 -0.039
## 912 921 118.20 6 0 -0.710
## 913 922 199.98 3 1 0.311
## 914 923 101.22 6 0 -0.088
## 915 924 88.17 5 0 0.004
## 916 925 112.09 5 0 0.079
## 917 926 102.20 6 0 -0.088
## 918 928 102.15 5 0 -0.668
## 919 930 62.15 2 0 0.638
## 920 934 108.15 7 0 -0.158
## 921 935 102.18 3 0 1.317
## 922 936 122.18 8 0 -0.213
## 923 938 102.20 6 0 -0.088
## 924 939 100.18 6 0 -0.802
## 925 940 116.18 6 0 -0.039
## 926 941 146.16 6 0 0.839
## 927 942 114.12 4 0 1.083
## 928 943 211.29 11 0 -0.716
## 929 945 116.23 7 0 -0.158
## 930 946 116.23 7 0 -0.158
## 931 948 123.13 5 0 0.949
## 932 950 112.19 7 0 -0.828
## 933 951 150.20 8 0 1.574
## 934 952 149.21 9 0 -0.790
## 935 954 103.13 7 0 -0.828
## 936 956 102.20 6 0 -0.088
## 937 957 98.96 2 2 -0.431
## 938 959 128.56 6 1 -0.039
## 939 960 116.18 6 0 -0.039
## 940 962 102.20 6 0 -0.088
## 941 965 170.13 7 0 2.617
## 942 966 173.83 1 2 -0.264
## 943 967 159.20 10 0 -0.244
## 944 968 107.17 7 0 0.728
## 945 969 268.26 10 0 2.193
## 946 970 184.16 8 0 1.538
## 947 972 126.13 5 0 0.949
## 948 974 107.17 7 0 -0.158
## 949 975 114.21 7 0 -0.828
## 950 978 185.84 2 2 -0.431
## 951 979 102.20 6 0 -0.802
## Log_Solubility_Class LR_Prob LR_SE LR_LCL LR_UCL LR_LP
## 1 Low 0.16489 0.02712 0.11174 0.21805 -1.622284868
## 2 Low 0.01026 0.00378 0.00285 0.01767 -4.569189443
## 3 Low 0.32914 0.03190 0.26661 0.39167 -0.712077279
## 4 Low 0.52770 0.03708 0.45504 0.60037 0.110913564
## 5 Low 0.47501 0.03100 0.41425 0.53577 -0.100043358
## 6 Low 0.10501 0.02184 0.06221 0.14781 -2.142756961
## 7 Low 0.00642 0.00243 0.00166 0.01118 -5.041896464
## 8 Low 0.28961 0.03224 0.22641 0.35281 -0.897278925
## 9 Low 0.00590 0.00241 0.00118 0.01062 -5.126885454
## 10 Low 0.37971 0.03835 0.30454 0.45488 -0.490779307
## 11 Low 0.14905 0.02487 0.10031 0.19779 -1.742071552
## 12 Low 0.03261 0.00954 0.01391 0.05131 -3.389982734
## 13 Low 0.33270 0.03962 0.25504 0.41036 -0.695998537
## 14 Low 0.31566 0.03606 0.24498 0.38633 -0.773789185
## 15 Low 0.42336 0.03891 0.34709 0.49963 -0.308995274
## 16 Low 0.23089 0.02898 0.17408 0.28769 -1.203292596
## 17 Low 0.20025 0.04314 0.11570 0.28480 -1.384732593
## 18 Low 0.00363 0.00152 0.00065 0.00661 -5.614886026
## 19 Low 0.03211 0.00939 0.01370 0.05053 -3.405950936
## 20 Low 0.33270 0.03962 0.25504 0.41036 -0.695998537
## 21 Low 0.06743 0.01434 0.03933 0.09554 -2.626854193
## 22 Low 0.02128 0.00687 0.00781 0.03475 -3.828477931
## 23 Low 0.01628 0.00509 0.00629 0.02626 -4.101403943
## 24 Low 0.00590 0.00346 -0.00087 0.01268 -5.126885454
## 25 Low 0.08722 0.03424 0.02012 0.15433 -2.348061225
## 26 Low 0.28223 0.03850 0.20678 0.35768 -0.933426842
## 27 Low 0.01592 0.00540 0.00534 0.02650 -4.124131014
## 28 Low 0.53584 0.04422 0.44917 0.62250 0.143606289
## 29 Low 0.10464 0.02022 0.06500 0.14427 -2.146699984
## 30 Low 0.39364 0.04367 0.30805 0.47923 -0.432037083
## 31 Low 0.62994 0.03358 0.56412 0.69575 0.531959422
## 32 Low 0.79908 0.02471 0.75064 0.84751 1.380554247
## 33 Low 0.72215 0.02926 0.66480 0.77950 0.955151474
## 34 Low 0.33270 0.03962 0.25504 0.41036 -0.695998537
## 35 Low 0.10115 0.02010 0.06176 0.14054 -2.184511605
## 36 Low 0.33625 0.04027 0.25731 0.41518 -0.680050642
## 37 Low 0.72215 0.02926 0.66480 0.77950 0.955151474
## 38 Low 0.02280 0.00926 0.00465 0.04095 -3.757930803
## 39 Low 0.09913 0.02352 0.05304 0.14523 -2.206928843
## 40 Low 0.04242 0.01565 0.01175 0.07309 -3.116789319
## 41 Low 0.00751 0.00300 0.00162 0.01340 -4.883981471
## 42 Low 0.13716 0.02449 0.08917 0.18516 -1.839081147
## 43 Low 0.24525 0.02885 0.18870 0.30180 -1.124108470
## 44 Low 0.00390 0.00161 0.00075 0.00705 -5.542871101
## 45 Low 0.65915 0.03205 0.59633 0.72196 0.659508628
## 46 Low 0.00166 0.00088 -0.00005 0.00338 -6.399276297
## 47 Low 0.72215 0.02926 0.66480 0.77950 0.955151474
## 48 Low 0.31985 0.03360 0.25400 0.38570 -0.754461226
## 49 Low 0.03890 0.01207 0.01525 0.06255 -3.207084211
## 50 Low 0.08143 0.01808 0.04600 0.11687 -2.423074357
## 51 Low 0.12283 0.03980 0.04482 0.20084 -1.965899531
## 52 Low 0.79908 0.02471 0.75064 0.84751 1.380554247
## 53 Low 0.16240 0.02771 0.10808 0.21671 -1.640478232
## 54 Low 0.84026 0.02351 0.79417 0.88634 1.660163874
## 55 Low 0.13142 0.04075 0.05154 0.21130 -1.888461393
## 56 Low 0.02185 0.00710 0.00794 0.03576 -3.801462111
## 57 Low 0.37416 0.03766 0.30035 0.44797 -0.514411234
## 58 Low 0.39073 0.03336 0.32535 0.45611 -0.444244735
## 59 Low 0.01317 0.00445 0.00444 0.02189 -4.316556270
## 60 Low 0.01017 0.00352 0.00328 0.01706 -4.578091001
## 61 Low 0.42115 0.03010 0.36215 0.48015 -0.318054312
## 62 Low 0.38872 0.03341 0.32323 0.45421 -0.452695829
## 63 Low 0.13142 0.04075 0.05154 0.21130 -1.888461393
## 64 Low 0.33625 0.04027 0.25731 0.41518 -0.680050642
## 65 Low 0.06071 0.01462 0.03206 0.08937 -2.739015842
## 66 Low 0.43555 0.03228 0.37228 0.49882 -0.259242205
## 67 Low 0.00088 0.00046 -0.00002 0.00177 -7.034708263
## 68 Low 0.00052 0.00029 -0.00005 0.00109 -7.561161611
## 69 Low 0.72215 0.02926 0.66480 0.77950 0.955151474
## 70 Low 0.01761 0.00580 0.00624 0.02898 -4.021521456
## 71 Low 0.33270 0.03962 0.25504 0.41036 -0.695998537
## 72 Low 0.43505 0.05457 0.32809 0.54200 -0.261276265
## 73 Low 0.16733 0.02840 0.11167 0.22300 -1.604669495
## 74 Low 0.31595 0.07301 0.17286 0.45904 -0.772447041
## 75 Low 0.16240 0.02771 0.10808 0.21671 -1.640478232
## 76 Low 0.00198 0.00093 0.00015 0.00380 -6.222676471
## 77 Low 0.25015 0.03738 0.17689 0.32341 -1.097812449
## 78 Low 0.59853 0.09276 0.41671 0.78034 0.399343838
## 79 Low 0.24423 0.03611 0.17345 0.31500 -1.129626693
## 80 Low 0.00520 0.00220 0.00090 0.00951 -5.253883086
## 81 Low 0.00116 0.00057 0.00004 0.00229 -6.758174601
## 82 Low 0.47297 0.03812 0.39825 0.54768 -0.108225511
## 83 Low 0.01542 0.00489 0.00584 0.02499 -4.156549786
## 84 Low 0.03296 0.01026 0.01285 0.05308 -3.378945155
## 85 Low 0.12283 0.03980 0.04482 0.20084 -1.965899531
## 86 Low 0.62622 0.03333 0.56089 0.69155 0.516034358
## 87 Low 0.41505 0.03546 0.34555 0.48456 -0.343127379
## 88 Low 0.11156 0.02133 0.06975 0.15337 -2.074904553
## 89 Low 0.00365 0.00168 0.00035 0.00695 -5.609371434
## 90 Low 0.16231 0.04543 0.07327 0.25136 -1.641140017
## 91 Low 0.01695 0.00804 0.00118 0.03271 -4.060392150
## 92 Low 0.44042 0.05145 0.33959 0.54126 -0.239457685
## 93 Low 0.03222 0.01691 -0.00092 0.06535 -3.402417411
## 94 Low 0.01761 0.00580 0.00624 0.02897 -4.021521456
## 95 Low 0.16496 0.02713 0.11178 0.21813 -1.621776608
## 96 Low 0.00889 0.00637 -0.00359 0.02137 -4.713898478
## 97 Low 0.71895 0.02915 0.66180 0.77609 0.939259225
## 98 Low 0.06838 0.01710 0.03486 0.10190 -2.611844622
## 99 Low 0.00368 0.00164 0.00047 0.00689 -5.601155739
## 100 Low 0.06838 0.01710 0.03486 0.10190 -2.611844622
## 101 Low 0.06838 0.01710 0.03486 0.10190 -2.611844622
## 102 Low 0.00200 0.00097 0.00010 0.00390 -6.212606096
## 103 Low 0.22615 0.02886 0.16959 0.28271 -1.230179560
## 104 Low 0.03513 0.01026 0.01501 0.05525 -3.312937911
## 105 Low 0.01502 0.00620 0.00286 0.02718 -4.183238690
## 106 Low 0.02840 0.00838 0.01197 0.04483 -3.532555052
## 107 Low 0.02009 0.00647 0.00741 0.03278 -3.887238552
## 108 Low 0.00124 0.00065 -0.00003 0.00252 -6.691403130
## 109 Low 0.27169 0.11233 0.05152 0.49185 -0.986065071
## 110 Low 0.01163 0.00417 0.00346 0.01981 -4.442469155
## 111 Low 0.01804 0.00590 0.00647 0.02961 -3.996959060
## 112 Low 0.00330 0.00199 -0.00061 0.00720 -5.710527354
## 113 Low 0.04631 0.01207 0.02265 0.06997 -3.024980751
## 114 Low 0.01267 0.00429 0.00425 0.02109 -4.355767336
## 115 Low 0.02681 0.00920 0.00878 0.04483 -3.591804383
## 116 Low 0.03447 0.01200 0.01094 0.05800 -3.332587793
## 117 Low 0.23740 0.03085 0.17693 0.29786 -1.166987165
## 118 Low 0.02681 0.00920 0.00878 0.04483 -3.591804383
## 119 Low 0.08716 0.02142 0.04517 0.12914 -2.348815109
## 120 Low 0.11518 0.02931 0.05773 0.17263 -2.038888112
## 121 Low 0.00124 0.00079 -0.00031 0.00279 -6.691403130
## 122 Low 0.02681 0.00920 0.00878 0.04483 -3.591804383
## 123 Low 0.02728 0.01089 0.00594 0.04861 -3.573942438
## 124 Low 0.00425 0.00206 0.00021 0.00829 -5.456577239
## 125 Low 0.06838 0.01710 0.03486 0.10190 -2.611844622
## 126 Low 0.01015 0.00448 0.00138 0.01893 -4.580079711
## 127 Low 0.00418 0.00203 0.00021 0.00816 -5.473255272
## 128 Low 0.06838 0.01710 0.03486 0.10190 -2.611844622
## 129 Low 0.01015 0.00448 0.00138 0.01893 -4.580079711
## 130 Low 0.05840 0.01615 0.02673 0.09006 -2.780264666
## 131 Low 0.03928 0.01210 0.01555 0.06301 -3.196967520
## 132 Low 0.01015 0.00448 0.00138 0.01893 -4.580079711
## 133 Low 0.08466 0.02063 0.04422 0.12509 -2.380652346
## 134 Low 0.00425 0.00206 0.00021 0.00829 -5.456577239
## 135 Low 0.03868 0.01188 0.01540 0.06197 -3.212984669
## 136 Low 0.00378 0.00204 -0.00022 0.00779 -5.574244107
## 137 Low 0.01015 0.00448 0.00138 0.01893 -4.580079711
## 138 Low 0.00091 0.00049 -0.00005 0.00188 -7.001155544
## 139 Low 0.01015 0.00448 0.00138 0.01893 -4.580079711
## 140 Low 0.02674 0.00902 0.00906 0.04442 -3.594490690
## 141 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 142 Low 0.00378 0.00204 -0.00022 0.00779 -5.574244107
## 143 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 144 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 145 Low 0.01015 0.00448 0.00138 0.01893 -4.580079711
## 146 Low 0.00378 0.00204 -0.00022 0.00779 -5.574244107
## 147 Low 0.00051 0.00038 -0.00023 0.00126 -7.580589702
## 148 Low 0.01126 0.00448 0.00248 0.02004 -4.475174783
## 149 Low 0.01750 0.00647 0.00482 0.03018 -4.027899463
## 150 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 151 Low 0.01750 0.00647 0.00482 0.03018 -4.027899463
## 152 Low 0.00043 0.00026 -0.00008 0.00094 -7.751295257
## 153 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 154 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 155 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 156 Low 0.01750 0.00647 0.00482 0.03018 -4.027899463
## 157 Low 0.00019 0.00016 -0.00012 0.00050 -8.568296468
## 158 Low 0.00019 0.00016 -0.00012 0.00050 -8.568296468
## 159 Low 0.00002 0.00003 -0.00003 0.00008 -10.819758284
## 160 Low 0.32575 0.03593 0.25533 0.39616 -0.727470746
## 161 Low 0.61288 0.03229 0.54960 0.67617 0.459434436
## 162 Low 0.02118 0.00687 0.00770 0.03465 -3.833290424
## 163 Low 0.42336 0.03891 0.34709 0.49963 -0.308995274
## 164 Low 0.00406 0.00176 0.00061 0.00752 -5.502504041
## 165 Low 0.28600 0.05446 0.17925 0.39274 -0.914891152
## 166 Low 0.53167 0.03750 0.45817 0.60517 0.126849820
## 167 Low 0.34135 0.05262 0.23821 0.44448 -0.657283942
## 168 Low 0.72263 0.03418 0.65564 0.78962 0.957544980
## 169 Low 0.63862 0.08035 0.48114 0.79611 0.569379562
## 170 Low 0.00596 0.00247 0.00111 0.01081 -5.116706966
## 171 Low 0.00103 0.00053 -0.00001 0.00207 -6.877165946
## 172 Low 0.72215 0.02926 0.66480 0.77950 0.955151474
## 173 Low 0.17770 0.03487 0.10936 0.24605 -1.532008557
## 174 Low 0.06001 0.01998 0.02086 0.09916 -2.751358022
## 175 Low 0.12647 0.04834 0.03172 0.22122 -1.932537348
## 176 Low 0.04069 0.01096 0.01921 0.06217 -3.160231914
## 177 Low 0.00162 0.00081 0.00004 0.00321 -6.423707816
## 178 Low 0.08803 0.03567 0.01811 0.15796 -2.337929429
## 179 Low 0.01153 0.00468 0.00236 0.02070 -4.451205959
## 180 Low 0.04246 0.01285 0.01728 0.06764 -3.115805039
## 181 Low 0.04069 0.01096 0.01921 0.06217 -3.160231914
## 182 Low 0.00198 0.00093 0.00015 0.00380 -6.222676471
## 183 Low 0.30191 0.03347 0.23631 0.36751 -0.838219075
## 184 Low 0.01110 0.00389 0.00347 0.01874 -4.489648106
## 185 Low 0.09772 0.02309 0.05247 0.14297 -2.222818647
## 186 Low 0.00205 0.00096 0.00017 0.00394 -6.187863382
## 187 Low 0.52725 0.03586 0.45697 0.59754 0.109108112
## 188 Low 0.01513 0.00541 0.00453 0.02573 -4.175830125
## 189 Low 0.00050 0.00028 -0.00005 0.00104 -7.600402335
## 190 Low 0.34781 0.06710 0.21629 0.47933 -0.628679577
## 191 Low 0.00197 0.00097 0.00007 0.00387 -6.227749793
## 192 Low 0.15622 0.02552 0.10620 0.20624 -1.686626527
## 193 Low 0.17546 0.03145 0.11382 0.23711 -1.547414558
## 194 Low 0.01371 0.00488 0.00414 0.02328 -4.275824935
## 195 Low 0.33270 0.03962 0.25504 0.41036 -0.695998537
## 196 Low 0.00529 0.00219 0.00099 0.00959 -5.236632992
## 197 Low 0.10722 0.03338 0.04180 0.17264 -2.119457392
## 198 Low 0.04152 0.01270 0.01663 0.06641 -3.139173457
## 199 Low 0.05079 0.02574 0.00034 0.10124 -2.927930575
## 200 Low 0.16240 0.02771 0.10808 0.21671 -1.640478232
## 201 Low 0.32218 0.03784 0.24802 0.39635 -0.743771370
## 202 Low 0.05307 0.01385 0.02593 0.08021 -2.881613376
## 203 Low 0.00354 0.00157 0.00046 0.00661 -5.640082271
## 204 Low 0.03365 0.01139 0.01134 0.05597 -3.357513031
## 205 Low 0.00517 0.00304 -0.00079 0.01113 -5.259699180
## 206 Low 0.05208 0.01829 0.01623 0.08793 -2.901489113
## 207 Low 0.09870 0.03140 0.03716 0.16024 -2.211753219
## 208 Low 0.06838 0.01710 0.03486 0.10190 -2.611844622
## 209 Low 0.12476 0.02649 0.07283 0.17669 -1.948106243
## 210 Low 0.08343 0.02025 0.04374 0.12313 -2.396630485
## 211 Low 0.02681 0.00920 0.00878 0.04483 -3.591804383
## 212 Low 0.00124 0.00079 -0.00031 0.00279 -6.691403130
## 213 Low 0.02681 0.00920 0.00878 0.04483 -3.591804383
## 214 Low 0.00114 0.00056 0.00004 0.00224 -6.775586366
## 215 Low 0.00121 0.00086 -0.00047 0.00290 -6.715924187
## 216 Low 0.01015 0.00448 0.00138 0.01893 -4.580079711
## 217 Low 0.00155 0.00090 -0.00021 0.00331 -6.467949146
## 218 Low 0.02782 0.00958 0.00904 0.04661 -3.553785786
## 219 Low 0.10608 0.02057 0.06576 0.14640 -2.131422759
## 220 Low 0.01015 0.00448 0.00138 0.01893 -4.580079711
## 221 Low 0.11255 0.02205 0.06933 0.15578 -2.064954614
## 222 Low 0.00136 0.00087 -0.00034 0.00306 -6.598909654
## 223 Low 0.01750 0.00647 0.00482 0.03018 -4.027899463
## 224 Low 0.00378 0.00204 -0.00022 0.00779 -5.574244107
## 225 Low 0.05840 0.01615 0.02673 0.09006 -2.780264666
## 226 Low 0.03928 0.01210 0.01555 0.06301 -3.196967520
## 227 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 228 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 229 Low 0.01750 0.00647 0.00482 0.03018 -4.027899463
## 230 Low 0.03928 0.01210 0.01555 0.06301 -3.196967520
## 231 Low 0.00140 0.00090 -0.00036 0.00316 -6.569882061
## 232 Low 0.00007 0.00006 -0.00006 0.00020 -9.566945313
## 233 Low 0.27274 0.03537 0.20342 0.34206 -0.980765087
## 234 Low 0.03261 0.00954 0.01391 0.05131 -3.389982734
## 235 Low 0.01957 0.01057 -0.00115 0.04030 -3.913993470
## 236 Low 0.04526 0.01605 0.01380 0.07671 -3.049015412
## 237 Low 0.00155 0.00090 -0.00021 0.00331 -6.467949146
## 238 Low 0.00044 0.00035 -0.00026 0.00113 -7.728295734
## 239 High 0.38656 0.04008 0.30801 0.46511 -0.461795365
## 240 High 0.22995 0.09640 0.04101 0.41888 -1.208593554
## 241 High 0.90877 0.01568 0.87803 0.93951 2.298708247
## 242 High 0.42743 0.05122 0.32704 0.52782 -0.292344466
## 243 High 0.76576 0.02219 0.72226 0.80926 1.184522574
## 244 High 0.65509 0.05080 0.55552 0.75466 0.641489117
## 245 High 0.39965 0.02708 0.34658 0.45272 -0.406923654
## 246 High 0.31952 0.03056 0.25962 0.37941 -0.755978562
## 247 High 0.75560 0.04255 0.67221 0.83899 1.128705908
## 248 High 0.61970 0.03053 0.55986 0.67955 0.488275075
## 249 High 0.86520 0.02004 0.82592 0.90448 1.859168496
## 250 High 0.75412 0.02360 0.70787 0.80037 1.120707894
## 251 High 0.85225 0.02003 0.81299 0.89151 1.752358255
## 252 High 0.07581 0.01704 0.04242 0.10920 -2.500687468
## 253 High 0.28251 0.08757 0.11086 0.45416 -0.932045062
## 254 High 0.15262 0.02446 0.10468 0.20056 -1.714198064
## 255 High 0.44773 0.03600 0.37718 0.51829 -0.209846685
## 256 High 0.91211 0.01498 0.88275 0.94148 2.339674564
## 257 High 0.88122 0.01963 0.84274 0.91970 2.004034268
## 258 High 0.80009 0.02513 0.75084 0.84935 1.386856956
## 259 High 0.90877 0.01568 0.87803 0.93951 2.298708247
## 260 High 0.44853 0.03242 0.38499 0.51207 -0.206611871
## 261 High 0.55554 0.04335 0.47058 0.64051 0.223080552
## 262 High 0.86520 0.02004 0.82592 0.90448 1.859168496
## 263 High 0.90877 0.01568 0.87803 0.93951 2.298708247
## 264 High 0.32332 0.03391 0.25686 0.38978 -0.738555943
## 265 High 0.61205 0.04462 0.52459 0.69951 0.455937513
## 266 High 0.72018 0.03972 0.64232 0.79804 0.945354641
## 267 High 0.92532 0.02647 0.87344 0.97720 2.516927305
## 268 High 0.60899 0.03008 0.55003 0.66795 0.443068712
## 269 High 0.09073 0.01912 0.05325 0.12821 -2.304754017
## 270 Low 0.53156 0.02725 0.47816 0.58496 0.126408054
## 271 Low 0.59868 0.05102 0.49867 0.69868 0.399968118
## 272 Low 0.03850 0.00970 0.01949 0.05751 -3.217836324
## 273 Low 0.80009 0.02513 0.75084 0.84935 1.386856956
## 274 Low 0.38122 0.03290 0.31674 0.44569 -0.484373161
## 275 Low 0.74674 0.02138 0.70483 0.78864 1.081300437
## 276 Low 0.75985 0.06300 0.63636 0.88334 1.151857317
## 277 Low 0.01908 0.00619 0.00695 0.03121 -3.939850241
## 278 Low 0.90732 0.01594 0.87607 0.93857 2.281342500
## 279 Low 0.24471 0.03422 0.17764 0.31178 -1.127027945
## 280 Low 0.66721 0.02871 0.61094 0.72348 0.695593179
## 281 Low 0.73163 0.02971 0.67339 0.78987 1.002908296
## 282 Low 0.16243 0.02935 0.10490 0.21996 -1.640257703
## 283 Low 0.73163 0.02971 0.67339 0.78987 1.002908296
## 284 Low 0.03546 0.01048 0.01492 0.05600 -3.303246003
## 285 Low 0.65453 0.02613 0.60331 0.70575 0.639011612
## 286 Low 0.77870 0.03736 0.70546 0.85193 1.258106616
## 287 Low 0.69361 0.05846 0.57903 0.80820 0.817051043
## 288 Low 0.54341 0.11318 0.32158 0.76524 0.174078267
## 289 Low 0.34666 0.12219 0.10718 0.58614 -0.633753195
## 290 Low 0.58234 0.03410 0.51552 0.64917 0.332386765
## 291 Low 0.73819 0.03062 0.67818 0.79819 1.036582194
## 292 Low 0.77460 0.03822 0.69968 0.84952 1.234470165
## 293 Low 0.82169 0.02563 0.77145 0.87192 1.527839586
## 294 Low 0.52682 0.03388 0.46042 0.59321 0.107383068
## 295 Low 0.31023 0.03637 0.23895 0.38150 -0.799044251
## 296 Low 0.62582 0.05750 0.51312 0.73851 0.514325825
## 297 Low 0.40780 0.03330 0.34252 0.47307 -0.373067558
## 298 Low 0.30170 0.03804 0.22713 0.37626 -0.839215664
## 299 Low 0.44287 0.03565 0.37299 0.51274 -0.229522333
## 300 Low 0.75689 0.02253 0.71274 0.80105 1.135703917
## 301 Low 0.52478 0.03376 0.45861 0.59094 0.099201272
## 302 Low 0.48910 0.03691 0.41675 0.56145 -0.043606909
## 303 Low 0.80936 0.02796 0.75456 0.86416 1.445856979
## 304 Low 0.85529 0.03833 0.78017 0.93041 1.776708853
## 305 Low 0.61504 0.03969 0.53724 0.69284 0.468547874
## 306 Low 0.51083 0.04763 0.41748 0.60419 0.043326777
## 307 Low 0.03227 0.00938 0.01388 0.05066 -3.400815117
## 308 Low 0.78412 0.06312 0.66041 0.90783 1.289839372
## 309 Low 0.03227 0.00938 0.01388 0.05066 -3.400815117
## 310 Low 0.01999 0.00663 0.00699 0.03299 -3.892330627
## 311 Low 0.71469 0.02925 0.65736 0.77201 0.918272574
## 312 Low 0.34666 0.12219 0.10718 0.58614 -0.633753195
## 313 Low 0.30004 0.03334 0.23469 0.36538 -0.847107391
## 314 Low 0.19469 0.08867 0.02089 0.36849 -1.419818746
## 315 Low 0.52682 0.03388 0.46042 0.59321 0.107383068
## 316 Low 0.36639 0.04953 0.26931 0.46346 -0.547735283
## 317 Low 0.31675 0.10798 0.10511 0.52839 -0.768748006
## 318 Low 0.00349 0.00160 0.00035 0.00662 -5.654357439
## 319 Low 0.37505 0.03282 0.31073 0.43937 -0.510612296
## 320 Low 0.64146 0.03406 0.57470 0.70822 0.581706598
## 321 Low 0.32478 0.03089 0.26423 0.38534 -0.731890534
## 322 Low 0.28555 0.04391 0.19949 0.37160 -0.917095871
## 323 Low 0.85864 0.01905 0.82130 0.89599 1.804039914
## 324 Low 0.90745 0.01577 0.87654 0.93836 2.282889429
## 325 Low 0.63733 0.03416 0.57037 0.70429 0.563794245
## 326 Low 0.42385 0.04467 0.33629 0.51141 -0.306988424
## 327 Low 0.63733 0.03416 0.57037 0.70429 0.563794245
## 328 Low 0.24921 0.03189 0.18672 0.31171 -1.102830071
## 329 Low 0.86333 0.02009 0.82396 0.90270 1.843227744
## 330 Low 0.83509 0.03623 0.76408 0.90611 1.622139633
## 331 Low 0.09744 0.02600 0.04648 0.14840 -2.225998366
## 332 Low 0.47958 0.05223 0.37721 0.58195 -0.081725457
## 333 Low 0.05879 0.01300 0.03332 0.08427 -2.773194509
## 334 Low 0.58562 0.04896 0.48966 0.68158 0.345887688
## 335 Low 0.55712 0.05118 0.45680 0.65743 0.229481804
## 336 Low 0.80414 0.02474 0.75565 0.85264 1.412373265
## 337 Low 0.75560 0.04255 0.67221 0.83899 1.128705908
## 338 Low 0.43014 0.02999 0.37136 0.48893 -0.281279980
## 339 Low 0.43014 0.02999 0.37136 0.48893 -0.281279980
## 340 Low 0.35591 0.10021 0.15949 0.55233 -0.593160578
## 341 Low 0.51092 0.03324 0.44578 0.57606 0.043686947
## 342 Low 0.63733 0.03416 0.57037 0.70429 0.563794245
## 343 Low 0.00795 0.00306 0.00195 0.01395 -4.826601581
## 344 Low 0.19104 0.04535 0.10216 0.27993 -1.443266642
## 345 Low 0.24471 0.03422 0.17764 0.31178 -1.127027945
## 346 Low 0.63725 0.03459 0.56945 0.70505 0.563448151
## 347 Low 0.02280 0.00926 0.00465 0.04095 -3.757930803
## 348 Low 0.01908 0.00619 0.00695 0.03121 -3.939850241
## 349 Low 0.63733 0.03416 0.57037 0.70429 0.563794245
## 350 Low 0.02014 0.00651 0.00739 0.03290 -3.884701817
## 351 Low 0.60899 0.03008 0.55003 0.66795 0.443068712
## 352 Low 0.40997 0.04195 0.32775 0.49220 -0.364089397
## 353 Low 0.49780 0.04255 0.41440 0.58119 -0.008800057
## 354 Low 0.02145 0.00693 0.00788 0.03503 -3.820347239
## 355 Low 0.53963 0.02776 0.48523 0.59403 0.158853206
## 356 Low 0.63733 0.03416 0.57037 0.70429 0.563794245
## 357 Low 0.15227 0.03290 0.08780 0.21675 -1.716906928
## 358 Low 0.64780 0.02444 0.59990 0.69570 0.609382811
## 359 Low 0.52520 0.03724 0.45221 0.59819 0.100885480
## 360 Low 0.01904 0.00618 0.00693 0.03114 -3.941989655
## 361 Low 0.40396 0.03325 0.33879 0.46913 -0.388991916
## 362 Low 0.34135 0.05262 0.23821 0.44448 -0.657283942
## 363 Low 0.85246 0.02885 0.79591 0.90902 1.754026962
## 364 Low 0.80162 0.02472 0.75318 0.85007 1.396450297
## 365 Low 0.26497 0.03509 0.19619 0.33374 -1.020294703
## 366 Low 0.54309 0.03375 0.47694 0.60925 0.172788617
## 367 Low 0.62788 0.03363 0.56196 0.69380 0.523132683
## 368 Low 0.86144 0.02014 0.82196 0.90093 1.827301963
## 369 Low 0.04491 0.01176 0.02187 0.06795 -3.057145090
## 370 Low 0.05049 0.01902 0.01321 0.08777 -2.934170765
## 371 Low 0.28600 0.05446 0.17925 0.39274 -0.914891152
## 372 Low 0.50502 0.03872 0.42912 0.58091 0.020080675
## 373 Low 0.54354 0.03894 0.46722 0.61986 0.174602228
## 374 Low 0.04659 0.01260 0.02191 0.07128 -3.018659106
## 375 Low 0.05309 0.01246 0.02867 0.07750 -2.881215465
## 376 Low 0.01098 0.00432 0.00250 0.01945 -4.500639118
## 377 Low 0.01153 0.00468 0.00236 0.02070 -4.451205959
## 378 Low 0.05304 0.01384 0.02592 0.08017 -2.882210508
## 379 Low 0.72533 0.02939 0.66774 0.78293 0.971056347
## 380 Low 0.12582 0.05756 0.01300 0.23865 -1.938433990
## 381 Low 0.19606 0.02993 0.13741 0.25472 -1.411103905
## 382 Low 0.26203 0.04112 0.18143 0.34264 -1.035444172
## 383 Low 0.03408 0.01264 0.00932 0.05885 -3.344370313
## 384 Low 0.02032 0.00658 0.00743 0.03321 -3.875620365
## 385 Low 0.11156 0.02133 0.06975 0.15337 -2.074904553
## 386 Low 0.85246 0.02885 0.79591 0.90902 1.754026962
## 387 Low 0.02726 0.00810 0.01138 0.04313 -3.574696405
## 388 Low 0.30065 0.05960 0.18384 0.41746 -0.844204535
## 389 Low 0.77039 0.04022 0.69157 0.84921 1.210514663
## 390 Low 0.01860 0.00605 0.00675 0.03046 -3.965818543
## 391 Low 0.01153 0.00468 0.00236 0.02070 -4.451205959
## 392 Low 0.03960 0.01080 0.01844 0.06076 -3.188520746
## 393 Low 0.53167 0.03750 0.45817 0.60517 0.126849820
## 394 Low 0.31566 0.03606 0.24498 0.38633 -0.773789185
## 395 Low 0.48910 0.03691 0.41675 0.56145 -0.043606909
## 396 High 0.80009 0.02513 0.75084 0.84935 1.386856956
## 397 High 0.26122 0.03526 0.19210 0.33034 -1.039637213
## 398 High 0.66392 0.02651 0.61196 0.71589 0.680812433
## 399 High 0.65285 0.03197 0.59020 0.71551 0.631590431
## 400 High 0.49732 0.02736 0.44369 0.55096 -0.010720103
## 401 High 0.81303 0.03865 0.73728 0.88878 1.469819833
## 402 High 0.97740 0.00867 0.96040 0.99440 3.766946079
## 403 High 0.62455 0.03603 0.55393 0.69517 0.508906084
## 404 High 0.61970 0.03053 0.55986 0.67955 0.488275075
## 405 High 0.56587 0.04640 0.47493 0.65681 0.265020341
## 406 High 0.83542 0.01837 0.79942 0.87142 1.624537818
## 407 High 0.82975 0.01813 0.79422 0.86529 1.583856506
## 408 High 0.80009 0.02513 0.75084 0.84935 1.386856956
## 409 High 0.33552 0.03534 0.26624 0.40479 -0.683323213
## 410 High 0.51330 0.09955 0.31818 0.70842 0.053212553
## 411 High 0.49804 0.07076 0.35935 0.63674 -0.007840040
## 412 High 0.79895 0.03377 0.73276 0.86515 1.379744732
## 413 High 0.90732 0.01594 0.87607 0.93857 2.281342500
## 414 Low 0.92712 0.01462 0.89846 0.95577 2.543268754
## 415 Low 0.73163 0.02971 0.67339 0.78987 1.002908296
## 416 Low 0.56493 0.05096 0.46505 0.66481 0.261194892
## 417 Low 0.77870 0.03736 0.70546 0.85193 1.258106616
## 418 Low 0.30110 0.04110 0.22056 0.38165 -0.842065235
## 419 Low 0.24921 0.03189 0.18672 0.31171 -1.102830071
## 420 Low 0.28876 0.02892 0.23207 0.34544 -0.901414032
## 421 Low 0.91709 0.01472 0.88823 0.94594 2.403449931
## 422 Low 0.18656 0.03941 0.10932 0.26380 -1.472519265
## 423 Low 0.04072 0.01282 0.01560 0.06583 -3.159463631
## 424 Low 0.27739 0.03998 0.19902 0.35575 -0.957445199
## 425 Low 0.02653 0.00916 0.00857 0.04449 -3.602590840
## 426 Low 0.41551 0.03353 0.34979 0.48123 -0.341232991
## 427 Low 0.17517 0.07958 0.01918 0.33115 -1.549420374
## 428 Low 0.51083 0.04763 0.41748 0.60419 0.043326777
## 429 Low 0.64100 0.03449 0.57340 0.70860 0.579707068
## 430 Low 0.57168 0.03036 0.51218 0.63118 0.288708815
## 431 Low 0.62788 0.03363 0.56196 0.69380 0.523132683
## 432 Low 0.20184 0.04220 0.11914 0.28455 -1.374833774
## 433 Low 0.86333 0.02009 0.82396 0.90270 1.843227744
## 434 Low 0.49552 0.03875 0.41957 0.57147 -0.017920480
## 435 Low 0.61970 0.03053 0.55986 0.67955 0.488275075
## 436 Low 0.54309 0.03375 0.47694 0.60925 0.172788617
## 437 Low 0.00392 0.00288 -0.00172 0.00956 -5.537735922
## 438 Low 0.04134 0.01300 0.01585 0.06683 -3.143705922
## 439 Low 0.86333 0.02009 0.82396 0.90270 1.843227744
## 440 Low 0.51083 0.04763 0.41748 0.60419 0.043326777
## 441 Low 0.01730 0.00573 0.00607 0.02852 -4.039597384
## 442 Low 0.12528 0.02454 0.07719 0.17336 -1.943352603
## 443 Low 0.83231 0.04224 0.74951 0.91511 1.602087931
## 444 Low 0.86210 0.01947 0.82393 0.90027 1.832842488
## 445 Low 0.45352 0.03898 0.37712 0.52992 -0.186458340
## 446 Low 0.40150 0.02711 0.34837 0.45464 -0.399218992
## 447 Low 0.49852 0.03250 0.43483 0.56222 -0.005920017
## 448 Low 0.34965 0.03734 0.27647 0.42283 -0.620578025
## 449 Low 0.04210 0.01121 0.02013 0.06406 -3.124695648
## 450 Low 0.03974 0.03388 -0.02666 0.10614 -3.184845844
## 451 Low 0.53567 0.02761 0.48157 0.58978 0.142922793
## 452 Low 0.61358 0.06642 0.48339 0.74376 0.462385794
## 453 Low 0.28665 0.03245 0.22305 0.35024 -0.911710224
## 454 Low 0.01039 0.00397 0.00261 0.01817 -4.556467121
## 455 Low 0.61615 0.06731 0.48422 0.74808 0.473238589
## 456 Low 0.24471 0.03422 0.17764 0.31178 -1.127027945
## 457 Low 0.34604 0.03782 0.27190 0.42017 -0.636491812
## 458 Low 0.36500 0.03667 0.29312 0.43687 -0.553727645
## 459 Low 0.01695 0.00562 0.00592 0.02797 -4.060392150
## 460 Low 0.43506 0.04061 0.35546 0.51466 -0.261235578
## 461 Low 0.62221 0.05844 0.50766 0.73675 0.498939171
## 462 Low 0.53167 0.03750 0.45817 0.60517 0.126849820
## 463 Low 0.01098 0.00432 0.00250 0.01945 -4.500639118
## 464 Low 0.02118 0.00687 0.00770 0.03465 -3.833290424
## 465 Low 0.80162 0.02472 0.75318 0.85007 1.396450297
## 466 Low 0.86144 0.02014 0.82196 0.90093 1.827301963
## 467 Low 0.01999 0.00663 0.00699 0.03299 -3.892330627
## 468 Low 0.16355 0.02780 0.10907 0.21803 -1.632047992
## 469 Low 0.74819 0.06394 0.62287 0.87351 1.088982122
## 470 Low 0.21598 0.02536 0.16628 0.26568 -1.289248719
## 471 Low 0.13792 0.02357 0.09172 0.18412 -1.832674267
## 472 Low 0.76050 0.02833 0.70498 0.81603 1.155422695
## 473 Low 0.28420 0.03594 0.21376 0.35464 -0.923722583
## 474 Low 0.02774 0.00769 0.01267 0.04280 -3.556747844
## 475 Low 0.18493 0.04591 0.09495 0.27491 -1.483296624
## 476 Low 0.28191 0.08283 0.11957 0.44425 -0.935007038
## 477 High 0.84262 0.01938 0.80463 0.88062 1.677852822
## 478 High 0.86076 0.01891 0.82371 0.89782 1.821616657
## 479 High 0.93490 0.01556 0.90441 0.96539 2.664515022
## 480 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 481 High 0.84262 0.01938 0.80463 0.88062 1.677852822
## 482 High 0.46530 0.03532 0.39607 0.53453 -0.139023483
## 483 High 0.65453 0.02613 0.60331 0.70575 0.639011612
## 484 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 485 High 0.75859 0.02174 0.71598 0.80120 1.144964714
## 486 High 0.80034 0.02303 0.75520 0.84547 1.388420717
## 487 High 0.84262 0.01938 0.80463 0.88062 1.677852822
## 488 High 0.83506 0.01894 0.79793 0.87219 1.621921807
## 489 High 0.93725 0.01191 0.91392 0.96059 2.703791478
## 490 High 0.64372 0.03130 0.58238 0.70506 0.591546910
## 491 High 0.73994 0.05339 0.63529 0.84459 1.045656728
## 492 High 0.72336 0.02578 0.67282 0.77389 0.961190002
## 493 High 0.99982 0.00014 0.99954 1.00009 8.622373691
## 494 High 0.84780 0.01977 0.80905 0.88655 1.717449313
## 495 High 0.98128 0.00625 0.96904 0.99353 3.959265371
## 496 High 0.04432 0.01156 0.02166 0.06698 -3.070987087
## 497 High 0.95869 0.01015 0.93879 0.97858 3.144463168
## 498 High 0.53021 0.03201 0.46748 0.59294 0.120987368
## 499 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 500 High 0.89225 0.01490 0.86305 0.92145 2.113932633
## 501 High 0.76336 0.03294 0.69880 0.82791 1.171189743
## 502 High 0.66643 0.04235 0.58342 0.74944 0.692082369
## 503 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 504 High 0.71370 0.02647 0.66182 0.76557 0.913422494
## 505 High 0.98059 0.00614 0.96856 0.99262 3.922366034
## 506 High 0.49277 0.03220 0.42966 0.55588 -0.028922016
## 507 High 0.74808 0.02117 0.70658 0.78958 1.088398347
## 508 High 0.91709 0.01472 0.88823 0.94594 2.403449931
## 509 High 0.81118 0.03246 0.74756 0.87480 1.457695797
## 510 High 0.71370 0.02647 0.66182 0.76557 0.913422494
## 511 High 0.08522 0.01896 0.04807 0.12238 -2.373447451
## 512 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 513 High 0.74653 0.02120 0.70497 0.78809 1.080190331
## 514 High 0.94523 0.01388 0.91802 0.97243 2.848285685
## 515 High 0.33026 0.03535 0.26098 0.39955 -0.707009354
## 516 High 0.67435 0.03582 0.60414 0.74455 0.727926079
## 517 High 0.81118 0.03246 0.74756 0.87480 1.457695797
## 518 High 0.86191 0.01902 0.82464 0.89919 1.831245210
## 519 High 0.71568 0.02631 0.66411 0.76725 0.923132775
## 520 High 0.86887 0.01999 0.82969 0.90805 1.891004319
## 521 High 0.88309 0.02418 0.83569 0.93049 2.022022713
## 522 High 0.85162 0.02207 0.80836 0.89488 1.747363867
## 523 High 0.51490 0.03333 0.44957 0.58023 0.059617652
## 524 High 0.93490 0.01556 0.90441 0.96539 2.664515022
## 525 High 0.96775 0.01107 0.94606 0.98945 3.401455746
## 526 High 0.78675 0.04405 0.70041 0.87310 1.305445350
## 527 High 0.80034 0.02303 0.75520 0.84547 1.388420717
## 528 High 0.83399 0.01890 0.79696 0.87103 1.614173384
## 529 High 0.83399 0.01890 0.79696 0.87103 1.614173384
## 530 High 0.77566 0.03772 0.70172 0.84960 1.240551522
## 531 High 0.88387 0.01987 0.84492 0.92281 2.029599740
## 532 High 0.51584 0.08691 0.34550 0.68617 0.063381209
## 533 High 0.93470 0.01259 0.91002 0.95938 2.661233586
## 534 High 0.76336 0.03294 0.69880 0.82791 1.171189743
## 535 High 0.98128 0.00625 0.96904 0.99353 3.959265371
## 536 High 0.22520 0.02852 0.16931 0.28109 -1.235616035
## 537 High 0.93001 0.01160 0.90727 0.95274 2.586842964
## 538 High 0.97897 0.00669 0.96585 0.99209 3.840551009
## 539 High 0.97735 0.00618 0.96523 0.98947 3.764684975
## 540 High 0.68323 0.02963 0.62517 0.74130 0.768655594
## 541 High 0.74507 0.02124 0.70343 0.78671 1.072489176
## 542 High 0.75242 0.03236 0.68900 0.81584 1.111560917
## 543 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 544 High 0.63459 0.03082 0.57418 0.69501 0.551959110
## 545 High 0.95020 0.01622 0.91842 0.98198 2.948657505
## 546 High 0.08958 0.02279 0.04491 0.13425 -2.318773951
## 547 High 0.71509 0.02709 0.66200 0.76818 0.920235068
## 548 High 0.80786 0.02278 0.76321 0.85251 1.436164503
## 549 High 0.82647 0.01833 0.79055 0.86240 1.560813124
## 550 High 0.04432 0.01156 0.02166 0.06698 -3.070987087
## 551 High 0.10359 0.02060 0.06321 0.14396 -2.157957098
## 552 High 0.79515 0.02298 0.75012 0.84019 1.356252772
## 553 High 0.09898 0.01984 0.06009 0.13787 -2.208609645
## 554 High 0.48570 0.03486 0.41739 0.55402 -0.057215603
## 555 High 0.71764 0.06315 0.59386 0.84141 0.932785198
## 556 High 0.64163 0.08558 0.47389 0.80937 0.582445841
## 557 High 0.06568 0.01510 0.03608 0.09528 -2.655024527
## 558 High 0.62942 0.02550 0.57944 0.67941 0.529729415
## 559 High 0.79515 0.02298 0.75012 0.84019 1.356252772
## 560 High 0.60609 0.02999 0.54731 0.66486 0.430906033
## 561 High 0.76000 0.02182 0.71724 0.80276 1.152679510
## 562 High 0.99015 0.00354 0.98321 0.99708 4.610384992
## 563 High 0.40745 0.04336 0.32246 0.49244 -0.374517032
## 564 High 0.81456 0.03948 0.73717 0.89195 1.479916711
## 565 High 0.34140 0.04251 0.25809 0.42472 -0.657061560
## 566 High 0.96582 0.00799 0.95016 0.98149 3.341336804
## 567 High 0.86643 0.02038 0.82649 0.90637 1.869755636
## 568 High 0.86705 0.02001 0.82783 0.90627 1.875123528
## 569 High 0.93470 0.01259 0.91002 0.95938 2.661233586
## 570 High 0.86643 0.02038 0.82649 0.90637 1.869755636
## 571 High 0.51157 0.02875 0.45522 0.56792 0.046288263
## 572 High 0.18353 0.03416 0.11658 0.25048 -1.492612028
## 573 High 0.52285 0.03365 0.45689 0.58881 0.091463709
## 574 High 0.83542 0.01837 0.79942 0.87142 1.624537818
## 575 High 0.69983 0.02878 0.64343 0.75623 0.846488468
## 576 High 0.80727 0.02452 0.75922 0.85532 1.432367939
## 577 High 0.76000 0.02182 0.71724 0.80276 1.152679510
## 578 High 0.19253 0.02970 0.13432 0.25074 -1.433653917
## 579 High 0.84012 0.03324 0.77496 0.90528 1.659121205
## 580 High 0.91009 0.01561 0.87950 0.94067 2.314734326
## 581 High 0.83399 0.01890 0.79696 0.87103 1.614173384
## 582 High 0.00276 0.00127 0.00027 0.00524 -5.889760783
## 583 High 0.83399 0.01890 0.79696 0.87103 1.614173384
## 584 High 0.06568 0.01510 0.03608 0.09528 -2.655024527
## 585 High 0.92748 0.01186 0.90424 0.95072 2.548608845
## 586 High 0.87810 0.02464 0.82981 0.92640 1.974559446
## 587 High 0.83542 0.01837 0.79942 0.87142 1.624537818
## 588 High 0.76576 0.02219 0.72226 0.80926 1.184522574
## 589 High 0.89619 0.01684 0.86317 0.92920 2.155590139
## 590 High 0.80911 0.02484 0.76042 0.85780 1.444237532
## 591 High 0.63287 0.02487 0.58413 0.68161 0.544549021
## 592 High 0.76576 0.02219 0.72226 0.80926 1.184522574
## 593 High 0.56057 0.03354 0.49482 0.62631 0.243475691
## 594 High 0.45468 0.03539 0.38532 0.52404 -0.181778903
## 595 High 0.89424 0.02877 0.83785 0.95063 2.134801819
## 596 High 0.51584 0.08691 0.34550 0.68617 0.063381209
## 597 High 0.84084 0.02140 0.79889 0.88279 1.664491407
## 598 High 0.38735 0.03941 0.31011 0.46459 -0.458465133
## 599 High 0.68489 0.02916 0.62774 0.74204 0.776336457
## 600 High 0.67450 0.02774 0.62013 0.72886 0.728609214
## 601 High 0.51651 0.02619 0.46519 0.56784 0.066064017
## 602 High 0.70815 0.02726 0.65472 0.76158 0.886415964
## 603 High 0.51651 0.02619 0.46519 0.56784 0.066064017
## 604 High 0.68300 0.04538 0.59406 0.77194 0.767593086
## 605 High 0.98696 0.00396 0.97920 0.99471 4.326607955
## 606 High 0.68489 0.02916 0.62774 0.74204 0.776336457
## 607 High 0.82647 0.01833 0.79055 0.86240 1.560813124
## 608 High 0.23648 0.06064 0.11762 0.35535 -1.172075682
## 609 High 0.20264 0.06747 0.07040 0.33488 -1.369875265
## 610 High 0.66747 0.02690 0.61475 0.72018 0.696764364
## 611 High 0.34853 0.04918 0.25214 0.44492 -0.625507038
## 612 High 0.80227 0.02433 0.75458 0.84996 1.400542746
## 613 High 0.99279 0.00328 0.98635 0.99923 4.925050210
## 614 High 0.82409 0.01835 0.78812 0.86006 1.544307247
## 615 High 0.42261 0.06378 0.29761 0.54761 -0.312068180
## 616 High 0.31952 0.03056 0.25962 0.37941 -0.755978562
## 617 High 0.27698 0.03306 0.21218 0.34178 -0.959491583
## 618 High 0.71832 0.02700 0.66541 0.77124 0.936143477
## 619 High 0.76336 0.03294 0.69880 0.82791 1.171189743
## 620 High 0.90732 0.01594 0.87607 0.93857 2.281342500
## 621 High 0.64579 0.02417 0.59841 0.69316 0.600584416
## 622 High 0.82045 0.02561 0.77025 0.87065 1.519399250
## 623 High 0.45122 0.07470 0.30481 0.59763 -0.195742607
## 624 High 0.73582 0.02539 0.68607 0.78558 1.024354835
## 625 High 0.97773 0.00588 0.96621 0.98925 3.781993077
## 626 High 0.62788 0.03363 0.56196 0.69380 0.523132683
## 627 High 0.46708 0.03703 0.39450 0.53965 -0.131870770
## 628 High 0.78743 0.02395 0.74048 0.83438 1.309503132
## 629 High 0.72018 0.03972 0.64232 0.79804 0.945354641
## 630 High 0.61052 0.03005 0.55162 0.66942 0.449498537
## 631 High 0.87859 0.02928 0.82120 0.93598 1.979145102
## 632 High 0.82045 0.02561 0.77025 0.87065 1.519399250
## 633 High 0.97701 0.00606 0.96513 0.98889 3.749437549
## 634 High 0.33194 0.04175 0.25011 0.41377 -0.699423760
## 635 High 0.97701 0.00606 0.96513 0.98889 3.749437549
## 636 High 0.28107 0.03570 0.21109 0.35105 -0.939160247
## 637 High 0.86082 0.03697 0.78837 0.93328 1.822117364
## 638 High 0.49335 0.02774 0.43898 0.54771 -0.026601569
## 639 High 0.76149 0.02190 0.71856 0.80443 1.160865802
## 640 High 0.20437 0.02599 0.15343 0.25531 -1.359202177
## 641 High 0.91651 0.01541 0.88631 0.94670 2.395846114
## 642 High 0.99438 0.00256 0.98936 0.99940 5.175787763
## 643 High 0.91528 0.01548 0.88494 0.94562 2.379878328
## 644 High 0.77790 0.02467 0.72954 0.82626 1.253470250
## 645 High 0.01704 0.00901 -0.00063 0.03470 -4.055004906
## 646 High 0.88757 0.01501 0.85815 0.91699 2.066156586
## 647 High 0.96772 0.00771 0.95260 0.98284 3.400494946
## 648 High 0.60773 0.08831 0.43465 0.78081 0.437780326
## 649 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 650 High 0.98849 0.00357 0.98149 0.99549 4.452962303
## 651 High 0.83724 0.01905 0.79990 0.87459 1.637834045
## 652 High 0.78298 0.04803 0.68883 0.87712 1.283117638
## 653 High 0.96050 0.00896 0.94293 0.97806 3.191153310
## 654 High 0.99933 0.00044 0.99848 1.00019 7.307562621
## 655 High 0.90190 0.02318 0.85647 0.94733 2.218516283
## 656 High 0.83399 0.01890 0.79696 0.87103 1.614173384
## 657 High 0.99097 0.00458 0.98199 0.99995 4.698131894
## 658 High 0.91009 0.01561 0.87950 0.94067 2.314734326
## 659 High 0.93743 0.01596 0.90614 0.96872 2.706856158
## 660 High 0.48570 0.03486 0.41739 0.55402 -0.057215603
## 661 High 0.85848 0.01966 0.81995 0.89701 1.802722334
## 662 High 0.43796 0.07669 0.28765 0.58827 -0.249445440
## 663 High 0.95609 0.01028 0.93595 0.97623 3.080709966
## 664 High 0.72336 0.02578 0.67282 0.77389 0.961190002
## 665 High 0.51888 0.03347 0.45327 0.58448 0.075555923
## 666 High 0.75164 0.03846 0.67625 0.82702 1.107378179
## 667 High 0.94189 0.01166 0.91904 0.96473 2.785550729
## 668 High 0.63148 0.02902 0.57460 0.68836 0.538571286
## 669 High 0.75412 0.02360 0.70787 0.80037 1.120707894
## 670 High 0.62361 0.06254 0.50103 0.74619 0.504899333
## 671 High 0.83691 0.02736 0.78329 0.89053 1.635414342
## 672 High 0.42667 0.03629 0.35554 0.49779 -0.295450586
## 673 High 0.95873 0.01640 0.92658 0.99087 3.145473648
## 674 High 0.83969 0.01850 0.80343 0.87595 1.655923335
## 675 High 0.89071 0.01492 0.86146 0.91996 2.098013998
## 676 High 0.74299 0.05324 0.63863 0.84735 1.061567591
## 677 High 0.57509 0.05679 0.46377 0.68640 0.302649168
## 678 High 0.66747 0.02690 0.61475 0.72018 0.696764364
## 679 High 0.99852 0.00099 0.99659 1.00046 6.514232095
## 680 High 0.51888 0.03347 0.45327 0.58448 0.075555923
## 681 High 0.79515 0.02298 0.75012 0.84019 1.356252772
## 682 High 0.86643 0.02038 0.82649 0.90637 1.869755636
## 683 High 0.57509 0.05679 0.46377 0.68640 0.302649168
## 684 High 0.91009 0.01561 0.87950 0.94067 2.314734326
## 685 High 0.51888 0.03347 0.45327 0.58448 0.075555923
## 686 High 0.68071 0.05505 0.57281 0.78861 0.757036590
## 687 High 0.36457 0.03010 0.30558 0.42357 -0.555583358
## 688 High 0.47992 0.03325 0.41474 0.54509 -0.080363223
## 689 High 0.42854 0.06772 0.29580 0.56127 -0.287810407
## 690 High 0.91009 0.01561 0.87950 0.94067 2.314734326
## 691 High 0.68071 0.05505 0.57281 0.78861 0.757036590
## 692 High 0.51888 0.03347 0.45327 0.58448 0.075555923
## 693 High 0.97304 0.00621 0.96086 0.98522 3.586070905
## 694 High 0.42261 0.06378 0.29761 0.54761 -0.312068180
## 695 High 0.03811 0.01061 0.01731 0.05891 -3.228423384
## 696 High 0.23799 0.02970 0.17977 0.29621 -1.163731023
## 697 High 0.68300 0.04538 0.59406 0.77194 0.767593086
## 698 High 0.61777 0.03054 0.55791 0.67763 0.480093698
## 699 High 0.36686 0.03674 0.29485 0.43887 -0.545711263
## 700 High 0.63287 0.02487 0.58413 0.68161 0.544549021
## 701 High 0.97773 0.00588 0.96621 0.98925 3.781993077
## 702 High 0.64195 0.03359 0.57612 0.70779 0.583837778
## 703 High 0.88024 0.03879 0.80421 0.95628 1.994704857
## 704 High 0.71043 0.02677 0.65796 0.76291 0.897473357
## 705 High 0.72788 0.02692 0.67511 0.78066 0.983893054
## 706 High 0.49663 0.03157 0.43475 0.55851 -0.013480204
## 707 High 0.98797 0.00484 0.97849 0.99746 4.408248803
## 708 High 0.83399 0.01890 0.79696 0.87103 1.614173384
## 709 High 0.13151 0.02441 0.08368 0.17934 -1.887673177
## 710 High 0.67450 0.02774 0.62013 0.72886 0.728609214
## 711 High 0.76576 0.02219 0.72226 0.80926 1.184522574
## 712 High 0.73008 0.02270 0.68559 0.77456 0.995028498
## 713 High 0.85782 0.02088 0.81689 0.89875 1.797300427
## 714 High 0.56840 0.03314 0.50344 0.63335 0.275326163
## 715 High 0.99809 0.00088 0.99637 0.99981 6.258740211
## 716 High 0.99989 0.00008 0.99973 1.00006 9.114920186
## 717 High 0.99266 0.00237 0.98801 0.99731 4.907049366
## 718 High 0.97672 0.00572 0.96551 0.98792 3.736605396
## 719 High 0.99992 0.00007 0.99979 1.00005 9.433403920
## 720 High 0.99845 0.00079 0.99691 1.00000 6.467949146
## 721 High 0.94241 0.01103 0.92078 0.96403 2.795091483
## 722 High 0.94535 0.01055 0.92466 0.96603 2.850606015
## 723 High 0.94535 0.01055 0.92466 0.96603 2.850606015
## 724 High 0.99940 0.00045 0.99852 1.00027 7.417980723
## 725 High 0.99686 0.00143 0.99405 0.99967 5.760387539
## 726 High 0.99139 0.00293 0.98564 0.99714 4.746183680
## 727 High 0.95954 0.00840 0.94308 0.97600 3.166140171
## 728 High 0.95826 0.00843 0.94175 0.97478 3.133659238
## 729 High 0.95826 0.00843 0.94175 0.97478 3.133659238
## 730 High 0.97873 0.00568 0.96759 0.98986 3.828958184
## 731 High 0.90934 0.01474 0.88046 0.93822 2.305602816
## 732 High 0.99737 0.00111 0.99519 0.99956 5.938137968
## 733 High 0.86728 0.01813 0.83174 0.90282 1.877120231
## 734 High 0.70746 0.02744 0.65367 0.76124 0.883079681
## 735 High 0.91461 0.01439 0.88640 0.94282 2.371268747
## 736 High 0.99797 0.00119 0.99565 1.00030 6.197687423
## 737 High 0.97261 0.00711 0.95867 0.98655 3.569805196
## 738 High 0.97320 0.00620 0.96105 0.98534 3.592187723
## 739 High 0.96473 0.00789 0.94927 0.98020 3.308815525
## 740 High 0.97174 0.00721 0.95761 0.98586 3.537640902
## 741 High 0.96387 0.00803 0.94813 0.97960 3.283832886
## 742 High 0.99715 0.00144 0.99433 0.99996 5.857582216
## 743 High 0.80906 0.02276 0.76444 0.85367 1.443913837
## 744 High 0.80906 0.02276 0.76444 0.85367 1.443913837
## 745 High 0.99941 0.00044 0.99854 1.00027 7.434797847
## 746 High 0.94346 0.01097 0.92195 0.96497 2.814605616
## 747 High 0.99686 0.00143 0.99405 0.99967 5.760387539
## 748 High 0.97258 0.00631 0.96022 0.98493 3.568679660
## 749 High 0.97419 0.00603 0.96237 0.98602 3.630844343
## 750 High 0.86470 0.01880 0.82785 0.90156 1.854888091
## 751 High 0.96442 0.00791 0.94890 0.97993 3.299743202
## 752 High 0.96605 0.00768 0.95099 0.98111 3.348326739
## 753 High 0.91018 0.01479 0.88118 0.93918 2.315834714
## 754 High 0.98135 0.00518 0.97121 0.99149 3.963083029
## 755 High 0.97617 0.00578 0.96484 0.98751 3.712691460
## 756 High 0.93118 0.01402 0.90369 0.95867 2.604958199
## 757 High 0.98768 0.00434 0.97917 0.99619 4.384134801
## 758 High 0.85258 0.02004 0.81330 0.89185 1.754981391
## 759 High 0.91018 0.01479 0.88118 0.93918 2.315834714
## 760 High 0.97937 0.00555 0.96850 0.99024 3.860163181
## 761 High 0.99887 0.00066 0.99758 1.00016 6.784407007
## 762 High 0.94174 0.01120 0.91978 0.96369 2.782813476
## 763 High 0.91259 0.01428 0.88461 0.94057 2.345677018
## 764 High 0.99745 0.00132 0.99487 1.00003 5.969108663
## 765 High 0.86283 0.01894 0.82570 0.89996 1.838996652
## 766 High 0.97743 0.00564 0.96638 0.98848 3.768305087
## 767 High 0.70618 0.03935 0.62906 0.78330 0.876902828
## 768 High 0.90782 0.01519 0.87806 0.93759 2.287302934
## 769 High 0.91018 0.01479 0.88118 0.93918 2.315834714
## 770 High 0.97419 0.00603 0.96237 0.98602 3.630844343
## 771 High 0.95947 0.00917 0.94149 0.97744 3.164338608
## 772 High 0.98794 0.00370 0.98068 0.99520 4.405727776
## 773 High 0.95861 0.00847 0.94202 0.97520 3.142445013
## 774 High 0.94285 0.01096 0.92138 0.96432 2.803227813
## 775 High 0.95861 0.00847 0.94202 0.97520 3.142445013
## 776 High 0.89551 0.01695 0.86228 0.92874 2.148302015
## 777 High 0.95861 0.00847 0.94202 0.97520 3.142445013
## 778 High 0.91765 0.01843 0.88153 0.95378 2.410837598
## 779 High 0.95344 0.01040 0.93305 0.97383 3.019334694
## 780 High 0.98135 0.00518 0.97121 0.99149 3.963083029
## 781 High 0.99978 0.00015 0.99948 1.00009 8.421662987
## 782 High 0.94174 0.01120 0.91978 0.96369 2.782813476
## 783 High 0.89737 0.01532 0.86734 0.92740 2.168337975
## 784 High 0.99161 0.00314 0.98546 0.99777 4.772289364
## 785 High 0.81574 0.02520 0.76636 0.86513 1.487747873
## 786 High 0.82984 0.01830 0.79397 0.86571 1.584493740
## 787 High 0.79868 0.02270 0.75418 0.84319 1.378064683
## 788 High 0.79744 0.02338 0.75161 0.84327 1.370370458
## 789 High 0.97165 0.00906 0.95389 0.98940 3.534368627
## 790 High 0.94199 0.01111 0.92022 0.96376 2.787379249
## 791 High 0.99980 0.00033 0.99915 1.00045 8.516993171
## 792 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 793 High 0.99508 0.00230 0.99057 0.99960 5.309514605
## 794 High 0.91893 0.01468 0.89017 0.94770 2.427896971
## 795 High 0.99920 0.00053 0.99816 1.00024 7.130098510
## 796 High 0.96473 0.00789 0.94927 0.98020 3.308815525
## 797 High 0.54821 0.04767 0.45477 0.64165 0.193440955
## 798 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 799 High 0.93207 0.01649 0.89976 0.96439 2.618930156
## 800 High 0.98135 0.00518 0.97121 0.99149 3.963083029
## 801 High 0.90887 0.01493 0.87960 0.93814 2.299915011
## 802 High 0.89737 0.01532 0.86734 0.92740 2.168337975
## 803 High 0.89299 0.01489 0.86381 0.92217 2.121653095
## 804 High 0.84418 0.02115 0.80273 0.88564 1.689664247
## 805 High 0.84418 0.02115 0.80273 0.88564 1.689664247
## 806 High 0.89737 0.01532 0.86734 0.92740 2.168337975
## 807 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 808 High 0.83322 0.01833 0.79730 0.86914 1.608622134
## 809 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 810 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 811 High 0.79973 0.02426 0.75218 0.84729 1.384607715
## 812 High 0.91002 0.01463 0.88133 0.93870 2.313879154
## 813 High 0.97282 0.00709 0.95892 0.98671 3.577717661
## 814 High 0.99960 0.00028 0.99905 1.00016 7.823645931
## 815 High 0.49279 0.13130 0.23545 0.75013 -0.028841999
## 816 High 0.85884 0.01917 0.82126 0.89643 1.805688643
## 817 High 0.92911 0.01163 0.90633 0.95190 2.573097759
## 818 High 0.95044 0.00958 0.93165 0.96922 2.953740979
## 819 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 820 High 0.91002 0.01463 0.88133 0.93870 2.313879154
## 821 High 0.77901 0.02735 0.72540 0.83261 1.259906431
## 822 High 0.82332 0.02863 0.76720 0.87944 1.539004760
## 823 High 0.86566 0.01874 0.82893 0.90239 1.863118322
## 824 High 0.99590 0.00158 0.99281 0.99900 5.492659877
## 825 High 0.95869 0.01015 0.93879 0.97858 3.144463168
## 826 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 827 High 0.90078 0.03463 0.83290 0.96866 2.205921448
## 828 High 0.89225 0.01490 0.86305 0.92145 2.113932633
## 829 High 0.89289 0.01523 0.86303 0.92275 2.120607049
## 830 High 0.71370 0.02647 0.66182 0.76557 0.913422494
## 831 High 0.90625 0.02081 0.86547 0.94703 2.268683541
## 832 High 0.78547 0.02679 0.73295 0.83799 1.297832676
## 833 High 0.84262 0.01938 0.80463 0.88062 1.677852822
## 834 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 835 High 0.95448 0.01034 0.93421 0.97474 3.043014900
## 836 High 0.89225 0.01490 0.86305 0.92145 2.113932633
## 837 High 0.67572 0.03846 0.60033 0.75111 0.734171449
## 838 High 0.96801 0.00959 0.94922 0.98681 3.409819064
## 839 High 0.86822 0.01804 0.83286 0.90358 1.885311273
## 840 High 0.94693 0.01410 0.91930 0.97456 2.881613376
## 841 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 842 High 0.91709 0.01472 0.88823 0.94594 2.403449931
## 843 High 0.84573 0.01961 0.80730 0.88417 1.701495846
## 844 High 0.97105 0.00793 0.95550 0.98660 3.512807756
## 845 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 846 High 0.79238 0.02250 0.74828 0.83649 1.339331589
## 847 High 0.86191 0.01902 0.82464 0.89919 1.831245210
## 848 High 0.82409 0.01835 0.78812 0.86006 1.544307247
## 849 High 0.78547 0.02679 0.73295 0.83799 1.297832676
## 850 High 0.78547 0.02679 0.73295 0.83799 1.297832676
## 851 High 0.23357 0.03232 0.17022 0.29692 -1.188261552
## 852 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 853 High 0.80901 0.02176 0.76635 0.85167 1.443590207
## 854 High 0.67274 0.04256 0.58932 0.75615 0.720603963
## 855 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 856 High 0.81032 0.02274 0.76574 0.85490 1.452090790
## 857 High 0.84262 0.01938 0.80463 0.88062 1.677852822
## 858 High 0.37160 0.03553 0.30196 0.44124 -0.525358899
## 859 High 0.79774 0.02269 0.75326 0.84221 1.372228731
## 860 High 0.83791 0.04222 0.75515 0.92066 1.642758960
## 861 High 0.93874 0.02012 0.89931 0.97818 2.729411449
## 862 High 0.94523 0.01388 0.91802 0.97243 2.848285685
## 863 High 0.86076 0.01891 0.82371 0.89782 1.821616657
## 864 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 865 High 0.98463 0.00456 0.97570 0.99356 4.159848379
## 866 High 0.92521 0.01455 0.89669 0.95372 2.515336553
## 867 High 0.86076 0.01891 0.82371 0.89782 1.821616657
## 868 High 0.88144 0.02430 0.83380 0.92907 2.006137772
## 869 High 0.74507 0.02124 0.70343 0.78671 1.072489176
## 870 High 0.64372 0.03130 0.58238 0.70506 0.591546910
## 871 High 0.86380 0.01887 0.82681 0.90078 1.847216867
## 872 High 0.83648 0.01840 0.80042 0.87253 1.632267304
## 873 High 0.80034 0.02303 0.75520 0.84547 1.388420717
## 874 High 0.99935 0.00038 0.99861 1.00009 7.337887984
## 875 High 0.97478 0.00595 0.96312 0.98644 3.654574474
## 876 High 0.97439 0.00601 0.96262 0.98616 3.638828734
## 877 High 0.97419 0.00603 0.96237 0.98602 3.630844343
## 878 High 0.91065 0.01454 0.88214 0.93915 2.321597389
## 879 High 0.93153 0.01151 0.90897 0.95409 2.610432703
## 880 High 0.99996 0.00004 0.99989 1.00003 10.126591103
## 881 High 0.99574 0.00168 0.99244 0.99904 5.454217019
## 882 High 0.90648 0.01546 0.87618 0.93678 2.271393650
## 883 High 0.96445 0.00794 0.94889 0.98001 3.300617834
## 884 High 0.51830 0.03186 0.45585 0.58074 0.073232712
## 885 High 0.99797 0.00119 0.99565 1.00030 6.197687423
## 886 High 0.99884 0.00067 0.99753 1.00016 6.758174601
## 887 High 0.96442 0.00791 0.94890 0.97993 3.299743202
## 888 High 0.99250 0.00322 0.98618 0.99882 4.885323992
## 889 High 0.81796 0.03334 0.75261 0.88331 1.502586993
## 890 High 0.99663 0.00135 0.99399 0.99928 5.689466843
## 891 High 0.80906 0.02276 0.76444 0.85367 1.443913837
## 892 High 0.80906 0.02276 0.76444 0.85367 1.443913837
## 893 High 0.83376 0.04720 0.74126 0.92626 1.612513064
## 894 High 0.79744 0.02338 0.75161 0.84327 1.370370458
## 895 High 0.97459 0.00598 0.96288 0.98630 3.646874072
## 896 High 0.99347 0.00259 0.98840 0.99854 5.024796922
## 897 High 0.94199 0.01111 0.92022 0.96376 2.787379249
## 898 High 0.94174 0.01120 0.91978 0.96369 2.782813476
## 899 High 0.90555 0.02088 0.86464 0.94647 2.260471900
## 900 High 0.94199 0.01111 0.92022 0.96376 2.787379249
## 901 High 0.97261 0.00711 0.95867 0.98655 3.569805196
## 902 High 0.52076 0.07152 0.38057 0.66094 0.083087767
## 903 High 0.90887 0.01493 0.87960 0.93814 2.299915011
## 904 High 0.91709 0.01472 0.88823 0.94594 2.403449931
## 905 High 0.99407 0.00213 0.98989 0.99826 5.121783414
## 906 High 0.99364 0.00225 0.98924 0.99804 5.051346591
## 907 High 0.99251 0.00256 0.98749 0.99752 4.886668291
## 908 High 0.95861 0.00847 0.94202 0.97520 3.142445013
## 909 High 0.97606 0.00676 0.96282 0.98931 3.707973360
## 910 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 911 High 0.92683 0.01183 0.90364 0.95003 2.538984661
## 912 High 0.85884 0.01917 0.82126 0.89643 1.805688643
## 913 High 0.92192 0.01997 0.88278 0.96106 2.468724510
## 914 High 0.93209 0.01165 0.90927 0.95492 2.619246077
## 915 High 0.95861 0.00847 0.94202 0.97520 3.142445013
## 916 High 0.95417 0.00890 0.93672 0.97162 3.035902954
## 917 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 918 High 0.90870 0.01483 0.87963 0.93778 2.297864219
## 919 High 0.99331 0.00222 0.98896 0.99765 5.000428926
## 920 High 0.89737 0.01532 0.86734 0.92740 2.168337975
## 921 High 0.99398 0.00216 0.98974 0.99822 5.106629826
## 922 High 0.84262 0.01938 0.80463 0.88062 1.677852822
## 923 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 924 High 0.86380 0.01887 0.82681 0.90078 1.847216867
## 925 High 0.92790 0.01175 0.90488 0.95093 2.554869924
## 926 High 0.96367 0.00835 0.94731 0.98004 3.278105066
## 927 High 0.98831 0.00361 0.98122 0.99539 4.437262638
## 928 High 0.36969 0.03577 0.29958 0.43980 -0.533546945
## 929 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 930 High 0.89136 0.01522 0.86152 0.92119 2.104708722
## 931 High 0.98015 0.00539 0.96958 0.99072 3.899501614
## 932 High 0.80287 0.02292 0.75794 0.84780 1.404329398
## 933 High 0.96801 0.00959 0.94922 0.98681 3.409819064
## 934 High 0.62585 0.02914 0.56874 0.68296 0.514453939
## 935 High 0.81393 0.02273 0.76938 0.85847 1.475751420
## 936 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 937 High 0.88144 0.02430 0.83380 0.92907 2.006137772
## 938 High 0.84418 0.02115 0.80273 0.88564 1.689664247
## 939 High 0.92790 0.01175 0.90488 0.95093 2.554869924
## 940 High 0.93160 0.01166 0.90874 0.95446 2.611530713
## 941 High 0.99110 0.00382 0.98360 0.99859 4.712764161
## 942 High 0.87195 0.03084 0.81150 0.93241 1.918311270
## 943 High 0.67098 0.02584 0.62033 0.72163 0.712620791
## 944 High 0.95869 0.01015 0.93879 0.97858 3.144463168
## 945 High 0.92528 0.02206 0.88204 0.96852 2.516348600
## 946 High 0.95702 0.01134 0.93478 0.97925 3.103089399
## 947 High 0.97969 0.00548 0.96894 0.99044 3.876122819
## 948 High 0.89808 0.01534 0.86801 0.92815 2.176070959
## 949 High 0.80034 0.02303 0.75520 0.84547 1.388420717
## 950 High 0.78941 0.04320 0.70473 0.87409 1.321372716
## 951 High 0.86191 0.01902 0.82464 0.89919 1.831245210
##################################
# Winsorizing confidence interval limits
# lying near or beyond the 0 and 1 bounds
##################################
LR_Model_Predictions$LR_LCL <- ifelse(LR_Model_Predictions$LR_LCL<0.000,0.001,
LR_Model_Predictions$LR_LCL)
LR_Model_Predictions$LR_UCL <- ifelse(LR_Model_Predictions$LR_UCL>0.995,0.999,
LR_Model_Predictions$LR_UCL)
##################################
# Formulating the probability curve
# using the model predictions and
# the classification index
##################################
LR_Model_Predictions %>%
ggplot(aes(x = LR_LP ,
y = LR_Prob,
color = Log_Solubility_Class)) +
scale_colour_manual(values=c("#1846BA","#B80000")) +
geom_point(size=3) +
geom_line(color="black") +
geom_errorbar(aes(ymin = LR_LCL,
ymax = LR_UCL),
width = .1) +
xlab("Log Solubility Classification Index (Logit Values)") +
ylab("Estimated Log Solubility Class Probability") +
labs(color = "Log Solubility Class") +
scale_x_continuous( limits=c(-11,11), breaks=seq(-11,11,by=1)) +
scale_y_continuous( limits=c(0,1), breaks=seq(0,1,by=0.1),labels = scales::percent) +
ggtitle("Estimated Log Solubility Class Probabilities Based on Classification Index") +
theme_light() +
theme(plot.title = element_text(color="black", size=14, face="bold", hjust=0.50),
axis.title.x = element_text(color="black", size=12, face="bold"),
axis.title.y = element_text(color="black", size=12, face="bold"),
legend.position="top")

##################################
# Formulating the corresponding
# receiver operating characteristic (ROC) curve
# using the model predictions
##################################
LR_Prob_Low <- LR_Model_Predictions[LR_Model_Predictions$Log_Solubility_Class=="Low",
c("LR_Prob")]
LR_Prob_High <- LR_Model_Predictions[LR_Model_Predictions$Log_Solubility_Class=="High",
c("LR_Prob")]
LR_Model_Prob_ROC <- roc.curve(scores.class1 = LR_Prob_Low,
scores.class0 = LR_Prob_High,
curve = TRUE)
plot(LR_Model_Prob_ROC,
xlab="1-Specificity",
ylab="Sensitivity",
main="ROC Curve of the Log Solubility Class Probabilities",
color=TRUE,
lwd=8,
legend=3)
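
As a side note (not part of the original output), the area under this probability-based ROC curve can also be pulled directly from the object returned by PRROC's roc.curve, which stores the AUC in its auc element; the object name LR_Model_Prob_ROCAUC below is introduced only for this illustration.
##################################
# Extracting the area under the
# probability-based ROC curve
# directly from the PRROC object
##################################
(LR_Model_Prob_ROCAUC <- LR_Model_Prob_ROC$auc)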

##################################
# Formulating the corresponding
# receiver operating characteristic (ROC) curve
# using the classification index
##################################
LR_LP_Low <- LR_Model_Predictions[LR_Model_Predictions$Log_Solubility_Class=="Low",
c("LR_LP")]
LR_LP_High <- LR_Model_Predictions[LR_Model_Predictions$Log_Solubility_Class=="High",
c("LR_LP")]
LR_Model_LP_ROC <- roc.curve(scores.class1 = LR_LP_Low,
scores.class0 = LR_LP_High,
curve = TRUE)
plot(LR_Model_LP_ROC,
xlab="1-Specificity",
ylab="Sensitivity",
main="ROC Curve of the Log Solubility Classification Index",
color=TRUE,
lwd=8,
legend=3)

##################################
# Conducting internal model validation
##################################
##################################
# Creating consistent fold assignments
# for the 10-Fold Cross Validation process
##################################
set.seed(12345678)
KFold_Indices <- createFolds(PMA_PreModelling_Train_LR$Log_Solubility_Class,
k = 10,
returnTrain=TRUE)
KFold_Control <- trainControl(method="cv",
index=KFold_Indices,
summaryFunction = twoClassSummary,
classProbs = TRUE)
##################################
# Setting the conditions
# for hyperparameter tuning
##################################
# No hyperparameter tuning process conducted
# hyperparameter=intercept fixed to TRUE
##################################
# Running the logistic regression model
# by setting the caret method to 'glm'
##################################
set.seed(12345678)
LR_Tune <- train(x = PMA_PreModelling_Train_LR[,!names(PMA_PreModelling_Train_LR) %in% c("Log_Solubility_Class")],
y = PMA_PreModelling_Train_LR$Log_Solubility_Class,
method = "glm",
metric = "ROC",
trControl = KFold_Control)
##################################
# Reporting the cross-validation results
# for the train set
##################################
LR_Tune
## Generalized Linear Model
##
## 951 samples
## 4 predictor
## 2 classes: 'Low', 'High'
##
## No pre-processing
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 857, 855, 857, 855, 856, 856, ...
## Resampling results:
##
## ROC Sens Spec
## 0.9122974 0.7539313 0.8722061
##
## Call: NULL
##
## Coefficients:
## (Intercept) MolWeight NumCarbon NumHalogen
## 5.432065 -0.007881 -0.319809 -0.767695
## HydrophilicFactor
## 1.092854
##
## Degrees of Freedom: 950 Total (i.e. Null); 946 Residual
## Null Deviance: 1308
## Residual Deviance: 709.1 AIC: 719.1
## parameter ROC Sens Spec ROCSD SensSD SpecSD
## 1 none 0.9122974 0.7539313 0.8722061 0.0302158 0.05915241 0.05739572
(LR_Train_ROCCurveAUC <- LR_Tune$results$ROC)
## [1] 0.9122974
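To make explicit how the classification index (logit) relates to the estimated class probabilities plotted earlier, the sketch below plugs a hypothetical compound into the fitted coefficients reported above. The predictor values and the object names (Hypothetical_Compound, Hypothetical_Coefficients, Hypothetical_LP, Hypothetical_Prob) are purely illustrative assumptions and not part of the original analysis.
##################################
# Illustrative sketch: mapping a hypothetical
# compound's predictors to the classification
# index and the estimated probability of the
# High log solubility class using the fitted
# coefficients reported above
##################################
Hypothetical_Compound <- c(MolWeight = 200,
                           NumCarbon = 10,
                           NumHalogen = 1,
                           HydrophilicFactor = 0)
# Coefficients and intercept taken from the fitted model output above
Hypothetical_Coefficients <- c(MolWeight = -0.007881,
                               NumCarbon = -0.319809,
                               NumHalogen = -0.767695,
                               HydrophilicFactor = 1.092854)
Hypothetical_LP <- 5.432065 + sum(Hypothetical_Coefficients * Hypothetical_Compound)
# The logistic link converts the classification index (logit)
# into the estimated High-class probability
(Hypothetical_Prob <- plogis(Hypothetical_LP))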
##################################
# Identifying and plotting the
# best model predictors
##################################
LR_VarImp <- varImp(LR_Tune, scale = TRUE)
plot(LR_VarImp,
top=4,
scales=list(y=list(cex = .95)),
main="Ranked Variable Importance : Logistic Regression",
xlab="Scaled Variable Importance Metrics",
ylab="Predictors",
cex=2,
origin=0,
alpha=0.45)

1.5.2 Threshold Criterion Using Minimum Sensitivity (TC_MinValueSe)
Minimum Sensitivity is a threshold criterion based on setting a minimum value for Sensitivity and maximizing Specificity, subject to this condition. Hence, in a case where there is more than one cutpoint fulfilling this condition, those which yield maximum Specificity are chosen. If several cutpoints still remain, those yielding the greatest Sensitivity are chosen.
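Before applying the OptimalCutpoints implementation below, the selection rule can be illustrated with a short, self-contained sketch over the model probabilities. The sketch recomputes sensitivity and specificity at each observed probability value, so exact boundary and tie handling may differ slightly from the package; the object names (Candidate_Cutoffs, SeSp_Matrix, Eligible_Cutoffs, Selected_Cutoff) are introduced only for this illustration.
######################################
# Illustrative sketch of the MinValueSe rule:
# among cutoffs with sensitivity of at least 0.90,
# keep those with maximal specificity and break
# any remaining ties by the greatest sensitivity
######################################
Candidate_Cutoffs <- sort(unique(LR_Model_Predictions$LR_Prob))
SeSp_Matrix <- t(sapply(Candidate_Cutoffs, function(cutoff) {
  Predicted_High <- LR_Model_Predictions$LR_Prob >= cutoff
  Actual_High    <- LR_Model_Predictions$Log_Solubility_Class == "High"
  c(Se = mean(Predicted_High[Actual_High]),
    Sp = mean(!Predicted_High[!Actual_High]))
}))
Eligible_Cutoffs <- which(SeSp_Matrix[, "Se"] >= 0.90)
Selected_Cutoff  <- Eligible_Cutoffs[order(-SeSp_Matrix[Eligible_Cutoffs, "Sp"],
                                           -SeSp_Matrix[Eligible_Cutoffs, "Se"])][1]
Candidate_Cutoffs[Selected_Cutoff]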
[A] The MinValueSe threshold criterion from the OptimalCutpoints package was used, which applies a minimum value set for sensitivity.
[B] The threshold criterion requires setting 1 parameter:
[B.1] valueSe = minimum sensitivity value set at 0.90
[C] The determined optimal dichotomization threshold and corresponding classification metrics are summarized as follows:
[C.1] Log Solubility Class Probability Threshold = 0.44773
[C.2] Apparent Sensitivity = 0.90458
[C.3] Apparent Specificity = 0.73536
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion using a
# minimum value set for sensitivity
######################################
ClassificationMetrics.Class0.LO.MINVALUESE <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinValueSe"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(valueSe=0.90),
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinValueSe$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinValueSe$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinValueSe$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinValueSe$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MINVALUESE <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinValueSe"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(valueSe=0.90),
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinValueSe$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinValueSe$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinValueSe$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinValueSe$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MINVALUESE(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MINVALUESE",
"ApparentSensitivity",
"ApparentSpecificity")
MINVALUESE.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MINVALUESE.SUMMARY$ROCAUC <- as.numeric(as.character(MINVALUESE.SUMMARY$ROCAUC))
MINVALUESE.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MINVALUESE.SUMMARY$ApparentSensitivity))
MINVALUESE.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MINVALUESE.SUMMARY$ApparentSpecificity))
(MINVALUESE.Threshold <- MINVALUESE.SUMMARY$OptimalThreshold.MINVALUESE)
## $OptimalThreshold.MINVALUESE
## [1] 0.44773
(MINVALUESE.Sensitivity <- MINVALUESE.SUMMARY$ApparentSensitivity)
## [1] 0.90458
(MINVALUESE.Specificity <- MINVALUESE.SUMMARY$ApparentSpecificity)
## [1] 0.73536
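For completeness, the selected threshold can be applied back to the estimated probabilities to obtain hard class predictions. The sketch below, which was not part of the original workflow, uses the 0.44773 cutoff reported above together with caret's confusionMatrix; because of boundary handling, the resulting apparent sensitivity and specificity may differ marginally from the OptimalCutpoints values.
######################################
# Illustrative sketch: applying the reported
# MinValueSe threshold of 0.44773 to the
# estimated probabilities and cross-tabulating
# the predicted against the observed classes
######################################
Predicted_Class_MINVALUESE <- factor(ifelse(LR_Model_Predictions$LR_Prob >= 0.44773,
                                            "High", "Low"),
                                     levels = c("Low", "High"))
confusionMatrix(data = Predicted_Class_MINVALUESE,
                reference = LR_Model_Predictions$Log_Solubility_Class,
                positive = "High")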
1.5.3 Threshold Criterion Using Minimum Specificity (TC_MinValueSp)
Minimum Specificity is a threshold criterion based on setting a minimum value for Specificity and maximizing Sensitivity, subject to this condition. Hence, in a case where there is more than one cutpoint fulfilling this condition, those which yield maximum Sensitivity are chosen. If several cutpoints still remain, those yielding the greatest Specificity are chosen.
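Analogously to the illustrative sketch in Section 1.5.2, this rule can be mimicked by reusing the Candidate_Cutoffs and SeSp_Matrix objects defined there (objects introduced only for that sketch) and swapping the roles of sensitivity and specificity.
######################################
# Illustrative sketch of the MinValueSp rule,
# reusing Candidate_Cutoffs and SeSp_Matrix
# from the Section 1.5.2 sketch: among cutoffs
# with specificity of at least 0.90, keep those
# with maximal sensitivity and break any
# remaining ties by the greatest specificity
######################################
Eligible_Cutoffs_Sp <- which(SeSp_Matrix[, "Sp"] >= 0.90)
Selected_Cutoff_Sp  <- Eligible_Cutoffs_Sp[order(-SeSp_Matrix[Eligible_Cutoffs_Sp, "Se"],
                                                 -SeSp_Matrix[Eligible_Cutoffs_Sp, "Sp"])][1]
Candidate_Cutoffs[Selected_Cutoff_Sp]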
[A] The MinValueSp threshold criterion from the OptimalCutpoints package was used, which applies a minimum value set for specificity.
[B] The threshold criterion requires setting 1 parameter:
[B.1] valueSp = minimum specificity value set at 0.90
[C] The determined optimal dichotomization threshold and corresponding classification metrics are summarized as follows:
[C.1] Log Solubility Class Probability Threshold = 0.72336
[C.2] Apparent Sensitivity = 0.72710
[C.3] Apparent Specificity = 0.90398
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion using a
# minimum value set for specificity
######################################
ClassificationMetrics.Class0.LO.MINVALUESP <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinValueSp"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(valueSp=0.90),
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinValueSp$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinValueSp$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinValueSp$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinValueSp$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MINVALUESP <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinValueSp"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(valueSp=0.90),
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinValueSp$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinValueSp$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinValueSp$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinValueSp$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MINVALUESP(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MINVALUESP",
"ApparentSensitivity",
"ApparentSpecificity")
MINVALUESP.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MINVALUESP.SUMMARY$ROCAUC <- as.numeric(as.character(MINVALUESP.SUMMARY$ROCAUC))
MINVALUESP.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MINVALUESP.SUMMARY$ApparentSensitivity))
MINVALUESP.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MINVALUESP.SUMMARY$ApparentSpecificity))
(MINVALUESP.Threshold <- MINVALUESP.SUMMARY$OptimalThreshold.MINVALUESP)
## $OptimalThreshold.MINVALUESP
## [1] 0.72336
(MINVALUESP.Sensitivity <- MINVALUESP.SUMMARY$ApparentSensitivity)
## [1] 0.7271
(MINVALUESP.Specificity <- MINVALUESP.SUMMARY$ApparentSpecificity)
## [1] 0.90398
1.5.4 Threshold Criterion Using Maximum Product of Specificity and
Sensitivity (TC_MaxProdSpSe)
Maximum
Product of Specificity and Sensitivity is a threshold criterion
based on the optimally highest product of Specificity and Sensitivity,
which is the same as the method based on maximization of the Accuracy
Area.
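The objective being maximized can be sketched directly on a hypothetical set
of cutoffs and Se/Sp values (illustration only, not the package internals):
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
# MaxProdSpSe logic: maximize the product of sensitivity and specificity
candidates$SeSp <- candidates$Se * candidates$Sp
candidates[which.max(candidates$SeSp), ]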
[A] The
MaxProdSpSe
threshold criterion from the
OptimalCutpoints
package was used which maximizes the product of specificity and
sensitivity or accuracy area.
[B] The threshold criterion does not require any parameters to be set.
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.64163
[C.2] Apparent Sensitivity = 0.80153
[C.3] Apparent Specificity = 0.87354
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion which maximizes the
# product of specificity and sensitivity
# or accuracy area
######################################
ClassificationMetrics.Class0.LO.MAXPRODSPSE <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MaxProdSpSe"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MaxProdSpSe$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MaxProdSpSe$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MaxProdSpSe$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MaxProdSpSe$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MAXPRODSPSE <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MaxProdSpSe"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MaxProdSpSe$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MaxProdSpSe$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MaxProdSpSe$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MaxProdSpSe$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MAXPRODSPSE(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MAXPRODSPSE",
"ApparentSensitivity",
"ApparentSpecificity")
MAXPRODSPSE.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MAXPRODSPSE.SUMMARY$ROCAUC <- as.numeric(as.character(MAXPRODSPSE.SUMMARY$ROCAUC))
MAXPRODSPSE.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MAXPRODSPSE.SUMMARY$ApparentSensitivity))
MAXPRODSPSE.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MAXPRODSPSE.SUMMARY$ApparentSpecificity))
(MAXPRODSPSE.Threshold <- MAXPRODSPSE.SUMMARY$OptimalThreshold.MAXPRODSPSE)
## $OptimalThreshold.MAXPRODSPSE
## [1] 0.64163
(MAXPRODSPSE.Sensitivity <- MAXPRODSPSE.SUMMARY$ApparentSensitivity)
## [1] 0.80153
(MAXPRODSPSE.Specificity <- MAXPRODSPSE.SUMMARY$ApparentSpecificity)
## [1] 0.87354
1.5.5 Threshold Criterion Using ROC Curve Point Closest to Point
(0,1) (TC_ROC01)
ROC
Curve Point Closest to Point (0,1) is a threshold criterion which
refers to the upper leftmost corner of the unit square.
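The distance being minimized is sqrt((1 - Se)^2 + (1 - Sp)^2); a rough sketch
on hypothetical cutoffs and Se/Sp values (not the package computation) follows:
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
# ROC01 logic: minimize the Euclidean distance to the point (0,1)
candidates$Distance01 <- sqrt((1 - candidates$Se)^2 + (1 - candidates$Sp)^2)
candidates[which.min(candidates$Distance01), ]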
[A] The
ROC01
threshold criterion from the
OptimalCutpoints
package was used which minimizes the distance between the ROC plot and
point (0,1).
[B] The threshold criterion does not require any parameters to be set.
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.64163
[C.2] Apparent Sensitivity = 0.80153
[C.3] Apparent Specificity = 0.87354
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion which minimizes the
# distance between the ROC plot and point (0,1)
######################################
ClassificationMetrics.Class0.LO.ROC01 <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("ROC01"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$ROC01$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$ROC01$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$ROC01$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$ROC01$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.ROC01 <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("ROC01"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$ROC01$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$ROC01$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$ROC01$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$ROC01$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.ROC01(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.ROC01",
"ApparentSensitivity",
"ApparentSpecificity")
ROC01.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
ROC01.SUMMARY$ROCAUC <- as.numeric(as.character(ROC01.SUMMARY$ROCAUC))
ROC01.SUMMARY$ApparentSensitivity <- as.numeric(as.character(ROC01.SUMMARY$ApparentSensitivity))
ROC01.SUMMARY$ApparentSpecificity <- as.numeric(as.character(ROC01.SUMMARY$ApparentSpecificity))
(ROC01.Threshold <- ROC01.SUMMARY$OptimalThreshold.ROC01)
## $OptimalThreshold.ROC01
## [1] 0.64163
(ROC01.Sensitivity <- ROC01.SUMMARY$ApparentSensitivity)
## [1] 0.80153
(ROC01.Specificity <- ROC01.SUMMARY$ApparentSpecificity)
## [1] 0.87354
1.5.6 Threshold Criterion Using Sensitivity Equals Specificity
(TC_SpEqualSe)
Sensitivity
Equals Specificity is a threshold criterion based on the equality of
Sensitivity and Specificity. Since Specificity may not be exactly equal
to Sensitivity, the absolute value of the difference between them is
minimized.
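A minimal sketch of this rule on hypothetical cutoffs and Se/Sp values
(not the package computation):
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
# SpEqualSe logic: minimize the absolute difference between Se and Sp
candidates$AbsDiff <- abs(candidates$Se - candidates$Sp)
candidates[which.min(candidates$AbsDiff), ]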
[A] The
SpEqualSe
threshold criterion from the
OptimalCutpoints
package was used which applies the ROC curve point where specificity
approximately equals sensitivity.
[B] The threshold criterion does not require any parameters to be set.
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.61205
[C.2] Apparent Sensitivity = 0.82634
[C.3] Apparent Specificity = 0.82670
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion using the ROC curve point where
# specificity approximately equals sensitivity
######################################
ClassificationMetrics.Class0.LO.SPEQUALSE <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("SpEqualSe"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$SpEqualSe$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$SpEqualSe$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$SpEqualSe$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$SpEqualSe$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.SPEQUALSE <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("SpEqualSe"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$SpEqualSe$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$SpEqualSe$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$SpEqualSe$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$SpEqualSe$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.SPEQUALSE(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.SPEQUALSE",
"ApparentSensitivity",
"ApparentSpecificity")
SPEQUALSE.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
SPEQUALSE.SUMMARY$ROCAUC <- as.numeric(as.character(SPEQUALSE.SUMMARY$ROCAUC))
SPEQUALSE.SUMMARY$ApparentSensitivity <- as.numeric(as.character(SPEQUALSE.SUMMARY$ApparentSensitivity))
SPEQUALSE.SUMMARY$ApparentSpecificity <- as.numeric(as.character(SPEQUALSE.SUMMARY$ApparentSpecificity))
(SPEQUALSE.Threshold <- SPEQUALSE.SUMMARY$OptimalThreshold.SPEQUALSE)
## $OptimalThreshold.SPEQUALSE
## [1] 0.61205
(SPEQUALSE.Sensitivity <- SPEQUALSE.SUMMARY$ApparentSensitivity)
## [1] 0.82634
(SPEQUALSE.Specificity <- SPEQUALSE.SUMMARY$ApparentSpecificity)
## [1] 0.8267
1.5.7 Threshold Criterion Using Youden’s Index (TC_Youden)
Youden
is a threshold criterion based on Youden’s Index which is computed as
the maximum of the sum of Sensitivity and Specificity minus 1.
This is identical from an optimization point of view to the method that
maximizes the sum of Sensitivity and Specificity and to the criterion
that maximizes concordance, which is a monotone function of the
AUC.
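The index can be sketched directly as J = Se + Sp - 1 on hypothetical cutoffs
and Se/Sp values (illustration only, not the package computation):
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
# Youden logic: maximize J = Se + Sp - 1
candidates$J <- candidates$Se + candidates$Sp - 1
candidates[which.max(candidates$J), ]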
[A] The
Youden
threshold criterion from the
OptimalCutpoints
package was used which applies the Youden Index.
[B] The threshold criterion requires setting 2
parameters:
[B.1] generalized.Youden = logical value
indicating the application of the generalized Youden methodology, set at
FALSE
[B.2] costs.benefits.Youden = logical value
indicating the application of the cost benefit methodology set at
FALSE
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.64163
[C.2] Apparent Sensitivity = 0.80153
[C.3] Apparent Specificity = 0.87354
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion using the
# Youden Index
######################################
ClassificationMetrics.Class0.LO.YOUDEN <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("Youden"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(generalized.Youden=FALSE,
costs.benefits.Youden=FALSE),
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$Youden$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$Youden$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$Youden$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$Youden$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.YOUDEN <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("Youden"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(generalized.Youden=FALSE,
costs.benefits.Youden=FALSE),
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$Youden$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$Youden$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$Youden$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$Youden$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.YOUDEN(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.YOUDEN",
"ApparentSensitivity",
"ApparentSpecificity")
YOUDEN.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
YOUDEN.SUMMARY$ROCAUC <- as.numeric(as.character(YOUDEN.SUMMARY$ROCAUC))
YOUDEN.SUMMARY$ApparentSensitivity <- as.numeric(as.character(YOUDEN.SUMMARY$ApparentSensitivity))
YOUDEN.SUMMARY$ApparentSpecificity <- as.numeric(as.character(YOUDEN.SUMMARY$ApparentSpecificity))
(YOUDEN.Threshold <- YOUDEN.SUMMARY$OptimalThreshold.YOUDEN)
## $OptimalThreshold.YOUDEN
## [1] 0.64163
(YOUDEN.Sensitivity <- YOUDEN.SUMMARY$ApparentSensitivity)
## [1] 0.80153
(YOUDEN.Specificity <- YOUDEN.SUMMARY$ApparentSpecificity)
## [1] 0.87354
1.5.8 Threshold Criterion Using Maximum Efficiency
(TC_MaxEfficiency)
Maximum
Efficiency is a threshold criterion based on maximization of the
Efficiency, Accuracy, Validity Index or percentage of cases correctly
classified. This criterion is similar to the criterion based on
minimization of the Misclassification Rate which measures the error in
cases where diseased and disease-free patients are misdiagnosed.
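In the unweighted case, Efficiency is the overall proportion correctly
classified, prevalence * Se + (1 - prevalence) * Sp; the sketch below uses
hypothetical cutoffs, Se/Sp values and an assumed prevalence, and ignores the
cost-benefit option used in the code below:
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
prevalence <- 0.55  # hypothetical proportion of the positive class
# MaxEfficiency logic (unweighted): maximize the proportion correctly classified
candidates$Efficiency <- prevalence * candidates$Se + (1 - prevalence) * candidates$Sp
candidates[which.max(candidates$Efficiency), ]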
[A] The
MaxEfficiency threshold criterion from the
OptimalCutpoints
package was used which maximizes the efficiency or minimizes error
rate.
[B] The threshold criterion requires setting 1
parameter:
[B.1] costs.benefits.Efficiency = logical value
indicating the application of the cost-benefit methodology, set at
TRUE
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.64163
[C.2] Apparent Sensitivity = 0.80153
[C.3] Apparent Specificity = 0.87354
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion which maximizes the
# efficiency or minimizes error rate
######################################
ClassificationMetrics.Class0.LO.MAXEFFICIENCY <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MaxEfficiency"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(costs.benefits.Efficiency=TRUE),
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MaxEfficiency$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MaxEfficiency$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MaxEfficiency$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MaxEfficiency$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MAXEFFICIENCY <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MaxEfficiency"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(costs.benefits.Efficiency=TRUE),
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MaxEfficiency$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MaxEfficiency$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MaxEfficiency$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MaxEfficiency$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MAXEFFICIENCY(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MAXEFFICIENCY",
"ApparentSensitivity",
"ApparentSpecificity")
MAXEFFICIENCY.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MAXEFFICIENCY.SUMMARY$ROCAUC <- as.numeric(as.character(MAXEFFICIENCY.SUMMARY$ROCAUC))
MAXEFFICIENCY.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MAXEFFICIENCY.SUMMARY$ApparentSensitivity))
MAXEFFICIENCY.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MAXEFFICIENCY.SUMMARY$ApparentSpecificity))
(MAXEFFICIENCY.Threshold <- MAXEFFICIENCY.SUMMARY$OptimalThreshold.MAXEFFICIENCY)
## $OptimalThreshold.MAXEFFICIENCY
## [1] 0.64163
(MAXEFFICIENCY.Sensitivity <- MAXEFFICIENCY.SUMMARY$ApparentSensitivity)
## [1] 0.80153
(MAXEFFICIENCY.Specificity <- MAXEFFICIENCY.SUMMARY$ApparentSpecificity)
## [1] 0.87354
1.5.9 Threshold Criterion Using Minimization of Most Frequent Error
(TC_Minimax)
Minimization
of Most Frequent Error is a threshold criterion that minimizes the
most frequent type of error, i.e., the larger of the false-positive and
false-negative counts. In a case where there is more than
one cutpoint fulfilling this condition, those which yield maximum
Sensitivity or maximum Specificity are chosen.
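A rough hand-worked sketch, assuming hypothetical class sizes and hypothetical
cutoffs with Se/Sp values (not the package computation), is shown below:
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
nPositive <- 500  # hypothetical number of "High" observations
nNegative <- 450  # hypothetical number of "Low" observations
# Minimax logic: minimize the larger (most frequent) of the two error counts
FalseNegatives <- (1 - candidates$Se) * nPositive
FalsePositives <- (1 - candidates$Sp) * nNegative
candidates[which.min(pmax(FalseNegatives, FalsePositives)), ]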
[A] The
Minimax
threshold criterion from the
OptimalCutpoints
package was used which minimizes the most frequent error.
[B] The threshold criterion does not require any parameters to be set.
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.56057
[C.2] Apparent Sensitivity = 0.84351
[C.3] Apparent Specificity = 0.81030
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion which minimizes the
# most frequent error
######################################
ClassificationMetrics.Class0.LO.MINIMAX <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("Minimax"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$Minimax$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$Minimax$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$Minimax$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$Minimax$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MINIMAX <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("Minimax"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$Minimax$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$Minimax$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$Minimax$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$Minimax$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MINIMAX(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MINIMAX",
"ApparentSensitivity",
"ApparentSpecificity")
MINIMAX.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MINIMAX.SUMMARY$ROCAUC <- as.numeric(as.character(MINIMAX.SUMMARY$ROCAUC))
MINIMAX.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MINIMAX.SUMMARY$ApparentSensitivity))
MINIMAX.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MINIMAX.SUMMARY$ApparentSpecificity))
(MINIMAX.Threshold <- MINIMAX.SUMMARY$OptimalThreshold.MINIMAX)
## $OptimalThreshold.MINIMAX
## [1] 0.56057
(MINIMAX.Sensitivity <- MINIMAX.SUMMARY$ApparentSensitivity)
## [1] 0.84351
(MINIMAX.Specificity <- MINIMAX.SUMMARY$ApparentSpecificity)
## [1] 0.8103
1.5.10 Threshold Criterion Using Maximum Diagnostic Odds Ratio
(TC_MaxDOR)
Maximum
Diagnostic Odds Ratio is a threshold criterion based on the
optimally highest Diagnostic Odds Ratio (DOR).
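The DOR at a cutpoint is (Se / (1 - Se)) / ((1 - Sp) / Sp); a sketch on
hypothetical cutoffs and Se/Sp values (illustration only) is shown below:
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
# MaxDOR logic: maximize the diagnostic odds ratio
candidates$DOR <- (candidates$Se / (1 - candidates$Se)) /
  ((1 - candidates$Sp) / candidates$Sp)
candidates[which.max(candidates$DOR), ]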
[A] The
MaxDOR
threshold criterion from the
OptimalCutpoints
package was used which maximizes the Diagnostic Odds Ratio.
[B] The threshold criterion does not require any parameters to be set.
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.92748
[C.2] Apparent Sensitivity = 0.31298
[C.3] Apparent Specificity = 1.00000
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion which maximizes the
# Diagnostic Odds Ratio
######################################
ClassificationMetrics.Class0.LO.MAXDOR <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MaxDOR"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MaxDOR$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MaxDOR$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MaxDOR$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MaxDOR$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MAXDOR <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MaxDOR"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MaxDOR$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MaxDOR$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MaxDOR$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MaxDOR$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MAXDOR(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MAXDOR",
"ApparentSensitivity",
"ApparentSpecificity")
MAXDOR.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MAXDOR.SUMMARY$ROCAUC <- as.numeric(as.character(MAXDOR.SUMMARY$ROCAUC))
MAXDOR.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MAXDOR.SUMMARY$ApparentSensitivity))
MAXDOR.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MAXDOR.SUMMARY$ApparentSpecificity))
(MAXDOR.Threshold <- MAXDOR.SUMMARY$OptimalThreshold.MAXDOR)
## $OptimalThreshold.MAXDOR
## [1] 0.92748
(MAXDOR.Sensitivity <- MAXDOR.SUMMARY$ApparentSensitivity)
## [1] 0.31298
(MAXDOR.Specificity <- MAXDOR.SUMMARY$ApparentSpecificity)
## [1] 1
1.5.11 Threshold Criterion Using Maximum Kappa (TC_MaxKappa)
Maximum
Kappa is a threshold criterion based on the optimally highest Kappa
Index which makes full use of the information in the confusion matrix to
assess the improvement over chance prediction.
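Kappa at a cutpoint can be expressed through Se, Sp and prevalence; the sketch
below uses hypothetical cutoffs, Se/Sp values and an assumed prevalence
(not the package computation):
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
prevalence <- 0.55  # hypothetical proportion of the positive class
ObservedAgreement <- prevalence * candidates$Se + (1 - prevalence) * candidates$Sp
PredictedPositive <- prevalence * candidates$Se + (1 - prevalence) * (1 - candidates$Sp)
ChanceAgreement   <- prevalence * PredictedPositive + (1 - prevalence) * (1 - PredictedPositive)
# MaxKappa logic: maximize the chance-corrected agreement
candidates$Kappa <- (ObservedAgreement - ChanceAgreement) / (1 - ChanceAgreement)
candidates[which.max(candidates$Kappa), ]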
[A] The
MaxKappa
threshold criterion from the
OptimalCutpoints
package was used which maximizes the Kappa Index.
[B] The threshold criterion does not require any parameters to be set.
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.64163
[C.2] Apparent Sensitivity = 0.80153
[C.3] Apparent Specificity = 0.87354
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion which maximizes the
# Kappa Index
######################################
ClassificationMetrics.Class0.LO.MAXKAPPA <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MaxKappa"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(weighted.Kappa=FALSE),
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MaxKappa$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MaxKappa$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MaxKappa$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MaxKappa$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MAXKAPPA <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MaxKappa"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(weighted.Kappa=FALSE),
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MaxKappa$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MaxKappa$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MaxKappa$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MaxKappa$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MAXKAPPA(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MAXKAPPA",
"ApparentSensitivity",
"ApparentSpecificity")
MAXKAPPA.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MAXKAPPA.SUMMARY$ROCAUC <- as.numeric(as.character(MAXKAPPA.SUMMARY$ROCAUC))
MAXKAPPA.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MAXKAPPA.SUMMARY$ApparentSensitivity))
MAXKAPPA.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MAXKAPPA.SUMMARY$ApparentSpecificity))
(MAXKAPPA.Threshold <- MAXKAPPA.SUMMARY$OptimalThreshold.MAXKAPPA)
## $OptimalThreshold.MAXKAPPA
## [1] 0.64163
(MAXKAPPA.Sensitivity <- MAXKAPPA.SUMMARY$ApparentSensitivity)
## [1] 0.80153
(MAXKAPPA.Specificity <- MAXKAPPA.SUMMARY$ApparentSpecificity)
## [1] 0.87354
1.5.12 Threshold Criterion Using Minimum Negative Predictive Value
(TC_MinValueNPV)
Minimum
Negative Predictive Value is a threshold criterion based on setting
a minimum value for Negative Predictive Value. In a case where there is
more than one cutpoint fulfilling this condition, those which yield the
maximum Positive Predictive Value are chosen. If several cutpoints still
remain, those yielding the highest Negative Predictive Value are
chosen.
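NPV and PPV at a cutpoint can be derived from Se, Sp and prevalence via Bayes'
rule; the sketch below applies the selection rule to hypothetical cutoffs,
Se/Sp values and an assumed prevalence (not the package computation):
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
prevalence <- 0.55  # hypothetical proportion of the positive class
candidates$PPV <- (prevalence * candidates$Se) /
  (prevalence * candidates$Se + (1 - prevalence) * (1 - candidates$Sp))
candidates$NPV <- ((1 - prevalence) * candidates$Sp) /
  ((1 - prevalence) * candidates$Sp + prevalence * (1 - candidates$Se))
# MinValueNPV logic: keep cutoffs with NPV at or above the minimum, then maximize PPV
eligible <- candidates[candidates$NPV >= 0.90, ]
eligible[which.max(eligible$PPV), ]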
[A] The
MinValueNPV
threshold criterion from the
OptimalCutpoints
package was used which applies a minimum value set for negative
predictive value.
[B] The threshold criterion requires setting 1
parameter:
[B.1] valueNPV
= minimum NPV value set at 0.90
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.31952
[C.2] Apparent Sensitivity = 0.94656
[C.3] Apparent Specificity = 0.62763
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion using a
# minimum value set for negative predictive value
######################################
ClassificationMetrics.Class0.LO.MINVALUENPV <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinValueNPV"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(valueNPV=0.90),
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinValueNPV$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinValueNPV$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinValueNPV$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinValueNPV$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MINVALUENPV <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinValueNPV"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(valueNPV=0.90),
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinValueNPV$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinValueNPV$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinValueNPV$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinValueNPV$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MINVALUENPV(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MINVALUENPV",
"ApparentSensitivity",
"ApparentSpecificity")
MINVALUENPV.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MINVALUENPV.SUMMARY$ROCAUC <- as.numeric(as.character(MINVALUENPV.SUMMARY$ROCAUC))
MINVALUENPV.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MINVALUENPV.SUMMARY$ApparentSensitivity))
MINVALUENPV.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MINVALUENPV.SUMMARY$ApparentSpecificity))
(MINVALUENPV.Threshold <- MINVALUENPV.SUMMARY$OptimalThreshold.MINVALUENPV)
## $OptimalThreshold.MINVALUENPV
## [1] 0.31952
(MINVALUENPV.Sensitivity <- MINVALUENPV.SUMMARY$ApparentSensitivity)
## [1] 0.94656
(MINVALUENPV.Specificity <- MINVALUENPV.SUMMARY$ApparentSpecificity)
## [1] 0.62763
1.5.13 Threshold Criterion Using Minimum Positive Predictive Value
(TC_MinValuePPV)
Minimum
Positive Predictive Value is a threshold criterion based on setting
a minimum value for Positive Predictive Value. In a case where there is
more than one cutpoint fulfilling this condition, those which yield the
maximum Negative Predictive Value are chosen. If several cutpoints still
remain, those yielding the highest Positive Predictive Value are
chosen.
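The mirror-image rule can be sketched the same way on hypothetical cutoffs,
Se/Sp values and an assumed prevalence (illustration only):
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
prevalence <- 0.55  # hypothetical proportion of the positive class
candidates$PPV <- (prevalence * candidates$Se) /
  (prevalence * candidates$Se + (1 - prevalence) * (1 - candidates$Sp))
candidates$NPV <- ((1 - prevalence) * candidates$Sp) /
  ((1 - prevalence) * candidates$Sp + prevalence * (1 - candidates$Se))
# MinValuePPV logic: keep cutoffs with PPV at or above the minimum, then maximize NPV
eligible <- candidates[candidates$PPV >= 0.90, ]
eligible[which.max(eligible$NPV), ]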
[A] The
MinValuePPV
threshold criterion from the
OptimalCutpoints
package was used which applies a minimum value set for positive
predictive value.
[B] The threshold criterion requires setting 1
parameter:
[B.1] valuePPV
= minimum PPV value set at 0.90
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.72336
[C.2] Apparent Sensitivity = 0.72710
[C.3] Apparent Specificity = 0.90398
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion using a
# minimum value set for positive predictive value
######################################
ClassificationMetrics.Class0.LO.MINVALUEPPV <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinValuePPV"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(valuePPV=0.90),
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinValuePPV$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinValuePPV$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinValuePPV$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinValuePPV$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MINVALUEPPV <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinValuePPV"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(valuePPV=0.90),
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinValuePPV$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinValuePPV$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinValuePPV$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinValuePPV$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MINVALUEPPV(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MINVALUEPPV",
"ApparentSensitivity",
"ApparentSpecificity")
MINVALUEPPV.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MINVALUEPPV.SUMMARY$ROCAUC <- as.numeric(as.character(MINVALUEPPV.SUMMARY$ROCAUC))
MINVALUEPPV.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MINVALUEPPV.SUMMARY$ApparentSensitivity))
MINVALUEPPV.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MINVALUEPPV.SUMMARY$ApparentSpecificity))
(MINVALUEPPV.Threshold <- MINVALUEPPV.SUMMARY$OptimalThreshold.MINVALUEPPV)
## $OptimalThreshold.MINVALUEPPV
## [1] 0.72336
(MINVALUEPPV.Sensitivity <- MINVALUEPPV.SUMMARY$ApparentSensitivity)
## [1] 0.7271
(MINVALUEPPV.Specificity <- MINVALUEPPV.SUMMARY$ApparentSpecificity)
## [1] 0.90398
1.5.14 Threshold Criterion Using Negative Equals Positive Predictive
Values (TC_NPVEqualPPV)
Negative
Equals Positive Predictive Values is a threshold criterion based on
the equality of NPV and PPV. Since NPV may not be exactly equal to PPV,
the absolute value of the difference between them is minimized.
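A minimal sketch of this rule on hypothetical cutoffs, Se/Sp values and an
assumed prevalence (not the package computation):
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
prevalence <- 0.55  # hypothetical proportion of the positive class
candidates$PPV <- (prevalence * candidates$Se) /
  (prevalence * candidates$Se + (1 - prevalence) * (1 - candidates$Sp))
candidates$NPV <- ((1 - prevalence) * candidates$Sp) /
  ((1 - prevalence) * candidates$Sp + prevalence * (1 - candidates$Se))
# NPVEqualPPV logic: minimize the absolute difference between NPV and PPV
candidates[which.min(abs(candidates$NPV - candidates$PPV)), ]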
[A] The
NPVEqualPPV
threshold criterion from the
OptimalCutpoints
package was used which applies the ROC curve point where NPV
approximately equals PPV.
[B] The threshold criterion does not require any parameters to be set.
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.51830
[C.2] Apparent Sensitivity = 0.86260
[C.3] Apparent Specificity = 0.76815
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion using the ROC curve point where
# negative predictive value approximately equals
# positive predictive value
######################################
ClassificationMetrics.Class0.LO.NPVEQUALPPV <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("NPVEqualPPV"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$NPVEqualPPV$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$NPVEqualPPV$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$NPVEqualPPV$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$NPVEqualPPV$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.NPVEQUALPPV <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("NPVEqualPPV"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$NPVEqualPPV$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$NPVEqualPPV$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$NPVEqualPPV$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$NPVEqualPPV$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.NPVEQUALPPV(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.NPVEQUALPPV",
"ApparentSensitivity",
"ApparentSpecificity")
NPVEQUALPPV.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
NPVEQUALPPV.SUMMARY$ROCAUC <- as.numeric(as.character(NPVEQUALPPV.SUMMARY$ROCAUC))
NPVEQUALPPV.SUMMARY$ApparentSensitivity <- as.numeric(as.character(NPVEQUALPPV.SUMMARY$ApparentSensitivity))
NPVEQUALPPV.SUMMARY$ApparentSpecificity <- as.numeric(as.character(NPVEQUALPPV.SUMMARY$ApparentSpecificity))
(NPVEQUALPPV.Threshold <- NPVEQUALPPV.SUMMARY$OptimalThreshold.NPVEQUALPPV)
## $OptimalThreshold.NPVEQUALPPV
## [1] 0.5183
(NPVEQUALPPV.Sensitivity <- NPVEQUALPPV.SUMMARY$ApparentSensitivity)
## [1] 0.8626
(NPVEQUALPPV.Specificity <- NPVEQUALPPV.SUMMARY$ApparentSpecificity)
## [1] 0.76815
1.5.15 Threshold Criterion Using Minimum P-Value (TC_MinPvalue)
Minimum
P-Value is a threshold criterion based on the optimally lowest
p-value associated with the statistical Chi-squared test which measures
the association between the marker and the binary result obtained on
using the cutpoint.
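Conceptually, each cutpoint defines a 2x2 table of predicted versus observed
class, and the cutpoint with the smallest Chi-squared p-value is chosen. The
sketch below uses hypothetical cutoffs, Se/Sp values and class sizes, and
omits the Miller-Siegmund adjustment applied in the code below (it is not the
package computation):
# Illustrative sketch only: hypothetical cutoffs and Se/Sp values
candidates <- data.frame(cutoff = c(0.30, 0.45, 0.60, 0.75, 0.90),
                         Se     = c(0.95, 0.90, 0.83, 0.73, 0.40),
                         Sp     = c(0.63, 0.74, 0.83, 0.90, 0.98))
nPositive <- 500  # hypothetical class sizes
nNegative <- 450
# MinPvalue logic: minimize the unadjusted Pearson Chi-squared p-value
PValues <- sapply(seq_len(nrow(candidates)), function(i) {
  TP <- round(candidates$Se[i] * nPositive); FN <- nPositive - TP
  TN <- round(candidates$Sp[i] * nNegative); FP <- nNegative - TN
  chisq.test(matrix(c(TP, FN, FP, TN), nrow = 2), correct = FALSE)$p.value
})
candidates[which.min(PValues), ]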
[A] The
MinPvalue
threshold criterion from the
OptimalCutpoints
package was used which minimizes the p-value associated with the
statistical Chi-squared test which measures the association between the
predictor and the binary result obtained on using the cut-point.
[B] The threshold criterion requires setting 1
parameter:
[B.1] adjusted.pvalue = method for adjusting the
p-value, set at PADJMS (p-value adjustment using the method
proposed by Miller and Siegmund)
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.00200
[C.2] Apparent Sensitivity = 1.00000
[C.3] Apparent Specificity = 0.08431
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion which minimizes
# the p-value associated with the statistical Chi-squared test
# which measures the association between the predictor and the
# binary result obtained on using the cut-point
######################################
ClassificationMetrics.Class0.LO.MINPVALUE <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinPvalue"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(adjusted.pvalue = "PADJMS"),
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinPvalue$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinPvalue$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinPvalue$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinPvalue$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.MINPVALUE <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("MinPvalue"),
data = SourceData,
pop.prev = NULL,
control = control.cutpoints(adjusted.pvalue = "PADJMS"),
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$MinPvalue$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$MinPvalue$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$MinPvalue$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$MinPvalue$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.MINPVALUE(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.MINPVALUE",
"ApparentSensitivity",
"ApparentSpecificity")
MINPVALUE.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
MINPVALUE.SUMMARY$ROCAUC <- as.numeric(as.character(MINPVALUE.SUMMARY$ROCAUC))
MINPVALUE.SUMMARY$ApparentSensitivity <- as.numeric(as.character(MINPVALUE.SUMMARY$ApparentSensitivity))
MINPVALUE.SUMMARY$ApparentSpecificity <- as.numeric(as.character(MINPVALUE.SUMMARY$ApparentSpecificity))
(MINPVALUE.Threshold <- MINPVALUE.SUMMARY$OptimalThreshold.MINPVALUE)
## $OptimalThreshold.MINPVALUE
## [1] 0.002
(MINPVALUE.Sensitivity <- MINPVALUE.SUMMARY$ApparentSensitivity)
## [1] 1
(MINPVALUE.Specificity <- MINPVALUE.SUMMARY$ApparentSpecificity)
## [1] 0.08431
1.5.16 Threshold Criterion Using ROC Curve Point Closest to Observed Prevalence (TC_ObservedPrev)
Observed
Prevalence is a threshold criterion that selects the ROC curve point whose
value is closest to the prevalence estimated from the sample. The criterion
is therefore only valid when the diagnostic test takes values in the
interval (0,1), and it is useful when preserving the observed prevalence is
of prime importance (a minimal conceptual sketch is given after the summary below).
[A] The
ObservedPrev threshold criterion from the
OptimalCutpoints
package was used, which selects the ROC curve point closest in value to the
observed prevalence.
[B] The threshold criterion does not require any
parameter to be set.
[C] The determined optimal dichotomization threshold
and corresponding classification metrics are summarized as
follows:
[C.1] Log Solubility Class Probability Threshold =
0.54821
[C.2] Apparent Sensitivity = 0.84733
[C.3] Apparent Specificity = 0.80796
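Since the predicted probabilities already take values in (0,1), the criterion amounts to picking the candidate cut-point whose value lies nearest to the sample prevalence of the High class. The sketch below illustrates this directly under the same column-index assumptions as above (response in column 6, predicted probability in column 7 of LR_Model_Predictions); the optimal.cutpoints call that follows remains the computation actually used.
######################################
# Conceptual sketch (not the package
# implementation): choosing the
# cut-point closest in value to the
# observed prevalence of the High class
######################################
observed.class <- LR_Model_Predictions[,6]  # assumed: observed Log_Solubility_Class
predicted.prob <- LR_Model_Predictions[,7]  # assumed: predicted probability of the High class
(observed.prevalence <- mean(observed.class == "High"))
candidate.cutpoints <- sort(unique(predicted.prob))
# Cut-point nearest in value to the observed prevalence
(candidate.cutpoints[which.min(abs(candidate.cutpoints - observed.prevalence))])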
######################################
# Creating a function to gather the
# classification metrics applying the
# threshold criterion using the point
# with closest value to observed prevalence
######################################
ClassificationMetrics.Class0.LO.OBSERVEDPREV <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("ObservedPrev"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c("<"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$ObservedPrev$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$ObservedPrev$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$ObservedPrev$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$ObservedPrev$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
ClassificationMetrics.Class0.HI.OBSERVEDPREV <- function(SourceData,Response,Predictor){
ThresholdCriterion <- optimal.cutpoints(X = Predictor,
status = Response,
tag.healthy = "Low",
methods = c("ObservedPrev"),
data = SourceData,
pop.prev = NULL,
ci.fit = FALSE,
direction = c(">"))
summary(ThresholdCriterion)
(OptimalThreshold <- ThresholdCriterion$ObservedPrev$Global$optimal.cutoff$cutoff[1])
(ThresholdSensitivity <- ThresholdCriterion$ObservedPrev$Global$optimal.cutoff$Se[,1][1])
(ThresholdSpecificity <- ThresholdCriterion$ObservedPrev$Global$optimal.cutoff$Sp[,1][1])
(ThresholdROCAUC <- ThresholdCriterion$ObservedPrev$Global$measures.acc$AUC[1])
plot.optimal.cutpoints(ThresholdCriterion,
col=c("#5680E9"),
legend=FALSE,
which=1,
xlim=c(0,1),
ylim=c(0,1),
lwd=8)
ClassificationMetricsList <-list(OptimalThreshold,
ThresholdSensitivity,
ThresholdSpecificity,
ThresholdROCAUC)
return(ClassificationMetricsList)
}
######################################
# Gather the classification metrics
######################################
OPTTH.List <- list()
THSEN.List <- list()
THSPE.List <- list()
THAUC.List <- list()
PREDI.List <- list()
for (i in 7:7) {
Response.i <- colnames(LR_Model_Predictions)[6]
Predictor.i <- colnames(LR_Model_Predictions)[i]
ClassificationMetrics.i <- ClassificationMetrics.Class0.LO.OBSERVEDPREV(LR_Model_Predictions,
Response.i,
Predictor.i)
OPTTH.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[1],fmt='%#.5f')))
THSEN.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[2],fmt='%#.5f')))
THSPE.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[3],fmt='%#.5f')))
THAUC.i <- as.numeric(as.character(sprintf(ClassificationMetrics.i[4],fmt='%#.5f')))
PREDI.List <- append(PREDI.List,Predictor.i)
OPTTH.List <- append(OPTTH.List,OPTTH.i)
THSEN.List <- append(THSEN.List,THSEN.i)
THSPE.List <- append(THSPE.List,THSPE.i)
THAUC.List <- append(THAUC.List,THAUC.i)
}

ClassificationMetrics.SUMMARY <- cbind(PREDI.List,
THAUC.List,
OPTTH.List,
THSEN.List,
THSPE.List
)
colnames(ClassificationMetrics.SUMMARY) <- c("Predictor",
"ROCAUC",
"OptimalThreshold.OBSERVEDPREV",
"ApparentSensitivity",
"ApparentSpecificity")
OBSERVEDPREV.SUMMARY <- as.data.frame(ClassificationMetrics.SUMMARY)
OBSERVEDPREV.SUMMARY$ROCAUC <- as.numeric(as.character(OBSERVEDPREV.SUMMARY$ROCAUC))
OBSERVEDPREV.SUMMARY$ApparentSensitivity <- as.numeric(as.character(OBSERVEDPREV.SUMMARY$ApparentSensitivity))
OBSERVEDPREV.SUMMARY$ApparentSpecificity <- as.numeric(as.character(OBSERVEDPREV.SUMMARY$ApparentSpecificity))
(OBSERVEDPREV.Threshold <- OBSERVEDPREV.SUMMARY$OptimalThreshold.OBSERVEDPREV)
## $OptimalThreshold.OBSERVEDPREV
## [1] 0.54821
(OBSERVEDPREV.Sensitivity <- OBSERVEDPREV.SUMMARY$ApparentSensitivity)
## [1] 0.84733
(OBSERVEDPREV.Specificity <- OBSERVEDPREV.SUMMARY$ApparentSpecificity)
## [1] 0.80796
1.6 Dichotomization Threshold Criterion Evaluation Summary
Criterion performance comparison:
[A] The dichotomization threshold criteria which
supported generally optimal classification performance, weighing sensitivity
and specificity jointly and demonstrating the best and most consistent
metrics, are as follows:
[A.1] MaxProdSpSe with Apparent Sensitivity =
0.80153, Apparent Specificity = 0.87354
[A.2] ROC01 with Apparent Sensitivity = 0.80153,
Apparent Specificity = 0.87354
[A.3] Youden with Apparent Sensitivity = 0.80153,
Apparent Specificity = 0.87354
[A.4] MaxEfficiency with Apparent Sensitivity =
0.80153, Apparent Specificity = 0.87354
[A.5] MaxKappa with Apparent Sensitivity =
0.80153, Apparent Specificity = 0.87354
[B] The dichotomization threshold criterion which
supported generally optimal classification performance, giving equal weight
to sensitivity and specificity and demonstrating the best and most
consistent metrics, is as follows:
[B.1] SpEqualSe with Apparent Sensitivity =
0.82634, Apparent Specificity = 0.82670
[C] The dichotomization threshold criteria which
supported specifically optimal classification performance, placing more
weight on keeping false negative cases low and demonstrating the best and
most consistent metrics, are as follows:
[C.1] MinValueSe with Apparent Sensitivity =
0.90458, Apparent Specificity = 0.73536
[C.2] MinValueNPV with Apparent Sensitivity =
0.94656, Apparent Specificity = 0.62763
[D] The dichotomization threshold criteria which
supported specifically optimal classification performance, placing more
weight on keeping false positive cases low and demonstrating the best and
most consistent metrics, are as follows:
[D.1] MinValueSp with Apparent Sensitivity =
0.72710, Apparent Specificity = 0.90398
[D.2] MinValuePPV with Apparent Sensitivity =
0.72710, Apparent Specificity = 0.90398
##################################
# Consolidating all evaluation results
# using the sensitivity and specificity metrics
##################################
Criterion <- c('MinValueSe','MinValueSp','MaxProdSpSe','ROC01','SpEqualSe',
'Youden','MaxEfficiency','Minimax','MaxDOR','MaxKappa',
'MinValueNPV','MinValuePPV','NPVEqualPPV','MinPvalue','ObservedPrev',
'MinValueSe','MinValueSp','MaxProdSpSe','ROC01','SpEqualSe',
'Youden','MaxEfficiency','Minimax','MaxDOR','MaxKappa',
'MinValueNPV','MinValuePPV','NPVEqualPPV','MinPvalue','ObservedPrev')
Set <- c(rep('Apparent Sensitivity',15),rep('Apparent Specificity',15))
ClassificationMetrics <- c(MINVALUESE.Sensitivity,MINVALUESP.Sensitivity,MAXPRODSPSE.Sensitivity,ROC01.Sensitivity,SPEQUALSE.Sensitivity,
YOUDEN.Sensitivity,MAXEFFICIENCY.Sensitivity,MINIMAX.Sensitivity,MAXDOR.Sensitivity,MAXKAPPA.Sensitivity,
MINVALUENPV.Sensitivity,MINVALUEPPV.Sensitivity,NPVEQUALPPV.Sensitivity,MINPVALUE.Sensitivity,OBSERVEDPREV.Sensitivity,
MINVALUESE.Specificity,MINVALUESP.Specificity,MAXPRODSPSE.Specificity,ROC01.Specificity,SPEQUALSE.Specificity,
YOUDEN.Specificity,MAXEFFICIENCY.Specificity,MINIMAX.Specificity,MAXDOR.Specificity,MAXKAPPA.Specificity,
MINVALUENPV.Specificity,MINVALUEPPV.Specificity,NPVEQUALPPV.Specificity,MINPVALUE.Specificity,OBSERVEDPREV.Specificity)
ClassificationMetrics_Summary <- as.data.frame(cbind(Criterion,Set,ClassificationMetrics))
ClassificationMetrics_Summary$ClassificationMetrics <- as.numeric(as.character(ClassificationMetrics_Summary$ClassificationMetrics))
ClassificationMetrics_Summary$Set <- factor(ClassificationMetrics_Summary$Set,
levels = c("Apparent Sensitivity",
"Apparent Specificity"))
ClassificationMetrics_Summary$Criterion <- factor(ClassificationMetrics_Summary$Criterion,
levels = c('MinValueSe',
'MinValueSp',
'MaxProdSpSe',
'ROC01',
'SpEqualSe',
'Youden',
'MaxEfficiency',
'Minimax',
'MaxDOR',
'MaxKappa',
'MinValueNPV',
'MinValuePPV',
'NPVEqualPPV',
'MinPvalue',
'ObservedPrev'))
print(ClassificationMetrics_Summary, row.names=FALSE)
## Criterion Set ClassificationMetrics
## MinValueSe Apparent Sensitivity 0.90458
## MinValueSp Apparent Sensitivity 0.72710
## MaxProdSpSe Apparent Sensitivity 0.80153
## ROC01 Apparent Sensitivity 0.80153
## SpEqualSe Apparent Sensitivity 0.82634
## Youden Apparent Sensitivity 0.80153
## MaxEfficiency Apparent Sensitivity 0.80153
## Minimax Apparent Sensitivity 0.84351
## MaxDOR Apparent Sensitivity 0.31298
## MaxKappa Apparent Sensitivity 0.80153
## MinValueNPV Apparent Sensitivity 0.94656
## MinValuePPV Apparent Sensitivity 0.72710
## NPVEqualPPV Apparent Sensitivity 0.86260
## MinPvalue Apparent Sensitivity 1.00000
## ObservedPrev Apparent Sensitivity 0.84733
## MinValueSe Apparent Specificity 0.73536
## MinValueSp Apparent Specificity 0.90398
## MaxProdSpSe Apparent Specificity 0.87354
## ROC01 Apparent Specificity 0.87354
## SpEqualSe Apparent Specificity 0.82670
## Youden Apparent Specificity 0.87354
## MaxEfficiency Apparent Specificity 0.87354
## Minimax Apparent Specificity 0.81030
## MaxDOR Apparent Specificity 1.00000
## MaxKappa Apparent Specificity 0.87354
## MinValueNPV Apparent Specificity 0.62763
## MinValuePPV Apparent Specificity 0.90398
## NPVEqualPPV Apparent Specificity 0.76815
## MinPvalue Apparent Specificity 0.08431
## ObservedPrev Apparent Specificity 0.80796
(ClassificationMetrics_Plot <- dotplot(Criterion ~ ClassificationMetrics,
data = ClassificationMetrics_Summary,
groups = Set,
main = "Dichotomization Threshold Criteria Performance Comparison",
ylab = "Dichotomization Threshold Criterion",
xlab = "Classification Performance",
auto.key = list(adj = 1),
type=c("p", "h"),
origin = 0,
alpha = 0.45,
pch = 16,
cex = 2))
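The dotplot supports a visual comparison; a compact numeric complement is to reshape the consolidated metrics to wide form and compute a balance measure per criterion, such as Youden's J (Sensitivity + Specificity - 1) and the geometric mean of sensitivity and specificity. The sketch below is one possible re-expression using the already loaded dplyr and tidyr functions (assuming a tidyr version that provides pivot_wider); it introduces no new results and only rearranges ClassificationMetrics_Summary.
##################################
# Ranking the threshold criteria by
# Youden's J and the geometric mean
# of sensitivity and specificity
# (illustrative re-expression of the
# consolidated summary above)
##################################
ClassificationMetrics_Wide <- ClassificationMetrics_Summary %>%
pivot_wider(names_from = Set, values_from = ClassificationMetrics) %>%
rename(Sensitivity = `Apparent Sensitivity`,
Specificity = `Apparent Specificity`) %>%
mutate(YoudenJ = Sensitivity + Specificity - 1,
GMean = sqrt(Sensitivity * Specificity)) %>%
arrange(desc(GMean))
print(as.data.frame(ClassificationMetrics_Wide), row.names = FALSE)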
