1 FRESA.CAD Regression Benchmark

1.1 BRCA Recurrence Risk Data Set


# The radiomics data frame (lesionsSumDiffRed) is restored from the RData file below
lesionsSumDiffRed <- NULL
load("RadiomicsBRCA.RDATA")



ExperimentName <- "HSJBC_Oncotype"
bswimsReps <- 20          # bootstrap repetitions for the B:SWiMS model
theData <- lesionsSumDiffRed
theOutcome <- "Risk"      # continuous recurrence-risk score to be predicted
reps <- 100               # holdout repetitions for the cross-validation benchmark
fraction <- 0.9           # fraction of cases used for training in each repetition

FRESAFileName <- paste(ExperimentName,"FRESAMethod.RDATA",sep = "_")
CVFileName <- paste(ExperimentName,"CVMethod.RDATA",sep = "_")
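Before fitting anything, a quick sanity check of the loaded table is useful; a minimal sketch (it assumes lesionsSumDiffRed is a data frame that contains the numeric Risk outcome):

dim(theData)                     # number of rows (cases) and columns (features)
summary(theData[, theOutcome])   # distribution of the recurrence-risk score
sum(is.na(theData))              # missing values, if any, before modeling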

1.2 Benchmark


MODELFRESA <- FRESA.Model(formula = paste(theOutcome," ~ 1"),
                          data = theData,
                          repeats = bswimsReps)
save(MODELFRESA,file= FRESAFileName)

cp <- RegresionBenchmark(theData = theData,
                         theOutcome = theOutcome,
                         reps = reps,
                         fraction = fraction)
save(cp,file = CVFileName)
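Both fitted objects are cached to disk, so a later session can skip the fits and simply restore them (the same pattern appears, commented out, at the start of the feature analysis section):

# load(FRESAFileName)   # restores MODELFRESA
# load(CVFileName)      # restores cp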



elapcol <- names(cp$times[[1]]) == "elapsed"
cputimes <- list(Fresa = mean(cp$times$BSWiMS[elapcol]),
                 LASSO = mean(cp$times$LASSO[elapcol]),
                 RF = mean(cp$times$RF[elapcol]),
                 SVM = mean(cp$times$SVM[elapcol]))

featsize <- list(BSWiMS = cp$jaccard_filter$BSWiMS$averageLength,
                 LASSO = cp$jaccard_filter$LASSO$averageLength,
                 RPART= cp$jaccard_filter$rpart$averageLength,
                 RF = cp$jaccard_filter$RF$averageLength,
                 FT = cp$jaccard_filter$FT$averageLength,
                 TT = cp$jaccard_filter$TT$averageLength,
                 WT = cp$jaccard_filter$WT$averageLength,
                 BT = cp$jaccard_filter$BT$averageLength,
                 pearson = cp$jaccard_filter$pearson$averageLength,
                 spearman = cp$jaccard_filter$spearman$averageLength,
                 kendall = cp$jaccard_filter$kendall$averageLength,
                 mRMR = cp$jaccard_filter$mRMR$averageLength
                 )

jaccard <- list(BSWiMS = cp$jaccard_filter$BSWiMS$Jaccard.SM,
                 LASSO = cp$jaccard_filter$LASSO$Jaccard.SM,
                 RPART = cp$jaccard_filter$rpart$Jaccard.SM,
                 RF = cp$jaccard_filter$RF$Jaccard.SM,
                 FT = cp$jaccard_filter$FT$Jaccard.SM,
                 TT = cp$jaccard_filter$TT$Jaccard.SM,
                 WT = cp$jaccard_filter$WT$Jaccard.SM,
                 BT = cp$jaccard_filter$BT$Jaccard.SM,
                 pearson = cp$jaccard_filter$pearson$Jaccard.SM,
                 spearman = cp$jaccard_filter$spearman$Jaccard.SM,
                 kendall = cp$jaccard_filter$kendall$Jaccard.SM,
                 mRMR = cp$jaccard_filter$mRMR$Jaccard.SM
                 )

1.2.1 Results

# Average elapsed times (seconds)
pander::pander(cputimes)
  • Fresa: 0.6421
  • LASSO: 0.2318
  • RF: 1.762
  • SVM: 0.0098
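For a visual comparison of the timings, the same list can be passed to barplot (a sketch in the style of the Jaccard bar plot used later in this report):

barplot(unlist(cputimes),las = 2,ylab = "Elapsed time (s)",main = "Average CPU time per method")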
pander::pander(featsize)
  • BSWiMS: 6.53
  • LASSO: 16.84
  • RPART: 24.95
  • RF: 6.65
  • FT: 8.15
  • TT: 0
  • WT: 0
  • BT: 0
  • pearson: 2.7
  • spearman: 4.8
  • kendall: 4.02
  • mRMR: 31



bp <- barPlotCiError(as.matrix(cp$CorTable), metricname = "Pearson Correlation",
                     thesets = cp$thesets, themethod = cp$theMethod,
                     main = "Pearson Correlation", offsets = c(0.5,0.05),
                     args.legend = list(x = "bottomright"))


pander::pander(bp$barMatrix,caption = "Pearson Correlation",round = 3)
Pearson Correlation
  Regression Algorithm
BSWiMS 0.403
RF 0.218
RPART 0.113
LASSO 0.398
SVM 0.381
ENS 0.398
pander::pander(bp$ciTable,caption = "Pearson Correlation with 95%CI",round = 3)
Pearson Correlation with 95%CI
           Pearson Correlation    lower    upper
  BSWiMS                 0.403    0.187    0.582
  RF                     0.218   -0.017    0.429
  RPART                  0.113   -0.124    0.337
  LASSO                  0.398    0.182    0.578
  SVM                    0.381    0.162    0.564
  ENS                    0.398    0.182    0.578


bp <- barPlotCiError(as.matrix(cp$RMSETable), metricname = "RMSE",
                     thesets = cp$thesets, themethod = cp$theMethod,
                     main = "RMSE", offsets = c(0.5,5),
                     args.legend = list(x = "bottomright"))

pander::pander(bp$barMatrix,caption = "RMSE",round = 3)
RMSE
  Regression Algorithm
BSWiMS 50.71
RF 54.07
RPART 61.35
LASSO 50.75
SVM 51.28
ENS 50.76
pander::pander(bp$ciTable,caption = "RMSE with 95%CI",round = 3)
RMSE with 95%CI
            RMSE    lower    upper
  BSWiMS   50.71    43.57    60.68
  RF       54.07    46.46    64.7
  RPART    61.35    52.71    73.41
  LASSO    50.75    43.6     60.72
  SVM      51.28    44.06    61.36
  ENS      50.76    43.61    60.74

  
bp <- barPlotCiError(as.matrix(cp$BiasTable), metricname = "BIAS",
                     thesets = cp$thesets, themethod = cp$theMethod,
                     main = "BIAS", offsets = c(0.5,0.5),
                     args.legend = list(x = "bottomright"))

pander::pander(bp$barMatrix,caption = "BIAS",round = 3)
BIAS
  Regression Algorithm
BSWiMS -0.117
RF 0.615
RPART -2.126
LASSO 0.636
SVM -3.262
ENS -0.532
pander::pander(bp$ciTable,caption = "BIAS with 95%CI",round = 3)
BIAS with 95%CI
             BIAS     lower    upper
  BSWiMS   -0.117    -12.12    11.89
  RF        0.615    -12.18    13.41
  RPART    -2.126    -16.64    12.39
  LASSO     0.636    -11.38    12.65
  SVM      -3.262    -15.38    8.851
  ENS      -0.532    -12.55    11.48


barplot(unlist(jaccard),las = 2,cex.axis = 1,cex.names = 0.7,main = "Jaccard Index",xlab = "Filter Method",ylab = "Jaccard")

barplot(unlist(featsize)+1,las = 2,cex.axis = 1,cex.names = 0.7,log = "y",xlab = "Filter Method",ylab = "# of Features+1")
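The stability and signature-size summaries can also be tabulated side by side; a small sketch that reuses only the jaccard and featsize lists computed above:

pander::pander(data.frame(Jaccard = unlist(jaccard),
                          AverageSize = unlist(featsize)),
               caption = "Filter stability vs. average signature size",
               round = 3)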




bp <- barPlotCiError(as.matrix(cp$CorTable_filter), metricname = "Pearson Correlation",
                     thesets = cp$theFiltersets, themethod = cp$theRegressMethod,
                     main = "Pearson Correlation", offsets = c(0.0,0.10),
                     args.legend = list(x = "bottomright"), angle = 45)


pander::pander(bp$barMatrix,caption = "Pearson Correlation",round = 3)
Pearson Correlation (continued below)
  BSWiMS LASSO RPART RF:BSWiMS F-Test t-Test
Linear Regression 0.432 0.459 0.385 0.356 0.465 -0.685
Robust Regression 0.422 0.445 0.371 0.389 0.454 -0.685
Ridge Regression 0.434 0.462 0.412 0.355 0.463 -0.685
LASSO 0.431 0.433 0.418 0.336 0.424 -0.685
SVM 0.471 0.41 0.323 0.453 0.382 -0.685
Random Forest 0.422 0.381 0.26 0.339 0.347 -0.685
Table continues below
  W-Test Binomial Test Pearson Spearman Kendall
Linear Regression -0.685 -0.685 0.345 0.369 0.368
Robust Regression -0.685 -0.685 0.336 0.381 0.383
Ridge Regression -0.685 -0.685 0.344 0.376 0.381
LASSO -0.685 -0.685 0.346 0.362 0.366
SVM -0.685 -0.685 0.446 0.382 0.354
Random Forest -0.685 -0.685 0.338 0.332 0.32
  mRMR
Linear Regression 0.417
Robust Regression 0.385
Ridge Regression 0.414
LASSO 0.401
SVM 0.381
Random Forest 0.337
pander::pander(bp$ciTable,caption = "Pearson Correlation with 95%CI",round = 3)
Pearson Correlation with 95%CI
  Pearson Correlation lower upper
BSWiMS 0.432 0.221 0.605
BSWiMS 0.422 0.209 0.597
BSWiMS 0.434 0.224 0.606
BSWiMS 0.431 0.22 0.604
BSWiMS 0.471 0.267 0.634
BSWiMS 0.422 0.209 0.596
LASSO 0.459 0.253 0.626
LASSO 0.445 0.236 0.614
LASSO 0.462 0.256 0.628
LASSO 0.433 0.222 0.605
LASSO 0.41 0.195 0.587
LASSO 0.381 0.162 0.564
RPART 0.385 0.167 0.567
RPART 0.371 0.151 0.556
RPART 0.412 0.198 0.589
RPART 0.418 0.204 0.593
RPART 0.323 0.097 0.517
RPART 0.26 0.028 0.465
RF:BSWiMS 0.356 0.134 0.544
RF:BSWiMS 0.389 0.171 0.57
RF:BSWiMS 0.355 0.133 0.543
RF:BSWiMS 0.336 0.112 0.528
RF:BSWiMS 0.453 0.245 0.62
RF:BSWiMS 0.339 0.115 0.53
F-Test 0.465 0.259 0.63
F-Test 0.454 0.246 0.621
F-Test 0.463 0.258 0.629
F-Test 0.424 0.212 0.598
F-Test 0.382 0.163 0.565
F-Test 0.347 0.124 0.537
t-Test -0.685 -0.792 -0.538
t-Test -0.685 -0.792 -0.538
t-Test -0.685 -0.792 -0.538
t-Test -0.685 -0.792 -0.538
t-Test -0.685 -0.792 -0.538
t-Test -0.685 -0.792 -0.538
W-Test -0.685 -0.792 -0.538
W-Test -0.685 -0.792 -0.538
W-Test -0.685 -0.792 -0.538
W-Test -0.685 -0.792 -0.538
W-Test -0.685 -0.792 -0.538
W-Test -0.685 -0.792 -0.538
Binomial Test -0.685 -0.792 -0.538
Binomial Test -0.685 -0.792 -0.538
Binomial Test -0.685 -0.792 -0.538
Binomial Test -0.685 -0.792 -0.538
Binomial Test -0.685 -0.792 -0.538
Binomial Test -0.685 -0.792 -0.538
Pearson 0.345 0.121 0.535
Pearson 0.336 0.112 0.528
Pearson 0.344 0.12 0.534
Pearson 0.346 0.123 0.536
Pearson 0.446 0.238 0.616
Pearson 0.338 0.113 0.529
Spearman 0.369 0.149 0.555
Spearman 0.381 0.162 0.564
Spearman 0.376 0.157 0.56
Spearman 0.362 0.14 0.549
Spearman 0.382 0.163 0.565
Spearman 0.332 0.107 0.525
Kendall 0.368 0.147 0.554
Kendall 0.383 0.165 0.566
Kendall 0.381 0.162 0.564
Kendall 0.366 0.146 0.553
Kendall 0.354 0.131 0.542
Kendall 0.32 0.094 0.515
mRMR 0.417 0.204 0.593
mRMR 0.385 0.167 0.567
mRMR 0.414 0.2 0.59
mRMR 0.401 0.185 0.58
mRMR 0.381 0.162 0.564
mRMR 0.337 0.113 0.529


bp <- barPlotCiError(as.matrix(cp$RMSETable_filter), metricname = "RMSE",
                     thesets = cp$theFiltersets, themethod = cp$theRegressMethod,
                     main = "RMSE", offsets = c(0.0,10.0),
                     args.legend = list(x = "bottomright"), angle = 45)

pander::pander(bp$barMatrix,caption = "RMSE",round = 3)
RMSE (continued below)
  BSWiMS LASSO RPART RF:BSWiMS F-Test t-Test
Linear Regression 51.03 51.09 53.97 53.54 49.68 56.33
Robust Regression 51.85 51.7 55.2 52.58 50.31 56.33
Ridge Regression 50.84 50.95 52.22 53.59 49.67 56.33
LASSO 50.5 52.3 50.33 53.07 50.55 56.33
SVM 49.66 50.66 52.73 50.02 52.48 56.33
Random Forest 50.42 51.14 54.11 52.78 52.29 56.33
Table continues below
  W-Test Binomial Test Pearson Spearman Kendall
Linear Regression 56.33 56.33 52.85 52.38 52.37
Robust Regression 56.33 56.33 53.45 52.3 52.3
Ridge Regression 56.33 56.33 52.88 52.1 51.77
LASSO 56.33 56.33 52.69 52.48 52.09
SVM 56.33 56.33 50.14 52.16 52.91
Random Forest 56.33 56.33 52.52 52.44 52.92
  mRMR
Linear Regression 58.39
Robust Regression 62.11
Ridge Regression 58.25
LASSO 53.47
SVM 51.28
Random Forest 52.2
pander::pander(bp$ciTable,caption = "RMSE with 95%CI",round = 3)
RMSE with 95%CI
  RMSE lower upper
BSWiMS 51.03 43.85 61.06
BSWiMS 51.85 44.55 62.05
BSWiMS 50.84 43.68 60.84
BSWiMS 50.5 43.39 60.43
BSWiMS 49.66 42.67 59.42
BSWiMS 50.42 43.31 60.33
LASSO 51.09 43.89 61.13
LASSO 51.7 44.42 61.87
LASSO 50.95 43.77 60.96
LASSO 52.3 44.93 62.57
LASSO 50.66 43.53 60.62
LASSO 51.14 43.93 61.19
RPART 53.97 46.37 64.57
RPART 55.2 47.43 66.05
RPART 52.22 44.87 62.48
RPART 50.33 43.24 60.22
RPART 52.73 45.3 63.1
RPART 54.11 46.49 64.74
RF:BSWiMS 53.54 46 64.07
RF:BSWiMS 52.58 45.18 62.92
RF:BSWiMS 53.59 46.04 64.12
RF:BSWiMS 53.07 45.6 63.5
RF:BSWiMS 50.02 42.97 59.85
RF:BSWiMS 52.78 45.34 63.15
F-Test 49.68 42.68 59.44
F-Test 50.31 43.23 60.2
F-Test 49.67 42.67 59.43
F-Test 50.55 43.43 60.48
F-Test 52.48 45.09 62.79
F-Test 52.29 44.92 62.56
t-Test 56.33 48.4 67.4
t-Test 56.33 48.4 67.4
t-Test 56.33 48.4 67.4
t-Test 56.33 48.4 67.4
t-Test 56.33 48.4 67.4
t-Test 56.33 48.4 67.4
W-Test 56.33 48.4 67.4
W-Test 56.33 48.4 67.4
W-Test 56.33 48.4 67.4
W-Test 56.33 48.4 67.4
W-Test 56.33 48.4 67.4
W-Test 56.33 48.4 67.4
Binomial Test 56.33 48.4 67.4
Binomial Test 56.33 48.4 67.4
Binomial Test 56.33 48.4 67.4
Binomial Test 56.33 48.4 67.4
Binomial Test 56.33 48.4 67.4
Binomial Test 56.33 48.4 67.4
Pearson 52.85 45.4 63.23
Pearson 53.45 45.92 63.95
Pearson 52.88 45.43 63.27
Pearson 52.69 45.27 63.05
Pearson 50.14 43.08 60
Pearson 52.52 45.12 62.85
Spearman 52.38 45.01 62.68
Spearman 52.3 44.93 62.58
Spearman 52.1 44.76 62.34
Spearman 52.48 45.09 62.8
Spearman 52.16 44.81 62.41
Spearman 52.44 45.05 62.74
Kendall 52.37 45 62.66
Kendall 52.3 44.94 62.58
Kendall 51.77 44.48 61.94
Kendall 52.09 44.75 62.32
Kendall 52.91 45.45 63.3
Kendall 52.92 45.47 63.32
mRMR 58.39 50.17 69.86
mRMR 62.11 53.36 74.31
mRMR 58.25 50.04 69.7
mRMR 53.47 45.94 63.97
mRMR 51.28 44.05 61.36
mRMR 52.2 44.85 62.46

  

bp <- barPlotCiError(as.matrix(cp$BiasTable_filter), metricname = "BIAS",
                     thesets = cp$theFiltersets, themethod = cp$theRegressMethod,
                     main = "BIAS", offsets = c(0.5,5),
                     args.legend = list(x = "topleft"), angle = 45)

pander::pander(bp$barMatrix,caption = "BIAS",round = 3)
BIAS (continued below)
  BSWiMS LASSO RPART RF:BSWiMS F-Test t-Test
Linear Regression -0.209 0.316 3.051 -0.94 0.16 -0.058
Robust Regression 0.921 1.2 3.51 -1.454 1.613 -0.058
Ridge Regression 0.019 0.154 3.016 -0.541 0.012 -0.058
LASSO 0.079 0.621 1.878 -0.824 -0.928 -0.058
SVM -0.671 -1.691 1.035 0.479 -2.427 -0.058
Random Forest -1.52 -0.5 0.359 0.182 -1.843 -0.058
Table continues below
  W-Test Binomial Test Pearson Spearman Kendall
Linear Regression -0.058 -0.058 0.047 -0.957 0.627
Robust Regression -0.058 -0.058 1.482 0.635 1.957
Ridge Regression -0.058 -0.058 0.071 -0.965 0.097
LASSO -0.058 -0.058 -0.087 -0.968 -0.296
SVM -0.058 -0.058 2.567 0.967 0.82
Random Forest -0.058 -0.058 -0.252 -0.721 -1.711
  mRMR
Linear Regression 3.348
Robust Regression 4.06
Ridge Regression 3.316
LASSO 0.647
SVM -3.262
Random Forest 0.166
pander::pander(bp$ciTable,caption = "BIAS with 95%CI",round = 3)
BIAS with 95%CI
  BIAS lower upper
BSWiMS -0.209 -12.29 11.87
BSWiMS 0.921 -11.35 13.19
BSWiMS 0.019 -12.02 12.05
BSWiMS 0.079 -11.88 12.03
BSWiMS -0.671 -12.43 11.08
BSWiMS -1.52 -13.45 10.41
LASSO 0.316 -11.78 12.41
LASSO 1.2 -11.04 13.44
LASSO 0.154 -11.91 12.21
LASSO 0.621 -11.76 13
LASSO -1.691 -13.68 10.29
LASSO -0.5 -12.6 11.6
RPART 3.051 -9.703 15.8
RPART 3.51 -9.53 16.55
RPART 3.016 -9.325 15.36
RPART 1.878 -10.03 13.78
RPART 1.035 -11.44 13.51
RPART 0.359 -12.45 13.16
RF:BSWiMS -0.94 -13.61 11.73
RF:BSWiMS -1.454 -13.9 10.99
RF:BSWiMS -0.541 -13.22 12.14
RF:BSWiMS -0.824 -13.38 11.74
RF:BSWiMS 0.479 -11.36 12.32
RF:BSWiMS 0.182 -12.31 12.67
F-Test 0.16 -11.6 11.92
F-Test 1.613 -10.29 13.52
F-Test 0.012 -11.74 11.77
F-Test -0.928 -12.89 11.04
F-Test -2.427 -14.84 9.981
F-Test -1.843 -14.21 10.53
t-Test -0.058 -13.39 13.28
t-Test -0.058 -13.39 13.28
t-Test -0.058 -13.39 13.28
t-Test -0.058 -13.39 13.28
t-Test -0.058 -13.39 13.28
t-Test -0.058 -13.39 13.28
W-Test -0.058 -13.39 13.28
W-Test -0.058 -13.39 13.28
W-Test -0.058 -13.39 13.28
W-Test -0.058 -13.39 13.28
W-Test -0.058 -13.39 13.28
W-Test -0.058 -13.39 13.28
Binomial Test -0.058 -13.39 13.28
Binomial Test -0.058 -13.39 13.28
Binomial Test -0.058 -13.39 13.28
Binomial Test -0.058 -13.39 13.28
Binomial Test -0.058 -13.39 13.28
Binomial Test -0.058 -13.39 13.28
Pearson 0.047 -12.46 12.56
Pearson 1.482 -11.16 14.13
Pearson 0.071 -12.45 12.59
Pearson -0.087 -12.56 12.38
Pearson 2.567 -9.287 14.42
Pearson -0.252 -12.68 12.18
Spearman -0.957 -13.35 11.44
Spearman 0.635 -11.74 13.01
Spearman -0.965 -13.29 11.36
Spearman -0.968 -13.39 11.45
Spearman 0.967 -11.38 13.31
Spearman -0.721 -13.13 11.69
Kendall 0.627 -11.77 13.02
Kendall 1.957 -10.41 14.33
Kendall 0.097 -12.16 12.35
Kendall -0.296 -12.62 12.03
Kendall 0.82 -11.7 13.34
Kendall -1.711 -14.23 10.81
mRMR 3.348 -10.45 17.15
mRMR 4.06 -10.61 18.73
mRMR 3.316 -10.45 17.08
mRMR 0.647 -12.01 13.3
mRMR -3.262 -15.38 8.851
mRMR 0.166 -12.19 12.52

1.3 Feature Analysis

#load(file = CVFileName)

# Collect the union of features selected (at least once) by each method's CV evaluation
ff <- names(cp$TheCVEvaluations$BSWIMS$featureFrequency)
ff <- c(ff,names(cp$TheCVEvaluations$LASSO$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$RPART$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$FRF$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$FT$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$TT$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$WT$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$BT$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$pearson$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$kendall$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$spearman$featureFrequency))
ff <- c(ff,names(cp$TheCVEvaluations$mRMR$featureFrequency))
ff <- unique(ff)

Nvar <- min(c(1000,length(ff)))
# Selection-frequency matrix: rows = candidate features, columns = filter methods
selFrequency <- matrix(0,nrow=Nvar,ncol=length(cp$theFiltersets))
rownames(selFrequency) <- names(cp$TheCVEvaluations$RF$featureFrequency)[1:Nvar]
selnames <- rownames(selFrequency)
colnames(selFrequency) <- cp$theFiltersets
ff <- cp$TheCVEvaluations$BSWIMS$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"BSWiMS"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$LASSO$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"LASSO"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$RPART$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"RPART"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$FRF$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"RF:BSWiMS"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$FT$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"F-Test"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$TT$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"t-Test"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$WT$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"W-Test"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$BT$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"Binomial Test"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$pearson$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"Pearson"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$kendall$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"Kendall"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$spearman$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"Spearman"] <- ff[selnames[fnames]]
ff <- cp$TheCVEvaluations$mRMR$featureFrequency
fnames <- selnames %in% names(ff)
selFrequency[fnames,"mRMR"] <- ff[selnames[fnames]]
selFrequency <- selFrequency/reps            # counts -> selection frequency per repetition
rm <- rowMeans(selFrequency)
selFrequency <- selFrequency[rm > 0.02,]     # keep features with mean selection frequency above 2%
gplots::heatmap.2(selFrequency,trace="none",mar=c(10,10),main="Features",cexRow = 0.6)
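The twelve per-filter assignments above can be collapsed into a single loop. This is an equivalent sketch, shown for reference only (it should replace the assignments before the normalization step, not be run after it); the evaluation-to-column mapping is copied from the chunk above:

evalToColumn <- c(BSWIMS = "BSWiMS", LASSO = "LASSO", RPART = "RPART",
                  FRF = "RF:BSWiMS", FT = "F-Test", TT = "t-Test",
                  WT = "W-Test", BT = "Binomial Test", pearson = "Pearson",
                  kendall = "Kendall", spearman = "Spearman", mRMR = "mRMR")
for (ev in names(evalToColumn)) {
  ff <- cp$TheCVEvaluations[[ev]]$featureFrequency
  fnames <- selnames %in% names(ff)
  selFrequency[fnames, evalToColumn[ev]] <- ff[selnames[fnames]]
}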


pander::pander(summary(MODELFRESA$BSWiMS.model),caption = "Model",round = 3)
  • coefficients:

    Table continues below
      Estimate lower mean upper
    D_CC_LH_3_ICDF_0.99 -0.5748 -0.7162 -0.5748 -0.4334
    D_CC_HL_4_ICDF_0.999 -0.06073 -0.06823 -0.06073 -0.05323
    S_CC_GLCM_4_Inertia -5.84 -8.24 -5.84 -3.44
    S_CC_FRACTAL_ICDF_0.95 0.1345 0.1272 0.1345 0.1418
    D_CC_LH_3_ICDF_0.999 -0.04685 -0.04773 -0.04685 -0.04597
    S_CC_FRACTAL_ICDF_0.75 0.007493 0.003408 0.007493 0.01158
    S_CC_LH_2_Mean -0.172 -0.2941 -0.172 -0.04986
    S_CC_FRACTAL_Mean 0.01165 0.005211 0.01165 0.01809
    S_CC_FRACTAL_ICDF_0.25 0.001734 0.0006536 0.001734 0.002815
    Table continues below
      u.MSE r.MSE model.MSE NeRI F.pvalue
    D_CC_LH_3_ICDF_0.99 2443 2140 1594 0.2113 2.677e-06
    D_CC_HL_4_ICDF_0.999 2750 1998 1594 0.07042 3.226e-05
    S_CC_GLCM_4_Inertia 2684 2144 1553 0.4085 9.27e-07
    S_CC_FRACTAL_ICDF_0.95 2422 2304 1820 0.3114 1.997e-05
    D_CC_LH_3_ICDF_0.999 2503 2456 2090 0.145 0.0003702
    S_CC_FRACTAL_ICDF_0.75 2483 2844 2360 0.167 0.0001565
    S_CC_LH_2_Mean 2586 2508 2251 0.1268 0.002861
    S_CC_FRACTAL_Mean 2473 2627 2193 0.1956 0.0001926
    S_CC_FRACTAL_ICDF_0.25 2535 2743 2375 0.1549 0.0007834
      t.pvalue Sign.pvalue Wilcox.pvalue
    D_CC_LH_3_ICDF_0.99 0.002706 0.02541 0.00595
    D_CC_HL_4_ICDF_0.999 0.01312 0.3177 0.01647
    S_CC_GLCM_4_Inertia 0.0003231 0.0003834 0.0001619
    S_CC_FRACTAL_ICDF_0.95 0.004744 0.002917 0.004736
    D_CC_LH_3_ICDF_0.999 0.02278 0.1301 0.03753
    S_CC_FRACTAL_ICDF_0.75 0.02969 0.09671 0.01799
    S_CC_LH_2_Mean 0.07385 0.1681 0.06131
    S_CC_FRACTAL_Mean 0.0283 0.06167 0.01766
    S_CC_FRACTAL_ICDF_0.25 0.05441 0.112 0.03928
  • MSE: 1494
  • R2: 0.5183
  • bootstrap:

# Scale factor: number of bagged B:SWiMS formulas relative to the requested repetitions
gain <- length(MODELFRESA$BSWiMS.models$formula.list)/bswimsReps
gplots::heatmap.2(gain*MODELFRESA$BSWiMS.models$bagging$formulaNetwork,trace="none",mar=c(10,10),main="B:SWiMS Formula Network")