# Arithmetic Operators
2+2
## [1] 4
3*3
## [1] 9
4/5
## [1] 0.8
x = 3
x
## [1] 3
y <- 4
y
## [1] 4
a <- "hello"
a
## [1] "hello"
# comments are not executed

0/0
## [1] NaN
# NA denotes a missing value
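# a minimal sketch of how NA behaves; v is an illustrative vector, not from the notes
v <- c(1, NA, 3)
v + 1                  # NA propagates through arithmetic
## [1]  2 NA  4
is.na(v)               # test which elements are missing
## [1] FALSE  TRUE FALSE
mean(v)                # NA unless missing values are dropped
## [1] NA
mean(v, na.rm = TRUE)
## [1] 2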

# Shared data is saved under data on the C drive.
data("airquality")

df<-airquality
df
##     Ozone Solar.R Wind Temp Month Day
## 1      41     190  7.4   67     5   1
## 2      36     118  8.0   72     5   2
## 3      12     149 12.6   74     5   3
## 4      18     313 11.5   62     5   4
## 5      NA      NA 14.3   56     5   5
## 6      28      NA 14.9   66     5   6
## 7      23     299  8.6   65     5   7
## 8      19      99 13.8   59     5   8
## 9       8      19 20.1   61     5   9
## 10     NA     194  8.6   69     5  10
## 11      7      NA  6.9   74     5  11
## 12     16     256  9.7   69     5  12
## 13     11     290  9.2   66     5  13
## 14     14     274 10.9   68     5  14
## 15     18      65 13.2   58     5  15
## 16     14     334 11.5   64     5  16
## 17     34     307 12.0   66     5  17
## 18      6      78 18.4   57     5  18
## 19     30     322 11.5   68     5  19
## 20     11      44  9.7   62     5  20
## 21      1       8  9.7   59     5  21
## 22     11     320 16.6   73     5  22
## 23      4      25  9.7   61     5  23
## 24     32      92 12.0   61     5  24
## 25     NA      66 16.6   57     5  25
## 26     NA     266 14.9   58     5  26
## 27     NA      NA  8.0   57     5  27
## 28     23      13 12.0   67     5  28
## 29     45     252 14.9   81     5  29
## 30    115     223  5.7   79     5  30
## 31     37     279  7.4   76     5  31
## 32     NA     286  8.6   78     6   1
## 33     NA     287  9.7   74     6   2
## 34     NA     242 16.1   67     6   3
## 35     NA     186  9.2   84     6   4
## 36     NA     220  8.6   85     6   5
## 37     NA     264 14.3   79     6   6
## 38     29     127  9.7   82     6   7
## 39     NA     273  6.9   87     6   8
## 40     71     291 13.8   90     6   9
## 41     39     323 11.5   87     6  10
## 42     NA     259 10.9   93     6  11
## 43     NA     250  9.2   92     6  12
## 44     23     148  8.0   82     6  13
## 45     NA     332 13.8   80     6  14
## 46     NA     322 11.5   79     6  15
## 47     21     191 14.9   77     6  16
## 48     37     284 20.7   72     6  17
## 49     20      37  9.2   65     6  18
## 50     12     120 11.5   73     6  19
## 51     13     137 10.3   76     6  20
## 52     NA     150  6.3   77     6  21
## 53     NA      59  1.7   76     6  22
## 54     NA      91  4.6   76     6  23
## 55     NA     250  6.3   76     6  24
## 56     NA     135  8.0   75     6  25
## 57     NA     127  8.0   78     6  26
## 58     NA      47 10.3   73     6  27
## 59     NA      98 11.5   80     6  28
## 60     NA      31 14.9   77     6  29
## 61     NA     138  8.0   83     6  30
## 62    135     269  4.1   84     7   1
## 63     49     248  9.2   85     7   2
## 64     32     236  9.2   81     7   3
## 65     NA     101 10.9   84     7   4
## 66     64     175  4.6   83     7   5
## 67     40     314 10.9   83     7   6
## 68     77     276  5.1   88     7   7
## 69     97     267  6.3   92     7   8
## 70     97     272  5.7   92     7   9
## 71     85     175  7.4   89     7  10
## 72     NA     139  8.6   82     7  11
## 73     10     264 14.3   73     7  12
## 74     27     175 14.9   81     7  13
## 75     NA     291 14.9   91     7  14
## 76      7      48 14.3   80     7  15
## 77     48     260  6.9   81     7  16
## 78     35     274 10.3   82     7  17
## 79     61     285  6.3   84     7  18
## 80     79     187  5.1   87     7  19
## 81     63     220 11.5   85     7  20
## 82     16       7  6.9   74     7  21
## 83     NA     258  9.7   81     7  22
## 84     NA     295 11.5   82     7  23
## 85     80     294  8.6   86     7  24
## 86    108     223  8.0   85     7  25
## 87     20      81  8.6   82     7  26
## 88     52      82 12.0   86     7  27
## 89     82     213  7.4   88     7  28
## 90     50     275  7.4   86     7  29
## 91     64     253  7.4   83     7  30
## 92     59     254  9.2   81     7  31
## 93     39      83  6.9   81     8   1
## 94      9      24 13.8   81     8   2
## 95     16      77  7.4   82     8   3
## 96     78      NA  6.9   86     8   4
## 97     35      NA  7.4   85     8   5
## 98     66      NA  4.6   87     8   6
## 99    122     255  4.0   89     8   7
## 100    89     229 10.3   90     8   8
## 101   110     207  8.0   90     8   9
## 102    NA     222  8.6   92     8  10
## 103    NA     137 11.5   86     8  11
## 104    44     192 11.5   86     8  12
## 105    28     273 11.5   82     8  13
## 106    65     157  9.7   80     8  14
## 107    NA      64 11.5   79     8  15
## 108    22      71 10.3   77     8  16
## 109    59      51  6.3   79     8  17
## 110    23     115  7.4   76     8  18
## 111    31     244 10.9   78     8  19
## 112    44     190 10.3   78     8  20
## 113    21     259 15.5   77     8  21
## 114     9      36 14.3   72     8  22
## 115    NA     255 12.6   75     8  23
## 116    45     212  9.7   79     8  24
## 117   168     238  3.4   81     8  25
## 118    73     215  8.0   86     8  26
## 119    NA     153  5.7   88     8  27
## 120    76     203  9.7   97     8  28
## 121   118     225  2.3   94     8  29
## 122    84     237  6.3   96     8  30
## 123    85     188  6.3   94     8  31
## 124    96     167  6.9   91     9   1
## 125    78     197  5.1   92     9   2
## 126    73     183  2.8   93     9   3
## 127    91     189  4.6   93     9   4
## 128    47      95  7.4   87     9   5
## 129    32      92 15.5   84     9   6
## 130    20     252 10.9   80     9   7
## 131    23     220 10.3   78     9   8
## 132    21     230 10.9   75     9   9
## 133    24     259  9.7   73     9  10
## 134    44     236 14.9   81     9  11
## 135    21     259 15.5   76     9  12
## 136    28     238  6.3   77     9  13
## 137     9      24 10.9   71     9  14
## 138    13     112 11.5   71     9  15
## 139    46     237  6.9   78     9  16
## 140    18     224 13.8   67     9  17
## 141    13      27 10.3   76     9  18
## 142    24     238 10.3   68     9  19
## 143    16     201  8.0   82     9  20
## 144    13     238 12.6   64     9  21
## 145    23      14  9.2   71     9  22
## 146    36     139 10.3   81     9  23
## 147     7      49 10.3   69     9  24
## 148    14      20 16.6   63     9  25
## 149    30     193  6.9   70     9  26
## 150    NA     145 13.2   77     9  27
## 151    14     191 14.3   75     9  28
## 152    18     131  8.0   76     9  29
## 153    20     223 11.5   68     9  30
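
# a short sketch of dealing with the NAs visible above; df is still airquality here
sum(is.na(df$Ozone))           # count the missing Ozone readings
## [1] 37
mean(df$Ozone, na.rm = TRUE)   # drop NAs before averaging
## [1] 42.12931
nrow(df[complete.cases(df), ]) # rows with no missing values at all
## [1] 111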

getwd()
setwd("C:/Users/super/OneDrive/문서/01 Study/ADsP")

getwd()

a<-c(1,2,3)
mean(a)
max(a)
min(a)
median(a)

install.packages("ggplot2")

library(ggplot2)

data("diamonds")
df<-diamonds
df
## # A tibble: 53,940 × 10
##    carat cut       color clarity depth table price     x     y     z
##    <dbl> <ord>     <ord> <ord>   <dbl> <dbl> <int> <dbl> <dbl> <dbl>
##  1  0.23 Ideal     E     SI2      61.5    55   326  3.95  3.98  2.43
##  2  0.21 Premium   E     SI1      59.8    61   326  3.89  3.84  2.31
##  3  0.23 Good      E     VS1      56.9    65   327  4.05  4.07  2.31
##  4  0.29 Premium   I     VS2      62.4    58   334  4.2   4.23  2.63
##  5  0.31 Good      J     SI2      63.3    58   335  4.34  4.35  2.75
##  6  0.24 Very Good J     VVS2     62.8    57   336  3.94  3.96  2.48
##  7  0.24 Very Good I     VVS1     62.3    57   336  3.95  3.98  2.47
##  8  0.26 Very Good H     SI1      61.9    55   337  4.07  4.11  2.53
##  9  0.22 Fair      E     VS2      65.1    61   337  3.87  3.78  2.49
## 10  0.23 Very Good H     VS1      59.4    61   338  4     4.05  2.39
## # ℹ 53,930 more rows
x<-c(1,2,3)

mode(x)
## [1] "numeric"
is.numeric(x)
## [1] TRUE
is.logical(x)
## [1] FALSE
is.character(x)
## [1] FALSE
length(x)
## [1] 3
1:5
## [1] 1 2 3 4 5
5:1
## [1] 5 4 3 2 1
a<-c(1,3,1,3)
a
## [1] 1 3 1 3
class(a)
## [1] "numeric"
a*3
## [1] 3 9 3 9
b <- factor(a)
b
## [1] 1 3 1 3
## Levels: 1 3
class(b)
## [1] "factor"
levels(b)
## [1] "1" "3"
b*3
## Warning in Ops.factor(b, 3): '*' not meaningful for factors
## [1] NA NA NA NA
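# a small sketch: factor levels that look numeric must go through as.character,
# since as.numeric() on a factor returns the internal level codes
as.numeric(b)
## [1] 1 2 1 2
as.numeric(as.character(b))
## [1] 1 3 1 3
as.numeric(as.character(b))*3
## [1] 3 9 3 9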
x <- c("a","b","c")

x[1]
## [1] "a"
x[-1]
## [1] "b" "c"
x[c(2,3)]
## [1] "b" "c"
x[-2]
## [1] "a" "c"
c(2,3)
## [1] 2 3
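# a brief sketch of logical indexing on the same vector x
x != "b"      # a comparison returns a logical vector
## [1]  TRUE FALSE  TRUE
x[x != "b"]   # keep only the elements where the condition is TRUE
## [1] "a" "c"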
gender<-c("male","female","male")
bloodtype<-c("AB","O","B")
height<-c(170,175,165)
weight<-c(70,65,55)
df<-data.frame(gender, bloodtype, height, weight)
df
##   gender bloodtype height weight
## 1   male        AB    170     70
## 2 female         O    175     65
## 3   male         B    165     55
class(df)
## [1] "data.frame"
car<-c("kia","bmw","toyota")
df2<-data.frame(df,car) # add the car column
df2
##   gender bloodtype height weight    car
## 1   male        AB    170     70    kia
## 2 female         O    175     65    bmw
## 3   male         B    165     55 toyota
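# a small sketch: a new column can also be derived from existing ones
# (bmi is an illustrative name, not part of the original notes)
df2$bmi <- df2$weight/(df2$height/100)^2 # BMI = kg / m^2
round(df2$bmi, 1)
## [1] 24.2 21.2 20.2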
data(iris)
head(iris)
##   Sepal.Length Sepal.Width Petal.Length Petal.Width Species
## 1          5.1         3.5          1.4         0.2  setosa
## 2          4.9         3.0          1.4         0.2  setosa
## 3          4.7         3.2          1.3         0.2  setosa
## 4          4.6         3.1          1.5         0.2  setosa
## 5          5.0         3.6          1.4         0.2  setosa
## 6          5.4         3.9          1.7         0.4  setosa
str(iris)
## 'data.frame':    150 obs. of  5 variables:
##  $ Sepal.Length: num  5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
##  $ Sepal.Width : num  3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
##  $ Petal.Length: num  1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
##  $ Petal.Width : num  0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
##  $ Species     : Factor w/ 3 levels "setosa","versicolor",..: 1 1 1 1 1 1 1 1 1 1 ...
df<-iris
df
##     Sepal.Length Sepal.Width Petal.Length Petal.Width    Species
## 1            5.1         3.5          1.4         0.2     setosa
## 2            4.9         3.0          1.4         0.2     setosa
## 3            4.7         3.2          1.3         0.2     setosa
## 4            4.6         3.1          1.5         0.2     setosa
## 5            5.0         3.6          1.4         0.2     setosa
## 6            5.4         3.9          1.7         0.4     setosa
## 7            4.6         3.4          1.4         0.3     setosa
## 8            5.0         3.4          1.5         0.2     setosa
## 9            4.4         2.9          1.4         0.2     setosa
## 10           4.9         3.1          1.5         0.1     setosa
## 11           5.4         3.7          1.5         0.2     setosa
## 12           4.8         3.4          1.6         0.2     setosa
## 13           4.8         3.0          1.4         0.1     setosa
## 14           4.3         3.0          1.1         0.1     setosa
## 15           5.8         4.0          1.2         0.2     setosa
## 16           5.7         4.4          1.5         0.4     setosa
## 17           5.4         3.9          1.3         0.4     setosa
## 18           5.1         3.5          1.4         0.3     setosa
## 19           5.7         3.8          1.7         0.3     setosa
## 20           5.1         3.8          1.5         0.3     setosa
## 21           5.4         3.4          1.7         0.2     setosa
## 22           5.1         3.7          1.5         0.4     setosa
## 23           4.6         3.6          1.0         0.2     setosa
## 24           5.1         3.3          1.7         0.5     setosa
## 25           4.8         3.4          1.9         0.2     setosa
## 26           5.0         3.0          1.6         0.2     setosa
## 27           5.0         3.4          1.6         0.4     setosa
## 28           5.2         3.5          1.5         0.2     setosa
## 29           5.2         3.4          1.4         0.2     setosa
## 30           4.7         3.2          1.6         0.2     setosa
## 31           4.8         3.1          1.6         0.2     setosa
## 32           5.4         3.4          1.5         0.4     setosa
## 33           5.2         4.1          1.5         0.1     setosa
## 34           5.5         4.2          1.4         0.2     setosa
## 35           4.9         3.1          1.5         0.2     setosa
## 36           5.0         3.2          1.2         0.2     setosa
## 37           5.5         3.5          1.3         0.2     setosa
## 38           4.9         3.6          1.4         0.1     setosa
## 39           4.4         3.0          1.3         0.2     setosa
## 40           5.1         3.4          1.5         0.2     setosa
## 41           5.0         3.5          1.3         0.3     setosa
## 42           4.5         2.3          1.3         0.3     setosa
## 43           4.4         3.2          1.3         0.2     setosa
## 44           5.0         3.5          1.6         0.6     setosa
## 45           5.1         3.8          1.9         0.4     setosa
## 46           4.8         3.0          1.4         0.3     setosa
## 47           5.1         3.8          1.6         0.2     setosa
## 48           4.6         3.2          1.4         0.2     setosa
## 49           5.3         3.7          1.5         0.2     setosa
## 50           5.0         3.3          1.4         0.2     setosa
## 51           7.0         3.2          4.7         1.4 versicolor
## 52           6.4         3.2          4.5         1.5 versicolor
## 53           6.9         3.1          4.9         1.5 versicolor
## 54           5.5         2.3          4.0         1.3 versicolor
## 55           6.5         2.8          4.6         1.5 versicolor
## 56           5.7         2.8          4.5         1.3 versicolor
## 57           6.3         3.3          4.7         1.6 versicolor
## 58           4.9         2.4          3.3         1.0 versicolor
## 59           6.6         2.9          4.6         1.3 versicolor
## 60           5.2         2.7          3.9         1.4 versicolor
## 61           5.0         2.0          3.5         1.0 versicolor
## 62           5.9         3.0          4.2         1.5 versicolor
## 63           6.0         2.2          4.0         1.0 versicolor
## 64           6.1         2.9          4.7         1.4 versicolor
## 65           5.6         2.9          3.6         1.3 versicolor
## 66           6.7         3.1          4.4         1.4 versicolor
## 67           5.6         3.0          4.5         1.5 versicolor
## 68           5.8         2.7          4.1         1.0 versicolor
## 69           6.2         2.2          4.5         1.5 versicolor
## 70           5.6         2.5          3.9         1.1 versicolor
## 71           5.9         3.2          4.8         1.8 versicolor
## 72           6.1         2.8          4.0         1.3 versicolor
## 73           6.3         2.5          4.9         1.5 versicolor
## 74           6.1         2.8          4.7         1.2 versicolor
## 75           6.4         2.9          4.3         1.3 versicolor
## 76           6.6         3.0          4.4         1.4 versicolor
## 77           6.8         2.8          4.8         1.4 versicolor
## 78           6.7         3.0          5.0         1.7 versicolor
## 79           6.0         2.9          4.5         1.5 versicolor
## 80           5.7         2.6          3.5         1.0 versicolor
## 81           5.5         2.4          3.8         1.1 versicolor
## 82           5.5         2.4          3.7         1.0 versicolor
## 83           5.8         2.7          3.9         1.2 versicolor
## 84           6.0         2.7          5.1         1.6 versicolor
## 85           5.4         3.0          4.5         1.5 versicolor
## 86           6.0         3.4          4.5         1.6 versicolor
## 87           6.7         3.1          4.7         1.5 versicolor
## 88           6.3         2.3          4.4         1.3 versicolor
## 89           5.6         3.0          4.1         1.3 versicolor
## 90           5.5         2.5          4.0         1.3 versicolor
## 91           5.5         2.6          4.4         1.2 versicolor
## 92           6.1         3.0          4.6         1.4 versicolor
## 93           5.8         2.6          4.0         1.2 versicolor
## 94           5.0         2.3          3.3         1.0 versicolor
## 95           5.6         2.7          4.2         1.3 versicolor
## 96           5.7         3.0          4.2         1.2 versicolor
## 97           5.7         2.9          4.2         1.3 versicolor
## 98           6.2         2.9          4.3         1.3 versicolor
## 99           5.1         2.5          3.0         1.1 versicolor
## 100          5.7         2.8          4.1         1.3 versicolor
## 101          6.3         3.3          6.0         2.5  virginica
## 102          5.8         2.7          5.1         1.9  virginica
## 103          7.1         3.0          5.9         2.1  virginica
## 104          6.3         2.9          5.6         1.8  virginica
## 105          6.5         3.0          5.8         2.2  virginica
## 106          7.6         3.0          6.6         2.1  virginica
## 107          4.9         2.5          4.5         1.7  virginica
## 108          7.3         2.9          6.3         1.8  virginica
## 109          6.7         2.5          5.8         1.8  virginica
## 110          7.2         3.6          6.1         2.5  virginica
## 111          6.5         3.2          5.1         2.0  virginica
## 112          6.4         2.7          5.3         1.9  virginica
## 113          6.8         3.0          5.5         2.1  virginica
## 114          5.7         2.5          5.0         2.0  virginica
## 115          5.8         2.8          5.1         2.4  virginica
## 116          6.4         3.2          5.3         2.3  virginica
## 117          6.5         3.0          5.5         1.8  virginica
## 118          7.7         3.8          6.7         2.2  virginica
## 119          7.7         2.6          6.9         2.3  virginica
## 120          6.0         2.2          5.0         1.5  virginica
## 121          6.9         3.2          5.7         2.3  virginica
## 122          5.6         2.8          4.9         2.0  virginica
## 123          7.7         2.8          6.7         2.0  virginica
## 124          6.3         2.7          4.9         1.8  virginica
## 125          6.7         3.3          5.7         2.1  virginica
## 126          7.2         3.2          6.0         1.8  virginica
## 127          6.2         2.8          4.8         1.8  virginica
## 128          6.1         3.0          4.9         1.8  virginica
## 129          6.4         2.8          5.6         2.1  virginica
## 130          7.2         3.0          5.8         1.6  virginica
## 131          7.4         2.8          6.1         1.9  virginica
## 132          7.9         3.8          6.4         2.0  virginica
## 133          6.4         2.8          5.6         2.2  virginica
## 134          6.3         2.8          5.1         1.5  virginica
## 135          6.1         2.6          5.6         1.4  virginica
## 136          7.7         3.0          6.1         2.3  virginica
## 137          6.3         3.4          5.6         2.4  virginica
## 138          6.4         3.1          5.5         1.8  virginica
## 139          6.0         3.0          4.8         1.8  virginica
## 140          6.9         3.1          5.4         2.1  virginica
## 141          6.7         3.1          5.6         2.4  virginica
## 142          6.9         3.1          5.1         2.3  virginica
## 143          5.8         2.7          5.1         1.9  virginica
## 144          6.8         3.2          5.9         2.3  virginica
## 145          6.7         3.3          5.7         2.5  virginica
## 146          6.7         3.0          5.2         2.3  virginica
## 147          6.3         2.5          5.0         1.9  virginica
## 148          6.5         3.0          5.2         2.0  virginica
## 149          6.2         3.4          5.4         2.3  virginica
## 150          5.9         3.0          5.1         1.8  virginica
iris[,c(1:2)] # extract columns 1 and 2
##     Sepal.Length Sepal.Width
## 1            5.1         3.5
## 2            4.9         3.0
## 3            4.7         3.2
## 4            4.6         3.1
## 5            5.0         3.6
## 6            5.4         3.9
## 7            4.6         3.4
## 8            5.0         3.4
## 9            4.4         2.9
## 10           4.9         3.1
## 11           5.4         3.7
## 12           4.8         3.4
## 13           4.8         3.0
## 14           4.3         3.0
## 15           5.8         4.0
## 16           5.7         4.4
## 17           5.4         3.9
## 18           5.1         3.5
## 19           5.7         3.8
## 20           5.1         3.8
## 21           5.4         3.4
## 22           5.1         3.7
## 23           4.6         3.6
## 24           5.1         3.3
## 25           4.8         3.4
## 26           5.0         3.0
## 27           5.0         3.4
## 28           5.2         3.5
## 29           5.2         3.4
## 30           4.7         3.2
## 31           4.8         3.1
## 32           5.4         3.4
## 33           5.2         4.1
## 34           5.5         4.2
## 35           4.9         3.1
## 36           5.0         3.2
## 37           5.5         3.5
## 38           4.9         3.6
## 39           4.4         3.0
## 40           5.1         3.4
## 41           5.0         3.5
## 42           4.5         2.3
## 43           4.4         3.2
## 44           5.0         3.5
## 45           5.1         3.8
## 46           4.8         3.0
## 47           5.1         3.8
## 48           4.6         3.2
## 49           5.3         3.7
## 50           5.0         3.3
## 51           7.0         3.2
## 52           6.4         3.2
## 53           6.9         3.1
## 54           5.5         2.3
## 55           6.5         2.8
## 56           5.7         2.8
## 57           6.3         3.3
## 58           4.9         2.4
## 59           6.6         2.9
## 60           5.2         2.7
## 61           5.0         2.0
## 62           5.9         3.0
## 63           6.0         2.2
## 64           6.1         2.9
## 65           5.6         2.9
## 66           6.7         3.1
## 67           5.6         3.0
## 68           5.8         2.7
## 69           6.2         2.2
## 70           5.6         2.5
## 71           5.9         3.2
## 72           6.1         2.8
## 73           6.3         2.5
## 74           6.1         2.8
## 75           6.4         2.9
## 76           6.6         3.0
## 77           6.8         2.8
## 78           6.7         3.0
## 79           6.0         2.9
## 80           5.7         2.6
## 81           5.5         2.4
## 82           5.5         2.4
## 83           5.8         2.7
## 84           6.0         2.7
## 85           5.4         3.0
## 86           6.0         3.4
## 87           6.7         3.1
## 88           6.3         2.3
## 89           5.6         3.0
## 90           5.5         2.5
## 91           5.5         2.6
## 92           6.1         3.0
## 93           5.8         2.6
## 94           5.0         2.3
## 95           5.6         2.7
## 96           5.7         3.0
## 97           5.7         2.9
## 98           6.2         2.9
## 99           5.1         2.5
## 100          5.7         2.8
## 101          6.3         3.3
## 102          5.8         2.7
## 103          7.1         3.0
## 104          6.3         2.9
## 105          6.5         3.0
## 106          7.6         3.0
## 107          4.9         2.5
## 108          7.3         2.9
## 109          6.7         2.5
## 110          7.2         3.6
## 111          6.5         3.2
## 112          6.4         2.7
## 113          6.8         3.0
## 114          5.7         2.5
## 115          5.8         2.8
## 116          6.4         3.2
## 117          6.5         3.0
## 118          7.7         3.8
## 119          7.7         2.6
## 120          6.0         2.2
## 121          6.9         3.2
## 122          5.6         2.8
## 123          7.7         2.8
## 124          6.3         2.7
## 125          6.7         3.3
## 126          7.2         3.2
## 127          6.2         2.8
## 128          6.1         3.0
## 129          6.4         2.8
## 130          7.2         3.0
## 131          7.4         2.8
## 132          7.9         3.8
## 133          6.4         2.8
## 134          6.3         2.8
## 135          6.1         2.6
## 136          7.7         3.0
## 137          6.3         3.4
## 138          6.4         3.1
## 139          6.0         3.0
## 140          6.9         3.1
## 141          6.7         3.1
## 142          6.9         3.1
## 143          5.8         2.7
## 144          6.8         3.2
## 145          6.7         3.3
## 146          6.7         3.0
## 147          6.3         2.5
## 148          6.5         3.0
## 149          6.2         3.4
## 150          5.9         3.0
iris[,c(1,3,5)] # extract columns 1, 3, and 5
##     Sepal.Length Petal.Length    Species
## 1            5.1          1.4     setosa
## 2            4.9          1.4     setosa
## 3            4.7          1.3     setosa
## 4            4.6          1.5     setosa
## 5            5.0          1.4     setosa
## 6            5.4          1.7     setosa
## 7            4.6          1.4     setosa
## 8            5.0          1.5     setosa
## 9            4.4          1.4     setosa
## 10           4.9          1.5     setosa
## 11           5.4          1.5     setosa
## 12           4.8          1.6     setosa
## 13           4.8          1.4     setosa
## 14           4.3          1.1     setosa
## 15           5.8          1.2     setosa
## 16           5.7          1.5     setosa
## 17           5.4          1.3     setosa
## 18           5.1          1.4     setosa
## 19           5.7          1.7     setosa
## 20           5.1          1.5     setosa
## 21           5.4          1.7     setosa
## 22           5.1          1.5     setosa
## 23           4.6          1.0     setosa
## 24           5.1          1.7     setosa
## 25           4.8          1.9     setosa
## 26           5.0          1.6     setosa
## 27           5.0          1.6     setosa
## 28           5.2          1.5     setosa
## 29           5.2          1.4     setosa
## 30           4.7          1.6     setosa
## 31           4.8          1.6     setosa
## 32           5.4          1.5     setosa
## 33           5.2          1.5     setosa
## 34           5.5          1.4     setosa
## 35           4.9          1.5     setosa
## 36           5.0          1.2     setosa
## 37           5.5          1.3     setosa
## 38           4.9          1.4     setosa
## 39           4.4          1.3     setosa
## 40           5.1          1.5     setosa
## 41           5.0          1.3     setosa
## 42           4.5          1.3     setosa
## 43           4.4          1.3     setosa
## 44           5.0          1.6     setosa
## 45           5.1          1.9     setosa
## 46           4.8          1.4     setosa
## 47           5.1          1.6     setosa
## 48           4.6          1.4     setosa
## 49           5.3          1.5     setosa
## 50           5.0          1.4     setosa
## 51           7.0          4.7 versicolor
## 52           6.4          4.5 versicolor
## 53           6.9          4.9 versicolor
## 54           5.5          4.0 versicolor
## 55           6.5          4.6 versicolor
## 56           5.7          4.5 versicolor
## 57           6.3          4.7 versicolor
## 58           4.9          3.3 versicolor
## 59           6.6          4.6 versicolor
## 60           5.2          3.9 versicolor
## 61           5.0          3.5 versicolor
## 62           5.9          4.2 versicolor
## 63           6.0          4.0 versicolor
## 64           6.1          4.7 versicolor
## 65           5.6          3.6 versicolor
## 66           6.7          4.4 versicolor
## 67           5.6          4.5 versicolor
## 68           5.8          4.1 versicolor
## 69           6.2          4.5 versicolor
## 70           5.6          3.9 versicolor
## 71           5.9          4.8 versicolor
## 72           6.1          4.0 versicolor
## 73           6.3          4.9 versicolor
## 74           6.1          4.7 versicolor
## 75           6.4          4.3 versicolor
## 76           6.6          4.4 versicolor
## 77           6.8          4.8 versicolor
## 78           6.7          5.0 versicolor
## 79           6.0          4.5 versicolor
## 80           5.7          3.5 versicolor
## 81           5.5          3.8 versicolor
## 82           5.5          3.7 versicolor
## 83           5.8          3.9 versicolor
## 84           6.0          5.1 versicolor
## 85           5.4          4.5 versicolor
## 86           6.0          4.5 versicolor
## 87           6.7          4.7 versicolor
## 88           6.3          4.4 versicolor
## 89           5.6          4.1 versicolor
## 90           5.5          4.0 versicolor
## 91           5.5          4.4 versicolor
## 92           6.1          4.6 versicolor
## 93           5.8          4.0 versicolor
## 94           5.0          3.3 versicolor
## 95           5.6          4.2 versicolor
## 96           5.7          4.2 versicolor
## 97           5.7          4.2 versicolor
## 98           6.2          4.3 versicolor
## 99           5.1          3.0 versicolor
## 100          5.7          4.1 versicolor
## 101          6.3          6.0  virginica
## 102          5.8          5.1  virginica
## 103          7.1          5.9  virginica
## 104          6.3          5.6  virginica
## 105          6.5          5.8  virginica
## 106          7.6          6.6  virginica
## 107          4.9          4.5  virginica
## 108          7.3          6.3  virginica
## 109          6.7          5.8  virginica
## 110          7.2          6.1  virginica
## 111          6.5          5.1  virginica
## 112          6.4          5.3  virginica
## 113          6.8          5.5  virginica
## 114          5.7          5.0  virginica
## 115          5.8          5.1  virginica
## 116          6.4          5.3  virginica
## 117          6.5          5.5  virginica
## 118          7.7          6.7  virginica
## 119          7.7          6.9  virginica
## 120          6.0          5.0  virginica
## 121          6.9          5.7  virginica
## 122          5.6          4.9  virginica
## 123          7.7          6.7  virginica
## 124          6.3          4.9  virginica
## 125          6.7          5.7  virginica
## 126          7.2          6.0  virginica
## 127          6.2          4.8  virginica
## 128          6.1          4.9  virginica
## 129          6.4          5.6  virginica
## 130          7.2          5.8  virginica
## 131          7.4          6.1  virginica
## 132          7.9          6.4  virginica
## 133          6.4          5.6  virginica
## 134          6.3          5.1  virginica
## 135          6.1          5.6  virginica
## 136          7.7          6.1  virginica
## 137          6.3          5.6  virginica
## 138          6.4          5.5  virginica
## 139          6.0          4.8  virginica
## 140          6.9          5.4  virginica
## 141          6.7          5.6  virginica
## 142          6.9          5.1  virginica
## 143          5.8          5.1  virginica
## 144          6.8          5.9  virginica
## 145          6.7          5.7  virginica
## 146          6.7          5.2  virginica
## 147          6.3          5.0  virginica
## 148          6.5          5.2  virginica
## 149          6.2          5.4  virginica
## 150          5.9          5.1  virginica
iris[,c("Sepal.Length","Species")] # 1과 5열의 데이터를 추출한다.
##     Sepal.Length    Species
## 1            5.1     setosa
## 2            4.9     setosa
## 3            4.7     setosa
## 4            4.6     setosa
## 5            5.0     setosa
## 6            5.4     setosa
## 7            4.6     setosa
## 8            5.0     setosa
## 9            4.4     setosa
## 10           4.9     setosa
## 11           5.4     setosa
## 12           4.8     setosa
## 13           4.8     setosa
## 14           4.3     setosa
## 15           5.8     setosa
## 16           5.7     setosa
## 17           5.4     setosa
## 18           5.1     setosa
## 19           5.7     setosa
## 20           5.1     setosa
## 21           5.4     setosa
## 22           5.1     setosa
## 23           4.6     setosa
## 24           5.1     setosa
## 25           4.8     setosa
## 26           5.0     setosa
## 27           5.0     setosa
## 28           5.2     setosa
## 29           5.2     setosa
## 30           4.7     setosa
## 31           4.8     setosa
## 32           5.4     setosa
## 33           5.2     setosa
## 34           5.5     setosa
## 35           4.9     setosa
## 36           5.0     setosa
## 37           5.5     setosa
## 38           4.9     setosa
## 39           4.4     setosa
## 40           5.1     setosa
## 41           5.0     setosa
## 42           4.5     setosa
## 43           4.4     setosa
## 44           5.0     setosa
## 45           5.1     setosa
## 46           4.8     setosa
## 47           5.1     setosa
## 48           4.6     setosa
## 49           5.3     setosa
## 50           5.0     setosa
## 51           7.0 versicolor
## 52           6.4 versicolor
## 53           6.9 versicolor
## 54           5.5 versicolor
## 55           6.5 versicolor
## 56           5.7 versicolor
## 57           6.3 versicolor
## 58           4.9 versicolor
## 59           6.6 versicolor
## 60           5.2 versicolor
## 61           5.0 versicolor
## 62           5.9 versicolor
## 63           6.0 versicolor
## 64           6.1 versicolor
## 65           5.6 versicolor
## 66           6.7 versicolor
## 67           5.6 versicolor
## 68           5.8 versicolor
## 69           6.2 versicolor
## 70           5.6 versicolor
## 71           5.9 versicolor
## 72           6.1 versicolor
## 73           6.3 versicolor
## 74           6.1 versicolor
## 75           6.4 versicolor
## 76           6.6 versicolor
## 77           6.8 versicolor
## 78           6.7 versicolor
## 79           6.0 versicolor
## 80           5.7 versicolor
## 81           5.5 versicolor
## 82           5.5 versicolor
## 83           5.8 versicolor
## 84           6.0 versicolor
## 85           5.4 versicolor
## 86           6.0 versicolor
## 87           6.7 versicolor
## 88           6.3 versicolor
## 89           5.6 versicolor
## 90           5.5 versicolor
## 91           5.5 versicolor
## 92           6.1 versicolor
## 93           5.8 versicolor
## 94           5.0 versicolor
## 95           5.6 versicolor
## 96           5.7 versicolor
## 97           5.7 versicolor
## 98           6.2 versicolor
## 99           5.1 versicolor
## 100          5.7 versicolor
## 101          6.3  virginica
## 102          5.8  virginica
## 103          7.1  virginica
## 104          6.3  virginica
## 105          6.5  virginica
## 106          7.6  virginica
## 107          4.9  virginica
## 108          7.3  virginica
## 109          6.7  virginica
## 110          7.2  virginica
## 111          6.5  virginica
## 112          6.4  virginica
## 113          6.8  virginica
## 114          5.7  virginica
## 115          5.8  virginica
## 116          6.4  virginica
## 117          6.5  virginica
## 118          7.7  virginica
## 119          7.7  virginica
## 120          6.0  virginica
## 121          6.9  virginica
## 122          5.6  virginica
## 123          7.7  virginica
## 124          6.3  virginica
## 125          6.7  virginica
## 126          7.2  virginica
## 127          6.2  virginica
## 128          6.1  virginica
## 129          6.4  virginica
## 130          7.2  virginica
## 131          7.4  virginica
## 132          7.9  virginica
## 133          6.4  virginica
## 134          6.3  virginica
## 135          6.1  virginica
## 136          7.7  virginica
## 137          6.3  virginica
## 138          6.4  virginica
## 139          6.0  virginica
## 140          6.9  virginica
## 141          6.7  virginica
## 142          6.9  virginica
## 143          5.8  virginica
## 144          6.8  virginica
## 145          6.7  virginica
## 146          6.7  virginica
## 147          6.3  virginica
## 148          6.5  virginica
## 149          6.2  virginica
## 150          5.9  virginica
iris[1:5,] # extract rows 1 through 5
##   Sepal.Length Sepal.Width Petal.Length Petal.Width Species
## 1          5.1         3.5          1.4         0.2  setosa
## 2          4.9         3.0          1.4         0.2  setosa
## 3          4.7         3.2          1.3         0.2  setosa
## 4          4.6         3.1          1.5         0.2  setosa
## 5          5.0         3.6          1.4         0.2  setosa
iris[1:5,c(1,3)] # extract columns 1 and 3 of rows 1-5
##   Sepal.Length Petal.Length
## 1          5.1          1.4
## 2          4.9          1.4
## 3          4.7          1.3
## 4          4.6          1.5
## 5          5.0          1.4
iris[ ,-5] # extract everything except column 5
##     Sepal.Length Sepal.Width Petal.Length Petal.Width
## 1            5.1         3.5          1.4         0.2
## 2            4.9         3.0          1.4         0.2
## 3            4.7         3.2          1.3         0.2
## 4            4.6         3.1          1.5         0.2
## 5            5.0         3.6          1.4         0.2
## 6            5.4         3.9          1.7         0.4
## 7            4.6         3.4          1.4         0.3
## 8            5.0         3.4          1.5         0.2
## 9            4.4         2.9          1.4         0.2
## 10           4.9         3.1          1.5         0.1
## 11           5.4         3.7          1.5         0.2
## 12           4.8         3.4          1.6         0.2
## 13           4.8         3.0          1.4         0.1
## 14           4.3         3.0          1.1         0.1
## 15           5.8         4.0          1.2         0.2
## 16           5.7         4.4          1.5         0.4
## 17           5.4         3.9          1.3         0.4
## 18           5.1         3.5          1.4         0.3
## 19           5.7         3.8          1.7         0.3
## 20           5.1         3.8          1.5         0.3
## 21           5.4         3.4          1.7         0.2
## 22           5.1         3.7          1.5         0.4
## 23           4.6         3.6          1.0         0.2
## 24           5.1         3.3          1.7         0.5
## 25           4.8         3.4          1.9         0.2
## 26           5.0         3.0          1.6         0.2
## 27           5.0         3.4          1.6         0.4
## 28           5.2         3.5          1.5         0.2
## 29           5.2         3.4          1.4         0.2
## 30           4.7         3.2          1.6         0.2
## 31           4.8         3.1          1.6         0.2
## 32           5.4         3.4          1.5         0.4
## 33           5.2         4.1          1.5         0.1
## 34           5.5         4.2          1.4         0.2
## 35           4.9         3.1          1.5         0.2
## 36           5.0         3.2          1.2         0.2
## 37           5.5         3.5          1.3         0.2
## 38           4.9         3.6          1.4         0.1
## 39           4.4         3.0          1.3         0.2
## 40           5.1         3.4          1.5         0.2
## 41           5.0         3.5          1.3         0.3
## 42           4.5         2.3          1.3         0.3
## 43           4.4         3.2          1.3         0.2
## 44           5.0         3.5          1.6         0.6
## 45           5.1         3.8          1.9         0.4
## 46           4.8         3.0          1.4         0.3
## 47           5.1         3.8          1.6         0.2
## 48           4.6         3.2          1.4         0.2
## 49           5.3         3.7          1.5         0.2
## 50           5.0         3.3          1.4         0.2
## 51           7.0         3.2          4.7         1.4
## 52           6.4         3.2          4.5         1.5
## 53           6.9         3.1          4.9         1.5
## 54           5.5         2.3          4.0         1.3
## 55           6.5         2.8          4.6         1.5
## 56           5.7         2.8          4.5         1.3
## 57           6.3         3.3          4.7         1.6
## 58           4.9         2.4          3.3         1.0
## 59           6.6         2.9          4.6         1.3
## 60           5.2         2.7          3.9         1.4
## 61           5.0         2.0          3.5         1.0
## 62           5.9         3.0          4.2         1.5
## 63           6.0         2.2          4.0         1.0
## 64           6.1         2.9          4.7         1.4
## 65           5.6         2.9          3.6         1.3
## 66           6.7         3.1          4.4         1.4
## 67           5.6         3.0          4.5         1.5
## 68           5.8         2.7          4.1         1.0
## 69           6.2         2.2          4.5         1.5
## 70           5.6         2.5          3.9         1.1
## 71           5.9         3.2          4.8         1.8
## 72           6.1         2.8          4.0         1.3
## 73           6.3         2.5          4.9         1.5
## 74           6.1         2.8          4.7         1.2
## 75           6.4         2.9          4.3         1.3
## 76           6.6         3.0          4.4         1.4
## 77           6.8         2.8          4.8         1.4
## 78           6.7         3.0          5.0         1.7
## 79           6.0         2.9          4.5         1.5
## 80           5.7         2.6          3.5         1.0
## 81           5.5         2.4          3.8         1.1
## 82           5.5         2.4          3.7         1.0
## 83           5.8         2.7          3.9         1.2
## 84           6.0         2.7          5.1         1.6
## 85           5.4         3.0          4.5         1.5
## 86           6.0         3.4          4.5         1.6
## 87           6.7         3.1          4.7         1.5
## 88           6.3         2.3          4.4         1.3
## 89           5.6         3.0          4.1         1.3
## 90           5.5         2.5          4.0         1.3
## 91           5.5         2.6          4.4         1.2
## 92           6.1         3.0          4.6         1.4
## 93           5.8         2.6          4.0         1.2
## 94           5.0         2.3          3.3         1.0
## 95           5.6         2.7          4.2         1.3
## 96           5.7         3.0          4.2         1.2
## 97           5.7         2.9          4.2         1.3
## 98           6.2         2.9          4.3         1.3
## 99           5.1         2.5          3.0         1.1
## 100          5.7         2.8          4.1         1.3
## 101          6.3         3.3          6.0         2.5
## 102          5.8         2.7          5.1         1.9
## 103          7.1         3.0          5.9         2.1
## 104          6.3         2.9          5.6         1.8
## 105          6.5         3.0          5.8         2.2
## 106          7.6         3.0          6.6         2.1
## 107          4.9         2.5          4.5         1.7
## 108          7.3         2.9          6.3         1.8
## 109          6.7         2.5          5.8         1.8
## 110          7.2         3.6          6.1         2.5
## 111          6.5         3.2          5.1         2.0
## 112          6.4         2.7          5.3         1.9
## 113          6.8         3.0          5.5         2.1
## 114          5.7         2.5          5.0         2.0
## 115          5.8         2.8          5.1         2.4
## 116          6.4         3.2          5.3         2.3
## 117          6.5         3.0          5.5         1.8
## 118          7.7         3.8          6.7         2.2
## 119          7.7         2.6          6.9         2.3
## 120          6.0         2.2          5.0         1.5
## 121          6.9         3.2          5.7         2.3
## 122          5.6         2.8          4.9         2.0
## 123          7.7         2.8          6.7         2.0
## 124          6.3         2.7          4.9         1.8
## 125          6.7         3.3          5.7         2.1
## 126          7.2         3.2          6.0         1.8
## 127          6.2         2.8          4.8         1.8
## 128          6.1         3.0          4.9         1.8
## 129          6.4         2.8          5.6         2.1
## 130          7.2         3.0          5.8         1.6
## 131          7.4         2.8          6.1         1.9
## 132          7.9         3.8          6.4         2.0
## 133          6.4         2.8          5.6         2.2
## 134          6.3         2.8          5.1         1.5
## 135          6.1         2.6          5.6         1.4
## 136          7.7         3.0          6.1         2.3
## 137          6.3         3.4          5.6         2.4
## 138          6.4         3.1          5.5         1.8
## 139          6.0         3.0          4.8         1.8
## 140          6.9         3.1          5.4         2.1
## 141          6.7         3.1          5.6         2.4
## 142          6.9         3.1          5.1         2.3
## 143          5.8         2.7          5.1         1.9
## 144          6.8         3.2          5.9         2.3
## 145          6.7         3.3          5.7         2.5
## 146          6.7         3.0          5.2         2.3
## 147          6.3         2.5          5.0         1.9
## 148          6.5         3.0          5.2         2.0
## 149          6.2         3.4          5.4         2.3
## 150          5.9         3.0          5.1         1.8
iris$Species
##   [1] setosa     setosa     setosa     setosa     setosa     setosa    
##   [7] setosa     setosa     setosa     setosa     setosa     setosa    
##  [13] setosa     setosa     setosa     setosa     setosa     setosa    
##  [19] setosa     setosa     setosa     setosa     setosa     setosa    
##  [25] setosa     setosa     setosa     setosa     setosa     setosa    
##  [31] setosa     setosa     setosa     setosa     setosa     setosa    
##  [37] setosa     setosa     setosa     setosa     setosa     setosa    
##  [43] setosa     setosa     setosa     setosa     setosa     setosa    
##  [49] setosa     setosa     versicolor versicolor versicolor versicolor
##  [55] versicolor versicolor versicolor versicolor versicolor versicolor
##  [61] versicolor versicolor versicolor versicolor versicolor versicolor
##  [67] versicolor versicolor versicolor versicolor versicolor versicolor
##  [73] versicolor versicolor versicolor versicolor versicolor versicolor
##  [79] versicolor versicolor versicolor versicolor versicolor versicolor
##  [85] versicolor versicolor versicolor versicolor versicolor versicolor
##  [91] versicolor versicolor versicolor versicolor versicolor versicolor
##  [97] versicolor versicolor versicolor versicolor virginica  virginica 
## [103] virginica  virginica  virginica  virginica  virginica  virginica 
## [109] virginica  virginica  virginica  virginica  virginica  virginica 
## [115] virginica  virginica  virginica  virginica  virginica  virginica 
## [121] virginica  virginica  virginica  virginica  virginica  virginica 
## [127] virginica  virginica  virginica  virginica  virginica  virginica 
## [133] virginica  virginica  virginica  virginica  virginica  virginica 
## [139] virginica  virginica  virginica  virginica  virginica  virginica 
## [145] virginica  virginica  virginica  virginica  virginica  virginica 
## Levels: setosa versicolor virginica
iris$Sepal.Length
##   [1] 5.1 4.9 4.7 4.6 5.0 5.4 4.6 5.0 4.4 4.9 5.4 4.8 4.8 4.3 5.8 5.7 5.4 5.1
##  [19] 5.7 5.1 5.4 5.1 4.6 5.1 4.8 5.0 5.0 5.2 5.2 4.7 4.8 5.4 5.2 5.5 4.9 5.0
##  [37] 5.5 4.9 4.4 5.1 5.0 4.5 4.4 5.0 5.1 4.8 5.1 4.6 5.3 5.0 7.0 6.4 6.9 5.5
##  [55] 6.5 5.7 6.3 4.9 6.6 5.2 5.0 5.9 6.0 6.1 5.6 6.7 5.6 5.8 6.2 5.6 5.9 6.1
##  [73] 6.3 6.1 6.4 6.6 6.8 6.7 6.0 5.7 5.5 5.5 5.8 6.0 5.4 6.0 6.7 6.3 5.6 5.5
##  [91] 5.5 6.1 5.8 5.0 5.6 5.7 5.7 6.2 5.1 5.7 6.3 5.8 7.1 6.3 6.5 7.6 4.9 7.3
## [109] 6.7 7.2 6.5 6.4 6.8 5.7 5.8 6.4 6.5 7.7 7.7 6.0 6.9 5.6 7.7 6.3 6.7 7.2
## [127] 6.2 6.1 6.4 7.2 7.4 7.9 6.4 6.3 6.1 7.7 6.3 6.4 6.0 6.9 6.7 6.9 5.8 6.8
## [145] 6.7 6.7 6.3 6.5 6.2 5.9
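# a short sketch combining $ with logical indexing to select rows by condition
head(iris[iris$Species == "setosa", ], 3)
##   Sepal.Length Sepal.Width Petal.Length Petal.Width Species
## 1          5.1         3.5          1.4         0.2  setosa
## 2          4.9         3.0          1.4         0.2  setosa
## 3          4.7         3.2          1.3         0.2  setosa
nrow(iris[iris$Sepal.Length > 7, ])   # how many rows meet the condition
## [1] 12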

data2 <- read.csv("mtcars.csv")
data2

data(iris)
head(iris)
head(iris,3)

tail(iris)
tail(iris,3)

str(iris)

tail(data2)
tail(data2,3)
str(data2)

tail(data)
tail(data,3)
str(data)

dim(data)
dim(data2)

ls(iris)

rm(list = ls())

ls()

mtcars<-read.csv("mtcars.csv")

mean(mtcars$mpg)
median(mtcars$mpg)
quantile(mtcars$mpg)
IQR(mtcars$mpg)

mtcars$mpg

summary(iris)
dim(iris)

as.integer(3.14)
## [1] 3
as.numeric(FALSE)
## [1] 0
as.logical(0.45)
## [1] TRUE
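# a few more coercion edge cases, sketched with literal values
as.logical(0)        # only 0 coerces to FALSE; any nonzero number is TRUE
## [1] FALSE
as.numeric("3.14")   # numeric-looking strings convert cleanly
## [1] 3.14
as.numeric("abc")    # anything else becomes NA with a warning
## Warning: NAs introduced by coercion
## [1] NA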
setwd("C:/Users/super/OneDrive/문서/01 Study/ADsP/data")

getwd()
## [1] "C:/Users/super/OneDrive/문서/01 Study/ADsP/data"
data <- read.csv("Data1.csv",header=TRUE, sep="," )
data
##      Q1 Q2 Q3 Q4 Q5 Q6 Q7 Q8 Q9 Q10 Q11 Q12 Q13 Q14 Q15 Q16 Q17 Q18 Q19 Q20
## 1     4  4  2  3  4  2  2  4  4   4   4   4   4   4   4   4   4   4   4   4
## 2     4  4  4  4  4  3  2  4  4   4   4   4   4   4   4   4   3   4   2   1
## 3     4  4  4  4  2  4  4  4  4   2   4   4   4   4   3   4   4   4   4   3
## 4     5  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 5     4  4  4  4  4  4  4  4  2   4   4   4   4   4   4   4   4   4   4   4
## 6     4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 7     4  2  4  4  4  4  4  5  5   5   5   5   5   5   4   5   2   4   4   4
## 8     4  2  4  4  4  4  4  5  5   5   5   5   5   5   2   2   2   4   2   2
## 9     4  4  4  4  2  1  3  2  3   2   4   3   4   5   3   4   4   4   4   4
## 10    4  4  2  2  4  2  4  2  4   4   4   4   4   4   4   4   4   4   2   2
## 11    4  4  4  4  4  2  4  4  4   2   4   4   4   4   4   4   4   4   4   4
## 12    4  4  2  4  2  2  4  4  4   4   3   3   4   4   3   4   4   4   4   4
## 13    4  4  4  4  4  4  5  4  2   4   4   4   2   3   1   4   3   4   1   4
## 14    4  2  4  2  4  4  4  4  2   4   4   3   4   4   4   4   2   4   4   2
## 15    4  4  4  4  4  3  4  3  4   3   4   3   4   5   4   5   4   4   4   4
## 16    4  4  4  4  4  5  5  5  5   4   4   4   4   4   4   4   5   5   4   5
## 17    4  2  4  4  4  2  4  4  2   4   5   5   5   5   5   5   4   2   5   5
## 18    4  4  3  2  3  2  3  2  4   3   4   4   4   4   4   4   4   4   4   4
## 19    4  2  2  2  1  1  4  4  2   2   3   4   4   4   4   4   3   4   2   2
## 20    4  2  3  4  2  4  4  4  4   3   3   2   4   4   4   4   4   4   3   4
## 21    4  3  3  4  2  2  4  4  4   3   4   3   3   4   3   4   2   4   3   3
## 22    3  2  2  1  1  1  2  4  4   3   3   4   2   4   3   4   4   4   3   3
## 23    5  5  5  5  5  5  5  5  5   5   5   5   5   5   4   4   4   4   4   5
## 24    4  4  4  4  3  4  5  3  3   2   3   4   4   3   2   4   5   2   1   4
## 25    4  4  4  4  4  4  5  5  5   5   5   5   5   5   5   5   3   3   2   5
## 26    4  4  4  4  5  2  4  2  5   4   2   2   4   2   2   5   4   5   1   1
## 27    4  4  4  4  2  4  4  4  4   4   3   3   3   3   3   5   4   5   1   4
## 28    4  1  1  4  3  1  5  4  4   3   4   4   4   4   4   5   1   4   2   4
## 29    4  4  4  5  5  4  4  4  4   3   5   4   4   4   4   4   4   5   4   3
## 30    4  4  2  2  4  4  2  4  2   4   2   2   2   4   2   5   4   4   1   1
## 31    4  3  4  4  4  4  4  4  4   4   2   4   4   4   4   5   4   5   2   5
## 32    4  4  4  4  3  3  4  2  4   4   2   4   4   4   3   5   5   5   3   5
## 33    4  4  4  4  4  4  4  2  4   4   4   4   4   4   4   4   4   4   4   5
## 34    4  4  4  4  4  4  4  2  4   4   5   4   2   4   2   5   1   4   4   2
## 35    4  4  4  4  4  1  4  2  4   4   3   3   2   3   2   5   1   4   2   4
## 36    4  4  4  4  3  2  5  5  5   4   2   4   4   1   3   3   5   5   5   4
## 37    4  4  4  4  3  2  5  5  5   4   3   4   3   4   4   4   5   5   3   4
## 38    4  4  4  4  3  1  5  5  5   4   3   4   3   4   4   4   5   5   2   4
## 39    4  4  4  4  4  4  4  2  4   4   2   2   4   2   2   4   2   4   4   4
## 40    4  4  4  4  4  4  4  4  4   4   4   4   4   4   3   4   2   4   2   3
## 41    3  4  4  4  4  4  4  2  2   4   5   4   3   5   4   4   4   4   4   4
## 42    4  4  4  4  4  4  4  4  4   4   2   2   4   4   2   4   2   4   2   4
## 43    4  4  4  4  4  4  4  5  4   4   4   5   4   5   5   5   2   4   4   1
## 44    2  2  2  2  2  2  2  2  2   2   2   2   2   2   2   2   2   2   2   2
## ... (output truncated: rows 45-855, i.e. 811 further rows of the same 20-column table of responses coded 1-5, omitted)
## 856   2  2  2  3  3  2  3  2  2   2   2   2   2   3   2   2   2   2   3   3
## 857   3  3  2  3  2  1  1  3  2   3   3   3   2   2   3   2   2   2   3   3
## 858   2  2  2  2  2  2  2  2  2   2   2   2   3   2   3   2   2   2   2   2
## 859   5  5  3  4  4  2  3  5  3   2   4   3   4   3   3   5   4   5   3   4
## 860   4  5  2  3  3  4  4  4  4   4   4   3   5   3   4   5   2   5   4   4
## 861   4  4  2  4  2  4  4  2  2   2   4   2   4   4   4   4   4   4   2   4
## 862   4  4  4  3  2  2  4  3  2   3   4   5   5   5   4   5   4   5   4   5
## 863   4  3  4  2  2  2  2  4  2   2   4   2   2   4   2   2   4   4   2   2
## 864   4  2  2  2  2  2  3  4  2   2   4   3   4   4   2   4   4   4   3   3
## 865   4  4  2  2  4  3  4  2  2   2   2   2   4   4   4   4   4   4   3   2
## 866   4  4  2  2  4  2  2  4  4   2   4   4   4   4   4   4   4   4   4   2
## 867   4  4  2  3  2  2  2  2  4   3   3   2   3   3   2   3   2   4   2   2
## 868   4  4  3  3  3  4  5  4  5   3   4   3   2   4   4   5   5   5   4   5
## 869   4  4  1  3  2  1  1  1  1   2   2   4   4   4   4   4   4   4   4   4
## 870   4  2  4  4  4  2  3  2  2   4   4   4   4   4   4   4   4   4   3   4
## 871   4  4  4  4  4  4  4  4  4   4   4   3   3   4   4   4   3   4   4   3
## 872   4  5  4  2  4  4  5  3  4   2   4   3   4   4   5   4   4   4   5   3
## 873   4  3  1  4  4  2  4  2  2   2   4   5   5   5   5   4   2   4   4   4
## 874   4  3  3  3  3  2  2  2  2   2   4   2   4   4   2   2   2   2   2   2
## 875   2  3  2  2  2  2  2  2  4   3   4   4   2   4   4   4   4   4   3   2
## 876   4  3  4  4  3  3  4  2  4   4   4   3   4   3   4   4   4   4   3   3
## 877   4  3  4  2  3  2  2  1  3   2   2   3   3   2   3   4   4   4   3   3
## 878   4  4  3  3  3  4  4  4  4   3   4   4   4   4   4   4   4   4   4   4
## 879   5  5  4  5  5  3  2  4  4   4   4   4   4   5   4   4   4   4   4   4
## 880   4  4  2  2  2  2  2  2  4   3   2   2   2   1   3   4   4   4   4   4
## 881   4  3  2  4  4  2  2  2  2   2   4   2   2   4   3   4   4   4   3   3
## 882   4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 883   2  3  2  3  4  2  2  2  3   2   4   4   4   4   4   3   4   4   3   3
## 884   4  4  4  4  4  5  5  4  4   4   4   5   5   4   3   4   4   3   3   3
## 885   2  2  2  2  2  2  2  2  4   2   4   4   4   4   3   4   4   4   4   3
## 886   3  4  4  4  4  5  4  4  3   4   4   4   4   4   4   3   4   4   4   4
## 887   2  3  2  2  2  2  2  2  2   2   2   2   2   3   3   3   2   2   1   2
## 888   3  5  4  4  4  5  4  4  4   4   4   4   4   4   4   4   5   3   3   3
## 889   3  2  2  3  2  2  2  3  2   2   3   2   3   2   1   3   3   2   3   2
## 890   3  3  2  3  2  2  3  2  1   2   1   2   3   2   3   1   4   1   4   4
## 891   1  1  1  2  3  2  2  2  2   1   2   2   2   2   2   2   2   2   2   3
## 892   3  3  2  2  2  2  2  2  3   2   1   3   3   3   2   4   1   2   2   3
## 893   3  2  2  2  3  2  3  2  3   2   2   2   2   3   2   2   2   2   2   2
## 894   3  3  3  4  3  2  3  3  3   2   3   3   3   2   2   2   2   1   3   4
## 895   1  1  2  2  2  1  1  2  2   2   2   2   2   2   2   2   2   2   3   4
## 896   3  3  2  3  2  2  3  2  1   2   1   2   3   2   3   1   4   1   4   4
## 897   1  1  1  2  3  2  2  2  2   1   2   2   2   2   2   2   2   2   2   3
## 898   3  3  2  2  2  2  2  2  3   2   1   3   3   3   2   4   1   2   2   3
## 899   3  2  2  2  3  2  3  2  3   2   2   2   2   3   2   2   2   2   2   2
## 900   2  2  2  2  1  1  1  2  2   2   1   3   2   2   4   2   2   1   2   3
## 901   3  3  1  3  3  2  2  2  3   2   3   3   4   1   4   4   4   4   5   5
## 902   1  1  1  1  2  1  1  1  1   2   1   1   3   2   3   2   2   1   3   2
## 903   4  3  1  1  1  2  4  4  4   2   4   2   4   5   4   5   4   5   3   2
## 904   4  2  4  4  2  3  2  2  2   2   4   4   4   4   4   4   4   4   4   2
## 905   4  4  2  2  2  3  4  4  2   2   4   4   4   4   4   4   4   4   4   4
## 906   5  5  5  5  4  2  2  4  2   4   5   5   5   5   4   5   4   4   2   2
## 907   2  2  2  2  2  2  2  2  2   2   4   4   4   4   4   5   4   5   2   4
## 908   4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 909   5  3  4  2  2  1  2  3  4   2   3   2   2   4   3   5   5   5   2   2
## 910   1  4  3  2  2  1  1  2  2   2   3   4   4   4   4   4   4   4   3   4
## 911   4  3  2  3  2  2  2  4  2   1   3   2   2   4   4   4   5   5   4   4
## 912   4  4  5  4  4  5  4  4  4   5   3   2   4   4   4   4   4   4   4   4
## 913   4  2  2  2  2  2  2  2  2   2   2   4   4   4   4   4   4   4   4   4
## 914   4  3  3  3  2  2  2  2  2   3   4   2   3   3   4   4   4   4   4   4
## 915   3  3  2  2  2  2  2  2  2   2   2   2   4   2   2   4   4   4   4   2
## 916   3  2  1  2  2  2  2  2  2   2   4   4   4   5   5   4   4   5   2   2
## 917   4  4  4  2  4  4  4  2  5   4   4   4   4   4   5   4   4   4   4   4
## 918   4  3  2  2  3  3  2  2  2   2   4   3   4   4   4   5   5   5   4   4
## 919   4  4  3  4  2  2  4  4  4   4   4   2   4   4   4   4   4   4   4   4
## 920   4  4  4  3  3  4  4  2  4   4   3   3   4   4   4   4   4   5   4   3
## 921   4  4  4  4  4  3  3  4  4   4   4   4   5   4   5   4   4   4   4   5
## 922   4  3  2  4  2  2  4  4  4   3   4   3   3   4   4   4   3   5   4   4
## 923   2  4  2  4  3  2  1  2  2   2   2   2   4   4   4   4   4   4   2   4
## 924   4  3  4  4  4  4  2  4  4   3   4   4   5   4   4   4   4   5   5   5
## 925   3  3  2  2  2  3  2  4  2   3   3   2   3   3   2   4   4   4   3   3
## 926   2  2  2  2  2  2  2  2  2   2   2   2   2   2   2   4   2   4   2   3
## 927   4  4  4  3  2  2  4  4  1   2   4   4   4   4   4   4   4   4   4   4
## 928   2  3  2  2  2  2  4  4  4   3   5   4   4   4   5   5   5   5   5   5
## 929   2  3  2  2  2  2  4  4  4   3   5   4   4   4   5   5   5   5   5   5
## 930   4  3  4  4  4  4  4  4  4   4   4   4   4   4   4   4   3   4   4   4
## 931   4  4  3  3  4  4  3  2  4   4   4   3   4   4   4   4   3   4   4   3
## 932   4  2  2  2  4  4  2  2  2   2   3   2   4   2   3   4   4   4   4   3
## 933   4  3  3  3  3  4  4  5  5   2   4   4   4   4   4   5   5   5   5   5
## 934   4  3  3  2  4  3  2  4  4   2   2   2   4   4   4   4   5   5   3   4
## 935   3  3  1  1  1  1  2  1  2   1   2   2   2   2   2   4   3   4   4   4
## 936   5  5  2  2  2  2  2  2  2   2   4   2   4   4   3   4   4   4   4   2
## 937   2  3  4  2  1  1  4  4  4   3   3   4   3   3   3   4   5   4   3   4
## 938   1  1  1  1  3  2  1  1  2   2   1   1   3   2   3   2   4   2   4   4
## 939   3  2  3  3  2  1  3  2  3   3   2   3   4   3   2   3   3   3   2   4
## 940   2  2  2  3  2  2  2  2  2   3   2   2   2   2   3   2   2   3   2   3
## 941   3  3  3  3  3  3  3  3  3   3   2   3   3   3   2   2   2   3   3   3
## 942   1  1  1  2  2  1  1  2  2   3   1   2   2   2   3   2   2   1   1   4
## 943   2  2  2  2  3  2  2  3  3   2   3   3   2   2   2   2   3   2   3   2
## 944   2  3  3  2  2  2  2  2  3   3   2   2   2   2   2   2   2   2   2   2
## 945   4  4  2  2  2  2  2  4  2   2   3   2   4   2   4   4   4   4   3   3
## 946   4  3  2  3  4  3  4  2  2   3   4   4   4   5   4   5   4   4   5   4
## 947   4  3  2  2  3  2  4  4  4   3   4   4   3   4   3   4   4   4   3   4
## 948   5  3  4  4  4  4  4  4  4   3   3   2   4   4   3   4   4   4   3   3
## 949   4  3  2  2  4  2  1  2  2   1   3   1   4   4   2   4   4   4   2   2
## 950   1  3  3  1  1  2  4  2  4   4   4   4   4   4   3   4   4   4   3   4
## 951   4  4  4  3  4  4  5  4  4   3   4   5   5   5   5   4   4   5   4   5
## 952   2  2  3  3  2  2  4  2  2   2   3   4   4   4   4   4   5   4   4   4
## 953   3  1  2  2  2  2  2  1  2   2   2   2   2   2   2   4   2   4   3   3
## 954   4  4  3  4  4  4  4  4  4   4   4   3   4   4   4   4   4   4   4   4
## 955   4  4  5  4  3  3  2  3  5   4   4   5   5   5   4   4   5   5   4   3
## 956   4  4  4  2  2  2  4  4  4   2   4   4   4   4   4   5   5   5   4   4
## 957   4  4  3  4  4  4  4  2  2   3   3   3   4   4   4   4   4   4   4   4
## 958   4  3  3  2  3  4  4  4  3   2   4   4   3   3   4   4   3   4   3   4
## 959   4  4  4  3  3  3  4  3  4   2   4   4   4   5   5   5   5   5   4   5
## 960   3  3  2  4  2  2  4  4  5   2   4   3   4   4   4   4   4   4   4   4
## 961   4  2  2  3  3  1  1  1  1   1   1   4   4   3   3   4   4   4   3   4
## 962   4  4  3  3  3  2  3  3  4   4   4   4   4   4   4   3   2   3   4   4
## 963   4  4  4  4  4  2  4  4  4   4   4   4   4   4   4   4   4   4   4   3
## 964   5  4  3  3  3  2  4  4  4   3   3   4   4   4   3   3   4   4   3   3
## 965   1  3  1  1  1  4  1  4  1   1   4   4   4   5   4   5   4   4   4   4
## 966   2  3  2  2  2  2  2  2  2   2   3   3   4   3   3   4   3   4   3   3
## 967   2  2  2  2  2  2  2  2  2   2   2   2   2   2   2   3   2   3   2   2
## 968   3  2  3  2  2  2  3  3  2   3   4   3   4   3   3   3   2   3   3   3
## 969   3  2  2  3  2  3  3  2  4   2   3   3   3   4   3   4   4   4   3   2
## 970   4  4  4  3  4  3  4  3  4   3   5   3   4   3   5   3   4   5   4   5
## 971   4  4  2  4  4  2  4  5  4   3   4   4   3   4   4   4   2   2   4   2
## 972   3  1  2  2  1  1  4  3  1   1   2   2   5   4   5   5   5   5   4   4
## 973   5  4  4  4  4  3  2  4  2   4   5   3   4   5   5   5   4   4   5   4
## 974   3  5  4  4  4  5  5  5  5   5   5   4   4   5   5   4   4   2   3   3
## 975   3  4  3  4  2  4  3  4  4   4   4   5   5   5   5   3   5   5   5   2
## 976   2  4  2  2  2  2  3  3  2   2   4   4   4   4   4   4   4   4   4   2
## 977   4  2  2  3  2  2  2  4  1   1   4   2   4   4   4   5   4   4   4   4
## 978   4  3  3  3  3  4  5  3  4   3   4   2   3   4   4   4   3   3   3   3
## 979   4  4  2  2  2  4  4  4  4   2   4   4   4   4   4   4   4   4   4   2
## 980   5  4  3  4  4  3  4  4  5   3   4   4   4   4   5   4   3   4   4   3
## 981   4  5  4  3  3  3  4  4  4   3   3   3   3   5   5   3   3   3   3   3
## 982   2  3  3  2  2  2  2  2  2   2   2   2   2   2   3   3   2   2   3   3
## 983   3  3  2  3  2  1  2  1  3   3   2   3   2   2   3   3   2   1   3   3
## 984   3  3  3  2  3  2  3  2  3   3   2   3   3   2   2   2   2   2   2   4
## 985   3  4  4  3  4  5  4  4  4   4   3   3   3   3   3   4   3   3   3   3
## 986   4  3  4  2  4  2  2  2  4   3   3   4   3   2   2   4   4   3   3   2
## 987   4  4  4  4  4  2  2  4  2   2   3   3   2   2   2   4   2   4   3   2
## 988   4  2  2  2  3  1  4  2  2   2   4   4   4   4   4   4   3   4   3   3
## 989   4  4  4  4  4  4  4  4  4   3   4   4   4   4   3   4   4   4   3   3
## 990   4  3  2  3  2  4  4  2  2   2   4   2   2   2   3   4   4   4   3   3
## 991   4  3  3  4  4  2  2  2  2   4   4   3   4   3   3   5   5   5   4   4
## 992   4  3  2  2  2  2  2  2  4   3   4   4   4   4   4   4   4   4   3   3
## 993   4  4  4  3  3  2  2  2  3   3   4   4   4   4   4   4   4   3   3   3
## 994   4  3  4  2  4  4  4  4  5   3   4   4   4   5   5   5   5   5   4   5
## 995   2  3  1  2  2  1  4  4  4   2   3   2   4   4   3   5   4   5   4   4
## 996   4  4  4  3  4  3  2  4  4   4   5   5   4   5   5   4   4   5   4   4
## 997   4  2  2  2  1  1  2  2  2   1   4   2   2   4   4   4   1   4   4   4
## 998   4  3  2  2  4  2  2  2  2   3   2   4   4   4   2   4   2   4   3   4
## 999   3  3  2  2  3  2  2  3  2   2   2   2   3   3   2   3   4   4   2   2
## 1000  4  2  2  2  2  2  2  4  4   3   2   4   4   4   4   2   4   4   4   4
## 1001  4  4  4  4  2  2  4  3  5   5   5   5   4   5   5   4   4   5   5   4
## 1002  3  3  3  3  3  4  4  3  3   3   4   4   4   4   4   4   4   4   3   3
## 1003  4  4  4  4  3  4  4  4  3   4   4   4   4   4   4   4   4   4   4   4
## 1004  4  3  3  3  3  3  3  3  3   3   3   3   4   4   4   4   4   4   3   3
## 1005  4  3  4  3  2  1  4  4  4   3   4   4   4   4   2   4   2   4   4   3
## 1006  3  3  2  2  4  2  2  2  2   2   4   2   4   4   4   2   3   4   4   2
## 1007  4  4  4  4  4  4  4  2  4   4   4   4   4   4   5   5   5   5   5   5
## 1008  4  2  1  3  2  1  2  2  2   2   4   4   4   3   3   4   3   4   4   3
## 1009  4  4  2  2  3  4  1  1  4   4   3   2   3   4   2   4   4   4   2   3
## 1010  4  4  3  3  2  2  2  4  4   3   3   3   2   4   4   4   4   4   4   4
## 1011  2  2  2  4  2  2  2  2  3   3   1   2   2   1   1   1   1   1   3   3
## 1012  3  4  4  3  3  2  3  3  3   2   3   3   3   2   3   2   2   3   3   4
## 1013  4  3  3  4  4  4  3  4  3   3   4   4   2   4   2   2   3   3   3   2
## 1014  2  1  1  1  3  1  1  1  2   3   1   3   2   1   5   5   2   1   2   3
## 1015  3  4  4  3  3  2  3  3  3   2   3   3   3   2   3   2   2   3   3   4
## 1016  4  3  3  3  3  4  4  4  2   3   4   4   4   4   3   4   5   4   4   4
## 1017  4  4  3  3  4  4  3  2  4   2   3   3   2   3   3   4   4   4   3   4
## 1018  4  4  4  4  4  5  4  4  4   4   4   4   3   4   3   4   4   4   4   3
## 1019  4  4  4  4  4  4  4  2  4   4   2   4   4   4   4   4   4   4   2   4
## 1020  4  4  4  4  4  4  4  2  4   3   4   4   4   4   3   2   2   4   2   4
## 1021  4  4  2  2  2  2  4  4  2   4   4   4   4   4   2   4   2   4   3   2
## 1022  4  4  3  4  4  4  2  4  3   2   3   2   3   2   2   5   2   5   2   3
## 1023  2  2  2  2  2  2  4  2  2   2   3   2   2   2   2   4   2   4   2   2
## 1024  2  4  2  2  2  3  1  2  2   2   4   2   4   4   4   4   4   5   4   4
## 1025  5  3  2  3  3  2  4  5  1   3   4   3   3   4   4   5   3   4   4   4
## 1026  4  4  3  2  2  1  1  3  4   3   1   3   2   4   1   5   2   3   3   3
## 1027  3  4  2  4  4  2  2  2  2   2   4   4   4   4   4   4   4   4   4   3
## 1028  3  3  2  2  2  2  2  2  4   3   3   4   4   4   3   4   4   4   4   4
## 1029  4  4  4  3  4  4  4  4  4   3   3   4   4   3   4   4   4   4   4   4
## 1030  2  4  1  1  1  2  2  4  2   1   2   2   2   4   3   4   5   4   3   4
## 1031  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1032  4  4  4  4  4  4  4  4  4   2   2   2   2   2   2   4   2   4   2   2
## 1033  2  2  1  2  1  2  2  1  1   1   4   3   2   2   2   4   4   4   4   4
## 1034  4  4  4  3  4  2  2  2  2   4   4   2   4   4   3   4   4   4   3   4
## 1035  4  2  4  2  2  3  2  4  2   3   4   2   4   4   4   4   4   4   4   2
## 1036  5  5  5  5  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1037  4  4  4  3  4  4  4  4  4   3   4   4   4   4   4   4   4   4   4   4
## 1038  4  4  3  4  4  2  2  5  2   4   5   4   4   4   4   5   4   3   4   4
## 1039  5  4  4  4  4  4  4  4  5   4   5   5   5   5   5   5   5   5   5   5
## 1040  4  4  4  4  4  5  5  5  5   5   5   4   5   4   5   5   5   5   5   4
## 1041  5  4  4  4  4  3  2  2  2   3   3   4   4   5   5   3   3   5   3   3
## 1042  3  4  2  2  4  4  2  2  2   2   4   4   2   4   2   4   4   4   4   4
## 1043  4  4  4  4  4  4  2  2  4   3   3   2   4   4   2   4   4   4   4   3
## 1044  4  3  4  4  4  4  4  4  4   3   4   4   4   4   4   4   4   4   3   2
## 1045  4  4  2  2  4  4  4  4  2   1   4   5   4   4   4   5   5   5   3   4
## 1046  4  4  4  4  3  3  4  2  2   3   3   4   4   4   4   4   3   4   3   4
## 1047  4  4  2  2  4  4  4  4  4   2   3   5   5   5   5   4   5   4   2   5
## 1048  4  2  4  3  3  4  4  4  4   4   4   3   4   4   4   4   5   5   4   4
## 1049  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1050  4  4  2  2  2  2  2  2  2   2   2   3   4   3   2   4   4   4   2   4
## 1051  3  3  3  3  3  3  3  3  2   3   3   3   3   3   3   3   3   3   3   2
## 1052  4  4  3  4  4  1  4  2  4   3   2   3   2   4   3   4   4   4   3   2
## 1053  3  3  2  2  2  4  4  4  4   4   3   3   2   4   2   4   4   4   4   4
## 1054  4  5  2  2  4  2  2  1  1   2   4   4   4   4   4   4   2   2   3   2
## 1055  2  3  2  2  1  4  4  1  1   1   4   1   2   4   4   4   2   4   2   4
## 1056  4  3  2  2  3  2  2  2  3   4   2   4   4   4   4   4   4   5   3   3
## 1057  4  2  2  2  1  1  3  2  3   2   3   4   4   2   2   3   4   3   3   2
## 1058  4  4  2  3  2  2  2  2  1   2   4   2   2   3   2   4   4   4   4   4
## 1059  5  5  5  5  5  5  5  5  5   5   4   4   4   3   3   3   3   2   3   2
## 1060  3  2  2  2  2  1  4  4  2   3   3   3   3   3   3   4   4   4   4   2
## 1061  4  4  4  3  3  2  4  4  4   3   4   4   4   4   4   4   4   5   4   3
## 1062  3  3  2  2  2  4  1  1  2   3   1   5   5   2   4   5   5   4   4   4
## 1063  3  4  2  2  3  4  2  2  4   4   3   4   4   4   3   4   4   5   4   3
## 1064  2  4  4  4  2  2  2  2  4   2   3   2   2   1   2   4   2   4   2   2
## 1065  2  4  2  2  2  2  2  4  4   2   2   3   4   2   2   4   4   4   3   2
## 1066  2  2  2  2  2  4  4  2  4   4   4   4   5   4   4   4   5   4   5   5
## 1067  4  2  1  3  2  2  2  4  3   2   3   3   2   3   3   4   5   5   3   2
## 1068  3  4  3  2  4  1  1  2  3   5   4   3   4   4   4   3   5   3   3   4
## 1069  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1070  2  4  1  1  3  2  1  1  1   1   1   5   5   4   3   4   4   5   1   4
## 1071  4  4  4  4  4  4  4  4  4   4   4   4   4   5   4   4   5   5   5   3
## 1072  2  2  2  2  2  2  2  2  2   2   2   4   4   4   4   4   5   5   5   5
## 1073  4  4  4  4  4  2  4  3  2   2   3   4   4   4   4   4   4   4   4   3
## 1074  2  3  3  4  3  1  4  4  2   3   5   4   3   4   4   5   5   5   5   5
## 1075  4  4  4  4  4  4  2  2  2   3   3   2   2   4   3   2   2   2   2   2
## 1076  4  3  2  3  2  2  2  4  4   2   3   3   2   4   4   3   1   3   3   2
## 1077  4  3  4  3  3  2  3  4  3   2   3   4   3   2   2   2   4   4   3   2
## 1078  2  4  2  2  2  2  2  4  3   2   4   4   4   4   4   4   3   4   4   3
## 1079  4  3  2  2  2  1  2  2  3   1   3   4   3   4   4   4   2   3   3   3
## 1080  3  3  3  3  4  2  2  2  4   2   4   4   4   4   4   4   4   4   4   4
## 1081  2  2  4  3  2  2  2  4  2   2   3   2   3   2   3   3   2   4   3   2
## 1082  4  3  2  2  2  3  2  4  4   3   3   3   3   3   3   4   4   4   3   3
## 1083  5  5  5  3  2  5  5  5  5   5   4   4   4   4   4   4   4   3   4   3
## 1084  4  4  3  3  4  2  4  4  2   2   4   4   4   4   4   4   4   4   3   3
## 1085  2  3  3  2  2  3  3  1  2   2   3   4   4   4   4   4   4   4   4   4
## 1086  5  5  4  4  5  4  5  3  5   5   5   5   5   5   5   4   3   5   5   4
## 1087  5  4  2  2  3  1  4  2  4   3   5   5   5   5   5   2   2   5   5   3
## 1088  2  3  1  2  2  4  3  1  1   3   1   2   1   2   3   4   5   5   3   2
## 1089  5  2  2  1  2  1  2  2  1   3   4   4   4   4   4   4   2   4   3   4
## 1090  4  4  3  2  4  4  4  2  1   2   5   5   5   5   5   5   5   4   5   3
## 1091  3  3  2  2  2  1  3  2  1   2   3   2   2   3   2   4   4   4   4   4
## 1092  4  4  3  4  3  2  2  3  2   2   4   5   4   4   4   4   5   4   4   3
## 1093  4  3  3  3  3  2  4  3  3   4   4   3   4   4   3   4   4   4   4   3
## 1094  4  3  3  3  3  2  4  4  2   2   4   4   4   4   4   4   4   4   3   3
## 1095  4  4  2  3  4  5  4  1  3   3   2   3   3   3   3   2   2   2   1   1
## 1096  3  3  2  2  2  3  2  2  3   3   2   3   3   3   4   5   5   3   3   3
## 1097  3  4  2  2  1  2  2  4  3   3   2   2   2   2   3   5   4   5   4   3
## 1098  4  4  3  3  3  2  4  2  2   2   2   2   2   3   2   4   5   5   2   2
## 1099  4  4  4  4  4  2  4  3  2   4   4   4   4   3   5   4   2   4   4   2
## 1100  4  4  4  3  3  3  2  4  2   4   5   5   5   5   5   5   5   4   5   4
## 1101  4  2  4  3  3  4  5  5  5   5   5   5   5   5   5   5   2   3   1   4
## 1102  4  3  2  2  4  2  2  4  2   3   4   4   4   4   4   3   2   4   4   2
## 1103  4  3  4  4  4  4  4  4  4   4   4   4   4   4   4   5   5   5   4   5
## 1104  5  2  2  2  5  3  4  4  2   2   4   4   4   4   4   4   2   5   4   3
## 1105  4  5  4  3  4  5  3  4  5   4   5   3   4   5   4   4   3   5   4   3
## 1106  4  4  2  4  5  5  5  4  4   3   4   4   4   5   4   4   4   4   4   3
## 1107  2  2  2  2  2  3  3  2  1   1   3   4   4   4   3   5   5   4   2   3
## 1108  4  3  4  4  3  4  3  4  4   3   4   4   3   3   3   4   3   4   3   3
## 1109  5  4  3  4  4  2  5  4  3   2   4   5   4   3   4   5   5   5   4   5
## 1110  4  4  2  4  4  4  2  4  3   3   4   5   4   4   3   4   4   4   3   3
## 1111  4  4  2  4  4  3  2  2  2   2   2   3   3   2   2   2   3   3   3   2
## 1112  4  3  3  4  3  2  4  3  4   3   4   5   4   5   5   4   4   5   3   4
## 1113  3  3  3  3  4  4  4  4  4   3   3   3   3   3   3   3   4   4   4   4
## 1114  4  4  4  4  4  3  3  3  4   4   5   5   5   4   4   4   4   4   4   4
## 1115  2  2  2  2  2  2  2  2  2   2   3   3   3   3   3   3   3   3   2   2
## 1116  2  2  2  2  2  3  3  3  3   3   2   2   2   2   3   2   2   2   3   3
## 1117  4  4  4  4  4  4  4  4  4   4   5   5   5   5   5   4   4   4   4   4
## 1118  3  3  3  3  3  3  3  3  3   3   3   3   3   3   3   3   3   3   3   3
## 1119  4  4  4  4  4  4  4  4  4   4   5   5   5   5   5   4   4   4   3   3
## 1120  5  5  5  5  5  4  4  4  4   4   5   5   5   5   5   4   4   4   4   4
## 1121  1  1  2  2  2  3  3  3  3   3   4   3   3   3   3   2   2   2   2   2
## 1122  1  1  1  1  1  2  2  2  2   2   3   3   3   3   3   3   3   3   3   3
## 1123  4  3  2  4  5  2  4  2  2   4   4   4   5   5   5   4   5   5   5   4
## 1124  5  5  4  4  4  3  2  4  4   3   5   5   5   5   5   5   5   5   4   5
## 1125  4  4  4  3  4  3  5  4  5   3   5   5   5   5   5   4   5   5   4   3
## 1126  3  2  1  4  2  2  4  4  2   2   4   4   4   4   4   5   5   2   2   4
## 1127  3  2  2  2  2  2  2  2  3   4   4   4   3   4   4   4   3   2   3   4
## 1128  3  3  2  3  4  2  2  2  1   1   3   2   3   2   2   2   4   2   3   2
## 1129  4  4  4  4  4  3  4  3  4   4   4   4   4   4   3   4   4   4   4   3
## 1130  4  4  4  4  3  3  4  4  4   4   5   4   4   5   5   4   3   3   3   3
## 1131  3  4  2  2  3  2  2  4  2   3   3   1   2   3   3   4   4   4   3   4
## 1132  4  4  4  3  3  2  2  2  4   3   4   4   4   3   3   3   4   2   3   2
## 1133  3  3  2  2  4  1  2  2  2   2   4   4   4   4   4   4   4   4   4   4
## 1134  2  3  2  2  2  2  2  2  2   2   2   2   2   1   1   3   2   3   2   3
## 1135  2  3  2  2  2  2  4  2  4   2   3   4   3   3   4   4   2   4   2   4
## 1136  2  2  2  2  2  2  2  2  2   2   2   2   2   4   4   5   4   5   4   5
## 1137  5  4  4  4  2  2  2  4  2   2   2   2   2   2   2   4   4   5   4   3
## 1138  4  4  2  2  2  2  2  1  1   3   3   2   2   3   3   4   3   4   4   4
## 1139  2  1  2  3  1  1  3  2  1   1   3   3   3   4   3   3   5   3   4   3
## 1140  3  4  3  2  3  2  3  2  4   3   2   3   3   3   4   3   4   4   4   3
## 1141  3  4  3  3  4  2  4  4  3   2   1   1   2   1   2   2   5   3   2   1
## 1142  4  4  3  3  4  3  3  2  2   2   3   4   4   4   3   3   2   4   3   3
## 1143  3  5  2  3  2  2  5  3  4   4   3   3   4   4   5   4   5   5   3   5
## 1144  4  4  4  4  4  4  4  4  4   3   4   4   4   4   4   4   4   4   4   4
## 1145  2  2  3  4  4  2  2  3  4   3   4   4   5   4   3   2   3   4   3   5
## 1146  3  3  2  2  4  4  2  2  2   2   2   3   2   3   2   4   4   4   3   4
## 1147  4  3  2  3  2  1  2  2  4   2   4   4   4   4   3   4   4   4   3   3
## 1148  3  4  3  3  4  2  3  3  2   3   4   3   4   4   3   4   4   4   4   4
## 1149  4  3  3  3  2  2  2  4  2   2   4   4   4   4   4   3   2   4   4   3
## 1150  4  2  4  3  5  1  2  4  2   3   3   4   4   5   3   3   4   4   3   4
## 1151  4  4  5  3  2  1  5  4  4   2   5   5   4   4   5   4   5   5   4   3
## 1152  3  2  2  3  4  4  2  1  1   2   3   4   3   4   4   3   5   4   2   2
## 1153  3  4  3  4  4  3  2  2  2   2   3   3   3   3   4   4   4   3   2   4
## 1154  4  4  4  4  5  5  5  5  5   5   5   5   5   5   5   5   5   4   5   5
## 1155  2  3  2  2  4  2  4  2  4   4   4   4   4   4   4   4   4   3   3   3
## 1156  4  4  4  4  4  3  3  3  3   4   4   4   4   4   3   2   2   2   3   3
## 1157  3  3  3  3  3  4  4  3  3   4   4   4   4   3   3   4   4   4   3   3
## 1158  2  2  2  2  2  3  2  2  2   2   3   3   3   4   4   2   2   2   3   3
## 1159  2  2  2  2  2  3  3  3  3   3   4   4   4   3   3   3   3   3   3   3
## 1160  4  4  4  4  4  3  4  4  3   3   4   4   4   4   4   4   3   4   4   4
## 1161  3  3  3  3  3  3  3  3  3   3   4   4   4   4   4   3   3   4   4   4
## 1162  3  3  3  3  3  3  3  3  3   3   4   4   4   4   4   3   3   2   2   2
## 1163  3  3  3  3  3  3  3  3  3   3   4   4   4   4   3   3   3   4   2   2
## 1164  3  3  3  3  3  3  3  3  3   3   4   4   4   4   3   3   3   2   2   2
## 1165  4  4  4  4  4  3  4  4  3   3   4   4   4   4   4   4   2   4   4   4
## 1166  5  5  5  4  4  4  4  4  3   3   5   5   5   5   5   5   1   5   4   4
## 1167  5  5  5  4  4  4  4  4  3   3   5   5   5   5   5   5   1   4   4   4
## 1168  5  5  5  4  4  4  4  4  3   3   5   5   5   5   5   5   2   4   4   4
## 1169  5  4  4  4  4  4  3  3  4   4   5   4   4   4   3   4   4   3   2   1
## 1170  4  4  3  4  3  3  5  5  3   3   4   4   4   4   5   5   5   5   4   4
## 1171  4  3  4  4  2  4  4  4  3   3   4   4   4   4   4   4   4   4   3   2
## 1172  3  3  3  2  2  2  2  2  2   2   2   3   2   2   2   2   2   2   2   2
## 1173  4  3  2  3  3  1  2  2  2   2   3   2   3   3   3   2   4   4   3   2
## 1174  4  3  3  2  2  2  3  2  2   1   1   2   2   2   3   3   3   3   1   2
## 1175  4  4  2  2  2  2  2  2  4   2   2   2   4   2   2   2   2   2   2   2
## 1176  2  3  2  2  4  2  2  2  4   2   4   4   4   4   4   4   4   4   4   4
## 1177  4  4  4  4  1  1  3  2  4   3   4   2   4   5   4   4   2   4   5   5
## 1178  3  4  2  4  4  2  2  2  1   1   4   4   4   4   4   3   3   2   4   3
## 1179  3  3  3  3  2  2  2  2  4   3   4   4   4   4   4   4   4   4   3   2
## 1180  4  3  2  2  2  2  2  1  1   2   3   3   3   3   3   3   4   4   3   2
## 1181  5  1  4  4  5  4  2  5  1   1   5   5   5   5   4   2   2   5   5   1
## 1182  2  3  2  2  2  3  4  2  2   4   4   4   4   4   2   4   2   2   2   3
## 1183  5  4  4  4  3  5  4  4  5   4   4   4   5   4   5   4   5   5   4   4
## 1184  3  4  1  2  1  1  1  2  2   2   2   4   3   4   2   4   2   2   2   2
## 1185  5  4  4  4  4  4  3  5  2   3   4   4   4   4   4   5   5   4   5   4
## 1186  2  2  2  2  2  3  2  2  2   3   2   1   2   2   2   3   2   3   2   2
## 1187  3  3  3  3  3  2  3  3  3   2   2   2   2   1   1   2   2   2   3   2
## 1188  4  4  4  4  4  4  3  4  4   4   4   5   4   5   5   5   5   5   3   2
## 1189  4  4  4  4  4  3  3  4  4   3   4   4   4   4   4   4   4   4   4   4
## 1190  4  3  3  4  3  2  4  4  4   4   4   4   3   3   3   3   3   3   3   3
## 1191  4  3  3  4  3  2  4  4  4   4   4   4   3   3   3   3   3   3   3   3
## 1192  2  2  2  2  2  2  3  2  2   2   3   4   2   2   2   3   3   3   3   3
## 1193  4  2  2  2  2  2  4  2  4   2   4   4   4   4   4   4   4   4   4   4
## 1194  4  4  4  3  3  3  4  4  4   3   5   5   5   5   5   4   2   4   4   4
## 1195  5  4  4  4  4  4  3  4  4   3   5   5   5   5   5   5   1   4   5   5
## 1196  2  2  2  2  2  2  2  2  2   2   3   3   2   2   2   2   2   2   2   2
## 1197  3  3  3  3  3  3  4  4  4   3   4   4   4   4   4   4   2   2   2   2
## 1198  4  4  4  4  4  4  4  4  4   4   5   5   4   5   5   3   2   3   3   3
## 1199  3  3  3  3  3  3  3  3  3   3   2   2   3   3   3   4   2   3   3   3
## 1200  4  3  3  3  3  2  4  4  4   3   4   5   5   5   5   5   2   5   5   4
## 1201  4  4  2  2  3  1  3  3  2   2   4   4   3   4   3   3   4   3   4   2
## 1202  4  3  2  3  2  2  2  2  2   2   1   1   2   3   3   3   4   4   3   3
## 1203  4  3  4  2  2  2  4  3  3   4   4   5   4   5   5   5   4   5   4   3
## 1204  2  4  3  2  5  2  2  1  1   5   3   3   5   4   4   4   2   3   4   3
## 1205  4  3  4  3  3  2  2  4  4   3   4   4   3   4   4   3   4   4   3   2
## 1206  4  4  2  2  1  1  2  3  4   2   3   2   2   3   3   2   5   5   2   2
## 1207  3  3  3  2  3  4  2  1  2   3   4   4   4   4   3   5   4   5   5   4
## 1208  4  3  3  3  3  2  4  3  3   3   5   5   4   4   4   4   3   4   3   3
## 1209  2  3  4  3  2  2  4  5  4   3   4   4   5   5   5   4   2   5   4   5
## 1210  4  3  3  3  4  2  5  3  3   2   4   4   4   4   4   3   2   2   5   5
## 1211  4  3  3  3  3  4  3  2  4   3   4   5   4   4   4   3   3   4   2   4
## 1212  3  2  2  2  2  1  2  4  3   3   4   4   4   4   4   4   4   5   3   2
## 1213  4  2  2  2  2  2  2  1  1   2   3   4   4   4   4   4   3   4   3   3
## 1214  4  3  2  3  2  3  2  2  2   2   3   4   4   4   3   4   5   4   3   3
## 1215  4  4  3  2  2  1  3  3  2   2   4   4   2   4   3   4   4   4   4   3
## 1216  4  2  4  4  4  4  4  4  4   2   4   4   4   4   4   3   4   4   4   2
## 1217  4  4  4  4  4  4  5  5  5   5   5   5   5   5   5   5   5   5   5   5
## 1218  4  3  2  3  4  3  3  3  2   3   5   3   5   5   3   2   2   2   3   4
## 1219  4  3  2  4  2  2  2  2  2   2   3   4   4   4   3   4   4   4   2   2
## 1220  5  5  4  3  3  2  2  2  2   4   4   3   4   4   3   3   3   3   4   3
## 1221  4  5  4  4  5  4  3  2  2   4   4   5   5   4   4   5   4   5   4   5
## 1222  5  5  5  4  4  4  3  3  3   3   4   4   4   4   3   3   3   4   4   1
## 1223  3  3  2  3  2  2  3  1  1   2   3   1   1   2   2   4   5   5   3   3
## 1224  4  4  3  3  4  2  2  2  2   2   4   4   5   5   4   4   5   5   5   4
## 1225  4  4  2  2  2  2  3  4  2   4   3   4   2   3   3   2   4   4   3   2
## 1226  4  4  3  2  4  1  4  2  1   3   4   4   4   4   4   5   4   4   4   4
## 1227  3  3  1  2  1  3  2  2  1   3   3   1   2   4   2   4   5   5   2   2
## 1228  5  4  5  4  4  2  4  4  4   3   4   3   3   4   3   4   5   3   2   3
## 1229  2  4  3  3  2  2  1  2  3   3   2   3   2   3   3   3   3   2   3   2
## 1230  5  2  2  3  1  1  1  2  1   1   2   5   4   3   3   4   5   4   2   3
## 1231  2  3  3  3  3  3  3  3  3   3   2   3   4   3   3   3   3   3   3   3
## 1232  3  5  5  5  5  5  3  3  3   4   5   5   5   5   5   5   5   5   5   2
## 1233  3  3  3  3  4  3  4  4  4   3   3   3   3   3   3   3   3   3   3   2
## 1234  3  4  4  4  3  4  4  4  4   3   3   3   3   3   3   3   3   3   3   3
## 1235  4  2  3  2  2  2  2  2  4   1   4   3   3   3   3   4   2   5   4   3
## 1236  3  3  3  3  3  3  4  4  4   3   4   4   4   4   4   4   2   2   2   2
## 1237  4  4  4  4  4  3  3  4  4   3   4   4   4   4   4   4   4   4   4   4
## 1238  4  4  4  4  4  4  4  4  4   4   5   5   5   4   5   3   2   3   3   3
## 1239  4  4  4  3  3  3  4  4  4   3   5   5   5   5   5   4   2   4   4   4
## 1240  2  2  2  2  2  2  3  2  2   2   3   4   2   2   2   3   3   3   3   3
## 1241  3  2  2  2  3  1  3  2  4   3   4   4   4   4   5   4   4   5   4   3
## 1242  4  3  3  4  3  2  4  4  4   4   4   4   3   3   3   3   3   3   3   3
## 1243  2  2  2  2  2  2  2  2  2   2   3   3   2   2   2   2   2   2   2   2
## 1244  3  3  3  3  3  3  3  3  3   3   2   2   3   3   3   4   2   3   3   3
## 1245  5  4  4  4  4  4  3  4  4   3   5   5   5   5   5   5   1   4   5   5
## 1246  4  2  2  2  3  2  4  4  4   2   4   4   4   4   4   5   4   4   2   2
## 1247  2  3  2  2  2  4  2  2  4   2   4   2   4   4   4   4   3   3   4   4
## 1248  4  4  4  4  4  4  2  2  2   2   4   4   4   4   4   4   4   2   2   3
## 1249  3  3  2  2  3  2  2  4  4   3   3   4   4   3   3   4   4   4   4   2
## 1250  3  3  2  2  2  3  4  3  2   3   4   4   4   4   4   4   4   4   3   3
## 1251  2  3  2  2  2  2  2  2  2   3   2   3   3   2   2   4   4   3   3   2
## 1252  4  4  2  2  2  2  2  2  2   2   4   4   4   4   4   2   2   4   5   3
## 1253  4  3  3  3  2  2  2  2  2   2   4   4   3   4   3   5   3   5   3   4
## 1254  4  4  4  4  4  3  5  4  4   3   4   4   4   5   4   4   5   4   4   4
## 1255  4  3  2  2  2  1  4  2  2   3   3   2   3   4   3   4   4   4   3   3
## 1256  4  4  3  2  3  2  4  4  5   3   4   4   4   5   4   5   3   5   4   3
## 1257  4  4  2  2  2  2  2  2  1   1   1   1   2   4   4   4   4   4   4   4
## 1258  3  2  3  3  4  2  2  1  2   3   3   2   2   4   3   4   4   4   3   4
## 1259  3  4  2  3  4  4  4  2  4   3   4   5   4   4   5   4   2   5   3   4
## 1260  3  4  3  3  4  3  2  2  1   1   3   3   3   3   3   4   4   4   4   4
## 1261  2  2  2  2  3  4  1  1  5   2   3   4   4   4   4   5   4   4   4   4
## 1262  4  3  2  2  4  2  4  2  4   2   2   2   2   3   2   4   4   5   2   2
## 1263  1  3  3  4  2  3  4  4  4   4   4   4   4   4   4   4   4   4   4   3
## 1264  4  2  2  2  2  2  2  4  2   3   4   2   3   4   4   3   4   4   2   3
## 1265  4  4  3  4  4  3  4  4  4   3   4   4   4   3   4   3   4   4   4   4
## 1266  2  3  4  2  2  2  2  4  2   2   4   2   4   4   2   4   4   4   4   4
## 1267  5  5  5  5  5  4  5  5  5   5   5   5   5   5   5   5   4   4   4   4
## 1268  4  4  4  4  4  4  5  4  4   3   4   4   4   4   4   5   4   4   4   3
## 1269  3  3  3  3  3  4  3  3  4   3   4   3   3   3   3   3   3   3   3   2
## 1270  4  4  4  4  4  5  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1271  2  2  2  2  2  2  2  2  3   3   3   3   3   3   3   3   3   3   3   3
## 1272  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1273  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1274  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1275  5  5  5  5  5  5  4  4  4   4   5   5   5   5   5   4   4   4   4   4
## 1276  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1277  3  3  3  3  3  3  3  3  3   3   3   3   3   3   4   3   4   4   4   4
## 1278  3  3  3  3  3  3  4  4  4   4   4   4   4   3   3   3   3   2   2   2
## 1279  4  4  4  4  4  4  4  4  4   4   5   5   5   5   5   4   4   4   4   4
## 1280  3  3  3  3  3  3  3  3  3   3   3   3   3   3   3   3   3   3   3   3
## 1281  4  2  4  4  3  2  4  5  4   3   4   4   4   5   4   5   5   4   4   4
## 1282  3  4  2  2  4  2  2  2  2   2   3   4   4   4   4   4   2   4   2   3
## 1283  4  4  2  2  2  2  1  1  1   2   3   2   2   3   3   4   4   4   3   3
## 1284  3  4  3  2  4  3  4  4  4   4   4   4   4   5   4   5   5   5   3   3
## 1285  4  2  2  2  2  1  2  4  3   5   4   4   5   5   5   4   4   5   4   4
## 1286  2  5  1  4  4  4  2  4  2   3   4   4   4   4   4   5   2   5   3   3
## 1287  4  1  4  4  4  4  2  2  4   3   5   4   5   5   5   4   4   5   3   5
## 1288  3  2  2  2  2  2  2  1  2   3   2   2   2   2   3   4   4   4   3   4
## 1289  4  4  4  2  3  3  2  4  4   4   5   4   3   5   4   4   3   4   4   3
## 1290  5  5  4  4  4  4  5  4  4   3   4   4   4   4   4   4   5   4   3   3
## 1291  4  4  4  3  4  4  3  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1292  4  3  3  2  2  3  1  2  4   3   4   4   4   4   4   4   5   5   2   3
## 1293  4  4  4  4  4  4  4  4  4   3   4   4   4   4   4   4   3   3   3   3
## 1294  4  4  4  2  2  2  2  4  2   2   4   4   4   4   4   4   4   4   3   3
## 1295  4  3  4  4  2  2  2  4  2   2   5   5   4   5   5   4   3   4   4   4
## 1296  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1297  2  2  2  2  2  2  2  2  2   2   3   1   2   2   1   3   3   4   2   1
## 1298  4  2  3  3  2  2  4  4  4   3   4   3   4   5   4   4   4   5   4   4
## 1299  4  4  4  4  4  2  3  3  4   3   5   5   5   5   5   5   4   4   4   2
## 1300  4  3  3  3  4  2  2  2  3   3   3   3   3   3   4   4   4   4   4   4
## 1301  3  4  3  2  4  4  2  4  4   3   4   4   4   4   4   4   4   2   4   4
## 1302  4  4  2  4  2  2  2  2  2   2   3   3   2   3   4   4   4   4   4   4
## 1303  2  2  2  2  2  2  4  3  2   2   3   4   4   4   4   3   3   4   4   5
## 1304  4  3  4  4  4  4  4  4  4   3   4   4   4   4   4   3   4   4   3   3
## 1305  5  5  4  3  4  4  3  4  5   5   4   4   4   4   4   4   4   4   4   4
## 1306  4  4  4  4  4  1  4  2  1   3   4   4   4   4   4   3   2   4   2   2
## 1307  3  4  2  2  4  2  2  2  1   4   4   4   4   4   4   4   3   5   3   3
## 1308  4  4  4  3  4  3  3  4  4   3   4   4   4   4   4   4   3   4   4   3
## 1309  2  4  2  2  2  2  2  2  4   2   3   2   4   4   4   4   4   4   3   2
## 1310  3  2  2  3  2  1  3  3  2   2   1   3   3   3   3   4   4   4   2   2
## 1311  3  3  2  2  2  2  3  2  4   3   3   2   2   2   2   4   4   4   3   3
## 1312  3  2  2  3  2  4  2  2  2   2   4   4   5   5   5   4   3   4   3   3
## 1313  3  4  2  2  2  2  2  2  2   2   2   2   2   2   2   4   4   4   4   4
## 1314  5  3  4  4  4  2  4  4  4   2   5   5   5   5   5   4   4   5   3   4
## 1315  4  4  4  3  3  4  4  4  4   4   4   2   3   4   4   4   3   4   2   2
## 1316  4  4  3  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1317  5  3  2  2  2  4  3  2  2   1   2   2   3   2   2   4   4   4   3   4
## 1318  4  4  4  4  4  4  4  4  2   3   4   4   4   4   4   3   4   4   4   5
## 1319  4  4  4  4  4  4  4  4  5   4   4   4   4   4   4   4   4   4   4   3
## 1320  4  4  4  4  3  2  2  3  2   2   4   4   4   4   4   4   4   4   4   4
## 1321  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1322  2  2  2  3  3  2  3  2  2   4   3   4   4   3   2   2   3   2   3   2
## 1323  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   3   3   3   4   4
## 1324  5  5  4  5  4  4  5  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1325  3  3  3  3  2  2  3  2  3   3   2   2   2   2   2   2   3   2   2   2
## 1326  4  4  4  4  4  4  4  3  4   3   3   3   4   4   4   4   4   4   4   4
## 1327  5  5  5  4  4  4  4  4  4   4   5   5   5   5   5   5   5   5   5   4
## 1328  3  3  3  3  3  3  3  3  4   4   3   4   4   4   3   4   4   3   4   4
## 1329  2  2  2  2  2  2  3  3  2   2   4   4   4   4   3   3   3   3   3   3
## 1330  4  3  3  4  3  2  4  4  4   4   4   4   3   3   3   3   3   3   3   3
## 1331  5  4  4  4  4  4  3  4  4   3   5   5   5   5   5   5   4   1   5   5
## 1332  4  3  3  4  2  2  4  2  2   4   4   4   4   4   4   4   4   3   3   4
## 1333  4  4  4  3  3  3  4  4  4   4   5   5   5   5   5   4   2   4   4   4
## 1334  2  2  2  2  2  2  3  2  2   2   3   4   2   2   2   3   3   3   3   3
## 1335  2  3  2  2  2  2  2  2  2   2   2   2   3   3   3   4   4   4   3   4
## 1336  4  3  3  3  3  3  3  3  3   2   3   4   3   3   3   2   3   3   3   3
## 1337  3  3  3  3  3  3  4  4  4   3   4   4   4   4   4   4   3   3   3   3
## 1338  2  2  2  2  2  3  2  2  2   2   2   2   2   2   2   2   2   2   2   2
## 1339  4  4  4  4  4  4  4  4  4   4   5   5   5   4   5   3   2   3   3   3
## 1340  4  4  4  4  4  3  3  4  4   3   4   4   4   4   4   4   4   4   4   4
## 1341  4  3  2  2  2  1  2  4  2   2   2   4   4   4   3   4   2   4   3   2
## 1342  4  4  3  4  4  2  2  2  2   3   3   4   4   3   3   3   3   3   3   3
## 1343  4  2  2  2  1  2  2  4  2   2   2   2   2   4   2   2   4   2   2   4
## 1344  4  4  4  3  3  3  3  2  4   4   4   4   4   4   4   4   4   4   4   4
## 1345  2  4  4  2  2  2  4  2  4   2   4   4   4   4   4   4   3   4   3   4
## 1346  4  2  2  2  2  2  2  4  2   2   4   4   4   4   3   4   4   4   2   2
## 1347  2  3  2  3  2  1  3  2  1   1   3   2   2   2   3   3   4   4   2   1
## 1348  3  3  2  2  2  2  4  4  2   2   4   4   4   4   4   4   2   4   4   2
## 1349  4  3  2  3  1  2  2  2  4   3   4   3   4   4   4   4   4   5   3   4
## 1350  3  2  2  3  1  2  2  2  1   3   2   2   4   1   3   4   5   4   3   2
## 1351  2  2  2  2  2  2  2  2  2   2   2   4   4   4   4   5   4   5   4   4
## 1352  4  3  3  3  4  4  4  3  4   3   4   4   4   4   4   3   4   3   4   4
## 1353  3  2  1  3  1  1  1  1  5   2   4   4   4   4   4   4   4   4   4   4
## 1354  2  4  2  2  3  3  2  1  2   2   3   2   4   3   4   4   3   4   3   4
## 1355  4  4  3  3  4  3  4  2  4   3   3   3   4   4   3   3   4   4   3   3
## 1356  4  4  4  4  2  2  4  2  2   4   4   2   4   4   4   5   4   4   4   4
## 1357  2  3  2  2  3  2  2  2  4   3   4   3   4   3   4   4   4   4   4   4
## 1358  4  2  5  4  4  4  2  2  4   4   4   4   4   4   4   5   4   4   4   5
## 1359  4  4  2  4  4  4  4  4  4   3   3   4   4   4   4   5   4   5   4   5
## 1360  4  2  4  4  4  2  2  2  2   3   3   4   4   4   3   4   4   4   3   3
## 1361  3  3  2  3  3  3  3  2  3   4   4   4   4   4   5   5   4   4   4   4
## 1362  4  4  3  4  4  4  2  3  2   3   3   4   3   4   3   4   4   4   3   4
## 1363  2  2  1  1  1  1  2  2  1   2   4   2   3   4   3   4   2   4   3   2
## 1364  4  3  2  2  3  2  4  4  2   3   4   4   4   5   4   4   5   5   4   4
## 1365  4  4  2  3  4  2  2  2  2   3   5   4   4   4   4   4   5   5   3   2
## 1366  4  4  2  3  3  2  1  4  2   3   5   5   4   5   4   4   4   4   3   3
## 1367  4  3  2  2  2  1  3  1  4   2   1   4   4   2   3   2   3   3   3   3
## 1368  4  3  3  2  2  2  2  2  2   2   2   2   2   4   3   4   3   4   2   3
## 1369  4  3  3  3  3  3  3  3  3   3   3   3   4   4   4   4   4   4   3   3
## 1370  4  3  2  2  2  1  4  4  4   2   4   3   4   2   2   4   4   4   4   4
## 1371  2  2  2  4  2  2  2  2  2   3   2   2   2   4   2   4   3   4   2   3
## 1372  2  2  2  2  2  2  4  2  2   2   2   2   3   2   2   4   3   4   4   4
## 1373  3  2  1  2  2  4  2  2  2   1   4   4   5   4   4   4   2   4   2   2
## 1374  2  4  4  2  2  2  1  2  4   4   4   4   4   4   3   4   5   4   2   2
## 1375  3  2  2  4  3  4  4  3  4   2   2   2   1   3   2   4   2   4   2   2
## 1376  3  3  3  3  3  4  4  3  4   3   4   4   4   4   4   5   4   4   4   4
## 1377  5  4  3  3  3  4  4  4  3   3   4   4   3   4   4   3   2   4   2   4
## 1378  4  4  3  4  4  2  2  2  2   3   4   4   4   4   3   4   3   4   4   3
## 1379  3  2  2  2  2  2  4  3  1   1   3   4   4   4   4   5   5   5   5   4
## 1380  5  3  3  4  4  2  2  2  2   2   4   3   3   4   4   4   4   4   4   3
## 1381  4  3  2  3  4  2  2  4  2   3   3   4   4   3   3   4   4   4   3   4
## 1382  4  3  2  2  3  2  4  4  3   3   4   4   4   4   4   4   2   4   4   3
## 1383  3  4  4  3  4  3  4  4  4   4   3   4   4   4   4   4   4   4   4   4
## 1384  1  2  2  2  1  2  1  2  2   3   3   3   3   2   3   3   3   4   3   3
## 1385  4  4  4  4  4  3  3  3  3   3   3   4   5   3   3   3   5   3   3   2
## 1386  3  3  3  3  3  3  3  3  3   3   3   3   3   3   3   3   3   3   3   3
## 1387  4  3  2  4  4  2  4  2  4   3   4   3   3   4   3   4   2   3   2   2
## 1388  4  3  3  2  2  2  4  4  4   3   3   3   3   3   3   3   2   4   2   2
## 1389  3  3  4  3  3  2  2  4  1   2   3   3   3   3   3   3   4   2   3   3
## 1390  2  4  2  3  3  3  3  3  3   3   3   3   3   3   3   3   3   3   3   3
## 1391  2  2  2  2  1  1  1  1  1   1   4   3   3   4   3   4   4   4   4   4
## 1392  3  2  2  1  3  2  4  3  4   2   3   3   4   4   4   4   4   5   4   3
## 1393  4  4  2  1  2  3  4  2  3   2   4   2   4   4   3   4   4   5   4   5
## 1394  4  3  3  3  2  2  2  4  4   2   3   3   3   4   4   4   4   4   5   4
## 1395  4  3  4  4  3  4  4  4  3   3   4   4   4   4   4   5   5   4   4   4
## 1396  4  4  3  4  2  1  1  2  1   3   4   2   4   3   4   3   4   4   3   3
## 1397  3  4  2  2  3  4  2  2  2   2   3   3   3   4   4   3   4   4   3   4
## 1398  4  3  3  2  3  4  3  4  4   3   3   4   4   4   4   3   4   5   5   4
## 1399  4  3  3  4  2  2  2  4  4   3   4   3   2   4   2   4   4   4   2   3
## 1400  4  4  2  2  4  2  2  4  4   3   4   2   4   4   4   4   2   4   4   2
## 1401  4  4  1  2  1  2  2  1  1   1   5   4   4   4   4   4   4   5   3   2
## 1402  4  4  2  2  3  4  4  2  4   3   3   3   2   3   3   4   4   4   4   3
## 1403  4  3  2  2  3  2  2  3  3   4   4   4   4   4   4   4   2   4   4   4
## 1404  4  2  2  2  2  3  2  2  2   3   3   3   4   4   4   5   3   5   4   5
## 1405  4  4  4  4  4  4  4  3  5   5   4   4   4   4   4   2   2   4   4   4
## 1406  4  3  2  2  2  2  2  4  2   3   4   4   4   4   4   4   4   4   3   3
## 1407  4  4  4  4  3  4  3  4  4   3   2   2   3   2   3   4   3   4   2   2
## 1408  4  3  4  2  2  2  4  4  2   2   3   4   4   4   3   2   4   4   4   4
## 1409  2  4  2  3  2  2  2  2  2   2   1   3   3   2   3   4   4   2   4   4
## 1410  4  3  2  2  2  2  2  2  2   3   3   4   4   4   4   4   4   4   4   3
## 1411  2  3  1  1  3  1  2  1  1   2   3   3   4   2   4   4   3   4   3   3
## 1412  2  3  1  1  1  2  2  2  4   2   3   3   2   4   3   3   4   4   4   4
## 1413  4  3  3  4  2  2  2  3  2   2   3   2   4   3   4   4   2   4   3   3
## 1414  4  4  4  4  4  3  4  4  3   4   4   4   5   5   5   4   3   4   4   4
## 1415  4  4  4  3  4  4  4  3  4   4   3   4   3   4   3   4   4   4   4   4
## 1416  4  2  4  3  4  3  4  4  2   3   4   4   4   4   4   4   4   4   4   3
## 1417  5  4  4  2  3  4  2  4  4   4   4   4   4   4   4   4   4   5   4   4
## 1418  4  4  4  4  4  4  4  4  4   4   4   4   4   4   3   4   2   4   3   3
## 1419  3  4  3  3  3  3  2  2  1   2   3   4   4   5   5   4   2   4   3   3
## 1420  3  4  3  3  3  3  2  2  1   2   3   4   4   5   5   4   2   4   3   3
## 1421  4  4  4  3  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   4
## 1422  4  3  3  3  3  1  2  3  2   3   4   4   4   4   4   3   2   4   3   3
## 1423  3  4  2  5  4  5  4  4  4   3   3   4   4   4   5   5   4   5   3   4
## 1424  5  4  3  3  4  2  2  4  4   3   4   4   4   4   4   4   3   4   4   4
## 1425  4  4  2  4  4  4  4  2  4   4   3   4   4   3   4   4   4   5   4   3
## 1426  3  2  2  2  4  2  2  2  4   3   3   4   4   4   4   4   2   4   4   4
## 1427  3  3  3  3  2  4  2  2  2   2   2   4   4   4   3   4   4   4   4   4
## 1428  4  4  2  4  4  2  2  4  2   2   4   2   4   4   3   4   3   5   2   2
## 1429  4  4  4  4  4  4  4  3  3   2   4   3   4   4   3   4   4   4   4   4
## 1430  2  2  2  4  2  2  4  4  2   2   5   4   5   5   5   5   5   4   4   4
## 1431  2  2  2  4  2  2  4  4  2   2   5   4   5   5   5   5   5   4   4   4
## 1432  4  2  3  2  4  2  4  4  2   2   3   2   4   4   3   4   2   4   4   4
## 1433  5  3  2  2  2  2  2  2  2   2   3   2   3   2   2   3   4   4   2   2
## 1434  4  4  3  4  4  2  2  5  4   3   4   5   5   4   4   5   2   3   3   3
## 1435  4  4  3  3  2  2  2  4  2   2   4   3   4   4   4   4   2   4   3   4
## 1436  4  4  4  4  3  3  3  3  4   4   4   4   4   4   4   5   5   5   5   3
## 1437  3  4  3  4  3  4  3  3  3   2   4   3   3   3   4   4   4   3   2   4
## 1438  2  2  2  2  2  3  3  2  3   3   3   2   2   2   2   2   2   2   2   2
## 1439  4  5  4  4  5  5  5  5  5   5   5   5   5   5   5   5   5   5   4   4
## 1440  3  2  4  4  4  4  3  3  3   3   4   4   4   3   3   3   4   3   4   3
## 1441  5  5  4  4  4  4  4  4  4   4   3   4   4   4   4   3   4   3   4   4
## 1442  4  4  3  4  4  4  4  4  3   3   3   4   4   4   3   4   2   4   4   3
## 1443  4  3  3  4  3  2  4  4  4   4   4   4   3   3   3   3   3   3   3   3
## 1444  4  4  4  4  4  3  3  4  4   3   4   4   4   4   4   4   4   4   4   4
## 1445  4  4  4  3  3  3  4  4  4   3   5   5   5   5   5   4   2   4   4   4
## 1446  2  2  2  2  2  2  3  2  2   2   3   4   2   2   2   3   3   3   3   3
## 1447  2  2  2  2  2  2  2  2  2   2   3   3   2   2   2   2   2   2   2   2
## 1448  2  2  2  2  2  2  2  2  2   2   3   3   2   2   2   2   2   2   2   2
## 1449  4  4  4  4  4  3  3  4  4   3   4   4   4   4   4   4   4   4   4   4
## 1450  4  4  4  3  3  3  4  4  4   3   5   5   5   5   5   4   2   4   4   4
## 1451  2  2  2  2  2  2  3  2  2   2   3   4   2   2   2   3   3   3   3   3
## 1452  5  4  4  4  4  4  3  4  4   3   5   5   5   5   5   5   1   4   5   5
## 1453  2  2  2  2  2  2  2  2  2   2   3   3   2   2   2   2   2   2   2   2
## 1454  4  3  3  3  3  3  3  3  3   2   3   4   3   3   3   2   3   3   3   3
## 1455  4  4  4  4  4  4  4  4  4   4   5   5   5   4   5   3   2   3   3   3
## 1456  4  3  3  4  3  2  4  4  4   4   4   4   3   3   3   3   3   3   3   3
## 1457  3  3  3  3  3  3  4  4  4   3   4   4   4   4   4   4   2   2   2   2
## 1458  3  3  3  3  3  3  4  4  4   3   4   4   4   4   4   4   2   2   2   2
## 1459  2  2  2  2  2  3  2  2  2   2   2   2   2   2   2   2   2   2   2   2
## 1460  3  3  3  3  3  3  3  3  3   3   2   2   3   3   3   4   2   3   3   3
## 1461  4  3  3  3  3  3  3  3  3   2   3   4   3   3   3   2   3   3   3   3
## 1462  4  4  4  4  4  4  4  4  4   4   5   5   5   4   5   3   2   3   3   3
## 1463  3  4  3  4  2  2  2  4  2   2   4   3   4   4   4   4   4   4   4   4
## 1464  4  4  3  3  3  3  3  3  3   3   3   3   3   3   3   3   3   3   3   3
## 1465  4  4  2  4  3  4  4  2  4   2   3   3   4   4   4   5   5   5   3   4
## 1466  4  4  4  3  2  3  2  2  4   3   4   3   4   4   4   4   4   4   4   2
## 1467  4  2  1  4  1  1  1  1  1   1   4   4   2   4   4   4   5   5   4   2
## 1468  5  3  4  3  4  2  2  4  2   2   5   4   4   5   4   5   5   5   4   5
## 1469  4  3  4  4  4  3  4  4  5   3   4   5   5   5   5   4   5   4   5   4
## 1470  4  3  3  3  3  3  3  3  3   3   3   3   4   4   4   4   4   4   3   3
## 1471  2  4  2  4  3  2  1  1  4   4   4   4   4   4   4   4   4   5   5   4
## 1472  5  3  2  2  2  2  2  1  2   1   2   2   2   3   3   4   4   5   2   2
## 1473  4  3  2  2  1  1  1  1  2   3   2   2   3   3   3   4   5   5   3   5
## 1474  4  3  4  2  2  2  4  2  2   2   5   5   5   5   5   5   5   5   5   5
## 1475  3  4  3  4  3  2  2  3  3   3   3   4   4   4   4   4   4   4   4   4
## 1476  1  4  3  2  3  4  4  4  4   3   4   4   4   4   4   4   5   5   3   3
## 1477  4  4  4  4  4  4  4  4  4   4   4   4   4   4   4   4   4   4   4   1
## 1478  4  4  2  2  3  1  2  4  1   2   3   3   3   3   3   2   3   2   2   2
## 1479  2  2  2  2  2  2  4  3  4   2   4   4   4   4   4   4   3   4   2   2
## 1480  4  3  2  2  2  3  2  4  2   3   4   4   4   2   4   4   4   4   4   2
## 1481  4  3  4  2  3  3  2  2  4   2   5   4   3   5   4   4   4   5   4   4
## 1482  4  4  4  2  4  4  2  2  4   3   3   2   4   2   4   4   4   4   4   4
## 1483  3  2  2  2  2  2  4  2  2   3   2   2   2   2   2   3   4   4   2   2
## 1484  4  2  2  2  2  2  2  2  2   2   2   2   4   2   2   4   4   2   2   2
## 1485  4  2  2  2  2  2  2  4  2   2   4   4   4   4   4   4   4   4   3   4
## 1486  4  2  3  4  4  2  4  4  4   4   4   4   4   4   4   4   2   5   4   4
##  [ ... rows 1487-1925 of the 20 item-response columns omitted here for brevity ... ]
##      Gender EDU  BF  BM Happiness Peace
## 1         0   1 3.4 3.2       4.0   4.0
## 2         0   1 4.0 3.4       4.0   2.8
##  [ ... rows 3-682 of the Gender, EDU, BF, BM, Happiness, Peace columns omitted for brevity ... ]
## 683       0   2 2.2 2.4       2.6   2.6
## 684       1   2 3.4 2.0       3.2   3.8
## 685       1   3 4.0 4.0       3.8   4.4
## 686       0   3 2.8 2.4       4.0   3.8
## 687       0   3 4.4 2.8       4.4   4.6
## 688       1   4 3.2 2.0       3.6   3.8
## 689       0   4 2.6 3.4       3.6   4.0
## 690       0   4 4.0 2.8       4.0   3.0
## 691       0   3 3.0 3.0       4.6   3.4
## 692       0   2 2.8 2.2       4.0   4.8
## 693       0   2 2.4 2.4       3.8   4.6
## 694       1   4 2.0 2.4       2.4   4.2
## 695       0   2 2.2 2.2       2.6   2.8
## 696       0   3 2.8 2.4       4.0   3.2
## 697       0   2 3.0 2.0       3.8   4.2
## 698       0   4 2.0 2.2       3.2   4.0
## 699       0   3 2.2 2.0       3.2   3.6
## 700       0   2 3.6 3.6       4.2   4.0
## 701       1   3 4.2 4.0       4.4   4.4
## 702       0   3 2.8 2.2       3.0   4.0
## 703       0   3 2.0 2.0       2.2   3.2
## 704       0   2 3.6 2.8       4.4   4.6
## 705       0   3 2.4 2.0       2.4   3.8
## 706       0   4 2.8 1.2       3.4   3.0
## 707       0   3 4.2 4.0       4.2   4.2
## 708       1   4 4.0 4.0       4.0   4.0
## 709       1   3 2.6 2.8       3.6   3.2
## 710       1   3 4.0 2.0       3.8   4.4
## 711       1   3 3.8 2.6       4.0   4.0
## 712       1   2 4.2 3.8       4.0   4.0
## 713       0   4 3.2 2.6       2.8   4.8
## 714       0   4 4.0 2.6       4.0   5.0
## 715       0   4 2.6 2.8       4.0   4.2
## 716       0   3 3.6 2.2       4.4   5.0
## 717       1   3 4.2 4.0       4.4   4.4
## 718       1   2 3.6 2.0       3.0   4.0
## 719       0   3 3.0 3.0       5.0   4.4
## 720       0   3 3.8 3.0       4.0   3.8
## 721       1   3 2.2 2.0       2.4   2.2
## 722       1   4 2.8 2.6       3.0   2.4
## 723       0   3 2.4 1.6       2.0   2.6
## 724       1   4 2.4 2.2       2.2   2.6
## 725       0   3 2.2 2.2       2.4   2.4
## 726       1   3 2.2 1.6       1.4   1.8
## 727       0   4 2.2 2.2       2.4   1.6
## 728       0   3 2.4 2.0       2.0   2.6
## 729       1   4 2.0 2.2       2.4   2.8
## 730       0   2 2.4 2.0       1.8   2.0
## 731       1   3 2.2 2.0       2.4   2.2
## 732       0   3 2.4 3.4       2.4   3.6
## 733       1   3 3.4 3.0       4.0   3.8
## 734       1   3 2.6 2.4       2.5   3.0
## 735       1   4 3.6 2.8       3.8   3.6
## 736       0   3 4.0 3.8       3.6   4.6
## 737       0   3 4.0 3.8       4.0   4.0
## 738       1   2 3.6 2.6       3.2   3.4
## 739       0   2 1.4 2.6       3.6   4.6
## 740       1   3 3.4 3.8       4.0   3.6
## 741       0   3 4.4 5.0       4.6   4.0
## 742       1   4 3.8 2.0       3.8   2.4
## 743       0   3 3.4 3.8       3.8   3.8
## 744       1   3 2.2 2.4       3.4   3.4
## 745       0   2 4.0 3.2       4.0   4.0
## 746       1   4 3.6 3.0       3.0   3.8
## 747       0   2 3.8 3.0       3.2   3.2
## 748       0   4 3.6 3.6       4.0   4.0
## 749       0   3 3.2 3.6       4.0   3.4
## 750       1   3 3.6 3.0       5.0   4.6
## 751       0   3 3.0 1.8       3.6   3.6
## 752       1   2 3.2 2.8       3.2   3.2
## 753       1   3 3.0 3.6       3.2   3.6
## 754       1   3 3.6 2.2       3.4   4.2
## 755       0   2 4.2 4.2       4.0   4.2
## 756       0   4 3.0 2.8       4.0   3.6
## 757       0   2 4.0 4.0       4.0   4.0
## 758       0   3 3.0 2.6       3.4   3.8
## 759       0   2 2.8 3.0       3.8   4.0
## 760       0   3 2.8 3.4       3.8   4.0
## 761       0   2 4.4 3.8       3.6   4.0
## 762       0   3 1.8 1.0       3.6   3.4
## 763       1   2 2.8 2.2       4.2   4.0
## 764       1   3 2.6 3.0       3.2   2.8
## 765       1   3 2.6 3.0       3.2   2.8
## 766       1   4 2.6 2.8       2.2   2.6
## 767       1   3 3.0 3.0       2.6   2.8
## 768       1   3 2.0 1.4       1.8   2.0
## 769       1   3 1.8 1.6       2.4   2.0
## 770       1   3 2.8 2.2       2.4   3.0
## 771       0   3 2.2 2.0       2.6   2.2
## 772       1   3 2.0 2.0       2.2   1.8
## 773       0   3 2.2 1.6       2.0   2.2
## 774       1   4 2.6 2.8       2.2   2.6
## 775       1   3 3.0 3.0       2.6   2.8
## 776       1   3 2.0 1.4       1.8   2.0
## 777       1   3 1.8 1.6       2.4   2.0
## 778       1   3 2.8 2.2       2.4   3.0
## 779       0   4 2.0 1.6       1.6   1.8
## 780       0   3 3.6 2.8       4.0   4.0
## 781       0   2 3.2 2.4       4.0   4.0
## 782       1   3 5.0 5.0       5.0   5.0
## 783       0   3 3.6 3.4       4.4   3.8
## 784       0   4 3.2 2.8       4.0   4.0
## 785       1   2 2.6 3.2       4.4   4.2
## 786       1   3 3.0 3.2       3.6   3.8
## 787       0   3 3.6 3.6       3.6   4.2
## 788       1   3 4.0 4.0       4.0   4.0
## 789       1   3 4.0 4.0       4.8   4.4
## 790       0   3 1.8 2.0       3.8   3.4
## 791       0   3 2.4 2.6       4.0   3.6
## 792       0   3 4.0 3.2       3.4   4.4
## 793       1   4 2.8 2.6       4.2   4.4
## 794       0   2 2.4 1.8       3.2   4.2
## 795       1   1 3.6 2.8       3.6   3.2
## 796       0   3 4.2 3.8       4.0   3.6
## 797       0   2 4.0 3.4       3.6   3.4
## 798       0   2 2.4 2.2       3.6   3.8
## 799       0   2 2.6 3.6       3.6   3.6
## 800       0   3 4.4 3.4       2.4   3.6
## 801       0   3 2.2 2.4       3.0   4.4
## 802       0   2 4.0 4.2       4.0   5.0
## 803       1   4 2.0 3.2       2.4   2.6
## 804       0   3 2.4 1.6       3.4   3.6
## 805       0   3 4.0 3.6       4.0   4.0
## 806       0   2 2.4 2.8       2.8   2.8
## 807       0   3 3.2 2.2       4.0   4.6
## 808       0   3 4.0 2.6       4.6   4.8
## 809       1   2 3.6 3.0       4.0   3.2
## 810       1   2 4.0 3.8       3.6   4.0
## 811       0   3 4.6 4.0       4.0   4.2
## 812       0   3 2.8 2.2       3.8   3.8
## 813       0   3 3.6 2.2       4.2   4.0
## 814       0   3 1.4 1.6       2.2   2.0
## 815       1   3 2.2 2.8       1.8   1.8
## 816       1   3 2.2 1.8       1.8   2.0
## 817       0   3 3.4 3.6       3.6   4.0
## 818       0   2 2.8 2.0       2.6   2.4
## 819       1   4 2.2 2.4       4.0   4.0
## 820       0   3 1.8 2.4       3.8   3.2
## 821       1   2 2.6 2.2       4.0   3.4
## 822       0   1 1.8 1.4       3.6   4.0
## 823       1   4 3.4 2.4       3.4   3.6
## 824       1   3 3.4 3.6       3.8   3.6
## 825       1   4 4.4 3.4       4.0   3.8
## 826       1   3 3.6 2.4       3.8   4.0
## 827       0   3 3.4 2.6       3.3   3.8
## 828       0   1 2.0 1.8       1.6   1.4
## 829       1   3 3.4 4.0       3.4   4.0
## 830       0   3 3.0 2.8       4.0   3.6
## 831       1   2 4.0 3.6       4.0   3.8
## 832       1   3 2.8 2.8       3.2   2.2
## 833       0   3 2.8 2.4       4.0   4.4
## 834       0   3 3.6 2.4       3.6   3.8
## 835       0   3 2.6 2.0       3.6   4.0
## 836       0   2 5.0 3.8       3.6   3.6
## 837       0   3 2.4 1.8       3.6   3.5
## 838       0   3 2.0 2.4       3.4   4.0
## 839       0   3 3.4 4.0       3.2   3.6
## 840       1   4 4.4 3.4       3.6   4.2
## 841       0   3 2.6 2.8       3.2   3.6
## 842       1   3 3.2 3.2       3.8   3.6
## 843       1   3 2.2 1.8       3.4   3.2
## 844       0   2 2.2 4.0       4.4   3.8
## 845       1   2 3.8 3.4       4.0   3.2
## 846       1   3 4.6 5.0       4.8   4.2
## 847       1   2 3.0 3.0       3.0   2.8
## 848       0   2 2.0 2.6       2.0   2.0
## 849       0   3 3.6 2.2       2.6   4.0
## 850       1   3 2.4 2.2       2.2   2.6
## 851       0   3 2.4 2.2       2.2   2.6
## 852       0   3 2.4 2.2       2.2   2.6
## 853       0   3 1.4 1.0       1.8   2.6
## 854       1   4 2.4 2.0       2.2   2.2
## 855       1   4 2.6 2.6       2.8   2.4
## 856       0   2 2.4 2.2       2.2   2.4
## 857       0   4 2.6 2.0       2.6   2.4
## 858       0   2 2.0 2.0       2.4   2.0
## 859       0   3 4.2 3.0       3.4   4.2
## 860       0   3 3.4 4.0       3.8   4.0
## 861       0   3 3.2 2.8       3.6   3.6
## 862       0   3 3.4 2.8       4.6   4.6
## 863       0   3 3.0 2.4       2.8   2.8
## 864       0   3 2.4 2.6       3.4   3.6
## 865       1   2 3.2 2.6       3.2   3.4
## 866       0   2 3.2 2.8       4.0   3.6
## 867       0   3 3.0 2.6       2.6   2.6
## 868       1   4 3.4 4.2       3.4   4.8
## 869       0   2 2.8 1.2       3.6   4.0
## 870       1   2 3.6 2.6       4.0   3.8
## 871       0   3 4.0 4.0       3.6   3.6
## 872       0   2 3.8 3.6       4.0   4.0
## 873       0   4 3.2 2.4       4.8   3.6
## 874       0   3 3.2 2.0       3.2   2.0
## 875       0   3 2.2 2.6       3.5   3.4
## 876       1   2 3.6 3.4       3.6   3.6
## 877       1   4 3.2 2.0       2.6   3.6
## 878       0   4 3.4 3.8       4.0   4.0
## 879       0   2 4.8 3.4       4.2   4.0
## 880       0   2 2.8 2.6       2.0   4.0
## 881       0   2 3.4 2.0       3.0   3.6
## 882       0   1 4.0 4.0       4.0   4.0
## 883       0   3 2.8 2.2       4.0   3.4
## 884       0   3 4.0 4.4       4.2   3.4
## 885       0   3 2.0 2.4       3.8   3.8
## 886       1   3 3.8 4.0       4.0   3.8
## 887       0   2 2.2 2.0       2.4   2.0
## 888       1   3 4.0 4.2       4.0   3.6
## 889       0   4 2.4 2.2       2.2   2.6
## 890       0   3 2.6 2.0       2.2   2.8
## 891       1   3 1.6 1.8       2.0   2.2
## 892       1   4 2.4 2.2       2.4   2.4
## 893       0   3 2.4 2.4       2.2   2.0
## 894       0   2 3.2 2.6       2.6   2.5
## 895       1   4 1.6 1.6       2.0   2.6
## 896       0   3 2.6 2.0       2.2   2.8
## 897       1   3 1.6 1.8       2.0   2.2
## 898       1   4 2.4 2.2       2.4   2.4
## 899       0   3 2.4 2.4       2.2   2.0
## 900       1   2 1.8 1.6       2.4   2.0
## 901       0   4 2.6 2.2       3.0   4.4
## 902       1   3 1.2 1.2       2.0   2.0
## 903       1   3 2.0 3.2       3.8   3.8
## 904       0   3 3.2 2.2       4.0   3.6
## 905       0   3 2.8 3.0       4.0   4.0
## 906       0   2 4.8 2.8       4.8   3.4
## 907       0   4 2.0 2.0       4.0   4.0
## 908       0   3 4.0 4.0       4.0   4.0
## 909       0   2 3.2 2.4       2.8   3.8
## 910       1   2 2.4 1.6       3.8   3.8
## 911       0   3 2.8 2.2       3.0   4.4
## 912       0   3 4.2 4.4       3.4   4.0
## 913       0   4 2.4 2.0       3.6   4.0
## 914       0   2 3.0 2.2       3.2   4.0
## 915       1   3 2.4 2.0       2.4   3.6
## 916       1   4 2.0 2.0       4.4   3.4
## 917       0   1 3.6 3.8       4.2   4.0
## 918       0   3 2.8 2.2       3.8   4.6
## 919       0   3 3.4 3.6       3.6   4.0
## 920       0   3 3.6 3.6       3.6   4.0
## 921       1   3 4.0 3.6       4.4   4.2
## 922       0   3 3.0 3.4       3.6   4.0
## 923       0   3 3.0 1.8       3.2   3.6
## 924       0   4 3.8 3.4       4.2   4.6
## 925       1   4 2.4 2.8       2.6   3.6
## 926       0   2 2.0 2.0       2.0   3.0
## 927       0   3 3.4 2.6       4.0   4.0
## 928       1   3 2.2 3.4       4.4   5.0
## 929       1   3 2.2 3.4       4.4   5.0
## 930       1   4 3.8 4.0       4.0   3.8
## 931       1   4 3.6 3.4       3.8   3.6
## 932       1   3 2.8 2.4       2.8   3.8
## 933       1   3 3.2 4.0       4.0   5.0
## 934       1   3 3.2 3.0       3.2   4.2
## 935       0   3 1.8 1.4       2.0   3.8
## 936       0   2 3.2 2.0       3.4   3.5
## 937       0   2 2.4 3.2       3.2   4.0
## 938       1   4 1.4 1.6       2.0   3.2
## 939       1   3 2.6 2.4       2.8   3.0
## 940       1   3 2.2 2.2       2.2   2.4
## 941       0   3 3.0 3.0       2.6   2.6
## 942       1   3 1.4 1.8       2.0   2.0
## 943       0   3 2.2 2.4       2.4   2.4
## 944       0   3 2.4 2.4       2.0   2.0
## 945       0   2 2.8 2.4       3.0   3.6
## 946       1   3 3.2 2.8       4.2   4.4
## 947       0   2 2.8 3.4       3.6   3.7
## 948       1   3 4.0 3.8       3.2   3.6
## 949       1   2 3.0 1.6       2.8   3.2
## 950       0   2 1.8 3.2       3.8   3.8
## 951       0   4 3.8 4.0       4.8   4.4
## 952       0   3 2.4 2.4       3.8   4.2
## 953       0   4 2.0 1.8       2.0   3.2
## 954       1   3 3.8 4.0       3.8   4.0
## 955       1   4 4.0 3.4       4.6   4.2
## 956       0   3 3.2 3.2       4.0   4.6
## 957       1   4 3.8 3.0       3.6   4.0
## 958       1   3 3.0 3.4       3.6   3.6
## 959       0   4 3.6 3.2       4.4   4.8
## 960       0   3 2.8 3.4       3.8   4.0
## 961       0   1 2.8 1.0       3.0   3.8
## 962       0   3 3.4 3.2       4.0   3.2
## 963       0   2 4.0 3.6       4.0   3.8
## 964       1   4 3.6 3.4       3.6   3.4
## 965       0   3 1.4 2.2       4.2   4.2
## 966       1   4 2.2 2.0       3.2   3.4
## 967       1   2 2.0 2.0       2.0   2.4
## 968       1   3 2.4 2.6       3.4   2.8
## 969       0   3 2.4 2.8       3.2   3.4
## 970       0   3 3.8 3.4       4.0   4.2
## 971       0   3 3.6 3.6       3.8   2.8
## 972       0   2 1.8 2.0       3.6   4.6
## 973       0   2 4.2 3.0       4.4   4.4
## 974       0   2 4.0 5.0       4.6   3.2
## 975       1   3 3.2 3.8       4.8   4.0
## 976       0   2 2.4 2.4       4.0   3.6
## 977       0   3 2.6 2.0       3.6   4.2
## 978       0   2 3.2 3.8       3.4   3.2
## 979       0   3 2.8 3.6       4.0   3.6
## 980       0   3 4.0 3.8       4.2   3.6
## 981       0   3 3.8 3.6       3.8   3.0
## 982       0   3 2.4 2.0       2.2   2.6
## 983       0   3 2.6 2.0       2.4   2.4
## 984       0   4 2.8 2.6       2.4   2.4
## 985       1   3 3.6 4.2       3.0   3.2
## 986       0   3 3.4 2.6       2.8   3.2
## 987       0   4 4.0 2.4       2.4   3.0
## 988       0   3 2.6 2.2       4.0   3.4
## 989       1   4 4.0 3.8       3.8   3.6
## 990       1   3 2.8 2.8       2.6   3.6
## 991       1   2 3.6 2.4       3.4   4.6
## 992       0   4 2.6 2.6       4.0   3.6
## 993       0   4 3.6 2.4       4.0   3.4
## 994       0   4 3.4 4.0       4.4   4.8
## 995       0   3 2.0 3.0       3.2   4.4
## 996       0   3 3.8 3.4       4.8   4.2
## 997       0   3 2.2 1.6       3.2   3.4
## 998       1   3 3.0 2.2       3.2   3.4
## 999       0   2 2.6 2.2       2.4   3.0
## 1000      0   3 2.4 3.0       3.6   3.6
## 1001      0   3 3.6 3.8       4.8   4.4
## 1002      1   3 3.0 3.4       4.0   3.6
## 1003      0   1 3.8 3.8       4.0   4.0
## 1004      0   1 3.2 3.0       3.5   3.6
## 1005      0   1 3.2 3.2       3.6   3.4
## 1006      1   4 2.8 2.0       3.6   3.0
## 1007      1   2 4.0 3.6       4.2   5.0
## 1008      1   3 2.4 1.8       3.6   3.6
## 1009      0   1 3.0 2.8       2.8   3.4
## 1010      1   2 3.2 3.0       3.2   4.0
## 1011      0   3 2.4 2.4       1.4   1.8
## 1012      0   3 3.4 2.6       2.8   2.8
## 1013      1   3 3.6 3.4       3.2   2.6
## 1014      1   3 1.6 1.6       2.4   2.6
## 1015      0   3 3.4 2.6       2.8   2.8
## 1016      0   3 3.2 3.4       3.8   4.2
## 1017      1   4 3.6 3.0       2.8   3.8
## 1018      1   4 4.0 4.2       3.6   3.8
## 1019      1   3 4.0 3.6       3.6   3.6
## 1020      1   2 4.0 3.4       3.8   2.8
## 1021      1   2 2.8 3.2       3.6   3.0
## 1022      0   2 3.8 3.0       2.4   3.4
## 1023      0   3 2.0 2.4       2.2   2.8
## 1024      0   3 2.4 2.0       3.6   4.2
## 1025      0   3 3.2 3.0       3.6   4.0
## 1026      0   1 3.0 2.4       2.2   3.2
## 1027      0   3 3.4 2.0       4.0   3.8
## 1028      1   4 2.4 2.6       3.6   4.0
## 1029      1   3 3.8 3.8       3.6   4.0
## 1030      1   2 1.8 2.2       2.6   4.0
## 1031      1   3 4.0 4.0       4.0   4.0
## 1032      0   1 4.0 3.6       2.0   2.8
## 1033      1   3 1.6 1.4       2.6   4.0
## 1034      0   3 3.8 2.4       3.4   3.8
## 1035      0   2 2.8 2.8       3.6   3.6
## 1036      1   4 4.8 4.0       4.0   4.0
## 1037      1   3 3.8 3.8       4.0   4.0
## 1038      0   2 3.8 3.0       4.2   4.0
## 1039      1   3 4.2 4.2       5.0   5.0
## 1040      0   3 4.0 5.0       4.6   4.8
## 1041      0   3 4.2 2.4       4.2   3.4
## 1042      1   3 3.0 2.4       3.2   4.0
## 1043      0   3 4.0 3.0       3.0   3.8
## 1044      0   2 3.8 3.8       4.0   3.4
## 1045      0   4 3.2 3.0       4.2   4.4
## 1046      1   3 3.8 2.8       3.8   3.6
## 1047      0   2 3.2 3.6       4.7   4.0
## 1048      0   2 3.2 4.0       3.9   4.4
## 1049      1   3 4.0 4.0       4.0   4.0
## 1050      1   2 2.8 2.0       2.8   3.6
## 1051      1   2 3.0 2.8       3.0   2.8
## 1052      1   4 3.8 2.8       2.8   3.4
## 1053      0   3 2.4 4.0       2.8   4.0
## 1054      1   4 3.4 1.6       4.0   2.6
## 1055      0   3 2.0 2.2       3.0   3.2
## 1056      0   3 2.8 2.6       3.6   3.8
## 1057      1   3 2.2 2.2       3.0   3.0
## 1058      1   3 3.0 1.8       2.6   4.0
## 1059      1   3 5.0 5.0       3.6   2.6
## 1060      0   3 2.2 2.8       3.0   3.6
## 1061      0   3 3.6 3.4       4.0   4.0
## 1062      1   3 2.4 2.2       3.4   4.4
## 1063      1   3 2.8 3.2       3.6   4.0
## 1064      0   2 3.2 2.4       2.0   2.8
## 1065      0   3 2.4 2.8       2.6   3.4
## 1066      0   3 2.0 3.6       4.2   4.6
## 1067      0   3 2.4 2.6       2.8   3.8
## 1068      0   3 3.2 2.4       3.8   3.6
## 1069      0   3 4.0 4.0       4.0   4.0
## 1070      0   2 2.2 1.2       3.6   3.6
## 1071      0   3 4.0 4.0       4.2   4.4
## 1072      1   3 2.0 2.0       3.6   4.8
## 1073      1   3 4.0 2.6       3.8   3.8
## 1074      0   3 3.0 2.8       4.0   5.0
## 1075      0   2 4.0 2.6       2.8   2.0
## 1076      0   3 2.8 2.8       3.2   2.4
## 1077      0   3 3.4 2.8       2.8   3.0
## 1078      1   3 2.4 2.6       4.0   3.6
## 1079      1   3 2.6 1.8       3.6   3.0
## 1080      0   3 3.2 2.4       4.0   4.0
## 1081      1   3 2.6 2.4       2.6   2.8
## 1082      0   3 2.6 3.2       3.0   3.6
## 1083      0   3 4.0 5.0       4.0   3.6
## 1084      0   3 3.6 2.8       4.0   3.6
## 1085      1   3 2.4 2.2       3.8   4.0
## 1086      1   3 4.6 4.4       5.0   4.2
## 1087      0   3 3.2 2.8       5.0   3.4
## 1088      1   3 2.0 2.4       1.8   3.8
## 1089      1   4 2.4 1.8       4.0   3.4
## 1090      0   3 3.4 2.6       5.0   4.4
## 1091      0   3 2.4 1.8       2.4   4.0
## 1092      0   3 3.6 2.2       4.2   4.0
## 1093      0   3 3.2 3.2       3.6   3.8
## 1094      1   3 3.2 2.8       4.0   3.6
## 1095      1   3 3.4 3.2       2.8   1.6
## 1096      0   3 2.4 2.6       3.0   3.8
## 1097      0   2 2.4 2.8       2.2   4.2
## 1098      1   3 3.4 2.4       2.2   3.6
## 1099      1   3 4.0 3.0       4.0   3.2
## 1100      0   3 3.6 3.0       5.0   4.6
## 1101      1   3 3.2 4.8       5.0   3.0
## 1102      0   3 3.0 2.6       4.0   3.0
## 1103      0   3 3.8 4.0       4.0   4.8
## 1104      1   4 3.2 3.0       4.0   3.6
## 1105      1   4 4.0 4.2       4.2   3.8
## 1106      1   3 3.8 4.2       4.2   3.8
## 1107      1   3 2.0 2.0       3.6   3.8
## 1108      0   3 3.6 3.6       3.4   3.4
## 1109      0   2 4.0 3.2       4.0   4.8
## 1110      0   2 3.6 3.2       4.0   3.6
## 1111      1   4 3.6 2.2       2.4   2.6
## 1112      0   3 3.4 3.2       4.6   4.0
## 1113      1   3 3.2 3.8       3.0   3.8
## 1114      0   2 4.0 3.4       4.6   4.0
## 1115      0   3 2.0 2.0       3.0   2.6
## 1116      0   3 2.0 3.0       2.2   2.4
## 1117      0   4 4.0 4.0       5.0   4.0
## 1118      1   2 3.0 3.0       3.0   3.0
## 1119      1   3 4.0 4.0       5.0   3.6
## 1120      0   3 5.0 4.0       5.0   4.0
## 1121      1   3 1.6 3.0       3.2   2.0
## 1122      1   3 1.0 2.0       3.0   3.0
## 1123      1   3 3.6 2.8       4.6   4.6
## 1124      1   3 4.4 3.2       5.0   4.8
## 1125      1   3 3.8 4.0       5.0   4.2
## 1126      0   3 2.4 2.8       4.0   3.6
## 1127      0   3 2.2 2.6       3.8   3.2
## 1128      1   3 3.0 1.6       2.4   2.6
## 1129      1   3 4.0 3.6       3.8   3.8
## 1130      0   3 3.8 3.8       4.6   3.2
## 1131      1   3 2.8 2.6       2.4   3.8
## 1132      0   3 3.6 2.6       3.6   2.8
## 1133      0   3 2.8 1.8       4.0   4.0
## 1134      1   1 2.2 2.0       1.6   2.6
## 1135      1   3 2.2 2.8       3.4   3.2
## 1136      0   3 2.0 2.0       2.8   4.6
## 1137      0   2 3.8 2.4       2.0   4.0
## 1138      0   4 2.8 1.8       2.6   3.8
## 1139      0   4 1.8 1.6       3.2   3.6
## 1140      0   3 3.0 2.8       3.0   3.6
## 1141      1   2 3.4 3.0       1.4   2.6
## 1142      1   3 3.6 2.4       3.6   3.0
## 1143      0   3 3.0 3.6       3.8   4.4
## 1144      0   3 4.0 3.8       4.0   4.0
## 1145      0   3 3.0 2.8       4.0   3.4
## 1146      0   3 2.8 2.4       2.4   3.8
## 1147      0   4 2.8 2.2       3.8   3.6
## 1148      0   3 3.4 2.6       3.6   4.0
## 1149      1   3 3.0 2.4       4.0   3.2
## 1150      0   3 3.6 2.4       3.8   3.6
## 1151      0   4 3.6 3.2       4.6   4.2
## 1152      0   2 2.8 2.0       3.6   3.2
## 1153      1   3 3.6 2.2       3.2   3.4
## 1154      1   3 4.2 5.0       5.0   4.8
## 1155      1   3 2.6 3.2       4.0   3.4
## 1156      1   3 4.0 3.2       3.8   2.4
## 1157      0   2 3.0 3.6       3.6   3.6
## 1158      0   3 2.0 2.2       3.4   2.4
## 1159      0   2 2.0 3.0       3.6   3.0
## 1160      0   3 4.0 3.4       4.0   3.8
## 1161      1   3 3.0 3.0       4.0   3.6
## 1162      1   3 3.0 3.0       4.0   2.4
## 1163      1   3 3.0 3.0       3.8   2.8
## 1164      1   3 3.0 3.0       3.8   2.4
## 1165      0   2 4.0 3.4       4.0   3.6
## 1166      0   3 4.6 3.6       5.0   3.8
## 1167      0   3 4.6 3.6       5.0   3.6
## 1168      0   3 4.6 3.6       5.0   3.8
## 1169      0   3 4.2 3.6       4.0   2.8
## 1170      0   3 3.6 3.8       4.2   4.6
## 1171      0   3 3.4 3.6       4.0   3.4
## 1172      1   3 2.6 2.0       2.2   2.0
## 1173      0   3 3.0 1.8       2.8   3.0
## 1174      0   2 2.8 2.0       2.0   2.4
## 1175      1   3 2.8 2.4       2.4   2.0
## 1176      0   3 2.6 2.4       4.0   4.0
## 1177      0   4 3.4 2.6       3.8   4.0
## 1178      0   3 3.4 1.6       4.0   3.0
## 1179      1   3 2.8 2.6       4.0   3.4
## 1180      1   3 2.6 1.6       3.0   3.2
## 1181      0   2 3.8 2.6       4.8   3.0
## 1182      1   3 2.2 3.0       3.6   2.6
## 1183      1   2 4.0 4.4       4.4   4.4
## 1184      0   3 2.2 1.6       3.0   2.4
## 1185      0   4 4.2 3.4       4.0   4.6
## 1186      0   3 2.0 2.4       1.8   2.4
## 1187      1   3 3.0 2.6       1.6   2.2
## 1188      1   3 4.0 3.8       4.6   4.0
## 1189      0   3 4.0 3.4       4.0   4.0
## 1190      1   2 3.4 3.6       3.4   3.0
## 1191      1   2 3.4 3.6       3.4   3.0
## 1192      0   2 2.0 2.2       2.6   3.0
## 1193      1   2 2.4 2.8       4.0   4.0
## 1194      0   2 3.6 3.6       5.0   3.6
## 1195      0   3 4.2 3.6       5.0   4.0
## 1196      1   2 2.0 2.0       2.4   2.0
## 1197      1   3 3.0 3.6       4.0   2.4
## 1198      0   3 4.0 4.0       4.8   2.8
## 1199      0   3 3.0 3.0       2.6   3.0
## 1200      0   4 3.2 3.4       4.8   4.2
## 1201      0   3 3.0 2.2       3.6   3.2
## 1202      0   2 2.8 2.0       2.0   3.4
## 1203      0   3 3.0 3.2       4.6   4.2
## 1204      0   2 3.2 2.2       3.8   3.2
## 1205      0   4 3.4 3.0       3.8   3.2
## 1206      0   3 2.6 2.4       2.6   3.2
## 1207      0   2 2.8 2.4       3.8   4.6
## 1208      0   3 3.2 3.0       4.4   3.4
## 1209      0   4 2.8 3.6       4.6   4.0
## 1210      1   4 3.4 3.0       4.0   3.4
## 1211      1   4 3.2 3.2       4.2   3.2
## 1212      0   4 2.2 2.6       4.0   3.6
## 1213      0   3 2.4 1.6       3.8   3.4
## 1214      0   3 2.8 2.2       3.6   3.8
## 1215      0   4 3.0 2.2       3.4   3.8
## 1216      0   3 3.6 3.6       4.0   3.4
## 1217      1   4 4.0 4.8       5.0   5.0
## 1218      0   4 3.2 2.8       4.2   2.6
## 1219      0   3 3.0 2.0       3.6   3.2
## 1220      1   3 4.0 2.4       3.6   3.2
## 1221      1   3 4.4 3.0       4.4   4.6
## 1222      0   3 4.6 3.2       3.8   3.0
## 1223      1   3 2.6 1.8       1.8   4.0
## 1224      0   4 3.6 2.0       4.4   4.6
## 1225      0   3 2.8 3.0       3.0   3.0
## 1226      0   3 3.4 2.2       4.0   4.2
## 1227      0   3 2.0 2.2       2.4   3.6
## 1228      1   3 4.4 3.4       3.4   3.4
## 1229      1   3 2.8 2.2       2.6   2.6
## 1230      0   3 2.6 1.2       3.4   3.6
## 1231      1   4 2.8 3.0       3.0   3.0
## 1232      0   4 4.6 3.6       5.0   4.4
## 1233      1   3 3.2 3.6       3.0   2.8
## 1234      0   3 3.6 3.8       3.0   3.0
## 1235      0   2 2.6 2.2       3.2   3.6
## 1236      1   3 3.0 3.6       4.0   2.4
## 1237      0   3 4.0 3.4       4.0   4.0
## 1238      0   3 4.0 4.0       4.8   2.8
## 1239      0   2 3.6 3.6       5.0   3.6
## 1240      0   2 2.0 2.2       2.6   3.0
## 1241      0   4 2.4 2.6       4.2   4.0
## 1242      1   2 3.4 3.6       3.4   3.0
## 1243      1   2 2.0 2.0       2.4   2.0
## 1244      0   3 3.0 3.0       2.6   3.0
## 1245      0   3 4.2 3.6       5.0   4.0
## 1246      0   3 2.6 3.2       4.0   3.4
## 1247      0   3 2.2 2.8       3.6   3.6
## 1248      1   3 4.0 2.4       4.0   3.0
## 1249      0   3 2.6 3.0       3.4   3.6
## 1250      0   3 2.4 3.0       4.0   3.6
## 1251      1   3 2.2 2.2       2.4   3.2
## 1252      0   3 2.8 2.0       4.0   3.2
## 1253      1   2 3.0 2.0       3.6   4.0
## 1254      0   3 4.0 3.8       4.2   4.2
## 1255      0   3 2.6 2.4       3.0   3.6
## 1256      0   3 3.2 3.6       4.2   4.0
## 1257      0   2 2.8 1.6       2.4   4.0
## 1258      0   3 3.0 2.0       2.8   3.8
## 1259      1   3 3.2 3.4       4.4   3.6
## 1260      1   3 3.4 1.8       3.0   4.0
## 1261      0   3 2.2 2.6       3.8   4.2
## 1262      0   3 3.0 2.8       2.2   3.4
## 1263      0   3 2.6 3.8       4.0   3.8
## 1264      1   3 2.4 2.6       3.4   3.2
## 1265      1   3 3.8 3.6       3.8   3.8
## 1266      0   3 2.6 2.4       3.2   4.0
## 1267      1   3 5.0 4.8       5.0   4.2
## 1268      1   4 4.0 4.0       4.0   4.0
## 1269      0   3 3.0 3.4       3.2   2.8
## 1270      1   4 4.0 4.2       4.0   4.0
## 1271      0   3 2.0 2.4       3.0   3.0
## 1272      0   3 4.0 4.0       4.0   4.0
## 1273      0   2 4.0 4.0       4.0   4.0
## 1274      1   3 4.0 4.0       4.0   4.0
## 1275      1   3 5.0 4.2       5.0   4.0
## 1276      1   3 4.0 4.0       4.0   4.0
## 1277      1   3 3.0 3.0       3.2   3.8
## 1278      0   3 3.0 3.8       3.6   2.4
## 1279      0   3 4.0 4.0       5.0   4.0
## 1280      0   3 3.0 3.0       3.0   3.0
## 1281      1   3 3.4 3.6       4.2   4.4
## 1282      1   3 3.0 2.0       3.8   3.0
## 1283      0   3 2.8 1.4       2.6   3.6
## 1284      1   3 3.2 3.8       4.2   4.2
## 1285      0   3 2.4 3.0       4.6   4.2
## 1286      0   3 3.2 3.0       4.0   3.6
## 1287      1   2 3.4 3.0       4.8   4.2
## 1288      1   3 2.2 2.0       2.2   3.8
## 1289      0   2 3.4 3.4       4.2   3.6
## 1290      0   3 4.4 4.0       4.0   3.8
## 1291      1   3 3.8 3.8       4.0   4.0
## 1292      0   3 2.8 2.6       4.0   3.8
## 1293      0   3 4.0 3.8       4.0   3.2
## 1294      1   3 3.2 2.4       4.0   3.6
## 1295      1   4 3.4 2.4       4.8   3.8
## 1296      1   3 4.0 4.0       4.0   4.0
## 1297      1   3 2.0 2.0       1.8   2.6
## 1298      0   4 2.8 3.4       4.0   4.2
## 1299      1   3 4.0 3.0       5.0   3.8
## 1300      1   2 3.5 2.4       3.2   4.0
## 1301      1   2 3.2 3.4       4.0   3.6
## 1302      1   3 3.2 2.0       3.0   4.0
## 1303      1   2 2.0 2.6       3.8   3.8
## 1304      0   2 3.8 3.8       4.0   3.4
## 1305      1   4 4.2 4.2       4.0   4.0
## 1306      0   3 4.0 2.2       4.0   2.6
## 1307      1   3 3.0 2.2       4.0   3.6
## 1308      0   3 3.8 3.4       4.0   3.6
## 1309      1   3 2.4 2.4       3.4   3.4
## 1310      0   3 2.4 2.2       2.6   3.2
## 1311      0   2 2.4 2.8       2.2   3.6
## 1312      0   3 2.4 2.4       4.6   3.4
## 1313      0   3 2.6 2.0       2.0   4.0
## 1314      1   3 4.0 3.2       5.0   4.0
## 1315      0   4 3.6 4.0       3.4   3.0
## 1316      0   3 3.8 4.0       4.0   4.0
## 1317      1   3 2.8 2.4       2.2   3.8
## 1318      1   3 4.0 3.4       4.0   4.0
## 1319      0   2 4.0 4.2       4.0   3.8
## 1320      1   3 3.8 2.2       4.0   4.0
## 1321      1   3 4.0 4.0       4.0   4.0
## 1322      1   3 2.4 2.6       3.2   2.4
## 1323      1   4 4.0 4.0       4.0   3.4
## 1324      0   4 4.6 4.2       4.0   4.0
## 1325      0   3 2.8 2.6       2.0   2.2
## 1326      0   3 4.0 3.6       3.6   4.0
## 1327      0   4 4.6 4.0       5.0   4.8
## 1328      1   3 3.0 3.4       3.6   3.8
## 1329      1   3 2.0 2.4       3.8   3.0
## 1330      1   2 3.4 3.6       3.4   3.0
## 1331      0   4 4.2 3.6       5.0   4.0
## 1332      0   3 3.2 2.8       4.0   3.6
## 1333      1   2 3.6 3.8       5.0   3.6
## 1334      0   2 2.0 2.2       2.6   3.0
## 1335      1   2 2.2 2.0       2.6   3.8
## 1336      1   3 3.2 2.8       3.2   2.8
## 1337      1   3 3.0 3.6       4.0   3.2
## 1338      1   3 2.0 2.2       2.0   2.0
## 1339      0   3 4.0 4.0       4.8   2.8
## 1340      0   3 4.0 3.4       4.0   4.0
## 1341      1   3 2.6 2.2       3.4   3.0
## 1342      1   3 3.8 2.2       3.4   3.0
## 1343      0   3 2.2 2.4       2.4   2.8
## 1344      1   4 3.6 3.2       4.0   4.0
## 1345      1   2 2.8 2.8       4.0   3.6
## 1346      0   3 2.4 2.4       3.8   3.2
## 1347      0   3 2.4 1.6       2.4   2.8
## 1348      0   3 2.4 2.8       4.0   3.2
## 1349      1   3 2.6 2.6       3.8   4.0
## 1350      1   2 2.2 2.0       2.4   3.6
## 1351      1   3 2.0 2.0       3.6   4.4
## 1352      1   2 3.4 3.6       4.0   3.6
## 1353      0   3 2.0 2.0       4.0   4.0
## 1354      1   3 2.6 2.0       3.2   3.6
## 1355      1   2 3.6 3.2       3.4   3.4
## 1356      1   3 3.6 2.8       3.6   4.2
## 1357      1   2 2.4 2.6       3.6   4.0
## 1358      1   3 3.8 3.2       4.0   4.4
## 1359      1   4 3.6 3.8       3.8   4.6
## 1360      1   2 3.6 2.2       3.6   3.6
## 1361      1   4 2.8 3.0       4.2   4.2
## 1362      1   2 3.8 2.8       3.4   3.8
## 1363      0   3 1.4 1.6       3.2   3.0
## 1364      0   3 2.8 3.0       4.2   4.4
## 1365      1   3 3.4 2.2       4.2   3.8
## 1366      0   3 3.2 2.4       4.6   3.6
## 1367      1   4 2.6 2.2       2.8   2.8
## 1368      1   4 2.8 2.0       2.6   3.2
## 1369      1   3 3.2 3.0       3.5   3.6
## 1370      0   3 2.6 3.0       3.0   4.0
## 1371      0   2 2.4 2.2       2.4   3.2
## 1372      0   3 2.0 2.4       2.2   3.8
## 1373      0   3 2.0 2.2       4.2   2.8
## 1374      0   2 2.8 2.6       3.8   3.4
## 1375      0   3 2.8 3.4       2.0   2.8
## 1376      1   3 3.0 3.6       4.0   4.2
## 1377      1   3 3.6 3.6       3.8   3.0
## 1378      1   4 3.8 2.2       3.8   3.6
## 1379      1   4 2.2 2.2       3.8   4.8
## 1380      1   4 3.8 2.0       3.6   3.9
## 1381      1   3 3.2 2.6       3.4   3.8
## 1382      0   3 2.8 3.2       4.0   3.4
## 1383      0   3 3.6 3.8       3.8   4.0
## 1384      0   3 1.6 2.0       2.8   3.2
## 1385      0   2 4.0 3.0       3.6   3.2
## 1386      0   3 3.0 3.0       3.0   3.0
## 1387      1   3 3.4 3.0       3.4   2.6
## 1388      0   3 2.8 3.4       3.0   2.6
## 1389      1   2 3.2 2.2       3.0   3.0
## 1390      0   2 2.8 3.0       3.0   3.0
## 1391      0   4 1.8 1.0       3.4   4.0
## 1392      0   4 2.2 3.0       3.6   4.0
## 1393      0   3 2.6 2.8       3.4   4.4
## 1394      1   3 3.0 2.8       3.4   4.2
## 1395      1   2 3.6 3.6       4.0   4.4
## 1396      1   2 3.4 1.6       3.4   3.4
## 1397      1   3 2.8 2.4       3.4   3.6
## 1398      0   4 3.0 3.6       3.8   4.2
## 1399      0   3 3.2 3.0       3.0   3.4
## 1400      0   3 3.2 3.0       3.6   3.2
## 1401      0   4 2.4 1.4       4.2   3.6
## 1402      0   3 3.0 3.4       2.8   3.8
## 1403      1   2 2.8 2.8       4.0   3.6
## 1404      0   3 2.4 2.4       3.6   4.4
## 1405      0   3 4.0 4.2       4.0   3.2
## 1406      0   3 2.6 2.6       4.0   3.6
## 1407      0   3 3.8 3.6       2.4   3.0
## 1408      1   3 3.0 2.8       3.6   3.6
## 1409      0   2 2.6 2.0       2.4   3.6
## 1410      0   3 2.6 2.2       3.8   3.8
## 1411      1   3 2.0 1.4       3.2   3.4
## 1412      0   3 1.6 2.4       3.0   3.8
## 1413      0   3 3.2 2.2       3.2   3.2
## 1414      0   3 4.0 3.6       4.6   3.8
## 1415      1   4 3.8 3.8       3.4   4.0
## 1416      1   4 3.4 3.2       4.0   3.8
## 1417      0   3 3.6 3.6       4.0   4.2
## 1418      0   4 4.0 4.0       3.8   3.2
## 1419      1   3 3.2 2.0       4.2   3.2
## 1420      1   3 3.2 2.0       4.2   3.2
## 1421      1   3 3.8 4.0       4.0   4.0
## 1422      0   3 3.2 2.2       4.0   3.0
## 1423      1   4 3.6 4.0       4.0   4.2
## 1424      1   3 3.8 3.0       4.0   3.8
## 1425      0   3 3.6 3.6       3.6   4.0
## 1426      0   2 2.6 2.6       3.8   3.6
## 1427      0   2 2.8 2.4       3.4   4.0
## 1428      0   2 3.6 2.4       3.4   3.2
## 1429      0   3 4.0 3.2       3.6   4.0
## 1430      0   3 2.4 2.8       4.8   4.4
## 1431      0   3 2.4 2.8       4.8   4.4
## 1432      0   4 3.0 2.8       3.2   3.6
## 1433      0   2 2.8 2.0       2.4   3.0
## 1434      0   2 3.8 3.2       4.4   3.2
## 1435      0   3 3.2 2.4       3.8   3.4
## 1436      1   2 3.8 3.4       4.0   4.6
## 1437      1   3 3.4 3.0       3.4   3.4
## 1438      1   2 2.0 2.8       2.2   2.0
## 1439      1   4 4.4 5.0       5.0   4.6
## 1440      0   3 3.4 3.2       3.6   3.4
## 1441      0   3 4.4 4.0       3.8   3.6
## 1442      1   3 3.8 3.6       3.6   3.4
## 1443      1   2 3.4 3.6       3.4   3.0
## 1444      0   3 4.0 3.4       4.0   4.0
## 1445      1   2 3.6 3.6       5.0   3.6
## 1446      0   2 2.0 2.2       2.6   3.0
## 1447      1   2 2.0 2.0       2.4   2.0
## 1448      1   2 2.0 2.0       2.4   2.0
## 1449      0   3 4.0 3.4       4.0   4.0
## 1450      0   2 3.6 3.6       5.0   3.6
## 1451      0   2 2.0 2.2       2.6   3.0
## 1452      0   4 4.2 3.6       5.0   4.0
## 1453      1   2 2.0 2.0       2.4   2.0
## 1454      1   3 3.2 2.8       3.2   2.8
## 1455      0   3 4.0 4.0       4.8   2.8
## 1456      1   2 3.4 3.6       3.4   3.0
## 1457      1   3 3.0 3.6       4.0   2.4
## 1458      1   3 3.0 3.6       4.0   2.4
## 1459      1   3 2.0 2.2       2.0   2.0
## 1460      0   3 3.0 3.0       2.6   3.0
## 1461      1   3 3.2 2.8       3.2   2.8
## 1462      0   3 4.0 4.0       4.8   2.8
## 1463      0   4 3.2 2.4       3.8   4.0
## 1464      1   2 3.4 3.0       3.0   3.0
## 1465      1   3 3.4 3.2       3.6   4.4
## 1466      0   3 3.4 2.8       3.8   3.6
## 1467      0   3 2.4 1.0       3.6   4.0
## 1468      0   3 3.8 2.4       4.4   4.8
## 1469      1   3 3.8 3.8       4.8   4.4
## 1470      0   3 3.2 3.0       3.5   3.6
## 1471      0   4 3.0 2.4       4.0   4.4
## 1472      1   3 2.8 1.6       2.4   3.4
## 1473      1   3 2.4 1.6       2.6   4.4
## 1474      0   4 3.0 2.4       5.0   5.0
## 1475      1   4 3.4 2.6       3.8   4.0
## 1476      1   2 2.6 3.8       4.0   4.0
## 1477      1   4 4.0 4.0       4.0   3.4
## 1478      1   2 3.0 2.0       3.0   2.2
## 1479      1   3 2.0 3.0       4.0   3.0
## 1480      1   3 2.6 2.8       3.6   3.6
## 1481      0   2 3.2 2.6       4.2   4.2
## 1482      1   3 3.6 3.0       3.0   4.0
## 1483      0   3 2.2 2.6       2.0   3.0
## 1484      0   2 2.4 2.0       2.4   2.8
## 1485      1   3 2.4 2.4       4.0   3.8
## 1486      1   3 3.4 3.6       4.0   3.8
## 1487      0   3 3.0 3.8       4.0   4.8
## 1488      1   3 3.4 3.4       3.8   3.0
## 1489      0   3 3.2 2.2       3.6   3.4
## 1490      0   2 3.0 3.2       3.2   3.4
## 1491      1   3 3.6 2.8       3.2   4.0
## 1492      0   2 2.4 3.2       3.6   4.0
## 1493      1   4 3.2 3.0       3.6   4.2
## 1494      0   4 3.4 2.4       3.6   3.8
## 1495      0   2 2.0 2.4       2.6   3.2
## 1496      0   3 2.4 1.8       4.0   4.0
## 1497      0   3 3.0 3.4       3.8   4.2
## 1498      1   2 4.6 4.4       4.6   4.2
## 1499      0   2 3.6 3.2       5.0   4.0
## 1500      1   3 3.0 2.4       2.2   3.6
## 1501      1   3 4.0 3.8       5.0   4.2
## 1502      1   2 3.4 2.4       3.8   3.4
## 1503      0   2 1.8 1.4       3.0   3.6
## 1504      0   3 3.8 3.0       3.8   4.0
## 1505      0   4 2.8 2.8       3.6   4.0
## 1506      1   3 2.4 2.4       2.4   3.2
## 1507      1   3 3.6 3.4       4.0   4.0
## 1508      0   2 2.8 2.4       3.8   4.0
## 1509      0   3 3.2 1.8       3.0   3.6
## 1510      0   3 2.0 2.8       3.6   3.6
## 1511      0   3 2.6 2.0       2.0   3.6
## 1512      1   3 3.6 2.4       3.0   4.0
## 1513      0   3 2.4 2.4       3.0   3.0
## 1514      0   3 3.0 2.4       4.0   3.6
## 1515      1   3 3.2 2.2       3.3   3.8
## 1516      0   2 2.8 1.8       3.4   3.8
## 1517      1   3 2.6 2.0       3.2   3.4
## 1518      0   3 3.0 1.6       3.6   3.8
## 1519      0   2 2.0 2.8       3.0   3.2
## 1520      0   3 3.4 3.8       4.0   4.0
## 1521      0   2 2.8 3.0       4.6   3.4
## 1522      0   2 3.0 1.8       3.2   3.0
## 1523      1   3 3.2 3.0       3.8   2.8
## 1524      1   3 3.2 2.8       4.0   4.0
## 1525      1   3 3.2 2.8       4.0   4.0
## 1526      0   2 3.2 2.0       3.8   3.2
## 1527      0   3 2.4 2.4       2.0   2.4
## 1528      1   3 3.8 3.2       4.2   3.6
## 1529      0   3 3.4 3.0       4.4   4.2
## 1530      1   4 2.0 2.8       2.8   3.2
## 1531      0   3 4.0 4.0       4.0   4.0
## 1532      0   4 4.0 3.2       4.0   4.0
## 1533      1   3 2.4 2.8       3.6   3.4
## 1534      0   3 4.0 4.4       4.4   4.2
## 1535      1   3 3.4 4.8       4.0   3.6
## 1536      0   2 3.8 4.8       4.6   4.0
## 1537      1   2 2.8 2.8       3.0   3.0
## 1538      0   3 2.2 1.4       3.6   5.0
## 1539      0   2 3.8 2.4       3.6   3.4
## 1540      1   4 4.0 2.8       4.4   5.0
## 1541      1   3 3.0 2.4       3.6   3.6
## 1542      0   3 2.4 1.8       2.0   3.0
## 1543      0   3 3.8 3.0       4.8   4.0
## 1544      0   3 3.8 4.4       4.4   4.4
## 1545      1   2 2.2 3.2       3.2   3.8
## 1546      0   3 3.6 3.2       4.2   4.0
## 1547      1   3 3.4 2.0       4.0   3.0
## 1548      0   3 2.8 3.0       3.6   3.6
## 1549      0   4 3.2 2.6       4.8   4.0
## 1550      0   4 3.0 3.0       3.2   3.8
## 1551      1   4 4.2 4.6       5.0   5.0
## 1552      1   3 2.8 3.2       4.0   4.0
## 1553      1   2 3.0 2.6       3.2   3.0
## 1554      0   3 3.4 3.8       5.0   3.8
## 1555      0   4 3.4 2.6       4.8   4.0
## 1556      0   3 3.4 3.4       4.0   4.0
## 1557      0   3 3.4 3.6       4.0   4.0
## 1558      0   2 3.8 3.4       2.4   4.4
## 1559      0   4 3.2 2.4       4.0   4.0
## 1560      0   3 2.4 3.4       5.0   3.6
## 1561      0   3 2.6 2.8       3.6   3.8
## 1562      1   3 2.6 3.0       4.0   4.2
## 1563      1   3 2.6 2.6       3.4   2.6
## 1564      0   3 3.2 3.4       4.2   4.0
## 1565      1   2 3.6 3.0       4.2   2.6
## 1566      1   2 2.4 2.2       3.2   4.4
## 1567      1   2 3.0 2.6       5.0   5.0
## 1568      1   2 4.0 3.0       4.0   4.2
## 1569      0   2 3.6 4.0       4.0   4.4
## 1570      1   3 3.6 3.0       3.8   3.4
## 1571      0   3 3.8 2.0       3.4   4.6
## 1572      0   3 3.0 2.4       2.0   2.8
## 1573      0   3 2.2 2.8       3.0   3.4
## 1574      0   2 3.2 3.0       3.8   4.4
## 1575      0   2 3.0 3.6       4.0   3.2
## 1576      0   3 3.6 3.8       4.2   3.8
## 1577      0   3 2.0 2.4       3.0   4.0
## 1578      1   3 4.0 4.0       4.0   4.0
## 1579      0   3 4.6 4.2       4.2   4.2
## 1580      1   3 4.2 4.6       3.8   3.6
## 1581      1   3 3.8 3.8       3.8   4.2
## 1582      1   3 3.8 2.4       3.2   4.4
## 1583      0   3 3.8 2.6       3.2   4.4
## 1584      0   2 3.0 2.4       3.4   3.8
## 1585      0   2 2.8 2.0       3.8   4.0
## 1586      0   3 2.0 3.8       4.0   4.0
## 1587      0   3 4.2 4.4       4.2   3.6
## 1588      0   3 3.6 2.8       4.2   3.6
## 1589      0   3 3.0 3.0       3.8   2.6
## 1590      0   1 2.6 1.4       2.0   3.6
## 1591      1   3 3.2 3.2       3.0   3.2
## 1592      1   3 1.9 2.0       2.8   1.6
## 1593      1   3 3.4 2.8       4.6   3.0
## 1594      0   3 2.0 2.0       2.0   1.6
## 1595      1   2 3.0 3.4       3.0   4.2
## 1596      0   3 3.0 3.2       4.8   4.4
## 1597      0   3 4.0 4.0       4.4   4.2
## 1598      0   3 3.0 2.6       3.8   3.4
## 1599      0   3 3.8 3.0       5.0   4.6
## 1600      1   3 3.8 3.0       4.6   4.4
## 1601      0   2 2.6 3.0       3.4   3.4
## 1602      1   2 4.0 2.8       4.0   3.2
## 1603      0   2 2.8 3.0       3.6   4.2
## 1604      0   3 3.4 3.0       3.2   3.8
## 1605      0   2 2.8 1.8       4.0   3.8
## 1606      1   2 3.6 2.6       2.6   2.4
## 1607      0   3 3.4 2.8       3.0   3.0
## 1608      1   3 4.0 3.6       3.4   4.0
## 1609      0   3 3.8 3.2       4.0   3.4
## 1610      1   3 3.6 2.4       2.4   3.2
## 1611      0   3 3.6 2.2       3.8   4.4
## 1612      0   2 3.0 2.8       3.6   3.4
## 1613      0   3 3.2 2.4       3.6   2.8
## 1614      0   3 4.2 2.4       3.8   3.4
## 1615      0   3 3.2 2.4       3.2   3.4
## 1616      0   3 3.2 2.8       3.6   3.4
## 1617      0   3 3.2 2.4       3.0   2.8
## 1618      0   4 3.4 3.2       4.0   4.0
## 1619      0   3 3.4 3.2       4.0   4.0
## 1620      1   3 3.4 2.8       4.0   4.0
## 1621      1   3 3.4 1.8       3.6   3.0
## 1622      1   3 3.0 2.6       3.8   3.4
## 1623      0   3 3.8 2.6       4.0   3.8
## 1624      0   4 3.4 3.0       3.8   3.8
## 1625      1   4 4.0 4.2       3.8   3.0
## 1626      0   3 3.2 2.6       3.6   3.4
## 1627      1   3 3.4 2.8       3.6   4.2
## 1628      0   3 3.4 2.0       3.2   2.2
## 1629      0   3 2.6 2.0       3.2   3.2
## 1630      1   3 4.0 2.4       4.2   3.2
## 1631      1   3 3.2 2.4       3.8   2.0
## 1632      0   2 4.0 2.0       3.4   2.0
## 1633      0   3 3.0 2.0       2.4   3.4
## 1634      1   3 2.8 2.0       3.0   3.4
## 1635      1   3 3.4 2.8       3.6   4.2
## 1636      0   3 2.8 2.8       3.8   3.2
## 1637      1   3 2.8 2.0       3.0   3.4
## 1638      0   3 2.0 2.0       2.6   2.4
## 1639      0   2 3.0 3.4       3.2   3.6
## 1640      0   3 4.4 3.2       3.8   4.6
## 1641      0   3 2.8 2.2       3.6   3.6
## 1642      0   3 3.0 2.6       4.0   2.8
## 1643      0   3 3.2 2.0       2.4   3.2
## 1644      0   3 3.8 3.4       4.0   3.6
## 1645      0   3 2.2 1.8       3.6   2.8
## 1646      1   3 2.2 2.0       2.8   3.2
## 1647      0   3 3.8 3.8       4.8   4.6
## 1648      1   2 3.0 3.8       4.4   4.0
## 1649      0   2 1.4 3.0       2.0   3.6
## 1650      0   2 3.0 3.6       2.2   3.0
## 1651      0   2 3.2 1.8       2.8   2.4
## 1652      0   2 2.4 2.6       3.0   3.0
## 1653      0   2 2.8 3.2       2.6   3.4
## 1654      1   2 2.4 2.8       2.4   2.4
## 1655      1   2 3.2 2.8       3.6   2.8
## 1656      0   2 2.0 3.0       3.8   4.2
## 1657      0   2 3.0 2.8       2.6   2.6
## 1658      0   2 2.6 3.0       3.2   2.8
## 1659      0   2 4.0 2.6       3.4   2.8
## 1660      1   2 2.6 3.0       3.2   2.8
## 1661      0   3 3.8 2.4       2.4   2.8
## 1662      1   3 1.0 1.0       2.6   3.0
## 1663      0   2 3.2 2.4       3.6   3.6
## 1664      1   2 3.0 3.4       3.0   4.2
## 1665      0   2 3.8 2.8       3.8   3.6
## 1666      1   3 2.6 3.8       2.6   3.8
## 1667      1   2 4.4 3.0       5.0   4.6
## 1668      1   3 4.0 3.8       3.2   3.6
## 1669      1   3 4.0 4.6       5.0   4.6
## 1670      0   2 3.6 3.6       4.0   3.0
## 1671      1   2 4.0 2.6       3.4   2.8
## 1672      0   2 3.2 3.0       2.8   2.2
## 1673      1   2 3.2 3.0       2.8   2.2
## 1674      0   3 2.2 2.0       3.6   3.6
## 1675      1   2 3.6 3.2       3.8   3.0
## 1676      0   2 4.6 3.2       4.0   4.0
## 1677      0   2 4.0 2.4       3.0   3.0
## 1678      0   2 2.0 2.2       3.8   2.8
## 1679      0   2 2.4 3.0       5.0   4.4
## 1680      1   2 5.0 4.2       5.0   5.0
## 1681      1   3 4.2 3.8       4.0   3.8
## 1682      1   3 3.2 2.4       4.0   2.8
## 1683      0   3 4.0 3.8       4.4   2.6
## 1684      1   3 3.0 1.2       4.0   2.2
## 1685      1   3 3.6 2.4       2.8   2.6
## 1686      1   3 2.0 4.0       3.4   3.4
## 1687      0   3 3.0 3.0       3.4   3.4
## 1688      0   3 3.6 2.8       4.0   3.4
## 1689      0   3 3.8 3.4       3.8   3.4
## 1690      0   2 3.2 3.0       2.8   2.2
## 1691      1   2 3.6 3.6       4.6   3.2
## 1692      0   3 3.2 3.6       3.2   3.0
## 1693      0   1 3.4 3.4       4.0   3.4
## 1694      0   3 2.6 2.6       2.2   3.8
## 1695      0   3 2.8 2.6       4.0   3.6
## 1696      1   2 3.0 3.4       2.0   2.2
## 1697      1   2 3.8 2.4       4.0   3.8
## 1698      0   3 2.4 3.2       3.4   3.4
## 1699      0   3 2.8 3.4       3.2   3.0
## 1700      0   3 3.4 2.0       4.0   2.6
## 1701      0   2 3.2 2.8       3.4   3.4
## 1702      1   3 2.0 4.0       3.4   3.4
## 1703      1   2 4.0 4.0       3.4   2.8
## 1704      1   2 2.6 3.0       3.2   2.8
## 1705      1   3 4.0 2.6       3.4   2.8
## 1706      1   3 4.0 2.6       3.4   2.8
## 1707      0   3 2.8 3.4       3.6   4.2
## 1708      0   2 3.2 3.2       3.0   3.2
## 1709      1   3 4.2 2.6       3.2   3.6
## 1710      0   2 2.8 2.8       3.4   3.0
## 1711      0   3 3.0 2.8       3.6   3.6
## 1712      0   3 2.6 3.0       3.6   3.6
## 1713      0   3 2.2 3.4       4.4   2.6
## 1714      0   3 3.6 4.0       4.4   3.8
## 1715      0   3 4.6 3.6       4.0   3.6
## 1716      0   3 4.0 2.8       3.8   4.0
## 1717      1   3 4.0 3.2       4.0   3.0
## 1718      1   3 3.2 2.4       3.8   3.2
## 1719      1   3 2.6 3.0       4.2   3.0
## 1720      1   3 3.6 3.4       3.4   3.0
## 1721      0   3 3.4 2.2       4.0   3.4
## 1722      1   3 4.4 3.0       4.2   3.0
## 1723      1   3 3.4 3.2       4.4   3.8
## 1724      1   3 1.8 2.2       3.0   2.8
## 1725      1   3 3.4 3.0       3.0   3.2
## 1726      1   3 4.6 4.6       4.2   3.2
## 1727      0   3 2.2 2.2       2.4   2.6
## 1728      0   2 3.8 2.6       3.0   3.4
## 1729      1   3 3.0 2.4       3.2   3.2
## 1730      1   3 3.8 4.2       3.6   3.8
## 1731      1   3 4.6 3.8       3.2   4.4
## 1732      0   2 4.0 3.4       3.0   2.6
## 1733      0   3 3.0 3.0       3.0   3.4
## 1734      1   3 4.0 3.8       4.0   4.0
## 1735      0   3 3.2 2.6       3.2   3.6
## 1736      1   2 2.6 3.0       3.2   2.8
## 1737      0   3 3.4 2.8       3.8   4.4
## 1738      0   3 3.6 3.8       4.0   4.0
## 1739      1   3 3.8 3.8       4.8   5.0
## 1740      0   3 4.0 3.0       4.0   4.2
## 1741      0   3 3.2 3.8       3.2   2.8
## 1742      0   3 2.8 2.8       3.0   4.0
## 1743      0   3 2.8 3.6       3.8   4.2
## 1744      0   3 3.4 3.6       4.0   3.4
## 1745      0   3 3.0 2.6       4.0   3.0
## 1746      0   3 2.8 2.2       3.0   3.2
## 1747      0   3 3.0 2.6       3.4   3.4
## 1748      0   3 2.8 3.4       3.0   3.6
## 1749      1   3 3.2 2.0       3.2   3.6
## 1750      1   3 3.6 3.0       3.8   4.0
## 1751      1   3 3.8 3.6       3.8   3.6
## 1752      1   3 4.4 4.0       4.6   3.6
## 1753      1   3 4.8 4.2       4.2   4.6
## 1754      1   3 5.0 5.0       5.0   4.6
## 1755      1   3 4.0 4.8       4.2   4.0
## 1756      1   3 3.8 2.8       3.2   3.2
## 1757      1   2 2.6 3.0       3.2   2.8
## 1758      1   2 2.6 3.0       3.2   2.8
## 1759      0   2 2.6 3.0       3.2   2.8
## 1760      1   3 3.2 3.0       2.8   2.2
## 1761      0   3 3.2 2.8       3.6   4.2
## 1762      0   3 4.8 4.8       4.4   3.6
## 1763      1   2 2.6 2.6       3.2   3.4
## 1764      0   3 4.0 3.2       4.4   4.4
## 1765      0   3 2.6 1.4       2.6   2.4
## 1766      0   3 2.6 1.4       2.6   2.4
## 1767      1   3 3.2 3.0       3.0   2.8
## 1768      1   2 3.8 4.4       4.2   3.6
## 1769      0   3 2.2 2.8       2.8   3.2
## 1770      0   4 3.2 3.2       3.8   3.4
## 1771      0   3 4.6 3.6       4.8   3.6
## 1772      1   3 3.6 4.0       5.0   4.4
## 1773      1   2 2.8 3.0       3.4   3.8
## 1774      0   3 2.8 3.0       2.8   3.6
## 1775      1   3 4.8 4.4       5.0   4.4
## 1776      0   3 2.8 2.6       2.6   4.0
## 1777      0   3 4.2 4.4       4.2   4.0
## 1778      0   3 3.8 3.0       4.2   4.2
## 1779      1   3 3.0 2.0       4.2   2.8
## 1780      0   3 3.0 3.4       3.0   2.4
## 1781      1   3 3.8 4.2       5.0   4.6
## 1782      1   2 3.4 3.0       3.0   3.2
## 1783      1   3 3.8 3.8       4.4   4.2
## 1784      1   2 2.6 2.6       4.2   3.8
## 1785      0   3 5.0 3.6       5.0   3.6
## 1786      1   1 4.2 3.6       4.4   3.4
## 1787      0   3 3.0 3.0       3.2   3.4
## 1788      0   3 4.0 4.0       3.8   4.0
## 1789      1   3 4.0 4.2       4.0   4.2
## 1790      0   3 2.0 3.0       3.2   3.0
## 1791      1   3 3.4 2.6       4.0   4.0
## 1792      0   3 2.8 2.2       3.6   3.0
## 1793      1   3 2.6 2.6       4.0   3.8
## 1794      0   3 4.0 4.2       4.8   4.4
## 1795      0   3 3.0 3.8       4.0   3.8
## 1796      0   3 2.8 2.2       3.8   3.6
## 1797      0   3 4.0 2.4       4.0   4.2
## 1798      1   2 4.0 3.2       5.0   4.6
## 1799      0   3 3.6 3.4       3.6   3.2
## 1800      0   3 2.4 2.6       2.2   4.4
## 1801      1   3 3.8 3.2       4.0   3.4
## 1802      0   3 4.0 3.0       4.8   3.8
## 1803      1   3 3.8 3.2       4.0   3.4
## 1804      0   3 4.0 3.0       4.8   3.8
## 1805      0   3 4.0 3.0       4.2   4.8
## 1806      1   3 4.2 3.8       3.2   4.0
## 1807      0   3 3.4 2.8       3.8   3.8
## 1808      1   3 2.4 2.0       2.8   3.6
## 1809      1   3 3.8 3.2       4.0   3.0
## 1810      1   4 4.0 3.2       3.0   2.6
## 1811      1   2 2.6 3.0       3.8   2.6
## 1812      1   3 2.8 2.0       3.8   4.6
## 1813      0   3 4.6 3.4       4.2   3.6
## 1814      0   3 3.0 2.0       3.4   3.0
## 1815      0   3 2.0 2.2       2.0   3.4
## 1816      0   3 3.2 2.4       4.0   2.8
## 1817      0   3 1.4 1.0       2.4   2.6
## 1818      1   1 3.6 3.0       3.4   4.2
## 1819      1   3 4.2 3.4       4.0   3.2
## 1820      1   3 2.0 1.2       2.8   3.0
## 1821      0   4 3.0 1.8       3.6   3.8
## 1822      0   3 3.4 1.0       2.4   2.6
## 1823      1   3 4.2 3.0       3.2   3.6
## 1824      0   3 5.0 4.4       5.0   3.4
## 1825      1   2 3.0 3.0       3.0   3.0
## 1826      1   3 4.0 3.2       4.4   3.0
## 1827      1   2 3.4 2.4       2.6   2.0
## 1828      1   3 3.0 2.8       4.0   4.2
## 1829      1   1 2.6 3.0       3.2   2.8
## 1830      0   4 3.0 3.2       3.8   3.4
## 1831      0   3 2.8 2.2       2.8   3.8
## 1832      0   3 2.2 2.2       4.0   4.2
## 1833      0   3 3.4 2.2       2.0   3.6
## 1834      0   3 3.2 2.6       3.0   3.4
## 1835      0   3 3.2 2.4       3.8   2.8
## 1836      0   3 2.0 2.0       4.0   4.0
## 1837      0   3 2.4 2.4       3.8   4.4
## 1838      1   3 2.0 2.4       1.8   3.4
## 1839      1   3 3.8 2.2       4.0   3.8
## 1840      1   1 3.4 3.0       3.4   4.0
## 1841      0   3 3.4 2.2       2.8   3.6
## 1842      0   3 2.8 3.4       4.0   3.2
## 1843      0   3 3.0 2.8       4.0   3.8
## 1844      0   3 4.0 2.6       3.8   4.6
## 1845      0   3 3.2 2.2       3.6   3.0
## 1846      1   3 3.4 3.6       3.6   4.2
## 1847      0   3 2.8 2.8       2.6   2.2
## 1848      1   2 3.6 2.0       2.8   3.8
## 1849      0   3 4.6 4.2       4.8   4.6
## 1850      1   3 3.4 2.2       3.8   3.4
## 1851      1   2 3.0 3.0       3.6   3.2
## 1852      0   3 3.6 3.4       4.0   3.8
## 1853      0   3 2.8 1.6       3.6   3.0
## 1854      0   3 3.2 3.0       3.8   3.8
## 1855      1   3 3.2 2.2       3.8   4.0
## 1856      1   2 2.6 2.4       4.0   3.4
## 1857      0   3 2.0 2.4       3.0   2.4
## 1858      0   3 3.0 3.2       3.2   3.0
## 1859      0   3 2.2 3.0       3.8   4.0
## 1860      0   2 2.8 3.2       2.2   2.4
## 1861      1   2 3.2 2.8       4.2   3.8
## 1862      0   3 2.8 2.0       5.0   2.6
## 1863      1   2 1.8 1.0       2.4   2.8
## 1864      1   2 3.6 3.4       3.8   3.0
## 1865      1   4 4.0 4.2       4.2   4.2
## 1866      1   3 2.0 3.0       5.0   5.0
## 1867      0   3 3.0 3.0       4.0   4.2
## 1868      1   4 3.4 2.6       3.4   4.2
## 1869      1   3 4.6 3.8       2.8   2.8
## 1870      0   3 3.0 3.6       4.0   3.0
## 1871      1   3 4.8 3.8       4.0   4.0
## 1872      0   3 3.6 3.8       4.0   2.8
## 1873      0   3 2.8 2.8       4.0   2.8
## 1874      1   3 2.6 2.4       4.0   4.0
## 1875      0   3 2.0 1.6       3.6   4.0
## 1876      0   3 3.2 3.6       4.4   4.8
## 1877      0   2 2.8 1.8       3.2   3.0
## 1878      1   3 2.0 2.2       3.8   3.6
## 1879      1   3 4.2 3.4       4.0   3.6
## 1880      0   3 3.4 3.8       4.0   3.6
## 1881      1   3 4.4 3.2       4.2   4.0
## 1882      0   3 3.4 3.4       4.0   3.4
## 1883      1   3 4.2 5.0       4.6   4.6
## 1884      0   2 3.6 1.8       3.8   3.8
## 1885      0   3 3.0 3.8       4.8   4.0
## 1886      0   2 1.8 1.4       3.0   4.0
## 1887      0   3 5.0 3.2       5.0   2.6
## 1888      1   4 3.2 3.0       4.0   3.8
## 1889      1   3 5.0 4.6       5.0   5.0
## 1890      1   2 2.8 2.4       3.4   3.0
## 1891      0   3 2.2 3.0       3.6   3.4
## 1892      1   3 3.6 2.4       3.6   3.6
## 1893      0   2 3.0 2.4       3.0   3.0
## 1894      1   3 4.2 3.2       4.2   3.2
## 1895      1   3 2.2 1.8       2.4   2.2
## 1896      1   3 3.8 3.2       4.8   4.2
## 1897      1   3 2.2 1.8       2.4   2.2
## 1898      1   3 4.8 3.2       3.4   3.2
## 1899      1   2 1.4 1.8       1.4   2.4
## 1900      0   3 3.8 4.0       3.8   3.6
## 1901      0   3 3.8 4.4       4.0   4.4
## 1902      0   3 3.6 2.8       3.8   3.0
## 1903      1   3 3.8 3.4       4.2   5.0
## 1904      1   3 4.6 4.2       4.0   3.0
## 1905      1   3 4.6 2.8       4.4   2.8
## 1906      1   3 4.0 4.0       4.2   4.2
## 1907      0   2 3.2 2.4       2.6   3.0
## 1908      1   3 2.2 3.0       3.0   3.2
## 1909      0   3 3.8 3.0       3.6   3.4
## 1910      1   3 2.6 2.8       2.4   3.4
## 1911      1   3 3.8 3.6       3.2   3.4
## 1912      1   2 3.4 3.4       3.4   3.4
## 1913      1   3 3.6 3.6       4.0   3.2
## 1914      1   3 3.2 3.2       2.8   3.4
## 1915      1   3 2.4 2.6       3.8   3.2
## 1916      0   3 2.4 1.8       3.2   3.6
## 1917      0   3 4.0 3.4       4.0   3.4
## 1918      0   2 3.4 2.2       2.6   3.4
## 1919      1   2 3.2 3.4       3.6   2.8
## 1920      1   3 3.8 2.6       3.4   3.6
## 1921      1   2 1.8 2.0       2.0   2.0
## 1922      0   2 2.4 1.0       3.4   3.4
## 1923      0   2 4.2 2.2       3.4   3.6
## 1924      1   2 3.2 3.4       3.0   3.8
## 1925      0   3 2.0 1.0       3.0   3.2
data2 <- read.csv("mtcars.csv")
data2
##                      X  mpg cyl  disp  hp drat    wt  qsec vs am gear carb
## 1            Mazda RX4 21.0   6 160.0 110 3.90 2.620 16.46  0  1    4    4
## 2        Mazda RX4 Wag 21.0   6 160.0 110 3.90 2.875 17.02  0  1    4    4
## 3           Datsun 710 22.8   4 108.0  93 3.85 2.320 18.61  1  1    4    1
## 4       Hornet 4 Drive 21.4   6 258.0 110 3.08 3.215 19.44  1  0    3    1
## 5    Hornet Sportabout 18.7   8 360.0 175 3.15 3.440 17.02  0  0    3    2
## 6              Valiant 18.1   6 225.0 105 2.76 3.460 20.22  1  0    3    1
## 7           Duster 360 14.3   8 360.0 245 3.21 3.570 15.84  0  0    3    4
## 8            Merc 240D 24.4   4 146.7  62 3.69 3.190 20.00  1  0    4    2
## 9             Merc 230 22.8   4 140.8  95 3.92 3.150 22.90  1  0    4    2
## 10            Merc 280 19.2   6 167.6 123 3.92 3.440 18.30  1  0    4    4
## 11           Merc 280C 17.8   6 167.6 123 3.92 3.440 18.90  1  0    4    4
## 12          Merc 450SE 16.4   8 275.8 180 3.07 4.070 17.40  0  0    3    3
## 13          Merc 450SL 17.3   8 275.8 180 3.07 3.730 17.60  0  0    3    3
## 14         Merc 450SLC 15.2   8 275.8 180 3.07 3.780 18.00  0  0    3    3
## 15  Cadillac Fleetwood 10.4   8 472.0 205 2.93 5.250 17.98  0  0    3    4
## 16 Lincoln Continental 10.4   8 460.0 215 3.00 5.424 17.82  0  0    3    4
## 17   Chrysler Imperial 14.7   8 440.0 230 3.23 5.345 17.42  0  0    3    4
## 18            Fiat 128 32.4   4  78.7  66 4.08 2.200 19.47  1  1    4    1
## 19         Honda Civic 30.4   4  75.7  52 4.93 1.615 18.52  1  1    4    2
## 20      Toyota Corolla 33.9   4  71.1  65 4.22 1.835 19.90  1  1    4    1
## 21       Toyota Corona 21.5   4 120.1  97 3.70 2.465 20.01  1  0    3    1
## 22    Dodge Challenger 15.5   8 318.0 150 2.76 3.520 16.87  0  0    3    2
## 23         AMC Javelin 15.2   8 304.0 150 3.15 3.435 17.30  0  0    3    2
## 24          Camaro Z28 13.3   8 350.0 245 3.73 3.840 15.41  0  0    3    4
## 25    Pontiac Firebird 19.2   8 400.0 175 3.08 3.845 17.05  0  0    3    2
## 26           Fiat X1-9 27.3   4  79.0  66 4.08 1.935 18.90  1  1    4    1
## 27       Porsche 914-2 26.0   4 120.3  91 4.43 2.140 16.70  0  1    5    2
## 28        Lotus Europa 30.4   4  95.1 113 3.77 1.513 16.90  1  1    5    2
## 29      Ford Pantera L 15.8   8 351.0 264 4.22 3.170 14.50  0  1    5    4
## 30        Ferrari Dino 19.7   6 145.0 175 3.62 2.770 15.50  0  1    5    6
## 31       Maserati Bora 15.0   8 301.0 335 3.54 3.570 14.60  0  1    5    8
## 32          Volvo 142E 21.4   4 121.0 109 4.11 2.780 18.60  1  1    4    2
data(iris)
head(iris)
##   Sepal.Length Sepal.Width Petal.Length Petal.Width Species
## 1          5.1         3.5          1.4         0.2  setosa
## 2          4.9         3.0          1.4         0.2  setosa
## 3          4.7         3.2          1.3         0.2  setosa
## 4          4.6         3.1          1.5         0.2  setosa
## 5          5.0         3.6          1.4         0.2  setosa
## 6          5.4         3.9          1.7         0.4  setosa
head(iris,3)
##   Sepal.Length Sepal.Width Petal.Length Petal.Width Species
## 1          5.1         3.5          1.4         0.2  setosa
## 2          4.9         3.0          1.4         0.2  setosa
## 3          4.7         3.2          1.3         0.2  setosa
tail(iris)
##     Sepal.Length Sepal.Width Petal.Length Petal.Width   Species
## 145          6.7         3.3          5.7         2.5 virginica
## 146          6.7         3.0          5.2         2.3 virginica
## 147          6.3         2.5          5.0         1.9 virginica
## 148          6.5         3.0          5.2         2.0 virginica
## 149          6.2         3.4          5.4         2.3 virginica
## 150          5.9         3.0          5.1         1.8 virginica
tail(iris,3)
##     Sepal.Length Sepal.Width Petal.Length Petal.Width   Species
## 148          6.5         3.0          5.2         2.0 virginica
## 149          6.2         3.4          5.4         2.3 virginica
## 150          5.9         3.0          5.1         1.8 virginica
str(iris)
## 'data.frame':    150 obs. of  5 variables:
##  $ Sepal.Length: num  5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
##  $ Sepal.Width : num  3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
##  $ Petal.Length: num  1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
##  $ Petal.Width : num  0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
##  $ Species     : Factor w/ 3 levels "setosa","versicolor",..: 1 1 1 1 1 1 1 1 1 1 ...
tail(data2)
##                 X  mpg cyl  disp  hp drat    wt qsec vs am gear carb
## 27  Porsche 914-2 26.0   4 120.3  91 4.43 2.140 16.7  0  1    5    2
## 28   Lotus Europa 30.4   4  95.1 113 3.77 1.513 16.9  1  1    5    2
## 29 Ford Pantera L 15.8   8 351.0 264 4.22 3.170 14.5  0  1    5    4
## 30   Ferrari Dino 19.7   6 145.0 175 3.62 2.770 15.5  0  1    5    6
## 31  Maserati Bora 15.0   8 301.0 335 3.54 3.570 14.6  0  1    5    8
## 32     Volvo 142E 21.4   4 121.0 109 4.11 2.780 18.6  1  1    4    2
tail(data2,3)
##                X  mpg cyl disp  hp drat   wt qsec vs am gear carb
## 30  Ferrari Dino 19.7   6  145 175 3.62 2.77 15.5  0  1    5    6
## 31 Maserati Bora 15.0   8  301 335 3.54 3.57 14.6  0  1    5    8
## 32    Volvo 142E 21.4   4  121 109 4.11 2.78 18.6  1  1    4    2
str(data2)
## 'data.frame':    32 obs. of  12 variables:
##  $ X   : chr  "Mazda RX4" "Mazda RX4 Wag" "Datsun 710" "Hornet 4 Drive" ...
##  $ mpg : num  21 21 22.8 21.4 18.7 18.1 14.3 24.4 22.8 19.2 ...
##  $ cyl : int  6 6 4 6 8 6 8 4 4 6 ...
##  $ disp: num  160 160 108 258 360 ...
##  $ hp  : int  110 110 93 110 175 105 245 62 95 123 ...
##  $ drat: num  3.9 3.9 3.85 3.08 3.15 2.76 3.21 3.69 3.92 3.92 ...
##  $ wt  : num  2.62 2.88 2.32 3.21 3.44 ...
##  $ qsec: num  16.5 17 18.6 19.4 17 ...
##  $ vs  : int  0 0 1 1 0 1 0 1 1 1 ...
##  $ am  : int  1 1 1 0 0 0 0 0 0 0 ...
##  $ gear: int  4 4 4 3 3 3 3 4 4 4 ...
##  $ carb: int  4 4 1 1 2 1 4 2 2 4 ...
tail(data)
##      Q1 Q2 Q3 Q4 Q5 Q6 Q7 Q8 Q9 Q10 Q11 Q12 Q13 Q14 Q15 Q16 Q17 Q18 Q19 Q20
## 1920  4  4  3  4  4  2  2  3  4   2   2   4   3   4   4   3   4   4   3   4
## 1921  2  2  2  1  2  2  2  2  2   2   1   3   2   1   3   2   2   2   2   2
## 1922  3  2  2  2  3  1  1  1  1   1   3   3   3   4   4   4   4   5   2   2
## 1923  5  4  4  4  4  2  2  2  2   3   3   4   3   4   3   3   3   4   4   4
## 1924  4  4  4  2  2  4  2  4  4   3   3   2   3   4   3   4   4   4   3   4
## 1925  3  3  1  1  2  1  1  1  1   1   4   4   3   2   2   3   4   4   3   2
##      Gender EDU  BF  BM Happiness Peace
## 1920      1   3 3.8 2.6       3.4   3.6
## 1921      1   2 1.8 2.0       2.0   2.0
## 1922      0   2 2.4 1.0       3.4   3.4
## 1923      0   2 4.2 2.2       3.4   3.6
## 1924      1   2 3.2 3.4       3.0   3.8
## 1925      0   3 2.0 1.0       3.0   3.2
tail(data,3)
##      Q1 Q2 Q3 Q4 Q5 Q6 Q7 Q8 Q9 Q10 Q11 Q12 Q13 Q14 Q15 Q16 Q17 Q18 Q19 Q20
## 1923  5  4  4  4  4  2  2  2  2   3   3   4   3   4   3   3   3   4   4   4
## 1924  4  4  4  2  2  4  2  4  4   3   3   2   3   4   3   4   4   4   3   4
## 1925  3  3  1  1  2  1  1  1  1   1   4   4   3   2   2   3   4   4   3   2
##      Gender EDU  BF  BM Happiness Peace
## 1923      0   2 4.2 2.2       3.4   3.6
## 1924      1   2 3.2 3.4       3.0   3.8
## 1925      0   3 2.0 1.0       3.0   3.2
str(data)
## 'data.frame':    1925 obs. of  26 variables:
##  $ Q1       : int  4 4 4 5 4 4 4 4 4 4 ...
##  $ Q2       : int  4 4 4 4 4 4 2 2 4 4 ...
##  $ Q3       : int  2 4 4 4 4 4 4 4 4 2 ...
##  $ Q4       : int  3 4 4 4 4 4 4 4 4 2 ...
##  $ Q5       : int  4 4 2 4 4 4 4 4 2 4 ...
##  $ Q6       : int  2 3 4 4 4 4 4 4 1 2 ...
##  $ Q7       : int  2 2 4 4 4 4 4 4 3 4 ...
##  $ Q8       : int  4 4 4 4 4 4 5 5 2 2 ...
##  $ Q9       : int  4 4 4 4 2 4 5 5 3 4 ...
##  $ Q10      : int  4 4 2 4 4 4 5 5 2 4 ...
##  $ Q11      : int  4 4 4 4 4 4 5 5 4 4 ...
##  $ Q12      : int  4 4 4 4 4 4 5 5 3 4 ...
##  $ Q13      : int  4 4 4 4 4 4 5 5 4 4 ...
##  $ Q14      : int  4 4 4 4 4 4 5 5 5 4 ...
##  $ Q15      : int  4 4 3 4 4 4 4 2 3 4 ...
##  $ Q16      : int  4 4 4 4 4 4 5 2 4 4 ...
##  $ Q17      : int  4 3 4 4 4 4 2 2 4 4 ...
##  $ Q18      : int  4 4 4 4 4 4 4 4 4 4 ...
##  $ Q19      : int  4 2 4 4 4 4 4 2 4 2 ...
##  $ Q20      : int  4 1 3 4 4 4 4 2 4 2 ...
##  $ Gender   : int  0 0 0 0 0 0 0 0 1 0 ...
##  $ EDU      : int  1 1 2 1 2 1 1 1 4 3 ...
##  $ BF       : num  3.4 4 3.6 4.2 4 4 3.6 3.6 3.6 3.2 ...
##  $ BM       : num  3.2 3.4 3.6 4 3.6 4 4.6 4.6 2.2 3.2 ...
##  $ Happiness: num  4 4 3.8 4 4 4 4.8 4.4 3.8 4 ...
##  $ Peace    : num  4 2.8 3.8 4 4 4 3.8 2.4 4 3.2 ...
dim(data)
## [1] 1925   26
dim(data2)
## [1] 32 12
ls(iris)
## [1] "Petal.Length" "Petal.Width"  "Sepal.Length" "Sepal.Width"  "Species"
rm(list = ls( ))

ls()
## character(0)
mtcars<-read.csv("mtcars.csv")

mean(mtcars$mpg)
## [1] 20.09062
median(mtcars$mpg)
## [1] 19.2
quantile(mtcars$mpg)
##     0%    25%    50%    75%   100% 
## 10.400 15.425 19.200 22.800 33.900
IQR(mtcars$mpg)
## [1] 7.375
mtcars$mpg
##  [1] 21.0 21.0 22.8 21.4 18.7 18.1 14.3 24.4 22.8 19.2 17.8 16.4 17.3 15.2 10.4
## [16] 10.4 14.7 32.4 30.4 33.9 21.5 15.5 15.2 13.3 19.2 27.3 26.0 30.4 15.8 19.7
## [31] 15.0 21.4
summary(iris)
##   Sepal.Length    Sepal.Width     Petal.Length    Petal.Width   
##  Min.   :4.300   Min.   :2.000   Min.   :1.000   Min.   :0.100  
##  1st Qu.:5.100   1st Qu.:2.800   1st Qu.:1.600   1st Qu.:0.300  
##  Median :5.800   Median :3.000   Median :4.350   Median :1.300  
##  Mean   :5.843   Mean   :3.057   Mean   :3.758   Mean   :1.199  
##  3rd Qu.:6.400   3rd Qu.:3.300   3rd Qu.:5.100   3rd Qu.:1.800  
##  Max.   :7.900   Max.   :4.400   Max.   :6.900   Max.   :2.500  
##        Species  
##  setosa    :50  
##  versicolor:50  
##  virginica :50  
##                 
##                 
## 
dim(iris)
## [1] 150   5

install.packages("dplyr")   # run once; library(dplyr) below loads it


rm(list=ls())

setwd("C:/Users/super/OneDrive/문서/01 Study/ADsP/data")
getwd()
## [1] "C:/Users/super/OneDrive/문서/01 Study/ADsP/data"
# Chapter 6: Handling missing values and outliers
# 01 Missing values
# A missing value (NA) is a cell where no value was recorded


library(dplyr)
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
data('airquality')
summary(airquality)
##      Ozone           Solar.R           Wind             Temp      
##  Min.   :  1.00   Min.   :  7.0   Min.   : 1.700   Min.   :56.00  
##  1st Qu.: 18.00   1st Qu.:115.8   1st Qu.: 7.400   1st Qu.:72.00  
##  Median : 31.50   Median :205.0   Median : 9.700   Median :79.00  
##  Mean   : 42.13   Mean   :185.9   Mean   : 9.958   Mean   :77.88  
##  3rd Qu.: 63.25   3rd Qu.:258.8   3rd Qu.:11.500   3rd Qu.:85.00  
##  Max.   :168.00   Max.   :334.0   Max.   :20.700   Max.   :97.00  
##  NA's   :37       NA's   :7                                       
##      Month            Day      
##  Min.   :5.000   Min.   : 1.0  
##  1st Qu.:6.000   1st Qu.: 8.0  
##  Median :7.000   Median :16.0  
##  Mean   :6.993   Mean   :15.8  
##  3rd Qu.:8.000   3rd Qu.:23.0  
##  Max.   :9.000   Max.   :31.0  
## 
str(airquality)
## 'data.frame':    153 obs. of  6 variables:
##  $ Ozone  : int  41 36 12 18 NA 28 23 19 8 NA ...
##  $ Solar.R: int  190 118 149 313 NA NA 299 99 19 194 ...
##  $ Wind   : num  7.4 8 12.6 11.5 14.3 14.9 8.6 13.8 20.1 8.6 ...
##  $ Temp   : int  67 72 74 62 56 66 65 59 61 69 ...
##  $ Month  : int  5 5 5 5 5 5 5 5 5 5 ...
##  $ Day    : int  1 2 3 4 5 6 7 8 9 10 ...
head(airquality)
##   Ozone Solar.R Wind Temp Month Day
## 1    41     190  7.4   67     5   1
## 2    36     118  8.0   72     5   2
## 3    12     149 12.6   74     5   3
## 4    18     313 11.5   62     5   4
## 5    NA      NA 14.3   56     5   5
## 6    28      NA 14.9   66     5   6
airquality
##     Ozone Solar.R Wind Temp Month Day
## 1      41     190  7.4   67     5   1
## 2      36     118  8.0   72     5   2
## 3      12     149 12.6   74     5   3
## 4      18     313 11.5   62     5   4
## 5      NA      NA 14.3   56     5   5
## 6      28      NA 14.9   66     5   6
## 7      23     299  8.6   65     5   7
## 8      19      99 13.8   59     5   8
## 9       8      19 20.1   61     5   9
## 10     NA     194  8.6   69     5  10
## 11      7      NA  6.9   74     5  11
## 12     16     256  9.7   69     5  12
## 13     11     290  9.2   66     5  13
## 14     14     274 10.9   68     5  14
## 15     18      65 13.2   58     5  15
## 16     14     334 11.5   64     5  16
## 17     34     307 12.0   66     5  17
## 18      6      78 18.4   57     5  18
## 19     30     322 11.5   68     5  19
## 20     11      44  9.7   62     5  20
## 21      1       8  9.7   59     5  21
## 22     11     320 16.6   73     5  22
## 23      4      25  9.7   61     5  23
## 24     32      92 12.0   61     5  24
## 25     NA      66 16.6   57     5  25
## 26     NA     266 14.9   58     5  26
## 27     NA      NA  8.0   57     5  27
## 28     23      13 12.0   67     5  28
## 29     45     252 14.9   81     5  29
## 30    115     223  5.7   79     5  30
## 31     37     279  7.4   76     5  31
## 32     NA     286  8.6   78     6   1
## 33     NA     287  9.7   74     6   2
## 34     NA     242 16.1   67     6   3
## 35     NA     186  9.2   84     6   4
## 36     NA     220  8.6   85     6   5
## 37     NA     264 14.3   79     6   6
## 38     29     127  9.7   82     6   7
## 39     NA     273  6.9   87     6   8
## 40     71     291 13.8   90     6   9
## 41     39     323 11.5   87     6  10
## 42     NA     259 10.9   93     6  11
## 43     NA     250  9.2   92     6  12
## 44     23     148  8.0   82     6  13
## 45     NA     332 13.8   80     6  14
## 46     NA     322 11.5   79     6  15
## 47     21     191 14.9   77     6  16
## 48     37     284 20.7   72     6  17
## 49     20      37  9.2   65     6  18
## 50     12     120 11.5   73     6  19
## 51     13     137 10.3   76     6  20
## 52     NA     150  6.3   77     6  21
## 53     NA      59  1.7   76     6  22
## 54     NA      91  4.6   76     6  23
## 55     NA     250  6.3   76     6  24
## 56     NA     135  8.0   75     6  25
## 57     NA     127  8.0   78     6  26
## 58     NA      47 10.3   73     6  27
## 59     NA      98 11.5   80     6  28
## 60     NA      31 14.9   77     6  29
## 61     NA     138  8.0   83     6  30
## 62    135     269  4.1   84     7   1
## 63     49     248  9.2   85     7   2
## 64     32     236  9.2   81     7   3
## 65     NA     101 10.9   84     7   4
## 66     64     175  4.6   83     7   5
## 67     40     314 10.9   83     7   6
## 68     77     276  5.1   88     7   7
## 69     97     267  6.3   92     7   8
## 70     97     272  5.7   92     7   9
## 71     85     175  7.4   89     7  10
## 72     NA     139  8.6   82     7  11
## 73     10     264 14.3   73     7  12
## 74     27     175 14.9   81     7  13
## 75     NA     291 14.9   91     7  14
## 76      7      48 14.3   80     7  15
## 77     48     260  6.9   81     7  16
## 78     35     274 10.3   82     7  17
## 79     61     285  6.3   84     7  18
## 80     79     187  5.1   87     7  19
## 81     63     220 11.5   85     7  20
## 82     16       7  6.9   74     7  21
## 83     NA     258  9.7   81     7  22
## 84     NA     295 11.5   82     7  23
## 85     80     294  8.6   86     7  24
## 86    108     223  8.0   85     7  25
## 87     20      81  8.6   82     7  26
## 88     52      82 12.0   86     7  27
## 89     82     213  7.4   88     7  28
## 90     50     275  7.4   86     7  29
## 91     64     253  7.4   83     7  30
## 92     59     254  9.2   81     7  31
## 93     39      83  6.9   81     8   1
## 94      9      24 13.8   81     8   2
## 95     16      77  7.4   82     8   3
## 96     78      NA  6.9   86     8   4
## 97     35      NA  7.4   85     8   5
## 98     66      NA  4.6   87     8   6
## 99    122     255  4.0   89     8   7
## 100    89     229 10.3   90     8   8
## 101   110     207  8.0   90     8   9
## 102    NA     222  8.6   92     8  10
## 103    NA     137 11.5   86     8  11
## 104    44     192 11.5   86     8  12
## 105    28     273 11.5   82     8  13
## 106    65     157  9.7   80     8  14
## 107    NA      64 11.5   79     8  15
## 108    22      71 10.3   77     8  16
## 109    59      51  6.3   79     8  17
## 110    23     115  7.4   76     8  18
## 111    31     244 10.9   78     8  19
## 112    44     190 10.3   78     8  20
## 113    21     259 15.5   77     8  21
## 114     9      36 14.3   72     8  22
## 115    NA     255 12.6   75     8  23
## 116    45     212  9.7   79     8  24
## 117   168     238  3.4   81     8  25
## 118    73     215  8.0   86     8  26
## 119    NA     153  5.7   88     8  27
## 120    76     203  9.7   97     8  28
## 121   118     225  2.3   94     8  29
## 122    84     237  6.3   96     8  30
## 123    85     188  6.3   94     8  31
## 124    96     167  6.9   91     9   1
## 125    78     197  5.1   92     9   2
## 126    73     183  2.8   93     9   3
## 127    91     189  4.6   93     9   4
## 128    47      95  7.4   87     9   5
## 129    32      92 15.5   84     9   6
## 130    20     252 10.9   80     9   7
## 131    23     220 10.3   78     9   8
## 132    21     230 10.9   75     9   9
## 133    24     259  9.7   73     9  10
## 134    44     236 14.9   81     9  11
## 135    21     259 15.5   76     9  12
## 136    28     238  6.3   77     9  13
## 137     9      24 10.9   71     9  14
## 138    13     112 11.5   71     9  15
## 139    46     237  6.9   78     9  16
## 140    18     224 13.8   67     9  17
## 141    13      27 10.3   76     9  18
## 142    24     238 10.3   68     9  19
## 143    16     201  8.0   82     9  20
## 144    13     238 12.6   64     9  21
## 145    23      14  9.2   71     9  22
## 146    36     139 10.3   81     9  23
## 147     7      49 10.3   69     9  24
## 148    14      20 16.6   63     9  25
## 149    30     193  6.9   70     9  26
## 150    NA     145 13.2   77     9  27
## 151    14     191 14.3   75     9  28
## 152    18     131  8.0   76     9  29
## 153    20     223 11.5   68     9  30
# Mean ozone level in May, excluding NA values

mean(airquality[1:31,1], na.rm=T)
## [1] 23.61538
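# A small sketch (my addition): the same May mean selected by column name
# and a logical filter instead of positional indexing
mean(airquality$Ozone[airquality$Month == 5], na.rm = TRUE)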
# 02 Outliers
# An outlier is a value that is markedly larger or smaller than the other
# observations -- statistically far removed from the rest of the data.

data(iris)

# A boxplot is a visualization tool that shows the distribution and the
# outliers of the data at a glance. It is widely used to inspect the
# median, quartiles, range, and outliers.

summary(iris)
##   Sepal.Length    Sepal.Width     Petal.Length    Petal.Width   
##  Min.   :4.300   Min.   :2.000   Min.   :1.000   Min.   :0.100  
##  1st Qu.:5.100   1st Qu.:2.800   1st Qu.:1.600   1st Qu.:0.300  
##  Median :5.800   Median :3.000   Median :4.350   Median :1.300  
##  Mean   :5.843   Mean   :3.057   Mean   :3.758   Mean   :1.199  
##  3rd Qu.:6.400   3rd Qu.:3.300   3rd Qu.:5.100   3rd Qu.:1.800  
##  Max.   :7.900   Max.   :4.400   Max.   :6.900   Max.   :2.500  
##        Species  
##  setosa    :50  
##  versicolor:50  
##  virginica :50  
##                 
##                 
## 
boxplot(iris$Petal.Length~iris$Species, data=iris)

data("ChickWeight")
summary(ChickWeight)
##      weight           Time           Chick     Diet   
##  Min.   : 35.0   Min.   : 0.00   13     : 12   1:220  
##  1st Qu.: 63.0   1st Qu.: 4.00   9      : 12   2:120  
##  Median :103.0   Median :10.00   20     : 12   3:120  
##  Mean   :121.8   Mean   :10.72   10     : 12   4:118  
##  3rd Qu.:163.8   3rd Qu.:16.00   17     : 12          
##  Max.   :373.0   Max.   :21.00   19     : 12          
##                                  (Other):506
boxplot(ChickWeight$weight~ChickWeight$Diet, data=ChickWeight)

data("chickwts")
summary(chickwts)
##      weight             feed   
##  Min.   :108.0   casein   :12  
##  1st Qu.:204.5   horsebean:10  
##  Median :258.0   linseed  :12  
##  Mean   :261.3   meatmeal :11  
##  3rd Qu.:323.5   soybean  :14  
##  Max.   :423.0   sunflower:12
boxplot(chickwts$weight~chickwts$feed, data=chickwts)

hist(chickwts$weight)

density(chickwts$weight)
## 
## Call:
##  density.default(x = chickwts$weight)
## 
## Data: chickwts$weight (71 obs.); Bandwidth 'bw' = 29.96
## 
##        x                y            
##  Min.   : 18.13   Min.   :2.463e-06  
##  1st Qu.:141.81   1st Qu.:2.843e-04  
##  Median :265.50   Median :2.063e-03  
##  Mean   :265.50   Mean   :2.017e-03  
##  3rd Qu.:389.19   3rd Qu.:3.716e-03  
##  Max.   :512.87   Max.   :4.268e-03
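# A minimal sketch (my addition): the 1.5 * IQR rule behind the boxplot
# whiskers, applied to the chickwts$weight column loaded above
q <- quantile(chickwts$weight, c(0.25, 0.75))
fence <- 1.5 * IQR(chickwts$weight)
chickwts$weight[chickwts$weight < q[1] - fence | chickwts$weight > q[2] + fence]

# boxplot.stats() reports the flagged points directly; it uses hinges rather
# than type-7 quantiles, so its fences can differ slightly from quantile()
boxplot.stats(chickwts$weight)$out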

getwd()
setwd("C:/Users/super/OneDrive/문서/01 Study/ADsP/data")

install.packages("caret")   # run once
library(caret)

data(iris)
nrow(iris)

# Stratified 60/40 train/test split on Species
idx <- createDataPartition(iris$Species, p=0.6, list=FALSE)
idx
train <- iris[idx,]
test <- iris[-idx,]
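# Check (my addition): createDataPartition() samples within each Species
# level, so both sets should keep roughly the 60/40 ratio per class
table(train$Species)
table(test$Species)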

round(0.7811, 2)

library(dplyr)
library(ggplot2)

data("diamonds")
diamonds %>% head %>% dim

summary(diamonds)
str(diamonds)

dim(head(diamonds))

# rename(new = old): shorten clarity/price to c/p
diamonds1 <- diamonds %>% rename(c=clarity, p=price)
head(diamonds1, 3)

# Fresh start

rm(list=ls())

library(caret)
library(dplyr)
library(ggplot2)

summary(diamonds)
str(diamonds)

count(diamonds, cut)

df1 <- diamonds %>% select(carat, price)
head(df1, 3)

diamonds %>% filter(cut=="Good") %>% head(3)

diamonds %>%
  mutate(Ratio=price/carat, Double=Ratio*2) %>%
  select(price, carat, Ratio, Double) %>%
  head(3)
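# A short extra sketch (my addition, same data): group_by() + summarise()
# combine with the verbs above to get per-group statistics
diamonds %>% group_by(cut) %>% summarise(mean_price = mean(price))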

# Package installation

install.packages("reshape2")   # run once


# Fresh start
data('airquality')
summary(airquality)
##      Ozone           Solar.R           Wind             Temp      
##  Min.   :  1.00   Min.   :  7.0   Min.   : 1.700   Min.   :56.00  
##  1st Qu.: 18.00   1st Qu.:115.8   1st Qu.: 7.400   1st Qu.:72.00  
##  Median : 31.50   Median :205.0   Median : 9.700   Median :79.00  
##  Mean   : 42.13   Mean   :185.9   Mean   : 9.958   Mean   :77.88  
##  3rd Qu.: 63.25   3rd Qu.:258.8   3rd Qu.:11.500   3rd Qu.:85.00  
##  Max.   :168.00   Max.   :334.0   Max.   :20.700   Max.   :97.00  
##  NA's   :37       NA's   :7                                       
##      Month            Day      
##  Min.   :5.000   Min.   : 1.0  
##  1st Qu.:6.000   1st Qu.: 8.0  
##  Median :7.000   Median :16.0  
##  Mean   :6.993   Mean   :15.8  
##  3rd Qu.:8.000   3rd Qu.:23.0  
##  Max.   :9.000   Max.   :31.0  
## 
library(reshape2)
library(dplyr)

head(airquality)
##   Ozone Solar.R Wind Temp Month Day
## 1    41     190  7.4   67     5   1
## 2    36     118  8.0   72     5   2
## 3    12     149 12.6   74     5   3
## 4    18     313 11.5   62     5   4
## 5    NA      NA 14.3   56     5   5
## 6    28      NA 14.9   66     5   6
tail(airquality)
##     Ozone Solar.R Wind Temp Month Day
## 148    14      20 16.6   63     9  25
## 149    30     193  6.9   70     9  26
## 150    NA     145 13.2   77     9  27
## 151    14     191 14.3   75     9  28
## 152    18     131  8.0   76     9  29
## 153    20     223 11.5   68     9  30
# melt() converts a data frame from wide format to long format

df<-melt(airquality, id.vars = c("Month", "Day"))

head(df)
##   Month Day variable value
## 1     5   1    Ozone    41
## 2     5   2    Ozone    36
## 3     5   3    Ozone    12
## 4     5   4    Ozone    18
## 5     5   5    Ozone    NA
## 6     5   6    Ozone    28
names(df)
## [1] "Month"    "Day"      "variable" "value"
df1<-dplyr::rename(df, enviro = variable, measure = value)

library(ggplot2)

ggplot(df1, aes(x = enviro, y = measure)) +
  geom_boxplot(fill = "skyblue", color = "black") +
  labs(title = "환경 변수별 분포 (boxplot)",
       x = "환경 변수",
       y = "측정값") +
  theme_minimal()
## Warning: Removed 44 rows containing non-finite outside the scale range
## (`stat_boxplot()`).

# Ozone : outliers present
# Temp  : no outliers

#mtcars<-read.csv("mtcars.csv")

data(mtcars)
head(mtcars)
##                    mpg cyl disp  hp drat    wt  qsec vs am gear carb
## Mazda RX4         21.0   6  160 110 3.90 2.620 16.46  0  1    4    4
## Mazda RX4 Wag     21.0   6  160 110 3.90 2.875 17.02  0  1    4    4
## Datsun 710        22.8   4  108  93 3.85 2.320 18.61  1  1    4    1
## Hornet 4 Drive    21.4   6  258 110 3.08 3.215 19.44  1  0    3    1
## Hornet Sportabout 18.7   8  360 175 3.15 3.440 17.02  0  0    3    2
## Valiant           18.1   6  225 105 2.76 3.460 20.22  1  0    3    1
mtcars$car <- rownames(mtcars)

head(mtcars)
##                    mpg cyl disp  hp drat    wt  qsec vs am gear carb
## Mazda RX4         21.0   6  160 110 3.90 2.620 16.46  0  1    4    4
## Mazda RX4 Wag     21.0   6  160 110 3.90 2.875 17.02  0  1    4    4
## Datsun 710        22.8   4  108  93 3.85 2.320 18.61  1  1    4    1
## Hornet 4 Drive    21.4   6  258 110 3.08 3.215 19.44  1  0    3    1
## Hornet Sportabout 18.7   8  360 175 3.15 3.440 17.02  0  0    3    2
## Valiant           18.1   6  225 105 2.76 3.460 20.22  1  0    3    1
##                                 car
## Mazda RX4                 Mazda RX4
## Mazda RX4 Wag         Mazda RX4 Wag
## Datsun 710               Datsun 710
## Hornet 4 Drive       Hornet 4 Drive
## Hornet Sportabout Hornet Sportabout
## Valiant                     Valiant
melt(mtcars, id.vars = "car",
     variable.name = "feature",
     value.name = "value")
##                     car feature   value
## 1             Mazda RX4     mpg  21.000
## 2         Mazda RX4 Wag     mpg  21.000
## 3            Datsun 710     mpg  22.800
## 4        Hornet 4 Drive     mpg  21.400
## 5     Hornet Sportabout     mpg  18.700
## 6               Valiant     mpg  18.100
## 7            Duster 360     mpg  14.300
## 8             Merc 240D     mpg  24.400
## 9              Merc 230     mpg  22.800
## 10             Merc 280     mpg  19.200
## 11            Merc 280C     mpg  17.800
## 12           Merc 450SE     mpg  16.400
## 13           Merc 450SL     mpg  17.300
## 14          Merc 450SLC     mpg  15.200
## 15   Cadillac Fleetwood     mpg  10.400
## 16  Lincoln Continental     mpg  10.400
## 17    Chrysler Imperial     mpg  14.700
## 18             Fiat 128     mpg  32.400
## 19          Honda Civic     mpg  30.400
## 20       Toyota Corolla     mpg  33.900
## 21        Toyota Corona     mpg  21.500
## 22     Dodge Challenger     mpg  15.500
## 23          AMC Javelin     mpg  15.200
## 24           Camaro Z28     mpg  13.300
## 25     Pontiac Firebird     mpg  19.200
## 26            Fiat X1-9     mpg  27.300
## 27        Porsche 914-2     mpg  26.000
## 28         Lotus Europa     mpg  30.400
## 29       Ford Pantera L     mpg  15.800
## 30         Ferrari Dino     mpg  19.700
## 31        Maserati Bora     mpg  15.000
## 32           Volvo 142E     mpg  21.400
## 33            Mazda RX4     cyl   6.000
## 34        Mazda RX4 Wag     cyl   6.000
## 35           Datsun 710     cyl   4.000
## 36       Hornet 4 Drive     cyl   6.000
## 37    Hornet Sportabout     cyl   8.000
## 38              Valiant     cyl   6.000
## 39           Duster 360     cyl   8.000
## 40            Merc 240D     cyl   4.000
## 41             Merc 230     cyl   4.000
## 42             Merc 280     cyl   6.000
## 43            Merc 280C     cyl   6.000
## 44           Merc 450SE     cyl   8.000
## 45           Merc 450SL     cyl   8.000
## 46          Merc 450SLC     cyl   8.000
## 47   Cadillac Fleetwood     cyl   8.000
## 48  Lincoln Continental     cyl   8.000
## 49    Chrysler Imperial     cyl   8.000
## 50             Fiat 128     cyl   4.000
## 51          Honda Civic     cyl   4.000
## 52       Toyota Corolla     cyl   4.000
## 53        Toyota Corona     cyl   4.000
## 54     Dodge Challenger     cyl   8.000
## 55          AMC Javelin     cyl   8.000
## 56           Camaro Z28     cyl   8.000
## 57     Pontiac Firebird     cyl   8.000
## 58            Fiat X1-9     cyl   4.000
## 59        Porsche 914-2     cyl   4.000
## 60         Lotus Europa     cyl   4.000
## 61       Ford Pantera L     cyl   8.000
## 62         Ferrari Dino     cyl   6.000
## 63        Maserati Bora     cyl   8.000
## 64           Volvo 142E     cyl   4.000
## 65            Mazda RX4    disp 160.000
## 66        Mazda RX4 Wag    disp 160.000
## 67           Datsun 710    disp 108.000
## 68       Hornet 4 Drive    disp 258.000
## 69    Hornet Sportabout    disp 360.000
## 70              Valiant    disp 225.000
## 71           Duster 360    disp 360.000
## 72            Merc 240D    disp 146.700
## 73             Merc 230    disp 140.800
## 74             Merc 280    disp 167.600
## 75            Merc 280C    disp 167.600
## 76           Merc 450SE    disp 275.800
## 77           Merc 450SL    disp 275.800
## 78          Merc 450SLC    disp 275.800
## 79   Cadillac Fleetwood    disp 472.000
## 80  Lincoln Continental    disp 460.000
## 81    Chrysler Imperial    disp 440.000
## 82             Fiat 128    disp  78.700
## 83          Honda Civic    disp  75.700
## 84       Toyota Corolla    disp  71.100
## 85        Toyota Corona    disp 120.100
## 86     Dodge Challenger    disp 318.000
## 87          AMC Javelin    disp 304.000
## 88           Camaro Z28    disp 350.000
## 89     Pontiac Firebird    disp 400.000
## 90            Fiat X1-9    disp  79.000
## 91        Porsche 914-2    disp 120.300
## 92         Lotus Europa    disp  95.100
## 93       Ford Pantera L    disp 351.000
## 94         Ferrari Dino    disp 145.000
## 95        Maserati Bora    disp 301.000
## 96           Volvo 142E    disp 121.000
## 97            Mazda RX4      hp 110.000
## 98        Mazda RX4 Wag      hp 110.000
## 99           Datsun 710      hp  93.000
## 100      Hornet 4 Drive      hp 110.000
## 101   Hornet Sportabout      hp 175.000
## 102             Valiant      hp 105.000
## 103          Duster 360      hp 245.000
## 104           Merc 240D      hp  62.000
## 105            Merc 230      hp  95.000
## 106            Merc 280      hp 123.000
## 107           Merc 280C      hp 123.000
## 108          Merc 450SE      hp 180.000
## 109          Merc 450SL      hp 180.000
## 110         Merc 450SLC      hp 180.000
## 111  Cadillac Fleetwood      hp 205.000
## 112 Lincoln Continental      hp 215.000
## 113   Chrysler Imperial      hp 230.000
## 114            Fiat 128      hp  66.000
## 115         Honda Civic      hp  52.000
## 116      Toyota Corolla      hp  65.000
## 117       Toyota Corona      hp  97.000
## 118    Dodge Challenger      hp 150.000
## 119         AMC Javelin      hp 150.000
## 120          Camaro Z28      hp 245.000
## 121    Pontiac Firebird      hp 175.000
## 122           Fiat X1-9      hp  66.000
## 123       Porsche 914-2      hp  91.000
## 124        Lotus Europa      hp 113.000
## 125      Ford Pantera L      hp 264.000
## 126        Ferrari Dino      hp 175.000
## 127       Maserati Bora      hp 335.000
## 128          Volvo 142E      hp 109.000
## 129           Mazda RX4    drat   3.900
## 130       Mazda RX4 Wag    drat   3.900
## 131          Datsun 710    drat   3.850
## 132      Hornet 4 Drive    drat   3.080
## 133   Hornet Sportabout    drat   3.150
## 134             Valiant    drat   2.760
## 135          Duster 360    drat   3.210
## 136           Merc 240D    drat   3.690
## 137            Merc 230    drat   3.920
## 138            Merc 280    drat   3.920
## 139           Merc 280C    drat   3.920
## 140          Merc 450SE    drat   3.070
## 141          Merc 450SL    drat   3.070
## 142         Merc 450SLC    drat   3.070
## 143  Cadillac Fleetwood    drat   2.930
## 144 Lincoln Continental    drat   3.000
## 145   Chrysler Imperial    drat   3.230
## 146            Fiat 128    drat   4.080
## 147         Honda Civic    drat   4.930
## 148      Toyota Corolla    drat   4.220
## 149       Toyota Corona    drat   3.700
## 150    Dodge Challenger    drat   2.760
## 151         AMC Javelin    drat   3.150
## 152          Camaro Z28    drat   3.730
## 153    Pontiac Firebird    drat   3.080
## 154           Fiat X1-9    drat   4.080
## 155       Porsche 914-2    drat   4.430
## 156        Lotus Europa    drat   3.770
## 157      Ford Pantera L    drat   4.220
## 158        Ferrari Dino    drat   3.620
## 159       Maserati Bora    drat   3.540
## 160          Volvo 142E    drat   4.110
## 161           Mazda RX4      wt   2.620
## 162       Mazda RX4 Wag      wt   2.875
## 163          Datsun 710      wt   2.320
## 164      Hornet 4 Drive      wt   3.215
## 165   Hornet Sportabout      wt   3.440
## 166             Valiant      wt   3.460
## 167          Duster 360      wt   3.570
## 168           Merc 240D      wt   3.190
## 169            Merc 230      wt   3.150
## 170            Merc 280      wt   3.440
## 171           Merc 280C      wt   3.440
## 172          Merc 450SE      wt   4.070
## 173          Merc 450SL      wt   3.730
## 174         Merc 450SLC      wt   3.780
## 175  Cadillac Fleetwood      wt   5.250
## 176 Lincoln Continental      wt   5.424
## 177   Chrysler Imperial      wt   5.345
## 178            Fiat 128      wt   2.200
## 179         Honda Civic      wt   1.615
## 180      Toyota Corolla      wt   1.835
## 181       Toyota Corona      wt   2.465
## 182    Dodge Challenger      wt   3.520
## 183         AMC Javelin      wt   3.435
## 184          Camaro Z28      wt   3.840
## 185    Pontiac Firebird      wt   3.845
## 186           Fiat X1-9      wt   1.935
## 187       Porsche 914-2      wt   2.140
## 188        Lotus Europa      wt   1.513
## 189      Ford Pantera L      wt   3.170
## 190        Ferrari Dino      wt   2.770
## 191       Maserati Bora      wt   3.570
## 192          Volvo 142E      wt   2.780
## 193           Mazda RX4    qsec  16.460
## 194       Mazda RX4 Wag    qsec  17.020
## 195          Datsun 710    qsec  18.610
## 196      Hornet 4 Drive    qsec  19.440
## 197   Hornet Sportabout    qsec  17.020
## 198             Valiant    qsec  20.220
## 199          Duster 360    qsec  15.840
## 200           Merc 240D    qsec  20.000
## 201            Merc 230    qsec  22.900
## 202            Merc 280    qsec  18.300
## 203           Merc 280C    qsec  18.900
## 204          Merc 450SE    qsec  17.400
## 205          Merc 450SL    qsec  17.600
## 206         Merc 450SLC    qsec  18.000
## 207  Cadillac Fleetwood    qsec  17.980
## 208 Lincoln Continental    qsec  17.820
## 209   Chrysler Imperial    qsec  17.420
## 210            Fiat 128    qsec  19.470
## 211         Honda Civic    qsec  18.520
## 212      Toyota Corolla    qsec  19.900
## 213       Toyota Corona    qsec  20.010
## 214    Dodge Challenger    qsec  16.870
## 215         AMC Javelin    qsec  17.300
## 216          Camaro Z28    qsec  15.410
## 217    Pontiac Firebird    qsec  17.050
## 218           Fiat X1-9    qsec  18.900
## 219       Porsche 914-2    qsec  16.700
## 220        Lotus Europa    qsec  16.900
## 221      Ford Pantera L    qsec  14.500
## 222        Ferrari Dino    qsec  15.500
## 223       Maserati Bora    qsec  14.600
## 224          Volvo 142E    qsec  18.600
## 225           Mazda RX4      vs   0.000
## 226       Mazda RX4 Wag      vs   0.000
## 227          Datsun 710      vs   1.000
## 228      Hornet 4 Drive      vs   1.000
## 229   Hornet Sportabout      vs   0.000
## 230             Valiant      vs   1.000
## 231          Duster 360      vs   0.000
## 232           Merc 240D      vs   1.000
## 233            Merc 230      vs   1.000
## 234            Merc 280      vs   1.000
## 235           Merc 280C      vs   1.000
## 236          Merc 450SE      vs   0.000
## 237          Merc 450SL      vs   0.000
## 238         Merc 450SLC      vs   0.000
## 239  Cadillac Fleetwood      vs   0.000
## 240 Lincoln Continental      vs   0.000
## 241   Chrysler Imperial      vs   0.000
## 242            Fiat 128      vs   1.000
## 243         Honda Civic      vs   1.000
## 244      Toyota Corolla      vs   1.000
## 245       Toyota Corona      vs   1.000
## 246    Dodge Challenger      vs   0.000
## 247         AMC Javelin      vs   0.000
## 248          Camaro Z28      vs   0.000
## 249    Pontiac Firebird      vs   0.000
## 250           Fiat X1-9      vs   1.000
## 251       Porsche 914-2      vs   0.000
## 252        Lotus Europa      vs   1.000
## 253      Ford Pantera L      vs   0.000
## 254        Ferrari Dino      vs   0.000
## 255       Maserati Bora      vs   0.000
## 256          Volvo 142E      vs   1.000
## 257           Mazda RX4      am   1.000
## 258       Mazda RX4 Wag      am   1.000
## 259          Datsun 710      am   1.000
## 260      Hornet 4 Drive      am   0.000
## 261   Hornet Sportabout      am   0.000
## 262             Valiant      am   0.000
## 263          Duster 360      am   0.000
## 264           Merc 240D      am   0.000
## 265            Merc 230      am   0.000
## 266            Merc 280      am   0.000
## 267           Merc 280C      am   0.000
## 268          Merc 450SE      am   0.000
## 269          Merc 450SL      am   0.000
## 270         Merc 450SLC      am   0.000
## 271  Cadillac Fleetwood      am   0.000
## 272 Lincoln Continental      am   0.000
## 273   Chrysler Imperial      am   0.000
## 274            Fiat 128      am   1.000
## 275         Honda Civic      am   1.000
## 276      Toyota Corolla      am   1.000
## 277       Toyota Corona      am   0.000
## 278    Dodge Challenger      am   0.000
## 279         AMC Javelin      am   0.000
## 280          Camaro Z28      am   0.000
## 281    Pontiac Firebird      am   0.000
## 282           Fiat X1-9      am   1.000
## 283       Porsche 914-2      am   1.000
## 284        Lotus Europa      am   1.000
## 285      Ford Pantera L      am   1.000
## 286        Ferrari Dino      am   1.000
## 287       Maserati Bora      am   1.000
## 288          Volvo 142E      am   1.000
## 289           Mazda RX4    gear   4.000
## 290       Mazda RX4 Wag    gear   4.000
## 291          Datsun 710    gear   4.000
## 292      Hornet 4 Drive    gear   3.000
## 293   Hornet Sportabout    gear   3.000
## 294             Valiant    gear   3.000
## 295          Duster 360    gear   3.000
## 296           Merc 240D    gear   4.000
## 297            Merc 230    gear   4.000
## 298            Merc 280    gear   4.000
## 299           Merc 280C    gear   4.000
## 300          Merc 450SE    gear   3.000
## 301          Merc 450SL    gear   3.000
## 302         Merc 450SLC    gear   3.000
## 303  Cadillac Fleetwood    gear   3.000
## 304 Lincoln Continental    gear   3.000
## 305   Chrysler Imperial    gear   3.000
## 306            Fiat 128    gear   4.000
## 307         Honda Civic    gear   4.000
## 308      Toyota Corolla    gear   4.000
## 309       Toyota Corona    gear   3.000
## 310    Dodge Challenger    gear   3.000
## 311         AMC Javelin    gear   3.000
## 312          Camaro Z28    gear   3.000
## 313    Pontiac Firebird    gear   3.000
## 314           Fiat X1-9    gear   4.000
## 315       Porsche 914-2    gear   5.000
## 316        Lotus Europa    gear   5.000
## 317      Ford Pantera L    gear   5.000
## 318        Ferrari Dino    gear   5.000
## 319       Maserati Bora    gear   5.000
## 320          Volvo 142E    gear   4.000
## 321           Mazda RX4    carb   4.000
## 322       Mazda RX4 Wag    carb   4.000
## 323          Datsun 710    carb   1.000
## 324      Hornet 4 Drive    carb   1.000
## 325   Hornet Sportabout    carb   2.000
## 326             Valiant    carb   1.000
## 327          Duster 360    carb   4.000
## 328           Merc 240D    carb   2.000
## 329            Merc 230    carb   2.000
## 330            Merc 280    carb   4.000
## 331           Merc 280C    carb   4.000
## 332          Merc 450SE    carb   3.000
## 333          Merc 450SL    carb   3.000
## 334         Merc 450SLC    carb   3.000
## 335  Cadillac Fleetwood    carb   4.000
## 336 Lincoln Continental    carb   4.000
## 337   Chrysler Imperial    carb   4.000
## 338            Fiat 128    carb   1.000
## 339         Honda Civic    carb   2.000
## 340      Toyota Corolla    carb   1.000
## 341       Toyota Corona    carb   1.000
## 342    Dodge Challenger    carb   2.000
## 343         AMC Javelin    carb   2.000
## 344          Camaro Z28    carb   4.000
## 345    Pontiac Firebird    carb   2.000
## 346           Fiat X1-9    carb   1.000
## 347       Porsche 914-2    carb   2.000
## 348        Lotus Europa    carb   2.000
## 349      Ford Pantera L    carb   4.000
## 350        Ferrari Dino    carb   6.000
## 351       Maserati Bora    carb   8.000
## 352          Volvo 142E    carb   2.000
ggplot(mtcars, aes(x = hp, y = mpg, label = rownames(mtcars))) +
  geom_point() +
  geom_text(nudge_y = 1, size = 3) +
  labs(title = "마력(hp) vs 연비(mpg)", x = "마력", y = "연비") +
  theme_minimal()

# nudge_y = 1 shifts each label up by 1 on the y axis so it does not overlap its point

install.packages("plyr")   # run once

library(plyr)

head(airquality)

# Monthly mean ozone with plyr::ddply(), ignoring NAs
ddply(airquality, .(Month), summarise, Mean_Ozone = mean(Ozone, na.rm = TRUE))
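# Equivalent sketch with dplyr (my addition). Loading plyr after dplyr masks
# summarise(), so the namespace is spelled out explicitly here
airquality %>%
  dplyr::group_by(Month) %>%
  dplyr::summarise(Mean_Ozone = mean(Ozone, na.rm = TRUE))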

data(infert)

head(infert)
str(infert)

infert$age
attach(infert)   # attach() puts infert's columns on the search path

age

# Three equivalent ways to get the standard deviation of age
sd(age)
sqrt(var(age))
var(age)^(1/2)

install.packages("ISLR")   # run once
library(ISLR)
summary(Wage)

str(Wage)

# Larger samples from rnorm() look closer to the normal curve
par(mfrow = c(1, 3))
hist(rnorm(10), main = "n = 10")
hist(rnorm(100), main = "n = 100")
hist(rnorm(1000), main = "n = 1000")

# as.vector() flattens a matrix column by column
mx <- matrix(c(23,41,12,35,67,1,24,7,53), nrow=3)
as.vector(mx)

# substr() extracts characters by position
DF <- c("Monday","Tuesday","Wednesday")
substr(DF,1,3)

install.packages("gapminder")   # run once

library(gapminder)
head(gapminder)

# Extract the year (first four characters) from date strings
date_vec <- c("2020-05-01", "2021-08-15", "2022-12-31")
year_vec <- substr(date_vec, 1, 4)
print(year_vec)

install.packages("dplyr")   # run once

library(dplyr)
library(ggplot2)

data(economics)
head(economics)

# 1. Convert the Date column to character
date_str <- as.character(economics$date)

# 2. Extract the year with substr() (characters 1-4)
economics$year <- substr(date_str, 1, 4)

# Yearly means of consumption, population, and savings rate
economics %>% group_by(year) %>% summarise(mean_pce = mean(pce))
economics %>% group_by(year) %>% summarise(mean_pop = mean(pop))
economics %>% group_by(year) %>% summarise(mean_psavert = mean(psavert))
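# Alternative sketch (my addition): format() pulls the year straight from a
# Date object, without the character conversion above
economics$year2 <- format(economics$date, "%Y")
identical(economics$year, economics$year2)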

# Only the first format string matches "08 / 13 / 2013"; the other three
# fail to match (or name invalid fields for a date) and return NA
as.Date("08 / 13 / 2013", "%m / %d / %Y")
as.Date("08 / 13 / 2013", "%D / %M / %Y")
as.Date("08 / 13 / 2013", "%d / %m / %Y")
as.Date("08 / 13 / 2013", "%M / %D / %Y")

# Fresh start

# Regression analysis: overview
# 1. Prediction
# 2. Relationship analysis
# 3. Modeling

# Simple linear regression

data(mtcars)
head(mtcars)

model <- lm(mpg ~ hp, data = mtcars)
summary(model)

# Multiple R-squared (coefficient of determination)
# Indicates how well the model explains the variation in the dependent
# variable; it takes values between 0 and 1. An R-squared of 1 means every
# observation lies exactly on the fitted regression line.
# Of the three regression models fitted below, the third,
# model_3 <- lm(height ~ weight, data = women), performed best.
# As a rule of thumb, a model with an R-squared of 0.75 or higher has good
# explanatory power.

# t value (test statistic) = Estimate / Std. Error
# The larger the absolute t value, the more statistically significant the variable.
# Signif. codes mark significance levels: a '*' means the variable is
# significant at the 0.05 level.

# The F-statistic and its p-value test the overall significance of the model.
# H0 (null)       : the regression coefficients are 0.
# H1 (alternative): the regression coefficients are not 0.
# Example: for model_3, p-value 1.091e-14 < 0.05 (significance level), so the
# null is rejected and the model is statistically significant.
# In short, if even one coefficient is nonzero, the model is significant.

# Degrees of freedom = sample size - number of predictors - 1
# n - 1 - 1 = 32 - 1 - 1 = 30
# "1 and 30 DF": 1 (numerator DF) = number of predictor coefficients in the model;
# 30 (denominator DF) = residual degrees of freedom, i.e. n - p - 1

# Residual standard error : 3.863
# On average, predictions differ from the observed values by about 3.863.

# Estimate  : regression coefficient
# Intercept : constant term
# Fitted line: mpg = 30.09886 - 0.06823 * hp

# Scatter plot with the fitted regression line
library(ggplot2)

ggplot(mtcars, aes(x = hp, y = mpg)) +
  geom_point() +                            # scatter plot
  geom_smooth(method = "lm", se = TRUE) +   # regression line with confidence band
  theme_minimal() +
  labs(title = "MPG vs Horsepower", x = "Horsepower", y = "Miles per Gallon")

# Predict on new data
new_data <- data.frame(hp = c(100, 150, 200))
predict(model, newdata = new_data)
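# Sketch (my addition): the same predictions by hand from the coefficients
coef(model)["(Intercept)"] + coef(model)["hp"] * c(100, 150, 200)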


## 2025.5.28
# The faithful dataset
data(faithful)
head(faithful)
##   eruptions waiting
## 1     3.600      79
## 2     1.800      54
## 3     3.333      74
## 4     2.283      62
## 5     4.533      85
## 6     2.883      55
summary(faithful)
##    eruptions        waiting    
##  Min.   :1.600   Min.   :43.0  
##  1st Qu.:2.163   1st Qu.:58.0  
##  Median :4.000   Median :76.0  
##  Mean   :3.488   Mean   :70.9  
##  3rd Qu.:4.454   3rd Qu.:82.0  
##  Max.   :5.100   Max.   :96.0
str(faithful)
## 'data.frame':    272 obs. of  2 variables:
##  $ eruptions: num  3.6 1.8 3.33 2.28 4.53 ...
##  $ waiting  : num  79 54 74 62 85 55 88 85 51 85 ...
model_1 <- lm(eruptions ~ waiting, data = faithful)
summary(model_1)
## 
## Call:
## lm(formula = eruptions ~ waiting, data = faithful)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.29917 -0.37689  0.03508  0.34909  1.19329 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -1.874016   0.160143  -11.70   <2e-16 ***
## waiting      0.075628   0.002219   34.09   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4965 on 270 degrees of freedom
## Multiple R-squared:  0.8115, Adjusted R-squared:  0.8108 
## F-statistic:  1162 on 1 and 270 DF,  p-value: < 2.2e-16
format(2.2e-16, scientific=FALSE)
## [1] "0.00000000000000022"
# Residuals    : residuals (errors)
# Coefficients : regression coefficients (Estimate)
# (Intercept)  : intercept (constant): -1.874016
# Coefficient of the predictor waiting: 0.075628
# Pr(>|t|) is the p-value.
# 0.00000000000000022 < significance level 0.05
# H0 (null)       : the regression coefficient is 0.
# H1 (alternative): the regression coefficient is not 0.
# The null is rejected, so waiting is a statistically significant variable.

# Degrees of freedom = sample size - number of predictors - 1
# n - 1 - 1 = 272 - 1 - 1 = 270
# "1 and 270 DF": 1 (numerator DF) = number of predictor coefficients;
# 270 (denominator DF) = residual degrees of freedom, i.e. n - p - 1
# Residual standard error : 0.4965
# On average, predictions differ from the observed values by about 0.4965.

# The pressure dataset
data(pressure)
head(pressure)
##   temperature pressure
## 1           0   0.0002
## 2          20   0.0012
## 3          40   0.0060
## 4          60   0.0300
## 5          80   0.0900
## 6         100   0.2700
summary(pressure)
##   temperature     pressure       
##  Min.   :  0   Min.   :  0.0002  
##  1st Qu.: 90   1st Qu.:  0.1800  
##  Median :180   Median :  8.8000  
##  Mean   :180   Mean   :124.3367  
##  3rd Qu.:270   3rd Qu.:126.5000  
##  Max.   :360   Max.   :806.0000
str(pressure)
## 'data.frame':    19 obs. of  2 variables:
##  $ temperature: num  0 20 40 60 80 100 120 140 160 180 ...
##  $ pressure   : num  0.0002 0.0012 0.006 0.03 0.09 0.27 0.75 1.85 4.2 8.8 ...
model_2 <- lm(temperature ~ pressure, data = pressure)
summary(model_2)
## 
## Call:
## lm(formula = temperature ~ pressure, data = pressure)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -132.791  -62.813    6.507   67.033   90.759 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 132.79072   19.94314   6.658 4.03e-06 ***
## pressure      0.37969    0.07929   4.788 0.000171 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 75.56 on 17 degrees of freedom
## Multiple R-squared:  0.5742, Adjusted R-squared:  0.5492 
## F-statistic: 22.93 on 1 and 17 DF,  p-value: 0.000171
# Residuals    : residuals (errors)
# Coefficients : regression coefficients (Estimate)
# (Intercept)  : intercept (constant): 132.79072
# Coefficient of the predictor pressure: 0.37969
# Pr(>|t|) is the p-value.
# 0.000171 < significance level 0.05
# H0 (null)       : the regression coefficient is 0.
# H1 (alternative): the regression coefficient is not 0.
# The null is rejected, so pressure is a statistically significant variable.

# Degrees of freedom = sample size - number of predictors - 1
# n - 1 - 1 = 19 - 1 - 1 = 17
# "1 and 17 DF": 1 (numerator DF) = number of predictor coefficients;
# 17 (denominator DF) = residual degrees of freedom, i.e. n - p - 1
# Residual standard error : 75.56
# On average, predictions differ from the observed values by about 75.56.

# The women dataset
data(women)
head(women)
##   height weight
## 1     58    115
## 2     59    117
## 3     60    120
## 4     61    123
## 5     62    126
## 6     63    129
summary(women)
##      height         weight     
##  Min.   :58.0   Min.   :115.0  
##  1st Qu.:61.5   1st Qu.:124.5  
##  Median :65.0   Median :135.0  
##  Mean   :65.0   Mean   :136.7  
##  3rd Qu.:68.5   3rd Qu.:148.0  
##  Max.   :72.0   Max.   :164.0
str(women)
## 'data.frame':    15 obs. of  2 variables:
##  $ height: num  58 59 60 61 62 63 64 65 66 67 ...
##  $ weight: num  115 117 120 123 126 129 132 135 139 142 ...
model_3 <- lm(height ~ weight, data = women)
summary(model_3)
## 
## Call:
## lm(formula = height ~ weight, data = women)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.83233 -0.26249  0.08314  0.34353  0.49790 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 25.723456   1.043746   24.64 2.68e-12 ***
## weight       0.287249   0.007588   37.85 1.09e-14 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.44 on 13 degrees of freedom
## Multiple R-squared:  0.991,  Adjusted R-squared:  0.9903 
## F-statistic:  1433 on 1 and 13 DF,  p-value: 1.091e-14
format(1.091e-14, scientific=FALSE)
## [1] "0.00000000000001091"
# Residuals    : residuals (errors)
# Coefficients : regression coefficients (Estimate)
# (Intercept)  : intercept (constant): 25.723456
# Coefficient of the predictor weight: 0.287249
# Pr(>|t|) is the p-value.
# 0.00000000000001091 < significance level 0.05
# H0 (null)       : the regression coefficient is 0.
# H1 (alternative): the regression coefficient is not 0.
# The null is rejected, so weight is a statistically significant variable.

# Degrees of freedom = sample size - number of predictors - 1
# n - 1 - 1 = 15 - 1 - 1 = 13
# "1 and 13 DF": 1 (numerator DF) = number of predictor coefficients;
# 13 (denominator DF) = residual degrees of freedom, i.e. n - p - 1
# Residual standard error : 0.44
# On average, predictions differ from the observed values by about 0.44.

library(dplyr)

glimpse(mtcars)

install.packages("lmtest")   # run once


# 2025.5.29

model <- lm(mpg~hp, data = mtcars)
# One predictor: simple linear (straight-line) regression

par(mfrow = c(2, 2))

library(lmtest)
## Loading required package: zoo
## 
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
## 
##     as.Date, as.Date.numeric
dwtest(model)
## 
##  Durbin-Watson test
## 
## data:  model
## DW = 1.1338, p-value = 0.00411
## alternative hypothesis: true autocorrelation is greater than 0
# Durbin-Watson test for autocorrelation
# p-value < 0.05 → residuals are autocorrelated → independence assumption violated
# A DW statistic near 2 is ideal; below 1 suggests positive autocorrelation
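# Not in the original notes: lmtest also provides the Breusch-Pagan test as a
# formal check for heteroscedasticity; p < 0.05 suggests non-constant variance
bptest(model)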

plot(model, which = 1)

# After fitting a regression model, plot(model) shows four diagnostic
# residual plots; they are the main tool for checking whether the model's
# assumptions hold. The meaning of each plot (which = 1 to 4) follows.

# Residuals vs Fitted plot
# Purpose: check linearity and homoscedasticity
# Ideal: residuals scattered randomly around 0
# U-shape / curve → possible nonlinear relationship
# Residual spread growing or shrinking → possible heteroscedasticity
# (heteroscedasticity means the residual variance is not constant)

# The residuals should scatter randomly around 0; a U-shape or other
# nonlinear pattern indicates a violation of the linearity assumption.

plot(model, which = 2)

# Normal Q-Q plot
# Ideal: points fall on a straight diagonal line
# Tails bending up or down → residuals may not be normally distributed
# Extreme points → possible influence of outliers

plot(model, which = 3)

# Scale-Location plot (sqrt(|standardized residuals|) vs fitted values)
# Purpose: check homoscedasticity
# Ideal: points spread evenly, at random around a horizontal line
# Upward spread or funnel shape → heteroscedasticity present

plot(model, which = 4)

# Cook's distance plot
# Purpose: identify influential observations
# Ideal: every observation has a low Cook's distance
# An observation far above the rest → that point strongly influences the model



#### faithful data

dwtest(model_1)
## 
##  Durbin-Watson test
## 
## data:  model_1
## DW = 2.561, p-value = 1
## alternative hypothesis: true autocorrelation is greater than 0
plot(model_1, which = 1)
plot(model_1, which = 2)
plot(model_1, which = 3)
plot(model_1, which = 4)

# 1. Durbin-Watson test: p-value (= 1) > 0.05 -> no autocorrelation in the residuals
# 2. Residuals vs Fitted plot: a nonlinear pattern would indicate a linearity violation
# 3. Q-Q plot: ideally, points lie in a straight line along the diagonal
# 4. Scale-Location plot: upward spread or a funnel shape would indicate heteroscedasticity
# 5. Cook's distance plot: observations 158, 197, 203 stand well above the rest



#### pressure data

dwtest(model_2)
## 
##  Durbin-Watson test
## 
## data:  model_2
## DW = 0.13453, p-value = 5.205e-13
## alternative hypothesis: true autocorrelation is greater than 0
plot(model_2, which = 1)
plot(model_2, which = 2)
plot(model_2, which = 3)
plot(model_2, which = 4)

# 1. Durbin-Watson test: p-value (= 5.205e-13) < 0.05 -> autocorrelation among the residuals
# 2. Residuals vs Fitted plot: nonlinear pattern -> linearity assumption violated
# 3. Q-Q plot: bent tails -> residuals may not be normally distributed
# 4. Scale-Location plot: upward spread or a funnel shape -> heteroscedasticity
# 5. Cook's Distance plot: observation 19 stands out




#### women data

dwtest(model_3)
## 
##  Durbin-Watson test
## 
## data:  model_3
## DW = 0.31156, p-value = 9.623e-08
## alternative hypothesis: true autocorrelation is greater than 0
plot(model_3, which = 1)
plot(model_3, which = 2)
plot(model_3, which = 3)
plot(model_3, which = 4)

# 1. Durbin-Watson test: p-value (= 9.623e-08) < 0.05 -> autocorrelation among the residuals
# 2. Residuals vs Fitted plot: nonlinear pattern -> linearity assumption violated
# 3. Q-Q plot: bent tails -> residuals may not be normally distributed
# 4. Scale-Location plot: upward spread or a funnel shape -> heteroscedasticity
# 5. Cook's Distance plot: observation 15 stands out

Generate gender and score data

gender <- factor(c(rep("male", 10), rep("female", 10)))
score <- c(70, 72, 68, 75, 74, 71, 73, 76, 69, 72,
           65, 66, 64, 67, 63, 62, 68, 66, 65, 64)

library(dplyr) # for glimpse()
df <- data.frame(gender, score)
glimpse(df)
model <- lm(score ~ gender, data = df)
summary(model)
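With a two-level factor like this, the slope coefficient is simply the difference between the two group means; a quick check (an addition, not part of the original notes):

tapply(df$score, df$gender, mean) # group means ("female" is the reference level)
coef(model)                       # gendermale = mean(male) - mean(female)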

New session

rm(list=ls())
install.packages("car") # install once
library(car)            # load in every session

data(mtcars)
summary(mtcars)
model <- lm(mpg ~ wt + hp + disp, data = mtcars)
vif(model)

2025.5.30 New session

Multiple regression analysis 1

Check the data

data(mtcars)

Fit a multiple linear regression model

model <- lm(mpg ~ wt + hp + drat, data = mtcars)

Summarize the regression results

summary(model)

Multiple regression analysis 2

Check the data

data(iris)

Fit a multiple linear regression model

model <- lm(Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width, data = iris)

Summarize the regression results

summary(model)

Multiple regression analysis 3

Remove missing values

data(airquality)
airquality_clean <- na.omit(airquality)

Fit a multiple linear regression model

model <- lm(Ozone ~ Solar.R + Wind + Temp, data = airquality_clean)

Summarize the regression results

summary(model)
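Since Solar.R, Wind, and Temp may be correlated with one another, it can be worth checking multicollinearity here as well; a minimal sketch (an addition, not part of the original notes) using car::vif(), loaded earlier in these notes:

library(car)
vif(lm(Ozone ~ Solar.R + Wind + Temp, data = airquality_clean))
# Values above roughly 5-10 are commonly read as a multicollinearity warning.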


### 2025.6.2 New session

data(mtcars)

# Response variable: mpg
# Full model including every predictor
full_model <- lm(mpg ~ ., data = mtcars)

# Starting model with intercept only
null_model <- lm(mpg ~ 1, data = mtcars)

# 1. Backward elimination
backward_model <- step(full_model, direction = "backward")
## Start:  AIC=70.9
## mpg ~ cyl + disp + hp + drat + wt + qsec + vs + am + gear + carb
## 
##        Df Sum of Sq    RSS    AIC
## - cyl   1    0.0799 147.57 68.915
## - vs    1    0.1601 147.66 68.932
## - carb  1    0.4067 147.90 68.986
## - gear  1    1.3531 148.85 69.190
## - drat  1    1.6270 149.12 69.249
## - disp  1    3.9167 151.41 69.736
## - hp    1    6.8399 154.33 70.348
## - qsec  1    8.8641 156.36 70.765
## <none>              147.49 70.898
## - am    1   10.5467 158.04 71.108
## - wt    1   27.0144 174.51 74.280
## 
## Step:  AIC=68.92
## mpg ~ disp + hp + drat + wt + qsec + vs + am + gear + carb
## 
##        Df Sum of Sq    RSS    AIC
## - vs    1    0.2685 147.84 66.973
## - carb  1    0.5201 148.09 67.028
## - gear  1    1.8211 149.40 67.308
## - drat  1    1.9826 149.56 67.342
## - disp  1    3.9009 151.47 67.750
## - hp    1    7.3632 154.94 68.473
## <none>              147.57 68.915
## - qsec  1   10.0933 157.67 69.032
## - am    1   11.8359 159.41 69.384
## - wt    1   27.0280 174.60 72.297
## 
## Step:  AIC=66.97
## mpg ~ disp + hp + drat + wt + qsec + am + gear + carb
## 
##        Df Sum of Sq    RSS    AIC
## - carb  1    0.6855 148.53 65.121
## - gear  1    2.1437 149.99 65.434
## - drat  1    2.2139 150.06 65.449
## - disp  1    3.6467 151.49 65.753
## - hp    1    7.1060 154.95 66.475
## <none>              147.84 66.973
## - am    1   11.5694 159.41 67.384
## - qsec  1   15.6830 163.53 68.200
## - wt    1   27.3799 175.22 70.410
## 
## Step:  AIC=65.12
## mpg ~ disp + hp + drat + wt + qsec + am + gear
## 
##        Df Sum of Sq    RSS    AIC
## - gear  1     1.565 150.09 63.457
## - drat  1     1.932 150.46 63.535
## <none>              148.53 65.121
## - disp  1    10.110 158.64 65.229
## - am    1    12.323 160.85 65.672
## - hp    1    14.826 163.35 66.166
## - qsec  1    26.408 174.94 68.358
## - wt    1    69.127 217.66 75.350
## 
## Step:  AIC=63.46
## mpg ~ disp + hp + drat + wt + qsec + am
## 
##        Df Sum of Sq    RSS    AIC
## - drat  1     3.345 153.44 62.162
## - disp  1     8.545 158.64 63.229
## <none>              150.09 63.457
## - hp    1    13.285 163.38 64.171
## - am    1    20.036 170.13 65.466
## - qsec  1    25.574 175.67 66.491
## - wt    1    67.572 217.66 73.351
## 
## Step:  AIC=62.16
## mpg ~ disp + hp + wt + qsec + am
## 
##        Df Sum of Sq    RSS    AIC
## - disp  1     6.629 160.07 61.515
## <none>              153.44 62.162
## - hp    1    12.572 166.01 62.682
## - qsec  1    26.470 179.91 65.255
## - am    1    32.198 185.63 66.258
## - wt    1    69.043 222.48 72.051
## 
## Step:  AIC=61.52
## mpg ~ hp + wt + qsec + am
## 
##        Df Sum of Sq    RSS    AIC
## - hp    1     9.219 169.29 61.307
## <none>              160.07 61.515
## - qsec  1    20.225 180.29 63.323
## - am    1    25.993 186.06 64.331
## - wt    1    78.494 238.56 72.284
## 
## Step:  AIC=61.31
## mpg ~ wt + qsec + am
## 
##        Df Sum of Sq    RSS    AIC
## <none>              169.29 61.307
## - am    1    26.178 195.46 63.908
## - qsec  1   109.034 278.32 75.217
## - wt    1   183.347 352.63 82.790
summary(backward_model)
## 
## Call:
## lm(formula = mpg ~ wt + qsec + am, data = mtcars)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.4811 -1.5555 -0.7257  1.4110  4.6610 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)   9.6178     6.9596   1.382 0.177915    
## wt           -3.9165     0.7112  -5.507 6.95e-06 ***
## qsec          1.2259     0.2887   4.247 0.000216 ***
## am            2.9358     1.4109   2.081 0.046716 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 2.459 on 28 degrees of freedom
## Multiple R-squared:  0.8497, Adjusted R-squared:  0.8336 
## F-statistic: 52.75 on 3 and 28 DF,  p-value: 1.21e-11
# 2. Forward selection
forward_model <- step(null_model, 
                      scope = list(lower = null_model, upper = full_model),
                      direction = "forward")
## Start:  AIC=115.94
## mpg ~ 1
## 
##        Df Sum of Sq     RSS     AIC
## + wt    1    847.73  278.32  73.217
## + cyl   1    817.71  308.33  76.494
## + disp  1    808.89  317.16  77.397
## + hp    1    678.37  447.67  88.427
## + drat  1    522.48  603.57  97.988
## + vs    1    496.53  629.52  99.335
## + am    1    405.15  720.90 103.672
## + carb  1    341.78  784.27 106.369
## + gear  1    259.75  866.30 109.552
## + qsec  1    197.39  928.66 111.776
## <none>              1126.05 115.943
## 
## Step:  AIC=73.22
## mpg ~ wt
## 
##        Df Sum of Sq    RSS    AIC
## + cyl   1    87.150 191.17 63.198
## + hp    1    83.274 195.05 63.840
## + qsec  1    82.858 195.46 63.908
## + vs    1    54.228 224.09 68.283
## + carb  1    44.602 233.72 69.628
## + disp  1    31.639 246.68 71.356
## <none>              278.32 73.217
## + drat  1     9.081 269.24 74.156
## + gear  1     1.137 277.19 75.086
## + am    1     0.002 278.32 75.217
## 
## Step:  AIC=63.2
## mpg ~ wt + cyl
## 
##        Df Sum of Sq    RSS    AIC
## + hp    1   14.5514 176.62 62.665
## + carb  1   13.7724 177.40 62.805
## <none>              191.17 63.198
## + qsec  1   10.5674 180.60 63.378
## + gear  1    3.0281 188.14 64.687
## + disp  1    2.6796 188.49 64.746
## + vs    1    0.7059 190.47 65.080
## + am    1    0.1249 191.05 65.177
## + drat  1    0.0010 191.17 65.198
## 
## Step:  AIC=62.66
## mpg ~ wt + cyl + hp
## 
##        Df Sum of Sq    RSS    AIC
## <none>              176.62 62.665
## + am    1    6.6228 170.00 63.442
## + disp  1    6.1762 170.44 63.526
## + carb  1    2.5187 174.10 64.205
## + drat  1    2.2453 174.38 64.255
## + qsec  1    1.4010 175.22 64.410
## + gear  1    0.8558 175.76 64.509
## + vs    1    0.0599 176.56 64.654
summary(forward_model)
## 
## Call:
## lm(formula = mpg ~ wt + cyl + hp, data = mtcars)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.9290 -1.5598 -0.5311  1.1850  5.8986 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 38.75179    1.78686  21.687  < 2e-16 ***
## wt          -3.16697    0.74058  -4.276 0.000199 ***
## cyl         -0.94162    0.55092  -1.709 0.098480 .  
## hp          -0.01804    0.01188  -1.519 0.140015    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 2.512 on 28 degrees of freedom
## Multiple R-squared:  0.8431, Adjusted R-squared:  0.8263 
## F-statistic: 50.17 on 3 and 28 DF,  p-value: 2.184e-11
# 3. Stepwise selection
stepwise_model <- step(null_model,
                       scope = list(lower = null_model, upper = full_model),
                       direction = "both")
## Start:  AIC=115.94
## mpg ~ 1
## 
##        Df Sum of Sq     RSS     AIC
## + wt    1    847.73  278.32  73.217
## + cyl   1    817.71  308.33  76.494
## + disp  1    808.89  317.16  77.397
## + hp    1    678.37  447.67  88.427
## + drat  1    522.48  603.57  97.988
## + vs    1    496.53  629.52  99.335
## + am    1    405.15  720.90 103.672
## + carb  1    341.78  784.27 106.369
## + gear  1    259.75  866.30 109.552
## + qsec  1    197.39  928.66 111.776
## <none>              1126.05 115.943
## 
## Step:  AIC=73.22
## mpg ~ wt
## 
##        Df Sum of Sq     RSS     AIC
## + cyl   1     87.15  191.17  63.198
## + hp    1     83.27  195.05  63.840
## + qsec  1     82.86  195.46  63.908
## + vs    1     54.23  224.09  68.283
## + carb  1     44.60  233.72  69.628
## + disp  1     31.64  246.68  71.356
## <none>               278.32  73.217
## + drat  1      9.08  269.24  74.156
## + gear  1      1.14  277.19  75.086
## + am    1      0.00  278.32  75.217
## - wt    1    847.73 1126.05 115.943
## 
## Step:  AIC=63.2
## mpg ~ wt + cyl
## 
##        Df Sum of Sq    RSS    AIC
## + hp    1    14.551 176.62 62.665
## + carb  1    13.772 177.40 62.805
## <none>              191.17 63.198
## + qsec  1    10.567 180.60 63.378
## + gear  1     3.028 188.14 64.687
## + disp  1     2.680 188.49 64.746
## + vs    1     0.706 190.47 65.080
## + am    1     0.125 191.05 65.177
## + drat  1     0.001 191.17 65.198
## - cyl   1    87.150 278.32 73.217
## - wt    1   117.162 308.33 76.494
## 
## Step:  AIC=62.66
## mpg ~ wt + cyl + hp
## 
##        Df Sum of Sq    RSS    AIC
## <none>              176.62 62.665
## - hp    1    14.551 191.17 63.198
## + am    1     6.623 170.00 63.442
## + disp  1     6.176 170.44 63.526
## - cyl   1    18.427 195.05 63.840
## + carb  1     2.519 174.10 64.205
## + drat  1     2.245 174.38 64.255
## + qsec  1     1.401 175.22 64.410
## + gear  1     0.856 175.76 64.509
## + vs    1     0.060 176.56 64.654
## - wt    1   115.354 291.98 76.750
summary(stepwise_model)
## 
## Call:
## lm(formula = mpg ~ wt + cyl + hp, data = mtcars)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.9290 -1.5598 -0.5311  1.1850  5.8986 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 38.75179    1.78686  21.687  < 2e-16 ***
## wt          -3.16697    0.74058  -4.276 0.000199 ***
## cyl         -0.94162    0.55092  -1.709 0.098480 .  
## hp          -0.01804    0.01188  -1.519 0.140015    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 2.512 on 28 degrees of freedom
## Multiple R-squared:  0.8431, Adjusted R-squared:  0.8263 
## F-statistic: 50.17 on 3 and 28 DF,  p-value: 2.184e-11
# direction = "both": performs both forward selection and backward elimination
# step() adds and drops variables by AIC to search for the best combination.
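# To compare the selected models directly (a sketch, not in the original notes):
AIC(backward_model, forward_model, stepwise_model)
# Note: step() prints extractAIC() values, which differ from AIC() by an additive
# constant for a fixed dataset, so only differences between models are meaningful.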

2025.6.9 Start

rm(list=ls())

1. Load the data (built-in dataset)

data(chickwts)
df <- chickwts
summary(df)
str(df)
head(df)
# 2. Summary of the chickwts$weight vector
summary(chickwts$weight)
length(chickwts$weight) # number of observations
# 3. One-sample t-test: test whether the mean is 260 (two-sided)
t.test(chickwts$weight, mu = 260)

Interpreting the result

- Null hypothesis (H0): mean weight = 260 g

- Alternative hypothesis (H1): mean weight ≠ 260 g

- If p-value < 0.05, reject the null hypothesis (a significant difference from 260); a programmatic check follows below.
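A minimal sketch (an addition, not from the original notes) of reading this decision off the test object:

res <- t.test(chickwts$weight, mu = 260)
res$p.value        # the p-value as a plain number
res$p.value < 0.05 # if TRUE, reject H0 at the 5% level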

Example of a two-sided test of whether the mean sleep increase (extra) in group 1 differs from 1.5

1. Load the sleep data

data(sleep)
head(sleep)
summary(sleep)
# 2. Extract group 1 only
group1 <- subset(sleep, group == 1)$extra
# 3. One-sample t-test against a population mean of 1.5
t.test(group1, mu = 1.5)

Null hypothesis: the mean weight of the ctrl group is 5.0.

# 1. Load the data
data("PlantGrowth")
# 2. Extract the ctrl group
ctrl <- subset(PlantGrowth, group == "ctrl")$weight
# 3. One-sample t-test (mu = 5)
t.test(ctrl, mu = 5)

Comparing two groups

data("PlantGrowth")
# Control vs. treatment 1
group_ctrl <- subset(PlantGrowth, group == "ctrl")$weight
group_trt1 <- subset(PlantGrowth, group == "trt1")$weight
# Independent two-sample t-test
t.test(group_ctrl, group_trt1, var.equal = TRUE)

## Independent two-sample t-test with the sleep dataset
# 1. Load the data
data(sleep)
# 2. Check the structure
str(sleep)
# 'data.frame': 20 obs. of 3 variables:
#  $ extra: num 0.7 -1.6 -0.2 -1.2 -0.1 3.4 3.7 0.8 0 2 ...
#  $ group: Factor w/ 2 levels "1","2": 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2
#  $ ID   : Factor w/ 10 levels "1","2",..: 1 2 3 4 5 6 7 8 9 10 ...
# 3. Run the independent two-sample t-test
t_test_result <- t.test(extra ~ group, data = sleep, var.equal = TRUE)
# 4. Print the result
print(t_test_result)
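# var.equal = TRUE assumes equal variances in the two groups; a quick sketch
# (not part of the original notes) of checking that assumption with an F test:
var.test(extra ~ group, data = sleep)
# A large p-value here makes the equal-variance assumption plausible;
# otherwise omit var.equal = TRUE to get Welch's t-test.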

library(ggplot2)
# Paired-sample t-test
# using the sleep dataset
data(sleep)
ggplot(sleep, aes(group, extra)) + geom_boxplot()
# Run the paired t-test
before <- sleep$extra[1:10]
after <- sleep$extra[11:20]

t.test(before, after, paired = TRUE)
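# A paired t-test is equivalent to a one-sample t-test on the within-pair
# differences; a quick check (an addition, not from the original notes):
t.test(before - after, mu = 0) # same t statistic and p-value as above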

2025.6.10 Start

rm(list=ls())
####################

data("Nile")
library(dplyr)

glimpse(Nile)
head(Nile)

install.packages("tseries")

library(tseries)
# install.packages("zoo")
library(zoo)
library(ggplot2)

str(Nile)
ts.plot(Nile, main = "Flows by year", ylab = "flows", col = "blue")

2. Check mean stability (plot a moving average)

roll_mean <- rollmean(Nile, k = 10, align = "right", fill = NA)
plot(Nile, main = "mean stability", ylab = "flows")
lines(roll_mean, col = "red", lwd = 2)

3. Check variance stability (plot a rolling variance)

roll_var <- rollapply(Nile, width = 10, FUN = var, align = "right", fill = NA)
plot(roll_var, type = "l", col = "darkgreen", lwd = 2,
     main = "variance stability", ylab = "variance")

4. Check that covariance depends only on the lag (ACF plot)

acf(Nile, main = "ACF graph")

For example, consider monthly rainfall data. There is a difference between July and August, and between March and April. But if that difference depends only on how many months apart the observations are, and it does not matter whether it is March 2020 or March 2024, then the covariance is said to be independent of absolute time and to depend only on the time lag; a sketch of inspecting this follows below.
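A quick sketch (an addition, not from the original notes): acf() can return the estimated autocovariances themselves, one value per lag, rather than correlations:

acf(Nile, type = "covariance", plot = FALSE) # autocovariance estimates by lag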

First differencing

Nile.diff1 <- diff(Nile, differences = 1)
plot(Nile.diff1)

Second differencing

Nile.diff2 <- diff(Nile, differences = 2)
plot(Nile.diff2)
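Whether differencing actually made the series stationary can be tested formally; a sketch (not part of the original notes) using tseries::adf.test(), loaded above:

adf.test(Nile)       # augmented Dickey-Fuller test on the raw series
adf.test(Nile.diff1) # a small p-value suggests the differenced series is stationary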

Applying and plotting a moving average

1. Load the packages

library(zoo)
library(ggplot2)

2. Data

ts_data <- AirPassengers

3. Moving average

ma_12 <- rollmean(ts_data, k = 12, align = "center", fill = NA)

4. Build a data frame for plotting

df <- data.frame(
  date = as.Date(as.yearmon(time(ts_data))), # as.yearmon (zoo) converts decimal years correctly
  original = as.numeric(ts_data),
  ma = as.numeric(ma_12)
)

5. Plot

ggplot(df, aes(x = date)) +
  geom_line(aes(y = original), color = "gray50", size = 1, linetype = "dashed") +
  geom_line(aes(y = ma), color = "blue", size = 1.2) +
  labs(title = "AirPassengers : ori vs 12 month", x = "year", y = "number of passengers") +
  theme_minimal()

Understanding the standard decomposition plot of a time series

# Check the class
class(AirPassengers)

# Decompose the series into its components
m <- decompose(AirPassengers, type = "additive")

# Plot the decompose() result
plot(m)
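# The object returned by decompose() stores each component separately;
# a quick sketch (not part of the original notes):
head(m$trend)    # trend component (NA at the window edges)
head(m$seasonal) # repeating seasonal component
head(m$random)   # remainder after removing trend and seasonality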


Bootstrap resampling with the iris data

1. Load the package

library(caret)
# 2. Prepare the data
data(iris)
set.seed(123)
# 3. Set up bootstrap resampling
control <- trainControl(method = "boot", number = 25) # 25 resamples with replacement
# 4. Fit the model (e.g. a classification tree)
model <- train(Species ~ ., data = iris,
               method = "rpart",
               trControl = control)
# 5. Check the result
print(model)
model$resample

6. Per-resample performance (optional)

head(model$resample)

####################
##### 2025.6.11 Start
rm(list=ls())
####################


# 1. Prepare the data
data(iris)
set.seed(123) # for reproducibility
# 2. Shuffle and split the data (70% training, 30% test)
index <- sample(1:nrow(iris), 0.7 * nrow(iris)) # randomly select 70% of the row indices
train_data <- iris[index, ]
test_data <- iris[-index, ]

nrow(train_data)
## [1] 105
nrow(test_data)
## [1] 45
### k-Fold cross-validation
# install.packages("caret")
library(caret)
## Loading required package: lattice
# 1. Load the package
library(caret)
# 2. Prepare the data
data(iris)
set.seed(123) # for reproducibility
# 3. Set up cross-validation (10-fold)
control <- trainControl(method = "cv", number = 10)
# 4. Fit the model (e.g. a decision tree)
model <- train(Species ~ ., data = iris,
  method = "rpart", # classification tree
  trControl = control)
model$resample
##     Accuracy Kappa Resample
## 1  0.8666667   0.8   Fold02
## 2  0.8666667   0.8   Fold01
## 3  1.0000000   1.0   Fold03
## 4  0.9333333   0.9   Fold06
## 5  0.9333333   0.9   Fold05
## 6  0.9333333   0.9   Fold04
## 7  1.0000000   1.0   Fold07
## 8  0.8666667   0.8   Fold10
## 9  1.0000000   1.0   Fold09
## 10 1.0000000   1.0   Fold08
mean(model$resample$Accuracy)
## [1] 0.94
# Standard deviation
sd(model$resample$Accuracy)
## [1] 0.058373
# An accuracy SD of about 0.01-0.03 is a reasonable level

predict(model,test_data)
##  [1] setosa     setosa     setosa     setosa     setosa     setosa    
##  [7] setosa     setosa     setosa     setosa     setosa     setosa    
## [13] setosa     setosa     versicolor versicolor versicolor versicolor
## [19] versicolor versicolor versicolor versicolor versicolor versicolor
## [25] versicolor versicolor versicolor versicolor versicolor versicolor
## [31] versicolor versicolor virginica  virginica  virginica  virginica 
## [37] virginica  virginica  virginica  virginica  virginica  versicolor
## [43] virginica  virginica  virginica 
## Levels: setosa versicolor virginica
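# To quantify test-set performance (a quick sketch, not part of the original notes):
confusionMatrix(predict(model, test_data), test_data$Species)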
# Available caret models
getModelInfo()
## $ada
## $ada$label
## [1] "Boosted Classification Trees"
## 
## $ada$library
## [1] "ada"  "plyr"
## 
## $ada$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, c("nu", "maxdepth"), function(x) c(iter = max(x$iter)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$iter)) {
##         index <- which(grid$maxdepth == loop$maxdepth[i] & grid$nu == 
##             loop$nu[i])
##         trees <- grid[index, "iter"]
##         submodels[[i]] <- data.frame(iter = trees[trees != loop$iter[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $ada$type
## [1] "Classification"
## 
## $ada$parameters
##   parameter   class          label
## 1      iter numeric         #Trees
## 2  maxdepth numeric Max Tree Depth
## 3        nu numeric  Learning Rate
## 
## $ada$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out = expand.grid(iter = floor((1:len) * 50), maxdepth = seq(1, 
##             len), nu = 0.1)
##     }
##     else {
##         out <- data.frame(iter = sample(1:1000, replace = TRUE, 
##             size = len), maxdepth = sample(1:10, replace = TRUE, 
##             size = len), nu = runif(len, min = 0.001, max = 0.5))
##     }
##     out
## }
## 
## $ada$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$maxdepth <- param$maxdepth
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- rpart::rpart.control(maxdepth = param$maxdepth, 
##         cp = -1, minsplit = 0, xval = 0)
##     modelArgs <- c(list(x = x, y = y, iter = param$iter, nu = param$nu, 
##         control = ctl), theDots)
##     out <- do.call(ada::ada, modelArgs)
##     out
## }
## 
## $ada$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, n.iter = modelFit$tuneValue$iter)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = length(submodels$iter) + 
##             1)
##         tmp[[1]] <- out
##         for (i in seq(along = submodels$iter)) {
##             tmp[[i + 1]] <- predict(modelFit, newdata, n.iter = submodels$iter[[i]])
##         }
##         out <- lapply(tmp, as.character)
##     }
##     out
## }
## 
## $ada$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "prob", n.iter = modelFit$tuneValue$iter)
##     colnames(out) <- modelFit$obsLevels
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = length(submodels$iter) + 
##             1)
##         tmp[[1]] <- out
##         for (i in seq(along = submodels$iter)) {
##             tmp[[i + 1]] <- predict(modelFit, newdata, type = "prob", 
##                 n.iter = submodels$iter[[i]])
##             colnames(tmp[[i + 1]]) <- modelFit$obsLevels
##         }
##         out <- lapply(tmp, as.data.frame)
##     }
##     out
## }
## 
## $ada$levels
## function (x) 
## x$obsLevels
## 
## $ada$tags
## [1] "Tree-Based Model"              "Ensemble Model"               
## [3] "Boosting"                      "Implicit Feature Selection"   
## [5] "Two Class Only"                "Handle Missing Predictor Data"
## 
## $ada$sort
## function (x) 
## x[order(x$iter, x$maxdepth, x$nu), ]
## 
## 
## $AdaBag
## $AdaBag$label
## [1] "Bagged AdaBoost"
## 
## $AdaBag$library
## [1] "adabag" "plyr"  
## 
## $AdaBag$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, c("maxdepth"), function(x) c(mfinal = max(x$mfinal)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$mfinal)) {
##         index <- which(grid$maxdepth == loop$maxdepth[i])
##         trees <- grid[index, "mfinal", drop = FALSE]
##         submodels[[i]] <- data.frame(mfinal = trees[trees != 
##             loop$mfinal[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $AdaBag$type
## [1] "Classification"
## 
## $AdaBag$parameters
##   parameter   class          label
## 1    mfinal numeric         #Trees
## 2  maxdepth numeric Max Tree Depth
## 
## $AdaBag$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(mfinal = floor((1:len) * 50), maxdepth = seq(1, 
##             len))
##     }
##     else {
##         out <- data.frame(mfinal = sample(1:100, replace = TRUE, 
##             size = len), maxdepth = sample(1:30, replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $AdaBag$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$maxdepth <- param$maxdepth
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- rpart::rpart.control(maxdepth = param$maxdepth, 
##         cp = -1, minsplit = 0, xval = 0)
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     modelArgs <- c(list(formula = as.formula(.outcome ~ .), data = x, 
##         mfinal = param$mfinal, control = ctl), theDots)
##     modelArgs$data$.outcome <- y
##     out <- do.call(adabag::bagging, modelArgs)
##     out
## }
## 
## $AdaBag$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     newdata$.outcome <- factor(rep(modelFit$obsLevels[1], nrow(newdata)), 
##         levels = modelFit$obsLevels)
##     out <- predict(modelFit, newdata, newmfinal = modelFit$tuneValue$mfinal)$class
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = length(submodels$mfinal) + 
##             1)
##         tmp[[1]] <- out
##         for (i in seq(along = submodels$mfinal)) {
##             tmp[[i + 1]] <- predict(modelFit, newdata, newmfinal = submodels$mfinal[[i]])$class
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $AdaBag$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     newdata$.outcome <- factor(rep(modelFit$obsLevels[1], nrow(newdata)), 
##         levels = modelFit$obsLevels)
##     out <- predict(modelFit, newdata)$prob
##     colnames(out) <- modelFit$obsLevels
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = length(submodels$mfinal) + 
##             1)
##         tmp[[1]] <- out
##         for (i in seq(along = submodels$mfinal)) {
##             tmp[[i + 1]] <- predict(modelFit, newdata, newmfinal = submodels$mfinal[[i]])$prob
##             colnames(tmp[[i + 1]]) <- modelFit$obsLevels
##         }
##         out <- lapply(tmp, as.data.frame)
##     }
##     out
## }
## 
## $AdaBag$varImp
## function (object, ...) 
## {
##     imps <- data.frame(Overall = object$importance)
##     rownames(imps) <- names(object$importance)
##     imps
## }
## 
## $AdaBag$levels
## function (x) 
## x$obsLevels
## 
## $AdaBag$predictors
## function (x, ...) 
## names(x$importance)[x$importance != 0]
## 
## $AdaBag$tags
## [1] "Tree-Based Model"              "Ensemble Model"               
## [3] "Boosting"                      "Bagging"                      
## [5] "Implicit Feature Selection"    "Handle Missing Predictor Data"
## 
## $AdaBag$sort
## function (x) 
## x[order(x$mfinal, x$maxdepth), ]
## 
## 
## $AdaBoost.M1
## $AdaBoost.M1$label
## [1] "AdaBoost.M1"
## 
## $AdaBoost.M1$library
## [1] "adabag" "plyr"  
## 
## $AdaBoost.M1$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, c("coeflearn", "maxdepth"), function(x) c(mfinal = max(x$mfinal)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$mfinal)) {
##         index <- which(grid$maxdepth == loop$maxdepth[i] & grid$coeflearn == 
##             loop$coeflearn[i])
##         trees <- grid[index, "mfinal"]
##         submodels[[i]] <- data.frame(mfinal = trees[trees != 
##             loop$mfinal[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $AdaBoost.M1$type
## [1] "Classification"
## 
## $AdaBoost.M1$parameters
##   parameter     class            label
## 1    mfinal   numeric           #Trees
## 2  maxdepth   numeric   Max Tree Depth
## 3 coeflearn character Coefficient Type
## 
## $AdaBoost.M1$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     types <- c("Breiman", "Freund", "Zhu")
##     if (search == "grid") {
##         out <- expand.grid(mfinal = floor((1:len) * 50), maxdepth = seq(1, 
##             len), coeflearn = types)
##     }
##     else {
##         out <- data.frame(mfinal = sample(1:100, replace = TRUE, 
##             size = len), maxdepth = sample(1:30, replace = TRUE, 
##             size = len), coeflearn = sample(types, replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $AdaBoost.M1$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$maxdepth <- param$maxdepth
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- rpart::rpart.control(maxdepth = param$maxdepth, 
##         cp = -1, minsplit = 0, xval = 0)
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     modelArgs <- c(list(formula = as.formula(.outcome ~ .), data = x, 
##         mfinal = param$mfinal, coeflearn = as.character(param$coeflearn), 
##         control = ctl), theDots)
##     modelArgs$data$.outcome <- y
##     out <- do.call(adabag::boosting, modelArgs)
##     out
## }
## 
## $AdaBoost.M1$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     newdata$.outcome <- factor(rep(modelFit$obsLevels[1], nrow(newdata)), 
##         levels = modelFit$obsLevels)
##     out <- predict(modelFit, newdata, newmfinal = modelFit$tuneValue$mfinal)$class
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = length(submodels$mfinal) + 
##             1)
##         tmp[[1]] <- out
##         for (i in seq(along = submodels$mfinal)) {
##             tmp[[i + 1]] <- predict(modelFit, newdata, newmfinal = submodels$mfinal[[i]])$class
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $AdaBoost.M1$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     newdata$.outcome <- factor(rep(modelFit$obsLevels[1], nrow(newdata)), 
##         levels = modelFit$obsLevels)
##     out <- predict(modelFit, newdata)$prob
##     colnames(out) <- modelFit$obsLevels
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = length(submodels$mfinal) + 
##             1)
##         tmp[[1]] <- out
##         for (i in seq(along = submodels$mfinal)) {
##             tmp[[i + 1]] <- predict(modelFit, newdata, newmfinal = submodels$mfinal[[i]])$prob
##             colnames(tmp[[i + 1]]) <- modelFit$obsLevels
##         }
##         out <- lapply(tmp, as.data.frame)
##     }
##     out
## }
## 
## $AdaBoost.M1$levels
## function (x) 
## x$obsLevels
## 
## $AdaBoost.M1$varImp
## function (object, ...) 
## {
##     imps <- data.frame(Overall = object$importance)
##     rownames(imps) <- names(object$importance)
##     imps
## }
## 
## $AdaBoost.M1$predictors
## function (x, ...) 
## names(x$importance)[x$importance != 0]
## 
## $AdaBoost.M1$tags
## [1] "Tree-Based Model"              "Ensemble Model"               
## [3] "Boosting"                      "Implicit Feature Selection"   
## [5] "Handle Missing Predictor Data"
## 
## $AdaBoost.M1$sort
## function (x) 
## x[order(x$mfinal, x$maxdepth), ]
## 
## 
## $adaboost
## $adaboost$label
## [1] "AdaBoost Classification Trees"
## 
## $adaboost$library
## [1] "fastAdaboost"
## 
## $adaboost$loop
## NULL
## 
## $adaboost$type
## [1] "Classification"
## 
## $adaboost$parameters
##   parameter     class  label
## 1     nIter   numeric #Trees
## 2    method character Method
## 
## $adaboost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out = expand.grid(nIter = floor((1:len) * 50), method = c("Adaboost.M1", 
##             "Real adaboost"))
##     }
##     else {
##         out <- data.frame(nIter = sample(1:1000, replace = TRUE, 
##             size = len), method = sample(c("Adaboost.M1", "Real adaboost"), 
##             replace = TRUE, size = len))
##     }
##     out
## }
## 
## $adaboost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         as.data.frame(x, stringsAsFactors = TRUE)
##     else x
##     dat$.outcome <- y
##     out <- if (param$method == "Adaboost.M1") 
##         fastAdaboost::adaboost(.outcome ~ ., data = dat, nIter = param$nIter, 
##             ...)
##     else fastAdaboost::real_adaboost(.outcome ~ ., data = dat, 
##         nIter = param$nIter, ...)
##     out
## }
## 
## $adaboost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)$class
## }
## 
## $adaboost$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata)$prob
##     out <- t(apply(out, 1, function(x) ifelse(x == Inf, 1, x)))
##     out <- t(apply(out, 1, function(x) ifelse(x == -Inf, 0, x)))
##     out <- as.data.frame(out, stringsAsFactors = TRUE)
##     colnames(out) <- as.vector(modelFit$classnames)
##     out
## }
## 
## $adaboost$levels
## function (x) 
## as.vector(x$classnames)
## 
## $adaboost$predictors
## function (x, ...) 
## unique(unlist(lapply(x$trees, predictors)))
## 
## $adaboost$tags
## [1] "Tree-Based Model"           "Ensemble Model"            
## [3] "Boosting"                   "Implicit Feature Selection"
## [5] "Two Class Only"            
## 
## $adaboost$sort
## function (x) 
## x[order(x$nIter), ]
## 
## 
## $amdai
## $amdai$label
## [1] "Adaptive Mixture Discriminant Analysis"
## 
## $amdai$library
## [1] "adaptDA"
## 
## $amdai$loop
## NULL
## 
## $amdai$type
## [1] "Classification"
## 
## $amdai$parameters
##   parameter     class      label
## 1     model character Model Type
## 
## $amdai$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(model = "lda")
## 
## $amdai$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     mod <- adaptDA::amdai(x, as.numeric(y), model = as.character(param$model), 
##         ...)
##     mod$levels <- levels(y)
##     mod
## }
## 
## $amdai$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, K = length(modelFit$levels))$cls
##     factor(modelFit$levels[out], levels = modelFit$levels)
## }
## 
## $amdai$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, K = length(modelFit$levels))$P
##     factor(modelFit$levels[out], levels = modelFit$levels)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $amdai$varImp
## NULL
## 
## $amdai$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $amdai$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $amdai$tags
## [1] "Discriminant Analysis" "Mixture Model"        
## 
## $amdai$sort
## function (x) 
## x
## 
## 
## $ANFIS
## $ANFIS$label
## [1] "Adaptive-Network-Based Fuzzy Inference System"
## 
## $ANFIS$library
## [1] "frbs"
## 
## $ANFIS$type
## [1] "Regression"
## 
## $ANFIS$parameters
##    parameter   class           label
## 1 num.labels numeric    #Fuzzy Terms
## 2   max.iter numeric Max. Iterations
## 
## $ANFIS$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(num.labels = 1 + (1:len) * 2, max.iter = 10)
##     }
##     else {
##         out <- data.frame(num.labels = sample(1:10, replace = TRUE, 
##             size = len), max.iter = sample(1:20, replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $ANFIS$loop
## NULL
## 
## $ANFIS$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)), method.type = "ANFIS")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$max.iter <- param$max.iter
##     }
##     else theDots$control <- list(num.labels = param$num.labels, 
##         max.iter = param$max.iter, max.iter = 10, step.size = 0.01, 
##         type.tnorm = "MIN", type.snorm = "MAX", type.implication.func = "ZADEH", 
##         name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $ANFIS$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $ANFIS$prob
## NULL
## 
## $ANFIS$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $ANFIS$tags
## [1] "Rule-Based Model"
## 
## $ANFIS$levels
## NULL
## 
## $ANFIS$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
## $avNNet
## $avNNet$label
## [1] "Model Averaged Neural Network"
## 
## $avNNet$library
## [1] "nnet"
## 
## $avNNet$loop
## NULL
## 
## $avNNet$type
## [1] "Classification" "Regression"    
## 
## $avNNet$parameters
##   parameter   class         label
## 1      size numeric #Hidden Units
## 2     decay numeric  Weight Decay
## 3       bag logical       Bagging
## 
## $avNNet$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(size = ((1:len) * 2) - 1, decay = c(0, 
##             10^seq(-1, -4, length = len - 1)), bag = FALSE)
##     }
##     else {
##         out <- data.frame(size = sample(1:20, size = len, replace = TRUE), 
##             decay = 10^runif(len, min = -5, 1), bag = sample(c(TRUE, 
##                 FALSE), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $avNNet$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         out <- caret::avNNet(.outcome ~ ., data = dat, weights = wts, 
##             size = param$size, decay = param$decay, bag = param$bag, 
##             ...)
##     }
##     else out <- caret::avNNet(.outcome ~ ., data = dat, size = param$size, 
##         decay = param$decay, bag = param$bag, ...)
##     out
## }
## 
## $avNNet$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "raw")
##     }
##     out
## }
## 
## $avNNet$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, type = "prob")
##     if (ncol(as.data.frame(out, stringsAsFactors = TRUE)) == 
##         1) {
##         out <- cbind(out, 1 - out)
##         dimnames(out)[[2]] <- rev(modelFit$obsLevels)
##     }
##     out
## }
## 
## $avNNet$predictors
## function (x, ...) 
## x$names
## 
## $avNNet$levels
## function (x) 
## x$model[[1]]$lev
## 
## $avNNet$tags
## [1] "Neural Network"       "Ensemble Model"       "Bagging"             
## [4] "L2 Regularization"    "Accepts Case Weights"
## 
## $avNNet$sort
## function (x) 
## x[order(x$size, -x$decay), ]
## 
## 
## $awnb
## $awnb$label
## [1] "Naive Bayes Classifier with Attribute Weighting"
## 
## $awnb$library
## [1] "bnclassify"
## 
## $awnb$type
## [1] "Classification"
## 
## $awnb$parameters
##   parameter   class               label
## 1    smooth numeric Smoothing Parameter
## 
## $awnb$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(smooth = 0:(len - 1))
##     }
##     else {
##         out <- data.frame(smooth = runif(len, min = 0, max = 10))
##     }
##     out
## }
## 
## $awnb$loop
## NULL
## 
## $awnb$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     struct <- bnclassify::nb(class = ".outcome", dataset = dat)
##     args <- list(x = bnclassify::nb(".outcome", dataset = dat), 
##         dataset = dat, smooth = param$smooth)
##     dots <- list(...)
##     args <- c(args, dots)
##     do.call(bnclassify::lp, args)
## }
## 
## $awnb$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $awnb$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, prob = TRUE)
## }
## 
## $awnb$levels
## function (x) 
## x$obsLevels
## 
## $awnb$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $awnb$tags
## [1] "Bayesian Model"              "Categorical Predictors Only"
## 
## $awnb$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $awtan
## $awtan$label
## [1] "Tree Augmented Naive Bayes Classifier with Attribute Weighting"
## 
## $awtan$library
## [1] "bnclassify"
## 
## $awtan$type
## [1] "Classification"
## 
## $awtan$parameters
##   parameter     class               label
## 1     score character      Score Function
## 2    smooth   numeric Smoothing Parameter
## 
## $awtan$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     out <- expand.grid(score = c("loglik", "bic", "aic"), smooth = 1:2)
## }
## 
## $awtan$loop
## NULL
## 
## $awtan$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     args <- list(x = bnclassify::tan_cl(".outcome", dataset = dat, 
##         score = as.character(param$score)), dataset = dat, smooth = param$smooth)
##     dots <- list(...)
##     if (!any(names(dots) == "awnb_trees")) 
##         dots$awnb_trees <- 10
##     if (!any(names(dots) == "awnb_bootstrap")) 
##         dots$awnb_bootstrap <- 10
##     args <- c(args, dots)
##     do.call(bnclassify::lp, args)
## }
## 
## $awtan$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $awtan$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, prob = TRUE)
## }
## 
## $awtan$levels
## function (x) 
## x$obsLevels
## 
## $awtan$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $awtan$tags
## [1] "Bayesian Model"              "Categorical Predictors Only"
## 
## $awtan$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## $awtan$check
## function (pkg) 
## {
##     requireNamespace("kohonen")
##     current <- packageDescription("bnclassify")$Version
##     expected <- "0.3.3"
##     if (compareVersion(current, expected) < 0) 
##         stop("This modeling workflow requires bnclassify version ", 
##             expected, "or greater.", call. = FALSE)
## }
## 
## 
## $bag
## $bag$label
## [1] "Bagged Model"
## 
## $bag$library
## [1] "caret"
## 
## $bag$loop
## NULL
## 
## $bag$type
## [1] "Regression"     "Classification"
## 
## $bag$parameters
##   parameter   class                         label
## 1      vars numeric #Randomly Selected Predictors
## 
## $bag$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(vars = ncol(x))
## 
## $bag$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     out <- caret::bag(x, y, vars = param$vars, ...)
##     out$xNames <- colnames(x)
##     out
## }
## 
## $bag$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $bag$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $bag$predictors
## function (x, ...) 
## x$xNames
## 
## $bag$levels
## function (x) 
## x$obsLevels
## 
## $bag$varImp
## NULL
## 
## $bag$tags
## [1] "Bagging"        "Ensemble Model"
## 
## $bag$sort
## function (x) 
## x
## 
## 
## $bagEarth
## $bagEarth$label
## [1] "Bagged MARS"
## 
## $bagEarth$library
## [1] "earth"
## 
## $bagEarth$type
## [1] "Regression"     "Classification"
## 
## $bagEarth$parameters
##   parameter   class          label
## 1    nprune numeric         #Terms
## 2    degree numeric Product Degree
## 
## $bagEarth$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     mod <- earth::earth(.outcome ~ ., data = dat, pmethod = "none")
##     maxTerms <- nrow(mod$dirs)
##     maxTerms <- min(200, floor(maxTerms * 0.75) + 2)
##     if (search == "grid") {
##         out <- data.frame(nprune = unique(floor(seq(2, to = maxTerms, 
##             length = len))), degree = 1)
##     }
##     else {
##         out <- data.frame(nprune = sample(2:maxTerms, size = len, 
##             replace = TRUE), degree = sample(1:2, size = len, 
##             replace = TRUE))
##     }
## }
## 
## $bagEarth$loop
## function (grid) 
## {
##     deg <- unique(grid$degree)
##     loop <- data.frame(degree = deg)
##     loop$nprune <- NA
##     submodels <- vector(mode = "list", length = length(deg))
##     for (i in seq(along = deg)) {
##         np <- grid[grid$degree == deg[i], "nprune"]
##         loop$nprune[loop$degree == deg[i]] <- np[which.max(np)]
##         submodels[[i]] <- data.frame(nprune = np[-which.max(np)])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $bagEarth$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(earth)
##     theDots <- list(...)
##     theDots$keepxy <- TRUE
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(x = x, y = y, degree = param$degree, 
##         nprune = param$nprune), theDots)
##     if (is.factor(y) & !any(names(theDots) == "glm")) {
##         modelArgs$glm <- list(family = binomial, maxit = 100)
##     }
##     tmp <- do.call(getFromNamespace("bagEarth.default", "caret"), 
##         modelArgs)
##     tmp$call["nprune"] <- param$nprune
##     tmp$call["degree"] <- param$degree
##     tmp
## }
## 
## $bagEarth$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else {
##         out <- predict(modelFit, newdata)
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- if (is.matrix(out)) 
##             out[, 1]
##         else out
##         for (j in seq(along = submodels$nprune)) {
##             prunedFit <- update(modelFit, nprune = submodels$nprune[j])
##             if (modelFit$problemType == "Classification") {
##                 tmp[[j + 1]] <- predict(prunedFit, newdata, type = "class")
##             }
##             else {
##                 tmp[[j + 1]] <- predict(prunedFit, newdata)
##             }
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $bagEarth$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, type = "prob")
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$nprune)) {
##             prunedFit <- update(modelFit, nprune = submodels$nprune[j])
##             tmp2 <- predict(prunedFit, newdata, type = "prob")
##             tmp[[j + 1]] <- tmp2
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $bagEarth$predictors
## function (x, ...) 
## {
##     predEarth <- function(x) {
##         vi <- varImp(x)
##         notZero <- sort(unique(unlist(lapply(vi, function(x) which(x > 
##             0)))))
##         if (length(notZero) > 0) 
##             rownames(vi)[notZero]
##         else NULL
##     }
##     eachFit <- lapply(x$fit, predEarth)
##     unique(unlist(eachFit))
## }
## 
## $bagEarth$varImp
## function (object, ...) 
## {
##     allImp <- lapply(object$fit, varImp, ...)
##     allImp <- lapply(allImp, function(x) {
##         x$var <- rownames(x)
##         x
##     }, ...)
##     allImp <- do.call("rbind", allImp)
##     impDF <- plyr::ddply(allImp, .(var), function(x) c(Overall = mean(x$Overall, 
##         rm.na = TRUE)))
##     out <- data.frame(Overall = impDF$Overall)
##     rownames(out) <- impDF$var
##     out
## }
## 
## $bagEarth$levels
## function (x) 
## x$levels
## 
## $bagEarth$tags
## [1] "Multivariate Adaptive Regression Splines"
## [2] "Ensemble Model"                          
## [3] "Implicit Feature Selection"              
## [4] "Bagging"                                 
## [5] "Accepts Case Weights"                    
## 
## $bagEarth$notes
## [1] "Unlike other packages used by `train`, the `earth` package is fully loaded when this model is used."
## 
## $bagEarth$sort
## function (x) 
## x[order(x$degree, x$nprune), ]
## 
## $bagEarth$oob
## function (x) 
## apply(x$oob, 2, function(x) quantile(x, probs = 0.5))
## 
## 
## $bagEarthGCV
## $bagEarthGCV$label
## [1] "Bagged MARS using gCV Pruning"
## 
## $bagEarthGCV$library
## [1] "earth"
## 
## $bagEarthGCV$type
## [1] "Regression"     "Classification"
## 
## $bagEarthGCV$parameters
##   parameter   class          label
## 1    degree numeric Product Degree
## 
## $bagEarthGCV$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(degree = 1)
## 
## $bagEarthGCV$loop
## NULL
## 
## $bagEarthGCV$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(earth)
##     theDots <- list(...)
##     theDots$keepxy <- TRUE
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(x = x, y = y, degree = param$degree), 
##         theDots)
##     if (is.factor(y) & !any(names(theDots) == "glm")) {
##         modelArgs$glm <- list(family = binomial, maxit = 100)
##     }
##     tmp <- do.call(getFromNamespace("bagEarth.default", "caret"), 
##         modelArgs)
##     tmp$call["degree"] <- param$degree
##     tmp
## }
## 
## $bagEarthGCV$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else {
##         out <- predict(modelFit, newdata)
##     }
##     out
## }
## 
## $bagEarthGCV$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata, type = "prob")
## }
## 
## $bagEarthGCV$predictors
## function (x, ...) 
## {
##     predEarth <- function(x) {
##         vi <- varImp(x)
##         notZero <- sort(unique(unlist(lapply(vi, function(x) which(x > 
##             0)))))
##         if (length(notZero) > 0) 
##             rownames(vi)[notZero]
##         else NULL
##     }
##     eachFit <- lapply(x$fit, predEarth)
##     unique(unlist(eachFit))
## }
## 
## $bagEarthGCV$varImp
## function (object, ...) 
## {
##     allImp <- lapply(object$fit, varImp, ...)
##     allImp <- lapply(allImp, function(x) {
##         x$var <- rownames(x)
##         x
##     }, ...)
##     allImp <- do.call("rbind", allImp)
##     impDF <- plyr::ddply(allImp, .(var), function(x) c(Overall = mean(x$Overall, 
##         rm.na = TRUE)))
##     out <- data.frame(Overall = impDF$Overall)
##     rownames(out) <- impDF$var
##     out
## }
## 
## $bagEarthGCV$levels
## function (x) 
## x$levels
## 
## $bagEarthGCV$tags
## [1] "Multivariate Adaptive Regression Splines"
## [2] "Ensemble Model"                          
## [3] "Implicit Feature Selection"              
## [4] "Bagging"                                 
## [5] "Accepts Case Weights"                    
## 
## $bagEarthGCV$notes
## [1] "Unlike other packages used by `train`, the `earth` package is fully loaded when this model is used."
## 
## $bagEarthGCV$sort
## function (x) 
## x[order(x$degree), ]
## 
## $bagEarthGCV$oob
## function (x) 
## apply(x$oob, 2, function(x) quantile(x, probs = 0.5))
## 
## 
## $bagFDA
## $bagFDA$label
## [1] "Bagged Flexible Discriminant Analysis"
## 
## $bagFDA$library
## [1] "earth" "mda"  
## 
## $bagFDA$loop
## NULL
## 
## $bagFDA$type
## [1] "Classification"
## 
## $bagFDA$parameters
##   parameter   class          label
## 1    degree numeric Product Degree
## 2    nprune numeric         #Terms
## 
## $bagFDA$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     dat <- if (!is.data.frame(x)) 
##         as.data.frame(x, stringsAsFactors = TRUE)
##     else x
##     dat$.outcome <- y
##     mod <- mda::fda(.outcome ~ ., data = dat, method = earth::earth, 
##         pmethod = "none")
##     maxTerms <- nrow(mod$fit$dirs) - 1
##     maxTerms <- min(200, floor(maxTerms * 0.75) + 2)
##     if (search == "grid") {
##         out <- data.frame(nprune = unique(floor(seq(2, to = maxTerms, 
##             length = len))), degree = 1)
##     }
##     else {
##         out <- data.frame(nprune = sample(2:maxTerms, size = len, 
##             replace = TRUE), degree = sample(1:2, size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $bagFDA$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(earth)
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     bagFDA(.outcome ~ ., data = dat, degree = param$degree, nprune = param$nprune, 
##         weights = wts, ...)
## }
## 
## $bagFDA$tags
## [1] "Multivariate Adaptive Regression Splines"
## [2] "Ensemble Model"                          
## [3] "Implicit Feature Selection"              
## [4] "Bagging"                                 
## [5] "Accepts Case Weights"                    
## 
## $bagFDA$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $bagFDA$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "probs")
## 
## $bagFDA$predictors
## function (x, ...) 
## {
##     fdaPreds <- function(x) {
##         code <- getModelInfo("earth", regex = FALSE)[[1]]$predictors
##         tmp <- predictors(x$terms)
##         out <- if (class(x$fit) == "earth") 
##             code(x$fit)
##         else tmp
##         out
##     }
##     eachFit <- lapply(x$fit, fdaPreds)
##     unique(unlist(eachFit))
## }
## 
## $bagFDA$varImp
## function (object, ...) 
## {
##     allImp <- lapply(object$fit, varImp, ...)
##     allImp <- lapply(allImp, function(x) {
##         x$var <- rownames(x)
##         x
##     }, ...)
##     allImp <- do.call("rbind", allImp)
##     impDF <- plyr::ddply(allImp, .(var), function(x) c(Overall = mean(x$Overall, 
##         rm.na = TRUE)))
##     out <- data.frame(Overall = impDF$Overall)
##     rownames(out) <- impDF$var
##     out
## }
## 
## $bagFDA$notes
## [1] "Unlike other packages used by `train`, the `earth` package is fully loaded when this model is used."
## 
## $bagFDA$levels
## function (x) 
## x$levels
## 
## $bagFDA$sort
## function (x) 
## x[order(x$degree, x$nprune), ]
## 
## $bagFDA$oob
## function (x) 
## apply(x$oob, 2, function(x) quantile(x, probs = 0.5))
## 
## 
## $bagFDAGCV
## $bagFDAGCV$label
## [1] "Bagged FDA using gCV Pruning"
## 
## $bagFDAGCV$library
## [1] "earth"
## 
## $bagFDAGCV$type
## [1] "Classification"
## 
## $bagFDAGCV$parameters
##   parameter   class          label
## 1    degree numeric Product Degree
## 
## $bagFDAGCV$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(degree = 1)
## 
## $bagFDAGCV$loop
## NULL
## 
## $bagFDAGCV$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(earth)
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     caret::bagFDA(.outcome ~ ., data = dat, degree = param$degree, 
##         weights = wts, ...)
## }
## 
## $bagFDAGCV$tags
## [1] "Multivariate Adaptive Regression Splines"
## [2] "Ensemble Model"                          
## [3] "Implicit Feature Selection"              
## [4] "Bagging"                                 
## 
## $bagFDAGCV$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $bagFDAGCV$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "probs")
## 
## $bagFDAGCV$predictors
## function (x, ...) 
## {
##     fdaPreds <- function(x) {
##         code <- getModelInfo("earth", regex = FALSE)[[1]]$predictors
##         tmp <- predictors(x$terms)
##         out <- if (class(x$fit) == "earth") 
##             code(x$fit)
##         else tmp
##         out
##     }
##     eachFit <- lapply(x$fit, fdaPreds)
##     unique(unlist(eachFit))
## }
## 
## $bagFDAGCV$varImp
## function (object, ...) 
## {
##     allImp <- lapply(object$fit, varImp, ...)
##     allImp <- lapply(allImp, function(x) {
##         x$var <- rownames(x)
##         x
##     }, ...)
##     allImp <- do.call("rbind", allImp)
##     impDF <- plyr::ddply(allImp, .(var), function(x) c(Overall = mean(x$Overall, 
##         na.rm = TRUE)))
##     out <- data.frame(Overall = impDF$Overall)
##     rownames(out) <- impDF$var
##     out
## }
## 
## $bagFDAGCV$levels
## function (x) 
## x$levels
## 
## $bagFDAGCV$tags
## [1] "Multivariate Adaptive Regression Splines"
## [2] "Ensemble Model"                          
## [3] "Implicit Feature Selection"              
## [4] "Bagging"                                 
## [5] "Accepts Case Weights"                    
## 
## $bagFDAGCV$notes
## [1] "Unlike other packages used by `train`, the `earth` package is fully loaded when this model is used."
## 
## $bagFDAGCV$sort
## function (x) 
## x[order(x$degree), ]
## 
## $bagFDAGCV$oob
## function (x) 
## apply(x$oob, 2, function(x) quantile(x, probs = 0.5))
## 
## 
## $bam
## $bam$label
## [1] "Generalized Additive Model using Splines"
## 
## $bam$library
## [1] "mgcv"
## 
## $bam$loop
## NULL
## 
## $bam$type
## [1] "Regression"     "Classification"
## 
## $bam$parameters
##   parameter     class             label
## 1    select   logical Feature Selection
## 2    method character            Method
## 
## $bam$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(select = c(TRUE, FALSE), method = "GCV.Cp")
##     }
##     else {
##         out <- data.frame(select = sample(c(TRUE, FALSE), size = len, 
##             replace = TRUE), method = sample(c("GCV.Cp", "ML", 
##             "REML"), size = len, replace = TRUE))
##     }
## }
## 
## $bam$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(mgcv)
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     modForm <- caret:::smootherFormula(x)
##     if (is.factor(y)) {
##         dat$.outcome <- ifelse(y == lev[1], 0, 1)
##         dist <- binomial()
##     }
##     else {
##         dat$.outcome <- y
##         dist <- gaussian()
##     }
##     modelArgs <- list(formula = modForm, data = dat, select = param$select, 
##         method = as.character(param$method))
##     theDots <- list(...)
##     if (!any(names(theDots) == "family")) 
##         modelArgs$family <- dist
##     modelArgs <- c(modelArgs, theDots)
##     out <- do.call(mgcv::bam, modelArgs)
##     out
## }
## 
## $bam$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Classification") {
##         probs <- predict(modelFit, newdata, type = "response")
##         out <- ifelse(probs < 0.5, modelFit$obsLevel[1], modelFit$obsLevel[2])
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "response")
##     }
##     out
## }
## 
## $bam$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - out, out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $bam$predictors
## function (x, ...) 
## {
##     predictors(x$terms)
## }
## 
## $bam$levels
## function (x) 
## x$obsLevels
## 
## $bam$varImp
## function (object, ...) 
## {
##     smoothed <- summary(object)$s.table[, "p-value", drop = FALSE]
##     linear <- summary(object)$p.table
##     linear <- linear[, grepl("^Pr", colnames(linear)), drop = FALSE]
##     gams <- rbind(smoothed, linear)
##     gams <- gams[rownames(gams) != "(Intercept)", , drop = FALSE]
##     rownames(gams) <- gsub("^s\\(", "", rownames(gams))
##     rownames(gams) <- gsub("\\)$", "", rownames(gams))
##     colnames(gams)[1] <- "Overall"
##     gams <- as.data.frame(gams, stringsAsFactors = TRUE)
##     gams$Overall <- -log10(gams$Overall)
##     allPreds <- colnames(attr(object$terms, "factors"))
##     extras <- allPreds[!(allPreds %in% rownames(gams))]
##     if (length(extras) > 0) {
##         tmp <- data.frame(Overall = rep(NA, length(extras)))
##         rownames(tmp) <- extras
##         gams <- rbind(gams, tmp)
##     }
##     gams
## }
## 
## $bam$notes
## [1] "Which terms enter the model in a nonlinear manner is determined by the number of unique values for the predictor. For example, if a predictor only has four unique values, most basis expansion method will fail because there are not enough granularity in the data. By default, a predictor must have at least 10 unique values to be used in a nonlinear basis expansion. Unlike other packages used by `train`, the `mgcv` package is fully loaded when this model is used."
## 
## $bam$tags
## [1] "Generalized Linear Model"   "Generalized Additive Model"
## 
## $bam$sort
## function (x) 
## x
## 
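# The "bam" entry fits a spline GAM via mgcv, with the smooth terms built
# internally by caret:::smootherFormula(). A hedged sketch on the airquality
# data used at the start of this document (rows with NA are dropped first;
# the column choice is illustrative):
library(caret)
aq <- na.omit(airquality)
fit_bam <- train(x = aq[, c("Solar.R", "Wind", "Temp")], y = aq$Ozone,
                 method = "bam",
                 tuneGrid = expand.grid(select = c(TRUE, FALSE), method = "GCV.Cp"),
                 trControl = trainControl(method = "cv", number = 5))
fit_bam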
## 
## $bartMachine
## $bartMachine$label
## [1] "Bayesian Additive Regression Trees"
## 
## $bartMachine$library
## [1] "bartMachine"
## 
## $bartMachine$loop
## NULL
## 
## $bartMachine$type
## [1] "Classification" "Regression"    
## 
## $bartMachine$parameters
##   parameter   class                              label
## 1 num_trees numeric                             #Trees
## 2         k numeric                     Prior Boundary
## 3     alpha numeric  Base Terminal Node Hyperparameter
## 4      beta numeric Power Terminal Node Hyperparameter
## 5        nu numeric                 Degrees of Freedom
## 
## $bartMachine$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(num_trees = 50, k = (1:len) + 1, alpha = seq(0.9, 
##             0.99, length = len), beta = seq(1, 3, length = len), 
##             nu = (1:len) + 1)
##     }
##     else {
##         out <- data.frame(num_trees = sample(10:100, replace = TRUE, 
##             size = len), k = runif(len, min = 0, max = 5), alpha = runif(len, 
##             min = 0.9, max = 1), beta = runif(len, min = 0, max = 4), 
##             nu = runif(len, min = 0, max = 5))
##     }
##     if (is.factor(y)) {
##         out$k <- NA
##         out$nu <- NA
##     }
##     out
## }
## 
## $bartMachine$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     out <- if (is.factor(y)) {
##         bartMachine::bartMachine(X = x, y = y, num_trees = param$num_trees, 
##             alpha = param$alpha, beta = param$beta, ...)
##     }
##     else {
##         bartMachine::bartMachine(X = x, y = y, num_trees = param$num_trees, 
##             k = param$k, alpha = param$alpha, beta = param$beta, 
##             nu = param$nu, ...)
##     }
##     out
## }
## 
## $bartMachine$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- if (is.factor(modelFit$y)) 
##         predict(modelFit, newdata, type = "class")
##     else predict(modelFit, newdata)
## }
## 
## $bartMachine$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "prob")
##     out <- data.frame(y1 = 1 - out, y2 = out)
##     colnames(out) <- modelFit$y_levels
##     out
## }
## 
## $bartMachine$predictors
## function (x, ...) 
## colnames(x$X)
## 
## $bartMachine$varImp
## function (object, ...) 
## {
##     imps <- bartMachine::investigate_var_importance(object, plot = FALSE)
##     imps <- imps$avg_var_props - 1.96 * imps$sd_var_props
##     missing_x <- !(colnames(object$X) %in% names(imps))
##     if (any(missing_x)) {
##         imps2 <- rep(0, sum(missing_x))
##         names(imps2) <- colnames(object$X)[missing_x]
##         imps <- c(imps, imps2)
##     }
##     out <- data.frame(Overall = as.vector(imps))
##     rownames(out) <- names(imps)
##     out
## }
## 
## $bartMachine$levels
## function (x) 
## x$y_levels
## 
## $bartMachine$tags
## [1] "Tree-Based Model"           "Implicit Feature Selection"
## [3] "Bayesian Model"             "Two Class Only"            
## 
## $bartMachine$sort
## function (x) 
## x[order(-x[, "num_trees"]), ]
## 
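# bartMachine requires Java plus the rJava and bartMachine packages. Assuming
# those are installed, a minimal regression sketch (all five tuning values
# below are illustrative, not recommendations):
library(caret)
grid_bart <- data.frame(num_trees = 50, k = 2, alpha = 0.95, beta = 2, nu = 3)
fit_bart <- train(x = mtcars[, c("disp", "hp", "wt")], y = mtcars$mpg,
                  method = "bartMachine", tuneGrid = grid_bart,
                  trControl = trainControl(method = "cv", number = 3),
                  verbose = FALSE)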
## 
## $bayesglm
## $bayesglm$label
## [1] "Bayesian Generalized Linear Model"
## 
## $bayesglm$library
## [1] "arm"
## 
## $bayesglm$loop
## NULL
## 
## $bayesglm$type
## [1] "Regression"     "Classification"
## 
## $bayesglm$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $bayesglm$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $bayesglm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (!any(names(theDots) == "family")) 
##         theDots$family <- if (is.factor(dat$.outcome)) 
##             binomial()
##         else gaussian()
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat), theDots)
##     out <- do.call(arm::bayesglm, modelArgs)
##     out$call <- NULL
##     out
## }
## 
## $bayesglm$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Classification") {
##         probs <- predict(modelFit, newdata, type = "response")
##         out <- ifelse(probs < 0.5, modelFit$obsLevel[1], modelFit$obsLevel[2])
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "response")
##     }
##     out
## }
## 
## $bayesglm$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - out, out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $bayesglm$levels
## function (x) 
## x$obsLevels
## 
## $bayesglm$trim
## function (x) 
## {
##     x$y = c()
##     x$model = c()
##     x$residuals = c()
##     x$fitted.values = c()
##     x$effects = c()
##     x$qr$qr = c()
##     x$linear.predictors = c()
##     x$weights = c()
##     x$prior.weights = c()
##     x$data = c()
##     x$family$variance = c()
##     x$family$dev.resids = c()
##     x$family$aic = c()
##     x$family$validmu = c()
##     x$family$simulate = c()
##     attr(x$terms, ".Environment") = c()
##     attr(x$formula, ".Environment") = c()
##     x$R <- c()
##     x$xNames <- c()
##     x$xlevels <- c()
##     x
## }
## 
## $bayesglm$tags
## [1] "Generalized Linear Model" "Logistic Regression"     
## [3] "Linear Classifier"        "Bayesian Model"          
## [5] "Accepts Case Weights"    
## 
## $bayesglm$sort
## function (x) 
## x
## 
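# bayesglm has no tuning parameter (the grid above returns parameter = "none"),
# so train() simply fits and resamples one model. A two-class sketch on a
# reduced iris (the subset only matches the binomial family chosen in the fit
# code above):
library(caret)
two <- droplevels(subset(iris, Species != "setosa"))
fit_bglm <- train(Species ~ ., data = two, method = "bayesglm",
                  trControl = trainControl(method = "cv", number = 5))
predict(fit_bglm, head(two), type = "prob")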
## 
## $binda
## $binda$label
## [1] "Binary Discriminant Analysis"
## 
## $binda$library
## [1] "binda"
## 
## $binda$loop
## NULL
## 
## $binda$type
## [1] "Classification"
## 
## $binda$parameters
##      parameter   class               label
## 1 lambda.freqs numeric Shrinkage Intensity
## 
## $binda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(lambda.freqs = seq(0, 1, length = len))
##     }
##     else {
##         out <- data.frame(lambda.freqs = runif(len, min = 0, 
##             max = 1))
##     }
##     out
## }
## 
## $binda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     binda::binda(as.matrix(x), y, lambda.freqs = param$lambda.freqs, 
##         ...)
## }
## 
## $binda$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     as.character(predict(modelFit, as.matrix(newdata))$class)
## }
## 
## $binda$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, as.matrix(newdata))$posterior
## }
## 
## $binda$varImp
## NULL
## 
## $binda$predictors
## function (x, ...) 
## rownames(x$logp0)
## 
## $binda$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else names(x$logfreqs)
## 
## $binda$tags
## [1] "Discriminant Analysis"  "Two Class Only"         "Binary Predictors Only"
## 
## $binda$sort
## function (x) 
## x
## 
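# binda is tagged "Binary Predictors Only" and "Two Class Only", so this sketch
# dichotomises the iris measurements at their medians purely to illustrate the
# interface; the 0/1 encoding is a synthetic assumption, not a recommendation:
library(caret)
two <- droplevels(subset(iris, Species != "setosa"))
xb <- as.data.frame(lapply(two[, 1:4], function(z) as.integer(z > median(z))))
fit_binda <- train(x = xb, y = two$Species, method = "binda",
                   tuneGrid = data.frame(lambda.freqs = c(0, 0.5, 1)),
                   trControl = trainControl(method = "cv", number = 3))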
## 
## $blackboost
## $blackboost$label
## [1] "Boosted Tree"
## 
## $blackboost$library
## [1] "party"    "mboost"   "plyr"     "partykit"
## 
## $blackboost$type
## [1] "Regression"     "Classification"
## 
## $blackboost$parameters
##   parameter   class          label
## 1     mstop numeric         #Trees
## 2  maxdepth numeric Max Tree Depth
## 
## $blackboost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(maxdepth = seq(1, len), mstop = floor((1:len) * 
##             50))
##     }
##     else {
##         out <- data.frame(mstop = sample(1:1000, replace = TRUE, 
##             size = len), maxdepth = sample(1:10, replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $blackboost$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, plyr::.(maxdepth), function(x) c(mstop = max(x$mstop)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$mstop)) {
##         index <- which(grid$maxdepth == loop$maxdepth[i])
##         subStops <- grid[index, "mstop"]
##         submodels[[i]] <- data.frame(mstop = subStops[subStops != 
##             loop$mstop[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $blackboost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (length(levels(y)) > 2) 
##         stop("Two-class outcomes only. See ?mboost::Multinomial", 
##             call. = FALSE)
##     if (any(names(theDots) == "tree_controls")) {
##         theDots$tree_controls$maxdepth <- param$maxdepth
##         treeCtl <- theDots$tree_controls
##         theDots$tree_controls <- NULL
##     }
##     else treeCtl <- partykit::ctree_control(maxdepth = param$maxdepth)
##     if (any(names(theDots) == "control")) {
##         theDots$control$mstop <- param$mstop
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- mboost::boost_control(mstop = param$mstop)
##     if (!any(names(theDots) == "family")) {
##         if (is.factor(y)) {
##             theDots$family <- if (length(lev) == 2) 
##                 mboost::Binomial()
##             else mboost::Multinomial()
##         }
##         else theDots$family <- mboost::GaussReg()
##     }
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = if (!is.data.frame(x)) as.data.frame(x, stringsAsFactors = TRUE) else x, 
##         control = ctl, tree_controls = treeCtl), theDots)
##     modelArgs$data$.outcome <- y
##     out <- do.call(mboost::blackboost, modelArgs)
##     out$call["data"] <- "data"
##     out
## }
## 
## $blackboost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predType <- ifelse(modelFit$problemType == "Classification", 
##         "class", "response")
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = predType)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- as.vector(out)
##         for (j in seq(along = submodels$mstop)) {
##             tmp[[j + 1]] <- as.vector(predict(modelFit[submodels$mstop[j]], 
##                 newdata, type = predType))
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $blackboost$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     probs <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - probs, probs)
##     colnames(out) <- modelFit$obsLevels
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$mstop)) {
##             tmpProb <- predict(modelFit[submodels$mstop[j]], 
##                 newdata, type = "response")
##             tmpProb <- cbind(1 - tmpProb, tmpProb)
##             colnames(tmpProb) <- modelFit$obsLevels
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $blackboost$predictors
## function (x, ...) 
## {
##     strsplit(variable.names(x), ", ")[[1]]
## }
## 
## $blackboost$levels
## function (x) 
## levels(x$response)
## 
## $blackboost$tags
## [1] "Tree-Based Model"     "Ensemble Model"       "Boosting"            
## [4] "Accepts Case Weights"
## 
## $blackboost$sort
## function (x) 
## x[order(x$mstop, x$maxdepth), ]
## 
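# blackboost boosts conditional inference trees; note that the fit code above
# stops on more than two classes, so a regression sketch is safest here
# (mstop/maxdepth values are illustrative):
library(caret)
fit_bb <- train(Ozone ~ Solar.R + Wind + Temp, data = na.omit(airquality),
                method = "blackboost",
                tuneGrid = expand.grid(mstop = c(50, 100), maxdepth = c(2, 3)),
                trControl = trainControl(method = "cv", number = 5))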
## 
## $blasso
## $blasso$label
## [1] "The Bayesian lasso"
## 
## $blasso$library
## [1] "monomvn"
## 
## $blasso$type
## [1] "Regression"
## 
## $blasso$parameters
##   parameter   class              label
## 1  sparsity numeric Sparsity Threshold
## 
## $blasso$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (len == 1) 
##         return(data.frame(sparsity = 0.5))
##     if (search == "grid") {
##         out <- expand.grid(sparsity = seq(0.3, 0.7, length = len))
##     }
##     else {
##         out <- data.frame(sparsity = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $blasso$loop
## function (grid) 
## {
##     grid <- grid[order(grid$sparsity, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $blasso$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     mod <- monomvn::blasso(as.matrix(x), y, ...)
##     mod$.percent <- apply(mod$beta, 2, function(x) mean(x != 
##         0))
##     mod$.sparsity <- param$sparsity
##     mod$.betas <- colMeans(mod$beta)
##     mod
## }
## 
## $blasso$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     betas <- modelFit$.betas
##     betas[modelFit$.percent <= modelFit$.sparsity] <- 0
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- (newdata %*% betas)[, 1]
##     if (modelFit$icept) 
##         out <- out + mean(modelFit$mu)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels))
##         for (i in 1:nrow(submodels)) {
##             betas <- modelFit$.betas
##             betas[modelFit$.percent <= submodels$sparsity[i]] <- 0
##             tmp[[i]] <- (newdata %*% betas)[, 1]
##             if (modelFit$icept) 
##                 tmp[[i]] <- tmp[[i]] + mean(modelFit$mu)
##         }
##         out <- c(list(out), tmp)
##     }
##     out
## }
## 
## $blasso$predictors
## function (x, s = NULL, ...) 
## {
##     x$xNames[x$.percent <= x$.sparsity]
## }
## 
## $blasso$notes
## [1] "This model creates predictions using the mean of the posterior distributions but sets some parameters specifically to zero based on the tuning parameter `sparsity`. For example, when `sparsity = .5`, only coefficients where at least half the posterior estimates are nonzero are used."
## 
## $blasso$tags
## [1] "Linear Regression"          "Bayesian Model"            
## [3] "Implicit Feature Selection" "L1 Regularization"         
## 
## $blasso$prob
## NULL
## 
## $blasso$sort
## function (x) 
## x[order(-x$sparsity), ]
## 
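# For blasso the posterior fit itself does not depend on `sparsity`; the loop
# above fits once per resample and merely re-thresholds the posterior for the
# remaining sparsity values. A short sketch (T, the number of MCMC draws, is
# kept small here only for speed):
library(caret)
fit_bl <- train(x = mtcars[, c("cyl", "disp", "hp", "wt")], y = mtcars$mpg,
                method = "blasso",
                tuneGrid = data.frame(sparsity = c(0.3, 0.5, 0.7)),
                trControl = trainControl(method = "cv", number = 3), T = 300)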
## 
## $blassoAveraged
## $blassoAveraged$label
## [1] "Bayesian Ridge Regression (Model Averaged)"
## 
## $blassoAveraged$library
## [1] "monomvn"
## 
## $blassoAveraged$type
## [1] "Regression"
## 
## $blassoAveraged$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $blassoAveraged$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $blassoAveraged$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     out <- monomvn::blasso(as.matrix(x), y, ...)
##     out
## }
## 
## $blassoAveraged$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- modelFit$beta %*% t(newdata)
##     if (modelFit$icept) 
##         out <- out + (matrix(1, ncol = ncol(out), nrow = nrow(out)) * 
##             modelFit$mu)
##     apply(out, 2, mean)
## }
## 
## $blassoAveraged$predictors
## function (x, s = NULL, ...) 
## {
##     x$xNames[apply(x$beta, 2, function(x) any(x != 0))]
## }
## 
## $blassoAveraged$notes
## [1] "This model makes predictions by averaging the predictions based on the posterior estimates of the regression coefficients. While it is possible that some of these posterior estimates are zero for non-informative predictors, the final predicted value may be a function of many (or even all) predictors. "
## 
## $blassoAveraged$tags
## [1] "Linear Regression" "Bayesian Model"    "L1 Regularization"
## 
## $blassoAveraged$prob
## NULL
## 
## $blassoAveraged$sort
## function (x) 
## x
## 
## 
## $bridge
## $bridge$label
## [1] "Bayesian Ridge Regression"
## 
## $bridge$library
## [1] "monomvn"
## 
## $bridge$type
## [1] "Regression"
## 
## $bridge$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $bridge$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $bridge$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     out <- monomvn::bridge(as.matrix(x), y, ...)
##     out
## }
## 
## $bridge$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- modelFit$beta %*% t(newdata)
##     if (modelFit$icept) 
##         out <- out + (matrix(1, ncol = ncol(out), nrow = nrow(out)) * 
##             modelFit$mu)
##     apply(out, 2, mean)
## }
## 
## $bridge$predictors
## function (x, s = NULL, ...) 
## {
##     x$xNames
## }
## 
## $bridge$tags
## [1] "Linear Regression" "Bayesian Model"    "L2 Regularization"
## 
## $bridge$prob
## NULL
## 
## $bridge$sort
## function (x) 
## x
## 
## 
## $brnn
## $brnn$label
## [1] "Bayesian Regularized Neural Networks"
## 
## $brnn$library
## [1] "brnn"
## 
## $brnn$type
## [1] "Regression"
## 
## $brnn$parameters
##   parameter   class     label
## 1   neurons numeric # Neurons
## 
## $brnn$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(neurons = 1:len)
##     }
##     else {
##         out <- data.frame(neurons = sample(1:20, replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $brnn$loop
## NULL
## 
## $brnn$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## brnn::brnn(as.matrix(x), y, neurons = param$neurons, ...)
## 
## $brnn$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, as.matrix(newdata))
## 
## $brnn$prob
## NULL
## 
## $brnn$predictors
## function (x, s = NULL, ...) 
## names(x$x_spread)
## 
## $brnn$tags
## [1] "Bayesian Model" "Neural Network" "Regularization"
## 
## $brnn$sort
## function (x) 
## x[order(x[, 1]), ]
## 
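# The brnn entry has a single tuning parameter, the number of hidden neurons;
# a minimal regression sketch (neuron counts illustrative):
library(caret)
fit_brnn <- train(x = mtcars[, c("disp", "hp", "wt")], y = mtcars$mpg,
                  method = "brnn", tuneGrid = data.frame(neurons = 1:3),
                  trControl = trainControl(method = "cv", number = 3))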
## 
## $BstLm
## $BstLm$label
## [1] "Boosted Linear Model"
## 
## $BstLm$library
## [1] "bst"  "plyr"
## 
## $BstLm$type
## [1] "Regression"     "Classification"
## 
## $BstLm$parameters
##   parameter   class                 label
## 1     mstop numeric # Boosting Iterations
## 2        nu numeric             Shrinkage
## 
## $BstLm$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(mstop = floor((1:len) * 50), nu = 0.1)
##     }
##     else {
##         out <- data.frame(mstop = floor(runif(len, min = 1, max = 500)), 
##             nu = runif(len, min = 0.001, max = 0.6))
##     }
##     out
## }
## 
## $BstLm$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, plyr::.(nu), function(x) c(mstop = max(x$mstop)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$mstop)) {
##         index <- which(grid$nu == loop$nu[i])
##         subTrees <- grid[index, "mstop"]
##         submodels[[i]] <- data.frame(mstop = subTrees[subTrees != 
##             loop$mstop[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $BstLm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     theDots <- list(...)
##     modDist <- if (is.factor(y)) 
##         "hinge"
##     else "gaussian"
##     y <- if (is.factor(y)) 
##         ifelse(y == lev[1], 1, -1)
##     else y
##     if (any(names(theDots) == "ctrl")) {
##         theDots$ctrl$mstop <- param$mstop
##         theDots$ctrl$nu <- param$nu
##     }
##     else {
##         theDots$ctrl <- bst::bst_control(mstop = param$mstop, 
##             nu = param$nu)
##     }
##     modArgs <- list(x = x, y = y, family = modDist, learner = "ls")
##     modArgs <- c(modArgs, theDots)
##     out <- do.call(bst::bst, modArgs)
##     out$call <- quote(redacted)
##     out
## }
## 
## $BstLm$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Classification") {
##         out <- predict(modelFit, newdata, type = "class", mstop = modelFit$submodels$mstop)
##         out <- ifelse(out == 1, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "response", 
##             mstop = modelFit$submodels$mstop)
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$mstop)) {
##             if (modelFit$problemType == "Classification") {
##                 bstPred <- predict(modelFit, newdata, type = "class", 
##                   mstop = submodels$mstop[j])
##                 tmp[[j + 1]] <- ifelse(bstPred == 1, modelFit$obsLevels[1], 
##                   modelFit$obsLevels[2])
##             }
##             else {
##                 tmp[[j + 1]] <- predict(modelFit, newdata, type = "response", 
##                   mstop = submodels$mstop[j])
##             }
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $BstLm$levels
## function (x) 
## x$obsLevels
## 
## $BstLm$tags
## [1] "Linear Regression"          "Ensemble Model"            
## [3] "Boosting"                   "Implicit Feature Selection"
## 
## $BstLm$prob
## NULL
## 
## $BstLm$sort
## function (x) 
## x[order(x$mstop, x$nu), ]
## 
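# BstLm's grid function has a random branch, so instead of a fixed tuneGrid one
# can let train() draw mstop/nu pairs with search = "random"; a hedged sketch:
library(caret)
fit_bstlm <- train(Ozone ~ ., data = na.omit(airquality), method = "BstLm",
                   tuneLength = 5,
                   trControl = trainControl(method = "cv", number = 5,
                                            search = "random"))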
## 
## $bstSm
## $bstSm$label
## [1] "Boosted Smoothing Spline"
## 
## $bstSm$library
## [1] "bst"  "plyr"
## 
## $bstSm$type
## [1] "Regression"     "Classification"
## 
## $bstSm$parameters
##   parameter   class                 label
## 1     mstop numeric # Boosting Iterations
## 2        nu numeric             Shrinkage
## 
## $bstSm$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(mstop = floor((1:len) * 50), nu = 0.1)
##     }
##     else {
##         out <- data.frame(mstop = sample(1:500, replace = TRUE, 
##             size = len), nu = runif(len, min = 0.001, max = 0.6))
##     }
##     out
## }
## 
## $bstSm$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, plyr::.(nu), function(x) c(mstop = max(x$mstop)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$mstop)) {
##         index <- which(grid$nu == loop$nu[i])
##         subTrees <- grid[index, "mstop"]
##         submodels[[i]] <- data.frame(mstop = subTrees[subTrees != 
##             loop$mstop[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $bstSm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     theDots <- list(...)
##     modDist <- if (is.factor(y)) 
##         "hinge"
##     else "gaussian"
##     y <- if (is.factor(y)) 
##         ifelse(y == lev[1], 1, -1)
##     else y
##     if (any(names(theDots) == "ctrl")) {
##         theDots$ctrl$mstop <- param$mstop
##         theDots$ctrl$nu <- param$nu
##     }
##     else {
##         theDots$ctrl <- bst::bst_control(mstop = param$mstop, 
##             nu = param$nu)
##     }
##     modArgs <- list(x = x, y = y, family = modDist, learner = "sm")
##     modArgs <- c(modArgs, theDots)
##     out <- do.call(bst::bst, modArgs)
##     out$call <- quote(redacted)
##     out
## }
## 
## $bstSm$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Classification") {
##         out <- predict(modelFit, newdata, type = "class", mstop = modelFit$submodels$mstop)
##         out <- ifelse(out == 1, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "response", 
##             mstop = modelFit$submodels$mstop)
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$mstop)) {
##             if (modelFit$problemType == "Classification") {
##                 bstPred <- predict(modelFit, newdata, type = "class", 
##                   mstop = submodels$mstop[j])
##                 tmp[[j + 1]] <- ifelse(bstPred == 1, modelFit$obsLevels[1], 
##                   modelFit$obsLevels[2])
##             }
##             else {
##                 tmp[[j + 1]] <- predict(modelFit, newdata, type = "response", 
##                   mstop = submodels$mstop[j])
##             }
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $bstSm$levels
## function (x) 
## x$obsLevels
## 
## $bstSm$tags
## [1] "Ensemble Model"             "Boosting"                  
## [3] "Implicit Feature Selection"
## 
## $bstSm$prob
## NULL
## 
## $bstSm$sort
## function (x) 
## x[order(x$mstop, x$nu), ]
## 
## 
## $bstTree
## $bstTree$label
## [1] "Boosted Tree"
## 
## $bstTree$library
## [1] "bst"  "plyr"
## 
## $bstTree$type
## [1] "Regression"     "Classification"
## 
## $bstTree$parameters
##   parameter   class                 label
## 1     mstop numeric # Boosting Iterations
## 2  maxdepth numeric        Max Tree Depth
## 3        nu numeric             Shrinkage
## 
## $bstTree$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(mstop = floor((1:len) * 50), maxdepth = seq(1, 
##             len), nu = 0.1)
##     }
##     else {
##         out <- data.frame(mstop = sample(1:500, replace = TRUE, 
##             size = len), maxdepth = sample(1:10, replace = TRUE, 
##             size = len), nu = runif(len, min = 0.001, max = 0.6))
##     }
##     out
## }
## 
## $bstTree$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, plyr::.(maxdepth, nu), function(x) c(mstop = max(x$mstop)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$mstop)) {
##         index <- which(grid$maxdepth == loop$maxdepth[i] & grid$nu == 
##             loop$nu[i])
##         subTrees <- grid[index, "mstop"]
##         submodels[[i]] <- data.frame(mstop = subTrees[subTrees != 
##             loop$mstop[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $bstTree$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     modDist <- if (is.factor(y)) 
##         "hinge"
##     else "gaussian"
##     y <- if (is.factor(y)) 
##         ifelse(y == lev[1], 1, -1)
##     else y
##     if (any(names(theDots) == "ctrl")) {
##         theDots$ctrl$mstop <- param$mstop
##         theDots$ctrl$nu <- param$nu
##     }
##     else {
##         theDots$ctrl <- bst::bst_control(mstop = param$mstop, 
##             nu = param$nu)
##     }
##     if (any(names(theDots) == "control.tree")) {
##         theDots$control.tree$maxdepth <- param$maxdepth
##     }
##     else {
##         theDots$control.tree <- list(maxdepth = param$maxdepth)
##     }
##     modArgs <- list(x = x, y = y, family = modDist, learner = "tree")
##     modArgs <- c(modArgs, theDots)
##     do.call(bst::bst, modArgs)
## }
## 
## $bstTree$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         out <- predict(modelFit, newdata, type = "class", mstop = modelFit$submodels$mstop)
##         out <- ifelse(out == 1, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "response", 
##             mstop = modelFit$submodels$mstop)
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$mstop)) {
##             if (modelFit$problemType == "Classification") {
##                 bstPred <- predict(modelFit, newdata, type = "class", 
##                   mstop = submodels$mstop[j])
##                 tmp[[j + 1]] <- ifelse(bstPred == 1, modelFit$obsLevels[1], 
##                   modelFit$obsLevels[2])
##             }
##             else {
##                 tmp[[j + 1]] <- predict(modelFit, newdata, type = "response", 
##                   mstop = submodels$mstop[j])
##             }
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $bstTree$levels
## function (x) 
## x$obsLevels
## 
## $bstTree$tags
## [1] "Tree-Based Model" "Ensemble Model"   "Boosting"        
## 
## $bstTree$prob
## NULL
## 
## $bstTree$sort
## function (x) 
## x[order(x$mstop, x$maxdepth, x$nu), ]
## 
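# As with the other bst variants, the loop function above means only the largest
# mstop per (maxdepth, nu) pair is actually fit; smaller mstop values are scored
# as submodels of that fit. A sketch (grid values illustrative):
library(caret)
grid_bt <- expand.grid(mstop = c(50, 100, 150), maxdepth = 1:2, nu = 0.1)
fit_bt <- train(Ozone ~ ., data = na.omit(airquality), method = "bstTree",
                tuneGrid = grid_bt,
                trControl = trainControl(method = "cv", number = 3))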
## 
## $C5.0
## $C5.0$label
## [1] "C5.0"
## 
## $C5.0$library
## [1] "C50"  "plyr"
## 
## $C5.0$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, c("model", "winnow"), function(x) c(trials = max(x$trials)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$trials)) {
##         index <- which(grid$model == loop$model[i] & grid$winnow == 
##             loop$winnow[i])
##         trials <- grid[index, "trials"]
##         submodels[[i]] <- data.frame(trials = trials[trials != 
##             loop$trials[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $C5.0$type
## [1] "Classification"
## 
## $C5.0$parameters
##   parameter     class                 label
## 1    trials   numeric # Boosting Iterations
## 2     model character            Model Type
## 3    winnow   logical                Winnow
## 
## $C5.0$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         c5seq <- if (len == 1) 
##             1
##         else c(1, 10 * ((2:min(len, 11)) - 1))
##         out <- expand.grid(trials = c5seq, model = c("tree", 
##             "rules"), winnow = c(TRUE, FALSE))
##     }
##     else {
##         out <- data.frame(trials = sample(1:100, replace = TRUE, 
##             size = len), model = sample(c("tree", "rules"), replace = TRUE, 
##             size = len), winnow = sample(c(TRUE, FALSE), replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $C5.0$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$winnow <- param$winnow
##     }
##     else theDots$control <- C50::C5.0Control(winnow = param$winnow)
##     argList <- list(x = x, y = y, weights = wts, trials = param$trials, 
##         rules = param$model == "rules")
##     argList <- c(argList, theDots)
##     do.call(C50:::C5.0.default, argList)
## }
## 
## $C5.0$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     if (!is.null(submodels)) {
##         tmp <- out
##         out <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         out[[1]] <- tmp
##         for (j in seq(along = submodels$trials)) out[[j + 1]] <- as.character(predict(modelFit, 
##             newdata, trial = submodels$trials[j]))
##     }
##     out
## }
## 
## $C5.0$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, type = "prob")
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$trials)) {
##             tmp[[j + 1]] <- predict(modelFit, newdata, type = "prob", 
##                 trials = submodels$trials[j])
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $C5.0$levels
## function (x) 
## x$obsLevels
## 
## $C5.0$predictors
## function (x, ...) 
## {
##     vars <- C50::C5imp(x, metric = "splits")
##     rownames(vars)[vars$Overall > 0]
## }
## 
## $C5.0$varImp
## function (object, ...) 
## C50::C5imp(object, ...)
## 
## $C5.0$tags
## [1] "Tree-Based Model"              "Rule-Based Model"             
## [3] "Implicit Feature Selection"    "Boosting"                     
## [5] "Ensemble Model"                "Handle Missing Predictor Data"
## [7] "Accepts Case Weights"         
## 
## $C5.0$sort
## function (x) 
## {
##     x$model <- factor(as.character(x$model), levels = c("rules", 
##         "tree"))
##     x[order(x$trials, x$model, !x$winnow), ]
## }
## 
## $C5.0$trim
## function (x) 
## {
##     x$boostResults <- NULL
##     x$size <- NULL
##     x$call <- NULL
##     x$output <- NULL
##     x
## }
## 
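# A C5.0 sketch exercising the trials/model/winnow grid above, plus the prob
# and varImp hooks (grid values illustrative):
library(caret)
fit_c50 <- train(Species ~ ., data = iris, method = "C5.0",
                 tuneGrid = expand.grid(trials = c(1, 10),
                                        model = c("tree", "rules"),
                                        winnow = FALSE),
                 trControl = trainControl(method = "cv", number = 3))
predict(fit_c50, head(iris), type = "prob")
varImp(fit_c50)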
## 
## $C5.0Cost
## $C5.0Cost$label
## [1] "Cost-Sensitive C5.0"
## 
## $C5.0Cost$library
## [1] "C50"  "plyr"
## 
## $C5.0Cost$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, c("model", "winnow", "cost"), function(x) c(trials = max(x$trials)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$trials)) {
##         index <- which(grid$model == loop$model[i] & grid$winnow == 
##             loop$winnow[i] & grid$cost == loop$cost[i])
##         trials <- grid[index, "trials"]
##         submodels[[i]] <- data.frame(trials = trials[trials != 
##             loop$trials[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $C5.0Cost$type
## [1] "Classification"
## 
## $C5.0Cost$parameters
##   parameter     class                 label
## 1    trials   numeric # Boosting Iterations
## 2     model character            Model Type
## 3    winnow   logical                Winnow
## 4      cost   numeric                  Cost
## 
## $C5.0Cost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         c5seq <- if (len == 1) 
##             1
##         else c(1, 10 * ((2:min(len, 11)) - 1))
##         out <- expand.grid(trials = c5seq, model = c("tree", 
##             "rules"), winnow = c(TRUE, FALSE), cost = 1:2)
##     }
##     else {
##         out <- data.frame(trials = sample(1:100, replace = TRUE, 
##             size = len), model = sample(c("tree", "rules"), replace = TRUE, 
##             size = len), winnow = sample(c(TRUE, FALSE), replace = TRUE, 
##             size = len), cost = runif(len, min = 1, max = 20))
##     }
##     out
## }
## 
## $C5.0Cost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$winnow <- param$winnow
##     }
##     else theDots$control <- C50::C5.0Control(winnow = param$winnow)
##     argList <- list(x = x, y = y, weights = wts, trials = param$trials, 
##         rules = param$model == "rules")
##     cmat <- matrix(c(0, param$cost, 1, 0), ncol = 2)
##     rownames(cmat) <- colnames(cmat) <- levels(y)
##     if (any(names(theDots) == "cost")) {
##         warning("For 'C5.0Cost', the costs are a tuning parameter")
##         theDots$costs <- cmat
##     }
##     else argList$costs <- cmat
##     argList <- c(argList, theDots)
##     do.call(C50:::C5.0.default, argList)
## }
## 
## $C5.0Cost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     if (!is.null(submodels)) {
##         tmp <- out
##         out <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         out[[1]] <- tmp
##         for (j in seq(along = submodels$trials)) out[[j + 1]] <- as.character(predict(modelFit, 
##             newdata, trial = submodels$trials[j]))
##     }
##     out
## }
## 
## $C5.0Cost$prob
## NULL
## 
## $C5.0Cost$predictors
## function (x, ...) 
## {
##     vars <- C50::C5imp(x, metric = "splits")
##     rownames(vars)[vars$Overall > 0]
## }
## 
## $C5.0Cost$levels
## function (x) 
## x$obsLevels
## 
## $C5.0Cost$varImp
## function (object, ...) 
## C50::C5imp(object, ...)
## 
## $C5.0Cost$tags
## [1] "Tree-Based Model"              "Rule-Based Model"             
## [3] "Implicit Feature Selection"    "Boosting"                     
## [5] "Ensemble Model"                "Cost Sensitive Learning"      
## [7] "Two Class Only"                "Handle Missing Predictor Data"
## [9] "Accepts Case Weights"         
## 
## $C5.0Cost$sort
## function (x) 
## {
##     x$model <- factor(as.character(x$model), levels = c("rules", 
##         "tree"))
##     x[order(x$trials, x$model, !x$winnow, x$cost), ]
## }
## 
## $C5.0Cost$trim
## function (x) 
## {
##     x$boostResults <- NULL
##     x$size <- NULL
##     x$call <- NULL
##     x$output <- NULL
##     x
## }
## 
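# C5.0Cost turns the misclassification cost into a tuning parameter (note that
# prob is NULL above, so only hard class predictions are available). A
# two-class sketch with illustrative cost values:
library(caret)
two <- droplevels(subset(iris, Species != "setosa"))
fit_cost <- train(Species ~ ., data = two, method = "C5.0Cost",
                  tuneGrid = expand.grid(trials = 1, model = "tree",
                                         winnow = FALSE, cost = c(1, 2, 4)),
                  trControl = trainControl(method = "cv", number = 3))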
## 
## $C5.0Rules
## $C5.0Rules$label
## [1] "Single C5.0 Ruleset"
## 
## $C5.0Rules$library
## [1] "C50"
## 
## $C5.0Rules$loop
## NULL
## 
## $C5.0Rules$type
## [1] "Classification"
## 
## $C5.0Rules$parameters
##   parameter     class label
## 1 parameter character  none
## 
## $C5.0Rules$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $C5.0Rules$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## C50:::C5.0.default(x = x, y = y, weights = wts, rules = TRUE, 
##     ...)
## 
## $C5.0Rules$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $C5.0Rules$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $C5.0Rules$predictors
## function (x, ...) 
## {
##     vars <- C50::C5imp(x, metric = "splits")
##     rownames(vars)[vars$Overall > 0]
## }
## 
## $C5.0Rules$levels
## function (x) 
## x$obsLevels
## 
## $C5.0Rules$varImp
## function (object, ...) 
## C50::C5imp(object, ...)
## 
## $C5.0Rules$tags
## [1] "Rule-Based Model"              "Implicit Feature Selection"   
## [3] "Handle Missing Predictor Data" "Accepts Case Weights"         
## 
## $C5.0Rules$trim
## function (x) 
## {
##     x$boostResults <- NULL
##     x$size <- NULL
##     x$call <- NULL
##     x$output <- NULL
##     x
## }
## 
## $C5.0Rules$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $C5.0Tree
## $C5.0Tree$label
## [1] "Single C5.0 Tree"
## 
## $C5.0Tree$library
## [1] "C50"
## 
## $C5.0Tree$loop
## NULL
## 
## $C5.0Tree$type
## [1] "Classification"
## 
## $C5.0Tree$parameters
##   parameter     class label
## 1 parameter character  none
## 
## $C5.0Tree$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $C5.0Tree$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## C50:::C5.0.default(x = x, y = y, weights = wts, ...)
## 
## $C5.0Tree$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $C5.0Tree$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $C5.0Tree$predictors
## function (x, ...) 
## {
##     vars <- C50::C5imp(x, metric = "splits")
##     rownames(vars)[vars$Overall > 0]
## }
## 
## $C5.0Tree$levels
## function (x) 
## x$obsLevels
## 
## $C5.0Tree$varImp
## function (object, ...) 
## C50::C5imp(object, ...)
## 
## $C5.0Tree$tags
## [1] "Tree-Based Model"              "Implicit Feature Selection"   
## [3] "Handle Missing Predictor Data" "Accepts Case Weights"         
## 
## $C5.0Tree$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## $C5.0Tree$trim
## function (x) 
## {
##     x$boostResults <- NULL
##     x$size <- NULL
##     x$call <- NULL
##     x$output <- NULL
##     x
## }
## 
## 
## $cforest
## $cforest$label
## [1] "Conditional Inference Random Forest"
## 
## $cforest$library
## [1] "party"
## 
## $cforest$loop
## NULL
## 
## $cforest$type
## [1] "Classification" "Regression"    
## 
## $cforest$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 
## $cforest$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(mtry = unique(sample(1:ncol(x), replace = TRUE, 
##             size = len)))
##     }
##     out
## }
## 
## $cforest$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (any(names(theDots) == "controls")) {
##         theDots$controls@gtctrl@mtry <- as.integer(param$mtry)
##         ctl <- theDots$controls
##         theDots$controls <- NULL
##     }
##     else ctl <- party::cforest_control(mtry = min(param$mtry, 
##         ncol(x)))
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(.outcome ~ .), data = dat, 
##         controls = ctl), theDots)
##     out <- do.call(party::cforest, modelArgs)
##     out
## }
## 
## $cforest$predict
## function (modelFit, newdata = NULL, submodels = NULL) 
## {
##     if (!is.null(newdata) && !is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- party:::predict.RandomForest(modelFit, newdata = newdata, 
##         OOB = TRUE)
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     if (!is.null(modelFit@responses@levels$.outcome)) 
##         out <- as.character(out)
##     out
## }
## 
## $cforest$prob
## function (modelFit, newdata = NULL, submodels = NULL) 
## {
##     if (!is.null(newdata) && !is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     obsLevels <- levels(modelFit@data@get("response")[, 1])
##     rawProbs <- party::treeresponse(modelFit, newdata = newdata, 
##         OOB = TRUE)
##     probMatrix <- matrix(unlist(rawProbs), ncol = length(obsLevels), 
##         byrow = TRUE)
##     out <- data.frame(probMatrix)
##     colnames(out) <- obsLevels
##     rownames(out) <- NULL
##     out
## }
## 
## $cforest$predictors
## function (x, ...) 
## {
##     vi <- party::varimp(x, ...)
##     names(vi)[vi != 0]
## }
## 
## $cforest$varImp
## function (object, ...) 
## {
##     variableImp <- party::varimp(object, ...)
##     out <- data.frame(Overall = variableImp)
##     out
## }
## 
## $cforest$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## [5] "Accepts Case Weights"      
## 
## $cforest$levels
## function (x) 
## levels(x@data@get("response")[, 1])
## 
## $cforest$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## $cforest$oob
## function (x) 
## {
##     obs <- x@data@get("response")[, 1]
##     pred <- party:::predict.RandomForest(x, OOB = TRUE)
##     postResample(pred, obs)
## }
## 
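# cforest sketch: mtry is the only tuning parameter, and the oob hook above
# means out-of-bag performance is also available. Values illustrative:
library(caret)
fit_cf <- train(Species ~ ., data = iris, method = "cforest",
                tuneGrid = data.frame(mtry = c(2, 3)),
                trControl = trainControl(method = "cv", number = 3))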
## 
## $chaid
## $chaid$label
## [1] "CHi-squared Automated Interaction Detection"
## 
## $chaid$library
## [1] "CHAID"
## 
## $chaid$loop
## NULL
## 
## $chaid$type
## [1] "Classification"
## 
## $chaid$parameters
##   parameter   class                             label
## 1    alpha2 numeric                 Merging Threshold
## 2    alpha3 numeric Splitting former Merged Threshold
## 3    alpha4 numeric Splitting former Merged Threshold
## 
## $chaid$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(alpha2 = seq(from = 0.05, to = 0.01, 
##             length = len), alpha3 = -1, alpha4 = seq(from = 0.05, 
##             to = 0.01, length = len))
##     }
##     else {
##         out <- data.frame(alpha2 = runif(len, min = 1e-06, max = 0.1), 
##             alpha3 = runif(len, min = -0.1, max = 0.1), alpha4 = runif(len, 
##                 min = 1e-06, max = 0.1))
##     }
##     out
## }
## 
## $chaid$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$alpha2 <- param$alpha2
##         theDots$control$alpha3 <- param$alpha3
##         theDots$control$alpha4 <- param$alpha4
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- CHAID::chaid_control(alpha2 = param$alpha2, alpha3 = param$alpha3, 
##         alpha4 = param$alpha4)
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, control = ctl), theDots)
##     out <- do.call(CHAID::chaid, modelArgs)
##     out
## }
## 
## $chaid$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $chaid$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "prob")
## }
## 
## $chaid$levels
## function (x) 
## x$obsLevels
## 
## $chaid$predictors
## function (x, surrogate = TRUE, ...) 
## {
##     predictors(terms(x))
## }
## 
## $chaid$tags
## [1] "Tree-Based Model"           "Implicit Feature Selection"
## [3] "Two Class Only"             "Accepts Case Weights"      
## 
## $chaid$sort
## function (x) 
## x[order(-x$alpha2, -x$alpha4, -x$alpha3), ]
## 
## 
## $CSimca
## $CSimca$label
## [1] "SIMCA"
## 
## $CSimca$library
## [1] "rrcov"   "rrcovHD"
## 
## $CSimca$loop
## NULL
## 
## $CSimca$type
## [1] "Classification"
## 
## $CSimca$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $CSimca$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     data.frame(parameter = "none")
## }
## 
## $CSimca$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## rrcovHD::CSimca(x, y, ...)
## 
## $CSimca$predict
## function (modelFit, newdata, submodels = NULL) 
## rrcov::predict(modelFit, newdata)@classification
## 
## $CSimca$prob
## NULL
## 
## $CSimca$tags
## [1] "Robust Model"
## 
## $CSimca$levels
## function (x) 
## names(x@prior)
## 
## $CSimca$sort
## function (x) 
## x
## 
## 
## $ctree
## $ctree$label
## [1] "Conditional Inference Tree"
## 
## $ctree$library
## [1] "party"
## 
## $ctree$loop
## NULL
## 
## $ctree$type
## [1] "Classification" "Regression"    
## 
## $ctree$parameters
##      parameter   class                 label
## 1 mincriterion numeric 1 - P-Value Threshold
## 
## $ctree$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mincriterion = seq(from = 0.99, to = 0.01, 
##             length = len))
##     }
##     else {
##         out <- data.frame(mincriterion = runif(len, min = 0, 
##             max = 1))
##     }
##     out
## }
## 
## $ctree$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (any(names(theDots) == "controls")) {
##         theDots$controls@gtctrl@mincriterion <- param$mincriterion
##         ctl <- theDots$controls
##         theDots$controls <- NULL
##     }
##     else ctl <- do.call(getFromNamespace("ctree_control", "party"), 
##         list(mincriterion = param$mincriterion))
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, controls = ctl), theDots)
##     out <- do.call(party::ctree, modelArgs)
##     out
## }
## 
## $ctree$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata)
##     if (!is.null(modelFit@responses@levels$.outcome)) 
##         out <- as.character(out)
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $ctree$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     obsLevels <- levels(modelFit@data@get("response")[, 1])
##     rawProbs <- party::treeresponse(modelFit, newdata)
##     probMatrix <- matrix(unlist(rawProbs), ncol = length(obsLevels), 
##         byrow = TRUE)
##     out <- data.frame(probMatrix)
##     colnames(out) <- obsLevels
##     rownames(out) <- NULL
##     out
## }
## 
## $ctree$predictors
## function (x, surrogate = TRUE, ...) 
## {
##     treeObj <- unlist(nodes(x, 1))
##     target <- "psplit\\.variableName"
##     vars <- treeObj[grep(target, names(treeObj))]
##     if (surrogate) {
##         target2 <- "ssplits\\.variableName"
##         svars <- treeObj[grep(target2, names(treeObj))]
##         vars <- c(vars, svars)
##     }
##     unique(vars)
## }
## 
## $ctree$tags
## [1] "Tree-Based Model"           "Implicit Feature Selection"
## [3] "Accepts Case Weights"      
## 
## $ctree$levels
## function (x) 
## levels(x@data@get("response")[, 1])
## 
## $ctree$sort
## function (x) 
## x[order(-x$mincriterion), ]
## 
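# ctree tunes mincriterion (one minus the p-value threshold for splitting); a
# minimal sketch with illustrative thresholds:
library(caret)
fit_ct <- train(Species ~ ., data = iris, method = "ctree",
                tuneGrid = data.frame(mincriterion = c(0.5, 0.9, 0.99)),
                trControl = trainControl(method = "cv", number = 3))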
## 
## $ctree2
## $ctree2$label
## [1] "Conditional Inference Tree"
## 
## $ctree2$library
## [1] "party"
## 
## $ctree2$loop
## NULL
## 
## $ctree2$type
## [1] "Regression"     "Classification"
## 
## $ctree2$parameters
##      parameter   class                 label
## 1     maxdepth numeric        Max Tree Depth
## 2 mincriterion numeric 1 - P-Value Threshold
## 
## $ctree2$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(maxdepth = 1:len, mincriterion = seq(from = 0.99, 
##             to = 0.01, length = len))
##     }
##     else {
##         out <- data.frame(maxdepth = sample(1:15, replace = TRUE, 
##             size = len), mincriterion = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $ctree2$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (any(names(theDots) == "controls")) {
##         theDots$controls@tgctrl@maxdepth <- param$maxdepth
##         theDots$controls@gtctrl@mincriterion <- param$mincriterion
##         ctl <- theDots$controls
##         theDots$controls <- NULL
##     }
##     else ctl <- do.call(party::ctree_control, list(maxdepth = param$maxdepth, 
##         mincriterion = param$mincriterion))
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, controls = ctl), theDots)
##     out <- do.call(party::ctree, modelArgs)
##     out
## }
## 
## $ctree2$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata)
##     if (!is.null(modelFit@responses@levels$.outcome)) 
##         out <- as.character(out)
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $ctree2$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     obsLevels <- levels(modelFit@data@get("response")[, 1])
##     rawProbs <- party::treeresponse(modelFit, newdata)
##     probMatrix <- matrix(unlist(rawProbs), ncol = length(obsLevels), 
##         byrow = TRUE)
##     out <- data.frame(probMatrix)
##     colnames(out) <- obsLevels
##     rownames(out) <- NULL
##     out
## }
## 
## $ctree2$predictors
## function (x, surrogate = TRUE, ...) 
## {
##     treeObj <- unlist(nodes(x, 1))
##     target <- "psplit\\.variableName"
##     vars <- treeObj[grep(target, names(treeObj))]
##     if (surrogate) {
##         target2 <- "ssplits\\.variableName"
##         svars <- treeObj[grep(target, names(treeObj))]
##         vars <- c(vars, svars)
##     }
##     unique(vars)
## }
## 
## $ctree2$tags
## [1] "Tree-Based Model"           "Implicit Feature Selection"
## [3] "Accepts Case Weights"      
## 
## $ctree2$levels
## function (x) 
## levels(x@data@get("response")[, 1])
## 
## $ctree2$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
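# "ctree2" exposes maxdepth in addition to mincriterion; an illustrative
# sketch, again assuming party is installed.
ctree2_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "ctree2",
                           tuneGrid = expand.grid(maxdepth = 1:3, mincriterion = 0.95))
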
## $cubist
## $cubist$label
## [1] "Cubist"
## 
## $cubist$library
## [1] "Cubist"
## 
## $cubist$loop
## function (grid) 
## {
##     grid <- grid[order(-grid$committees, grid$neighbors, decreasing = TRUE), 
##         , drop = FALSE]
##     uniqueCom <- unique(grid$committees)
##     loop <- data.frame(committees = uniqueCom)
##     loop$neighbors <- NA
##     submodels <- vector(mode = "list", length = length(uniqueCom))
##     for (i in seq(along = uniqueCom)) {
##         subK <- grid[grid$committees == uniqueCom[i], "neighbors"]
##         loop$neighbors[loop$committees == uniqueCom[i]] <- subK[which.max(subK)]
##         submodels[[i]] <- data.frame(neighbors = subK[-which.max(subK)])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $cubist$type
## [1] "Regression"
## 
## $cubist$parameters
##    parameter   class       label
## 1 committees numeric #Committees
## 2  neighbors numeric  #Instances
## 
## $cubist$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(neighbors = c(0, 5, 9), committees = c(1, 
##             10, 20))
##     }
##     else {
##         out <- data.frame(neighbors = sample(0:9, replace = TRUE, 
##             size = len), committees = sample(1:100, replace = TRUE, 
##             size = len))
##     }
## }
## 
## $cubist$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     out <- Cubist::cubist(x, y, committees = param$committees, 
##         ...)
##     if (last) 
##         out$tuneValue$neighbors <- param$neighbors
##     out
## }
## 
## $cubist$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, neighbors = modelFit$tuneValue$neighbors)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$neighbors)) tmp[[j + 
##             1]] <- predict(modelFit, newdata, neighbors = submodels$neighbors[j])
##         out <- tmp
##     }
##     out
## }
## 
## $cubist$varImp
## function (object, weights = c(0.5, 0.5), ...) 
## {
##     if (length(weights) != 2) 
##         stop("two weights must be given")
##     weights <- weights/sum(weights)
##     out <- data.frame(Overall = object$usage$Conditions * weights[1] + 
##         object$usage$Model * weights[2])
##     rownames(out) <- object$usage$Variable
##     out
## }
## 
## $cubist$predictors
## function (x, ...) 
## {
##     subset(x$usage, Conditions > 0 | Model > 0)$Variable
## }
## 
## $cubist$tags
## [1] "Rule-Based Model"           "Boosting"                  
## [3] "Ensemble Model"             "Prototype Models"          
## [5] "Model Tree"                 "Linear Regression"         
## [7] "Implicit Feature Selection"
## 
## $cubist$prob
## NULL
## 
## $cubist$sort
## function (x) 
## x[order(x$committees, x$neighbors), ]
## 
## 
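# A Cubist sketch over the committees/neighbors parameters listed above,
# assuming the Cubist package is installed; values are illustrative.
cubist_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "cubist",
                           tuneGrid = expand.grid(committees = c(1, 10), neighbors = c(0, 5)))
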
## $dda
## $dda$label
## [1] "Diagonal Discriminant Analysis"
## 
## $dda$library
## [1] "sparsediscrim"
## 
## $dda$loop
## NULL
## 
## $dda$type
## [1] "Classification"
## 
## $dda$parameters
##   parameter     class          label
## 1     model character          Model
## 2 shrinkage character Shrinkage Type
## 
## $dda$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(model = rep(c("Linear", "Quadratic"), each = 3), shrinkage = rep(c("None", 
##     "Variance", "Mean"), 2))
## 
## $dda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (param$model == "Linear") {
##         if (param$shrinkage == "None") {
##             out <- sparsediscrim::dlda(x, y, ...)
##         }
##         else {
##             if (param$shrinkage == "Variance") {
##                 out <- sparsediscrim::sdlda(x, y, ...)
##             }
##             else out <- sparsediscrim::smdlda(x, y, ...)
##         }
##     }
##     else {
##         if (param$shrinkage == "None") {
##             out <- sparsediscrim::dqda(x, y, ...)
##         }
##         else {
##             if (param$shrinkage == "Variance") {
##                 out <- sparsediscrim::sdqda(x, y, ...)
##             }
##             else out <- sparsediscrim::smdqda(x, y, ...)
##         }
##     }
##     out
## }
## 
## $dda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$class
## 
## $dda$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$scores
##     as.data.frame(t(apply(out, 2, function(x) exp(-x)/sum(exp(-x)))), 
##         stringsAsFactors = TRUE)
## }
## 
## $dda$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else colnames(x$means)
## 
## $dda$tags
## [1] "Discriminant Analysis" "Linear Classifier"     "Polynomial Model"     
## [4] "Regularization"       
## 
## $dda$levels
## function (x) 
## names(x$prior)
## 
## $dda$sort
## function (x) 
## x
## 
## 
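# A diagonal discriminant analysis sketch, assuming sparsediscrim is
# installed; the shrinkage values mirror the grid code above.
dda_fit <- caret::train(Species ~ ., data = iris, method = "dda",
                        tuneGrid = data.frame(model = "Linear",
                                              shrinkage = c("None", "Variance", "Mean")))
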
## $deepboost
## $deepboost$label
## [1] "DeepBoost"
## 
## $deepboost$library
## [1] "deepboost"
## 
## $deepboost$loop
## NULL
## 
## $deepboost$type
## [1] "Classification"
## 
## $deepboost$parameters
##    parameter     class                     label
## 1   num_iter   numeric     # Boosting Iterations
## 2 tree_depth   numeric                Tree Depth
## 3       beta   numeric         L1 Regularization
## 4     lambda   numeric Tree Depth Regularization
## 5  loss_type character                      Loss
## 
## $deepboost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(tree_depth = seq(1, len), num_iter = floor((1:len) * 
##             50), beta = 2^seq(-8, -4, length = len), lambda = 2^seq(-6, 
##             -2, length = len), loss_type = "l")
##     }
##     else {
##         out <- data.frame(num_iter = floor(runif(len, min = 1, 
##             max = 500)), tree_depth = sample(1:20, replace = TRUE, 
##             size = len), beta = runif(len, max = 0.25), lambda = runif(len, 
##             max = 0.25), loss_type = sample(c("l"), replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $deepboost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.data.frame(x)) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     if (is.null(wts)) {
##         dat <- x
##         dat$.outcome <- y
##         out <- deepboost::deepboost(.outcome ~ ., data = dat, 
##             tree_depth = param$tree_depth, num_iter = param$num_iter, 
##             beta = param$beta, lambda = param$lambda, loss_type = as.character(param$loss_type), 
##             ...)
##     }
##     else {
##         dat <- x
##         dat$.outcome <- y
##         out <- deepboost::deepboost(.outcome ~ ., data = dat, 
##             tree_depth = param$tree_depth, instance_weights = wts, 
##             num_iter = param$num_iter, beta = param$beta, lambda = param$lambda, 
##             loss_type = as.character(param$loss_type), ...)
##     }
##     out
## }
## 
## $deepboost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     deepboost:::predict(modelFit, newdata)
## }
## 
## $deepboost$levels
## function (x) 
## x@classes
## 
## $deepboost$prob
## NULL
## 
## $deepboost$tags
## [1] "Tree-Based Model"           "Boosting"                  
## [3] "Ensemble Model"             "Implicit Feature Selection"
## [5] "Accepts Case Weights"       "L1 Regularization"         
## [7] "Two Class Only"            
## 
## $deepboost$sort
## function (x) 
## x[order(x$num_iter, x$tree_depth, x$beta), ]
## 
## 
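# DeepBoost is tagged "Two Class Only", so a two-class subset of iris is
# used; a sketch with illustrative values, assuming deepboost is installed.
iris2 <- droplevels(subset(iris, Species != "virginica"))
db_fit <- caret::train(Species ~ ., data = iris2, method = "deepboost",
                       tuneGrid = data.frame(num_iter = 50, tree_depth = 3, beta = 0.02,
                                             lambda = 0.05, loss_type = "l"))
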
## $DENFIS
## $DENFIS$label
## [1] "Dynamic Evolving Neural-Fuzzy Inference System "
## 
## $DENFIS$library
## [1] "frbs"
## 
## $DENFIS$type
## [1] "Regression"
## 
## $DENFIS$parameters
##   parameter   class           label
## 1      Dthr numeric       Threshold
## 2  max.iter numeric Max. Iterations
## 
## $DENFIS$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(Dthr = seq(0.1, 0.5, length = len), 
##             max.iter = 100)
##     }
##     else {
##         out <- data.frame(Dthr = runif(len, min = 0, max = 1), 
##             max.iter = sample(1:20, replace = TRUE, size = len))
##     }
##     out
## }
## 
## $DENFIS$loop
## NULL
## 
## $DENFIS$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)), method.type = "DENFIS")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$Dthr <- param$Dthr
##         theDots$control$max.iter <- param$max.iter
##     }
##     else theDots$control <- list(Dthr = param$Dthr, max.iter = param$max.iter, 
##         step.size = 0.01, d = 2, method.type = "DENFIS", name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $DENFIS$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $DENFIS$prob
## NULL
## 
## $DENFIS$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $DENFIS$tags
## [1] "Rule-Based Model"
## 
## $DENFIS$levels
## NULL
## 
## $DENFIS$sort
## function (x) 
## x[order(x$Dthr), ]
## 
## 
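# A DENFIS sketch, assuming frbs is installed; Dthr values are illustrative
# and max.iter matches the default grid above.
denfis_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "DENFIS",
                           tuneGrid = expand.grid(Dthr = c(0.1, 0.3, 0.5), max.iter = 100))
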
## $dnn
## $dnn$label
## [1] "Stacked AutoEncoder Deep Neural Network"
## 
## $dnn$library
## [1] "deepnet"
## 
## $dnn$loop
## NULL
## 
## $dnn$type
## [1] "Classification" "Regression"    
## 
## $dnn$parameters
##         parameter   class           label
## 1          layer1 numeric  Hidden Layer 1
## 2          layer2 numeric  Hidden Layer 2
## 3          layer3 numeric  Hidden Layer 3
## 4  hidden_dropout numeric Hidden Dropouts
## 5 visible_dropout numeric Visible Dropout
## 
## $dnn$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(layer1 = 1:len, layer2 = 0:(len - 
##             1), layer3 = 0:(len - 1), hidden_dropout = seq(0, 
##             0.7, length = len), visible_dropout = seq(0, 0.7, 
##             length = len))
##     }
##     else {
##         out <- data.frame(layer1 = sample(2:20, replace = TRUE, 
##             size = len), layer2 = sample(2:20, replace = TRUE, 
##             size = len), layer3 = sample(2:20, replace = TRUE, 
##             size = len), hidden_dropout = runif(len, min = 0, 
##             max = 0.7), visible_dropout = runif(len, min = 0, 
##             max = 0.7))
##     }
##     out
## }
## 
## $dnn$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     is_class <- is.factor(y)
##     if (is_class) 
##         y <- caret:::class2ind(y)
##     layers <- c(param$layer1, param$layer2, param$layer3)
##     layers <- layers[layers > 0]
##     deepnet::sae.dnn.train(x, y, hidden = layers, output = if (is_class) 
##         "sigm"
##     else "linear", hidden_dropout = param$hidden_dropout, visible_dropout = param$visible_dropout, 
##         ...)
## }
## 
## $dnn$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     pred <- deepnet::nn.predict(modelFit, as.matrix(newdata))
##     if (ncol(pred) > 1) 
##         pred <- modelFit$obsLevels[apply(pred, 1, which.max)]
##     pred
## }
## 
## $dnn$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- exp(deepnet::nn.predict(modelFit, as.matrix(newdata)))
##     out <- apply(out, 1, function(x) x/sum(x))
##     t(out)
## }
## 
## $dnn$predictors
## function (x, ...) 
## {
##     NULL
## }
## 
## $dnn$varImp
## NULL
## 
## $dnn$levels
## function (x) 
## x$classes
## 
## $dnn$tags
## [1] "Neural Network"
## 
## $dnn$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
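# A stacked autoencoder DNN sketch, assuming deepnet is installed; setting
# layer2 and layer3 to 0 drops them, since the fit code keeps only layers > 0.
dnn_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "dnn",
                        tuneGrid = data.frame(layer1 = 5, layer2 = 0, layer3 = 0,
                                              hidden_dropout = 0, visible_dropout = 0))
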
## $dwdLinear
## $dwdLinear$label
## [1] "Linear Distance Weighted Discrimination"
## 
## $dwdLinear$library
## [1] "kerndwd"
## 
## $dwdLinear$type
## [1] "Classification"
## 
## $dwdLinear$parameters
##   parameter   class                    label
## 1    lambda numeric Regularization Parameter
## 2      qval numeric                        q
## 
## $dwdLinear$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (length(levels(y)) != 2) 
##         stop("Two class problems only")
##     if (search == "grid") {
##         out <- expand.grid(lambda = 10^seq(-5, 1, length = len), 
##             qval = 1)
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1), 
##             qval = runif(len, min = 0, 3))
##     }
##     out
## }
## 
## $dwdLinear$loop
## NULL
## 
## $dwdLinear$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     out <- kerndwd::kerndwd(x = x, y = ifelse(y == lev[1], 1, 
##         -1), qval = param$qval, lambda = param$lambda, kern = kernlab::vanilladot(), 
##         ...)
##     out$kern <- kernlab::vanilladot()
##     out$x <- x
##     out
## }
## 
## $dwdLinear$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(object = modelFit, newx = newdata, kern = modelFit$kern, 
##         x = modelFit$x)[, 1]
##     ifelse(out == 1, modelFit$obsLevels[1], modelFit$obsLevels[2])
## }
## 
## $dwdLinear$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(object = modelFit, newx = newdata, kern = modelFit$kern, 
##         x = modelFit$x, type = "link")[, 1]
##     out <- binomial()$linkinv(out)
##     out <- cbind(out, 1 - out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $dwdLinear$levels
## function (x) 
## x$obsLevels
## 
## $dwdLinear$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $dwdLinear$tags
## [1] "Discriminant Analysis"            "L2 Regularization"               
## [3] "Kernel Method"                    "Linear Classifier"               
## [5] "Distance Weighted Discrimination" "Two Class Only"                  
## 
## $dwdLinear$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
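# Linear DWD is also two-class only; this sketch reuses iris2 from the
# deepboost example, assuming kerndwd and kernlab are installed.
dwdl_fit <- caret::train(Species ~ ., data = iris2, method = "dwdLinear",
                         tuneGrid = expand.grid(lambda = 10^(-3:0), qval = 1))
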
## $dwdPoly
## $dwdPoly$label
## [1] "Distance Weighted Discrimination with Polynomial Kernel"
## 
## $dwdPoly$library
## [1] "kerndwd"
## 
## $dwdPoly$type
## [1] "Classification"
## 
## $dwdPoly$parameters
##   parameter   class                    label
## 1    lambda numeric Regularization Parameter
## 2      qval numeric                        q
## 3    degree numeric        Polynomial Degree
## 4     scale numeric                    Scale
## 
## $dwdPoly$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (length(levels(y)) != 2) 
##         stop("Two class problems only")
##     if (search == "grid") {
##         out <- expand.grid(lambda = 10^seq(-5, 1, length = len), 
##             qval = 1, degree = seq(1, min(len, 3)), scale = 10^((1:len) - 
##                 4))
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1), 
##             qval = runif(len, min = 0, 3), degree = sample(1:3, 
##                 size = len, replace = TRUE), scale = 10^runif(len, 
##                 min = -5, 0))
##     }
##     out
## }
## 
## $dwdPoly$loop
## NULL
## 
## $dwdPoly$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     kobj <- kernlab::polydot(degree = param$degree, scale = param$scale, 
##         offset = 1)
##     out <- kerndwd::kerndwd(x = x, y = ifelse(y == lev[1], 1, 
##         -1), qval = param$qval, lambda = param$lambda, kern = kobj, 
##         ...)
##     out$kern <- kobj
##     out$x <- x
##     out
## }
## 
## $dwdPoly$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(object = modelFit, newx = newdata, kern = modelFit$kern, 
##         x = modelFit$x)[, 1]
##     ifelse(out == 1, modelFit$obsLevels[1], modelFit$obsLevels[2])
## }
## 
## $dwdPoly$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(object = modelFit, newx = newdata, kern = modelFit$kern, 
##         x = modelFit$x, type = "link")[, 1]
##     out <- binomial()$linkinv(out)
##     out <- cbind(out, 1 - out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $dwdPoly$levels
## function (x) 
## x$obsLevels
## 
## $dwdPoly$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $dwdPoly$tags
## [1] "Discriminant Analysis"            "L2 Regularization"               
## [3] "Kernel Method"                    "Polynomial Model"                
## [5] "Distance Weighted Discrimination" "Two Class Only"                  
## 
## $dwdPoly$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
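# The polynomial-kernel variant adds degree and scale; illustrative values,
# same iris2 subset as above.
dwdp_fit <- caret::train(Species ~ ., data = iris2, method = "dwdPoly",
                         tuneGrid = expand.grid(lambda = 10^(-3:0), qval = 1,
                                                degree = 2, scale = 0.01))
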
## $dwdRadial
## $dwdRadial$label
## [1] "Distance Weighted Discrimination with Radial Basis Function Kernel"
## 
## $dwdRadial$library
## [1] "kernlab" "kerndwd"
## 
## $dwdRadial$type
## [1] "Classification"
## 
## $dwdRadial$parameters
##   parameter   class                    label
## 1    lambda numeric Regularization Parameter
## 2      qval numeric                        q
## 3     sigma numeric                    Sigma
## 
## $dwdRadial$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     sigmas <- kernlab::sigest(as.matrix(x), na.action = na.omit, 
##         scaled = TRUE)
##     if (length(levels(y)) != 2) 
##         stop("Two class problems only")
##     if (search == "grid") {
##         out <- expand.grid(lambda = 10^seq(-5, 1, length = len), 
##             qval = 1, sigma = mean(as.vector(sigmas[-2])))
##     }
##     else {
##         rng <- extendrange(log(sigmas), f = 0.75)
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1), 
##             qval = runif(len, min = 0, 3), sigma = exp(runif(len, 
##                 min = rng[1], max = rng[2])))
##     }
##     out
## }
## 
## $dwdRadial$loop
## NULL
## 
## $dwdRadial$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     kobj <- kernlab::rbfdot(sigma = param$sigma)
##     out <- kerndwd::kerndwd(x = x, y = ifelse(y == lev[1], 1, 
##         -1), qval = param$qval, lambda = param$lambda, kern = kobj, 
##         ...)
##     out$kern <- kobj
##     out$x <- x
##     out
## }
## 
## $dwdRadial$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(object = modelFit, newx = newdata, kern = modelFit$kern, 
##         x = modelFit$x)[, 1]
##     ifelse(out == 1, modelFit$obsLevels[1], modelFit$obsLevels[2])
## }
## 
## $dwdRadial$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(object = modelFit, newx = newdata, kern = modelFit$kern, 
##         x = modelFit$x, type = "link")[, 1]
##     out <- binomial()$linkinv(out)
##     out <- cbind(out, 1 - out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $dwdRadial$levels
## function (x) 
## x$obsLevels
## 
## $dwdRadial$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $dwdRadial$tags
## [1] "Discriminant Analysis"            "L2 Regularization"               
## [3] "Kernel Method"                    "Radial Basis Function"           
## [5] "Distance Weighted Discrimination" "Two Class Only"                  
## 
## $dwdRadial$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
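# The RBF variant tunes sigma instead of degree/scale; when tuneGrid is
# omitted, the grid code above estimates sigma via kernlab::sigest().
dwdr_fit <- caret::train(Species ~ ., data = iris2, method = "dwdRadial",
                         tuneGrid = expand.grid(lambda = 10^(-3:0), qval = 1, sigma = 0.1))
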
## $earth
## $earth$label
## [1] "Multivariate Adaptive Regression Spline"
## 
## $earth$library
## [1] "earth"
## 
## $earth$type
## [1] "Regression"     "Classification"
## 
## $earth$parameters
##   parameter   class          label
## 1    nprune numeric         #Terms
## 2    degree numeric Product Degree
## 
## $earth$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     mod <- earth::earth(.outcome ~ ., data = dat, pmethod = "none")
##     maxTerms <- nrow(mod$dirs)
##     maxTerms <- min(200, floor(maxTerms * 0.75) + 2)
##     if (search == "grid") {
##         out <- data.frame(nprune = unique(floor(seq(2, to = maxTerms, 
##             length = len))), degree = 1)
##     }
##     else {
##         out <- data.frame(nprune = sample(2:maxTerms, size = len, 
##             replace = TRUE), degree = sample(1:2, size = len, 
##             replace = TRUE))
##     }
## }
## 
## $earth$loop
## function (grid) 
## {
##     deg <- unique(grid$degree)
##     loop <- data.frame(degree = deg)
##     loop$nprune <- NA
##     submodels <- vector(mode = "list", length = length(deg))
##     for (i in seq(along = deg)) {
##         np <- grid[grid$degree == deg[i], "nprune"]
##         loop$nprune[loop$degree == deg[i]] <- np[which.max(np)]
##         submodels[[i]] <- data.frame(nprune = np[-which.max(np)])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $earth$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(earth)
##     theDots <- list(...)
##     theDots$keepxy <- TRUE
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(x = x, y = y, degree = param$degree, 
##         nprune = param$nprune), theDots)
##     if (is.factor(y) & !any(names(theDots) == "glm")) {
##         modelArgs$glm <- list(family = binomial, maxit = 100)
##     }
##     tmp <- do.call(earth::earth, modelArgs)
##     tmp$call["nprune"] <- param$nprune
##     tmp$call["degree"] <- param$degree
##     tmp
## }
## 
## $earth$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         out <- earth:::predict.earth(modelFit, newdata, type = "class")
##     }
##     else {
##         out <- earth:::predict.earth(modelFit, newdata)
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- if (is.matrix(out)) 
##             out[, 1]
##         else out
##         for (j in seq(along = submodels$nprune)) {
##             prunedFit <- earth:::update.earth(modelFit, nprune = submodels$nprune[j])
##             if (modelFit$problemType == "Classification") {
##                 tmp[[j + 1]] <- earth:::predict.earth(prunedFit, 
##                   newdata, type = "class")
##             }
##             else {
##                 tmp[[j + 1]] <- earth:::predict.earth(prunedFit, 
##                   newdata)
##             }
##             if (is.matrix(tmp[[j + 1]])) 
##                 tmp[[j + 1]] <- tmp[[j + 1]][, 1]
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $earth$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- earth:::predict.earth(modelFit, newdata, type = "response")
##     if (ncol(out) > 1) {
##         out <- t(apply(out, 1, function(x) x/sum(x)))
##     }
##     else {
##         out <- cbind(1 - out[, 1], out[, 1])
##         colnames(out) <- modelFit$obsLevels
##     }
##     out <- as.data.frame(out, stringsAsFactors = TRUE)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$nprune)) {
##             prunedFit <- earth:::update.earth(modelFit, nprune = submodels$nprune[j])
##             tmp2 <- earth:::predict.earth(prunedFit, newdata, 
##                 type = "response")
##             if (ncol(tmp2) > 1) {
##                 tmp2 <- t(apply(tmp2, 1, function(x) x/sum(x)))
##             }
##             else {
##                 tmp2 <- cbind(1 - tmp2[, 1], tmp2[, 1])
##                 colnames(tmp2) <- modelFit$obsLevels
##             }
##             tmp2 <- as.data.frame(tmp2, stringsAsFactors = TRUE)
##             tmp[[j + 1]] <- tmp2
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $earth$predictors
## function (x, ...) 
## {
##     vi <- varImp(x)
##     notZero <- sort(unique(unlist(lapply(vi, function(x) which(x > 
##         0)))))
##     if (length(notZero) > 0) 
##         rownames(vi)[notZero]
##     else NULL
## }
## 
## $earth$varImp
## function (object, value = "gcv", ...) 
## {
##     earthImp <- earth::evimp(object)
##     if (!is.matrix(earthImp)) 
##         earthImp <- t(as.matrix(earthImp))
##     out <- earthImp
##     perfCol <- which(colnames(out) == value)
##     increaseInd <- out[, perfCol + 1]
##     out <- as.data.frame(out[, perfCol, drop = FALSE], stringsAsFactors = TRUE)
##     colnames(out) <- "Overall"
##     if (any(earthImp[, "used"] == 0)) {
##         dropList <- grep("-unused", rownames(earthImp), value = TRUE)
##         out$Overall[rownames(out) %in% dropList] <- 0
##     }
##     rownames(out) <- gsub("-unused", "", rownames(out))
##     out <- as.data.frame(out, stringsAsFactors = TRUE)
##     xNames <- object$namesx.org
##     if (any(!(xNames %in% rownames(out)))) {
##         xNames <- xNames[!(xNames %in% rownames(out))]
##         others <- data.frame(Overall = rep(0, length(xNames)), 
##             row.names = xNames)
##         out <- rbind(out, others)
##     }
##     out
## }
## 
## $earth$levels
## function (x) 
## x$levels
## 
## $earth$tags
## [1] "Multivariate Adaptive Regression Splines"
## [2] "Implicit Feature Selection"              
## [3] "Accepts Case Weights"                    
## 
## $earth$notes
## [1] "Unlike other packages used by `train`, the `earth` package is fully loaded when this model is used."
## 
## $earth$sort
## function (x) 
## x[order(x$degree, x$nprune), ]
## 
## 
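# A MARS sketch over the nprune/degree parameters above, assuming the earth
# package is installed; note that earth is fully loaded by train().
mars_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "earth",
                         tuneGrid = expand.grid(nprune = c(2, 5, 8), degree = 1:2))
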
## $elm
## $elm$label
## [1] "Extreme Learning Machine"
## 
## $elm$library
## [1] "elmNN"
## 
## $elm$loop
## NULL
## 
## $elm$type
## [1] "Classification" "Regression"    
## 
## $elm$parameters
##   parameter     class               label
## 1      nhid   numeric       #Hidden Units
## 2    actfun character Activation Function
## 
## $elm$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     funs <- c("sin", "radbas", "purelin", "tansig")
##     if (search == "grid") {
##         out <- expand.grid(nhid = ((1:len) * 2) - 1, actfun = funs)
##     }
##     else {
##         out <- data.frame(nhid = floor(runif(len, min = 1, max = 20)), 
##             actfun = sample(funs, replace = TRUE, size = len))
##     }
## }
## 
## $elm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (is.factor(y)) {
##         factor2ind <- function(x) {
##             x <- model.matrix(~x - 1, contrasts = list(x = "contr.treatment"))
##             colnames(x) <- gsub("^x", "", colnames(x))
##             att <- attributes(x)
##             att$assign <- NULL
##             att$contrasts <- NULL
##             attributes(x) <- att
##             x
##         }
##         out <- elmNN::elmtrain.default(x = x, y = factor2ind(y), 
##             nhid = param$nhid, actfun = param$actfun, ...)
##         out$lev <- levels(y)
##     }
##     else {
##         out <- elmNN::elmtrain.default(x = x, y = y, nhid = param$nhid, 
##             actfun = param$actfun, ...)
##     }
##     out$xNames <- colnames(x)
##     out
## }
## 
## $elm$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- elmNN::predict.elmNN(modelFit, newdata)
##     if (modelFit$problemType == "Classification") {
##         out <- modelFit$lev[apply(out, 1, which.max)]
##         out <- factor(out, levels = modelFit$lev)
##     }
##     out
## }
## 
## $elm$prob
## NULL
## 
## $elm$varImp
## NULL
## 
## $elm$predictors
## function (x, ...) 
## x$xNames
## 
## $elm$tags
## [1] "Neural Network"
## 
## $elm$levels
## function (x) 
## x$lev
## 
## $elm$notes
## [1] "The package is no longer on CRAN but can be installed from the archive at  https://cran.r-project.org/src/contrib/Archive/elmNN/"
## 
## $elm$sort
## function (x) 
## x[order(x$nhid), ]
## 
## 
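# An ELM sketch; per the note above, elmNN must be installed from the CRAN
# archive first. Values are illustrative.
elm_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "elm",
                        tuneGrid = data.frame(nhid = c(5, 10, 20), actfun = "sin"))
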
## $enet
## $enet$label
## [1] "Elasticnet"
## 
## $enet$library
## [1] "elasticnet"
## 
## $enet$type
## [1] "Regression"
## 
## $enet$parameters
##   parameter   class                     label
## 1  fraction numeric Fraction of Full Solution
## 2    lambda numeric              Weight Decay
## 
## $enet$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = c(0, 10^seq(-1, -4, length = len - 
##             1)), fraction = seq(0.05, 1, length = len))
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1), 
##             fraction = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $enet$loop
## function (grid) 
## {
##     grid <- grid[order(grid$lambda, grid$fraction, decreasing = TRUE), 
##         , drop = FALSE]
##     uniqueLambda <- unique(grid$lambda)
##     loop <- data.frame(lambda = uniqueLambda)
##     loop$fraction <- NA
##     submodels <- vector(mode = "list", length = length(uniqueLambda))
##     for (i in seq(along = uniqueLambda)) {
##         subFrac <- grid[grid$lambda == uniqueLambda[i], "fraction"]
##         loop$fraction[loop$lambda == uniqueLambda[i]] <- subFrac[which.max(subFrac)]
##         submodels[[i]] <- data.frame(fraction = subFrac[-which.max(subFrac)])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $enet$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     elasticnet::enet(as.matrix(x), y, lambda = param$lambda)
## }
## 
## $enet$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- elasticnet::predict.enet(modelFit, newdata, s = modelFit$tuneValue$fraction, 
##         mode = "fraction")$fit
##     if (!is.null(submodels)) {
##         if (nrow(submodels) > 1) {
##             out <- c(list(if (is.matrix(out)) out[, 1] else out), 
##                 as.list(as.data.frame(elasticnet::predict.enet(modelFit, 
##                   newx = newdata, s = submodels$fraction, mode = "fraction")$fit, 
##                   stringsAsFactors = FALSE)))
##         }
##         else {
##             tmp <- elasticnet::predict.enet(modelFit, newx = newdata, 
##                 s = submodels$fraction, mode = "fraction")$fit
##             out <- c(list(if (is.matrix(out)) out[, 1] else out), 
##                 list(tmp))
##         }
##     }
##     out
## }
## 
## $enet$predictors
## function (x, s = NULL, ...) 
## {
##     if (is.null(s)) {
##         if (!is.null(x$tuneValue)) {
##             s <- x$tuneValue$fraction
##         }
##         else stop("must supply a vaue of s")
##         out <- elasticnet::predict.enet(x, s = s, type = "coefficients", 
##             mode = "fraction")$coefficients
##     }
##     else {
##         out <- elasticnet::predict.enet(x, s = s)$coefficients
##     }
##     names(out)[out != 0]
## }
## 
## $enet$tags
## [1] "Linear Regression"          "Implicit Feature Selection"
## [3] "L1 Regularization"         
## 
## $enet$prob
## NULL
## 
## $enet$sort
## function (x) 
## x[order(x$fraction, -x$lambda), ]
## 
## 
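# An elasticnet sketch over fraction and lambda, assuming the elasticnet
# package is installed; lambda = 0 reduces to the lasso path.
enet_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "enet",
                         tuneGrid = expand.grid(fraction = c(0.25, 0.5, 1),
                                                lambda = c(0, 0.01)))
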
## $evtree
## $evtree$label
## [1] "Tree Models from Genetic Algorithms"
## 
## $evtree$library
## [1] "evtree"
## 
## $evtree$loop
## NULL
## 
## $evtree$type
## [1] "Regression"     "Classification"
## 
## $evtree$parameters
##   parameter   class                label
## 1     alpha numeric Complexity Parameter
## 
## $evtree$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(alpha = seq(1, 3, length = len))
##     }
##     else {
##         out <- data.frame(alpha = runif(len, min = 1, max = 5))
##     }
##     out
## }
## 
## $evtree$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$alpha <- param$alpha
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- evtree::evtree.control(alpha = param$alpha)
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, control = ctl), theDots)
##     out <- do.call(evtree::evtree, modelArgs)
##     out
## }
## 
## $evtree$levels
## function (x) 
## x$obsLevels
## 
## $evtree$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $evtree$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "prob")
## }
## 
## $evtree$tags
## [1] "Tree-Based Model"           "Implicit Feature Selection"
## [3] "Accepts Case Weights"      
## 
## $evtree$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
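# An evtree sketch; alpha is its only tuning parameter (larger values
# penalize tree complexity more). Assumes the evtree package is installed.
ev_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "evtree",
                       tuneGrid = data.frame(alpha = c(1, 2, 3)))
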
## $extraTrees
## $extraTrees$label
## [1] "Random Forest by Randomization"
## 
## $extraTrees$library
## [1] "extraTrees"
## 
## $extraTrees$loop
## NULL
## 
## $extraTrees$type
## [1] "Regression"     "Classification"
## 
## $extraTrees$parameters
##       parameter   class                          label
## 1          mtry numeric # Randomly Selected Predictors
## 2 numRandomCuts numeric                  # Random Cuts
## 
## $extraTrees$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len), numRandomCuts = 1:len)
##     }
##     else {
##         out <- data.frame(mtry = sample(1:ncol(x), size = len, 
##             replace = TRUE), numRandomCuts = sample(1:25, size = len, 
##             replace = TRUE))
##     }
## }
## 
## $extraTrees$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## extraTrees::extraTrees(x, y, mtry = min(param$mtry, ncol(x)), 
##     numRandomCuts = param$numRandomCuts, ...)
## 
## $extraTrees$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $extraTrees$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, probability = TRUE)
## 
## $extraTrees$levels
## function (x) 
## x$obsLevels
## 
## $extraTrees$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## 
## $extraTrees$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
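# An extraTrees sketch; the package requires Java via rJava. The grid is
# illustrative.
et_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "extraTrees",
                       tuneGrid = expand.grid(mtry = 2:3, numRandomCuts = 1:2))
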
## $fda
## $fda$label
## [1] "Flexible Discriminant Analysis"
## 
## $fda$library
## [1] "earth" "mda"  
## 
## $fda$loop
## NULL
## 
## $fda$type
## [1] "Classification"
## 
## $fda$parameters
##   parameter   class          label
## 1    degree numeric Product Degree
## 2    nprune numeric         #Terms
## 
## $fda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     mod <- earth::earth(.outcome ~ ., data = dat, pmethod = "none")
##     maxTerms <- nrow(mod$dirs)
##     maxTerms <- min(200, floor(maxTerms * 0.75) + 2)
##     if (search == "grid") {
##         out <- data.frame(nprune = unique(floor(seq(2, to = maxTerms, 
##             length = len))), degree = 1)
##     }
##     else {
##         out <- data.frame(nprune = sample(2:maxTerms, size = len, 
##             replace = TRUE), degree = sample(1:2, size = len, 
##             replace = TRUE))
##     }
## }
## 
## $fda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(earth)
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     mda::fda(.outcome ~ ., data = dat, method = earth::earth, 
##         degree = param$degree, nprune = param$nprune, weights = wts, 
##         ...)
## }
## 
## $fda$levels
## function (x) 
## x$obsLevels
## 
## $fda$tags
## [1] "Multivariate Adaptive Regression Splines"
## [2] "Implicit Feature Selection"              
## [3] "Accepts Case Weights"                    
## 
## $fda$notes
## [1] "Unlike other packages used by `train`, the `earth` package is fully loaded when this model is used."
## 
## $fda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $fda$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "posterior")
## 
## $fda$predictors
## function (x, ...) 
## {
##     code <- getModelInfo("earth", regex = FALSE)[[1]]$predictors
##     tmp <- predictors(x$terms)
##     out <- if (class(x$fit) == "earth") 
##         code(x$fit)
##     else tmp
##     out
## }
## 
## $fda$varImp
## function (object, value = "gcv", ...) 
## varImp(object$fit, value = value, ...)
## 
## $fda$sort
## function (x) 
## x[order(x$degree, x$nprune), ]
## 
## 
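# An FDA sketch, assuming mda and earth are installed; it tunes the same
# nprune/degree pair as the earth model above.
fda_fit <- caret::train(Species ~ ., data = iris, method = "fda",
                        tuneGrid = expand.grid(degree = 1, nprune = c(2, 4, 6)))
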
## $FH.GBML
## $FH.GBML$label
## [1] "Fuzzy Rules Using Genetic Cooperative-Competitive Learning and Pittsburgh"
## 
## $FH.GBML$library
## [1] "frbs"
## 
## $FH.GBML$type
## [1] "Classification"
## 
## $FH.GBML$parameters
##      parameter   class            label
## 1 max.num.rule numeric      Max. #Rules
## 2    popu.size numeric  Population Size
## 3      max.gen numeric Max. Generations
## 
## $FH.GBML$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(max.num.rule = 1 + (1:len) * 2, popu.size = 10, 
##             max.gen = 10)
##     }
##     else {
##         out <- data.frame(max.gen = sample(1:20, size = len, 
##             replace = TRUE), popu.size = sample(seq(2, 20, by = 2), 
##             size = len, replace = TRUE), max.num.rule = sample(1:20, 
##             size = len, replace = TRUE))
##     }
##     out
## }
## 
## $FH.GBML$loop
## NULL
## 
## $FH.GBML$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, as.numeric(y))), 
##         method.type = "FH.GBML")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$max.num.rule <- param$max.num.rule
##         theDots$control$popu.size <- param$popu.size
##         theDots$control$max.gen <- param$max.gen
##     }
##     else theDots$control <- list(max.num.rule = param$max.num.rule, 
##         popu.size = param$popu.size, max.gen = param$max.gen, 
##         persen_cross = 0.6, persen_mutant = 0.3, p.dcare = 0.5, 
##         p.gccl = 0.5, num.class = length(unique(y)), name = "sim-0")
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $FH.GBML$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     modelFit$obsLevels[predict(modelFit, newdata)]
## }
## 
## $FH.GBML$prob
## NULL
## 
## $FH.GBML$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $FH.GBML$tags
## [1] "Rule-Based Model"
## 
## $FH.GBML$levels
## NULL
## 
## $FH.GBML$sort
## function (x) 
## x[order(x$max.num.rule), ]
## 
## 
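# An FH.GBML sketch, assuming frbs is installed; small illustrative values,
# since the genetic search can be slow.
fh_fit <- caret::train(Species ~ ., data = iris, method = "FH.GBML",
                       tuneGrid = data.frame(max.num.rule = 5, popu.size = 10, max.gen = 10))
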
## $FIR.DM
## $FIR.DM$label
## [1] "Fuzzy Inference Rules by Descent Method"
## 
## $FIR.DM$library
## [1] "frbs"
## 
## $FIR.DM$type
## [1] "Regression"
## 
## $FIR.DM$parameters
##    parameter   class           label
## 1 num.labels numeric    #Fuzzy Terms
## 2   max.iter numeric Max. Iterations
## 
## $FIR.DM$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(num.labels = 1 + (1:len) * 2, max.iter = 100)
##     }
##     else {
##         out <- data.frame(max.iter = sample(1:20, replace = TRUE, 
##             size = len), num.labels = sample(2:20, size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $FIR.DM$loop
## NULL
## 
## $FIR.DM$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)), method.type = "FIR.DM")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$max.iter <- param$max.iter
##     }
##     else theDots$control <- list(num.labels = param$num.labels, 
##         max.iter = param$max.iter, step.size = 0.01, type.tnorm = "MIN", 
##         type.snorm = "MAX", type.implication.func = "ZADEH", 
##         name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $FIR.DM$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $FIR.DM$prob
## NULL
## 
## $FIR.DM$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $FIR.DM$tags
## [1] "Rule-Based Model"
## 
## $FIR.DM$levels
## NULL
## 
## $FIR.DM$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
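# A FIR.DM sketch, assuming frbs is installed; FS.HGD below takes the same
# num.labels/max.iter grid.
fir_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "FIR.DM",
                        tuneGrid = expand.grid(num.labels = c(3, 5), max.iter = 100))
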
## $foba
## $foba$label
## [1] "Ridge Regression with Variable Selection"
## 
## $foba$library
## [1] "foba"
## 
## $foba$type
## [1] "Regression"
## 
## $foba$parameters
##   parameter   class               label
## 1         k numeric #Variables Retained
## 2    lambda numeric          L2 Penalty
## 
## $foba$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = 10^seq(-5, -1, length = len), 
##             k = caret::var_seq(p = ncol(x), classification = is.factor(y), 
##                 len = len))
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1), 
##             k = sample(1:ncol(x), replace = TRUE, size = len))
##     }
##     out
## }
## 
## $foba$loop
## function (grid) 
## {
##     grid <- grid[order(grid$lambda, grid$k, decreasing = TRUE), 
##         , drop = FALSE]
##     uniqueLambda <- unique(grid$lambda)
##     loop <- data.frame(lambda = uniqueLambda)
##     loop$k <- NA
##     submodels <- vector(mode = "list", length = length(uniqueLambda))
##     for (i in seq(along = uniqueLambda)) {
##         subK <- grid[grid$lambda == uniqueLambda[i], "k"]
##         loop$k[loop$lambda == uniqueLambda[i]] <- subK[which.max(subK)]
##         submodels[[i]] <- data.frame(k = subK[-which.max(subK)])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $foba$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## foba::foba(as.matrix(x), y, lambda = param$lambda, ...)
## 
## $foba$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, k = modelFit$tuneValue$k, 
##         type = "fit")$fit
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$k)) {
##             tmp[[j + 1]] <- predict(modelFit, newdata, k = submodels$k[j], 
##                 type = "fit")$fit
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $foba$predictors
## function (x, k = NULL, ...) 
## {
##     if (is.null(k)) {
##         if (!is.null(x$tuneValue)) 
##             k <- x$tuneValue$k[1]
##         else stop("Please specify k")
##     }
##     names(predict(x, k = k, type = "coefficients")$selected.variables)
## }
## 
## $foba$tags
## [1] "Linear Regression"         "Ridge Regression"         
## [3] "L2 Regularization"         "Feature Selection Wrapper"
## 
## $foba$prob
## NULL
## 
## $foba$sort
## function (x) 
## x[order(x$k, -x$lambda), ]
## 
## 
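# A foba sketch: ridge regression that retains k variables; k cannot exceed
# the number of predictors. Assumes the foba package is installed.
foba_fit <- caret::train(Ozone ~ ., data = na.omit(df), method = "foba",
                         tuneGrid = expand.grid(k = 2:3, lambda = 10^(-3:-1)))
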
## $FRBCS.CHI
## $FRBCS.CHI$label
## [1] "Fuzzy Rules Using Chi's Method"
## 
## $FRBCS.CHI$library
## [1] "frbs"
## 
## $FRBCS.CHI$type
## [1] "Classification"
## 
## $FRBCS.CHI$parameters
##    parameter     class               label
## 1 num.labels   numeric        #Fuzzy Terms
## 2    type.mf character Membership Function
## 
## $FRBCS.CHI$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     type <- c("GAUSSIAN", "TRAPEZOID", "TRIANGLE")
##     if (search == "grid") {
##         out <- expand.grid(num.labels = 1 + (1:len) * 2, type.mf = type)
##     }
##     else {
##         out <- data.frame(type.mf = sample(type, size = len, 
##             replace = TRUE), num.labels = sample(2:20, size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $FRBCS.CHI$loop
## NULL
## 
## $FRBCS.CHI$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, as.numeric(y))), 
##         method.type = "FRBCS.CHI")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$type.mf <- param$type.mf
##     }
##     else theDots$control <- list(num.labels = param$num.labels, 
##         type.mf = param$type.mf, type.tnorm = "MIN", type.snorm = "MAX", 
##         type.implication.func = "ZADEH", num.class = length(unique(y)), 
##         name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $FRBCS.CHI$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     modelFit$obsLevels[predict(modelFit, newdata)[, 1]]
## }
## 
## $FRBCS.CHI$prob
## NULL
## 
## $FRBCS.CHI$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $FRBCS.CHI$tags
## [1] "Rule-Based Model"
## 
## $FRBCS.CHI$levels
## NULL
## 
## $FRBCS.CHI$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
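# An FRBCS.CHI sketch, assuming frbs is installed; FRBCS.W below accepts the
# same num.labels/type.mf grid.
chi_fit <- caret::train(Species ~ ., data = iris, method = "FRBCS.CHI",
                        tuneGrid = data.frame(num.labels = c(3, 5), type.mf = "GAUSSIAN"))
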
## $FRBCS.W
## $FRBCS.W$label
## [1] "Fuzzy Rules with Weight Factor"
## 
## $FRBCS.W$library
## [1] "frbs"
## 
## $FRBCS.W$type
## [1] "Classification"
## 
## $FRBCS.W$parameters
##    parameter     class               label
## 1 num.labels   numeric        #Fuzzy Terms
## 2    type.mf character Membership Function
## 
## $FRBCS.W$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     type <- c("GAUSSIAN", "TRAPEZOID", "TRIANGLE")
##     if (search == "grid") {
##         out <- expand.grid(num.labels = 1 + (1:len) * 2, type.mf = type)
##     }
##     else {
##         out <- data.frame(type.mf = sample(type, size = len, 
##             replace = TRUE), num.labels = sample(2:20, size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $FRBCS.W$loop
## NULL
## 
## $FRBCS.W$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, as.numeric(y))), 
##         method.type = "FRBCS.W")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$type.mf <- param$type.mf
##     }
##     else theDots$control <- list(num.labels = param$num.labels, 
##         type.mf = param$type.mf, type.tnorm = "MIN", type.snorm = "MAX", 
##         type.implication.func = "ZADEH", num.class = length(unique(y)), 
##         name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $FRBCS.W$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     modelFit$obsLevels[predict(modelFit, newdata)[, 1]]
## }
## 
## $FRBCS.W$prob
## NULL
## 
## $FRBCS.W$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $FRBCS.W$tags
## [1] "Rule-Based Model"
## 
## $FRBCS.W$levels
## NULL
## 
## $FRBCS.W$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
## $FS.HGD
## $FS.HGD$label
## [1] "Simplified TSK Fuzzy Rules"
## 
## $FS.HGD$library
## [1] "frbs"
## 
## $FS.HGD$type
## [1] "Regression"
## 
## $FS.HGD$parameters
##    parameter   class           label
## 1 num.labels numeric    #Fuzzy Terms
## 2   max.iter numeric Max. Iterations
## 
## $FS.HGD$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(num.labels = 1 + (1:len) * 2, max.iter = 100)
##     }
##     else {
##         out <- data.frame(max.iter = sample(1:20, replace = TRUE, 
##             size = len), num.labels = sample(2:20, size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $FS.HGD$loop
## NULL
## 
## $FS.HGD$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)), method.type = "FS.HGD")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$max.iter <- param$max.iter
##     }
##     else theDots$control <- list(num.labels = param$num.labels, 
##         max.iter = param$max.iter, step.size = 0.01, alpha.heuristic = 1, 
##         type.tnorm = "MIN", type.snorm = "MAX", type.implication.func = "ZADEH", 
##         name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $FS.HGD$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $FS.HGD$prob
## NULL
## 
## $FS.HGD$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $FS.HGD$tags
## [1] "Rule-Based Model"
## 
## $FS.HGD$levels
## NULL
## 
## $FS.HGD$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
## $gam
## $gam$label
## [1] "Generalized Additive Model using Splines"
## 
## $gam$library
## [1] "mgcv"
## 
## $gam$loop
## NULL
## 
## $gam$type
## [1] "Regression"     "Classification"
## 
## $gam$parameters
##   parameter     class             label
## 1    select   logical Feature Selection
## 2    method character            Method
## 
## $gam$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(select = c(TRUE, FALSE)[1:min(2, len)], 
##             method = "GCV.Cp")
##     }
##     else {
##         out <- data.frame(select = sample(c(TRUE, FALSE), size = len, 
##             replace = TRUE), method = sample(c("GCV.Cp", "ML"), 
##             size = len, replace = TRUE))
##     }
## }
## 
## $gam$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(mgcv)
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     modForm <- caret:::smootherFormula(x)
##     if (is.factor(y)) {
##         dat$.outcome <- ifelse(y == lev[1], 0, 1)
##         dist <- binomial()
##     }
##     else {
##         dat$.outcome <- y
##         dist <- gaussian()
##     }
##     modelArgs <- list(formula = modForm, data = dat, select = param$select, 
##         method = as.character(param$method))
##     theDots <- list(...)
##     if (!any(names(theDots) == "family")) 
##         modelArgs$family <- dist
##     modelArgs <- c(modelArgs, theDots)
##     out <- do.call(mgcv::gam, modelArgs)
##     out
## }
## 
## $gam$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Classification") {
##         probs <- predict(modelFit, newdata, type = "response")
##         out <- ifelse(probs < 0.5, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "response")
##     }
##     out
## }
## 
## $gam$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - out, out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $gam$predictors
## function (x, ...) 
## {
##     predictors(x$terms)
## }
## 
## $gam$levels
## function (x) 
## x$obsLevels
## 
## $gam$varImp
## function (object, ...) 
## {
##     smoothed <- summary(object)$s.table[, "p-value", drop = FALSE]
##     linear <- summary(object)$p.table
##     linear <- linear[, grepl("^Pr", colnames(linear)), drop = FALSE]
##     gams <- rbind(smoothed, linear)
##     gams <- gams[rownames(gams) != "(Intercept)", , drop = FALSE]
##     rownames(gams) <- gsub("^s\\(", "", rownames(gams))
##     rownames(gams) <- gsub("\\)$", "", rownames(gams))
##     colnames(gams)[1] <- "Overall"
##     gams <- as.data.frame(gams, stringsAsFactors = TRUE)
##     gams$Overall <- -log10(gams$Overall)
##     allPreds <- colnames(attr(object$terms, "factors"))
##     extras <- allPreds[!(allPreds %in% rownames(gams))]
##     if (length(extras) > 0) {
##         tmp <- data.frame(Overall = rep(NA, length(extras)))
##         rownames(tmp) <- extras
##         gams <- rbind(gams, tmp)
##     }
##     gams
## }
## 
## $gam$notes
## [1] "Which terms enter the model in a nonlinear manner is determined by the number of unique values for the predictor. For example, if a predictor only has four unique values, most basis expansion method will fail because there are not enough granularity in the data. By default, a predictor must have at least 10 unique values to be used in a nonlinear basis expansion. Unlike other packages used by `train`, the `mgcv` package is fully loaded when this model is used."
## 
## $gam$tags
## [1] "Generalized Linear Model"   "Generalized Additive Model"
## 
## $gam$sort
## function (x) 
## x
## 
## 
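A minimal, hedged sketch (not run in this document) of how the `select` and `method` parameters listed above can be tuned with caret's `train()`, using the airquality data loaded earlier. The grid values and the 5-fold CV setup are illustrative assumptions, and the caret and mgcv packages must be installed.

library(caret)
df_cc <- na.omit(airquality)   # train() requires complete cases by default
set.seed(1)
gam_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                 method = "gam",
                 tuneGrid = expand.grid(select = c(TRUE, FALSE),
                                        method = "GCV.Cp"),
                 trControl = trainControl(method = "cv", number = 5))
gam_fit$bestTune   # the (select, method) pair with the lowest CV RMSE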
## $gamboost
## $gamboost$label
## [1] "Boosted Generalized Additive Model"
## 
## $gamboost$library
## [1] "mboost" "plyr"   "import"
## 
## $gamboost$type
## [1] "Regression"     "Classification"
## 
## $gamboost$parameters
##   parameter     class                 label
## 1     mstop   numeric # Boosting Iterations
## 2     prune character            AIC Prune?
## 
## $gamboost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mstop = floor((1:len) * 50), prune = "no")
##     }
##     else {
##         out <- data.frame(mstop = sample(1:1000, size = len, 
##             replace = TRUE), prune = sample(c("yes", "no"), size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $gamboost$loop
## function (grid) 
## {
##     grid <- grid[order(-grid$mstop, grid$prune), ]
##     loop <- plyr::ddply(grid, plyr::.(prune), function(x) data.frame(mstop = max(x$mstop)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$mstop)) {
##         submodels[[i]] <- subset(grid, prune == loop$prune[i] & 
##             mstop < loop$mstop[i])
##     }
##     list(loop = loop[, c("mstop", "prune")], submodels = submodels)
## }
## 
## $gamboost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     import::from(mboost, bbs, .into = "mboost")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$mstop <- param$mstop
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- mboost::boost_control(mstop = param$mstop)
##     if (!any(names(theDots) == "family")) 
##         theDots$family <- if (is.factor(y)) 
##             mboost::Binomial()
##         else mboost::GaussReg()
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, control = ctl), theDots)
##     out <- do.call(mboost::gamboost, modelArgs)
##     if (param$prune == "yes") {
##         iters <- if (is.factor(y)) 
##             mboost::mstop(AIC(out, "classical"))
##         else mboost::mstop(AIC(out))
##         if (iters < out$mstop()) 
##             out <- out[iters]
##     }
##     out$.org.mstop <- out$mstop()
##     out$call["x"] <- "xData"
##     out$call["y"] <- "yData"
##     out
## }
## 
## $gamboost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predType <- ifelse(modelFit$problemType == "Classification", 
##         "class", "response")
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = predType)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- as.vector(out)
##         for (j in seq(along = submodels$mstop)) {
##             this_mstop <- if (submodels$prune[j] == "yes" & submodels$mstop[j] > 
##                 modelFit$.org.mstop) 
##                 modelFit$.org.mstop
##             else submodels$mstop[j]
##             tmp[[j + 1]] <- as.vector(predict(modelFit[this_mstop], 
##                 newdata, type = predType))
##         }
##         out <- tmp
##         mboost::mstop(modelFit) <- modelFit$.org.mstop
##     }
##     out
## }
## 
## $gamboost$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     probs <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - probs, probs)
##     colnames(out) <- modelFit$obsLevels
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$mstop)) {
##             this_mstop <- if (submodels$prune[j] == "yes" & submodels$mstop[j] > 
##                 modelFit$.org.mstop) 
##                 modelFit$.org.mstop
##             else submodels$mstop[j]
##             tmpProb <- predict(modelFit[this_mstop], newdata, 
##                 type = "response")
##             tmpProb <- cbind(1 - tmpProb, tmpProb)
##             colnames(tmpProb) <- modelFit$obsLevels
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##         mboost::mstop(modelFit) <- modelFit$.org.mstop
##     }
##     out
## }
## 
## $gamboost$predictors
## function (x, ...) 
## {
##     strsplit(variable.names(x), ", ")[[1]]
## }
## 
## $gamboost$notes
## [1] "The `prune` option for this model enables the number of iterations to be determined by the optimal AIC value across all iterations. See the examples in `?mboost::mstop`. If pruning is not used, the ensemble makes predictions using the exact value of the `mstop` tuning parameter value."
## 
## $gamboost$tags
## [1] "Generalized Additive Model" "Ensemble Model"            
## [3] "Boosting"                   "Implicit Feature Selection"
## [5] "Two Class Only"             "Accepts Case Weights"      
## 
## $gamboost$levels
## function (x) 
## levels(x$response)
## 
## $gamboost$sort
## function (x) 
## x[order(x$mstop, x$prune), ]
## 
## 
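The `loop` function above means caret fits only the largest `mstop` per pruning setting and reads smaller values off the same boosting path, so large `mstop` grids are cheap. A hedged sketch with an illustrative grid (mboost, plyr, and import must be installed):

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
gamboost_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                      method = "gamboost",
                      tuneGrid = expand.grid(mstop = c(50, 100, 150),
                                             prune = "no"),
                      trControl = trainControl(method = "cv", number = 5))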
## $gamLoess
## $gamLoess$label
## [1] "Generalized Additive Model using LOESS"
## 
## $gamLoess$library
## [1] "gam"
## 
## $gamLoess$loop
## NULL
## 
## $gamLoess$type
## [1] "Regression"     "Classification"
## 
## $gamLoess$parameters
##   parameter   class  label
## 1      span numeric   Span
## 2    degree numeric Degree
## 
## $gamLoess$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(span = 0.5, degree = 1)
##     }
##     else {
##         out <- data.frame(span = runif(len, min = 0, max = 1), 
##             degree = sample(1:2, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $gamLoess$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(gam)
##     args <- list(data = if (is.data.frame(x)) x else as.data.frame(x, 
##         stringsAsFactors = TRUE))
##     args$data$.outcome <- y
##     args$formula <- caret:::smootherFormula(x, smoother = "lo", 
##         span = param$span, degree = param$degree)
##     theDots <- list(...)
##     if (!any(names(theDots) == "family")) 
##         args$family <- if (is.factor(y)) 
##             binomial
##         else gaussian
##     if (length(theDots) > 0) 
##         args <- c(args, theDots)
##     do.call(gam::gam, args)
## }
## 
## $gamLoess$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Classification") {
##         probs <- gam:::predict.Gam(modelFit, newdata, type = "response")
##         out <- ifelse(probs < 0.5, modelFit$obsLevel[1], modelFit$obsLevel[2])
##     }
##     else {
##         out <- gam:::predict.Gam(modelFit, newdata, type = "response")
##     }
##     out
## }
## 
## $gamLoess$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - out, out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $gamLoess$predictors
## function (x, ...) 
## {
##     getNames <- function(x) {
##         x <- strsplit(x, "(\\()|(,)|(\\))")
##         x <- lapply(x, function(x) x[!(x %in% c("s", "lo", ""))])
##         unlist(lapply(x, function(x) x[1]))
##     }
##     getNames(predictors(x$terms))
## }
## 
## $gamLoess$varImp
## function (object, ...) 
## {
##     getNames <- function(x) {
##         x <- strsplit(x, "(\\()|(,)|(\\))")
##         x <- lapply(x, function(x) x[!(x %in% c("s", "lo", ""))])
##         unlist(lapply(x, function(x) x[1]))
##     }
##     gamSummary <- gam:::summary.Gam(object)
##     smoothed <- gamSummary$anova
##     smoothed <- smoothed[complete.cases(smoothed), grepl("^P", 
##         colnames(smoothed)), drop = FALSE]
##     linear <- gamSummary$parametric.anova
##     linear <- linear[complete.cases(linear), grepl("^P", colnames(linear)), 
##         drop = FALSE]
##     linear <- linear[!(rownames(linear) %in% rownames(smoothed)), 
##         , drop = FALSE]
##     colnames(smoothed) <- colnames(linear) <- "pval"
##     gams <- rbind(smoothed, linear)
##     gams <- gams[rownames(gams) != "(Intercept)", , drop = FALSE]
##     rownames(gams) <- getNames(rownames(gams))
##     colnames(gams)[1] <- "Overall"
##     gams <- as.data.frame(gams, stringsAsFactors = TRUE)
##     gams$Overall <- -log10(gams$Overall)
##     allPreds <- getNames(colnames(attr(object$terms, "factors")))
##     extras <- allPreds[!(allPreds %in% rownames(gams))]
##     if (length(extras) > 0) {
##         tmp <- data.frame(Overall = rep(NA, length(extras)))
##         rownames(tmp) <- extras
##         gams <- rbind(gams, tmp)
##     }
##     gams
## }
## 
## $gamLoess$levels
## function (x) 
## x$obsLevels
## 
## $gamLoess$notes
## [1] "Which terms enter the model in a nonlinear manner is determined by the number of unique values for the predictor. For example, if a predictor only has four unique values, most basis expansion method will fail because there are not enough granularity in the data. By default, a predictor must have at least 10 unique values to be used in a nonlinear basis expansion. Unlike other packages used by `train`, the `gam` package is fully loaded when this model is used."
## 
## $gamLoess$tags
## [1] "Generalized Linear Model"   "Generalized Additive Model"
## 
## $gamLoess$sort
## function (x) 
## x
## 
## $gamLoess$check
## function (pkg) 
## {
##     requireNamespace("gam")
##     current <- packageDescription("gam")$Version
##     expected <- "1.15"
##     if (compareVersion(current, expected) < 0) 
##         stop("This modeling workflow requires kohonen version ", 
##             expected, "or greater.", call. = FALSE)
## }
## 
## 
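A hedged sketch for the `span`/`degree` parameters above; per the `check` function, the gam package must be at version 1.15 or later. The span values are illustrative assumptions, not recommendations.

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
loess_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                   method = "gamLoess",
                   tuneGrid = expand.grid(span = c(0.3, 0.5, 0.7),
                                          degree = 1),
                   trControl = trainControl(method = "cv", number = 5))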
## $gamSpline
## $gamSpline$label
## [1] "Generalized Additive Model using Splines"
## 
## $gamSpline$library
## [1] "gam"
## 
## $gamSpline$loop
## NULL
## 
## $gamSpline$type
## [1] "Regression"     "Classification"
## 
## $gamSpline$parameters
##   parameter   class              label
## 1        df numeric Degrees of Freedom
## 
## $gamSpline$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(df = seq(1, 3, length = len))
##     }
##     else {
##         out <- data.frame(df = runif(len, min = 0, max = 5))
##     }
##     out
## }
## 
## $gamSpline$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(gam)
##     args <- list(data = if (is.data.frame(x)) x else as.data.frame(x, 
##         stringsAsFactors = TRUE))
##     args$data$.outcome <- y
##     args$formula <- caret:::smootherFormula(x, smoother = "s", 
##         df = param$df)
##     theDots <- list(...)
##     if (!any(names(theDots) == "family")) 
##         args$family <- if (is.factor(y)) 
##             binomial
##         else gaussian
##     if (length(theDots) > 0) 
##         args <- c(args, theDots)
##     do.call(gam::gam, args)
## }
## 
## $gamSpline$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Classification") {
##         probs <- gam:::predict.Gam(modelFit, newdata, type = "response")
##         out <- ifelse(probs < 0.5, modelFit$obsLevel[1], modelFit$obsLevel[2])
##     }
##     else {
##         out <- gam:::predict.Gam(modelFit, newdata, type = "response")
##     }
##     out
## }
## 
## $gamSpline$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- gam:::predict.Gam(modelFit, newdata, type = "response")
##     out <- cbind(1 - out, out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $gamSpline$levels
## function (x) 
## x$obsLevels
## 
## $gamSpline$predictors
## function (x, ...) 
## {
##     getNames <- function(x) {
##         x <- strsplit(x, "(\\()|(,)|(\\))")
##         x <- lapply(x, function(x) x[!(x %in% c("s", "lo", ""))])
##         unlist(lapply(x, function(x) x[1]))
##     }
##     getNames(predictors(x$terms))
## }
## 
## $gamSpline$varImp
## function (object, ...) 
## {
##     getNames <- function(x) {
##         x <- strsplit(x, "(\\()|(,)|(\\))")
##         x <- lapply(x, function(x) x[!(x %in% c("s", "lo", ""))])
##         unlist(lapply(x, function(x) x[1]))
##     }
##     gamSummary <- gam:::summary.Gam(object)
##     smoothed <- gamSummary$anova
##     smoothed <- smoothed[complete.cases(smoothed), grepl("^P", 
##         colnames(smoothed)), drop = FALSE]
##     linear <- gamSummary$parametric.anova
##     linear <- linear[complete.cases(linear), grepl("^P", colnames(linear)), 
##         drop = FALSE]
##     linear <- linear[!(rownames(linear) %in% rownames(smoothed)), 
##         , drop = FALSE]
##     colnames(smoothed) <- colnames(linear) <- "pval"
##     gams <- rbind(smoothed, linear)
##     gams <- gams[rownames(gams) != "(Intercept)", , drop = FALSE]
##     rownames(gams) <- getNames(rownames(gams))
##     colnames(gams)[1] <- "Overall"
##     gams <- as.data.frame(gams, stringsAsFactors = TRUE)
##     gams$Overall <- -log10(gams$Overall)
##     allPreds <- getNames(colnames(attr(object$terms, "factors")))
##     extras <- allPreds[!(allPreds %in% rownames(gams))]
##     if (length(extras) > 0) {
##         tmp <- data.frame(Overall = rep(NA, length(extras)))
##         rownames(tmp) <- extras
##         gams <- rbind(gams, tmp)
##     }
##     gams
## }
## 
## $gamSpline$notes
## [1] "Which terms enter the model in a nonlinear manner is determined by the number of unique values for the predictor. For example, if a predictor only has four unique values, most basis expansion method will fail because there are not enough granularity in the data. By default, a predictor must have at least 10 unique values to be used in a nonlinear basis expansion. Unlike other packages used by `train`, the `gam` package is fully loaded when this model is used."
## 
## $gamSpline$tags
## [1] "Generalized Linear Model"   "Generalized Additive Model"
## 
## $gamSpline$sort
## function (x) 
## x
## 
## $gamSpline$check
## function (pkg) 
## {
##     requireNamespace("gam")
##     current <- packageDescription("gam")$Version
##     expected <- "1.15"
##     if (compareVersion(current, expected) < 0) 
##         stop("This modeling workflow requires kohonen version ", 
##             expected, "or greater.", call. = FALSE)
## }
## 
## 
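A hedged sketch tuning the smoothing degrees of freedom listed above; the `df` values mirror the default grid and are illustrative only.

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
spline_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                    method = "gamSpline",
                    tuneGrid = data.frame(df = 1:3),
                    trControl = trainControl(method = "cv", number = 5))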
## $gaussprLinear
## $gaussprLinear$label
## [1] "Gaussian Process"
## 
## $gaussprLinear$library
## [1] "kernlab"
## 
## $gaussprLinear$type
## [1] "Regression"     "Classification"
## 
## $gaussprLinear$parameters
##   parameter     class     label
## 1 parameter character Parameter
## 
## $gaussprLinear$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $gaussprLinear$loop
## NULL
## 
## $gaussprLinear$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     kernlab::gausspr(x = as.matrix(x), y = y, kernel = "vanilladot", 
##         kpar = list(), ...)
## }
## 
## $gaussprLinear$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- kernlab::predict(modelFit, as.matrix(newdata))
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $gaussprLinear$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     kernlab::predict(modelFit, as.matrix(newdata), type = "probabilities")
## }
## 
## $gaussprLinear$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $gaussprLinear$tags
## [1] "Kernel Method"     "Gaussian Process"  "Linear Classifier"
## 
## $gaussprLinear$levels
## function (x) 
## lev(x)
## 
## $gaussprLinear$sort
## function (x) 
## x
## 
## 
## $gaussprPoly
## $gaussprPoly$label
## [1] "Gaussian Process with Polynomial Kernel"
## 
## $gaussprPoly$library
## [1] "kernlab"
## 
## $gaussprPoly$type
## [1] "Regression"     "Classification"
## 
## $gaussprPoly$parameters
##   parameter   class             label
## 1    degree numeric Polynomial Degree
## 2     scale numeric             Scale
## 
## $gaussprPoly$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(degree = seq(1, min(len, 3)), scale = 10^((1:len) - 
##             4))
##     }
##     else {
##         out <- data.frame(degree = sample(1:3, size = len, replace = TRUE), 
##             scale = 10^runif(len, min = -5, 0))
##     }
##     out
## }
## 
## $gaussprPoly$loop
## NULL
## 
## $gaussprPoly$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     kernlab::gausspr(x = as.matrix(x), y = y, kernel = kernlab::polydot(degree = param$degree, 
##         scale = param$scale, offset = 1), ...)
## }
## 
## $gaussprPoly$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- kernlab::predict(modelFit, as.matrix(newdata))
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $gaussprPoly$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     kernlab::predict(modelFit, as.matrix(newdata), type = "probabilities")
## }
## 
## $gaussprPoly$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$xscale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $gaussprPoly$tags
## [1] "Kernel Method"    "Gaussian Process" "Polynomial Model"
## 
## $gaussprPoly$levels
## function (x) 
## lev(x)
## 
## $gaussprPoly$sort
## function (x) 
## x
## 
## 
## $gaussprRadial
## $gaussprRadial$label
## [1] "Gaussian Process with Radial Basis Function Kernel"
## 
## $gaussprRadial$library
## [1] "kernlab"
## 
## $gaussprRadial$type
## [1] "Regression"     "Classification"
## 
## $gaussprRadial$parameters
##   parameter   class label
## 1     sigma numeric Sigma
## 
## $gaussprRadial$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     sigmas <- kernlab::sigest(as.matrix(x), na.action = na.omit, 
##         scaled = TRUE)
##     if (search == "grid") {
##         out <- expand.grid(sigma = mean(as.vector(sigmas[-2])))
##     }
##     else {
##         rng <- extendrange(log(sigmas), f = 0.75)
##         out <- data.frame(sigma = exp(runif(len, min = rng[1], 
##             max = rng[2])))
##     }
##     out
## }
## 
## $gaussprRadial$loop
## NULL
## 
## $gaussprRadial$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     kernlab::gausspr(x = as.matrix(x), y = y, kernel = "rbfdot", 
##         kpar = list(sigma = param$sigma), ...)
## }
## 
## $gaussprRadial$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- kernlab::predict(modelFit, as.matrix(newdata))
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $gaussprRadial$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     kernlab::predict(modelFit, as.matrix(newdata), type = "probabilities")
## }
## 
## $gaussprRadial$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $gaussprRadial$tags
## [1] "Kernel Method"         "Gaussian Process"      "Radial Basis Function"
## 
## $gaussprRadial$levels
## function (x) 
## lev(x)
## 
## $gaussprRadial$sort
## function (x) 
## x
## 
## 
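The three `gausspr*` entries share one calling pattern and differ only in the kernel: `gaussprLinear` has no real tuning parameter, `gaussprPoly` tunes `degree`/`scale`, and `gaussprRadial` tunes `sigma`. A hedged sketch for the radial kernel, with illustrative sigma values in place of the `sigest()`-derived default (kernlab required):

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
gp_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                method = "gaussprRadial",
                tuneGrid = data.frame(sigma = c(0.05, 0.1, 0.2)),
                trControl = trainControl(method = "cv", number = 5))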
## $gbm_h2o
## $gbm_h2o$label
## [1] "Gradient Boosting Machines"
## 
## $gbm_h2o$library
## [1] "h2o"
## 
## $gbm_h2o$type
## [1] "Regression"     "Classification"
## 
## $gbm_h2o$parameters
##         parameter   class                         label
## 1          ntrees numeric         # Boosting Iterations
## 2       max_depth numeric                Max Tree Depth
## 3        min_rows numeric       Min. Terminal Node Size
## 4      learn_rate numeric                     Shrinkage
## 5 col_sample_rate numeric #Randomly Selected Predictors
## 
## $gbm_h2o$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(max_depth = seq(1, len), ntrees = floor((1:len) * 
##             50), learn_rate = 0.1, min_rows = 10, col_sample_rate = 1)
##     }
##     else {
##         out <- data.frame(ntrees = floor(runif(len, min = 1, 
##             max = 5000)), max_depth = sample(1:10, replace = TRUE, 
##             size = len), learn_rate = runif(len, min = 0.001, 
##             max = 0.6), min_rows = sample(5:25, replace = TRUE, 
##             size = len), col_sample_rate = runif(len))
##     }
##     out
## }
## 
## $gbm_h2o$loop
## NULL
## 
## $gbm_h2o$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     lvs <- length(levels(y))
##     fam <- "gaussian"
##     if (lvs == 2) 
##         fam <- "bernoulli"
##     if (lvs > 2) 
##         fam <- "multinomial"
##     dat <- if (!is.data.frame(x)) 
##         as.data.frame(x, stringsAsFactors = TRUE)
##     else x
##     dat$.outcome <- y
##     frame_name <- paste0("tmp_gbm_dat_", sample.int(1e+05, 1))
##     tmp_train_dat = h2o::as.h2o(dat, destination_frame = frame_name)
##     out <- h2o::h2o.gbm(x = colnames(x), y = ".outcome", training_frame = tmp_train_dat, 
##         distribution = fam, ntrees = param$ntrees, max_depth = param$max_depth, 
##         learn_rate = param$learn_rate, min_rows = param$min_rows, 
##         col_sample_rate = param$col_sample_rate, ...)
##     h2o::h2o.getModel(out@model_id)
## }
## 
## $gbm_h2o$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     frame_name <- paste0("new_gbm_dat_", sample.int(1e+05, 1))
##     newdata <- h2o::as.h2o(newdata, destination_frame = frame_name)
##     as.data.frame(predict(modelFit, newdata), stringsAsFactors = TRUE)[, 
##         1]
## }
## 
## $gbm_h2o$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     frame_name <- paste0("new_gbm_dat_", sample.int(1e+05, 1))
##     newdata <- h2o::as.h2o(newdata, destination_frame = frame_name)
##     as.data.frame(predict(modelFit, newdata), stringsAsFactors = TRUE)[, 
##         -1]
## }
## 
## $gbm_h2o$predictors
## function (x, ...) 
## {
##     out <- as.data.frame(h2o::h2o.varimp(x), stringsAsFactors = TRUE)
##     out <- subset(out, relative_importance > 0)
##     as.character(out$variable)
## }
## 
## $gbm_h2o$varImp
## function (object, ...) 
## {
##     out <- as.data.frame(h2o::h2o.varimp(object), stringsAsFactors = TRUE)
##     colnames(out)[colnames(out) == "relative_importance"] <- "Overall"
##     rownames(out) <- out$variable
##     out[, c("Overall"), drop = FALSE]
## }
## 
## $gbm_h2o$levels
## function (x) 
## x@model$training_metrics@metrics$domain
## 
## $gbm_h2o$tags
## [1] "Tree-Based Model"           "Boosting"                  
## [3] "Ensemble Model"             "Implicit Feature Selection"
## 
## $gbm_h2o$sort
## function (x) 
## x[order(x$ntrees, x$max_depth, x$learn_rate), ]
## 
## $gbm_h2o$trim
## NULL
## 
## 
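All `*_h2o` methods ship the data to a running H2O cluster, so `h2o::h2o.init()` must succeed before `train()` is called. A heavily hedged sketch with an illustrative grid over the five parameters above (not run here; requires a working local H2O installation):

library(caret)
library(h2o)
h2o.init()   # starts or connects to a local H2O cluster
df_cc <- na.omit(airquality)
set.seed(1)
h2o_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                 method = "gbm_h2o",
                 tuneGrid = expand.grid(ntrees = c(50, 100),
                                        max_depth = c(2, 3),
                                        min_rows = 10,
                                        learn_rate = 0.1,
                                        col_sample_rate = 1),
                 trControl = trainControl(method = "cv", number = 5))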
## $gbm
## $gbm$label
## [1] "Stochastic Gradient Boosting"
## 
## $gbm$library
## [1] "gbm"  "plyr"
## 
## $gbm$type
## [1] "Regression"     "Classification"
## 
## $gbm$parameters
##           parameter   class                   label
## 1           n.trees numeric   # Boosting Iterations
## 2 interaction.depth numeric          Max Tree Depth
## 3         shrinkage numeric               Shrinkage
## 4    n.minobsinnode numeric Min. Terminal Node Size
## 
## $gbm$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(interaction.depth = seq(1, len), n.trees = floor((1:len) * 
##             50), shrinkage = 0.1, n.minobsinnode = 10)
##     }
##     else {
##         out <- data.frame(n.trees = floor(runif(len, min = 1, 
##             max = 5000)), interaction.depth = sample(1:10, replace = TRUE, 
##             size = len), shrinkage = runif(len, min = 0.001, 
##             max = 0.6), n.minobsinnode = sample(5:25, replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $gbm$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, c("shrinkage", "interaction.depth", 
##         "n.minobsinnode"), function(x) c(n.trees = max(x$n.trees)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$n.trees)) {
##         index <- which(grid$interaction.depth == loop$interaction.depth[i] & 
##             grid$shrinkage == loop$shrinkage[i] & grid$n.minobsinnode == 
##             loop$n.minobsinnode[i])
##         trees <- grid[index, "n.trees"]
##         submodels[[i]] <- data.frame(n.trees = trees[trees != 
##             loop$n.trees[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $gbm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "distribution")) {
##         modDist <- theDots$distribution
##         theDots$distribution <- NULL
##     }
##     else {
##         if (is.numeric(y)) {
##             modDist <- "gaussian"
##         }
##         else modDist <- if (length(lev) == 2) 
##             "bernoulli"
##         else "multinomial"
##     }
##     if (!is.null(wts)) 
##         theDots$w <- wts
##     if (is.factor(y) && length(lev) == 2) 
##         y <- ifelse(y == lev[1], 1, 0)
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     modArgs <- list(x = x, y = y, interaction.depth = param$interaction.depth, 
##         n.trees = param$n.trees, shrinkage = param$shrinkage, 
##         n.minobsinnode = param$n.minobsinnode, distribution = modDist)
##     if (any(names(theDots) == "family")) 
##         modArgs$distribution <- NULL
##     if (length(theDots) > 0) 
##         modArgs <- c(modArgs, theDots)
##     do.call(gbm::gbm.fit, modArgs)
## }
## 
## $gbm$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, type = "response", n.trees = modelFit$tuneValue$n.trees)
##     out[is.nan(out)] <- NA
##     out <- switch(modelFit$distribution$name, multinomial = {
##         colnames(out[, , 1, drop = FALSE])[apply(out[, , 1, drop = FALSE], 
##             1, which.max)]
##     }, bernoulli = , adaboost = , huberized = {
##         ifelse(out >= 0.5, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     }, gaussian = , laplace = , tdist = , poisson = , quantile = {
##         out
##     })
##     if (!is.null(submodels)) {
##         tmp <- predict(modelFit, newdata, type = "response", 
##             n.trees = submodels$n.trees)
##         out <- switch(modelFit$distribution$name, multinomial = {
##             lvl <- colnames(tmp[, , 1, drop = FALSE])
##             tmp <- apply(tmp, 3, function(x) apply(x, 1, which.max))
##             if (is.vector(tmp)) tmp <- matrix(tmp, nrow = 1)
##             tmp <- t(apply(tmp, 1, function(x, lvl) lvl[x], lvl = lvl))
##             if (nrow(tmp) == 1 & nrow(newdata) > 1) tmp <- t(tmp)
##             tmp <- as.list(as.data.frame(tmp, stringsAsFactors = FALSE))
##             c(list(out), tmp)
##         }, bernoulli = , adaboost = , huberized = {
##             tmp <- ifelse(tmp >= 0.5, modelFit$obsLevels[1], 
##                 modelFit$obsLevels[2])
##             tmp <- as.list(as.data.frame(tmp, stringsAsFactors = FALSE))
##             c(list(out), tmp)
##         }, gaussian = , laplace = , tdist = , poisson = , quantile = {
##             tmp <- as.list(as.data.frame(tmp, stringsAsFactors = TRUE))
##             c(list(out), tmp)
##         })
##     }
##     out
## }
## 
## $gbm$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, type = "response", n.trees = modelFit$tuneValue$n.trees)
##     out[is.nan(out)] <- NA
##     out <- switch(modelFit$distribution$name, multinomial = {
##         out <- if (dim(out)[3] == 1) as.data.frame(out, stringsAsFactors = TRUE) else out[, 
##             , 1]
##         colnames(out) <- modelFit$obsLevels
##         out
##     }, bernoulli = , adaboost = , huberized = {
##         out <- cbind(out, 1 - out)
##         colnames(out) <- modelFit$obsLevels
##         out
##     }, gaussian = , laplace = , tdist = , poisson = {
##         out
##     })
##     if (!is.null(submodels)) {
##         tmp <- predict(modelFit, newdata, type = "response", 
##             n.trees = submodels$n.trees)
##         tmp <- switch(modelFit$distribution$name, multinomial = {
##             apply(tmp, 3, function(x) data.frame(x))
##         }, bernoulli = , adaboost = , huberized = {
##             tmp <- as.list(as.data.frame(tmp, stringsAsFactors = TRUE))
##             lapply(tmp, function(x, lvl) {
##                 x <- cbind(x, 1 - x)
##                 colnames(x) <- lvl
##                 x
##             }, lvl = modelFit$obsLevels)
##         })
##         out <- c(list(out), tmp)
##     }
##     out
## }
## 
## $gbm$predictors
## function (x, ...) 
## {
##     vi <- relative.influence(x, n.trees = x$tuneValue$n.trees)
##     names(vi)[vi > 0]
## }
## 
## $gbm$varImp
## function (object, numTrees = NULL, ...) 
## {
##     if (is.null(numTrees)) 
##         numTrees <- object$tuneValue$n.trees
##     varImp <- relative.influence(object, n.trees = numTrees)
##     out <- data.frame(varImp)
##     colnames(out) <- "Overall"
##     rownames(out) <- object$var.names
##     out
## }
## 
## $gbm$levels
## function (x) 
## {
##     if (x$distribution$name %in% c("gaussian", "laplace", "tdist")) 
##         return(NULL)
##     if (is.null(x$classes)) {
##         out <- if (any(names(x) == "obsLevels")) 
##             x$obsLevels
##         else NULL
##     }
##     else {
##         out <- x$classes
##     }
##     out
## }
## 
## $gbm$tags
## [1] "Tree-Based Model"           "Boosting"                  
## [3] "Ensemble Model"             "Implicit Feature Selection"
## [5] "Accepts Case Weights"      
## 
## $gbm$sort
## function (x) 
## {
##     x[order(x$n.trees, x$interaction.depth, x$shrinkage), ]
## }
## 
## 
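A hedged sketch of the usual `gbm` tuning workflow over the four parameters above. The grid is illustrative; note that the `loop` function means only the largest `n.trees` per (depth, shrinkage, node-size) combination is actually fitted, with smaller tree counts evaluated as submodels.

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
gbm_grid <- expand.grid(n.trees = c(50, 100, 150),
                        interaction.depth = 1:3,
                        shrinkage = 0.1,
                        n.minobsinnode = 10)
gbm_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                 method = "gbm",
                 tuneGrid = gbm_grid,
                 trControl = trainControl(method = "cv", number = 5),
                 verbose = FALSE)
plot(gbm_fit)   # RMSE profile across the tuning grid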
## $gcvEarth
## $gcvEarth$label
## [1] "Multivariate Adaptive Regression Splines"
## 
## $gcvEarth$library
## [1] "earth"
## 
## $gcvEarth$type
## [1] "Regression"     "Classification"
## 
## $gcvEarth$parameters
##   parameter   class          label
## 1    degree numeric Product Degree
## 
## $gcvEarth$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     data.frame(degree = 1)
## }
## 
## $gcvEarth$loop
## NULL
## 
## $gcvEarth$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(earth)
##     theDots <- list(...)
##     theDots$keepxy <- TRUE
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(x = x, y = y, degree = param$degree), 
##         theDots)
##     if (is.factor(y) & !any(names(theDots) == "glm")) {
##         modelArgs$glm <- list(family = binomial, maxit = 100)
##     }
##     tmp <- do.call(earth::earth, modelArgs)
##     tmp$call["degree"] <- param$degree
##     tmp
## }
## 
## $gcvEarth$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         out <- earth:::predict.earth(modelFit, newdata, type = "class")
##     }
##     else {
##         out <- earth:::predict.earth(modelFit, newdata)
##     }
##     as.vector(out)
## }
## 
## $gcvEarth$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- earth:::predict.earth(modelFit, newdata, type = "response")
##     if (ncol(out) > 1) {
##         out <- t(apply(out, 1, function(x) x/sum(x)))
##     }
##     else {
##         out <- cbind(1 - out[, 1], out[, 1])
##         colnames(out) <- modelFit$obsLevels
##     }
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $gcvEarth$predictors
## function (x, ...) 
## {
##     vi <- varImp(x)
##     notZero <- sort(unique(unlist(lapply(vi, function(x) which(x > 
##         0)))))
##     if (length(notZero) > 0) 
##         rownames(vi)[notZero]
##     else NULL
## }
## 
## $gcvEarth$varImp
## function (object, value = "gcv", ...) 
## {
##     earthImp <- earth::evimp(object)
##     if (!is.matrix(earthImp)) 
##         earthImp <- t(as.matrix(earthImp))
##     out <- earthImp
##     perfCol <- which(colnames(out) == value)
##     increaseInd <- out[, perfCol + 1]
##     out <- as.data.frame(out[, perfCol, drop = FALSE], stringsAsFactors = TRUE)
##     colnames(out) <- "Overall"
##     if (any(earthImp[, "used"] == 0)) {
##         dropList <- grep("-unused", rownames(earthImp), value = TRUE)
##         out$Overall[rownames(out) %in% dropList] <- 0
##     }
##     rownames(out) <- gsub("-unused", "", rownames(out))
##     out <- as.data.frame(out, stringsAsFactors = TRUE)
##     xNames <- object$namesx.org
##     if (any(!(xNames %in% rownames(out)))) {
##         xNames <- xNames[!(xNames %in% rownames(out))]
##         others <- data.frame(Overall = rep(0, length(xNames)), 
##             row.names = xNames)
##         out <- rbind(out, others)
##     }
##     out
## }
## 
## $gcvEarth$levels
## function (x) 
## x$levels
## 
## $gcvEarth$tags
## [1] "Multivariate Adaptive Regression Splines"
## [2] "Implicit Feature Selection"              
## [3] "Accepts Case Weights"                    
## 
## $gcvEarth$notes
## [1] "Unlike other packages used by `train`, the `earth` package is fully loaded when this model is used."
## 
## $gcvEarth$sort
## function (x) 
## x[order(x$degree), ]
## 
## 
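`gcvEarth` fits one MARS model per `degree` and relies on earth's internal GCV pruning, so the grid stays tiny. A hedged sketch (earth required; the degrees are illustrative):

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
mars_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                  method = "gcvEarth",
                  tuneGrid = data.frame(degree = 1:2),
                  trControl = trainControl(method = "cv", number = 5))
varImp(mars_fit)   # GCV-based importance from earth::evimp()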
## $GFS.FR.MOGUL
## $GFS.FR.MOGUL$label
## [1] "Fuzzy Rules via MOGUL"
## 
## $GFS.FR.MOGUL$library
## [1] "frbs"
## 
## $GFS.FR.MOGUL$type
## [1] "Regression"
## 
## $GFS.FR.MOGUL$parameters
##   parameter   class                  label
## 1   max.gen numeric       Max. Generations
## 2  max.iter numeric        Max. Iterations
## 3  max.tune numeric Max. Tuning Iterations
## 
## $GFS.FR.MOGUL$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(max.gen = 10 * (1:len), max.iter = 10, 
##             max.tune = 10 * (1:len))
##     }
##     else {
##         out <- data.frame(max.gen = sample(1:20, size = len, 
##             replace = TRUE), max.iter = sample(1:20, replace = TRUE, 
##             size = len), max.tune = sample(1:20, size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $GFS.FR.MOGUL$loop
## NULL
## 
## $GFS.FR.MOGUL$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)), method.type = "GFS.FR.MOGUL")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$max.gen <- param$max.gen
##         theDots$control$max.iter <- param$max.iter
##         theDots$control$max.tune <- param$max.tune
##     }
##     else theDots$control <- list(max.gen = param$max.gen, max.iter = param$max.iter, 
##         max.tune = param$max.tune, persen_cross = 0.6, persen_mutant = 0.3, 
##         epsilon = 0.4, name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $GFS.FR.MOGUL$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $GFS.FR.MOGUL$prob
## NULL
## 
## $GFS.FR.MOGUL$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $GFS.FR.MOGUL$tags
## [1] "Rule-Based Model"
## 
## $GFS.FR.MOGUL$levels
## NULL
## 
## $GFS.FR.MOGUL$sort
## function (x) 
## x[order(x$max.iter), ]
## 
## 
## $GFS.LT.RS
## $GFS.LT.RS$label
## [1] "Genetic Lateral Tuning and Rule Selection of Linguistic Fuzzy Systems"
## 
## $GFS.LT.RS$library
## [1] "frbs"
## 
## $GFS.LT.RS$type
## [1] "Regression"
## 
## $GFS.LT.RS$parameters
##    parameter   class            label
## 1  popu.size numeric  Population Size
## 2 num.labels numeric   # Fuzzy Labels
## 3    max.gen numeric Max. Generations
## 
## $GFS.LT.RS$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(popu.size = 10 * (1:len), num.labels = 1 + 
##             (1:len) * 2, max.gen = 10)
##     }
##     else {
##         out <- data.frame(max.gen = sample(1:20, size = len, 
##             replace = TRUE), popu.size = sample(seq(10, 50, by = 2), 
##             size = len, replace = TRUE), num.labels = sample(2:20, 
##             size = len, replace = TRUE))
##     }
##     out
## }
## 
## $GFS.LT.RS$loop
## NULL
## 
## $GFS.LT.RS$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)), method.type = "GFS.LT.RS")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$popu.size <- param$popu.size
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$max.gen <- param$max.gen
##     }
##     else theDots$control <- list(popu.size = param$popu.size, 
##         num.labels = param$num.labels, max.gen = param$max.gen, 
##         persen_cross = 0.6, persen_mutant = 0.3, mode.tuning = "GLOBAL", 
##         type.tnorm = "MIN", type.snorm = "MAX", type.implication.func = "ZADEH", 
##         type.defuz = "WAM", rule.selection = FALSE, name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $GFS.LT.RS$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $GFS.LT.RS$prob
## NULL
## 
## $GFS.LT.RS$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $GFS.LT.RS$tags
## [1] "Rule-Based Model"
## 
## $GFS.LT.RS$levels
## NULL
## 
## $GFS.LT.RS$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
## $GFS.THRIFT
## $GFS.THRIFT$label
## [1] "Fuzzy Rules via Thrift"
## 
## $GFS.THRIFT$library
## [1] "frbs"
## 
## $GFS.THRIFT$type
## [1] "Regression"
## 
## $GFS.THRIFT$parameters
##    parameter   class            label
## 1  popu.size numeric  Population Size
## 2 num.labels numeric   # Fuzzy Labels
## 3    max.gen numeric Max. Generations
## 
## $GFS.THRIFT$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(popu.size = 10 * (1:len), num.labels = 1 + 
##             (1:len) * 2, max.gen = 10)
##     }
##     else {
##         out <- data.frame(max.gen = sample(1:20, size = len, 
##             replace = TRUE), popu.size = sample(seq(2, 20, by = 2), 
##             size = len, replace = TRUE), num.labels = sample(2:20, 
##             size = len, replace = TRUE))
##     }
##     out
## }
## 
## $GFS.THRIFT$loop
## NULL
## 
## $GFS.THRIFT$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)), method.type = "GFS.THRIFT")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$popu.size <- param$popu.size
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$max.gen <- param$max.gen
##     }
##     else theDots$control <- list(popu.size = param$popu.size, 
##         num.labels = param$num.labels, max.gen = param$max.gen, 
##         persen_cross = 0.6, persen_mutant = 0.3, type.defuz = "WAM", 
##         type.tnorm = "MIN", type.snorm = "MAX", type.mf = "TRIANGLE", 
##         type.implication.func = "ZADEH", name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $GFS.THRIFT$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $GFS.THRIFT$prob
## NULL
## 
## $GFS.THRIFT$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $GFS.THRIFT$tags
## [1] "Rule-Based Model"
## 
## $GFS.THRIFT$levels
## NULL
## 
## $GFS.THRIFT$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
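The three `GFS.*` entries above are regression-only fuzzy-rule systems from the frbs package, trained by genetic search, so even small grids can be slow. A hedged, deliberately tiny sketch for `GFS.THRIFT`; all parameter values are illustrative assumptions:

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
fuzzy_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                   method = "GFS.THRIFT",
                   tuneGrid = data.frame(popu.size = 10,
                                         num.labels = 3,
                                         max.gen = 10),
                   trControl = trainControl(method = "cv", number = 3))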
## $glm.nb
## $glm.nb$label
## [1] "Negative Binomial Generalized Linear Model"
## 
## $glm.nb$library
## [1] "MASS"
## 
## $glm.nb$loop
## NULL
## 
## $glm.nb$type
## [1] "Regression"
## 
## $glm.nb$parameters
##   parameter     class         label
## 1      link character Link Function
## 
## $glm.nb$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(link = c("log", "sqrt", "identity"))[1:min(len, 3), 
##     , drop = FALSE]
## 
## $glm.nb$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, link = as.character(param$link)), theDots)
##     out <- do.call(getFromNamespace("glm.nb", "MASS"), modelArgs)
##     out$call <- NULL
##     out
## }
## 
## $glm.nb$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "response")
## }
## 
## $glm.nb$prob
## NULL
## 
## $glm.nb$varImp
## function (object, ...) 
## {
##     values <- summary(object)$coef
##     varImps <- abs(values[-1, grep("value$", colnames(values)), 
##         drop = FALSE])
##     out <- data.frame(varImps)
##     colnames(out) <- "Overall"
##     if (!is.null(names(varImps))) 
##         rownames(out) <- names(varImps)
##     out
## }
## 
## $glm.nb$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $glm.nb$levels
## NULL
## 
## $glm.nb$trim
## function (x) 
## {
##     x$y = c()
##     x$model = c()
##     x$residuals = c()
##     x$fitted.values = c()
##     x$effects = c()
##     x$qr$qr = c()
##     x$linear.predictors = c()
##     x$weights = c()
##     x$prior.weights = c()
##     x$data = c()
##     x$family$variance = c()
##     x$family$dev.resids = c()
##     x$family$aic = c()
##     x$family$validmu = c()
##     x$family$simulate = c()
##     attr(x$terms, ".Environment") = c()
##     attr(x$formula, ".Environment") = c()
##     x
## }
## 
## $glm.nb$tags
## [1] "Generalized Linear Model" "Accepts Case Weights"    
## 
## $glm.nb$sort
## function (x) 
## x
## 
## 
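`glm.nb` expects a count-like outcome; airquality's integer Ozone readings are arguably close enough for a demonstration. A hedged sketch fixing the link at "log" (MASS required):

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
nb_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                method = "glm.nb",
                tuneGrid = data.frame(link = "log"),
                trControl = trainControl(method = "cv", number = 5))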
## $glm
## $glm$label
## [1] "Generalized Linear Model"
## 
## $glm$library
## NULL
## 
## $glm$loop
## NULL
## 
## $glm$type
## [1] "Regression"     "Classification"
## 
## $glm$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $glm$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $glm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (length(levels(y)) > 2) 
##         stop("glm models can only use 2-class outcomes")
##     theDots <- list(...)
##     if (!any(names(theDots) == "family")) {
##         theDots$family <- if (is.factor(y)) 
##             binomial()
##         else gaussian()
##     }
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat), theDots)
##     out <- do.call("glm", modelArgs)
##     out$call <- NULL
##     out
## }
## 
## $glm$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Classification") {
##         probs <- predict(modelFit, newdata, type = "response")
##         out <- ifelse(probs < 0.5, modelFit$obsLevel[1], modelFit$obsLevel[2])
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "response")
##     }
##     out
## }
## 
## $glm$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - out, out)
##     dimnames(out)[[2]] <- modelFit$obsLevels
##     out
## }
## 
## $glm$varImp
## function (object, ...) 
## {
##     values <- summary(object)$coef
##     varImps <- abs(values[-1, grep("value$", colnames(values)), 
##         drop = FALSE])
##     vimp <- data.frame(varImps)
##     colnames(vimp) <- "Overall"
##     if (!is.null(names(varImps))) 
##         rownames(vimp) <- names(varImps)
##     vimp
## }
## 
## $glm$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $glm$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $glm$trim
## function (x) 
## {
##     x$y = c()
##     x$model = c()
##     x$residuals = c()
##     x$fitted.values = c()
##     x$effects = c()
##     x$qr$qr = c()
##     x$linear.predictors = c()
##     x$weights = c()
##     x$prior.weights = c()
##     x$data = c()
##     x$family$variance = c()
##     x$family$dev.resids = c()
##     x$family$aic = c()
##     x$family$validmu = c()
##     x$family$simulate = c()
##     attr(x$terms, ".Environment") = c()
##     attr(x$formula, ".Environment") = c()
##     x
## }
## 
## $glm$tags
## [1] "Generalized Linear Model" "Linear Classifier"       
## [3] "Two Class Only"           "Accepts Case Weights"    
## 
## $glm$sort
## function (x) 
## x
## 
## 
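`glm` has no tuning parameter (the `parameter` column above is only a placeholder), so `train()` simply cross-validates a single fit. A minimal sketch on the same data:

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
glm_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                 method = "glm",
                 trControl = trainControl(method = "cv", number = 5))
summary(glm_fit$finalModel)   # standard glm() summary of the final fit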
## $glmboost
## $glmboost$label
## [1] "Boosted Generalized Linear Model"
## 
## $glmboost$library
## [1] "plyr"   "mboost"
## 
## $glmboost$type
## [1] "Regression"     "Classification"
## 
## $glmboost$parameters
##   parameter     class                 label
## 1     mstop   numeric # Boosting Iterations
## 2     prune character            AIC Prune?
## 
## $glmboost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mstop = floor((1:len) * 50), prune = "no")
##     }
##     else {
##         out <- data.frame(mstop = sample(1:1000, size = len, 
##             replace = TRUE), prune = sample(c("yes", "no"), size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $glmboost$loop
## function (grid) 
## {
##     grid <- grid[order(-grid$mstop, grid$prune), ]
##     loop <- plyr::ddply(grid, plyr::.(prune), function(x) data.frame(mstop = max(x$mstop)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$mstop)) {
##         submodels[[i]] <- subset(grid, prune == loop$prune[i] & 
##             mstop < loop$mstop[i])
##     }
##     list(loop = loop[, c("mstop", "prune")], submodels = submodels)
## }
## 
## $glmboost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$mstop <- param$mstop
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- mboost::boost_control(mstop = param$mstop)
##     if (!any(names(theDots) == "family")) 
##         theDots$family <- if (is.factor(y)) 
##             mboost::Binomial()
##         else mboost::GaussReg()
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, control = ctl), theDots)
##     out <- do.call(mboost:::glmboost.formula, modelArgs)
##     if (param$prune == "yes") {
##         iters <- if (is.factor(y)) 
##             mboost::mstop(AIC(out, "classical"))
##         else mboost::mstop(AIC(out))
##         if (iters < out$mstop()) 
##             out <- out[iters]
##     }
##     out$.org.mstop <- out$mstop()
##     out$call["x"] <- "xData"
##     out$call["y"] <- "yData"
##     out
## }
## 
## $glmboost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predType <- ifelse(modelFit$problemType == "Classification", 
##         "class", "response")
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = predType)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- as.vector(out)
##         for (j in seq(along = submodels$mstop)) {
##             this_mstop <- if (submodels$prune[j] == "yes" & submodels$mstop[j] > 
##                 modelFit$.org.mstop) 
##                 modelFit$.org.mstop
##             else submodels$mstop[j]
##             tmp[[j + 1]] <- as.vector(predict(modelFit[this_mstop], 
##                 newdata, type = predType))
##         }
##         out <- tmp
##         mboost::mstop(modelFit) <- modelFit$.org.mstop
##     }
##     out
## }
## 
## $glmboost$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     probs <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - probs, probs)
##     colnames(out) <- modelFit$obsLevels
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$mstop)) {
##             this_mstop <- if (submodels$prune[j] == "yes" & submodels$mstop[j] > 
##                 modelFit$.org.mstop) 
##                 modelFit$.org.mstop
##             else submodels$mstop[j]
##             tmpProb <- predict(modelFit[this_mstop], newdata, 
##                 type = "response")
##             tmpProb <- cbind(1 - tmpProb, tmpProb)
##             colnames(tmpProb) <- modelFit$obsLevels
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##         mboost::mstop(modelFit) <- modelFit$.org.mstop
##     }
##     out
## }
## 
## $glmboost$predictors
## function (x, ...) 
## {
##     strsplit(variable.names(x), ", ")[[1]]
## }
## 
## $glmboost$varImp
## function (object, ...) 
## {
##     betas <- abs(coef(object))
##     betas <- betas[names(betas) != "(Intercept)"]
##     bnames <- names(betas)
##     name_check <- object$xName %in% bnames
##     if (any(!(name_check))) {
##         missing <- object$xName[!name_check]
##         beta_miss <- rep(0, length(missing))
##         names(beta_miss) <- missing
##         betas <- c(betas, beta_miss)
##     }
##     out <- data.frame(Overall = betas)
##     rownames(out) <- names(betas)
##     out
## }
## 
## $glmboost$levels
## function (x) 
## levels(x$response)
## 
## $glmboost$notes
## [1] "The `prune` option for this model enables the number of iterations to be determined by the optimal AIC value across all iterations. See the examples in `?mboost::mstop`. If pruning is not used, the ensemble makes predictions using the exact value of the `mstop` tuning parameter value."
## 
## $glmboost$tags
## [1] "Generalized Linear Model" "Ensemble Model"          
## [3] "Boosting"                 "Linear Classifier"       
## [5] "Two Class Only"           "Accepts Case Weights"    
## 
## $glmboost$sort
## function (x) 
## x[order(x$mstop, x$prune), ]
## 
## 
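`glmboost` reuses the same `mstop`/`prune` machinery as `gamboost` above but boosts linear base-learners instead of smooth ones. A hedged sketch with an illustrative grid (mboost and plyr required):

library(caret)
df_cc <- na.omit(airquality)
set.seed(1)
glmboost_fit <- train(Ozone ~ Solar.R + Wind + Temp, data = df_cc,
                      method = "glmboost",
                      tuneGrid = expand.grid(mstop = c(50, 100, 150),
                                             prune = "no"),
                      trControl = trainControl(method = "cv", number = 5))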
## $glmnet_h2o
## $glmnet_h2o$label
## [1] "glmnet"
## 
## $glmnet_h2o$library
## [1] "h2o"
## 
## $glmnet_h2o$type
## [1] "Regression"     "Classification"
## 
## $glmnet_h2o$parameters
##   parameter   class                    label
## 1     alpha numeric        Mixing Percentage
## 2    lambda numeric Regularization Parameter
## 
## $glmnet_h2o$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(alpha = seq(0, 1, length = len), lambda = c(0, 
##             10^seq(-1, -4, length = len - 1)))
##     }
##     else {
##         out <- data.frame(alpha = runif(len, min = 0, 1), lambda = 2^runif(len, 
##             min = -10, 3))
##     }
##     out
## }
## 
## $glmnet_h2o$loop
## NULL
## 
## $glmnet_h2o$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (!is.data.frame(x)) 
##         as.data.frame(x, stringsAsFactors = TRUE)
##     else x
##     dat$.outcome <- y
##     p <- ncol(dat)
##     frame_name <- paste0("tmp_glmnet_dat_", sample.int(1e+05, 
##         1))
##     tmp_train_dat = h2o::as.h2o(dat, destination_frame = frame_name)
##     out <- h2o::h2o.glm(x = colnames(x), y = ".outcome", training_frame = tmp_train_dat, 
##         family = if (is.factor(y)) 
##             "binomial"
##         else "gaussian", alpha = param$alpha, lambda = param$lambda, 
##         ...)
##     h2o::h2o.getModel(out@model_id)
## }
## 
## $glmnet_h2o$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     frame_name <- paste0("new_glmnet_dat_", sample.int(1e+05, 
##         1))
##     newdata <- h2o::as.h2o(newdata, destination_frame = frame_name)
##     as.data.frame(predict(modelFit, newdata), stringsAsFactors = TRUE)[, 
##         1]
## }
## 
## $glmnet_h2o$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     frame_name <- paste0("new_glmnet_dat_", sample.int(1e+05, 
##         1))
##     newdata <- h2o::as.h2o(newdata, destination_frame = frame_name)
##     as.data.frame(predict(modelFit, newdata), stringsAsFactors = TRUE)[, 
##         -1]
## }
## 
## $glmnet_h2o$predictors
## function (object, ...) 
## {
##     out <- as.data.frame(h2o::h2o.varimp(object), stringsAsFactors = TRUE)
##     colnames(out)[colnames(out) == "coefficients"] <- "Overall"
##     out <- out[!is.na(out$Overall), ]
##     out$names
## }
## 
## $glmnet_h2o$varImp
## function (object, ...) 
## {
##     out <- as.data.frame(h2o::h2o.varimp(object), stringsAsFactors = TRUE)
##     colnames(out)[colnames(out) == "coefficients"] <- "Overall"
##     rownames(out) <- out$names
##     out <- out[!is.na(out$Overall), c("Overall"), drop = FALSE]
##     all_var <- object@allparameters$x
##     if (any(!(all_var %in% rownames(out)))) {
##         missing <- all_var[!(all_var %in% rownames(out))]
##         tmp <- data.frame(Overall = rep(0, length(missing)))
##         rownames(tmp) <- missing
##         out <- rbind(out, tmp)
##     }
##     out
## }
## 
## $glmnet_h2o$levels
## NULL
## 
## $glmnet_h2o$tags
## [1] "Generalized Linear Model"   "Implicit Feature Selection"
## [3] "L1 Regularization"          "L2 Regularization"         
## [5] "Linear Classifier"          "Linear Regression"         
## [7] "Two Class Only"            
## 
## $glmnet_h2o$sort
## function (x) 
## x[order(-x$lambda, x$alpha), ]
## 
## $glmnet_h2o$trim
## NULL
## 
## 
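# Unlike the plain glmnet spec that follows, glmnet_h2o ships the training data
# to an H2O cluster, so h2o.init() must succeed first. An illustrative sketch;
# the data set, grid, and CV settings are assumptions:
h2o::h2o.init()  # start or attach a local H2O cluster
set.seed(1)
h2o_fit <- caret::train(mpg ~ ., data = mtcars, method = "glmnet_h2o",
                        tuneGrid = expand.grid(alpha = c(0, 1), lambda = c(0, 0.1)),
                        trControl = caret::trainControl(method = "cv", number = 3))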
## $glmnet
## $glmnet$label
## [1] "glmnet"
## 
## $glmnet$library
## [1] "glmnet" "Matrix"
## 
## $glmnet$type
## [1] "Regression"     "Classification"
## 
## $glmnet$parameters
##   parameter   class                    label
## 1     alpha numeric        Mixing Percentage
## 2    lambda numeric Regularization Parameter
## 
## $glmnet$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         numLev <- if (is.character(y) | is.factor(y)) 
##             length(levels(y))
##         else NA
##         if (!is.na(numLev)) {
##             fam <- ifelse(numLev > 2, "multinomial", "binomial")
##         }
##         else fam <- "gaussian"
##         if (!is.matrix(x) && !inherits(x, "sparseMatrix")) 
##             x <- Matrix::as.matrix(x)
##         init <- glmnet::glmnet(x, y, family = fam, nlambda = len + 
##             2, alpha = 0.5)
##         lambda <- unique(init$lambda)
##         lambda <- lambda[-c(1, length(lambda))]
##         lambda <- lambda[1:min(length(lambda), len)]
##         out <- expand.grid(alpha = seq(0.1, 1, length = len), 
##             lambda = lambda)
##     }
##     else {
##         out <- data.frame(alpha = runif(len, min = 0, 1), lambda = 2^runif(len, 
##             min = -10, 3))
##     }
##     out
## }
## 
## $glmnet$loop
## function (grid) 
## {
##     alph <- unique(grid$alpha)
##     loop <- data.frame(alpha = alph)
##     loop$lambda <- NA
##     submodels <- vector(mode = "list", length = length(alph))
##     for (i in seq(along = alph)) {
##         np <- grid[grid$alpha == alph[i], "lambda"]
##         loop$lambda[loop$alpha == alph[i]] <- np[which.max(np)]
##         submodels[[i]] <- data.frame(lambda = np[-which.max(np)])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $glmnet$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     numLev <- if (is.character(y) | is.factor(y)) 
##         length(levels(y))
##     else NA
##     theDots <- list(...)
##     if (all(names(theDots) != "family")) {
##         if (!is.na(numLev)) {
##             fam <- ifelse(numLev > 2, "multinomial", "binomial")
##         }
##         else fam <- "gaussian"
##         theDots$family <- fam
##     }
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     if (!is.matrix(x) && !inherits(x, "sparseMatrix")) 
##         x <- Matrix::as.matrix(x)
##     modelArgs <- c(list(x = x, y = y, alpha = param$alpha), theDots)
##     out <- do.call(glmnet::glmnet, modelArgs)
##     if (!is.na(param$lambda[1])) 
##         out$lambdaOpt <- param$lambda[1]
##     out
## }
## 
## $glmnet$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata) && !inherits(newdata, "sparseMatrix")) 
##         newdata <- Matrix::as.matrix(newdata)
##     if (length(modelFit$obsLevels) < 2) {
##         out <- predict(modelFit, newdata, s = modelFit$lambdaOpt, 
##             type = "response")
##     }
##     else {
##         out <- predict(modelFit, newdata, s = modelFit$lambdaOpt, 
##             type = "class")
##     }
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     if (!is.null(submodels)) {
##         if (length(modelFit$obsLevels) < 2) {
##             tmp <- as.list(as.data.frame(predict(modelFit, newdata, 
##                 s = submodels$lambda), stringsAsFactors = TRUE))
##         }
##         else {
##             tmp <- predict(modelFit, newdata, s = submodels$lambda, 
##                 type = "class")
##             tmp <- if (is.matrix(tmp)) 
##                 as.data.frame(tmp, stringsAsFactors = FALSE)
##             else as.character(tmp)
##             tmp <- as.list(tmp)
##         }
##         out <- c(list(out), tmp)
##     }
##     out
## }
## 
## $glmnet$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     obsLevels <- if ("classnames" %in% names(modelFit)) 
##         modelFit$classnames
##     else NULL
##     if (!is.matrix(newdata) && !inherits(newdata, "sparseMatrix")) 
##         newdata <- Matrix::as.matrix(newdata)
##     probs <- predict(modelFit, newdata, s = modelFit$lambdaOpt, 
##         type = "response")
##     if (length(obsLevels) == 2) {
##         probs <- as.vector(probs)
##         probs <- as.data.frame(cbind(1 - probs, probs), stringsAsFactors = FALSE)
##         colnames(probs) <- modelFit$obsLevels
##     }
##     else {
##         probs <- as.data.frame(probs[, , 1, drop = FALSE], stringsAsFactors = FALSE)
##         names(probs) <- modelFit$obsLevels
##     }
##     if (!is.null(submodels)) {
##         tmp <- predict(modelFit, newdata, s = submodels$lambda, 
##             type = "response")
##         if (length(obsLevels) == 2) {
##             tmp <- as.list(as.data.frame(tmp, stringsAsFactors = TRUE))
##             tmp <- lapply(tmp, function(x, lev) {
##                 x <- as.vector(x)
##                 tmp <- data.frame(1 - x, x)
##                 names(tmp) <- lev
##                 tmp
##             }, lev = modelFit$obsLevels)
##         }
##         else tmp <- apply(tmp, 3, function(x) data.frame(x))
##         probs <- if (is.list(tmp)) 
##             c(list(probs), tmp)
##         else list(probs, tmp)
##     }
##     probs
## }
## 
## $glmnet$predictors
## function (x, lambda = NULL, ...) 
## {
##     if (is.null(lambda)) {
##         if (length(lambda) > 1) 
##             stop("Only one value of lambda is allowed right now")
##         if (!is.null(x$lambdaOpt)) {
##             lambda <- x$lambdaOpt
##         }
##         else stop("must supply a value of lambda")
##     }
##     allVar <- if (is.list(x$beta)) 
##         rownames(x$beta[[1]])
##     else rownames(x$beta)
##     out <- unlist(predict(x, s = lambda, type = "nonzero"))
##     out <- unique(out)
##     if (length(out) > 0) {
##         out <- out[!is.na(out)]
##         out <- allVar[out]
##     }
##     out
## }
## 
## $glmnet$varImp
## function (object, lambda = NULL, ...) 
## {
##     if (is.null(lambda)) {
##         if (length(lambda) > 1) 
##             stop("Only one value of lambda is allowed right now")
##         if (!is.null(object$lambdaOpt)) {
##             lambda <- object$lambdaOpt
##         }
##         else stop("must supply a value of lambda")
##     }
##     beta <- predict(object, s = lambda, type = "coef")
##     if (is.list(beta)) {
##         out <- do.call("cbind", lapply(beta, function(x) x[, 
##             1]))
##         out <- as.data.frame(out, stringsAsFactors = TRUE)
##     }
##     else out <- data.frame(Overall = beta[, 1])
##     out <- abs(out[rownames(out) != "(Intercept)", , drop = FALSE])
##     out
## }
## 
## $glmnet$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $glmnet$tags
## [1] "Generalized Linear Model"   "Implicit Feature Selection"
## [3] "L1 Regularization"          "L2 Regularization"         
## [5] "Linear Classifier"          "Linear Regression"         
## 
## $glmnet$sort
## function (x) 
## x[order(-x$lambda, x$alpha), ]
## 
## $glmnet$trim
## function (x) 
## {
##     x$call <- NULL
##     x$df <- NULL
##     x$dev.ratio <- NULL
##     x
## }
## 
## 
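# A minimal sketch of driving the glmnet spec through caret::train(); the data
# set, grid values, and CV settings are illustrative assumptions (glmnet and
# Matrix must be installed).
set.seed(1)
glmnet_fit <- caret::train(mpg ~ ., data = mtcars, method = "glmnet",
                           tuneGrid = expand.grid(alpha = c(0, 0.5, 1),
                                                  lambda = 10^seq(-3, 0, length.out = 5)),
                           trControl = caret::trainControl(method = "cv", number = 5))
glmnet_fit$bestTune
# Per the loop/submodels scheme above, each alpha is fit once per resample and
# all lambda values are scored from that single glmnet path.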
## $glmStepAIC
## $glmStepAIC$label
## [1] "Generalized Linear Model with Stepwise Feature Selection"
## 
## $glmStepAIC$library
## [1] "MASS"
## 
## $glmStepAIC$loop
## NULL
## 
## $glmStepAIC$type
## [1] "Regression"     "Classification"
## 
## $glmStepAIC$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $glmStepAIC$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $glmStepAIC$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (length(levels(y)) > 2) 
##         stop("glm models can only use 2-class outcomes")
##     stepArgs <- names(formals(MASS::stepAIC))
##     stepArgs <- stepArgs[!(stepArgs %in% c("object", "..."))]
##     theDots <- list(...)
##     glmArgs <- list()
##     if (!any(names(theDots) == "family")) {
##         glmArgs$family <- if (is.factor(y)) 
##             binomial()
##         else gaussian()
##     }
##     else glmArgs$family <- theDots$family
##     if (any(!(names(theDots) %in% stepArgs))) 
##         theDots <- theDots[names(theDots) %in% stepArgs]
##     if (any(names(theDots) == "direction")) {
##         if (theDots$direction == "forward") {
##             start_form <- as.formula(".outcome ~ 1")
##             if (!any(names(theDots) == "scope")) {
##                 theDots$scope <- list(lower = as.formula(".outcome ~ 1"), 
##                   upper = as.formula(paste0(".outcome~", paste0(colnames(x), 
##                     collapse = "+"))))
##             }
##         }
##         else {
##             start_form <- as.formula(".outcome ~ .")
##         }
##     }
##     else start_form <- as.formula(".outcome ~ .")
##     if (!is.null(wts)) 
##         glmArgs$weights <- wts
##     modelArgs <- c(list(formula = start_form, data = dat), glmArgs)
##     mod <- do.call(glm, modelArgs)
##     theDots$object <- mod
##     out <- do.call(MASS::stepAIC, theDots)
##     out$call <- NULL
##     out
## }
## 
## $glmStepAIC$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Classification") {
##         probs <- predict(modelFit, newdata, type = "response")
##         out <- ifelse(probs < 0.5, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "response")
##     }
##     out
## }
## 
## $glmStepAIC$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - out, out)
##     dimnames(out)[[2]] <- modelFit$obsLevels
##     out
## }
## 
## $glmStepAIC$levels
## function (x) 
## x$obsLevels
## 
## $glmStepAIC$tags
## [1] "Generalized Linear Model"   "Feature Selection Wrapper" 
## [3] "Linear Classifier"          "Implicit Feature Selection"
## [5] "Two Class Only"             "Accepts Case Weights"      
## 
## $glmStepAIC$sort
## function (x) 
## x
## 
## 
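# glmStepAIC has no tuning grid (parameter = "none"); arguments such as
# `direction` and `trace` pass through to MASS::stepAIC via `...`, as the fit
# function above shows. An illustrative sketch with assumed data and settings:
dat <- mtcars
dat$am <- factor(dat$am, labels = c("auto", "manual"))
set.seed(1)
step_fit <- caret::train(am ~ hp + wt + qsec, data = dat, method = "glmStepAIC",
                         direction = "backward", trace = FALSE,
                         trControl = caret::trainControl(method = "cv", number = 5))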
## $gpls
## $gpls$label
## [1] "Generalized Partial Least Squares"
## 
## $gpls$library
## [1] "gpls"
## 
## $gpls$loop
## NULL
## 
## $gpls$type
## [1] "Classification"
## 
## $gpls$parameters
##   parameter   class       label
## 1    K.prov numeric #Components
## 
## $gpls$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(K.prov = seq(1, len))
##     }
##     else {
##         out <- data.frame(K.prov = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $gpls$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## gpls::gpls(x, y, K.prov = param$K.prov, ...)
## 
## $gpls$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$class
## 
## $gpls$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$predicted
##     out <- cbind(out, 1 - out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $gpls$predictors
## function (x, ...) 
## {
##     out <- if (hasTerms(x)) 
##         predictors(x$terms)
##     else colnames(x$data$x.order)
##     out[!(out %in% "Intercept")]
## }
## 
## $gpls$tags
## [1] "Logistic Regression"   "Partial Least Squares" "Linear Classifier"    
## 
## $gpls$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## $gpls$levels
## function (x) 
## x$obsLevels
## 
## 
## $hda
## $hda$label
## [1] "Heteroscedastic Discriminant Analysis"
## 
## $hda$library
## [1] "hda"
## 
## $hda$loop
## NULL
## 
## $hda$type
## [1] "Classification"
## 
## $hda$parameters
##   parameter   class                                    label
## 1     gamma numeric                                    Gamma
## 2    lambda numeric                                   Lambda
## 3    newdim numeric Dimension of the Discriminative Subspace
## 
## $hda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(gamma = seq(0.1, 1, length = len), 
##             lambda = seq(0, 1, length = len), newdim = 2:(min(len, 
##                 ncol(x))))
##     }
##     else {
##         out <- data.frame(gamma = runif(len, min = 0, max = 1), 
##             lambda = runif(len, min = 0, max = 1), newdim = sample(2:ncol(x), 
##                 size = len, replace = TRUE))
##     }
##     out
## }
## 
## $hda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## hda::hda(x, y, newdim = param$newdim, reg.lamb = param$lambda, 
##     reg.gamm = param$gamma, crule = TRUE, ...)
## 
## $hda$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     tmp <- predict(modelFit, as.matrix(newdata))
##     if (is.vector(tmp)) 
##         tmp <- matrix(tmp, ncol = 1)
##     as.character(predict(modelFit$naivebayes, tmp))
## }
## 
## $hda$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     tmp <- predict(modelFit, as.matrix(newdata))
##     if (is.vector(tmp)) 
##         tmp <- matrix(tmp, ncol = 1)
##     as.data.frame(predict(modelFit$naivebayes, tmp, type = "raw"), 
##         stringsAsFactors = FALSE)
## }
## 
## $hda$levels
## function (x) 
## x$obsLevels
## 
## $hda$tags
## [1] "Discriminant Analysis" "Linear Classifier"     "Regularization"       
## 
## $hda$sort
## function (x) 
## x[order(x$newdim, -x$lambda, x$gamma), ]
## 
## 
## $hdda
## $hdda$label
## [1] "High Dimensional Discriminant Analysis"
## 
## $hdda$library
## [1] "HDclassif"
## 
## $hdda$loop
## NULL
## 
## $hdda$type
## [1] "Classification"
## 
## $hdda$parameters
##   parameter     class      label
## 1 threshold   numeric  Threshold
## 2     model character Model Type
## 
## $hdda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     mods <- c("AkjBkQkDk", "AkBkQkDk", "ABkQkDk", "AkjBQkDk", 
##         "AkBQkDk", "ABQkDk", "AkjBkQkD", "AkBkQkD", "ABkQkD", 
##         "AkjBQkD", "AkBQkD", "ABQkD", "AjBQD", "ABQD")
##     if (search == "grid") {
##         out <- expand.grid(model = c("all"), threshold = seq(0.05, 
##             0.3, length = len))
##     }
##     else {
##         out <- data.frame(model = sample(mods, size = len, replace = TRUE), 
##             threshold = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $hdda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     HDclassif::hdda(x, y, model = as.character(param$model), 
##         threshold = param$threshold, ...)
## }
## 
## $hdda$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     as.character(predict(modelFit, newdata)$class)
## }
## 
## $hdda$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     data.frame(unclass(predict(modelFit, newdata)$posterior))
## }
## 
## $hdda$levels
## function (x) 
## x$obsLevels
## 
## $hdda$tags
## [1] "Discriminant Analysis" "Linear Classifier"    
## 
## $hdda$sort
## function (x) 
## x[order(-x$threshold), ]
## 
## 
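# An illustrative sketch of tuning hdda's threshold while keeping model = "all"
# (so HDclassif picks among the 14 covariance models listed in the grid
# function above); the data and CV settings are assumptions:
set.seed(1)
hdda_fit <- caret::train(Species ~ ., data = iris, method = "hdda",
                         tuneGrid = expand.grid(model = "all",
                                                threshold = c(0.05, 0.1, 0.2)),
                         trControl = caret::trainControl(method = "cv", number = 5))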
## $hdrda
## $hdrda$label
## [1] "High-Dimensional Regularized Discriminant Analysis"
## 
## $hdrda$library
## [1] "sparsediscrim"
## 
## $hdrda$loop
## NULL
## 
## $hdrda$type
## [1] "Classification"
## 
## $hdrda$parameters
##        parameter     class          label
## 1          gamma   numeric          Gamma
## 2         lambda   numeric         Lambda
## 3 shrinkage_type character Shrinkage Type
## 
## $hdrda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(gamma = seq(0, 1, length = len), lambda = seq(0, 
##             1, length = len), shrinkage_type = c("ridge", "convex"))
##     }
##     else {
##         out <- data.frame(gamma = runif(len, min = 0, max = 1), 
##             lambda = runif(len, min = 0, max = 1), shrinkage_type = sample(c("ridge", 
##                 "convex"), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $hdrda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     sparsediscrim::hdrda(x, y, gamma = param$gamma, lambda = param$lambda, 
##         shrinkage_type = as.character(param$shrinkage_type), 
##         ...)
## }
## 
## $hdrda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$class
## 
## $hdrda$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$posterior
## 
## $hdrda$predictors
## function (x, ...) 
## x$varnames
## 
## $hdrda$tags
## [1] "Discriminant Analysis" "Polynomial Model"      "Regularization"       
## [4] "Linear Classifier"    
## 
## $hdrda$levels
## function (x) 
## names(x$prior)
## 
## $hdrda$sort
## function (x) 
## {
##     x[order(-x$lambda, x$gamma), ]
## }
## 
## 
## $HYFIS
## $HYFIS$label
## [1] "Hybrid Neural Fuzzy Inference System"
## 
## $HYFIS$library
## [1] "frbs"
## 
## $HYFIS$type
## [1] "Regression"
## 
## $HYFIS$parameters
##    parameter   class           label
## 1 num.labels numeric    #Fuzzy Terms
## 2   max.iter numeric Max. Iterations
## 
## $HYFIS$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(num.labels = 1 + (1:len) * 2, max.iter = 10)
##     }
##     else {
##         out <- data.frame(num.labels = sample(2:20, size = len, 
##             replace = TRUE), max.iter = sample(1:20, replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $HYFIS$loop
## NULL
## 
## $HYFIS$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)), method.type = "HYFIS")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$max.iter <- param$max.iter
##     }
##     else theDots$control <- list(num.labels = param$num.labels, 
##         max.iter = param$max.iter, step.size = 0.01, type.tnorm = "MIN", 
##         type.snorm = "MAX", type.defuz = "COG", type.implication.func = "ZADEH", 
##         name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $HYFIS$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $HYFIS$prob
## NULL
## 
## $HYFIS$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $HYFIS$tags
## [1] "Rule-Based Model"
## 
## $HYFIS$levels
## NULL
## 
## $HYFIS$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
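# HYFIS is regression-only and can be slow to fit; a small illustrative sketch
# with assumed data, grid, and CV settings (requires the frbs package):
set.seed(1)
hyfis_fit <- caret::train(mpg ~ ., data = mtcars, method = "HYFIS",
                          tuneGrid = expand.grid(num.labels = c(3, 5), max.iter = 10),
                          trControl = caret::trainControl(method = "cv", number = 3))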
## $icr
## $icr$label
## [1] "Independent Component Regression"
## 
## $icr$library
## [1] "fastICA"
## 
## $icr$loop
## NULL
## 
## $icr$type
## [1] "Regression"
## 
## $icr$parameters
##   parameter   class       label
## 1    n.comp numeric #Components
## 
## $icr$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(n.comp = 1:len)
##     }
##     else {
##         out <- data.frame(n.comp = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $icr$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     caret::icr(x, y, n.comp = param$n.comp, ...)
## }
## 
## $icr$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $icr$prob
## NULL
## 
## $icr$tags
## [1] "Linear Regression"  "Feature Extraction"
## 
## $icr$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
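# icr regresses the outcome on fastICA components, so n.comp is the only tuning
# parameter. An illustrative sketch; the data and settings are assumptions:
set.seed(1)
icr_fit <- caret::train(mpg ~ ., data = mtcars, method = "icr",
                        tuneGrid = data.frame(n.comp = 1:3),
                        trControl = caret::trainControl(method = "cv", number = 5))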
## $J48
## $J48$label
## [1] "C4.5-like Trees"
## 
## $J48$library
## [1] "RWeka"
## 
## $J48$loop
## NULL
## 
## $J48$type
## [1] "Classification"
## 
## $J48$parameters
##   parameter   class                      label
## 1         C numeric       Confidence Threshold
## 2         M numeric Minimum Instances Per Leaf
## 
## $J48$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     upperBound <- min(max(1, floor(nrow(x)/2)), 50)
##     if (search == "grid") {
##         out <- expand.grid(C = seq(0.01, 0.5, length.out = len), 
##             M = 1:min(upperBound, len))
##         if (len == 1) {
##             out <- data.frame(C = 0.25, M = 2)
##         }
##     }
##     else {
##         out <- data.frame(C = runif(len, 0, 0.5), M = round(exp(runif(len, 
##             0, log(upperBound)))))
##     }
##     return(out)
## }
## 
## $J48$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$C <- param$C
##         theDots$control$M <- param$M
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- RWeka::Weka_control(C = param$C, M = param$M)
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, control = ctl), theDots)
##     out <- do.call(RWeka::J48, modelArgs)
##     out
## }
## 
## $J48$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $J48$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "probability")
## }
## 
## $J48$levels
## function (x) 
## x$obsLevels
## 
## $J48$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $J48$tags
## [1] "Tree-Based Model"           "Implicit Feature Selection"
## 
## $J48$sort
## function (x) 
## x[order(x$C, x$M), ]
## 
## 
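# J48 tunes the confidence threshold C and the minimum leaf size M. An
# illustrative sketch with assumed data and settings (RWeka and a Java runtime
# are required):
set.seed(1)
j48_fit <- caret::train(Species ~ ., data = iris, method = "J48",
                        tuneGrid = expand.grid(C = c(0.1, 0.25, 0.4), M = 2:3),
                        trControl = caret::trainControl(method = "cv", number = 5))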
## $JRip
## $JRip$label
## [1] "Rule-Based Classifier"
## 
## $JRip$library
## [1] "RWeka"
## 
## $JRip$loop
## NULL
## 
## $JRip$type
## [1] "Classification"
## 
## $JRip$parameters
##    parameter   class           label
## 1     NumOpt numeric # Optimizations
## 2   NumFolds numeric         # Folds
## 3 MinWeights numeric     Min Weights
## 
## $JRip$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     upperBound <- min(max(1, floor(nrow(x)/2)), 50)
##     if (search == "grid") {
##         out <- expand.grid(NumOpt = 1:min(len, sqrt(upperBound)), 
##             NumFolds = 2:min(len + 1, upperBound), MinWeights = 1:min(len, 
##                 sqrt(upperBound)))
##         if (len == 1) {
##             out <- data.frame(NumOpt = 2, NumFolds = 3, MinWeights = 2)
##         }
##     }
##     else {
##         out <- data.frame(NumOpt = round(exp(runif(5 * len, 0, 
##             log(len)))), NumFolds = round(exp(runif(5 * len, 
##             0, log(upperBound)))), MinWeights = round(exp(runif(5 * 
##             len, 0, log(upperBound)))))
##         out <- unique(out)
##         out <- out[1:min(nrow(out), len), ]
##     }
##     return(out)
## }
## 
## $JRip$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$O <- param$NumOpt
##         theDots$control$F <- param$NumFolds
##         theDots$control$N <- param$MinWeights
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- RWeka::Weka_control(O = param$NumOpt, F = param$NumFolds, 
##         N = param$MinWeights)
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, control = ctl), theDots)
##     out <- do.call(RWeka::JRip, modelArgs)
##     out
## }
## 
## $JRip$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $JRip$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "probability")
## }
## 
## $JRip$levels
## function (x) 
## x$obsLevels
## 
## $JRip$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $JRip$tags
## [1] "Rule-Based Model"           "Implicit Feature Selection"
## 
## $JRip$varImp
## function (object, ...) 
## {
##     dat <- caret:::ripperRuleSummary(object)
##     out <- dat$varUsage[, "Overall", drop = FALSE]
##     rownames(out) <- dat$varUsage$Var
##     out
## }
## 
## $JRip$sort
## function (x) 
## x[order(x$NumOpt, x$NumFolds, x$MinWeights, decreasing = TRUE), 
##     ]
## 
## 
## $kernelpls
## $kernelpls$label
## [1] "Partial Least Squares"
## 
## $kernelpls$library
## [1] "pls"
## 
## $kernelpls$type
## [1] "Regression"     "Classification"
## 
## $kernelpls$parameters
##   parameter   class       label
## 1     ncomp numeric #Components
## 
## $kernelpls$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
##             by = 1))
##     }
##     else {
##         out <- data.frame(ncomp = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $kernelpls$loop
## function (grid) 
## {
##     grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $kernelpls$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     ncomp <- min(ncol(x), param$ncomp)
##     out <- if (is.factor(y)) {
##         caret::plsda(x, y, method = "kernelpls", ncomp = ncomp, 
##             ...)
##     }
##     else {
##         dat <- if (is.data.frame(x)) 
##             x
##         else as.data.frame(x, stringsAsFactors = TRUE)
##         dat$.outcome <- y
##         pls::plsr(.outcome ~ ., data = dat, method = "kernelpls", 
##             ncomp = ncomp, ...)
##     }
##     out
## }
## 
## $kernelpls$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- if (modelFit$problemType == "Classification") {
##         if (!is.matrix(newdata)) 
##             newdata <- as.matrix(newdata)
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels))
##         if (modelFit$problemType == "Classification") {
##             if (length(submodels$ncomp) > 1) {
##                 tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
##             }
##             else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
##         }
##         else {
##             tmp <- as.list(as.data.frame(apply(predict(modelFit, 
##                 newdata, ncomp = submodels$ncomp), 3, function(x) list(x)), 
##                 stringsAsFactors = FALSE))
##         }
##         out <- c(list(out), tmp)
##     }
##     out
## }
## 
## $kernelpls$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
##     if (length(dim(out)) == 3) {
##         if (dim(out)[1] > 1) {
##             out <- out[, , 1]
##         }
##         else {
##             out <- as.data.frame(t(out[, , 1]), stringsAsFactors = TRUE)
##         }
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$ncomp)) {
##             tmpProb <- predict(modelFit, newdata, type = "prob", 
##                 ncomp = submodels$ncomp[j])
##             if (length(dim(tmpProb)) == 3) {
##                 if (dim(tmpProb)[1] > 1) {
##                   tmpProb <- tmpProb[, , 1]
##                 }
##                 else {
##                   tmpProb <- as.data.frame(t(tmpProb[, , 1]), 
##                     stringsAsFactors = TRUE)
##                 }
##             }
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $kernelpls$varImp
## function (object, estimate = NULL, ...) 
## {
##     library(pls)
##     modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
##     perf <- MSEP(object)$val
##     nms <- dimnames(perf)
##     if (length(nms$estimate) > 1) {
##         pIndex <- if (is.null(estimate)) 
##             1
##         else which(nms$estimate == estimate)
##         perf <- perf[pIndex, , , drop = FALSE]
##     }
##     numResp <- dim(modelCoef)[2]
##     if (numResp <= 2) {
##         modelCoef <- modelCoef[, 1, , drop = FALSE]
##         perf <- perf[, 1, ]
##         delta <- -diff(perf)
##         delta <- delta/sum(delta)
##         out <- data.frame(Overall = apply(abs(modelCoef), 1, 
##             weighted.mean, w = delta))
##     }
##     else {
##         perf <- -t(apply(perf[1, , ], 1, diff))
##         perf <- t(apply(perf, 1, function(u) u/sum(u)))
##         out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
##         for (i in 1:numResp) {
##             tmp <- abs(modelCoef[, i, , drop = FALSE])
##             out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
##                 ])
##         }
##         colnames(out) <- dimnames(modelCoef)[[2]]
##         rownames(out) <- dimnames(modelCoef)[[1]]
##     }
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $kernelpls$predictors
## function (x, ...) 
## rownames(x$projection)
## 
## $kernelpls$levels
## function (x) 
## x$obsLevels
## 
## $kernelpls$tags
## [1] "Partial Least Squares" "Feature Extraction"    "Kernel Method"        
## [4] "Linear Classifier"     "Linear Regression"    
## 
## $kernelpls$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
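# Thanks to the loop/submodels scheme above, one PLS fit per resample scores
# every smaller ncomp. An illustrative sketch with assumed data and settings
# (requires the pls package):
set.seed(1)
pls_fit <- caret::train(Species ~ ., data = iris, method = "kernelpls",
                        tuneGrid = data.frame(ncomp = 1:3),
                        trControl = caret::trainControl(method = "cv", number = 5))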
## $kknn
## $kknn$label
## [1] "k-Nearest Neighbors"
## 
## $kknn$library
## [1] "kknn"
## 
## $kknn$loop
## NULL
## 
## $kknn$type
## [1] "Regression"     "Classification"
## 
## $kknn$parameters
##   parameter     class           label
## 1      kmax   numeric Max. #Neighbors
## 2  distance   numeric        Distance
## 3    kernel character          Kernel
## 
## $kknn$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(kmax = (5:((2 * len) + 4))[(5:((2 * 
##             len) + 4))%%2 > 0], distance = 2, kernel = "optimal")
##     }
##     else {
##         by_val <- if (is.factor(y)) 
##             length(levels(y))
##         else 1
##         kerns <- c("rectangular", "triangular", "epanechnikov", 
##             "biweight", "triweight", "cos", "inv", "gaussian")
##         out <- data.frame(kmax = sample(seq(1, floor(nrow(x)/3), 
##             by = by_val), size = len, replace = TRUE), distance = runif(len, 
##             min = 0, max = 3), kernel = sample(kerns, size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $kknn$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     kknn::train.kknn(.outcome ~ ., data = dat, kmax = param$kmax, 
##         distance = param$distance, kernel = as.character(param$kernel), 
##         ...)
## }
## 
## $kknn$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $kknn$levels
## function (x) 
## x$obsLevels
## 
## $kknn$tags
## [1] "Prototype Models"
## 
## $kknn$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "prob")
## }
## 
## $kknn$sort
## function (x) 
## x[order(-x[, 1]), ]
## 
## 
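# kknn tunes kmax, the Minkowski distance power, and the kernel. An
# illustrative sketch with assumed data, grid, and CV settings (requires the
# kknn package):
set.seed(1)
kknn_fit <- caret::train(Species ~ ., data = iris, method = "kknn",
                         tuneGrid = expand.grid(kmax = c(5, 7, 9), distance = 2,
                                                kernel = "optimal"),
                         trControl = caret::trainControl(method = "cv", number = 5))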
## $knn
## $knn$label
## [1] "k-Nearest Neighbors"
## 
## $knn$library
## NULL
## 
## $knn$loop
## NULL
## 
## $knn$type
## [1] "Classification" "Regression"    
## 
## $knn$parameters
##   parameter   class      label
## 1         k numeric #Neighbors
## 
## $knn$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(k = (5:((2 * len) + 4))[(5:((2 * len) + 
##             4))%%2 > 0])
##     }
##     else {
##         by_val <- if (is.factor(y)) 
##             length(levels(y))
##         else 1
##         out <- data.frame(k = sample(seq(1, floor(nrow(x)/3), 
##             by = by_val), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $knn$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (is.factor(y)) {
##         knn3(as.matrix(x), y, k = param$k, ...)
##     }
##     else {
##         knnreg(as.matrix(x), y, k = param$k, ...)
##     }
## }
## 
## $knn$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else {
##         out <- predict(modelFit, newdata)
##     }
##     out
## }
## 
## $knn$predictors
## function (x, ...) 
## colnames(x$learn$X)
## 
## $knn$tags
## [1] "Prototype Models"
## 
## $knn$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $knn$levels
## function (x) 
## levels(x$learn$y)
## 
## $knn$sort
## function (x) 
## x[order(-x[, 1]), ]
## 
## 
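# The plain knn method needs no extra package (caret's knn3/knnreg do the
# work). An illustrative sketch with assumed data, grid, and CV settings:
set.seed(1)
knn_fit <- caret::train(Species ~ ., data = iris, method = "knn",
                        tuneGrid = data.frame(k = c(3, 5, 7, 9)),
                        trControl = caret::trainControl(method = "cv", number = 5))
knn_fit$bestTune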
## $krlsPoly
## $krlsPoly$label
## [1] "Polynomial Kernel Regularized Least Squares"
## 
## $krlsPoly$library
## [1] "KRLS"
## 
## $krlsPoly$loop
## NULL
## 
## $krlsPoly$type
## [1] "Regression"
## 
## $krlsPoly$parameters
##   parameter   class                    label
## 1    lambda numeric Regularization Parameter
## 2    degree numeric        Polynomial Degree
## 
## $krlsPoly$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = NA, degree = 1:3)
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 0), 
##             degree = sample(1:3, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $krlsPoly$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!(param$degree %in% 1:4)) 
##         stop("Degree should be either 1, 2, 3 or 4")
##     krn <- switch(param$degree, `1` = "linear", `2` = "poly2", 
##         `3` = "poly3", `4` = "poly4")
##     KRLS::krls(x, y, lambda = if (is.na(param$lambda)) 
##         NULL
##     else param$lambda, derivative = FALSE, whichkernel = krn, 
##         ...)
## }
## 
## $krlsPoly$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     KRLS:::predict.krls(modelFit, newdata)$fit[, 1]
## }
## 
## $krlsPoly$tags
## [1] "Kernel Method"     "L2 Regularization" "Polynomial Model" 
## 
## $krlsPoly$prob
## NULL
## 
## $krlsPoly$sort
## function (x) 
## x[order(x$degree, x$lambda), ]
## 
## 
## $krlsRadial
## $krlsRadial$label
## [1] "Radial Basis Function Kernel Regularized Least Squares"
## 
## $krlsRadial$library
## [1] "KRLS"    "kernlab"
## 
## $krlsRadial$loop
## NULL
## 
## $krlsRadial$type
## [1] "Regression"
## 
## $krlsRadial$parameters
##   parameter   class                    label
## 1    lambda numeric Regularization Parameter
## 2     sigma numeric                    Sigma
## 
## $krlsRadial$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     sigmaEstimate <- try(kernlab::sigest(x, na.action = na.omit, 
##         scaled = TRUE), silent = TRUE)
##     if (!inherits(sigmaEstimate, "try-error")) {
##         if (search == "grid") {
##             out <- expand.grid(lambda = NA, sigma = 1/seq(sigmaEstimate[1], 
##                 sigmaEstimate[3], length = len))
##         }
##         else {
##             rng <- extendrange(log(sigmaEstimate), f = 0.75)
##             out <- data.frame(lambda = 10^runif(len, min = -5, 
##                 0), sigma = 1/exp(runif(len, min = rng[1], max = rng[2])))
##         }
##     }
##     else {
##         if (search == "grid") {
##             out <- expand.grid(lambda = NA, sigma = 1/(10^((1:len) - 
##                 3)))
##         }
##         else {
##             out <- data.frame(lambda = 10^runif(len, min = -5, 
##                 0), sigma = 1/(10^runif(len, min = -4, max = 0)))
##         }
##     }
##     out
## }
## 
## $krlsRadial$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     KRLS::krls(x, y, lambda = if (is.na(param$lambda)) 
##         NULL
##     else param$lambda, sigma = param$sigma, ...)
## }
## 
## $krlsRadial$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     KRLS:::predict.krls(modelFit, newdata)$fit[, 1]
## }
## 
## $krlsRadial$tags
## [1] "Kernel Method"         "L2 Regularization"     "Radial Basis Function"
## 
## $krlsRadial$prob
## NULL
## 
## $krlsRadial$sort
## function (x) 
## x[order(x$lambda), ]
## 
## 
## $lars
## $lars$label
## [1] "Least Angle Regression"
## 
## $lars$library
## [1] "lars"
## 
## $lars$type
## [1] "Regression"
## 
## $lars$parameters
##   parameter   class    label
## 1  fraction numeric Fraction
## 
## $lars$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(fraction = seq(0.05, 1, length = len))
##     }
##     else {
##         out <- data.frame(fraction = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $lars$loop
## function (grid) 
## {
##     grid <- grid[order(grid$fraction, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $lars$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## lars::lars(as.matrix(x), y, ...)
## 
## $lars$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, as.matrix(newdata), type = "fit", 
##         mode = "fraction", s = modelFit$tuneValue$fraction)$fit
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$fraction)) {
##             tmp[[j + 1]] <- predict(modelFit, as.matrix(newdata), 
##                 type = "fit", mode = "fraction", s = submodels$fraction[j])$fit
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $lars$predictors
## function (x, s = NULL, ...) 
## {
##     if (is.null(s)) {
##         if (!is.null(x$tuneValue)) {
##             s <- x$tuneValue$fraction
##         }
##         else stop("must supply a vaue of s")
##         out <- predict(x, s = s, type = "coefficients", mode = "fraction")$coefficients
##     }
##     else {
##         out <- predict(x, s = s, ...)$coefficients
##     }
##     names(out)[out != 0]
## }
## 
## $lars$tags
## [1] "Linear Regression"          "Implicit Feature Selection"
## [3] "L1 Regularization"         
## 
## $lars$prob
## NULL
## 
## $lars$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
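# lars is tuned on the fraction of the full L1 path; the loop function above
# lets one lars fit serve every fraction. An illustrative sketch with assumed
# data and settings (requires the lars package):
set.seed(1)
lars_fit <- caret::train(mpg ~ ., data = mtcars, method = "lars",
                         tuneGrid = data.frame(fraction = seq(0.1, 0.9, by = 0.2)),
                         trControl = caret::trainControl(method = "cv", number = 5))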
## $lars2
## $lars2$label
## [1] "Least Angle Regression"
## 
## $lars2$library
## [1] "lars"
## 
## $lars2$type
## [1] "Regression"
## 
## $lars2$parameters
##   parameter   class  label
## 1      step numeric #Steps
## 
## $lars2$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(step = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(step = sample(1:ncol(x), size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $lars2$loop
## function (grid) 
## {
##     grid <- grid[order(grid$step, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $lars2$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## lars::lars(as.matrix(x), y, ...)
## 
## $lars2$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, as.matrix(newdata), type = "fit", 
##         mode = "step", s = modelFit$tuneValue$step)$fit
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$step)) {
##             tmp[[j + 1]] <- predict(modelFit, as.matrix(newdata), 
##                 type = "fit", mode = "step", s = submodels$step[j])$fit
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $lars2$predictors
## function (x, s = NULL, ...) 
## {
##     if (is.null(s)) {
##         if (!is.null(x$tuneValue)) {
##             s <- x$tuneValue$step
##         }
##         else stop("must supply a value of s")
##         out <- predict(x, s = s, type = "coefficients", mode = "step")$coefficients
##     }
##     else {
##         out <- predict(x, s = s, ...)$coefficients
##     }
##     names(out)[out != 0]
## }
## 
## $lars2$tags
## [1] "Linear Regression"          "Implicit Feature Selection"
## [3] "L1 Regularization"         
## 
## $lars2$prob
## NULL
## 
## $lars2$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $lasso
## $lasso$label
## [1] "The lasso"
## 
## $lasso$library
## [1] "elasticnet"
## 
## $lasso$type
## [1] "Regression"
## 
## $lasso$parameters
##   parameter   class                     label
## 1  fraction numeric Fraction of Full Solution
## 
## $lasso$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(fraction = seq(0.1, 0.9, length = len))
##     }
##     else {
##         out <- data.frame(fraction = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $lasso$loop
## function (grid) 
## {
##     grid <- grid[order(grid$fraction, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $lasso$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     elasticnet::enet(as.matrix(x), y, lambda = 0, ...)
## }
## 
## $lasso$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- elasticnet::predict.enet(modelFit, newdata, s = modelFit$tuneValue$fraction, 
##         mode = "fraction")$fit
##     if (!is.null(submodels)) {
##         if (nrow(submodels) > 1) {
##             out <- c(list(if (is.matrix(out)) out[, 1] else out), 
##                 as.list(as.data.frame(elasticnet::predict.enet(modelFit, 
##                   newx = newdata, s = submodels$fraction, mode = "fraction")$fit)))
##         }
##         else {
##             tmp <- elasticnet::predict.enet(modelFit, newx = newdata, 
##                 s = submodels$fraction, mode = "fraction")$fit
##             out <- c(list(if (is.matrix(out)) out[, 1] else out), 
##                 list(tmp))
##         }
##     }
##     out
## }
## 
## $lasso$predictors
## function (x, s = NULL, ...) 
## {
##     if (is.null(s)) {
##         if (!is.null(x$tuneValue)) {
##             s <- x$tuneValue$fraction
##         }
##         else stop("must supply a vaue of s")
##         out <- elasticnet::predict.enet(x, s = s, type = "coefficients", 
##             mode = "fraction")$coefficients
##     }
##     else {
##         out <- elasticnet::predict.enet(x, s = s)$coefficients
##     }
##     names(out)[out != 0]
## }
## 
## $lasso$tags
## [1] "Linear Regression"          "Implicit Feature Selection"
## [3] "L1 Regularization"         
## 
## $lasso$prob
## NULL
## 
## $lasso$sort
## function (x) 
## x[order(x$fraction), ]
## 
## 
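# The lasso spec fits elasticnet::enet() with lambda = 0 and tunes the L1
# fraction. An illustrative sketch with assumed data and settings (requires
# the elasticnet package):
set.seed(1)
lasso_fit <- caret::train(mpg ~ ., data = mtcars, method = "lasso",
                          tuneGrid = data.frame(fraction = seq(0.1, 0.9, by = 0.2)),
                          trControl = caret::trainControl(method = "cv", number = 5))
caret::predictors(lasso_fit)  # variables active at the chosen fraction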
## $lda
## $lda$label
## [1] "Linear Discriminant Analysis"
## 
## $lda$library
## [1] "MASS"
## 
## $lda$loop
## NULL
## 
## $lda$type
## [1] "Classification"
## 
## $lda$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $lda$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $lda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## MASS::lda(x, y, ...)
## 
## $lda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$class
## 
## $lda$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$posterior
## 
## $lda$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else colnames(x$means)
## 
## $lda$tags
## [1] "Discriminant Analysis" "Linear Classifier"    
## 
## $lda$levels
## function (x) 
## names(x$prior)
## 
## $lda$sort
## function (x) 
## x
## 
## 
## $lda2
## $lda2$label
## [1] "Linear Discriminant Analysis"
## 
## $lda2$library
## [1] "MASS"
## 
## $lda2$loop
## function (grid) 
## {
##     grid <- grid[order(grid$dimen, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $lda2$type
## [1] "Classification"
## 
## $lda2$parameters
##   parameter   class                   label
## 1     dimen numeric #Discriminant Functions
## 
## $lda2$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(dimen = 1:min(ncol(x), length(levels(y)) - 1))
## 
## $lda2$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## MASS::lda(x, y, ...)
## 
## $lda2$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- as.character(predict(modelFit, newdata, dimen = modelFit$tuneValue$dimen)$class)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$dimen)) {
##             tmp[[j + 1]] <- as.character(predict(modelFit, newdata, 
##                 dimen = submodels$dimen[j])$class)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $lda2$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, dimen = modelFit$tuneValue$dimen)$posterior
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$dimen)) {
##             tmpProb <- predict(modelFit, newdata, dimen = submodels$dimen[j])$posterior
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $lda2$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else colnames(x$means)
## 
## $lda2$tags
## [1] "Discriminant Analysis" "Linear Classifier"    
## 
## $lda2$levels
## function (x) 
## names(x$prior)
## 
## $lda2$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
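# lda2 differs from lda only in tuning the number of discriminant functions
# used at prediction time. For iris (3 classes) dimen can be at most 2; the
# data and settings below are illustrative assumptions:
set.seed(1)
lda2_fit <- caret::train(Species ~ ., data = iris, method = "lda2",
                         tuneGrid = data.frame(dimen = 1:2),
                         trControl = caret::trainControl(method = "cv", number = 5))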
## $leapBackward
## $leapBackward$label
## [1] "Linear Regression with Backwards Selection"
## 
## $leapBackward$library
## [1] "leaps"
## 
## $leapBackward$type
## [1] "Regression"
## 
## $leapBackward$parameters
##   parameter   class                        label
## 1     nvmax numeric Maximum Number of Predictors
## 
## $leapBackward$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(nvmax = 2:(len + 1))
##     }
##     else {
##         out <- data.frame(nvmax = sort(unique(sample(2:(ncol(x) - 
##             1), size = len, replace = TRUE))))
##     }
##     out
## }
## 
## $leapBackward$loop
## function (grid) 
## {
##     grid <- grid[order(grid$nvmax, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $leapBackward$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "nbest")) 
##         stop("'nbest' should not be specified")
##     if (any(names(theDots) == "method")) 
##         stop("'method' should not be specified")
##     if (any(names(theDots) == "nvmax")) 
##         stop("'nvmax' should not be specified")
##     leaps::regsubsets(as.matrix(x), y, weights = if (!is.null(wts)) 
##         wts
##     else rep(1, length(y)), nbest = 1, nvmax = param$nvmax, method = "backward", 
##         ...)
## }
## 
## $leapBackward$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     newdata <- as.matrix(newdata)
##     foo <- function(b, x) x[, names(b), drop = FALSE] %*% b
##     path <- 1:(modelFit$nvmax - 1)
##     betas <- coef(modelFit, id = 1:(modelFit$nvmax - 1))
##     newdata <- cbind(rep(1, nrow(newdata)), newdata)
##     colnames(newdata)[1] <- "(Intercept)"
##     out <- foo(betas[[length(betas)]], newdata)[, 1]
##     if (!is.null(submodels)) {
##         numTerms <- unlist(lapply(betas, length))
##         if (any(names(betas[[length(betas)]]) == "(Intercept)")) 
##             numTerms <- numTerms - 1
##         keepers <- which(numTerms %in% submodels$nvmax)
##         if (length(keepers) != length(submodels$nvmax)) 
##             stop("Some values of 'nvmax' are not in the model sequence.")
##         keepers <- rev(keepers)
##         preds <- lapply(betas[keepers], foo, x = newdata)
##         preds <- do.call("cbind", preds)
##         out <- as.data.frame(cbind(out, preds), stringsAsFactors = TRUE)
##         out <- as.list(out)
##     }
##     out
## }
## 
## $leapBackward$tags
## [1] "Linear Regression"         "Feature Selection Wrapper"
## 
## $leapBackward$prob
## NULL
## 
## $leapBackward$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
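# The three leaps-based methods differ only in the search direction passed to
# leaps::regsubsets() ("backward", "forward", "seqrep"), so the same call works
# for leapForward and leapSeq below. Data and settings are illustrative:
set.seed(1)
leap_fit <- caret::train(mpg ~ ., data = mtcars, method = "leapBackward",
                         tuneGrid = data.frame(nvmax = 2:5),
                         trControl = caret::trainControl(method = "cv", number = 5))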
## $leapForward
## $leapForward$label
## [1] "Linear Regression with Forward Selection"
## 
## $leapForward$library
## [1] "leaps"
## 
## $leapForward$type
## [1] "Regression"
## 
## $leapForward$parameters
##   parameter   class                        label
## 1     nvmax numeric Maximum Number of Predictors
## 
## $leapForward$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(nvmax = 2:(len + 1))
##     }
##     else {
##         out <- data.frame(nvmax = sort(unique(sample(2:(ncol(x) - 
##             1), size = len, replace = TRUE))))
##     }
##     out
## }
## 
## $leapForward$loop
## function (grid) 
## {
##     grid <- grid[order(grid$nvmax, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $leapForward$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "nbest")) 
##         stop("'nbest' should not be specified")
##     if (any(names(theDots) == "method")) 
##         stop("'method' should not be specified")
##     if (any(names(theDots) == "nvmax")) 
##         stop("'nvmax' should not be specified")
##     leaps::regsubsets(as.matrix(x), y, weights = if (!is.null(wts)) 
##         wts
##     else rep(1, length(y)), nbest = 1, nvmax = param$nvmax, method = "forward", 
##         ...)
## }
## 
## $leapForward$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     newdata <- as.matrix(newdata)
##     foo <- function(b, x) x[, names(b), drop = FALSE] %*% b
##     path <- 1:(modelFit$nvmax - 1)
##     betas <- coef(modelFit, id = 1:(modelFit$nvmax - 1))
##     newdata <- cbind(rep(1, nrow(newdata)), as.matrix(newdata))
##     colnames(newdata)[1] <- "(Intercept)"
##     out <- foo(betas[[length(betas)]], newdata)[, 1]
##     if (!is.null(submodels)) {
##         numTerms <- unlist(lapply(betas, length))
##         if (any(names(betas[[length(betas)]]) == "(Intercept)")) 
##             numTerms <- numTerms - 1
##         keepers <- which(numTerms %in% submodels$nvmax)
##         if (length(keepers) != length(submodels$nvmax)) 
##             stop("Some values of 'nvmax' are not in the model sequence.")
##         keepers <- rev(keepers)
##         preds <- lapply(betas[keepers], foo, x = newdata)
##         preds <- do.call("cbind", preds)
##         out <- as.data.frame(cbind(out, preds), stringsAsFactors = TRUE)
##         out <- as.list(out)
##     }
##     out
## }
## 
## $leapForward$tags
## [1] "Linear Regression"         "Feature Selection Wrapper"
## 
## $leapForward$prob
## NULL
## 
## $leapForward$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $leapSeq
## $leapSeq$label
## [1] "Linear Regression with Stepwise Selection"
## 
## $leapSeq$library
## [1] "leaps"
## 
## $leapSeq$type
## [1] "Regression"
## 
## $leapSeq$parameters
##   parameter   class                        label
## 1     nvmax numeric Maximum Number of Predictors
## 
## $leapSeq$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(nvmax = 2:(len + 1))
##     }
##     else {
##         out <- data.frame(nvmax = sort(unique(sample(2:(ncol(x) - 
##             1), size = len, replace = TRUE))))
##     }
##     out
## }
## 
## $leapSeq$loop
## function (grid) 
## {
##     grid <- grid[order(grid$nvmax, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $leapSeq$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "nbest")) 
##         stop("'nbest' should not be specified")
##     if (any(names(theDots) == "method")) 
##         stop("'method' should not be specified")
##     if (any(names(theDots) == "nvmax")) 
##         stop("'nvmax' should not be specified")
##     leaps::regsubsets(as.matrix(x), y, weights = if (!is.null(wts)) 
##         wts
##     else rep(1, length(y)), nbest = 1, nvmax = param$nvmax, method = "seqrep", 
##         ...)
## }
## 
## $leapSeq$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     newdata <- as.matrix(newdata)
##     foo <- function(b, x) x[, names(b), drop = FALSE] %*% b
##     path <- 1:(modelFit$nvmax - 1)
##     betas <- coef(modelFit, id = 1:(modelFit$nvmax - 1))
##     newdata <- cbind(rep(1, nrow(newdata)), as.matrix(newdata))
##     colnames(newdata)[1] <- "(Intercept)"
##     out <- foo(betas[[length(betas)]], newdata)[, 1]
##     if (!is.null(submodels)) {
##         numTerms <- unlist(lapply(betas, length))
##         if (any(names(betas[[length(betas)]]) == "(Intercept)")) 
##             numTerms <- numTerms - 1
##         keepers <- which(numTerms %in% submodels$nvmax)
##         if (length(keepers) != length(submodels$nvmax)) 
##             stop("Some values of 'nvmax' are not in the model sequence.")
##         keepers <- rev(keepers)
##         preds <- lapply(betas[keepers], foo, x = newdata)
##         preds <- do.call("cbind", preds)
##         out <- as.data.frame(cbind(out, preds), stringsAsFactors = TRUE)
##         out <- as.list(out)
##     }
##     out
## }
## 
## $leapSeq$tags
## [1] "Linear Regression"         "Feature Selection Wrapper"
## 
## $leapSeq$prob
## NULL
## 
## $leapSeq$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
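As a usage sketch for the `leapSeq` spec above: `train()` feeds each `nvmax` from the tuning grid into `$fit`, and thanks to `$loop`/`$predict` only the largest subset search is actually run, with the smaller submodels scored from the same `regsubsets` object. A minimal example, assuming the `caret` and `leaps` packages are installed (the `airquality` columns are purely illustrative):

library(caret)
aq <- na.omit(airquality)                      # drop rows with missing values
set.seed(1)
m_seq <- train(Ozone ~ ., data = aq, method = "leapSeq",
               tuneGrid = data.frame(nvmax = 2:5),   # max predictors per model
               trControl = trainControl(method = "cv", number = 5))
m_seq$bestTune                                 # cross-validated choice of nvmax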
## $Linda
## $Linda$label
## [1] "Robust Linear Discriminant Analysis"
## 
## $Linda$library
## [1] "rrcov"
## 
## $Linda$loop
## NULL
## 
## $Linda$type
## [1] "Classification"
## 
## $Linda$parameters
##   parameter     class label
## 1 parameter character  none
## 
## $Linda$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $Linda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## rrcov:::Linda(x, y, ...)
## 
## $Linda$predict
## function (modelFit, newdata, submodels = NULL) 
## rrcov:::predict(modelFit, newdata)@classification
## 
## $Linda$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     probs <- rrcov:::predict(modelFit, newdata)@posterior
##     colnames(probs) <- names(modelFit@prior)
##     probs
## }
## 
## $Linda$tags
## [1] "Discriminant Analysis" "Linear Classifier"     "Robust Model"         
## 
## $Linda$levels
## function (x) 
## names(x@prior)
## 
## $Linda$sort
## function (x) 
## x
## 
## 
## $lm
## $lm$label
## [1] "Linear Regression"
## 
## $lm$library
## NULL
## 
## $lm$loop
## NULL
## 
## $lm$type
## [1] "Regression"
## 
## $lm$parameters
##   parameter   class     label
## 1 intercept logical intercept
## 
## $lm$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(intercept = TRUE)
## 
## $lm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         if (param$intercept) 
##             out <- lm(.outcome ~ ., data = dat, weights = wts, 
##                 ...)
##         else out <- lm(.outcome ~ 0 + ., data = dat, weights = wts, 
##             ...)
##     }
##     else {
##         if (param$intercept) 
##             out <- lm(.outcome ~ ., data = dat, ...)
##         else out <- lm(.outcome ~ 0 + ., data = dat, ...)
##     }
##     out
## }
## 
## $lm$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $lm$prob
## NULL
## 
## $lm$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $lm$tags
## [1] "Linear Regression"    "Accepts Case Weights"
## 
## $lm$varImp
## function (object, ...) 
## {
##     values <- summary(object)$coef
##     varImps <- abs(values[!grepl(rownames(values), pattern = "Intercept"), 
##         grep("value$", colnames(values)), drop = FALSE])
##     out <- data.frame(varImps)
##     colnames(out) <- "Overall"
##     if (!is.null(names(varImps))) 
##         rownames(out) <- names(varImps)
##     out
## }
## 
## $lm$sort
## function (x) 
## x
## 
## 
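The `lm` spec has a single-row grid (`intercept = TRUE`), so there is nothing to tune and resampling can even be skipped; `$varImp` ranks predictors by the absolute t-statistic of their coefficients. A minimal sketch, assuming only that `caret` is installed:

library(caret)
aq <- na.omit(airquality)
m_lm <- train(Ozone ~ Solar.R + Wind + Temp, data = aq, method = "lm",
              trControl = trainControl(method = "none"))  # single candidate model
summary(m_lm$finalModel)   # the underlying lm() fit
varImp(m_lm)               # |t value| per predictor, as computed in $lm$varImp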
## $lmStepAIC
## $lmStepAIC$label
## [1] "Linear Regression with Stepwise Selection"
## 
## $lmStepAIC$library
## [1] "MASS"
## 
## $lmStepAIC$loop
## NULL
## 
## $lmStepAIC$type
## [1] "Regression"
## 
## $lmStepAIC$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $lmStepAIC$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $lmStepAIC$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         out <- MASS::stepAIC(lm(.outcome ~ ., data = dat, weights = wts), 
##             ...)
##     }
##     else out <- MASS::stepAIC(lm(.outcome ~ ., data = dat), ...)
##     out
## }
## 
## $lmStepAIC$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $lmStepAIC$prob
## NULL
## 
## $lmStepAIC$tags
## [1] "Linear Regression"         "Feature Selection Wrapper"
## [3] "Accepts Case Weights"     
## 
## $lmStepAIC$sort
## function (x) 
## x
## 
## 
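Because `$lmStepAIC$fit` forwards `...` to `MASS::stepAIC()`, options such as `trace = 0` (to silence the step-by-step log) can be passed straight through `train()`. A sketch assuming `caret` and `MASS` are installed:

library(caret)
aq <- na.omit(airquality)
set.seed(2)
m_step <- train(Ozone ~ ., data = aq, method = "lmStepAIC",
                trace = 0,   # forwarded to MASS::stepAIC() via ...
                trControl = trainControl(method = "cv", number = 5))
predictors(m_step)           # variables retained by the stepwise search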
## $LMT
## $LMT$label
## [1] "Logistic Model Trees"
## 
## $LMT$library
## [1] "RWeka"
## 
## $LMT$loop
## NULL
## 
## $LMT$type
## [1] "Classification"
## 
## $LMT$parameters
##   parameter   class       label
## 1      iter numeric # Iterations
## 
## $LMT$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(iter = 1 + (0:(len - 1)) * 20)
##     }
##     else {
##         out <- data.frame(iter = unique(sample(1:100, size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $LMT$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$I <- param$iter
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- RWeka::Weka_control(I = param$iter)
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, control = ctl), theDots)
##     out <- do.call(RWeka::LMT, modelArgs)
##     out
## }
## 
## $LMT$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $LMT$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "probability")
## }
## 
## $LMT$levels
## function (x) 
## x$obsLevels
## 
## $LMT$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $LMT$tags
## [1] "Model Tree"                 "Implicit Feature Selection"
## [3] "Logistic Regression"        "Linear Classifier"         
## 
## $LMT$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
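The `iter` parameter above becomes the Weka control flag `I` inside `$LMT$fit`. A usage sketch, assuming `RWeka` (and the Java installation it requires) is available:

library(caret)
set.seed(3)
m_lmt <- train(Species ~ ., data = iris, method = "LMT",
               tuneGrid = data.frame(iter = c(1, 21, 41)),  # boosting iterations
               trControl = trainControl(method = "cv", number = 5))
m_lmt$bestTune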
## $loclda
## $loclda$label
## [1] "Localized Linear Discriminant Analysis"
## 
## $loclda$library
## [1] "klaR"
## 
## $loclda$loop
## NULL
## 
## $loclda$type
## [1] "Classification"
## 
## $loclda$parameters
##   parameter   class              label
## 1         k numeric #Nearest Neighbors
## 
## $loclda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     min_p <- ncol(x)/nrow(x) + 0.05
##     p_seq <- seq(min_p, min(0.9, min_p + 1/3), length = len)
##     if (search == "grid") {
##         out <- data.frame(k = floor(p_seq * nrow(x)))
##     }
##     else {
##         by_val <- if (is.factor(y)) 
##             length(levels(y))
##         else 1
##         out <- data.frame(k = floor(runif(len, min = nrow(x) * 
##             min_p, max = nrow(x) * min(0.9, min_p + 1/3))))
##     }
##     out
## }
## 
## $loclda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## klaR::loclda(x, y, k = floor(param$k), ...)
## 
## $loclda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$class
## 
## $loclda$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$posterior
## 
## $loclda$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else colnames(x$means)
## 
## $loclda$tags
## [1] "Discriminant Analysis" "Linear Classifier"    
## 
## $loclda$levels
## function (x) 
## names(x$prior)
## 
## $loclda$sort
## function (x) 
## x
## 
## 
## $logicBag
## $logicBag$label
## [1] "Bagged Logic Regression"
## 
## $logicBag$library
## [1] "logicFS"
## 
## $logicBag$loop
## NULL
## 
## $logicBag$type
## [1] "Regression"     "Classification"
## 
## $logicBag$parameters
##   parameter   class                    label
## 1   nleaves numeric Maximum Number of Leaves
## 2    ntrees numeric          Number of Trees
## 
## $logicBag$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(ntrees = (1:len) + 1, nleaves = 2^((1:len) + 
##             6))
##     }
##     else {
##         out <- data.frame(ntrees = sample(1:10, size = len, replace = TRUE), 
##             nleaves = sample(1:10, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $logicBag$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(logicFS)
##     logicFS::logic.bagging(as.matrix(x), y, ntrees = param$ntrees, 
##         nleaves = param$nleaves, ...)
## }
## 
## $logicBag$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         if (length(modelFit$obsLevels) == 2) {
##             as.character(modelFit$obsLevels[predict(modelFit, 
##                 newData = newdata) + 1])
##         }
##         else {
##             as.character(predict(modelFit, newData = newdata))
##         }
##     }
##     else predict(modelFit, newData = newdata)
## }
## 
## $logicBag$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (length(modelFit$obsLevels) == 2) {
##         out <- predict(modelFit, newData = newdata, type = "prob")
##         out <- as.data.frame(cbind(out, 1 - out), stringsAsFactors = TRUE)
##         colnames(out) <- modelFit$obsLevels
##     }
##     else {
##         out <- predict(modelFit, newData = newdata, type = "prob")
##     }
##     out
## }
## 
## $logicBag$predictors
## function (x, ...) 
## {
##     varNums <- lapply(x$logreg.model, function(y) lapply(y$trees, 
##         function(z) z$trees$knot))
##     varNums <- sort(unique(unlist(varNums)))
##     varNums <- varNums[varNums > 0]
##     if (length(varNums) > 0) 
##         colnames(x$data)[varNums]
##     else NA
## }
## 
## $logicBag$levels
## function (x) 
## x$obsLevels
## 
## $logicBag$notes
## [1] "Unlike other packages used by `train`, the `logicFS` package is fully loaded when this model is used."
## 
## $logicBag$tags
## [1] "Logic Regression"       "Linear Classifier"      "Linear Regression"     
## [4] "Logistic Regression"    "Bagging"                "Ensemble Model"        
## [7] "Two Class Only"         "Binary Predictors Only"
## 
## $logicBag$sort
## function (x) 
## x[order(x$ntrees, x$nleaves), ]
## 
## 
## $LogitBoost
## $LogitBoost$label
## [1] "Boosted Logistic Regression"
## 
## $LogitBoost$library
## [1] "caTools"
## 
## $LogitBoost$loop
## function (grid) 
## {
##     loop <- grid[which.max(grid$nIter), , drop = FALSE]
##     submodels <- grid[-which.max(grid$nIter), , drop = FALSE]
##     submodels <- list(submodels)
##     list(loop = loop, submodels = submodels)
## }
## 
## $LogitBoost$type
## [1] "Classification"
## 
## $LogitBoost$parameters
##   parameter   class                 label
## 1     nIter numeric # Boosting Iterations
## 
## $LogitBoost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(nIter = 1 + ((1:len) * 10))
##     }
##     else {
##         out <- data.frame(nIter = unique(sample(1:100, size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $LogitBoost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     caTools::LogitBoost(as.matrix(x), y, nIter = param$nIter)
## }
## 
## $LogitBoost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- caTools::predict.LogitBoost(modelFit, newdata, type = "class")
##     if (!is.null(submodels)) {
##         tmp <- out
##         out <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         out[[1]] <- tmp
##         for (j in seq(along = submodels$nIter)) {
##             out[[j + 1]] <- caTools::predict.LogitBoost(modelFit, 
##                 newdata, nIter = submodels$nIter[j])
##         }
##     }
##     out
## }
## 
## $LogitBoost$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- caTools::predict.LogitBoost(modelFit, newdata, type = "raw")
##     out <- t(apply(out, 1, function(x) x/sum(x)))
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$nIter)) {
##             tmpProb <- caTools::predict.LogitBoost(modelFit, 
##                 newdata, type = "raw", nIter = submodels$nIter[j])
##             tmpProb <- out <- t(apply(tmpProb, 1, function(x) x/sum(x)))
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $LogitBoost$predictors
## function (x, ...) 
## {
##     if (!is.null(x$xNames)) {
##         out <- unique(x$xNames[x$Stump[, "feature"]])
##     }
##     else out <- NA
##     out
## }
## 
## $LogitBoost$levels
## function (x) 
## x$obsLevels
## 
## $LogitBoost$tags
## [1] "Ensemble Model"             "Boosting"                  
## [3] "Implicit Feature Selection" "Tree-Based Model"          
## [5] "Logistic Regression"       
## 
## $LogitBoost$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
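Note how `$LogitBoost$loop` works: only the largest `nIter` is actually fitted, and every smaller iteration count is recovered from that same boosted fit as a "submodel", so a long `nIter` grid costs almost nothing extra. A sketch assuming `caTools` is installed (LogitBoost can leave tied cases as NA predictions, which caret ignores when computing accuracy):

library(caret)
set.seed(4)
m_lb <- train(Species ~ ., data = iris, method = "LogitBoost",
              tuneGrid = data.frame(nIter = seq(11, 51, by = 10)),
              trControl = trainControl(method = "cv", number = 5))
m_lb$results[, c("nIter", "Accuracy")]   # one fit per resample, many nIter values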
## $logreg
## $logreg$label
## [1] "Logic Regression"
## 
## $logreg$library
## [1] "LogicReg"
## 
## $logreg$loop
## NULL
## 
## $logreg$type
## [1] "Regression"     "Classification"
## 
## $logreg$parameters
##   parameter   class                    label
## 1  treesize numeric Maximum Number of Leaves
## 2    ntrees numeric          Number of Trees
## 
## $logreg$grid
## function (x, y, len = NULL, search = "grid") 
## expand.grid(ntrees = (1:3) + 1, treesize = 2^(1 + (1:len)))
## 
## $logreg$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     isReg <- is.numeric(y)
##     if (is.factor(y)) 
##         y <- ifelse(y == levels(y)[1], 1, 0)
##     LogicReg::logreg(resp = y, bin = x, ntrees = param$ntrees, 
##         tree.control = LogicReg::logreg.tree.control(treesize = param$treesize), 
##         select = 1, type = ifelse(isReg, 2, 3), ...)
## }
## 
## $logreg$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$type == "logistic") {
##         out <- ifelse(predict(modelFit, newbin = newdata) >= 
##             0.5, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     }
##     else out <- predict(modelFit, newbin = newdata)
##     out
## }
## 
## $logreg$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     tmp <- predict(modelFit, newbin = newdata)
##     out <- cbind(tmp, 1 - tmp)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $logreg$predictors
## function (x, ...) 
## {
##     getVarIndex <- function(y) unique(y$trees$knot)
##     varNums <- unique(unlist(lapply(x$model$trees, getVarIndex)))
##     varNums <- varNums[varNums > 0]
##     if (length(varNums) > 0) 
##         colnames(x$binary)[varNums]
##     else NA
## }
## 
## $logreg$levels
## function (x) 
## x$obsLevels
## 
## $logreg$tags
## [1] "Logic Regression"       "Linear Classifier"      "Linear Regression"     
## [4] "Logistic Regression"    "Two Class Only"         "Binary Predictors Only"
## 
## $logreg$sort
## function (x) 
## x[order(x$ntrees, x$treesize), ]
## 
## 
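The "Binary Predictors Only" tag is strict: `LogicReg::logreg()` expects a 0/1 predictor matrix, and `$logreg$fit` recodes a two-level factor outcome to 1/0. A hypothetical sketch that binarizes numeric columns at their medians purely for illustration (assumes the `LogicReg` package is installed):

library(caret)
aq <- na.omit(airquality)
# logic regression needs 0/1 predictors; median thresholding is just illustrative
xbin <- as.matrix(as.data.frame(lapply(aq[, c("Solar.R", "Wind", "Temp")],
                                       function(v) as.integer(v > median(v)))))
ybin <- factor(ifelse(aq$Ozone > median(aq$Ozone), "high", "low"))
set.seed(5)
m_lr <- train(x = xbin, y = ybin, method = "logreg",
              tuneGrid = expand.grid(ntrees = 2:3, treesize = c(4, 8)),
              trControl = trainControl(method = "cv", number = 5))
m_lr$bestTune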
## $lssvmLinear
## $lssvmLinear$label
## [1] "Least Squares Support Vector Machine"
## 
## $lssvmLinear$library
## [1] "kernlab"
## 
## $lssvmLinear$type
## [1] "Classification"
## 
## $lssvmLinear$parameters
##   parameter   class                    label
## 1       tau numeric Regularization Parameter
## 
## $lssvmLinear$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(tau = 2^((1:len) - 5))
##     }
##     else {
##         out <- data.frame(tau = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $lssvmLinear$loop
## NULL
## 
## $lssvmLinear$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     kernlab::lssvm(x = as.matrix(x), y = y, tau = param$tau, 
##         kernel = kernlab::polydot(degree = 1, scale = 1, offset = 1), 
##         ...)
## }
## 
## $lssvmLinear$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- kernlab::predict(modelFit, as.matrix(newdata))
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $lssvmLinear$prob
## NULL
## 
## $lssvmLinear$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $lssvmLinear$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Linear Classifier"      
## 
## $lssvmLinear$levels
## function (x) 
## lev(x)
## 
## $lssvmLinear$sort
## function (x) 
## x
## 
## 
## $lssvmPoly
## $lssvmPoly$label
## [1] "Least Squares Support Vector Machine with Polynomial Kernel"
## 
## $lssvmPoly$library
## [1] "kernlab"
## 
## $lssvmPoly$type
## [1] "Classification"
## 
## $lssvmPoly$parameters
##   parameter   class                    label
## 1    degree numeric        Polynomial Degree
## 2     scale numeric                    Scale
## 3       tau numeric Regularization Parameter
## 
## $lssvmPoly$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(degree = seq(1, min(len, 3)), scale = 10^((1:len) - 
##             4), tau = 2^((1:len) - 5))
##     }
##     else {
##         out <- data.frame(degree = sample(1:3, size = len, replace = TRUE), 
##             scale = 10^runif(len, min = -5, log10(2)), tau = 2^runif(len, 
##                 min = -5, max = 10))
##     }
##     out
## }
## 
## $lssvmPoly$loop
## NULL
## 
## $lssvmPoly$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     kernlab::lssvm(x = as.matrix(x), y = y, tau = param$tau, 
##         kernel = kernlab::polydot(degree = param$degree, scale = param$scale, 
##             offset = 1), ...)
## }
## 
## $lssvmPoly$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- kernlab::predict(modelFit, as.matrix(newdata))
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $lssvmPoly$prob
## NULL
## 
## $lssvmPoly$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$xscale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $lssvmPoly$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Polynomial Model"       
## 
## $lssvmPoly$levels
## function (x) 
## lev(x)
## 
## $lssvmPoly$sort
## function (x) 
## x
## 
## 
## $lssvmRadial
## $lssvmRadial$label
## [1] "Least Squares Support Vector Machine with Radial Basis Function Kernel"
## 
## $lssvmRadial$library
## [1] "kernlab"
## 
## $lssvmRadial$type
## [1] "Classification"
## 
## $lssvmRadial$parameters
##   parameter   class                    label
## 1     sigma numeric                    Sigma
## 2       tau numeric Regularization Parameter
## 
## $lssvmRadial$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     sigmas <- kernlab::sigest(as.matrix(x), na.action = na.omit, 
##         scaled = TRUE)
##     if (search == "grid") {
##         out <- expand.grid(sigma = seq(min(sigmas), max(sigmas), 
##             length = min(6, len)), tau = 2^((1:len) - 5))
##     }
##     else {
##         rng <- extendrange(log(sigmas), f = 0.75)
##         out <- data.frame(sigma = exp(runif(len, min = rng[1], 
##             max = rng[2])), tau = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $lssvmRadial$loop
## NULL
## 
## $lssvmRadial$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     kernlab::lssvm(x = as.matrix(x), y = y, tau = param$tau, 
##         kernel = "rbfdot", kpar = list(sigma = param$sigma), 
##         ...)
## }
## 
## $lssvmRadial$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- kernlab::predict(modelFit, as.matrix(newdata))
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $lssvmRadial$prob
## NULL
## 
## $lssvmRadial$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $lssvmRadial$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Radial Basis Function"  
## 
## $lssvmRadial$levels
## function (x) 
## lev(x)
## 
## $lssvmRadial$sort
## function (x) 
## x
## 
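For grid search the `lssvmRadial` spec estimates a plausible `sigma` range from the data with `kernlab::sigest()`, so only the grid size needs choosing; note that `$prob` is NULL, so class probabilities are unavailable for this model. A sketch assuming `kernlab` is installed:

library(caret)
set.seed(6)
m_ls <- train(Species ~ ., data = iris, method = "lssvmRadial",
              tuneLength = 3,   # sigma range comes from kernlab::sigest()
              trControl = trainControl(method = "cv", number = 5))
m_ls$bestTune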
## 
## $lvq
## $lvq$label
## [1] "Learning Vector Quantization"
## 
## $lvq$library
## [1] "class"
## 
## $lvq$loop
## NULL
## 
## $lvq$type
## [1] "Classification"
## 
## $lvq$parameters
##   parameter   class         label
## 1      size numeric Codebook Size
## 2         k numeric   #Prototypes
## 
## $lvq$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     p <- ncol(x)
##     ng <- length(levels(y))
##     n <- nrow(x)
##     tmp <- min(round(0.4 * ng * (ng - 1 + p/2), 0), n)
##     if (search == "grid") {
##         out <- expand.grid(size = floor(seq(tmp, 2 * tmp, length = len)), 
##             k = -4 + (1:len) * 5)
##         out$size <- floor(out$size)
##     }
##     else {
##         out <- data.frame(size = sample(tmp:(2 * tmp), size = len, 
##             replace = TRUE), k = sample(1:(nrow(x) - 2), size = len, 
##             replace = TRUE))
##     }
##     out <- subset(out, size < n & k < n)
##     out
## }
## 
## $lvq$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     class::lvq3(x, y, class::lvqinit(x, y, size = param$size, 
##         k = min(param$k, nrow(x) - length(levels(y)))), ...)
## }
## 
## $lvq$predict
## function (modelFit, newdata, submodels = NULL) 
## class::lvqtest(modelFit, newdata)
## 
## $lvq$levels
## function (x) 
## x$obsLevels
## 
## $lvq$prob
## NULL
## 
## $lvq$tags
## [1] "Prototype Models"
## 
## $lvq$sort
## function (x) 
## x[order(-x$k, -x$size), ]
## 
## 
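`$lvq$fit` chains `class::lvqinit()` (codebook initialization) into `class::lvq3()`, and the grid keeps both `size` and `k` below the training-set size. A sketch with an explicit grid, assuming the `class` package (shipped with R) is available:

library(caret)
set.seed(7)
m_lvq <- train(Species ~ ., data = iris, method = "lvq",
               tuneGrid = expand.grid(size = c(6, 12), k = c(1, 6)),
               trControl = trainControl(method = "cv", number = 5))
m_lvq$results[, c("size", "k", "Accuracy")]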
## $M5
## $M5$label
## [1] "Model Tree"
## 
## $M5$library
## [1] "RWeka"
## 
## $M5$loop
## NULL
## 
## $M5$type
## [1] "Regression"
## 
## $M5$parameters
##   parameter     class    label
## 1    pruned character   Pruned
## 2  smoothed character Smoothed
## 3     rules character    Rules
## 
## $M5$grid
## function (x, y, len = NULL, search = "grid") 
## expand.grid(pruned = c("Yes", "No"), smoothed = c("Yes", "No"), 
##     rules = c("Yes", "No"))
## 
## $M5$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$N <- ifelse(param$pruned == "No", TRUE, 
##             FALSE)
##         theDots$control$U <- ifelse(param$smoothed == "No", TRUE, 
##             FALSE)
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- RWeka::Weka_control(N = ifelse(param$pruned == 
##         "No", TRUE, FALSE), U = ifelse(param$smoothed == "No", 
##         TRUE, FALSE))
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         control = ctl), theDots)
##     modelArgs$data <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     modelArgs$data$.outcome <- y
##     out <- do.call(if (param$rules == "Yes") 
##         RWeka::M5Rules
##     else RWeka::M5P, modelArgs)
##     out
## }
## 
## $M5$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $M5$prob
## NULL
## 
## $M5$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $M5$tags
## [1] "Rule-Based Model"           "Tree-Based Model"          
## [3] "Linear Regression"          "Implicit Feature Selection"
## [5] "Model Tree"                
## 
## $M5$sort
## function (x) 
## {
##     x$pruned <- factor(as.character(x$pruned), levels = c("Yes", 
##         "No"))
##     x$smoothed <- factor(as.character(x$smoothed), levels = c("Yes", 
##         "No"))
##     x$rules <- factor(as.character(x$rules), levels = c("Yes", 
##         "No"))
##     x[order(x$pruned, x$smoothed, x$rules), ]
## }
## 
## 
## $M5Rules
## $M5Rules$label
## [1] "Model Rules"
## 
## $M5Rules$library
## [1] "RWeka"
## 
## $M5Rules$loop
## NULL
## 
## $M5Rules$type
## [1] "Regression"
## 
## $M5Rules$parameters
##   parameter     class    label
## 1    pruned character   Pruned
## 2  smoothed character Smoothed
## 
## $M5Rules$grid
## function (x, y, len = NULL, search = "grid") 
## expand.grid(pruned = c("Yes", "No"), smoothed = c("Yes", "No"))
## 
## $M5Rules$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$N <- ifelse(param$pruned == "No", TRUE, 
##             FALSE)
##         theDots$control$U <- ifelse(param$smoothed == "No", TRUE, 
##             FALSE)
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- RWeka::Weka_control(N = ifelse(param$pruned == 
##         "No", TRUE, FALSE), U = ifelse(param$smoothed == "No", 
##         TRUE, FALSE))
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         control = ctl), theDots)
##     modelArgs$data <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     modelArgs$data$.outcome <- y
##     out <- do.call(RWeka::M5Rules, modelArgs)
##     out
## }
## 
## $M5Rules$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $M5Rules$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $M5Rules$prob
## NULL
## 
## $M5Rules$tags
## [1] "Rule-Based Model"           "Linear Regression"         
## [3] "Implicit Feature Selection" "Model Tree"                
## 
## $M5Rules$sort
## function (x) 
## {
##     x$pruned <- factor(as.character(x$pruned), levels = c("Yes", 
##         "No"))
##     x$smoothed <- factor(as.character(x$smoothed), levels = c("Yes", 
##         "No"))
##     x[order(x$pruned, x$smoothed), ]
## }
## 
## 
## $manb
## $manb$label
## [1] "Model Averaged Naive Bayes Classifier"
## 
## $manb$library
## [1] "bnclassify"
## 
## $manb$type
## [1] "Classification"
## 
## $manb$parameters
##   parameter   class               label
## 1    smooth numeric Smoothing Parameter
## 2     prior numeric   Prior Probability
## 
## $manb$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(smooth = 0:(len - 1), prior = seq(0.1, 
##             0.9, length = len))
##     }
##     else {
##         out <- data.frame(smooth = runif(len, min = 0, max = 10), 
##             prior = runif(len))
##     }
##     out$smooth[out$smooth <= 0] <- 0.05
##     out
## }
## 
## $manb$loop
## NULL
## 
## $manb$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     struct <- bnclassify::nb(class = ".outcome", dataset = dat)
##     bnclassify::lp(struct, dat, smooth = param$smooth, manb_prior = param$prior, 
##         ...)
## }
## 
## $manb$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $manb$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, prob = TRUE)
## }
## 
## $manb$levels
## function (x) 
## x$obsLevels
## 
## $manb$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $manb$tags
## [1] "Bayesian Model"              "Categorical Predictors Only"
## 
## $manb$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $mda
## $mda$label
## [1] "Mixture Discriminant Analysis"
## 
## $mda$library
## [1] "mda"
## 
## $mda$loop
## NULL
## 
## $mda$type
## [1] "Classification"
## 
## $mda$parameters
##    parameter   class                 label
## 1 subclasses numeric #Subclasses Per Class
## 
## $mda$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(subclasses = (1:len) + 1)
## 
## $mda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     mda::mda(as.formula(".outcome ~ ."), data = dat, subclasses = param$subclasses, 
##         ...)
## }
## 
## $mda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $mda$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "posterior")
## 
## $mda$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $mda$levels
## function (x) 
## x$obsLevels
## 
## $mda$tags
## [1] "Discriminant Analysis" "Mixture Model"        
## 
## $mda$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
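`subclasses` sets the number of mixture components per class fitted by `mda::mda()`. A sketch assuming the `mda` package is installed:

library(caret)
set.seed(8)
m_mda <- train(Species ~ ., data = iris, method = "mda",
               tuneGrid = data.frame(subclasses = 2:4),
               trControl = trainControl(method = "cv", number = 5,
                                        classProbs = TRUE))  # posteriors via $mda$prob
m_mda$bestTune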
## $Mlda
## $Mlda$label
## [1] "Maximum Uncertainty Linear Discriminant Analysis"
## 
## $Mlda$library
## [1] "HiDimDA"
## 
## $Mlda$loop
## NULL
## 
## $Mlda$type
## [1] "Classification"
## 
## $Mlda$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $Mlda$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $Mlda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## HiDimDA::Mlda(x, y, q = param$.q, maxq = param$.q, ...)
## 
## $Mlda$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$class
##     out <- modelFit$obsLevels[as.numeric(out)]
##     out
## }
## 
## $Mlda$levels
## function (x) 
## x$obsLevels
## 
## $Mlda$prob
## NULL
## 
## $Mlda$tags
## [1] "Discriminant Analysis" "Linear Classifier"    
## 
## $Mlda$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $mlp
## $mlp$label
## [1] "Multi-Layer Perceptron"
## 
## $mlp$library
## [1] "RSNNS"
## 
## $mlp$loop
## NULL
## 
## $mlp$type
## [1] "Regression"     "Classification"
## 
## $mlp$parameters
##   parameter   class         label
## 1      size numeric #Hidden Units
## 
## $mlp$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(size = ((1:len) * 2) - 1)
##     }
##     else {
##         out <- data.frame(size = unique(sample(1:20, size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $mlp$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     theDots <- theDots[!(names(theDots) %in% c("size", "linOut"))]
##     if (is.factor(y)) {
##         y <- RSNNS:::decodeClassLabels(y)
##         lin <- FALSE
##     }
##     else lin <- TRUE
##     args <- list(x = x, y = y, size = param$size, linOut = lin)
##     args <- c(args, theDots)
##     do.call(RSNNS::mlp, args)
## }
## 
## $mlp$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     if (modelFit$problemType == "Classification") {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     else out <- out[, 1]
##     out
## }
## 
## $mlp$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $mlp$levels
## function (x) 
## x$obsLevels
## 
## $mlp$tags
## [1] "Neural Network"
## 
## $mlp$sort
## function (x) 
## x[order(x$size), ]
## 
## 
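In `$mlp$fit`, a factor outcome is one-hot encoded with `RSNNS:::decodeClassLabels()` and `linOut` is switched off, so the same spec serves both classification and regression. A sketch assuming `RSNNS` is installed; centering and scaling is a common (not mandatory) preprocessing choice for neural networks:

library(caret)
set.seed(9)
m_mlp <- train(Species ~ ., data = iris, method = "mlp",
               tuneGrid = data.frame(size = c(1, 3, 5)),   # hidden units
               preProcess = c("center", "scale"),
               trControl = trainControl(method = "cv", number = 5))
m_mlp$bestTune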
## $mlpKerasDecay
## $mlpKerasDecay$label
## [1] "Multilayer Perceptron Network with Weight Decay"
## 
## $mlpKerasDecay$library
## [1] "keras"
## 
## $mlpKerasDecay$loop
## NULL
## 
## $mlpKerasDecay$type
## [1] "Regression"     "Classification"
## 
## $mlpKerasDecay$parameters
##    parameter     class               label
## 1       size   numeric       #Hidden Units
## 2     lambda   numeric   L2 Regularization
## 3 batch_size   numeric          Batch Size
## 4         lr   numeric       Learning Rate
## 5        rho   numeric                 Rho
## 6      decay   numeric Learning Rate Decay
## 7 activation character Activation Function
## 
## $mlpKerasDecay$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     afuncs <- c("sigmoid", "relu", "tanh")
##     if (search == "grid") {
##         out <- expand.grid(size = ((1:len) * 2) - 1, lambda = c(0, 
##             10^seq(-1, -4, length = len - 1)), batch_size = floor(nrow(x)/3), 
##             lr = 2e-06, rho = 0.9, decay = 0, activation = "relu")
##     }
##     else {
##         n <- nrow(x)
##         out <- data.frame(size = sample(2:20, replace = TRUE, 
##             size = len), lambda = 10^runif(len, min = -5, 1), 
##             batch_size = floor(n * runif(len, min = 0.1)), lr = runif(len), 
##             rho = runif(len), decay = 10^runif(len, min = -5, 
##                 0), activation = sample(afuncs, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $mlpKerasDecay$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(dplyr)
##     K <- keras::backend()
##     K$clear_session()
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     model <- keras::keras_model_sequential()
##     model %>% keras::layer_dense(units = param$size, activation = as.character(param$activation), 
##         input_shape = ncol(x), kernel_initializer = keras::initializer_glorot_uniform(), 
##         kernel_regularizer = keras::regularizer_l2(param$lambda))
##     if (is.factor(y)) {
##         y <- class2ind(y)
##         model %>% keras::layer_dense(units = length(lev), activation = "softmax", 
##             kernel_regularizer = keras::regularizer_l2(param$lambda)) %>% 
##             keras::compile(loss = "categorical_crossentropy", 
##                 optimizer = keras::optimizer_rmsprop(lr = param$lr, 
##                   rho = param$rho, decay = param$decay), metrics = "accuracy")
##     }
##     else {
##         model %>% keras::layer_dense(units = 1, activation = "linear", 
##             kernel_regularizer = keras::regularizer_l2(param$lambda)) %>% 
##             keras::compile(loss = "mean_squared_error", optimizer = keras::optimizer_rmsprop(lr = param$lr, 
##                 rho = param$rho, decay = param$decay), metrics = "mean_squared_error")
##     }
##     model %>% keras::fit(x = x, y = y, batch_size = param$batch_size, 
##         ...)
##     if (last) 
##         model <- keras::serialize_model(model)
##     list(object = model)
## }
## 
## $mlpKerasDecay$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (inherits(modelFit$object, "raw")) 
##         modelFit$object <- keras::unserialize_model(modelFit$object)
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit$object, newdata)
##     if (ncol(out) == 1) {
##         out <- out[, 1]
##     }
##     else {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     out
## }
## 
## $mlpKerasDecay$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (inherits(modelFit$object, "raw")) 
##         modelFit$object <- keras::unserialize_model(modelFit$object)
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit$object, newdata)
##     colnames(out) <- modelFit$obsLevels
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $mlpKerasDecay$varImp
## NULL
## 
## $mlpKerasDecay$tags
## [1] "Neural Network"    "L2 Regularization"
## 
## $mlpKerasDecay$sort
## function (x) 
## x[order(x$size, -x$lambda), ]
## 
## $mlpKerasDecay$notes
## [1] "After `train` completes, the keras model object is serialized so that it can be used between R session. When predicting, the code will temporarily unsearalize the object. To make the predictions more efficient, the user might want to use  `keras::unsearlize_model(object$finalModel$object)` in the current R session so that that operation is only done once. Also, this model cannot be run in parallel due to the nature of how tensorflow does the computations. Unlike other packages used by `train`, the `dplyr` package is fully loaded when this model is used."
## 
## $mlpKerasDecay$check
## function (pkg) 
## {
##     testmod <- try(keras::keras_model_sequential(), silent = TRUE)
##     if (inherits(testmod, "try-error")) 
##         stop("Could not start a sequential model. ", "`tensorflow` might not be installed. ", 
##             "See `?install_tensorflow`.", call. = FALSE)
##     TRUE
## }
## 
## 
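Per the `$notes` above, the final keras model is stored in serialized form; unserializing it once per session avoids the per-prediction cost. A sketch of that pattern, where `m_keras` is a hypothetical existing `train()` fit with `method = "mlpKerasDecay"` and `new_x` is hypothetical new predictor data (requires `keras` with a working tensorflow backend):

# one-time unserialization, mirroring the check in $mlpKerasDecay$predict
if (inherits(m_keras$finalModel$object, "raw"))
  m_keras$finalModel$object <- keras::unserialize_model(m_keras$finalModel$object)
predict(m_keras, newdata = new_x)   # later calls skip the unserialize step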
## $mlpKerasDecayCost
## $mlpKerasDecayCost$label
## [1] "Multilayer Perceptron Network with Weight Decay"
## 
## $mlpKerasDecayCost$library
## [1] "keras"
## 
## $mlpKerasDecayCost$loop
## NULL
## 
## $mlpKerasDecayCost$type
## [1] "Classification"
## 
## $mlpKerasDecayCost$parameters
##    parameter     class               label
## 1       size   numeric       #Hidden Units
## 2     lambda   numeric   L2 Regularization
## 3 batch_size   numeric          Batch Size
## 4         lr   numeric       Learning Rate
## 5        rho   numeric                 Rho
## 6      decay   numeric Learning Rate Decay
## 7       cost   numeric                Cost
## 8 activation character Activation Function
## 
## $mlpKerasDecayCost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     afuncs <- c("sigmoid", "relu", "tanh")
##     if (search == "grid") {
##         out <- expand.grid(size = ((1:len) * 2) - 1, lambda = c(0, 
##             10^seq(-1, -4, length = len - 1)), batch_size = floor(nrow(x)/3), 
##             lr = 2e-06, rho = 0.9, decay = 0, activation = "relu", 
##             cost = 1:len)
##     }
##     else {
##         n <- nrow(x)
##         out <- data.frame(size = sample(2:20, replace = TRUE, 
##             size = len), lambda = 10^runif(len, min = -5, 1), 
##             batch_size = floor(n * runif(len, min = 0.1)), lr = runif(len), 
##             rho = runif(len), decay = 10^runif(len, min = -5, 
##                 0), activation = sample(afuncs, size = len, replace = TRUE), 
##             cost = runif(len, min = 1, max = 20))
##     }
##     out
## }
## 
## $mlpKerasDecayCost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(dplyr)
##     K <- keras::backend()
##     K$clear_session()
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     model <- keras::keras_model_sequential()
##     model %>% keras::layer_dense(units = param$size, activation = as.character(param$activation), 
##         input_shape = ncol(x), kernel_initializer = keras::initializer_glorot_uniform(), 
##         kernel_regularizer = keras::regularizer_l2(param$lambda))
##     y <- class2ind(y)
##     model %>% keras::layer_dense(units = length(lev), activation = "softmax", 
##         kernel_regularizer = keras::regularizer_l2(param$lambda)) %>% 
##         keras::compile(loss = "categorical_crossentropy", loss_weights = list(param$cost), 
##             optimizer = keras::optimizer_rmsprop(lr = param$lr, 
##                 rho = param$rho, decay = param$decay), metrics = "accuracy")
##     model %>% keras::fit(x = x, y = y, batch_size = param$batch_size, 
##         ...)
##     if (last) 
##         model <- keras::serialize_model(model)
##     list(object = model)
## }
## 
## $mlpKerasDecayCost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (inherits(modelFit$object, "raw")) 
##         modelFit$object <- keras::unserialize_model(modelFit$object)
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit$object, newdata)
##     if (ncol(out) == 1) {
##         out <- out[, 1]
##     }
##     else {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     out
## }
## 
## $mlpKerasDecayCost$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (inherits(modelFit$object, "raw")) 
##         modelFit$object <- keras::unserialize_model(modelFit$object)
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit$object, newdata)
##     colnames(out) <- modelFit$obsLevels
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $mlpKerasDecayCost$varImp
## NULL
## 
## $mlpKerasDecayCost$tags
## [1] "Neural Network"          "L2 Regularization"      
## [3] "Cost Sensitive Learning" "Two Class Only"         
## 
## $mlpKerasDecayCost$sort
## function (x) 
## x[order(x$size, -x$lambda), ]
## 
## $mlpKerasDecayCost$notes
## [1] "After `train` completes, the keras model object is serialized so that it can be used between R session. When predicting, the code will temporarily unsearalize the object. To make the predictions more efficient, the user might want to use  `keras::unsearlize_model(object$finalModel$object)` in the current R session so that that operation is only done once. Also, this model cannot be run in parallel due to the nature of how tensorflow does the computations. Finally, the cost parameter weights the first class in the outcome vector. Unlike other packages used by `train`, the `dplyr` package is fully loaded when this model is used."
## 
## $mlpKerasDecayCost$check
## function (pkg) 
## {
##     testmod <- try(keras::keras_model_sequential(), silent = TRUE)
##     if (inherits(testmod, "try-error")) 
##         stop("Could not start a sequential model. ", "`tensorflow` might not be installed. ", 
##             "See `?install_tensorflow`.", call. = FALSE)
##     TRUE
## }
## 
## 
## $mlpKerasDropout
## $mlpKerasDropout$label
## [1] "Multilayer Perceptron Network with Dropout"
## 
## $mlpKerasDropout$library
## [1] "keras"
## 
## $mlpKerasDropout$loop
## NULL
## 
## $mlpKerasDropout$type
## [1] "Regression"     "Classification"
## 
## $mlpKerasDropout$parameters
##    parameter     class               label
## 1       size   numeric       #Hidden Units
## 2    dropout   numeric        Dropout Rate
## 3 batch_size   numeric          Batch Size
## 4         lr   numeric       Learning Rate
## 5        rho   numeric                 Rho
## 6      decay   numeric Learning Rate Decay
## 7 activation character Activation Function
## 
## $mlpKerasDropout$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     afuncs <- c("sigmoid", "relu", "tanh")
##     if (search == "grid") {
##         out <- expand.grid(size = ((1:len) * 2) - 1, dropout = seq(0, 
##             0.7, length = len), batch_size = floor(nrow(x)/3), 
##             lr = 2e-06, rho = 0.9, decay = 0, activation = "relu")
##     }
##     else {
##         n <- nrow(x)
##         out <- data.frame(size = sample(2:20, replace = TRUE, 
##             size = len), dropout = runif(len, max = 0.7), batch_size = floor(n * 
##             runif(len, min = 0.1)), lr = runif(len), rho = runif(len), 
##             decay = 10^runif(len, min = -5, 0), activation = sample(afuncs, 
##                 size = len, replace = TRUE))
##     }
##     out
## }
## 
## $mlpKerasDropout$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(dplyr)
##     K <- keras::backend()
##     K$clear_session()
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     model <- keras::keras_model_sequential()
##     model %>% keras::layer_dense(units = param$size, activation = as.character(param$activation), 
##         kernel_initializer = keras::initializer_glorot_uniform(), 
##         input_shape = ncol(x)) %>% keras::layer_dropout(rate = param$dropout, 
##         seed = sample.int(1000, 1))
##     if (is.factor(y)) {
##         y <- class2ind(y)
##         model %>% keras::layer_dense(units = length(lev), activation = "softmax") %>% 
##             keras::compile(loss = "categorical_crossentropy", 
##                 optimizer = keras::optimizer_rmsprop(lr = param$lr, 
##                   rho = param$rho, decay = param$decay), metrics = "accuracy")
##     }
##     else {
##         model %>% keras::layer_dense(units = 1, activation = "linear") %>% 
##             keras::compile(loss = "mean_squared_error", optimizer = keras::optimizer_rmsprop(lr = param$lr, 
##                 rho = param$rho, decay = param$decay), metrics = "mean_squared_error")
##     }
##     model %>% keras::fit(x = x, y = y, batch_size = param$batch_size, 
##         ...)
##     if (last) 
##         model <- keras::serialize_model(model)
##     list(object = model)
## }
## 
## $mlpKerasDropout$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (inherits(modelFit$object, "raw")) 
##         modelFit$object <- keras::unserialize_model(modelFit$object)
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit$object, newdata)
##     if (ncol(out) == 1) {
##         out <- out[, 1]
##     }
##     else {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     out
## }
## 
## $mlpKerasDropout$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (inherits(modelFit$object, "raw")) 
##         modelFit$object <- keras::unserialize_model(modelFit$object)
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit$object, newdata)
##     colnames(out) <- modelFit$obsLevels
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $mlpKerasDropout$varImp
## NULL
## 
## $mlpKerasDropout$tags
## [1] "Neural Network"
## 
## $mlpKerasDropout$sort
## function (x) 
## x[order(x$size, -x$dropout), ]
## 
## $mlpKerasDropout$notes
## [1] "After `train` completes, the keras model object is serialized so that it can be used between R session. When predicting, the code will temporarily unsearalize the object. To make the predictions more efficient, the user might want to use  `keras::unsearlize_model(object$finalModel$object)` in the current R session so that that operation is only done once. Also, this model cannot be run in parallel due to the nature of how tensorflow does the computations. Unlike other packages used by `train`, the `dplyr` package is fully loaded when this model is used."
## 
## $mlpKerasDropout$check
## function (pkg) 
## {
##     testmod <- try(keras::keras_model_sequential(), silent = TRUE)
##     if (inherits(testmod, "try-error")) 
##         stop("Could not start a sequential model. ", "`tensorflow` might not be installed. ", 
##             "See `?install_tensorflow`.", call. = FALSE)
##     TRUE
## }
## 
## 
## $mlpKerasDropoutCost
## $mlpKerasDropoutCost$label
## [1] "Multilayer Perceptron Network with Dropout"
## 
## $mlpKerasDropoutCost$library
## [1] "keras"
## 
## $mlpKerasDropoutCost$loop
## NULL
## 
## $mlpKerasDropoutCost$type
## [1] "Classification"
## 
## $mlpKerasDropoutCost$parameters
##    parameter     class               label
## 1       size   numeric       #Hidden Units
## 2    dropout   numeric        Dropout Rate
## 3 batch_size   numeric          Batch Size
## 4         lr   numeric       Learning Rate
## 5        rho   numeric                 Rho
## 6      decay   numeric Learning Rate Decay
## 7       cost   numeric                Cost
## 8 activation character Activation Function
## 
## $mlpKerasDropoutCost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     afuncs <- c("sigmoid", "relu", "tanh")
##     if (search == "grid") {
##         out <- expand.grid(size = ((1:len) * 2) - 1, dropout = seq(0, 
##             0.7, length = len), batch_size = floor(nrow(x)/3), 
##             lr = 2e-06, rho = 0.9, decay = 0, activation = "relu", 
##             cost = 1:len)
##     }
##     else {
##         n <- nrow(x)
##         out <- data.frame(size = sample(2:20, replace = TRUE, 
##             size = len), dropout = runif(len, max = 0.7), batch_size = floor(n * 
##             runif(len, min = 0.1)), lr = runif(len), rho = runif(len), 
##             decay = 10^runif(len, min = -5, 0), activation = sample(afuncs, 
##                 size = len, replace = TRUE), cost = runif(len, 
##                 min = 1, max = 20))
##     }
##     out
## }
## 
## $mlpKerasDropoutCost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(dplyr)
##     K <- keras::backend()
##     K$clear_session()
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     model <- keras::keras_model_sequential()
##     model %>% keras::layer_dense(units = param$size, activation = as.character(param$activation), 
##         kernel_initializer = keras::initializer_glorot_uniform(), 
##         input_shape = ncol(x)) %>% keras::layer_dropout(rate = param$dropout, 
##         seed = sample.int(1000, 1))
##     y <- class2ind(y)
##     model %>% keras::layer_dense(units = length(lev), activation = "softmax") %>% 
##         keras::compile(loss = "categorical_crossentropy", loss_weights = list(param$cost), 
##             optimizer = keras::optimizer_rmsprop(lr = param$lr, 
##                 rho = param$rho, decay = param$decay), metrics = "accuracy")
##     model %>% keras::fit(x = x, y = y, batch_size = param$batch_size, 
##         ...)
##     if (last) 
##         model <- keras::serialize_model(model)
##     list(object = model)
## }
## 
## $mlpKerasDropoutCost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (inherits(modelFit$object, "raw")) 
##         modelFit$object <- keras::unserialize_model(modelFit$object)
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit$object, newdata)
##     if (ncol(out) == 1) {
##         out <- out[, 1]
##     }
##     else {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     out
## }
## 
## $mlpKerasDropoutCost$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (inherits(modelFit$object, "raw")) 
##         modelFit$object <- keras::unserialize_model(modelFit$object)
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit$object, newdata)
##     colnames(out) <- modelFit$obsLevels
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $mlpKerasDropoutCost$varImp
## NULL
## 
## $mlpKerasDropoutCost$tags
## [1] "Neural Network"          "Cost Sensitive Learning"
## [3] "Two Class Only"         
## 
## $mlpKerasDropoutCost$sort
## function (x) 
## x[order(x$size, -x$dropout), ]
## 
## $mlpKerasDropoutCost$notes
## [1] "After `train` completes, the keras model object is serialized so that it can be used between R session. When predicting, the code will temporarily unsearalize the object. To make the predictions more efficient, the user might want to use  `keras::unsearlize_model(object$finalModel$object)` in the current R session so that that operation is only done once. Also, this model cannot be run in parallel due to the nature of how tensorflow does the computations. Finally, the cost parameter weights the first class in the outcome vector. Unlike other packages used by `train`, the `dplyr` package is fully loaded when this model is used."
## 
## $mlpKerasDropoutCost$check
## function (pkg) 
## {
##     testmod <- try(keras::keras_model_sequential(), silent = TRUE)
##     if (inherits(testmod, "try-error")) 
##         stop("Could not start a sequential model. ", "`tensorflow` might not be installed. ", 
##             "See `?install_tensorflow`.", call. = FALSE)
##     TRUE
## }
## 
## 
## $mlpML
## $mlpML$label
## [1] "Multi-Layer Perceptron, with multiple layers"
## 
## $mlpML$library
## [1] "RSNNS"
## 
## $mlpML$loop
## NULL
## 
## $mlpML$type
## [1] "Regression"     "Classification"
## 
## $mlpML$parameters
##   parameter   class                label
## 1    layer1 numeric #Hidden Units layer1
## 2    layer2 numeric #Hidden Units layer2
## 3    layer3 numeric #Hidden Units layer3
## 
## $mlpML$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(layer1 = ((1:len) * 2) - 1, layer2 = 0, 
##             layer3 = 0)
##     }
##     else {
##         out <- data.frame(layer1 = sample(2:20, replace = TRUE, 
##             size = len), layer2 = sample(c(0, 2:20), replace = TRUE, 
##             size = len), layer3 = sample(c(0, 2:20), replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $mlpML$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     theDots <- theDots[!(names(theDots) %in% c("size", "linOut"))]
##     if (is.factor(y)) {
##         y <- RSNNS:::decodeClassLabels(y)
##         lin <- FALSE
##     }
##     else lin <- TRUE
##     nodes <- c(param$layer1, param$layer2, param$layer3)
##     if (any(nodes == 0)) {
##         nodes <- nodes[nodes > 0]
##         warning("At least one layer had zero units and ", "were removed. The new structure is ", 
##             paste0(nodes, collapse = "->"), call. = FALSE)
##     }
##     args <- list(x = x, y = y, size = nodes, linOut = lin)
##     args <- c(args, theDots)
##     do.call(RSNNS::mlp, args)
## }
## 
## $mlpML$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     if (modelFit$problemType == "Classification") {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     else out <- out[, 1]
##     out
## }
## 
## $mlpML$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $mlpML$levels
## function (x) 
## x$obsLevels
## 
## $mlpML$tags
## [1] "Neural Network"
## 
## $mlpML$sort
## function (x) 
## x[order(x$layer1, x$layer2, x$layer3), ]
## 
## 
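Setting `layer2` or `layer3` to 0 drops that layer (with the warning shown in `$mlpML$fit`), so a single grid can compare one- and two-hidden-layer networks. A sketch assuming `RSNNS` is installed:

library(caret)
set.seed(10)
m_ml <- train(Species ~ ., data = iris, method = "mlpML",
              tuneGrid = expand.grid(layer1 = c(3, 5), layer2 = c(0, 3),
                                     layer3 = 0),
              preProcess = c("center", "scale"),
              trControl = trainControl(method = "cv", number = 5))
m_ml$bestTune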
## $mlpSGD
## $mlpSGD$label
## [1] "Multilayer Perceptron Network by Stochastic Gradient Descent"
## 
## $mlpSGD$library
## [1] "FCNN4R" "plyr"  
## 
## $mlpSGD$loop
## NULL
## 
## $mlpSGD$type
## [1] "Regression"     "Classification"
## 
## $mlpSGD$parameters
##     parameter   class                 label
## 1        size numeric         #Hidden Units
## 2       l2reg numeric     L2 Regularization
## 3      lambda numeric RMSE Gradient Scaling
## 4  learn_rate numeric         Learning Rate
## 5    momentum numeric              Momentum
## 6       gamma numeric   Learning Rate Decay
## 7 minibatchsz numeric            Batch Size
## 8     repeats numeric               #Models
## 
## $mlpSGD$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     n <- nrow(x)
##     if (search == "grid") {
##         out <- expand.grid(size = ((1:len) * 2) - 1, l2reg = c(0, 
##             10^seq(-1, -4, length = len - 1)), lambda = 0, learn_rate = 2e-06, 
##             momentum = 0.9, gamma = seq(0, 0.9, length = len), 
##             minibatchsz = floor(nrow(x)/3), repeats = 1)
##     }
##     else {
##         out <- data.frame(size = sample(2:20, replace = TRUE, 
##             size = len), l2reg = 10^runif(len, min = -5, 1), 
##             lambda = runif(len, max = 0.4), learn_rate = runif(len), 
##             momentum = runif(len, min = 0.5), gamma = runif(len), 
##             minibatchsz = floor(n * runif(len, min = 0.1)), repeats = sample(1:10, 
##                 replace = TRUE, size = len))
##     }
##     out
## }
## 
## $mlpSGD$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     if (is.factor(y)) {
##         y <- class2ind(y)
##         net <- FCNN4R::mlp_net(c(ncol(x), param$size, ncol(y)))
##         net <- FCNN4R::mlp_set_activation(net, layer = "h", activation = "sigmoid")
##         net <- FCNN4R::mlp_set_activation(net, layer = "o", activation = "sigmoid")
##     }
##     else {
##         y <- matrix(y, ncol = 1)
##         net <- FCNN4R::mlp_net(c(ncol(x), param$size, 1))
##         net <- FCNN4R::mlp_set_activation(net, layer = "h", activation = "sigmoid")
##         net <- FCNN4R::mlp_set_activation(net, layer = "o", activation = "linear")
##     }
##     args <- list(net = net, input = x, output = y, learn_rate = param$learn_rate, 
##         minibatchsz = param$minibatchsz, l2reg = param$l2reg, 
##         lambda = param$lambda, gamma = param$gamma, momentum = param$momentum)
##     the_dots <- list(...)
##     if (!any(names(the_dots) == "tol_level")) {
##         if (ncol(y) == 1) 
##             args$tol_level <- sd(y[, 1])/sqrt(nrow(y))
##         else args$tol_level <- 0.001
##     }
##     if (!any(names(the_dots) == "max_epochs")) 
##         args$max_epochs <- 1000
##     args <- c(args, the_dots)
##     out <- list(models = vector(mode = "list", length = param$repeats))
##     for (i in 1:param$repeats) {
##         args$net <- FCNN4R::mlp_rnd_weights(args$net)
##         out$models[[i]] <- do.call(FCNN4R::mlp_teach_sgd, args)
##     }
##     out
## }
## 
## $mlpSGD$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- lapply(modelFit$models, function(obj, newdata) FCNN4R::mlp_eval(obj$net, 
##         input = newdata), newdata = newdata)
##     if (modelFit$problemType == "Classification") {
##         out <- as.data.frame(do.call("rbind", out), stringsAsFactors = TRUE)
##         out$sample <- rep(1:nrow(newdata), length(modelFit$models))
##         out <- plyr::ddply(out, plyr::.(sample), function(x) colMeans(x[, 
##             -ncol(x)]))[, -1]
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     else {
##         out <- if (length(out) == 1) 
##             out[[1]][, 1]
##         else {
##             out <- do.call("cbind", out)
##             out <- apply(out, 1, mean)
##         }
##     }
##     out
## }
## 
## $mlpSGD$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- lapply(modelFit$models, function(obj, newdata) FCNN4R::mlp_eval(obj$net, 
##         input = newdata), newdata = newdata)
##     out <- as.data.frame(do.call("rbind", out), stringsAsFactors = TRUE)
##     out$sample <- rep(1:nrow(newdata), length(modelFit$models))
##     out <- plyr::ddply(out, plyr::.(sample), function(x) colMeans(x[, 
##         -ncol(x)]))[, -1]
##     out <- t(apply(out, 1, function(x) exp(x)/sum(exp(x))))
##     colnames(out) <- modelFit$obsLevels
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $mlpSGD$varImp
## function (object, ...) 
## {
##     imps <- lapply(object$models, caret:::GarsonWeights_FCNN4R, 
##         xnames = object$xNames)
##     imps <- do.call("rbind", imps)
##     imps <- apply(imps, 1, mean, na.rm = TRUE)
##     imps <- data.frame(var = names(imps), imp = imps)
##     imps <- plyr::ddply(imps, plyr::.(var), function(x) c(Overall = mean(x$imp)))
##     rownames(imps) <- as.character(imps$var)
##     imps$var <- NULL
##     imps[object$xNames, , drop = FALSE]
## }
## 
## $mlpSGD$tags
## [1] "Neural Network"    "L2 Regularization"
## 
## $mlpSGD$sort
## function (x) 
## x[order(x$size, -x$l2reg, -x$gamma), ]
## 
## 
## $mlpWeightDecay
## $mlpWeightDecay$label
## [1] "Multi-Layer Perceptron"
## 
## $mlpWeightDecay$library
## [1] "RSNNS"
## 
## $mlpWeightDecay$loop
## NULL
## 
## $mlpWeightDecay$type
## [1] "Regression"     "Classification"
## 
## $mlpWeightDecay$parameters
##   parameter   class         label
## 1      size numeric #Hidden Units
## 2     decay numeric  Weight Decay
## 
## $mlpWeightDecay$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(size = ((1:len) * 2) - 1, decay = c(0, 
##             10^seq(-1, -4, length = len - 1)))
##     }
##     else {
##         out <- data.frame(size = sample(1:20, size = len, replace = TRUE), 
##             decay = 10^runif(len, min = -5, max = 1))
##     }
##     out
## }
## 
## $mlpWeightDecay$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     theDots <- theDots[!(names(theDots) %in% c("size", "linOut"))]
##     if (any(names(theDots) == "learnFunc")) {
##         theDots$learnFunc <- NULL
##         warning("Cannot over-ride 'learnFunc' argument for this model. BackpropWeightDecay is used.")
##     }
##     if (any(names(theDots) == "learnFuncParams")) {
##         prms <- theDots$learnFuncParams
##         prms[2] <- param$decay
##         warning("Over-riding weight decay value in the 'learnFuncParams' argument you passed in. Other values are retained")
##     }
##     else prms <- c(0.2, param$decay, 0, 0)
##     if (is.factor(y)) {
##         y <- RSNNS:::decodeClassLabels(y)
##         lin <- FALSE
##     }
##     else lin <- TRUE
##     args <- list(x = x, y = y, learnFunc = "BackpropWeightDecay", 
##         learnFuncParams = prms, size = param$size, linOut = lin)
##     args <- c(args, theDots)
##     do.call(RSNNS::mlp, args)
## }
## 
## $mlpWeightDecay$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     if (modelFit$problemType == "Classification") {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     else out <- out[, 1]
##     out
## }
## 
## $mlpWeightDecay$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $mlpWeightDecay$levels
## function (x) 
## x$obsLevels
## 
## $mlpWeightDecay$tags
## [1] "Neural Network"    "L2 Regularization"
## 
## $mlpWeightDecay$sort
## function (x) 
## x[order(x$size, -x$decay), ]
## 
## 
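The $mlpWeightDecay entry is the same RSNNS mlp fit with the BackpropWeightDecay learning function. A minimal sketch under the same assumptions (caret and RSNNS installed, iris as stand-in data):

library(caret)
set.seed(1)
fit_wd <- train(Species ~ ., data = iris, method = "mlpWeightDecay",
                tuneGrid = expand.grid(size = c(3, 5), decay = c(0, 0.1)),
                trControl = trainControl(method = "cv", number = 5))
fit_wd$results   # accuracy for every size/decay combination
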
## $mlpWeightDecayML
## $mlpWeightDecayML$label
## [1] "Multi-Layer Perceptron, multiple layers"
## 
## $mlpWeightDecayML$library
## [1] "RSNNS"
## 
## $mlpWeightDecayML$loop
## NULL
## 
## $mlpWeightDecayML$type
## [1] "Regression"     "Classification"
## 
## $mlpWeightDecayML$parameters
##   parameter   class                label
## 1    layer1 numeric #Hidden Units layer1
## 2    layer2 numeric #Hidden Units layer2
## 3    layer3 numeric #Hidden Units layer3
## 4     decay numeric         Weight Decay
## 
## $mlpWeightDecayML$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(layer1 = ((1:len) * 2) - 1, layer2 = 0, 
##             layer3 = 0, decay = c(0, 10^seq(-1, -4, length = len - 
##                 1)))
##     }
##     else {
##         out <- data.frame(layer1 = sample(2:20, replace = TRUE, 
##             size = len), layer2 = sample(c(0, 2:20), replace = TRUE, 
##             size = len), layer3 = sample(c(0, 2:20), replace = TRUE, 
##             size = len), decay = 10^runif(len, min = -5, max = 1))
##     }
##     out
## }
## 
## $mlpWeightDecayML$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     theDots <- theDots[!(names(theDots) %in% c("size", "linOut"))]
##     if (any(names(theDots) == "learnFunc")) {
##         theDots$learnFunc <- NULL
##         warning("Cannot over-ride 'learnFunc' argument for this model. BackpropWeightDecay is used.")
##     }
##     if (any(names(theDots) == "learnFuncParams")) {
##         prms <- theDots$learnFuncParams
##         prms[2] <- param$decay
##         warning("Over-riding weight decay value in the 'learnFuncParams' argument you passed in. Other values are retained")
##     }
##     else prms <- c(0.2, param$decay, 0, 0)
##     if (is.factor(y)) {
##         y <- RSNNS:::decodeClassLabels(y)
##         lin <- FALSE
##     }
##     else lin <- TRUE
##     nodes <- c(param$layer1, param$layer2, param$layer3)
##     if (any(nodes == 0)) {
##         nodes <- nodes[nodes > 0]
##         warning("At least one layer had zero units and ", "were removed. The new structure is ", 
##             paste0(nodes, collapse = "->"), call. = FALSE)
##     }
##     args <- list(x = x, y = y, learnFunc = "BackpropWeightDecay", 
##         learnFuncParams = prms, size = nodes, linOut = lin)
##     args <- c(args, theDots)
##     do.call(RSNNS::mlp, args)
## }
## 
## $mlpWeightDecayML$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     if (modelFit$problemType == "Classification") {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     else out <- out[, 1]
##     out
## }
## 
## $mlpWeightDecayML$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $mlpWeightDecayML$levels
## function (x) 
## x$obsLevels
## 
## $mlpWeightDecayML$tags
## [1] "Neural Network"    "L2 Regularization"
## 
## $mlpWeightDecayML$sort
## function (x) 
## x[order(x$layer1, x$layer2, x$layer3, -x$decay), ]
## 
## 
## $monmlp
## $monmlp$label
## [1] "Monotone Multi-Layer Perceptron Neural Network"
## 
## $monmlp$library
## [1] "monmlp"
## 
## $monmlp$loop
## NULL
## 
## $monmlp$type
## [1] "Classification" "Regression"    
## 
## $monmlp$parameters
##    parameter   class         label
## 1    hidden1 numeric #Hidden Units
## 2 n.ensemble numeric       #Models
## 
## $monmlp$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(hidden1 = ((1:len) * 2) - 1, n.ensemble = 1)
##     }
##     else {
##         out <- data.frame(hidden1 = sample(2:20, replace = TRUE, 
##             size = len), n.ensemble = sample(1:10, replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $monmlp$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     y <- if (is.numeric(y)) 
##         matrix(y, ncol = 1)
##     else class2ind(y)
##     out <- monmlp::monmlp.fit(y = y, x = x, hidden1 = param$hidden1, 
##         n.ensemble = param$n.ensemble, ...)
##     list(model = out)
## }
## 
## $monmlp$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- monmlp::monmlp.predict(newdata, modelFit$model)
##     if (modelFit$problemType == "Classification") {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     else out <- out[, 1]
##     out
## }
## 
## $monmlp$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- monmlp::monmlp.predict(newdata, modelFit$model)
##     out <- t(apply(out, 1, function(x) exp(x)/sum(exp(x))))
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $monmlp$predictors
## function (x, ...) 
## {
##     colnames(attr(x$model, "x"))
## }
## 
## $monmlp$varImp
## NULL
## 
## $monmlp$levels
## function (x) 
## {
##     y <- attr(x$model, "y")
##     if (is.matrix(y) && ncol(y) >= 2) 
##         colnames(y)
##     else NULL
## }
## 
## $monmlp$oob
## NULL
## 
## $monmlp$tags
## [1] "Neural Network"
## 
## $monmlp$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $msaenet
## $msaenet$label
## [1] "Multi-Step Adaptive MCP-Net"
## 
## $msaenet$library
## [1] "msaenet"
## 
## $msaenet$type
## [1] "Regression"     "Classification"
## 
## $msaenet$parameters
##   parameter   class                          label
## 1    alphas numeric                          Alpha
## 2    nsteps numeric     #Adaptive Estimation Steps
## 3     scale numeric Adaptive Weight Scaling Factor
## 
## $msaenet$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(alphas = seq(0.05, 0.95, length = len), 
##             nsteps = 2:(len + 1), scale = 2:(len + 1))
##     }
##     else {
##         out <- data.frame(alphas = runif(len, min = 0.05, max = 0.95), 
##             nsteps = sample(2:10, size = len, replace = TRUE), 
##             scale = runif(len, min = 0.25, max = 4))
##     }
##     out
## }
## 
## $msaenet$loop
## NULL
## 
## $msaenet$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (length(levels(y)) > 2) 
##         stop("Only two class problems are supported by `msamnet`", 
##             call. = FALSE)
##     theDots <- list(...)
##     if (all(names(theDots) != "family")) {
##         if (length(levels(y)) > 0) {
##             fam <- "binomial"
##         }
##         else fam <- "gaussian"
##         theDots$family <- fam
##     }
##     if (all(names(theDots) != "tune")) 
##         theDots$tune <- "aic"
##     if (all(names(theDots) != "tune.nsteps")) 
##         theDots$tune.nsteps <- "aic"
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     modelArgs <- c(list(x = x, y = y, alphas = param$alphas, 
##         nsteps = param$nsteps, scale = param$scale), theDots)
##     do.call(getFromNamespace("msamnet", "msaenet"), modelArgs)
## }
## 
## $msaenet$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- msaenet:::predict.msaenet(modelFit, newdata, type = "response")
##     if (modelFit$model$family == "binomial") {
##         out <- ifelse(out > 0.5, modelFit$obsLevels[2], modelFit$obsLevels[1])
##     }
##     out
## }
## 
## $msaenet$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- msaenet:::predict.msaenet(modelFit, newdata, type = "response")
##     out <- as.data.frame(cbind(1 - out, out), stringsAsFactors = TRUE)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $msaenet$predictors
## function (x, ...) 
## {
##     coefs <- msaenet:::predict.msaenet(x, newx = NULL, type = "coefficients")
##     coefs <- rownames(coefs)[coefs != 0]
##     coefs <- coefs[coefs != "(Intercept)"]
##     coefs
## }
## 
## $msaenet$varImp
## function (object, ...) 
## {
##     coefs <- msaenet:::predict.msaenet(object, newx = NULL, type = "coefficients")
##     coefs <- abs(coefs[rownames(coefs) != "(Intercept)", , drop = FALSE])
##     colnames(coefs) <- "Overall"
##     coefs
## }
## 
## $msaenet$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $msaenet$tags
## [1] "Generalized Linear Model"   "Implicit Feature Selection"
## [3] "L1 Regularization"          "L2 Regularization"         
## [5] "Linear Classifier"          "Linear Regression"         
## 
## $msaenet$sort
## function (x) 
## x[order(x$alphas, x$nsteps), ]
## 
## $msaenet$trim
## function (x) 
## {
##     x$call <- NULL
##     x
## }
## 
## 
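Because $msaenet$fit stops when the outcome has more than two classes, a demonstration needs a binary target. A hedged sketch, assuming the caret and msaenet packages are installed; the two-class data set is just a trimmed iris:

library(caret)
two_class <- droplevels(subset(iris, Species != "setosa"))
set.seed(1)
fit_msa <- train(Species ~ ., data = two_class, method = "msaenet",
                 tuneGrid = expand.grid(alphas = c(0.5, 0.9), nsteps = 3, scale = 2),
                 trControl = trainControl(method = "cv", number = 5))
predictors(fit_msa)   # nonzero coefficients, via $msaenet$predictors
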
## $multinom
## $multinom$label
## [1] "Penalized Multinomial Regression"
## 
## $multinom$library
## [1] "nnet"
## 
## $multinom$loop
## NULL
## 
## $multinom$type
## [1] "Classification"
## 
## $multinom$parameters
##   parameter   class        label
## 1     decay numeric Weight Decay
## 
## $multinom$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(decay = c(0, 10^seq(-1, -4, length = len - 
##             1)))
##     }
##     else {
##         out <- data.frame(decay = 10^runif(len, min = -5, 1))
##     }
##     out
## }
## 
## $multinom$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         out <- nnet::multinom(.outcome ~ ., data = dat, weights = wts, 
##             decay = param$decay, ...)
##     }
##     else out <- nnet::multinom(.outcome ~ ., data = dat, decay = param$decay, 
##         ...)
##     out
## }
## 
## $multinom$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "class")
## 
## $multinom$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, type = "probs")
##     if (nrow(newdata) == 1) {
##         out <- as.data.frame(t(out), stringsAsFactors = TRUE)
##     }
##     if (length(modelFit$obsLevels) == 2) {
##         out <- cbind(1 - out, out)
##         colnames(out) <- modelFit$obsLevels
##     }
##     out
## }
## 
## $multinom$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else NA
## 
## $multinom$varImp
## function (object, ...) 
## {
##     out <- abs(coef(object))
##     if (is.vector(out)) {
##         out <- data.frame(Overall = out)
##         rownames(out) <- names(coef(object))
##     }
##     else {
##         out <- as.data.frame(apply(out, 2, sum), stringsAsFactors = TRUE)
##         names(out)[1] <- "Overall"
##     }
##     subset(out, rownames(out) != "(Intercept)")
## }
## 
## $multinom$levels
## function (x) 
## x$obsLevels
## 
## $multinom$tags
## [1] "Neural Network"       "L2 Regularization"    "Logistic Regression" 
## [4] "Linear Classifier"    "Accepts Case Weights"
## 
## $multinom$sort
## function (x) 
## x[order(-x[, 1]), ]
## 
## 
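The $multinom entry wraps nnet::multinom() and tunes only the weight-decay penalty. A minimal sketch, assuming caret and nnet are installed:

library(caret)
set.seed(1)
fit_multi <- train(Species ~ ., data = iris, method = "multinom",
                   tuneGrid = expand.grid(decay = c(0, 0.01, 0.1)),
                   trControl = trainControl(method = "cv", number = 5),
                   trace = FALSE)   # forwarded through ... to nnet::multinom
predict(fit_multi, head(iris), type = "prob")
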
## $mxnet
## $mxnet$label
## [1] "Neural Network"
## 
## $mxnet$library
## [1] "mxnet"
## 
## $mxnet$loop
## NULL
## 
## $mxnet$type
## [1] "Classification" "Regression"    
## 
## $mxnet$parameters
##       parameter     class                    label
## 1        layer1   numeric #Hidden Units in Layer 1
## 2        layer2   numeric #Hidden Units in Layer 2
## 3        layer3   numeric #Hidden Units in Layer 3
## 4 learning.rate   numeric            Learning Rate
## 5      momentum   numeric                 Momentum
## 6       dropout   numeric             Dropout Rate
## 7    activation character      Activation Function
## 
## $mxnet$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(layer1 = ((1:len) * 2) - 1, layer2 = 0, 
##             layer3 = 0, learning.rate = 2e-06, momentum = 0.9, 
##             dropout = seq(0, 0.7, length = len), activation = "relu")
##     }
##     else {
##         out <- data.frame(layer1 = sample(2:20, replace = TRUE, 
##             size = len), layer2 = 0, layer3 = 0, learning.rate = runif(len), 
##             momentum = runif(len), dropout = runif(len, max = 0.7), 
##             activation = sample(c("relu", "sigmoid", "tanh", 
##                 "softrelu"), replace = TRUE, size = len))
##     }
##     out
## }
## 
## $mxnet$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     mxnet::mx.set.seed(21)
##     num_units <- param[grepl("layer[1-9]", names(param))]
##     num_units <- num_units[num_units > 0]
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     if (is.numeric(y)) {
##         out <- mxnet::mx.mlp(data = x, label = y, hidden_node = num_units, 
##             out_node = 1, out_activation = "rmse", learning.rate = param$learning.rate, 
##             momentum = param$momentum, eval.metric = mxnet::mx.metric.rmse, 
##             array.layout = "rowmajor", activation = rep(as.character(param$activation), 
##                 length(num_units)), initializer = mxnet::mx.init.Xavier(factor_type = "avg", 
##                 magnitude = 3, rnd_type = "uniform"), ...)
##     }
##     else {
##         y <- as.numeric(y) - 1
##         out <- mxnet::mx.mlp(data = x, label = y, hidden_node = num_units, 
##             out_node = length(unique(y)), out_activation = "softmax", 
##             learning.rate = param$learning.rate, momentum = param$momentum, 
##             eval.metric = mxnet::mx.metric.accuracy, array.layout = "rowmajor", 
##             activation = rep(as.character(param$activation), 
##                 length(num_units)), initializer = mxnet::mx.init.Xavier(factor_type = "avg", 
##                 magnitude = 3, rnd_type = "uniform"), ...)
##     }
##     if (last) 
##         out <- mxnet::mx.serialize(out)
##     out
## }
## 
## $mxnet$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     pred <- predict(modelFit, newdata, array.layout = "rowmajor")
##     if (modelFit$problemType == "Regression") {
##         pred <- pred[1, ]
##     }
##     else pred <- modelFit$obsLevels[apply(pred, 2, which.max)]
##     pred
## }
## 
## $mxnet$predictors
## function (x, ...) 
## {
##     if (any(names(x) == "xNames")) 
##         x$xNames
##     else NA
## }
## 
## $mxnet$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     pred <- t(predict(modelFit, newdata, array.layout = "rowmajor"))
##     colnames(pred) <- modelFit$obsLevels
##     pred
## }
## 
## $mxnet$notes
## [1] "The `mxnet` package is not yet on CRAN. See [https://mxnet.apache.org/](https://mxnet.apache.org/) for installation instructions."
## 
## $mxnet$tags
## [1] "Neural Network"
## 
## $mxnet$sort
## function (x) 
## x[order(x$layer1, x$layer2, x$layer3), ]
## 
## 
## $mxnetAdam
## $mxnetAdam$label
## [1] "Neural Network"
## 
## $mxnetAdam$library
## [1] "mxnet"
## 
## $mxnetAdam$type
## [1] "Classification" "Regression"    
## 
## $mxnetAdam$parameters
##      parameter     class                    label
## 1       layer1   numeric #Hidden Units in Layer 1
## 2       layer2   numeric #Hidden Units in Layer 2
## 3       layer3   numeric #Hidden Units in Layer 3
## 4      dropout   numeric             Dropout Rate
## 5        beta1   numeric                    beta1
## 6        beta2   numeric                    beta2
## 7 learningrate   numeric            Learning Rate
## 8   activation character      Activation Function
## 
## $mxnetAdam$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(layer1 = ((1:len) * 2) - 1, layer2 = 0, 
##             layer3 = 0, learningrate = 2e-06, beta1 = 0.9, beta2 = 0.9999, 
##             dropout = seq(0, 0.7, length = len), activation = "relu")
##     }
##     else {
##         out <- data.frame(layer1 = sample(2:20, replace = TRUE, 
##             size = len), layer2 = 0, layer3 = 0, learningrate = runif(len), 
##             beta1 = runif(len), beta2 = runif(len), dropout = runif(len, 
##                 max = 0.7), activation = sample(c("relu", "sigmoid", 
##                 "tanh", "softrelu"), replace = TRUE, size = len))
##     }
##     out
## }
## 
## $mxnetAdam$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     num_units <- param[grepl("layer[1-9]", names(param))]
##     num_units <- num_units[num_units > 0]
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     if (is.numeric(y)) {
##         mxnet::mx.set.seed(21)
##         out <- mxnet::mx.mlp(data = x, label = y, out_node = 1, 
##             out_activation = "rmse", optimizer = "adam", eval.metric = mxnet::mx.metric.rmse, 
##             array.layout = "rowmajor", learning.rate = param$learningrate, 
##             beta1 = param$beta1, beta2 = param$beta2, dropout = param$dropout, 
##             hidden_node = num_units, activation = rep(as.character(param$activation), 
##                 length(num_units)), initializer = mxnet::mx.init.Xavier(factor_type = "avg", 
##                 magnitude = 3, rnd_type = "uniform"), ...)
##     }
##     else {
##         y <- as.numeric(y) - 1
##         mxnet::mx.set.seed(21)
##         out <- mxnet::mx.mlp(data = x, label = y, out_node = length(unique(y)), 
##             out_activation = "softmax", optimizer = "adam", eval.metric = mxnet::mx.metric.accuracy, 
##             array.layout = "rowmajor", learning.rate = param$learningrate, 
##             beta1 = param$beta1, beta2 = param$beta2, dropout = param$dropout, 
##             hidden_node = num_units, activation = rep(as.character(param$activation), 
##                 length(num_units)), initializer = mxnet::mx.init.Xavier(factor_type = "avg", 
##                 magnitude = 3, rnd_type = "uniform"), ...)
##     }
##     if (last) 
##         out <- mxnet::mx.serialize(out)
##     out
## }
## 
## $mxnetAdam$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     pred <- predict(modelFit, newdata, array.layout = "rowmajor")
##     if (modelFit$problemType == "Regression") {
##         pred <- pred[1, ]
##     }
##     else {
##         pred <- modelFit$obsLevels[apply(pred, 2, which.max)]
##     }
##     pred
## }
## 
## $mxnetAdam$predictors
## function (x, ...) 
## {
##     if (any(names(x) == "xNames")) 
##         x$xNames
##     else NA
## }
## 
## $mxnetAdam$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     pred <- t(predict(modelFit, newdata, array.layout = "rowmajor"))
##     colnames(pred) <- modelFit$obsLevels
##     pred
## }
## 
## $mxnetAdam$notes
## [1] "The `mxnet` package is not yet on CRAN. See [https://mxnet.apache.org/](https://mxnet.apache.org/) for installation instructions. Users are strongly advised to define `num.round` themselves."
## 
## $mxnetAdam$tags
## [1] "Neural Network"
## 
## $mxnetAdam$sort
## function (x) 
## x[order(x$layer1, x$layer2, x$layer3, x$beta1, x$beta2, x$learningrate, 
##     x$dropout), ]
## 
## 
## $naive_bayes
## $naive_bayes$label
## [1] "Naive Bayes"
## 
## $naive_bayes$library
## [1] "naivebayes"
## 
## $naive_bayes$loop
## NULL
## 
## $naive_bayes$type
## [1] "Classification"
## 
## $naive_bayes$parameters
##   parameter   class                label
## 1   laplace numeric   Laplace Correction
## 2 usekernel logical    Distribution Type
## 3    adjust numeric Bandwidth Adjustment
## 
## $naive_bayes$grid
## function (x, y, len = NULL, search = "grid") 
## expand.grid(usekernel = c(TRUE, FALSE), laplace = 0, adjust = 1)
## 
## $naive_bayes$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (param$usekernel) {
##         out <- naivebayes::naive_bayes(x, y, usekernel = TRUE, 
##             laplace = param$laplace, adjust = param$adjust, ...)
##     }
##     else out <- naivebayes::naive_bayes(x, y, usekernel = FALSE, 
##         laplace = param$laplace, ...)
##     out
## }
## 
## $naive_bayes$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $naive_bayes$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     as.data.frame(predict(modelFit, newdata, type = "prob"), 
##         stringsAsFactors = TRUE)
## }
## 
## $naive_bayes$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else names(x$tables)
## 
## $naive_bayes$tags
## [1] "Bayesian Model"
## 
## $naive_bayes$levels
## function (x) 
## x$levels
## 
## $naive_bayes$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
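A sketch of the naivebayes-backed entry; note that $naive_bayes$grid only toggles the kernel-density option, so the grid below mirrors it. Assumes caret and naivebayes are installed:

library(caret)
set.seed(1)
fit_nb <- train(Species ~ ., data = iris, method = "naive_bayes",
                tuneGrid = expand.grid(laplace = 0, usekernel = c(TRUE, FALSE), adjust = 1),
                trControl = trainControl(method = "cv", number = 5))
fit_nb$bestTune   # kernel vs. Gaussian densities, chosen by CV accuracy
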
## $nb
## $nb$label
## [1] "Naive Bayes"
## 
## $nb$library
## [1] "klaR"
## 
## $nb$loop
## NULL
## 
## $nb$type
## [1] "Classification"
## 
## $nb$parameters
##   parameter   class                label
## 1        fL numeric   Laplace Correction
## 2 usekernel logical    Distribution Type
## 3    adjust numeric Bandwidth Adjustment
## 
## $nb$grid
## function (x, y, len = NULL, search = "grid") 
## expand.grid(usekernel = c(TRUE, FALSE), fL = 0, adjust = 1)
## 
## $nb$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (param$usekernel) {
##         out <- klaR::NaiveBayes(x, y, usekernel = TRUE, fL = param$fL, 
##             adjust = param$adjust, ...)
##     }
##     else out <- klaR::NaiveBayes(x, y, usekernel = FALSE, fL = param$fL, 
##         ...)
##     out
## }
## 
## $nb$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)$class
## }
## 
## $nb$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "raw")$posterior
## }
## 
## $nb$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else x$varnames
## 
## $nb$tags
## [1] "Bayesian Model"
## 
## $nb$levels
## function (x) 
## x$levels
## 
## $nb$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $nbDiscrete
## $nbDiscrete$label
## [1] "Naive Bayes Classifier"
## 
## $nbDiscrete$library
## [1] "bnclassify"
## 
## $nbDiscrete$type
## [1] "Classification"
## 
## $nbDiscrete$parameters
##   parameter   class               label
## 1    smooth numeric Smoothing Parameter
## 
## $nbDiscrete$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(smooth = 0:(len - 1))
##     }
##     else {
##         out <- data.frame(smooth = runif(len, min = 0, max = 10))
##     }
##     out
## }
## 
## $nbDiscrete$loop
## NULL
## 
## $nbDiscrete$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     bnclassify::bnc("nb", class = ".outcome", dataset = dat, 
##         smooth = param$smooth, ...)
## }
## 
## $nbDiscrete$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $nbDiscrete$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, prob = TRUE)
## }
## 
## $nbDiscrete$levels
## function (x) 
## x$obsLevels
## 
## $nbDiscrete$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $nbDiscrete$tags
## [1] "Bayesian Model"              "Categorical Predictors Only"
## 
## $nbDiscrete$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $nbSearch
## $nbSearch$label
## [1] "Semi-Naive Structure Learner Wrapper"
## 
## $nbSearch$library
## [1] "bnclassify"
## 
## $nbSearch$type
## [1] "Classification"
## 
## $nbSearch$parameters
##      parameter     class                        label
## 1            k   numeric                       #Folds
## 2      epsilon   numeric Minimum Absolute Improvement
## 3       smooth   numeric          Smoothing Parameter
## 4 final_smooth   numeric    Final Smoothing Parameter
## 5    direction character             Search Direction
## 
## $nbSearch$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(k = 10, epsilon = 0.01, smooth = 0.01, 
##             final_smooth = 1, direction = c("forward", "backwards"))
##     }
##     else {
##         out <- data.frame(k = sample(3:10, size = len, replace = TRUE), 
##             epsilon = runif(len, min = 0, max = 0.05), smooth = runif(len, 
##                 min = 0, max = 10), final_smooth = runif(len, 
##                 min = 0, max = 10), direction = sample(c("forward", 
##                 "backwards"), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $nbSearch$loop
## NULL
## 
## $nbSearch$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (param$direction == "forward") {
##         struct <- bnclassify::fssj(class = ".outcome", dataset = dat, 
##             k = param$k, epsilon = param$epsilon, smooth = param$smooth)
##     }
##     else {
##         struct <- bnclassify::bsej(class = ".outcome", dataset = dat, 
##             k = param$k, epsilon = param$epsilon, smooth = param$smooth)
##     }
##     bnclassify::lp(struct, dat, smooth = param$final_smooth, 
##         ...)
## }
## 
## $nbSearch$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $nbSearch$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, prob = TRUE)
## }
## 
## $nbSearch$levels
## function (x) 
## x$obsLevels
## 
## $nbSearch$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $nbSearch$tags
## [1] "Bayesian Model"              "Categorical Predictors Only"
## 
## $nbSearch$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $neuralnet
## $neuralnet$label
## [1] "Neural Network"
## 
## $neuralnet$library
## [1] "neuralnet"
## 
## $neuralnet$loop
## NULL
## 
## $neuralnet$type
## [1] "Regression"
## 
## $neuralnet$parameters
##   parameter   class                    label
## 1    layer1 numeric #Hidden Units in Layer 1
## 2    layer2 numeric #Hidden Units in Layer 2
## 3    layer3 numeric #Hidden Units in Layer 3
## 
## $neuralnet$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(layer1 = ((1:len) * 2) - 1, layer2 = 0, 
##             layer3 = 0)
##     }
##     else {
##         out <- data.frame(layer1 = sample(2:20, replace = TRUE, 
##             size = len), layer2 = sample(c(0, 2:20), replace = TRUE, 
##             size = len), layer3 = sample(c(0, 2:20), replace = TRUE, 
##             size = len))
##     }
##     out
## }
## 
## $neuralnet$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     colNames <- colnames(x)
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     form <- as.formula(paste(".outcome ~", paste(colNames, collapse = "+")))
##     if (param$layer1 == 0) 
##         stop("the first layer must have at least one hidden unit")
##     if (param$layer2 == 0 & param$layer3 > 0) 
##         stop("the second layer must have at least one hidden unit if a third layer is specified")
##     nodes <- c(param$layer1)
##     if (param$layer2 > 0) {
##         nodes <- c(nodes, param$layer2)
##         if (param$layer3 > 0) 
##             nodes <- c(nodes, param$layer3)
##     }
##     neuralnet::neuralnet(form, data = dat, hidden = nodes, ...)
## }
## 
## $neuralnet$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     newdata <- newdata[, modelFit$model.list$variables, drop = FALSE]
##     neuralnet::compute(modelFit, covariate = newdata)$net.result[, 
##         1]
## }
## 
## $neuralnet$prob
## NULL
## 
## $neuralnet$tags
## [1] "Neural Network"
## 
## $neuralnet$sort
## function (x) 
## x[order(x$layer1, x$layer2, x$layer3), ]
## 
## 
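$neuralnet$type is Regression only, so the sketch below predicts a numeric outcome. It reuses the airquality data loaded earlier (complete cases only) and assumes the caret and neuralnet packages are installed; centering and scaling the predictors helps the optimizer converge:

library(caret)
aq <- na.omit(airquality)
set.seed(1)
fit_nn <- train(Ozone ~ Solar.R + Wind + Temp, data = aq, method = "neuralnet",
                tuneGrid = expand.grid(layer1 = c(2, 4), layer2 = 0, layer3 = 0),
                preProcess = c("center", "scale"),
                trControl = trainControl(method = "cv", number = 5))
fit_nn$results   # RMSE by hidden-layer size
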
## $nnet
## $nnet$label
## [1] "Neural Network"
## 
## $nnet$library
## [1] "nnet"
## 
## $nnet$loop
## NULL
## 
## $nnet$type
## [1] "Classification" "Regression"    
## 
## $nnet$parameters
##   parameter   class         label
## 1      size numeric #Hidden Units
## 2     decay numeric  Weight Decay
## 
## $nnet$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(size = ((1:len) * 2) - 1, decay = c(0, 
##             10^seq(-1, -4, length = len - 1)))
##     }
##     else {
##         out <- data.frame(size = sample(1:20, size = len, replace = TRUE), 
##             decay = 10^runif(len, min = -5, 1))
##     }
##     out
## }
## 
## $nnet$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         out <- nnet::nnet(.outcome ~ ., data = dat, weights = wts, 
##             size = param$size, decay = param$decay, ...)
##     }
##     else out <- nnet::nnet(.outcome ~ ., data = dat, size = param$size, 
##         decay = param$decay, ...)
##     out
## }
## 
## $nnet$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "raw")[, 1]
##     }
##     out
## }
## 
## $nnet$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     if (ncol(as.data.frame(out, stringsAsFactors = TRUE)) == 
##         1) {
##         out <- cbind(out, 1 - out)
##         dimnames(out)[[2]] <- rev(modelFit$obsLevels)
##     }
##     out
## }
## 
## $nnet$varImp
## function (object, ...) 
## {
##     imp <- caret:::GarsonWeights(object, ...)
##     if (ncol(imp) > 1) {
##         imp <- cbind(apply(imp, 1, mean), imp)
##         colnames(imp)[1] <- "Overall"
##     }
##     else {
##         imp <- as.data.frame(imp, stringsAsFactors = TRUE)
##         names(imp) <- "Overall"
##     }
##     if (!is.null(object$xNames)) 
##         rownames(imp) <- object$xNames
##     imp
## }
## 
## $nnet$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else NA
## 
## $nnet$tags
## [1] "Neural Network"       "L2 Regularization"    "Accepts Case Weights"
## 
## $nnet$levels
## function (x) 
## x$lev
## 
## $nnet$sort
## function (x) 
## x[order(x$size, -x$decay), ]
## 
## 
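The $nnet entry tunes size and decay jointly, matching its grid above. A minimal sketch, assuming caret and nnet are installed:

library(caret)
set.seed(1)
fit_nnet <- train(Species ~ ., data = iris, method = "nnet",
                  tuneGrid = expand.grid(size = c(1, 3, 5), decay = c(0, 0.1)),
                  trControl = trainControl(method = "cv", number = 5),
                  trace = FALSE)   # silence nnet's per-iteration log
varImp(fit_nnet)   # Garson-weight importance, as defined in $nnet$varImp
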
## $nnls
## $nnls$label
## [1] "Non-Negative Least Squares"
## 
## $nnls$library
## [1] "nnls"
## 
## $nnls$loop
## NULL
## 
## $nnls$type
## [1] "Regression"
## 
## $nnls$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $nnls$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $nnls$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     out <- nnls::nnls(x, y)
##     names(out$x) <- colnames(x)
##     out
## }
## 
## $nnls$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- newdata %*% modelFit$x
##     out[, 1]
## }
## 
## $nnls$prob
## NULL
## 
## $nnls$predictors
## function (x, ...) 
## names(x$x)[x$x != 0]
## 
## $nnls$tags
## [1] "Linear Regression"
## 
## $nnls$varImp
## function (object, ...) 
## {
##     out <- data.frame(Overall = object$x)
##     rownames(out) <- names(object$x)
##     out
## }
## 
## $nnls$sort
## function (x) 
## x
## 
## 
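$nnls has no tuning parameters (only the placeholder parameter = "none"), so calling the package directly is as easy as going through train(). A small sketch on simulated data, assuming the nnls package is installed:

library(nnls)
set.seed(1)
X <- matrix(runif(60 * 3), ncol = 3)               # 60 rows, 3 predictors
y <- drop(X %*% c(2, 0, 1)) + rnorm(60, sd = 0.1)  # true coefficients 2, 0, 1
fit_nnls <- nnls(X, y)
fit_nnls$x   # estimates constrained to be non-negative
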
## $nodeHarvest
## $nodeHarvest$label
## [1] "Tree-Based Ensembles"
## 
## $nodeHarvest$library
## [1] "nodeHarvest"
## 
## $nodeHarvest$loop
## NULL
## 
## $nodeHarvest$type
## [1] "Regression"     "Classification"
## 
## $nodeHarvest$parameters
##   parameter     class                     label
## 1  maxinter   numeric Maximum Interaction Depth
## 2      mode character           Prediction Mode
## 
## $nodeHarvest$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(maxinter = 1:len, mode = c("mean", 
##             "outbag"))
##     }
##     else {
##         out <- data.frame(maxinter = sample(1:20, size = len, 
##             replace = TRUE), mode = sample(c("mean", "outbag"), 
##             size = len, replace = TRUE))
##     }
##     out
## }
## 
## $nodeHarvest$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     if (is.numeric(y)) {
##         out <- nodeHarvest::nodeHarvest(x, y, maxinter = param$maxinter, 
##             mode = param$mode, ...)
##     }
##     else {
##         if (length(lev) > 2) 
##             stop("Two Class problems only")
##         out <- nodeHarvest::nodeHarvest(x, ifelse(y == levels(y)[1], 
##             1, 0), maxinter = param$maxinter, mode = param$mode, 
##             ...)
##     }
##     out
## }
## 
## $nodeHarvest$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (modelFit$problemType == "Regression") {
##         predict(modelFit, as.matrix(newdata), maxshow = 0)
##     }
##     else {
##         prbs <- predict(modelFit, as.matrix(newdata), maxshow = 0)
##         ifelse(prbs > 0.5, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     }
## }
## 
## $nodeHarvest$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, as.matrix(newdata), maxshow = 0)
##     if (is.vector(out)) {
##         out <- cbind(out, 1 - out)
##         colnames(out) <- modelFit$obsLevels
##     }
##     out
## }
## 
## $nodeHarvest$levels
## function (x) 
## x$obsLevels
## 
## $nodeHarvest$tags
## [1] "Tree-Based Model"           "Implicit Feature Selection"
## [3] "Ensemble Model"             "Two Class Only"            
## 
## $nodeHarvest$sort
## function (x) 
## x[order(x$maxinter, x$mode), ]
## 
## 
## $null
## $null$label
## [1] "Non-Informative Model"
## 
## $null$library
## NULL
## 
## $null$type
## [1] "Classification" "Regression"    
## 
## $null$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $null$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $null$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     nullModel(y = y, ...)
## }
## 
## $null$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)
## }
## 
## $null$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata, type = "prob")
## }
## 
## $null$levels
## function (x) 
## x$levels
## 
## $null$tags
## NULL
## 
## $null$notes
## [1] "Since this model always predicts the same value, R-squared values will always be estimated to be NA."
## 
## $null$sort
## function (x) 
## x
## 
## 
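The non-informative model is mainly useful as a baseline: if a real model's cross-validated accuracy does not beat it, that model adds nothing over always predicting the majority class. A sketch, assuming caret is installed:

library(caret)
set.seed(1)
fit_null <- train(Species ~ ., data = iris, method = "null",
                  trControl = trainControl(method = "cv", number = 5))
fit_null$results   # baseline accuracy for comparison
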
## $OneR
## $OneR$label
## [1] "Single Rule Classification"
## 
## $OneR$library
## [1] "RWeka"
## 
## $OneR$loop
## NULL
## 
## $OneR$type
## [1] "Classification"
## 
## $OneR$parameters
##   parameter     class label
## 1 parameter character  none
## 
## $OneR$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $OneR$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat), theDots)
##     out <- do.call(RWeka::OneR, modelArgs)
##     out
## }
## 
## $OneR$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $OneR$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "probability")
## }
## 
## $OneR$levels
## function (x) 
## x$obsLevels
## 
## $OneR$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $OneR$tags
## [1] "Rule-Based Model"           "Implicit Feature Selection"
## 
## $OneR$sort
## function (x) 
## x
## 
## 
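The $OneR entry depends on RWeka, which in turn needs a working Java installation. Assuming both are in place, a minimal sketch:

library(caret)
set.seed(1)
fit_oner <- train(Species ~ ., data = iris, method = "OneR",
                  trControl = trainControl(method = "cv", number = 5))
fit_oner$finalModel   # prints the single rule that was learned
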
## $ordinalNet
## $ordinalNet$label
## [1] "Penalized Ordinal Regression"
## 
## $ordinalNet$library
## [1] "ordinalNet" "plyr"      
## 
## $ordinalNet$check
## function (pkg) 
## {
##     requireNamespace("ordinalNet")
##     current <- packageDescription("ordinalNet")$Version
##     expected <- "2.0"
##     if (compareVersion(current, expected) < 0) 
##         stop("This modeling workflow requires ordinalNet version ", 
##             expected, "or greater.", call. = FALSE)
## }
## 
## $ordinalNet$type
## [1] "Classification"
## 
## $ordinalNet$parameters
##   parameter     class               label
## 1     alpha   numeric   Mixing Percentage
## 2  criteria character Selection criterion
## 3      link character       Link Function
## 4    lambda   numeric   Penalty Parameter
## 5 modeltype character          Model Form
## 6    family character        Model Family
## 
## $ordinalNet$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     links <- c("logit", "probit", "cloglog", "cauchit")
##     modeltypes <- c("parallel", "nonparallel", "semiparallel")
##     families <- c("cumulative", "sratio", "cratio", "acat")
##     max_lambda <- max(ordinalNet::ordinalNet(x = x, y = y, alpha = 0.01, 
##         nLambda = 2)$lambdaVals)
##     min_lambda <- 0.005 * max_lambda
##     if (search == "grid") {
##         out <- expand.grid(alpha = seq(0.1, 1, length = len), 
##             criteria = "aic", link = links, lambda = seq(from = min_lambda, 
##                 to = max_lambda, length.out = len), modeltype = modeltypes, 
##             family = families)
##     }
##     else {
##         out <- data.frame(alpha = runif(len, min = 0, 1), criteria = sample(c("aic", 
##             "bic"), size = len, replace = TRUE), link = sample(links, 
##             size = len, replace = TRUE), lambda = runif(len, 
##             min = min_lambda, max = max_lambda), modeltype = sample(modeltypes, 
##             size = len, replace = TRUE), family = sample(families, 
##             size = len, replace = T))
##     }
##     out
## }
## 
## $ordinalNet$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     out <- ordinalNet::ordinalNet(x = x, y = y, alpha = param$alpha, 
##         link = as.character(param$link), lambdaVals = param$lambda, 
##         family = as.character(param$family), parallelTerms = (as.character(param$modeltype) %in% 
##             c("parallel", "semiparallel")), nonparallelTerms = (as.character(param$modeltype) %in% 
##             c("semiparallel", "nonparallel")), ...)
##     out$.criteria <- as.character(param$criteria)
##     out
## }
## 
## $ordinalNet$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit, newdata, criteria = modelFit$.criteria, 
##         type = "class")
##     out <- modelFit$obsLevels[out]
##     out
## }
## 
## $ordinalNet$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit, newdata, criteria = modelFit$.criteria, 
##         type = "response")
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $ordinalNet$predictors
## function (x, lambda = NULL, ...) 
## {
##     betas <- coef(x, criteria = x$.criteria)
##     out <- names(betas)[betas != 0]
##     out[!grepl("^Intercept", out)]
## }
## 
## $ordinalNet$varImp
## function (object, lambda = NULL, ...) 
## {
##     betas <- coef(object, criteria = object$.criteria)
##     betas <- betas[!grepl("^Intercept", names(betas))]
##     out <- data.frame(Overall = abs(betas))
##     rownames(out) <- names(betas)
##     out
## }
## 
## $ordinalNet$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $ordinalNet$tags
## [1] "Generalized Linear Model"   "Implicit Feature Selection"
## [3] "L1 Regularization"          "L2 Regularization"         
## [5] "Linear Classifier"          "Linear Regression"         
## [7] "Ordinal Outcomes"          
## 
## $ordinalNet$sort
## function (x) 
## {
##     model_type_order <- c(parallel = 1, semiparallel = 2, nonparallel = 3)
##     mt <- model_type_order[x$modeltype]
##     a <- x$alpha
##     l <- x$lambda
##     family_order <- c(cratio = 1, sratio = 2, acat = 3, cumulative = 4)
##     f <- family_order[x$family]
##     x[order(mt, a, l, f), ]
## }
## 
## $ordinalNet$notes
## [1] "Requires ordinalNet package version >= 2.0"
## 
## 
## $ordinalRF
## $ordinalRF$label
## [1] "Random Forest"
## 
## $ordinalRF$library
## [1] "e1071"         "ranger"        "dplyr"         "ordinalForest"
## 
## $ordinalRF$check
## function (pkg) 
## {
##     requireNamespace("ordinalForest")
##     current <- packageDescription("ordinalForest")$Version
##     expected <- "2.1"
##     if (compareVersion(current, expected) < 0) 
##         stop("This modeling workflow requires ordinalForest version ", 
##             expected, "or greater.", call. = FALSE)
## }
## 
## $ordinalRF$loop
## NULL
## 
## $ordinalRF$type
## [1] "Classification"
## 
## $ordinalRF$parameters
##     parameter   class                                         label
## 1       nsets numeric # score sets tried prior to the approximation
## 2 ntreeperdiv numeric                        # of trees (small RFs)
## 3  ntreefinal numeric                         # of trees (final RF)
## 
## $ordinalRF$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(nsets = seq_len(len) * 50, ntreeperdiv = seq_len(len) * 
##             50, ntreefinal = seq_len(len) * 200)
##     }
##     else {
##         out <- data.frame(nsets = sample(20, size = len, replace = TRUE) * 
##             50, ntreeperdiv = sample(20, size = len, replace = TRUE) * 
##             50, ntreefinal = sample(2:20, size = len, replace = TRUE) * 
##             200)
##     }
##     out
## }
## 
## $ordinalRF$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if ((!is.data.frame(x)) || dplyr::is.tbl(x)) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     x$.outcome <- y
##     out <- ordinalForest::ordfor(depvar = ".outcome", data = x, 
##         nsets = param$nsets, ntreeperdiv = param$ntreeperdiv, 
##         ntreefinal = param$ntreefinal, ...)
##     out
## }
## 
## $ordinalRF$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if ((!is.data.frame(newdata)) || dplyr::is.tbl(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata)$ypred
##     out
## }
## 
## $ordinalRF$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata)$classprobs
##     colnames(out) <- modelFit$classes
##     as.data.frame(out)
## }
## 
## $ordinalRF$predictors
## function (x, ...) 
## {
##     var_index <- sort(unique(unlist(lapply(x$forestfinal$forest$split.varIDs, 
##         function(x) x))))
##     var_index <- var_index[var_index > 0]
##     x$forestfinal$forest$independent.variable.names[var_index]
## }
## 
## $ordinalRF$varImp
## function (object, ...) 
## {
##     if (length(object$varimp) == 0) 
##         stop("No importance values available")
##     imps <- object$varimp
##     out <- data.frame(Overall = as.vector(imps))
##     rownames(out) <- names(imps)
##     out
## }
## 
## $ordinalRF$levels
## function (x) 
## {
##     if (x$treetype == "Classification") {
##         out <- levels(x$predictions)
##     }
##     else out <- NULL
##     out
## }
## 
## $ordinalRF$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## [5] "Ordinal Outcomes"          
## 
## $ordinalRF$sort
## function (x) 
## {
##     x[order(x[, 1]), ]
## }
## 
## 
## $ORFlog
## $ORFlog$label
## [1] "Oblique Random Forest"
## 
## $ORFlog$library
## [1] "obliqueRF"
## 
## $ORFlog$loop
## NULL
## 
## $ORFlog$type
## [1] "Classification"
## 
## $ORFlog$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 
## $ORFlog$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(mtry = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $ORFlog$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(obliqueRF)
##     obliqueRF::obliqueRF(as.matrix(x), y, training_method = "log", 
##         ...)
## }
## 
## $ORFlog$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $ORFlog$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $ORFlog$levels
## function (x) 
## x$obsLevels
## 
## $ORFlog$notes
## [1] "Unlike other packages used by `train`, the `obliqueRF` package is fully loaded when this model is used."
## 
## $ORFlog$tags
## [1] "Random Forest"              "Oblique Tree"              
## [3] "Logistic Regression"        "Implicit Feature Selection"
## [5] "Ensemble Model"             "Two Class Only"            
## 
## $ORFlog$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $ORFpls
## $ORFpls$label
## [1] "Oblique Random Forest"
## 
## $ORFpls$library
## [1] "obliqueRF"
## 
## $ORFpls$loop
## NULL
## 
## $ORFpls$type
## [1] "Classification"
## 
## $ORFpls$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 
## $ORFpls$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(mtry = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $ORFpls$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(obliqueRF)
##     obliqueRF::obliqueRF(as.matrix(x), y, training_method = "pls", 
##         ...)
## }
## 
## $ORFpls$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $ORFpls$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $ORFpls$levels
## function (x) 
## x$obsLevels
## 
## $ORFpls$notes
## [1] "Unlike other packages used by `train`, the `obliqueRF` package is fully loaded when this model is used."
## 
## $ORFpls$tags
## [1] "Random Forest"              "Oblique Tree"              
## [3] "Partial Least Squares"      "Implicit Feature Selection"
## [5] "Ensemble Model"             "Two Class Only"            
## 
## $ORFpls$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $ORFridge
## $ORFridge$label
## [1] "Oblique Random Forest"
## 
## $ORFridge$library
## [1] "obliqueRF"
## 
## $ORFridge$loop
## NULL
## 
## $ORFridge$type
## [1] "Classification"
## 
## $ORFridge$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 
## $ORFridge$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(mtry = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $ORFridge$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(obliqueRF)
##     obliqueRF::obliqueRF(as.matrix(x), y, training_method = "ridge", 
##         ...)
## }
## 
## $ORFridge$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $ORFridge$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $ORFridge$levels
## function (x) 
## x$obsLevels
## 
## $ORFridge$notes
## [1] "Unlike other packages used by `train`, the `obliqueRF` package is fully loaded when this model is used."
## 
## $ORFridge$tags
## [1] "Random Forest"              "Oblique Tree"              
## [3] "Ridge Regression"           "Implicit Feature Selection"
## [5] "Ensemble Model"             "Two Class Only"            
## [7] "L2 Regularization"         
## 
## $ORFridge$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $ORFsvm
## $ORFsvm$label
## [1] "Oblique Random Forest"
## 
## $ORFsvm$library
## [1] "obliqueRF"
## 
## $ORFsvm$loop
## NULL
## 
## $ORFsvm$type
## [1] "Classification"
## 
## $ORFsvm$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 
## $ORFsvm$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(mtry = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $ORFsvm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(obliqueRF)
##     obliqueRF::obliqueRF(as.matrix(x), y, training_method = "svm", 
##         ...)
## }
## 
## $ORFsvm$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $ORFsvm$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $ORFsvm$levels
## function (x) 
## x$obsLevels
## 
## $ORFsvm$notes
## [1] "Unlike other packages used by `train`, the `obliqueRF` package is fully loaded when this model is used."
## 
## $ORFsvm$tags
## [1] "Random Forest"              "Oblique Tree"              
## [3] "Kernel Method"              "Implicit Feature Selection"
## [5] "Ensemble Model"             "Two Class Only"            
## 
## $ORFsvm$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
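The four `ORF*` specs differ only in the base learner passed as `training_method` ("log", "pls", "ridge", "svm"). A minimal sketch of calling one of them through `caret::train`, assuming the archived `obliqueRF` package is installed, on a hypothetical two-class subset of `iris` (not run here, so no output is shown):

library(caret)
two_class <- iris[iris$Species != "setosa", ]   # these models are two-class only
two_class$Species <- factor(two_class$Species)
orf_fit <- train(Species ~ ., data = two_class, method = "ORFridge", tuneLength = 3)
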
## $ownn
## $ownn$label
## [1] "Optimal Weighted Nearest Neighbor Classifier"
## 
## $ownn$library
## [1] "snn"
## 
## $ownn$loop
## NULL
## 
## $ownn$type
## [1] "Classification"
## 
## $ownn$parameters
##   parameter   class      label
## 1         K numeric #Neighbors
## 
## $ownn$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(K = (5:((2 * len) + 4))[(5:((2 * len) + 
##             4))%%2 > 0])
##     }
##     else {
##         by_val <- if (is.factor(y)) 
##             length(levels(y))
##         else 1
##         out <- data.frame(K = sample(seq(1, floor(nrow(x)/3), 
##             by = by_val), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $ownn$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     if (!(class(x[1, 1]) %in% c("integer", "numeric"))) 
##         stop("predictors should be all numeric")
##     x <- cbind(x, as.numeric(y))
##     colnames(x)[ncol(x)] <- ".outcome"
##     list(dat = x, K = param$K)
## }
## 
## $ownn$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- snn::myownn(train = modelFit$dat, test = newdata, 
##         K = modelFit$K)
##     modelFit$obsLevels[out]
## }
## 
## $ownn$predictors
## function (x, ...) 
## x$xNames
## 
## $ownn$tags
## [1] "Prototype Models"
## 
## $ownn$prob
## NULL
## 
## $ownn$levels
## function (x) 
## x$obsLevels
## 
## $ownn$sort
## function (x) 
## x[order(-x[, 1]), ]
## 
## 
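Note that the `ownn` grid only proposes odd neighbor counts. A sketch under the assumption that the `snn` package is installed; the fit function requires all-numeric predictors, so the matrix interface is used (not executed here):

library(caret)
ownn_fit <- train(x = as.matrix(iris[, 1:4]), y = iris$Species,
                  method = "ownn", tuneLength = 3)   # grid tries K = 5, 7, 9
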
## $pam
## $pam$label
## [1] "Nearest Shrunken Centroids"
## 
## $pam$library
## [1] "pamr"
## 
## $pam$type
## [1] "Classification"
## 
## $pam$parameters
##   parameter   class               label
## 1 threshold numeric Shrinkage Threshold
## 
## $pam$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     cc <- complete.cases(x) & complete.cases(y)
##     x <- x[cc, , drop = FALSE]
##     y <- y[cc]
##     initialThresh <- pamr::pamr.train(list(x = t(x), y = y))$threshold
##     initialThresh <- initialThresh[-c(1, length(initialThresh))]
##     if (search == "grid") {
##         out <- data.frame(threshold = seq(from = min(initialThresh), 
##             to = max(initialThresh), length = len))
##     }
##     else {
##         out <- data.frame(threshold = runif(len, min = min(initialThresh), 
##             max = max(initialThresh)))
##     }
##     out
## }
## 
## $pam$loop
## function (grid) 
## {
##     grid <- grid[order(grid$threshold, decreasing = TRUE), , 
##         drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $pam$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     res <- pamr::pamr.train(list(x = t(x), y = y), threshold = param$threshold, 
##         ...)
##     if (last) {
##         res$xData <- x
##         res$yData <- y
##     }
##     res
## }
## 
## $pam$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- pamr::pamr.predict(modelFit, t(newdata), threshold = modelFit$tuneValue$threshold)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$threshold)) {
##             tmp[[j + 1]] <- pamr::pamr.predict(modelFit, t(newdata), 
##                 threshold = submodels$threshold[j])
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $pam$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- pamr::pamr.predict(modelFit, t(newdata), threshold = modelFit$tuneValue$threshold, 
##         type = "posterior")
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$threshold)) {
##             tmpProb <- pamr::pamr.predict(modelFit, t(newdata), 
##                 threshold = submodels$threshold[j], type = "posterior")
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $pam$predictors
## function (x, newdata = NULL, threshold = NULL, ...) 
## {
##     if (is.null(newdata)) {
##         if (!is.null(x$xData)) 
##             newdata <- x$xData
##         else stop("must supply newdata")
##     }
##     if (is.null(threshold)) {
##         if (!is.null(x$threshold)) 
##             threshold <- x$threshold
##         else stop("must supply threshold")
##     }
##     varIndex <- pamr::pamr.predict(x, newx = newdata, threshold = threshold, 
##         type = "nonzero")
##     colnames(newdata)[varIndex]
## }
## 
## $pam$varImp
## function (object, threshold = NULL, data = NULL, ...) 
## {
##     if (is.null(data)) 
##         data <- object$xData
##     if (is.null(threshold)) 
##         threshold <- object$tuneValue$threshold
##     if (dim(object$centroids)[1] != dim(data)[2]) 
##         stop("the number of columns (=variables) is not consistent with the pamr object")
##     if (is.null(dimnames(data))) {
##         featureNames <- paste("Feature", seq(along = data[1, 
##             ]), sep = "")
##         colnames(data) <- featureNames
##     }
##     else featureNames <- dimnames(data)[[2]]
##     x <- t(data)
##     retainedX <- x[object$gene.subset, object$sample.subset, 
##         drop = F]
##     centroids <- pamr::pamr.predict(object, x, threshold = threshold, 
##         type = "cent")
##     standCentroids <- (centroids - object$centroid.overall)/object$sd
##     rownames(standCentroids) <- featureNames
##     colnames(standCentroids) <- names(object$prior)
##     as.data.frame(standCentroids, stringsAsFactors = TRUE)
## }
## 
## $pam$levels
## function (x) 
## names(x$prior)
## 
## $pam$tags
## [1] "Prototype Models"           "Implicit Feature Selection"
## [3] "Linear Classifier"         
## 
## $pam$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
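The `pam` spec uses the loop/submodels mechanism: a single `pamr.train` fit at the largest threshold is reused by `pamr.predict` for every smaller threshold. A brief, unexecuted sketch (assumes the `pamr` package is installed):

library(caret)
pam_fit <- train(x = as.matrix(iris[, 1:4]), y = iris$Species,
                 method = "pam", tuneLength = 5)
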
## $parRF
## $parRF$label
## [1] "Parallel Random Forest"
## 
## $parRF$library
## [1] "e1071"        "randomForest" "foreach"      "import"      
## 
## $parRF$loop
## NULL
## 
## $parRF$type
## [1] "Classification" "Regression"    
## 
## $parRF$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 
## $parRF$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(mtry = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $parRF$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     workers <- foreach::getDoParWorkers()
##     import::from(foreach, `%dopar%`)
##     theDots <- list(...)
##     theDots$ntree <- if (is.null(theDots$ntree)) 
##         formals(randomForest:::randomForest.default)$ntree
##     else theDots$ntree
##     theDots$x <- x
##     theDots$y <- y
##     theDots$mtry <- param$mtry
##     theDots$ntree <- ceiling(theDots$ntree/workers)
##     iter_seeds <- sample.int(10000, size = workers)
##     out <- foreach::foreach(ntree = 1:workers, .combine = randomForest::combine) %dopar% 
##         {
##             set.seed(iter_seeds[ntree])
##             do.call(randomForest::randomForest, theDots)
##         }
##     if (!inherits(out, "randomForest")) 
##         out <- do.call(randomForest::combine, out)
##     out$call["x"] <- "x"
##     out$call["y"] <- "y"
##     out
## }
## 
## $parRF$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $parRF$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $parRF$predictors
## function (x, ...) 
## {
##     varIndex <- as.numeric(names(table(x$forest$bestvar)))
##     varIndex <- varIndex[varIndex > 0]
##     varsUsed <- names(x$forest$ncat)[varIndex]
##     varsUsed
## }
## 
## $parRF$varImp
## function (object, ...) 
## {
##     varImp <- randomForest::importance(object, ...)
##     if (object$type == "regression") 
##         varImp <- data.frame(Overall = varImp[, "%IncMSE"])
##     else {
##         retainNames <- levels(object$y)
##         if (all(retainNames %in% colnames(varImp))) {
##             varImp <- varImp[, retainNames]
##         }
##         else {
##             varImp <- data.frame(Overall = varImp[, 1])
##         }
##     }
##     out <- as.data.frame(varImp, stringsAsFactors = TRUE)
##     if (dim(out)[2] == 2) {
##         tmp <- apply(out, 1, mean)
##         out[, 1] <- out[, 2] <- tmp
##     }
##     out
## }
## 
## $parRF$levels
## function (x) 
## x$classes
## 
## $parRF$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## 
## $parRF$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## $parRF$oob
## function (x) 
## {
##     out <- switch(x$type, regression = c(sqrt(max(x$mse[length(x$mse)], 
##         0)), x$rsq[length(x$rsq)]), classification = c(1 - x$err.rate[x$ntree, 
##         "OOB"], e1071::classAgreement(x$confusion[, -dim(x$confusion)[2]])[["kappa"]]))
##     names(out) <- if (x$type == "regression") 
##         c("RMSE", "Rsquared")
##     else c("Accuracy", "Kappa")
##     out
## }
## 
## 
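`parRF` splits `ntree` across the registered foreach workers and recombines the forests with `randomForest::combine`, so a parallel backend must be registered before training. A sketch assuming `doParallel` is installed (not run here):

library(caret)
library(doParallel)
cl <- makePSOCKcluster(2)   # two local workers
registerDoParallel(cl)
rf_fit <- train(Species ~ ., data = iris, method = "parRF", tuneLength = 2)
stopCluster(cl)
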
## $PART
## $PART$label
## [1] "Rule-Based Classifier"
## 
## $PART$library
## [1] "RWeka"
## 
## $PART$loop
## NULL
## 
## $PART$type
## [1] "Classification"
## 
## $PART$parameters
##   parameter     class                label
## 1 threshold   numeric Confidence Threshold
## 2    pruned character              Pruning
## 
## $PART$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(threshold = seq(0.01, 0.5, length.out = len), 
##             pruned = c("yes", "no"))
##         if (len == 1) {
##             out <- data.frame(threshold = 0.25, pruned = "yes")
##         }
##     }
##     else {
##         out <- data.frame(threshold = runif(len, 0, 0.5), pruned = sample(c("yes", 
##             "no"), len, replace = TRUE))
##     }
##     return(out)
## }
## 
## $PART$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$U <- ifelse(tolower(param$pruned) == 
##             "no", TRUE, FALSE)
##         theDots$control$C <- param$threshold
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- RWeka::Weka_control(U = ifelse(tolower(param$pruned) == 
##         "no", TRUE, FALSE), C = param$threshold)
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = dat, control = ctl), theDots)
##     out <- do.call(RWeka::PART, modelArgs)
##     out
## }
## 
## $PART$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $PART$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "probability")
## }
## 
## $PART$levels
## function (x) 
## x$obsLevels
## 
## $PART$predictors
## function (x, ...) 
## predictors(x$terms)
## 
## $PART$tags
## [1] "Rule-Based Model"           "Implicit Feature Selection"
## 
## $PART$varImp
## function (object, ...) 
## {
##     dat <- caret:::partRuleSummary(object)
##     out <- dat$varUsage[, "Overall", drop = FALSE]
##     rownames(out) <- dat$varUsage$Var
##     out
## }
## 
## $PART$sort
## function (x) 
## x[order(x$pruned, -x$threshold), ]
## 
## 
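A sketch of tuning the two PART parameters above; it assumes `RWeka` (and therefore a working Java installation) is available, and is not executed here:

library(caret)
part_fit <- train(Species ~ ., data = iris, method = "PART",
                  tuneGrid = expand.grid(threshold = c(0.1, 0.25),
                                         pruned = c("yes", "no")))
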
## $partDSA
## $partDSA$label
## [1] "partDSA"
## 
## $partDSA$library
## [1] "partDSA"
## 
## $partDSA$type
## [1] "Regression"     "Classification"
## 
## $partDSA$parameters
##        parameter   class                         label
## 1 cut.off.growth numeric Number of Terminal Partitions
## 2            MPD numeric    Minimum Percent Difference
## 
## $partDSA$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(cut.off.growth = 1:len, MPD = 0.1)
##     }
##     else {
##         out <- data.frame(cut.off.growth = sample(1:20, size = len, 
##             replace = TRUE), MPD = runif(len, min = 0, max = 0.5))
##     }
##     out
## }
## 
## $partDSA$loop
## function (grid) 
## {
##     grid <- grid[order(grid$MPD, grid$cut.off.growth, decreasing = TRUE), 
##         , drop = FALSE]
##     uniqueMPD <- unique(grid$MPD)
##     loop <- data.frame(MPD = uniqueMPD)
##     loop$cut.off.growth <- NA
##     submodels <- vector(mode = "list", length = length(uniqueMPD))
##     for (i in seq(along = uniqueMPD)) {
##         subCuts <- grid[grid$MPD == uniqueMPD[i], "cut.off.growth"]
##         loop$cut.off.growth[loop$MPD == uniqueMPD[i]] <- subCuts[which.max(subCuts)]
##         submodels[[i]] <- data.frame(cut.off.growth = subCuts[-which.max(subCuts)])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $partDSA$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     partDSA::partDSA(x, y, control = partDSA::DSA.control(cut.off.growth = param$cut.off.growth, 
##         MPD = param$MPD, vfold = 1), ...)
## }
## 
## $partDSA$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     if (!is.null(submodels)) {
##         tmp <- c(modelFit$tuneValue$cut.off.growth, submodels$cut.off.growth)
##         if (modelFit$problemType == "Classification") {
##             out <- partDSA::predict.dsa(modelFit, newdata)
##             if (max(tmp) > length(out)) 
##                 tmp[tmp > length(out)] <- length(out)
##             out <- out[tmp]
##         }
##         else {
##             out <- partDSA::predict.dsa(modelFit, newdata)
##             if (max(tmp) > ncol(out)) 
##                 tmp[tmp > ncol(out)] <- ncol(out)
##             out <- out[, tmp, drop = FALSE]
##             out <- as.list(as.data.frame(out, stringsAsFactors = TRUE))
##         }
##     }
##     else {
##         index <- min(modelFit$cut.off.growth, length(modelFit$test.set.risk.DSA))
##         if (modelFit$problemType == "Classification") {
##             out <- as.character(partDSA::predict.dsa(modelFit, 
##                 newdata)[[index]])
##         }
##         else {
##             out <- partDSA::predict.dsa(modelFit, newdata)[, 
##                 index]
##         }
##     }
##     out
## }
## 
## $partDSA$predictors
## function (x, cuts = NULL, ...) 
## {
##     if (is.null(cuts) & !is.null(x$tuneValue)) {
##         cuts <- x$tuneValue$cut.off.growth[1]
##     }
##     else {
##         if (is.null(cuts)) 
##             stop("please supply a value for 'cuts'")
##     }
##     tmp <- x$var.importance[, cuts]
##     names(tmp)[which(tmp != 0)]
## }
## 
## $partDSA$levels
## function (x) 
## x$obsLevels
## 
## $partDSA$tags
## [1] ""
## 
## $partDSA$prob
## NULL
## 
## $partDSA$varImp
## function (object, cuts = NULL, ...) 
## {
##     if (is.null(cuts) & !is.null(object$tuneValue)) {
##         cuts <- object$tuneValue$cut.off.growth[1]
##     }
##     else {
##         if (is.null(cuts)) 
##             stop("please supply a value for 'cuts'")
##     }
##     tmp <- object$var.importance[, cuts]
##     out <- data.frame(Overall = tmp)
##     rownames(out) <- names(tmp)
##     out
## }
## 
## $partDSA$sort
## function (x) 
## x[order(x$cut.off.growth, x$MPD), ]
## 
## 
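Note that the fit function fixes `vfold = 1` so that resampling is left entirely to `train`. A minimal, unexecuted sketch (assumes the `partDSA` package is installed):

library(caret)
dsa_fit <- train(mpg ~ ., data = mtcars, method = "partDSA",
                 tuneGrid = expand.grid(cut.off.growth = 1:3, MPD = 0.1))
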
## $pcaNNet
## $pcaNNet$label
## [1] "Neural Networks with Feature Extraction"
## 
## $pcaNNet$library
## [1] "nnet"
## 
## $pcaNNet$loop
## NULL
## 
## $pcaNNet$type
## [1] "Classification" "Regression"    
## 
## $pcaNNet$parameters
##   parameter   class         label
## 1      size numeric #Hidden Units
## 2     decay numeric  Weight Decay
## 
## $pcaNNet$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(size = ((1:len) * 2) - 1, decay = c(0, 
##             10^seq(-1, -4, length = len - 1)))
##     }
##     else {
##         out <- data.frame(size = sample(1:20, size = len, replace = TRUE), 
##             decay = 10^runif(len, min = -5, 1))
##     }
##     out
## }
## 
## $pcaNNet$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         out <- caret::pcaNNet(.outcome ~ ., data = dat, weights = wts, 
##             size = param$size, decay = param$decay, ...)
##     }
##     else out <- caret::pcaNNet(.outcome ~ ., data = dat, size = param$size, 
##         decay = param$decay, ...)
##     out
## }
## 
## $pcaNNet$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (modelFit$problemType == "Classification") {
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else {
##         out <- predict(modelFit, newdata, type = "raw")
##     }
##     out
## }
## 
## $pcaNNet$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, type = "prob")
##     if (ncol(as.data.frame(out, stringsAsFactors = TRUE)) == 
##         1) {
##         out <- cbind(out, 1 - out)
##         dimnames(out)[[2]] <- rev(modelFit$obsLevels)
##     }
##     out
## }
## 
## $pcaNNet$predictors
## function (x, ...) 
## rownames(x$pc$rotation)
## 
## $pcaNNet$levels
## function (x) 
## x$model$lev
## 
## $pcaNNet$tags
## [1] "Neural Network"       "Feature Extraction"   "L2 Regularization"   
## [4] "Accepts Case Weights"
## 
## $pcaNNet$sort
## function (x) 
## x[order(x$size, -x$decay), ]
## 
## 
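`pcaNNet` runs PCA on the predictors and then fits `nnet` on the component scores. A sketch, not run here; `trace = FALSE` is simply passed through to `nnet` to silence its iteration log:

library(caret)
nn_fit <- train(Species ~ ., data = iris, method = "pcaNNet",
                tuneGrid = expand.grid(size = c(1, 3, 5), decay = c(0, 0.1)),
                trace = FALSE)
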
## $pcr
## $pcr$label
## [1] "Principal Component Analysis"
## 
## $pcr$library
## [1] "pls"
## 
## $pcr$type
## [1] "Regression"
## 
## $pcr$parameters
##   parameter   class       label
## 1     ncomp numeric #Components
## 
## $pcr$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
##             by = 1))
##     }
##     else {
##         out <- data.frame(ncomp = unique(sample(1:(ncol(x) - 
##             1), size = len, replace = TRUE)))
##     }
##     out
## }
## 
## $pcr$loop
## function (grid) 
## {
##     grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $pcr$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     ncomp <- min(ncol(x), param$ncomp)
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     pls::pcr(.outcome ~ ., data = dat, ncomp = ncomp, ...)
## }
## 
## $pcr$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
##     if (!is.null(submodels)) {
##         tmp <- apply(predict(modelFit, newdata, ncomp = submodels$ncomp), 
##             3, function(x) list(x))
##         tmp <- as.data.frame(tmp, stringsAsFactors = TRUE)
##         out <- c(list(out), as.list(tmp))
##     }
##     out
## }
## 
## $pcr$predictors
## function (x, ...) 
## rownames(x$projection)
## 
## $pcr$tags
## [1] "Linear Regression"  "Feature Extraction"
## 
## $pcr$prob
## NULL
## 
## $pcr$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
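As with `pls` later in this list, the loop fits one model at the largest `ncomp` and extracts predictions for smaller component counts from that same fit. A short sketch, not run here:

library(caret)
pcr_fit <- train(mpg ~ ., data = mtcars, method = "pcr",
                 preProcess = c("center", "scale"), tuneLength = 5)
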
## $pda
## $pda$label
## [1] "Penalized Discriminant Analysis"
## 
## $pda$library
## [1] "mda"
## 
## $pda$loop
## NULL
## 
## $pda$type
## [1] "Classification"
## 
## $pda$parameters
##   parameter   class                         label
## 1    lambda numeric Shrinkage Penalty Coefficient
## 
## $pda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(lambda = c(0, 10^seq(-1, -4, length = len - 
##             1)))
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1))
##     }
##     out
## }
## 
## $pda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         out <- mda::fda(as.formula(".outcome ~ ."), data = dat, 
##             method = mda::gen.ridge, weights = wts, lambda = param$lambda, 
##             ...)
##     }
##     else {
##         out <- mda::fda(as.formula(".outcome ~ ."), data = dat, 
##             method = mda::gen.ridge, lambda = param$lambda, ...)
##     }
##     out
## }
## 
## $pda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $pda$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "posterior")
## 
## $pda$levels
## function (x) 
## x$obsLevels
## 
## $pda$tags
## [1] "Discriminant Analysis" "Polynomial Model"      "Accepts Case Weights" 
## 
## $pda$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $pda2
## $pda2$label
## [1] "Penalized Discriminant Analysis"
## 
## $pda2$library
## [1] "mda"
## 
## $pda2$loop
## NULL
## 
## $pda2$type
## [1] "Classification"
## 
## $pda2$parameters
##   parameter   class              label
## 1        df numeric Degrees of Freedom
## 
## $pda2$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(df = 2 * (0:(len - 1) + 1))
##     }
##     else {
##         out <- data.frame(df = runif(len, min = 1, max = 5))
##     }
##     out
## }
## 
## $pda2$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         out <- mda::fda(as.formula(".outcome ~ ."), data = dat, 
##             method = mda::gen.ridge, weights = wts, df = param$df, 
##             ...)
##     }
##     else {
##         out <- mda::fda(as.formula(".outcome ~ ."), data = dat, 
##             method = mda::gen.ridge, df = param$df, ...)
##     }
##     out
## }
## 
## $pda2$levels
## function (x) 
## x$obsLevels
## 
## $pda2$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $pda2$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "posterior")
## 
## $pda2$tags
## [1] "Discriminant Analysis" "Polynomial Model"      "Accepts Case Weights" 
## 
## $pda2$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
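`pda` and `pda2` wrap the same `mda::fda`/`gen.ridge` fit and differ only in which penalty parameterization is exposed (`lambda` versus `df`). An unexecuted sketch assuming the `mda` package is installed:

library(caret)
pda_fit  <- train(Species ~ ., data = iris, method = "pda",  tuneLength = 5)   # tunes lambda
pda2_fit <- train(Species ~ ., data = iris, method = "pda2", tuneLength = 3)   # tunes df
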
## $penalized
## $penalized$label
## [1] "Penalized Linear Regression"
## 
## $penalized$library
## [1] "penalized"
## 
## $penalized$type
## [1] "Regression"
## 
## $penalized$parameters
##   parameter   class      label
## 1   lambda1 numeric L1 Penalty
## 2   lambda2 numeric L2 Penalty
## 
## $penalized$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda1 = 2^((1:len) - 1), lambda2 = 2^((1:len) - 
##             1))
##     }
##     else {
##         out <- data.frame(lambda1 = 10^runif(len, min = -5, 1), 
##             lambda2 = 10^runif(len, min = -5, 1))
##     }
##     out
## }
## 
## $penalized$loop
## NULL
## 
## $penalized$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     penalized::penalized(y, x, model = "linear", lambda1 = param$lambda1, 
##         lambda2 = param$lambda2, ...)
## }
## 
## $penalized$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- penalized::predict(modelFit, newdata)
##     out <- if (is.vector(out)) 
##         out["mu"]
##     else out[, "mu"]
##     out
## }
## 
## $penalized$prob
## NULL
## 
## $penalized$predictors
## function (x, ...) 
## {
##     out <- coef(x, "all")
##     out <- names(out)[out != 0]
##     out[out != "(Intercept)"]
## }
## 
## $penalized$tags
## [1] "Implicit Feature Selection" "L1 Regularization"         
## [3] "L2 Regularization"          "Linear Regression"         
## 
## $penalized$sort
## function (x) 
## x[order(x$lambda1, x$lambda2), ]
## 
## 
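A sketch of tuning the L1/L2 grid above (assumes the `penalized` package; `trace = FALSE` is passed through to quiet its progress messages; not run here):

library(caret)
pen_fit <- train(mpg ~ ., data = mtcars, method = "penalized",
                 tuneGrid = expand.grid(lambda1 = 2^(0:2), lambda2 = 2^(0:2)),
                 trace = FALSE)
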
## $PenalizedLDA
## $PenalizedLDA$label
## [1] "Penalized Linear Discriminant Analysis"
## 
## $PenalizedLDA$library
## [1] "penalizedLDA" "plyr"        
## 
## $PenalizedLDA$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, .(lambda), function(x) c(K = max(x$K)))
##     if (length(unique(loop$K)) == 1) 
##         return(list(loop = loop, submodels = NULL))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$K)) {
##         index <- which(grid$lambda == loop$lambda[i])
##         subK <- grid[index, "K"]
##         otherK <- data.frame(K = subK[subK != loop$K[i]])
##         if (nrow(otherK) > 0) 
##             submodels[[i]] <- otherK
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $PenalizedLDA$type
## [1] "Classification"
## 
## $PenalizedLDA$parameters
##   parameter   class                   label
## 1    lambda numeric              L1 Penalty
## 2         K numeric #Discriminant Functions
## 
## $PenalizedLDA$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(lambda = 10^seq(-1, -4, length = len), 
##             K = length(levels(y)) - 1)
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1), 
##             K = sample(1:(length(levels(y)) - 1), size = len, 
##                 replace = TRUE))
##     }
##     out
## }
## 
## $PenalizedLDA$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## penalizedLDA::PenalizedLDA(as.matrix(x), as.numeric(y), lambda = param$lambda, 
##     K = param$K, ...)
## 
## $PenalizedLDA$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out0 <- predict(modelFit, newdata)$ypred
##     out <- out0[, ncol(out0)]
##     out <- modelFit$obsLevels[out]
##     if (!is.null(submodels)) {
##         tmp <- out0[, submodels$K, drop = FALSE]
##         tmp <- apply(tmp, 2, function(x, l) l[x], l = modelFit$obsLevels)
##         out <- as.data.frame(cbind(out, tmp), stringsAsFactors = FALSE)
##     }
##     out
## }
## 
## $PenalizedLDA$levels
## function (x) 
## x$obsLevels
## 
## $PenalizedLDA$prob
## NULL
## 
## $PenalizedLDA$tags
## [1] "Discriminant Analysis"      "L1 Regularization"         
## [3] "Implicit Feature Selection" "Linear Classifier"         
## 
## $PenalizedLDA$sort
## function (x) 
## x[order(x$lambda, x$K), ]
## 
## 
## $plr
## $plr$label
## [1] "Penalized Logistic Regression"
## 
## $plr$library
## [1] "stepPlr"
## 
## $plr$loop
## NULL
## 
## $plr$type
## [1] "Classification"
## 
## $plr$parameters
##   parameter     class                label
## 1    lambda   numeric           L2 Penalty
## 2        cp character Complexity Parameter
## 
## $plr$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(cp = "bic", lambda = c(0, 10^seq(-1, 
##             -4, length = len - 1)))
##     }
##     else {
##         out <- data.frame(cp = sample(c("aic", "bic"), size = len, 
##             replace = TRUE), lambda = 10^runif(len, min = -5, 
##             1))
##     }
##     out
## }
## 
## $plr$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     y <- ifelse(y == levels(y)[1], 1, 0)
##     stepPlr::plr(x, y, lambda = param$lambda, cp = as.character(param$cp), 
##         weights = if (!is.null(wts)) 
##             wts
##         else rep(1, length(y)), ...)
## }
## 
## $plr$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     ifelse(stepPlr::predict.plr(modelFit, as.matrix(newdata), 
##         type = "class") == 1, modelFit$obsLevels[1], modelFit$obsLevels[2])
## }
## 
## $plr$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- stepPlr::predict.plr(modelFit, as.matrix(newdata), 
##         type = "response")
##     out <- cbind(out, 1 - out)
##     dimnames(out)[[2]] <- modelFit$obsLevels
##     out
## }
## 
## $plr$levels
## function (x) 
## x$obsLevels
## 
## $plr$tags
## [1] "L2 Regularization"   "Logistic Regression" "Linear Classifier"  
## 
## $plr$sort
## function (x) 
## x[order(-x$lambda), ]
## 
## 
## $pls
## $pls$label
## [1] "Partial Least Squares"
## 
## $pls$library
## [1] "pls"
## 
## $pls$type
## [1] "Regression"     "Classification"
## 
## $pls$parameters
##   parameter   class       label
## 1     ncomp numeric #Components
## 
## $pls$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
##             by = 1))
##     }
##     else {
##         out <- data.frame(ncomp = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $pls$loop
## function (grid) 
## {
##     grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $pls$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     ncomp <- min(ncol(x), param$ncomp)
##     out <- if (is.factor(y)) {
##         plsda(x, y, method = "oscorespls", ncomp = ncomp, ...)
##     }
##     else {
##         dat <- if (is.data.frame(x)) 
##             x
##         else as.data.frame(x, stringsAsFactors = TRUE)
##         dat$.outcome <- y
##         pls::plsr(.outcome ~ ., data = dat, method = "oscorespls", 
##             ncomp = ncomp, ...)
##     }
##     out
## }
## 
## $pls$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- if (modelFit$problemType == "Classification") {
##         if (!is.matrix(newdata)) 
##             newdata <- as.matrix(newdata)
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels))
##         if (modelFit$problemType == "Classification") {
##             if (length(submodels$ncomp) > 1) {
##                 tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
##             }
##             else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
##         }
##         else {
##             tmp <- as.list(as.data.frame(apply(predict(modelFit, 
##                 newdata, ncomp = submodels$ncomp), 3, function(x) list(x))))
##         }
##         out <- c(list(out), tmp)
##     }
##     out
## }
## 
## $pls$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
##     if (length(dim(out)) == 3) {
##         if (dim(out)[1] > 1) {
##             out <- out[, , 1]
##         }
##         else {
##             out <- as.data.frame(t(out[, , 1]), stringsAsFactors = TRUE)
##         }
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$ncomp)) {
##             tmpProb <- predict(modelFit, newdata, type = "prob", 
##                 ncomp = submodels$ncomp[j])
##             if (length(dim(tmpProb)) == 3) {
##                 if (dim(tmpProb)[1] > 1) {
##                   tmpProb <- tmpProb[, , 1]
##                 }
##                 else {
##                   tmpProb <- as.data.frame(t(tmpProb[, , 1]), 
##                     stringsAsFactors = TRUE)
##                 }
##             }
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels])
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $pls$varImp
## function (object, estimate = NULL, ...) 
## {
##     library(pls)
##     modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
##     perf <- pls:::MSEP.mvr(object)$val
##     nms <- dimnames(perf)
##     if (length(nms$estimate) > 1) {
##         pIndex <- if (is.null(estimate)) 
##             1
##         else which(nms$estimate == estimate)
##         perf <- perf[pIndex, , , drop = FALSE]
##     }
##     numResp <- dim(modelCoef)[2]
##     if (numResp <= 2) {
##         modelCoef <- modelCoef[, 1, , drop = FALSE]
##         perf <- perf[, 1, ]
##         delta <- -diff(perf)
##         delta <- delta/sum(delta)
##         out <- data.frame(Overall = apply(abs(modelCoef), 1, 
##             weighted.mean, w = delta))
##     }
##     else {
##         if (dim(perf)[3] <= 2) {
##             perf <- -t(t(apply(perf[1, , ], 1, diff)))
##             perf <- t(t(apply(perf, 1, function(u) u/sum(u))))
##         }
##         else {
##             perf <- -t(apply(perf[1, , ], 1, diff))
##             perf <- t(apply(perf, 1, function(u) u/sum(u)))
##         }
##         out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
##         for (i in 1:numResp) {
##             tmp <- abs(modelCoef[, i, , drop = FALSE])
##             out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
##                 ])
##         }
##         colnames(out) <- dimnames(modelCoef)[[2]]
##         rownames(out) <- dimnames(modelCoef)[[1]]
##     }
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $pls$predictors
## function (x, ...) 
## rownames(x$projection)
## 
## $pls$levels
## function (x) 
## x$obsLevels
## 
## $pls$tags
## [1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
## [4] "Linear Regression"    
## 
## $pls$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
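For factor outcomes the spec calls `plsda`, and for numeric outcomes `pls::plsr`, both with `method = "oscorespls"`. A minimal sketch, not executed here:

library(caret)
pls_fit <- train(Species ~ ., data = iris, method = "pls",
                 preProcess = c("center", "scale"), tuneLength = 3)
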
## $plsRglm
## $plsRglm$label
## [1] "Partial Least Squares Generalized Linear Models "
## 
## $plsRglm$library
## [1] "plsRglm"
## 
## $plsRglm$loop
## NULL
## 
## $plsRglm$type
## [1] "Classification" "Regression"    
## 
## $plsRglm$parameters
##           parameter   class             label
## 1                nt numeric   #PLS Components
## 2 alpha.pvals.expli numeric p-Value threshold
## 
## $plsRglm$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(nt = 1:len, alpha.pvals.expli = 10^(c(-2:(len - 
##             3), 0)))
##     }
##     else {
##         out <- data.frame(nt = sample(1:ncol(x), size = len, 
##             replace = TRUE), alpha.pvals.expli = runif(len, min = 0, 
##             0.2))
##     }
##     out
## }
## 
## $plsRglm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(plsRglm)
##     if (is.factor(y)) {
##         lv <- levels(y)
##         y <- as.numeric(y) - 1
##         dst <- "pls-glm-logistic"
##     }
##     else {
##         lv <- NULL
##         dst <- "pls-glm-gaussian"
##     }
##     theDots <- list(...)
##     if (any(names(theDots) == "modele")) {
##         mod <- plsRglm::plsRglm(y, x, nt = param$nt, pvals.expli = param$alpha.pvals.expli < 
##             1, sparse = param$alpha.pvals.expli < 1, alpha.pvals.expli = param$alpha.pvals.expli, 
##             ...)
##     }
##     else {
##         mod <- plsRglm::plsRglm(y, x, nt = param$nt, modele = dst, 
##             pvals.expli = param$alpha.pvals.expli < 1, sparse = param$alpha.pvals.expli < 
##                 1, alpha.pvals.expli = param$alpha.pvals.expli, 
##             ...)
##     }
##     mod
## }
## 
## $plsRglm$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, type = "response")
##     if (modelFit$problemType == "Classification") {
##         out <- factor(ifelse(out >= 0.5, modelFit$obsLevels[2], 
##             modelFit$obsLevels[1]))
##     }
##     out
## }
## 
## $plsRglm$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, type = "response")
##     out <- cbind(1 - out, out)
##     dimnames(out)[[2]] <- rev(modelFit$obsLevels)
##     out
## }
## 
## $plsRglm$varImp
## NULL
## 
## $plsRglm$predictors
## function (x, ...) 
## {
##     vars <- names(which(coef(x)[[2]][, 1] != 0))
##     vars[vars != "Intercept"]
## }
## 
## $plsRglm$notes
## [1] "Unlike other packages used by `train`, the `plsRglm` package is fully loaded when this model is used."
## 
## $plsRglm$tags
## [1] "Generalized Linear Models" "Partial Least Squares"    
## [3] "Two Class Only"           
## 
## $plsRglm$levels
## function (x) 
## x$lev
## 
## $plsRglm$sort
## function (x) 
## x[order(-x$alpha.pvals.expli, x$nt), ]
## 
## 
## $polr
## $polr$label
## [1] "Ordered Logistic or Probit Regression"
## 
## $polr$library
## [1] "MASS"
## 
## $polr$loop
## NULL
## 
## $polr$type
## [1] "Classification"
## 
## $polr$parameters
##   parameter     class     label
## 1    method character parameter
## 
## $polr$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(method = c("logistic", "probit", "loglog", 
##             "cloglog", "cauchit"))
##     }
##     else {
##         out <- data.frame(method = sample(c("logistic", "probit", 
##             "loglog", "cloglog", "cauchit"), size = len, replace = TRUE))
##     }
## }
## 
## $polr$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     modelArgs <- list(...)
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     modelArgs <- c(list(formula = .outcome ~ ., data = dat, method = as.character(param$method)), 
##         modelArgs)
##     modelArgs$Hess <- TRUE
##     if (!is.null(wts)) 
##         modelArgs$weights <- wts
##     ans <- do.call(MASS::polr, modelArgs)
##     ans$call <- NULL
##     ans
## }
## 
## $polr$predict
## function (modelFit, newdata, preProc = NULL, submodels = NULL) 
## predict(modelFit, newdata = newdata, type = "class")
## 
## $polr$prob
## function (modelFit, newdata, preProc = NULL, submodels = NULL) 
## predict(modelFit, newdata = newdata, type = "probs")
## 
## $polr$varImp
## function (object, ...) 
## {
##     cf <- coef(object)
##     ncf <- length(cf)
##     se <- sqrt(diag(vcov(object)))
##     se <- se[seq_len(ncf)]
##     z <- cf/se
##     out <- data.frame(Overall = abs(z))
##     if (!is.null(names(cf))) 
##         rownames(out) <- names(cf)
##     out
## }
## 
## $polr$predictors
## function (x, ...) 
## predictors(terms(x))
## 
## $polr$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $polr$tags
## [1] "Logistic Regression"  "Linear Classifier"    "Accepts Case Weights"
## [4] "Ordinal Outcomes"    
## 
## $polr$sort
## function (x) 
## x
## 
## 
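`polr` treats the link function itself as the tuning parameter. A sketch using a hypothetical simulated ordinal outcome (requires `MASS`; not run here):

library(caret)
set.seed(1)
dat <- data.frame(x1 = rnorm(200), x2 = rnorm(200))
dat$y <- cut(dat$x1 + dat$x2 + rnorm(200), breaks = 3,
             labels = c("low", "mid", "high"), ordered_result = TRUE)
polr_fit <- train(y ~ ., data = dat, method = "polr")   # tries all five links
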
## $ppr
## $ppr$label
## [1] "Projection Pursuit Regression"
## 
## $ppr$library
## NULL
## 
## $ppr$loop
## NULL
## 
## $ppr$type
## [1] "Regression"
## 
## $ppr$parameters
##   parameter   class   label
## 1    nterms numeric # Terms
## 
## $ppr$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(nterms = 1:len)
## 
## $ppr$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.null(wts)) {
##         out <- ppr(as.matrix(x), y, weights = wts, nterms = param$nterms, 
##             ...)
##     }
##     else {
##         out <- ppr(as.matrix(x), y, nterms = param$nterms, ...)
##     }
##     out
## }
## 
## $ppr$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $ppr$prob
## NULL
## 
## $ppr$predictors
## function (x, ...) 
## x$xnames
## 
## $ppr$tags
## [1] "Feature Extraction"   "Accepts Case Weights"
## 
## $ppr$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
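`ppr` lives in base R's stats package (hence the NULL library field), so it is easy to try directly; `train(method = "ppr")` simply tunes `nterms`. A small example (output omitted here):

ppr_fit <- ppr(mpg ~ ., data = mtcars, nterms = 2)
predict(ppr_fit, mtcars[1:3, ])
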
## $pre
## $pre$library
## [1] "pre"
## 
## $pre$type
## [1] "Classification" "Regression"    
## 
## $pre$parameters
##         parameter     class                          label
## 1        sampfrac   numeric           Subsampling Fraction
## 2        maxdepth   numeric                 Max Tree Depth
## 3       learnrate   numeric                      Shrinkage
## 4            mtry   numeric # Randomly Selected Predictors
## 5        use.grad   logical       Employ Gradient Boosting
## 6 penalty.par.val character       Regularization Parameter
## 
## $pre$grid
## function (x, y, len = NULL, search = "grid", sampfrac = 0.5, 
##     maxdepth = 3L, learnrate = 0.01, mtry = Inf, use.grad = TRUE, 
##     penalty.par.val = "lambda.1se") 
## {
##     if (search == "grid") {
##         if (!is.null(len)) {
##             maxdepth <- c(3L, 4L, 2L, 5L, 1L, 6:len)[1:len]
##             if (len > 2) {
##                 sampfrac <- c(0.5, 0.75, 1)
##             }
##             if (len > 1) {
##                 penalty.par.val = c("lambda.min", "lambda.1se")
##             }
##         }
##         out <- expand.grid(sampfrac = sampfrac, maxdepth = maxdepth, 
##             learnrate = learnrate, mtry = mtry, use.grad = use.grad, 
##             penalty.par.val = penalty.par.val)
##     }
##     else if (search == "random") {
##         out <- data.frame(sampfrac = sample(c(0.5, 0.75, 1), 
##             size = len, replace = TRUE), maxdepth = sample(2L:6L, 
##             size = len, replace = TRUE), learnrate = sample(c(0.001, 
##             0.01, 0.1), size = len, replace = TRUE), mtry = sample(c(ceiling(sqrt(ncol(x))), 
##             ceiling(ncol(x)/3), ncol(x)), size = len, replace = TRUE), 
##             use.grad = sample(c(TRUE, FALSE), size = len, replace = TRUE), 
##             penalty.par.val = sample(c("lambda.1se", "lambda.min"), 
##                 size = len, replace = TRUE))
##     }
##     return(out)
## }
## 
## $pre$fit
## function (x, y, wts = NULL, param, lev = NULL, last = NULL, weights = NULL, 
##     classProbs, ...) 
## {
##     theDots <- list(...)
##     if (!any(names(theDots) == "family")) {
##         theDots$family <- if (is.factor(y)) {
##             if (nlevels(y) == 2L) {
##                 "binomial"
##             }
##             else {
##                 "multinomial"
##             }
##         }
##         else {
##             "gaussian"
##         }
##     }
##     data <- data.frame(x, .outcome = y)
##     formula <- .outcome ~ .
##     if (is.null(weights)) {
##         weights <- rep(1, times = nrow(x))
##     }
##     pre(formula = formula, data = data, weights = weights, sampfrac = param$sampfrac, 
##         maxdepth = param$maxdepth, learnrate = param$learnrate, 
##         mtry = param$mtry, use.grad = param$use.grad, ...)
## }
## 
## $pre$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (is.null(submodels)) {
##         if (modelFit$family %in% c("gaussian", "mgaussian")) {
##             out <- pre:::predict.pre(object = modelFit, newdata = as.data.frame(newdata))
##         }
##         else if (modelFit$family == "poisson") {
##             out <- pre:::predict.pre(object = modelFit, newdata = as.data.frame(newdata), 
##                 type = "response")
##         }
##         else {
##             out <- factor(pre:::predict.pre(object = modelFit, 
##                 newdata = as.data.frame(newdata), type = "class"))
##         }
##     }
##     else {
##         out <- list()
##         for (i in seq(along.with = submodels$penalty.par.val)) {
##             if (modelFit$family %in% c("gaussian", "mgaussian")) {
##                 out[[i]] <- pre:::predict.pre(object = modelFit, 
##                   newdata = as.data.frame(newdata), penalty.par.val = as.character(submodels$penalty.par.val[i]))
##             }
##             else if (modelFit$family == "poisson") {
##                 out[[i]] <- pre:::predict.pre(object = modelFit, 
##                   newdata = as.data.frame(newdata), type = "response", 
##                   penalty.par.val = as.character(submodels$penalty.par.val[i]))
##             }
##             else {
##                 out[[i]] <- factor(pre:::predict.pre(object = modelFit, 
##                   newdata = as.data.frame(newdata), type = "class", 
##                   penalty.par.val = as.character(submodels$penalty.par.val[i])))
##             }
##         }
##     }
##     out
## }
## 
## $pre$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (is.null(submodels)) {
##         probs <- pre:::predict.pre(object = modelFit, newdata = as.data.frame(newdata), 
##             type = "response")
##         if (is.null(ncol(probs)) || ncol(probs) == 1) {
##             probs <- data.frame(1 - probs, probs)
##             colnames(probs) <- levels(modelFit$data[, modelFit$y_names])
##         }
##     }
##     else {
##         probs <- list()
##         for (i in seq(along.with = submodels$penalty.par.val)) {
##             probs[[i]] <- pre:::predict.pre(object = modelFit, 
##                 newdata = as.data.frame(newdata), type = "response", 
##                 penalty.par.val = as.character(submodels$penalty.par.val[i]))
##             if (is.null(ncol(probs[[i]])) || ncol(probs[[i]]) == 
##                 1) {
##                 probs[[i]] <- data.frame(1 - probs[[i]], probs[[i]])
##                 colnames(probs[[i]]) <- levels(modelFit$data[, 
##                   modelFit$y_names])
##             }
##         }
##     }
##     probs
## }
## 
## $pre$sort
## function (x) 
## {
##     ordering <- order(x$maxdepth, x$use.grad, max(x$mtry) - x$mtry, 
##         x$sampfrac != 1L, x$learnrate, decreasing = FALSE)
##     x[ordering, ]
## }
## 
## $pre$loop
## function (fullGrid) 
## {
##     loop_rows <- rownames(unique(fullGrid[, -which(names(fullGrid) == 
##         "penalty.par.val")]))
##     loop <- fullGrid[rownames(fullGrid) %in% loop_rows, ]
##     submodels <- list()
##     for (i in 1:nrow(loop)) {
##         lambda_vals <- character()
##         for (j in 1:nrow(fullGrid)) {
##             if (all(loop[i, -which(colnames(loop) == "penalty.par.val")] == 
##                 fullGrid[j, -which(colnames(fullGrid) == "penalty.par.val")])) {
##                 lambda_vals <- c(lambda_vals, as.character(fullGrid[j, 
##                   "penalty.par.val"]))
##             }
##         }
##         lambda_vals <- lambda_vals[-which(lambda_vals == loop$penalty.par.val[i])]
##         submodels[[i]] <- data.frame(penalty.par.val = lambda_vals)
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $pre$levels
## function (x) 
## {
##     levels(x$data[, x$y_names])
## }
## 
## $pre$tag
## [1] "Rule-Based Model"  "Tree-Based Model"  "L1 regularization"
## [4] "Bagging"           "Boosting"         
## 
## $pre$label
## [1] "Prediction Rule Ensembles"
## 
## $pre$predictors
## function (x, ...) 
## {
##     if (x$family %in% c("gaussian", "poisson", "binomial")) {
##         return(suppressWarnings(importance(x, plot = FALSE, ...)$varimps$varname))
##     }
##     else {
##         warning("Reporting the predictors in the model is not yet available for multinomial and multivariate responses")
##         return(NULL)
##     }
## }
## 
## $pre$varImp
## function (x, ...) 
## {
##     if (x$family %in% c("gaussian", "binomial", "poisson")) {
##         varImp <- pre:::importance(x, plot = FALSE, ...)$varimps
##         varnames <- varImp$varname
##         varImp <- data.frame(Overall = varImp$imp)
##         rownames(varImp) <- varnames
##         return(varImp)
##     }
##     else {
##         warning("Variable importances cannot be calculated for multinomial or mgaussian family")
##         return(NULL)
##     }
## }
## 
## $pre$oob
## NULL
## 
## $pre$notes
## NULL
## 
## $pre$check
## NULL
## 
## $pre$tags
## [1] "Rule-Based Model" "Regularization"  
## 
## 
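Because only `penalty.par.val` varies along one glmnet path, the loop above fits each rule ensemble once and scores both penalty choices from the same fit. A sketch on a hypothetical two-class subset (the `pre` package must be installed; fitting is slow, and it is not run here):

library(caret)
two_class <- iris[iris$Species != "setosa", ]
two_class$Species <- factor(two_class$Species)
pre_grid <- expand.grid(sampfrac = 0.5, maxdepth = 3, learnrate = 0.01,
                        mtry = Inf, use.grad = TRUE,
                        penalty.par.val = c("lambda.1se", "lambda.min"))
pre_fit <- train(Species ~ ., data = two_class, method = "pre", tuneGrid = pre_grid)
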
## $PRIM
## $PRIM$label
## [1] "Patient Rule Induction Method"
## 
## $PRIM$library
## [1] "supervisedPRIM"
## 
## $PRIM$loop
## NULL
## 
## $PRIM$type
## [1] "Classification"
## 
## $PRIM$parameters
##     parameter   class            label
## 1  peel.alpha numeric peeling quantile
## 2 paste.alpha numeric pasting quantile
## 3    mass.min numeric     minimum mass
## 
## $PRIM$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     lowerlimit <- 1/nrow(x)
##     if (search == "grid") {
##         out <- expand.grid(peel.alpha = seq(max(lowerlimit, 0.01), 
##             0.25, length.out = len), paste.alpha = seq(max(lowerlimit, 
##             0.01), 0.25, length.out = len), mass.min = seq(max(lowerlimit, 
##             0.01), 0.25, length.out = len))
##         if (len == 1) {
##             out <- data.frame(peel.alpha = 0.05, paste.alpha = 0.01, 
##                 mass.min = 0.05)
##         }
##     }
##     else {
##         out <- data.frame(peel.alpha = runif(len, min = lowerlimit, 
##             max = 0.25), paste.alpha = runif(len, min = lowerlimit, 
##             max = 0.25), mass.min = runif(len, min = lowerlimit, 
##             max = 0.25))
##     }
##     out
## }
## 
## $PRIM$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     out <- supervisedPRIM::supervisedPRIM(x = x, y = y, peel.alpha = param$peel.alpha, 
##         paste.alpha = param$paste.alpha, mass.min = param$mass.min, 
##         ...)
##     out
## }
## 
## $PRIM$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     supervisedPRIM:::predict.supervisedPRIM(modelFit, newdata = newdata, 
##         classProb = FALSE)
## }
## 
## $PRIM$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- supervisedPRIM:::predict.supervisedPRIM(modelFit, 
##         newdata = newdata, classProb = TRUE)
##     out <- data.frame(out, 1 - out)
##     names(out) <- modelFit$levels[1:2]
##     return(out)
## }
## 
## $PRIM$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else NA
## 
## $PRIM$tags
## [1] "Rule-Based Model"              "Patient Rule Induction Method"
## 
## $PRIM$levels
## function (x) 
## x$lev
## 
## $PRIM$sort
## function (x) 
## x[order(x$peel.alpha, x$paste.alpha, x$mass.min), ]
## 
## 
## $protoclass
## $protoclass$label
## [1] "Greedy Prototype Selection"
## 
## $protoclass$library
## [1] "proxy"      "protoclass"
## 
## $protoclass$loop
## NULL
## 
## $protoclass$type
## [1] "Classification"
## 
## $protoclass$parameters
##   parameter   class          label
## 1       eps numeric      Ball Size
## 2 Minkowski numeric Distance Order
## 
## $protoclass$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(eps = 1:len, Minkowski = 2)
## 
## $protoclass$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     out <- protoclass::protoclass(x = x, y = y, dxz = as.matrix(proxy:::dist(x, 
##         x, method = "Minkowski", p = as.double(param$Minkowski))), 
##         eps = param$eps, ...)
##     out$Minkowski <- as.double(param$Minkowski)
##     out$training <- x
##     out
## }
## 
## $protoclass$levels
## function (x) 
## x$obsLevels
## 
## $protoclass$predict
## function (modelFit, newdata, submodels = NULL) 
## as.character(protoclass::predictwithd.protoclass(modelFit, as.matrix(proxy:::dist(newdata, 
##     modelFit$training, "Minkowski", p = modelFit$Minkowski))))
## 
## $protoclass$prob
## NULL
## 
## $protoclass$tags
## [1] "Prototype Models"
## 
## $protoclass$sort
## function (x) 
## x[order(-x$eps), ]
## 
## 
## $qda
## $qda$label
## [1] "Quadratic Discriminant Analysis"
## 
## $qda$library
## [1] "MASS"
## 
## $qda$loop
## NULL
## 
## $qda$type
## [1] "Classification"
## 
## $qda$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $qda$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $qda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## MASS::qda(x, y, ...)
## 
## $qda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$class
## 
## $qda$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$posterior
## 
## $qda$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else colnames(x$means)
## 
## $qda$tags
## [1] "Discriminant Analysis" "Polynomial Model"     
## 
## $qda$levels
## function (x) 
## names(x$prior)
## 
## $qda$sort
## function (x) 
## x
## 
## 
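`qda` has no real tuning parameter (the placeholder `parameter` column above), so the wrapper is thin; calling `MASS::qda` directly shows the pieces that `predict` and `prob` use (output omitted here):

library(MASS)
qda_fit <- qda(Species ~ ., data = iris)
predict(qda_fit, head(iris))$class   # $posterior holds the class probabilities
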
## $QdaCov
## $QdaCov$label
## [1] "Robust Quadratic Discriminant Analysis"
## 
## $QdaCov$library
## [1] "rrcov"
## 
## $QdaCov$loop
## NULL
## 
## $QdaCov$type
## [1] "Classification"
## 
## $QdaCov$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $QdaCov$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $QdaCov$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## rrcov:::QdaCov(x, y, ...)
## 
## $QdaCov$predict
## function (modelFit, newdata, submodels = NULL) 
## rrcov::predict(modelFit, newdata)@classification
## 
## $QdaCov$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     probs <- rrcov::predict(modelFit, newdata)@posterior
##     colnames(probs) <- names(modelFit@prior)
##     probs
## }
## 
## $QdaCov$tags
## [1] "Discriminant Analysis" "Polynomial Model"     
## 
## $QdaCov$levels
## function (x) 
## names(x@prior)
## 
## $QdaCov$sort
## function (x) 
## x
## 
## 
## $qrf
## $qrf$label
## [1] "Quantile Random Forest"
## 
## $qrf$library
## [1] "quantregForest"
## 
## $qrf$loop
## NULL
## 
## $qrf$type
## [1] "Regression"
## 
## $qrf$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 
## $qrf$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(mtry = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $qrf$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## quantregForest::quantregForest(x, y, mtry = min(param$mtry, ncol(x)), 
##     ...)
## 
## $qrf$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, what = 0.5)
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $qrf$prob
## NULL
## 
## $qrf$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## [5] "Quantile Regression"        "Robust Model"              
## 
## $qrf$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
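The qrf $predict function above asks quantregForest for the 0.5 quantile, so inside train() the model acts as a median-based (robust) regression. A hedged sketch, assuming quantregForest is installed; mtcars and the mtry values are illustrative only:

library(caret)
set.seed(1)
# $fit caps mtry at ncol(x), so oversized values are harmless
fit_qrf <- train(mpg ~ ., data = mtcars, method = "qrf",
                 tuneGrid = data.frame(mtry = c(2, 5, 10)),
                 trControl = trainControl(method = "cv", number = 5))
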
## $qrnn
## $qrnn$label
## [1] "Quantile Regression Neural Network"
## 
## $qrnn$library
## [1] "qrnn"
## 
## $qrnn$loop
## NULL
## 
## $qrnn$type
## [1] "Regression"
## 
## $qrnn$parameters
##   parameter   class          label
## 1  n.hidden numeric  #Hidden Units
## 2   penalty numeric   Weight Decay
## 3       bag logical Bagged Models?
## 
## $qrnn$grid
## function (x, y, len = NULL, search = "grid") 
## expand.grid(n.hidden = ((1:len) * 2) - 1, penalty = c(0, 10^seq(-1, 
##     -4, length = len - 1)), bag = FALSE)
## 
## $qrnn$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     qrnn::qrnn.fit(as.matrix(x), matrix(y), n.hidden = param$n.hidden, 
##         print.level = 0, penalty = param$penalty, bag = param$bag, 
##         ...)
## }
## 
## $qrnn$predict
## function (modelFit, newdata, submodels = NULL) 
## qrnn::qrnn.predict(as.matrix(newdata), modelFit)[, 1]
## 
## $qrnn$prob
## NULL
## 
## $qrnn$tags
## [1] "Neural Network"      "L2 Regularization"   "Quantile Regression"
## [4] "Bagging"             "Ensemble Model"      "Robust Model"       
## 
## $qrnn$sort
## function (x) 
## x[order(x$n.hidden, -x$penalty), ]
## 
## 
## $randomGLM
## $randomGLM$label
## [1] "Ensembles of Generalized Linear Models"
## 
## $randomGLM$library
## [1] "randomGLM"
## 
## $randomGLM$type
## [1] "Regression"     "Classification"
## 
## $randomGLM$parameters
##             parameter   class             label
## 1 maxInteractionOrder numeric Interaction Order
## 
## $randomGLM$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(maxInteractionOrder = 1:min(len, 3))
##     }
##     else {
##         out <- data.frame(maxInteractionOrder = sample(1:3, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $randomGLM$loop
## NULL
## 
## $randomGLM$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(randomGLM)
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     mod <- randomGLM::randomGLM(x = x, y, maxInteractionOrder = param$maxInteractionOrder, 
##         ...)
##     mod
## }
## 
## $randomGLM$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit, newdata)
##     if (modelFit$problemType == "Classification") 
##         out <- modelFit$obsLevel[apply(out, 1, which.max)]
##     out
## }
## 
## $randomGLM$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     predict(modelFit, newdata)
## }
## 
## $randomGLM$predictors
## function (x, s = NULL, ...) 
## {
##     all_pred <- lapply(x$models, function(x) names(coef(x)))
##     all_pred <- unique(unlist(all_pred))
##     all_pred <- strsplit(all_pred, ".times.", fixed = TRUE)
##     all_pred <- unique(unlist(all_pred))
##     all_pred[all_pred != "(Intercept)"]
## }
## 
## $randomGLM$tags
## [1] "Generalized Linear Model" "Linear Classifier"       
## [3] "Ensemble Model"           "Bagging"                 
## 
## $randomGLM$notes
## [1] "Unlike other packages used by `train`, the `randomGLM` package is fully loaded when this model is used."
## 
## $randomGLM$prob
## NULL
## 
## $randomGLM$sort
## function (x) 
## x
## 
## 
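Per the $notes entry, the randomGLM package is fully attached when this model is used, unlike most packages caret calls through ::. A sketch of tuning its one parameter, assuming randomGLM is installed; the two-class subset of iris is purely illustrative:

library(caret)
dat <- subset(iris, Species != "virginica")
dat$Species <- factor(dat$Species)   # drop the unused factor level
set.seed(1)
fit_rglm <- train(Species ~ ., data = dat, method = "randomGLM",
                  tuneGrid = data.frame(maxInteractionOrder = 1:2),
                  trControl = trainControl(method = "cv", number = 5))
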
## $ranger
## $ranger$label
## [1] "Random Forest"
## 
## $ranger$library
## [1] "e1071"  "ranger" "dplyr" 
## 
## $ranger$check
## function (pkg) 
## {
##     requireNamespace("ranger")
##     current <- packageDescription("ranger")$Version
##     expected <- "0.8.0"
##     if (compareVersion(current, expected) < 0) 
##         stop("This modeling workflow requires ranger version ", 
##             expected, "or greater.", call. = FALSE)
## }
## 
## $ranger$loop
## NULL
## 
## $ranger$type
## [1] "Classification" "Regression"    
## 
## $ranger$parameters
##       parameter     class                         label
## 1          mtry   numeric #Randomly Selected Predictors
## 2     splitrule character                Splitting Rule
## 3 min.node.size   numeric             Minimal Node Size
## 
## $ranger$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         srule <- if (is.factor(y)) 
##             "gini"
##         else "variance"
##         out <- expand.grid(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len), min.node.size = ifelse(is.factor(y), 
##             1, 5), splitrule = c(srule, "extratrees")[1:min(2, 
##             len)])
##     }
##     else {
##         srules <- if (is.factor(y)) 
##             c("gini", "extratrees")
##         else c("variance", "extratrees", "maxstat")
##         out <- data.frame(min.node.size = sample(1:(min(20, nrow(x))), 
##             size = len, replace = TRUE), mtry = sample(1:ncol(x), 
##             size = len, replace = TRUE), splitrule = sample(srules, 
##             size = len, replace = TRUE))
##     }
##     out
## }
## 
## $ranger$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if ((!is.data.frame(x)) || dplyr::is.tbl(x)) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     x$.outcome <- y
##     if (!is.null(wts)) {
##         out <- ranger::ranger(dependent.variable.name = ".outcome", 
##             data = x, mtry = min(param$mtry, ncol(x)), min.node.size = param$min.node.size, 
##             splitrule = as.character(param$splitrule), write.forest = TRUE, 
##             probability = classProbs, case.weights = wts, ...)
##     }
##     else {
##         out <- ranger::ranger(dependent.variable.name = ".outcome", 
##             data = x, mtry = min(param$mtry, ncol(x)), min.node.size = param$min.node.size, 
##             splitrule = as.character(param$splitrule), write.forest = TRUE, 
##             probability = classProbs, ...)
##     }
##     if (!last) 
##         out$y <- y
##     out
## }
## 
## $ranger$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if ((!is.data.frame(newdata)) || dplyr::is.tbl(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata)$predictions
##     if (!is.null(modelFit$obsLevels) & modelFit$treetype == "Probability estimation") {
##         out <- colnames(out)[apply(out, 1, which.max)]
##     }
##     out
## }
## 
## $ranger$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)$predictions
## }
## 
## $ranger$predictors
## function (x, ...) 
## {
##     var_index <- sort(unique(unlist(lapply(x$forest$split.varIDs, 
##         function(x) x))))
##     var_index <- var_index[var_index > 0]
##     x$forest$independent.variable.names[var_index]
## }
## 
## $ranger$varImp
## function (object, ...) 
## {
##     if (length(object$variable.importance) == 0) 
##         stop("No importance values available")
##     imps <- ranger:::importance(object)
##     out <- data.frame(Overall = as.vector(imps))
##     rownames(out) <- names(imps)
##     out
## }
## 
## $ranger$levels
## function (x) 
## {
##     if (x$treetype == "Probability estimation") {
##         out <- colnames(x$predictions)
##     }
##     else {
##         if (x$treetype == "Classification") {
##             out <- levels(x$predictions)
##         }
##         else out <- NULL
##     }
##     out
## }
## 
## $ranger$oob
## function (x) 
## {
##     postResample(x$predictions, x$y)
## }
## 
## $ranger$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## [5] "Accepts Case Weights"      
## 
## $ranger$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
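The ranger spec exposes three tuning parameters (mtry, splitrule, min.node.size), and train()'s classProbs setting is passed through as ranger's probability argument in the $fit function above. A sketch, assuming ranger (>= 0.8.0, per the $check function) is installed; the grid values are illustrative:

library(caret)
rf_grid <- expand.grid(mtry = c(2, 4),
                       splitrule = c("gini", "extratrees"),
                       min.node.size = 1)
set.seed(1)
fit_ranger <- train(Species ~ ., data = iris, method = "ranger",
                    tuneGrid = rf_grid,
                    trControl = trainControl(method = "cv", number = 5,
                                             classProbs = TRUE))
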
## $rbf
## $rbf$label
## [1] "Radial Basis Function Network"
## 
## $rbf$library
## [1] "RSNNS"
## 
## $rbf$loop
## NULL
## 
## $rbf$type
## [1] "Classification" "Regression"    
## 
## $rbf$parameters
##   parameter   class         label
## 1      size numeric #Hidden Units
## 
## $rbf$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(size = ((1:len) * 2) - 1)
##     }
##     else {
##         out <- data.frame(size = unique(sample(1:20, size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $rbf$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     theDots <- theDots[!(names(theDots) %in% c("size", "linOut"))]
##     if (any(names(theDots) == "learnFunc")) {
##         theDots$learnFunc <- NULL
##         warning("Cannot over-ride 'learnFunc' argument for this model. RadialBasisLearning is used.")
##     }
##     if (!any(names(theDots) == "initFuncParams")) {
##         theDots$initFuncParams <- c(0, 1, 0, 0.02, 0.04)
##         if (is.factor(y)) 
##             theDots$initFuncParams[1:2] <- c(-4, 4)
##     }
##     if (!any(names(theDots) == "learnFuncParams")) {
##         theDots$learnFuncParams <- c(1e-08, 0, 1e-08, 0.1, 0.8)
##     }
##     if (is.factor(y)) {
##         y <- RSNNS:::decodeClassLabels(y)
##         lin <- FALSE
##     }
##     else lin <- TRUE
##     args <- list(x = x, y = y, size = param$size, linOut = lin)
##     args <- c(args, theDots)
##     do.call(RSNNS::rbf, args)
## }
## 
## $rbf$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     if (modelFit$problemType == "Classification") {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     else out <- out[, 1]
##     out
## }
## 
## $rbf$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $rbf$levels
## function (x) 
## x$obsLevels
## 
## $rbf$tags
## [1] "Neural Network"        "L2 Regularization"     "Radial Basis Function"
## 
## $rbf$sort
## function (x) 
## x[order(x$size), ]
## 
## 
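The only tunable for the RSNNS radial basis function network is size (#hidden units); the grid function above proposes the odd values 1, 3, 5, and so on. A sketch, assuming RSNNS is installed and iris as a stand-in dataset:

library(caret)
set.seed(1)
fit_rbf <- train(Species ~ ., data = iris, method = "rbf",
                 tuneGrid = data.frame(size = c(1, 3, 5)),
                 trControl = trainControl(method = "cv", number = 5))
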
## $rbfDDA
## $rbfDDA$label
## [1] "Radial Basis Function Network"
## 
## $rbfDDA$library
## [1] "RSNNS"
## 
## $rbfDDA$loop
## NULL
## 
## $rbfDDA$type
## [1] "Regression"     "Classification"
## 
## $rbfDDA$parameters
##           parameter   class                                    label
## 1 negativeThreshold numeric Activation Limit for Conflicting Classes
## 
## $rbfDDA$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(negativeThreshold = 10^(-(1:len)))
##     }
##     else {
##         out <- data.frame(negativeThreshold = runif(len, min = 0, 
##             3))
##     }
##     out
## }
## 
## $rbfDDA$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "learnFunc")) {
##         theDots$learnFunc <- NULL
##         warning("Cannot over-ride 'learnFunc' argument for this model. RBF-DDA is used.")
##     }
##     if (any(names(theDots) == "learnFuncParams")) {
##         theDots$learnFuncParams[2] <- param$negativeThreshold
##     }
##     else theDots$learnFuncParams <- c(0.4, param$negativeThreshold, 
##         5)
##     y <- RSNNS:::decodeClassLabels(y)
##     args <- list(x = x, y = y)
##     args <- c(args, theDots)
##     do.call(RSNNS::rbfDDA, args)
## }
## 
## $rbfDDA$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     if (modelFit$problemType == "Classification") {
##         out <- modelFit$obsLevels[apply(out, 1, which.max)]
##     }
##     else out <- out[, 1]
##     out
## }
## 
## $rbfDDA$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $rbfDDA$levels
## function (x) 
## x$obsLevels
## 
## $rbfDDA$tags
## [1] "Neural Network"        "L2 Regularization"     "Radial Basis Function"
## 
## $rbfDDA$sort
## function (x) 
## x[order(-x$negativeThreshold), ]
## 
## 
## $Rborist
## $Rborist$label
## [1] "Random Forest"
## 
## $Rborist$library
## [1] "Rborist"
## 
## $Rborist$loop
## NULL
## 
## $Rborist$type
## [1] "Classification" "Regression"    
## 
## $Rborist$parameters
##   parameter   class                         label
## 1 predFixed numeric #Randomly Selected Predictors
## 2   minNode numeric             Minimal Node Size
## 
## $Rborist$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(predFixed = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len), minNode = ifelse(is.factor(y), 
##             2, 3))
##     }
##     else {
##         out <- data.frame(predFixed = sample(1:ncol(x), size = len, 
##             replace = TRUE), minNode = sample(1:(min(20, nrow(x))), 
##             size = len, replace = TRUE))
##     }
##     out
## }
## 
## $Rborist$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     Rborist::Rborist(x, y, predFixed = param$predFixed, minNode = param$minNode, 
##         ...)
## }
## 
## $Rborist$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$yPred
##     if (modelFit$problemType == "Classification") 
##         out <- modelFit$obsLevels[out]
##     out
## }
## 
## $Rborist$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$census
##     out <- t(apply(out, 1, function(x) x/sum(x)))
##     out
## }
## 
## $Rborist$predictors
## function (x, ...) 
## x$xNames[x$training$info != 0]
## 
## $Rborist$varImp
## function (object, ...) 
## {
##     out <- data.frame(Overall = object$training$info)
##     rownames(out) <- object$xNames
##     out
## }
## 
## $Rborist$levels
## function (x) 
## colnames(x$validation$confusion)
## 
## $Rborist$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## 
## $Rborist$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## $Rborist$oob
## function (x) 
## {
##     out <- switch(x$problemType, Regression = c(sqrt(x$validation$mse), 
##         x$validation$rsq), Classification = c(sum(diag(x$validation$confusion))/sum(x$validation$confusion), 
##         e1071::classAgreement(x$validation$confusion)[["kappa"]]))
##     names(out) <- if (x$problemType == "Regression") 
##         c("RMSE", "Rsquared")
##     else c("Accuracy", "Kappa")
##     out
## }
## 
## 
## $rda
## $rda$label
## [1] "Regularized Discriminant Analysis"
## 
## $rda$library
## [1] "klaR"
## 
## $rda$loop
## NULL
## 
## $rda$type
## [1] "Classification"
## 
## $rda$parameters
##   parameter   class  label
## 1     gamma numeric  Gamma
## 2    lambda numeric Lambda
## 
## $rda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(gamma = seq(0, 1, length = len), lambda = seq(0, 
##             1, length = len))
##     }
##     else {
##         out <- data.frame(gamma = runif(len, min = 0, max = 1), 
##             lambda = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $rda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     klaR:::rda(x, y, gamma = param$gamma, lambda = param$lambda, 
##         ...)
## }
## 
## $rda$predict
## function (modelFit, newdata, submodels = NULL) 
## klaR:::predict.rda(modelFit, newdata)$class
## 
## $rda$prob
## function (modelFit, newdata, submodels = NULL) 
## klaR:::predict.rda(modelFit, newdata)$posterior
## 
## $rda$predictors
## function (x, ...) 
## x$varnames
## 
## $rda$tags
## [1] "Discriminant Analysis" "Polynomial Model"      "Regularization"       
## [4] "Linear Classifier"    
## 
## $rda$levels
## function (x) 
## names(x$prior)
## 
## $rda$sort
## function (x) 
## {
##     x[order(-x$lambda, x$gamma), ]
## }
## 
## 
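Both rda tuning parameters lie in [0, 1], as the grid function above shows; gamma and lambda regularize the class covariance estimates in Friedman's RDA. A sketch with a small explicit grid, assuming klaR is installed:

library(caret)
set.seed(1)
fit_rda <- train(Species ~ ., data = iris, method = "rda",
                 tuneGrid = expand.grid(gamma = c(0, 0.5),
                                        lambda = c(0.5, 1)),
                 trControl = trainControl(method = "cv", number = 5))
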
## $regLogistic
## $regLogistic$label
## [1] "Regularized Logistic Regression"
## 
## $regLogistic$library
## [1] "LiblineaR"
## 
## $regLogistic$type
## [1] "Classification"
## 
## $regLogistic$parameters
##   parameter     class         label
## 1      cost   numeric          Cost
## 2      loss character Loss Function
## 3   epsilon   numeric     Tolerance
## 
## $regLogistic$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(cost = 2^((1:len) - ceiling(len * 
##             0.5)), loss = c("L1", "L2_dual", "L2_primal"), epsilon = signif(0.01 * 
##             (10^((1:len) - ceiling(len * 0.5))), 2))
##     }
##     else {
##         out <- data.frame(cost = 2^runif(len, min = -10, max = 10), 
##             loss = sample(c("L1", "L2_dual", "L2_primal"), size = len, 
##                 replace = TRUE), epsilon = 10^runif(len, min = -10, 
##                 max = 10))
##     }
##     out
## }
## 
## $regLogistic$loop
## NULL
## 
## $regLogistic$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!(param$loss %in% c("L1", "L2_dual", "L2_primal"))) {
##         stop("Loss function is not recognised.", call. = FALSE)
##     }
##     if (!is.factor(y)) {
##         stop("y is not recognised as a factor", call. = FALSE)
##     }
##     model_type <- ifelse(param$loss == "L1", 6, ifelse(param$loss == 
##         "L2_primal", 0, 7))
##     out <- LiblineaR::LiblineaR(data = as.matrix(x), target = y, 
##         cost = param$cost, epsilon = param$epsilon, type = model_type, 
##         ...)
##     out
## }
## 
## $regLogistic$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     LiblineaR:::predict.LiblineaR(modelFit, newdata)$predictions
## }
## 
## $regLogistic$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     LiblineaR:::predict.LiblineaR(modelFit, newdata, proba = TRUE)$probabilities
## }
## 
## $regLogistic$predictors
## function (x, ...) 
## {
##     out <- colnames(x$W)
##     out[out != "Bias"]
## }
## 
## $regLogistic$tags
## [1] "Linear Classifier" "Robust Methods"    "L1 Regularization"
## [4] "L2 Regularization"
## 
## $regLogistic$levels
## function (x) 
## x$levels
## 
## $regLogistic$sort
## function (x) 
## {
##     x[order(x$cost), ]
## }
## 
## 
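The $fit function above maps loss to LiblineaR's type codes (L1 -> 6, L2_primal -> 0, L2_dual -> 7) and rejects anything else. A sketch, assuming LiblineaR is installed; the two-class data and grid values are illustrative:

library(caret)
dat <- subset(iris, Species != "virginica")
dat$Species <- factor(dat$Species)
set.seed(1)
fit_rl <- train(Species ~ ., data = dat, method = "regLogistic",
                tuneGrid = expand.grid(cost = 2^(-2:2),
                                       loss = "L2_primal",
                                       epsilon = 0.01),
                trControl = trainControl(method = "cv", number = 5))
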
## $relaxo
## $relaxo$label
## [1] "Relaxed Lasso"
## 
## $relaxo$library
## [1] "relaxo" "plyr"  
## 
## $relaxo$type
## [1] "Regression"
## 
## $relaxo$parameters
##   parameter   class                label
## 1    lambda numeric    Penalty Parameter
## 2       phi numeric Relaxation Parameter
## 
## $relaxo$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     tmp <- relaxo::relaxo(as.matrix(x), y)
##     lambdas <- log10(tmp$lambda)[-c(1, length(tmp$lambda))]
##     if (search == "grid") {
##         out <- expand.grid(phi = seq(0.1, 0.9, length = len), 
##             lambda = 10^seq(min(lambdas), quantile(lambdas, probs = 0.9), 
##                 length = len))
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = min(lambdas), 
##             max = max(lambdas)), phi = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $relaxo$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, plyr::.(phi), function(x) c(lambda = max(x$lambda)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = submodels)) {
##         submodels[[i]] <- data.frame(lambda = subset(grid, subset = phi == 
##             loop$phi[i] & lambda < loop$lambda[i])$lambda)
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $relaxo$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     relaxo::relaxo(as.matrix(x), y, phi = param$phi, ...)
## }
## 
## $relaxo$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, as.matrix(newdata), lambda = min(max(modelFit$lambda), 
##         modelFit$tuneValue$lambda), phi = modelFit$tuneValue$phi)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$lambda)) {
##             tmp[[j + 1]] <- predict(modelFit, as.matrix(newdata), 
##                 lambda = min(max(modelFit$lambda), submodels$lambda[j]), 
##                 phi = modelFit$tuneValue$phi)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $relaxo$prob
## NULL
## 
## $relaxo$tags
## [1] "Implicit Feature Selection" "L1 Regularization"         
## [3] "L2 Regularization"          "Linear Regression"         
## 
## $relaxo$sort
## function (x) 
## x[order(x$phi, -x$lambda), ]
## 
## 
## $rf
## $rf$label
## [1] "Random Forest"
## 
## $rf$library
## [1] "randomForest"
## 
## $rf$loop
## NULL
## 
## $rf$type
## [1] "Classification" "Regression"    
## 
## $rf$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 
## $rf$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(mtry = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $rf$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## randomForest::randomForest(x, y, mtry = param$mtry, ...)
## 
## $rf$predict
## function (modelFit, newdata, submodels = NULL) 
## if (!is.null(newdata)) predict(modelFit, newdata) else predict(modelFit)
## 
## $rf$prob
## function (modelFit, newdata, submodels = NULL) 
## if (!is.null(newdata)) predict(modelFit, newdata, type = "prob") else predict(modelFit, 
##     type = "prob")
## 
## $rf$predictors
## function (x, ...) 
## {
##     varIndex <- as.numeric(names(table(x$forest$bestvar)))
##     varIndex <- varIndex[varIndex > 0]
##     varsUsed <- names(x$forest$ncat)[varIndex]
##     varsUsed
## }
## 
## $rf$varImp
## function (object, ...) 
## {
##     varImp <- randomForest::importance(object, ...)
##     if (object$type == "regression") {
##         if ("%IncMSE" %in% colnames(varImp)) {
##             varImp <- data.frame(Overall = varImp[, "%IncMSE"])
##         }
##         else {
##             varImp <- data.frame(Overall = varImp[, 1])
##         }
##     }
##     else {
##         retainNames <- levels(object$y)
##         if (all(retainNames %in% colnames(varImp))) {
##             varImp <- varImp[, retainNames]
##         }
##         else {
##             varImp <- data.frame(Overall = varImp[, 1])
##         }
##     }
##     out <- as.data.frame(varImp, stringsAsFactors = TRUE)
##     if (dim(out)[2] == 2) {
##         tmp <- apply(out, 1, mean)
##         out[, 1] <- out[, 2] <- tmp
##     }
##     out
## }
## 
## $rf$levels
## function (x) 
## x$classes
## 
## $rf$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## 
## $rf$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## $rf$oob
## function (x) 
## {
##     out <- switch(x$type, regression = c(sqrt(max(x$mse[length(x$mse)], 
##         0)), x$rsq[length(x$rsq)]), classification = c(1 - x$err.rate[x$ntree, 
##         "OOB"], e1071::classAgreement(x$confusion[, -dim(x$confusion)[2]])[["kappa"]]))
##     names(out) <- if (x$type == "regression") 
##         c("RMSE", "Rsquared")
##     else c("Accuracy", "Kappa")
##     out
## }
## 
## 
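Because the rf spec defines an $oob element, caret can score the mtry candidates with randomForest's out-of-bag error instead of resampling, which is much cheaper. A sketch, assuming randomForest is installed:

library(caret)
set.seed(1)
fit_rf <- train(Species ~ ., data = iris, method = "rf",
                tuneGrid = data.frame(mtry = 1:4),
                trControl = trainControl(method = "oob"))
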
## $rFerns
## $rFerns$label
## [1] "Random Ferns"
## 
## $rFerns$library
## [1] "rFerns"
## 
## $rFerns$loop
## NULL
## 
## $rFerns$type
## [1] "Classification"
## 
## $rFerns$parameters
##   parameter   class      label
## 1     depth numeric Fern Depth
## 
## $rFerns$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(depth = unique(floor(seq(1, 16, length = len))))
##     }
##     else {
##         out <- data.frame(depth = unique(sample(1:16, size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $rFerns$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     rFerns::rFerns(x, y, depth = param$depth, ...)
## }
## 
## $rFerns$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $rFerns$levels
## function (x) 
## x$obsLevels
## 
## $rFerns$prob
## NULL
## 
## $rFerns$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## 
## $rFerns$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $RFlda
## $RFlda$label
## [1] "Factor-Based Linear Discriminant Analysis"
## 
## $RFlda$library
## [1] "HiDimDA"
## 
## $RFlda$loop
## NULL
## 
## $RFlda$type
## [1] "Classification"
## 
## $RFlda$parameters
##   parameter   class     label
## 1         q numeric # Factors
## 
## $RFlda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(q = 1:len)
##     }
##     else {
##         out <- data.frame(q = unique(sample(1:10, size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $RFlda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## HiDimDA::RFlda(x, y, q = param$q, maxq = param$q, ...)
## 
## $RFlda$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$class
##     out <- modelFit$obsLevels[as.numeric(out)]
##     out
## }
## 
## $RFlda$levels
## function (x) 
## x$obsLevels
## 
## $RFlda$prob
## NULL
## 
## $RFlda$tags
## [1] "Discriminant Analysis" "Linear Classifier"    
## 
## $RFlda$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $rfRules
## $rfRules$label
## [1] "Random Forest Rule-Based Model"
## 
## $rfRules$library
## [1] "randomForest" "inTrees"      "plyr"        
## 
## $rfRules$type
## [1] "Classification" "Regression"    
## 
## $rfRules$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 2  maxdepth numeric            Maximum Rule Depth
## 
## $rfRules$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len), maxdepth = (1:len) + 
##             1)
##     }
##     else {
##         out <- data.frame(mtry = sample(1:ncol(x), size = len, 
##             replace = TRUE), maxdepth = sample(1:15, size = len, 
##             replace = TRUE))
##     }
##     out
## }
## 
## $rfRules$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, c("mtry"), function(x) c(maxdepth = max(x$maxdepth)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$maxdepth)) {
##         index <- which(grid$mtry == loop$mtry[i])
##         trees <- grid[index, "maxdepth"]
##         submodels[[i]] <- data.frame(maxdepth = trees[trees != 
##             loop$maxdepth[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $rfRules$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.data.frame(x) | inherits(x, "tbl_df")) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     RFor <- randomForest::randomForest(x, y, mtry = min(param$mtry, 
##         ncol(x)), ...)
##     treeList <- inTrees::RF2List(RFor)
##     exec <- inTrees::extractRules(treeList, x, maxdepth = param$maxdepth, 
##         ntree = RFor$ntree)
##     ruleMetric <- inTrees::getRuleMetric(exec, x, y)
##     ruleMetric <- inTrees::pruneRule(ruleMetric, x, y)
##     ruleMetric <- inTrees::selectRuleRRF(ruleMetric, x, y)
##     out <- list(model = inTrees::buildLearner(ruleMetric, x, 
##         y))
##     if (!last) {
##         out$rf <- treeList
##         out$x <- x
##         out$y <- y
##         out$trees <- RFor$ntree
##     }
##     out
## }
## 
## $rfRules$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata) | inherits(newdata, "tbl_df")) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- inTrees::applyLearner(modelFit$model, newdata)
##     if (modelFit$problemType == "Regression") 
##         out <- as.numeric(out)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- if (is.matrix(out)) 
##             out[, 1]
##         else out
##         for (i in seq(along = submodels$maxdepth)) {
##             exec <- inTrees::extractRules(modelFit$rf, modelFit$x, 
##                 maxdepth = submodels$maxdepth[i], ntree = modelFit$trees)
##             ruleMetric <- inTrees::getRuleMetric(exec, modelFit$x, 
##                 modelFit$y)
##             ruleMetric <- inTrees::pruneRule(ruleMetric, modelFit$x, 
##                 modelFit$y)
##             ruleMetric <- inTrees::selectRuleRRF(ruleMetric, 
##                 modelFit$x, modelFit$y)
##             mod <- inTrees::buildLearner(ruleMetric, modelFit$x, 
##                 modelFit$y)
##             tmp[[i + 1]] <- inTrees::applyLearner(mod, newdata)
##             if (modelFit$problemType == "Regression") 
##                 tmp[[i + 1]] <- as.numeric(tmp[[i + 1]])
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rfRules$prob
## NULL
## 
## $rfRules$predictors
## function (x, ...) 
## {
##     split_up <- strsplit(x$model[, "condition"], "&")
##     isolate <- function(x) {
##         index <- gregexpr("]", x, fixed = TRUE)
##         out <- NULL
##         for (i in seq_along(index)) {
##             if (all(index[[i]] > 0)) {
##                 tmp <- substring(x[i], 1, index[[i]][1])
##                 tmp <- gsub("(X)|(\\[)|(\\])|(,)|( )", "", tmp)
##                 tmp <- tmp[tmp != ""]
##                 out <- c(out, as.numeric(tmp))
##             }
##         }
##         as.numeric(unique(out))
##     }
##     var_index <- unique(unlist(lapply(split_up, isolate)))
##     if (length(var_index) > 0) 
##         x$xNames[var_index]
##     else NULL
## }
## 
## $rfRules$varImp
## function (object, ...) 
## {
##     split_up <- strsplit(object$model[, "condition"], "&")
##     isolate <- function(x) {
##         index <- gregexpr("]", x, fixed = TRUE)
##         out <- NULL
##         for (i in seq_along(index)) {
##             if (all(index[[i]] > 0)) {
##                 tmp <- substring(x[i], 1, index[[i]][1])
##                 tmp <- gsub("(X)|(\\[)|(\\])|(,)|( )", "", tmp)
##                 tmp <- tmp[tmp != ""]
##                 out <- c(out, as.numeric(tmp))
##             }
##         }
##         as.numeric(unique(out))
##     }
##     var_index <- lapply(split_up, isolate)
##     vars_dat <- lapply(var_index, function(x, p) {
##         out <- rep(0, p)
##         if (length(x) > 0) 
##             out[x] <- 1
##         out
##     }, p = length(object$xNames))
##     vars_dat <- do.call("rbind", vars_dat)
##     colnames(vars_dat) <- object$xNames
##     freqs <- as.numeric(object$model[, "freq"])
##     vars_dat <- vars_dat * freqs
##     var_imp <- apply(vars_dat, 2, sum)
##     out <- data.frame(Overall = as.vector(var_imp))
##     rownames(out) <- names(var_imp)
##     out
## }
## 
## $rfRules$levels
## function (x) 
## x$obsLevels
## 
## $rfRules$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## [5] "Rule-Based Model"          
## 
## $rfRules$sort
## function (x) 
## x[order(x[, "maxdepth"]), ]
## 
## 
## $ridge
## $ridge$label
## [1] "Ridge Regression"
## 
## $ridge$library
## [1] "elasticnet"
## 
## $ridge$type
## [1] "Regression"
## 
## $ridge$parameters
##   parameter   class        label
## 1    lambda numeric Weight Decay
## 
## $ridge$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = c(0, 10^seq(-1, -4, length = len - 
##             1)))
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1))
##     }
##     out
## }
## 
## $ridge$loop
## NULL
## 
## $ridge$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     elasticnet::enet(as.matrix(x), y, lambda = param$lambda)
## }
## 
## $ridge$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     elasticnet::predict.enet(modelFit, newdata, s = 1, mode = "fraction")$fit
## }
## 
## $ridge$predictors
## function (x, s = NULL, ...) 
## {
##     if (is.null(s)) {
##         if (!is.null(x$tuneValue)) {
##             s <- x$tuneValue$.fraction
##         }
##         else stop("must supply a vaue of s")
##         out <- elasticnet::predict.enet(x, s = s, type = "coefficients", 
##             mode = "fraction")$coefficients
##     }
##     else {
##         out <- elasticnet::predict.enet(x, s = s)$coefficients
##     }
##     names(out)[out != 0]
## }
## 
## $ridge$tags
## [1] "Linear Regression" "L2 Regularization"
## 
## $ridge$prob
## NULL
## 
## $ridge$sort
## function (x) 
## x[order(-x$lambda), ]
## 
## 
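The lambda values below mirror the default grid search above: zero plus a short log-spaced sequence. A regression sketch, assuming elasticnet is installed; mtcars is illustrative:

library(caret)
set.seed(1)
fit_ridge <- train(mpg ~ ., data = mtcars, method = "ridge",
                   tuneGrid = data.frame(lambda = c(0, 10^seq(-1, -4,
                                                              length = 4))),
                   trControl = trainControl(method = "cv", number = 5))
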
## $rlda
## $rlda$label
## [1] "Regularized Linear Discriminant Analysis"
## 
## $rlda$library
## [1] "sparsediscrim"
## 
## $rlda$loop
## NULL
## 
## $rlda$type
## [1] "Classification"
## 
## $rlda$parameters
##   parameter     class                 label
## 1 estimator character Regularization Method
## 
## $rlda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     data.frame(estimator = c("Moore-Penrose Pseudo-Inverse", 
##         "Schafer-Strimmer", "Thomaz-Kitani-Gillies"))
## }
## 
## $rlda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (as.character(param$estimator) == "Moore-Penrose Pseudo-Inverse") {
##         out <- sparsediscrim::lda_pseudo(x, y, ...)
##     }
##     else {
##         if (as.character(param$estimator) == "Schafer-Strimmer") {
##             out <- sparsediscrim::lda_schafer(x, y, ...)
##         }
##         else out <- sparsediscrim::lda_thomaz(x, y, ...)
##     }
##     out
## }
## 
## $rlda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$class
## 
## $rlda$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$scores
##     as.data.frame(t(apply(out, 2, function(x) exp(-x)/sum(exp(-x)))), 
##         stringsAsFactors = TRUE)
## }
## 
## $rlda$predictors
## function (x, ...) 
## x$varnames
## 
## $rlda$tags
## [1] "Discriminant Analysis" "Polynomial Model"      "Regularization"       
## [4] "Linear Classifier"    
## 
## $rlda$levels
## function (x) 
## names(x$prior)
## 
## $rlda$sort
## function (x) 
## x
## 
## 
## $rlm
## $rlm$label
## [1] "Robust Linear Model"
## 
## $rlm$library
## [1] "MASS"
## 
## $rlm$loop
## NULL
## 
## $rlm$type
## [1] "Regression"
## 
## $rlm$parameters
##   parameter     class     label
## 1 intercept   logical intercept
## 2       psi character       psi
## 
## $rlm$grid
## function (x, y, len = NULL, search = "grid") 
## expand.grid(intercept = c(TRUE, FALSE), psi = c("psi.huber", 
##     "psi.hampel", "psi.bisquare"))
## 
## $rlm$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     psi <- MASS::psi.huber
##     if (param$psi == "psi.bisquare") 
##         psi <- MASS::psi.bisquare
##     else if (param$psi == "psi.hampel") 
##         psi <- MASS::psi.hampel
##     if (!is.null(wts)) {
##         if (param$intercept) 
##             out <- MASS::rlm(.outcome ~ ., data = dat, weights = wts, 
##                 psi = psi, ...)
##         else out <- MASS::rlm(.outcome ~ 0 + ., data = dat, weights = wts, 
##             psi = psi, ...)
##     }
##     else {
##         if (param$intercept) 
##             out <- MASS::rlm(.outcome ~ ., data = dat, psi = psi, 
##                 ...)
##         else out <- MASS::rlm(.outcome ~ 0 + ., data = dat, psi = psi, 
##             ...)
##     }
##     out
## }
## 
## $rlm$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $rlm$prob
## NULL
## 
## $rlm$varImp
## function (object, ...) 
## {
##     values <- summary(object)$coef
##     varImps <- abs(values[!grepl(rownames(values), pattern = "Intercept"), 
##         grep("value$", colnames(values)), drop = FALSE])
##     out <- data.frame(varImps)
##     colnames(out) <- "Overall"
##     if (!is.null(names(varImps))) 
##         rownames(out) <- names(varImps)
##     out
## }
## 
## $rlm$tags
## [1] "Linear Regression"    "Robust Model"         "Accepts Case Weights"
## 
## $rlm$sort
## function (x) 
## x
## 
## 
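rlm is tagged "Accepts Case Weights", so a weights vector given to train() is forwarded to MASS::rlm through the branch in $rlm$fit above. A sketch, assuming MASS is installed; the uniform random weights are purely illustrative:

library(caret)
set.seed(1)
fit_rlm <- train(mpg ~ ., data = mtcars, method = "rlm",
                 weights = runif(nrow(mtcars)),
                 trControl = trainControl(method = "cv", number = 5))
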
## $rmda
## $rmda$label
## [1] "Robust Mixture Discriminant Analysis"
## 
## $rmda$library
## [1] "robustDA"
## 
## $rmda$loop
## NULL
## 
## $rmda$type
## [1] "Classification"
## 
## $rmda$parameters
##   parameter     class                 label
## 1         K   numeric #Subclasses Per Class
## 2     model character                 Model
## 
## $rmda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     mods <- c("EII", "VII", "EEI", "EVI", "VEI", "VVI")
##     if (search == "grid") {
##         out <- expand.grid(K = (1:len) + 1, model = c("VEV"))
##     }
##     else {
##         out <- data.frame(K = sample(2:10, size = len, replace = TRUE), 
##             model = sample(mods, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $rmda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     mod <- robustDA::rmda(x, as.numeric(y), K = param$K, model = as.character(param$model), 
##         ...)
##     mod$levels <- levels(y)
##     mod
## }
## 
## $rmda$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$cls
##     factor(modelFit$levels[out], levels = modelFit$levels)
## }
## 
## $rmda$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$P
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $rmda$varImp
## NULL
## 
## $rmda$predictors
## function (x, ...) 
## colnames(x$prms$data)
## 
## $rmda$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $rmda$tags
## [1] "Discriminant Analysis" "Mixture Model"         "Robust Methods"       
## 
## $rmda$sort
## function (x) 
## x
## 
## 
## $rocc
## $rocc$label
## [1] "ROC-Based Classifier"
## 
## $rocc$library
## [1] "rocc"
## 
## $rocc$loop
## NULL
## 
## $rocc$type
## [1] "Classification"
## 
## $rocc$parameters
##   parameter   class               label
## 1    xgenes numeric #Variables Retained
## 
## $rocc$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(xgenes = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(xgenes = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $rocc$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     newY <- factor(ifelse(y == levels(y)[1], 1, 0), levels = c("0", 
##         "1"))
##     rocc::tr.rocc(g = t(as.matrix(x)), out = newY, xgenes = param$xgenes)
## }
## 
## $rocc$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     tmp <- rocc::p.rocc(modelFit, t(as.matrix(newdata)))
##     factor(ifelse(tmp == "1", modelFit$obsLevels[1], modelFit$obsLevels[2]), 
##         levels = modelFit$obsLevels)
## }
## 
## $rocc$levels
## function (x) 
## x$obsLevels
## 
## $rocc$prob
## NULL
## 
## $rocc$predictors
## function (x, ...) 
## x$genes
## 
## $rocc$tags
## [1] "ROC Curves"
## 
## $rocc$sort
## function (x) 
## x[order(x$xgenes), ]
## 
## 
## $rotationForest
## $rotationForest$label
## [1] "Rotation Forest"
## 
## $rotationForest$library
## [1] "rotationForest"
## 
## $rotationForest$type
## [1] "Classification"
## 
## $rotationForest$parameters
##   parameter   class             label
## 1         K numeric #Variable Subsets
## 2         L numeric     Ensemble Size
## 
## $rotationForest$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     feas_k <- 1:15
##     feas_k <- feas_k[ncol(x)%%feas_k == 0]
##     if (search == "grid") {
##         out <- expand.grid(K = feas_k[1:min(len, length(feas_k))], 
##             L = (1:len) * 3)
##     }
##     else {
##         out <- data.frame(K = sample(feas_k, size = len, replace = TRUE), 
##             L = sample(1:100, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $rotationForest$loop
## function (grid) 
## {
##     grid <- grid[order(grid$K, -grid$L, decreasing = TRUE), , 
##         drop = FALSE]
##     unique_k <- unique(grid$K)
##     loop <- data.frame(K = unique_k, L = NA)
##     submodels <- vector(mode = "list", length = length(unique_k))
##     for (i in seq(along = unique_k)) {
##         sub_L <- grid[grid$K == unique_k[i], "L"]
##         loop$L[loop$K == unique_k[i]] <- sub_L[which.max(sub_L)]
##         submodels[[i]] <- data.frame(L = sub_L[-which.max(sub_L)])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $rotationForest$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     param$K <- min(param$K, floor(ncol(x)/2))
##     if (length(lev) != 2) 
##         stop("rotationForest is only implemented for binary classification")
##     y <- ifelse(y == lev[1], 1, 0)
##     if (!is.data.frame(x)) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     rotationForest::rotationForest(x, y, K = param$K, L = param$L, 
##         ...)
## }
## 
## $rotationForest$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata)
##     out <- ifelse(out >= 0.5, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         all_L <- predict(modelFit, newdata, all = TRUE)
##         for (j in seq(along = submodels$L)) {
##             tmp_pred <- apply(all_L[, 1:submodels$L[j], drop = FALSE], 
##                 1, mean)
##             tmp[[j + 1]] <- ifelse(tmp_pred >= 0.5, modelFit$obsLevels[1], 
##                 modelFit$obsLevels[2])
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rotationForest$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     all_L <- predict(modelFit, newdata, all = TRUE)
##     out <- apply(all_L, 1, mean)
##     out <- data.frame(x = out, y = 1 - out)
##     colnames(out) <- modelFit$obsLevels
##     if (!is.null(rownames(newdata))) 
##         rownames(out) <- rownames(newdata)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$L)) {
##             tmp_pred <- apply(all_L[, 1:submodels$L[j], drop = FALSE], 
##                 1, mean)
##             tmp_pred <- data.frame(x = tmp_pred, y = 1 - tmp_pred)
##             colnames(tmp_pred) <- modelFit$obsLevels
##             if (!is.null(rownames(newdata))) 
##                 rownames(tmp_pred) <- rownames(newdata)
##             tmp[[j + 1]] <- tmp_pred
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rotationForest$predictors
## function (x, ...) 
## {
##     non_zero <- function(x) {
##         out <- apply(x, 1, function(x) any(x != 0))
##         names(out)[out]
##     }
##     sort(unique(unlist(lapply(x$loadings, non_zero))))
## }
## 
## $rotationForest$varImp
## function (object, ...) 
## {
##     vis <- lapply(object$models, varImp, scale = FALSE)
##     wgt <- vector(mode = "list", length = length(vis))
##     for (i in seq(along = vis)) {
##         tmp <- vis[[i]]
##         vi1 <- tmp[, 1]
##         names(vi1) <- rownames(tmp)
##         l1 <- object$loadings[[i]]
##         tmp2 <- vi1 %*% abs(as.matrix(l1[names(vi1), ]))
##         tmp2 <- tmp2[, sort(colnames(tmp2))]
##         wgt[[i]] <- tmp2
##     }
##     wgt <- do.call("rbind", wgt)
##     vi <- apply(wgt, 2, mean)
##     out <- data.frame(Overall = vi)
##     rownames(out) <- colnames(wgt)
##     out
## }
## 
## $rotationForest$levels
## function (x) 
## x$obsLevels
## 
## $rotationForest$tags
## [1] "Ensemble Model"             "Implicit Feature Selection"
## [3] "Feature Extraction Models"  "Tree-Based Model"          
## [5] "Two Class Only"            
## 
## $rotationForest$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
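rotationForest is tagged "Two Class Only" and its $fit stops for any other outcome; note also from the grid code above that K should divide the number of predictors evenly. A sketch, assuming the rotationForest package is installed (the two-class iris subset has four numeric predictors, so K = 1 or 2 both qualify):

library(caret)
dat <- subset(iris, Species != "virginica")
dat$Species <- factor(dat$Species)
set.seed(1)
fit_rot <- train(Species ~ ., data = dat, method = "rotationForest",
                 tuneGrid = expand.grid(K = c(1, 2), L = c(3, 6)),
                 trControl = trainControl(method = "cv", number = 5))
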
## $rotationForestCp
## $rotationForestCp$label
## [1] "Rotation Forest"
## 
## $rotationForestCp$library
## [1] "rpart"          "plyr"           "rotationForest"
## 
## $rotationForestCp$type
## [1] "Classification"
## 
## $rotationForestCp$parameters
##   parameter   class                label
## 1         K numeric    #Variable Subsets
## 2         L numeric        Ensemble Size
## 3        cp numeric Complexity Parameter
## 
## $rotationForestCp$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     feas_k <- 1:15
##     feas_k <- feas_k[ncol(x)%%feas_k == 0]
##     if (search == "grid") {
##         out <- expand.grid(K = feas_k[1:min(len, length(feas_k))], 
##             L = (1:len) * 3, cp = unique(seq(0, 0.1, length = len)))
##     }
##     else {
##         out <- data.frame(K = sample(feas_k, size = len, replace = TRUE), 
##             L = sample(10:100, size = len, replace = TRUE), cp = runif(len, 
##                 0, 0.1))
##     }
##     out
## }
## 
## $rotationForestCp$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, plyr::.(cp, K), function(x) c(L = max(x$L)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$L)) {
##         index <- which(grid$cp == loop$cp[i] & grid$K == loop$K[i])
##         bases <- grid[index, "L"]
##         submodels[[i]] <- data.frame(L = bases[bases != loop$L[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $rotationForestCp$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     param$K <- min(param$K, floor(ncol(x)/2))
##     if (length(lev) != 2) 
##         stop("rotationForest is only implemented for binary classification")
##     y <- ifelse(y == lev[1], 1, 0)
##     if (!is.data.frame(x)) 
##         x <- as.data.frame(x, stringsAsFactors = TRUE)
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$cp <- param$cp
##         theDots$control$xval <- 0
##         rpctl <- theDots$control
##     }
##     else rpctl <- rpart::rpart.control(cp = param$cp, xval = 0)
##     rotationForest::rotationForest(x, y, K = param$K, L = param$L, 
##         control = rpctl)
## }
## 
## $rotationForestCp$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata)
##     out <- ifelse(out >= 0.5, modelFit$obsLevels[1], modelFit$obsLevels[2])
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         all_L <- predict(modelFit, newdata, all = TRUE)
##         for (j in seq(along = submodels$L)) {
##             tmp_pred <- apply(all_L[, 1:submodels$L[j], drop = FALSE], 
##                 1, mean)
##             tmp[[j + 1]] <- ifelse(tmp_pred >= 0.5, modelFit$obsLevels[1], 
##                 modelFit$obsLevels[2])
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rotationForestCp$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     all_L <- predict(modelFit, newdata, all = TRUE)
##     out <- apply(all_L, 1, mean)
##     out <- data.frame(x = out, y = 1 - out)
##     colnames(out) <- modelFit$obsLevels
##     if (!is.null(rownames(newdata))) 
##         rownames(out) <- rownames(newdata)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$L)) {
##             tmp_pred <- apply(all_L[, 1:submodels$L[j], drop = FALSE], 
##                 1, mean)
##             tmp_pred <- data.frame(x = tmp_pred, y = 1 - tmp_pred)
##             colnames(tmp_pred) <- modelFit$obsLevels
##             if (!is.null(rownames(newdata))) 
##                 rownames(tmp_pred) <- rownames(newdata)
##             tmp[[j + 1]] <- tmp_pred
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rotationForestCp$predictors
## function (x, ...) 
## {
##     non_zero <- function(x) {
##         out <- apply(x, 1, function(x) any(x != 0))
##         names(out)[out]
##     }
##     sort(unique(unlist(lapply(x$loadings, non_zero))))
## }
## 
## $rotationForestCp$varImp
## function (object, ...) 
## {
##     vis <- lapply(object$models, varImp, scale = FALSE)
##     wgt <- vector(mode = "list", length = length(vis))
##     for (i in seq(along = vis)) {
##         tmp <- vis[[i]]
##         vi1 <- tmp[, 1]
##         names(vi1) <- rownames(tmp)
##         l1 <- object$loadings[[i]]
##         tmp2 <- vi1 %*% abs(as.matrix(l1[names(vi1), ]))
##         tmp2 <- tmp2[, sort(colnames(tmp2))]
##         wgt[[i]] <- tmp2
##     }
##     wgt <- do.call("rbind", wgt)
##     vi <- apply(wgt, 2, mean)
##     out <- data.frame(Overall = vi)
##     rownames(out) <- colnames(wgt)
##     out
## }
## 
## $rotationForestCp$levels
## function (x) 
## x$obsLevels
## 
## $rotationForestCp$tags
## [1] "Ensemble Model"             "Implicit Feature Selection"
## [3] "Feature Extraction Models"  "Tree-Based Model"          
## [5] "Two Class Only"            
## 
## $rotationForestCp$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $rpart
## $rpart$label
## [1] "CART"
## 
## $rpart$library
## [1] "rpart"
## 
## $rpart$type
## [1] "Regression"     "Classification"
## 
## $rpart$parameters
##   parameter   class                label
## 1        cp numeric Complexity Parameter
## 
## $rpart$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     initialFit <- rpart::rpart(.outcome ~ ., data = dat, control = rpart::rpart.control(cp = 0))$cptable
##     initialFit <- initialFit[order(-initialFit[, "CP"]), , drop = FALSE]
##     if (search == "grid") {
##         if (nrow(initialFit) < len) {
##             tuneSeq <- data.frame(cp = seq(min(initialFit[, "CP"]), 
##                 max(initialFit[, "CP"]), length = len))
##         }
##         else tuneSeq <- data.frame(cp = initialFit[1:len, "CP"])
##         colnames(tuneSeq) <- "cp"
##     }
##     else {
##         tuneSeq <- data.frame(cp = unique(sample(initialFit[, 
##             "CP"], size = len, replace = TRUE)))
##     }
##     tuneSeq
## }
## 
## $rpart$loop
## function (grid) 
## {
##     grid <- grid[order(grid$cp, decreasing = FALSE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $rpart$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     cpValue <- if (!last) 
##         param$cp
##     else 0
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$cp <- cpValue
##         theDots$control$xval <- 0
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- rpart::rpart.control(cp = cpValue, xval = 0)
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = if (is.data.frame(x)) x else as.data.frame(x, 
##             stringsAsFactors = TRUE), control = ctl), theDots)
##     modelArgs$data$.outcome <- y
##     out <- do.call(rpart::rpart, modelArgs)
##     if (last) 
##         out <- rpart::prune.rpart(out, cp = param$cp)
##     out
## }
## 
## $rpart$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     pType <- if (modelFit$problemType == "Classification") 
##         "class"
##     else "vector"
##     out <- predict(modelFit, newdata, type = pType)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$cp)) {
##             prunedFit <- rpart::prune.rpart(modelFit, cp = submodels$cp[j])
##             tmp[[j + 1]] <- predict(prunedFit, newdata, type = pType)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rpart$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "prob")
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$cp)) {
##             prunedFit <- rpart::prune.rpart(modelFit, cp = submodels$cp[j])
##             tmpProb <- predict(prunedFit, newdata, type = "prob")
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rpart$predictors
## function (x, surrogate = TRUE, ...) 
## {
##     out <- as.character(x$frame$var)
##     out <- out[!(out %in% c("<leaf>"))]
##     if (surrogate) {
##         splits <- x$splits
##         splits <- splits[splits[, "adj"] > 0, ]
##         out <- c(out, rownames(splits))
##     }
##     unique(out)
## }
## 
## $rpart$varImp
## function (object, surrogates = FALSE, competes = TRUE, ...) 
## {
##     if (nrow(object$splits) > 0) {
##         tmp <- rownames(object$splits)
##         rownames(object$splits) <- 1:nrow(object$splits)
##         splits <- data.frame(object$splits)
##         splits$var <- tmp
##         splits$type <- ""
##         frame <- as.data.frame(object$frame, stringsAsFactors = TRUE)
##         index <- 0
##         for (i in 1:nrow(frame)) {
##             if (frame$var[i] != "<leaf>") {
##                 index <- index + 1
##                 splits$type[index] <- "primary"
##                 if (frame$ncompete[i] > 0) {
##                   for (j in 1:frame$ncompete[i]) {
##                     index <- index + 1
##                     splits$type[index] <- "competing"
##                   }
##                 }
##                 if (frame$nsurrogate[i] > 0) {
##                   for (j in 1:frame$nsurrogate[i]) {
##                     index <- index + 1
##                     splits$type[index] <- "surrogate"
##                   }
##                 }
##             }
##         }
##         splits$var <- factor(as.character(splits$var))
##         if (!surrogates) 
##             splits <- subset(splits, type != "surrogate")
##         if (!competes) 
##             splits <- subset(splits, type != "competing")
##         out <- aggregate(splits$improve, list(Variable = splits$var), 
##             sum, na.rm = TRUE)
##     }
##     else {
##         out <- data.frame(x = numeric(), Variable = character())
##     }
##     allVars <- colnames(attributes(object$terms)$factors)
##     if (!all(allVars %in% out$Variable)) {
##         missingVars <- allVars[!(allVars %in% out$Variable)]
##         zeros <- data.frame(x = rep(0, length(missingVars)), 
##             Variable = missingVars)
##         out <- rbind(out, zeros)
##     }
##     out2 <- data.frame(Overall = out$x)
##     rownames(out2) <- out$Variable
##     out2
## }
## 
## $rpart$levels
## function (x) 
## x$obsLevels
## 
## $rpart$trim
## function (x) 
## {
##     x$call <- list(na.action = (x$call)$na.action)
##     x$x <- NULL
##     x$y <- NULL
##     x$where <- NULL
##     x
## }
## 
## $rpart$tags
## [1] "Tree-Based Model"              "Implicit Feature Selection"   
## [3] "Handle Missing Predictor Data" "Accepts Case Weights"         
## 
## $rpart$sort
## function (x) 
## x[order(x[, 1], decreasing = TRUE), ]
## 
## 
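The `$rpart` elements above are the hooks that `caret::train()` calls when `method = "rpart"`: `$fit` grows the tree at one `cp`, and `$predict`/`$prob` reuse that single fit, pruning it for each other `cp` in `submodels`. A minimal sketch of driving this spec (the data set and 5-fold CV are illustrative; assumes `caret` and `rpart` are installed):

library(caret)
set.seed(1)
fit_cart <- train(Species ~ ., data = iris,
                  method = "rpart",        # dispatches to the $rpart spec above
                  tuneLength = 5,          # number of candidate cp values
                  trControl = trainControl(method = "cv", number = 5))
fit_cart$bestTune                          # cp chosen by resampling
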
## $rpart1SE
## $rpart1SE$label
## [1] "CART"
## 
## $rpart1SE$library
## [1] "rpart"
## 
## $rpart1SE$type
## [1] "Regression"     "Classification"
## 
## $rpart1SE$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $rpart1SE$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $rpart1SE$loop
## NULL
## 
## $rpart1SE$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         out <- rpart::rpart(.outcome ~ ., data = dat, ...)
##     }
##     else {
##         out <- rpart::rpart(.outcome ~ ., data = dat, weights = wts, 
##             ...)
##     }
##     out
## }
## 
## $rpart1SE$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- if (modelFit$problemType == "Classification") 
##         predict(modelFit, newdata, type = "class")
##     else predict(modelFit, newdata)
##     out
## }
## 
## $rpart1SE$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "prob")
## }
## 
## $rpart1SE$predictors
## function (x, surrogate = TRUE, ...) 
## {
##     out <- as.character(x$frame$var)
##     out <- out[!(out %in% c("<leaf>"))]
##     if (surrogate) {
##         splits <- x$splits
##         splits <- splits[splits[, "adj"] > 0, ]
##         out <- c(out, rownames(splits))
##     }
##     unique(out)
## }
## 
## $rpart1SE$varImp
## function (object, surrogates = FALSE, competes = TRUE, ...) 
## {
##     tmp <- rownames(object$splits)
##     rownames(object$splits) <- 1:nrow(object$splits)
##     splits <- data.frame(object$splits)
##     splits$var <- tmp
##     splits$type <- ""
##     frame <- as.data.frame(object$frame, stringsAsFactors = TRUE)
##     index <- 0
##     for (i in 1:nrow(frame)) {
##         if (frame$var[i] != "<leaf>") {
##             index <- index + 1
##             splits$type[index] <- "primary"
##             if (frame$ncompete[i] > 0) {
##                 for (j in 1:frame$ncompete[i]) {
##                   index <- index + 1
##                   splits$type[index] <- "competing"
##                 }
##             }
##             if (frame$nsurrogate[i] > 0) {
##                 for (j in 1:frame$nsurrogate[i]) {
##                   index <- index + 1
##                   splits$type[index] <- "surrogate"
##                 }
##             }
##         }
##     }
##     splits$var <- factor(as.character(splits$var))
##     if (!surrogates) 
##         splits <- subset(splits, type != "surrogate")
##     if (!competes) 
##         splits <- subset(splits, type != "competing")
##     out <- aggregate(splits$improve, list(Variable = splits$var), 
##         sum, na.rm = TRUE)
##     allVars <- colnames(attributes(object$terms)$factors)
##     if (!all(allVars %in% out$Variable)) {
##         missingVars <- allVars[!(allVars %in% out$Variable)]
##         zeros <- data.frame(x = rep(0, length(missingVars)), 
##             Variable = missingVars)
##         out <- rbind(out, zeros)
##     }
##     out2 <- data.frame(Overall = out$x)
##     rownames(out2) <- out$Variable
##     out2
## }
## 
## $rpart1SE$levels
## function (x) 
## x$obsLevels
## 
## $rpart1SE$trim
## function (x) 
## {
##     x$call <- list(na.action = (x$call)$na.action)
##     x$x <- NULL
##     x$y <- NULL
##     x$where <- NULL
##     x
## }
## 
## $rpart1SE$notes
## [1] "This CART model replicates the same process used by the `rpart` function where the model complexity is determined using the one-standard error method. This procedure is replicated inside of the resampling done by `train` so that an external resampling estimate can be obtained."
## 
## $rpart1SE$tags
## [1] "Tree-Based Model"              "Implicit Feature Selection"   
## [3] "Handle Missing Predictor Data" "Accepts Case Weights"         
## 
## $rpart1SE$sort
## function (x) 
## x[order(x[, 1], decreasing = TRUE), ]
## 
## 
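`$rpart1SE` exposes no real tuning parameter (its `$parameters` entry is a placeholder): model complexity is chosen with the one-standard-error method inside the fit itself, and `train()` only adds an external resampling estimate, as the `$notes` entry explains. Note in passing that the printed `$rpart1SE$fit` passes `weights = wts` in the branch where `wts` is `NULL` and drops it otherwise, which looks inverted relative to the other specs. A minimal sketch:

library(caret)
fit_1se <- train(Species ~ ., data = iris,
                 method = "rpart1SE",      # nothing to tune
                 trControl = trainControl(method = "cv", number = 5))
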
## $rpart2
## $rpart2$label
## [1] "CART"
## 
## $rpart2$library
## [1] "rpart"
## 
## $rpart2$type
## [1] "Regression"     "Classification"
## 
## $rpart2$parameters
##   parameter   class          label
## 1  maxdepth numeric Max Tree Depth
## 
## $rpart2$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     initialFit <- rpart::rpart(.outcome ~ ., data = dat, control = rpart::rpart.control(cp = 0))$cptable
##     initialFit <- initialFit[order(-initialFit[, "CP"]), "nsplit", 
##         drop = FALSE]
##     initialFit <- initialFit[initialFit[, "nsplit"] > 0 & initialFit[, 
##         "nsplit"] <= 30, , drop = FALSE]
##     if (search == "grid") {
##         if (dim(initialFit)[1] < len) {
##             cat("note: only", nrow(initialFit), "possible values of the max tree depth from the initial fit.\n", 
##                 "Truncating the grid to", nrow(initialFit), ".\n\n")
##             tuneSeq <- as.data.frame(initialFit, stringsAsFactors = TRUE)
##         }
##         else tuneSeq <- as.data.frame(initialFit[1:len, ], stringsAsFactors = TRUE)
##         colnames(tuneSeq) <- "maxdepth"
##     }
##     else {
##         tuneSeq <- data.frame(maxdepth = unique(sample(as.vector(initialFit[, 
##             1]), size = len, replace = TRUE)))
##     }
##     tuneSeq
## }
## 
## $rpart2$loop
## function (grid) 
## {
##     grid <- grid[order(grid$maxdepth, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $rpart2$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$maxdepth <- param$maxdepth
##         theDots$control$xval <- 0
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- rpart::rpart.control(maxdepth = param$maxdepth, 
##         xval = 0)
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = if (is.data.frame(x)) x else as.data.frame(x, 
##             stringsAsFactors = TRUE), control = ctl), theDots)
##     modelArgs$data$.outcome <- y
##     out <- do.call(rpart::rpart, modelArgs)
##     out
## }
## 
## $rpart2$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     depth2cp <- function(x, depth) {
##         out <- approx(x[, "nsplit"], x[, "CP"], depth)$y
##         out[depth > max(x[, "nsplit"])] <- min(x[, "CP"]) * 0.99
##         out
##     }
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     pType <- if (modelFit$problemType == "Classification") 
##         "class"
##     else "vector"
##     out <- predict(modelFit, newdata, type = pType)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         cpValues <- depth2cp(modelFit$cptable, submodels$maxdepth)
##         for (j in seq(along = cpValues)) {
##             prunedFit <- rpart::prune.rpart(modelFit, cp = cpValues[j])
##             tmp[[j + 1]] <- predict(prunedFit, newdata, type = pType)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rpart2$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     depth2cp <- function(x, depth) {
##         out <- approx(x[, "nsplit"], x[, "CP"], depth)$y
##         out[depth > max(x[, "nsplit"])] <- min(x[, "CP"]) * 0.99
##         out
##     }
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- predict(modelFit, newdata, type = "prob")
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         cpValues <- depth2cp(modelFit$cptable, submodels$maxdepth)
##         for (j in seq(along = cpValues)) {
##             prunedFit <- rpart::prune.rpart(modelFit, cp = cpValues[j])
##             tmpProb <- predict(prunedFit, newdata, type = "prob")
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rpart2$predictors
## function (x, surrogate = TRUE, ...) 
## {
##     out <- as.character(x$frame$var)
##     out <- out[!(out %in% c("<leaf>"))]
##     if (surrogate) {
##         splits <- x$splits
##         splits <- splits[splits[, "adj"] > 0, ]
##         out <- c(out, rownames(splits))
##     }
##     unique(out)
## }
## 
## $rpart2$varImp
## function (object, surrogates = FALSE, competes = TRUE, ...) 
## {
##     tmp <- rownames(object$splits)
##     rownames(object$splits) <- 1:nrow(object$splits)
##     splits <- data.frame(object$splits)
##     splits$var <- tmp
##     splits$type <- ""
##     frame <- as.data.frame(object$frame, stringsAsFactors = TRUE)
##     index <- 0
##     for (i in 1:nrow(frame)) {
##         if (frame$var[i] != "<leaf>") {
##             index <- index + 1
##             splits$type[index] <- "primary"
##             if (frame$ncompete[i] > 0) {
##                 for (j in 1:frame$ncompete[i]) {
##                   index <- index + 1
##                   splits$type[index] <- "competing"
##                 }
##             }
##             if (frame$nsurrogate[i] > 0) {
##                 for (j in 1:frame$nsurrogate[i]) {
##                   index <- index + 1
##                   splits$type[index] <- "surrogate"
##                 }
##             }
##         }
##     }
##     splits$var <- factor(as.character(splits$var))
##     if (!surrogates) 
##         splits <- subset(splits, type != "surrogate")
##     if (!competes) 
##         splits <- subset(splits, type != "competing")
##     out <- aggregate(splits$improve, list(Variable = splits$var), 
##         sum, na.rm = TRUE)
##     allVars <- colnames(attributes(object$terms)$factors)
##     if (!all(allVars %in% out$Variable)) {
##         missingVars <- allVars[!(allVars %in% out$Variable)]
##         zeros <- data.frame(x = rep(0, length(missingVars)), 
##             Variable = missingVars)
##         out <- rbind(out, zeros)
##     }
##     out2 <- data.frame(Overall = out$x)
##     rownames(out2) <- out$Variable
##     out2
## }
## 
## $rpart2$levels
## function (x) 
## x$obsLevels
## 
## $rpart2$trim
## function (x) 
## {
##     x$call <- list(na.action = (x$call)$na.action)
##     x$x <- NULL
##     x$y <- NULL
##     x$where <- NULL
##     x
## }
## 
## $rpart2$tags
## [1] "Tree-Based Model"              "Implicit Feature Selection"   
## [3] "Handle Missing Predictor Data" "Accepts Case Weights"         
## 
## $rpart2$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
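`$rpart2` tunes `maxdepth` rather than `cp`. Its `$loop` fits only the deepest tree in the grid; `$predict` and `$prob` then translate each smaller depth into a `cp` value with the local `depth2cp()` helper and prune, so the whole grid is served by one fit. A sketch with an explicit, illustrative grid:

library(caret)
fit_depth <- train(Species ~ ., data = iris,
                   method = "rpart2",
                   tuneGrid = data.frame(maxdepth = 1:4),
                   trControl = trainControl(method = "cv", number = 5))
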
## $rpartCost
## $rpartCost$label
## [1] "Cost-Sensitive CART"
## 
## $rpartCost$library
## [1] "rpart" "plyr" 
## 
## $rpartCost$type
## [1] "Classification"
## 
## $rpartCost$parameters
##   parameter   class                label
## 1        cp numeric Complexity Parameter
## 2      Cost numeric                 Cost
## 
## $rpartCost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     initialFit <- rpart::rpart(.outcome ~ ., data = dat, control = rpart::rpart.control(cp = 0))$cptable
##     initialFit <- initialFit[order(-initialFit[, "CP"]), , drop = FALSE]
##     if (search == "grid") {
##         if (nrow(initialFit) < len) {
##             tuneSeq <- expand.grid(cp = seq(min(initialFit[, 
##                 "CP"]), max(initialFit[, "CP"]), length = len), 
##                 Cost = 1:len)
##         }
##         else tuneSeq <- data.frame(cp = initialFit[1:len, "CP"], 
##             Cost = 1:len)
##         colnames(tuneSeq) <- c("cp", "Cost")
##     }
##     else {
##         tuneSeq <- data.frame(cp = 10^runif(len, min = -8, max = -1), 
##             Cost = runif(len, min = 1, max = 30))
##     }
##     tuneSeq
## }
## 
## $rpartCost$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, plyr::.(Cost), function(x) c(cp = min(x$cp)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = submodels)) {
##         larger_cp <- subset(grid, subset = Cost == loop$Cost[i] & 
##             cp > loop$cp[i])
##         submodels[[i]] <- data.frame(cp = sort(larger_cp$cp))
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $rpartCost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$cp <- param$cp
##         theDots$control$xval <- 0
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- rpart::rpart.control(cp = param$cp, xval = 0)
##     lmat <- matrix(c(0, 1, param$Cost, 0), ncol = 2)
##     rownames(lmat) <- colnames(lmat) <- levels(y)
##     if (any(names(theDots) == "parms")) {
##         theDots$parms$loss <- lmat
##     }
##     else parms <- list(loss = lmat)
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = if (is.data.frame(x)) x else as.data.frame(x, 
##             stringsAsFactors = TRUE), parms = parms, control = ctl), 
##         theDots)
##     modelArgs$data$.outcome <- y
##     out <- do.call(rpart::rpart, modelArgs)
##     out
## }
## 
## $rpartCost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     pType <- if (modelFit$problemType == "Classification") 
##         "class"
##     else "vector"
##     out <- predict(modelFit, newdata, type = pType)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$cp)) {
##             prunedFit <- rpart::prune.rpart(modelFit, cp = submodels$cp[j])
##             tmp[[j + 1]] <- predict(prunedFit, newdata, type = pType)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rpartCost$levels
## function (x) 
## x$obsLevels
## 
## $rpartCost$prob
## NULL
## 
## $rpartCost$tags
## [1] "Tree-Based Model"              "Implicit Feature Selection"   
## [3] "Cost Sensitive Learning"       "Two Class Only"               
## [5] "Handle Missing Predictor Data" "Accepts Case Weights"         
## 
## $rpartCost$sort
## function (x) 
## x[order(-x$cp, -x$Cost), ]
## 
## 
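`$rpartCost$fit` turns the `Cost` parameter into an `rpart` loss matrix, `matrix(c(0, 1, param$Cost, 0), ncol = 2)` with the factor levels as row and column names, so one direction of misclassification is `Cost` times as expensive as the other. The model is tagged `"Two Class Only"` and supplies no `$prob` module, so class probabilities are unavailable. A sketch on a two-class outcome (grid values are illustrative; assumes `rpart` and `plyr` are installed):

library(caret)
two_class <- droplevels(subset(iris, Species != "setosa"))
fit_cost <- train(Species ~ ., data = two_class,
                  method = "rpartCost",
                  tuneGrid = expand.grid(cp = c(0.01, 0.1), Cost = 1:3),
                  trControl = trainControl(method = "cv", number = 5))
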
## $rpartScore
## $rpartScore$label
## [1] "CART or Ordinal Responses"
## 
## $rpartScore$library
## [1] "rpartScore" "plyr"      
## 
## $rpartScore$type
## [1] "Classification"
## 
## $rpartScore$parameters
##   parameter     class                label
## 1        cp   numeric Complexity Parameter
## 2     split character       Split Function
## 3     prune character      Pruning Measure
## 
## $rpartScore$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     initialFit <- rpart::rpart(.outcome ~ ., data = dat, control = rpart::rpart.control(cp = 0))$cptable
##     initialFit <- initialFit[order(-initialFit[, "CP"]), , drop = FALSE]
##     if (search == "grid") {
##         if (nrow(initialFit) < len) {
##             tuneSeq <- expand.grid(cp = seq(min(initialFit[, 
##                 "CP"]), max(initialFit[, "CP"]), length = len), 
##                 split = c("abs", "quad"), prune = c("mr", "mc"))
##         }
##         else tuneSeq <- expand.grid(cp = initialFit[1:len, "CP"], 
##             split = c("abs", "quad"), prune = c("mr", "mc"))
##         colnames(tuneSeq)[1] <- "cp"
##     }
##     else {
##         tuneSeq <- expand.grid(cp = unique(sample(initialFit[, 
##             "CP"], size = len, replace = TRUE)), split = c("abs", 
##             "quad"), prune = c("mr", "mc"))
##     }
##     tuneSeq
## }
## 
## $rpartScore$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     cpValue <- if (!last) 
##         param$cp
##     else 0
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$cp <- cpValue
##         theDots$control$xval <- 0
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- rpart::rpart.control(cp = cpValue, xval = 0)
##     if (!is.null(wts)) 
##         theDots$weights <- wts
##     modelArgs <- c(list(formula = as.formula(".outcome ~ ."), 
##         data = if (is.data.frame(x)) x else as.data.frame(x, 
##             stringsAsFactors = TRUE), split = as.character(param$split), 
##         prune = as.character(param$prune), control = ctl), theDots)
##     modelArgs$data$.outcome <- as.numeric(y)
##     out <- do.call(rpartScore::rpartScore, modelArgs)
##     if (last) 
##         out <- rpart::prune.rpart(out, cp = param$cp)
##     out
## }
## 
## $rpartScore$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- modelFit$obsLevels[predict(modelFit, newdata)]
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$cp)) {
##             prunedFit <- rpart::prune.rpart(modelFit, cp = submodels$cp[j])
##             tmp[[j + 1]] <- modelFit$obsLevels[predict(prunedFit, 
##                 newdata)]
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $rpartScore$prob
## NULL
## 
## $rpartScore$predictors
## function (x, surrogate = TRUE, ...) 
## {
##     out <- as.character(x$frame$var)
##     out <- out[!(out %in% c("<leaf>"))]
##     if (surrogate) {
##         splits <- x$splits
##         splits <- splits[splits[, "adj"] > 0, ]
##         out <- c(out, rownames(splits))
##     }
##     unique(out)
## }
## 
## $rpartScore$varImp
## function (object, surrogates = FALSE, competes = TRUE, ...) 
## {
##     allVars <- all.vars(object$terms)
##     allVars <- allVars[allVars != ".outcome"]
##     out <- data.frame(Overall = object$variable.importance, Variable = names(object$variable.importance))
##     rownames(out) <- names(object$variable.importance)
##     if (!all(allVars %in% out$Variable)) {
##         missingVars <- allVars[!(allVars %in% out$Variable)]
##         zeros <- data.frame(Overall = rep(0, length(missingVars)), 
##             Variable = missingVars)
##         out <- rbind(out, zeros)
##     }
##     rownames(out) <- out$Variable
##     out$Variable <- NULL
##     out
## }
## 
## $rpartScore$levels
## function (x) 
## x$obsLevels
## 
## $rpartScore$trim
## function (x) 
## {
##     x$call <- list(na.action = (x$call)$na.action)
##     x$x <- NULL
##     x$y <- NULL
##     x$where <- NULL
##     x
## }
## 
## $rpartScore$tags
## [1] "Tree-Based Model"              "Implicit Feature Selection"   
## [3] "Handle Missing Predictor Data" "Accepts Case Weights"         
## [5] "Ordinal Outcomes"             
## 
## $rpartScore$sort
## function (x) 
## x[order(x[, 1], decreasing = TRUE), ]
## 
## 
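`$rpartScore` handles ordinal outcomes: `$fit` recodes the factor as `as.numeric(y)` before calling `rpartScore::rpartScore()`, and `$predict` maps the numeric scores back through `modelFit$obsLevels`, so the factor's level order must encode the ordering. The grid crosses `cp` with the split function (`"abs"`/`"quad"`) and pruning measure (`"mr"`/`"mc"`). A sketch with a synthetic ordinal grade (purely illustrative):

library(caret)
dat <- iris[, c("Sepal.Length", "Sepal.Width", "Petal.Width")]
dat$grade <- cut(iris$Petal.Length, 3, labels = c("low", "mid", "high"))
fit_ord <- train(grade ~ ., data = dat,
                 method = "rpartScore",
                 tuneGrid = expand.grid(cp = c(0.01, 0.1),
                                        split = c("abs", "quad"),
                                        prune = c("mr", "mc")))
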
## $rqlasso
## $rqlasso$label
## [1] "Quantile Regression with LASSO penalty"
## 
## $rqlasso$library
## [1] "rqPen"
## 
## $rqlasso$type
## [1] "Regression"
## 
## $rqlasso$parameters
##   parameter   class      label
## 1    lambda numeric L1 Penalty
## 
## $rqlasso$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = c(10^seq(-1, -4, length = len)))
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1))
##     }
##     out
## }
## 
## $rqlasso$loop
## NULL
## 
## $rqlasso$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     rqPen::rq.lasso.fit(as.matrix(x), y, lambda = param$lambda, 
##         ...)
## }
## 
## $rqlasso$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newx = as.matrix(newdata))[, 1]
## }
## 
## $rqlasso$predictors
## function (x, ...) 
## {
##     out <- coef(x)
##     out <- out[names(out) != "intercept"]
##     names(out)[out != 0]
## }
## 
## $rqlasso$tags
## [1] "Linear Regression"          "Quantile Regression"       
## [3] "Implicit Feature Selection" "L1 Regularization"         
## 
## $rqlasso$prob
## NULL
## 
## $rqlasso$sort
## function (x) 
## x[order(-x$lambda), ]
## 
## 
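`$rqlasso` wraps `rqPen::rq.lasso.fit()`, a lasso-penalized quantile regression, with `lambda` as the only tuned value; the quantile level itself is not tuned and would be passed through `...` (in `rqPen` that is the `tau` argument, which defaults to the median, if memory of that interface serves). A regression sketch:

library(caret)
fit_rql <- train(mpg ~ ., data = mtcars,
                 method = "rqlasso",
                 tuneGrid = data.frame(lambda = 10^seq(-4, -1, length.out = 5)),
                 trControl = trainControl(method = "cv", number = 5))
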
## $rqnc
## $rqnc$label
## [1] "Non-Convex Penalized Quantile Regression"
## 
## $rqnc$library
## [1] "rqPen"
## 
## $rqnc$type
## [1] "Regression"
## 
## $rqnc$parameters
##   parameter     class        label
## 1    lambda   numeric   L1 Penalty
## 2   penalty character Penalty Type
## 
## $rqnc$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = c(10^seq(-1, -4, length = len)), 
##             penalty = c("MCP", "SCAD"))
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1), 
##             penalty = sample(c("MCP", "SCAD"), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $rqnc$loop
## NULL
## 
## $rqnc$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     rqPen::rq.nc.fit(as.matrix(x), y, lambda = param$lambda, 
##         penalty = as.character(param$penalty), ...)
## }
## 
## $rqnc$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newx = as.matrix(newdata))[, 1]
## }
## 
## $rqnc$predictors
## function (x, ...) 
## {
##     out <- coef(x)
##     out <- out[names(out) != "intercept"]
##     names(out)[out != 0]
## }
## 
## $rqnc$tags
## [1] "Linear Regression"          "Quantile Regression"       
## [3] "Implicit Feature Selection" "L1 Regularization"         
## 
## $rqnc$prob
## NULL
## 
## $rqnc$sort
## function (x) 
## x[order(-x$lambda), ]
## 
## 
## $RRF
## $RRF$label
## [1] "Regularized Random Forest"
## 
## $RRF$library
## [1] "randomForest" "RRF"         
## 
## $RRF$loop
## NULL
## 
## $RRF$type
## [1] "Regression"     "Classification"
## 
## $RRF$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 2   coefReg numeric          Regularization Value
## 3   coefImp numeric        Importance Coefficient
## 
## $RRF$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len), coefReg = seq(0.01, 
##             1, length = len), coefImp = seq(0, 1, length = len))
##     }
##     else {
##         out <- data.frame(mtry = sample(1:ncol(x), size = len, 
##             replace = TRUE), coefReg = runif(len, min = 0, max = 1), 
##             coefImp = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $RRF$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     theDots$importance <- TRUE
##     args <- list(x = x, y = y, mtry = min(param$mtry, ncol(x)))
##     args <- c(args, theDots)
##     firstFit <- do.call(randomForest::randomForest, args)
##     firstImp <- randomForest:::importance(firstFit)
##     if (is.factor(y)) {
##         firstImp <- firstImp[, "MeanDecreaseGini"]/max(firstImp[, 
##             "MeanDecreaseGini"])
##     }
##     else firstImp <- firstImp[, "%IncMSE"]/max(firstImp[, "%IncMSE"])
##     firstImp <- ((1 - param$coefImp) * param$coefReg) + (param$coefImp * 
##         firstImp)
##     RRF::RRF(x, y, mtry = min(param$mtry, ncol(x)), coefReg = firstImp, 
##         ...)
## }
## 
## $RRF$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $RRF$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $RRF$varImp
## function (object, ...) 
## {
##     varImp <- RRF::importance(object, ...)
##     if (object$type == "regression") 
##         varImp <- data.frame(Overall = varImp[, "%IncMSE"])
##     else {
##         retainNames <- levels(object$y)
##         if (all(retainNames %in% colnames(varImp))) {
##             varImp <- varImp[, retainNames]
##         }
##         else {
##             varImp <- data.frame(Overall = varImp[, 1])
##         }
##     }
##     out <- as.data.frame(varImp, stringsAsFactors = TRUE)
##     if (dim(out)[2] == 2) {
##         tmp <- apply(out, 1, mean)
##         out[, 1] <- out[, 2] <- tmp
##     }
##     out
## }
## 
## $RRF$levels
## function (x) 
## x$obsLevels
## 
## $RRF$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## [5] "Regularization"            
## 
## $RRF$sort
## function (x) 
## x[order(x$coefReg), ]
## 
## 
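`$RRF$fit` is a two-stage procedure: it first fits a plain `randomForest` with `importance = TRUE`, scales the resulting importances to [0, 1], blends them with the global `coefReg` using the weight `coefImp`, and hands the blended per-feature vector to `RRF::RRF()` as its `coefReg`. With `coefImp = 0` every feature gets the same regularization; with `coefImp = 1` each feature is regularized by its first-stage importance alone. A sketch (grid values are illustrative; assumes `randomForest` and `RRF` are installed):

library(caret)
fit_rrf <- train(Species ~ ., data = iris,
                 method = "RRF",
                 tuneGrid = expand.grid(mtry = 2,
                                        coefReg = c(0.5, 1),
                                        coefImp = c(0, 0.5, 1)))
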
## $RRFglobal
## $RRFglobal$label
## [1] "Regularized Random Forest"
## 
## $RRFglobal$library
## [1] "RRF"
## 
## $RRFglobal$loop
## NULL
## 
## $RRFglobal$type
## [1] "Regression"     "Classification"
## 
## $RRFglobal$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 2   coefReg numeric          Regularization Value
## 
## $RRFglobal$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len), coefReg = seq(0.01, 
##             1, length = len))
##     }
##     else {
##         out <- data.frame(mtry = sample(1:ncol(x), size = len, 
##             replace = TRUE), coefReg = runif(len, min = 0, max = 1))
##     }
##     out
## }
## 
## $RRFglobal$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     RRF::RRF(x, y, mtry = param$mtry, coefReg = param$coefReg, 
##         ...)
## }
## 
## $RRFglobal$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $RRFglobal$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $RRFglobal$varImp
## function (object, ...) 
## {
##     varImp <- RRF::importance(object, ...)
##     if (object$type == "regression") 
##         varImp <- data.frame(Overall = varImp[, "%IncMSE"])
##     else {
##         retainNames <- levels(object$y)
##         if (all(retainNames %in% colnames(varImp))) {
##             varImp <- varImp[, retainNames]
##         }
##         else {
##             varImp <- data.frame(Overall = varImp[, 1])
##         }
##     }
##     out <- as.data.frame(varImp, stringsAsFactors = TRUE)
##     if (dim(out)[2] == 2) {
##         tmp <- apply(out, 1, mean)
##         out[, 1] <- out[, 2] <- tmp
##     }
##     out
## }
## 
## $RRFglobal$levels
## function (x) 
## x$obsLevels
## 
## $RRFglobal$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## [5] "Regularization"            
## 
## $RRFglobal$sort
## function (x) 
## x[order(x$coefReg), ]
## 
## 
## $rrlda
## $rrlda$label
## [1] "Robust Regularized Linear Discriminant Analysis"
## 
## $rrlda$library
## [1] "rrlda"
## 
## $rrlda$loop
## NULL
## 
## $rrlda$type
## [1] "Classification"
## 
## $rrlda$parameters
##   parameter     class                label
## 1    lambda   numeric    Penalty Parameter
## 2        hp   numeric Robustness Parameter
## 3   penalty character         Penalty Type
## 
## $rrlda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = (1:len) * 0.25, hp = seq(0.5, 
##             1, length = len), penalty = "L2")
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1), 
##             hp = runif(len, min = 0, 1), penalty = sample(c("L1", 
##                 "L2"), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $rrlda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require("rrlda")
##     rrlda:::rrlda(x, as.numeric(y), lambda = param$lambda, hp = param$hp, 
##         penalty = as.character(param$penalty), ...)
## }
## 
## $rrlda$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$class
##     modelFit$obsLevels[as.numeric(out)]
## }
## 
## $rrlda$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata)$posterior
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $rrlda$levels
## function (x) 
## x$obsLevels
## 
## $rrlda$notes
## [1] "Unlike other packages used by `train`, the `rrlda` package is fully loaded when this model is used."
## 
## $rrlda$tags
## [1] "Discriminant Analysis" "Robust Model"          "Regularization"       
## [4] "Linear Classifier"    
## 
## $rrlda$sort
## function (x) 
## x[order(-x$lambda), ]
## 
## 
## $RSimca
## $RSimca$label
## [1] "Robust SIMCA"
## 
## $RSimca$library
## [1] "rrcovHD"
## 
## $RSimca$loop
## NULL
## 
## $RSimca$type
## [1] "Classification"
## 
## $RSimca$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $RSimca$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     data.frame(parameter = "none")
## }
## 
## $RSimca$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(rrcovHD)
##     rrcovHD::RSimca(x, y, ...)
## }
## 
## $RSimca$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)@classification
## 
## $RSimca$prob
## NULL
## 
## $RSimca$notes
## [1] "Unlike other packages used by `train`, the `rrcovHD` package is fully loaded when this model is used."
## 
## $RSimca$tags
## [1] "Robust Model"      "Linear Classifier"
## 
## $RSimca$levels
## function (x) 
## names(x@prior)
## 
## $RSimca$sort
## function (x) 
## x
## 
## 
## $rvmLinear
## $rvmLinear$label
## [1] "Relevance Vector Machines with Linear Kernel"
## 
## $rvmLinear$library
## [1] "kernlab"
## 
## $rvmLinear$loop
## NULL
## 
## $rvmLinear$type
## [1] "Regression"
## 
## $rvmLinear$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $rvmLinear$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $rvmLinear$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     kernlab:::rvm(x = as.matrix(x), y = y, kernel = kernlab::vanilladot(), 
##         ...)
## }
## 
## $rvmLinear$predict
## function (modelFit, newdata, submodels = NULL) 
## kernlab::predict(modelFit, newdata)
## 
## $rvmLinear$prob
## NULL
## 
## $rvmLinear$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $rvmLinear$tags
## [1] "Kernel Method"             "Relevance Vector Machines"
## [3] "Linear Regression"         "Robust Methods"           
## 
## $rvmLinear$sort
## function (x) 
## x
## 
## 
## $rvmPoly
## $rvmPoly$label
## [1] "Relevance Vector Machines with Polynomial Kernel"
## 
## $rvmPoly$library
## [1] "kernlab"
## 
## $rvmPoly$loop
## NULL
## 
## $rvmPoly$type
## [1] "Regression"
## 
## $rvmPoly$parameters
##   parameter   class             label
## 1     scale numeric             Scale
## 2    degree numeric Polynomial Degree
## 
## $rvmPoly$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(degree = seq(1, min(len, 3)), scale = 10^((1:len) - 
##             4))
##     }
##     else {
##         out <- data.frame(degree = sample(1:3, size = len, replace = TRUE), 
##             scale = 10^runif(len, min = -5, 0))
##     }
##     out
## }
## 
## $rvmPoly$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     kernlab:::rvm(x = as.matrix(x), y = y, kernel = kernlab::polydot(degree = param$degree, 
##         scale = param$scale, offset = 1), ...)
## }
## 
## $rvmPoly$predict
## function (modelFit, newdata, submodels = NULL) 
## kernlab::predict(modelFit, newdata)
## 
## $rvmPoly$prob
## NULL
## 
## $rvmPoly$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$xscale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $rvmPoly$tags
## [1] "Kernel Method"             "Relevance Vector Machines"
## [3] "Polynomial Model"          "Robust Methods"           
## 
## $rvmPoly$sort
## function (x) 
## x[order(x$degree, x$scale), ]
## 
## 
## $rvmRadial
## $rvmRadial$label
## [1] "Relevance Vector Machines with Radial Basis Function Kernel"
## 
## $rvmRadial$library
## [1] "kernlab"
## 
## $rvmRadial$loop
## NULL
## 
## $rvmRadial$type
## [1] "Regression"
## 
## $rvmRadial$parameters
##   parameter   class label
## 1     sigma numeric Sigma
## 
## $rvmRadial$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     sigmas <- kernlab::sigest(as.matrix(x), na.action = na.omit, 
##         scaled = TRUE)
##     if (search == "grid") {
##         out <- expand.grid(sigma = mean(as.vector(sigmas[-2])))
##     }
##     else {
##         rng <- extendrange(log(sigmas), f = 0.75)
##         out <- data.frame(sigma = exp(runif(len, min = rng[1], 
##             max = rng[2])))
##     }
##     out
## }
## 
## $rvmRadial$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     kernlab:::rvm(x = as.matrix(x), y = y, kernel = "rbfdot", 
##         kpar = list(sigma = param$sigma), ...)
## }
## 
## $rvmRadial$predict
## function (modelFit, newdata, submodels = NULL) 
## kernlab::predict(modelFit, newdata)
## 
## $rvmRadial$prob
## NULL
## 
## $rvmRadial$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $rvmRadial$tags
## [1] "Kernel Method"             "Relevance Vector Machines"
## [3] "Radial Basis Function"     "Robust Methods"           
## 
## $rvmRadial$sort
## function (x) 
## x[order(-x$sigma), ]
## 
## 
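Note that `$rvmRadial$grid` ignores `len` under grid search: it returns the single `sigma` obtained by averaging the outer two `kernlab::sigest()` estimates, so only random search (`search = "random"`) explores a range of kernel widths. A regression sketch:

library(caret)
fit_rvm <- train(mpg ~ ., data = mtcars,
                 method = "rvmRadial",     # sigma comes from sigest()
                 trControl = trainControl(method = "cv", number = 5))
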
## $SBC
## $SBC$label
## [1] "Subtractive Clustering and Fuzzy c-Means Rules"
## 
## $SBC$library
## [1] "frbs"
## 
## $SBC$type
## [1] "Regression"
## 
## $SBC$parameters
##   parameter   class           label
## 1       r.a numeric          Radius
## 2  eps.high numeric Upper Threshold
## 3   eps.low numeric Lower Threshold
## 
## $SBC$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(r.a = seq(0, 1, length = len), eps.high = seq(0, 
##             1, length = len), eps.low = seq(0, 1, length = len))
##     }
##     else {
##         out <- data.frame(r.a = sample(1:20, size = len * 10, 
##             replace = TRUE), eps.high = runif(len * 10, min = 0, 
##             max = 1), eps.low = runif(len * 10, min = 0, max = 1))
##     }
##     out <- subset(out, eps.high > eps.low)
##     out[1:min(nrow(out), len), ]
## }
## 
## $SBC$loop
## NULL
## 
## $SBC$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)), method.type = "SBC", 
##         control = list(r.a = param$r.a, eps.high = param$eps.high, 
##             eps.low = param$eps.low))
##     theDots <- list(...)
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $SBC$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $SBC$prob
## NULL
## 
## $SBC$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $SBC$tags
## [1] "Rule-Based Model"
## 
## $SBC$levels
## NULL
## 
## $SBC$sort
## function (x) 
## x[order(x$r.a), ]
## 
## 
## $sda
## $sda$label
## [1] "Shrinkage Discriminant Analysis"
## 
## $sda$library
## [1] "sda"
## 
## $sda$loop
## NULL
## 
## $sda$type
## [1] "Classification"
## 
## $sda$parameters
##   parameter   class       label
## 1  diagonal logical Diagonalize
## 2    lambda numeric   shrinkage
## 
## $sda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(diagonal = FALSE, lambda = seq(0, 1, 
##             length = len))
##     }
##     else {
##         out <- data.frame(lambda = runif(len, min = 0, 1), diagonal = sample(c(TRUE, 
##             FALSE), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $sda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## sda::sda(as.matrix(x), y, diagonal = param$diagonal, lambda = param$lambda, 
##     ...)
## 
## $sda$predict
## function (modelFit, newdata, submodels = NULL) 
## sda::predict.sda(modelFit, as.matrix(newdata))$class
## 
## $sda$prob
## function (modelFit, newdata, submodels = NULL) 
## sda::predict.sda(modelFit, as.matrix(newdata))$posterior
## 
## $sda$predictors
## function (x, ...) 
## {
##     colnames(x$beta)
## }
## 
## $sda$levels
## function (x) 
## x$obsLevels
## 
## $sda$tags
## [1] "Discriminant Analysis" "Regularization"        "Linear Classifier"    
## 
## $sda$sort
## function (x) 
## x[order(x$diagonal, x$lambda), ]
## 
## 
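`$sda$grid` only explores `diagonal = FALSE` under grid search, so trying the diagonalized variant requires an explicit grid. A sketch:

library(caret)
fit_sda <- train(Species ~ ., data = iris,
                 method = "sda",
                 tuneGrid = expand.grid(diagonal = c(TRUE, FALSE),
                                        lambda = seq(0, 1, by = 0.25)))
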
## $sdwd
## $sdwd$label
## [1] "Sparse Distance Weighted Discrimination"
## 
## $sdwd$library
## [1] "sdwd"
## 
## $sdwd$type
## [1] "Classification"
## 
## $sdwd$parameters
##   parameter   class      label
## 1    lambda numeric L1 Penalty
## 2   lambda2 numeric L2 Penalty
## 
## $sdwd$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     lev <- levels(y)
##     y <- ifelse(y == lev[1], 1, -1)
##     init <- sdwd::sdwd(as.matrix(x), y, nlambda = len + 2, lambda2 = 0)
##     lambda <- unique(init$lambda)
##     lambda <- lambda[-c(1, length(lambda))]
##     if (search == "grid") {
##         lambda <- lambda[1:min(length(lambda), len)]
##         out <- expand.grid(lambda = lambda, lambda2 = seq(0.1, 
##             1, length = len))
##     }
##     else {
##         out <- data.frame(lambda = runif(len, min = min(lambda), 
##             max(lambda)), lambda2 = 10^runif(len, min = -5, 0))
##     }
##     out
## }
## 
## $sdwd$loop
## NULL
## 
## $sdwd$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     y <- ifelse(y == lev[1], 1, -1)
##     sdwd::sdwd(as.matrix(x), y = y, lambda = param$lambda, lambda2 = param$lambda2, 
##         ...)
## }
## 
## $sdwd$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit, newx = newdata, type = "class")
##     ifelse(out == 1, modelFit$obsLevels[1], modelFit$obsLevels[2])
## }
## 
## $sdwd$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit, newx = newdata, type = "link")
##     out <- binomial()$linkinv(out)
##     out <- data.frame(c1 = out, c2 = 1 - out)
##     colnames(out) <- modelFit$obsLevels
##     out
## }
## 
## $sdwd$predictors
## function (x, ...) 
## {
##     out <- apply(x$beta, 1, function(x) any(x != 0))
##     names(out)[out]
## }
## 
## $sdwd$varImp
## function (object, lambda = NULL, ...) 
## {
##     out <- as.data.frame(as.matrix(abs(object$beta)), stringsAsFactors = TRUE)
##     colnames(out) <- "Overall"
##     out
## }
## 
## $sdwd$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $sdwd$tags
## [1] "Discriminant Analysis Models"     "Implicit Feature Selection"      
## [3] "L1 Regularization"                "L2 Regularization"               
## [5] "Linear Classifier"                "Distance Weighted Discrimination"
## 
## $sdwd$sort
## function (x) 
## x[order(-x$lambda, -x$lambda2), ]
## 
## $sdwd$trim
## function (x) 
## {
##     x$call <- NULL
##     x
## }
## 
## 
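`$sdwd` recodes the two classes as +1/-1 before calling `sdwd::sdwd()`, and its `$prob` maps the linear predictor to a probability with the logistic inverse link, `binomial()$linkinv()`. A two-class sketch:

library(caret)
two_class <- droplevels(iris[iris$Species != "setosa", ])
fit_dwd <- train(Species ~ ., data = two_class,
                 method = "sdwd",
                 tuneLength = 3)
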
## $simpls
## $simpls$label
## [1] "Partial Least Squares"
## 
## $simpls$library
## [1] "pls"
## 
## $simpls$type
## [1] "Regression"     "Classification"
## 
## $simpls$parameters
##   parameter   class       label
## 1     ncomp numeric #Components
## 
## $simpls$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
##             by = 1))
##     }
##     else {
##         out <- data.frame(ncomp = unique(sample(1:(ncol(x) - 
##             1), size = len, replace = TRUE)))
##     }
##     out
## }
## 
## $simpls$loop
## function (grid) 
## {
##     grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $simpls$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     ncomp <- min(ncol(x), param$ncomp)
##     out <- if (is.factor(y)) {
##         plsda(x, y, method = "simpls", ncomp = ncomp, ...)
##     }
##     else {
##         dat <- if (is.data.frame(x)) 
##             x
##         else as.data.frame(x, stringsAsFactors = TRUE)
##         dat$.outcome <- y
##         pls::plsr(.outcome ~ ., data = dat, method = "simpls", 
##             ncomp = ncomp, ...)
##     }
##     out
## }
## 
## $simpls$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- if (modelFit$problemType == "Classification") {
##         if (!is.matrix(newdata)) 
##             newdata <- as.matrix(newdata)
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels))
##         if (modelFit$problemType == "Classification") {
##             if (length(submodels$ncomp) > 1) {
##                 tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
##             }
##             else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
##         }
##         else {
##             tmp <- as.list(as.data.frame(apply(predict(modelFit, 
##                 newdata, ncomp = submodels$ncomp), 3, function(x) list(x))))
##         }
##         out <- c(list(out), tmp)
##     }
##     out
## }
## 
## $simpls$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
##     if (length(dim(out)) == 3) {
##         if (dim(out)[1] > 1) {
##             out <- out[, , 1]
##         }
##         else {
##             out <- as.data.frame(t(out[, , 1]), stringsAsFactors = TRUE)
##         }
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$ncomp)) {
##             tmpProb <- predict(modelFit, newdata, type = "prob", 
##                 ncomp = submodels$ncomp[j])
##             if (length(dim(tmpProb)) == 3) {
##                 if (dim(tmpProb)[1] > 1) {
##                   tmpProb <- tmpProb[, , 1]
##                 }
##                 else {
##                   tmpProb <- as.data.frame(t(tmpProb[, , 1]), 
##                     stringsAsFactors = TRUE)
##                 }
##             }
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $simpls$varImp
## function (object, estimate = NULL, ...) 
## {
##     library(pls)
##     modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
##     perf <- pls:::MSEP.mvr(object)$val
##     nms <- dimnames(perf)
##     if (length(nms$estimate) > 1) {
##         pIndex <- if (is.null(estimate)) 
##             1
##         else which(nms$estimate == estimate)
##         perf <- perf[pIndex, , , drop = FALSE]
##     }
##     numResp <- dim(modelCoef)[2]
##     if (numResp <= 2) {
##         modelCoef <- modelCoef[, 1, , drop = FALSE]
##         perf <- perf[, 1, ]
##         delta <- -diff(perf)
##         delta <- delta/sum(delta)
##         out <- data.frame(Overall = apply(abs(modelCoef), 1, 
##             weighted.mean, w = delta))
##     }
##     else {
##         perf <- -t(apply(perf[1, , ], 1, diff))
##         perf <- t(apply(perf, 1, function(u) u/sum(u)))
##         out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
##         for (i in 1:numResp) {
##             tmp <- abs(modelCoef[, i, , drop = FALSE])
##             out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
##                 ])
##         }
##         colnames(out) <- dimnames(modelCoef)[[2]]
##         rownames(out) <- dimnames(modelCoef)[[1]]
##     }
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $simpls$levels
## function (x) 
## x$obsLevels
## 
## $simpls$predictors
## function (x, ...) 
## rownames(x$projection)
## 
## $simpls$tags
## [1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
## [4] "Linear Regression"    
## 
## $simpls$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
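`$simpls` shows caret's submodel trick for partial least squares: `$loop` keeps only the largest `ncomp` for fitting, and `$predict`/`$prob` recycle that one fit to score every smaller number of components. A sketch tuning `ncomp`:

library(caret)
fit_pls <- train(mpg ~ ., data = mtcars,
                 method = "simpls",
                 tuneLength = 5,           # ncomp = 1, ..., 5
                 trControl = trainControl(method = "cv", number = 5))
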
## $SLAVE
## $SLAVE$label
## [1] "Fuzzy Rules Using the Structural Learning Algorithm on Vague Environment"
## 
## $SLAVE$library
## [1] "frbs"
## 
## $SLAVE$type
## [1] "Classification"
## 
## $SLAVE$parameters
##    parameter   class            label
## 1 num.labels numeric     #Fuzzy Terms
## 2   max.iter numeric  Max. Iterations
## 3    max.gen numeric Max. Generations
## 
## $SLAVE$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(num.labels = 1 + (1:len) * 2, max.iter = 10, 
##             max.gen = 10)
##     }
##     else {
##         out <- data.frame(num.labels = sample(2:20, size = len, 
##             replace = TRUE), max.iter = sample(1:20, replace = TRUE, 
##             size = len), max.gen = sample(1:20, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $SLAVE$loop
## NULL
## 
## $SLAVE$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, as.numeric(y))), 
##         method.type = "SLAVE")
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$max.iter <- param$max.iter
##         theDots$control$max.gen <- param$max.gen
##         theDots$control$num.class <- length(unique(y))
##     }
##     else theDots$control <- list(num.labels = param$num.labels, 
##         max.iter = param$max.iter, max.gen = param$max.gen, persen_cross = 0.6, 
##         persen_mutant = 0.3, k.lower = 0.25, k.upper = 0.75, 
##         epsilon = 0.1, num.class = length(unique(y)), name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $SLAVE$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     modelFit$obsLevels[predict(modelFit, newdata)[, 1]]
## }
## 
## $SLAVE$prob
## NULL
## 
## $SLAVE$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $SLAVE$tags
## [1] "Rule-Based Model"
## 
## $SLAVE$levels
## NULL
## 
## $SLAVE$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
## $slda
## $slda$label
## [1] "Stabilized Linear Discriminant Analysis"
## 
## $slda$library
## [1] "ipred"
## 
## $slda$loop
## NULL
## 
## $slda$type
## [1] "Classification"
## 
## $slda$parameters
##   parameter     class label
## 1 parameter character  none
## 
## $slda$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $slda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     ipred::slda(.outcome ~ ., data = dat, ...)
## }
## 
## $slda$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)$class
## }
## 
## $slda$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)$posterior
## }
## 
## $slda$levels
## function (x) 
## x$obsLevels
## 
## $slda$predictors
## function (x, ...) 
## if (hasTerms(x)) predictors(x$terms) else predictors(x$mylda)
## 
## $slda$tags
## [1] "Discriminant Analysis" "Linear Classifier"    
## 
## $slda$sort
## function (x) 
## x
## 
## 
## $smda
## $smda$label
## [1] "Sparse Mixture Discriminant Analysis"
## 
## $smda$library
## [1] "sparseLDA"
## 
## $smda$loop
## NULL
## 
## $smda$type
## [1] "Classification"
## 
## $smda$parameters
##   parameter   class        label
## 1   NumVars numeric # Predictors
## 2    lambda numeric       Lambda
## 3         R numeric # Subclasses
## 
## $smda$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(NumVars = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len), R = (1:len) + 
##             1, lambda = c(0, 10^seq(-1, -4, length = len - 1)))
##     }
##     else {
##         out <- data.frame(NumVars = sample(1:ncol(x), size = len, 
##             replace = TRUE), lambda = 10^runif(len, min = -5, 
##             1), R = sample(2:5, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $smda[[7]]
## NULL
## 
## $smda$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## sparseLDA::smda(x, y, Rj = param$R, lambda = param$lambda, stop = -param$NumVars, 
##     ...)
## 
## $smda$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)$class
## 
## $smda$prob
## NULL
## 
## $smda$levels
## function (x) 
## x$obsLevels
## 
## $smda$predictors
## function (x, ...) 
## x$varNames
## 
## $smda$tags
## [1] "Discriminant Analysis"      "L1 Regularization"         
## [3] "Implicit Feature Selection" "Mixture Model"             
## 
## $smda$sort
## function (x) 
## x[order(x$NumVars, x$R, -x$lambda), ]
## 
## 
## $snn
## $snn$label
## [1] "Stabilized Nearest Neighbor Classifier"
## 
## $snn$library
## [1] "snn"
## 
## $snn$loop
## NULL
## 
## $snn$type
## [1] "Classification"
## 
## $snn$parameters
##   parameter   class                   label
## 1    lambda numeric Stabilization Parameter
## 
## $snn$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = c(0, 2^seq(-5, 5, length = len - 
##             1)))
##     }
##     else {
##         out <- data.frame(lambda = 2^runif(len, min = -5, 5))
##     }
##     out
## }
## 
## $snn$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!is.matrix(x)) 
##         x <- as.matrix(x)
##     if (!(class(x[1, 1]) %in% c("integer", "numeric"))) 
##         stop("predictors should be all numeric")
##     x <- cbind(x, as.numeric(y))
##     colnames(x)[ncol(x)] <- ".outcome"
##     list(dat = x, lambda = param$lambda)
## }
## 
## $snn$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- snn::mysnn(train = modelFit$dat, test = newdata, lambda = modelFit$lambda)
##     modelFit$obsLevels[out]
## }
## 
## $snn$predictors
## function (x, ...) 
## x$xNames
## 
## $snn$tags
## [1] "Prototype Models"
## 
## $snn$prob
## NULL
## 
## $snn$levels
## function (x) 
## x$obsLevels
## 
## $snn$sort
## function (x) 
## x[order(-x[, 1]), ]
## 
## 
## $sparseLDA
## $sparseLDA$label
## [1] "Sparse Linear Discriminant Analysis"
## 
## $sparseLDA$library
## [1] "sparseLDA"
## 
## $sparseLDA$loop
## NULL
## 
## $sparseLDA$type
## [1] "Classification"
## 
## $sparseLDA$parameters
##   parameter   class        label
## 1   NumVars numeric # Predictors
## 2    lambda numeric       Lambda
## 
## $sparseLDA$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(NumVars = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len), lambda = c(0, 
##             10^seq(-1, -4, length = len - 1)))
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 1), 
##             NumVars = sample(1:ncol(x), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $sparseLDA$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## sparseLDA:::sda(x, y, lambda = param$lambda, stop = -param$NumVars, 
##     ...)
## 
## $sparseLDA$predictors
## function (x) 
## x$xNames[x$varIndex]
## 
## $sparseLDA$predict
## function (modelFit, newdata, submodels = NULL) 
## sparseLDA:::predict.sda(modelFit, newdata)$class
## 
## $sparseLDA$prob
## function (modelFit, newdata, submodels = NULL) 
## sparseLDA:::predict.sda(modelFit, newdata)$posterior
## 
## $sparseLDA$levels
## function (x) 
## x$obsLevels
## 
## $sparseLDA$tags
## [1] "Discriminant Analysis"      "L1 Regularization"         
## [3] "Implicit Feature Selection" "Linear Classifier"         
## 
## $sparseLDA$sort
## function (x) 
## x[order(x$NumVars, -x$lambda), ]
## 
## 
## $spikeslab
## $spikeslab$label
## [1] "Spike and Slab Regression"
## 
## $spikeslab$library
## [1] "spikeslab" "plyr"     
## 
## $spikeslab$type
## [1] "Regression"
## 
## $spikeslab$parameters
##   parameter   class              label
## 1      vars numeric Variables Retained
## 
## $spikeslab$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(vars = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(vars = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $spikeslab$loop
## function (grid) 
## {
##     grid <- grid[order(grid$vars, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $spikeslab$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     require(spikeslab)
##     mod <- spikeslab::spikeslab(x = as.matrix(x), y = y, max.var = param$vars, 
##         ...)
##     path <- data.frame(k = apply(mod$gnet.path$path, 1, function(x) sum(x != 
##         0)))
##     path$index <- 1:nrow(path)
##     path <- plyr::ddply(path, plyr::.(k), function(x) x[which.min(x$index), 
##         ])
##     if (all(path$k != ncol(x))) 
##         path <- rbind(path, data.frame(k = ncol(x), index = max(path$index)))
##     mod$.path <- path
##     mod$.size <- param$vars
##     mod
## }
## 
## $spikeslab$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- spikeslab::predict.spikeslab(modelFit, newdata)$yhat.gnet.path
##     if (is.vector(out)) 
##         out <- matrix(out, nrow = 1)
##     if (!is.null(submodels)) {
##         vars <- data.frame(k = c(modelFit$.size, submodels$vars))
##         vars$order <- 1:nrow(vars)
##         vars <- merge(vars, modelFit$.path, all.x = TRUE)
##         vars <- vars[order(vars$order), ]
##         out <- out[, vars$index]
##         out <- as.list(as.data.frame(out, stringsAsFactors = TRUE))
##     }
##     else {
##         index <- modelFit$.path$index[modelFit$.path$k == modelFit$.size]
##         out <- out[, index]
##     }
##     out
## }
## 
## $spikeslab$predictors
## function (x, s = NULL, ...) 
## {
##     coefs <- x$gnet
##     names(coefs)[coefs != 0]
## }
## 
## $spikeslab$notes
## [1] "Unlike other packages used by `train`, the `spikeslab` package is fully loaded when this model is used."
## 
## $spikeslab$tags
## [1] "Linear Regression"          "Bayesian Model"            
## [3] "Implicit Feature Selection"
## 
## $spikeslab$prob
## NULL
## 
## $spikeslab$sort
## function (x) 
## x
## 
## 
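A minimal regression sketch for the spikeslab entry, assuming the spikeslab and plyr packages are installed (note from $spikeslab$notes that the package is fully loaded by train()). The vars grid column is the number of retained variables; the loop function above fits the largest model once and treats smaller ones as submodels.

library(caret)
ss_fit <- train(x = as.matrix(mtcars[, -1]), y = mtcars$mpg,
                method = "spikeslab",
                tuneGrid = data.frame(vars = 2:5),
                trControl = trainControl(method = "cv", number = 5))
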
## $spls
## $spls$label
## [1] "Sparse Partial Least Squares"
## 
## $spls$library
## [1] "spls"
## 
## $spls$type
## [1] "Regression"     "Classification"
## 
## $spls$parameters
##   parameter   class       label
## 1         K numeric #Components
## 2       eta numeric   Threshold
## 3     kappa numeric       Kappa
## 
## $spls$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(K = 1:min(nrow(x), ncol(x)), eta = seq(0.1, 
##             0.9, length = len), kappa = 0.5)
##     }
##     else {
##         out <- data.frame(kappa = runif(len, min = 0, max = 0.5), 
##             eta = runif(len, min = 0, max = 1), K = sample(1:min(nrow(x), 
##                 ncol(x)), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $spls$loop
## NULL
## 
## $spls$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     param$K <- min(param$K, length(y))
##     if (is.factor(y)) {
##         caret:::splsda(x, y, K = param$K, eta = param$eta, kappa = param$kappa, 
##             ...)
##     }
##     else {
##         spls::spls(x, y, K = param$K, eta = param$eta, kappa = param$kappa, 
##             ...)
##     }
## }
## 
## $spls$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (length(modelFit$obsLevels) < 2) {
##         spls::predict.spls(modelFit, newdata)
##     }
##     else {
##         as.character(caret:::predict.splsda(modelFit, newdata, 
##             type = "class"))
##     }
## }
## 
## $spls$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     caret:::predict.splsda(modelFit, newdata, type = "prob")
## }
## 
## $spls$predictors
## function (x, ...) 
## colnames(x$x)[x$A]
## 
## $spls$tags
## [1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
## [4] "Linear Regression"     "L1 Regularization"    
## 
## $spls$levels
## function (x) 
## x$obsLevels
## 
## $spls$sort
## function (x) 
## x[order(-x$eta, x$K), ]
## 
## 
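A sketch for the spls entry, assuming the spls package is installed. kappa is held at 0.5, matching the default grid above, while K and eta vary.

library(caret)
spls_fit <- train(x = as.matrix(mtcars[, -1]), y = mtcars$mpg,
                  method = "spls",
                  tuneGrid = expand.grid(K = 1:3, eta = c(0.3, 0.6),
                                         kappa = 0.5),
                  trControl = trainControl(method = "cv", number = 5))
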
## $stepLDA
## $stepLDA$label
## [1] "Linear Discriminant Analysis with Stepwise Feature Selection"
## 
## $stepLDA$library
## [1] "klaR" "MASS"
## 
## $stepLDA$loop
## NULL
## 
## $stepLDA$type
## [1] "Classification"
## 
## $stepLDA$parameters
##   parameter     class              label
## 1    maxvar   numeric Maximum #Variables
## 2 direction character   Search Direction
## 
## $stepLDA$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(maxvar = Inf, direction = "both")
##     }
##     else {
##         out <- data.frame(direction = sample(c("both", "forward", 
##             "backward"), size = len, replace = TRUE), maxvar = sample(1:ncol(x), 
##             size = len, replace = TRUE))
##     }
##     out
## }
## 
## $stepLDA$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     out <- klaR::stepclass(x, y, method = "lda", maxvar = param$maxvar, 
##         direction = as.character(param$direction), ...)
##     out$fit <- MASS::lda(x[, out$model$name, drop = FALSE], y, 
##         ...)
##     out
## }
## 
## $stepLDA$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     code <- getModelInfo("lda", regex = FALSE)[[1]]$predictors
##     predict(modelFit$fit, newdata[, code(modelFit$fit), drop = FALSE])$class
## }
## 
## $stepLDA$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     code <- getModelInfo("lda", regex = FALSE)[[1]]$predictors
##     predict(modelFit$fit, newdata[, code(modelFit$fit), drop = FALSE])$posterior
## }
## 
## $stepLDA$predictors
## function (x, ...) 
## {
##     form <- x$formula
##     form[[2]] <- NULL
##     all.vars(form)
## }
## 
## $stepLDA$levels
## function (x) 
## x$obsLevels
## 
## $stepLDA$tags
## [1] "Discriminant Analysis"     "Feature Selection Wrapper"
## [3] "Linear Classifier"        
## 
## $stepLDA$sort
## function (x) 
## x
## 
## 
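For the stepLDA entry, the default grid is a single setting (maxvar = Inf, direction = "both"), so no tuneGrid is needed; a sketch assuming the klaR and MASS packages are installed:

library(caret)
step_fit <- train(x = iris[, 1:4], y = iris$Species,
                  method = "stepLDA",
                  trControl = trainControl(method = "cv", number = 5))
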
## $stepQDA
## $stepQDA$label
## [1] "Quadratic Discriminant Analysis with Stepwise Feature Selection"
## 
## $stepQDA$library
## [1] "klaR" "MASS"
## 
## $stepQDA$loop
## NULL
## 
## $stepQDA$type
## [1] "Classification"
## 
## $stepQDA$parameters
##   parameter     class              label
## 1    maxvar   numeric Maximum #Variables
## 2 direction character   Search Direction
## 
## $stepQDA$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(maxvar = Inf, direction = "both")
## 
## $stepQDA$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     out <- klaR::stepclass(x, y, method = "qda", maxvar = param$maxvar, 
##         direction = as.character(param$direction), ...)
##     out$fit <- MASS::qda(x[, out$model$name, drop = FALSE], y, 
##         ...)
##     out
## }
## 
## $stepQDA$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     code <- getModelInfo("qda", regex = FALSE)[[1]]$predictors
##     predict(modelFit$fit, newdata[, code(modelFit$fit), drop = FALSE])$class
## }
## 
## $stepQDA$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     code <- getModelInfo("qda", regex = FALSE)[[1]]$predictors
##     predict(modelFit$fit, newdata[, code(modelFit$fit), drop = FALSE])$posterior
## }
## 
## $stepQDA$predictors
## function (x, ...) 
## {
##     form <- x$formula
##     form[[2]] <- NULL
##     all.vars(form)
## }
## 
## $stepQDA$levels
## function (x) 
## x$obsLevels
## 
## $stepQDA$tags
## [1] "Discriminant Analysis"     "Feature Selection Wrapper"
## [3] "Polynomial Model"         
## 
## $stepQDA$sort
## function (x) 
## x
## 
## 
## $superpc
## $superpc$label
## [1] "Supervised Principal Component Analysis"
## 
## $superpc$library
## [1] "superpc"
## 
## $superpc$type
## [1] "Regression"
## 
## $superpc$parameters
##      parameter   class       label
## 1    threshold numeric   Threshold
## 2 n.components numeric #Components
## 
## $superpc$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(n.components = 1:3, threshold = seq(0.1, 
##             0.9, length = len))
##     }
##     else {
##         out <- data.frame(threshold = runif(len, min = 0, max = 1), 
##             n.components = sample(1:3, size = len, replace = TRUE))
##     }
##     out
## }
## 
## $superpc$loop
## function (grid) 
## {
##     ordering <- order(-grid$n.components, -grid$threshold)
##     loop <- grid[ordering[1], , drop = FALSE]
##     submodels <- list(grid[ordering[-1], , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $superpc$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     out <- superpc::superpc.train(list(x = t(x), y = y), type = "regression", 
##         ...)
##     out$data <- list(x = t(x), y = y)
##     out$tuneValue <- list(n.components = param$n.components, 
##         threshold = param$threshold)
##     out
## }
## 
## $superpc$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- superpc::superpc.predict(modelFit, modelFit$data, 
##         newdata = list(x = t(newdata)), n.components = modelFit$tuneValue$n.components, 
##         threshold = modelFit$tuneValue$threshold)$v.pred.1df
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$threshold)) {
##             tmp[[j + 1]] <- superpc::superpc.predict(modelFit, 
##                 modelFit$data, newdata = list(x = t(newdata)), 
##                 threshold = submodels$threshold[j], n.components = submodels$n.components[j])$v.pred.1df
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $superpc$prob
## NULL
## 
## $superpc$tags
## [1] "Feature Extraction" "Linear Regression" 
## 
## $superpc$sort
## function (x) 
## x[order(x$threshold, x$n.components), ]
## 
## 
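A hedged regression sketch for the superpc entry, assuming the superpc package is installed; threshold and n.components mirror $superpc$parameters, and the loop function above lets several grid points share one fit.

library(caret)
spc_fit <- train(x = as.matrix(mtcars[, -1]), y = mtcars$mpg,
                 method = "superpc",
                 tuneGrid = expand.grid(threshold = c(0.3, 0.6),
                                        n.components = 1:2),
                 trControl = trainControl(method = "cv", number = 5))
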
## $svmBoundrangeString
## $svmBoundrangeString$label
## [1] "Support Vector Machines with Boundrange String Kernel"
## 
## $svmBoundrangeString$library
## [1] "kernlab"
## 
## $svmBoundrangeString$type
## [1] "Regression"     "Classification"
## 
## $svmBoundrangeString$parameters
##   parameter   class  label
## 1    length numeric length
## 2         C numeric   Cost
## 
## $svmBoundrangeString$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(length = 2:(len + 1), C = 2^((1:len) - 
##             3))
##     }
##     else {
##         out <- data.frame(length = sample(1:20, size = len, replace = TRUE), 
##             C = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $svmBoundrangeString$loop
## NULL
## 
## $svmBoundrangeString$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (any(names(list(...)) == "prob.model") | is.numeric(y)) {
##         out <- kernlab::ksvm(x = x[, 1], y = y, kernel = "stringdot", 
##             kpar = list(type = "boundrange", length = param$length), 
##             C = param$C, ...)
##     }
##     else {
##         out <- kernlab::ksvm(x = x[, 1], y = y, kernel = "stringdot", 
##             kpar = list(type = "boundrange", length = param$length), 
##             C = param$C, prob.model = classProbs, ...)
##     }
##     out
## }
## 
## $svmBoundrangeString$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     svmPred <- function(obj, x) {
##         hasPM <- !is.null(unlist(obj@prob.model))
##         if (hasPM) {
##             pred <- kernlab::lev(obj)[apply(kernlab::predict(obj, 
##                 x, type = "probabilities"), 1, which.max)]
##         }
##         else pred <- kernlab::predict(obj, x)
##         pred
##     }
##     out <- try(svmPred(modelFit, newdata[, 1]), silent = TRUE)
##     if (is.character(kernlab::lev(modelFit))) {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab class prediction calculations failed; returning NAs")
##             out <- rep("", nrow(newdata))
##             out[seq(along = out)] <- NA
##         }
##     }
##     else {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab prediction calculations failed; returning NAs")
##             out <- rep(NA, nrow(newdata))
##         }
##     }
##     out
## }
## 
## $svmBoundrangeString$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- try(kernlab::predict(modelFit, newdata[, 1], type = "probabilities"), 
##         silent = TRUE)
##     if (class(out)[1] != "try-error") {
##         if (any(out < 0)) {
##             out[out < 0] <- 0
##             out <- t(apply(out, 1, function(x) x/sum(x)))
##         }
##         out <- out[, kernlab::lev(modelFit), drop = FALSE]
##     }
##     else {
##         warning("kernlab class probability calculations failed; returning NAs")
##         out <- matrix(NA, nrow(newdata) * length(kernlab::lev(modelFit)), 
##             ncol = length(kernlab::lev(modelFit)))
##         colnames(out) <- kernlab::lev(modelFit)
##     }
##     out
## }
## 
## $svmBoundrangeString$predictors
## function (x, ...) 
## {
##     NA
## }
## 
## $svmBoundrangeString$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "String Kernel"           "Robust Methods"         
## [5] "Text Mining"            
## 
## $svmBoundrangeString$levels
## function (x) 
## kernlab::lev(x)
## 
## $svmBoundrangeString$sort
## function (x) 
## {
##     x[order(x$C, -x$length), ]
## }
## 
## 
## $svmExpoString
## $svmExpoString$label
## [1] "Support Vector Machines with Exponential String Kernel"
## 
## $svmExpoString$library
## [1] "kernlab"
## 
## $svmExpoString$type
## [1] "Regression"     "Classification"
## 
## $svmExpoString$parameters
##   parameter   class  label
## 1    lambda numeric lambda
## 2         C numeric   Cost
## 
## $svmExpoString$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = 0.25 + 2^((1:len) - 1), C = 2^((1:len) - 
##             3))
##     }
##     else {
##         out <- data.frame(lambda = 2^runif(len, min = -5, max = 6), 
##             C = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $svmExpoString$loop
## NULL
## 
## $svmExpoString$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (any(names(list(...)) == "prob.model") | is.numeric(y)) {
##         out <- kernlab::ksvm(x = x[, 1], y = y, kernel = "stringdot", 
##             kpar = list(type = "exponential", lambda = param$lambda), 
##             C = param$C, ...)
##     }
##     else {
##         out <- kernlab::ksvm(x = x[, 1], y = y, kernel = "stringdot", 
##             kpar = list(type = "exponential", lambda = param$lambda), 
##             C = param$C, prob.model = classProbs, ...)
##     }
##     out
## }
## 
## $svmExpoString$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     svmPred <- function(obj, x) {
##         hasPM <- !is.null(unlist(obj@prob.model))
##         if (hasPM) {
##             pred <- kernlab::lev(obj)[apply(kernlab::predict(obj, 
##                 x, type = "probabilities"), 1, which.max)]
##         }
##         else pred <- kernlab::predict(obj, x)
##         pred
##     }
##     out <- try(svmPred(modelFit, newdata[, 1]), silent = TRUE)
##     if (is.character(kernlab::lev(modelFit))) {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab class prediction calculations failed; returning NAs")
##             out <- rep("", nrow(newdata))
##             out[seq(along = out)] <- NA
##         }
##     }
##     else {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab prediction calculations failed; returning NAs")
##             out <- rep(NA, nrow(newdata))
##         }
##     }
##     out
## }
## 
## $svmExpoString$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- try(kernlab::predict(modelFit, newdata[, 1], type = "probabilities"), 
##         silent = TRUE)
##     if (class(out)[1] != "try-error") {
##         if (any(out < 0)) {
##             out[out < 0] <- 0
##             out <- t(apply(out, 1, function(x) x/sum(x)))
##         }
##         out <- out[, kernlab::lev(modelFit), drop = FALSE]
##     }
##     else {
##         warning("kernlab class probability calculations failed; returning NAs")
##         out <- matrix(NA, nrow(newdata) * length(kernlab::lev(modelFit)), 
##             ncol = length(kernlab::lev(modelFit)))
##         colnames(out) <- kernlab::lev(modelFit)
##     }
##     out
## }
## 
## $svmExpoString$predictors
## function (x, ...) 
## {
##     NA
## }
## 
## $svmExpoString$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "String Kernel"           "Robust Methods"         
## [5] "Text Mining"            
## 
## $svmExpoString$levels
## function (x) 
## kernlab::lev(x)
## 
## $svmExpoString$sort
## function (x) 
## {
##     x[order(x$C, -x$lambda), ]
## }
## 
## 
## $svmLinear
## $svmLinear$label
## [1] "Support Vector Machines with Linear Kernel"
## 
## $svmLinear$library
## [1] "kernlab"
## 
## $svmLinear$type
## [1] "Regression"     "Classification"
## 
## $svmLinear$parameters
##   parameter   class label
## 1         C numeric  Cost
## 
## $svmLinear$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(C = 1)
##     }
##     else {
##         out <- data.frame(C = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $svmLinear$loop
## NULL
## 
## $svmLinear$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (any(names(list(...)) == "prob.model") | is.numeric(y)) {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = kernlab::vanilladot(), 
##             C = param$C, ...)
##     }
##     else {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = kernlab::vanilladot(), 
##             C = param$C, prob.model = classProbs, ...)
##     }
##     out
## }
## 
## $svmLinear$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     svmPred <- function(obj, x) {
##         hasPM <- !is.null(unlist(obj@prob.model))
##         if (hasPM) {
##             pred <- kernlab::lev(obj)[apply(kernlab::predict(obj, 
##                 x, type = "probabilities"), 1, which.max)]
##         }
##         else pred <- kernlab::predict(obj, x)
##         pred
##     }
##     out <- try(svmPred(modelFit, newdata), silent = TRUE)
##     if (is.character(kernlab::lev(modelFit))) {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab class prediction calculations failed; returning NAs")
##             out <- rep("", nrow(newdata))
##             out[seq(along = out)] <- NA
##         }
##     }
##     else {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab prediction calculations failed; returning NAs")
##             out <- rep(NA, nrow(newdata))
##         }
##     }
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $svmLinear$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- try(kernlab::predict(modelFit, newdata, type = "probabilities"), 
##         silent = TRUE)
##     if (class(out)[1] != "try-error") {
##         if (any(out < 0)) {
##             out[out < 0] <- 0
##             out <- t(apply(out, 1, function(x) x/sum(x)))
##         }
##         out <- out[, kernlab::lev(modelFit), drop = FALSE]
##     }
##     else {
##         warning("kernlab class probability calculations failed; returning NAs")
##         out <- matrix(NA, nrow(newdata) * length(kernlab::lev(modelFit)), 
##             ncol = length(kernlab::lev(modelFit)))
##         colnames(out) <- kernlab::lev(modelFit)
##     }
##     out
## }
## 
## $svmLinear$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $svmLinear$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Linear Regression"       "Linear Classifier"      
## [5] "Robust Methods"         
## 
## $svmLinear$levels
## function (x) 
## kernlab::lev(x)
## 
## $svmLinear$sort
## function (x) 
## {
##     x[order(x$C), ]
## }
## 
## 
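A sketch for the svmLinear entry, assuming kernlab is installed. The default grid fixes C = 1, so a small cost grid is supplied explicitly here.

library(caret)
svml_fit <- train(Species ~ ., data = iris,
                  method = "svmLinear",
                  tuneGrid = data.frame(C = 2^(-2:2)),
                  trControl = trainControl(method = "cv", number = 5))
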
## $svmLinear2
## $svmLinear2$label
## [1] "Support Vector Machines with Linear Kernel"
## 
## $svmLinear2$library
## [1] "e1071"
## 
## $svmLinear2$type
## [1] "Regression"     "Classification"
## 
## $svmLinear2$parameters
##   parameter   class label
## 1      cost numeric  Cost
## 
## $svmLinear2$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(cost = 2^((1:len) - 3))
##     }
##     else {
##         out <- data.frame(cost = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $svmLinear2$loop
## NULL
## 
## $svmLinear2$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (any(names(list(...)) == "probability") | is.numeric(y)) {
##         out <- e1071::svm(x = as.matrix(x), y = y, kernel = "linear", 
##             cost = param$cost, ...)
##     }
##     else {
##         out <- e1071::svm(x = as.matrix(x), y = y, kernel = "linear", 
##             cost = param$cost, probability = classProbs, ...)
##     }
##     out
## }
## 
## $svmLinear2$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)
## }
## 
## $svmLinear2$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, probability = TRUE)
##     attr(out, "probabilities")
## }
## 
## $svmLinear2$predictors
## function (x, ...) 
## {
##     out <- if (!is.null(x$terms)) 
##         predictors.terms(x$terms)
##     else x$xNames
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $svmLinear2$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Linear Regression"       "Linear Classifier"      
## [5] "Robust Methods"         
## 
## $svmLinear2$levels
## function (x) 
## x$levels
## 
## $svmLinear2$sort
## function (x) 
## {
##     x[order(x$cost), ]
## }
## 
## 
## $svmLinear3
## $svmLinear3$label
## [1] "L2 Regularized Support Vector Machine (dual) with Linear Kernel"
## 
## $svmLinear3$library
## [1] "LiblineaR"
## 
## $svmLinear3$type
## [1] "Regression"     "Classification"
## 
## $svmLinear3$parameters
##   parameter     class         label
## 1      cost   numeric          Cost
## 2      Loss character Loss Function
## 
## $svmLinear3$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(cost = 2^((1:len) - 3), Loss = c("L1", 
##             "L2"))
##     }
##     else {
##         out <- data.frame(cost = 2^runif(len, min = -10, max = 10), 
##             Loss = sample(c("L1", "L2"), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $svmLinear3$loop
## NULL
## 
## $svmLinear3$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (param$Loss == "L2") {
##         model_type <- if (is.factor(y)) 
##             2
##         else 12
##     }
##     else model_type <- if (is.factor(y)) 
##         3
##     else 13
##     out <- LiblineaR::LiblineaR(data = as.matrix(x), target = y, 
##         cost = param$cost, type = model_type, ...)
##     out
## }
## 
## $svmLinear3$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)$predictions
## }
## 
## $svmLinear3$prob
## NULL
## 
## $svmLinear3$predictors
## function (x, ...) 
## {
##     out <- colnames(x$W)
##     out[out != "Bias"]
## }
## 
## $svmLinear3$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Linear Regression"       "Linear Classifier"      
## [5] "Robust Methods"         
## 
## $svmLinear3$levels
## function (x) 
## x$levels
## 
## $svmLinear3$sort
## function (x) 
## {
##     x[order(x$cost), ]
## }
## 
## 
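A sketch for the svmLinear3 entry, assuming LiblineaR is installed; cost and Loss mirror $svmLinear3$parameters, with Loss selecting the L1- or L2-loss solver in the fit function above.

library(caret)
svml3_fit <- train(x = as.matrix(iris[, 1:4]), y = iris$Species,
                   method = "svmLinear3",
                   tuneGrid = expand.grid(cost = 2^(-2:2),
                                          Loss = c("L1", "L2")),
                   trControl = trainControl(method = "cv", number = 5))
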
## $svmLinearWeights
## $svmLinearWeights$label
## [1] "Linear Support Vector Machines with Class Weights"
## 
## $svmLinearWeights$library
## [1] "e1071"
## 
## $svmLinearWeights$type
## [1] "Classification"
## 
## $svmLinearWeights$parameters
##   parameter   class        label
## 1      cost numeric         Cost
## 2    weight numeric Class Weight
## 
## $svmLinearWeights$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(cost = 2^((1:len) - 3), weight = 1:len)
##     }
##     else {
##         out <- data.frame(cost = 2^runif(len, min = -5, max = 10), 
##             weight = runif(len, min = 1, max = 25))
##     }
##     out
## }
## 
## $svmLinearWeights$loop
## NULL
## 
## $svmLinearWeights$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (length(levels(y)) != 2) 
##         stop("Currently implemented for 2-class problems")
##     cwts <- c(1, param$weight)
##     names(cwts) <- levels(y)
##     out <- e1071::svm(x = as.matrix(x), y = y, kernel = "linear", 
##         cost = param$cost, probability = classProbs, class.weights = cwts, 
##         ...)
##     out
## }
## 
## $svmLinearWeights$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)
## }
## 
## $svmLinearWeights$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, newdata, probability = TRUE)
##     attr(out, "probabilities")
## }
## 
## $svmLinearWeights$predictors
## function (x, ...) 
## {
##     out <- if (!is.null(x$terms)) 
##         predictors.terms(x$terms)
##     else x$xNames
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $svmLinearWeights$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Linear Classifier"       "Robust Methods"         
## [5] "Cost Sensitive Learning" "Two Class Only"         
## 
## $svmLinearWeights$levels
## function (x) 
## x$levels
## 
## $svmLinearWeights$sort
## function (x) 
## {
##     x[order(x$cost, x$weight), ]
## }
## 
## 
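A two-class sketch for the svmLinearWeights entry (the fit function stops unless the outcome has exactly two levels), assuming e1071 is installed; per the fit code above, the weight is applied to the second factor level.

library(caret)
two <- droplevels(subset(iris, Species != "setosa"))
svmw_fit <- train(x = as.matrix(two[, 1:4]), y = two$Species,
                  method = "svmLinearWeights",
                  tuneGrid = expand.grid(cost = c(0.25, 1, 4),
                                         weight = 1:3),
                  trControl = trainControl(method = "cv", number = 5))
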
## $svmLinearWeights2
## $svmLinearWeights2$label
## [1] "L2 Regularized Linear Support Vector Machines with Class Weights"
## 
## $svmLinearWeights2$library
## [1] "LiblineaR"
## 
## $svmLinearWeights2$type
## [1] "Classification"
## 
## $svmLinearWeights2$parameters
##   parameter     class         label
## 1      cost   numeric          Cost
## 2      Loss character Loss Function
## 3    weight   numeric  Class Weight
## 
## $svmLinearWeights2$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(cost = 2^((1:len) - 3), Loss = c("L1", 
##             "L2"), weight = 1:len)
##     }
##     else {
##         out <- data.frame(cost = 2^runif(len, min = -10, max = 10), 
##             Loss = sample(c("L1", "L2"), size = len, replace = TRUE), 
##             weight = runif(len, min = 1, max = 25))
##     }
##     out
## }
## 
## $svmLinearWeights2$loop
## NULL
## 
## $svmLinearWeights2$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     model_type <- if (param$Loss == "L2") 
##         2
##     else 3
##     if (length(levels(y)) != 2) 
##         stop("Currently implemented for 2-class problems")
##     cwts <- c(1, param$weight)
##     names(cwts) <- levels(y)
##     out <- LiblineaR::LiblineaR(data = as.matrix(x), target = y, 
##         cost = param$cost, type = model_type, wi = cwts, ...)
##     out
## }
## 
## $svmLinearWeights2$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)$predictions
## }
## 
## $svmLinearWeights2$prob
## NULL
## 
## $svmLinearWeights2$predictors
## function (x, ...) 
## {
##     out <- colnames(x$W)
##     out[out != "Bias"]
## }
## 
## $svmLinearWeights2$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Linear Classifier"       "Robust Methods"         
## [5] "Cost Sensitive Learning" "Two Class Only"         
## 
## $svmLinearWeights2$levels
## function (x) 
## x$levels
## 
## $svmLinearWeights2$sort
## function (x) 
## {
##     x[order(x$cost), ]
## }
## 
## 
## $svmPoly
## $svmPoly$label
## [1] "Support Vector Machines with Polynomial Kernel"
## 
## $svmPoly$library
## [1] "kernlab"
## 
## $svmPoly$type
## [1] "Regression"     "Classification"
## 
## $svmPoly$parameters
##   parameter   class             label
## 1    degree numeric Polynomial Degree
## 2     scale numeric             Scale
## 3         C numeric              Cost
## 
## $svmPoly$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(degree = seq(1, min(len, 3)), scale = 10^((1:len) - 
##             4), C = 2^((1:len) - 3))
##     }
##     else {
##         out <- data.frame(degree = sample(1:3, size = len, replace = TRUE), 
##             scale = 10^runif(len, min = -5, log10(2)), C = 2^runif(len, 
##                 min = -5, max = 10))
##     }
##     out
## }
## 
## $svmPoly$loop
## NULL
## 
## $svmPoly$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (any(names(list(...)) == "prob.model") | is.numeric(y)) {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = kernlab::polydot(degree = param$degree, 
##             scale = param$scale, offset = 1), C = param$C, ...)
##     }
##     else {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = kernlab::polydot(degree = param$degree, 
##             scale = param$scale, offset = 1), C = param$C, prob.model = classProbs, 
##             ...)
##     }
##     out
## }
## 
## $svmPoly$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     svmPred <- function(obj, x) {
##         hasPM <- !is.null(unlist(obj@prob.model))
##         if (hasPM) {
##             pred <- kernlab::lev(obj)[apply(kernlab::predict(obj, 
##                 x, type = "probabilities"), 1, which.max)]
##         }
##         else pred <- kernlab::predict(obj, x)
##         pred
##     }
##     out <- try(svmPred(modelFit, newdata), silent = TRUE)
##     if (is.character(kernlab::lev(modelFit))) {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab class prediction calculations failed; returning NAs")
##             out <- rep("", nrow(newdata))
##             out[seq(along = out)] <- NA
##         }
##     }
##     else {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab prediction calculations failed; returning NAs")
##             out <- rep(NA, nrow(newdata))
##         }
##     }
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $svmPoly$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- try(kernlab::predict(modelFit, newdata, type = "probabilities"), 
##         silent = TRUE)
##     if (class(out)[1] != "try-error") {
##         if (any(out < 0)) {
##             out[out < 0] <- 0
##             out <- t(apply(out, 1, function(x) x/sum(x)))
##         }
##         out <- out[, kernlab::lev(modelFit), drop = FALSE]
##     }
##     else {
##         warning("kernlab class probability calculations failed; returning NAs")
##         out <- matrix(NA, nrow(newdata) * length(kernlab::lev(modelFit)), 
##             ncol = length(kernlab::lev(modelFit)))
##         colnames(out) <- kernlab::lev(modelFit)
##     }
##     out
## }
## 
## $svmPoly$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$xscale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $svmPoly$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Polynomial Model"        "Robust Methods"         
## 
## $svmPoly$levels
## function (x) 
## kernlab::lev(x)
## 
## $svmPoly$sort
## function (x) 
## x[order(x$degree, x$C, x$scale), ]
## 
## 
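A sketch for the svmPoly entry, assuming kernlab is installed; degree, scale, and C mirror $svmPoly$parameters.

library(caret)
svmp_fit <- train(Species ~ ., data = iris,
                  method = "svmPoly",
                  tuneGrid = expand.grid(degree = 1:2,
                                         scale = c(0.01, 0.1),
                                         C = c(0.25, 1)),
                  trControl = trainControl(method = "cv", number = 5))
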
## $svmRadial
## $svmRadial$label
## [1] "Support Vector Machines with Radial Basis Function Kernel"
## 
## $svmRadial$library
## [1] "kernlab"
## 
## $svmRadial$type
## [1] "Regression"     "Classification"
## 
## $svmRadial$parameters
##   parameter   class label
## 1     sigma numeric Sigma
## 2         C numeric  Cost
## 
## $svmRadial$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     sigmas <- kernlab::sigest(as.matrix(x), na.action = na.omit, 
##         scaled = TRUE)
##     if (search == "grid") {
##         out <- expand.grid(sigma = mean(as.vector(sigmas[-2])), 
##             C = 2^((1:len) - 3))
##     }
##     else {
##         rng <- extendrange(log(sigmas), f = 0.75)
##         out <- data.frame(sigma = exp(runif(len, min = rng[1], 
##             max = rng[2])), C = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $svmRadial$loop
## NULL
## 
## $svmRadial$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (any(names(list(...)) == "prob.model") | is.numeric(y)) {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = "rbfdot", 
##             kpar = list(sigma = param$sigma), C = param$C, ...)
##     }
##     else {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = "rbfdot", 
##             kpar = list(sigma = param$sigma), C = param$C, prob.model = classProbs, 
##             ...)
##     }
##     out
## }
## 
## $svmRadial$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     svmPred <- function(obj, x) {
##         hasPM <- !is.null(unlist(obj@prob.model))
##         if (hasPM) {
##             pred <- kernlab::lev(obj)[apply(kernlab::predict(obj, 
##                 x, type = "probabilities"), 1, which.max)]
##         }
##         else pred <- kernlab::predict(obj, x)
##         pred
##     }
##     out <- try(svmPred(modelFit, newdata), silent = TRUE)
##     if (is.character(kernlab::lev(modelFit))) {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab class prediction calculations failed; returning NAs")
##             out <- rep("", nrow(newdata))
##             out[seq(along = out)] <- NA
##         }
##     }
##     else {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab prediction calculations failed; returning NAs")
##             out <- rep(NA, nrow(newdata))
##         }
##     }
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $svmRadial$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- try(kernlab::predict(modelFit, newdata, type = "probabilities"), 
##         silent = TRUE)
##     if (class(out)[1] != "try-error") {
##         if (any(out < 0)) {
##             out[out < 0] <- 0
##             out <- t(apply(out, 1, function(x) x/sum(x)))
##         }
##         out <- out[, kernlab::lev(modelFit), drop = FALSE]
##     }
##     else {
##         num_lvls <- length(kernlab::lev(modelFit))
##         warning("kernlab class probability calculations failed; returning NAs")
##         out <- matrix(NA, nrow(newdata) * num_lvls, ncol = num_lvls)
##         colnames(out) <- kernlab::lev(modelFit)
##     }
##     out
## }
## 
## $svmRadial$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $svmRadial$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Radial Basis Function"   "Robust Methods"         
## 
## $svmRadial$levels
## function (x) 
## kernlab::lev(x)
## 
## $svmRadial$sort
## function (x) 
## {
##     x[order(x$C, -x$sigma), ]
## }
## 
## 
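A sketch for the svmRadial entry, assuming kernlab is installed. With tuneLength, the grid function above holds sigma at a single sigest()-based estimate and varies only the cost C.

library(caret)
svmr_fit <- train(Species ~ ., data = iris,
                  method = "svmRadial",
                  tuneLength = 5,
                  trControl = trainControl(method = "cv", number = 5))
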
## $svmRadialCost
## $svmRadialCost$label
## [1] "Support Vector Machines with Radial Basis Function Kernel"
## 
## $svmRadialCost$library
## [1] "kernlab"
## 
## $svmRadialCost$type
## [1] "Regression"     "Classification"
## 
## $svmRadialCost$parameters
##   parameter   class label
## 1         C numeric  Cost
## 
## $svmRadialCost$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(C = 2^((1:len) - 3))
##     }
##     else {
##         out <- data.frame(C = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $svmRadialCost$loop
## NULL
## 
## $svmRadialCost$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (any(names(list(...)) == "prob.model") | is.numeric(y)) {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = "rbfdot", 
##             C = param$C, ...)
##     }
##     else {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = "rbfdot", 
##             C = param$C, prob.model = classProbs, ...)
##     }
##     out
## }
## 
## $svmRadialCost$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     svmPred <- function(obj, x) {
##         hasPM <- !is.null(unlist(obj@prob.model))
##         if (hasPM) {
##             pred <- kernlab::lev(obj)[apply(kernlab::predict(obj, 
##                 x, type = "probabilities"), 1, which.max)]
##         }
##         else pred <- kernlab::predict(obj, x)
##         pred
##     }
##     out <- try(svmPred(modelFit, newdata), silent = TRUE)
##     if (is.character(kernlab::lev(modelFit))) {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab class prediction calculations failed; returning NAs")
##             out <- rep("", nrow(newdata))
##             out[seq(along = out)] <- NA
##         }
##     }
##     else {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab prediction calculations failed; returning NAs")
##             out <- rep(NA, nrow(newdata))
##         }
##     }
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $svmRadialCost$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- try(kernlab::predict(modelFit, newdata, type = "probabilities"), 
##         silent = TRUE)
##     if (class(out)[1] != "try-error") {
##         if (any(out < 0)) {
##             out[out < 0] <- 0
##             out <- t(apply(out, 1, function(x) x/sum(x)))
##         }
##         out <- out[, kernlab::lev(modelFit), drop = FALSE]
##     }
##     else {
##         warning("kernlab class probability calculations failed; returning NAs")
##         out <- matrix(NA, nrow(newdata) * length(kernlab::lev(modelFit)), 
##             ncol = length(kernlab::lev(modelFit)))
##         colnames(out) <- kernlab::lev(modelFit)
##     }
##     out
## }
## 
## $svmRadialCost$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $svmRadialCost$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Radial Basis Function"  
## 
## $svmRadialCost$levels
## function (x) 
## kernlab::lev(x)
## 
## $svmRadialCost$sort
## function (x) 
## {
##     x[order(x$C), ]
## }
## 
## 
## $svmRadialSigma
## $svmRadialSigma$label
## [1] "Support Vector Machines with Radial Basis Function Kernel"
## 
## $svmRadialSigma$library
## [1] "kernlab"
## 
## $svmRadialSigma$type
## [1] "Regression"     "Classification"
## 
## $svmRadialSigma$parameters
##   parameter   class label
## 1     sigma numeric Sigma
## 2         C numeric  Cost
## 
## $svmRadialSigma$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     sigmas <- kernlab::sigest(as.matrix(x), na.action = na.omit, 
##         scaled = TRUE)
##     if (search == "grid") {
##         out <- expand.grid(sigma = seq(min(sigmas), max(sigmas), 
##             length = min(6, len)), C = 2^((1:len) - 3))
##     }
##     else {
##         rng <- extendrange(log(sigmas), f = 0.75)
##         out <- data.frame(sigma = exp(runif(len, min = rng[1], 
##             max = rng[2])), C = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $svmRadialSigma$loop
## NULL
## 
## $svmRadialSigma$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (any(names(list(...)) == "prob.model") | is.numeric(y)) {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = "rbfdot", 
##             kpar = list(sigma = param$sigma), C = param$C, ...)
##     }
##     else {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = "rbfdot", 
##             kpar = list(sigma = param$sigma), C = param$C, prob.model = classProbs, 
##             ...)
##     }
##     out
## }
## 
## $svmRadialSigma$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     svmPred <- function(obj, x) {
##         hasPM <- !is.null(unlist(obj@prob.model))
##         if (hasPM) {
##             pred <- kernlab::lev(obj)[apply(kernlab::predict(obj, 
##                 x, type = "probabilities"), 1, which.max)]
##         }
##         else pred <- kernlab::predict(obj, x)
##         pred
##     }
##     out <- try(svmPred(modelFit, newdata), silent = TRUE)
##     if (is.character(kernlab::lev(modelFit))) {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab class prediction calculations failed; returning NAs")
##             out <- rep("", nrow(newdata))
##             out[seq(along = out)] <- NA
##         }
##     }
##     else {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab prediction calculations failed; returning NAs")
##             out <- rep(NA, nrow(newdata))
##         }
##     }
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $svmRadialSigma$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- try(kernlab::predict(modelFit, newdata, type = "probabilities"), 
##         silent = TRUE)
##     if (class(out)[1] != "try-error") {
##         if (any(out < 0)) {
##             out[out < 0] <- 0
##             out <- t(apply(out, 1, function(x) x/sum(x)))
##         }
##         out <- out[, kernlab::lev(modelFit), drop = FALSE]
##     }
##     else {
##         warning("kernlab class probability calculations failed; returning NAs")
##         out <- matrix(NA, nrow(newdata) * length(kernlab::lev(modelFit)), 
##             ncol = length(kernlab::lev(modelFit)))
##         colnames(out) <- kernlab::lev(modelFit)
##     }
##     out
## }
## 
## $svmRadialSigma$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $svmRadialSigma$notes
## [1] "This SVM model tunes over the cost parameter and the RBF kernel parameter sigma. In the latter case, using `tuneLength` will, at most, evaluate six values of the kernel parameter. This enables a broad search over the cost parameter and a relatively narrow search over `sigma`"
## 
## $svmRadialSigma$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Radial Basis Function"   "Robust Methods"         
## 
## $svmRadialSigma$levels
## function (x) 
## kernlab::lev(x)
## 
## $svmRadialSigma$sort
## function (x) 
## {
##     x[order(x$C, -x$sigma), ]
## }
## 
## 
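Per $svmRadialSigma$notes, this variant also tunes sigma (at most six values under tuneLength); a sketch assuming kernlab is installed:

library(caret)
svmrs_fit <- train(Species ~ ., data = iris,
                   method = "svmRadialSigma",
                   tuneLength = 4,
                   trControl = trainControl(method = "cv", number = 5))
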
## $svmRadialWeights
## $svmRadialWeights$label
## [1] "Support Vector Machines with Class Weights"
## 
## $svmRadialWeights$library
## [1] "kernlab"
## 
## $svmRadialWeights$type
## [1] "Classification"
## 
## $svmRadialWeights$parameters
##   parameter   class  label
## 1     sigma numeric  Sigma
## 2         C numeric   Cost
## 3    Weight numeric Weight
## 
## $svmRadialWeights$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     sigmas <- kernlab::sigest(as.matrix(x), na.action = na.omit, 
##         scaled = TRUE)
##     if (search == "grid") {
##         out <- expand.grid(sigma = mean(as.vector(sigmas[-2])), 
##             C = 2^((1:len) - 3), Weight = 1:len)
##     }
##     else {
##         rng <- extendrange(log(sigmas), f = 0.75)
##         out <- data.frame(sigma = exp(runif(len, min = rng[1], 
##             max = rng[2])), C = 2^runif(len, min = -5, max = 10), 
##             Weight = runif(len, min = 1, max = 25))
##     }
##     out
## }
## 
## $svmRadialWeights$loop
## NULL
## 
## $svmRadialWeights$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (param$Weight != 1) {
##         wts <- c(param$Weight, 1)
##         names(wts) <- levels(y)
##     }
##     else wts <- NULL
##     if (any(names(list(...)) == "prob.model") | is.numeric(y)) {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = "rbfdot", 
##             kpar = list(sigma = param$sigma), class.weights = wts, 
##             C = param$C, ...)
##     }
##     else {
##         out <- kernlab::ksvm(x = as.matrix(x), y = y, kernel = "rbfdot", 
##             kpar = list(sigma = param$sigma), class.weights = wts, 
##             C = param$C, prob.model = classProbs, ...)
##     }
##     out
## }
## 
## $svmRadialWeights$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- kernlab::predict(modelFit, newdata)
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $svmRadialWeights$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- try(kernlab::predict(modelFit, newdata, type = "probabilities"), 
##         silent = TRUE)
##     if (class(out)[1] != "try-error") {
##         if (any(out < 0)) {
##             out[out < 0] <- 0
##             out <- t(apply(out, 1, function(x) x/sum(x)))
##         }
##         out <- out[, kernlab::lev(modelFit), drop = FALSE]
##     }
##     else {
##         warning("kernlab class probability calculations failed; returning NAs")
##         out <- matrix(NA, nrow(newdata) * length(kernlab::lev(modelFit)), 
##             ncol = length(kernlab::lev(modelFit)))
##         colnames(out) <- kernlab::lev(modelFit)
##     }
##     out
## }
## 
## $svmRadialWeights$predictors
## function (x, ...) 
## {
##     if (hasTerms(x) & !is.null(x@terms)) {
##         out <- predictors.terms(x@terms)
##     }
##     else {
##         out <- colnames(attr(x, "xmatrix"))
##     }
##     if (is.null(out)) 
##         out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
##     if (is.null(out)) 
##         out <- NA
##     out
## }
## 
## $svmRadialWeights$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "Radial Basis Function"   "Cost Sensitive Learning"
## [5] "Two Class Only"         
## 
## $svmRadialWeights$levels
## function (x) 
## kernlab::lev(x)
## 
## $svmRadialWeights$sort
## function (x) 
## x[order(x$C, -x$sigma, x$Weight), ]
## 
## 
## $svmSpectrumString
## $svmSpectrumString$label
## [1] "Support Vector Machines with Spectrum String Kernel"
## 
## $svmSpectrumString$library
## [1] "kernlab"
## 
## $svmSpectrumString$type
## [1] "Regression"     "Classification"
## 
## $svmSpectrumString$parameters
##   parameter   class  label
## 1    length numeric length
## 2         C numeric   Cost
## 
## $svmSpectrumString$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(length = 2:(len + 1), C = 2^((1:len) - 
##             3))
##     }
##     else {
##         out <- data.frame(length = sample(1:20, size = len, replace = TRUE), 
##             C = 2^runif(len, min = -5, max = 10))
##     }
##     out
## }
## 
## $svmSpectrumString$loop
## NULL
## 
## $svmSpectrumString$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (any(names(list(...)) == "prob.model") | is.numeric(y)) {
##         out <- kernlab::ksvm(x = x[, 1], y = y, kernel = "stringdot", 
##             kpar = list(type = "spectrum", length = param$length), 
##             C = param$C, ...)
##     }
##     else {
##         out <- kernlab::ksvm(x = x[, 1], y = y, kernel = "stringdot", 
##             kpar = list(type = "spectrum", length = param$length), 
##             C = param$C, prob.model = classProbs, ...)
##     }
##     out
## }
## 
## $svmSpectrumString$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     svmPred <- function(obj, x) {
##         hasPM <- !is.null(unlist(obj@prob.model))
##         if (hasPM) {
##             pred <- kernlab::lev(obj)[apply(kernlab::predict(obj, 
##                 x, type = "probabilities"), 1, which.max)]
##         }
##         else pred <- kernlab::predict(obj, x)
##         pred
##     }
##     out <- try(svmPred(modelFit, newdata[, 1]), silent = TRUE)
##     if (is.character(kernlab::lev(modelFit))) {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab class prediction calculations failed; returning NAs")
##             out <- rep("", nrow(newdata))
##             out[seq(along = out)] <- NA
##         }
##     }
##     else {
##         if (class(out)[1] == "try-error") {
##             warning("kernlab prediction calculations failed; returning NAs")
##             out <- rep(NA, nrow(newdata))
##         }
##     }
##     if (is.matrix(out)) 
##         out <- out[, 1]
##     out
## }
## 
## $svmSpectrumString$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- try(kernlab::predict(modelFit, newdata[, 1], type = "probabilities"), 
##         silent = TRUE)
##     if (class(out)[1] != "try-error") {
##         if (any(out < 0)) {
##             out[out < 0] <- 0
##             out <- t(apply(out, 1, function(x) x/sum(x)))
##         }
##         out <- out[, kernlab::lev(modelFit), drop = FALSE]
##     }
##     else {
##         warning("kernlab class probability calculations failed; returning NAs")
##         out <- matrix(NA, nrow(newdata) * length(kernlab::lev(modelFit)), 
##             ncol = length(kernlab::lev(modelFit)))
##         colnames(out) <- kernlab::lev(modelFit)
##     }
##     out
## }
## 
## $svmSpectrumString$predictors
## function (x, ...) 
## {
##     NA
## }
## 
## $svmSpectrumString$tags
## [1] "Kernel Method"           "Support Vector Machines"
## [3] "String Kernel"           "Robust Methods"         
## [5] "Text Mining"            
## 
## $svmSpectrumString$levels
## function (x) 
## kernlab::lev(x)
## 
## $svmSpectrumString$sort
## function (x) 
## {
##     x[order(x$C, -x$length), ]
## }
## 
## 
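The string-kernel entries (svmBoundrangeString, svmExpoString, svmSpectrumString) all pass the text itself, taken from the first column of x, to kernlab. A hedged kernlab-level sketch of the same spectrum kernel on made-up strings:

library(kernlab)
txt <- list("gattaca", "gattaca", "tacagat", "catgatt", "attacag", "ggttaac")
y   <- factor(c("a", "a", "b", "b", "a", "b"))
str_fit <- ksvm(x = txt, y = y, kernel = "stringdot",
                kpar = list(type = "spectrum", length = 3), C = 1)
predict(str_fit, txt)
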
## $tan
## $tan$label
## [1] "Tree Augmented Naive Bayes Classifier"
## 
## $tan$library
## [1] "bnclassify"
## 
## $tan$type
## [1] "Classification"
## 
## $tan$parameters
##   parameter     class               label
## 1     score character      Score Function
## 2    smooth   numeric Smoothing Parameter
## 
## $tan$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(score = c("loglik", "bic", "aic"), 
##             smooth = 0:(len - 1))
##     }
##     else {
##         out <- data.frame(smooth = runif(len, min = 0, max = 10), 
##             score = sample(c("loglik", "bic", "aic"), size = len, 
##                 replace = TRUE))
##     }
##     out
## }
## 
## $tan$loop
## NULL
## 
## $tan$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     bnclassify::bnc("tan_cl", class = ".outcome", dataset = dat, 
##         smooth = param$smooth, dag_args = list(score = as.character(param$score)), 
##         ...)
## }
## 
## $tan$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $tan$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, prob = TRUE)
## }
## 
## $tan$levels
## function (x) 
## x$obsLevels
## 
## $tan$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $tan$tags
## [1] "Bayesian Model"              "Categorical Predictors Only"
## 
## $tan$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
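The tan entry accepts categorical predictors only, so numeric columns must be discretized first; a hedged sketch assuming bnclassify is installed:

library(caret)
disc <- data.frame(lapply(iris[, 1:4], function(v) cut(v, 3)))
tan_fit <- train(x = disc, y = iris$Species,
                 method = "tan",
                 tuneGrid = expand.grid(score = "loglik", smooth = 1),
                 trControl = trainControl(method = "cv", number = 5))
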
## $tanSearch
## $tanSearch$label
## [1] "Tree Augmented Naive Bayes Classifier Structure Learner Wrapper"
## 
## $tanSearch$library
## [1] "bnclassify"
## 
## $tanSearch$type
## [1] "Classification"
## 
## $tanSearch$parameters
##      parameter   class                        label
## 1            k numeric                       #Folds
## 2      epsilon numeric Minimum Absolute Improvement
## 3       smooth numeric          Smoothing Parameter
## 4 final_smooth numeric    Final Smoothing Parameter
## 5           sp logical                 Super-Parent
## 
## $tanSearch$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(k = 10, epsilon = 0.01, smooth = 0.01, 
##             final_smooth = 1, sp = c(TRUE, FALSE))
##     }
##     else {
##         out <- data.frame(k = sample(3:10, size = len, replace = TRUE), 
##             epsilon = runif(len, min = 0, max = 0.05), smooth = runif(len, 
##                 min = 0, max = 10), final_smooth = runif(len, 
##                 min = 0, max = 10), sp = sample(c(TRUE, FALSE), 
##                 size = len, replace = TRUE))
##     }
##     out
## }
## 
## $tanSearch$loop
## NULL
## 
## $tanSearch$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (param$sp) {
##         struct <- bnclassify::tan_hcsp(class = ".outcome", dataset = dat, 
##             k = param$k, epsilon = param$epsilon, smooth = param$smooth, 
##             ...)
##     }
##     else {
##         struct <- bnclassify::tan_hc(class = ".outcome", dataset = dat, 
##             k = param$k, epsilon = param$epsilon, smooth = param$smooth, 
##             ...)
##     }
##     bnclassify::lp(struct, dat, smooth = param$final_smooth, 
##         ...)
## }
## 
## $tanSearch$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)
## }
## 
## $tanSearch$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, prob = TRUE)
## }
## 
## $tanSearch$levels
## function (x) 
## x$obsLevels
## 
## $tanSearch$predictors
## function (x, s = NULL, ...) 
## x$xNames
## 
## $tanSearch$tags
## [1] "Bayesian Model"              "Categorical Predictors Only"
## 
## $tanSearch$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $treebag
## $treebag$label
## [1] "Bagged CART"
## 
## $treebag$library
## [1] "ipred" "plyr"  "e1071"
## 
## $treebag$loop
## NULL
## 
## $treebag$type
## [1] "Regression"     "Classification"
## 
## $treebag$parameters
##   parameter     class     label
## 1 parameter character parameter
## 
## $treebag$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(parameter = "none")
## 
## $treebag$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (!any(names(theDots) == "keepX")) 
##         theDots$keepX <- FALSE
##     modelArgs <- c(list(X = x, y = y), theDots)
##     if (!is.null(wts)) 
##         modelArgs$weights <- wts
##     do.call(ipred::ipredbagg, modelArgs)
## }
## 
## $treebag$predict
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata)
## 
## $treebag$prob
## function (modelFit, newdata, submodels = NULL) 
## predict(modelFit, newdata, type = "prob")
## 
## $treebag$predictors
## function (x, surrogate = TRUE, ...) 
## {
##     code <- getModelInfo("rpart", regex = FALSE)[[1]]$predictors
##     eachTree <- lapply(x$mtree, function(u, surr) code(u$btree, 
##         surrogate = surr), surr = surrogate)
##     unique(unlist(eachTree))
## }
## 
## $treebag$varImp
## function (object, ...) 
## {
##     allImp <- lapply(object$mtrees, function(x) varImp(x$btree), 
##         ...)
##     allImp <- lapply(allImp, function(x) {
##         x$variable <- rownames(x)
##         x
##     })
##     allImp <- do.call("rbind", allImp)
##     meanImp <- plyr::ddply(allImp, plyr::.(variable), function(x) c(Overall = mean(x$Overall)))
##     out <- data.frame(Overall = meanImp$Overall)
##     rownames(out) <- meanImp$variable
##     out
## }
## 
## $treebag$trim
## function (x) 
## {
##     trim_rpart <- function(x) {
##         x$call <- list(na.action = (x$call)$na.action)
##         x$x <- NULL
##         x$y <- NULL
##         x$where <- NULL
##         x
##     }
##     x$mtrees <- lapply(x$mtrees, function(x) {
##         x$bindx <- NULL
##         x$btree <- trim_rpart(x$btree)
##         x
##     })
##     x
## }
## 
## $treebag$tags
## [1] "Tree-Based Model"     "Ensemble Model"       "Bagging"             
## [4] "Accepts Case Weights"
## 
## $treebag$levels
## function (x) 
## levels(x$y)
## 
## $treebag$sort
## function (x) 
## x
## 
## $treebag$oob
## function (x) 
## {
##     if (is.null(x$X)) 
##         stop("to get OOB stats, keepX must be TRUE when calling the bagging function")
##     foo <- function(object, y, x) {
##         holdY <- y[-object$bindx]
##         tmp_x <- x[-object$bindx, , drop = FALSE]
##         if (!is.data.frame(tmp_x)) 
##             tmp_x <- as.data.frame(tmp_x, stringsAsFactors = TRUE)
##         if (is.factor(y)) {
##             tmp <- predict(object$btree, tmp_x, type = "class")
##             tmp <- factor(as.character(tmp), levels = levels(y))
##             out <- c(mean(holdY == tmp), e1071::classAgreement(table(holdY, 
##                 tmp))$kappa)
##         }
##         else {
##             tmp <- predict(object$btree, tmp_x)
##             out <- c(sqrt(mean((tmp - holdY)^2, na.rm = TRUE)), 
##                 cor(holdY, tmp, use = "pairwise.complete.obs")^2)
##         }
##         out
##     }
##     eachStat <- lapply(x$mtrees, foo, y = x$y, x = x$X)
##     eachStat <- matrix(unlist(eachStat), nrow = length(eachStat[[1]]))
##     out <- c(apply(eachStat, 1, mean, na.rm = TRUE), apply(eachStat, 
##         1, sd, na.rm = TRUE))
##     names(out) <- if (is.factor(x$y)) 
##         c("Accuracy", "Kappa", "AccuracySD", "KappaSD")
##     else c("RMSE", "Rsquared", "RMSESD", "RsquaredSD")
##     out
## }
## 
## 
## $vbmpRadial
## $vbmpRadial$label
## [1] "Variational Bayesian Multinomial Probit Regression"
## 
## $vbmpRadial$library
## [1] "vbmp"
## 
## $vbmpRadial$loop
## NULL
## 
## $vbmpRadial$type
## [1] "Classification"
## 
## $vbmpRadial$parameters
##       parameter     class           label
## 1 estimateTheta character Theta Estimated
## 
## $vbmpRadial$grid
## function (x, y, len = NULL, search = "grid") 
## data.frame(estimateTheta = "yes")
## 
## $vbmpRadial$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$bThetaEstimate <- ifelse(param$estimateTheta == 
##             "Yes", TRUE, FALSE)
##         ctl <- theDots$control
##         theDots$control <- NULL
##     }
##     else ctl <- list(bThetaEstimate = ifelse(param$estimateTheta == 
##         "Yes", TRUE, FALSE))
##     if (any(names(theDots) == "theta")) {
##         theta <- theDots$theta
##         theDots$theta <- NULL
##     }
##     else theta <- runif(ncol(x))
##     vbmp::vbmp(x, as.numeric(y), theta = theta, control = ctl, 
##         X.TEST = x[1, ], t.class.TEST = as.numeric(y)[1])
## }
## 
## $vbmpRadial$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     probs <- vbmp::predictCPP(modelFit, newdata)
##     modelFit$obsLevels[apply(probs, 1, which.max)]
## }
## 
## $vbmpRadial$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     probs <- vbmp::predictCPP(modelFit, newdata)
##     colnames(probs) <- modelFit$obsLevels
##     probs
## }
## 
## $vbmpRadial$levels
## function (x) 
## x$obsLevels
## 
## $vbmpRadial$tags
## [1] "Gaussian Process"      "Bayesian Model"        "Radial Basis Function"
## 
## $vbmpRadial$sort
## function (x) 
## x
## 
## 
## $vglmAdjCat
## $vglmAdjCat$label
## [1] "Adjacent Categories Probability Model for Ordinal Data"
## 
## $vglmAdjCat$library
## [1] "VGAM"
## 
## $vglmAdjCat$loop
## NULL
## 
## $vglmAdjCat$type
## [1] "Classification"
## 
## $vglmAdjCat$parameters
##   parameter     class           label
## 1  parallel   logical Parallel Curves
## 2      link character   Link Function
## 
## $vglmAdjCat$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     links <- c("loge")
##     if (search == "grid") {
##         out <- expand.grid(parallel = c(TRUE, FALSE), link = links)
##     }
##     else {
##         out <- data.frame(parallel = sample(c(TRUE, FALSE), size = len, 
##             replace = TRUE), link = sample(links, size = len, 
##             replace = TRUE))
##     }
## }
## 
## $vglmAdjCat$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "family")) {
##         stop(paste("The `family` argument cannot be pass from `train` to `vglm`.", 
##             "If you need to change the values of `reverse`, multiple.responses`", 
##             "or `whitespace` you will have to use a custom model (see", 
##             "http://topepo.github.io/caret/custom_models.html for details)."))
##     }
##     fam <- do.call(VGAM::cumulative, list(link = as.character(param$link), 
##         parallel = param$parallel))
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         run_this <- eval(substitute(expression({
##             paste("VGAM::vglm(.outcome ~ ., ", "VGAM::acat(link = '", 
##                 .lnk, "', ", "parallel = ", .par, "), ", "data = dat)", 
##                 sep = "")
##         }), list(.par = param$parallel, .lnk = as.character(param$link))))
##         run_this <- eval(run_this)
##         out <- eval(parse(text = run_this))
##     }
##     else {
##         run_this <- eval(substitute(expression({
##             paste("VGAM::vglm(.outcome ~ ., ", "VGAM::acat(link = '", 
##                 .lnk, "', ", "parallel = ", .par, "), weights = wts,", 
##                 "data = dat)", sep = "")
##         }), list(.par = param$parallel, .lnk = as.character(param$link))))
##         run_this <- eval(run_this)
##         out <- eval(parse(text = run_this))
##     }
##     out
## }
## 
## $vglmAdjCat$predict
## function (modelFit, newdata, preProc = NULL, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- VGAM::predictvglm(modelFit, newdata = newdata, type = "response")
##     ordered(modelFit@misc$ynames[apply(out, 1, which.max)], levels = modelFit@misc$ynames)
## }
## 
## $vglmAdjCat$prob
## function (modelFit, newdata, preProc = NULL, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- VGAM::predictvglm(modelFit, newdata = newdata, type = "response")
##     out <- as.data.frame(out, stringsAsFactors = TRUE)
##     names(out) <- modelFit@misc$ynames
##     out
## }
## 
## $vglmAdjCat$varImp
## NULL
## 
## $vglmAdjCat$predictors
## function (x, ...) 
## caret:::predictors.terms(x@terms$terms)
## 
## $vglmAdjCat$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $vglmAdjCat$tags
## [1] "Logistic Regression"  "Linear Classifier"    "Accepts Case Weights"
## [4] "Ordinal Outcomes"    
## 
## $vglmAdjCat$sort
## function (x) 
## x
## 
## 
## $vglmContRatio
## $vglmContRatio$label
## [1] "Continuation Ratio Model for Ordinal Data"
## 
## $vglmContRatio$library
## [1] "VGAM"
## 
## $vglmContRatio$loop
## NULL
## 
## $vglmContRatio$type
## [1] "Classification"
## 
## $vglmContRatio$parameters
##   parameter     class           label
## 1  parallel   logical Parallel Curves
## 2      link character   Link Function
## 
## $vglmContRatio$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     links <- c("logit", "probit", "cloglog", "cauchit", "logc")
##     if (search == "grid") {
##         out <- expand.grid(parallel = c(TRUE, FALSE), link = links)
##     }
##     else {
##         out <- data.frame(parallel = sample(c(TRUE, FALSE), size = len, 
##             replace = TRUE), link = sample(links, size = len, 
##             replace = TRUE))
##     }
## }
## 
## $vglmContRatio$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "family")) {
##         stop(paste("The `family` argument cannot be pass from `train` to `vglm`.", 
##             "If you need to change the values of `reverse`", 
##             "or `whitespace` you will have to use a custom model (see", 
##             "http://topepo.github.io/caret/custom_models.html for details)."))
##     }
##     fam <- do.call(VGAM::cumulative, list(link = as.character(param$link), 
##         parallel = param$parallel))
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         run_this <- eval(substitute(expression({
##             paste("VGAM::vglm(.outcome ~ ., ", "VGAM::cratio(link = '", 
##                 .lnk, "', ", "parallel = ", .par, "), ", "data = dat)", 
##                 sep = "")
##         }), list(.par = param$parallel, .lnk = as.character(param$link))))
##         run_this <- eval(run_this)
##         out <- eval(parse(text = run_this))
##     }
##     else {
##         run_this <- eval(substitute(expression({
##             paste("VGAM::vglm(.outcome ~ ., ", "VGAM::cratio(link = '", 
##                 .lnk, "', ", "parallel = ", .par, "), weights = wts,", 
##                 "data = dat)", sep = "")
##         }), list(.par = param$parallel, .lnk = as.character(param$link))))
##         run_this <- eval(run_this)
##         out <- eval(parse(text = run_this))
##     }
##     out
## }
## 
## $vglmContRatio$predict
## function (modelFit, newdata, preProc = NULL, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- VGAM::predictvglm(modelFit, newdata = newdata, type = "response")
##     ordered(modelFit@misc$ynames[apply(out, 1, which.max)], levels = modelFit@misc$ynames)
## }
## 
## $vglmContRatio$prob
## function (modelFit, newdata, preProc = NULL, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- VGAM::predictvglm(modelFit, newdata = newdata, type = "response")
##     out <- as.data.frame(out, stringsAsFactors = TRUE)
##     names(out) <- modelFit@misc$ynames
##     out
## }
## 
## $vglmContRatio$varImp
## NULL
## 
## $vglmContRatio$predictors
## function (x, ...) 
## caret:::predictors.terms(x@terms$terms)
## 
## $vglmContRatio$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $vglmContRatio$tags
## [1] "Logistic Regression"  "Linear Classifier"    "Accepts Case Weights"
## [4] "Ordinal Outcomes"    
## 
## $vglmContRatio$sort
## function (x) 
## x
## 
## 
## $vglmCumulative
## $vglmCumulative$label
## [1] "Cumulative Probability Model for Ordinal Data"
## 
## $vglmCumulative$library
## [1] "VGAM"
## 
## $vglmCumulative$loop
## NULL
## 
## $vglmCumulative$type
## [1] "Classification"
## 
## $vglmCumulative$parameters
##   parameter     class           label
## 1  parallel   logical Parallel Curves
## 2      link character   Link Function
## 
## $vglmCumulative$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     links <- c("logit", "probit", "cloglog", "cauchit", "logc")
##     if (search == "grid") {
##         out <- expand.grid(parallel = c(TRUE, FALSE), link = links)
##     }
##     else {
##         out <- data.frame(parallel = sample(c(TRUE, FALSE), size = len, 
##             replace = TRUE), link = sample(links, size = len, 
##             replace = TRUE))
##     }
## }
## 
## $vglmCumulative$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     theDots <- list(...)
##     if (any(names(theDots) == "family")) {
##         stop(paste("The `family` argument cannot be pass from `train` to `vglm`.", 
##             "If you need to change the values of `reverse`, multiple.responses`", 
##             "or `whitespace` you will have to use a custom model (see", 
##             "http://topepo.github.io/caret/custom_models.html for details)."))
##     }
##     fam <- do.call(VGAM::cumulative, list(link = as.character(param$link), 
##         parallel = param$parallel))
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     if (!is.null(wts)) {
##         run_this <- eval(substitute(expression({
##             paste("VGAM::vglm(.outcome ~ ., ", "VGAM::cumulative(link = '", 
##                 .lnk, "', ", "parallel = ", .par, "), ", "data = dat)", 
##                 sep = "")
##         }), list(.par = param$parallel, .lnk = as.character(param$link))))
##         run_this <- eval(run_this)
##         out <- eval(parse(text = run_this))
##     }
##     else {
##         run_this <- eval(substitute(expression({
##             paste("VGAM::vglm(.outcome ~ ., ", "VGAM::cumulative(link = '", 
##                 .lnk, "', ", "parallel = ", .par, "), weights = wts,", 
##                 "data = dat)", sep = "")
##         }), list(.par = param$parallel, .lnk = as.character(param$link))))
##         run_this <- eval(run_this)
##         out <- eval(parse(text = run_this))
##     }
##     out
## }
## 
## $vglmCumulative$predict
## function (modelFit, newdata, preProc = NULL, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- VGAM::predictvglm(modelFit, newdata = newdata, type = "response")
##     ordered(modelFit@misc$ynames[apply(out, 1, which.max)], levels = modelFit@misc$ynames)
## }
## 
## $vglmCumulative$prob
## function (modelFit, newdata, preProc = NULL, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     out <- VGAM::predictvglm(modelFit, newdata = newdata, type = "response")
##     out <- as.data.frame(out, stringsAsFactors = TRUE)
##     names(out) <- modelFit@misc$ynames
##     out
## }
## 
## $vglmCumulative$varImp
## NULL
## 
## $vglmCumulative$predictors
## function (x, ...) 
## caret:::predictors.terms(x@terms$terms)
## 
## $vglmCumulative$levels
## function (x) 
## if (any(names(x) == "obsLevels")) x$obsLevels else NULL
## 
## $vglmCumulative$tags
## [1] "Logistic Regression"  "Linear Classifier"    "Accepts Case Weights"
## [4] "Ordinal Outcomes"    
## 
## $vglmCumulative$sort
## function (x) 
## x
## 
## 
## $widekernelpls
## $widekernelpls$label
## [1] "Partial Least Squares"
## 
## $widekernelpls$library
## [1] "pls"
## 
## $widekernelpls$type
## [1] "Regression"     "Classification"
## 
## $widekernelpls$parameters
##   parameter   class       label
## 1     ncomp numeric #Components
## 
## $widekernelpls$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
##             by = 1))
##     }
##     else {
##         out <- data.frame(ncomp = unique(sample(1:(ncol(x) - 
##             1), size = len, replace = TRUE)))
##     }
##     out
## }
## 
## $widekernelpls$loop
## function (grid) 
## {
##     grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
##     loop <- grid[1, , drop = FALSE]
##     submodels <- list(grid[-1, , drop = FALSE])
##     list(loop = loop, submodels = submodels)
## }
## 
## $widekernelpls$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     ncomp <- min(ncol(x), param$ncomp)
##     out <- if (is.factor(y)) {
##         caret::plsda(x, y, method = "widekernelpls", ncomp = ncomp, 
##             ...)
##     }
##     else {
##         dat <- if (is.data.frame(x)) 
##             x
##         else as.data.frame(x, stringsAsFactors = TRUE)
##         dat$.outcome <- y
##         pls::plsr(.outcome ~ ., data = dat, method = "widekernelpls", 
##             ncomp = ncomp, ...)
##     }
##     out
## }
## 
## $widekernelpls$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- if (modelFit$problemType == "Classification") {
##         if (!is.matrix(newdata)) 
##             newdata <- as.matrix(newdata)
##         out <- predict(modelFit, newdata, type = "class")
##     }
##     else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels))
##         if (modelFit$problemType == "Classification") {
##             if (length(submodels$ncomp) > 1) {
##                 tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
##             }
##             else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
##         }
##         else {
##             tmp <- as.list(as.data.frame(apply(predict(modelFit, 
##                 newdata, ncomp = submodels$ncomp), 3, function(x) list(x))))
##         }
##         out <- c(list(out), tmp)
##     }
##     out
## }
## 
## $widekernelpls$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.matrix(newdata)) 
##         newdata <- as.matrix(newdata)
##     out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
##     if (length(dim(out)) == 3) {
##         if (dim(out)[1] > 1) {
##             out <- out[, , 1]
##         }
##         else {
##             out <- as.data.frame(t(out[, , 1]), stringsAsFactors = TRUE)
##         }
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$ncomp)) {
##             tmpProb <- predict(modelFit, newdata, type = "prob", 
##                 ncomp = submodels$ncomp[j])
##             if (length(dim(tmpProb)) == 3) {
##                 if (dim(tmpProb)[1] > 1) {
##                   tmpProb <- tmpProb[, , 1]
##                 }
##                 else {
##                   tmpProb <- as.data.frame(t(tmpProb[, , 1]), 
##                     stringsAsFactors = TRUE)
##                 }
##             }
##             tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
##                 drop = FALSE], stringsAsFactors = TRUE)
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $widekernelpls$predictors
## function (x, ...) 
## rownames(x$projection)
## 
## $widekernelpls$varImp
## function (object, estimate = NULL, ...) 
## {
##     library(pls)
##     modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
##     perf <- pls:::MSEP.mvr(object)$val
##     nms <- dimnames(perf)
##     if (length(nms$estimate) > 1) {
##         pIndex <- if (is.null(estimate)) 
##             1
##         else which(nms$estimate == estimate)
##         perf <- perf[pIndex, , , drop = FALSE]
##     }
##     numResp <- dim(modelCoef)[2]
##     if (numResp <= 2) {
##         modelCoef <- modelCoef[, 1, , drop = FALSE]
##         perf <- perf[, 1, ]
##         delta <- -diff(perf)
##         delta <- delta/sum(delta)
##         out <- data.frame(Overall = apply(abs(modelCoef), 1, 
##             weighted.mean, w = delta))
##     }
##     else {
##         perf <- -t(apply(perf[1, , ], 1, diff))
##         perf <- t(apply(perf, 1, function(u) u/sum(u)))
##         out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
##         for (i in 1:numResp) {
##             tmp <- abs(modelCoef[, i, , drop = FALSE])
##             out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
##                 ])
##         }
##         colnames(out) <- dimnames(modelCoef)[[2]]
##         rownames(out) <- dimnames(modelCoef)[[1]]
##     }
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $widekernelpls$levels
## function (x) 
## x$obsLevels
## 
## $widekernelpls$tags
## [1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
## [4] "Linear Regression"    
## 
## $widekernelpls$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $WM
## $WM$label
## [1] "Wang and Mendel Fuzzy Rules"
## 
## $WM$library
## [1] "frbs"
## 
## $WM$type
## [1] "Regression"
## 
## $WM$parameters
##    parameter     class               label
## 1 num.labels   numeric        #Fuzzy Terms
## 2    type.mf character Membership Function
## 
## $WM$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(num.labels = 1 + (1:len) * 2, type.mf = c("GAUSSIAN", 
##             "TRAPEZOID", "TRIANGLE"))
##     }
##     else {
##         out <- data.frame(num.labels = sample(2:20, size = len, 
##             replace = TRUE), type.mf = sample(c("GAUSSIAN", "TRAPEZOID", 
##             "TRIANGLE"), size = len, replace = TRUE))
##     }
##     out
## }
## 
## $WM$loop
## NULL
## 
## $WM$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     args <- list(data.train = as.matrix(cbind(x, y)))
##     theDots <- list(...)
##     if (any(names(theDots) == "control")) {
##         theDots$control$num.labels <- param$num.labels
##         theDots$control$type.mf <- param$type.mf
##     }
##     else theDots$control <- list(num.labels = param$num.labels, 
##         type.mf = param$type.mf, type.defuz = "WAM", type.tnorm = "MIN", 
##         type.snorm = "MAX", type.implication.func = "ZADEH", 
##         name = "sim-0")
##     if (!(any(names(theDots) == "range.data"))) {
##         args$range.data <- apply(args$data.train, 2, extendrange)
##     }
##     do.call(frbs::frbs.learn, c(args, theDots))
## }
## 
## $WM$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     predict(modelFit, newdata)[, 1]
## }
## 
## $WM$prob
## NULL
## 
## $WM$predictors
## function (x, ...) 
## {
##     x$colnames.var[x$colnames.var %in% as.vector(x$rule)]
## }
## 
## $WM$tags
## [1] "Rule-Based Model"
## 
## $WM$levels
## NULL
## 
## $WM$sort
## function (x) 
## x[order(x$num.labels), ]
## 
## 
## $wsrf
## $wsrf$label
## [1] "Weighted Subspace Random Forest"
## 
## $wsrf$library
## [1] "wsrf"
## 
## $wsrf$loop
## NULL
## 
## $wsrf$type
## [1] "Classification"
## 
## $wsrf$parameters
##   parameter   class                         label
## 1      mtry numeric #Randomly Selected Predictors
## 
## $wsrf$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
##             classification = is.factor(y), len = len))
##     }
##     else {
##         out <- data.frame(mtry = unique(sample(1:ncol(x), size = len, 
##             replace = TRUE)))
##     }
##     out
## }
## 
## $wsrf$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     dat <- if (is.data.frame(x)) 
##         x
##     else as.data.frame(x, stringsAsFactors = TRUE)
##     dat$.outcome <- y
##     wsrf::wsrf(.outcome ~ ., data = dat, mtry = min(param$mtry, 
##         ncol(x)), ...)
## }
## 
## $wsrf$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata)$class
## }
## 
## $wsrf$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!is.data.frame(newdata)) 
##         newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
##     predict(modelFit, newdata, type = "prob")$prob
## }
## 
## $wsrf$predictors
## function (x, ...) 
## x$xNames
## 
## $wsrf$varImp
## NULL
## 
## $wsrf$levels
## function (x) 
## x$obsLevels
## 
## $wsrf$tags
## [1] "Random Forest"              "Ensemble Model"            
## [3] "Bagging"                    "Implicit Feature Selection"
## 
## $wsrf$sort
## function (x) 
## x[order(x[, 1]), ]
## 
## 
## $xgbDART
## $xgbDART$label
## [1] "eXtreme Gradient Boosting"
## 
## $xgbDART$library
## [1] "xgboost" "plyr"   
## 
## $xgbDART$check
## function (pkg) 
## {
##     requireNamespace("xgboost")
##     current <- packageDescription("xgboost")$Version
##     expected <- "0.6.4"
##     if (compareVersion(current, expected) < 0) 
##         stop("This modeling workflow requires xgboost version ", 
##             expected, " or greater. Consider using the drat repo.", 
##             call. = FALSE)
## }
## 
## $xgbDART$type
## [1] "Regression"     "Classification"
## 
## $xgbDART$parameters
##          parameter   class                          label
## 1          nrounds numeric          # Boosting Iterations
## 2        max_depth numeric                 Max Tree Depth
## 3              eta numeric                      Shrinkage
## 4            gamma numeric         Minimum Loss Reduction
## 5        subsample numeric           Subsample Percentage
## 6 colsample_bytree numeric     Subsample Ratio of Columns
## 7        rate_drop numeric      Fraction of Trees Dropped
## 8        skip_drop numeric     Prob. of Skipping Drop-out
## 9 min_child_weight numeric Minimum Sum of Instance Weight
## 
## $xgbDART$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(nrounds = floor((1:len) * 50), max_depth = seq(1, 
##             len), eta = c(0.3, 0.4), gamma = 0, subsample = seq(0.5, 
##             1, length = len), colsample_bytree = c(0.6, 0.8), 
##             rate_drop = c(0.01, 0.5), skip_drop = c(0.05, 0.95), 
##             min_child_weight = c(1))
##     }
##     else {
##         out <- data.frame(nrounds = sample(1:1000, size = len, 
##             replace = TRUE), max_depth = sample(1:10, replace = TRUE, 
##             size = len), eta = runif(len, min = 0.001, max = 0.6), 
##             gamma = runif(len, min = 0, max = 10), subsample = runif(len, 
##                 min = 0.25, max = 1), colsample_bytree = runif(len, 
##                 min = 0.3, max = 0.7), rate_drop = runif(len, 
##                 min = 0.01, max = 0.5), skip_drop = runif(len, 
##                 min = 0.05, max = 0.95), min_child_weight = sample(0:20, 
##                 size = len, replace = TRUE))
##         out$nrounds <- floor(out$nrounds)
##     }
##     out
## }
## 
## $xgbDART$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, c("max_depth", "eta", "rate_drop", 
##         "skip_drop", "min_child_weight", "subsample", "colsample_bytree", 
##         "gamma"), function(x) c(nrounds = max(x$nrounds)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$nrounds)) {
##         index <- which(grid$max_depth == loop$max_depth[i] & 
##             grid$eta == loop$eta[i] & grid$gamma == loop$gamma[i] & 
##             grid$subsample == loop$subsample[i] & grid$colsample_bytree == 
##             loop$colsample_bytree[i] & grid$rate_drop == loop$rate_drop[i] & 
##             grid$skip_drop == loop$skip_drop[i] & grid$min_child_weight == 
##             loop$min_child_weight[i])
##         trees <- grid[index, "nrounds"]
##         submodels[[i]] <- data.frame(nrounds = trees[trees != 
##             loop$nrounds[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $xgbDART$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!inherits(x, "xgb.DMatrix")) 
##         x <- as.matrix(x)
##     if (is.factor(y)) {
##         if (length(lev) == 2) {
##             y <- ifelse(y == lev[1], 1, 0)
##             if (!inherits(x, "xgb.DMatrix")) 
##                 x <- xgboost::xgb.DMatrix(x, label = y, missing = NA)
##             else xgboost::setinfo(x, "label", y)
##             if (!is.null(wts)) 
##                 xgboost::setinfo(x, "weight", wts)
##             out <- xgboost::xgb.train(list(max_depth = param$max_depth, 
##                 eta = param$eta, rate_drop = param$rate_drop, 
##                 skip_drop = param$skip_drop, min_child_weight = param$min_child_weight, 
##                 gamma = param$gamma, subsample = param$subsample, 
##                 colsample_bytree = param$colsample_bytree), data = x, 
##                 nrounds = param$nrounds, objective = "binary:logistic", 
##                 booster = "dart", ...)
##         }
##         else {
##             y <- as.numeric(y) - 1
##             if (!inherits(x, "xgb.DMatrix")) 
##                 x <- xgboost::xgb.DMatrix(x, label = y, missing = NA)
##             else xgboost::setinfo(x, "label", y)
##             if (!is.null(wts)) 
##                 xgboost::setinfo(x, "weight", wts)
##             out <- xgboost::xgb.train(list(max_depth = param$max_depth, 
##                 eta = param$eta, rate_drop = param$rate_drop, 
##                 skip_drop = param$skip_drop, min_child_weight = param$min_child_weight, 
##                 gamma = param$gamma, subsample = param$subsample, 
##                 colsample_bytree = param$colsample_bytree), data = x, 
##                 num_class = length(lev), nrounds = param$nrounds, 
##                 objective = "multi:softprob", booster = "dart", 
##                 ...)
##         }
##     }
##     else {
##         if (!inherits(x, "xgb.DMatrix")) 
##             x <- xgboost::xgb.DMatrix(x, label = y, missing = NA)
##         else xgboost::setinfo(x, "label", y)
##         if (!is.null(wts)) 
##             xgboost::setinfo(x, "weight", wts)
##         out <- xgboost::xgb.train(list(max_depth = param$max_depth, 
##             eta = param$eta, rate_drop = param$rate_drop, skip_drop = param$skip_drop, 
##             min_child_weight = param$min_child_weight, gamma = param$gamma, 
##             subsample = param$subsample, colsample_bytree = param$colsample_bytree), 
##             data = x, nrounds = param$nrounds, objective = "reg:squarederror", 
##             booster = "dart", ...)
##     }
##     out
## }
## 
## $xgbDART$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!inherits(newdata, "xgb.DMatrix")) {
##         newdata <- as.matrix(newdata)
##         newdata <- xgboost::xgb.DMatrix(data = newdata, missing = NA)
##     }
##     out <- predict(modelFit, newdata, ntreelimit = modelFit$niter)
##     if (modelFit$problemType == "Classification") {
##         if (length(modelFit$obsLevels) == 2) {
##             out <- ifelse(out >= 0.5, modelFit$obsLevels[1], 
##                 modelFit$obsLevels[2])
##         }
##         else {
##             out <- matrix(out, ncol = length(modelFit$obsLevels), 
##                 byrow = TRUE)
##             out <- modelFit$obsLevels[apply(out, 1, which.max)]
##         }
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$nrounds)) {
##             tmp_pred <- predict(modelFit, newdata, ntreelimit = submodels$nrounds[j])
##             if (modelFit$problemType == "Classification") {
##                 if (length(modelFit$obsLevels) == 2) {
##                   tmp_pred <- ifelse(tmp_pred >= 0.5, modelFit$obsLevels[1], 
##                     modelFit$obsLevels[2])
##                 }
##                 else {
##                   tmp_pred <- matrix(tmp_pred, ncol = length(modelFit$obsLevels), 
##                     byrow = TRUE)
##                   tmp_pred <- modelFit$obsLevels[apply(tmp_pred, 
##                     1, which.max)]
##                 }
##             }
##             tmp[[j + 1]] <- tmp_pred
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $xgbDART$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!inherits(newdata, "xgb.DMatrix")) {
##         newdata <- as.matrix(newdata)
##         newdata <- xgboost::xgb.DMatrix(data = newdata, missing = NA)
##     }
##     if (!is.null(modelFit$param$objective) && modelFit$param$objective == 
##         "binary:logitraw") {
##         p <- predict(modelFit, newdata, ntreelimit = modelFit$niter)
##         out <- binomial()$linkinv(p)
##     }
##     else {
##         out <- predict(modelFit, newdata, ntreelimit = modelFit$niter)
##     }
##     if (length(modelFit$obsLevels) == 2) {
##         out <- cbind(out, 1 - out)
##         colnames(out) <- modelFit$obsLevels
##     }
##     else {
##         out <- matrix(out, ncol = length(modelFit$obsLevels), 
##             byrow = TRUE)
##         colnames(out) <- modelFit$obsLevels
##     }
##     out <- as.data.frame(out, stringsAsFactors = TRUE)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$nrounds)) {
##             tmp_pred <- predict(modelFit, newdata, ntreelimit = submodels$nrounds[j])
##             if (length(modelFit$obsLevels) == 2) {
##                 tmp_pred <- cbind(tmp_pred, 1 - tmp_pred)
##                 colnames(tmp_pred) <- modelFit$obsLevels
##             }
##             else {
##                 tmp_pred <- matrix(tmp_pred, ncol = length(modelFit$obsLevels), 
##                   byrow = TRUE)
##                 colnames(tmp_pred) <- modelFit$obsLevels
##             }
##             tmp_pred <- as.data.frame(tmp_pred, stringsAsFactors = TRUE)
##             tmp[[j + 1]] <- tmp_pred
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $xgbDART$predictors
## function (x, ...) 
## {
##     imp <- xgboost::xgb.importance(x$xNames, model = x)
##     x$xNames[x$xNames %in% imp$Feature]
## }
## 
## $xgbDART$varImp
## function (object, numTrees = NULL, ...) 
## {
##     imp <- xgboost::xgb.importance(object$xNames, model = object)
##     imp <- as.data.frame(imp, stringsAsFactors = TRUE)[, 1:2]
##     rownames(imp) <- as.character(imp[, 1])
##     imp <- imp[, 2, drop = FALSE]
##     colnames(imp) <- "Overall"
##     missing <- object$xNames[!(object$xNames %in% rownames(imp))]
##     missing_imp <- data.frame(Overall = rep(0, times = length(missing)))
##     rownames(missing_imp) <- missing
##     imp <- rbind(imp, missing_imp)
##     imp
## }
## 
## $xgbDART$levels
## function (x) 
## x$obsLevels
## 
## $xgbDART$tags
## [1] "Tree-Based Model"           "Boosting"                  
## [3] "Ensemble Model"             "Implicit Feature Selection"
## [5] "Accepts Case Weights"      
## 
## $xgbDART$sort
## function (x) 
## {
##     x[order(x$nrounds, x$max_depth, x$eta, x$rate_drop, x$skip_drop, 
##         x$min_child_weight, x$subsample, x$colsample_bytree, 
##         x$gamma), ]
## }
## 
## 
## $xgbLinear
## $xgbLinear$label
## [1] "eXtreme Gradient Boosting"
## 
## $xgbLinear$library
## [1] "xgboost"
## 
## $xgbLinear$type
## [1] "Regression"     "Classification"
## 
## $xgbLinear$parameters
##   parameter   class                 label
## 1   nrounds numeric # Boosting Iterations
## 2    lambda numeric     L2 Regularization
## 3     alpha numeric     L1 Regularization
## 4       eta numeric         Learning Rate
## 
## $xgbLinear$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(lambda = c(0, 10^seq(-1, -4, length = len - 
##             1)), alpha = c(0, 10^seq(-1, -4, length = len - 1)), 
##             nrounds = floor((1:len) * 50), eta = 0.3)
##     }
##     else {
##         out <- data.frame(lambda = 10^runif(len, min = -5, 0), 
##             alpha = 10^runif(len, min = -5, 0), nrounds = sample(1:100, 
##                 size = len, replace = TRUE), eta = runif(len, 
##                 max = 3))
##     }
##     out
## }
## 
## $xgbLinear$loop
## NULL
## 
## $xgbLinear$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!inherits(x, "xgb.DMatrix")) 
##         x <- as.matrix(x)
##     if (is.factor(y)) {
##         if (length(lev) == 2) {
##             y <- ifelse(y == lev[1], 1, 0)
##             if (!inherits(x, "xgb.DMatrix")) 
##                 x <- xgboost::xgb.DMatrix(x, label = y, missing = NA)
##             else xgboost::setinfo(x, "label", y)
##             if (!is.null(wts)) 
##                 xgboost::setinfo(x, "weight", wts)
##             out <- xgboost::xgb.train(list(lambda = param$lambda, 
##                 alpha = param$alpha), data = x, nrounds = param$nrounds, 
##                 objective = "binary:logistic", ...)
##         }
##         else {
##             y <- as.numeric(y) - 1
##             if (!inherits(x, "xgb.DMatrix")) 
##                 x <- xgboost::xgb.DMatrix(x, label = y, missing = NA)
##             else xgboost::setinfo(x, "label", y)
##             if (!is.null(wts)) 
##                 xgboost::setinfo(x, "weight", wts)
##             out <- xgboost::xgb.train(list(lambda = param$lambda, 
##                 alpha = param$alpha), data = x, num_class = length(lev), 
##                 nrounds = param$nrounds, objective = "multi:softprob", 
##                 ...)
##         }
##     }
##     else {
##         if (!inherits(x, "xgb.DMatrix")) 
##             x <- xgboost::xgb.DMatrix(x, label = y, missing = NA)
##         else xgboost::setinfo(x, "label", y)
##         if (!is.null(wts)) 
##             xgboost::setinfo(x, "weight", wts)
##         out <- xgboost::xgb.train(list(lambda = param$lambda, 
##             alpha = param$alpha), data = x, nrounds = param$nrounds, 
##             objective = "reg:squarederror", ...)
##     }
##     out
## }
## 
## $xgbLinear$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!inherits(newdata, "xgb.DMatrix")) {
##         newdata <- as.matrix(newdata)
##         newdata <- xgboost::xgb.DMatrix(data = newdata, missing = NA)
##     }
##     out <- predict(modelFit, newdata)
##     if (modelFit$problemType == "Classification") {
##         if (length(modelFit$obsLevels) == 2) {
##             out <- ifelse(out >= 0.5, modelFit$obsLevels[1], 
##                 modelFit$obsLevels[2])
##         }
##         else {
##             out <- matrix(out, ncol = length(modelFit$obsLevels), 
##                 byrow = TRUE)
##             out <- modelFit$obsLevels[apply(out, 1, which.max)]
##         }
##     }
##     out
## }
## 
## $xgbLinear$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!inherits(newdata, "xgb.DMatrix")) {
##         newdata <- as.matrix(newdata)
##         newdata <- xgboost::xgb.DMatrix(data = newdata, missing = NA)
##     }
##     out <- predict(modelFit, newdata)
##     if (length(modelFit$obsLevels) == 2) {
##         out <- cbind(out, 1 - out)
##         colnames(out) <- modelFit$obsLevels
##     }
##     else {
##         out <- matrix(out, ncol = length(modelFit$obsLevels), 
##             byrow = TRUE)
##         colnames(out) <- modelFit$obsLevels
##     }
##     as.data.frame(out, stringsAsFactors = TRUE)
## }
## 
## $xgbLinear$predictors
## function (x, ...) 
## {
##     imp <- xgboost::xgb.importance(x$xNames, model = x)
##     x$xNames[x$xNames %in% imp$Feature]
## }
## 
## $xgbLinear$varImp
## function (object, numTrees = NULL, ...) 
## {
##     imp <- xgboost::xgb.importance(object$xNames, model = object)
##     imp <- as.data.frame(imp, stringsAsFactors = TRUE)[, 1:2]
##     rownames(imp) <- as.character(imp[, 1])
##     imp <- imp[, 2, drop = FALSE]
##     colnames(imp) <- "Overall"
##     missing <- object$xNames[!(object$xNames %in% rownames(imp))]
##     missing_imp <- data.frame(Overall = rep(0, times = length(missing)))
##     rownames(missing_imp) <- missing
##     imp <- rbind(imp, missing_imp)
##     imp
## }
## 
## $xgbLinear$levels
## function (x) 
## x$obsLevels
## 
## $xgbLinear$tags
## [1] "Linear Classifier Models"   "Linear Regression Models"  
## [3] "L1 Regularization Models"   "L2 Regularization Models"  
## [5] "Boosting"                   "Ensemble Model"            
## [7] "Implicit Feature Selection"
## 
## $xgbLinear$sort
## function (x) 
## {
##     x[order(x$nrounds, x$alpha, x$lambda), ]
## }
## 
## 
## $xgbTree
## $xgbTree$label
## [1] "eXtreme Gradient Boosting"
## 
## $xgbTree$library
## [1] "xgboost" "plyr"   
## 
## $xgbTree$type
## [1] "Regression"     "Classification"
## 
## $xgbTree$parameters
##          parameter   class                          label
## 1          nrounds numeric          # Boosting Iterations
## 2        max_depth numeric                 Max Tree Depth
## 3              eta numeric                      Shrinkage
## 4            gamma numeric         Minimum Loss Reduction
## 5 colsample_bytree numeric     Subsample Ratio of Columns
## 6 min_child_weight numeric Minimum Sum of Instance Weight
## 7        subsample numeric           Subsample Percentage
## 
## $xgbTree$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(max_depth = seq(1, len), nrounds = floor((1:len) * 
##             50), eta = c(0.3, 0.4), gamma = 0, colsample_bytree = c(0.6, 
##             0.8), min_child_weight = c(1), subsample = seq(0.5, 
##             1, length = len))
##     }
##     else {
##         out <- data.frame(nrounds = sample(1:1000, size = len, 
##             replace = TRUE), max_depth = sample(1:10, replace = TRUE, 
##             size = len), eta = runif(len, min = 0.001, max = 0.6), 
##             gamma = runif(len, min = 0, max = 10), colsample_bytree = runif(len, 
##                 min = 0.3, max = 0.7), min_child_weight = sample(0:20, 
##                 size = len, replace = TRUE), subsample = runif(len, 
##                 min = 0.25, max = 1))
##         out$nrounds <- floor(out$nrounds)
##     }
##     out
## }
## 
## $xgbTree$loop
## function (grid) 
## {
##     loop <- plyr::ddply(grid, c("eta", "max_depth", "gamma", 
##         "colsample_bytree", "min_child_weight", "subsample"), 
##         function(x) c(nrounds = max(x$nrounds)))
##     submodels <- vector(mode = "list", length = nrow(loop))
##     for (i in seq(along = loop$nrounds)) {
##         index <- which(grid$max_depth == loop$max_depth[i] & 
##             grid$eta == loop$eta[i] & grid$gamma == loop$gamma[i] & 
##             grid$colsample_bytree == loop$colsample_bytree[i] & 
##             grid$min_child_weight == loop$min_child_weight[i] & 
##             grid$subsample == loop$subsample[i])
##         trees <- grid[index, "nrounds"]
##         submodels[[i]] <- data.frame(nrounds = trees[trees != 
##             loop$nrounds[i]])
##     }
##     list(loop = loop, submodels = submodels)
## }
## 
## $xgbTree$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     if (!inherits(x, "xgb.DMatrix")) 
##         x <- as.matrix(x)
##     if (is.factor(y)) {
##         if (length(lev) == 2) {
##             y <- ifelse(y == lev[1], 1, 0)
##             if (!inherits(x, "xgb.DMatrix")) 
##                 x <- xgboost::xgb.DMatrix(x, label = y, missing = NA)
##             else xgboost::setinfo(x, "label", y)
##             if (!is.null(wts)) 
##                 xgboost::setinfo(x, "weight", wts)
##             out <- xgboost::xgb.train(list(eta = param$eta, max_depth = param$max_depth, 
##                 gamma = param$gamma, colsample_bytree = param$colsample_bytree, 
##                 min_child_weight = param$min_child_weight, subsample = param$subsample), 
##                 data = x, nrounds = param$nrounds, objective = "binary:logistic", 
##                 ...)
##         }
##         else {
##             y <- as.numeric(y) - 1
##             if (!inherits(x, "xgb.DMatrix")) 
##                 x <- xgboost::xgb.DMatrix(x, label = y, missing = NA)
##             else xgboost::setinfo(x, "label", y)
##             if (!is.null(wts)) 
##                 xgboost::setinfo(x, "weight", wts)
##             out <- xgboost::xgb.train(list(eta = param$eta, max_depth = param$max_depth, 
##                 gamma = param$gamma, colsample_bytree = param$colsample_bytree, 
##                 min_child_weight = param$min_child_weight, subsample = param$subsample), 
##                 data = x, num_class = length(lev), nrounds = param$nrounds, 
##                 objective = "multi:softprob", ...)
##         }
##     }
##     else {
##         if (!inherits(x, "xgb.DMatrix")) 
##             x <- xgboost::xgb.DMatrix(x, label = y, missing = NA)
##         else xgboost::setinfo(x, "label", y)
##         if (!is.null(wts)) 
##             xgboost::setinfo(x, "weight", wts)
##         out <- xgboost::xgb.train(list(eta = param$eta, max_depth = param$max_depth, 
##             gamma = param$gamma, colsample_bytree = param$colsample_bytree, 
##             min_child_weight = param$min_child_weight, subsample = param$subsample), 
##             data = x, nrounds = param$nrounds, objective = "reg:squarederror", 
##             ...)
##     }
##     out
## }
## 
## $xgbTree$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!inherits(newdata, "xgb.DMatrix")) {
##         newdata <- as.matrix(newdata)
##         newdata <- xgboost::xgb.DMatrix(data = newdata, missing = NA)
##     }
##     out <- predict(modelFit, newdata)
##     if (modelFit$problemType == "Classification") {
##         if (length(modelFit$obsLevels) == 2) {
##             out <- ifelse(out >= 0.5, modelFit$obsLevels[1], 
##                 modelFit$obsLevels[2])
##         }
##         else {
##             out <- matrix(out, ncol = length(modelFit$obsLevels), 
##                 byrow = TRUE)
##             out <- modelFit$obsLevels[apply(out, 1, which.max)]
##         }
##     }
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$nrounds)) {
##             tmp_pred <- predict(modelFit, newdata, ntreelimit = submodels$nrounds[j])
##             if (modelFit$problemType == "Classification") {
##                 if (length(modelFit$obsLevels) == 2) {
##                   tmp_pred <- ifelse(tmp_pred >= 0.5, modelFit$obsLevels[1], 
##                     modelFit$obsLevels[2])
##                 }
##                 else {
##                   tmp_pred <- matrix(tmp_pred, ncol = length(modelFit$obsLevels), 
##                     byrow = TRUE)
##                   tmp_pred <- modelFit$obsLevels[apply(tmp_pred, 
##                     1, which.max)]
##                 }
##             }
##             tmp[[j + 1]] <- tmp_pred
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $xgbTree$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     if (!inherits(newdata, "xgb.DMatrix")) {
##         newdata <- as.matrix(newdata)
##         newdata <- xgboost::xgb.DMatrix(data = newdata, missing = NA)
##     }
##     if (!is.null(modelFit$param$objective) && modelFit$param$objective == 
##         "binary:logitraw") {
##         p <- predict(modelFit, newdata)
##         out <- binomial()$linkinv(p)
##     }
##     else {
##         out <- predict(modelFit, newdata)
##     }
##     if (length(modelFit$obsLevels) == 2) {
##         out <- cbind(out, 1 - out)
##         colnames(out) <- modelFit$obsLevels
##     }
##     else {
##         out <- matrix(out, ncol = length(modelFit$obsLevels), 
##             byrow = TRUE)
##         colnames(out) <- modelFit$obsLevels
##     }
##     out <- as.data.frame(out, stringsAsFactors = TRUE)
##     if (!is.null(submodels)) {
##         tmp <- vector(mode = "list", length = nrow(submodels) + 
##             1)
##         tmp[[1]] <- out
##         for (j in seq(along = submodels$nrounds)) {
##             tmp_pred <- predict(modelFit, newdata, ntreelimit = submodels$nrounds[j])
##             if (length(modelFit$obsLevels) == 2) {
##                 tmp_pred <- cbind(tmp_pred, 1 - tmp_pred)
##                 colnames(tmp_pred) <- modelFit$obsLevels
##             }
##             else {
##                 tmp_pred <- matrix(tmp_pred, ncol = length(modelFit$obsLevels), 
##                   byrow = TRUE)
##                 colnames(tmp_pred) <- modelFit$obsLevels
##             }
##             tmp_pred <- as.data.frame(tmp_pred, stringsAsFactors = TRUE)
##             tmp[[j + 1]] <- tmp_pred
##         }
##         out <- tmp
##     }
##     out
## }
## 
## $xgbTree$predictors
## function (x, ...) 
## {
##     imp <- xgboost::xgb.importance(x$xNames, model = x)
##     x$xNames[x$xNames %in% imp$Feature]
## }
## 
## $xgbTree$varImp
## function (object, numTrees = NULL, ...) 
## {
##     imp <- xgboost::xgb.importance(object$xNames, model = object)
##     imp <- as.data.frame(imp, stringsAsFactors = TRUE)[, 1:2]
##     rownames(imp) <- as.character(imp[, 1])
##     imp <- imp[, 2, drop = FALSE]
##     colnames(imp) <- "Overall"
##     missing <- object$xNames[!(object$xNames %in% rownames(imp))]
##     missing_imp <- data.frame(Overall = rep(0, times = length(missing)))
##     rownames(missing_imp) <- missing
##     imp <- rbind(imp, missing_imp)
##     imp
## }
## 
## $xgbTree$levels
## function (x) 
## x$obsLevels
## 
## $xgbTree$tags
## [1] "Tree-Based Model"           "Boosting"                  
## [3] "Ensemble Model"             "Implicit Feature Selection"
## [5] "Accepts Case Weights"      
## 
## $xgbTree$sort
## function (x) 
## {
##     x[order(x$nrounds, x$max_depth, x$eta, x$gamma, x$colsample_bytree, 
##         x$min_child_weight), ]
## }
## 
## 
## $xyf
## $xyf$label
## [1] "Self-Organizing Maps"
## 
## $xyf$library
## [1] "kohonen"
## 
## $xyf$check
## function (pkg) 
## {
##     requireNamespace("kohonen")
##     current <- packageDescription("kohonen")$Version
##     expected <- "3.0.0"
##     if (compareVersion(current, expected) < 0) 
##         stop("This modeling workflow requires kohonen version ", 
##             expected, "or greater.", call. = FALSE)
## }
## 
## $xyf$loop
## NULL
## 
## $xyf$type
## [1] "Classification" "Regression"    
## 
## $xyf$parameters
##      parameter     class        label
## 1         xdim   numeric         Rows
## 2         ydim   numeric      Columns
## 3 user.weights   numeric Layer Weight
## 4         topo character     Topology
## 
## $xyf$grid
## function (x, y, len = NULL, search = "grid") 
## {
##     if (search == "grid") {
##         out <- expand.grid(xdim = 1:len, ydim = 2:(len + 1), 
##             user.weights = seq(0.2, 0.8, length = len), topo = "hexagonal")
##         out <- subset(out, xdim <= ydim)
##     }
##     else {
##         out <- data.frame(xdim = sample(1:20, size = len * 10, 
##             replace = TRUE), ydim = sample(1:20, size = len * 
##             10, replace = TRUE), topo = sample(c("rectangular", 
##             "hexagonal"), size = len * 10, replace = TRUE), user.weights = runif(len * 
##             10, min = 0.01, max = 0.99))
##         out <- subset(out, xdim <= ydim & xdim * ydim < nrow(x))
##         out <- out[1:min(nrow(out), len), ]
##     }
##     out
## }
## 
## $xyf$fit
## function (x, y, wts, param, lev, last, classProbs, ...) 
## {
##     layer_wts <- c(1 - param$user.weights, param$user.weights)
##     layer_wts <- layer_wts/sum(layer_wts)
##     if (is.numeric(y)) 
##         y <- as.matrix(y, ncol = 1)
##     kohonen::supersom(list(X = as.matrix(x), Y = y), user.weights = layer_wts, 
##         grid = kohonen::somgrid(param$xdim, param$ydim, as.character(param$topo)), 
##         ...)
## }
## 
## $xyf$predict
## function (modelFit, newdata, submodels = NULL) 
## {
##     out <- predict(modelFit, list(X = as.matrix(newdata)), whatmap = "X")$predictions$Y
##     if (is.factor(out)) 
##         out <- as.character(out)
##     out
## }
## 
## $xyf$prob
## function (modelFit, newdata, submodels = NULL) 
## {
##     preds <- predict(modelFit, list(X = as.matrix(newdata)), 
##         whatmap = "X")
##     preds <- preds$unit.predictions$Y[preds$unit.classif, ]
##     as.data.frame(preds)
## }
## 
## $xyf$levels
## function (x) 
## x$obsLevels
## 
## $xyf$tags
## [1] "Self-Organising Maps"
## 
## $xyf$sort
## function (x) 
## x[order(x$xdim, x$ydim), ]
## 
## $xyf$notes
## [1] "As of version 3.0.0 of the kohonen package, the argument `user.weights` replaces the old `alpha` parameter. `user.weights` is usually a vector of relative weights such as `c(1, 3)` but is parameterized here as a proportion such as `c(1-.75, .75)` where the .75 is the value of the tuning parameter passed to `train` and indicates that the outcome layer has 3 times the weight as the predictor layer."
## Bootstrap cross-validation with the iris data (Bootstrap Resampling)
# 1. Load the package
library(caret)
# 2. Prepare the data
data(iris)
set.seed(123)
# 3. Configure the bootstrap resampling
control <- trainControl(method = "boot", number = 25) # 25 resamples drawn with replacement
# 4. Train the model (here: a classification tree)
model <- train(Species ~ ., data = iris,
               method = "rpart",
               trControl = control)
# 5. Inspect the results
print(model)
## CART 
## 
## 150 samples
##   4 predictor
##   3 classes: 'setosa', 'versicolor', 'virginica' 
## 
## No pre-processing
## Resampling: Bootstrapped (25 reps) 
## Summary of sample sizes: 150, 150, 150, 150, 150, 150, ... 
## Resampling results across tuning parameters:
## 
##   cp    Accuracy   Kappa    
##   0.00  0.9399680  0.9087443
##   0.44  0.7728363  0.6687838
##   0.50  0.4738282  0.2576720
## 
## Accuracy was used to select the optimal model using the largest value.
## The final value used for the model was cp = 0.
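The winning tuning value is also stored on the fit object. A short sketch, assuming caret's standard bestTune element and tuneGrid argument (the refit is illustrative, not part of the run above):

model$bestTune  # data frame with the selected cp (cp = 0 here)
# Refit with cp fixed to skip the tuning search entirely:
fixed <- train(Species ~ ., data = iris, method = "rpart",
               trControl = control, tuneGrid = data.frame(cp = 0))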
model$resample
##     Accuracy     Kappa   Resample
## 1  0.9322034 0.8964004 Resample09
## 2  0.9454545 0.9158163 Resample05
## 3  0.9649123 0.9467290 Resample01
## 4  0.9310345 0.8913858 Resample10
## 5  0.9423077 0.9133333 Resample06
## 6  0.9600000 0.9396135 Resample02
## 7  0.9166667 0.8714653 Resample11
## 8  0.9661017 0.9490061 Resample07
## 9  0.9347826 0.9014286 Resample03
## 10 0.9600000 0.9396864 Resample12
## 11 0.9200000 0.8771499 Resample08
## 12 0.9365079 0.9045455 Resample04
## 13 0.9636364 0.9454365 Resample13
## 14 0.9464286 0.9185257 Resample22
## 15 0.8823529 0.8237327 Resample18
## 16 0.9322034 0.8968531 Resample14
## 17 0.9322034 0.8981881 Resample23
## 18 0.8846154 0.8237288 Resample19
## 19 0.9056604 0.8574502 Resample15
## 20 0.9433962 0.9147453 Resample24
## 21 0.9218750 0.8800150 Resample20
## 22 0.9655172 0.9477477 Resample16
## 23 0.9824561 0.9732017 Resample25
## 24 0.9824561 0.9734266 Resample21
## 25 0.9464286 0.9189971 Resample17
# 6. Inspect per-resample performance (optional)
head(model$resample)
##    Accuracy     Kappa   Resample
## 1 0.9322034 0.8964004 Resample09
## 2 0.9454545 0.9158163 Resample05
## 3 0.9649123 0.9467290 Resample01
## 4 0.9310345 0.8913858 Resample10
## 5 0.9423077 0.9133333 Resample06
## 6 0.9600000 0.9396135 Resample02
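To condense the 25 bootstrap estimates into single figures, the resample table can be summarised with base R; a minimal sketch over the Accuracy and Kappa columns shown above:

# Mean and spread of the bootstrap performance estimates
mean(model$resample$Accuracy)
sd(model$resample$Accuracy)
summary(model$resample$Kappa)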