https://rpubs.com/fumi/592985 (reference material)

https://qiita.com/nkjm/items/e751e49c7d2c619cbeab

https://momonoki2017.blogspot.com/2018/04/r007-riris.html

https://rpubs.com/fumi/582119

http://d-m-l.jp/Rbiz/task_rf.html

https://funatsu-lab.github.io/open-course-ware/machine-learning/random-forest/

http://takenaka-akio.org/doc/r_auto/chapter_03.html

http://yut.hatenablog.com/entry/20120827/1346024147

https://mjin.doshisha.ac.jp/R/Chap_23/23.html

https://qiita.com/TsutomuNakamura/items/a1a6a02cb9bb0dcbb37f What is a confusion matrix

——– Adding a variable-selection step to the random forest

https://shohei-doi.github.io/notes/posts/2019-05-27-cross-validation/

——– Random forest overview

http://d-m-l.jp/Rbiz/task_rf.html Random forest is a machine-learning algorithm that draws random samples from the training data to build a large number of decision trees and then decides the result by majority vote over those trees. It is accurate, generalizes well, and is easy to use.
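As a minimal illustration of the "many trees, majority vote" idea, here is a toy sketch on R's built-in iris data (my example, not from the reference; the sewer data is analyzed below):

library(randomForest)
set.seed(1)  # reproducible bootstrap samples
rf <- randomForest(Species ~ ., data = iris, ntree = 500)  # 500 trees, each grown on a bootstrap sample
print(rf)                # OOB error estimate and confusion matrix
predict(rf, head(iris))  # each prediction is the majority vote of the 500 trees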

Characteristics of random forests

————————————————————————-

The algorithm for analyzing data with a random forest

#Data used with the random forest
- Titanics.rpart
- Titanic
- Titanich is a version of train that has already been statistically processed, so it is unsuited to this exercise
- cordata is data processed for graphing: Pclass in train was split into 3 levels and sex into 2 (i.e. partly made categorical), while Fare and age stay quantitative, names are left as-is, and missing values have been imputed
- the dummy variables (dummy_varn etc.) replace the categorical data with integers, which does not fit the discussion below, so they are not used
- lldata could also be used; it is dum and not_dum joined before applying makedummies() (which creates the dummy variables)
- use train2

#With randomForest, avoid using character columns http://ushi-goroshi.hatenablog.com/entry/2019/01/30/171259
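A small base-R sketch of that advice (my addition; df stands for whatever data frame is being modeled): convert character columns to factors before fitting.

is_chr <- sapply(df, is.character)           # locate the character columns
df[is_chr] <- lapply(df[is_chr], as.factor)  # convert them to factors for randomForest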

library(car)
## Loading required package: carData
library(caret)
## Warning: package 'caret' was built under R version 3.6.2
## Loading required package: lattice
library(cluster)
library(dummies)
## dummies-1.5.6 provided by Decision Patterns
library(data.table)
## Warning: package 'data.table' was built under R version 3.6.2
library(dplyr)
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:data.table':
## 
##     between, first, last
## The following object is masked from 'package:car':
## 
##     recode
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
library(e1071)
## Warning: package 'e1071' was built under R version 3.6.2
library(epitools)
library(effects)
## Warning: package 'effects' was built under R version 3.6.3
## Registered S3 methods overwritten by 'lme4':
##   method                          from
##   cooks.distance.influence.merMod car 
##   influence.merMod                car 
##   dfbeta.influence.merMod         car 
##   dfbetas.influence.merMod        car
## Use the command
##     lattice::trellis.par.set(effectsTheme())
##   to customize lattice options for effects plots.
## See ?effectsTheme for details.
library(ggplot2)
library(ggthemes)
## Warning: package 'ggthemes' was built under R version 3.6.2
library(randomForest)
## Warning: package 'randomForest' was built under R version 3.6.3
## randomForest 4.6-14
## Type rfNews() to see new features/changes/bug fixes.
## 
## Attaching package: 'randomForest'
## The following object is masked from 'package:dplyr':
## 
##     combine
## The following object is masked from 'package:ggplot2':
## 
##     margin
library(ranger)
## Warning: package 'ranger' was built under R version 3.6.2
## 
## Attaching package: 'ranger'
## The following object is masked from 'package:randomForest':
## 
##     importance
library(rgl)
library(rattle)
## Warning: package 'rattle' was built under R version 3.6.2
## Rattle: A free graphical interface for data science with R.
## Version 5.3.0 Copyright (c) 2006-2018 Togaware Pty Ltd.
## Type 'rattle()' to shake, rattle, and roll your data.
## 
## Attaching package: 'rattle'
## The following object is masked from 'package:ranger':
## 
##     importance
## The following object is masked from 'package:randomForest':
## 
##     importance
library(readr)
## Warning: package 'readr' was built under R version 3.6.2
library(rpart.plot)
## Warning: package 'rpart.plot' was built under R version 3.6.2
## Loading required package: rpart
library(rpart)
library(readr)
library(reshape)
## Warning: package 'reshape' was built under R version 3.6.2
## 
## Attaching package: 'reshape'
## The following object is masked from 'package:dplyr':
## 
##     rename
## The following object is masked from 'package:data.table':
## 
##     melt
library(rsconnect)
## Warning: package 'rsconnect' was built under R version 3.6.2
library(reshape2)
## 
## Attaching package: 'reshape2'
## The following objects are masked from 'package:reshape':
## 
##     colsplit, melt, recast
## The following objects are masked from 'package:data.table':
## 
##     dcast, melt
library(tidyr)
## 
## Attaching package: 'tidyr'
## The following object is masked from 'package:reshape2':
## 
##     smiths
## The following objects are masked from 'package:reshape':
## 
##     expand, smiths
library(xtable)
library(nnet)
## Warning: package 'nnet' was built under R version 3.6.2
library(stargazer)
## 
## Please cite as:
##  Hlavac, Marek (2018). stargazer: Well-Formatted Regression and Summary Statistics Tables.
##  R package version 5.2.2. https://CRAN.R-project.org/package=stargazer
library(randomForest)
library(tidyverse)
## Warning: package 'tidyverse' was built under R version 3.6.2
## -- Attaching packages --------------------------------------------------------------------- tidyverse 1.3.0 --
## √ tibble  2.1.3     √ stringr 1.4.0
## √ purrr   0.3.3     √ forcats 0.4.0
## Warning: package 'stringr' was built under R version 3.6.2
## Warning: package 'forcats' was built under R version 3.6.2
## -- Conflicts ------------------------------------------------------------------------ tidyverse_conflicts() --
## x dplyr::between()        masks data.table::between()
## x randomForest::combine() masks dplyr::combine()
## x tidyr::expand()         masks reshape::expand()
## x dplyr::filter()         masks stats::filter()
## x dplyr::first()          masks data.table::first()
## x dplyr::lag()            masks stats::lag()
## x dplyr::last()           masks data.table::last()
## x purrr::lift()           masks caret::lift()
## x randomForest::margin()  masks ggplot2::margin()
## x dplyr::recode()         masks car::recode()
## x reshape::rename()       masks dplyr::rename()
## x purrr::some()           masks car::some()
## x purrr::transpose()      masks data.table::transpose()
require(ranger)

Read the sewer data and show basic statistics for gesui # the textbook uses the name logit

gesui = read_csv("osui.csv")
## Parsed with column specification:
## cols(
##   OBJECTID = col_double(),
##   sys_name = col_double(),
##   slope = col_double(),
##   uedokaburi = col_double(),
##   masuhonsuu = col_double(),
##   long = col_double(),
##   kubun = col_double(),
##   did = col_double(),
##   kouhou = col_double(),
##   nendo = col_double(),
##   ekijyouka = col_double(),
##   kyouyounensuu = col_double(),
##   kansyu = col_double(),
##   kei = col_double(),
##   kinkyuudo = col_double(),
##   taisyo = col_double()
## )
gesui<- data.frame(gesui) # the textbook uses the name logit
#drop the OBJECTID and sys_name columns
gesui <- gesui[-1:-2] 
stargazer(as.data.frame(gesui),type = "html")
Statistic N Mean St. Dev. Min Pctl(25) Pctl(75) Max
slope 1,423 3.437 2.323 -6 1.8 4.5 10
uedokaburi 1,423 4.371 2.475 0.360 2.727 4.949 13.863
masuhonsuu 1,423 1.338 1.808 0 0 2 13
long 1,423 35.129 18.972 0.970 21.445 46.510 196.280
kubun 1,423 1.204 0.403 1 1 1 2
did 1,423 0.696 0.460 0 0 1 1
kouhou 1,423 0.415 0.493 0 0 1 1
nendo 1,423 1,982.967 5.973 1,974 1,978 1,990 2,006
ekijyouka 1,423 0.396 0.611 0 0 1 4
kyouyounensuu 1,423 33.033 5.973 10 26 38 42
kansyu 1,423 1.198 0.399 1 1 1 2
kei 1,423 517.182 308.765 200 250 800 1,650
kinkyuudo 1,423 1.297 1.457 0 0 3 3
taisyo 1,423 0.448 0.497 0 0 1 1
gesui$kansyu <- as.factor(gesui$kansyu)
gesui$taisyo <- as.factor(gesui$taisyo)
gesui$kubun <- as.factor(gesui$kubun)
gesui$did <- as.factor(gesui$did)
gesui$ekijyouka <- as.factor(gesui$ekijyouka)
stargazer(as.data.frame(gesui),type = "html")
Statistic N Mean St. Dev. Min Pctl(25) Pctl(75) Max
slope 1,423 3.437 2.323 -6 1.8 4.5 10
uedokaburi 1,423 4.371 2.475 0.360 2.727 4.949 13.863
masuhonsuu 1,423 1.338 1.808 0 0 2 13
long 1,423 35.129 18.972 0.970 21.445 46.510 196.280
kouhou 1,423 0.415 0.493 0 0 1 1
nendo 1,423 1,982.967 5.973 1,974 1,978 1,990 2,006
kyouyounensuu 1,423 33.033 5.973 10 26 38 42
kei 1,423 517.182 308.765 200 250 800 1,650
kinkyuudo 1,423 1.297 1.457 0 0 3 3
exclude_cols = c("OBJECTID","kinkyuudo")
gesui = gesui[ !names(gesui) %in% exclude_cols ]
model = randomForest(taisyo ~ ., data = gesui, importance = TRUE)
model
## 
## Call:
##  randomForest(formula = taisyo ~ ., data = gesui, importance = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 500
## No. of variables tried at each split: 3
## 
##         OOB estimate of  error rate: 25.37%
## Confusion matrix:
##     0   1 class.error
## 0 635 151   0.1921120
## 1 210 427   0.3296703
predition = predict(model, gesui)
model$importance
##                         0           1 MeanDecreaseAccuracy MeanDecreaseGini
## slope         0.025608462 0.022065049          0.024047412        106.45432
## uedokaburi    0.012218781 0.052642943          0.030296244        123.64079
## masuhonsuu    0.009863123 0.002843574          0.006748572         41.28888
## long          0.018866841 0.032789559          0.025122605        116.10706
## kubun         0.013028624 0.004644896          0.009274256         11.03075
## did           0.014593589 0.024445627          0.018981875         17.43002
## kouhou        0.005684479 0.013064768          0.008944084         12.81962
## nendo         0.065077098 0.052563154          0.059494306         59.65211
## ekijyouka     0.027300030 0.023276388          0.025533277         26.09863
## kyouyounensuu 0.059719399 0.052625005          0.056582981         57.98316
## kansyu        0.046484848 0.012755621          0.031343855         20.31955
## kei           0.040651848 0.049804258          0.044731434         57.18598
varImpPlot(model)

predition
## (predicted class, 0 or 1, for each of the 1,423 records; lengthy output omitted)
## Levels: 0 1
#gesui$kinkyuudo <- as.factor(gesui$kinkyuudo)
names(gesui)
##  [1] "slope"         "uedokaburi"    "masuhonsuu"    "long"         
##  [5] "kubun"         "did"           "kouhou"        "nendo"        
##  [9] "ekijyouka"     "kyouyounensuu" "kansyu"        "kei"          
## [13] "taisyo"
train <- gesui[1:300,]
test <- gesui[301:478,]

 

model1 = randomForest(taisyo ~ ., data = gesui)
model1
## 
## Call:
##  randomForest(formula = taisyo ~ ., data = gesui) 
##                Type of random forest: classification
##                      Number of trees: 500
## No. of variables tried at each split: 3
## 
##         OOB estimate of  error rate: 25.23%
## Confusion matrix:
##     0   1 class.error
## 0 636 150   0.1908397
## 1 209 428   0.3281005
model2 = randomForest(taisyo ~ ., data = train)
model2
## 
## Call:
##  randomForest(formula = taisyo ~ ., data = train) 
##                Type of random forest: classification
##                      Number of trees: 500
## No. of variables tried at each split: 3
## 
##         OOB estimate of  error rate: 25.33%
## Confusion matrix:
##     0  1 class.error
## 0 126 35   0.2173913
## 1  41 98   0.2949640

#The reference says the variable importances come from importance(model1), but here that call fails and model1$importance must be used instead: ranger and rattle were attached after randomForest, and both mask its importance() (see the attach messages above).
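A one-line alternative (my addition) is to call the generic through an explicit namespace, which bypasses the masking:

randomForest::importance(model1)  # same values as model1$importance (MeanDecreaseGini only, since importance=TRUE was not set)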

model1$importance
##               MeanDecreaseGini
## slope                107.35312
## uedokaburi           124.59860
## masuhonsuu            41.84487
## long                 115.46781
## kubun                 11.00491
## did                   17.17032
## kouhou                13.41964
## nendo                 58.30324
## ekijyouka             26.69795
## kyouyounensuu         58.90142
## kansyu                19.84494
## kei                   57.57520
model2$importance
##               MeanDecreaseGini
## slope                22.195808
## uedokaburi           25.090792
## masuhonsuu           10.324959
## long                 27.590553
## kubun                 2.077771
## did                   4.401796
## kouhou                2.664155
## nendo                11.531594
## ekijyouka             9.856569
## kyouyounensuu        11.554479
## kansyu                4.244605
## kei                  12.094211
varImpPlot(model1)

varImpPlot(model2)

Random forest tuning (data validation)

http://sfchaos.hatenablog.com/entry/20150628/p1

#Note 1: set.seed(123) fixes the random-number seed https://qiita.com/aich_08_/items/6d885c91c9d461514018

First, simply run the tuneRF function without any special settings. Its first argument is the explanatory variables and its second the response variable; setting the doBest argument to TRUE makes it return the best-scoring model.

dim(gesui)
## [1] 1423   13
sapply(gesui, class)
##         slope    uedokaburi    masuhonsuu          long         kubun 
##     "numeric"     "numeric"     "numeric"     "numeric"      "factor" 
##           did        kouhou         nendo     ekijyouka kyouyounensuu 
##      "factor"     "numeric"     "numeric"      "factor"     "numeric" 
##        kansyu           kei        taisyo 
##      "factor"     "numeric"      "factor"
head(gesui)
##   slope uedokaburi masuhonsuu  long kubun did kouhou nendo ekijyouka
## 1  0.00   3.758000          0  9.94     1   1      1  1983         1
## 2  3.90   3.763000          0 15.40     2   1      0  1976         1
## 3  1.32   3.538794          1 14.85     2   1      0  1976         1
## 4  1.22   1.054575          1  3.39     2   1      0  2004         1
## 5  2.50   1.533001          0  7.78     2   1      0  1988         0
## 6  3.50   4.122386          0  9.75     1   1      1  1976         1
##   kyouyounensuu kansyu  kei taisyo
## 1            33      1 1100      0
## 2            40      1  800      1
## 3            40      1  250      1
## 4            12      2  200      0
## 5            28      2  250      1
## 6            40      1 1100      0
set.seed(123)  # Note 1
gesui.tune <- tuneRF(gesui %>% select(-taisyo) ,  # explanatory variables
     gesui$taisyo,  # response variable
  doBest = T)  # flag: return the forest with the best mtry (variables tried per split)
## mtry = 3  OOB error = 26.84% 
## Searching left ...
## mtry = 2     OOB error = 25.02% 
## 0.06806283 0.05 
## mtry = 1     OOB error = 30.01% 
## -0.1994382 0.05 
## Searching right ...
## mtry = 6     OOB error = 28.11% 
## -0.1235955 0.05

From this output, the Out-of-Bag (OOB) error is 26.84% when 3 variables are tried at each split, 25.02% with 2, 30.01% with 1, and 28.11% with 6. The OOB error is therefore smallest at mtry = 2, and that looks like the setting to use.*1

Increasing the number of trees: the ntreeTry argument defaults to 50, so only 50 trees are built during tuning. Let's specify 500 trees instead.

set.seed(123)  # Note 1
gesui.tune <- tuneRF(gesui %>% select(-taisyo) ,  # explanatory variables
  gesui$taisyo,  # response variable
  ntreeTry=500,  # number of trees
   trace = TRUE, 
  doBest = T)
## mtry = 3  OOB error = 26.21% 
## Searching left ...
## mtry = 2     OOB error = 26.21% 
## 0 0.05 
## Searching right ...
## mtry = 6     OOB error = 25.65% 
## 0.02144772 0.05

This time mtry = 2 and mtry = 3 tie at an OOB error of 26.21%, while mtry = 6 gives the smallest error (25.65%); doBest = T therefore returns a forest with mtry = 6.

The mtry found by tuning (stored in $mtry of the object returned by tuneRF()) is passed as an argument to randomForest().

gesui.rf <- randomForest(  # build the prediction/classification model
  taisyo ~ ., # model formula
  data = gesui,  # data
  mtry = gesui.tune$mtry)  # number of variables tried at each split
gesui.rf
## 
## Call:
##  randomForest(formula = taisyo ~ ., data = gesui, mtry = gesui.tune$mtry) 
##                Type of random forest: classification
##                      Number of trees: 500
## No. of variables tried at each split: 6
## 
##         OOB estimate of  error rate: 26.56%
## Confusion matrix:
##     0   1 class.error
## 0 615 171   0.2175573
## 1 207 430   0.3249608
x=gesui.rf$importance

Reading the output: "OOB estimate of error rate" is the misclassification rate. In the confusion matrix, the rows are the actual classes and the columns the predicted classes. In the matrix above, of the 786 records that are actually "0" (urgency 3 or lower), 615 were predicted "0" and 171 were predicted "1".
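For example, the overall accuracy can be recomputed from the stored confusion matrix (my sketch):

cm <- gesui.rf$confusion[, 1:2]  # drop the class.error column
sum(diag(cm)) / sum(cm)          # overall accuracy; 1 minus this is the OOB error rate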

#Output a table of variables ranked by importance

rank <- data.frame(x)  # convert the importance matrix to a data frame
rank$factor <- rownames(rank)  # add the factors stored as row names as a column
rank <- rank[order(rank[,1], decreasing=T),]  # sort by importance (loosely analogous to partial regression coefficients)
rownames(rank) <- 1:nrow(rank)  # use the rank as the row names
rank
##    MeanDecreaseGini        factor
## 1        148.152131    uedokaburi
## 2        136.057274          long
## 3        119.694564         slope
## 4         59.052743           kei
## 5         55.132503         nendo
## 6         53.761260 kyouyounensuu
## 7         43.242571    masuhonsuu
## 8         25.623134     ekijyouka
## 9         22.195448        kansyu
## 10        16.698539           did
## 11        10.761155        kouhou
## 12         9.886133         kubun

Plot the variables in order of importance

varImpPlot(gesui.rf)

#plot(gesui, col=c(2, 3, 4)[gesui$kinkyuudo])
plot(gesui, col=c(2, 3)[gesui$taisyo])

Predicting urgency (kinkyuudo)

- Read the sewer data and show basic statistics for gesui # the textbook uses the name logit

gesui = read_csv("gesuidou.csv")
## Parsed with column specification:
## cols(
##   OBJECTID = col_double(),
##   slope = col_double(),
##   long = col_double(),
##   uedokaburi = col_double(),
##   sitadokaburi = col_double(),
##   masuhonsuu = col_double(),
##   nendo = col_double(),
##   kei = col_double(),
##   kubun = col_double(),
##   did = col_double(),
##   kouhou = col_double(),
##   ekijyouka = col_double(),
##   kansyu = col_double(),
##   kinkyuudo = col_double(),
##   taisyo = col_double()
## )
gesui<- data.frame(gesui) # the textbook uses the name logit

gesui$kansyu <- as.factor(gesui$kansyu)
gesui$taisyo <- as.factor(gesui$taisyo)
gesui$kubun <- as.factor(gesui$kubun)
gesui$did <- as.factor(gesui$did)
gesui$ekijyouka <- as.factor(gesui$ekijyouka)
#gesui <- gesui[-1] # drop the OBJECTID column
exclude_cols = c("OBJECTID","sys_name")
gesui = gesui[ !names(gesui) %in% exclude_cols ]
set.seed(123)  # Note 1
gesui.tune <- tuneRF(gesui %>% select(-kinkyuudo) ,  # explanatory variables
  gesui$kinkyuudo,  # response variable
  ntreeTry=500,  # number of trees
   trace = TRUE, 
  doBest = T)
## Warning in randomForest.default(x, y, mtry = mtryStart, ntree = ntreeTry, :
## The response has five or fewer unique values. Are you sure you want to do
## regression?
## mtry = 4  OOB error = 1.480305 
## Searching left ...
## Warning in randomForest.default(x, y, mtry = mtryCur, ntree = ntreeTry, :
## The response has five or fewer unique values. Are you sure you want to do
## regression?
## mtry = 2     OOB error = 1.486863 
## -0.004430167 0.05 
## Searching right ...
## Warning in randomForest.default(x, y, mtry = mtryCur, ntree = ntreeTry, :
## The response has five or fewer unique values. Are you sure you want to do
## regression?
## mtry = 8     OOB error = 1.519686 
## -0.02660376 0.05
## Warning in randomForest.default(x, y, mtry = res[which.min(res[, 2]), 1], :
## The response has five or fewer unique values. Are you sure you want to do
## regression?

Note the warnings: kinkyuudo has five or fewer unique values, so tuneRF fit a regression forest here rather than a classifier. Of the values tried, mtry = 4 gives the smallest OOB error (MSE 1.480, versus 1.487 at mtry = 2 and 1.520 at mtry = 8).
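If classification over the urgency classes is what is intended, kinkyuudo can be converted to a factor first, mirroring the as.factor() conversions used elsewhere in this document (a sketch, not run here; the analysis below keeps the regression fit):

gesui$kinkyuudo <- as.factor(gesui$kinkyuudo)       # treat urgency as a class label
set.seed(123)
gesui.tune <- tuneRF(gesui %>% select(-kinkyuudo),  # explanatory variables
  gesui$kinkyuudo,                                  # response, now a factor -> classification forest
  ntreeTry = 500, trace = TRUE, doBest = T)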

The mtry found by tuning (stored in $mtry of the object returned by tuneRF()) is passed as an argument to randomForest().

gesui.rf <- randomForest(  # build the prediction model
  kinkyuudo ~ ., # model formula
  data = gesui,  # data
  mtry = gesui.tune$mtry)  # number of variables tried at each split
## Warning in randomForest.default(m, y, ...): The response has five or fewer
## unique values. Are you sure you want to do regression?
gesui.rf
## 
## Call:
##  randomForest(formula = kinkyuudo ~ ., data = gesui, mtry = gesui.tune$mtry) 
##                Type of random forest: regression
##                      Number of trees: 500
## No. of variables tried at each split: 4
## 
##           Mean of squared residuals: 1.476213
##                     % Var explained: 30.78
x=gesui.rf$importance

Reading the output: because this run is a regression forest, no confusion matrix is produced; instead the output reports the mean of squared residuals (1.476) and the percentage of variance explained (30.78%).
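As a sanity check on the regression fit (my sketch), the OOB predictions can be extracted directly; the mean of their squared residuals reproduces the figure above:

pred_oob <- predict(gesui.rf)               # predict() with no newdata returns OOB predictions
mean((pred_oob - gesui$kinkyuudo)^2)        # should match the mean of squared residuals above
sqrt(mean((pred_oob - gesui$kinkyuudo)^2))  # RMSE, in urgency units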

#Output a table of variables ranked by importance

rank <- data.frame(x)  # convert the importance matrix to a data frame
rank$factor <- rownames(rank)  # add the factors stored as row names as a column
rank <- rank[order(rank[,1], decreasing=T),]  # sort by importance (loosely analogous to partial regression coefficients)
rownames(rank) <- 1:nrow(rank)  # use the rank as the row names
rank
##    IncNodePurity       factor
## 1     137.118695         long
## 2     132.486026 sitadokaburi
## 3     130.789376        slope
## 4     124.661796   uedokaburi
## 5     116.354171       kansyu
## 6      69.429390        nendo
## 7      62.659995          kei
## 8      47.334756   masuhonsuu
## 9      38.187225    ekijyouka
## 10     19.239761          did
## 11     14.344117       kouhou
## 12      9.550413       taisyo
## 13      5.626238        kubun

Plot the variables in order of importance

varImpPlot(gesui.rf)

plot(gesui, col=c(2, 3, 4)[gesui$kinkyuudo])

Determining remediation (taisyo) for vinyl (enbi) pipes: production data

- Read the sewer data and show basic statistics for gesui # the textbook uses the name logit

#gesui = read_csv("osui2.csv")
gesui = read_csv("enbi.csv")
## Parsed with column specification:
## cols(
##   OBJECTID = col_double(),
##   sys_name = col_double(),
##   slope = col_double(),
##   uedokaburi = col_double(),
##   masuhonsuu = col_double(),
##   long = col_double(),
##   kubun = col_double(),
##   did = col_double(),
##   kouhou = col_double(),
##   nendo = col_double(),
##   ekijyouka = col_double(),
##   kyouyounensuu = col_double(),
##   kansyu = col_double(),
##   kei = col_double(),
##   kinkyuudo = col_double(),
##   taisyo = col_double()
## )
gesui <- data.frame(gesui) # the textbook uses the name logit
#get the row numbers of the test data
#randomgesui<-sample(282,200)
#train <- gesui[randomgesui,]
#test <-gesui[-randomgesui,]
#cat(test$sys_name, file = "testrow.txt",append=FALSE)
#write.table(test,"testoutput.txt", quote=F, 
#             col.names=T, append=T)

gesui <- gesui[-1:-2] # drop the OBJECTID and sys_name columns
gesui <- gesui[-13]   # drop kinkyuudo
gesui <- gesui[-8]    # drop nendo
gesui <- gesui[-10]   # drop kansyu

Basic statistics for the vinyl pipe data

stargazer(as.data.frame(gesui),type = "html")
Statistic N Mean St. Dev. Min Pctl(25) Pctl(75) Max
slope 282 3.309 2.017 0.000 1.900 4.100 9.900
uedokaburi 282 4.218 2.570 1.009 2.462 5.397 13.385
masuhonsuu 282 1.284 1.765 0 0 2 11
long 282 31.300 15.309 0.970 21.325 40.492 96.820
kubun 282 1.209 0.407 1 1 1 2
did 282 0.766 0.424 0 1 1 1
kouhou 282 0.337 0.473 0 0 1 1
ekijyouka 282 0.202 0.402 0 0 0 1
kyouyounensuu 282 27.514 5.204 10 25 27 40
kei 282 390.248 162.287 200 250 600 900
taisyo 282 0.312 0.464 0 0 1 1

Specifying the categorical variables in the vinyl pipe data

gesui$taisyo <- as.factor(gesui$taisyo)
#gesui$kansyu <- as.factor(gesui$kansyu)

gesui$kubun <- as.factor(gesui$kubun)
gesui$did <- as.factor(gesui$did)
gesui$ekijyouka <- as.factor(gesui$ekijyouka)
#gesui$kinkyuudo <- as.factor(gesui$kinkyuudo)

sapply(gesui, class)
##         slope    uedokaburi    masuhonsuu          long         kubun 
##     "numeric"     "numeric"     "numeric"     "numeric"      "factor" 
##           did        kouhou     ekijyouka kyouyounensuu           kei 
##      "factor"     "numeric"      "factor"     "numeric"     "numeric" 
##        taisyo 
##      "factor"
summary(gesui)
##      slope         uedokaburi       masuhonsuu          long       kubun  
##  Min.   :0.000   Min.   : 1.009   Min.   : 0.000   Min.   : 0.97   1:223  
##  1st Qu.:1.900   1st Qu.: 2.462   1st Qu.: 0.000   1st Qu.:21.32   2: 59  
##  Median :2.685   Median : 3.402   Median : 1.000   Median :30.06          
##  Mean   :3.309   Mean   : 4.218   Mean   : 1.284   Mean   :31.30          
##  3rd Qu.:4.100   3rd Qu.: 5.397   3rd Qu.: 2.000   3rd Qu.:40.49          
##  Max.   :9.900   Max.   :13.385   Max.   :11.000   Max.   :96.82          
##  did         kouhou       ekijyouka kyouyounensuu        kei        taisyo 
##  0: 66   Min.   :0.0000   0:225     Min.   :10.00   Min.   :200.0   0:194  
##  1:216   1st Qu.:0.0000   1: 57     1st Qu.:25.00   1st Qu.:250.0   1: 88  
##          Median :0.0000             Median :25.00   Median :250.0          
##          Mean   :0.3369             Mean   :27.51   Mean   :390.2          
##          3rd Qu.:1.0000             3rd Qu.:27.00   3rd Qu.:600.0          
##          Max.   :1.0000             Max.   :40.00   Max.   :900.0

Splitting into training and test data

randomgesui<-sample(282,200)
train <- gesui[randomgesui,]
test <-gesui[-randomgesui,]
gesui <- train
mean(train$taisyo == 1)# proportion of taisyo == 1 in train
## [1] 0.325
mean(test$taisyo == 1)# proportion of taisyo == 1 in test
## [1] 0.2804878
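Note that the sample() call above is not seeded, so the split (and every accuracy reported below) varies from run to run; a reproducible variant of that split would fix the seed first (my sketch, to be run in place of the unseeded version):

set.seed(123)                    # fix the RNG so the split can be reproduced
randomgesui <- sample(282, 200)  # 200 training rows out of 282
train <- gesui[randomgesui, ]
test  <- gesui[-randomgesui, ]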

3/30: Checking accuracy by variable elimination, using the train() function

https://shohei-doi.github.io/notes/posts/2019-05-27-cross-validation/

#Without variable selection
vote_rf2 <- train(
  taisyo ~ .,
  data = train,
  method = "rf"
)

train_rf2 <- confusionMatrix(predict(vote_rf2, train), train$taisyo)
test_rf2 <- confusionMatrix(predict(vote_rf2, test), test$taisyo)
print(str_c("Accuracy (train):", train_rf2$overall[1]))
## [1] "Accuracy (train):0.995"
print(str_c("Accuracy (test) :", test_rf2$overall[1]))
## [1] "Accuracy (test) :0.853658536585366"

#With variable selection (3 removed: -kouhou, -did, -kubun)

#With variable selection (3 removed: -kouhou, -did, -kubun)
vote_rf2 <- train(
  taisyo ~ .,
  data = train%>% 
    select(-kouhou, -did, -kubun),
  method = "rf",
  importance = TRUE
)

train_rf2 <- confusionMatrix(predict(vote_rf2, train), train$taisyo)
test_rf2 <- confusionMatrix(predict(vote_rf2, test), test$taisyo)
print(str_c("Accuracy (train):", train_rf2$overall[1]))
## [1] "Accuracy (train):1"
print(str_c("Accuracy (test) :", test_rf2$overall[1]))
## [1] "Accuracy (test) :0.878048780487805"

Prediction accuracy improved slightly: 0.853 → 0.878. The points to take away are that more features is not necessarily better, and that feature selection based on domain knowledge is an effective approach.
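As an aside (my sketch, not from the reference), this manual remove-and-retest loop can also be automated with caret's recursive feature elimination, which cross-validates each candidate subset size:

set.seed(123)
ctrl <- rfeControl(functions = rfFuncs, method = "cv", number = 5)  # 5-fold CV with a random-forest scorer
rfe_fit <- rfe(x = select(train, -taisyo), y = train$taisyo,
               sizes = 3:10,       # candidate numbers of variables to keep
               rfeControl = ctrl)
rfe_fit$optVariables               # variables retained by the best-performing subset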

#With variable selection (4 removed: -kouhou, -did, -kubun, -masuhonsuu)

#With variable selection (4 removed: -kouhou, -did, -kubun, -masuhonsuu)
vote_rf2 <- train(
  taisyo ~ .,
  data = train%>% 
    select(-kouhou, -did, -kubun, -masuhonsuu),
  method = "rf"
)

train_rf2 <- confusionMatrix(predict(vote_rf2, train), train$taisyo)
test_rf2 <- confusionMatrix(predict(vote_rf2, test), test$taisyo)
print(str_c("Accuracy (train):", train_rf2$overall[1]))
## [1] "Accuracy (train):1"
print(str_c("Accuracy (test) :", test_rf2$overall[1]))
## [1] "Accuracy (test) :0.890243902439024"

Accuracy improved further: 0.878 → 0.890.

With variable selection (5 removed: -kouhou, -did, -kubun, -masuhonsuu, -ekijyouka)

#With variable selection (5 removed: -kouhou, -did, -kubun, -masuhonsuu, -ekijyouka)
vote_rf2 <- train(
  taisyo ~ .,
  data = train%>% 
    select(-kouhou, -did, -kubun, -masuhonsuu, -ekijyouka),
  method = "rf",
  importance = TRUE
)

train_rf2 <- confusionMatrix(predict(vote_rf2, train), train$taisyo)
test_rf2 <- confusionMatrix(predict(vote_rf2, test), test$taisyo)
print(str_c("Accuracy (train):", train_rf2$overall[1]))
## [1] "Accuracy (train):1"
print(str_c("Accuracy (test) :", test_rf2$overall[1]))
## [1] "Accuracy (test) :0.890243902439024"

Test accuracy was unchanged in this run, staying at 0.890.

4/1: Re-estimated the model based on these results.

gesui.rf2 <- randomForest(  # build the classifier
taisyo ~ slope + uedokaburi + masuhonsuu + long + kyouyounensuu + kei, # model formula
data = train,# data
                          importance = TRUE) 

predrandam = predict(gesui.rf2, test)
predrandam
##   6  10  12  15  17  22  24  26  27  29  32  40  41  42  44  48  49  55  57  63 
##   0   0   0   0   0   0   0   0   1   0   0   0   0   0   0   0   0   0   1   1 
##  64  67  72  76  78  81  86  89  90  92  93  96  97 100 105 107 109 110 114 116 
##   1   1   0   0   0   0   0   0   0   0   0   0   1   0   0   0   0   0   0   0 
## 119 120 122 123 127 135 138 149 156 159 163 166 168 169 170 189 196 203 205 206 
##   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   1   1   1   1   1 
## 213 214 218 224 231 234 235 236 240 241 243 247 250 251 252 256 263 265 267 268 
##   1   1   1   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0 
## 269 279 
##   0   0 
## Levels: 0 1
table(predrandam,test$taisyo)
##           
## predrandam  0  1
##          0 59  9
##          1  0 14
x=gesui.rf2$importance
varImpPlot(gesui.rf2)

plot(test$taisyo, predrandam, main = gesui.rf2$call)
curve(identity, add = TRUE)

gesui.rf2
## 
## Call:
##  randomForest(formula = taisyo ~ slope + uedokaburi + masuhonsuu +      long + kyouyounensuu + kei, data = train, importance = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 500
## No. of variables tried at each split: 2
## 
##         OOB estimate of  error rate: 22.5%
## Confusion matrix:
##     0  1 class.error
## 0 123 12  0.08888889
## 1  33 32  0.50769231
rank <- data.frame(x)  # 重要度のリストをデータフレームに変換
rank$factor <- rownames(rank)  # 行名になっている要因をデータフレームに追加
rank <- rank[order(rank[,1], decreasing=T),]  # 重要度(偏回帰係数的なもの)順に並び替え
rownames(rank) <- 1:nrow(rank)  # ランキングを行名にする
rank
##             X0          X1 MeanDecreaseAccuracy MeanDecreaseGini        factor
## 1  0.024067461 0.106046130          0.050622981        14.078434 kyouyounensuu
## 2  0.021488290 0.020185351          0.020619146        17.643598         slope
## 3  0.012559467 0.077368179          0.033360676        21.458246          long
## 4  0.009582111 0.043772279          0.020798958        20.675098    uedokaburi
## 5 -0.004562522 0.069481756          0.019409760         5.703110           kei
## 6 -0.005555701 0.003690695         -0.002448653         5.723344    masuhonsuu

4/2: Next, perform parameter tuning on this result

http://d-m-l.jp/Rbiz/task_rf.html

dim(gesui)
## [1] 200  11
sapply(gesui, class)
##         slope    uedokaburi    masuhonsuu          long         kubun 
##     "numeric"     "numeric"     "numeric"     "numeric"      "factor" 
##           did        kouhou     ekijyouka kyouyounensuu           kei 
##      "factor"     "numeric"      "factor"     "numeric"     "numeric" 
##        taisyo 
##      "factor"
head(gesui)
##     slope uedokaburi masuhonsuu  long kubun did kouhou ekijyouka kyouyounensuu
## 108  9.60   2.481001          3 39.08     2   1      0         0            26
## 115  4.12   4.084854          7 51.90     2   1      1         0            24
## 223  1.34   3.484858          4 20.20     1   1      0         0            25
## 65   3.60   1.572562          1 24.52     2   1      0         0            37
## 28   1.72   2.119883          0 17.00     1   1      0         0            40
## 79   1.82   1.366309          0 23.59     2   1      0         1            24
##     kei taisyo
## 108 250      0
## 115 250      0
## 223 250      0
## 65  250      1
## 28  250      1
## 79  250      0
set.seed(123)  # Note 1
gesui.tune <- tuneRF(gesui %>% select(-kouhou, -did, -kubun, -masuhonsuu, -ekijyouka,-taisyo) ,  # explanatory variables
     gesui$taisyo,  # response variable
  doBest = T)  # flag: return the forest with the best mtry
## mtry = 2  OOB error = 24% 
## Searching left ...
## mtry = 1     OOB error = 23% 
## 0.04166667 0.05 
## Searching right ...
## mtry = 4     OOB error = 27% 
## -0.125 0.05

set.seed(123)  # Note 1
gesui.tune <- tuneRF(gesui %>% select(-kouhou, -did, -kubun, -masuhonsuu, -ekijyouka,-taisyo) ,  # explanatory variables
gesui$taisyo,  # response variable
ntreeTry=2500,  # number of trees
trace = TRUE, 
doBest = T)
## mtry = 2  OOB error = 22% 
## Searching left ...
## mtry = 1     OOB error = 22% 
## 0 0.05 
## Searching right ...
## mtry = 4     OOB error = 25.5% 
## -0.1590909 0.05

gesui.rf2 <- randomForest(  # build the classifier
taisyo ~ slope + uedokaburi + masuhonsuu + long + kyouyounensuu + kei, 
data = gesui,  # data
# 3/30: five variables removed from the data
mtry = gesui.tune$mtry,importance=T)
# 3/26: it turned out importance() cannot be used unless importance=TRUE is set
# number of variables tried at each split:
gesui.tune$mtry
## [1] 1
gesui.rf2
## 
## Call:
##  randomForest(formula = taisyo ~ slope + uedokaburi + masuhonsuu +      long + kyouyounensuu + kei, data = gesui, mtry = gesui.tune$mtry,      importance = T) 
##                Type of random forest: classification
##                      Number of trees: 500
## No. of variables tried at each split: 1
## 
##         OOB estimate of  error rate: 24.5%
## Confusion matrix:
##     0  1 class.error
## 0 128  7  0.05185185
## 1  42 23  0.64615385

Estimation results after parameter tuning: in the end, parameter tuning did not improve the accuracy.

predrandam = predict(gesui.rf2, test)
predrandam
##   6  10  12  15  17  22  24  26  27  29  32  40  41  42  44  48  49  55  57  63 
##   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   1   1 
##  64  67  72  76  78  81  86  89  90  92  93  96  97 100 105 107 109 110 114 116 
##   1   1   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0 
## 119 120 122 123 127 135 138 149 156 159 163 166 168 169 170 189 196 203 205 206 
##   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   1   1   1   1   1 
## 213 214 218 224 231 234 235 236 240 241 243 247 250 251 252 256 263 265 267 268 
##   1   1   0   0   0   0   0   0   0   0   0   0   0   0   0   1   0   0   0   0 
## 269 279 
##   0   0 
## Levels: 0 1
summary(predrandam)
##  0  1 
## 70 12
table(predrandam,test$taisyo)
##           
## predrandam  0  1
##          0 58 12
##          1  1 11

4/1: Regularization

3/30: Dimensionality reduction / feature extraction (principal component analysis + random forest)

https://shohei-doi.github.io/notes/posts/2019-05-27-cross-validation/

pca <- preProcess(train, method = "pca", pcaComp = 7)
#pca <- preProcess(train, method = "pca", pcaComp = 10)
data_train_pca <- predict(pca, train)
data_test_pca <- predict(pca, test)

data_train_pca %>% 
  head()
##     kubun did ekijyouka taisyo         PC1        PC2         PC3        PC4
## 108     2   1         0      0 -1.61025549  2.6710723 -1.11310066  0.8926806
## 115     2   1         0      0 -0.03237762  2.9936539  1.45453114 -0.2513616
## 223     1   1         0      0 -1.37111328  0.2167997  0.63361498 -1.3716400
## 65      2   1         0      1 -1.86491182 -0.8519132  0.38165861  1.1834120
## 28      1   1         0      1 -1.78941805 -2.1148545  0.52738491  1.2271106
## 79      2   1         1      0 -1.06831348 -0.9199812 -0.08940499 -1.1984562
##            PC5          PC6         PC7
## 108 -0.9232326 -0.173504685 -0.06050638
## 115  1.2525465 -0.041831585  1.57339022
## 223  1.0966349  0.113140803  0.23398759
## 65  -0.2687406  0.006696695  0.08106040
## 28   0.1120783 -0.034854195  0.03031515
## 79  -0.8471442 -0.350753948  0.37001943
vote_rf3 <- train(
  taisyo ~ .,
  data = data_train_pca,
  method = "rf"
)

Train the random forest on the 7 principal components (the factor columns kubun, did, ekijyouka, and taisyo pass through preProcess unchanged, as the output above shows).

train_rf3 <- confusionMatrix(predict(vote_rf3, data_train_pca), data_train_pca$taisyo)
test_rf3 <- confusionMatrix(predict(vote_rf3, data_test_pca), data_test_pca$taisyo)
print(str_c("Accuracy (train):", train_rf3$overall[1]))
## [1] "Accuracy (train):1"
print(str_c("Accuracy (test):", test_rf3$overall[1]))
## [1] "Accuracy (test):0.841463414634146"

sapply(gesui, class)

summary(gesui)
train <- gesui

Basic statistics for the vinyl pipes

stargazer(as.data.frame(gesui),type = "html")
Statistic N Mean St. Dev. Min Pctl(25) Pctl(75) Max
slope 200 3.296 2.032 0.000 1.900 4.200 9.700
uedokaburi 200 4.121 2.418 1.055 2.476 5.261 13.385
masuhonsuu 200 1.270 1.781 0 0 2 11
long 200 31.977 15.610 0.970 21.745 41.785 96.820
kouhou 200 0.355 0.480 0 0 1 1
kyouyounensuu 200 27.615 5.313 10 25 27 40
kei 200 394.500 164.025 200 250 600 900

Basic statistics for the training data

stargazer(as.data.frame(train),type = "html")
Statistic N Mean St. Dev. Min Pctl(25) Pctl(75) Max
slope 200 3.296 2.032 0.000 1.900 4.200 9.700
uedokaburi 200 4.121 2.418 1.055 2.476 5.261 13.385
masuhonsuu 200 1.270 1.781 0 0 2 11
long 200 31.977 15.610 0.970 21.745 41.785 96.820
kouhou 200 0.355 0.480 0 0 1 1
kyouyounensuu 200 27.615 5.313 10 25 27 40
kei 200 394.500 164.025 200 250 600 900

Basic statistics for the test data

stargazer(as.data.frame(test),type = "html")
Statistic N Mean St. Dev. Min Pctl(25) Pctl(75) Max
slope 82 3.340 1.991 0.000 1.896 3.968 9.900
uedokaburi 82 4.454 2.910 1.009 2.455 5.852 12.289
masuhonsuu 82 1.317 1.735 0 0 2 10
long 82 29.647 14.509 2.710 19.992 39.252 65.070
kouhou 82 0.293 0.458 0 0 1 1
kyouyounensuu 82 27.268 4.952 17 25 27 40
kei 82 379.878 158.476 200 250 600 600

Anomaly-determination results for the vinyl pipes

#model = randomForest(taisyo ~ ., data = gesui)
model = randomForest(taisyo ~ ., data = train,
                          importance = TRUE)
#model = randomForest(kinkyuudo ~ ., data = gesui)
model
## 
## Call:
##  randomForest(formula = taisyo ~ ., data = train, importance = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 500
## No. of variables tried at each split: 3
## 
##         OOB estimate of  error rate: 23.5%
## Confusion matrix:
##     0  1 class.error
## 0 123 12  0.08888889
## 1  35 30  0.53846154
#predition = predict(model, gesui)
predition = predict(model, test)
predition
##   6  10  12  15  17  22  24  26  27  29  32  40  41  42  44  48  49  55  57  63 
##   0   0   0   0   0   0   0   0   1   0   0   0   0   0   0   0   0   0   1   1 
##  64  67  72  76  78  81  86  89  90  92  93  96  97 100 105 107 109 110 114 116 
##   1   1   0   0   0   0   0   0   0   0   0   0   0   0   1   0   0   0   0   0 
## 119 120 122 123 127 135 138 149 156 159 163 166 168 169 170 189 196 203 205 206 
##   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   1   1   1   1   1 
## 213 214 218 224 231 234 235 236 240 241 243 247 250 251 252 256 263 265 267 268 
##   1   1   1   0   0   0   0   0   1   0   0   0   0   0   0   0   0   0   0   0 
## 269 279 
##   0   0 
## Levels: 0 1
summary(predition)
##  0  1 
## 67 15

Comparing predictions with observed values

table(predition,test$taisyo)
##          
## predition  0  1
##         0 57 10
##         1  2 13
#sapply(gesui, class)
#summary(gesui)

Variable importance of the model

model$importance
##                          0           1 MeanDecreaseAccuracy MeanDecreaseGini
## slope          0.018392319 0.014010535         0.0171466521        15.253607
## uedokaburi     0.035383360 0.047315544         0.0389753491        18.506697
## masuhonsuu    -0.001589300 0.004792608         0.0003818136         5.293078
## long           0.019854609 0.075646063         0.0375333633        18.742660
## kubun          0.006843189 0.006866738         0.0068690359         1.839719
## did           -0.001654820 0.020337061         0.0052698574         1.928438
## kouhou         0.010161804 0.003396517         0.0080208726         1.565702
## ekijyouka      0.012326535 0.001307921         0.0086425680         2.795039
## kyouyounensuu  0.020981539 0.091373828         0.0435957948        12.866993
## kei            0.000539189 0.062357504         0.0204448360         5.223490
varImpPlot(model)

dim(gesui)
## [1] 200  11
sapply(gesui, class)
##         slope    uedokaburi    masuhonsuu          long         kubun 
##     "numeric"     "numeric"     "numeric"     "numeric"      "factor" 
##           did        kouhou     ekijyouka kyouyounensuu           kei 
##      "factor"     "numeric"      "factor"     "numeric"     "numeric" 
##        taisyo 
##      "factor"
head(gesui)
##     slope uedokaburi masuhonsuu  long kubun did kouhou ekijyouka kyouyounensuu
## 108  9.60   2.481001          3 39.08     2   1      0         0            26
## 115  4.12   4.084854          7 51.90     2   1      1         0            24
## 223  1.34   3.484858          4 20.20     1   1      0         0            25
## 65   3.60   1.572562          1 24.52     2   1      0         0            37
## 28   1.72   2.119883          0 17.00     1   1      0         0            40
## 79   1.82   1.366309          0 23.59     2   1      0         1            24
##     kei taisyo
## 108 250      0
## 115 250      0
## 223 250      0
## 65  250      1
## 28  250      1
## 79  250      0
set.seed(123)  # Note 1

#gesui.tune <- tuneRF(gesui %>% select(-kinkyuudo) ,  # explanatory variables
#    gesui$kinkyuudo,  # response variable

gesui.tune <- tuneRF(gesui %>% select(-taisyo) ,  # explanatory variables
     gesui$taisyo,  # response variable
  doBest = T)  # flag: return the forest with the best mtry
## mtry = 3  OOB error = 23% 
## Searching left ...
## mtry = 2     OOB error = 23% 
## 0 0.05 
## Searching right ...
## mtry = 6     OOB error = 24% 
## -0.04347826 0.05


From this output, the OOB error is 23% at both mtry = 3 and mtry = 2, and 24% at mtry = 6; no candidate improves on mtry = 3 by more than the 0.05 threshold, so mtry = 3 is kept.

Increasing the number of trees: the ntreeTry argument defaults to 50. Let's specify 2,500 trees instead.

gesui <- train

# (not executed: the original one-liner `train <- gesui[-kouhou] gesui <- train test <- test[-kouhou]`
# errors because kouhou is not an object; a working version would be)
#train <- gesui[, names(gesui) != "kouhou"]
#gesui <- train
#test <- test[, names(test) != "kouhou"]

stargazer(as.data.frame(train),type = "html")
Statistic N Mean St. Dev. Min Pctl(25) Pctl(75) Max
slope 200 3.296 2.032 0.000 1.900 4.200 9.700
uedokaburi 200 4.121 2.418 1.055 2.476 5.261 13.385
masuhonsuu 200 1.270 1.781 0 0 2 11
long 200 31.977 15.610 0.970 21.745 41.785 96.820
kouhou 200 0.355 0.480 0 0 1 1
kyouyounensuu 200 27.615 5.313 10 25 27 40
kei 200 394.500 164.025 200 250 600 900
stargazer(as.data.frame(test),type = "html")
Statistic N Mean St. Dev. Min Pctl(25) Pctl(75) Max
slope 82 3.340 1.991 0.000 1.896 3.968 9.900
uedokaburi 82 4.454 2.910 1.009 2.455 5.852 12.289
masuhonsuu 82 1.317 1.735 0 0 2 10
long 82 29.647 14.509 2.710 19.992 39.252 65.070
kouhou 82 0.293 0.458 0 0 1 1
kyouyounensuu 82 27.268 4.952 17 25 27 40
kei 82 379.878 158.476 200 250 600 600
set.seed(123)  # Note 1
#gesui.tune <- tuneRF(gesui %>% select(-kinkyuudo) ,  # explanatory variables
#  gesui$kinkyuudo,  # response variable
gesui.tune <- tuneRF(gesui %>% select(-taisyo) ,  # explanatory variables
gesui$taisyo,  # response variable
ntreeTry=2500,  # number of trees
trace = TRUE, 
doBest = T)
## mtry = 3  OOB error = 22.5% 
## Searching left ...
## mtry = 2     OOB error = 20.5% 
## 0.08888889 0.05 
## mtry = 1     OOB error = 23.5% 
## -0.1463415 0.05 
## Searching right ...
## mtry = 6     OOB error = 24% 
## -0.1707317 0.05

This time mtry = 2 gives the smallest OOB error, at 20.5%; mtry = 6 is the worst, at 24%.

The mtry found by tuning (stored in $mtry of the object returned by tuneRF()) is passed as an argument to randomForest().

Estimation model

3/26: It turned out that the importance() function cannot be used unless importance = TRUE is set.

gesui.rf2 <- randomForest(  # build the classifier
#  kinkyuudo ~ ., # model formula
taisyo ~ ., # model formula
data = gesui,  # data
  # 3/30: five variables removed from the data
mtry = gesui.tune$mtry,importance=T)  # 3/26: importance() cannot be used unless importance=TRUE is set
# number of variables tried at each split
gesui.rf2
## 
## Call:
##  randomForest(formula = taisyo ~ ., data = gesui, mtry = gesui.tune$mtry,      importance = T) 
##                Type of random forest: classification
##                      Number of trees: 500
## No. of variables tried at each split: 2
## 
##         OOB estimate of  error rate: 21%
## Confusion matrix:
##     0  1 class.error
## 0 128  7  0.05185185
## 1  35 30  0.53846154

Estimation results after parameter tuning

predrandam = predict(gesui.rf2, test)
predrandam
##   6  10  12  15  17  22  24  26  27  29  32  40  41  42  44  48  49  55  57  63 
##   0   0   0   0   0   0   0   1   1   0   0   0   0   0   0   0   0   0   1   1 
##  64  67  72  76  78  81  86  89  90  92  93  96  97 100 105 107 109 110 114 116 
##   1   1   0   0   0   0   0   0   0   0   0   0   0   0   1   0   0   0   0   0 
## 119 120 122 123 127 135 138 149 156 159 163 166 168 169 170 189 196 203 205 206 
##   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   1   1   1   1   1 
## 213 214 218 224 231 234 235 236 240 241 243 247 250 251 252 256 263 265 267 268 
##   1   1   1   0   0   0   0   0   1   0   0   0   0   0   0   0   0   0   0   0 
## 269 279 
##   0   0 
## Levels: 0 1
summary(predrandam)
##  0  1 
## 66 16
table(predrandam,test$taisyo)
##           
## predrandam  0  1
##          0 57  9
##          1  2 14


Variable importance for the sewer-deterioration model

x <- gesui.rf2$importance  # extract the variable-importance matrix
x
##                          0           1 MeanDecreaseAccuracy MeanDecreaseGini
## slope          0.019278350 0.006175289         0.0150091372        12.786018
## uedokaburi     0.033746693 0.032532627         0.0332667763        15.548922
## masuhonsuu    -0.004367810 0.007278541        -0.0006865278         5.283975
## long           0.011615214 0.055539961         0.0258221275        14.811650
## kubun          0.010304180 0.003067069         0.0080297285         1.889741
## did           -0.003405668 0.033601746         0.0082865121         2.016948
## kouhou         0.009793807 0.005519660         0.0083875405         1.385116
## ekijyouka      0.010575397 0.002003619         0.0079779208         2.613673
## kyouyounensuu  0.020940650 0.093122078         0.0445696937        12.140770
## kei            0.008438833 0.055213043         0.0231262079         5.200025

http://sfchaos.hatenablog.com/entry/20150628/p1 https://tjo.hatenablog.com/entry/2013/09/02/190449

How to read the output: "OOB estimate of error rate" is the out-of-bag misclassification rate. In the confusion matrix the rows are the actual classes and the columns the predicted classes. In the matrix above, of the 135 segments that are actually "0" (urgency 3 or lower), 128 are predicted "0" and 7 are predicted "1", giving the class error of about 5.2%.
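As a small cross-check against the printout, the overall accuracy and per-class errors can be recomputed from the stored OOB confusion matrix (a sketch using the fitted object above; rows are actual classes):

```r
cm <- gesui.rf2$confusion[, 1:2]  # drop the trailing class.error column
sum(diag(cm)) / sum(cm)           # overall OOB accuracy: 158/200 = 0.79
1 - diag(cm) / rowSums(cm)        # per-class error, matching class.error
```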

Sort the variables by importance, in descending order

rank <- data.frame(x)  # convert the importance matrix to a data frame
rank$factor <- rownames(rank)  # add the variable names (row names) as a column
rank <- rank[order(rank[,1], decreasing=T),]  # sort by the class-"0" importance column (loosely, a partial-regression-coefficient-like measure)
rownames(rank) <- 1:nrow(rank)  # use the rank order as the row names
rank
##              X0          X1 MeanDecreaseAccuracy MeanDecreaseGini        factor
## 1   0.033746693 0.032532627         0.0332667763        15.548922    uedokaburi
## 2   0.020940650 0.093122078         0.0445696937        12.140770 kyouyounensuu
## 3   0.019278350 0.006175289         0.0150091372        12.786018         slope
## 4   0.011615214 0.055539961         0.0258221275        14.811650          long
## 5   0.010575397 0.002003619         0.0079779208         2.613673     ekijyouka
## 6   0.010304180 0.003067069         0.0080297285         1.889741         kubun
## 7   0.009793807 0.005519660         0.0083875405         1.385116        kouhou
## 8   0.008438833 0.055213043         0.0231262079         5.200025           kei
## 9  -0.003405668 0.033601746         0.0082865121         2.016948           did
## 10 -0.004367810 0.007278541        -0.0006865278         5.283975    masuhonsuu
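The ranking above is sorted on the class-"0" column; a one-line sketch of the more conventional ordering, by overall permutation importance:

```r
# reorder the same data frame by MeanDecreaseAccuracy instead
rank[order(rank$MeanDecreaseAccuracy, decreasing = TRUE), ]
```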
plot(gesui.rf2)

varImpPlot(gesui.rf2)

See also: https://yolo-kiyoshi.com/2019/09/16/post-1226/ https://aotamasaki.hatenablog.com/entry/bias_in_feature_importances

# Reproducing another site's random-forest EDA in R: https://navaclass.com/random-forest-eda/

#set.seed(111)
# Train the random forest model (kept commented out, following the site above)
#boston.rf <- randomForest(kinkyuudo ~ .,
#boston.rf <- randomForest(taisyo ~ .,                          
#                          data = train,
#                          importance=T)

# Predict on the test data
#pred <- predict(boston.rf, newdata = test)

# Plot observed values against predicted values
#plot(test$taisyo, pred, main = boston.rf$call)
#curve(identity, add = TRUE)

#pred = predict(gesui.rf2, test)
pred = predict(gesui.rf2, train)  # not used in the report
#plot(test$taisyo, pred, main = gesui.rf2$call)
plot(train$taisyo, pred, main = gesui.rf2$call)  # not used in the report
curve(identity, add = TRUE)

# added 3/26
kekka<-table(pred,train$taisyo)
kekka
##     
## pred   0   1
##    0 134   2
##    1   1  63
pred = predict(gesui.rf2, test)  # for the report
kekka<-table(pred,test$taisyo)
kekka
##     
## pred  0  1
##    0 57  9
##    1  2 14

# Plot observed values against predicted values

plot(test$taisyo, pred, main = gesui.rf2$call)
curve(identity, add = TRUE)

#Prediction error (RMSE: root mean squared error). To estimate it, the factor target variable first has to be converted to numeric.

# RMSE helper: root mean squared error between actual and predicted values
rms <- function(act, pred) {
  sqrt(mean((act - pred) ^ 2))
}
cat(" RMSE =", rms(test$taisyo, pred))
## Warning in Ops.factor(act, pred): '-' not meaningful for factors
##  RMSE = NA
# Compare with the prediction error of a logistic regression model
model_1 <- glm(taisyo ~ slope + uedokaburi + masuhonsuu + long + kyouyounensuu + kei, data = gesui,family = binomial(link = "logit"))

cat(" RMSE = ",
    rms(test$taisyo,
        predict(model_1, newdata = test)))
## Warning in Ops.factor(act, pred): '-' not meaningful for factors
##  RMSE =  NA
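Both calls return NA because taisyo is a factor. A minimal sketch of the conversion the comment above calls for, assuming levels "0"/"1" and comparing predicted probabilities against the 0/1 outcome:

```r
act <- as.numeric(as.character(test$taisyo))  # factor "0"/"1" -> numeric 0/1
# random forest: use class-"1" probabilities rather than hard labels
rf_prob <- predict(gesui.rf2, test, type = "prob")[, "1"]
cat(" RMSE(rf) =", rms(act, rf_prob), "\n")
# logistic regression: predict on the probability (response) scale
glm_prob <- predict(model_1, newdata = test, type = "response")
cat(" RMSE(glm) =", rms(act, glm_prob), "\n")
```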

https://funatsu-lab.github.io/open-course-ware/machine-learning/random-forest/

#require(ranger): an alternative implementation of random forest

require(ranger)
# モデルの構築
ranger_model <- ranger(
#  formula = as.factor(default.payment.next.month) ~ ., # target variable on the source site
  formula = taisyo ~ ., 
  data = train,
  num.trees = 1000,
  mtry = 5,
  write.forest = TRUE,
  importance = 'impurity',
  probability = TRUE
)

ranger_pred <- predict(ranger_model, data = test)
ranger_pred$predictions
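Because the model was built with probability = TRUE, $predictions is a matrix of class probabilities rather than labels. A sketch (0.5 threshold assumed) of converting it for a confusion table:

```r
# columns of $predictions are named after the factor levels ("0", "1")
ranger_class <- ifelse(ranger_pred$predictions[, "1"] > 0.5, "1", "0")
table(ranger_class, test$taisyo)
```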

#Decision tree

library(rpart.plot)  # also loads rpart
model = rpart(taisyo ~ ., data = train)
model
pred = predict(model, test, type = "class")  # for the report; type = "class" returns labels (the default probability matrix would break table() below)
pred
#table(pred,test[,11])

kekka<-table(pred,test$taisyo)
kekka
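Since rpart.plot is loaded above, the fitted tree can also be drawn directly (defaults only; purely illustrative):

```r
rpart.plot(model)  # visualize the splits and leaf class rates
```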


#Prediction with SVM

ビジネスに活かすデータマイニング (尾崎隆; Data Mining for Business)

http://yut.hatenablog.com/entry/20120827/1346024147




stargazer(as.data.frame(gesui), type = "html")

| Statistic | N | Mean | St. Dev. | Min | Pctl(25) | Pctl(75) | Max |
|---|---|---|---|---|---|---|---|
| slope | 200 | 3.296 | 2.032 | 0.000 | 1.900 | 4.200 | 9.700 |
| uedokaburi | 200 | 4.121 | 2.418 | 1.055 | 2.476 | 5.261 | 13.385 |
| masuhonsuu | 200 | 1.270 | 1.781 | 0 | 0 | 2 | 11 |
| long | 200 | 31.977 | 15.610 | 0.970 | 21.745 | 41.785 | 96.820 |
| kouhou | 200 | 0.355 | 0.480 | 0 | 0 | 1 | 1 |
| kyouyounensuu | 200 | 27.615 | 5.313 | 10 | 25 | 27 | 40 |
| kei | 200 | 394.500 | 164.025 | 200 | 250 | 600 | 900 |

library(e1071)

Comparison of predicted and observed values

d.svm <- svm(taisyo ~ ., data = train)
print(d.svm)
predsvm <- predict(d.svm, newdata = test)
summary(predsvm)

kekka <- table(predsvm, test$taisyo)
kekka
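e1071 also ships tune.svm() for a simple cross-validated grid search over the kernel parameters; a sketch with illustrative, untuned ranges:

```r
set.seed(123)
svm_tune <- tune.svm(taisyo ~ ., data = train,
                     gamma = 10^(-2:0),  # assumed candidate grid
                     cost  = 10^(0:2))   # assumed candidate grid
summary(svm_tune)  # reports the best gamma/cost found by cross-validation
```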

Estimation with a neural network

library( nnet )

Prediction model

nn<-nnet(taisyo ~., data=train,size = 2, rang = .1, decay = 5e-4, maxit = 200 )
## # weights:  25
## initial  value 134.122308 
## final  value 126.116395 
## converged
nn
## a 10-2-1 network with 25 weights
## inputs: slope uedokaburi masuhonsuu long kubun2 did1 kouhou ekijyouka1 kyouyounensuu kei 
## output(s): taisyo 
## options were - entropy fitting  decay=5e-04


nn_predict<-predict(nn,test,type="class")
table(nn_predict, test$taisyo)
##           
## nn_predict  0  1
##          0 59 23
cat(test$taisyo, file = "testtaisyo2.txt", append =FALSE)
cat(nn_predict, file = "nnresult2.txt", append =FALSE)
nn_predict<-predict(nn,test,type="raw")
nn_predict  # raw predicted values: https://mjin.doshisha.ac.jp/R/Chap_23/23.html
##          [,1]
## 6   0.3250011
## 10  0.3250011
## 12  0.3250011
## ...
## 279 0.3250011

Every test case receives the same fitted probability (0.3250011): the network has converged to a constant and therefore assigns class "0" to every record, which is why the confusion matrix below contains only a "0" row.
nn_predict<-predict(nn,test,type="class")#推定値のグループ出力
#推定値グループのファイルテキスト出力http://takenaka-akio.org/doc/r_auto/chapter_03.html
nn_predict
##  [1] "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0"
## [20] "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0"
## [39] "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0"
## [58] "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0"
## [77] "0" "0" "0" "0" "0" "0"
cat(test$taisyo, file = "testtaisyo.txt", append =FALSE)
cat(predrandam, file = "lfresult.txt", append =FALSE)
#cat(predsvm, file = "svmresult.txt", append =FALSE)
cat(nn_predict, file = "nnresult.txt", append =FALSE)
kekka<-table(nn_predict, test$taisyo)
kekka
##           
## nn_predict  0  1
##          0 59 23

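Since the fitted network predicts a constant, one common remedy is to standardize the numeric inputs and allow a larger hidden layer and more iterations. A minimal sketch under those assumptions (the size, decay and maxit values are illustrative, not tuned):

```r
set.seed(123)
# standardize numeric columns using the training-set means and sds
num_cols <- names(train)[sapply(train, is.numeric)]
centers  <- sapply(train[num_cols], mean)
scales   <- sapply(train[num_cols], sd)
train_s <- train; test_s <- test
train_s[num_cols] <- scale(train[num_cols], center = centers, scale = scales)
test_s[num_cols]  <- scale(test[num_cols],  center = centers, scale = scales)
# refit with more hidden units and more iterations
nn2 <- nnet(taisyo ~ ., data = train_s, size = 5, decay = 5e-4, maxit = 1000)
table(predict(nn2, test_s, type = "class"), test_s$taisyo)
```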