library(kernlab) # Support Vector Machine
library(nnet) # Logistic regression, Neural Network
library(rpart) # Decision Tree
library(randomForest) # Random Forest
library(dplyr) # Data handling
library(caret) # Confusion matrix
library(DT) # Interactive data tables
library(class) # KNN
library(ggvis) # Data visualization
plot(iris)
iris %>% ggvis(~Petal.Length, ~Petal.Width, fill = ~factor(Species)) %>%
layer_points()
Let's check the structure of the iris data with the str() function.
str(iris)
## 'data.frame': 150 obs. of 5 variables:
## $ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
## $ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
## $ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
## $ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
## $ Species : Factor w/ 3 levels "setosa","versicolor",..: 1 1 1 1 1 1 1 1 1 1 ...
summary(iris)
## Sepal.Length Sepal.Width Petal.Length Petal.Width
## Min. :4.300 Min. :2.000 Min. :1.000 Min. :0.100
## 1st Qu.:5.100 1st Qu.:2.800 1st Qu.:1.600 1st Qu.:0.300
## Median :5.800 Median :3.000 Median :4.350 Median :1.300
## Mean :5.843 Mean :3.057 Mean :3.758 Mean :1.199
## 3rd Qu.:6.400 3rd Qu.:3.300 3rd Qu.:5.100 3rd Qu.:1.800
## Max. :7.900 Max. :4.400 Max. :6.900 Max. :2.500
## Species
## setosa :50
## versicolor:50
## virginica :50
##
##
##
sum(is.na(iris))
## [1] 0
Next, let's set a seed value and build the sampling indices and the training/test sets.
# assign the data
df <- iris
# set the seed
set.seed(919)
# training / test sampling
training_sampling <- sort(sample(1:nrow(df), nrow(df) * 0.7))
test_sampling <- setdiff(1:nrow(df), training_sampling)
# training / test set
training_set <- df[training_sampling, ]
test_set <- df[test_sampling, ]
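Note that sample() does not stratify by class, so the species proportions in the two sets can differ a little. A quick check (an optional sketch, not part of the original flow):
# class balance of the split; iris has 50 of each species overall
table(training_set$Species)
table(test_set$Species)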
multi_logit_m <- multinom(Species ~ Petal.Length + Petal.Width, data = training_set)
## # weights: 12 (6 variable)
## initial value 115.354290
## iter 10 value 8.103704
## iter 20 value 6.085925
## iter 30 value 6.068446
## iter 40 value 6.061376
## iter 50 value 6.058734
## iter 60 value 6.055210
## iter 70 value 6.053575
## iter 80 value 6.048157
## iter 90 value 6.047643
## iter 100 value 6.047423
## final value 6.047423
## stopped after 100 iterations
multi_logit_p <- predict(multi_logit_m, newdata = test_set, type = "class")
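As a quick sanity check (an optional addition, not part of the original flow), the predictions can be compared against the actual labels directly:
# share of correctly classified test rows
mean(multi_logit_p == test_set$Species)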
rpart_m <- rpart(Species ~ Petal.Length + Petal.Width, data = training_set)
rpart_p <- predict(rpart_m, newdata = test_set, type = "class")
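If you want to see which splits the tree learned, base R can draw the rpart object directly; this is an optional sketch, not required for the comparison below.
# draw the fitted tree and label the splits with class counts
plot(rpart_m, margin = 0.1)
text(rpart_m, use.n = TRUE)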
Random Forest is a kind of ensemble method: you can think of it as a collection of decision trees, each with its own view of the data. Let's build a Random Forest model using the randomForest function from the randomForest package.
rf_m <- randomForest(Species ~ Petal.Length + Petal.Width, data = training_set)
rf_p <- predict(rf_m, newdata = test_set, type = "class")
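A useful by-product of a random forest is its variable-importance measure. As an optional check, importance() and varImpPlot() from the randomForest package show how much each predictor contributes:
# mean decrease in Gini impurity per predictor
importance(rf_m)
varImpPlot(rf_m)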
svm_m <- ksvm(Species ~ Petal.Length + Petal.Width, data=training_set)
svm_p <- predict(svm_m, newdata=test_set)
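By default ksvm() uses an RBF kernel with a heuristically chosen sigma. As a hedged sketch (the cross = 5 refit below is an assumption, not part of the original run), you can also request a 5-fold cross-validation error estimate and read it back with kernlab's cross() accessor:
# refit with an explicit kernel/cost and a 5-fold CV error estimate
svm_cv_m <- ksvm(Species ~ Petal.Length + Petal.Width, data = training_set,
                 kernel = "rbfdot", C = 1, cross = 5)
cross(svm_cv_m)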
normalizer <- function(x) {
return_value <- (x - min(x)) / (max(x) - min(x))
return(return_value)
}
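A quick check of normalizer() on a small vector: the minimum maps to 0, the maximum to 1, and everything else lands in between.
normalizer(c(1, 5, 10))
## [1] 0.0000000 0.4444444 1.0000000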
normal_iris <- sapply(iris[,1:4], normalizer) %>%
as.data.frame()
# build the normalized data frame
df <- cbind(normal_iris, "Species" = iris[,5])
# reuse the earlier training / test split so all five models are scored on the same test rows
# training_set, test_set
training_set <- df[training_sampling,]
test_set <- df[test_sampling,]
training_set_unlabeled <- training_set[, 1:4]
training_set_label <- training_set[, 5]
test_set_unlabeled <- test_set[, 1:4]
test_set_label <- test_set[, 5]
knn_p <- knn(train = training_set_unlabeled, test = test_set_unlabeled,
             cl = training_set_label, k = 3)
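The choice k = 3 is a guess. A small sketch like the following (an optional addition) tries a range of k values and records the test accuracy for each, though the best value will depend on the seed and the split:
# try several k values and record test-set accuracy
k_values <- 1:15
k_accuracy <- sapply(k_values, function(k) {
  pred <- knn(train = training_set_unlabeled, test = test_set_unlabeled,
              cl = training_set_label, k = k)
  mean(pred == test_set_label)
})
data.frame(k = k_values, accuracy = k_accuracy)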
model_list <- data.frame(
  multi_logit = as.character(multi_logit_p),
  rpart = as.character(rpart_p),
  rf = as.character(rf_p),
  svm = as.character(svm_p),
  knn = as.character(knn_p),
  stringsAsFactors = FALSE
)
# str(model_list)
total_model_accuracy <- data.frame()
for (model in model_list) {
  # confusionMatrix() expects factors with matching levels
  model_cm <- confusionMatrix(factor(model, levels = levels(test_set$Species)),
                              test_set$Species)
  model_cm_class <- model_cm$byClass %>% as.data.frame()
  model_accuracy <- model_cm_class$`Balanced Accuracy`
  total_model_accuracy <- rbind(total_model_accuracy, model_accuracy)
}
colnames(total_model_accuracy) <- levels(test_set$Species)
rownames(total_model_accuracy) <- c("Logistic Regression", "Decision Tree",
"Random Forest", "Support Vector Machine","KNN")
#### Per-model accuracy comparison table, for comparing predicted and actual values
datatable(total_model_accuracy)
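The balanced accuracy above is reported per class. If you also want a single overall accuracy per model, a short sapply() over model_list (a supplementary sketch, not in the original) gives one number each:
# overall test-set accuracy for each model
sapply(model_list, function(p) mean(p == as.character(test_set$Species)))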