AppliedPredictiveModeling: v1.1.6
caret: v6.0.47, used: 6.0.76
ElemStatLearn: v2012.04-0, used: 2015.6.26
pgmm: v1.1, used: 1.2
rpart: v4.1.8, used: 4.1.10
gbm: v2.1, used: 2.1.3
lubridate: v1.3.3, used: 1.6.0
forecast: v5.6, used: 8.0
e1071: v1.6.4, used: 1.6-8
library(ElemStatLearn)
library(caret)
data(vowel.train)
data(vowel.test)
# Treat the outcome as a factor so caret fits classification models
vowel.train$y <- as.factor(vowel.train$y)
vowel.test$y <- as.factor(vowel.test$y)
set.seed(33833)
# Fit a random forest and a boosted-tree model on the vowel training set
model_rf <- train(y ~ ., data = vowel.train, method = "rf")
pred_rf <- predict(model_rf, vowel.test)
model_gbm <- train(y ~ ., data = vowel.train, method = "gbm")
pred_gbm <- predict(model_gbm, vowel.test)
confusionMatrix(pred_rf, vowel.test$y)$overall['Accuracy']
## Accuracy
## 0.6147186
confusionMatrix(pred_gbm, vowel.test$y)$overall['Accuracy']
## Accuracy
## 0.5367965
# Keep only the test cases where the two models agree, then measure accuracy
# on that subset ("agreement accuracy")
DataBoth <- (pred_rf == pred_gbm)
confusionMatrix(pred_rf[DataBoth], vowel.test$y[DataBoth])$overall['Accuracy']
## Accuracy
## 0.6656051
Agreement Accuracy = 0.6361 (the closest option; the value computed above is ~0.66)
Agreement Accuracy = 0.5325
Agreement Accuracy = 0.9983
Agreement Accuracy = 0.9985
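As a cross-check, the agreement accuracy is simply the proportion of correct predictions among the test cases where the two models agree; a minimal sketch reusing the objects fitted above:
# Should match the confusionMatrix accuracy on the agreement subset
mean(pred_rf[DataBoth] == vowel.test$y[DataBoth])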
library(caret)
library(gbm)
set.seed(3433)
library(AppliedPredictiveModeling)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
set.seed(62433)
# Fit three different model types on the same training data
mod_rf <- train(diagnosis ~ ., data = training, method = "rf")
mod_gbm <- train(diagnosis ~ ., data = training, method = "gbm")
mod_lda <- train(diagnosis ~ ., data = training, method = "lda")
## Warning in lda.default(x, grouping, ...): variables are collinear
## Warning in lda.default(x, grouping, ...): variables are collinear
pred_rf <- predict(mod_rf, testing)
pred_gbm <- predict(mod_gbm, testing)
pred_lda <- predict(mod_lda, testing)
# Stack the three sets of predictions and fit a random forest on top of them
predDF <- data.frame(pred_rf, pred_gbm, pred_lda, diagnosis = testing$diagnosis)
combModFit <- train(diagnosis ~ ., method = "rf", data = predDF)
combPred <- predict(combModFit, predDF)
confusionMatrix(pred_rf,testing$diagnosis)$overall['Accuracy']
## Accuracy
## 0.7682927
confusionMatrix(pred_gbm,testing$diagnosis)$overall['Accuracy']
## Accuracy
## 0.7926829
confusionMatrix(pred_lda,testing$diagnosis)$overall['Accuracy']
## Accuracy
## 0.7682927
confusionMatrix(combPred,testing$diagnosis)$overall['Accuracy']
## Accuracy
## 0.804878
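Note that the stacked model above is trained and evaluated on the same test set, which tends to give an optimistic accuracy estimate. A minimal sketch of a more conventional setup, carving a validation split out of training to fit the combiner (the split proportion and object names below are illustrative assumptions, not part of the quiz):
set.seed(62433)
inVal <- createDataPartition(training$diagnosis, p = 0.25)[[1]]
validation <- training[inVal, ]
train2 <- training[-inVal, ]
m_rf <- train(diagnosis ~ ., data = train2, method = "rf")
m_lda <- train(diagnosis ~ ., data = train2, method = "lda")
# Fit the combiner only on validation-set predictions
stackDF <- data.frame(p_rf = predict(m_rf, validation),
                      p_lda = predict(m_lda, validation),
                      diagnosis = validation$diagnosis)
stackFit <- train(diagnosis ~ ., data = stackDF, method = "rf")
# Evaluate the stack on the untouched test set
testDF <- data.frame(p_rf = predict(m_rf, testing),
                     p_lda = predict(m_lda, testing))
confusionMatrix(predict(stackFit, testDF), testing$diagnosis)$overall['Accuracy']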
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
set.seed(233)
# Lasso regression; the fitted finalModel is an elasticnet "enet" object
mod_lasso <- train(CompressiveStrength ~ ., data = training, method = "lasso")
library(elasticnet)
# Plot the coefficient paths against the penalty to see which predictor's
# coefficient is the last to shrink to zero
plot.enet(mod_lasso$finalModel, xvar = "penalty", use.color = TRUE)
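The same answer can also be read off numerically rather than from the plot. This is a hedged sketch assuming the standard elasticnet predict.enet() interface; s = 0.05 is an arbitrary small fraction of the full L1 norm, i.e. a heavy penalty:
cf <- predict(mod_lasso$finalModel, type = "coefficients",
              mode = "fraction", s = 0.05)
# Predictors whose coefficients are still nonzero under a heavy penalty
cf$coefficients[cf$coefficients != 0]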
library(lubridate) # For year() function below
dat = read.csv("gaData.csv")
training = dat[year(dat$date) < 2012,]
testing = dat[(year(dat$date)) > 2011,]
tstrain = ts(training$visitsTumblr)
library(forecast)
# BATS: exponential-smoothing state-space model with Box-Cox transform,
# ARMA errors, trend and seasonal components
mod_ts <- bats(tstrain)
# level = 95 -> 95% prediction intervals
# h = dim(testing)[1] -> forecast one period for every row of the test set
fcast <- forecast(mod_ts, level = 95, h = dim(testing)[1])
plot(fcast)
sum(fcast$lower < testing$visitsTumblr & testing$visitsTumblr < fcast$upper) / dim(testing)[1]
## [1] 0.9617021
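Equivalently, since fcast$lower and fcast$upper hold one 95% interval per forecast horizon (one per row of testing), the coverage can be written with mean():
within95 <- fcast$lower < testing$visitsTumblr & testing$visitsTumblr < fcast$upper
mean(within95)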
set.seed(3523)
library(AppliedPredictiveModeling)
library(caret)
library(forecast)
library(e1071)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
set.seed(325)
# Support vector regression on the concrete training data
mod_svm <- svm(CompressiveStrength ~ ., data = training)
pred_svm <- predict(mod_svm, testing)
# forecast::accuracy() accepts either a "forecast" object or a numeric
# vector of predictions plus the matching observed values
accuracy(pred_svm, testing$CompressiveStrength)
## ME RMSE MAE MPE MAPE
## Test set 0.1682863 6.715009 5.120835 -7.102348 19.27739
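As a sanity check, the RMSE column reported by forecast::accuracy() is just the root mean squared prediction error, which can be computed directly:
sqrt(mean((pred_svm - testing$CompressiveStrength)^2))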