# standard preamble: load the HouseVotes84 data set from mlbench
library(mlbench)
data(HouseVotes84)
      # Misclassification rate: the fraction of predictions that
      # disagree with the true labels (NA comparisons propagate as NA).
err <- function(y.true, y.pred) {
  mean(y.true != y.pred)
}

      # Split the data at random into training and test sets,
      # roughly 2:1 (each row lands in the test set with prob. 0.33)
rhv <- runif(nrow(HouseVotes84))
hv.test <- HouseVotes84[rhv < 0.33, ]
hv.train <- HouseVotes84[rhv >= 0.33, ]

      # We will use these ensemble techniques:
      #   bagging        -- package ipred
      #   boosting       -- package ada
      #   random forests -- package randomForest
      # Install any missing package, then attach all three.
      # requireNamespace() checks availability directly; scanning
      # installed.packages() is documented as potentially slow.
for (pkg in c("ipred", "ada", "randomForest")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(ipred)
library(ada)
library(randomForest)

      # Bagging (ipred): ensembles of 5 to 500 bootstrap-aggregated trees
hv.bag5 <- bagging(Class ~ ., hv.train, nbagg=5)
hv.bag10 <- bagging(Class ~ ., hv.train, nbagg=10)
hv.bag50 <- bagging(Class ~ ., hv.train, nbagg=50)
hv.bag100 <- bagging(Class ~ ., hv.train, nbagg=100)
hv.bag500 <- bagging(Class ~ ., hv.train, nbagg=500)
      # Test-set error for each ensemble size.  suppressWarnings()
      # silences prediction warnings locally; the previous global
      # options(warn=-1)/options(warn=0) toggle would have left warnings
      # disabled for the rest of the session if any predict() call errored.
suppressWarnings(err(hv.test$Class, predict(hv.bag5, hv.test)))
suppressWarnings(err(hv.test$Class, predict(hv.bag10, hv.test)))
suppressWarnings(err(hv.test$Class, predict(hv.bag50, hv.test)))
suppressWarnings(err(hv.test$Class, predict(hv.bag100, hv.test)))
suppressWarnings(err(hv.test$Class, predict(hv.bag500, hv.test)))

      # Boosting (AdaBoost, package ada): ensembles grown for
      # 5 to 500 boosting iterations on the training set
hv.boost5 <- ada(Class ~ ., hv.train, iter=5)
hv.boost10 <- ada(Class ~ ., hv.train, iter=10)
hv.boost50 <- ada(Class ~ ., hv.train, iter=50)
hv.boost100 <- ada(Class ~ ., hv.train, iter=100)
hv.boost500 <- ada(Class ~ ., hv.train, iter=500)
      # test-set error for each number of boosting iterations
err(hv.test$Class, predict(hv.boost5, hv.test))
err(hv.test$Class, predict(hv.boost10, hv.test))
err(hv.test$Class, predict(hv.boost50, hv.test))
err(hv.test$Class, predict(hv.boost100, hv.test))
err(hv.test$Class, predict(hv.boost500, hv.test))

      # Random forest: ensembles of 5 to 500 trees; na.roughfix imputes
      # missing training values (median/mode) instead of dropping rows
hv.rf5 <- randomForest(Class ~ ., hv.train, ntree=5, na.action=na.roughfix)
hv.rf10 <- randomForest(Class ~ ., hv.train, ntree=10, na.action=na.roughfix)
hv.rf50 <- randomForest(Class ~ ., hv.train, ntree=50, na.action=na.roughfix)
hv.rf100 <- randomForest(Class ~ ., hv.train, ntree=100, na.action=na.roughfix)
hv.rf500 <- randomForest(Class ~ ., hv.train, ntree=500, na.action=na.roughfix)
      # before testing -- impute missing values in the test set the same
      # way, so predict() sees no NAs
hv.test1 <- na.roughfix(hv.test)
      # test-set error for each forest size
err(hv.test1$Class, predict(hv.rf5, hv.test1))
err(hv.test1$Class, predict(hv.rf10, hv.test1))
err(hv.test1$Class, predict(hv.rf50, hv.test1))
err(hv.test1$Class, predict(hv.rf100, hv.test1))
err(hv.test1$Class, predict(hv.rf500, hv.test1))

      # Assess the predictive usefulness of the attributes with a
      # random forest grown with importance=TRUE (TRUE spelled out --
      # the T shorthand is a reassignable variable, not a keyword)
hv.rf100.imp <- randomForest(Class ~ ., hv.train, ntree=100,
                             na.action=na.roughfix, importance=TRUE)
      # compute the importance matrix once, then show the attributes
      # sorted by the measure in column 3 (mean decrease in accuracy)
hv.imp <- importance(hv.rf100.imp)
hv.imp[order(hv.imp[,3], decreasing=TRUE),]
varImpPlot(hv.rf100.imp)