#install.packages("twitteR")
library(twitteR)
#Example from http://www.rdatamining.com/examples/text-mining
#https://dev.twitter.com/
#http://geoffjentry.hexdump.org/twitteR.pdf
#https://twitter.com/apps/new
#> setup_twitter_oauth("APIkey", "APIsecret", "AccessToken", "AccessSecret")
# Replace the placeholders with the credentials from your own app page; never publish real keys.
setup_twitter_oauth("YOUR_API_KEY", "YOUR_API_SECRET", "YOUR_ACCESS_TOKEN", "YOUR_ACCESS_SECRET")
## [1] "Using direct authentication"
rdmTweets <- userTimeline("papaintegrator", n=100)
rdmTweets[1:3]
## [[1]]
## [1] "papaintegrator: The Underwater Thug <U+0096> Ep. 5 https://t.co/7vsgnrPEvJ https://t.co/2nAMrtuhdg"
## 
## [[2]]
## [1] "papaintegrator: The Underwater Thug <U+0096> Ep. 4 https://t.co/6cubinNTSQ https://t.co/BXAhSoHPzf"
## 
## [[3]]
## [1] "papaintegrator: The Underwater Thug <U+0096> Ep. 3 https://t.co/vDeiQWjSO8 https://t.co/VUZt85sbmA"
df <- do.call("rbind", lapply(rdmTweets, as.data.frame))
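# Alternatively, twitteR ships a helper that does the same list-to-data-frame
# conversion in one call; the result is equivalent to the rbind/lapply line above:
# df <- twListToDF(rdmTweets)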
library(tm)
## Loading required package: NLP
Corpus1 <- Corpus(VectorSource(df$text))
Corpus1 <- tm_map(Corpus1, removePunctuation)  
Corpus1 <- tm_map(Corpus1, removeNumbers)  
# Wrap base functions in content_transformer() so tm_map() keeps the documents as PlainTextDocuments
Corpus1 <- tm_map(Corpus1, content_transformer(tolower))
Corpus1 <- tm_map(Corpus1, removeWords, stopwords("english"))
Corpus1 <- tm_map(Corpus1, stemDocument)  # stemming requires the SnowballC package
Corpus1 <- tm_map(Corpus1, stripWhitespace)   
# Not needed once content_transformer() is used above; kept for reference
#Corpus1 <- tm_map(Corpus1, PlainTextDocument)

dtm <- DocumentTermMatrix(Corpus1)
tdm <- TermDocumentMatrix(Corpus1)
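# dtm has one row per tweet and one column per term; tdm is its transpose.
# A quick way to peek at a corner of either matrix (the 1:5 subscripts are illustrative
# and assume the corpus yields at least five terms and documents):
# inspect(tdm[1:5, 1:5])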
matx1 <- as.matrix(tdm)
sort1 <- sort(rowSums(matx1), decreasing = TRUE)
di <- data.frame(Word = names(sort1), Frequency = sort1)
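# Optionally inspect the most frequent terms before plotting (output depends on
# whatever tweets were fetched, so none is shown here):
# head(di, 10)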

library(wordcloud)
## Loading required package: RColorBrewer
wordcloud(di$Word, di$Frequency, max.words = 100, colors = brewer.pal(6, "Set1"))
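# A variant of the same plot: wordcloud()'s random.order and min.freq arguments put the
# most frequent words in the centre and drop terms that occur only once.
# wordcloud(di$Word, di$Frequency, max.words = 100, min.freq = 2,
#           random.order = FALSE, colors = brewer.pal(6, "Set1"))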

findFreqTerms(dtm, lowfreq=10)
## [1] "just"           "papaintegrator" "photo"          "posted"
findAssocs(dtm, 'rstats', 0.30)
## $rstats
## numeric(0)
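# findAssocs() returns numeric(0) when the queried term does not occur in the matrix;
# "rstats" comes from the original rdatamining example and is absent from these tweets.
# Querying a term that findFreqTerms() reported above may return correlated terms, e.g.:
# findAssocs(dtm, "photo", 0.30)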