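The analysis below leans on a handful of packages; a minimal loading sketch, assuming they are already installed (RColorBrewer supplies brewer.pal for the word cloud colors):
library(readr)        # reading the tweet data
library(tm)           # Corpus(), TermDocumentMatrix(); attaches NLP
library(wordcloud)    # wordcloud()
library(RColorBrewer) # brewer.pal() color palettes
library(plyr)         # laply()
library(plotly)       # plot_ly(), layout(); attaches ggplot2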
corpus = Corpus(VectorSource(zyngatweets))
# corpus = Corpus(VectorSource(cmail))
# create term-document matrix
tdm = TermDocumentMatrix(
  corpus,
  control = list(
    wordLengths = c(3, 20),
    removePunctuation = TRUE,
    stopwords = c("the", "a", stopwords("english")),
    removeNumbers = TRUE,
    tolower = FALSE))
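Before collapsing the term-document matrix to a plain matrix, tm's findFreqTerms() gives a quick peek at the frequent terms; the lowfreq cut-off of 100 below is an arbitrary choice, not part of the original analysis.
# optional: list terms occurring at least 100 times (threshold is arbitrary)
findFreqTerms(tdm, lowfreq = 100)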
# convert to a plain matrix
tdm = as.matrix(tdm)
# get word counts in decreasing order
word_freqs = sort(rowSums(tdm), decreasing=TRUE)
# remove the top words which don't generate insights, such as "the", "a", "and", etc.
word_freqs = word_freqs[-(1:9)] # drops the first nine entries of the sorted frequency vector
# create a data frame with words and their frequencies
dm = data.frame(word=names(word_freqs), freq=word_freqs)
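Dropping by position, as in word_freqs[-(1:9)] above, removes whatever happens to rank in the top nine, so the result shifts whenever the corpus changes. A sketch of a name-based alternative; the drop_terms vector below is illustrative, not the list used in the original run.
# hypothetical alternative: drop low-insight terms by name rather than by position
drop_terms = c("the", "a", "and")   # illustrative list only
word_freqs_alt = word_freqs[!(names(word_freqs) %in% drop_terms)]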
# Plot the 50 most frequent words as a colored word cloud; needs the RColorBrewer package
wordcloud(head(dm$word, 50), head(dm$freq, 50), random.order=FALSE, colors=brewer.pal(8, "Dark2"))

head(word_freqs, 60)
## looking Prized now can Petra
## 258 219 208 192 187
## adult Jeneva found trees rewards
## 186 174 166 160 152
## How bit video sponsorship needing
## 143 140 139 139 138
## shook RTHerescar gotas Points Betty
## 138 137 137 129 125
## The FarmVilleOnWeb Fruit Kathryn Play
## 114 114 113 112 111
## Career Spring get Game You
## 105 104 102 98 95
## use Hat Black Corner King
## 92 92 92 88 88
## County Check win game Nesting
## 88 86 85 83 83
## Horse FarmVille Dolls Mobile Ribbon
## 82 82 82 82 81
## yet Roselyn helping crafting Red
## 80 80 79 79 79
## today Needs awarded Online Market
## 78 78 78 78 78
## Players Technology World Demand High
## 78 78 78 77 77
pos.words = scan('positive-words.txt', what='character', comment.char=';')
neg.words = scan('negative-words.txt', what='character', comment.char=';')
neg.words = c(neg.words, 'wtf', 'fail')
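A quick sanity check that the lexicons loaded as plain character vectors (inspection only; output not reproduced here):
# sanity check: both lexicons should be sizeable character vectors
length(pos.words)
length(neg.words)
head(neg.words)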
# Implementing our sentiment scoring algorithm
require(plyr)
require(stringr)
## Loading required package: stringr
require(stringi)
## Loading required package: stringi
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{
  scores = laply(sentences, function(sentence, pos.words, neg.words) {
    # clean up sentences with R's regex-driven global substitute, gsub():
    sentence = gsub('[[:punct:]]', '', sentence)
    sentence = gsub('[[:cntrl:]]', '', sentence)
    sentence = gsub('\\d+', '', sentence)
    # conversion to lower case is left commented out here, so case is preserved:
    #sentence = tolower(sentence)
    # split into words. str_split is in the stringr package
    word.list = str_split(sentence, '\\s+')
    # sometimes a list() is one level of hierarchy too much
    words = unlist(word.list)
    # compare our words to the dictionaries of positive & negative terms
    pos.matches = match(words, pos.words)
    neg.matches = match(words, neg.words)
    # match() returns the position of the matched term or NA
    # we just want a TRUE/FALSE:
    pos.matches = !is.na(pos.matches)
    neg.matches = !is.na(neg.matches)
    # and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
    score = sum(pos.matches) - sum(neg.matches)
    return(score)
  }, pos.words, neg.words, .progress = .progress)
  scores.df = data.frame(score = scores, text = sentences)
  return(scores.df)
}
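Before scoring the full tweet set, the function can be sanity-checked on a couple of invented sentences (the sample text below is illustrative and not part of the Zynga data):
# illustrative sanity check on made-up sentences
sample_text = c("I love this awesome game", "this update is a terrible fail")
score.sentiment(sample_text, pos.words, neg.words)$score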
sentiment.scores= score.sentiment(zyngatweets, pos.words, neg.words, .progress='none')
score <- sentiment.scores$score
library(plotly)
p <- plot_ly(x = ~score, type = "histogram")
p
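The same scores can also be summarised numerically; a short sketch (splitting at zero is a convention chosen here, not something fixed by the analysis):
# rough numeric view of the sentiment distribution
summary(score)
table(sign(score))   # -1 = net negative, 0 = neutral, 1 = net positive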
# derive the weekday each tweet was created and clean the free-text location field
zynga$days <- weekdays(as.POSIXlt(zynga$created))
zynga$Msg_Location <- clean.text(zynga$location)
# keep a handful of locations of interest
subdf <- subset(zynga, Msg_Location %in% c('Calne', 'Singapore', 'Gotham', 'Sweden', 'Ireland'))
# count tweets per location and weekday, then reshape to one row per location
dfrm <- data.frame(table(subdf[, c("Msg_Location", "days")]))
locationDays = reshape(dfrm, direction = "wide", timevar = "days", idvar = "Msg_Location")
p <- plot_ly(locationDays, x = ~Msg_Location, y = ~Freq.Monday, type = 'bar', name = 'Monday') %>%
add_trace(y = ~Freq.Tuesday, name = 'Tuesday') %>%
add_trace(y = ~Freq.Wednesday, name = 'Wednesday') %>%
add_trace(y = ~Freq.Thursday, name = 'Thursday') %>%
add_trace(y = ~Freq.Friday, name = 'Friday') %>%
add_trace(y = ~Freq.Saturday, name = 'Saturday') %>%
add_trace(y = ~Freq.Sunday, name = 'Sunday') %>%
layout(yaxis = list(title = 'Count'), barmode = 'group')
p
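If a stacked view is preferred over grouped bars, the same traces can be reused and only the bar mode changed; a minor variant, not part of the original figure:
# stacked variant of the same weekday-by-location chart
p %>% layout(barmode = 'stack')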