Synopsis

The goal of this milestone report is to show that I have become familiar with the data and that I am on track to create the prediction algorithm. The report, published on RPubs (http://rpubs.com/), explains the exploratory analysis and the goals for the eventual app and algorithm. It is meant to be concise, describing only the major features of the data identified so far and briefly summarizing the plans for the prediction algorithm and Shiny app in a way that is understandable to a non-data-scientist manager, with tables and plots illustrating important summaries of the data set. The motivation for this project is to:

1. Demonstrate that the data have been downloaded and successfully loaded.
2. Create a basic report of summary statistics about the data sets.
3. Report any interesting findings amassed so far.
4. Get feedback on the plans for creating a prediction algorithm and Shiny app.

library(dplyr)
library(ggplot2)
library(stringi)
library(tm)
library(RWeka)

Reading in the Data and Basic Analysis

Here I perform some basic calculations and initial plots to get an understanding of the data. I start by finding the length (in characters) of the longest line in each of the three files.

# Longest line (in characters) in the blogs file
con <- file("en_US.blogs.txt", "r")
lenn <- nchar(readLines(con, skipNul = TRUE))
max(lenn)
## [1] 40835
close(con)

# Longest line in the news file
con <- file("en_US.news.txt", "r")
lenn <- nchar(readLines(con, skipNul = TRUE))
max(lenn)
## [1] 5760
close(con)

# Longest line in the Twitter file
con <- file("en_US.twitter.txt", "r")
lenn <- nchar(readLines(con, skipNul = TRUE))
max(lenn)
## [1] 213
close(con)
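
Since stringi is already loaded, the per-file numbers above can also be collected into a single summary table. The sketch below is one way to do this, assuming the three files sit in the working directory as in the reads above; it reports file size, line count, word count and the longest line for each source.

# Sketch: summary table for the three source files (file sizes in MB)
files <- c("en_US.blogs.txt", "en_US.news.txt", "en_US.twitter.txt")
summary_df <- do.call(rbind, lapply(files, function(f) {
  lines <- readLines(f, encoding = "UTF-8", skipNul = TRUE)
  data.frame(file      = f,
             size_MB   = round(file.info(f)$size / 1024^2, 1),
             lines     = length(lines),
             words     = sum(stri_count_words(lines)),
             max_chars = max(nchar(lines)))
}))
summary_df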


# Load the blog data and build a word-frequency table
blog_data <- readLines("en_US.blogs.txt", encoding = "UTF-8", skipNul = TRUE)

# Remove URLs and punctuation, then split each line into words
txt <- gsub("http[^[:space:]]*", "", blog_data)
txt_punct <- gsub("[[:punct:]]", "", txt)
word_list <- strsplit(txt_punct, " ")

# Collect the words as a factor (stringsAsFactors = TRUE is needed on R >= 4.0,
# where data.frame() no longer converts strings to factors by default)
words_all <- data.frame(words = unlist(word_list), stringsAsFactors = TRUE)
words_all$words <- droplevels(words_all$words)

# Reorder the factor levels by decreasing frequency and keep the top 20 words
words_all[, "words"] <- factor(words_all[, "words"],
                               levels = names(sort(table(words_all[, "words"]),
                                                   decreasing = TRUE)))

a <- data.frame(table(words_all[, "words"]))
a <- head(a, 20)

ggplot(a, aes(x=reorder(Var1, Freq), y=Freq, group = 1)) +
  geom_bar(stat='identity', position = "dodge", color = "black") +
  ylab("Occurrences")+
  xlab("")+
  ggtitle("Blog data")+
  coord_flip()+
  theme(axis.line = element_line(), 
        axis.text=element_text(color='black'), 
        axis.title = element_text(colour = 'black'), 
        legend.text=element_text(), legend.title=element_text(), 
        legend.key = element_rect(colour = "black"))

# Load the news data and repeat the word-frequency analysis
news_data <- readLines("en_US.news.txt", encoding = "UTF-8", skipNul = TRUE)

txt <- gsub("http[^[:space:]]*", "", news_data)
txt_punct <- gsub("[[:punct:]]", "", txt)
word_list <- strsplit(txt_punct, " ")

words_all <- data.frame(words = unlist(word_list), stringsAsFactors = TRUE)
words_all$words <- droplevels(words_all$words)

# Reorder the factor levels by decreasing frequency and keep the top 20 words
words_all[, "words"] <- factor(words_all[, "words"],
                               levels = names(sort(table(words_all[, "words"]),
                                                   decreasing = TRUE)))

a <- data.frame(table(words_all[, "words"]))
a <- head(a, 20)

ggplot(a, aes(x=reorder(Var1, Freq), y=Freq, group = 1)) +
  geom_bar(stat='identity', position = "dodge", color = "black") +
  ylab("Occurrences")+
  xlab("")+
  ggtitle("News data")+
  coord_flip()+
  theme(axis.line = element_line(), 
        axis.text=element_text(color='black'), 
        axis.title = element_text(colour = 'black'), 
        legend.text=element_text(), legend.title=element_text(), 
        legend.key = element_rect(colour = "black"))

# Load the Twitter data
twitter_data <- readLines("en_US.twitter.txt", encoding = "UTF-8", skipNul = TRUE)

# Take a reproducible 2% random sample of each source to keep the corpus manageable
set.seed(12)
blog_sub <- blog_data[sample(seq_along(blog_data), length(blog_data) * 0.02)]
news_sub <- news_data[sample(seq_along(news_data), length(news_data) * 0.02)]
twitter_sub <- twitter_data[sample(seq_along(twitter_data), length(twitter_data) * 0.02)]

# Combine the three samples into a single corpus for the n-gram analysis below
myCorpus <- Corpus(VectorSource(c(blog_sub, news_sub, twitter_sub)),
                   readerControl = list(reader = readPlain, language = "en"))


# Build the word-frequency table for the Twitter data
txt <- gsub("http[^[:space:]]*", "", twitter_data)
txt_punct <- gsub("[[:punct:]]", "", txt)
word_list <- strsplit(txt_punct, " ")

words_all <- data.frame(words = unlist(word_list), stringsAsFactors = TRUE)
words_all$words <- droplevels(words_all$words)

# Reorder the factor levels by decreasing frequency and keep the top 20 words
words_all[, "words"] <- factor(words_all[, "words"],
                               levels = names(sort(table(words_all[, "words"]),
                                                   decreasing = TRUE)))

a <- data.frame(table(words_all[, "words"]))
a <- head(a, 20)

ggplot(a, aes(x=reorder(Var1, Freq), y=Freq, group = 1)) +
  geom_bar(stat='identity', position = "dodge", color = "black") +
  ylab("Occurrences")+
  xlab("")+
  ggtitle("Twitter data")+
  coord_flip()+
  theme(axis.line = element_line(), 
        axis.text=element_text(color='black'), 
        axis.title = element_text(colour = 'black'), 
        legend.text=element_text(), legend.title=element_text(), 
        legend.key = element_rect(colour = "black"))

N-Grams

Next I will construct some n-grams to see which words occur together. Before that, I will clean the merged corpus built from the three samples above and look at the most common words once stopwords have been removed.

# Strip non-ASCII characters, then clean up the merged corpus
myCorpus <- Corpus(VectorSource(sapply(myCorpus, function(row) iconv(row, "latin1", "ASCII", sub = ""))))

myCorpus <- tm_map(myCorpus, removeNumbers)
myCorpus <- tm_map(myCorpus, removePunctuation)
myCorpus <- tm_map(myCorpus, content_transformer(tolower))
myCorpus <- tm_map(myCorpus, stripWhitespace)

# Remove stopwords (plus a few custom ones) in a separate copy used for the
# frequent-word plot; the n-grams below keep the stopwords
myStopwords <- c(stopwords('SMART'), "use", "see", "used", "via", "amp")
myCorpus_1 <- tm_map(myCorpus, removeWords, myStopwords)

# Stemming is left out for now
#myCorpus_1 <- tm_map(myCorpus_1, stemDocument)

myCorpus_1 <- Corpus(VectorSource(myCorpus_1))

tdm <- TermDocumentMatrix(myCorpus_1, control = list(wordLengths = c(1, Inf)))

# Keep terms that appear at least 1000 times and order them by frequency
freq.terms <- findFreqTerms(tdm, lowfreq = 1000)

term.freq <- rowSums(as.matrix(tdm))
term.freq <- subset(term.freq, term.freq >= 1000)
df <- data.frame(term = names(term.freq), freq = term.freq)

df <- df[order(df$freq, decreasing = TRUE), ]

ggplot(df, aes(x=reorder(term, freq), y=freq)) + 
  geom_bar(stat='identity', color = "black") +
  xlab("Terms") + 
  ylab("Count") + 
  ggtitle("Frequent Words in Corpus with Stopwords Removed")+
  coord_flip() +
  theme(axis.line = element_line(), 
        axis.text=element_text(color='black'), 
        axis.title = element_text(colour = 'black'), 
        legend.text=element_text(), 
        legend.title=element_text(), 
        legend.key = element_rect(colour = "black"))

# RWeka tokenizers for 1- to 4-grams
uniTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 1, max = 1))
biTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
triTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
quadTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 4, max = 4))

# Build term-document matrices of n-grams from the cleaned corpus (stopwords retained)
myCorpus <- VCorpus(VectorSource(myCorpus))
uni <- TermDocumentMatrix(myCorpus, control = list(tokenize = uniTokenizer))
bi <- TermDocumentMatrix(myCorpus, control = list(tokenize = biTokenizer))
tri <- TermDocumentMatrix(myCorpus, control = list(tokenize = triTokenizer))
quad <- TermDocumentMatrix(myCorpus, control = list(tokenize = quadTokenizer))

# Drop very sparse terms and sort each n-gram table by decreasing frequency
tmp <- removeSparseTerms(uni, 0.999)
freq <- sort(rowSums(as.matrix(tmp)), decreasing = TRUE)
uni_sorted <- data.frame(word = names(freq), freq = freq)

tmp <- removeSparseTerms(bi, 0.999)
freq <- sort(rowSums(as.matrix(tmp)), decreasing = TRUE)
bi_sorted <- data.frame(word = names(freq), freq = freq)

tmp <- removeSparseTerms(tri, 0.999)
freq <- sort(rowSums(as.matrix(tmp)), decreasing = TRUE)
tri_sorted <- data.frame(word = names(freq), freq = freq)

tmp <- removeSparseTerms(quad, 0.9999)
freq <- sort(rowSums(as.matrix(tmp)), decreasing = TRUE)
quad_sorted <- data.frame(word = names(freq), freq = freq)
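
Since only the bigram, trigram and quadgram frequencies are plotted below, a quick look at the top rows of uni_sorted serves as a small table of the most common unigrams in the sampled corpus.

# Top 10 unigrams in the sampled corpus
head(uni_sorted, 10)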

Next I will plot the most frequent N-grams.

ggplot(data = head(bi_sorted, 20), aes(x = reorder(word, freq), y = freq)) + 
  geom_bar(stat="identity", col = "black")+ 
  xlab("")+
  ylab("Frequency")+
  ggtitle("Frequencies of Bigrams")+
  coord_flip()+
  theme(axis.line = element_line(), 
        axis.text=element_text(color='black'),
        axis.title = element_text(colour = 'black'), 
        legend.text=element_text(), 
        legend.title=element_text(),
        legend.key = element_rect(colour = "black"),
        legend.position='none')

ggplot(data = head(tri_sorted, 20), aes(x = reorder(word, freq), y = freq)) + 
  geom_bar(stat="identity", col = "black")+ 
  xlab("")+
  ylab("Frequency")+
  ggtitle("Frequencies of Trigrams")+
  coord_flip()+
  theme(axis.line = element_line(), 
        axis.text=element_text(color='black'), 
        axis.title = element_text(colour = 'black'), 
        legend.text=element_text(), 
        legend.title=element_text(),
        legend.key = element_rect(colour = "black"),
        legend.position='none')

ggplot(data = head(quad_sorted, 20), aes(x = reorder(word, freq), y = freq)) + 
  geom_bar(stat="identity", col = "black")+ 
  xlab("")+
  ylab("Frequency")+
  ggtitle("Frequencies of Quadgrams")+
  coord_flip()+
  theme(axis.line = element_line(), 
        axis.text=element_text(color='black'), 
        axis.title = element_text(colour = 'black'), 
        legend.text=element_text(), 
        legend.title=element_text(),
        legend.key = element_rect(colour = "black"),
        legend.position='none')

Further Development Ideas

The next step is to build a prediction algorithm based on the n-gram model above. The model will then be deployed as a Shiny app that suggests the most likely next word after a phrase is typed.
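
To make the plan concrete, the sketch below shows one possible way the lookup could work, using the bi_sorted, tri_sorted and quad_sorted tables built above. predict_next_word() is a hypothetical helper for illustration only, not the final algorithm: it backs off from quadgrams to trigrams to bigrams and returns the most frequent completions found.

# Sketch of a simple back-off next-word lookup over the sorted n-gram tables.
# Illustrative only; the final model will need smoothing and a more compact
# data structure before it can run inside a Shiny app.
predict_next_word <- function(phrase, n = 3) {
  tokens <- tolower(unlist(strsplit(gsub("[[:punct:]]", "", phrase), "\\s+")))
  lookup <- function(tbl, prefix) {
    # rows whose n-gram starts with the given prefix; the tables are already
    # sorted by decreasing frequency, so the first matches are the best ones
    hits <- tbl[grepl(paste0("^", prefix, " "), tbl$word), ]
    if (nrow(hits) == 0) return(character(0))
    sub(".* ", "", head(as.character(hits$word), n))  # keep the predicted last word
  }
  if (length(tokens) >= 3) {
    out <- lookup(quad_sorted, paste(tail(tokens, 3), collapse = " "))
    if (length(out) > 0) return(out)
  }
  if (length(tokens) >= 2) {
    out <- lookup(tri_sorted, paste(tail(tokens, 2), collapse = " "))
    if (length(out) > 0) return(out)
  }
  lookup(bi_sorted, tail(tokens, 1))
}

predict_next_word("thanks for the")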