The goal of this project is just to display that you’ve gotten used to working with the data and that you are on track to create your prediction algorithm. Please submit a report on R Pubs (http://rpubs.com/) that explains your exploratory analysis and your goals for the eventual app and algorithm. This document should be concise and explain only the major features of the data you have identified and briefly summarize your plans for creating the prediction algorithm and Shiny app in a way that would be understandable to a non-data scientist manager. You should make use of tables and plots to illustrate important summaries of the data set. The motivation for this project is to:
* Demonstrate that you’ve downloaded the data and have successfully loaded it in.
* Create a basic report of summary statistics about the data sets.
* Report any interesting findings that you amassed so far.
* Get feedback on your plans for creating a prediction algorithm and Shiny app.
I set the working directory and load the three English data files (blogs, news, and Twitter).
# Put your data in your working directory first
blogs <- readLines("en_US.blogs.txt",warn=FALSE,encoding="UTF-8",skipNul=TRUE)
news <- readLines("en_US.news.txt",warn=FALSE,encoding="UTF-8", skipNul=TRUE)
twitter <- readLines("en_US.twitter.txt",warn=FALSE,encoding="UTF-8",skipNul=TRUE)
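If the raw files are not already in the working directory, they can be downloaded first. This is a minimal sketch assuming the standard Coursera SwiftKey download link and the final/en_US/ layout inside the zip:
# Download and unzip the data set if it is not already present
# (sketch; URL and zip layout are assumptions based on the course instructions)
if (!file.exists("en_US.blogs.txt")) {
  url <- "https://d396qusza40orc.cloudfront.net/dsscapstone/dataset/Coursera-SwiftKey.zip"
  download.file(url, destfile="Coursera-SwiftKey.zip", mode="wb")
  unzip("Coursera-SwiftKey.zip",
        files=c("final/en_US/en_US.blogs.txt",
                "final/en_US/en_US.news.txt",
                "final/en_US/en_US.twitter.txt"),
        junkpaths=TRUE, exdir=".")
}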
Summarize the contents of each file: file size, number of lines, number of characters, and number of words, and present the result as a table.
library(stringi)  # for stri_stats_general() and stri_stats_latex()
stats <- data.frame(
  FileName = c("en_US.blogs", "en_US.news", "en_US.twitter"),
  FileSizeinMB = c(file.info("en_US.blogs.txt")$size / 1024^2,
                   file.info("en_US.news.txt")$size / 1024^2,
                   file.info("en_US.twitter.txt")$size / 1024^2),
  t(rbind(sapply(list(blogs, news, twitter), stri_stats_general),
          WordCount = sapply(list(blogs, news, twitter), stri_stats_latex)[4, ]))
)
knitr::kable(stats)
| FileName | FileSizeinMB | Lines | LinesNEmpty | Chars | CharsNWhite | WordCount |
|---|---|---|---|---|---|---|
| en_US.blogs | 200.4242 | 899288 | 899288 | 206824382 | 170389539 | 37570839 |
| en_US.news | 196.2775 | 77259 | 77259 | 15639408 | 13072698 | 2651432 |
| en_US.twitter | 159.3641 | 2360148 | 2360148 | 162096241 | 134082806 | 30451170 |
The data sets are very large, so I use the sample() function to take a 1% random sample of each file.
set.seed(12345)
# Sample only 1% of each file
sample_data <- c(sample(blogs, length(blogs) * 0.01),
                 sample(news, length(news) * 0.01),
                 sample(twitter, length(twitter) * 0.01))
# Drop non-ASCII characters
sample_data <- iconv(sample_data, "latin1", "ASCII", sub = "")
rm(blogs, news, twitter)
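The word-cloud step further below rebuilds a corpus from files on disk with DirSource("Sample"), so the sampled text is written out first. A minimal sketch; the directory and file name are my choice, made to match that DirSource call:
# Save the sampled text so it can be re-read later with DirSource("Sample")
if (!dir.exists("Sample")) dir.create("Sample")
writeLines(sample_data, file.path("Sample", "sample_data.txt"))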
Build the corpus, clean it, and convert it to a data frame to check the result.
library(tm)
library(NLP)
corpus <- VCorpus(VectorSource(sample_data))
corpus1 <- tm_map(corpus, removePunctuation)                  # remove punctuation
corpus2 <- tm_map(corpus1, stripWhitespace)                   # collapse extra whitespace
corpus3 <- tm_map(corpus2, content_transformer(tolower))      # convert to lowercase
corpus4 <- tm_map(corpus3, removeNumbers)                     # remove numbers
corpus5 <- corpus4  # re-wrapping as PlainTextDocument is unnecessary when content_transformer() is used
corpus6 <- tm_map(corpus5, removeWords, stopwords("english")) # remove common English stop words
corpus_result <- data.frame(text = unlist(sapply(corpus6, '[', "content")),
                            stringsAsFactors = FALSE)
rm(corpus, corpus1, corpus2, corpus3, corpus4, corpus5)
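As a quick sanity check on the cleaning (the exact lines will vary with the seed and the sample), look at the first few rows of the data frame:
# Peek at a few cleaned lines
head(corpus_result$text, 3)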
To explore the data, I produce a word cloud showing frequently used terms in the sampled data, with word size proportional to frequency.
# Re-read the sampled text saved in the "Sample" directory above
corpus <- Corpus(DirSource("Sample"), readerControl = list(reader=readPlain, language="en_US"))
#Create function to transform the data
removeURL <- function(x) gsub("http[[:alnum:]]*","",x)
removeSign <- function(x) gsub("[[:punct:]]","",x)
removeNum <- function(x) gsub("[[:digit:]]","",x)
removeapo <- function(x) gsub("'","",x)
removeNonASCII <- function(x) iconv(x, "latin1", "ASCII", sub="")
removerepeat <- function(x) gsub("([[:alpha:]])\\1{2,}", "\\1\\1", x)
toLowerCase <- function(x) sapply(x,tolower)
removeSpace <- function(x) gsub("\\s+"," ",x)
#Transform the corpus
corpus<-tm_map(corpus,content_transformer(removeapo)) # remove apostrophes
corpus<-tm_map(corpus,content_transformer(removeNum)) # remove numbers
corpus<-tm_map(corpus,content_transformer(removeURL)) # remove web URLs
corpus<-tm_map(corpus,content_transformer(removeSign)) # remove remaining punctuation
corpus<-tm_map(corpus,content_transformer(removeNonASCII)) # remove non-ASCII characters
corpus<-tm_map(corpus,content_transformer(toLowerCase)) # convert uppercase to lowercase
corpus<-tm_map(corpus,content_transformer(removerepeat)) # collapse letters repeated three or more times to two
corpus<-tm_map(corpus,content_transformer(removeSpace)) # collapse multiple spaces
corpus<-tm_map(corpus,removeWords,stopwords("english")) # remove common English stop words
library(wordcloud)
library(RColorBrewer)
wordcloud(corpus, max.words=125, random.order=TRUE, rot.per=.15,
          colors=colorRampPalette(brewer.pal(9,"Blues"))(32), scale=c(3, .3))
Now plot the frequencies of the words used across the news, Twitter, and blog samples.
corpus_tdm <- TermDocumentMatrix(corpus)
corpus_tdm_m <- as.matrix(corpus_tdm)
corpus_tdm_m_freq <- rowSums(corpus_tdm_m)
rm(corpus_tdm, corpus_tdm_m)
corpus_tdm_m_freq <-sort(corpus_tdm_m_freq, decreasing = TRUE)
barplot(corpus_tdm_m_freq[1:25], col = "Blue", las = 2, main = "Word Frequency of the data")
Extract the words and their frequencies for 1-, 2-, and 3-grams.
library(RWeka)
one<-function(x) NGramTokenizer(x,Weka_control(min=1,max=1))
two<-function(x) NGramTokenizer(x,Weka_control(min=2,max=2))
thr<-function(x) NGramTokenizer(x,Weka_control(min=3,max=3))
one_table<-TermDocumentMatrix(corpus6,control=list(tokenize=one))
two_table<-TermDocumentMatrix(corpus6,control=list(tokenize=two))
thr_table<-TermDocumentMatrix(corpus6,control=list(tokenize=thr))
one_corpus<-findFreqTerms(one_table,lowfreq=1000)
two_corpus<-findFreqTerms(two_table,lowfreq=80)
thr_corpus<-findFreqTerms(thr_table,lowfreq=10)
# 1-gram
one_corpus_num<-rowSums(as.matrix(one_table[one_corpus,]))
one_corpus_table<-data.frame(Word=names(one_corpus_num),frequency=one_corpus_num)
one_corpus_sort <- one_corpus_table[order(-one_corpus_table$frequency),]
rownames(one_corpus_sort) <- NULL
knitr::kable(head(one_corpus_sort))
| Word | frequency |
|---|---|
| just | 2576 |
| like | 2218 |
| will | 2211 |
| one | 2049 |
| get | 1869 |
| can | 1866 |
# 2-gram
two_corpus_num<-rowSums(as.matrix(two_table[two_corpus,]))
two_corpus_table<-data.frame(Word=names(two_corpus_num),frequency=two_corpus_num)
two_corpus_sort<-two_corpus_table[order(-two_corpus_table$frequency),]
rownames(two_corpus_sort) <- NULL
knitr::kable(head(two_corpus_sort))
| Word | frequency |
|---|---|
| cant wait | 208 |
| right now | 206 |
| dont know | 164 |
| last night | 148 |
| im going | 130 |
| feel like | 125 |
# 3-gram
thr_corpus_num<-rowSums(as.matrix(thr_table[thr_corpus,]))
thr_corpus_table<-data.frame(Word=names(thr_corpus_num),frequency=thr_corpus_num)
thr_corpus_sort<-thr_corpus_table[order(-thr_corpus_table$frequency),]
rownames(thr_corpus_sort) <- NULL
knitr::kable(head(thr_corpus_sort))
| Word | frequency |
|---|---|
| cant wait see | 45 |
| happy mothers day | 36 |
| happy new year | 24 |
| im pretty sure | 18 |
| italy lakes holidays | 18 |
| little italy boston | 17 |
Plot the top 10 terms for each N-gram to confirm which words and phrases are most frequent in the sampled data.
library(ggplot2)
one_g <- ggplot(one_corpus_sort[1:10,],
                aes(x=reorder(Word,-frequency),y=frequency,fill=frequency)) +
  geom_bar(stat="identity") +
  labs(title="Unigrams",x="Words",y="Frequency") +
  theme(axis.text.x=element_text(angle=90))
one_g
two_g <- ggplot(two_corpus_sort[1:10,],
                aes(x=reorder(Word,-frequency),y=frequency,fill=frequency)) +
  geom_bar(stat="identity") +
  labs(title="Bigrams",x="Words",y="Frequency") +
  theme(axis.text.x=element_text(angle=90))
two_g
thr_g <- ggplot(thr_corpus_sort[1:10,],
                aes(x=reorder(Word,-frequency),y=frequency,fill=frequency)) +
  geom_bar(stat="identity") +
  labs(title="Trigrams",x="Words",y="Frequency") +
  theme(axis.text.x=element_text(angle=90))
thr_g
This completes the initial exploratory analysis. Next, I will build a next-word prediction algorithm based on the N-gram frequencies above and wrap it in a Shiny app that takes a phrase as input and returns the predicted next word.
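As a preview of the planned approach, below is a minimal sketch of a frequency-based backoff lookup over the bigram and trigram tables built above. The function predict_next_word and its details are my own illustration of the idea, not the finished algorithm; the final version will use larger N-gram tables, smoothing, and better handling of unseen phrases, and the Shiny app will simply call a function like this on the user's input.
# Minimal backoff next-word lookup using the N-gram tables built above
# (an illustration of the planned approach, not the final algorithm)
predict_next_word <- function(phrase) {
  # Clean the input roughly the same way the corpus was cleaned
  words <- unlist(strsplit(tolower(gsub("[[:punct:]]", "", phrase)), "\\s+"))
  words <- words[words != ""]

  # Try to match the last two words against the trigram table
  if (length(words) >= 2) {
    key <- paste(tail(words, 2), collapse = " ")
    hits <- thr_corpus_sort[grepl(paste0("^", key, " "), thr_corpus_sort$Word), ]
    if (nrow(hits) > 0) {
      return(tail(strsplit(as.character(hits$Word[1]), " ")[[1]], 1))
    }
  }

  # Back off to the bigram table using only the last word
  if (length(words) >= 1) {
    key <- tail(words, 1)
    hits <- two_corpus_sort[grepl(paste0("^", key, " "), two_corpus_sort$Word), ]
    if (nrow(hits) > 0) {
      return(tail(strsplit(as.character(hits$Word[1]), " ")[[1]], 1))
    }
  }

  # Fall back to the most frequent unigram
  as.character(one_corpus_sort$Word[1])
}

predict_next_word("I cant")  # expected to suggest "wait" given the bigram table above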