Instructions

The goal of this project is just to display that you’ve gotten used to working with the data and that you are on track to create your prediction algorithm. Please submit a report on R Pubs (http://rpubs.com/) that explains your exploratory analysis and your goals for the eventual app and algorithm. This document should be concise and explain only the major features of the data you have identified and briefly summarize your plans for creating the prediction algorithm and Shiny app in a way that would be understandable to a non-data scientist manager. You should make use of tables and plots to illustrate important summaries of the data set. The motivation for this project is to:

  1. Demonstrate that you’ve downloaded the data and have successfully loaded it in.

  2. Create a basic report of summary statistics about the data sets.

  3. Report any interesting findings that you have amassed so far.

  4. Get feedback on your plans for creating a prediction algorithm and Shiny app.

Data

Link to the data: https://d396qusza40orc.cloudfront.net/dsscapstone/dataset/Coursera-SwiftKey.zip

Load the data:

news <- readLines("D:/Coursera/Data Science Capstone/FINAL PROJECT/final/en_US/en_US_news.txt", encoding = "UTF-8", skipNul = TRUE)
## Warning in readLines("D:/Coursera/Data Science Capstone/FINAL PROJECT/final/
## en_US/en_US_news.txt", : incomplete final line found on 'D:/Coursera/Data
## Science Capstone/FINAL PROJECT/final/en_US/en_US_news.txt'
blogs <- readLines("D:/Coursera/Data Science Capstone/FINAL PROJECT/final/en_US/en_US_blogs.txt", encoding = "UTF-8", skipNul = TRUE)
twitter <- readLines("D:/Coursera/Data Science Capstone/FINAL PROJECT/final/en_US/en_US_twitter.txt", encoding = "UTF-8", skipNul = TRUE)
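The warning above indicates that readLines stopped before the end of the news file. On Windows this is usually caused not by a genuinely short file but by an embedded SUB (0x1A) control character that terminates text-mode reading. Reading the file through a binary connection avoids the early stop; a sketch, assuming the same path as above:

con <- file("D:/Coursera/Data Science Capstone/FINAL PROJECT/final/en_US/en_US_news.txt", open = "rb")
news <- readLines(con, encoding = "UTF-8", skipNul = TRUE)   # binary mode reads past the control character
close(con)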

Summary of the data:

library(stringi)
stri_stats_general(news)
##       Lines LinesNEmpty       Chars CharsNWhite 
##       77259       77259    15639408    13072698
stri_stats_general(blogs)
##       Lines LinesNEmpty       Chars CharsNWhite 
##      899288      899288   206824382   170389539
stri_stats_general(twitter)
##       Lines LinesNEmpty       Chars CharsNWhite 
##     2360148     2360148   162096241   134082806
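Word counts give a more intuitive sense of scale than raw character counts. A quick sketch using stringi's stri_count_words (exact counts depend on its word-boundary rules):

sum(stri_count_words(news))      # approximate total words per source
sum(stri_count_words(blogs))
sum(stri_count_words(twitter))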

Sampling the data:

library(tm)
## Loading required package: NLP
set.seed(1234)   # fix the RNG seed so the sampled lines (and all results below) are reproducible
sample.blogs <- sample(blogs, size = 1000)
sample.news <- sample(news, size = 1000)
sample.twitter <- sample(twitter, size = 1000)
sample.all.data <- c(sample.blogs, sample.news, sample.twitter)

Remove numbers, special characters, stop words, and extra whitespace from the samples. The same cleaning steps apply to each corpus, so they are wrapped in a helper function:

toSpace <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
clean.corpus <- function(text) {
    corpus <- VCorpus(VectorSource(text))
    corpus <- tm_map(corpus, toSpace, "[/@:$*&!?_#-]")      # map special characters to spaces
    corpus <- tm_map(corpus, content_transformer(tolower))  # lower-case everything
    corpus <- tm_map(corpus, removeNumbers)
    corpus <- tm_map(corpus, removePunctuation)
    corpus <- tm_map(corpus, removeWords, stopwords())      # drop English stop words
    corpus <- tm_map(corpus, stemDocument)                  # reduce words to their stems
    tm_map(corpus, stripWhitespace)
}

corpus <- clean.corpus(sample.all.data)   # combined sample, used for the n-gram matrices
corpus.news <- clean.corpus(sample.news)
corpus.twitter <- clean.corpus(sample.twitter)
corpus.blogs <- clean.corpus(sample.blogs)
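A quick inspection of one cleaned document confirms the transformations took effect:

as.character(corpus[[1]])   # first cleaned document from the combined sample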

Create the term-document matrices

library(RWeka)
TDM.1 <- TermDocumentMatrix(corpus)                                      # unigrams
bigram <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
TDM.2 <- TermDocumentMatrix(corpus, control = list(tokenize = bigram))   # bigrams
trigram <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
TDM.3 <- TermDocumentMatrix(corpus, control = list(tokenize = trigram))  # trigrams
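Before plotting, the matrices can be sanity-checked by listing frequent terms directly with tm's findFreqTerms (the threshold of 50 is an arbitrary choice for a 3,000-line sample):

findFreqTerms(TDM.1, lowfreq = 50)   # stems appearing at least 50 times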

Data exploration

1-Gram Frequency plot

library(ggplot2)
## 
## Attaching package: 'ggplot2'
## The following object is masked from 'package:NLP':
## 
##     annotate
freq1 <- rowSums(as.matrix(TDM.1))
freq1 <- sort(freq1, decreasing = TRUE)
dfFreq1 <- data.frame(Words = names(freq1), Frequency = freq1)
ggplot(dfFreq1[1:15, ], aes(reorder(Words, -Frequency), Frequency)) +   # keep bars in frequency order
    geom_bar(stat = "identity", fill = "green4", colour = "green4") +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    xlab("Words") + ggtitle("1-Gram Frequency")
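Word frequencies in natural language are typically very skewed, so a small set of stems should cover most of the tokens. A short sketch quantifying this from freq1 (numbers will vary with the sample):

coverage <- cumsum(freq1) / sum(freq1)   # cumulative share of tokens covered
min(which(coverage >= 0.5))              # unique stems needed to cover 50% of tokens
min(which(coverage >= 0.9))              # ... and 90%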

2-Gram Frequency plot

library(ggplot2)
freq2 <- rowSums(as.matrix(TDM.2))
freq2 <- sort(freq2, decreasing = TRUE)
dfFreq2 <- data.frame(Words = names(freq2), Frequency = freq2)
ggplot(dfFreq2[1:15, ], aes(reorder(Words, -Frequency), Frequency)) +   # keep bars in frequency order
    geom_bar(stat = "identity", fill = "deepskyblue3", colour = "deepskyblue3") +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    xlab("Words") + ggtitle("2-Gram Frequency")

3-Gram Frequency plot

library(ggplot2)
freq3 <- rowSums(as.matrix(TDM.3))
freq3 <- sort(freq3, decreasing = TRUE)
dfFreq3 <- data.frame(Words = names(freq3), Frequency = freq3)
ggplot(dfFreq3[1:15, ], aes(reorder(Words, -Frequency), Frequency)) +   # keep bars in frequency order
    geom_bar(stat = "identity", fill = "orangered1", colour = "orangered1") +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    xlab("Words") + ggtitle("3-Gram Frequency")
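These n-gram counts are the raw material for the eventual prediction algorithm: given the last two words typed, the most frequent trigram that starts with them suggests the next word. A minimal sketch on top of dfFreq3 (predict.next is a hypothetical helper; the real model will need backoff to bigrams and unigrams plus smoothing for unseen prefixes):

predict.next <- function(prefix, df = dfFreq3) {
    # keep trigrams whose first two words match the prefix
    hits <- df[startsWith(as.character(df$Words), paste0(prefix, " ")), ]
    if (nrow(hits) == 0) return(NA_character_)
    # df is already sorted by frequency, so the first hit is the best candidate
    strsplit(as.character(hits$Words[1]), " ")[[1]][3]
}
predict.next("new york")   # example prefix; the suggestion depends on the sampled data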

Word cloud of the Twitter sample

library(wordcloud)
## Loading required package: RColorBrewer
wordcloud(corpus.twitter, max.words = 100, random.order = FALSE, rot.per = 0.35,
          use.r.layout = FALSE, colors = brewer.pal(3, "Paired"))

Word cloud of the blogs sample

library(wordcloud)
wordcloud(corpus.blogs, max.words = 100, random.order = FALSE, rot.per = 0.35,
          use.r.layout = FALSE, colors = brewer.pal(3, "Paired"))

Word cloud of the news sample

library(wordcloud)
wordcloud(corpus.news, max.words = 100, random.order = FALSE, rot.per = 0.35,
          use.r.layout = FALSE, colors = brewer.pal(3, "Paired"))