Executive Summary

The goal of this project is to demonstrate that you have become familiar with the data and are on track to create your prediction algorithm. Please submit a report on RPubs (http://rpubs.com/) that explains your exploratory analysis and your goals for the eventual app and algorithm. The document should be concise, explain only the major features of the data you have identified, and briefly summarize your plans for the prediction algorithm and Shiny app in a way that is understandable to a non-data-scientist manager. Use tables and plots to illustrate important summaries of the data set. The motivation for this project is to:

1. Demonstrate that you have downloaded the data and successfully loaded it in.
2. Create a basic report of summary statistics about the data sets.
3. Report any interesting findings you have amassed so far.
4. Get feedback on your plans for creating a prediction algorithm and Shiny app.

Load libraries

library(stringi)
library(tm)
library(SnowballC)
library(RWeka)
library(ggplot2)

Load and summarize data

setwd("~/final/en_US")
blogs <- readLines("en_US.blogs.txt", encoding = "UTF-8", skipNul = TRUE)
news <-  readLines(file("en_US.news.txt", open = "rb") , encoding = "UTF-8", skipNul = TRUE)
twitter <- readLines("en_US.twitter.txt", encoding = "UTF-8", skipNul = TRUE)

### File sizes in MB
mb_blogs <- (file.info("en_US.blogs.txt")$size)/1024/1024
mb_news <- (file.info("en_US.news.txt")$size)/1024/1024
mb_twitter <- (file.info("en_US.twitter.txt")$size)/1024/1024

### Number of lines
len_blogs <- length(blogs)
len_news <- length(news)
len_twitter <- length(twitter)

### Number of characters
nchar_blogs <- sum(nchar(blogs))
nchar_news <- sum(nchar(news))
nchar_twitter <- sum(nchar(twitter))

### Number of words
words_blogs <- sum(stri_count_words(blogs))
words_news <- sum(stri_count_words(news))
words_twitter <- sum(stri_count_words(twitter))

file_summary <- data.frame("File Name" = c("Blogs", "News", "Twitter"),
                           "File Size (MB)" = c(mb_blogs, mb_news, mb_twitter),
                           "Lines" = c(len_blogs, len_news, len_twitter),
                           "Characters" = c(nchar_blogs, nchar_news, nchar_twitter),
                           "Words" = c(words_blogs, words_news, words_twitter),
                           check.names = FALSE)
file_summary
##   File Name File Size (MB)   Lines Characters    Words
## 1     Blogs       200.4242  899288  206824505 37546806
## 2      News       196.2775 1010242  203223159 34762658
## 3   Twitter       159.3641 2360148  162096241 30096690
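
As a quick cross-check on the table above, stringi ships a general-purpose summary helper. The sketch below assumes the three vectors are still in memory; note that stri_stats_general() reports character counts with and without whitespace, so its numbers need not match sum(nchar(...)) exactly.

# Cross-check: lines, non-empty lines, and character counts per source
sapply(list(blogs = blogs, news = news, twitter = twitter),
       stri_stats_general)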

Clean data

# Work with a random sample (1,000 lines per source) so the corpus
# transformations below stay fast and memory-friendly
cut_blogs <- sample(blogs, size = 1000)
cut_news <- sample(news, size = 1000)
cut_twitter <- sample(twitter, size = 1000)
sample_data <- c(cut_blogs, cut_news, cut_twitter)
corpus <- VCorpus(VectorSource(sample_data))
corpus <- tm_map(corpus, content_transformer(tolower))  # lower-case everything
corpus <- tm_map(corpus, removeNumbers)                 # drop digits
corpus <- tm_map(corpus, removePunctuation)             # drop punctuation
corpus <- tm_map(corpus, removeWords, stopwords())      # drop English stop words
corpus <- tm_map(corpus, stemDocument)                  # stem words to their roots
corpus <- tm_map(corpus, stripWhitespace)               # collapse repeated spaces
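
To verify that the transformations behaved as expected, a couple of cleaned documents can be inspected directly (a minimal spot-check; the exact text shown will vary with the random sample):

# Spot-check a few cleaned documents
inspect(corpus[1:2])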

Build N-grams

# Tokenizers that split the text into n-grams of a fixed length
one_tokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 1, max = 1))
two_tokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
three_tokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))

# Build term-document matrices, one per n-gram length
one_dtm <- TermDocumentMatrix(corpus, control = list(tokenize = one_tokenizer))
two_dtm <- TermDocumentMatrix(corpus, control = list(tokenize = two_tokenizer))
three_dtm <- TermDocumentMatrix(corpus, control = list(tokenize = three_tokenizer))

# Keep only reasonably frequent n-grams; unigrams occur far more often,
# so they get a higher frequency cutoff
one_freq <- findFreqTerms(one_dtm, lowfreq = 30)
two_freq <- findFreqTerms(two_dtm, lowfreq = 3)
three_freq <- findFreqTerms(three_dtm, lowfreq = 3)
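
On the full data set (rather than this 3,000-line sample), calling as.matrix() on a term-document matrix can exhaust memory. One option, sketched below with an arbitrary, untuned threshold, is to drop very sparse terms first with tm's removeSparseTerms():

# Optional: shrink the matrices before as.matrix() on larger samples
# (0.999 is an arbitrary sparsity threshold, not tuned)
two_dtm_small <- removeSparseTerms(two_dtm, 0.999)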

1-gram plot

one_freq_sum <- rowSums(as.matrix(one_dtm[one_freq, ]))
one_freq_sum <- sort(one_freq_sum, decreasing = TRUE)
one_df <- data.frame(word = names(one_freq_sum), frequency = one_freq_sum,
                     row.names = NULL)
head(one_df)
##   word frequency
## 1 said       298
## 2 will       278
## 3 like       243
## 4  one       238
## 5  get       225
## 6 just       217
one_gram <- ggplot(one_df[1:15, ],
                   aes(x = reorder(word, frequency), y = frequency, fill = frequency)) +
  geom_bar(stat = "identity") +
  labs(x = "words") +
  coord_flip()
one_gram
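
The same table-then-bar-chart pattern repeats for the bigrams and trigrams below; a small helper along these lines (plot_ngram is a hypothetical name, not defined by the code above) would avoid the repetition:

# Hypothetical helper: horizontal bar chart of the top-n n-grams
plot_ngram <- function(df, n = 15) {
  ggplot(head(df, n),
         aes(x = reorder(word, frequency), y = frequency, fill = frequency)) +
    geom_bar(stat = "identity") +
    labs(x = "words") +
    coord_flip()
}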

2-gram plot

two_freq_sum <- rowSums(as.matrix(two_dtm[two_freq, ]))
two_freq_sum <- sort(two_freq_sum, decreasing = TRUE)
two_df <- data.frame(word = names(two_freq_sum), frequency = two_freq_sum,
                     row.names = NULL)
head(two_df)
##        word frequency
## 1 last year        23
## 2  new york        21
## 3 right now        21
## 4 last week        17
## 5 dont know        15
## 6 feel like        15
two_gram <- ggplot(two_df[1:15, ],
                   aes(x = reorder(word, frequency), y = frequency, fill = frequency)) +
  geom_bar(stat = "identity") +
  labs(x = "words") +
  coord_flip()
two_gram

3-gram plot

three_freq_sum <- rowSums(as.matrix(three_dtm[three_freq, ]))
three_freq_sum <- sort(three_freq_sum, decreasing = TRUE)
three_df <- data.frame(word = names(three_freq_sum), frequency = three_freq_sum,
                       row.names = NULL)
head(three_df)
##                word frequency
## 1     new york citi         4
## 2   can relat anoth         3
## 3  doesnt matter us         3
## 4    happi new year         3
## 5 us district court         3
# Fewer than 15 trigrams pass the frequency cutoff, so take head() instead
# of three_df[1:15, ], which would add NA rows and trigger a ggplot warning
three_gram <- ggplot(head(three_df, 15),
                     aes(x = reorder(word, frequency), y = frequency, fill = frequency)) +
  geom_bar(stat = "identity") +
  labs(x = "words") +
  coord_flip()
three_gram
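
These n-gram frequency tables are the raw material for the planned prediction algorithm. As a rough illustration of the idea (predict_next and lookup are hypothetical names; the real algorithm will need back-off to shorter n-grams and smoothing for unseen words), the bigram table already supports a naive next-word lookup:

# Hypothetical sketch: naive next-word prediction from bigram counts
parts  <- strsplit(as.character(two_df$word), " ")
lookup <- data.frame(first     = sapply(parts, `[`, 1),
                     second    = sapply(parts, `[`, 2),
                     frequency = two_df$frequency)

predict_next <- function(w) {
  hits <- lookup[lookup$first == w, ]
  head(hits[order(-hits$frequency), "second"], 3)   # top 3 candidates
}

predict_next("last")   # e.g. "year", "week" given the sample above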