The goal of this project is simply to show that you’ve gotten used to working with the data and that you are on track to create your prediction algorithm. Please submit a report on RPubs that explains your exploratory analysis and your goals for the eventual app and algorithm. This document should be concise, explain only the major features of the data you have identified, and briefly summarize your plans for creating the prediction algorithm and Shiny app in a way that would be understandable to a non-data-scientist manager. You should make use of tables and plots to illustrate important summaries of the data set. The motivation for this project is to:

1. Demonstrate that you’ve downloaded the data and have successfully loaded it in.
2. Create a basic report of summary statistics about the data sets.
3. Report any interesting findings that you amassed so far.
4. Get feedback on your plans for creating a prediction algorithm and Shiny app.
library(quanteda)
## Package version: 3.3.1
## Unicode version: 13.0
## ICU version: 69.1
## Parallel computing: 8 of 8 threads used.
## See https://quanteda.io for tutorials and examples.
library(wordcloud)
## Loading required package: RColorBrewer
library(NLP)
##
## Attaching package: 'NLP'
## The following objects are masked from 'package:quanteda':
##
## meta, meta<-
library(tm)
##
## Attaching package: 'tm'
## The following object is masked from 'package:quanteda':
##
## stopwords
library(stringi)
con <- file("C:/Users/wangq/Downloads/final/en_US/en_US.twitter.txt")
readLines(con, 1) ## Read the first line of text
## [1] "How are you? Btw thanks for the RT. You gonna be in DC anytime soon? Love to see you. Been way, way too long."
#readLines(con, 5) ## Read in the next 5 lines of text
close(con)
object.size(con) ## Size of the connection object in memory (not the file on disk)
## 584 bytes
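Note that object.size(con) measures only the connection object (584 bytes), not the file on disk. A minimal sketch for the actual on-disk sizes, assuming the three files sit in the same folder as above:

paths <- file.path("C:/Users/wangq/Downloads/final/en_US",
                   c("en_US.twitter.txt", "en_US.news.txt", "en_US.blogs.txt"))
round(file.size(paths) / 1024^2, 1) # file sizes in megabytes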
twitter<-readLines("C:/Users/wangq/Downloads/final/en_US/en_US.twitter.txt",encoding="UTF-8")
## Warning in readLines("C:/Users/wangq/Downloads/final/en_US/en_US.twitter.txt",
## : line 167155 appears to contain an embedded nul
## Warning in readLines("C:/Users/wangq/Downloads/final/en_US/en_US.twitter.txt",
## : line 268547 appears to contain an embedded nul
## Warning in readLines("C:/Users/wangq/Downloads/final/en_US/en_US.twitter.txt",
## : line 1274086 appears to contain an embedded nul
## Warning in readLines("C:/Users/wangq/Downloads/final/en_US/en_US.twitter.txt",
## : line 1759032 appears to contain an embedded nul
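The embedded-nul warnings are harmless for our purposes; if preferred, readLines() can be told to skip the nuls so it does not warn (a sketch of the same call):

twitter <- readLines("C:/Users/wangq/Downloads/final/en_US/en_US.twitter.txt",
                     encoding = "UTF-8", skipNul = TRUE)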
con1 <- file("C:/Users/wangq/Downloads/final/en_US/en_US.news.txt")
readLines(con1, 1) ## Read the first line of text
## [1] "He wasn't home alone, apparently."
object.size(con1) ## Size of the connection object in memory (not the file on disk)
## 584 bytes
close(con1)
news <- readLines("C:/Users/wangq/Downloads/final/en_US/en_US.news.txt", encoding = "UTF-8")
## Warning in readLines("C:/Users/wangq/Downloads/final/en_US/en_US.news.txt", :
## incomplete final line found on
## 'C:/Users/wangq/Downloads/final/en_US/en_US.news.txt'
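The warning occurs because en_US.news.txt does not end with a newline. Opening the connection in binary mode reads the full file without the warning; a sketch, assuming the same path (con_news is a throwaway name):

con_news <- file("C:/Users/wangq/Downloads/final/en_US/en_US.news.txt", open = "rb")
news <- readLines(con_news, encoding = "UTF-8", skipNul = TRUE)
close(con_news)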
con2 <- file("C:/Users/wangq/Downloads/final/en_US/en_US.blogs.txt")
readLines(con2, 1) ## Read the first line of text
## [1] "In the years thereafter, most of the Oil fields and platforms were named after pagan “gods”."
object.size(con2) ## Size of the connection object in memory (not the file on disk)
## 584 bytes
close(con2)
blogs <- readLines("C:/Users/wangq/Downloads/final/en_US/en_US.blogs.txt", encoding = "UTF-8")
# Create a stats function to obtain the number of lines, the number of characters, the number of bytes, and the number of words in each data set.
stats <- function(x) {
  n_lines <- length(x)
  num_char <- sum(nchar(x))
  object_size <- object.size(x)
  word_count <- stri_stats_latex(x)[4]
  return(c(n_lines, num_char, object_size, word_count))
}
# Apply the stats function to summarize the three .txt files in the en_US folder.
summary_stats <- sapply(list(twitter, news, blogs), stats)
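For the knitted report, the summary matrix is easier to read once its rows and columns are labeled; a minimal sketch, assuming knitr is available:

# Label the summary matrix so the table is self-explanatory
rownames(summary_stats) <- c("Lines", "Characters", "Bytes", "Words")
colnames(summary_stats) <- c("twitter", "news", "blogs")
knitr::kable(summary_stats)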
# The three files in the en_US folder are large, so we sample 5% of each to speed up computation.
set.seed(2023)
# Drop non-ASCII characters before sampling
twitter1 <- iconv(twitter, "latin1", "ASCII", sub = "")
news1 <- iconv(news, "latin1", "ASCII", sub = "")
blogs1 <- iconv(blogs, "latin1", "ASCII", sub = "")
sample_data <- c(sample(twitter1, length(twitter1) * 0.05),
                 sample(blogs1, length(blogs1) * 0.05),
                 sample(news1, length(news1) * 0.05))
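A quick sanity check on the sampled data (a sketch):

length(sample_data)                            # number of sampled lines
format(object.size(sample_data), units = "MB") # memory footprint of the sample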
data <- VCorpus(VectorSource(sample_data))
data1 <- tm_map(data, removePunctuation)
data2 <- tm_map(data1, stripWhitespace)
# Wrap base functions in content_transformer() so the documents stay
# PlainTextDocument; this makes a separate rebuild step unnecessary.
data3 <- tm_map(data2, content_transformer(tolower))
data4 <- tm_map(data3, removeNumbers)
data5 <- tm_map(data4, removeWords, stopwords("english"))
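With the cleaned corpus in hand, a natural next step is to tabulate term frequencies and visualize the most common words with the wordcloud package loaded above. A minimal sketch, assuming the 5% sample is small enough to hold as a dense matrix:

tdm <- TermDocumentMatrix(data5)
freq <- sort(rowSums(as.matrix(tdm)), decreasing = TRUE) # term frequencies, most common first
wordcloud(names(freq), freq, max.words = 100, random.order = FALSE,
          colors = brewer.pal(8, "Dark2"))

The same frequency counting extends to 2-grams and 3-grams, which is the kind of summary the eventual prediction algorithm will build on.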