Extracting Tweets

Retrieve tweets from Twitter's search API with the rtweet package.

# Load packages
library(rtweet)
library(tidyverse)
# Twitter authentication
create_token(
  app             = "behumble_please",
  consumer_key    = consumer_key,
  consumer_secret = consumer_secret,
  access_token    = access_token,
  access_secret   = access_secret)
## <Token>
## <oauth_endpoint>
##  request:   https://api.twitter.com/oauth/request_token
##  authorize: https://api.twitter.com/oauth/authenticate
##  access:    https://api.twitter.com/oauth/access_token
## <oauth_app> behumble_please
##   key:    OakS8ZIhrXK0l30ZZ9lBXlKxT
##   secret: <hidden>
## <credentials> oauth_token, oauth_token_secret
## ---
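
The consumer_key, consumer_secret, access_token, and access_secret objects passed to create_token() are assumed to be defined earlier in the session. One way to keep them out of the script is to read them from environment variables set in .Renviron; the variable names below are an assumption, not rtweet conventions:

# read API credentials from the environment instead of hard-coding them
consumer_key    <- Sys.getenv("TWITTER_CONSUMER_KEY")
consumer_secret <- Sys.getenv("TWITTER_CONSUMER_SECRET")
access_token    <- Sys.getenv("TWITTER_ACCESS_TOKEN")
access_secret   <- Sys.getenv("TWITTER_ACCESS_SECRET")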


# Retrieve tweets
tweets <- search_tweets("#HariAyah", n = 1000, tweet_mode = "extended")
## Searching for tweets...
## Finished collecting tweets!
# drop tweets with duplicate text, keeping all other columns
tweets <- distinct(tweets, text, .keep_all = TRUE)
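
Search results change from run to run, so it can be worth caching the raw pull before any cleaning; a small sketch (the file name is an assumption):

# cache the raw pull so the analysis can be re-run offline
saveRDS(tweets, "tweets_hariayah.rds")
# tweets <- readRDS("tweets_hariayah.rds")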

Tweet Description

## plot time series of tweets
ts_plot(tweets, "10 minutes") +
  theme_minimal() +
  theme(plot.title = ggplot2::element_text(face = "bold")) +
  labs(
    x = NULL, y = NULL,
    title = "Frequency of #HariAyah Twitter statuses from past 3 hours",
    subtitle = "Twitter status (tweet) counts aggregated using 10 minutes",
    caption = "\nSource: Data collected from Twitter's REST API via rtweet"
  )
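
The 10-minute bins suit a short collection window; for a longer window, a coarser interval (any value ts_plot() accepts) smooths the series. For example:

# the same series aggregated hourly
ts_plot(tweets, "1 hour") + theme_minimal()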

tail(tweets, 20)
## # A tibble: 20 x 88
##    user_id status_id created_at          screen_name text  source
##    <chr>   <chr>     <dttm>              <chr>       <chr> <chr> 
##  1 463476~ 10618860~ 2018-11-12 07:39:02 ydsf_jkt    "\" ~ Twitt~
##  2 890768~ 10618860~ 2018-11-12 07:38:48 sarascees   "Pay~ Twitt~
##  3 352615~ 10618858~ 2018-11-12 07:38:21 destikrnwt  sela~ Twitt~
##  4 258627~ 10618849~ 2018-11-12 07:34:30 piani_mj08  "#ha~ Twitt~
##  5 832534~ 10618841~ 2018-11-12 07:31:32 ericwillemm "Aku~ Twitt~
##  6 126165~ 10618841~ 2018-11-12 07:31:30 DistafmRad~ "#Se~ Twitt~
##  7 287535~ 10618841~ 2018-11-12 07:31:20 myshininli~ "Sel~ Twitt~
##  8 225285~ 10618841~ 2018-11-12 07:31:16 annafarida~ "Sel~ Insta~
##  9 322297~ 10618840~ 2018-11-12 07:30:51 KppSubang   "Sel~ Faceb~
## 10 606123~ 10618828~ 2018-11-12 07:26:12 akbar_thin~ "Sel~ Insta~
## 11 238500~ 10618816~ 2018-11-12 07:21:36 bwpthehive  "Jik~ Twitt~
## 12 492433~ 10618815~ 2018-11-12 07:21:13 adc1408     #Har~ Mobil~
## 13 103248~ 10618815~ 2018-11-12 07:21:07 Silvanda31  #Har~ Twitt~
## 14 331704~ 10618813~ 2018-11-12 07:20:21 designthin~ kale~ desig~
## 15 740721~ 10618805~ 2018-11-12 07:17:10 culien_elm~ @Fie~ Twitt~
## 16 118222~ 10618802~ 2018-11-12 07:15:49 silviyaris~ Sela~ Twitt~
## 17 966895~ 10618793~ 2018-11-12 07:12:23 PosMandiri  "Aya~ Twitt~
## 18 219524~ 10618790~ 2018-11-12 07:10:59 Exabytes_ID "We ~ Twitt~
## 19 297275~ 10618783~ 2018-11-12 07:08:13 Indonesian~ "Sel~ Tweet~
## 20 101698~ 10618781~ 2018-11-12 07:07:28 DindaSyiff~ #Har~ Twitt~
## # ... with 82 more variables: display_text_width <dbl>,
## #   reply_to_status_id <chr>, reply_to_user_id <chr>,
## #   reply_to_screen_name <chr>, is_quote <lgl>, is_retweet <lgl>,
## #   favorite_count <int>, retweet_count <int>, hashtags <list>,
## #   symbols <list>, urls_url <list>, urls_t.co <list>,
## #   urls_expanded_url <list>, media_url <list>, media_t.co <list>,
## #   media_expanded_url <list>, media_type <list>, ext_media_url <list>,
## #   ext_media_t.co <list>, ext_media_expanded_url <list>,
## #   ext_media_type <chr>, mentions_user_id <list>,
## #   mentions_screen_name <list>, lang <chr>, quoted_status_id <chr>,
## #   quoted_text <chr>, quoted_created_at <dttm>, quoted_source <chr>,
## #   quoted_favorite_count <int>, quoted_retweet_count <int>,
## #   quoted_user_id <chr>, quoted_screen_name <chr>, quoted_name <chr>,
## #   quoted_followers_count <int>, quoted_friends_count <int>,
## #   quoted_statuses_count <int>, quoted_location <chr>,
## #   quoted_description <chr>, quoted_verified <lgl>,
## #   retweet_status_id <chr>, retweet_text <chr>,
## #   retweet_created_at <dttm>, retweet_source <chr>,
## #   retweet_favorite_count <int>, retweet_retweet_count <int>,
## #   retweet_user_id <chr>, retweet_screen_name <chr>, retweet_name <chr>,
## #   retweet_followers_count <int>, retweet_friends_count <int>,
## #   retweet_statuses_count <int>, retweet_location <chr>,
## #   retweet_description <chr>, retweet_verified <lgl>, place_url <chr>,
## #   place_name <chr>, place_full_name <chr>, place_type <chr>,
## #   country <chr>, country_code <chr>, geo_coords <list>,
## #   coords_coords <list>, bbox_coords <list>, status_url <chr>,
## #   name <chr>, location <chr>, description <chr>, url <chr>,
## #   protected <lgl>, followers_count <int>, friends_count <int>,
## #   listed_count <int>, statuses_count <int>, favourites_count <int>,
## #   account_created_at <dttm>, verified <lgl>, profile_url <chr>,
## #   profile_expanded_url <chr>, account_lang <chr>,
## #   profile_banner_url <chr>, profile_background_url <chr>,
## #   profile_image_url <chr>
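
With 88 columns the tibble is unwieldy; for quick inspection it can help to keep only the fields this analysis actually uses:

# a narrower view of the fields used below
tweets %>% select(created_at, screen_name, text) %>% tail(5)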

Text Cleaning

library(tm)
## Loading required package: NLP
## 
## Attaching package: 'NLP'
## The following object is masked from 'package:ggplot2':
## 
##     annotate
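
The masking message matters later: once NLP is attached, a bare annotate() call resolves to NLP's version, so any plot annotation should use the namespaced form. A hypothetical example:

# after library(tm), qualify ggplot2 annotation calls explicitly
# ggplot2::annotate("text", x = 1, y = 1, label = "example")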

Build Corpus

# build a corpus, and specify the source to be character vectors 
myCorpus <- Corpus(VectorSource(tweets$text))
# convert to lower case
myCorpus <- tm_map(myCorpus, content_transformer(tolower))
## Warning in tm_map.SimpleCorpus(myCorpus, content_transformer(tolower)):
## transformation drops documents
# remove URLs
removeURL <- function(x) gsub("http[^[:space:]]*", "", x)
myCorpus <- tm_map(myCorpus, content_transformer(removeURL))
## Warning in tm_map.SimpleCorpus(myCorpus, content_transformer(removeURL)):
## transformation drops documents
# remove anything other than letters or whitespace
removeNumPunct <- function(x) gsub("[^[:alpha:][:space:]]*", "", x) 
myCorpus <- tm_map(myCorpus, content_transformer(removeNumPunct))
## Warning in tm_map.SimpleCorpus(myCorpus,
## content_transformer(removeNumPunct)): transformation drops documents
# remove stopwords (English plus a local Indonesian list)
myStopwords <- c(setdiff(stopwords('english'), c("r", "big")),
                 "use", "see", "used", "via", "amp", "indihome")
stopwords_id <- read.table('E:/stopwords-id.txt', header = FALSE)
myStopwords <- c(myStopwords, as.character(stopwords_id$V1), "hi", "yg")
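# alternatively (assuming the 'stopwords' package is installed), an
# Indonesian list ships with the stopwords-iso source:
# myStopwords <- c(myStopwords, stopwords::stopwords("id", source = "stopwords-iso"))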
myCorpus <- tm_map(myCorpus, removeWords, myStopwords)
## Warning in tm_map.SimpleCorpus(myCorpus, removeWords, myStopwords):
## transformation drops documents
# remove extra whitespace
myCorpus <- tm_map(myCorpus, stripWhitespace)
## Warning in tm_map.SimpleCorpus(myCorpus, stripWhitespace): transformation
## drops documents
# keep a copy for stem completion later
myCorpusCopy <- myCorpus
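
The copy matters because stemming is destructive. A minimal sketch of that optional step, not applied in this analysis (it assumes the SnowballC package is installed; stemCompletion2 is a helper defined here for illustration):

library(SnowballC)
# reduce words to stems, then complete each stem back to a full word,
# using the unstemmed copy of the corpus as the dictionary
myCorpus <- tm_map(myCorpus, stemDocument)
stemCompletion2 <- function(x, dictionary) {
  x <- unlist(strsplit(as.character(x), " "))
  x <- x[x != ""]
  x <- stemCompletion(x, dictionary = dictionary)
  paste(x, collapse = " ")
}
myCorpus <- lapply(myCorpus, stemCompletion2, dictionary = myCorpusCopy)
myCorpus <- Corpus(VectorSource(unlist(myCorpus)))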

Frequent Words

Build Term Document Matrix

tdm <- TermDocumentMatrix(myCorpus, control = list(wordLengths = c(1, Inf)))
tdm
## <<TermDocumentMatrix (terms: 1337, documents: 232)>>
## Non-/sparse entries: 2845/307339
## Sparsity           : 99%
## Maximal term length: 25
## Weighting          : term frequency (tf)
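
Beyond raw counts, the matrix supports association queries: findAssocs() lists terms whose occurrence correlates with a given term above a threshold. The term and threshold here are illustrative:

# terms co-occurring with "ayah" at correlation >= 0.2
findAssocs(tdm, "ayah", 0.2)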

Top Frequent Terms

# list all terms appearing at least 20 times
freq.terms <- findFreqTerms(tdm, lowfreq = 20)
freq.terms
##  [1] "ayah"             "hariayah"         "hariayahnasional"
##  [4] "fathersday"       "selamat"          "nasional"        
##  [7] "father"           "selamathariayah"  "indonesia"       
## [10] "kasih"
# count each term's total occurrences and keep those appearing >= 20 times
term.freq <- rowSums(as.matrix(tdm))
term.freq <- subset(term.freq, term.freq >= 20)
df <- data.frame(term = names(term.freq), freq = term.freq)
ggplot(df, aes(x = term, y = freq)) + geom_bar(stat = "identity") +
  xlab("Terms") + ylab("Count") + coord_flip() +
  theme(axis.text = element_text(size = 7))
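
geom_bar() orders the terms alphabetically by default; wrapping the x aesthetic in reorder() sorts the bars by frequency, which reads more easily:

# same plot with bars sorted by frequency
ggplot(df, aes(x = reorder(term, freq), y = freq)) +
  geom_bar(stat = "identity") +
  xlab("Terms") + ylab("Count") + coord_flip() +
  theme(axis.text = element_text(size = 7))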

Wordcloud

Build Wordcloud

library(wordcloud)
## Loading required package: RColorBrewer
m <- as.matrix(tdm)
# calculate the frequency of each word and sort in decreasing order
word.freq <- sort(rowSums(m), decreasing = TRUE)
# drop the four lightest shades of the palette so words stay legible
pal <- brewer.pal(9, "BuGn")[-(1:4)]
wordcloud(words = names(word.freq), freq = word.freq, min.freq = 3,
    random.order = FALSE, colors = pal)
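
wordcloud() places words at randomized positions, so fixing the RNG seed beforehand makes the figure reproducible across runs (the seed value is arbitrary):

set.seed(1234)  # any fixed value gives a repeatable layout
wordcloud(words = names(word.freq), freq = word.freq, min.freq = 3,
    random.order = FALSE, colors = pal)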