Introduction

In Text Mining with R, Chapter 2 looks at sentiment analysis. This assignment starts by getting the primary example code from Chapter 2 working in an R Markdown document, then extends it in two ways: applying the analysis to a different corpus, and incorporating a sentiment lexicon not used in the chapter. The base code is taken from Silge, J. and Robinson, D., Text Mining with R: A Tidy Approach (O'Reilly, 2017), Chapter 2, https://www.tidytextmining.com/sentiment.html.

Loading Libraries

library(tidyverse)
## -- Attaching packages --------------------------------------- tidyverse 1.3.0 --
## v ggplot2 3.3.3     v purrr   0.3.4
## v tibble  3.0.6     v dplyr   1.0.4
## v tidyr   1.1.2     v stringr 1.4.0
## v readr   1.4.0     v forcats 0.5.1
## -- Conflicts ------------------------------------------ tidyverse_conflicts() --
## x dplyr::filter() masks stats::filter()
## x dplyr::lag()    masks stats::lag()
library(tidytext)
## Warning: package 'tidytext' was built under R version 4.0.5
library(textdata)
## Warning: package 'textdata' was built under R version 4.0.5
library(janeaustenr)
## Warning: package 'janeaustenr' was built under R version 4.0.5
library(stringr)
library(wordcloud)
## Warning: package 'wordcloud' was built under R version 4.0.5
## Loading required package: RColorBrewer
library(reshape2)
## Warning: package 'reshape2' was built under R version 4.0.4
## 
## Attaching package: 'reshape2'
## The following object is masked from 'package:tidyr':
## 
##     smiths
library(gutenbergr)
## Warning: package 'gutenbergr' was built under R version 4.0.5
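
These package version warnings are harmless console chatter from the knit. As an aside (not part of the assignment code), they can be silenced by setting default chunk options in a knitr setup chunk:

# optional: hide package startup messages and warnings in the knitted output
knitr::opts_chunk$set(message = FALSE, warning = FALSE)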

Part 1: Example Code

1.1 The sentiments datasets

get_sentiments("afinn")
## # A tibble: 2,477 x 2
##    word       value
##    <chr>      <dbl>
##  1 abandon       -2
##  2 abandoned     -2
##  3 abandons      -2
##  4 abducted      -2
##  5 abduction     -2
##  6 abductions    -2
##  7 abhor         -3
##  8 abhorred      -3
##  9 abhorrent     -3
## 10 abhors        -3
## # ... with 2,467 more rows
get_sentiments("bing")
## # A tibble: 6,786 x 2
##    word        sentiment
##    <chr>       <chr>    
##  1 2-faces     negative 
##  2 abnormal    negative 
##  3 abolish     negative 
##  4 abominable  negative 
##  5 abominably  negative 
##  6 abominate   negative 
##  7 abomination negative 
##  8 abort       negative 
##  9 aborted     negative 
## 10 aborts      negative 
## # ... with 6,776 more rows
get_sentiments("nrc")
## # A tibble: 13,901 x 2
##    word        sentiment
##    <chr>       <chr>    
##  1 abacus      trust    
##  2 abandon     fear     
##  3 abandon     negative 
##  4 abandon     sadness  
##  5 abandoned   anger    
##  6 abandoned   fear     
##  7 abandoned   negative 
##  8 abandoned   sadness  
##  9 abandonment anger    
## 10 abandonment fear     
## # ... with 13,891 more rows
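
AFINN differs from the other two lexicons: it scores each word on an integer scale from -5 (most negative) to +5 (most positive), while Bing and NRC use categorical labels. A quick tabulation, added here for illustration, makes the scale explicit:

# one row per score; the values run from -5 to 5
get_sentiments("afinn") %>% count(value)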

1.2 Sentiment analysis with inner join

# First, tidy the Austen novels: one word per row, annotated with line number and chapter
tidy_books <- austen_books() %>%
  group_by(book) %>%
  mutate(
    linenumber = row_number(),
    chapter = cumsum(str_detect(text, 
                                regex("^chapter [\\divxlc]", 
                                      ignore_case = TRUE)))) %>%
  ungroup() %>%
  unnest_tokens(word, text)
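
A note on the regex: "^chapter [\\divxlc]" matches lines that begin with the word chapter followed by an arabic digit or a roman-numeral character, which is how the running chapter counter works. A small sanity check, added for illustration:

str_detect(c("Chapter 1", "CHAPTER XIV", "in this chapter we"),
           regex("^chapter [\\divxlc]", ignore_case = TRUE))
## [1]  TRUE  TRUE FALSE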

# Now that the text is in a tidy format with one word per row, we are ready for sentiment analysis. First, find the words with a joy score in the NRC lexicon
nrc_joy <- get_sentiments("nrc") %>% 
  filter(sentiment == "joy")

tidy_books %>%
  filter(book == "Emma") %>%
  inner_join(nrc_joy) %>%
  count(word, sort = TRUE)
## Joining, by = "word"
## # A tibble: 303 x 2
##    word        n
##    <chr>   <int>
##  1 good      359
##  2 young     192
##  3 friend    166
##  4 hope      143
##  5 happy     125
##  6 love      117
##  7 deal       92
##  8 found      92
##  9 present    89
## 10 kind       82
## # ... with 293 more rows
# Next, we count up how many positive and negative words there are in defined sections of each book
jane_austen_sentiment <- tidy_books %>%
  inner_join(get_sentiments("bing")) %>%
  count(book, index = linenumber %/% 80, sentiment) %>%
  pivot_wider(names_from = sentiment, values_from = n, values_fill = 0) %>% 
  mutate(sentiment = positive - negative)
## Joining, by = "word"
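
The index = linenumber %/% 80 step deserves a word: integer division maps every run of 80 lines to the same section number, so sentiment is tallied over fixed-size chunks of text. A tiny illustration, not in the chapter:

tibble(linenumber = c(1, 79, 80, 159, 160)) %>%
  mutate(index = linenumber %/% 80)
## # A tibble: 5 x 2
##   linenumber index
##        <dbl> <dbl>
## 1          1     0
## 2         79     0
## 3         80     1
## 4        159     1
## 5       160     2
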
# Now we can plot these sentiment scores across the plot trajectory of each novel
ggplot(jane_austen_sentiment, aes(index, sentiment, fill = book)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~book, ncol = 2, scales = "free_x")

1.3 Comparing the three sentiment dictionaries

# Choose only the words from the one novel we are interested in
pride_prejudice <- tidy_books %>% 
  filter(book == "Pride & Prejudice")

pride_prejudice
## # A tibble: 122,204 x 4
##    book              linenumber chapter word     
##    <fct>                  <int>   <int> <chr>    
##  1 Pride & Prejudice          1       0 pride    
##  2 Pride & Prejudice          1       0 and      
##  3 Pride & Prejudice          1       0 prejudice
##  4 Pride & Prejudice          3       0 by       
##  5 Pride & Prejudice          3       0 jane     
##  6 Pride & Prejudice          3       0 austen   
##  7 Pride & Prejudice          7       1 chapter  
##  8 Pride & Prejudice          7       1 1        
##  9 Pride & Prejudice         10       1 it       
## 10 Pride & Prejudice         10       1 is       
## # ... with 122,194 more rows
# Define larger sections of text that span multiple lines, and find the net sentiment in each section
afinn <- pride_prejudice %>% 
  inner_join(get_sentiments("afinn")) %>% 
  group_by(index = linenumber %/% 80) %>% 
  summarise(sentiment = sum(value)) %>% 
  mutate(method = "AFINN")
## Joining, by = "word"
bing_and_nrc <- bind_rows(
  pride_prejudice %>% 
    inner_join(get_sentiments("bing")) %>%
    mutate(method = "Bing et al."),
  pride_prejudice %>% 
    inner_join(get_sentiments("nrc") %>% 
                 filter(sentiment %in% c("positive", 
                                         "negative"))
    ) %>%
    mutate(method = "NRC")) %>%
  count(method, index = linenumber %/% 80, sentiment) %>%
  pivot_wider(names_from = sentiment,
              values_from = n,
              values_fill = 0) %>% 
  mutate(sentiment = positive - negative)
## Joining, by = "word"
## Joining, by = "word"
# Bind them together and visualize them 
bind_rows(afinn, 
          bing_and_nrc) %>%
  ggplot(aes(index, sentiment, fill = method)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~method, ncol = 1, scales = "free_y")
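
One caveat when reading the three panels: AFINN sums integer scores while Bing and NRC count words, so the absolute y-axis values are not directly comparable across methods (hence scales = "free_y"). If comparable magnitudes were wanted, one option is to standardize each method's scores; a hedged sketch, not part of the chapter's code:

bind_rows(afinn, bing_and_nrc) %>%
  group_by(method) %>%
  mutate(scaled_sentiment = as.numeric(scale(sentiment))) %>%
  ungroup()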

# how many positive and negative words are in these lexicons
get_sentiments("nrc") %>% 
  filter(sentiment %in% c("positive", "negative")) %>% 
  count(sentiment)
## # A tibble: 2 x 2
##   sentiment     n
## * <chr>     <int>
## 1 negative   3324
## 2 positive   2312
get_sentiments("bing") %>% 
  count(sentiment)
## # A tibble: 2 x 2
##   sentiment     n
## * <chr>     <int>
## 1 negative   4781
## 2 positive   2005
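
Both lexicons skew negative, but Bing more so. A small computation, added for illustration, makes the negative-to-positive ratios explicit:

bind_rows(
  get_sentiments("bing") %>% count(sentiment) %>% mutate(lexicon = "Bing"),
  get_sentiments("nrc") %>%
    filter(sentiment %in% c("positive", "negative")) %>%
    count(sentiment) %>% mutate(lexicon = "NRC")) %>%
  pivot_wider(names_from = sentiment, values_from = n) %>%
  mutate(neg_pos_ratio = negative / positive)

From the counts above, Bing's ratio is 4781/2005 (about 2.4) versus NRC's 3324/2312 (about 1.4), which helps explain the systematically lower absolute values in the NRC panel.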

1.4 Most common positive and negative words

bing_word_counts <- tidy_books %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort = TRUE) %>%
  ungroup()
## Joining, by = "word"
bing_word_counts
## # A tibble: 2,585 x 3
##    word     sentiment     n
##    <chr>    <chr>     <int>
##  1 miss     negative   1855
##  2 well     positive   1523
##  3 good     positive   1380
##  4 great    positive    981
##  5 like     positive    725
##  6 better   positive    639
##  7 enough   positive    613
##  8 happy    positive    534
##  9 love     positive    495
## 10 pleasure positive    462
## # ... with 2,575 more rows
# This can be shown visually, and we can pipe straight into ggplot2
bing_word_counts %>%
  group_by(sentiment) %>%
  slice_max(n, n = 10) %>% 
  ungroup() %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(n, word, fill = sentiment)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~sentiment, scales = "free_y") +
  labs(x = "Contribution to sentiment",
       y = NULL)

# "miss" is an anomaly: Bing codes it as negative, but in Austen it is mostly a title for young, unmarried women, so add it to a custom stop-word list
custom_stop_words <- bind_rows(tibble(word = c("miss"),  
                                      lexicon = c("custom")), 
                               stop_words)

custom_stop_words
## # A tibble: 1,150 x 2
##    word        lexicon
##    <chr>       <chr>  
##  1 miss        custom 
##  2 a           SMART  
##  3 a's         SMART  
##  4 able        SMART  
##  5 about       SMART  
##  6 above       SMART  
##  7 according   SMART  
##  8 accordingly SMART  
##  9 across      SMART  
## 10 actually    SMART  
## # ... with 1,140 more rows
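
The chapter builds the custom list but stops short of applying it; removing "miss" from the counts is a single anti_join. A sketch added for completeness:

tidy_books %>%
  inner_join(get_sentiments("bing"), by = "word") %>%
  anti_join(custom_stop_words, by = "word") %>%
  count(word, sentiment, sort = TRUE)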

1.5 Wordclouds

tidy_books %>%
  anti_join(stop_words) %>%
  count(word) %>%
  with(wordcloud(word, n, max.words = 100))
## Joining, by = "word"

# Use sentiment analysis to tag positive and negative words with an inner join, then compare the most common positive and negative words in a comparison cloud
tidy_books %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort = TRUE) %>%
  acast(word ~ sentiment, value.var = "n", fill = 0) %>%
  comparison.cloud(colors = c("gray20", "gray80"),
                   max.words = 100)
## Joining, by = "word"
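
The reshape2::acast() call is there because comparison.cloud() expects a base-R matrix with one row per word and one column per sentiment, not a tidy data frame. Peeking at the first rows of that matrix (illustration only) shows the shape:

tidy_books %>%
  inner_join(get_sentiments("bing"), by = "word") %>%
  count(word, sentiment, sort = TRUE) %>%
  acast(word ~ sentiment, value.var = "n", fill = 0) %>%
  head(3)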

1.6 Looking at units beyond just words

p_and_p_sentences <- tibble(text = prideprejudice) %>% 
  unnest_tokens(sentence, text, token = "sentences")

p_and_p_sentences$sentence[2]
## [1] "by jane austen"
austen_chapters <- austen_books() %>%
  group_by(book) %>%
  unnest_tokens(chapter, text, token = "regex", 
                pattern = "Chapter|CHAPTER [\\dIVXLC]") %>%
  ungroup()

austen_chapters %>% 
  group_by(book) %>% 
  summarise(chapters = n())
## # A tibble: 6 x 2
##   book                chapters
## * <fct>                  <int>
## 1 Sense & Sensibility       51
## 2 Pride & Prejudice         62
## 3 Mansfield Park            49
## 4 Emma                      56
## 5 Northanger Abbey          32
## 6 Persuasion                25
bingnegative <- get_sentiments("bing") %>% 
  filter(sentiment == "negative")

wordcounts <- tidy_books %>%
  group_by(book, chapter) %>%
  summarize(words = n())
## `summarise()` has grouped output by 'book'. You can override using the `.groups` argument.
tidy_books %>%
  semi_join(bingnegative) %>%
  group_by(book, chapter) %>%
  summarize(negativewords = n()) %>%
  left_join(wordcounts, by = c("book", "chapter")) %>%
  mutate(ratio = negativewords/words) %>%
  filter(chapter != 0) %>%
  slice_max(ratio, n = 1) %>% 
  ungroup()
## Joining, by = "word"
## `summarise()` has grouped output by 'book'. You can override using the `.groups` argument.
## # A tibble: 6 x 5
##   book                chapter negativewords words  ratio
##   <fct>                 <int>         <int> <int>  <dbl>
## 1 Sense & Sensibility      43           161  3405 0.0473
## 2 Pride & Prejudice        34           111  2104 0.0528
## 3 Mansfield Park           46           173  3685 0.0469
## 4 Emma                     15           151  3340 0.0452
## 5 Northanger Abbey         21           149  2982 0.0500
## 6 Persuasion                4            62  1807 0.0343

Part 2: Extension - My Bondage and My Freedom by Frederick Douglass

I will analyze the text “My Bondage and My Freedom” by Frederick Douglass. I will use the gutenbergr library to search for and download it, as sketched below.
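
The search step can be done with gutenberg_works(), which filters the package's bundled metadata table; a sketch of how ID 202 can be located (the exact filter expression is illustrative):

gutenberg_works(author == "Douglass, Frederick") %>%
  select(gutenberg_id, title)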

2.1 Download the text

bondage_count <- gutenberg_download(202)
## Determining mirror for Project Gutenberg from http://www.gutenberg.org/robot/harvest
## Using mirror http://aleph.gutenberg.org
bondage_count
## # A tibble: 12,208 x 2
##    gutenberg_id text                                                            
##           <int> <chr>                                                           
##  1          202 "MY BONDAGE and MY FREEDOM"                                     
##  2          202 ""                                                              
##  3          202 "By Frederick Douglass"                                         
##  4          202 ""                                                              
##  5          202 ""                                                              
##  6          202 "By a principle essential to Christianity, a PERSON is eternall~
##  7          202 "differenced from a THING; so that the idea of a HUMAN BEING, n~
##  8          202 "excludes the idea of PROPERTY IN THAT BEING."                  
##  9          202 "--COLERIDGE"                                                   
## 10          202 ""                                                              
## # ... with 12,198 more rows

2.2 Tidy the data

# drop the front matter (rows 1 through 762 are the table of contents), keeping row 763 onward
bondage_count <- bondage_count[763:nrow(bondage_count), ]

# use unnest_tokens to break each line into individual words, one per row
bondage <- bondage_count %>% unnest_tokens(word, text)
bondage
## # A tibble: 129,472 x 2
##    gutenberg_id word       
##           <int> <chr>      
##  1          202 chapter    
##  2          202 i          
##  3          202 _childhood_
##  4          202 place      
##  5          202 of         
##  6          202 birth      
##  7          202 character  
##  8          202 of         
##  9          202 the        
## 10          202 district   
## # ... with 129,462 more rows
bondage_index <- bondage_count %>% 
  filter(text != "") %>%
  mutate(linenumber = row_number(),
         # chapter headings look like "CHAPTER I."; count them by detecting the
         # arabic or roman numeral that follows "chapter "
         chapter = cumsum(str_detect(text, regex("(?<=chapter )[\\divxlc]{1,3}", ignore_case = TRUE))))

bondage_index
## # A tibble: 10,624 x 4
##    gutenberg_id text                                          linenumber chapter
##           <int> <chr>                                              <int>   <int>
##  1          202 "CHAPTER I. _Childhood_"                               1       1
##  2          202 "PLACE OF BIRTH--CHARACTER OF THE DISTRICT--~          2       1
##  3          202 "NAME--CHOPTANK RIVER--TIME OF BIRTH--GENEAL~          3       1
##  4          202 "COUNTING TIME--NAMES OF GRANDPARENTS--THEIR~          4       1
##  5          202 "ESPECIALLY ESTEEMED--\"BORN TO GOOD LUCK\"-~          5       1
##  6          202 "POTATOES--SUPERSTITION--THE LOG CABIN--ITS ~          6       1
##  7          202 "CHILDREN--MY AUNTS--THEIR NAMES--FIRST KNOW~          7       1
##  8          202 "MASTER--GRIEFS AND JOYS OF CHILDHOOD--COMPA~          8       1
##  9          202 "SLAVE-BOY AND THE SON OF A SLAVEHOLDER."              9       1
## 10          202 "In Talbot county, Eastern Shore, Maryland, ~         10       1
## # ... with 10,614 more rows
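
As a quick sanity check on the chapter counter (added here), the maximum of the cumulative sum is the number of chapter headings detected:

max(bondage_index$chapter)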

2.3 Most frequent positive words

bondage %>% 
  inner_join(get_sentiments("bing")) %>% 
  filter(sentiment == "positive") %>%
  count(word, sentiment, sort = TRUE) %>% 
  top_n(10) %>%
  mutate(word = reorder(word, desc(n))) %>%
  ggplot() + 
  aes(x = word, y = n) +
  labs(title = "Most Frequent Positive Words") + 
  ylab("Count") + 
  xlab("Word") +
  geom_col() + 
  geom_text(aes(label = n, vjust = -.5)) + 
  theme(
    panel.background = element_rect(fill = "white", color = NA),
    axis.text.y = element_blank(), 
    axis.ticks.y = element_blank(),
    plot.title = element_text(hjust = 0.5)
  )
## Joining, by = "word"
## Selecting by n

2.4 Most frequent negative words

bondage %>% 
  inner_join(get_sentiments("bing")) %>% 
  filter(sentiment == "negative") %>%
  count(word, sentiment, sort = TRUE) %>% 
  top_n(10) %>%
  mutate(word = reorder(word, desc(n))) %>%
  ggplot() + 
  aes(x = word, y = n) +
  labs(title = "Most Frequent Negative Words") + 
  ylab("Count") + 
  xlab("Word") +
  geom_col() + 
  geom_text(aes(label = n, vjust = -.5)) + 
  theme(
    panel.background = element_rect(fill = "white", color = NA),
    axis.text.y = element_blank(), 
    axis.ticks.y = element_blank(),
    plot.title = element_text(hjust = 0.5)
  )
## Joining, by = "word"
## Selecting by n
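
The positive and negative chunks above differ only in the sentiment filter and the title, so they could be collapsed into one helper. A hedged refactor (plot_top_words is a name introduced here, not from the original code):

plot_top_words <- function(tokens, polarity, title) {
  tokens %>%
    inner_join(get_sentiments("bing"), by = "word") %>%
    filter(sentiment == polarity) %>%
    count(word, sort = TRUE) %>%
    slice_max(n, n = 10) %>%
    mutate(word = reorder(word, desc(n))) %>%
    ggplot(aes(word, n)) +
    geom_col() +
    geom_text(aes(label = n), vjust = -0.5) +
    labs(title = title, x = "Word", y = "Count") +
    theme(
      panel.background = element_rect(fill = "white", color = NA),
      axis.text.y = element_blank(),
      axis.ticks.y = element_blank(),
      plot.title = element_text(hjust = 0.5))
}

plot_top_words(bondage, "positive", "Most Frequent Positive Words")
plot_top_words(bondage, "negative", "Most Frequent Negative Words")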

2.5 Loughran lexicon

Here, I will use the Loughran lexicon in place of the lexicons used in the sample code.

lghrn <- get_sentiments("loughran")
unique(lghrn$sentiment)
## [1] "negative"     "positive"     "uncertainty"  "litigious"    "constraining"
## [6] "superfluous"
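
A quick tabulation, added for context, of how many words carry each label:

lghrn %>% count(sentiment, sort = TRUE)
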
# Let's explore the lexicon to see which words are tagged litigious and constraining

bondage_index %>% 
  unnest_tokens(word, text) %>% 
  inner_join(get_sentiments("loughran")) %>%
  filter(sentiment %in% c("litigious", "constraining")) %>%
  count(word, sentiment, sort = TRUE) %>%
  group_by(sentiment) %>%
  top_n(10) %>%
  ggplot() + 
  aes(x = reorder(word,desc(n)), y = n) + 
  geom_col() +
  facet_grid(~sentiment, scales = "free_x")  + 
  geom_text(aes(label = n, vjust = -.5)) + 
  labs(title = "Words Associated with Litigious and Constraining") + 
  ylab("Count") + 
  xlab("Word") + 
  theme(
    panel.background = element_rect(fill = "white", color = NA),
    axis.text.y = element_blank(), 
    axis.ticks.y = element_blank(),
    plot.title = element_text(hjust = 0.5)
  )
## Joining, by = "word"
## Selecting by n