Load the necessary packages

library(tidyverse)
## ── Attaching packages ────────────────────────────────────────────────── tidyverse 1.2.1 ──
## ✔ ggplot2 3.1.0     ✔ purrr   0.2.5
## ✔ tibble  2.0.0     ✔ dplyr   0.7.8
## ✔ tidyr   0.8.2     ✔ stringr 1.3.1
## ✔ readr   1.3.1     ✔ forcats 0.3.0
## ── Conflicts ───────────────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
library(DT)
library(tidytext)        # package for text analysis
library(readxl)          # reads Excel files, the format I used for the data

Read data

manifestos <- read_excel("manifestos.xlsx")
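
The steps below assume manifestos.xlsx contains at least an author and a text column (both names are used throughout); a quick structural check, as a sketch:

glimpse(manifestos)   # column names and types; expects author and text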

Prepare the “Manifestos” data by tokenizing the text into one word per row

manifesto_words <- manifestos %>%
  unnest_tokens(word, text)
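
By default, unnest_tokens() lowercases each token and strips punctuation, leaving one row per word with the author column carried along. A quick sanity check (output not shown):

head(manifesto_words)                 # one row per token
manifesto_words %>% count(author)     # token counts per author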

Create a table showing the total number of words, lexical diversity (the number of distinct words), and lexical density (distinct words divided by total words) by author.

manifesto_words %>% 
  group_by(author) %>% 
  summarise(num_words = n(),                      # total tokens
            lex_diversity = n_distinct(word),     # distinct words
            lex_density = n_distinct(word)/n())   # distinct / total
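
Since DT is loaded, the same summary could also be rendered as an interactive, sortable table; a minimal sketch using DT::datatable():

manifesto_words %>% 
  group_by(author) %>% 
  summarise(num_words = n(),
            lex_diversity = n_distinct(word),
            lex_density = n_distinct(word)/n()) %>% 
  datatable(options = list(pageLength = 5))   # interactive table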

Create a table showing the mean word length by author.

manifesto_words %>%
  group_by(author) %>% 
  mutate(word_length = nchar(word)) %>% 
  summarize(mean_word_length = mean(word_length)) %>% 
  arrange(-mean_word_length)
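
Because a handful of very long words can pull the mean upward, one optional companion check is to report the median alongside it:

manifesto_words %>%
  mutate(word_length = nchar(word)) %>% 
  group_by(author) %>% 
  summarize(mean_word_length   = mean(word_length),
            median_word_length = median(word_length)) %>% 
  arrange(-mean_word_length)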

Create histograms illustrating the distribution of word lengths by author.

manifesto_words %>%
  mutate(word_length = nchar(word)) %>% 
  ggplot(aes(word_length, fill = author)) +
  geom_histogram(binwidth = 1, show.legend = FALSE) +
  facet_wrap(vars(author), scales = "free_y") +
  labs(title = "Word Length Distributions of Manifestos By Author") +
  theme_minimal() +
  scale_fill_viridis_d()
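
One optional refinement, sketched under the same data assumptions: overlay each author's mean word length as a dashed reference line, computed separately so it can be passed to geom_vline() per facet.

mean_lengths <- manifesto_words %>%
  mutate(word_length = nchar(word)) %>% 
  group_by(author) %>% 
  summarize(mean_word_length = mean(word_length))

manifesto_words %>%
  mutate(word_length = nchar(word)) %>% 
  ggplot(aes(word_length, fill = author)) +
  geom_histogram(binwidth = 1, show.legend = FALSE) +
  geom_vline(data = mean_lengths, aes(xintercept = mean_word_length),
             linetype = "dashed") +
  facet_wrap(vars(author), scales = "free_y") +
  labs(title = "Word Length Distributions of Manifestos By Author") +
  theme_minimal() +
  scale_fill_viridis_d()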

Get stop words

stopwords <- get_stopwords()
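
get_stopwords() defaults to the English Snowball lexicon; both arguments can be set explicitly, and tidytext also bundles stop_words, a larger combined lexicon, if more aggressive filtering is wanted:

get_stopwords(language = "en", source = "snowball")   # the explicit defaults
# alternative: manifesto_words %>% anti_join(stop_words)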

Create a graph illustrating the most common words by author

manifesto_words %>% 
  anti_join(stopwords) %>%  
  group_by(author) %>% 
  count(word, sort = TRUE) %>% 
  top_n(5) %>% 
  ungroup() %>% 
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n, fill = author)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "Most common words") +
  facet_wrap(vars(author), scales = "free") +
  scale_fill_viridis_d() +
  theme_minimal() +
  coord_flip()
## Joining, by = "word"
## Selecting by n
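
Note that top_n() keeps ties, so a facet can show more than five bars. A hedged way to force exactly five rows per author (breaking ties arbitrarily):

manifesto_words %>% 
  anti_join(stopwords, by = "word") %>% 
  count(author, word, sort = TRUE) %>% 
  group_by(author) %>% 
  arrange(desc(n)) %>% 
  filter(row_number() <= 5) %>%   # exactly five rows per author
  ungroup()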

Calculate tf-idf for the “Manifestos” data, treating each author’s manifesto as a separate document.

manifesto_word_counts <- manifestos %>%             # This counts each word per author
  unnest_tokens(word, text) %>%
  count(author, word, sort = TRUE) 

total_words <- manifesto_word_counts %>%               # This counts total words per author
  group_by(author) %>% 
  summarize(total = sum(n))

manifesto_word_counts <- left_join(manifesto_word_counts, total_words)    # Joins the two
## Joining, by = "author"
manifesto_tf_idf <- manifesto_word_counts %>%             # Calculates tf-idf
  bind_tf_idf(word, author, n)

manifesto_tf_idf %>%                                   # Displays the highest tf-idf words first
  arrange(-tf_idf)
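
For intuition: bind_tf_idf() computes tf = n / total words in the document, idf = ln(number of documents / documents containing the word), and tf-idf = tf * idf, with each author as a document here. A manual sketch that should reproduce its columns:

manifesto_word_counts %>%
  group_by(word) %>% 
  mutate(docs_with_word = n_distinct(author)) %>%   # document frequency per word
  ungroup() %>% 
  mutate(tf = n / total,
         idf = log(n_distinct(author) / docs_with_word),
         tf_idf = tf * idf)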

Create a graph of the highest tf-idf words for each author.

manifesto_tf_idf %>%
  arrange(-tf_idf) %>%
  mutate(word = factor(word, levels = rev(unique(word)))) %>% 
  group_by(author) %>% 
  top_n(5) %>% 
  ggplot(aes(word, tf_idf, fill = author)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf",
       title = "Most Important Words in each Manifesto") +
  facet_wrap(~author, scales = "free") +
  coord_flip() +
  theme_minimal() +
  scale_fill_viridis_d()
## Selecting by tf_idf
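
One caveat with the factor() trick above: the word order is set globally, so bars within a facet are not guaranteed to sort by tf-idf. Newer tidytext versions (0.2.1+) provide reorder_within() and scale_x_reordered() for per-facet ordering; a sketch:

manifesto_tf_idf %>%
  group_by(author) %>% 
  top_n(5, tf_idf) %>% 
  ungroup() %>% 
  mutate(word = reorder_within(word, tf_idf, author)) %>%   # order within each facet
  ggplot(aes(word, tf_idf, fill = author)) +
  geom_col(show.legend = FALSE) +
  scale_x_reordered() +                                     # strips the ordering suffix from labels
  facet_wrap(~author, scales = "free") +
  coord_flip() +
  theme_minimal() +
  scale_fill_viridis_d() +
  labs(x = NULL, y = "tf-idf", title = "Most Important Words in each Manifesto")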