This vignette explains how to analyse unstructured data, such as text documents, using the tm and topicmodels packages and the Latent Dirichlet Allocation (LDA) method to discover hidden topics within a collection of documents and to assign each document to its most likely topic.

This is useful when you have a large corpus of documents and want to explore them, and the relationships between them, without having to read, parse and categorise each one by hand.

For more information, see this blog post: https://eight2late.wordpress.com/2015/09/29/a-gentle-introduction-to-topic-modeling-using-r/

Note: This vignette assumes that the tm and topicmodels packages are already installed.

Step 1 Gather the unstructured data

Set the working directory to where the .txt documents are located, load the tm package, and read the files into a corpus for analysis.

#set working directory to where the corpus is stored on c drive
setwd("C:/Users/Benjibex/Documents/assignment1")
#load tm library assuming tm package already installed
library(tm)
#list the .txt files and load them into the corpus (pattern is a regular expression, not a glob)
filenames <- list.files(getwd(), pattern = "\\.txt$")
files <- lapply(filenames,readLines)
docs <- Corpus(VectorSource(files))
#go to parent directory
setwd("../")
getwd()
[1] "C:/Users/Benjibex/Documents"

Step 2 Create the Document-Term Matrix

Clean and normalise the text so that equivalent terms are counted together, then create the Document-Term Matrix (DTM), which the topicmodels package will analyse in the next steps.

#Remove punctuation marks
docs <- tm_map(docs, removePunctuation)
#Transform to lower case
docs <- tm_map(docs,content_transformer(tolower))
#Strip digits
docs <- tm_map(docs, removeNumbers)
#Remove stopwords from standard stopword list 
docs <- tm_map(docs, removeWords, stopwords("english"))
#Collapse the repeated whitespace left behind by the removals above
docs <- tm_map(docs, stripWhitespace)
#Stem the documents so that different forms of the same word aren't counted as separate terms
docs <- tm_map(docs,stemDocument)
#Create document-term matrix
dtm <- DocumentTermMatrix(docs)
dtm
<<DocumentTermMatrix (documents: 28, terms: 4521)>>
Non-/sparse entries: 11938/114650
Sparsity           : 91%
Maximal term length: 80
Weighting          : term frequency (tf)
rownames(dtm) <- filenames
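
Before modelling, it is worth a quick sanity check of the matrix. A minimal sketch using base R and tm's findFreqTerms; the threshold of 50 occurrences is an arbitrary choice for illustration.

#total frequency of each term across the whole corpus
freq <- colSums(as.matrix(dtm))
#the ten most frequent terms
head(sort(freq, decreasing = TRUE), 10)
#terms that appear at least 50 times
findFreqTerms(dtm, lowfreq = 50)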

Step 3 Load topicmodels package and run LDA to find n latent topics within the corpus

Use LDA to identify n topics (in this case I have chosen k = 5) through Gibbs sampling, which iteratively reassigns the words in each document among the topics until the allocation stabilises.

#Load Topic models
library(topicmodels)
#Run Latent Dirichlet Allocation (LDA) using Gibbs Sampling
#set the number of burn-in iterations to discard before sampling
burnin <- 1000
#set the number of Gibbs sampling iterations
iter <- 2000
#keep every 500th sample to reduce autocorrelation between draws
thin <- 500
#run the chain from 5 random starts
nstart <- 5
#one random seed per start
seed <- list(254672,109,122887,145629037,2)
#keep only the run with the highest posterior likelihood
best <- TRUE
#set number of topics
k <- 5
#run the LDA model
ldaOut <- LDA(dtm,k, method="Gibbs", control=
                list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin))
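
The choice of k = 5 is a judgment call. One rough way to compare candidates, sketched below, is to refit the model for several values of k and inspect the log-likelihood of each fit; the candidate values are arbitrary, and each fit repeats the full Gibbs run, so this is slow.

#fit a model for each candidate number of topics, reusing the control settings above
candidate_k <- c(3, 5, 7, 10)
fits <- lapply(candidate_k, function(k) LDA(dtm, k, method="Gibbs", control=
                list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin)))
#log-likelihood of each fitted model (higher is better, all else being equal)
sapply(fits, logLik)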

Step 4 Review the topics and topic allocations

#view the top 6 terms for each of the 5 topics, create a matrix and write to csv
terms(ldaOut,6)
     Topic 1   Topic 2        Topic 3      Topic 4    Topic 5  
[1,] "univers" "compani"      "limit"      "said"     "peopl"  
[2,] "will"    "year"         "audit"      "firm"     "work"   
[3,] "public"  "“"          "servic"     "tropfest" "busi"   
[4,] "new"     "entrepreneur" "qualiti"    "analyt"   "help"   
[5,] "valu"    "employe"      "global"     "brand"    "world"  
[6,] "market"  "busi"         "profession" "partner"  "program"
ldaOut.terms <- as.matrix(terms(ldaOut,6))
write.csv(ldaOut.terms,file=paste("LDAGibbs",k,"TopicsToTerms.csv"))
#view the topic assignment for each document
topics(ldaOut)
 01_AFR1.txt  01_AFR2.txt  01_AFR3.txt  01_AFR4.txt   02_EY1.txt   02_EY2.txt 
           2            4            4            4            5            5 
  02_EY3.txt   02_EY4.txt   02_EY5.txt   02_EY6.txt   02_EY7.txt   02_EY8.txt 
           1            3            2            1            5            4 
  03_EY1.txt  03_EY10.txt   03_EY2.txt   03_EY3.txt   03_EY4.txt   03_EY5.txt 
           2            5            4            4            5            2 
  03_EY6.txt   03_EY7.txt   03_EY8.txt   03_EY9.txt   04_EY1.txt 05_UNSW1.txt 
           4            4            4            5            4            5 
  05_UQ1.txt 05_USYD1.txt  05_UTS1.txt  05_UWA1.txt 
           1            5            1            2 
#create a matrix and write to csv
ldaOut.topics <-as.matrix(topics(ldaOut))
write.csv(ldaOut.topics,file=paste("LDAGibbs",k,"DocsToTopics.csv"))
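
terms() only ranks the words; topicmodels' posterior() accessor exposes the underlying per-topic word probabilities, which show how dominant each top term actually is. A minimal sketch:

#per-topic word distributions (one row per topic, one column per term)
topicWordProbs <- posterior(ldaOut)$terms
#probabilities of the six most likely words in topic 1
sort(topicWordProbs[1,], decreasing = TRUE)[1:6]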

Step 5 Probability calculation

Finally, calculate the probability of each document being associated with each topic.

#Find probabilities associated with each topic assignment
topicProbabilities <- as.data.frame(ldaOut@gamma) 
write.csv(topicProbabilities,file=paste("LDAGibbs",k,"TopicProbabilities.csv"))
#investigate topic probabilities data.frame
summary(topicProbabilities)
       V1                V2                V3                V4         
 Min.   :0.03176   Min.   :0.01160   Min.   :0.02826   Min.   :0.01077  
 1st Qu.:0.09810   1st Qu.:0.07499   1st Qu.:0.06686   1st Qu.:0.07643  
 Median :0.12333   Median :0.10939   Median :0.08570   Median :0.16297  
 Mean   :0.19975   Mean   :0.19625   Mean   :0.11739   Mean   :0.24264  
 3rd Qu.:0.20316   3rd Qu.:0.28758   3rd Qu.:0.12788   3rd Qu.:0.42299  
 Max.   :0.85475   Max.   :0.61286   Max.   :0.82767   Max.   :0.61438  
       V5         
 Min.   :0.05076  
 1st Qu.:0.11648  
 Median :0.17366  
 Mean   :0.24399  
 3rd Qu.:0.34787  
 Max.   :0.73058  
str(topicProbabilities)
'data.frame':   28 obs. of  5 variables:
 $ V1: num  0.1563 0.0951 0.2126 0.1446 0.117 ...
 $ V2: num  0.4043 0.108 0.0817 0.0661 0.0755 ...
 $ V3: num  0.0782 0.1028 0.0863 0.124 0.1396 ...
 $ V4: num  0.164 0.612 0.484 0.533 0.325 ...
 $ V5: num  0.1968 0.0823 0.1356 0.1322 0.3434 ...

Observations

The summary statistics of the topic probabilities are of limited use on their own: each row is a probability vector that sums to 1, so column-wise summaries mix documents with very different topic mixes. Further analysis of how decisively each document is allocated to its topic would be required.
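
One simple follow-up, sketched below, is to measure how decisively each document was assigned: the gap between its largest and second-largest topic probability, where a small gap flags documents sitting between topics. The assignmentGap name is illustrative, not part of the original analysis.

#difference between the top two topic probabilities for each document
assignmentGap <- apply(topicProbabilities, 1, function(p) {
  sorted <- sort(p, decreasing = TRUE)
  sorted[1] - sorted[2]
})
#the five documents (by row number) with the least clear-cut assignment
head(sort(assignmentGap), 5)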

References

Kailash Awati (Sensanalytics Consulting Pty Ltd), 29 September 2015, "A gentle introduction to topic modeling using R", https://eight2late.wordpress.com/2015/09/29/a-gentle-introduction-to-topic-modeling-using-r/
