The Qingming (Tomb-Sweeping Day) long weekend is when many people travel to honor their ancestors or take trips, and public transportation carries most of that traffic. The Taiwan Railways (台鐵) round-the-island network is the most convenient link between the west and east coasts. On the first day of the holiday, the busiest travel day, a Taroko Express (太魯閣號) train derailed in a severe accident caused by negligence at a construction site, leaving 49 people dead and 247 injured. Was this accident solely the fault of the outsourced contractor, or the result of long-entrenched lapses in oversight at the Taiwan Railways Administration? Our group uses this event to examine online opinions and discourse about the Taroko Express incident.
Goals of the analysis:
- Understand public attitudes toward the Taroko Express incident.
- Identify whom netizens hold responsible for the accident.
- Identify what netizens think should be done in its aftermath.
Data source: raw csv files of PTT Gossiping board articles collected via the National Sun Yat-sen University College of Management text-analysis platform. Dataset: PTT Gossiping board (八卦版). Date range: 2021.03.31–2021.04.14. Keywords: searching on the six keywords 「台鐵」、「李義祥」、「太魯閣」、「工程車」、「義祥」、「出軌」 returned 1,485 articles.
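For reproducibility, a minimal sketch of how that keyword filter could be re-created over the article metadata loaded below as rd2 (the platform's actual retrieval logic may differ):
keywords <- c("台鐵", "李義祥", "太魯閣", "工程車", "義祥", "出軌")
pattern <- paste(keywords, collapse = "|")
rd2 %>%
  filter(str_detect(artTitle, pattern) | str_detect(sentence, pattern)) %>%
  nrow() # should come out near the 1,485 articles reported above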
Sys.setlocale(category = "LC_ALL", locale = "zh_TW.UTF-8") # 避免中文亂碼
## Warning in Sys.setlocale(category = "LC_ALL", locale = "zh_TW.UTF-8"): OS
## reports request to set locale to "zh_TW.UTF-8" cannot be honored
## [1] ""
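The warning shows this document was knitted on a system that rejects the "zh_TW.UTF-8" locale name (typically Windows). A hedged fallback, assuming a Windows machine:
if (.Platform$OS.type == "windows") {
  Sys.setlocale(category = "LC_ALL", locale = "cht") # "cht" is the Windows shorthand for Traditional Chinese; adjust if your system differs
}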
packages = c("readr", "dplyr","wordcloud2", "stringr", "jiebaR", "tidytext", "NLP", "readr", "tidyr", "ggplot2", "ggraph", "igraph","reshape2", "NLP","scales", "reshape2", "widyr")
existing = as.character(installed.packages()[,1])
for(pkg in packages[!(packages %in% existing)]) install.packages(pkg)
require(data.table)
## Loading required package: data.table
require(dplyr)
## Loading required package: dplyr
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:data.table':
##
## between, first, last
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
require(readr)
## Loading required package: readr
require(stringr)
## Loading required package: stringr
require(jiebaR)
## Loading required package: jiebaR
## Loading required package: jiebaRD
require(tidytext)
## Loading required package: tidytext
require(NLP)
## Loading required package: NLP
require(tidyr)
## Loading required package: tidyr
require(ggplot2)
## Loading required package: ggplot2
##
## Attaching package: 'ggplot2'
## The following object is masked from 'package:NLP':
##
## annotate
require(ggraph)
## Loading required package: ggraph
require(igraph)
## Loading required package: igraph
##
## Attaching package: 'igraph'
## The following object is masked from 'package:tidyr':
##
## crossing
## The following objects are masked from 'package:dplyr':
##
## as_data_frame, groups, union
## The following objects are masked from 'package:stats':
##
## decompose, spectrum
## The following object is masked from 'package:base':
##
## union
require(scales)
## Loading required package: scales
##
## Attaching package: 'scales'
## The following object is masked from 'package:readr':
##
## col_factor
require(reshape2)
## Loading required package: reshape2
##
## Attaching package: 'reshape2'
## The following object is masked from 'package:tidyr':
##
## smiths
## The following objects are masked from 'package:data.table':
##
## dcast, melt
require(widyr)
## Loading required package: widyr
require(wordcloud2)
## Loading required package: wordcloud2
require(lubridate)
## Loading required package: lubridate
##
## Attaching package: 'lubridate'
## The following objects are masked from 'package:igraph':
##
## %--%, union
## The following objects are masked from 'package:data.table':
##
## hour, isoweek, mday, minute, month, quarter, second, wday, week,
## yday, year
## The following objects are masked from 'package:base':
##
## date, intersect, setdiff, union
require(htmlwidgets)
## Loading required package: htmlwidgets
require(webshot)
## Loading required package: webshot
require(plotly)
## Loading required package: plotly
##
## Attaching package: 'plotly'
## The following object is masked from 'package:igraph':
##
## groups
## The following object is masked from 'package:ggplot2':
##
## last_plot
## The following object is masked from 'package:stats':
##
## filter
## The following object is masked from 'package:graphics':
##
## layout
require( RColorBrewer)
## Loading required package: RColorBrewer
require(servr)
## Loading required package: servr
require(tm)
## Loading required package: tm
library(topicmodels)
require(LDAvis)
## Loading required package: LDAvis
rd <- read_csv("太魯閣事件_artWordFreq.csv")
##
## -- Column specification --------------------------------------------------------
## cols(
## artTitle = col_character(),
## artDate = col_date(format = ""),
## artTime = col_time(format = ""),
## artUrl = col_character(),
## word = col_character(),
## count = col_double()
## )
rd2 <- read_csv("太魯閣事件_articleMetaData.csv")
##
## -- Column specification --------------------------------------------------------
## cols(
## artTitle = col_character(),
## artDate = col_date(format = ""),
## artTime = col_time(format = ""),
## artUrl = col_character(),
## artPoster = col_character(),
## artCat = col_character(),
## commentNum = col_double(),
## push = col_double(),
## boo = col_double(),
## sentence = col_character()
## )
rd$artDate <- rd$artDate %>% as.Date("%Y/%m/%d") # read_csv already parsed artDate as Date; this is a defensive no-op
rd
data <- rd %>%
dplyr::select(artDate, artUrl) %>%
distinct()
article_count_by_date <- data %>%
group_by(artDate) %>%
summarise(count = n())
head(article_count_by_date, 20)
plot_date <-
# data
article_count_by_date %>%
# aesthetics
ggplot(aes(x = artDate, y = count)) +
# geometrics
geom_line(color = "#00AFBB", size = 1) +
# coordinates
scale_x_date(labels = date_format("%Y/%m/%d")) +
ggtitle("PTT 八卦版 討論文章數") +
xlab("日期") +
ylab("數量") +
# theme
  theme() # a Chinese font should be set here so CJK labels render correctly
plot_date
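One hedged way to satisfy that comment, assuming a locally installed CJK font (family names are platform-specific assumptions, e.g. "Microsoft JhengHei" on Windows or "Heiti TC Light" on macOS):
plot_date + theme(text = element_text(family = "Microsoft JhengHei")) # hypothetical font; use one installed on your system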
data <- rd %>%
group_by(word) %>%
summarise(sum = sum(count), .groups = 'drop') %>%
arrange(desc(sum))
head(data)
data %>% filter(sum > 50) %>% wordcloud2()
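wordcloud2() returns an htmlwidget; since htmlwidgets and webshot are loaded above, one plausible way to also export a static image (file names are placeholders):
wc <- data %>% filter(sum > 50) %>% wordcloud2()
saveWidget(wc, "wordcloud.html", selfcontained = FALSE)
webshot("wordcloud.html", "wordcloud.png", delay = 5) # give the JavaScript a few seconds to render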
rd_tokens <- rd %>%
select(-artTime, -artUrl)
head(rd_tokens)
rd_tokens_by_date <- rd_tokens %>%
count(artDate, word, sort = TRUE) %>%
filter(n > 5)
stop_words <- scan(file = "dict/stop_words.txt", what=character(),sep='\n',
encoding='utf-8',fileEncoding='utf-8')
## Warning in scan(file = "dict/stop_words.txt", what = character(), sep = "\n", :
## invalid input found on input connection 'dict/stop_words.txt'
plot_merge <- rd_tokens_by_date %>%
  filter(!(word %in% stop_words) & !(word %in% "台鐵")) %>%
  filter(artDate %in% seq(as.Date("2021-04-02"), as.Date("2021-04-10"), by = "day")) %>%
  group_by(artDate) %>%
  top_n(10, n) %>%
  ungroup() %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(x = word, y = n, fill = artDate)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = NULL) +
  facet_wrap(~artDate, scales = "free", ncol = 5) +
  coord_flip() +
  theme(text = element_text()) # set a Chinese font family here if CJK labels render incorrectly
plot_merge
> 4/2 was the day of the accident: a construction truck slid onto the track and derailed the Taroko Express. From 4/4 onward, posters debated whether blame lay with the construction project, the government, or Lee Yi-hsiang (李義祥).
> From 4/7 the discussion shifted toward how the government could reform the railway, and on 4/9 many argued TRA (台鐵) could follow THSR's (高鐵) practices.
On 4/9 the main topic was reforming TRA along THSR lines; for instance, when an object falls onto the high-speed rail tracks, sensors detect it immediately and alert trains so they can respond before a disaster occurs. On 4/10 the most-discussed terms were Lee Yi-hsiang and the Taroko derailment itself.
# positive-word dictionary (txt)
# words separated by commas
P <- read_file("dict/positive.txt")
# negative-word dictionary (txt)
N <- read_file("dict/negative.txt")
# split each string on commas
# strsplit returns a list; take its first element
P = strsplit(P, ",")[[1]]
N = strsplit(N, ",")[[1]]
# build data frames with two columns, word and sentiment
P = data.frame(word = P, sentiment = "positive")
N = data.frame(word = N, sentiment = "negative")
LIWC = rbind(P, N)
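A quick sanity check on the combined dictionary:
head(LIWC)
LIWC %>% count(sentiment) # number of positive vs. negative entries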
# preview the tokens that match the sentiment dictionary
rd_tokens_by_date %>%
  inner_join(LIWC, by = "word") %>%
  distinct(word, sentiment)
sentiment_count = rd_tokens_by_date %>%
select(artDate,word,n) %>%
inner_join(LIWC) %>%
group_by(artDate,sentiment) %>%
summarise(count=sum(n))
## Joining, by = "word"
## `summarise()` has grouped output by 'artDate'. You can override using the `.groups` argument.
# load the user-defined dictionary and stop-word list into the jieba worker
jieba_tokenizer <- worker(user = "dict/user_dict.txt", stop_word = "dict/stop_words.txt")
# tokenizer function for unnest_tokens()
chi_tokenizer <- function(t) {
  lapply(t, function(x) {
    if (nchar(x) > 1) {
      tokens <- segment(x, jieba_tokenizer)
      # drop tokens of length 1
      tokens <- tokens[nchar(tokens) > 1]
      return(tokens)
    }
  })
}
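A usage example on a made-up sentence (the exact tokens depend on the user dictionary):
chi_tokenizer("台鐵太魯閣號在花蓮發生出軌事故") # e.g. tokens such as "台鐵", "太魯閣", "花蓮", "出軌", "事故"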
sentiment_count %>%
ggplot() +
geom_line(aes(x=artDate,y=count,colour=sentiment)) +
scale_x_date(labels = date_format("%m/%d"))
rd_tokens_all <- rd2 %>%
unnest_tokens(word, sentence, token=chi_tokenizer) %>%
select(-artTime)
rd_tokens_all %>%
filter(artDate == as.Date("2021-04-02") |
artDate == as.Date("2021-04-03") |
artDate == as.Date("2021-04-04") |
artDate == as.Date("2021-04-05") |
artDate == as.Date("2021-04-06") |
artDate == as.Date("2021-04-07") |
artDate == as.Date("2021-04-08") ) %>%
inner_join(LIWC) %>%
group_by(word,sentiment) %>%
summarise(
count = n()
) %>% data.frame() %>%
top_n(30,wt = count) %>%
ungroup() %>%
mutate(word = reorder(word, count)) %>%
ggplot(aes(word, count, fill = sentiment)) +
geom_col(show.legend = FALSE) +
facet_wrap(~sentiment, scales = "free_y") +
labs(y = "Contribution to sentiment",
x = NULL) +
theme(text=element_text(size=14))+
coord_flip()
## Joining, by = "word"
## `summarise()` has grouped output by 'word'. You can override using the `.groups` argument.
> The sentiment chart shows words like 「問題」 (problem) and 「事故」 (accident) dominating the negative side, while 「安全」 (safety) and 「改善」 (improvement) lead the positive side. The positive terms likely reflect posters urging TRA to improve on safety, along with a willingness to keep supporting and trusting TRA once it does.
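The next chunks examine keyword context: each sentence is sliced into sliding 11-word windows, and for windows whose middle slot (word6) holds a chosen keyword, we count the words occupying the ten surrounding positions.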
# build 11-grams: every window of 11 consecutive tokens per sentence
ngram_11 <- function(t) {
  lapply(t, function(x) {
    tokens <- segment(x, jieba_tokenizer)
    ngram <- ngrams(tokens, 11) # NLP::ngrams returns a list of token windows
    ngram <- lapply(ngram, paste, collapse = " ")
    unlist(ngram)
  })
}
g_ngram_11 <- rd2 %>%
  select(artUrl, sentence) %>%
  unnest_tokens(ngram, sentence, token = ngram_11) %>%
  filter(!str_detect(ngram, regex("[0-9a-zA-Z]"))) # drop windows containing digits or Latin letters
g_ngrams_11_separated <- g_ngram_11 %>%
  separate(ngram, paste0("word", 1:11), sep = " ")
head(g_ngrams_11_separated)
# keep windows whose center word (word6) is 太魯閣
g_check_words <- g_ngrams_11_separated %>%
  filter(word6 == "太魯閣")
g_check_words
g_check_words_count <- g_check_words %>%
  melt(id.vars = "artUrl", measure.vars = paste0("word", 1:11)) %>% # reshape to one row per window position
  rename(word = value) %>%
  filter(variable != "word6") %>% # exclude the center word itself
  filter(!(word %in% stop_words), nchar(word) > 1) %>%
  count(word, sort = TRUE)
g_check_words_count %>%
arrange(desc(abs(n))) %>%
head(15) %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n, fill = n > 0)) +
geom_col(show.legend = FALSE) +
xlab("出現在「太魯閣」附近的字") +
ylab("出現次數") +
coord_flip()+
theme(text = element_text())
# repeat the context analysis with 台鐵 as the center word
g_check_words <- g_ngrams_11_separated %>%
  filter(word6 == "台鐵")
g_check_words
g_check_words_count <- g_check_words %>%
  melt(id.vars = "artUrl", measure.vars = paste0("word", 1:11)) %>%
  rename(word = value) %>%
  filter(variable != "word6") %>%
  filter(!(word %in% stop_words), nchar(word) > 1) %>%
  count(word, sort = TRUE)
g_check_words_count %>%
arrange(desc(abs(n))) %>%
head(15) %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n, fill = n > 0)) +
geom_col(show.legend = FALSE) +
xlab("出現在「台鐵」附近的字") +
ylab("出現次數") +
coord_flip()+
theme(text = element_text())
# tokenize each article and count how often each word appears in it
rd_words <- rd2 %>%
unnest_tokens(word, sentence, token=chi_tokenizer) %>%
filter(!str_detect(word, regex("[0-9a-zA-Z]"))) %>%
count(artUrl, word, sort = TRUE)
head(rd_words)
# count the total number of words in each article
total_words <- rd_words %>%
group_by(artUrl) %>%
summarize(total = sum(n))
head(total_words)
# join rd_words (per-article word counts)
# with total_words (per-article word totals)
# to add each article's total as a column
rd_words <- left_join(rd_words, total_words)
## Joining, by = "artUrl"
head(rd_words)
# compute each word's tf-idf, treating each article as a document
rd_words_tf_idf <- rd_words %>%
bind_tf_idf(word, artUrl, n)
#rd_words_tf_idf
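As a reference for what bind_tf_idf() adds: tf is a word's count divided by its article's total words, and idf is the natural log of (number of articles / number of articles containing the word). A quick self-check sketch:
N_docs <- n_distinct(rd_words_tf_idf$artUrl)
rd_words_tf_idf %>%
  group_by(word) %>%
  mutate(tf_check = n / total, # term frequency within the article
         idf_check = log(N_docs / n())) %>% # n() here = number of articles containing the word
  ungroup() %>%
  select(word, tf, tf_check, idf, idf_check) %>%
  head()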
# pick each article's ten highest tf-idf words
rd_words_tf_idf %>%
group_by(artUrl) %>%
top_n(10) %>%
arrange(desc(artUrl))
## Selecting by tf_idf
# pick each article's ten highest tf-idf words,
# then count how many times each word is selected
rd_words_tf_idf %>%
group_by(artUrl) %>%
top_n(10) %>%
arrange(desc(artUrl)) %>%
ungroup() %>%
count(word, sort=TRUE)
## Selecting by tf_idf
# compute the correlation between pairs of words (stop_words was already loaded above)
word_cors <- rd_words %>%
  group_by(word) %>%
  filter(n() >= 20) %>% # keep words that appear in at least 20 articles
  filter(!(word %in% stop_words) & !(word %in% "記者")) %>% # `&` (vectorized), not `&&`
  pairwise_cor(word, artUrl, sort = TRUE)
## Warning: `tbl_df()` was deprecated in dplyr 1.0.0.
## Please use `tibble::as_tibble()` instead.
#word_cors
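pairwise_cor() computes the phi coefficient between two words from their presence or absence across articles. For intuition, a hand-rolled sketch for one pair (equivalent up to floating-point error):
arts <- unique(rd_words$artUrl)
x <- arts %in% rd_words$artUrl[rd_words$word == "太魯閣"]
y <- arts %in% rd_words$artUrl[rd_words$word == "台鐵"]
cor(x, y) # Pearson correlation of two binary indicators = phi coefficient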
# words most correlated with 太魯閣
word_cors %>%
filter(item1 == "太魯閣") %>%
head(5)
# words most correlated with 義祥
word_cors %>%
filter(item1 == "義祥") %>%
head(5)
# words most correlated with 台鐵
word_cors %>%
filter(item1 == "台鐵") %>%
head(5)
# find the ten words most correlated with each of 台鐵, 太魯閣, 政府, 義祥
word_cors %>%
filter(item1 %in% c("台鐵", "太魯閣", "政府", "義祥")) %>%
group_by(item1) %>%
top_n(10) %>%
ungroup() %>%
mutate(item2 = reorder(item2, correlation)) %>%
ggplot(aes(item2, correlation)) +
geom_bar(stat = "identity") +
facet_wrap(~ item1, scales = "free") +
coord_flip()+
theme(text = element_text())
## Selecting by correlation
For each of the four keywords 「台鐵」、「太魯閣」、「義祥」、「政府」, we chart the ten most correlated words. The 「台鐵」 panel suggests the dominant theme is reform: TRA should learn from THSR and move toward corporatization or privatization in how it is run.
The 「太魯閣」 panel reflects the accident itself, which occurred at a tunnel in Hualien when a construction truck slid onto the track and derailed the train, completely crushing cars 1 through 5 and their connecting gangways.
The 「義祥」 panel points to Lee Yi-hsiang (李義祥), whose family business is industrial work and who was in charge of this construction project. In the 「政府」 panel, the most prominent word is 「採購」 (procurement); checking the underlying posts, this is because the contractor 「義翔工程行」 had previously taken part in government procurement cases involving document forgery, which drew heavy discussion, alongside threads on TRA's 「組織」 (organization) and 「民營化」 (privatization).
# show word pairs with correlation above 0.4
set.seed(2020) # fix the force-directed layout so the graph is reproducible
word_cors %>%
filter(correlation > 0.4) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
geom_node_point(color = "lightblue", size = 3) +
  geom_node_text(aes(label = name), repel = TRUE) + # set a Chinese font here if CJK labels render incorrectly
theme_void()
# show word pairs with correlation above 0.5
set.seed(2020)
word_cors %>%
filter(correlation > 0.5) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
geom_node_point(color = "lightblue", size = 3) +
geom_node_text(aes(label = name), repel = TRUE) +
theme_void()
Overall, the public placed responsibility for the Taroko Express accident on TRA and the government, and hoped that after the incident TRA would follow THSR's example and be privatized or corporatized.