Introduction

On May 15, 2021, Taipei and New Taipei announced a Level 3 epidemic alert after the cluster outbreak in Wanhua. We explore the topics of pandemic-related discussion on PTT after the two cities, and later the whole country, entered the Level 3 alert, as well as the social networks formed between posters and repliers.

Sys.setlocale(category = "LC_ALL", locale = "zh_TW.UTF-8")
## [1] "zh_TW.UTF-8/zh_TW.UTF-8/zh_TW.UTF-8/C/zh_TW.UTF-8/zh_TW.UTF-8"
library(data.table)
library(ggplot2)
library(dplyr)
library(jiebaR)
library(tidytext)
library(stringr)
library(tm)
library(topicmodels)
library(purrr)
require(RColorBrewer)
library(gridExtra)
mycolors <- colorRampPalette(brewer.pal(8, "Set3"))(20)
library(igraph)
library(scales)
library(showtext) # for Chinese fonts
showtext_auto()

Data description

metadata <- fread("ptt_prevention_articleMetaData.csv", encoding = "UTF-8")

Daily post counts

  • Posting volume shows two peaks, on 5/15 and on 6/3
metadata$artDate <- as.Date(metadata$artDate, "%Y/%m/%d")

metadata %>% 
  mutate(artDate = as.Date(artDate)) %>%
  group_by(artDate) %>%
  summarise(count = n())%>%
  ggplot(aes(artDate,count))+
    geom_line(color="red")+
    geom_point()

#time.jpg

Distribution of reply counts per article

The mean number of comments per article is 70.51; the median is 18.

metadata %>%
  select(artUrl, commentNum) %>%
  ggplot() +
  geom_density(aes(x = commentNum)) +
  ggtitle("貼文的回覆數量分布") +
  xlab("數量 (commentNum)")

summary(metadata$commentNum)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##    0.00    9.00   18.00   70.51   49.00 1478.00

Data preprocessing

Initialize a jieba segmentation engine with default parameters

# default engine
jieba_tokenizer = worker()

# load a user dictionary and stop words
user_dict <- scan(file = "user_dict.txt", what=character(),sep='\n',
                   encoding='utf-8',fileEncoding='utf-8')
stop_words <- scan(file = "stop_words.txt", what=character(),sep='\n',
                   encoding='utf-8',fileEncoding='utf-8')
#stop_words

new_user_word(jieba_tokenizer, c(user_dict))
## [1] TRUE
news_tokenizer <- function(t) {
  lapply(t, function(x) {
    if(nchar(x)>1){
      tokens <- segment(x, jieba_tokenizer)
      tokens <- tokens[!tokens %in% stop_words]
      tokens <- tokens[nchar(tokens)>1]
      return(tokens)
    }
  })
}
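
As a quick sanity check, the tokenizer can be applied to a short string (a sketch; the exact tokens depend on user_dict.txt and stop_words.txt):

news_tokenizer("雙北宣布進入三級警戒,民眾關注疫苗進度")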

Count the occurrences of each token in every article

# tokens <- metadata %>%
#   unnest_tokens(word, sentence, token=news_tokenizer) %>%
#   filter((!str_detect(word, regex("[0-9a-zA-Z]"))) | str_detect(word, regex("[Aa][Zz]"))) %>%
#   #filter(!(word %in% stop_words)) %>%
#   count(artUrl, word) %>%
#   rename(count=n)
# tokens %>% head(20)
# tokens
# saveRDS(tokens, file = "0614token_result.rds")
tokens = readRDS(gzfile("0614token_result.rds"))

Convert the data into a Document-Term Matrix (DTM)

dtm <- tokens %>% cast_dtm(artUrl, word, count)
dtm
## <<DocumentTermMatrix (documents: 1874, terms: 25447)>>
## Non-/sparse entries: 126658/47561020
## Sparsity           : 100%
## Maximal term length: 26
## Weighting          : term frequency (tf)
inspect(dtm[1:10,1:10])
## <<DocumentTermMatrix (documents: 10, terms: 10)>>
## Non-/sparse entries: 28/72
## Sparsity           : 72%
## Maximal term length: 2
## Weighting          : term frequency (tf)
## Sample             :
##                                                           Terms
## Docs                                                       差距 防疫 沒用 努力
##   https://www.ptt.cc/bbs/Gossiping/M.1621008816.A.F02.html    1    2    1    2
##   https://www.ptt.cc/bbs/Gossiping/M.1621012600.A.CFF.html    0    2    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621016951.A.D4A.html    0    3    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621025291.A.41B.html    0    0    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621029977.A.13B.html    0    1    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621031373.A.5E3.html    0    3    0    1
##   https://www.ptt.cc/bbs/Gossiping/M.1621033887.A.383.html    0    2    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621037027.A.1B8.html    0    1    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621038361.A.C39.html    0    1    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621039285.A.F03.html    0    1    0    0
##                                                           Terms
## Docs                                                       失守 台灣 問題 疫苗
##   https://www.ptt.cc/bbs/Gossiping/M.1621008816.A.F02.html    2    1    1    3
##   https://www.ptt.cc/bbs/Gossiping/M.1621012600.A.CFF.html    0    2    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621016951.A.D4A.html    0    2    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621025291.A.41B.html    0    0    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621029977.A.13B.html    2    2    0    1
##   https://www.ptt.cc/bbs/Gossiping/M.1621031373.A.5E3.html    0    0    0    3
##   https://www.ptt.cc/bbs/Gossiping/M.1621033887.A.383.html    0    0    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621037027.A.1B8.html    0    0    1    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621038361.A.C39.html    0    0    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621039285.A.F03.html    0    0    2    0
##                                                           Terms
## Docs                                                       真正 足夠
##   https://www.ptt.cc/bbs/Gossiping/M.1621008816.A.F02.html    1    1
##   https://www.ptt.cc/bbs/Gossiping/M.1621012600.A.CFF.html    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621016951.A.D4A.html    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621025291.A.41B.html    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621029977.A.13B.html    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621031373.A.5E3.html    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621033887.A.383.html    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621037027.A.1B8.html    0    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621038361.A.C39.html    2    0
##   https://www.ptt.cc/bbs/Gossiping/M.1621039285.A.F03.html    0    0

Topic models

Building the LDA model

# lda <- LDA(dtm, k = 4, control = list(seed = 2021))
# lda <- LDA(dtm, k = 4, control = list(seed = 2021,alpha = 2,delta=0.01),method = "Gibbs") 
# saveRDS(lda, file = "lda.rds")

lda = readRDS(gzfile("lda.rds"))

Build the phi matrix from the LDA model

topics_words <- tidy(lda, matrix = "beta") 
colnames(topics_words) <- c("topic", "term", "phi")
head(topics_words)
## # A tibble: 6 x 3
##   topic term          phi
##   <int> <chr>       <dbl>
## 1     1 台灣  0.000000200
## 2     2 台灣  0.00829    
## 3     3 台灣  0.000000269
## 4     4 台灣  0.0382     
## 5     1 失守  0.000000200
## 6     2 失守  0.000000204

Finding representative terms for each topic

Sort the terms in each topic by phi in descending order and list the top 10.

removed_word = c("防疫") # remove the ubiquitous keyword "防疫" so it does not dominate every topic

topics_words %>%
  filter(! term %in% removed_word) %>%
  group_by(topic) %>%
  top_n(10, phi) %>%
  ungroup() %>%
  mutate(top_words = reorder_within(term,phi,topic)) %>%
  ggplot(aes(x = top_words, y = phi, fill = as.factor(topic))) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~ topic, scales = "free") +
  coord_flip() +
  scale_x_reordered()

#lda.jpg

Finding the optimal number of topics

Fitting models with more topic counts

Try 2, 4, 6, 10, and 15 topics, save the fitted models, and analyze them further below.

# ldas = c()
# topics = c(2,4,6,10,15)
# for(topic in topics){
#   start_time <- Sys.time()
#   lda <- LDA(dtm, k = topic, control = list(seed = 2021))
#   ldas =c(ldas,lda)
#   print(paste(topic ,paste("topic(s) and use time is ", Sys.time() -start_time)))
#   save(ldas,file = "ldas_result.rdata") # save the models to a file
# }

Load the saved LDA results for each topic count

load("ldas_result.rdata")

Choosing the number of topics via perplexity

topics = c(2,4,6,10,15)
data_frame(k = topics, perplex = map_dbl(ldas, topicmodels::perplexity)) %>%
  ggplot(aes(k, perplex)) +
  geom_point() +
  geom_line() +
  labs(title = "Evaluating LDA topic models",
       subtitle = "Optimal number of topics (smaller is better)",
       x = "Number of topics",
       y = "Perplexity")
## Warning: `data_frame()` was deprecated in tibble 1.1.0.
## Please use `tibble()` instead.

#perplexity.jpg

Choosing the number of topics with ldatuning

# install.packages("ldatuning")
library("ldatuning")
# result <- FindTopicsNumber(
#   dtm,
#   topics = topics,
#   metrics = c("Griffiths2004", "CaoJuan2009", "Arun2010", "Deveaud2014"),
#   method = "Gibbs",
#   control = list(seed = 2021),
#   mc.cores = 2L,
#   verbose = TRUE
# )

# saveRDS(result, file = "ldatuning_result.rds")
ldatuning_result = readRDS(gzfile("ldatuning_result.rds"))
FindTopicsNumber_plot(ldatuning_result)

#ldatuning.jpg

Analyzing the LDA results

Label each document with its dominant topic

label_topics <- tidy(lda, matrix="gamma") %>% # document topic gamma
                  group_by(document) %>%
                  top_n(1, wt=gamma)
metadata_topic <- merge(x = metadata, y = label_topics, by.x = "artUrl", by.y="document")
# head(metadata_topic)

#write.csv(metadata_topic,file="metadata_topic.csv",row.names = FALSE)

Selecting the model with 4 topics

the_lda = ldas[[2]] ## the k = 4 model (topics were c(2,4,6,10,15))
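
A quick check that ldas[[2]] really is the 4-topic model (a sketch; topicmodels stores the number of topics in the @k slot of the fitted object):

stopifnot(the_lda@k == 4)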

Naming the topics

topics_name = c("雙北疫情","中央疫情指揮中心","防疫生活","疫苗相關")
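
To keep the id-to-name pairing in one place instead of repeating the order in later legends, a named lookup vector can help (a minimal sketch):

topic_lookup <- setNames(topics_name, 1:4)
topic_lookup[["2"]] # "中央疫情指揮中心"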

Document topic distributions

# for every document we have a probability distribution of its contained topics
tmResult <- posterior(the_lda)
doc_pro <- tmResult$topics
rownames_doc_pro <- row.names(doc_pro)
document_topics <- doc_pro[metadata[artUrl %in% rownames_doc_pro]$artUrl,]
document_topics_df =data.frame(document_topics)
colnames(document_topics_df) = topics_name
rownames(document_topics_df) = NULL
news_topic = cbind(metadata,document_topics_df)
## Warning in as.data.table.list(x, keep.rownames = keep.rownames, check.names =
## check.names, : Item 2 has 1874 rows but longest item has 1875; recycled with
## remainder.
#saveRDS(news_topic, file = "news_topic.rds")

Topic volume across boards

bar_data =  news_topic %>% 
            group_by(artCat) %>%
            summarise_if(is.numeric, sum, na.rm = TRUE) %>%
            select("artCat", "雙北疫情","中央疫情指揮中心","防疫生活","疫苗相關") %>%
            melt(id.vars = "artCat")
bar_data %>%
  ggplot( aes(x=artCat, y=value, fill=variable)) + 
  geom_bar(stat = "identity") + ylab("value") + 
  scale_fill_manual(values=mycolors[c(1,5,8,12)])+
  theme(axis.text.x = element_text(angle = 90, hjust = 1))

#barchart.jpg

Comparing topic proportions across boards

# pie chart for the Gossiping board
Gossiping_pie = bar_data %>% 
                filter(artCat == "Gossiping") %>%
                ggplot( aes(x=artCat, y=value, fill=variable)) + 
                geom_bar(stat = "identity") + ylab("value") + 
                scale_fill_manual(values=mycolors[c(1,5,8,12)])+
                coord_polar(theta = "y")+
                theme(axis.title=element_blank(),axis.text=element_blank(),axis.ticks=element_blank())

# pie chart for the HatePolitics board
HatePolitics_pie = bar_data %>% 
                filter(artCat == "HatePolitics") %>%
                ggplot( aes(x=artCat, y=value, fill=variable)) + 
                geom_bar(stat = "identity") + ylab("value") + 
                scale_fill_manual(values=mycolors[c(1,5,8,12)])+
                coord_polar(theta = "y")+
                theme(axis.title=element_blank(),axis.text=element_blank(),axis.ticks=element_blank())

# combine the two charts
grid.arrange(Gossiping_pie,HatePolitics_pie,ncol=2,nrow=1)  

#piechart.jpg
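
The visual comparison can also be read off numerically; a sketch that turns the stacked values into within-board proportions:

bar_data %>%
  group_by(artCat) %>%
  mutate(prop = value / sum(value)) %>%
  arrange(artCat, desc(prop))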

Social Network

reviews <- fread("ptt_prevention_articleReviews.csv", encoding = "UTF-8")
reviews <- reviews %>%
  select(artUrl, cmtPoster, cmtStatus, cmtContent)

posts_reviews <- merge(x=metadata, y=reviews, by = "artUrl")

Count the number of posts and replies per account

# number of articles posted per account
post_count = metadata %>%
  group_by(artPoster) %>%
  summarise(count = n()) %>%
  arrange(desc(count))
head(post_count)
## # A tibble: 6 x 2
##   artPoster  count
##   <chr>      <int>
## 1 wind200625    26
## 2 tony900735    19
## 3 hugh509       10
## 4 muse87131      9
## 5 hoyumi         8
## 6 HTC92          8
# total number of comments per account
review_count = reviews %>%
  group_by(cmtPoster) %>%
  summarise(count = n()) %>%
  arrange(desc(count))
head(review_count)
## # A tibble: 6 x 2
##   cmtPoster count
##   <chr>     <int>
## 1 s9234032    407
## 2 birdy590    218
## 3 kuninaka    207
## 4 Anvec       184
## 5 amida959    175
## 6 TED781120   174
# posters
poster_select <- post_count %>% filter(count >= 2)
posts <- metadata %>%  filter(metadata$artPoster %in% poster_select$artPoster)

# repliers
reviewer_select <- review_count %>%  filter(count >= 20)
reviews <- reviews %>%  filter(reviews$cmtPoster %in% reviewer_select$cmtPoster)
length(unique(posts_reviews$artPoster)) # number of posters: 1333
## [1] 1333
length(unique(posts_reviews$cmtPoster)) # number of repliers: 25892
## [1] 25892
allPoster <- c(posts_reviews$artPoster, posts_reviews$cmtPoster) # total participants: 26446
length(unique(allPoster))
## [1] 26446

Label each subject's type (poster vs. replyer)

userList <- data.frame(user=unique(allPoster)) %>%
              mutate(type=ifelse(user%in%posts$artPoster, "poster", "replyer"))
head(userList,3)
##          user    type
## 1    peter308 replyer
## 2 ljsnonocat2  poster
## 3    ex250203  poster
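
A quick look at how many posters and repliers userList contains (a sanity check; the split depends on the count filters above):

table(userList$type)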

5/15 post-reply network

Select the articles from 5/15

link_0515 <- posts_reviews %>%
      group_by(cmtPoster, artUrl) %>% 
      filter(n()>3) %>% 
      filter(commentNum > 20) %>%
      # filter(artCat=="Gossiping") %>% 
      filter(artDate == as.Date('2021-05-15')) %>%
      select(cmtPoster, artPoster, artUrl) %>% 
      unique()

Network graph

# keep only users that appear in the 5/15 links
filtered_user_0515 <- userList %>%
          filter(user%in%link_0515$cmtPoster | user%in%link_0515$artPoster) %>%
          arrange(desc(type))

# build the network from the 5/15 links
reviewNetwork <- graph_from_data_frame(d=link_0515, v=filtered_user_0515, directed=F)

# color nodes by type of subject
set.seed(2021)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")
# plot(reviewNetwork, vertex.size=3, edge.arrow.size=0.3,vertex.label=NA)

# add names of subjects; only nodes with degree > filter_degree get labeled
filter_degree = 5
set.seed(2021)

# set node labels
labels <- degree(reviewNetwork) # degree of every node
V(reviewNetwork)$label <- names(labels)

# plot(
#   reviewNetwork, 
#   vertex.size=3, 
#   edge.width=3, 
#   vertex.label.dist=1,
#   vertex.label=ifelse(degree(reviewNetwork) > filter_degree, V(reviewNetwork)$label, NA),vertex.label.font=2)

0515 article push/boo network

  • WuSam's post was a news item about Taipei raising its alert to Level 3; most of the push comments expressed support for Mayor Ko Wen-je
filter_degree = 5 # degree threshold for showing node labels

# keep only push/boo reactions (drop neutral "→" comments)
link_0515 <- posts_reviews %>%
  # filter(artCat=="Gossiping") %>%
  filter(commentNum > 20) %>%
  filter(artDate == "2021-05-15") %>%
  filter(cmtStatus != "→") %>%
  group_by(cmtPoster, artUrl) %>%
  filter( n() > 1) %>%
  ungroup() %>% 
  select(cmtPoster, artPoster, artUrl, cmtStatus) %>% 
  unique()

# From here on, drawing the network repeats the earlier steps, so they are not described again in detail

# keep only users that appear in the links
filtered_user_0515 <- userList %>%
          filter(user%in%link_0515$cmtPoster | user%in%link_0515$artPoster) %>%
          arrange(desc(type))

# build the network
reviewNetwork_0515 <- graph_from_data_frame(d=link_0515, v=filtered_user_0515, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork_0515)
V(reviewNetwork_0515)$label <- names(labels)
V(reviewNetwork_0515)$color <- ifelse(V(reviewNetwork_0515)$type=="poster", "gold", "lightblue")


# color edges by push (推) vs. boo (噓)
E(reviewNetwork_0515)$color <- ifelse(E(reviewNetwork_0515)$cmtStatus == "推", "lightgreen", "palevioletred")

# draw the network
set.seed(2021)
plot(reviewNetwork_0515, vertex.size=4, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork_0515) > filter_degree, V(reviewNetwork_0515)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21,
  col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("推","噓"), 
       col=c("lightgreen","palevioletred"), lty=1, cex=1)

head(unique(posts_reviews[artDate == "2021-05-15" & artPoster  == "WuSam"]$sentence))
## [1] "https://www.youtube.com/watch?v=ufxH7ec-uXg\n因應國內本土疫情升溫,\n\n台北市內今日共89例本土個案\n\n台北市長柯文哲將於下午14:30召開臨時記者會,\n\n針對台北市防疫升級至三級警戒對外說明\n\n"
head(unique(posts_reviews[artDate == "2021-05-15" & artPoster  == "WuSam"]$cmtContent))
## [1] ":阿北可愛"             ":我寧願看阿北的"       ":阿北救命"            
## [4] ":啊北嘉由"             ":台派蟑螂氣哭"         ":趕緊的同時筆記,2,3波"
# unique(posts_reviews[artDate == "2021-05-15" & artPoster %in% c("KLGlikeshit", "WuSam")])

6/3 post-reply network

Select the articles from 6/3

link_0603 <- posts_reviews %>%
      group_by(cmtPoster, artUrl) %>% 
      filter(n()>3) %>% 
      filter(commentNum > 20) %>%
      filter(artDate == as.Date('2021-06-03')) %>%
      select(cmtPoster, artPoster, artUrl) %>% 
      unique()
# head(link_0603)

Network graph

# keep only users that appear in the 6/3 links
filtered_user_0603 <- userList %>%
          filter(user%in%link_0603$cmtPoster | user%in%link_0603$artPoster) %>%
          arrange(desc(type))

# build the network from the 6/3 links
reviewNetwork <- graph_from_data_frame(d=link_0603, v=filtered_user_0603, directed=F)

# color nodes by type of subject
set.seed(2021)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")
# plot(reviewNetwork, vertex.size=3, edge.arrow.size=0.3,vertex.label=NA)

# add names of subjects; only nodes with degree > filter_degree get labeled
filter_degree = 5
set.seed(2021)

# set node labels
labels <- degree(reviewNetwork) # degree of every node
V(reviewNetwork)$label <- names(labels)

# plot(
#   reviewNetwork, 
#   vertex.size=3, 
#   edge.width=3, 
#   vertex.label.dist=1,
#   vertex.label=ifelse(degree(reviewNetwork) > filter_degree, V(reviewNetwork)$label, NA),vertex.label.font=2)

0603 article push/boo network

  • Most of the central subjects booed tw689's post
filter_degree = 5 # degree threshold for showing node labels

# keep only push/boo reactions (drop neutral "→" comments)
link_0603 <- posts_reviews %>%
  filter(commentNum > 20) %>%
  filter(artDate == "2021-06-03") %>%
  filter(cmtStatus != "→") %>%
  group_by(cmtPoster, artUrl) %>%
  filter( n() > 1) %>%
  ungroup() %>% 
  select(cmtPoster, artPoster, artUrl, cmtStatus) %>% 
  unique()

# From here on, drawing the network repeats the earlier steps, so they are not described again in detail

# keep only users that appear in the links
filtered_user_0603 <- userList %>%
          filter(user%in%link_0603$cmtPoster | user%in%link_0603$artPoster) %>%
          arrange(desc(type))

# build the network
reviewNetwork_0603 <- graph_from_data_frame(d=link_0603, v=filtered_user_0603, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork_0603)
V(reviewNetwork_0603)$label <- names(labels)
V(reviewNetwork_0603)$color <- ifelse(V(reviewNetwork_0603)$type=="poster", "gold", "lightblue")


# color edges by push (推) vs. boo (噓)
E(reviewNetwork_0603)$color <- ifelse(E(reviewNetwork_0603)$cmtStatus == "推", "lightgreen", "palevioletred")

# draw the network
set.seed(2021)
plot(reviewNetwork_0603, vertex.size=2, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork_0603) > filter_degree, V(reviewNetwork_0603)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21,
  col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("推","噓"), 
       col=c("lightgreen","palevioletred"), lty=1, cex=1)

  • tw689's post reported that Chen Shih-chung said at the 6/3 press briefing that the rising share of confirmed cases among young people might stem from lax prevention, which angered PTT's younger users
head(unique(posts_reviews[artDate == "2021-06-03" & artPoster  == "tw689"]$sentence))
## [1] "陳時中剛剛在記者會中說(約14:08分)\n\n現在年輕人確診比例增高到佔了25.2%\n\n年輕人防疫鬆懈\n\n可能看到前兩天疫情平穩就開始移動、群聚\n\n特別要提醒年輕人,防疫不要鬆懈\n\n一但鬆懈就可能升高來\n\n提醒20到39歲的年輕人要把群聚減到最低不要鬆懈\n"
head(unique(posts_reviews[artDate == "2021-06-03" & artPoster  == "tw689"]$cmtContent))
## [1] ":幹你娘機掰"           ":五樓肛門也鬆了可憐哪" ":我先"                
## [4] ":817選的"              ":不是你嗎牙醫仔"       ":可悲"

Gossiping board push/boo network

Most repliers' reactions to the posts by ClownT and wind200625 are pushes.

filter_degree = 7 # degree threshold for showing node labels

# keep only push/boo reactions (drop neutral "→" comments)
link <- posts_reviews %>%
      filter(artCat== "Gossiping") %>%
      filter(commentNum > 20) %>%
      filter(cmtStatus!="→") %>%
      group_by(cmtPoster, artUrl) %>%
      filter( n() > 3) %>%
      ungroup() %>% 
      select(cmtPoster, artPoster, artUrl, cmtStatus) %>% 
      unique()

# keep only users that appear in the links
filtered_user <- userList %>%
          filter(user%in%link$cmtPoster | user%in%link$artPoster) %>%
          arrange(desc(type))

# build the network
reviewNetwork <- graph_from_data_frame(d=link, v=filtered_user, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork)
V(reviewNetwork)$label <- names(labels)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")


# color edges by push (推) vs. boo (噓)
E(reviewNetwork)$color <- ifelse(E(reviewNetwork)$cmtStatus == "推", "lightgreen", "palevioletred")

# draw the network
set.seed(2021)
plot(reviewNetwork, vertex.size=2, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork) > filter_degree, 
                         V(reviewNetwork)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21,
       col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("推","噓"), 
       col=c("lightgreen","palevioletred"), lty=1, cex=1)

  • wind200625 posted the content of the Taipei City prevention press conference on the Gossiping board every day
unique(posts_reviews[artCat == "Gossiping" & artPoster  == "wind200625"]$artTitle)
##  [1] "台北開完防疫記者會了"                  
##  [2] "[爆卦]台北開完防疫記者會了,大家快抄啊"
##  [3] "[爆卦]台北開完防疫記者會了,阿北很忙"  
##  [4] "[爆卦]台北開完防疫記者會了,同島一命"  
##  [5] "[爆卦]台北開完防疫記者會了,同胞同助"  
##  [6] "[爆卦]台北開完防疫記者會了,洗手戴罩"  
##  [7] "[爆卦]台北開完防疫記者會了,認真作戰"  
##  [8] "台北開完防疫記者會了,哆啦a夢"         
##  [9] "[爆卦]台北開完防疫記者會了,國家興亡"  
## [10] "[爆卦]台北開完防疫記者會了,優先為民"  
## [11] "[爆卦]台北開完防疫記者會了,良心為民"  
## [12] "[爆卦]台北開完防疫記者會了,人民優先"  
## [13] "[爆卦]台北開完防疫記者會了,長期抗戰"  
## [14] "[爆卦]台北開完防疫記者會了,疫下求生"  
## [15] "[爆卦]台北開完防疫記者會了,滾動修正"  
## [16] "[爆卦]台北開完防疫記者會了,家庭防疫"  
## [17] "[爆卦]台北開完防疫記者會了,豪雨成災"  
## [18] "[爆卦]台北開完防疫記者了,雙北聯防"    
## [19] "[爆卦]台北開完防疫記者會了,視訊問候"  
## [20] "[爆卦]台北開完防疫記者會了,三級延長"  
## [21] "[爆卦]台北開完防疫記者會了,疏困轉型"  
## [22] "[爆卦]台北開完防疫記者會了,檢討改進"  
## [23] "[爆卦]台北開完防疫記者會了,反思改進"  
## [24] "[爆卦]台北開完防疫記者會了,長者接種"  
## [25] "[爆卦]台北開完防疫記者會了,預約接種"
unique(posts_reviews[artCat == "Gossiping" & artPoster  == "wind200625"]$artDate)
##  [1] "2021-05-19" "2021-05-20" "2021-05-21" "2021-05-22" "2021-05-23"
##  [6] "2021-05-24" "2021-05-25" "2021-05-26" "2021-05-27" "2021-05-28"
## [11] "2021-05-29" "2021-05-30" "2021-05-31" "2021-06-01" "2021-06-02"
## [16] "2021-06-03" "2021-06-04" "2021-06-05" "2021-06-06" "2021-06-07"
## [21] "2021-06-08" "2021-06-09" "2021-06-10" "2021-06-11" "2021-06-12"
  • ClownT's two posts are titled 「防疫至今最經典的一句話」 and 「大家猜台灣多久後會防疫疲乏」
unique(posts_reviews[artCat == "Gossiping" & artPoster  == "ClownT"]$artTitle)
## [1] "[問卦]防疫至今最經典的一句話?"      "[問卦]大家猜台灣多久後會防疫疲乏?"

HatePolitics board push/boo network

On the HatePolitics board, most central subjects react to posts with pushes.

filter_degree = 5 # degree threshold for showing node labels

# keep only push/boo reactions (drop neutral "→" comments)
link <- posts_reviews %>%
      filter(artCat== "HatePolitics") %>%
      filter(commentNum > 20) %>%
      filter(cmtStatus!="→") %>%
      group_by(cmtPoster, artUrl) %>%
      filter( n() > 3) %>%
      ungroup() %>% 
      select(cmtPoster, artPoster, artUrl, cmtStatus) %>% 
      unique()

# keep only users that appear in the links
filtered_user <- userList %>%
          filter(user%in%link$cmtPoster | user%in%link$artPoster) %>%
          arrange(desc(type))

# build the network
reviewNetwork <- graph_from_data_frame(d=link, v=filtered_user, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork)
V(reviewNetwork)$label <- names(labels)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")


# color edges by push (推) vs. boo (噓)
E(reviewNetwork)$color <- ifelse(E(reviewNetwork)$cmtStatus == "推", "lightgreen", "palevioletred")

# draw the network
set.seed(2021)
plot(reviewNetwork, vertex.size=2, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork) > filter_degree, 
                         V(reviewNetwork)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21,
       col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("推","噓"), 
       col=c("lightgreen","palevioletred"), lty=1, cex=1)

head(unique(posts_reviews[artCat == "HatePolitics" & artPoster  == "TheoEpstein"]$artTitle))
## [1] "Re:[黑特]幹!突然覺得防疫跟球賽真他媽像"      
## [2] "[討論]防疫最重要的是中央地方合作"             
## [3] "[討論]有市長,會做事,北部市長齊心防疫"       
## [4] "Re:[黑特]仔細看這一年多防疫台灣真的只是"      
## [5] "[討論]防疫自煮學一下黃捷"                     
## [6] "Re:[轉錄]柯文哲這部影片,獻給每位防疫前線戰士"

Networks by topic

mask_topics <- tidy(lda, matrix="gamma") %>% # document topic gamma
                  group_by(document) %>%
                  top_n(1, wt=gamma)
posts_topics <- merge(x = posts_reviews, y = mask_topics, 
                      by.x = "artUrl", by.y="document", allow.cartesian = T)
# head(posts_topics,3)

Gossiping

The median commentNum on the Gossiping board is 375 (computed over comment rows, so it is weighted toward heavily commented articles).

summary(posts_reviews[artCat=="Gossiping"]$commentNum)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##     1.0   122.0   375.0   469.1   707.0  1478.0
link <- posts_topics %>%
  group_by(cmtPoster, artUrl) %>% 
  filter(n()>8) %>% 
  filter(commentNum > 375) %>%
  filter(artCat=="Gossiping") %>% #HatePolitics / Gossiping
  # filter(artDate == as.Date('2021-05-15')) %>%
  # filter(topic == 1) %>% 
  select(cmtPoster, artPoster, artUrl, topic) %>% 
  unique()
filtered_user <- userList %>%
          filter(user%in%link$cmtPoster | user%in%link$artPoster) %>%
          arrange(desc(type))
filter_degree = 20

# build the network
reviewNetwork <- graph_from_data_frame(d=link, v=filtered_user, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork)
V(reviewNetwork)$label <- names(labels)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")

# color edges by the topic of the article the reply belongs to
# E(reviewNetwork)$color <- ifelse(E(reviewNetwork)$topic == "1", "palevioletred", "lightgreen")
E(reviewNetwork)$color <- sapply(X = E(reviewNetwork)$topic, 
                                 FUN = function(x){
                                   switch(as.character(x),
                                          "1" = "palevioletred",
                                          "2" = "lightgreen",
                                          "3" = "yellow",
                                          "4" = "gray")}
                                 )

# draw the network (only nodes with degree > filter_degree are labeled)
set.seed(2021)
plot(reviewNetwork, vertex.size=3, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork) > filter_degree, V(reviewNetwork)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21, 
  col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("雙北疫情","中央疫情指揮中心", "防疫生活", "疫苗相關"), 
       col=c("palevioletred", "lightgreen", "yellow", "gray"), lty=1, cex=1)
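
The switch()-based edge coloring above can be written more compactly with a named color vector; an equivalent sketch:

topic_colors <- c("1" = "palevioletred", "2" = "lightgreen",
                  "3" = "yellow", "4" = "gray")
E(reviewNetwork)$color <- unname(topic_colors[as.character(E(reviewNetwork)$topic)])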

head(unique(posts_reviews[artCat == "Gossiping" & artPoster  == "wind200625"]$artTitle))
## [1] "台北開完防疫記者會了"                  
## [2] "[爆卦]台北開完防疫記者會了,大家快抄啊"
## [3] "[爆卦]台北開完防疫記者會了,阿北很忙"  
## [4] "[爆卦]台北開完防疫記者會了,同島一命"  
## [5] "[爆卦]台北開完防疫記者會了,同胞同助"  
## [6] "[爆卦]台北開完防疫記者會了,洗手戴罩"

Centrality

cent_gossip <- as.data.frame(eigen_centrality(reviewNetwork, directed = F))
  • Although wind200625 has a very large number of links in the network above, the subject with the highest eigen-centrality is kech9111
  • His post is titled 「[爆卦]新北市的防疫方法」
cent_gossip %>% 
  arrange(desc(vector)) %>%
  head
##                vector    value options.bmat options.n options.which options.nev
## kech9111    1.0000000 8.071777            I       235            LA           1
## amida959    0.2780544 8.071777            I       235            LA           1
## fenway18    0.2632238 8.071777            I       235            LA           1
## wolfking623 0.2632238 8.071777            I       235            LA           1
## hohohoha    0.2580632 8.071777            I       235            LA           1
## soulboy330  0.2477769 8.071777            I       235            LA           1
##             options.tol options.ncv options.ldv options.ishift options.maxiter
## kech9111              0           0           0              1            1000
## amida959              0           0           0              1            1000
## fenway18              0           0           0              1            1000
## wolfking623           0           0           0              1            1000
## hohohoha              0           0           0              1            1000
## soulboy330            0           0           0              1            1000
##             options.nb options.mode options.start options.sigma options.sigmai
## kech9111             1            1             1             0              0
## amida959             1            1             1             0              0
## fenway18             1            1             1             0              0
## wolfking623          1            1             1             0              0
## hohohoha             1            1             1             0              0
## soulboy330           1            1             1             0              0
##             options.info options.iter options.nconv options.numop
## kech9111               0            2             1            30
## amida959               0            2             1            30
## fenway18               0            2             1            30
## wolfking623            0            2             1            30
## hohohoha               0            2             1            30
## soulboy330             0            2             1            30
##             options.numopb options.numreo
## kech9111                 0             19
## amida959                 0             19
## fenway18                 0             19
## wolfking623              0             19
## hohohoha                 0             19
## soulboy330               0             19
head(unique(posts_reviews[artCat == "Gossiping" & artPoster  == "kech9111"]$artTitle))
## [1] "[爆卦]新北市的防疫方法"
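
Since eigen_centrality() returns its solver options alongside the scores, keeping only the vector component gives a tidier ranking (a sketch, assuming reviewNetwork is still the Gossiping topic network built above):

head(sort(eigen_centrality(reviewNetwork, directed = FALSE)$vector, decreasing = TRUE))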

HatePolitics

The median commentNum on the HatePolitics board is 86 (the link filter below uses commentNum > 60).

summary(posts_reviews[artCat=="HatePolitics"]$commentNum)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##     1.0    35.0    86.0   146.2   210.0   604.0
link <- posts_topics %>%
  group_by(cmtPoster, artUrl) %>% 
  filter(n()>8) %>% 
  filter(commentNum > 60) %>%
  filter(artCat=="HatePolitics") %>% #HatePolitics / Gossiping
  # filter(artDate == as.Date('2021-05-15')) %>%
  # filter(topic == 1) %>% 
  select(cmtPoster, artPoster, artUrl, topic) %>% 
  unique()
filtered_user <- userList %>%
          filter(user%in%link$cmtPoster | user%in%link$artPoster) %>%
          arrange(desc(type))
filter_degree = 20

# build the network
reviewNetwork <- graph_from_data_frame(d=link, v=filtered_user, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork)
V(reviewNetwork)$label <- names(labels)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")

# color edges by the topic of the article the reply belongs to
# E(reviewNetwork)$color <- ifelse(E(reviewNetwork)$topic == "1", "palevioletred", "lightgreen")
E(reviewNetwork)$color <- sapply(X = E(reviewNetwork)$topic, 
                                 FUN = function(x){
                                   switch(as.character(x),
                                          "1" = "palevioletred",
                                          "2" = "lightgreen",
                                          "3" = "yellow",
                                          "4" = "gray")}
                                 )

# draw the network (only nodes with degree > filter_degree are labeled)
set.seed(2021)
plot(reviewNetwork, vertex.size=3, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork) > filter_degree, V(reviewNetwork)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21, 
  col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("雙北疫情","中央疫情指揮中心", "防疫生活", "疫苗相關"), 
       col=c("palevioletred", "lightgreen", "yellow", "gray"), lty=1, cex=1)

head(unique(posts_reviews[artCat == "HatePolitics" & artPoster  == "TheoEpstein"]$artTitle))
## [1] "Re:[黑特]幹!突然覺得防疫跟球賽真他媽像"      
## [2] "[討論]防疫最重要的是中央地方合作"             
## [3] "[討論]有市長,會做事,北部市長齊心防疫"       
## [4] "Re:[黑特]仔細看這一年多防疫台灣真的只是"      
## [5] "[討論]防疫自煮學一下黃捷"                     
## [6] "Re:[轉錄]柯文哲這部影片,獻給每位防疫前線戰士"

Eigen Centrality

cent_hateP <- as.data.frame(eigen_centrality(reviewNetwork, directed = F))
  • The subject with the highest eigen-centrality is again TheoEpstein
cent_hateP %>% 
  arrange(desc(vector)) %>%
  head
##                vector    value options.bmat options.n options.which options.nev
## TheoEpstein 1.0000000 6.953758            I       171            LA           1
## TED781120   0.4722634 6.953758            I       171            LA           1
## kuninaka    0.3801264 6.953758            I       171            LA           1
## moeliliacg  0.3555291 6.953758            I       171            LA           1
## H2          0.3201954 6.953758            I       171            LA           1
## chungrew    0.2225542 6.953758            I       171            LA           1
##             options.tol options.ncv options.ldv options.ishift options.maxiter
## TheoEpstein           0           0           0              1            1000
## TED781120             0           0           0              1            1000
## kuninaka              0           0           0              1            1000
## moeliliacg            0           0           0              1            1000
## H2                    0           0           0              1            1000
## chungrew              0           0           0              1            1000
##             options.nb options.mode options.start options.sigma options.sigmai
## TheoEpstein          1            1             1             0              0
## TED781120            1            1             1             0              0
## kuninaka             1            1             1             0              0
## moeliliacg           1            1             1             0              0
## H2                   1            1             1             0              0
## chungrew             1            1             1             0              0
##             options.info options.iter options.nconv options.numop
## TheoEpstein            0            3             1            40
## TED781120              0            3             1            40
## kuninaka               0            3             1            40
## moeliliacg             0            3             1            40
## H2                     0            3             1            40
## chungrew               0            3             1            40
##             options.numopb options.numreo
## TheoEpstein              0             28
## TED781120                0             28
## kuninaka                 0             28
## moeliliacg               0             28
## H2                       0             28
## chungrew                 0             28

0515 Gossiping

link <- posts_topics %>%
  group_by(cmtPoster, artUrl) %>% 
  filter(n()>2) %>% 
  filter(commentNum > 20) %>%
  filter(artCat=="Gossiping") %>% #HatePolitics / Gossiping
  filter(artDate == as.Date('2021-05-15')) %>%
  # filter(topic == 1) %>% 
  select(cmtPoster, artPoster, artUrl, topic) %>% 
  unique()
filtered_user <- userList %>%
          filter(user%in%link$cmtPoster | user%in%link$artPoster) %>%
          arrange(desc(type))
filter_degree = 20

# build the network
reviewNetwork <- graph_from_data_frame(d=link, v=filtered_user, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork)
V(reviewNetwork)$label <- names(labels)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")

# color edges by the topic of the article the reply belongs to
# E(reviewNetwork)$color <- ifelse(E(reviewNetwork)$topic == "1", "palevioletred", "lightgreen")
E(reviewNetwork)$color <- sapply(X = E(reviewNetwork)$topic, 
                                 FUN = function(x){
                                   switch(as.character(x),
                                          "1" = "palevioletred",
                                          "2" = "lightgreen",
                                          "3" = "yellow",
                                          "4" = "gray")}
                                 )

# draw the network (only nodes with degree > filter_degree are labeled)
set.seed(2021)
plot(reviewNetwork, vertex.size=3, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork) > filter_degree, V(reviewNetwork)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21, 
  col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("雙北疫情","中央疫情指揮中心", "防疫生活", "疫苗相關"), 
       col=c("palevioletred", "lightgreen", "yellow", "gray"), lty=1, cex=1)

0515 HatePolitics

link <- posts_topics %>%
  group_by(cmtPoster, artUrl) %>% 
  # filter(n()>2) %>% 
  filter(commentNum > 10) %>%
  filter(artCat=="HatePolitics") %>% #HatePolitics / Gossiping
  filter(artDate == as.Date('2021-05-15')) %>%
  select(cmtPoster, artPoster, artUrl, topic) %>% 
  unique()
filtered_user <- userList %>%
          filter(user%in%link$cmtPoster | user%in%link$artPoster) %>%
          arrange(desc(type))
filter_degree = 5

# build the network
reviewNetwork <- graph_from_data_frame(d=link, v=filtered_user, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork)
V(reviewNetwork)$label <- names(labels)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")

# color edges by the topic of the article the reply belongs to
# E(reviewNetwork)$color <- ifelse(E(reviewNetwork)$topic == "1", "palevioletred", "lightgreen")
E(reviewNetwork)$color <- sapply(X = E(reviewNetwork)$topic, 
                                 FUN = function(x){
                                   switch(as.character(x),
                                          "1" = "palevioletred",
                                          "2" = "lightgreen",
                                          "3" = "yellow",
                                          "4" = "gray")}
                                 )

# draw the network (only nodes with degree > filter_degree are labeled)
set.seed(2021)
plot(reviewNetwork, vertex.size=3, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork) > filter_degree, V(reviewNetwork)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21, 
  col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("雙北疫情","中央疫情指揮中心", "防疫生活", "疫苗相關"), 
       col=c("palevioletred", "lightgreen", "yellow", "gray"), lty=1, cex=1)

0603 Gossiping

link <- posts_topics %>%
  group_by(cmtPoster, artUrl) %>% 
  filter(n()>2) %>% 
  filter(commentNum > 20) %>%
  filter(artCat=="Gossiping") %>% #HatePolitics / Gossiping
  filter(artDate == as.Date('2021-06-03')) %>%
  select(cmtPoster, artPoster, artUrl, topic) %>% 
  unique()
filtered_user <- userList %>%
          filter(user%in%link$cmtPoster | user%in%link$artPoster) %>%
          arrange(desc(type))
filter_degree = 5

# build the network
reviewNetwork <- graph_from_data_frame(d=link, v=filtered_user, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork)
V(reviewNetwork)$label <- names(labels)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")

# color edges by the topic of the article the reply belongs to
# E(reviewNetwork)$color <- ifelse(E(reviewNetwork)$topic == "1", "palevioletred", "lightgreen")
E(reviewNetwork)$color <- sapply(X = E(reviewNetwork)$topic, 
                                 FUN = function(x){
                                   switch(as.character(x),
                                          "1" = "palevioletred",
                                          "2" = "lightgreen",
                                          "3" = "yellow",
                                          "4" = "gray")}
                                 )

# draw the network (only nodes with degree > filter_degree are labeled)
set.seed(2021)
plot(reviewNetwork, vertex.size=3, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork) > filter_degree, V(reviewNetwork)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21, 
  col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("雙北疫情","中央疫情指揮中心", "防疫生活", "疫苗相關"), 
       col=c("palevioletred", "lightgreen", "yellow", "gray"), lty=1, cex=1)

0603 HatePolitics

link <- posts_topics %>%
  group_by(cmtPoster, artUrl) %>% 
  # filter(n()>2) %>% 
  filter(commentNum > 10) %>%
  filter(artCat=="HatePolitics") %>% #HatePolitics / Gossiping
  filter(artDate == as.Date('2021-06-03')) %>%
  select(cmtPoster, artPoster, artUrl, topic) %>% 
  unique()
filtered_user <- userList %>%
          filter(user%in%link$cmtPoster | user%in%link$artPoster) %>%
          arrange(desc(type))
filter_degree = 5

# build the network
reviewNetwork <- graph_from_data_frame(d=link, v=filtered_user, directed=F)

# color nodes by user role
labels <- degree(reviewNetwork)
V(reviewNetwork)$label <- names(labels)
V(reviewNetwork)$color <- ifelse(V(reviewNetwork)$type=="poster", "gold", "lightblue")

# color edges by the topic of the article the reply belongs to
# E(reviewNetwork)$color <- ifelse(E(reviewNetwork)$topic == "1", "palevioletred", "lightgreen")
E(reviewNetwork)$color <- sapply(X = E(reviewNetwork)$topic, 
                                 FUN = function(x){
                                   switch(as.character(x),
                                          "1" = "palevioletred",
                                          "2" = "lightgreen",
                                          "3" = "yellow",
                                          "4" = "gray")}
                                 )

# draw the network (only nodes with degree > filter_degree are labeled)
set.seed(2021)
plot(reviewNetwork, vertex.size=3, edge.width=3, vertex.label.dist=1,
     vertex.label=ifelse(degree(reviewNetwork) > filter_degree, V(reviewNetwork)$label, NA),vertex.label.font=2)

# add legends
legend("bottomright", c("發文者","回文者"), pch=21, 
  col="#777777", pt.bg=c("gold","lightblue"), pt.cex=1, cex=1)
legend("topleft", c("雙北疫情","中央疫情指揮中心", "防疫生活", "疫苗相關"), 
       col=c("palevioletred", "lightgreen", "yellow", "gray"), lty=1, cex=1)

Centrality

cent_0603_hateP <- as.data.frame(eigen_centrality(reviewNetwork, directed = F))
cent_0603_hateP %>% 
  arrange(desc(vector)) %>%
  head
##                vector    value options.bmat options.n options.which options.nev
## takuminauki 1.0000000 6.476504            I       132            LA           1
## tinmar      0.1773110 6.476504            I       132            LA           1
## Chen73      0.1684826 6.476504            I       132            LA           1
## icolee      0.1684826 6.476504            I       132            LA           1
## abetterman  0.1646292 6.476504            I       132            LA           1
## kuninaka    0.1605795 6.476504            I       132            LA           1
##             options.tol options.ncv options.ldv options.ishift options.maxiter
## takuminauki           0           0           0              1            1000
## tinmar                0           0           0              1            1000
## Chen73                0           0           0              1            1000
## icolee                0           0           0              1            1000
## abetterman            0           0           0              1            1000
## kuninaka              0           0           0              1            1000
##             options.nb options.mode options.start options.sigma options.sigmai
## takuminauki          1            1             1             0              0
## tinmar               1            1             1             0              0
## Chen73               1            1             1             0              0
## icolee               1            1             1             0              0
## abetterman           1            1             1             0              0
## kuninaka             1            1             1             0              0
##             options.info options.iter options.nconv options.numop
## takuminauki            0            1             1            20
## tinmar                 0            1             1            20
## Chen73                 0            1             1            20
## icolee                 0            1             1            20
## abetterman             0            1             1            20
## kuninaka               0            1             1            20
##             options.numopb options.numreo
## takuminauki              0             19
## tinmar                   0             19
## Chen73                   0             19
## icolee                   0             19
## abetterman               0             19
## kuninaka                 0             19

Python part

TOC

  • Stanza (new CoreNLP) Analysis
  • Bert Clustering
  • Bert Classification

This part switches to some Python tools to support the text mining.

Stanza Analysis

The Stanford NLP Group’s official Python NLP library. It contains support for running various accurate natural language processing tools on 60+ languages and for accessing the Java Stanford CoreNLP software from Python.

import stanza
stanza.__version__
'1.1.1'
nlp = stanza.Pipeline('zh-hant', use_gpu=False)
2021-06-15 17:28:48 INFO: Loading these models for language: zh-hant (Traditional_Chinese):
=======================
| Processor | Package |
-----------------------
| tokenize  | gsd     |
| pos       | gsd     |
| lemma     | gsd     |
| depparse  | gsd     |
=======================

2021-06-15 17:28:48 INFO: Use device: cpu
2021-06-15 17:28:48 INFO: Loading: tokenize
2021-06-15 17:28:48 INFO: Loading: pos
2021-06-15 17:28:49 INFO: Loading: lemma
2021-06-15 17:28:49 INFO: Loading: depparse
2021-06-15 17:28:50 INFO: Done loading processors!
doc = nlp('現階段武漢肺炎還是主要敵人,我們應該專心做好防疫')

for i, sent in enumerate(doc.sentences):
    print('[Sentence {}]'.format(i+1))
    for word in sent.words:
        print('{:12s}\t{:12s}\t{:6s}\t{:d}\t{:12s}'.format(\
              word.text, word.lemma, word.pos, word.head, word.deprel))
    print('')
[Sentence 1]
現               現               NOUN    7   nmod:tmod   
階段              階段              NOUN    4   nmod        
武漢              武漢              PROPN   4   nmod        
肺炎              肺炎              NOUN    7   nsubj       
還是              還是              AUX     7   cop         
主要              主要              ADJ     7   amod        
敵人              敵人              NOUN    12  advcl       
,               ,               PUNCT   12  punct       
我們              我               PRON    12  nsubj       
應該              應該              AUX     12  aux         
專心              專心              ADV     12  advmod      
做               做               VERB    0   root        
好防              好防              NOUN    12  obj         
疫               疫               PUNCT   12  punct       

Bert Clustering

!pip3 freeze | grep torch[^-]
facenet-pytorch==2.5.2
torch==1.7.1+cu101
torchtext==0.8.0
torchvision==0.8.2+cu101
import torch
import numpy as np
import pandas as pd

df = pd.read_csv('metadata_topic.csv')
df
artUrl artTitle artDate artTime artPoster artCat commentNum push boo sentence topic gamma
0 https://www.ptt.cc/bbs/Gossiping/M.1621008816…. [問卦]防疫贏了別人一整年卻瞬間失守的關鍵? 2021/05/14 16:13:31 peter308 Gossiping 274 115 21 我感覺失守關鍵是疫苗只要疫苗買不到… 4 0.709677
1 https://www.ptt.cc/bbs/Gossiping/M.1621012600…. [問卦]這次防疫破功的最大豬隊友是誰??? 2021/05/14 17:16:37 ljsnonocat2 Gossiping 272 150 33 這次讓台灣成功堅守一年的防疫整個破功的最大豬隊友是誰??1. 范雲. 4 0.804878
2 https://www.ptt.cc/bbs/Gossiping/M.1621016951…. [問卦]怎麼不直接升到五級防疫!? 2021/05/14 18:29:09 ex250203 Gossiping 8 0 2 感覺現在台灣疫情這麼嚴重 居然連三級都沒有!?!… 4 0.441176
3 https://www.ptt.cc/bbs/Gossiping/M.1621025291…. [問卦]確診後隔44小時後公佈算防疫破口嗎? 2021/05/14 20:48:09 windyyw Gossiping 251 89 24 現有執行政策,當天晚上6點後確診的,,要到第三天下午兩點公佈,… 1 0.400000
4 https://www.ptt.cc/bbs/Gossiping/M.1621029977…. Re:[問卦]防疫贏了別人一整年卻瞬間失守的關鍵? 2021/05/14 22:06:15 iampig951753 Gossiping 23 9 1 防疫94這樣啊4瞬間ㄉ事情… 4 0.634146
1929 https://www.ptt.cc/bbs/HatePolitics/M.16234895 [黑特]四分防疫六分打中央 2021/06/12 09:19:33 yien HatePolitics 20 9 1 現階段武漢肺炎還是主要敵人,不希望七分防疫、三分打柯,沒有必要… 4 0.422222
1930 https://www.ptt.cc/bbs/HatePolitics/M.16234976 [討論]防疫鬧劇 2021/06/12 11:33:48 nawussica HatePolitics 4 3 0 2021年初以來的3+11… 4 0.607843
1931 https://www.ptt.cc/bbs/HatePolitics/M.16235032 [討論]民進黨的防疫會不會太忙? 2021/06/12 13:07:20 SoFanCy HatePolitics 7 5 0 要打假訊息… 4 0.538462
1932 https://www.ptt.cc/bbs/HatePolitics/M.16235040 [討論]震驚!防疫壓力太大,新北護理師上吊亡 2021/06/12 13:20:31 MrTexas HatePolitics 168 72 16 https://reurl.cc/AkO9lK\r\n記者陳雕文/新北報導\r\n\r\n新 3 0.709677
1933 https://www.ptt.cc/bbs/HatePolitics/M.16235106 [討論]北高的防疫人員命運大不同 2021/06/12 15:10:11 tenfu HatePolitics 25 9 0 北高的防疫人員 為啥命運大不同. 1 0.405405

1934 rows × 12 columns

import torch
from torch import nn
from transformers import BertTokenizer, BertModel, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained('hfl/chinese-macbert-base')
model = BertModel.from_pretrained('hfl/chinese-macbert-base')

device = torch.device('cuda:2')
model = model.to(device)
def get_output(text):
    token = tokenizer(text,
                  truncation=True, padding=True,
                  max_length=512, return_tensors='pt')
    t = token['input_ids'].to(device)
    m = token['attention_mask'].to(device)
    i = token['token_type_ids'].to(device)
    outs = model(t, m, i).pooler_output
    return outs.detach().cpu().numpy()
from tqdm import tqdm

outputs = []
for i in tqdm(range(df.shape[0])):
    outputs.append(get_output(df.loc[i, 'sentence']).squeeze())

100%|██████████| 1934/1934 [00:47<00:00, 40.68it/s]

data_x = np.stack(outputs, axis=0)
data_x.shape

(1934, 768)

from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=4)
kmeans.fit(data_x)
# note: the samples below filter df['topic'] on values 0-3, which suggests the
# cluster labels were written back at this point, e.g. df['topic'] = kmeans.labels_

KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300, n_clusters=4, n_init=10, n_jobs=None, precompute_distances='auto', random_state=None, tol=0.0001, verbose=0)

df['topic'].hist()

# cluster 0: prevention policy
df[df['topic'] == 0].sample(5)
artUrl artTitle artDate artTime artPoster artCat commentNum push boo sentence topic gamma
435 https://www.ptt.cc/bbs/Gossiping/M.1621475202…. Re:[爆卦]首場全國防疫會議召開(反擊假訊息) 2021/05/20 01:46:40 eddisontw Gossiping 197 110 20 這個團隊真的完蛋了… 0 0.297872
1348 https://www.ptt.cc/bbs/Gossiping/M.1623037566…. [問卦]美V.S.台三大防疫政策的不同?? 2021/06/07 03:46:03 pttmovielove Gossiping 8 2 1
  1. 台灣公布「社交距離注意事項」(在室內應保持1.5公尺;室外保持1公尺距離)..
0 0.295918
1332 https://www.ptt.cc/bbs/Gossiping/M.1622995721…. [問卦]民視台灣演義會怎麼介紹今年防疫 2021/06/06 16:08:38 poeta Gossiping 14 4 1 台灣演義是民視週末的節目… 0 0.459459
440 https://www.ptt.cc/bbs/Gossiping/M.1621476351…. Re:[爆卦]首場全國防疫會議召開(反擊假訊息) 2021/05/20 02:05:48 eeccoo Gossiping 18 9 0 我剛剛看公視直播在下面留言,沒其他重要的疫情訊息有… 0 0.515625
506 https://www.ptt.cc/bbs/Gossiping/M.1621531016…. [問卦]是不是該推廣居家內防疫的重要性? 2021/05/20 17:16:54 xxx88550 Gossiping 0 0 0 目前大家都著重在 不群聚、戴口罩這些防疫基本款。,面對親人、朋友們… 0 0.307692
# cluster 1: prevention news
df[df['topic'] == 1].sample(5)
artUrl artTitle artDate artTime artPoster artCat commentNum push boo sentence topic gamma
150 https://www.ptt.cc/bbs/Gossiping/M.1621127161…. [新聞]李愛綺防疫健身房停業50萬飛了 2021/05/16 01:05:58 qwqwaas Gossiping 23 12 5 李愛綺防疫健身房停業50萬飛了://tinyurl.com/ax79kke… 1 0.418079
1579 https://www.ptt.cc/bbs/HatePolitics/M.16213966 [新聞]柯文哲防疫遭批蔡峻維怒嗆基進黨:嘴臭 2021/05/19 03:56:36 goetze HatePolitics 24 10 2 1.新聞網址︰://www.chinatimes.com/realtime… 1 0.648438
531 https://www.ptt.cc/bbs/Gossiping/M.1621576988…. [新聞]防疫旅館「加強版」急加開!柯文哲要觀 2021/05/21 06:03:05 wenge321 Gossiping 32 24 1 防疫旅館「加強版」急加開!柯文哲要觀傳局:一直開房間://www.setn… 1 0.790960
1067 https://www.ptt.cc/bbs/Gossiping/M.1622362085…. [爆卦]台北開完防疫記者會了,人民優先 2021/05/30 08:08:03 wind200625 Gossiping 1122 806 41 誠實結論1.昨日萬華快篩489人,陽性率下降3.9,但用大數據來看,染症足跡… 1 0.640845
355 https://www.ptt.cc/bbs/Gossiping/M.1621379418…. [新聞]快訊/國道火燒車!「75%防疫酒精」物流 2021/05/18 23:10:16 VladeDivac Gossiping 162 100 10 備註請放最後面 違者新聞文章刪除1.媒體來源:… 1 0.753304
# cluster 2: prevention news and politicians
df[df['topic'] == 2].sample(5)
artUrl artTitle artDate artTime artPoster artCat commentNum push boo sentence topic gamma
1555 https://www.ptt.cc/bbs/HatePolitics/M.16212575 [黑特]一個牙醫懂個屁防疫公共衛生 2021/05/17 13:18:35 moumoon5566 HatePolitics 7 2 3 自己定的防疫sop沒遵守… 2 0.441176
116 https://www.ptt.cc/bbs/Gossiping/M.1621087972…. Re:[問卦]防疫你只相信誰? 2021/05/15 14:12:48 coolhon Gossiping 49 34 2 藉本篇來說一下我最近的想法,恭喜各位,民進黨賭輸了… 2 0.622010
30 https://www.ptt.cc/bbs/Gossiping/M.1621051950…. [問卦]各位該裝防疫APP了吧(發錢) 2021/05/15 04:12:26 LeafLu Gossiping 341 284 4 武肺in台灣相關整理:://reurl.cc/7XeebD… 2 0.440678
615 https://www.ptt.cc/bbs/Gossiping/M.1621663861…. [新聞]行政院周末開防疫會議陳時中:確診率明 2021/05/22 06:10:58 sukiyasuica Gossiping 17 10 3 1.媒體來源:聯合2.記者署名:陳熙文3.完… 2 0.418239
1601 https://www.ptt.cc/bbs/HatePolitics/M.16214840 [新聞]新防疫指揮中心」成立?他指成員有這5人 2021/05/20 04:13:40 xamous HatePolitics 58 24 8 1.新聞網址︰://money.udn.com/money/story/5… 2 0.546012
# cluster 3: prevention measures
df[df['topic'] == 3].sample(5)
artUrl artTitle artDate artTime artPoster artCat commentNum push boo sentence topic gamma
1628 https://www.ptt.cc/bbs/HatePolitics/M.16215723 [討論]dpp靠反中防疫 2021/05/21 04:45:23 jt13 HatePolitics 13 2 4 守住第一波 因為他叫中國武漢肺炎… 3 0.594595
445 https://www.ptt.cc/bbs/Gossiping/M.1621478742…. [問卦]新北防疫做的超棒的八卦 2021/05/20 02:45:40 twelvethflor Gossiping 18 5 2 我朋友的工地啦!師父都不帶口罩啦!,因為不想起衝突。.. 3 0.400000
1126 https://www.ptt.cc/bbs/Gossiping/M.1622601115…. [問卦]過去一年全世界怎麼看台灣防疫? 2021/06/02 02:31:53 osalucard Gossiping 13 7 0 邊境防守... 3 0.627451
415 https://www.ptt.cc/bbs/Gossiping/M.1621429426…. [問卦]怎麼讓貓狗防疫? 2021/05/19 13:03:44 lobfo Gossiping 12 5 0 有人養貓狗用放養的… 3 0.520000
657 https://www.ptt.cc/bbs/Gossiping/M.1621711480…. Re:[問卦]防疫旅館是只給回國的住嗎? 2021/05/22 19:24:38 wind200625 Gossiping 0 0 0 這篇簡單說明一下、… 3 0.830882

Bert Classification

df['topic'].hist()

import torch
from torch import nn
from transformers import BertTokenizer, BertModel, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained('hfl/chinese-macbert-base')
model = BertForSequenceClassification.from_pretrained('hfl/chinese-macbert-base')
for p in model.parameters(): # freeze bert
    p.requires_grad = False
model.classifier = nn.Linear(768, 4, bias=True)
model.bert.encoder.layer[-1].output.dense.weight.requires_grad

False

model.classifier.weight.requires_grad

True

def tokenize_text(text):
    return tokenizer.encode(text, return_tensors='pt')
all_tokens = [tokenize_text(df.loc[i, 'sentence']) for i in range(df.shape[0])]
shapes = [t.shape[1] for t in all_tokens]
st = torch.tensor(shapes).float()
st.min(), st.max()

(tensor(28.), tensor(7344.))

import matplotlib.pyplot as plt
plt.hist(st.numpy())
plt.xlim(0, 3000)

import os
import csv
import torch
from torch.utils.data import Dataset, DataLoader, random_split
class PreventionDataset(Dataset):
    def __init__(self, mode='train', seed=1340):
        self.mode = mode
        
        texts = df['sentence'].values.tolist()
        tokens = tokenizer(texts,
                    truncation=True, padding=True,
                    max_length=512,
                    return_tensors='pt'
                )

        self.tokens = tokens['input_ids']
        self.masks = tokens['attention_mask']
        self.type_ids = tokens['token_type_ids']
        self.labels = torch.tensor(df['topic'].values - 1).long()  # shift topic labels down by 1 so they start at 0 for CrossEntropyLoss
    
    def __getitem__(self, idx):
        token = self.tokens[idx]
        mask = self.masks[idx]
        type_id = self.type_ids[idx]
        
        label = self.labels[idx]
        return (token, mask, type_id), label

    def __len__(self):
        return self.labels.shape[0]
dataset = PreventionDataset()
torch.manual_seed(1340)
train_size = int(len(dataset) * 0.7)
test_size = len(dataset) - train_size
dataset_train, dataset_test = random_split(dataset, [train_size, test_size])

batch_size = 64
train_data = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
test_data = DataLoader(dataset_test, batch_size=batch_size, shuffle=False)
from transformers import AdamW

# device = torch.device('cpu')
device = torch.device('cuda:3')

optimizer = AdamW(model.parameters(), lr=3e-3)
criterion = nn.CrossEntropyLoss()

model = model.to(device)
criterion = criterion.to(device)
def accuracy(raw_preds, y):
    preds = raw_preds.argmax(dim=1)
    acc = (preds == y).sum()
    return acc
from tqdm import tqdm

train_loss_list = []
test_loss_list = []
train_acc_list = []  # per-batch accuracies, used for the plots below
test_acc_list = []

def train(model, data, optimizer, criterion):
    model.train()
    
    epoch_loss = 0
    epoch_acc = 0
    total = 0
    for (t, m, i), label in tqdm(data, total=len(data)):
        t = t.to(device)
        m = m.to(device)
        i = i.to(device)
        label = label.to(device)
        
        optimizer.zero_grad()
        output = model(t, m, i)
#         print('o', output)
        pred = output.logits
#         print('p', pred)
#         print('p', pred)
#         print('l', label)
        loss = criterion(pred, label)
        acc = accuracy(pred, label)
        
        loss.backward()
        optimizer.step()
        
        epoch_loss += loss.item()
        train_loss_list.append(loss.item())
        epoch_acc += acc.item()
        train_acc_list.append(acc.item() / len(t))  # per-batch accuracy for the plot below
        total += len(t)
    return epoch_loss / total, epoch_acc / total

def test(model, data, criterion, log_loss=False):
    model.eval()
    
    epoch_loss = 0
    epoch_acc = 0
    total = 0
    for (t, m, i), label in tqdm(data, total=len(data)):
        t = t.to(device)
        m = m.to(device)
        i = i.to(device)
        label = label.to(device)

        output = model(t, m, i)
        pred = output.logits
        loss = criterion(pred, label)
        acc = accuracy(pred, label)
        if log_loss:
            test_loss_list.append(loss.item())
            test_acc_list.append(acc.item() / len(t))  # per-batch accuracy for the plot below
        
        epoch_loss += loss.item()
        epoch_acc += acc.item()
        total += len(t)
    return epoch_loss / total, epoch_acc / total
max_epoch = 20
log_interval = 1

best_acc = 0

for epoch in range(1, max_epoch + 1):
    train_loss, train_acc = train(model, train_data, optimizer, criterion)
    test_loss, test_acc = test(model, test_data, criterion, log_loss=True)
    
    if epoch % log_interval == 0:
        print('Epoch {} train_loss: {} train_acc: {}'.format(
            epoch, train_loss, train_acc
        ))
        print('Epoch {} test_loss:  {} test_acc : {}'.format(
            epoch, test_loss, test_acc
        ))
    
    torch.save(model.state_dict(), 'ckpts/e{}.pt'.format(epoch))

#     if val_acc > best_acc:
#         best_model = model
#         best_acc = val_acc
#         print('-'*10, 'e', epoch, 'save best model', '-'*10)

100%|██████████| 22/22 [00:17<00:00, 1.24it/s]
100%|██████████| 10/10 [00:07<00:00, 1.39it/s]

Epoch 1 train_loss: 0.021512867694242918 train_acc: 0.4523281596452328
Epoch 1 test_loss: 0.019873504486839045 test_acc : 0.5507745266781411

100%|██████████| 22/22 [00:17<00:00, 1.22it/s]
100%|██████████| 10/10 [00:07<00:00, 1.38it/s]

Epoch 2 train_loss: 0.019146774446003892 train_acc: 0.5188470066518847
Epoch 2 test_loss: 0.01883598531174783 test_acc : 0.5869191049913941

100%|██████████| 22/22 [00:18<00:00, 1.22it/s]
100%|██████████| 10/10 [00:07<00:00, 1.38it/s]

Epoch 3 train_loss: 0.018122735150373166 train_acc: 0.5461936437546193
Epoch 3 test_loss: 0.017967594880450407 test_acc : 0.6110154905335629

100%|██████████| 22/22 [00:18<00:00, 1.21it/s]
100%|██████████| 10/10 [00:07<00:00, 1.37it/s]

import matplotlib.pyplot as plt
import numpy as np

plt.figure(figsize=(16, 6))

x1 = np.linspace(1, max_epoch, len(train_loss_list))
plt.plot(x1, train_loss_list)
x2 = np.linspace(1, max_epoch, len(test_loss_list))
plt.plot(x2, test_loss_list, color='r')
plt.legend(['train_loss', 'test_loss'])
plt.show()


plt.figure(figsize=(16, 6))

x1 = np.linspace(1, max_epoch, len(train_acc_list))
plt.plot(x1, train_acc_list)
x2 = np.linspace(1, max_epoch, len(test_acc_list))
plt.plot(x2, test_acc_list, color='r')
plt.legend(['train_acc', 'test_acc'])
plt.show()

data_y = df['topic'].values
from sklearn.model_selection import train_test_split
np.random.seed(1340)
train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, train_size=0.7)
print('train x', train_x.shape)
print('test x', test_x.shape)
print('train y', train_y.shape)
print('test y', test_y.shape)

train x (1353, 768)
test x (581, 768)
train y (1353,)
test y (581,)

def train_test_acc(model):
    pred = model.predict(train_x)
    print('train acc')
    print((pred == train_y).mean())
    
    pred = model.predict(test_x)
    print('test acc')
    print((pred == test_y).mean())
from sklearn.linear_model import LogisticRegression
lg = LogisticRegression()
lg.fit(train_x, train_y)

LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, l1_ratio=None, max_iter=100, multi_class='warn', n_jobs=None, penalty='l2', random_state=None, solver='warn', tol=0.0001, verbose=0, warm_start=False)

train_test_acc(lg)

train acc
1.0
test acc
0.9672977624784854

from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100)
rf.fit(train_x, train_y)

RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=None, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=None, oob_score=False, random_state=None, verbose=0, warm_start=False)

train_test_acc(rf)

train acc
1.0
test acc
0.9432013769363167