Sys.setlocale(category = "LC_ALL", locale = "zh_TW.UTF-8") # avoid garbled Chinese text
## Warning in Sys.setlocale(category = "LC_ALL", locale = "zh_TW.UTF-8"): OS
## reports request to set locale to "zh_TW.UTF-8" cannot be honored
## [1] ""
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(stringr)
library(tidytext)
library(wordcloud2)
## Warning: package 'wordcloud2' was built under R version 3.6.3
library(data.table)
## Warning: package 'data.table' was built under R version 3.6.3
##
## Attaching package: 'data.table'
## The following objects are masked from 'package:dplyr':
##
## between, first, last
library(ggplot2)
library(reshape2)
## Warning: package 'reshape2' was built under R version 3.6.3
##
## Attaching package: 'reshape2'
## The following objects are masked from 'package:data.table':
##
## dcast, melt
library(wordcloud)
## Warning: package 'wordcloud' was built under R version 3.6.3
## Loading required package: RColorBrewer
library(tidyr)
##
## Attaching package: 'tidyr'
## The following object is masked from 'package:reshape2':
##
## smiths
library(readr)
library(scales)
##
## Attaching package: 'scales'
## The following object is masked from 'package:readr':
##
## col_factor
require(jiebaR)
## Loading required package: jiebaR
## Warning: package 'jiebaR' was built under R version 3.6.3
## Loading required package: jiebaRD
## Warning: package 'jiebaRD' was built under R version 3.6.3
require(widyr)
## Loading required package: widyr
require(NLP)
## Loading required package: NLP
##
## Attaching package: 'NLP'
## The following object is masked from 'package:ggplot2':
##
## annotate
require(ggraph)
## Loading required package: ggraph
## Warning: package 'ggraph' was built under R version 3.6.3
require(igraph)
## Loading required package: igraph
## Warning: package 'igraph' was built under R version 3.6.3
##
## Attaching package: 'igraph'
## The following object is masked from 'package:tidyr':
##
## crossing
## The following objects are masked from 'package:dplyr':
##
## as_data_frame, groups, union
## The following objects are masked from 'package:stats':
##
## decompose, spectrum
## The following object is masked from 'package:base':
##
## union
Load the PTT Gossiping board data
gossip_data = fread('./gossip_article.csv', encoding = 'UTF-8')
Filter out special characters
gossip_data = gossip_data %>%
filter(!grepl('_',word))
Convert the date format
gossip_data$artDate = gossip_data$artDate %>% as.Date("%Y/%m/%d")
Unify the tokens that refer to UberEats
UberEats = c("ubereats","ubereat","Ubereat","UBEREATS","UberEATS","uberEATS")
gossip_data$word[which(gossip_data$word %in% UberEats)] = "UberEats"
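A more defensive variant (a sketch, not part of the original run) matches any capitalization with a regular expression via stringr, which is already loaded above, so unlisted spellings such as "UberEat" are caught as well:
# case-insensitive match of "ubereat" / "ubereats", normalized to one spelling
is_uber <- str_detect(gossip_data$word, regex("^ubereats?$", ignore_case = TRUE))
gossip_data$word[is_uber] <- "UberEats"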
Find the URLs of articles that mention UberEats
ubereats_url = gossip_data$artUrl[grepl("UberEats", gossip_data$word)]
Use those URLs to pull out the UberEats articles
ubereats_data <- gossip_data %>%
  filter(artUrl %in% ubereats_url)
Remove stop words
jieba_tokenizer = worker() # initialize the jieba tokenizer (used later for bigram segmentation)
stop_words <- c("https", "com", "新聞", "完整", "沒有","有沒有","現在","八卦","jpg","imgur","news","http","www","udn","gif")
ubereats_data <- ubereats_data %>%
  filter(!(word %in% stop_words))
1. Discussion volume per day
ubereats_day <- ubereats_data %>%
group_by(artDate) %>%
summarise(count = n()) %>%
arrange(desc(count))
day_plot <- ubereats_day %>%
  ggplot(aes(x = artDate, y = count)) +
  geom_line(color = "blue", size = 1) +
  scale_x_date(labels = date_format("%Y/%m/%d")) +
  ggtitle("UberEats: daily discussion volume") +
  xlab("Date") +
  ylab("Count")
day_plot
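Note that summarise(count = n()) counts word tokens (one row per word), not posts. If the goal is the number of distinct posts per day, counting unique article URLs is more direct; a minimal sketch (ubereats_day_posts is a name introduced here for illustration):
ubereats_day_posts <- ubereats_data %>%
  group_by(artDate) %>%
  summarise(posts = n_distinct(artUrl)) %>%  # count each article URL once
  arrange(desc(posts))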
The five dates with the most posts are 2019/10/14, 2019/10/15, 2020/04/10, 2019/09/26, and 2020/03/28.
2. Article titles on those five days
ubereats_data %>%
filter(artDate == as.Date("2019/10/14") |
artDate == as.Date("2019/10/15") |
artDate == as.Date("2020/04/10") |
artDate == as.Date("2019/09/26") |
artDate == as.Date("2020/03/28")) %>%
distinct(artUrl, .keep_all = TRUE) %>%
select(artDate,artTitle)
## artDate artTitle
## 1 2019-09-26 [新聞]調查:近4成民眾想當外送員盼每週多賺75
## 2 2019-09-26 [新聞]重機外送員「雙B對決」!憤怒貓左衝車前
## 3 2019-09-26 [新聞]外送餐飲平台夯親民黨團:勞安、道安、
## 4 2019-10-14 [問卦]用電動自行車來外送可行嗎?
## 5 2019-10-14 [新聞]Uber併智利生鮮外送商
## 6 2019-10-14 [新聞]外送員連三起傷亡事故交通部長林佳龍說
## 7 2019-10-14 [爆卦]勞動部認定熊貓跟ubereats是僱傭契約
## 8 2019-10-14 [新聞]勞動部認定Foodpanda、UberEats與外送
## 9 2019-10-14 [新聞]外送平台讓生意爆紅?業者親揭殘酷真相「
## 10 2019-10-14 [新聞]外送車禍又1人!女送餐員和小貨車相撞
## 11 2019-10-14 [新聞]北市勞動局︰UberEats違7項職安法規、Fo
## 12 2019-10-14 [新聞]外送員車禍亡北市勞檢業者涉違反7項職安
## 13 2019-10-14 [新聞]又一起美食外送員車禍女騎士手腳擦傷
## 14 2019-10-14 [新聞]外送員屬僱傭關係健保署也加入戰場:得
## 15 2019-10-14 [新聞]勞部研擬外送員強制納保意外險
## 16 2019-10-14 [新聞]5天3死!外送員撞死違規過馬路老翁
## 17 2019-10-14 [問卦]慣老闆沒有奴工用開始搞外送業的卦?
## 18 2019-10-14 [新聞]勞檢認定foodpanda、UberEats假承攬真
## 19 2019-10-14 [新聞]又一人「送」命!Lalamove外送員撞死老翁
## 20 2019-10-14 [新聞]美食外送為僱傭律師:用牛刀管理將打擊
## 21 2019-10-14 [問卦]為何ubereat不能像uber一樣高素質
## 22 2019-10-14 [新聞]外送員列入雇傭關係後月入10萬夢碎?
## 23 2019-10-14 Re:[新聞]勞檢認定foodpanda、UberEats假承攬真
## 24 2019-10-14 Re:[新聞]不甩勞動部!foodpanda:不接受僱傭關係
## 25 2019-10-14 [新聞]外送爭議蘇貞昌:勞檢不再憂讒畏譏
## 26 2019-10-14 [新聞]外送事故5天3死!Lalamove外送員撞死違規
## 27 2019-10-14 Re:[新聞]不甩勞動部!foodpanda:不接受僱傭關係
## 28 2019-10-14 [新聞]【血汗外送】狠打臉熊貓! 勞檢驚揭業者兩點內規認證「無良
## 29 2019-10-14 [問卦]外送三雄大家偏好哪個????
## 30 2019-10-14 [新聞]外送員5天3死公路總局開罰foodpanda9千
## 31 2019-10-14 Re:[新聞]北市勞工局開罰UberEats認定罹災黃姓
## 32 2019-10-15 Re:[新聞]外送員5天3死公路總局開罰foodpanda9千
## 33 2019-10-15 [新聞]外送員認定雇傭時力轟勞動部怠惰「早該
## 34 2019-10-15 [新聞]外送月薪18萬拿命在賺
## 35 2019-10-15 [新聞]士林外送員遭夾死,行車記錄器曝光
## 36 2019-10-15 Re:[爆卦]外送擋到誰的財路..
## 37 2019-10-15 [新聞]嘉義市美食外送員再傳2起車禍1人受傷送
## 38 2019-10-15 [問卦]為什麼會說機車外送食物是新型態工作?
## 39 2019-10-15 Re:[新聞]士林外送員遭夾死,行車記錄器曝光
## 40 2019-10-15 Re:[新聞]士林外送員遭夾死,行車記錄器曝光
## 41 2019-10-15 [新聞]外送事故頻傳他騎單車送UberEats半路卻
## 42 2019-10-15 Re:[新聞]自己選擇自己負責外送員挺foodpanda承攬
## 43 2019-10-15 Re:[新聞]勞檢認定foodpanda、UberEats假承攬真
## 44 2019-10-15 [新聞]外送員要10萬不要命?45分飆桃園
## 45 2019-10-15 Re:[新聞]Ubereats也不認雇傭關係 勞動部:業者
## 46 2020-03-28 [新聞]今年前二月機車肇事增加外送平台佔4.25%
## 47 2020-03-28 [新聞]寧夏夜市靠外送突圍下一步要走向社交電
## 48 2020-04-10 [新聞]5業者組外送國家隊救餐飲!UberEats、
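The five-way OR filter above can be written more compactly with %in%; a minimal sketch (top5_dates is a name introduced here for illustration):
top5_dates <- as.Date(c("2019/10/14", "2019/10/15", "2020/04/10",
                        "2019/09/26", "2020/03/28"))
ubereats_data %>%
  filter(artDate %in% top5_dates) %>%
  distinct(artUrl, .keep_all = TRUE) %>%
  select(artDate, artTitle)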
The most frequent words on the five busiest days
ubereats_day %>%
top_n(10)
## Selecting by count
## # A tibble: 10 x 2
## artDate count
## <date> <int>
## 1 2019-10-14 3077
## 2 2019-10-15 1715
## 3 2019-10-13 1498
## 4 2019-10-24 824
## 5 2019-10-18 677
## 6 2019-10-16 615
## 7 2019-09-26 467
## 8 2019-10-23 464
## 9 2020-03-28 435
## 10 2019-10-17 409
plot_top5 <- ubereats_data %>%
filter(artDate == as.Date("2019/10/14") |
artDate == as.Date("2019/10/15") |
artDate == as.Date("2020/04/10") |
artDate == as.Date("2019/09/26") |
artDate == as.Date("2020/03/28")) %>%
group_by(artDate) %>%
top_n(7, count) %>%
ungroup() %>%
mutate(word = reorder(word, count)) %>%
ggplot(aes(x=word, y=count, fill = artDate)) +
geom_col(show.legend = FALSE) +
labs(x = NULL, y = NULL) +
facet_wrap(~artDate, scales="free", ncol = 2) +
coord_flip()
plot_top5
Inferred events behind these dates: 2019/10/14 and 2019/10/15: delivery-rider traffic accidents and the employment-relationship ruling; 2020/04/10: the food-delivery "national team" and Ministry of Economic Affairs subsidies; 2019/09/26: the rise of delivery platforms; 2020/03/28: the partnership with Ningxia Night Market.
ubereat_word <- ubereats_data %>%
select(artUrl, word, count)
# collapse each article's tokens back into a single string
ubereats_sentence = ubereats_data %>%
group_by(artUrl) %>%
summarise(sentence = paste0(word, collapse = ""))
# initialize the jieba tokenizer used for segmentation below
jieba_tokenizer = worker()
# bigram tokenizer used with unnest_tokens
# Input: a character vector
# Output: a list of character vectors of the same length
jieba_bigram <- function(t) {
  lapply(t, function(x) {
    if (nchar(x) > 1) {
      tokens <- segment(x, jieba_tokenizer)  # segment the text with jieba
      bigram <- ngrams(tokens, 2)            # build 2-grams with NLP::ngrams
      bigram <- lapply(bigram, paste, collapse = " ")
      unlist(bigram)
    }
  })
}
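The same pattern generalizes to any n-gram size; a sketch of a factory function (jieba_ngram is a name introduced here) that can be passed to unnest_tokens in the same way:
jieba_ngram <- function(n) {
  function(t) {
    lapply(t, function(x) {
      if (nchar(x) > 1) {
        tokens <- segment(x, jieba_tokenizer)
        grams <- ngrams(tokens, n)  # NLP::ngrams builds the n-grams
        unlist(lapply(grams, paste, collapse = " "))
      }
    })
  }
}
# e.g. trigrams: unnest_tokens(trigram, sentence, token = jieba_ngram(3))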
# bigram tokenization
ubereat_bigram <- ubereats_sentence %>%
unnest_tokens(bigram, sentence, token = jieba_bigram)
ubereat_bigram
## # A tibble: 21,915 x 2
## artUrl bigram
## <chr> <chr>
## 1 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 店家 外送員
## 2 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 外送員 排店
## 3 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 排店 編號
## 4 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 編號 店員
## 5 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 店員 受害人
## 6 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 受害人 羅先生
## 7 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 羅先生 東森
## 8 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 東森 總公司
## 9 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 總公司 取消
## 10 https://www.ptt.cc/bbs/Gossiping/M.1567407974.A.E30.html 取消 這張
## # ... with 21,905 more rows
ubereat_bigram %>%
count(bigram, sort = TRUE)
## # A tibble: 20,245 x 2
## bigram n
## <chr> <int>
## 1 連結 網址 75
## 2 媒體 來源 72
## 3 外送 平台 67
## 4 記者 署名 33
## 5 報導 新聞標題 18
## 6 台北 報導 17
## 7 美食 外送 14
## 8 署名 新聞標題 13
## 9 網址 story 13
## 10 熊貓 ubereats 12
## # ... with 20,235 more rows
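The top bigrams are dominated by news-template boilerplate (連結 網址, 媒體 來源, and so on). One way to strip these (a sketch; template_words is an assumed list read off the counts above) is to split each bigram with tidyr::separate, filter, and rejoin with unite:
template_words <- c("連結", "網址", "媒體", "來源", "記者", "署名", "報導", "新聞標題")
ubereat_bigram %>%
  separate(bigram, c("w1", "w2"), sep = " ") %>%  # split the bigram into two words
  filter(!w1 %in% template_words, !w2 %in% template_words) %>%
  unite(bigram, w1, w2, sep = " ") %>%            # rejoin the remaining pairs
  count(bigram, sort = TRUE)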
# total number of times two words co-occur in the same article
word_pairs <- ubereat_word %>%
pairwise_count(word, artUrl, sort = TRUE)
# correlation between pairs of words
word_cors <- ubereat_word %>%
group_by(word) %>%
filter(n() >= 20) %>%
pairwise_cor(word, artUrl, sort = TRUE)
# words most highly correlated with UberEats
word_cors %>%
filter(item1 == "UberEats") %>%
head(10)
## # A tibble: 10 x 3
## item1 item2 correlation
## <chr> <chr> <dbl>
## 1 UberEats 熊貓 0.127
## 2 UberEats 是不是 0.111
## 3 UberEats foodpanda 0.0955
## 4 UberEats 這麼 0.0911
## 5 UberEats 網友 0.0890
## 6 UberEats 客人 0.0847
## 7 UberEats 剛剛 0.0847
## 8 UberEats 遇到 0.0847
## 9 UberEats 直接 0.0760
## 10 UberEats 不是 0.0760
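The same top ten is easier to compare as a bar chart; a minimal sketch reusing the already-loaded ggplot2:
word_cors %>%
  filter(item1 == "UberEats") %>%
  top_n(10, correlation) %>%
  mutate(item2 = reorder(item2, correlation)) %>%  # order bars by correlation
  ggplot(aes(x = item2, y = correlation)) +
  geom_col(fill = "lightblue") +
  coord_flip()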
# pick a few seed words
seed_words <- c("udn","網址","許多","www","報導")
# set the threshold to 0.5
threshold <- 0.5
# words whose correlation with a seed word exceeds the threshold are added to the removal list
remove_words <- word_cors %>%
filter((item1 %in% seed_words|item2 %in% seed_words), correlation>threshold) %>%
.$item1 %>%
unique()
remove_words
## [1] "網址" "新聞標題" "來源" "連結" "署名" "備註"
## [7] "媒體" "內文" "記者" "報導" "台北" "表示"
# drop pairs that contain any of these words
word_cors_new <- word_cors %>%
filter(!(item1 %in% remove_words|item2 %in% remove_words))
word_cors_new %>%
filter(correlation > 0.4) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
geom_node_point(color = "lightblue", size = 3) +
geom_node_text(aes(label = name), repel = TRUE) +
theme_void()
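One caveat worth noting: ggraph's "fr" layout is randomized, so each knit arranges the nodes differently. Fixing the RNG seed immediately before the pipeline above makes the network reproducible (the seed value here is arbitrary):
set.seed(2020)  # fix the layout of the force-directed graph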
ubereats_1031 <- ubereats_data %>%
  filter(artDate > as.Date("2019/10/31"))
1. Discussion volume per day after 10/31
ubereats_1031_day <- ubereats_1031 %>%
group_by(artDate) %>%
summarise(count = n()) %>%
arrange(desc(count))
ubereats_1031_day
## # A tibble: 77 x 2
## artDate count
## <date> <int>
## 1 2020-03-28 435
## 2 2019-11-08 324
## 3 2019-11-22 272
## 4 2020-04-18 255
## 5 2019-11-05 251
## 6 2019-11-21 235
## 7 2019-11-11 227
## 8 2020-04-12 224
## 9 2019-11-14 216
## 10 2020-04-15 206
## # ... with 67 more rows
day_1031_plot <- ubereats_1031_day %>%
  ggplot(aes(x = artDate, y = count)) +
  geom_line(color = "blue", size = 1) +
  scale_x_date(labels = date_format("%m/%d")) +
  ggtitle("UberEats: daily discussion volume after 10/31") +
  xlab("Date") +
  ylab("Count")
day_1031_plot
2. The five dates with the most posts
ubereats_1031_day %>%
top_n(5)
## Selecting by count
## # A tibble: 5 x 2
## artDate count
## <date> <int>
## 1 2020-03-28 435
## 2 2019-11-08 324
## 3 2019-11-22 272
## 4 2020-04-18 255
## 5 2019-11-05 251
The most frequent words on five high-volume days (the analysis below uses the 2020 dates)
plot_top5 <- ubereats_1031 %>%
filter(artDate == as.Date("2020/03/28") |
artDate == as.Date("2020/04/18") |
artDate == as.Date("2020/04/12") |
artDate == as.Date("2020/04/15") |
artDate == as.Date("2020/04/10")) %>%
group_by(artDate) %>%
top_n(5, count) %>%
ungroup() %>%
mutate(word = reorder(word, count)) %>%
ggplot(aes(x=word, y=count, fill = artDate)) +
geom_col(show.legend = FALSE) +
labs(x = NULL, y = NULL) +
facet_wrap(~artDate, scales="free", ncol = 3) +
coord_flip()
plot_top5
ubereats_1031 %>%
filter(artDate == as.Date("2020/03/28") |
artDate == as.Date("2020/04/18") |
artDate == as.Date("2020/04/12") |
artDate == as.Date("2020/04/15") |
artDate == as.Date("2020/04/10")) %>%
distinct(artUrl, .keep_all = TRUE) %>%
select(artDate,artTitle)
## artDate artTitle
## 1 2020-03-28 [新聞]今年前二月機車肇事增加外送平台佔4.25%
## 2 2020-03-28 [新聞]寧夏夜市靠外送突圍下一步要走向社交電
## 3 2020-04-10 [新聞]5業者組外送國家隊救餐飲!UberEats、
## 4 2020-04-12 [新聞]UberEats外送員遭撞送醫不治肇事男遭起
## 5 2020-04-12 [新聞]美食外送員被2車夾死檢察官起訴後車
## 6 2020-04-12 [問卦]開始使用外送後平均都胖幾公斤?
## 7 2020-04-15 Re:[問卦]騎gogoro吃到飽方案外送被罰死怎辦
## 8 2020-04-15 [新聞]外送員慘了!Gogoro嚴查違規車強制升級
## 9 2020-04-18 [問卦]ubereats被盜刷四千元
## 10 2020-04-18 [新聞]外送員趴趴走憂成防疫破口
3/28: Ningxia Night Market; 4/10: subsidies; 4/12: fatal traffic accident; 4/15: Gogoro flat-rate plan crackdown; 4/18: feared epidemic-prevention loophole
3. Count each day's positive and negative words with the LIWC dictionary
# use the LIWC dictionary to classify each word in the corpus as positive or negative
# positive-word dictionary (a comma-separated txt file)
P <- read_file("./positive.txt")
# negative-word dictionary (a comma-separated txt file)
N <- read_file("./negative.txt")
# split each string on commas; strsplit returns a list, so take its first element
P = strsplit(P, ",")[[1]]
N = strsplit(N, ",")[[1]]
# build data frames with two columns (word, sentiment); keep word as character
# so inner_join does not need to coerce factors
P = data.frame(word = P, sentiment = "positive", stringsAsFactors = FALSE)
N = data.frame(word = N, sentiment = "negative", stringsAsFactors = FALSE)
LIWC = rbind(P, N)
sentiment_count = ubereats_1031 %>%
select(artDate,word,count) %>%
inner_join(LIWC) %>%
group_by(artDate,sentiment) %>%
summarise(count=sum(count))
## Joining, by = "word"
sentiment_count %>%
arrange(desc(count))
## # A tibble: 106 x 3
## # Groups: artDate [63]
## artDate sentiment count
## <date> <chr> <int>
## 1 2020-03-28 positive 16
## 2 2019-11-25 positive 15
## 3 2020-04-10 positive 14
## 4 2020-04-18 negative 13
## 5 2019-11-14 negative 11
## 6 2019-11-27 positive 11
## 7 2020-04-12 negative 11
## 8 2020-04-21 negative 11
## 9 2019-11-05 positive 9
## 10 2019-11-08 negative 9
## # ... with 96 more rows
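To compare days on a single axis, the positive and negative counts can be spread into columns and differenced; a minimal sketch with tidyr::spread (the net column is an addition, not computed in the original):
sentiment_count %>%
  spread(sentiment, count, fill = 0) %>%  # one column per sentiment
  mutate(net = positive - negative) %>%   # net sentiment per day
  arrange(net)                            # most negative days first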
Sentiment comparison cloud
ubereats_1031 %>%
select(word,count) %>%
inner_join(LIWC) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("royalblue1", "royalblue4"), max.words = 100)
## Joining, by = "word"
sentiment_count %>%
  ggplot() +
  geom_line(aes(x = artDate, y = count, colour = sentiment)) +
  scale_x_date(labels = date_format("%m/%d")) +
  # red lines mark the two most positive days, blue the two most negative
  geom_vline(xintercept = as.numeric(as.Date("2019/11/25")), colour = "red") +
  geom_vline(xintercept = as.numeric(as.Date("2020/03/28")), colour = "red") +
  geom_vline(xintercept = as.numeric(as.Date("2020/04/18")), colour = "blue") +
  geom_vline(xintercept = as.numeric(as.Date("2019/11/14")), colour = "blue")
Articles on the two days with the most positive sentiment
ubereats_1031 %>%
filter(artDate == as.Date("2019/11/25") |
artDate == as.Date("2020/03/28") ) %>%
distinct(artUrl, .keep_all = TRUE) %>%
select(artDate,artTitle)
## artDate artTitle
## 1 2019-11-25 [新聞]吃得苦中苦外送員月薪打趴上班族
## 2 2020-03-28 [新聞]今年前二月機車肇事增加外送平台佔4.25%
## 3 2020-03-28 [新聞]寧夏夜市靠外送突圍下一步要走向社交電
11/25: delivery riders' monthly pay; 03/28: Ningxia Night Market
Articles on the two days with the most negative sentiment
ubereats_1031 %>%
filter(artDate == as.Date("2019/11/14") |
artDate == as.Date("2020/04/18") ) %>%
distinct(artUrl, .keep_all = TRUE) %>%
select(artDate,artTitle)
## artDate artTitle
## 1 2019-11-14 Re:[問卦]UberEats有限制一定要用機車嗎?
## 2 2019-11-14 [新聞]UBEREATS送餐員撞傷女騎士業務過失傷害
## 3 2020-04-18 [問卦]ubereats被盜刷四千元
## 4 2020-04-18 [新聞]外送員趴趴走憂成防疫破口
11/14: a rider injured a scooter driver; 04/18: credit-card fraud and the feared epidemic-prevention loophole
covid <- c("防疫", "疫情", "檢疫", "居家")  # epidemic-related keywords
data_ubereats_co <- ubereats_data %>%
  filter(word %in% covid) %>%
  distinct(artUrl, .keep_all = TRUE) %>%
  select(artTitle, artDate, word)
data_ubereats_co
## artTitle artDate word
## 1 [問卦]外送人員是不是武漢肺炎的高風險職業? 2020-01-28 居家
## 2 [問卦]這波疫情會讓外送平台發大財嗎? 2020-02-04 疫情
## 3 [問卦]Ubereat外送員說他有送過隔離的人怎辦 2020-02-09 居家
## 4 Re:[問卦]外送員會不會變成防疫破口? 2020-03-25 居家
## 5 [新聞]今年前二月機車肇事增加外送平台佔4.25% 2020-03-28 疫情
## 6 [新聞]寧夏夜市靠外送突圍下一步要走向社交電 2020-03-28 疫情
## 7 [新聞]5業者組外送國家隊救餐飲!UberEats、 2020-04-10 疫情
## 8 [新聞]外送員慘了!Gogoro嚴查違規車強制升級 2020-04-15 疫情
## 9 [新聞]點餐平台不提供口罩外送員自抗風險 2020-04-17 防疫
## 10 [新聞]外送員趴趴走憂成防疫破口 2020-04-18 居家
## 11 [新聞]UberEats的復仇!早餐店嗆:嘴巴閉閉乖 2020-04-21 疫情