library(text2vec)
library(jiebaR)
## Loading required package: jiebaRD
library(ggplot2)
library(wordcloud)
## Loading required package: RColorBrewer
library(e1071)  # naive Bayes classifier
catalog <- read.csv("C:/Users/Lenovo/Desktop/新建文件夹/大三上/机器学习/catalogs.csv", stringsAsFactors = FALSE)
# Inspect the data structure
str(catalog)
## 'data.frame':    69494 obs. of  4 variables:
##  $ name  : chr  "双汇 猪舌 700g/袋" "(满38减10)黑猪腊肉 湘西腊肉 即食小吃食猪肉干肉脯湖南张家界土家 香辣味" "精气神 猪肋排段(排骨) 400g/袋 山黑猪  黑猪肉 林间散养" "双汇 猪五花肉片 300g/袋 整肉原切 火锅食材" ...
##  $ first : chr  "生鲜" "生鲜" "生鲜" "生鲜" ...
##  $ second: chr  "猪牛羊肉" "猪牛羊肉" "猪牛羊肉" "猪牛羊肉" ...
##  $ third : chr  "猪肉" "猪肉" "猪肉" "猪肉" ...
grain_oil_seasoning <- subset(catalog, second == "粮油调味")
third_counts <- table(grain_oil_seasoning$third)

# Pie chart of the third-level category distribution
pie_data <- data.frame(third = names(third_counts), count = as.vector(third_counts))
ggplot(pie_data, aes(x = "", y = count, fill = third)) +
  geom_bar(stat = "identity", width = 1) +
  coord_polar("y") +
  theme_void() +
  labs(title = "Third-level category distribution within 粮油调味")

# Interpretation
cat("The pie chart shows the third-level category distribution within 粮油调味: 调味品 (seasonings) has the largest share and 有机食品 (organic food) the smallest.\n")
## The pie chart shows the third-level category distribution within 粮油调味: 调味品 (seasonings) has the largest share and 有机食品 (organic food) the smallest.
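# (Optional sketch) The claim above can be checked numerically: sorting the
# class proportions shows which third-level categories are largest and
# smallest. Uses only third_counts computed above.
sort(round(prop.table(third_counts), 3), decreasing = TRUE)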
# Set up the jieba word segmenter
seg <- worker(bylines = TRUE)
# Load the user-defined dictionary and add its entries to the segmenter
new_words <- read.table("C:/Users/Lenovo/Desktop/新建文件夹/大三上/机器学习/userdict.dat", header = FALSE, stringsAsFactors = FALSE)
new_user_word(seg, new_words$V1)
## [1] TRUE
# Segment the product names in the 粮油调味 category
grain_oil_seasoning$words <- sapply(grain_oil_seasoning$name, function(x) {
  segment(x, seg)  # tokens for one product name, as a character vector
})

# Inspect the segmentation results
print(head(grain_oil_seasoning$words))
## $麻辣小海鲜即食麻辣八爪鱼扇贝大虾蚬子鲍鱼海螺海鲜熟食即食罐装500g
##  [1] "麻辣"     "小"       "海鲜即食" "麻辣"     "八爪鱼"   "扇贝"    
##  [7] "大虾"     "蚬子"     "鲍鱼"     "海螺"     "海鲜熟食" "即食"    
## [13] "罐装"     "500"      "g"       
## 
## $`天地粮人 有机 高粱米(东北 粗粮杂粮 粗粮 大米 粥米伴侣)330g`
##  [1] "天地"   "粮人"   "有机"   "高粱米" "东北"   "粗粮"   "杂粮"   "粗粮"  
##  [9] "大米"   "粥"     "米"     "伴侣"   "330"    "g"     
## 
## $福来康泰有机黄金亚麻籽粉熟亚麻子粉胡麻籽粉350g即食炒货开罐即食
##  [1] "福来"   "康泰"   "有机"   "黄金"   "亚麻籽" "粉熟"   "亚麻子" "粉"    
##  [9] "胡麻"   "籽粉"   "350"    "g"      "即食"   "炒货"   "开罐"   "即食"  
## 
## $`得尔乐 有机山茶油礼盒 茶籽油1L*2瓶食用油 压榨一级`
##  [1] "得尔乐" "有机"   "山茶油" "礼盒"   "茶籽油" "1"      "L"      "2"     
##  [9] "瓶"     "食用油" "压榨"   "一级"  
## 
## $`天地粮人 有机 豆浆豆(黄豆 红小豆 绿豆 黑豆 红皮花生等)1.25kg`
##  [1] "天地"     "粮人"     "有机"     "豆浆"     "豆"       "黄豆"    
##  [7] "红小豆"   "绿豆"     "黑豆"     "红皮花生" "等"       "1.25"    
## [13] "kg"      
## 
## $`天地粮人 有机 绿豆(可发豆芽 打豆浆 东北 粗粮杂粮 大米伴侣)1.25kg`
##  [1] "天地" "粮人" "有机" "绿豆" "可"   "发"   "豆芽" "打"   "豆浆" "东北"
## [11] "粗粮" "杂粮" "大米" "伴侣" "1.25" "kg"
# Collapse each token vector into a single space-separated string
grain_oil_seasoning$words <- sapply(grain_oil_seasoning$words, function(x) {
  paste(x, collapse = " ")  # join tokens with spaces
})

# Drop rows whose token string is empty
grain_oil_seasoning <- grain_oil_seasoning[nchar(grain_oil_seasoning$words) > 0, ]

# Make sure the words column is a plain character vector
grain_oil_seasoning$words <- as.character(grain_oil_seasoning$words)

# Frequency of each token across all product names
word_counts <- table(unlist(strsplit(grain_oil_seasoning$words, " ")))

# Number of tokens in each product name
word_lengths <- sapply(grain_oil_seasoning$words, function(x) {
  length(unlist(strsplit(x, " ")))  # split on spaces and count tokens
})

# Histogram of the number of tokens per product name
hist(word_lengths,
     breaks = seq(0, max(word_lengths) + 1, by = 1),
     main = "Distribution of the number of tokens per product name",
     xlab = "Number of tokens",
     ylab = "Frequency",
     col = "lightblue", 
     border = "black")

# Word cloud of token frequencies
wordcloud(words = names(word_counts), freq = word_counts, min.freq = 1,
          max.words = 100, random.order = FALSE, rot.per = 0.35,
          colors = brewer.pal(8, "Dark2"))

# Interpretation
cat("The histogram shows the distribution of the number of tokens per product name in the 粮油调味 category: roughly bell-shaped with a slight right skew. The word cloud visualizes token frequencies directly and highlights the most common terms.\n")
## The histogram shows the distribution of the number of tokens per product name in the 粮油调味 category: roughly bell-shaped with a slight right skew. The word cloud visualizes token frequencies directly and highlights the most common terms.
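# (Optional sketch) Numeric companions to the two plots above: summary
# statistics of the tokens-per-name distribution, and the 20 most frequent
# tokens behind the word cloud. Uses only word_lengths and word_counts.
summary(word_lengths)
head(sort(word_counts, decreasing = TRUE), 20)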
# Train/test split (70/30)
set.seed(1000)
id <- sample(1:nrow(grain_oil_seasoning), 0.7 * nrow(grain_oil_seasoning), FALSE)
train <- grain_oil_seasoning[id, ]
test <- grain_oil_seasoning[-id, ]

# Split the segmented text accordingly
seg_train <- train$words
seg_test <- test$words

# Build a stop-word list: pure letter/number tokens (units, weights, model codes)
# that are not in the user dictionary
stop_words <- unique(unlist(strsplit(seg_train, " ")))
stop_words <- stop_words[grepl('^([a-z]|[A-Z])+$', stop_words) | grepl('^[0-9]+(\\.)?[0-9]*(-)?[0-9]*(\\.)?[0-9]*$', stop_words)]
stop_words <- setdiff(stop_words, new_words$V1)
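# (Optional sketch) A quick sanity check that only pure letter/number tokens
# (units and weights such as "g", "kg", "500") end up in the stop-word list.
head(sort(stop_words), 20)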

# Token iterators for text2vec (the documents are already space-separated strings)
it_train <- itoken(seg_train, progressbar = FALSE)
it_test <- itoken(seg_test, progressbar = FALSE)

# Build the vocabulary from the training set
vocab <- create_vocabulary(it_train)

# Prune very rare and very common terms, then drop the stop words
pruned_vocab <- prune_vocabulary(vocab, doc_count_min = 10, doc_proportion_max = 0.75)
pruned_vocab <- pruned_vocab[!(pruned_vocab$term %in% stop_words), ]

# Keep the 1000 most frequent terms
pruned_vocab <- pruned_vocab[order(pruned_vocab$term_count, decreasing = TRUE), ]
pruned_vocab <- pruned_vocab[1:1000, ]

# Create the vectorizer from the pruned vocabulary
vectorizer <- vocab_vectorizer(pruned_vocab)

# Document-term matrices for the training and test sets
dtm_train <- create_dtm(it_train, vectorizer)
dtm_test <- create_dtm(it_test, vectorizer)

# Convert to dense matrices and binarize (term presence/absence)
dtm_train <- as.matrix(dtm_train)
dtm_test <- as.matrix(dtm_test)
dtm_train[dtm_train > 1] <- 1
dtm_test[dtm_test > 1] <- 1

# Recode each column as a categorical "0"/"1" feature; apply() returns a
# character matrix, which naiveBayes() treats as categorical rather than numeric
dtm_train <- apply(dtm_train, 2, as.factor)
dtm_test <- apply(dtm_test, 2, as.factor)

# Check the matrix dimensions
dim(dtm_train)
## [1] 7201 1000
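# (Optional sketch, not the approach used above) An equivalent way to feed
# categorical features to naiveBayes() is a data frame whose columns are
# factors with fixed levels "0"/"1"; this makes the presence/absence coding
# explicit and keeps identical levels on the training and test side.
# to_factor_df is a hypothetical helper, not part of the pipeline above.
to_factor_df <- function(m) {
  as.data.frame(lapply(as.data.frame(m), factor, levels = c("0", "1")))
}
# train_df <- to_factor_df(dtm_train); test_df <- to_factor_df(dtm_test)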
# Fit the naive Bayes classifier
classifier <- naiveBayes(dtm_train, as.factor(train$third))
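# (Optional sketch) With sparse binary features, many class/token combinations
# never co-occur in training; Laplace smoothing avoids the resulting zero
# conditional probabilities. laplace = 1 is a common value to try; whether it
# helps here would need to be checked on the test set.
# classifier_smoothed <- naiveBayes(dtm_train, as.factor(train$third), laplace = 1)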

# Predict on the test set
predictions <- predict(classifier, newdata = dtm_test)

# Compute the accuracy on the test set
accuracy <- sum(predictions == test$third) / nrow(test)
cat("测试集的预测准确率为:", accuracy * 100, "%\n")
## 测试集的预测准确率为: 91.57758 %
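# (Optional sketch) Overall accuracy can hide weak classes; a confusion matrix
# and per-class recall give a finer-grained picture. Uses only predictions and
# test from above.
conf_mat <- table(predicted = predictions, actual = test$third)
round(tapply(predictions == test$third, test$third, mean), 3)  # per-class recall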