parent
330c84e7fc
commit
8202de20a5
@ -0,0 +1,41 @@
|
||||
import jieba
import wordcloud
import imageio
import pandas as pd  # used to read the Excel workbook


# Read the image whose non-white area defines the word cloud's shape (mask).
# NOTE(review): `imageio.imread` is deprecated since imageio v2.x — prefer
# `imageio.v2.imread` once the installed version is confirmed.
img = imageio.imread('四叶草1.png')

# Load every column of the Excel file into a DataFrame.
df = pd.read_excel('top_ai_danmakus.xlsx')

# Concatenate every cell of every column (coerced to str) into one long
# text. A single ' '.join over a generator avoids the quadratic cost of
# repeated `+=` string concatenation; the trailing space matches the
# original loop's output so downstream segmentation is unchanged.
book = ' '.join(
    ' '.join(df[column].astype(str)) for column in df.columns
) + ' '

# Segment the Chinese text with jieba, then re-join with spaces — the
# whitespace-delimited format that WordCloud.generate expects.
book_list = jieba.lcut(book)
book_str = ' '.join(book_list)

# Print the segmented text for a quick sanity check.
print(book_str)

# Configure the word cloud.
wc = wordcloud.WordCloud(
    width=500,
    height=500,
    background_color='white',
    mask=img,  # image array that shapes the cloud
    stopwords={'main', 'Taipei', 'afraid', 'aiden', 'Britain'},  # words to exclude
    font_path='msyh.ttc'  # CJK-capable font path — required to render Chinese glyphs
)

# Generate the word cloud from the segmented text.
wc.generate(book_str)

# Save the rendered word cloud image.
wc.to_file('词云图.png')
|
Loading…
Reference in new issue