# Reconstructed from patch 42a10ae8a90816df30289a423010af1f2e573a2d
# (QMZ <1164250597@qq.com>, 2024-09-14, "demo 1.0 — fetch danmaku for every
#  video on a single search page"; original creates demo.py).
"""Scrape a Bilibili search-results page, collect the danmaku (bullet
comments) of every listed video, and render them as a word cloud."""

import re

import requests
from bs4 import BeautifulSoup
import jieba                    # Chinese word segmentation: pip install jieba
import wordcloud                # word-cloud rendering:      pip install wordcloud
import imageio                  # NOTE(review): unused; original intended it for custom cloud masks
import matplotlib.pyplot as plt

# Desktop-browser UA so Bilibili serves the normal HTML / API responses.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0'}


def get_search_page(search_url):
    """Return the HTML of a Bilibili search-results page.

    Raises requests.HTTPError on a non-2xx response.
    """
    response = requests.get(search_url, headers=headers)
    response.raise_for_status()  # make failures explicit
    return response.text


def extract_video_links(page_content):
    """Return the href of every video card on a search-results page.

    NOTE(review): the CSS selector is tied to Bilibili's current markup and
    may need adjusting when the site changes.
    """
    soup = BeautifulSoup(page_content, 'html.parser')
    return [a_tag.get('href')
            for a_tag in soup.select(".video-list.row div.bili-video-card > div > a")]


def extract__BV(video_url):
    """Return the BV id embedded in a ``/video/<BV...>`` URL, or None."""
    video_id_match = re.search(r'/video/([^/]+)', video_url)
    return video_id_match.group(1) if video_id_match else None


def get_cid_from_bv(bv_id):
    """Resolve a BV id to the video's cid via the Bilibili view API.

    Returns None when the API reports an error (``code != 0``).
    Raises requests.HTTPError on a non-2xx response.
    """
    video_url = f'https://api.bilibili.com/x/web-interface/view?bvid={bv_id}'
    response = requests.get(video_url, headers=headers)
    response.raise_for_status()
    data = response.json()
    if data.get('code') == 0:
        return data.get('data', {}).get('cid')
    return None


def get_danmu(id):
    """Fetch the danmaku strings for one video cid (parameter kept as ``id``
    for interface compatibility, although it shadows the builtin).

    The legacy ``list.so`` endpoint returns XML where each ``<d>`` element is
    one bullet comment; its ``p`` attribute (metadata/virtual id) is ignored.
    Spaces inside each comment are removed, matching the original behaviour.
    """
    video_url = f'https://api.bilibili.com/x/v1/dm/list.so?oid={id}'
    response = requests.get(video_url, headers=headers)
    response.encoding = 'utf-8'
    # Explicit parser: the original BeautifulSoup(html) call emitted a
    # GuessedAtParserWarning and picked whatever parser was installed.
    soup = BeautifulSoup(response.text, 'html.parser')
    # Skip empty <d> elements: their .string is None and the original code
    # crashed on None.replace(...).
    return [d.string.replace(' ', '') for d in soup.find_all("d") if d.string]


def main(search_url):
    """Return the cid of every resolvable video on one search-results page.

    Links without a BV id, and BV ids the API cannot resolve, are skipped
    (the original appended None cids, which were later fed to get_danmu).
    """
    page_content = get_search_page(search_url)
    cids = []
    for link in extract_video_links(page_content):
        bv = extract__BV(link)
        if not bv:
            continue
        cid = get_cid_from_bv(bv)
        if cid is not None:
            cids.append(cid)
    return cids


if __name__ == "__main__":
    # Guarded so importing this module no longer fires network requests.
    # Example search page URL (replace with the actual one you want).
    search_url = 'https://search.bilibili.com/all?keyword=2024巴黎奥运会'
    alltxt = []
    for cid in main(search_url):
        alltxt += get_danmu(cid)
    danmustr = ''.join(alltxt)                       # join all danmaku into one text
    words = [w for w in jieba.cut(danmustr) if len(w) > 1]  # drop 1-char tokens (?, 哈, 啊, …)
    wc = wordcloud.WordCloud(height=1000, width=1000, font_path='simsun.ttc')
    wc.generate(' '.join(words))
    print(wc)
    plt.imshow(wc)
    plt.show()