|
|
|
|
import requests
|
|
|
|
|
from bs4 import BeautifulSoup
|
|
|
|
|
import re
|
|
|
|
|
import time
|
|
|
|
|
import random
|
|
|
|
|
import jieba # 结巴分词 pip install jieba
|
|
|
|
|
import wordcloud # 词云图 pip install wordcloud
|
|
|
|
|
import imageio # 读取本地图片 修改词云图形
|
|
|
|
|
import matplotlib.pyplot as plt
|
|
|
|
|
# Browser-like User-Agent sent with every request so Bilibili serves the
# normal page instead of rejecting the client as an obvious bot.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0' }
|
|
|
|
|
# Fetch the content of a search-results page
def get_search_page(search_url, timeout=10):
    """Fetch a Bilibili search-results page and return its HTML.

    Args:
        search_url: Full URL of the search-results page.
        timeout: Seconds to wait for the server before aborting; prevents
            the scraper from hanging forever on a stalled connection.

    Returns:
        The response body as text.

    Raises:
        requests.HTTPError: If the server answers with a 4xx/5xx status.
    """
    response = requests.get(search_url, headers=headers, timeout=timeout)
    response.raise_for_status()  # fail fast on HTTP errors
    return response.text
|
|
|
|
|
|
|
|
|
|
# Pull every video link out of a search-results page
def extract_video_links(page_content):
    """Return the href of every video card found in *page_content*.

    The CSS selector mirrors Bilibili's current search-page markup and
    may need adjusting if the site changes its structure.
    """
    soup = BeautifulSoup(page_content, 'html.parser')
    selector = ".video-list.row div.bili-video-card > div > a"
    return [anchor.get('href') for anchor in soup.select(selector)]
|
|
|
|
|
|
|
|
|
|
# Extract a video's BV id from its URL
def extract__BV(video_url):
    """Extract the BV-id path segment from a Bilibili video URL.

    The character class excludes '?' and '#' as well as '/', so query
    strings and fragments (e.g. '/video/BV1xx?p=2') are not glued onto
    the id — the raw id is required by the view API later on.

    Args:
        video_url: Any URL (absolute or protocol-relative) containing
            a '/video/<id>' segment.

    Returns:
        The BV id string, or None when the URL has no /video/ segment.
    """
    video_id_match = re.search(r'/video/([^/?#]+)', video_url)
    return video_id_match.group(1) if video_id_match else None
|
|
|
|
|
|
|
|
|
|
def get_cid_from_bv(bv_id, timeout=10):
    """Resolve a BV id to the video's cid via Bilibili's view API.

    Args:
        bv_id: The video's BV id (as returned by extract__BV).
        timeout: Seconds to wait for the API before aborting.

    Returns:
        The cid as an int, or None when the API reports a failure.

    Raises:
        requests.HTTPError: If the server answers with a 4xx/5xx status.
    """
    # Video-detail API endpoint
    video_url = f'https://api.bilibili.com/x/web-interface/view?bvid={bv_id}'

    response = requests.get(video_url, headers=headers, timeout=timeout)
    response.raise_for_status()
    data = response.json()

    # code == 0 marks an API-level success; anything else means the
    # lookup failed (bad id, deleted video, rate limiting, ...).
    if data.get('code') == 0:
        return data.get('data', {}).get('cid')
    return None
|
|
|
|
|
def get_danmu(id):
    """Download the danmu (bullet-comment) XML for a video and return the texts.

    Args:
        id: The video's cid (as resolved by get_cid_from_bv).

    Returns:
        List of comment strings with all spaces removed.
    """
    video_url = f'https://api.bilibili.com/x/v1/dm/list.so?oid={id}'
    response = requests.get(video_url, headers=headers, timeout=10)
    response.encoding = 'utf-8'  # the danmu feed is UTF-8 encoded
    # Explicit parser avoids bs4's "no parser specified" warning and
    # platform-dependent parser selection.
    soup = BeautifulSoup(response.text, 'html.parser')
    # Each <d> element is one bullet comment; its string is the comment
    # body (the "p" attribute holds timing/metadata, which we don't need).
    # Skip empty <d/> tags whose .string is None to avoid a crash.
    return [tag.string.replace(' ', '') for tag in soup.findAll('d') if tag.string]
|
|
|
|
|
def page(url, num):
    """Build the URL for result page *num*+1 of a search.

    Args:
        url: Base search URL (a query string with no page parameter yet).
        num: Zero-based page index; Bilibili's pages are 1-based.

    Returns:
        The base URL with '&page=<num+1>' appended.
    """
    # Use the caller-supplied base URL instead of re-hardcoding the
    # search keyword (the original ignored its *url* parameter entirely).
    return f'{url}&page={num + 1}'
|
|
|
|
|
|
|
|
|
|
# Entry point for one search page
def main(search_url):
    """Fetch one search-results page and resolve every listed video to its cid.

    Returns a list of cids; entries may be None when a cid lookup fails,
    matching the behaviour callers already rely on.
    """
    html = get_search_page(search_url)
    links = extract_video_links(html)
    # Keep only the links from which a BV id could actually be parsed.
    bv_ids = [bv for bv in (extract__BV(link) for link in links) if bv]
    # Failed lookups deliberately stay in the list as None.
    return [get_cid_from_bv(bv) for bv in bv_ids]
|
|
|
|
|
|
|
|
|
|
# Example search URL (swap the keyword to analyse a different topic)
search_url = 'https://search.bilibili.com/video?keyword=2024巴黎奥运会'

alltxt = []

# Walk the first 10 result pages, harvesting danmu from every video found.
for page_index in range(10):
    for cid in main(page(search_url, page_index)):
        alltxt = alltxt + get_danmu(cid)
        # Randomised pause (0–4 s) between requests to avoid hammering the API.
        time.sleep(random.randint(0, 3) + random.random())

# Join every comment into one string, segment it with jieba, and keep only
# tokens longer than one character (drops '?', '哈', '啊' and similar noise).
danmustr = ''.join(piece for piece in alltxt)
words = [w for w in jieba.cut(danmustr) if len(w) > 1]

# Render the word cloud; simsun.ttc supplies a CJK-capable font.
wc = wordcloud.WordCloud(height=1000, width=1000, font_path='simsun.ttc')
wc.generate(' '.join(words))
print(wc)

plt.imshow(wc)
plt.show()
|
|
|
|
|
|