lin 6 months ago
parent 488642136c
commit 985d3c5ec0

Binary file not shown.

@ -0,0 +1,3 @@
# Default ignored files
/shelf/
/workspace.xml

@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>

@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11 (虚拟环境测试) (2)" project-jdk-type="Python SDK" />
</project>

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/豆瓣电影.iml" filepath="$PROJECT_DIR$/.idea/豆瓣电影.iml" />
</modules>
</component>
</project>

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<excludeFolder url="file://$MODULE_DIR$/venv" />
</content>
<orderEntry type="jdk" jdkName="Python 3.11 (虚拟环境测试) (2)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

File diff suppressed because one or more lines are too long

@ -0,0 +1,54 @@
-- Create the dbmovie database if it does not exist
CREATE DATABASE IF NOT EXISTS dbmovie
CHARACTER SET utf8mb4
COLLATE utf8mb4_unicode_ci;
-- Switch to the dbmovie database
USE dbmovie;
-- Create the movie table
DROP TABLE IF EXISTS `movie`;
CREATE TABLE `movie` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`directors` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`rate` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`title` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`casts` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL,
`cover` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`detailLink` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`year` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`types` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`country` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`lang` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`time` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`movieTime` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`comment_len` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`starts` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`summary` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL,
`comments` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL,
`imgList` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL,
`movieUrl` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE = InnoDB
AUTO_INCREMENT = 1
CHARACTER SET = utf8mb4
COLLATE = utf8mb4_unicode_ci
ROW_FORMAT = Dynamic;
-- Create the user table
DROP TABLE IF EXISTS `user`;
CREATE TABLE `user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`email` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
`password` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NULL DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE = InnoDB
AUTO_INCREMENT = 1
CHARACTER SET = utf8mb4
COLLATE = utf8mb4_unicode_ci
ROW_FORMAT = Dynamic;
-- Insert data into the user table
INSERT INTO `user` (`email`, `password`) VALUES ('1453641651@qq.com', '123456');
INSERT INTO `user` (`email`, `password`) VALUES ('123@qq.com', '123456');
INSERT INTO `user` (`email`, `password`) VALUES ('1234@qq.com', '123456');

@ -0,0 +1,62 @@
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.edge.options import Options
import time
import json
# Run Edge in headless mode
options = Options()
options.add_argument("--headless")
options.add_argument("--disable-gpu")
# Instantiate the browser
driver = webdriver.Edge(options=options)
# driver = webdriver.Edge()
ip_list = []
ip_port_dict = {}
proxies = []
a = 0
for i in range(1, 10):
# Open the page
driver.get(f'https://www.kuaidaili.com/free/inha/{i}/')
# Grab the page source
html = driver.page_source
# Parse the HTML with BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
all_l = soup.select('tbody>tr')
for all_2 in all_l:
# Make sure the tr actually contains td cells
if len(all_2.select('td')) >= 2:
a += 1
ip_l = all_2.select('td')[0].text.strip()
port_l = all_2.select('td')[1].text.strip()
# Add the IP to the list and map it to its port
ip_list.append(ip_l)
ip_port_dict[ip_l] = port_l
print(f'Proxy pool: loaded entry {a}')
time.sleep(0.4)
print('Proxy pool loading finished')
# Build the proxies list
for ip in ip_list:
proxies.append({'http': f'http://{ip}:{ip_port_dict[ip]}'})
# Write the JSON file
with open('ip代理池.json', 'w', encoding='utf-8') as f:
json.dump(proxies, f, ensure_ascii=False, indent=4)
print("Finished writing ip代理池.json")
# Write a txt file
# with open('ip代理池.txt', 'w', encoding='UTF-8') as f:
# for i in proxies:
# f.write(str(i) + ',' + "\n")
# print("Finished writing ip代理池.txt")
driver.quit()
# time.sleep(999)

@ -0,0 +1,326 @@
[
{
"http": "http://114.103.81.14:8089"
},
{
"http": "http://61.158.175.38:9002"
},
{
"http": "http://118.178.121.23:8888"
},
{
"http": "http://113.208.119.142:9002"
},
{
"http": "http://220.178.4.2:8080"
},
{
"http": "http://220.248.70.237:9002"
},
{
"http": "http://114.106.146.233:8089"
},
{
"http": "http://61.191.104.206:8060"
},
{
"http": "http://125.77.25.177:8080"
},
{
"http": "http://112.194.88.13:41122"
},
{
"http": "http://203.57.255.139:3128"
},
{
"http": "http://220.248.70.237:9002"
},
{
"http": "http://114.106.170.27:8089"
},
{
"http": "http://58.20.248.139:9002"
},
{
"http": "http://112.51.96.118:9091"
},
{
"http": "http://47.242.47.64:8888"
},
{
"http": "http://112.194.91.193:41122"
},
{
"http": "http://125.77.25.178:8080"
},
{
"http": "http://203.19.38.114:1080"
},
{
"http": "http://153.101.67.170:9002"
},
{
"http": "http://222.174.178.122:4999"
},
{
"http": "http://60.12.168.114:9002"
},
{
"http": "http://116.63.129.202:6000"
},
{
"http": "http://114.106.135.165:8089"
},
{
"http": "http://112.194.91.49:41122"
},
{
"http": "http://112.194.91.152:41122"
},
{
"http": "http://58.20.248.139:9002"
},
{
"http": "http://114.103.80.199:8089"
},
{
"http": "http://125.77.25.178:8080"
},
{
"http": "http://111.3.102.207:30001"
},
{
"http": "http://8.213.151.128:3128"
},
{
"http": "http://112.51.96.118:9091"
},
{
"http": "http://8.213.151.128:3128"
},
{
"http": "http://183.165.248.4:8089"
},
{
"http": "http://154.85.58.149:80"
},
{
"http": "http://8.213.151.128:3128"
},
{
"http": "http://112.194.90.220:41122"
},
{
"http": "http://114.103.80.42:8089"
},
{
"http": "http://120.197.40.219:9002"
},
{
"http": "http://202.131.65.110:80"
},
{
"http": "http://112.194.93.67:41122"
},
{
"http": "http://112.194.90.60:41122"
},
{
"http": "http://112.194.91.210:41122"
},
{
"http": "http://114.106.172.69:8089"
},
{
"http": "http://183.165.247.55:8089"
},
{
"http": "http://223.113.80.158:9091"
},
{
"http": "http://183.165.248.143:8089"
},
{
"http": "http://120.37.121.209:9091"
},
{
"http": "http://223.113.80.158:9091"
},
{
"http": "http://111.26.177.28:9091"
},
{
"http": "http://120.197.40.219:9002"
},
{
"http": "http://47.243.92.199:3128"
},
{
"http": "http://60.174.1.129:8089"
},
{
"http": "http://103.6.177.174:8002"
},
{
"http": "http://1.194.236.229:5005"
},
{
"http": "http://112.194.91.222:41122"
},
{
"http": "http://122.114.232.137:808"
},
{
"http": "http://112.194.90.93:41122"
},
{
"http": "http://122.116.150.2:9000"
},
{
"http": "http://58.20.248.139:9002"
},
{
"http": "http://103.118.44.176:8080"
},
{
"http": "http://123.126.158.50:80"
},
{
"http": "http://58.20.248.139:9002"
},
{
"http": "http://103.118.46.61:8080"
},
{
"http": "http://8.219.97.248:80"
},
{
"http": "http://114.106.170.80:8089"
},
{
"http": "http://36.133.183.200:7890"
},
{
"http": "http://114.106.172.253:8089"
},
{
"http": "http://103.6.177.174:8002"
},
{
"http": "http://8.219.97.248:80"
},
{
"http": "http://112.194.92.119:41122"
},
{
"http": "http://39.129.73.6:443"
},
{
"http": "http://114.106.173.45:8089"
},
{
"http": "http://111.26.177.28:9091"
},
{
"http": "http://114.106.146.225:8089"
},
{
"http": "http://117.70.49.235:8089"
},
{
"http": "http://122.116.125.115:8888"
},
{
"http": "http://60.12.168.114:9002"
},
{
"http": "http://183.215.23.242:9091"
},
{
"http": "http://183.165.250.253:8089"
},
{
"http": "http://153.101.67.170:9002"
},
{
"http": "http://47.106.112.207:8081"
},
{
"http": "http://114.103.88.147:8089"
},
{
"http": "http://203.19.38.114:1080"
},
{
"http": "http://121.40.110.105:6080"
},
{
"http": "http://183.215.23.242:9091"
},
{
"http": "http://114.106.147.43:8089"
},
{
"http": "http://111.59.4.88:9002"
},
{
"http": "http://183.165.245.16:8089"
},
{
"http": "http://60.188.102.225:18080"
},
{
"http": "http://183.234.215.11:8443"
},
{
"http": "http://116.63.129.202:6000"
},
{
"http": "http://114.106.170.110:8089"
},
{
"http": "http://122.116.125.115:8888"
},
{
"http": "http://121.238.205.43:7788"
},
{
"http": "http://114.103.88.131:8089"
},
{
"http": "http://1.162.9.226:80"
},
{
"http": "http://112.194.88.112:41122"
},
{
"http": "http://112.194.88.43:41122"
},
{
"http": "http://123.126.158.50:80"
},
{
"http": "http://202.131.65.110:80"
},
{
"http": "http://183.165.227.29:8089"
},
{
"http": "http://116.63.129.202:6000"
},
{
"http": "http://58.20.248.139:9002"
},
{
"http": "http://60.174.1.156:8089"
},
{
"http": "http://223.247.47.75:8089"
},
{
"http": "http://112.194.93.96:41122"
},
{
"http": "http://58.20.248.139:9002"
}
]

@ -0,0 +1,534 @@
import csv
import json
import os
import re
import pandas as pd
from pymysql import *
import requests
from fake_useragent import UserAgent
from lxml import etree
from sqlalchemy import create_engine
import 优化ip代理池 as Optimize
engine = create_engine('mysql+pymysql://root:1234@localhost:3306/dbmovie')
class spider(object):
def __init__(self):
self.name = '日本'
# Read the Excel file to get the list of URLs
file_path = f'豆瓣电影网站/{self.name}/{self.name}电影网站(整合后).xlsx'
self.df = pd.read_excel(file_path, engine='openpyxl')
# The URLs live in the '电影网站' column
self.urls = self.df['电影网站'].tolist()  # convert to a list for easy iteration
# Request headers
# self.headers()
self.proxies = [
{'http': 'http://180.121.130.208:8089'},
{'http': 'http://114.106.147.14:8089'},
{'http': 'http://117.86.9.250:8089'},
{'http': 'http://114.231.45.235:8089'},
{'http': 'http://114.132.202.80:8080'},
# HTTPS proxies can be added the same way
# {'https': 'https://example.com:port'},
]
# Return a randomly chosen proxy that passed testing
# self.success_proxies = Optimize.startup()
def request_headers(self):
# Generate a random browser-like User-Agent
UserAgents = UserAgent().random
# Build the request headers
headers = {
"User-Agent": f"{UserAgents}",
# "Cookie": 'll="118313"; bid=QVNFc5M31Ds; _pk_id.100001.4cf6=b91d85f3dfe7a18f.1708781347.; _vwo_uuid_v2=D41D2EAD2A7C867B2EF7CAA05192E9D9B|0d7f976cadeba2dd51716ba2b90223b7; viewed="1866298_20396037"; __utmz=223695111.1711160671.9.2.utmcsr=search.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/subject_search; push_noty_num=0; push_doumail_num=0; __utmz=30149280.1711182158.14.5.utmcsr=help.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmv=30149280.27921; douban-fav-remind=1; dbcl2="279216488:y4XIrGbz4fQ"; ck=rkAR; frodotk_db="2e17d8cc08294f6a8a478a64187bee3e"; ap_v=0,6.0; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1712647865%2C%22https%3A%2F%2Fsearch.douban.com%2Fmovie%2Fsubject_search%3Fsearch_text%3D%E5%8A%A8%E6%BC%AB%26cat%3D1002%26start%3D60%22%5D; _pk_ses.100001.4cf6=1; __utma=30149280.1472987265.1708781347.1712582234.1712647866.102; __utmb=30149280.0.10.1712647866; __utmc=30149280; __utma=223695111.718378847.1708781347.1712582234.1712647866.100; __utmb=223695111.0.10.1712647866; __utmc=223695111'
}
return headers
def init(self):
if not os.path.exists('./tempData.csv'):
with open('./tempData.csv', 'w', newline='') as writer_f:
writer = csv.writer(writer_f)
writer.writerow(
['directors', 'rate', 'title', 'casts', 'cover', 'detailLink', 'year', 'types', 'country', 'lang',
'time', 'movieTime', 'comment_len', 'starts', 'summary', 'comments', 'imgList', 'movieUrl']
)
# writer.writerow(
# [
# '电影导演', '电影评分', '电影名字', '电影演员', '电影封面', '电影详情链接', '电影年份',
# '电影类型',
# '电影国家', '电影语言', '电影上映时间', '电影片长', '短评个数', '电影星级',
# '电影信息介绍', '电影短评(短评用户,短评评分,评论时间,评论内容)', '图片列表', '预告片链接'
# ]
# )
if not os.path.exists('./spiderPage.txt'):
with open('./spiderPage.txt', 'w', encoding="utf-8") as f:
f.write('0\r')
try:
conn = connect(host='localhost', user='root', password='1234', database='dbmovie', port=3306,
charset='utf8mb4')
sql = '''
create table movie (
id int primary key auto_increment,
directors VARCHAR(255),
rate VARCHAR(255),
title VARCHAR(255),
casts text,
cover VARCHAR(255),
detailLink VARCHAR(255),
year VARCHAR(255),
types VARCHAR(255),
country VARCHAR(255),
lang VARCHAR(255),
time VARCHAR(255),
movieTime VARCHAR(255),
comment_len VARCHAR(255),
starts VARCHAR(255),
summary text,
comments text,
imgList text,
movieUrl VARCHAR(255)
)
'''
cursor = conn.cursor()
cursor.execute(sql)
conn.commit()
except Exception:
# The movie table may already exist; ignore the error
pass
def get_page(self):
try:
with open('./spiderPage.txt', 'r') as r_f:
return int(r_f.read().strip())
except FileNotFoundError:
return 0  # if the progress file does not exist, start from index 0
def set_page(self, newPage):
with open('./spiderPage.txt', 'w') as a_f:
a_f.write(str(newPage))
def spiderMain(self):
def xpathelement(xpath, value=None):
text_xpath = tree.xpath(xpath)
# If no value was provided
if not value:
# Return the element's text content
text = text_xpath[0].text
# print(text)
return text
else:
if value:
if isinstance(value, str):  # check whether value is a string
# Treat value as an attribute name and return that attribute's value
# print("got here")
xpath = xpath + '/@{}'.format(value)
# print(xpath)
text_xpath = tree.xpath(xpath)[0]
return text_xpath
else:
return text_xpath
max_retries = 99999  # maximum number of retries
retries = 0
while retries < max_retries:
start_page = self.get_page() + 1  # resume from the last crawled index plus one
if start_page > len(self.urls):
print("All sites have been crawled!")
return
print(f'Resuming the crawl from site number {start_page}')
try:
# Continue crawling from where the last run stopped
for index, url in enumerate(self.urls[start_page - 1:], start=start_page):
headers = self.request_headers()
success = Optimize.startup()
# proxies = random.choice(self.proxies)  # pick a random proxy from the list
resultData = []
# url = 'https://movie.douban.com/subject/36868913/'
print(url)
# Optionally add a 1-second delay after each request
# time.sleep(1)
# Send the request
response = requests.get(url=url, headers=headers, proxies=success)
# 获取网站返回文本
context = response.text
# 使用 lxml 的 etree 解析 HTML
tree = etree.HTML(context)
if response.status_code == 200:
# 处理成功的响应
# 电影导演
directors = tree.xpath('//*[@id="info"]//a[@rel="v:directedBy"]')
if directors:
directors_list = []
for i in directors:
directors_list.append(i.text)
resultData.append(','.join(directors_list))
else:
resultData.append(0)
# 电影评分
rate = tree.xpath('//*[@id="interest_sectl"]/div/div[2]/strong')
if rate and rate[0].text:
rate_txt = rate[0].text.strip() # 使用strip()去除可能的空白字符
resultData.append(rate_txt)
else:
resultData.append(0)
# 电影名字//*[@id="content"]/h1/span[1]
title = tree.xpath('//*[@id="content"]/h1//span[@property="v:itemreviewed"]')
title_txt = title[0].text
print(title_txt)
resultData.append(title_txt)
# 电影演员
# 查找包含目标span元素的父级span//*[@id="info"]/span[2]/span[2]
actors_links = tree.xpath('//*[@id="info"]//a[@rel="v:starring"]')
# print(actors_links)
if actors_links:
# 初始化一个空列表来保存演员的名字
actors = []
# 遍历每个链接元素
for link in actors_links:
# 获取链接的文本内容
text = link.text
# 打印链接元素和它的text属性
# print(f"Link element: {link}")
# print(f"Text content: {text}")
# 检查text是否为None
if text is not None:
# 使用strip()方法去除文本两侧的空白字符
trimmed_text = text.strip()
# 检查去除空白后的文本是否为空
if trimmed_text:
# 如果不为空,则将其添加到演员列表中
actors.append(trimmed_text)
else:
# 处理text为None的情况
continue
resultData.append(','.join(actors))
else:
resultData.append(0)
# 电影封面
# cover_img_src = tree.xpath('//*[@id="mainpic"]/a/img/@src')[0]
cover_img_src = tree.xpath('//*[@id="mainpic"]/a/img/@src')
if cover_img_src:
resultData.append(cover_img_src[0])
else:
resultData.append(0)
# 电影详情链接
detailLink = url
resultData.append(detailLink)
# Movie year
# year = tree.xpath('//*[@id="content"]/h1/span[2]/text()')
# print(year)
year = tree.xpath('//*[@id="content"]/h1/span[2]')
if year:
# Pull the year digits out with a regex (this also drops the parentheses)
year_without_parentheses = re.search(r'\d+', year[0].text).group()
# year_without_parentheses = re.sub(r'\(|\)', '', year)
resultData.append(year_without_parentheses)
else:
resultData.append(0)
# 电影类型
# types = tree.xpath('//span[contains(@property,"v:genre")]')
# types = tree.xpath('//*[@id="info"]/span[contains(@property,"v:genre")]')
types = tree.xpath('//*[@id="info"]/span[@property="v:genre"]')
if types:
types_list = []
for i in types:
types_list.append(i.text)
resultData.append(','.join(types_list))
else:
resultData.append(0)
# 电影国家
textInfo = tree.xpath('//*[@id="info"]/text()')
texts = []
if textInfo:
for i in textInfo:
if i.strip() and not i.strip() == '/':
texts.append(i)
# print(texts) # 打印完整的texts列表检查原始数据
# 假设texts[0]包含国家信息
if len(texts) > 0: # 确保texts至少有两个元素
if texts[0]:
country_info = texts[0].split('/') # 分割国家信息
cleaned_countries = [i.strip() for i in country_info] # 去除每个国家名称的空格(尽管之前已经处理过)
resultData.append(','.join(cleaned_countries)) # 合并成一个字符串并添加到resultData中
else:
resultData.append(0)
else:
resultData.append(0)
else:
resultData.append(0)
# 电影语言
if textInfo:
if len(texts) > 1: # 确保texts至少有两个元素
if texts[1]:
lang_info = texts[1].split(sep='/') # 分割语言信息
cleaned_lang = [i.strip() for i in lang_info] # 去除每个语言的空格(尽管之前已经处理过)
resultData.append(','.join(cleaned_lang)) # 合并成一个字符串并添加到resultData中
else:
resultData.append(0)
else:
resultData.append(0)
else:
resultData.append(0)
# 电影上映时间
time_movie = tree.xpath('//*[@id="info"]/span[@property = "v:initialReleaseDate"]')
if time_movie:
time_list = []
for i in time_movie:
time_list.append(i.text)
# time_one = time_list[0][:10]
# time_one = re.search('\d+', time_list[0]).group()
# 去除括号内的内容
time_str = re.sub(r'\(.*?\)', '', time_list[0])
# 使用正则表达式匹配 YYYY-MM-DD, YYYY-M-DD, 和 YYYY-M-D 格式的日期
match = re.search(r'\b\d{4}-(?:\d{2}|\d)-(?:\d{2}|\d)\b', time_str)
if match:
resultData.append(match.group())
else:
# 如果没有日期格式尝试直接匹配4位数字作为年份
match = re.search(r'\d{4}', time_str)
if match:
resultData.append(match.group())
else:
# 如果没有找到匹配项返回0
resultData.append(0)
else:
resultData.append(0)
# 电影片长
movieTime = tree.xpath('//*[@id="info"]/span[@property="v:runtime"]/@content')
if movieTime:
resultData.append(movieTime[0])
else:
resultData.append(0)
# Number of short reviews
comment_len = tree.xpath('//*[@id="comments-section"]/div[1]/h2/span/a')
if comment_len:
comment_len_txt = re.search(r'\d+', comment_len[0].text).group()
resultData.append(comment_len_txt)
else:
resultData.append(0)
# 电影星级占比//*[@id="interest_sectl"]/div/div[3]/div[1]/span[2]
starts = tree.xpath('//*[@id="interest_sectl"]/div/div[3]/div[@class="item"]')
# starts = tree.xpath('//*[@id="interest_sectl"]//div[@class="ratings-on-weight"]/div[@class="item"]')
# starts = tree.xpath('//div[@id="interest_sectl"]//div[@class="ratings-on-weight"]/div[@class="item"]')
if starts:
starts_list = []
for i in starts:
# span_html = etree.tostring(i, pretty_print=True, encoding='unicode')
# # 打印或处理单个span元素的HTML
# print(span_html)
# 类名以"rating_per"开头的span元素
# span_tag = i.xpath('.//span[@class="rating_per"]/text()')
# span_tag = i.xpath('.//span[@class="rating_per"]')[0].text
span_tag = i.xpath('.//span[2]')[0].text
starts_list.append(span_tag)
resultData.append(','.join(starts_list))
else:
resultData.append(0)
# 电影简介
summary = tree.xpath('//*[@id="link-report-intra"]/span/text()')
if summary:
summary_str = ''
for i in summary:
summary_str += i.strip()
# print(i.strip())
resultData.append(summary_str)
else:
resultData.append(0)
# 电影短评(短评用户,短评评分,评论时间,评论内容)
comments = tree.xpath('//*[@id="hot-comments"]/div')
if comments:
comments_list = []
for i in comments:
# 用户
user = i.xpath('.//h3/span[2]/a')[0].text
# 评分
start_classes = i.xpath('.//h3/span[2]/span')
if len(start_classes) == 4:
# 获取class属性值列表
class_attributes = i.xpath('.//h3/span[2]/span[2]/@class')
# Make sure there is at least one class value and it is not "comment-time-tip"
if class_attributes and class_attributes[0] != 'comment-time-tip':
# Try to pull the rating digits out of the class value
match = re.search(r'\d+', class_attributes[0])
if match:
start = match.group()
else:
# No digits found: fall back to a default value
start = 0  # or any other sensible default
time_pl = i.xpath('.//h3/span[2]/span[3]/@title')[0]
else:
start = 0
# 时间
time_pl = i.xpath('.//h3/span[2]/span[2]/@title')[0]
# 内容
content = i.xpath('.//div/p/span')[0].text
comments_list.append({
'user': user,
'start': start,
'time': time_pl,
'content': content
})
resultData.append(json.dumps(comments_list))
# resultData.append(comments_list)
else:
resultData.append(0)
# # 图片列表
# imgList = tree.xpath('//*[@id="related-pic"]/ul//a/img/@src')
# if imgList:
# resultData.append(','.join(imgList))
# else:
# resultData.append(0)
# 初始化图片链接列表
img_urls = []
# 查找包含特定 class 的 <a> 标签
a_tags = tree.xpath('//*[@id="related-pic"]/ul//a[@class="related-pic-video"]')
# 提取第一个 video 图片链接
found_video_image = False
for a_tag in a_tags:
style_attr = a_tag.get('style')
if style_attr:
start_index = style_attr.find('url(') + 4
end_index = style_attr.find(')', start_index)
if start_index != -1 and end_index != -1:
img_url = style_attr[start_index:end_index]
img_urls.insert(0, img_url) # 将视频图片链接插入到列表的第一个位置
found_video_image = True
break # 找到后跳出循环
# 如果没有找到 video 图片链接,添加默认值
if not found_video_image:
img_urls.append('0') # 使用字符串 '0' 作为默认值,因为后面会用 join(',')
# 查找图片链接
imgList = tree.xpath('//*[@id="related-pic"]/ul//a/img/@src')
if imgList:
img_urls.extend(imgList) # 将其他图片链接添加到列表中
else:
img_urls.append('0')  # placeholder when no still images are found
# 将 img_urls 列表转换为逗号分隔的字符串,并添加到 resultData
resultData.append(','.join(img_urls))
# 预告片链接
# movieUrl = tree.xpath('//*[@id="related-pic"]/ul//a[@class="related-pic-video"]/@href')[0]
# print(movieUrl, type(movieUrl))
result = tree.xpath('//*[@id="related-pic"]/ul//a[@class="related-pic-video"]/@href')
if result:
movieUrl = result[0]
resultData.append(movieUrl)
# for i in range(1, 99):
# # 获取视频链接
# # 请求网站
# response_sp = requests.get(url=movieUrl, headers=self.headers, proxies=success)
# # 检查请求是否成功
# # 获取网站返回文本
# context_sp = response_sp.text
# # 使用 lxml 的 etree 解析 HTML
# tree_sp = etree.HTML(context_sp)
# # 查找视频链接,并检查是否找到
# movie_sources = tree_sp.xpath('.//video/source/@src')
# if movie_sources:
# movie = movie_sources[0] # 获取列表中的第一个视频源
# resultData.append(movie)
# break
#
# else:
# print("有视频源链接,尝试获取中...")
else:
resultData.append(0)
# print("未找到相关视频预告片链接")
print(resultData)
if self.save_to_csv(resultData):
# On success, record the index so the crawl can resume from here
self.set_page(index)
print(f'Successfully crawled site number {index}')
print()
except Exception as e:
# Catch the exception and report it
print(f"Error while crawling: {e}")
retries += 1  # one more retry used
if retries < max_retries:
print(f"Retrying ({retries}/{max_retries})...")
print()
# time.sleep(1.333)  # wait a moment before retrying to avoid hammering the site
# else:
# print("Maximum number of retries reached, exiting")
# raise  # re-raise or exit here if desired
def save_to_csv(self, rowData):
with open(f'./tempData.csv', 'a', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(rowData)
return True
def save_to_sql(self, df):
df.to_sql('movie', con=engine, index=False, if_exists='append')
print('Data import finished~')
# df.to_sql('movie', con=engine, index=False, if_exists='append', method='multi')
def clear_csv(self):
df = pd.read_csv('./2024.csv')
df.dropna(inplace=True)  # drop rows with missing values
df.drop_duplicates(inplace=True)  # drop duplicate rows in place
self.save_to_sql(df)
def delete_csv_file(self):
# Path of the temporary file; assumed to be in the current working directory
file_path = 'tempData.csv'
# Check whether the file exists
if os.path.exists(file_path):
# Delete the file
try:
os.remove(file_path)
print(f"File {file_path} was deleted successfully.")
except OSError as e:
print(f"Error while deleting file {file_path}: {e.strerror}")
else:
print(f"File {file_path} does not exist, nothing to delete.")
if __name__ == '__main__':
spiderObj = spider()
spiderObj.init()
spiderObj.spiderMain()
# spiderObj.clear_csv()

File diff suppressed because one or more lines are too long

@ -0,0 +1,108 @@
{'http': 'http://114.103.81.14:8089'}
{'http': 'http://61.158.175.38:9002'}
{'http': 'http://118.178.121.23:8888'}
{'http': 'http://113.208.119.142:9002'}
{'http': 'http://220.178.4.2:8080'}
{'http': 'http://220.248.70.237:9002'}
{'http': 'http://114.106.146.233:8089'}
{'http': 'http://61.191.104.206:8060'}
{'http': 'http://125.77.25.177:8080'}
{'http': 'http://112.194.88.13:41122'}
{'http': 'http://203.57.255.139:3128'}
{'http': 'http://220.248.70.237:9002'}
{'http': 'http://114.106.170.27:8089'}
{'http': 'http://58.20.248.139:9002'}
{'http': 'http://112.51.96.118:9091'}
{'http': 'http://47.242.47.64:8888'}
{'http': 'http://112.194.91.193:41122'}
{'http': 'http://125.77.25.178:8080'}
{'http': 'http://203.19.38.114:1080'}
{'http': 'http://153.101.67.170:9002'}
{'http': 'http://222.174.178.122:4999'}
{'http': 'http://60.12.168.114:9002'}
{'http': 'http://116.63.129.202:6000'}
{'http': 'http://114.106.135.165:8089'}
{'http': 'http://112.194.91.49:41122'}
{'http': 'http://112.194.91.152:41122'}
{'http': 'http://58.20.248.139:9002'}
{'http': 'http://114.103.80.199:8089'}
{'http': 'http://125.77.25.178:8080'}
{'http': 'http://111.3.102.207:30001'}
{'http': 'http://8.213.151.128:3128'}
{'http': 'http://112.51.96.118:9091'}
{'http': 'http://8.213.151.128:3128'}
{'http': 'http://183.165.248.4:8089'}
{'http': 'http://154.85.58.149:80'}
{'http': 'http://8.213.151.128:3128'}
{'http': 'http://112.194.90.220:41122'}
{'http': 'http://114.103.80.42:8089'}
{'http': 'http://120.197.40.219:9002'}
{'http': 'http://202.131.65.110:80'}
{'http': 'http://112.194.93.67:41122'}
{'http': 'http://112.194.90.60:41122'}
{'http': 'http://112.194.91.210:41122'}
{'http': 'http://114.106.172.69:8089'}
{'http': 'http://183.165.247.55:8089'}
{'http': 'http://223.113.80.158:9091'}
{'http': 'http://183.165.248.143:8089'}
{'http': 'http://120.37.121.209:9091'}
{'http': 'http://223.113.80.158:9091'}
{'http': 'http://111.26.177.28:9091'}
{'http': 'http://120.197.40.219:9002'}
{'http': 'http://47.243.92.199:3128'}
{'http': 'http://60.174.1.129:8089'}
{'http': 'http://103.6.177.174:8002'}
{'http': 'http://1.194.236.229:5005'}
{'http': 'http://112.194.91.222:41122'}
{'http': 'http://122.114.232.137:808'}
{'http': 'http://112.194.90.93:41122'}
{'http': 'http://122.116.150.2:9000'}
{'http': 'http://58.20.248.139:9002'}
{'http': 'http://103.118.44.176:8080'}
{'http': 'http://123.126.158.50:80'}
{'http': 'http://58.20.248.139:9002'}
{'http': 'http://103.118.46.61:8080'}
{'http': 'http://8.219.97.248:80'}
{'http': 'http://114.106.170.80:8089'}
{'http': 'http://36.133.183.200:7890'}
{'http': 'http://114.106.172.253:8089'}
{'http': 'http://103.6.177.174:8002'}
{'http': 'http://8.219.97.248:80'}
{'http': 'http://112.194.92.119:41122'}
{'http': 'http://39.129.73.6:443'}
{'http': 'http://114.106.173.45:8089'}
{'http': 'http://111.26.177.28:9091'}
{'http': 'http://114.106.146.225:8089'}
{'http': 'http://117.70.49.235:8089'}
{'http': 'http://122.116.125.115:8888'}
{'http': 'http://60.12.168.114:9002'}
{'http': 'http://183.215.23.242:9091'}
{'http': 'http://183.165.250.253:8089'}
{'http': 'http://153.101.67.170:9002'}
{'http': 'http://47.106.112.207:8081'}
{'http': 'http://114.103.88.147:8089'}
{'http': 'http://203.19.38.114:1080'}
{'http': 'http://121.40.110.105:6080'}
{'http': 'http://183.215.23.242:9091'}
{'http': 'http://114.106.147.43:8089'}
{'http': 'http://111.59.4.88:9002'}
{'http': 'http://183.165.245.16:8089'}
{'http': 'http://60.188.102.225:18080'}
{'http': 'http://183.234.215.11:8443'}
{'http': 'http://116.63.129.202:6000'}
{'http': 'http://114.106.170.110:8089'}
{'http': 'http://122.116.125.115:8888'}
{'http': 'http://121.238.205.43:7788'}
{'http': 'http://114.103.88.131:8089'}
{'http': 'http://1.162.9.226:80'}
{'http': 'http://112.194.88.112:41122'}
{'http': 'http://112.194.88.43:41122'}
{'http': 'http://123.126.158.50:80'}
{'http': 'http://202.131.65.110:80'}
{'http': 'http://183.165.227.29:8089'}
{'http': 'http://116.63.129.202:6000'}
{'http': 'http://58.20.248.139:9002'}
{'http': 'http://60.174.1.156:8089'}
{'http': 'http://223.247.47.75:8089'}
{'http': 'http://112.194.93.96:41122'}
{'http': 'http://58.20.248.139:9002'}

@ -0,0 +1,119 @@
import requests
import random
from concurrent.futures import ThreadPoolExecutor
import json
class Optimize:
def __init__(self):
self.proxies = []  # initial proxy list
self.working_proxies = []  # proxies that are currently working
self.proxy_cache_file = 'working_proxies.cache'  # proxy cache file
# Proxy pool
self.proxies = [
# {'http': 'http://203.74.125.18:8888'},
# {'http': 'http://39.165.0.137:9002'},
# HTTPS proxies can be added the same way
# {'https': 'https://example.com:port'},
]
# Load the proxy pool
self.load_proxy_pool()
# Proxies that passed the connectivity test
self.working_proxies = []
# Load the proxy pool from disk
def load_proxy_pool(self):
with open('ip代理池.json', 'r', encoding='utf-8') as f:
content = json.load(f)
if content:  # make sure the file is not empty
for proxy_dict in content:
self.proxies.append(proxy_dict)
else:
print("The proxy pool is empty")
def load_working_proxies_from_cache(self):
# Load working proxies from the cache file
try:
with open(self.proxy_cache_file, 'r') as f:
return [line.strip() for line in f.readlines()]
except FileNotFoundError:
return []
def save_working_proxies_to_cache(self, proxies):
# Save the working proxies to the cache file
with open(self.proxy_cache_file, 'w') as f:
for proxy in proxies:
f.write(f"{proxy}\n")
def test_proxy(self, proxy):
# Test whether a single proxy works
# test_url = 'http://example.com'
test_url = 'https://www.baidu.com/'
try:
response = requests.get(url=test_url, proxies=proxy, timeout=5)
return response.status_code == 200
except requests.RequestException:
return False
def refresh_working_proxies(self):
# Refresh the list of working proxies
with ThreadPoolExecutor(max_workers=20) as executor:  # test the proxies in parallel
futures = {executor.submit(self.test_proxy, proxy): proxy for proxy in self.proxies}
for future in futures:
if future.result():
self.working_proxies.append(futures[future])
# Save the working proxies to the cache
self.save_working_proxies_to_cache(self.working_proxies)
def get_random_working_proxy(self):
# Return a random working proxy
if not self.working_proxies:
# If the in-memory list is empty, try the cache first
self.working_proxies = self.load_working_proxies_from_cache()
if not self.working_proxies:
# If the cache is empty too, refresh the proxy list
self.refresh_working_proxies()
if self.working_proxies:
return random.choice(self.working_proxies)
else:
print("No working proxy found")
return None
def startup():
# Create an Optimize instance
optimize = Optimize()
# Refresh the list of working proxies
optimize.refresh_working_proxies()
# Pick a random working proxy
proxy = optimize.get_random_working_proxy()
if proxy:
print("Proxy obtained:", proxy)
return proxy
# Use the proxy for your requests here
else:
print("No proxy available")
return None
if __name__ == '__main__':
# Create an Optimize instance
optimize = Optimize()
# Refresh the list of working proxies
optimize.refresh_working_proxies()
# Pick a random working proxy
proxy = optimize.get_random_working_proxy()
if proxy:
print("Proxy obtained:", proxy)
# Use the proxy for your requests here
else:
print("No proxy available")

@ -0,0 +1,57 @@
import pandas as pd
import os
# Deduplicate the movie-site spreadsheets
def deduplication(folder_path, prefix_condition):
# Start with an empty DataFrame to merge into
merged_df = pd.DataFrame()
# Walk all files in the folder
for filename in os.listdir(folder_path):
# Keep only Excel files whose name starts with the given prefix
if filename.endswith('.xlsx') and filename[:len(prefix_condition)] == prefix_condition:
# Build the full file path
file_path = os.path.join(folder_path, filename)
# Read the Excel file
df = pd.read_excel(file_path, sheet_name='Sheet1')
# Drop duplicate rows based on the '电影网站' column
df = df.drop_duplicates(subset='电影网站')
# Merge the data into merged_df
merged_df = pd.concat([merged_df, df], ignore_index=True)
# Drop duplicates again in the merged DataFrame
merged_df = merged_df.drop_duplicates(subset='电影网站')
# Write the merged data to a new Excel file
merged_df.to_excel(f'豆瓣电影网站/{prefix_condition}/{prefix_condition}电影网站(整合后).xlsx', sheet_name='Sheet1',
index=False)
# merged_df.to_excel(f'豆瓣电影网站/全部电影(整合)/{prefix_condition}电影网站(整合后).xlsx', sheet_name='Sheet1',
# index=False)
print(f'Merge finished, output file: {prefix_condition}电影网站(整合后).xlsx')
if __name__ == '__main__':
# Call the function with a folder path and a filename prefix
deduplication('.', '韩国')
# Deduplicating a single file
# import pandas as pd  # pandas for data processing and analysis
# from openpyxl import load_workbook  # openpyxl for working with Excel files
#
# # Read the 'Sheet1' worksheet of '新闻.xlsx'
# df = pd.read_excel('新闻.xlsx', sheet_name='Sheet1')
#
# # Print the column names to confirm the headers before further processing
# print(df.columns)
#
# # Drop duplicate rows based on the '新闻链接' column, keeping unique links
# df = df.drop_duplicates(subset='新闻链接')
#
# # Write the cleaned DataFrame to '详情页网址.xlsx', sheet 'Sheet1'
# # index=False omits the DataFrame index column
# df.to_excel('详情页网址.xlsx', sheet_name='Sheet1', index=False)

File diff suppressed because one or more lines are too long

@ -0,0 +1,43 @@
# ip代理.py
This Python script uses Selenium WebDriver and BeautifulSoup to scrape free proxy IP addresses from a listing site and save the collected IPs and ports to a JSON file (a TXT export is also included but commented out). The main steps are listed below, followed by a condensed sketch of the flow:
1. **Headless mode**: an `Options` object configures Edge to run headless, so no browser window is shown.
2. **Instantiate the WebDriver**: an Edge browser instance is created for the subsequent page operations.
3. **Initialize variables**: the list and dictionary that hold the IP addresses, ports and proxies are set up.
4. **Crawl in a loop**: a `for` loop over `range` visits each listing page (the site is paginated) and processes it.
5. **Get the page source**: `driver.page_source` returns the HTML of the current page.
6. **Parse the HTML**: BeautifulSoup parses the HTML so the required data can be extracted.
7. **Extract IP and port**: CSS selectors locate the `td` cells containing the IP and port, and their text is read.
8. **Store the values**: each IP is appended to the list and mapped to its port in the dictionary.
9. **Print progress**: the number of proxies loaded so far is printed.
10. **Build the proxy list**: for every IP, a dictionary describing an HTTP proxy is built and appended to the proxy list.
11. **Write the JSON file**: `json.dump` writes the proxy list to disk as pretty-printed JSON.
12. **Write the TXT file** (commented out in the script): each proxy would be written as a string followed by a comma and a newline.
13. **Close the WebDriver**: `driver.quit()` closes the browser instance and releases its resources.
14. **Delay**: the final `time.sleep(999)` is commented out; since `driver.quit()` has already closed the browser, it is not needed.
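
For orientation, here is a condensed, illustrative sketch of that flow. The listing URL, selectors, page range and output filename are taken from the script above; treat it as a sketch rather than a drop-in replacement, and only run it against pages you are permitted to scrape.

```python
import json
import time

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.edge.options import Options

options = Options()
options.add_argument("--headless")          # no browser window
driver = webdriver.Edge(options=options)

proxies = []
for page in range(1, 10):                   # the listing is paginated
    driver.get(f"https://www.kuaidaili.com/free/inha/{page}/")
    soup = BeautifulSoup(driver.page_source, "lxml")
    for row in soup.select("tbody>tr"):     # one <tr> per proxy
        cells = row.select("td")
        if len(cells) >= 2:
            ip, port = cells[0].text.strip(), cells[1].text.strip()
            proxies.append({"http": f"http://{ip}:{port}"})
    time.sleep(0.4)                         # be polite between pages
driver.quit()

with open("ip代理池.json", "w", encoding="utf-8") as f:
    json.dump(proxies, f, ensure_ascii=False, indent=4)
```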
# spiders.py
This script is a requests-based crawler: it reads a list of Douban movie detail-page URLs from an Excel file, scrapes each page with `requests` and `lxml`, and stores the results in a CSV file and a MySQL database. The main steps are listed below, with a condensed sketch after the list:
1. **Database connection (`create_engine`)**: SQLAlchemy connects to a MySQL database named `dbmovie`.
2. **The `spider` class**: a crawler object with several methods for fetching and processing movie data.
3. **Initialization (`__init__`)**: reads an Excel file to obtain the list of movie URLs to crawl and sets up the request headers and proxies.
4. **Request headers (`request_headers`)**: generates a random User-Agent to mimic a browser and avoid being flagged as a crawler.
5. **Setup (`init`)**: creates the temporary CSV file and the progress file if they do not exist, and tries to create a `movie` table in the database.
6. **Progress tracking (`get_page` / `set_page`)**: record the index of the last crawled page so the next run can resume where the previous one stopped.
7. **Main crawl (`spiderMain`)**: the core method; it iterates over the URL list, sends an HTTP request for each URL and parses the returned page.
8. **Parsing**: `lxml.etree` parses the HTML and extracts the director, rating, title, cast, cover, detail link, year, genres, country, language, release date, runtime, number of short reviews, star distribution, summary and short reviews.
9. **Save to CSV (`save_to_csv`)**: appends the scraped row to the CSV file.
10. **Save to SQL (`save_to_sql`)**: imports the CSV data into the database.
11. **Clean the CSV (`clear_csv`)**: drops missing and duplicate rows from the CSV data and imports the result into the database.
12. **Delete the CSV (`delete_csv_file`)**: removes the temporary CSV file used during crawling.
13. **Proxy pool (`Optimize`)**: the imported 优化ip代理池 module (aliased as `Optimize`) manages and tests the IP proxy pool.
14. **Exception handling**: `spiderMain` wraps the crawl in `try-except` to catch network and parsing errors.
15. **Retry mechanism**: on error the crawl is retried, up to `max_retries` times.
16. **`__main__` block**: when run as a script, it creates a `spider` object, calls `init`, and starts the crawl.
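
The sketch below condenses the fetch-parse-append cycle that `spiderMain` performs for each URL. The XPath expressions, the example URL and the `tempData.csv` append mirror the script; proxy handling, retries and most of the fields are omitted, so it is illustrative only.

```python
import csv

import requests
from fake_useragent import UserAgent
from lxml import etree


def scrape_movie(url, proxy=None):
    """Fetch one Douban detail page and return a few of the fields spiders.py collects."""
    headers = {"User-Agent": UserAgent().random}      # random browser-like User-Agent
    response = requests.get(url, headers=headers, proxies=proxy, timeout=10)
    tree = etree.HTML(response.text)
    title = tree.xpath('//*[@id="content"]/h1//span[@property="v:itemreviewed"]/text()')
    rate = tree.xpath('//*[@id="interest_sectl"]/div/div[2]/strong/text()')
    directors = tree.xpath('//*[@id="info"]//a[@rel="v:directedBy"]/text()')
    return [
        ",".join(directors),                          # directors
        rate[0].strip() if rate else 0,               # rating
        title[0] if title else 0,                     # title
        url,                                          # detail link
    ]


if __name__ == "__main__":
    row = scrape_movie("https://movie.douban.com/subject/36868913/")  # example URL from the script
    with open("tempData.csv", "a", newline="", encoding="utf-8") as f:
        csv.writer(f).writerow(row)                   # same append-only CSV as save_to_csv
```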

@ -0,0 +1,187 @@
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import pandas as pd
import os
import 删除重复项 as remove
from selenium.webdriver.edge.options import Options
# Create an Edge browser instance
driver = webdriver.Edge()
# Douban login page URL
login_url = "https://accounts.douban.com/passport/login?redir=https%3A%2F%2Fmovie.douban.com%2Fexplore"
# Open the login page
driver.get(login_url)
def wait_and_interact_with_element(driver, xpath, value=None):
wait = WebDriverWait(driver, 45)  # wait up to 45 seconds
element = wait.until(EC.element_to_be_clickable((By.XPATH, xpath)))
if not value:
text = element.text.strip()  # get and clean the element's text
element.click()  # click the element
return text  # return the element's text
else:
# If a value was provided, type it into the input box
element.send_keys(value)
# Switch to the username/password login tab
wait_and_interact_with_element(driver, '//*[@id="account"]/div[2]/div[2]/div/div[1]/ul[1]/li[2]')
# Locate the username field and type the username
wait_and_interact_with_element(driver, '//*[@id="username"]', '18877228660')
# Locate the password field and type the password
wait_and_interact_with_element(driver, '//*[@id="password"]', 'qq1453641651')
# Locate and click the login button
wait_and_interact_with_element(driver, '//*[@id="account"]/div[2]/div[2]/div/div[2]/div[1]/div[4]/a')
time.sleep(10)
# At this point the login is assumed to have succeeded
# # Close the current (headed) browser instance
# driver.quit()
#
# # Start a new headless Edge instance
# # Create an Edge options object
# options = Options()
# # Enable headless mode
# options.add_argument("--headless")
# # Disable the GPU to cut resource usage and avoid graphics issues;
# # especially useful on servers or machines without a display
# options.add_argument("--disable-gpu")  # disable the GPU for better compatibility and lower resource use
# # Launch Edge with the configured options
# driver = webdriver.Edge(options=options)
#
# # Continue with the new headless browser instance...
# # For example:
# driver.get("https://movie.douban.com/explore")
# 4,24
for j in range(7, 24):
time.sleep(1)
# 2,16
for i in range(2, 16):
# Wait for the region selector to become clickable, then click it
wait_and_interact_with_element(driver,
'/html/body/div[3]/div[1]/div/div[1]/div/div/div[1]/div/div[1]/div[2]/div/div[1]')
time.sleep(1)
# Wait for the j-th region option to become clickable, then click it
region = wait_and_interact_with_element(driver,
f'//*[@id="app"]/div/div[1]/div/div[1]/div[2]/div/div[2]/div/div/ul/li[{j}]/span')
time.sleep(1)
# Click the decade selector
wait_and_interact_with_element(driver,
'/html/body/div[3]/div[1]/div/div[1]/div/div/div[1]/div/div[1]/div[3]/div/div')
time.sleep(1)
# Pick the i-th decade option
year = wait_and_interact_with_element(driver,
f'/html/body/div[3]/div[1]/div/div[1]/div/div/div[1]/div/div[1]/div[3]/div/div[2]/div/div/ul/li[{i}]/span')
time.sleep(1)
# Wait for the span containing specific text to become clickable
# wait = WebDriverWait(driver, 30)  # wait up to 30 seconds
# element_to_click = wait.until(
# EC.element_to_be_clickable((By.XPATH, "//div[contains(@class, 'base-selector-title')]/span[text()='地区']"))
# )
print(f"Loading {region}-{year} movies...")
# Keep clicking "load more"
n = 0
previous_li_count = 0
while True:
try:
# Wait for the "load more" button to become clickable
wait_and_interact_with_element(driver, '/html/body/div[3]/div[1]/div/div[1]/div/div/div[2]/div/button')
# Wait for the page to load new items
WebDriverWait(driver, 30).until(
lambda driver: len(driver.find_elements(By.CSS_SELECTOR, '.explore-list li')) > previous_li_count
)
# Update the li count
current_li_count = len(driver.find_elements(By.CSS_SELECTOR, '.explore-list li'))
if current_li_count <= previous_li_count:
break  # stop if nothing new was loaded
previous_li_count = current_li_count
n += 1
print(n)
time.sleep(1)
except Exception as e:
print(f"{e} - reached the bottom of the list or the wait timed out")
break  # exit the loop on error
# Get the page source
html = driver.page_source
# Parse the HTML with BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
# List that will hold all of the movie links
all_rows_data = []
# Use a CSS selector to locate the li elements of the result list
li_tags = soup.select('.explore-list li')
index = 0
# Iterate over every li element
for li in li_tags:
# Look for the .drc-rating-num span inside the current li
pf_tag = li.find('span', class_='drc-rating-num')
# Check whether the .drc-rating-num span was found
if pf_tag:
# Get the text of the .drc-rating-num span
rating_text = pf_tag.text.strip()
# Check whether the text is "暂无评分" (no rating yet)
if rating_text == "暂无评分":
# Skip unrated entries
continue
else:
index += 1
# Otherwise process this li element, e.g. print its details
# print(f"Processed li element {index}, rating: {rating_text}")
link_href = li.a.get('href')  # read the href attribute with .get
all_rows_data.append(link_href)
else:
# Handle the case where no .drc-rating-num span was found
print("The current li element has no .drc-rating-num span")
print(f"Processed {index} li elements with a rating")
# Build a DataFrame
df = pd.DataFrame(all_rows_data, columns=['电影网站'])
# Build the output file path
file_path = f'豆瓣电影网站/{region}/{region}-{year}电影网站.xlsx'
# Split the path to get the directory
directory = os.path.dirname(file_path)
# Create the directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# Save the DataFrame as an Excel file
df.to_excel(file_path, index=False)
print(f'Data saved as an Excel file at {file_path}')
time.sleep(1)
print(f'Finished collecting all movie links for {region}; files are under 豆瓣电影网站/{region}')
# Deduplicate the collected movie links
remove.deduplication(f'豆瓣电影网站/{region}', region)
# Keep the browser instance open
time.sleep(99999999)
# Close the browser instance
driver.quit()