master
黄德铠 3 years ago
parent 104a58dff1
commit 53b479b643

13
.gitignore vendored

@ -1,4 +1,3 @@
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@ -21,7 +20,6 @@ parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
@ -51,6 +49,7 @@ coverage.xml
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
@ -73,6 +72,7 @@ instance/
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
@ -83,7 +83,9 @@ profile_default/
ipython_config.py
# pyenv
.python-version
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
@ -129,3 +131,8 @@ dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/

8
.idea/.gitignore vendored

@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<excludeFolder url="file://$MODULE_DIR$/venv" />
</content>
<orderEntry type="jdk" jdkName="Python 3.10" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10" project-jdk-type="Python SDK" />
</project>

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/jd-distributed-crawler.iml" filepath="$PROJECT_DIR$/.idea/jd-distributed-crawler.iml" />
</modules>
</component>
</project>

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>

1268
LICENSE

File diff suppressed because it is too large

@ -0,0 +1,37 @@
# JD Distributed Crawler
#### Description
Selenium crawls the search pages
Scrapy crawls the product detail pages
#### Software Architecture
Software architecture description
#### Installation
1. xxxx
2. xxxx
3. xxxx
#### Instructions
1. xxxx
2. xxxx
3. xxxx
#### Contribution
1. Fork the repository
2. Create Feat_xxx branch
3. Commit your code
4. Create Pull Request
#### Gitee Feature
1. You can use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md
2. Gitee blog [blog.gitee.com](https://blog.gitee.com)
3. Explore open source project [https://gitee.com/explore](https://gitee.com/explore)
4. The most valuable open source project [GVP](https://gitee.com/gvp)
5. The manual of Gitee [https://gitee.com/help](https://gitee.com/help)
6. The most popular members [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)

@ -1,2 +1,221 @@
# jd-distributed-crawler
# Distributed Crawler for JD Graphics Card Listings
### Introduction
This crawler has two parts:
- The first part, the search-page crawler, uses Selenium to crawl JD search result pages and mainly collects each product's SKU and price.
- The second part, the product-page crawler, uses Scrapy to crawl the product detail pages and mainly extracts the product model information.
Because JD carries a huge variety of products and different categories differ greatly from one another, the product-page crawler's code cannot be the same for every category. To keep cost and time manageable, the search-page crawler uses **3080** as its search keyword, and the product-page crawler fetches the detail pages of the products found by the search-page crawler.
The overall architecture is shown below:
![](img/2022-04-15-16-28-10-image.png)
### Choosing an e-commerce site
The point of a crawler is to collect a useful amount of valuable information. On today's Chinese-language internet, e-commerce sites have comparatively high information density and value density, so crawling them is a reasonably worthwhile way to gather data.
At present, Taobao returns the richest and most complete product information. However, Taobao search results mix together Tmall, Tmall Global, ordinary Taobao sellers and other very different kinds of shops, so their product detail pages vary widely in structure; Taobao also carries a lot of legacy baggage, with many poorly structured pages mixed in, which makes the data hard to extract.
After weighing this, Taobao is not a good place for a small, inexperienced team to practice crawling.
JD, by contrast, serves search results almost entirely through its own page templates: JD self-operated stores, flagship stores and third-party stores share essentially the same structure, and the detail pages are consistent as well, which makes it crawler-friendly (the only unfriendly part is that the main site does not publish a robots.txt). This project therefore crawls JD.
### Search-page crawler
It consists of two parts, `settings.yaml` and `jdSearchSeleniumSpider.py`: `settings.yaml` is the configuration file and `jdSearchSeleniumSpider.py` is the main program.
#### Preliminaries
Generally speaking, any site with a search feature places some limits on it, to keep a single IP from hitting it too frequently and consuming too many resources.
JD is no exception: requests that are not logged in, even ones carrying headers and cookies, are usually rejected and asked to verify by logging in:
![](img/2022-04-15-13-25-48-image.png)
![](img/2022-04-15-13-26-42-image.png)
The anti-bot checks guarding a site's search function are usually strict and hard to bypass. Since the number of search pages is typically an order of magnitude smaller than the number of product pages, and a single JD search keyword only yields tens to a few hundred result pages, driving a real browser with Python's `selenium` library to visit the search pages and collect the data is a simple and workable approach.
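In essence, this amounts to launching Chrome through Selenium and loading the search URL for the configured keyword; a minimal sketch follows (the full spider, with scrolling and pagination, appears later in this commit):
```python
from selenium import webdriver

# Open the JD search page for the keyword, the same way the spider below does.
keyword = '3080'
driver = webdriver.Chrome()
driver.get('https://search.jd.com/Search?keyword={0}&enc=utf-8&wq={0}'.format(keyword))
```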
#### settings.yaml
`settings.yaml` is the search-page crawler's configuration file; its contents are:
```yaml
search:
  keyword: '3080'
  sleep: 5
redis:
  host: '120.24.87.40'
  port: '6379'
  password: 'Guet@207'
  db: 1
mongodb:
  host: '120.24.87.40'
  port: '27017'
  username: "admin"
  password: "123456"
result:
  rpush_key: "jd:start_urls"
```
- `search`
  - `keyword`: the keyword to search for, i.e. what to type into JD's product search;
  - `sleep`: sleep time in seconds. **selenium** page actions (clicking "next page", scrolling down) are **asynchronous**, while reading the page is **synchronous**. JD only loads the full product list after the page has been scrolled down, so the crawler sleeps for a while to let the asynchronous actions finish before reading the page;
- `redis`
  - `host`: address of the server where Redis is deployed;
  - `port`: port exposed by the Redis deployment;
  - `password`: the auth password configured for Redis;
  - `db`: which Redis database number to use;
- `mongodb`
  - `host`: address of the server where MongoDB is deployed;
  - `port`: port exposed by the MongoDB deployment;
  - `username`: the MongoDB auth username (which is also the MongoDB database name used);
  - `password`: the MongoDB auth password.
- `result`
  - `rpush_key`: the Redis key under which the URLs built from the crawled data are stored; the URLs live in a Redis list, as shown in the sketch below.
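For reference, here is a condensed sketch of how `jdSearchSeleniumSpider.py` consumes these settings (the full code appears later in this commit); all the values come from the `settings.yaml` shown above:
```python
import yaml
import redis
from pymongo import MongoClient

# Load settings.yaml and open the two connections, as the spider does.
with open('./settings.yaml', 'r') as f:
    settings = yaml.safe_load(f)

redis_connection = redis.Redis(
    host=settings['redis']['host'],
    port=settings['redis']['port'],
    password=settings['redis']['password'],
    db=settings['redis']['db'])

mongo_connection = MongoClient(
    'mongodb://{}:{}'.format(settings['mongodb']['host'], settings['mongodb']['port']),
    username=settings['mongodb']['username'],
    password=settings['mongodb']['password'])
```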
#### jdSearchSeleniumSpider.py
The entry point of the search-page crawler; it scrapes the needed elements from JD search pages.
On startup the program opens JD's product search page, using the `search.keyword` value from `settings.yaml` as the search keyword.
A JD search page loads in two stages. Once the first stage has finished (the first 30 products are visible), the crawler calls `scroll_by_xpath()`, which scrolls to the bottom of the page and triggers the second loading stage, so that all the products on the page are loaded into the browser.
Internally, `scroll_by_xpath()` simply drives the browser to scroll the viewport until a given element is visible. In this crawler the target element's **xpath** is `//*[@id="J_bottomPage"]/span[1]/a[9]`, which is the "next page" button of the JD search page.
Once both stages have loaded, the crawler reads the SKU and price of every product on the page.
![](img/2022-04-15-18-40-23-image.png)
The SKU is JD's identifier for a product, and the product's detail-page URL can be built directly from it (`https://item.jd.com/<sku>.html`).
After collecting the SKUs and prices, the crawler turns each SKU into a URL and RPUSHes it onto the Redis list named by `result.rpush_key`, and stores each product in the configured MongoDB database as `{'sku': sku, 'price': price}`. Before writing, it checks by the `sku` field whether the document already exists (in a single atomic operation, so it is safe in a distributed setup), which prevents duplicate inserts.
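The deduplication relies on a single atomic `update_one(..., upsert=True)` with `$setOnInsert`: the document is only written when no document with that sku exists yet, so concurrent crawler instances cannot insert the same sku twice. A condensed sketch, mirroring the spider code later in this commit (connection values taken from `settings.yaml`):
```python
import redis
from pymongo import MongoClient

# Connections as configured in settings.yaml; the spider builds them the same way.
redis_connection = redis.Redis(host='120.24.87.40', port=6379, password='Guet@207', db=1)
collection = MongoClient('mongodb://120.24.87.40:27017',
                         username='admin', password='123456')['admin']['a']

def store_product(sku, price):
    # Build the detail-page URL and queue it for the product-page crawler.
    redis_connection.rpush('jd:start_urls', 'https://item.jd.com/{}.html'.format(sku))
    # Atomic check-and-insert: $setOnInsert only writes when no document with
    # this sku exists, so duplicates cannot be inserted by concurrent crawlers.
    collection.update_one(
        {'sku': sku},
        {'$setOnInsert': {'sku': sku, 'price': price}},
        upsert=True)
```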
### Product-page crawler
The product-page crawler is written with `scrapy_redis`, the Redis-backed variant of `Scrapy`. Its main difference from plain `Scrapy` is that the URL queue lives in Redis; because Redis serialises concurrent requests for the same data, the list behaves like a queue and guarantees that the distributed crawler instances each receive different URLs.
Under the `scrapy_redis` framework the spider runs asynchronously: after taking a URL from the Redis list and issuing a Request, it can immediately fetch the next URL from Redis, and each Request's response is handled later by its callback (the `parse` method in `jdsku.py`).
To start the spider, run `scrapy crawl jdsku` from **this crawler's project directory**, for example after seeding the queue as sketched below.
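The start URLs are normally produced by the search-page crawler; for a quick standalone test you can seed the list yourself (connection values taken from this repository's settings) and then launch the spider:
```python
import redis

# Connection values from settings.py / settings.yaml in this repository.
r = redis.Redis(host='120.24.87.40', port=6379, password='Guet@207', db=1)
# Queue one detail page (the TEST_PAGE product) for the spider to crawl.
r.rpush('jd:start_urls', 'https://item.jd.com/100017846659.html')
```
With at least one URL in the list, `scrapy crawl jdsku` pops it, requests the page through the proxy middleware and hands the response to `parse`.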
#### settings.py
The configuration file. See the `Scrapy` documentation for the standard settings; the entries below are either `scrapy_redis` settings or settings introduced by this project:
Project-specific settings:
`PROXY_SERVER_URL`: the proxy vendor's API endpoint that returns directly usable proxy IPs;
`TEST_PAGE`: a JD product URL used to test whether a proxy IP obtained from the vendor can still reach JD; it can be swapped for any other JD product URL as needed;
`mongodb`: configured as a dictionary:
- `host`: address of the MongoDB server;
- `port`: port exposed by the MongoDB server;
- `username`: the MongoDB username to use (which is also the database name);
- `password`: the password for that MongoDB username.
`cookies`: the `cookies` a browser sends when visiting JD; including them helps mimic a real browser and lowers the failure rate.
scrapy_redis settings:
`DONT_FILTER`: disables deduplication of Redis URLs (True means no deduplication). Deduplication is disabled because a proxied request to a detail page may fail, in which case the URL is pushed back to the head of the Redis list;
`SCHEDULER = 'scrapy_redis.scheduler.Scheduler'`: use the scrapy_redis scheduler, which guarantees that each host crawls a different set of URLs.
`DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'`: the deduplication class Scrapy uses, i.e. RFPDupeFilter.
`SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"`: use `scrapy_redis.picklecompat` for serialization; unserialized data may otherwise come back garbled.
`AUTOTHROTTLE_ENABLED = True`: enables auto-throttling, which limits the crawl rate and reduces the load on the target server.
#### items.py
The code is as follows:
```python
import scrapy


class JdskuspiderItem(scrapy.Item):
    # sku-id
    sku = scrapy.Field()
    # title
    title = scrapy.Field()
    # GPU model
    model = scrapy.Field()
    # memory type
    memoryType = scrapy.Field()
    # memory bus width
    bitWidth = scrapy.Field()
    # memory capacity
    memoryCapacity = scrapy.Field()
```
The item is the container for the scraped data; it lets us store each value under its corresponding field, as in the example below.
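In `jdsku.py` (shown later in this commit), the fields parsed from a detail page are wrapped in this item before being yielded:
```python
# sku, title, model, memoryType, memoryCapacity and bitWidth are the values
# extracted in parse().
item = JdskuspiderItem(
    sku=sku,
    title=title,
    model=model,
    memoryType=memoryType,
    memoryCapacity=memoryCapacity,
    bitWidth=bitWidth)
```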
#### jdsku.py
The entry point of the product-page crawler: it takes URLs from the corresponding Redis list and scrapes elements from the JD product detail pages. Because it uses the `scrapy_redis` framework, the Redis-based distributed variant of `Scrapy`, most of the code consists of overridden methods.
For `class JdskuSpider(RedisSpider)`:
**Attributes**:
`name`: the spider's name, which is what you pass on the command line when launching it;
`allowed_domains`: the range of URLs the spider is allowed to crawl;
`redis_key`: the key of the Redis list holding the URLs to crawl; the spider takes the first element of that list;
`redis_connection`: a separate Redis connection, used later to push a URL back onto the list when a request fails;
`mongo_connection`: a separate MongoDB connection, used later to store the final data.
**Methods**:
Both methods are overrides: their names and signatures are fixed, and both are invoked automatically by the framework.
`make_requests_from_url(self, url)`:
- the `scrapy_redis` framework calls this method to issue a request for the given URL; the resulting response is handed to its callback (`parse`, described below);
- `meta={'url': url}` passes `{'url': url}` along with the request, so the callback (`parse`) has access to it.
`parse(self, response)`:
- the callback invoked when a request issued by `scrapy_redis` receives its response;
- it parses the product detail page (for this specific product category), extracts the data and saves it to MongoDB; if the extracted data shows that the request was blocked by JD, the URL is pushed back to the head of the Redis list, as sketched below.
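The blocked-page check and re-queue step boils down to the following, condensed from `parse` in `jdsku.py` later in this commit:
```python
body_text = response.xpath('/html').xpath('string(.)').extract_first()
if "window.location.href='https://passport.jd.com" in body_text:
    # JD answered with its login/verification redirect: push the URL back to
    # the head of the Redis list so another (proxied) attempt can retry it.
    self.redis_connection.lpush('jd:start_urls', 'https://item.jd.com/{}.html'.format(sku))
```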

Binary image files added (6 files, not shown). Sizes: 4.9 KiB, 116 KiB, 116 KiB, 92 KiB, 149 KiB, 149 KiB.

@ -0,0 +1,115 @@
from time import sleep

import yaml
import redis
from selenium import webdriver
from selenium.webdriver.common.by import By
from pymongo import MongoClient


def scroll_by_xpath(webDriver, xpath):
    """
    Scroll the page loaded in webDriver down to the element located by xpath.
    :param webDriver: web driver, used to control the launched browser
    :param xpath: xpath of the element to locate, str
    """
    target = webDriver.find_element(By.XPATH, xpath)
    webDriver.execute_script("arguments[0].scrollIntoView();", target)


if __name__ == '__main__':
    # Load the configuration file
    yaml_file = "./settings.yaml"
    with open(yaml_file, 'r') as f:
        settings = yaml.safe_load(f)
    # Open a Redis connection
    redis_connection = redis.Redis(
        host=settings['redis']['host'],
        port=settings['redis']['port'],
        password=settings['redis']['password'],
        db=settings['redis']['db'],
        charset='UTF-8',
        encoding='UTF-8')
    mongo_connection = MongoClient('mongodb://{}:{}'.format(settings['mongodb']['host'], settings['mongodb']['port']),
                                   username=settings['mongodb']['username'],
                                   password=settings['mongodb']['password'])
    # Open the JD product search page
    driver = webdriver.Chrome()
    driver.get('https://search.jd.com/Search?keyword={}&enc=utf-8&wq={}'.format(settings['search']['keyword'], settings['search']['keyword']))
    # Scroll down to the pagination bar
    scroll_by_xpath(driver, '//*[@id="J_bottomPage"]/span[1]/a[9]')
    # Sleep before reading elements: the JD search page only loads fully after
    # scrolling, and the scroll is asynchronous; reading too early misses elements
    sleep(settings['search']['sleep'])
    # Maximum number of result pages for the current keyword
    max_page = int(driver.find_element(By.XPATH, '//*[@id="J_bottomPage"]/span[2]/em[1]/b').text)
    skuid_prev = []
    skuid_url = []
    page = 0
    while page < max_page:
        sku_price = []
        # Scroll down to trigger the second loading stage
        scroll_by_xpath(driver, '//*[@id="J_bottomPage"]/span[1]/a[9]')
        sleep(settings['search']['sleep'])
        # Read the sku id and price of every product on the search page
        li_list = driver.find_elements(By.XPATH, '//*[@id="J_goodsList"]/ul/li')
        for li in li_list:
            li_text = li.text.split("\n")
            result = 0
            for text in li_text:
                # A line whose 2nd and 3rd characters are digits is treated as the price line
                if len(text) > 2 and text[1] in [str(i) for i in range(10)] and text[2] in [str(i) for i in range(10)]:
                    result = text[1:-1]
                    break
            price = result
            sku_price.append({li.get_attribute("data-sku"): price})
        # If this page reads as a duplicate of the previous one, retry it
        if page != 0 and sku_price[0] == skuid_prev[0]:
            page -= 1
            continue
        # Re-read if the page seems only partially loaded. (The original condition
        # compared len(skuid_prev) with itself; comparing the current count against
        # the previous page's count is assumed to be the intent.)
        while page != 0 and len(sku_price) < len(skuid_prev):
            sku_price = []
            li_list = driver.find_elements(By.XPATH, '//*[@id="J_goodsList"]/ul/li')
            for li in li_list:
                # The leading '.' keeps the lookup relative to this <li>; .text extracts the price string
                price = li.find_element(By.XPATH, ".//*[@class='gl-i-wrap']/*[@class='p-price']//strong/i").text
                sku_price.append({li.get_attribute("data-sku"): price})
        # Build detail-page URLs and push them onto the Redis start_urls list
        for dic in sku_price:
            for key in dic.keys():
                skuid_url.append('https://item.jd.com/{}.html'.format(key))
        for url in skuid_url:
            redis_connection.rpush(settings['result']['rpush_key'], url)
        # Store sku and price in MongoDB
        mongoDB = mongo_connection['admin']
        mongoCollection = mongoDB['a']
        for dic in sku_price:
            document = {}
            for key in dic.keys():
                document['sku'] = key
                document['price'] = dic[key]
            # Update MongoDB:
            # only insert when no document with this sku exists yet
            mongoCollection.update_one(
                {'sku': document['sku']},
                {
                    '$setOnInsert': document  # only takes effect together with upsert=True
                },
                upsert=True
            )
        print(str(page) + " " + str(len(skuid_url)) + " ", end="")
        print(sku_price)
        print(skuid_url)
        sleep(settings['search']['sleep'])
        skuid_prev = sku_price.copy()
        skuid_url = []
        page += 1
        # Go to the next page
        driver.find_element(By.XPATH, '//*[@id="J_bottomPage"]/span[1]/a[9]').click()

@ -0,0 +1,18 @@
search:
  keyword: '3080'
  sleep: 5
redis:
  host: '120.24.87.40'
  port: '6379'
  password: 'Guet@207'
  db: 1
mongodb:
  host: '120.24.87.40'
  port: '27017'
  username: "admin"
  password: "123456"
result:
  rpush_key: "jd:start_urls"

@ -0,0 +1,28 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class JdskuspiderItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # sku-id
    sku = scrapy.Field()
    # title
    title = scrapy.Field()
    # GPU model
    model = scrapy.Field()
    # memory type
    memoryType = scrapy.Field()
    # memory bus width
    bitWidth = scrapy.Field()
    # memory capacity
    memoryCapacity = scrapy.Field()

@ -0,0 +1,177 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import random

from scrapy import signals
import requests
import logging

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class JdskuspiderSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class JdskuspiderDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class ProxyMiddleWare:
    logger = logging.getLogger(__name__)
    proxy = ''

    def get_random_proxy(self, spider):
        """
        Fetch a proxy URL from the proxy vendor's API page.
        :return: proxy url
        """
        try:
            response = requests.get(spider.settings.get('PROXY_SERVER_URL'))
            if response.status_code == 200:
                # Yields something like ['125.111.146.122:4345', '114.239.146.151:4356', '']
                url_list = response.text.split("\r\n")
                # Drop the trailing empty string
                url_list.pop(-1)
                proxy = '{}'.format(random.choice(url_list))
                ip = {"http": "http://" + proxy}
                logging.info("===== got proxy: {} =====".format(ip))
                r = requests.get("http://www.baidu.com", proxies=ip, timeout=10)
                if r.status_code == 200:
                    logging.info("===== proxy passed the test, using this ip: {} =====".format(ip))
                    return proxy
        except:
            logging.info('===== proxy failed the test, fetching a new one from the vendor =====')
            return self.get_random_proxy(spider)

    def get_proxy(self, spider):
        """
        Reuse the cached proxy from the pool when possible, to save proxy quota.
        :return: proxy, in the form '125.111.146.122:4345'
        """
        if self.proxy == "":
            self.proxy = self.get_random_proxy(spider)
        else:
            ip = {"http": "http://" + str(self.proxy)}
            r = requests.get(spider.settings.get("TEST_PAGE"), proxies=ip, timeout=10)
            if r.status_code == 200:
                # A 'window.location.href' snippet at the start of the body marks JD's ip-ban page
                if not "window.location.href" in r.text[0:50]:
                    logging.info("===== proxy passed the test, keep using this ip: {} =====".format(self.proxy))
                else:
                    self.proxy = self.get_random_proxy(spider)
            else:
                self.proxy = self.get_random_proxy(spider)
        return self.proxy

    def process_request(self, request, spider):
        """
        Standard scrapy downloader-middleware hook.
        Sends the request for the target page through a proxy.
        """
        proxy = self.get_proxy(spider)
        if proxy:
            self.logger.debug('====== using proxy ' + str(proxy) + " ======")
            request.meta['proxy'] = 'https://{proxy}'.format(proxy=proxy)

    def process_response(self, request, response, spider):
        """
        Standard scrapy downloader-middleware hook.
        If the proxied request failed, retry it with a freshly fetched proxy.
        """
        if response.status != 200:
            self.logger.info("===== got a failed response through the proxy ip, fetching a new proxy ip =====")
            request.meta['proxy'] = 'https://{proxy}'.format(proxy=self.get_random_proxy(spider))
            return request
        return response

@ -0,0 +1,14 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class JdskuspiderPipeline:
    def process_item(self, item, spider):
        print(item)
        return item

@ -0,0 +1,160 @@
# Scrapy settings for jdSkuSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'jdSkuSpider'
SPIDER_MODULES = ['jdSkuSpider.spiders']
NEWSPIDER_MODULE = 'jdSkuSpider.spiders'
# Proxy API endpoint provided by the proxy vendor
PROXY_SERVER_URL = 'https://h.shanchendaili.com/api.html?action=get_ip&key=HU015c86520413222339Rp2a&time=10&count=1&protocol=http&type=text&textSep=1&only=1'
# Used to test whether the current proxy ip has been banned
TEST_PAGE = 'https://item.jd.com/100017846659.html'
# Disable deduplication (True means no deduplication, False means deduplicate)
DONT_FILTER = True
REDIS_HOST = '120.24.87.40'
REDIS_PORT = '6379'
REDIS_DB = 1
REDIS_PARAMS = {
'password': 'Guet@207',
}
# Use the scrapy_redis scheduler, i.e. the Scheduler that guarantees each host crawls different URLs
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
# The deduplication class scrapy uses, i.e. RFPDupeFilter
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
# Serialization
SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"
# Auto-throttle (limits the crawl rate to reduce load on the target server)
AUTOTHROTTLE_ENABLED = True
mongodb = {
'host': '120.24.87.40',
'port': 27017,
'username': "admin",
'password': "123456"
}
cookies = {
'shshshfpa': '88442a32-4008-3016-2ed4-5f00ba6e02a6-1583306994',
'__jdu': '1645417385963897370071',
'shshshfpb': 'eKzbp55ekVbG%2BmXNii7FmnA%3D%3D',
'unpl': 'JF8EAKpnNSttC0hdDR9XGhMQTw5XW15YGURUPDQCVFwIHFEHGQoSExZ7XlVdXhRKFR9vZxRXXlNKUQ4fBCsSE3tdVV9cD0gVBWduNWRtW0tkBCsCHBcUTl1SXFQMQxABZm8DVltZSlIFKzIcEhl7bWRbXQlKFQVpZAVXbVl7VgQaAB8XFEJcU24cZk4XAmZlSFRaXU9RBR0AEhYYTF9dVlsKTRYCaWc1VlReQ1I1GA',
'__jdv': '76161171|baidu|-|organic|notset|1648984819662',
'areaId': '20',
'PCSYCityID': 'CN_450000_450300_0',
'ipLoc-djd': '20-1726-22884-51455',
'pinId': 'bxSvmv8CNc-KKJVv2AYoCLV9-x-f3wj7',
'pin': 'jd_701eaef2a29ff',
'unick': '%E6%9D%9C%E6%92%B0%E4%B8%AD%E7%9A%84%E6%9C%AA%E5%90%8D',
'_tp': 'Wy5hpT1Zse8ScnmqROf6D38%2BJ8IK3ESqvwXymUySsLE%3D',
'_pst': 'jd_701eaef2a29ff',
'shshshfp': '9dc45f7c100e4d56fc0ddb7d96bc59ab',
'__jdc': '122270672',
'__jda': '122270672.1645417385963897370071.1645417385.1649430078.1649491583.20',
'thor': 'FC76D1F441FDD23810222D65B21A1E62936594BACBBD52CF88EB830809C78B7F75DEEDA34458FE020C54AF1DD6E55F389B2063BCCBBD829B977631DD6FB67A17BA374FEFAA00AF1C8264A11F080FA449884B327D73A31031D1F35232730745A6BD1570FDB90E15A4002FCBF4C8CDED1F588BFA29B2272823A7263C9A88F289B84E2075F1A710BED202A651BD7ABBC09D354497FAFBB4A0A7CBDC9590803F6162',
'ceshi3.com': '000',
'token': '5a5e7384ead314efe3237efb8d9825fb,3,916384',
'__tk': 'OINnrcq4NDN5NiuEqcrdriKiOcJ5sINEsca4rfKiqIhgNLbhOIN4sG,3,916384',
'ip_cityCode': '1726',
'wlfstk_smdl': 'xrr3qwmzs7zoj7y6kbt5nfkai2clqo2n',
'shshshsID': '3c84311b6180b15c5bf06ba762b90ce2_7_1649491714829',
'__jdb': '122270672.15.1645417385963897370071|20.1649491583',
'3AB9D23F7A4B3C9B': '3WE3SSBAN2EJKLE3VP6ZK2I4UOCXTYVZTD7O46KFL2S7J2UVXVZ7IJGBJKC4S3RAWL2DRAMRLPN63TK3LHWTA5JVQQ',
}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'jdSkuSpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'authority': 'item.jd.com',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'accept-language': 'zh,zh-CN;q=0.9',
'cache-control': 'max-age=0',
# Requests sorts cookies= alphabetically
# 'cookie': 'shshshfpa=88442a32-4008-3016-2ed4-5f00ba6e02a6-1583306994; __jdu=1645417385963897370071; shshshfpb=eKzbp55ekVbG%2BmXNii7FmnA%3D%3D; unpl=JF8EAKpnNSttC0hdDR9XGhMQTw5XW15YGURUPDQCVFwIHFEHGQoSExZ7XlVdXhRKFR9vZxRXXlNKUQ4fBCsSE3tdVV9cD0gVBWduNWRtW0tkBCsCHBcUTl1SXFQMQxABZm8DVltZSlIFKzIcEhl7bWRbXQlKFQVpZAVXbVl7VgQaAB8XFEJcU24cZk4XAmZlSFRaXU9RBR0AEhYYTF9dVlsKTRYCaWc1VlReQ1I1GA; __jdv=76161171|baidu|-|organic|notset|1648984819662; areaId=20; PCSYCityID=CN_450000_450300_0; ipLoc-djd=20-1726-22884-51455; pinId=bxSvmv8CNc-KKJVv2AYoCLV9-x-f3wj7; pin=jd_701eaef2a29ff; unick=%E6%9D%9C%E6%92%B0%E4%B8%AD%E7%9A%84%E6%9C%AA%E5%90%8D; _tp=Wy5hpT1Zse8ScnmqROf6D38%2BJ8IK3ESqvwXymUySsLE%3D; _pst=jd_701eaef2a29ff; shshshfp=9dc45f7c100e4d56fc0ddb7d96bc59ab; __jdc=122270672; __jda=122270672.1645417385963897370071.1645417385.1649430078.1649491583.20; thor=FC76D1F441FDD23810222D65B21A1E62936594BACBBD52CF88EB830809C78B7F75DEEDA34458FE020C54AF1DD6E55F389B2063BCCBBD829B977631DD6FB67A17BA374FEFAA00AF1C8264A11F080FA449884B327D73A31031D1F35232730745A6BD1570FDB90E15A4002FCBF4C8CDED1F588BFA29B2272823A7263C9A88F289B84E2075F1A710BED202A651BD7ABBC09D354497FAFBB4A0A7CBDC9590803F6162; ceshi3.com=000; token=5a5e7384ead314efe3237efb8d9825fb,3,916384; __tk=OINnrcq4NDN5NiuEqcrdriKiOcJ5sINEsca4rfKiqIhgNLbhOIN4sG,3,916384; ip_cityCode=1726; wlfstk_smdl=xrr3qwmzs7zoj7y6kbt5nfkai2clqo2n; shshshsID=3c84311b6180b15c5bf06ba762b90ce2_7_1649491714829; __jdb=122270672.15.1645417385963897370071|20.1649491583; 3AB9D23F7A4B3C9B=3WE3SSBAN2EJKLE3VP6ZK2I4UOCXTYVZTD7O46KFL2S7J2UVXVZ7IJGBJKC4S3RAWL2DRAMRLPN63TK3LHWTA5JVQQ',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36',
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'jdSkuSpider.middlewares.JdskuspiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'jdSkuSpider.middlewares.ProxyMiddleWare':542,
'jdSkuSpider.middlewares.JdskuspiderDownloaderMiddleware': 543,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scrapy_redis.pipelines.RedisPipeline':400,
'jdSkuSpider.pipelines.JdskuspiderPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.

@ -0,0 +1,119 @@
import scrapy
import re
import redis
from scrapy_redis.spiders import RedisSpider
from pymongo import MongoClient
from ..items import *
from ..settings import REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_PARAMS, mongodb


class JdskuSpider(RedisSpider):
    name = 'jdsku'
    allowed_domains = ['jd.com']
    # start_urls = ['https://item.jd.com/100018781645.html#none']
    redis_key = 'jd:start_urls'
    redis_connection = redis.Redis(
        host=REDIS_HOST,
        port=REDIS_PORT,
        password=REDIS_PARAMS.get('password'),
        db=REDIS_DB,
        charset='UTF-8',
        encoding='UTF-8')
    mongo_connection = MongoClient(
        host=mongodb.get('host'),
        port=mongodb.get('port'),
        username=mongodb.get('username'),
        password=mongodb.get('password'))

    # Works around AttributeError: 'JdskuSpider' object has no attribute 'make_requests_from_url'
    def make_requests_from_url(self, url):
        # dont_filter=True means no deduplication; the default is False (dedup on)
        return scrapy.Request(url,
                              meta={'url': url},  # pass the url on to parse
                              dont_filter=self.settings.get("DONT_FILTER"),
                              cookies=self.settings.get('cookies'))

    def parse(self, response):
        sku = response.meta['url']
        sku_pattern = re.compile("[0-9]+")
        sku = re.search(sku_pattern, sku).group()
        bitWidth = -1
        model = -1
        memoryType = -1
        memoryCapacity = -1
        title = response.xpath('//*[@id="parameter-brand"]/li/a/text()').extract_first()
        print("=========== title =================")
        print(response.xpath('/html'))
        print("=========== title =================")
        li_list = response.xpath('//*[@id="detail"]/div[2]/div[1]/div[1]/ul[2]/li')
        print("***********************************************")
        for li in li_list:
            value = li.xpath('@title').re(".*")[0]
            # The spec list renders as "key:value"; normalise the full-width colon,
            # then strip the value part to recover the key
            key = li.re(">.*<")[0][1:-1].replace("：", ":").replace(": {}".format(value), "").replace(":{}".format(value), "")
            print({key: value})
            if key == '显存位宽':
                bitWidth = value
            elif key == '显卡型号':
                model = value
            elif key == '显存类型':
                memoryType = value
            elif key == '显存容量':
                memoryCapacity = value
            elif key == '商品名称':
                title = value
        print("***********************************************")
        item = JdskuspiderItem(
            sku=sku,
            title=title,
            model=model,
            memoryType=memoryType,
            memoryCapacity=memoryCapacity,
            bitWidth=bitWidth
        )
        print("=========== current sku ==========")
        print(sku)
        if bitWidth == -1 and memoryType == -1 and memoryCapacity == -1:
            print("========== no insert into mongodb ==============")
            if "window.location.href='https://passport.jd.com" in response.xpath('/html').xpath('string(.)').extract_first():
                print("blocked: redirected to the login page (window.location.href)")
                print("re-pushing the sku to redis")
                self.redis_connection.lpush("jd:start_urls", 'https://item.jd.com/{}.html'.format(sku))
            else:
                print("not a graphics card product, insert -1")
                mongoDB = self.mongo_connection['admin']
                mongoCollection = mongoDB['jd']
                mongoCollection.find_one_and_update(
                    filter={'sku': sku},
                    update={'$set': {
                        'title': title,
                        'model': model,
                        'memoryType': memoryType,
                        'memoryCapacity': memoryCapacity,
                        'bitWidth': bitWidth
                    }}
                )
        else:
            print("========== insert into mongodb ==============")
            mongoDB = self.mongo_connection['admin']
            mongoCollection = mongoDB['jd']
            mongoCollection.find_one_and_update(
                filter={'sku': sku},
                update={'$set': {
                    'title': title,
                    'model': model,
                    'memoryType': memoryType,
                    'memoryCapacity': memoryCapacity,
                    'bitWidth': bitWidth
                }}
            )
        yield item

@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html
[settings]
default = jdSkuSpider.settings
[deploy]
#url = http://localhost:6800/
project = jdSkuSpider