Compare commits
1 Commits
master ... zhihu、xinl

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 964e4dee84 | 3 years ago |
@@ -1,6 +0,0 @@
import os
import sys
from scrapy.cmdline import execute

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(['scrapy', 'crawl', 'zhihu'])
@@ -1,16 +0,0 @@
BitVector==3.4.8
fake-useragent==0.1.11
h5py==2.9.0
Keras==2.0.1
mmh3==2.5.1
mouse==0.7.0
numpy==1.16.2+mkl
Pillow==5.4.1
PyMySQL==0.8.0
redis==2.10.6
requests==2.18.4
scikit-learn==0.20.3
scipy==1.2.1
Scrapy==1.6.0
selenium==3.141.0
tensorflow==1.13.1
@@ -1,11 +0,0 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = Zhihu.settings

[deploy]
#url = http://localhost:6800/
project = Zhihu
@@ -1,10 +0,0 @@
# -*- coding: utf-8 -*-
from .connection import (  # NOQA
    get_redis,
    get_redis_from_settings,
)


__author__ = 'Rolando Espinoza'
__email__ = 'rolando at rmax.io'
__version__ = '0.7.0-dev'
@@ -1,90 +0,0 @@
import six

from scrapy.utils.misc import load_object

from . import defaults


# Shortcut maps 'setting name' -> 'parameter name'.
SETTINGS_PARAMS_MAP = {
    'REDIS_URL': 'url',
    'REDIS_HOST': 'host',
    'REDIS_PORT': 'port',
    'REDIS_ENCODING': 'encoding',
}


def get_redis_from_settings(settings):
    """Returns a redis client instance from given Scrapy settings object.

    This function uses ``get_redis`` to instantiate the client and uses
    ``defaults.REDIS_PARAMS`` global as default values for the parameters. You
    can override them using the ``REDIS_PARAMS`` setting.

    Parameters
    ----------
    settings : Settings
        A scrapy settings object. See the supported settings below.

    Returns
    -------
    server
        Redis client instance.

    Other Parameters
    ----------------
    REDIS_URL : str, optional
        Server connection URL.
    REDIS_HOST : str, optional
        Server host.
    REDIS_PORT : str, optional
        Server port.
    REDIS_ENCODING : str, optional
        Data encoding.
    REDIS_PARAMS : dict, optional
        Additional client parameters.

    """
    params = defaults.REDIS_PARAMS.copy()
    params.update(settings.getdict('REDIS_PARAMS'))
    # XXX: Deprecate REDIS_* settings.
    for source, dest in SETTINGS_PARAMS_MAP.items():
        val = settings.get(source)
        if val:
            params[dest] = val

    # Allow ``redis_cls`` to be a path to a class.
    if isinstance(params.get('redis_cls'), six.string_types):
        params['redis_cls'] = load_object(params['redis_cls'])

    return get_redis(**params)


# Backwards compatible alias.
from_settings = get_redis_from_settings


def get_redis(**kwargs):
    """Returns a redis client instance.

    Parameters
    ----------
    redis_cls : class, optional
        Defaults to ``redis.StrictRedis``.
    url : str, optional
        If given, ``redis_cls.from_url`` is used to instantiate the class.
    **kwargs
        Extra parameters to be passed to the ``redis_cls`` class.

    Returns
    -------
    server
        Redis client instance.

    """
    redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)
    url = kwargs.pop('url', None)
    if url:
        return redis_cls.from_url(url, **kwargs)
    else:
        return redis_cls(**kwargs)
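The URL path through `get_redis` is the simplest way to see the connection logic above in isolation. A minimal sketch, assuming the module is importable as `scrapy_redis.connection` and a Redis server is reachable at `localhost:6379` (both assumptions, not shown in this diff):

```python
# Minimal sketch: obtain a client the same way get_redis_from_settings would,
# with the URL taking precedence over host/port parameters.
from scrapy_redis.connection import get_redis

server = get_redis(url='redis://localhost:6379/0', socket_timeout=10)
server.ping()  # raises a connection error if the parameters are wrong
```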
@@ -1,25 +0,0 @@
import redis


# For standalone use.
DUPEFILTER_KEY = 'dupefilter:%(timestamp)s'

PIPELINE_KEY = '%(spider)s:items'

REDIS_CLS = redis.StrictRedis
REDIS_ENCODING = 'utf-8'
# Sane connection defaults.
REDIS_PARAMS = {
    'socket_timeout': 30,
    'socket_connect_timeout': 30,
    'retry_on_timeout': True,
    'encoding': REDIS_ENCODING,
}

SCHEDULER_QUEUE_KEY = '%(spider)s:requests'
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'
SCHEDULER_DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'

START_URLS_KEY = '%(name)s:start_urls'
START_URLS_AS_SET = False
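The key constants above are templates that the scheduler, pipeline and spiders interpolate at runtime with the spider name (or a timestamp). For example ('zhihu' is just an illustrative spider name):

```python
# How the key templates are filled in elsewhere in this diff.
SCHEDULER_QUEUE_KEY = '%(spider)s:requests'
print(SCHEDULER_QUEUE_KEY % {'spider': 'zhihu'})    # -> 'zhihu:requests'

DUPEFILTER_KEY = 'dupefilter:%(timestamp)s'
print(DUPEFILTER_KEY % {'timestamp': 1556000000})   # -> 'dupefilter:1556000000'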
@@ -1,164 +0,0 @@
import logging
import time

from scrapy.dupefilters import BaseDupeFilter
from scrapy.utils.request import request_fingerprint

from . import defaults
from .connection import get_redis_from_settings
from libs.bloomfilter import BloomFilter, conn


logger = logging.getLogger(__name__)


# TODO: Rename class to RedisDupeFilter.
class RFPDupeFilter(BaseDupeFilter):
    """Redis-based request duplicates filter.

    This class can also be used with default Scrapy's scheduler.

    """

    logger = logger

    def __init__(self, server, key, debug=False):
        """Initialize the duplicates filter.

        Parameters
        ----------
        server : redis.StrictRedis
            The redis server instance.
        key : str
            Redis key where to store fingerprints.
        debug : bool, optional
            Whether to log filtered requests.

        """
        self.server = server
        self.key = key
        self.debug = debug
        self.logdupes = True
        self.bf = BloomFilter(conn=conn, key=key)

    @classmethod
    def from_settings(cls, settings):
        """Returns an instance from given settings.

        This uses by default the key ``dupefilter:<timestamp>``. When using the
        ``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
        it needs to pass the spider name in the key.

        Parameters
        ----------
        settings : scrapy.settings.Settings

        Returns
        -------
        RFPDupeFilter
            A RFPDupeFilter instance.


        """
        server = get_redis_from_settings(settings)
        # XXX: This creates a one-time key. It is needed to support using this
        # class as a standalone dupefilter with scrapy's default scheduler;
        # if scrapy passed the spider on the open() method this wouldn't be needed.
        # TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
        key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
        debug = settings.getbool('DUPEFILTER_DEBUG')
        return cls(server, key=key, debug=debug)

    @classmethod
    def from_crawler(cls, crawler):
        """Returns instance from crawler.

        Parameters
        ----------
        crawler : scrapy.crawler.Crawler

        Returns
        -------
        RFPDupeFilter
            Instance of RFPDupeFilter.

        """
        return cls.from_settings(crawler.settings)

    def request_seen(self, request):
        """Returns True if request was already seen.

        Parameters
        ----------
        request : scrapy.http.Request

        Returns
        -------
        bool

        """
        fp = self.request_fingerprint(request)
        if self.bf.is_exist(fp):
            return True
        else:
            self.bf.add(fp)
            return False
        # # This returns the number of values added, zero if already exists.
        # added = self.server.sadd(self.key, fp)
        # return added == 0

    def request_fingerprint(self, request):
        """Returns a fingerprint for a given request.

        Parameters
        ----------
        request : scrapy.http.Request

        Returns
        -------
        str

        """
        return request_fingerprint(request)

    @classmethod
    def from_spider(cls, spider):
        settings = spider.settings
        server = get_redis_from_settings(settings)
        dupefilter_key = settings.get("SCHEDULER_DUPEFILTER_KEY", defaults.SCHEDULER_DUPEFILTER_KEY)
        key = dupefilter_key % {'spider': spider.name}
        debug = settings.getbool('DUPEFILTER_DEBUG')
        return cls(server, key=key, debug=debug)

    def close(self, reason=''):
        """Delete data on close. Called by Scrapy's scheduler.

        Parameters
        ----------
        reason : str, optional

        """
        self.clear()

    def clear(self):
        """Clears fingerprints data."""
        self.server.delete(self.key)

    def log(self, request, spider):
        """Logs given request.

        Parameters
        ----------
        request : scrapy.http.Request
        spider : scrapy.spiders.Spider

        """
        if self.debug:
            msg = "Filtered duplicate request: %(request)s"
            self.logger.debug(msg, {'request': request}, extra={'spider': spider})
        elif self.logdupes:
            msg = ("Filtered duplicate request %(request)s"
                   " - no more duplicates will be shown"
                   " (see DUPEFILTER_DEBUG to show all duplicates)")
            self.logger.debug(msg, {'request': request}, extra={'spider': spider})
            self.logdupes = False
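The `libs.bloomfilter` module imported above is not part of this diff, so only the interface used in `request_seen` can be inferred: a shared `conn` client plus `is_exist`/`add` methods keyed by fingerprint. A rough, illustrative sketch of what such a Redis-backed Bloom filter typically looks like, assuming `mmh3` (which is pinned in the requirements) and a Redis bitmap; the internals here are assumptions, not the project's actual implementation:

```python
import mmh3
import redis

# Assumed shared connection, mirroring `from libs.bloomfilter import conn`.
conn = redis.StrictRedis(host='localhost', port=6379)


class BloomFilter(object):
    """Illustrative Redis-bitmap Bloom filter exposing is_exist/add."""

    def __init__(self, conn, key, bit_size=1 << 30, seeds=(5, 7, 11, 13, 31)):
        self.conn = conn
        self.key = key
        self.bit_size = bit_size
        self.seeds = seeds

    def _offsets(self, value):
        # One bit offset per hash seed; Python's % keeps the result non-negative.
        return [mmh3.hash(value, seed) % self.bit_size for seed in self.seeds]

    def is_exist(self, value):
        # All bits set -> probably seen before (false positives are possible).
        return all(self.conn.getbit(self.key, off) for off in self._offsets(value))

    def add(self, value):
        for off in self._offsets(value):
            self.conn.setbit(self.key, off, 1)
```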
@@ -1,14 +0,0 @@
"""A pickle wrapper module with protocol=-1 by default."""

try:
    import cPickle as pickle  # PY2
except ImportError:
    import pickle


def loads(s):
    return pickle.loads(s)


def dumps(obj):
    return pickle.dumps(obj, protocol=-1)
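A quick round trip with this wrapper shows the `loads`/`dumps` interface the queues rely on (assuming the module is importable as `scrapy_redis.picklecompat`):

```python
from scrapy_redis import picklecompat

blob = picklecompat.dumps({'url': 'https://www.zhihu.com'})  # bytes, highest protocol
assert picklecompat.loads(blob) == {'url': 'https://www.zhihu.com'}
```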
@@ -1,76 +0,0 @@
from scrapy.utils.misc import load_object
from scrapy.utils.serialize import ScrapyJSONEncoder
from twisted.internet.threads import deferToThread

from . import connection, defaults


default_serialize = ScrapyJSONEncoder().encode


class RedisPipeline(object):
    """Pushes serialized item into a redis list/queue

    Settings
    --------
    REDIS_ITEMS_KEY : str
        Redis key where to store items.
    REDIS_ITEMS_SERIALIZER : str
        Object path to serializer function.

    """

    def __init__(self, server,
                 key=defaults.PIPELINE_KEY,
                 serialize_func=default_serialize):
        """Initialize pipeline.

        Parameters
        ----------
        server : StrictRedis
            Redis client instance.
        key : str
            Redis key where to store items.
        serialize_func : callable
            Items serializer function.

        """
        self.server = server
        self.key = key
        self.serialize = serialize_func

    @classmethod
    def from_settings(cls, settings):
        params = {
            'server': connection.from_settings(settings),
        }
        if settings.get('REDIS_ITEMS_KEY'):
            params['key'] = settings['REDIS_ITEMS_KEY']
        if settings.get('REDIS_ITEMS_SERIALIZER'):
            params['serialize_func'] = load_object(
                settings['REDIS_ITEMS_SERIALIZER']
            )

        return cls(**params)

    @classmethod
    def from_crawler(cls, crawler):
        return cls.from_settings(crawler.settings)

    def process_item(self, item, spider):
        return deferToThread(self._process_item, item, spider)

    def _process_item(self, item, spider):
        key = self.item_key(item, spider)
        data = self.serialize(item)
        self.server.rpush(key, data)
        return item

    def item_key(self, item, spider):
        """Returns redis key based on given spider.

        Override this function to use a different key depending on the item
        and/or spider.

        """
        return self.key % {'spider': spider.name}
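`RedisPipeline` pushes every serialized item onto the key given by `PIPELINE_KEY` ('%(spider)s:items' by default). A minimal sketch of a consumer draining that list, assuming a local Redis and a spider named 'zhihu' (both assumptions for illustration):

```python
import json
import redis

r = redis.StrictRedis()          # assumed local Redis instance
raw = r.lpop('zhihu:items')      # key follows the '%(spider)s:items' template
if raw is not None:
    item = json.loads(raw)       # items were serialized with ScrapyJSONEncoder
```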
@@ -1,147 +0,0 @@
from scrapy.utils.reqser import request_to_dict, request_from_dict

from . import picklecompat


class Base(object):
    """Per-spider base queue class"""

    def __init__(self, server, spider, key, serializer=None):
        """Initialize per-spider redis queue.

        Parameters
        ----------
        server : StrictRedis
            Redis client instance.
        spider : Spider
            Scrapy spider instance.
        key: str
            Redis key where to put and get messages.
        serializer : object
            Serializer object with ``loads`` and ``dumps`` methods.

        """
        if serializer is None:
            # Backward compatibility.
            # TODO: deprecate pickle.
            serializer = picklecompat
        if not hasattr(serializer, 'loads'):
            raise TypeError("serializer does not implement 'loads' function: %r"
                            % serializer)
        if not hasattr(serializer, 'dumps'):
            raise TypeError("serializer does not implement 'dumps' function: %r"
                            % serializer)

        self.server = server
        self.spider = spider
        self.key = key % {'spider': spider.name}
        self.serializer = serializer

    def _encode_request(self, request):
        """Encode a request object"""
        obj = request_to_dict(request, self.spider)
        return self.serializer.dumps(obj)

    def _decode_request(self, encoded_request):
        """Decode a request previously encoded"""
        obj = self.serializer.loads(encoded_request)
        return request_from_dict(obj, self.spider)

    def __len__(self):
        """Return the length of the queue"""
        raise NotImplementedError

    def push(self, request):
        """Push a request"""
        raise NotImplementedError

    def pop(self, timeout=0):
        """Pop a request"""
        raise NotImplementedError

    def clear(self):
        """Clear queue/stack"""
        self.server.delete(self.key)


class FifoQueue(Base):
    """Per-spider FIFO queue"""

    def __len__(self):
        """Return the length of the queue"""
        return self.server.llen(self.key)

    def push(self, request):
        """Push a request"""
        self.server.lpush(self.key, self._encode_request(request))

    def pop(self, timeout=0):
        """Pop a request"""
        if timeout > 0:
            data = self.server.brpop(self.key, timeout)
            if isinstance(data, tuple):
                data = data[1]
        else:
            data = self.server.rpop(self.key)
        if data:
            return self._decode_request(data)


class PriorityQueue(Base):
    """Per-spider priority queue abstraction using redis' sorted set"""

    def __len__(self):
        """Return the length of the queue"""
        return self.server.zcard(self.key)

    def push(self, request):
        """Push a request"""
        data = self._encode_request(request)
        score = -request.priority
        # We don't use the zadd method as the order of arguments changes depending on
        # whether the class is Redis or StrictRedis, and the option of using
        # kwargs only accepts strings, not bytes.
        self.server.execute_command('ZADD', self.key, score, data)

    def pop(self, timeout=0):
        """
        Pop a request
        timeout is not supported in this queue class
        """
        # use atomic range/remove using multi/exec
        pipe = self.server.pipeline()
        pipe.multi()
        pipe.zrange(self.key, 0, 0).zremrangebyrank(self.key, 0, 0)
        results, count = pipe.execute()
        if results:
            return self._decode_request(results[0])


class LifoQueue(Base):
    """Per-spider LIFO queue."""

    def __len__(self):
        """Return the length of the stack"""
        return self.server.llen(self.key)

    def push(self, request):
        """Push a request"""
        self.server.lpush(self.key, self._encode_request(request))

    def pop(self, timeout=0):
        """Pop a request"""
        if timeout > 0:
            data = self.server.blpop(self.key, timeout)
            if isinstance(data, tuple):
                data = data[1]
        else:
            data = self.server.lpop(self.key)

        if data:
            return self._decode_request(data)


# TODO: Deprecate the use of these names.
SpiderQueue = FifoQueue
SpiderStack = LifoQueue
SpiderPriorityQueue = PriorityQueue
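Because `PriorityQueue` stores requests in a sorted set with `score = -request.priority`, higher-priority requests get lower scores and therefore sort first, so `zrange(key, 0, 0)` pops them before lower-priority ones. A small standalone sketch of that ordering, using a bare sorted set instead of real Scrapy requests and assuming a local Redis:

```python
import redis

r = redis.StrictRedis()          # assumed local Redis instance
r.delete('demo:requests')
for name, priority in [('low', 0), ('high', 10)]:
    # Same call style as PriorityQueue.push: score is the negated priority.
    r.execute_command('ZADD', 'demo:requests', -priority, name)

print(r.zrange('demo:requests', 0, 0))  # [b'high'] -- popped before 'low'
```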
@@ -1,170 +0,0 @@
import importlib
import six

from scrapy.utils.misc import load_object

from . import connection, defaults


# TODO: add SCRAPY_JOB support.
class Scheduler(object):
    """Redis-based scheduler

    Settings
    --------
    SCHEDULER_PERSIST : bool (default: False)
        Whether to persist or clear redis queue.
    SCHEDULER_FLUSH_ON_START : bool (default: False)
        Whether to flush redis queue on start.
    SCHEDULER_IDLE_BEFORE_CLOSE : int (default: 0)
        How many seconds to wait before closing if no message is received.
    SCHEDULER_QUEUE_KEY : str
        Scheduler redis key.
    SCHEDULER_QUEUE_CLASS : str
        Scheduler queue class.
    SCHEDULER_DUPEFILTER_KEY : str
        Scheduler dupefilter redis key.
    SCHEDULER_DUPEFILTER_CLASS : str
        Scheduler dupefilter class.
    SCHEDULER_SERIALIZER : str
        Scheduler serializer.

    """

    def __init__(self, server,
                 persist=False,
                 flush_on_start=False,
                 queue_key=defaults.SCHEDULER_QUEUE_KEY,
                 queue_cls=defaults.SCHEDULER_QUEUE_CLASS,
                 dupefilter_key=defaults.SCHEDULER_DUPEFILTER_KEY,
                 dupefilter_cls=defaults.SCHEDULER_DUPEFILTER_CLASS,
                 idle_before_close=0,
                 serializer=None):
        """Initialize scheduler.

        Parameters
        ----------
        server : Redis
            The redis server instance.
        persist : bool
            Whether to flush requests when closing. Default is False.
        flush_on_start : bool
            Whether to flush requests on start. Default is False.
        queue_key : str
            Requests queue key.
        queue_cls : str
            Importable path to the queue class.
        dupefilter_key : str
            Duplicates filter key.
        dupefilter_cls : str
            Importable path to the dupefilter class.
        idle_before_close : int
            Timeout before giving up.

        """
        if idle_before_close < 0:
            raise TypeError("idle_before_close cannot be negative")

        self.server = server
        self.persist = persist
        self.flush_on_start = flush_on_start
        self.queue_key = queue_key
        self.queue_cls = queue_cls
        self.dupefilter_cls = dupefilter_cls
        self.dupefilter_key = dupefilter_key
        self.idle_before_close = idle_before_close
        self.serializer = serializer
        self.stats = None

    def __len__(self):
        return len(self.queue)

    @classmethod
    def from_settings(cls, settings):
        kwargs = {
            'persist': settings.getbool('SCHEDULER_PERSIST'),
            'flush_on_start': settings.getbool('SCHEDULER_FLUSH_ON_START'),
            'idle_before_close': settings.getint('SCHEDULER_IDLE_BEFORE_CLOSE'),
        }

        # If these values are missing, it means we want to use the defaults.
        optional = {
            # TODO: Use custom prefixes for these settings to note that they are
            # specific to scrapy-redis.
            'queue_key': 'SCHEDULER_QUEUE_KEY',
            'queue_cls': 'SCHEDULER_QUEUE_CLASS',
            'dupefilter_key': 'SCHEDULER_DUPEFILTER_KEY',
            # We use the default setting name to keep compatibility.
            'dupefilter_cls': 'DUPEFILTER_CLASS',
            'serializer': 'SCHEDULER_SERIALIZER',
        }
        for name, setting_name in optional.items():
            val = settings.get(setting_name)
            if val:
                kwargs[name] = val

        # Support serializer as a path to a module.
        if isinstance(kwargs.get('serializer'), six.string_types):
            kwargs['serializer'] = importlib.import_module(kwargs['serializer'])

        server = connection.from_settings(settings)
        # Ensure the connection is working.
        server.ping()

        return cls(server=server, **kwargs)

    @classmethod
    def from_crawler(cls, crawler):
        instance = cls.from_settings(crawler.settings)
        # FIXME: for now, stats are only supported from this constructor
        instance.stats = crawler.stats
        return instance

    def open(self, spider):
        self.spider = spider

        try:
            self.queue = load_object(self.queue_cls)(
                server=self.server,
                spider=spider,
                key=self.queue_key % {'spider': spider.name},
                serializer=self.serializer,
            )
        except TypeError as e:
            raise ValueError("Failed to instantiate queue class '%s': %s"
                             % (self.queue_cls, e))

        self.df = load_object(self.dupefilter_cls).from_spider(spider)

        if self.flush_on_start:
            self.flush()
        # notice if there are requests already in the queue to resume the crawl
        if len(self.queue):
            spider.log("Resuming crawl (%d requests scheduled)" % len(self.queue))

    def close(self, reason):
        if not self.persist:
            self.flush()

    def flush(self):
        self.df.clear()
        self.queue.clear()

    def enqueue_request(self, request):
        if not request.dont_filter and self.df.request_seen(request):
            self.df.log(request, self.spider)
            return False
        if self.stats:
            self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
        self.queue.push(request)
        return True

    def next_request(self):
        block_pop_timeout = self.idle_before_close
        request = self.queue.pop(block_pop_timeout)
        if request and self.stats:
            self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
        return request

    def has_pending_requests(self):
        return len(self) > 0
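All of the SCHEDULER_* knobs documented in the class docstring are ordinary Scrapy settings; the ones this project actually sets appear in the settings.py hunk further down. For reference, a minimal configuration to run this scheduler would look roughly like the sketch below (the values are illustrative, not taken from this repository):

```python
# Hypothetical minimal settings.py fragment for the redis scheduler.
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
SCHEDULER_PERSIST = True            # keep the queue and dupefilter between runs
SCHEDULER_FLUSH_ON_START = False    # do not wipe pending requests on start
REDIS_URL = 'redis://localhost:6379/0'
```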
@@ -1,187 +0,0 @@
from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider, CrawlSpider

from . import connection, defaults
from .utils import bytes_to_str


class RedisMixin(object):
    """Mixin class to implement reading urls from a redis queue."""
    redis_key = None
    redis_batch_size = None
    redis_encoding = None

    # Redis client placeholder.
    server = None

    def start_requests(self):
        """Returns a batch of start requests from redis."""
        return self.next_requests()

    def setup_redis(self, crawler=None):
        """Setup redis connection and idle signal.

        This should be called after the spider has set its crawler object.
        """
        if self.server is not None:
            return

        if crawler is None:
            # We allow optional crawler argument to keep backwards
            # compatibility.
            # XXX: Raise a deprecation warning.
            crawler = getattr(self, 'crawler', None)

        if crawler is None:
            raise ValueError("crawler is required")

        settings = crawler.settings

        if self.redis_key is None:
            self.redis_key = settings.get(
                'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
            )

        self.redis_key = self.redis_key % {'name': self.name}

        if not self.redis_key.strip():
            raise ValueError("redis_key must not be empty")

        if self.redis_batch_size is None:
            # TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
            self.redis_batch_size = settings.getint(
                'REDIS_START_URLS_BATCH_SIZE',
                settings.getint('CONCURRENT_REQUESTS'),
            )

        try:
            self.redis_batch_size = int(self.redis_batch_size)
        except (TypeError, ValueError):
            raise ValueError("redis_batch_size must be an integer")

        if self.redis_encoding is None:
            self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)

        self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
                         "(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s)",
                         self.__dict__)

        self.server = connection.from_settings(crawler.settings)
        # The idle signal is called when the spider has no requests left,
        # that's when we will schedule new requests from redis queue
        crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)

    def next_requests(self):
        """Yields requests to be scheduled, if any are available."""
        use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
        fetch_one = self.server.spop if use_set else self.server.lpop
        # XXX: Do we need to use a timeout here?
        found = 0
        # TODO: Use redis pipeline execution.
        while found < self.redis_batch_size:
            data = fetch_one(self.redis_key)
            if not data:
                # Queue empty.
                break
            req = self.make_request_from_data(data)
            if req:
                yield req
                found += 1
            else:
                self.logger.debug("Request not made from data: %r", data)

        if found:
            self.logger.debug("Read %s requests from '%s'", found, self.redis_key)

    def make_request_from_data(self, data):
        """Returns a Request instance from data coming from Redis.

        By default, ``data`` is an encoded URL. You can override this method to
        provide your own message decoding.

        Parameters
        ----------
        data : bytes
            Message from redis.

        """
        url = bytes_to_str(data, self.redis_encoding)
        return self.make_requests_from_url(url)

    def schedule_next_requests(self):
        """Schedules a request if available"""
        # TODO: While there is capacity, schedule a batch of redis requests.
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    def spider_idle(self):
        """Schedules a request if available, otherwise waits."""
        # XXX: Handle a sentinel to close the spider.
        self.schedule_next_requests()
        raise DontCloseSpider


class RedisSpider(RedisMixin, Spider):
    """Spider that reads urls from redis queue when idle.

    Attributes
    ----------
    redis_key : str (default: REDIS_START_URLS_KEY)
        Redis key where to fetch start URLs from.
    redis_batch_size : int (default: CONCURRENT_REQUESTS)
        Number of messages to fetch from redis on each attempt.
    redis_encoding : str (default: REDIS_ENCODING)
        Encoding to use when decoding messages from redis queue.

    Settings
    --------
    REDIS_START_URLS_KEY : str (default: "<spider.name>:start_urls")
        Default Redis key where to fetch start URLs from.
    REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)
        Default number of messages to fetch from redis on each attempt.
    REDIS_START_URLS_AS_SET : bool (default: False)
        Use SET operations to retrieve messages from the redis queue. If False,
        the messages are retrieved using the LPOP command.
    REDIS_ENCODING : str (default: "utf-8")
        Default encoding to use when decoding messages from redis queue.

    """

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        obj = super(RedisSpider, cls).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj


class RedisCrawlSpider(RedisMixin, CrawlSpider):
    """Spider that reads urls from redis queue when idle.

    Attributes
    ----------
    redis_key : str (default: REDIS_START_URLS_KEY)
        Redis key where to fetch start URLs from.
    redis_batch_size : int (default: CONCURRENT_REQUESTS)
        Number of messages to fetch from redis on each attempt.
    redis_encoding : str (default: REDIS_ENCODING)
        Encoding to use when decoding messages from redis queue.

    Settings
    --------
    REDIS_START_URLS_KEY : str (default: "<spider.name>:start_urls")
        Default Redis key where to fetch start URLs from.
    REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)
        Default number of messages to fetch from redis on each attempt.
    REDIS_START_URLS_AS_SET : bool (default: True)
        Use SET operations to retrieve messages from the redis queue.
    REDIS_ENCODING : str (default: "utf-8")
        Default encoding to use when decoding messages from redis queue.

    """

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        obj = super(RedisCrawlSpider, cls).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj
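A RedisSpider sits idle until something appears under its start-URLs key, so seeding the crawl happens outside the spider. Assuming the spider is named 'zhihu' and START_URLS_AS_SET is left at False (so the key is a plain list), a seed can be pushed like this:

```python
import redis

r = redis.StrictRedis()   # assumed local Redis; redis-cli LPUSH works the same way
# Key follows the '%(name)s:start_urls' template; the URL is only an example.
r.lpush('zhihu:start_urls', 'https://www.zhihu.com/question/19550225')
```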
@@ -1,8 +0,0 @@
import six


def bytes_to_str(s, encoding='utf-8'):
    """Returns a str if a bytes object is given."""
    if six.PY3 and isinstance(s, bytes):
        return s.decode(encoding)
    return s
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,77 +0,0 @@
# -*- coding: utf-8 -*-
import scrapy
from datetime import datetime
from settings import SQL_DATETIME_FORMAT


class ZhihuQuestionItem(scrapy.Item):
    '''
    zhihu's question item design
    '''
    question_id = scrapy.Field()
    topics = scrapy.Field()
    question_url = scrapy.Field()
    title = scrapy.Field()
    content = scrapy.Field()
    create_time = scrapy.Field()
    update_time = scrapy.Field()
    answer_nums = scrapy.Field()
    comment_nums = scrapy.Field()
    watch_user_nums = scrapy.Field()
    click_nums = scrapy.Field()
    crawl_time = scrapy.Field()
    crawl_update_time = scrapy.Field()

    def get_insert_sql(self):
        '''
        get insert_sql and parameters of question
        '''
        insert_sql = "insert into question(question_id, topics, question_url, title, content, answer_nums, " \
                     "comment_nums, watch_user_nums, click_nums, crawl_time)VALUES (%s, %s, %s, %s, %s, %s, %s, %s, " \
                     "%s, %s)ON DUPLICATE KEY UPDATE content=VALUES(content), answer_nums=VALUES(" \
                     "answer_nums),comment_nums=VALUES(comment_nums), watch_user_nums=VALUES" \
                     "(watch_user_nums),click_nums=VALUES(click_nums)"

        parameters = (
            self['question_id'], self['topics'], self['question_url'],
            self['title'], self['content'], self['answer_nums'],
            self['comment_nums'], self['watch_user_nums'],
            self['click_nums'], self['crawl_time']
        )
        return insert_sql, parameters


class ZhihuAnswerItem(scrapy.Item):
    '''
    zhihu's answer item design
    '''
    answer_id = scrapy.Field()
    question_id = scrapy.Field()
    answer_url = scrapy.Field()
    author_id = scrapy.Field()
    content = scrapy.Field()
    praise_nums = scrapy.Field()
    comment_nums = scrapy.Field()
    create_time = scrapy.Field()
    update_time = scrapy.Field()
    crawl_time = scrapy.Field()
    crawl_update_time = scrapy.Field()

    def get_insert_sql(self):
        '''
        get insert_sql and parameters of answer
        '''
        insert_sql = "insert into answer(answer_id, question_id, answer_url, author_id, content, praise_nums, " \
                     "comment_nums, create_time, update_time, crawl_time) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, " \
                     "%s, %s)ON DUPLICATE KEY UPDATE content=VALUES(content), praise_nums=VALUES(" \
                     "praise_nums), comment_nums=VALUES(comment_nums), update_time=VALUES(update_time)"

        create_time = datetime.fromtimestamp(self['create_time']).strftime(SQL_DATETIME_FORMAT)
        update_time = datetime.fromtimestamp(self['update_time']).strftime(SQL_DATETIME_FORMAT)

        parameters = (
            self['answer_id'], self['question_id'], self['answer_url'],
            self['author_id'], self['content'], self['praise_nums'],
            self['comment_nums'], create_time, update_time, self['crawl_time']
        )
        return insert_sql, parameters
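ZhihuAnswerItem stores create/update times as epoch seconds and converts them with SQL_DATETIME_FORMAT just before binding the parameters. The conversion on its own is simply:

```python
from datetime import datetime

SQL_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'  # as defined in the project settings
print(datetime.fromtimestamp(1556000000).strftime(SQL_DATETIME_FORMAT))
# e.g. '2019-04-23 13:33:20' -- the exact string depends on the local timezone
```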
@@ -1,48 +0,0 @@
# -*- coding: utf-8 -*-
from pymysql.cursors import DictCursor
from twisted.enterprise import adbapi


class MySQLTwistedPipeline(object):
    '''
    Run MySQL insert operations asynchronously.
    '''
    def __init__(self, db_pool):
        self.db_pool = db_pool

    @classmethod
    def from_settings(cls, settings):
        '''
        create db_pool
        '''
        db_parameters = dict(
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DBNAME"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset="utf8",
            cursorclass=DictCursor,
            use_unicode=True
        )
        db_pool = adbapi.ConnectionPool("pymysql", **db_parameters)
        return cls(db_pool)

    def process_item(self, item, spider):
        '''
        process item
        '''
        query = self.db_pool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)

    def handle_error(self, failure, item, spider):
        '''
        handle error of insert to mysql
        '''
        print(failure)

    def do_insert(self, cursor, item):
        '''
        insert data into the database
        '''
        insert_sql, parameters = item.get_insert_sql()
        cursor.execute(insert_sql, parameters)
Binary file not shown.
@@ -1,10 +0,0 @@
# -*- coding: utf-8 -*-
from .connection import (  # NOQA
    get_redis,
    get_redis_from_settings,
)


__author__ = 'Rolando Espinoza'
__email__ = 'rolando at rmax.io'
__version__ = '0.7.0-dev'
@@ -1,146 +0,0 @@
# -*- coding: utf-8 -*-
import os
import sys

BOT_NAME = 'Zhihu'

SPIDER_MODULES = ['Zhihu.spiders']
NEWSPIDER_MODULE = 'Zhihu.spiders'

SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'Zhihu (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = True

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#     'Zhihu.middlewares.ZhihuSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'Zhihu.middlewares.ZhihuDownloaderMiddleware': 543,
    'Zhihu.middlewares.RedirectDealDownloaderMiddleware': 3,
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    'Zhihu.middlewares.RandomUserAgentDownloaderMiddleware': 1,
    'Zhihu.middlewares.ProxyDownloaderMiddleware': 2,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # 'Zhihu.pipelines.ZhihuPipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 300,
    'Zhihu.items.ZhihuAnswerItem': 1,
    'Zhihu.items.ZhihuQuestionItem': 2,
    'Zhihu.pipelines.MySQLTwistedPipeline': 3,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
MYSQL_HOST = 'localhost'
MYSQL_DBNAME = 'zhihu'
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'root'

BASE_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR, 'Zhihu'))
# print(BASE_DIR)

ZHIHU_ACCOUNT = 'username'
ZHIHU_PASSWORD = 'password'

CHAOJIYING_ACCOUNT = 'username'
CHAOJIYING_PASSWORD = 'password'
CAPTCHA_TYPE = '898966'

SQL_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
SQL_DATE_FORMAT = '%Y-%m-%d'

USER_AGENT_LIST = [
    # "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60",
    "Opera/8.0 (Windows NT 5.1; U; en)",
    "Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50",
    # Firefox
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
    # Safari
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
    # Chrome
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16",
    # 360
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
    # Taobao Browser
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
    # Liebao (Cheetah) Browser
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
    # QQ Browser
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    # Sogou Browser
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)",
    # Maxthon Browser
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000 Chrome/30.0.1599.101 Safari/537.36",
    # UC Browser
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36",
]
@@ -0,0 +1 @@
undefined