Configuring scrapy-redis in settings.py
From: https://blog.csdn.net/weixin_37947156/article/details/75082061
Scrapy for Beginners, Part 3 (distributed crawling with Scrapy-Redis and a cookies pool): https://cuiqingcai.com/4048.html
Before we start, we need to know the scrapy-redis configuration options. Note: all of these settings go into the Scrapy project's settings.py!
All of Scrapy's default settings
scrapy/settings/default_settings.py
""" This module contains the default values for all settings used by Scrapy.For more information about these settings you can read the settings documentation in docs/topics/settings.rstScrapy developers, if you add a setting here remember to:* add it in alphabetical order * group similar settings without leaving blank lines * add its documentation to the available settings documentation(docs/topics/settings.rst)"""import sys from importlib import import_module from os.path import join, abspath, dirnameimport sixAJAXCRAWL_ENABLED = FalseAUTOTHROTTLE_ENABLED = False AUTOTHROTTLE_DEBUG = False AUTOTHROTTLE_MAX_DELAY = 60.0 AUTOTHROTTLE_START_DELAY = 5.0 AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0BOT_NAME = 'scrapybot'CLOSESPIDER_TIMEOUT = 0 CLOSESPIDER_PAGECOUNT = 0 CLOSESPIDER_ITEMCOUNT = 0 CLOSESPIDER_ERRORCOUNT = 0COMMANDS_MODULE = ''COMPRESSION_ENABLED = TrueCONCURRENT_ITEMS = 100CONCURRENT_REQUESTS = 16 CONCURRENT_REQUESTS_PER_DOMAIN = 8 CONCURRENT_REQUESTS_PER_IP = 0COOKIES_ENABLED = True COOKIES_DEBUG = FalseDEFAULT_ITEM_CLASS = 'scrapy.item.Item'DEFAULT_REQUEST_HEADERS = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8','Accept-Language': 'en', }DEPTH_LIMIT = 0 DEPTH_STATS_VERBOSE = False DEPTH_PRIORITY = 0DNSCACHE_ENABLED = True DNSCACHE_SIZE = 10000 DNS_TIMEOUT = 60DOWNLOAD_DELAY = 0# 用戶可自定義的下載處理器 DOWNLOAD_HANDLERS = {} # 默認的下載處理器 DOWNLOAD_HANDLERS_BASE = {'data': 'scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler','file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler','http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler','https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler','s3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler','ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler', }DOWNLOAD_TIMEOUT = 180 # 3minsDOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m DOWNLOAD_WARNSIZE = 32*1024*1024 # 32mDOWNLOAD_FAIL_ON_DATALOSS = TrueDOWNLOADER = 'scrapy.core.downloader.Downloader'DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory' DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory' DOWNLOADER_CLIENT_TLS_METHOD = 'TLS' # Use highest TLS/SSL protocol version supported by the platform,# also allowing negotiationDOWNLOADER_MIDDLEWARES = {}DOWNLOADER_MIDDLEWARES_BASE = {# Engine side'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,# Downloader side }DOWNLOADER_STATS = TrueDUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'EDITOR = 'vi' if sys.platform == 'win32':EDITOR = '%s -m idlelib.idle'EXTENSIONS = {}EXTENSIONS_BASE = 
{'scrapy.extensions.corestats.CoreStats': 0,'scrapy.extensions.telnet.TelnetConsole': 0,'scrapy.extensions.memusage.MemoryUsage': 0,'scrapy.extensions.memdebug.MemoryDebugger': 0,'scrapy.extensions.closespider.CloseSpider': 0,'scrapy.extensions.feedexport.FeedExporter': 0,'scrapy.extensions.logstats.LogStats': 0,'scrapy.extensions.spiderstate.SpiderState': 0,'scrapy.extensions.throttle.AutoThrottle': 0, }FEED_TEMPDIR = None FEED_URI = None FEED_URI_PARAMS = None # a function to extend uri arguments FEED_FORMAT = 'jsonlines' FEED_STORE_EMPTY = False FEED_EXPORT_ENCODING = None FEED_EXPORT_FIELDS = None FEED_STORAGES = {} FEED_STORAGES_BASE = {'': 'scrapy.extensions.feedexport.FileFeedStorage','file': 'scrapy.extensions.feedexport.FileFeedStorage','stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage','s3': 'scrapy.extensions.feedexport.S3FeedStorage','ftp': 'scrapy.extensions.feedexport.FTPFeedStorage', } FEED_EXPORTERS = {} FEED_EXPORTERS_BASE = {'json': 'scrapy.exporters.JsonItemExporter','jsonlines': 'scrapy.exporters.JsonLinesItemExporter','jl': 'scrapy.exporters.JsonLinesItemExporter','csv': 'scrapy.exporters.CsvItemExporter','xml': 'scrapy.exporters.XmlItemExporter','marshal': 'scrapy.exporters.MarshalItemExporter','pickle': 'scrapy.exporters.PickleItemExporter', } FEED_EXPORT_INDENT = 0FILES_STORE_S3_ACL = 'private' FILES_STORE_GCS_ACL = ''FTP_USER = 'anonymous' FTP_PASSWORD = 'guest' FTP_PASSIVE_MODE = TrueHTTPCACHE_ENABLED = False HTTPCACHE_DIR = 'httpcache' HTTPCACHE_IGNORE_MISSING = False HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' HTTPCACHE_EXPIRATION_SECS = 0 HTTPCACHE_ALWAYS_STORE = False HTTPCACHE_IGNORE_HTTP_CODES = [] HTTPCACHE_IGNORE_SCHEMES = ['file'] HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = [] HTTPCACHE_DBM_MODULE = 'anydbm' if six.PY2 else 'dbm' HTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy' HTTPCACHE_GZIP = FalseHTTPPROXY_ENABLED = True HTTPPROXY_AUTH_ENCODING = 'latin-1'IMAGES_STORE_S3_ACL = 'private' IMAGES_STORE_GCS_ACL = ''ITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'ITEM_PIPELINES = {} ITEM_PIPELINES_BASE = {}LOG_ENABLED = True LOG_ENCODING = 'utf-8' LOG_FORMATTER = 'scrapy.logformatter.LogFormatter' LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s' LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S' LOG_STDOUT = False LOG_LEVEL = 'DEBUG' LOG_FILE = None LOG_SHORT_NAMES = FalseSCHEDULER_DEBUG = FalseLOGSTATS_INTERVAL = 60.0MAIL_HOST = 'localhost' MAIL_PORT = 25 MAIL_FROM = 'scrapy@localhost' MAIL_PASS = None MAIL_USER = NoneMEMDEBUG_ENABLED = False # enable memory debugging MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdownMEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0 MEMUSAGE_ENABLED = True MEMUSAGE_LIMIT_MB = 0 MEMUSAGE_NOTIFY_MAIL = [] MEMUSAGE_WARNING_MB = 0METAREFRESH_ENABLED = True METAREFRESH_MAXDELAY = 100NEWSPIDER_MODULE = ''RANDOMIZE_DOWNLOAD_DELAY = TrueREACTOR_THREADPOOL_MAXSIZE = 10REDIRECT_ENABLED = True REDIRECT_MAX_TIMES = 20 # uses Firefox default setting REDIRECT_PRIORITY_ADJUST = +2REFERER_ENABLED = True REFERRER_POLICY = 'scrapy.spidermiddlewares.referer.DefaultReferrerPolicy'RETRY_ENABLED = True RETRY_TIMES = 2 # initial response + 2 retries = 3 requests RETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408] RETRY_PRIORITY_ADJUST = -1ROBOTSTXT_OBEY = FalseSCHEDULER = 'scrapy.core.scheduler.Scheduler'# 基于磁盤的任務(wù)隊列(后進先出) SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'# 基于內(nèi)存的任務(wù)隊列(后進先出) SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'# 
優(yōu)先級隊列 SCHEDULER_PRIORITY_QUEUE = 'queuelib.PriorityQueue'SPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader' SPIDER_LOADER_WARN_ONLY = FalseSPIDER_MIDDLEWARES = {}SPIDER_MIDDLEWARES_BASE = {# Engine side'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,# Spider side }SPIDER_MODULES = []STATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector' STATS_DUMP = TrueSTATSMAILER_RCPTS = []TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))URLLENGTH_LIMIT = 2083USER_AGENT = 'Scrapy/%s (+https://scrapy.org)' % import_module('scrapy').__version__TELNETCONSOLE_ENABLED = 1 TELNETCONSOLE_PORT = [6023, 6073] TELNETCONSOLE_HOST = '127.0.0.1' TELNETCONSOLE_USERNAME = 'scrapy' TELNETCONSOLE_PASSWORD = NoneSPIDER_CONTRACTS = {} SPIDER_CONTRACTS_BASE = {'scrapy.contracts.default.UrlContract': 1,'scrapy.contracts.default.ReturnsContract': 2,'scrapy.contracts.default.ScrapesContract': 3, }Scrapy-redis 的一些默認配置
scrapy-redis/defaults.py
import redis

# key used by all spiders for request deduplication in Redis
DUPEFILTER_KEY = 'dupefilter:%(timestamp)s'

# key the pipeline uses to store items in Redis
PIPELINE_KEY = '%(spider)s:items'

# Redis client class to use
REDIS_CLS = redis.StrictRedis
# Redis encoding
REDIS_ENCODING = 'utf-8'
# Redis connection parameters
REDIS_PARAMS = {
    'socket_timeout': 30,
    'socket_connect_timeout': 30,
    'retry_on_timeout': True,
    'encoding': REDIS_ENCODING,
}

# key under which the scheduler stores requests in Redis
SCHEDULER_QUEUE_KEY = '%(spider)s:requests'
# priority-based request queue (used by default)
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
# key used for the deduplication records in Redis
SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'
# class that implements the deduplication rules:
# DUPEFILTER_CLASS is used by default; if SCHEDULER_DUPEFILTER_CLASS is set, that one is used instead
SCHEDULER_DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'

START_URLS_KEY = '%(name)s:start_urls'
START_URLS_AS_SET = False
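For reference, here is a minimal sketch of how these keys can be inspected at run time; the spider name myspider, a local Redis on the default port, and the default PriorityQueue are assumptions for illustration, not part of the original post:

import redis

r = redis.StrictRedis(host='localhost', port=6379)

# SCHEDULER_QUEUE_KEY = '%(spider)s:requests' -> a sorted set of serialized requests
print(r.zcard('myspider:requests'))    # requests still waiting in the priority queue

# SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter' -> a set of request fingerprints
print(r.scard('myspider:dupefilter'))  # fingerprints of requests already seen

# PIPELINE_KEY = '%(spider)s:items' -> a list of serialized items
print(r.llen('myspider:items'))        # items pushed by RedisPipeline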
scrapy-redis settings used in a project

# Enable the scrapy-redis scheduler, which stores the request queue in Redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"

# Deduplication class that filters requests through Redis
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Serializer for the data stored in Redis (pickle by default)
# SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"

# Do not clear the Redis queues, i.e. keep the scheduler and dedup records on close.
# True = keep, False = flush. Keeping them lets you pause and resume a crawl.
SCHEDULER_PERSIST = True

# Whether to flush the scheduler and dedup records before starting
# (True = flush, False = keep)
# SCHEDULER_FLUSH_ON_START = True

DEPTH_PRIORITY = 1     # breadth-first
# DEPTH_PRIORITY = -1  # depth-first

# Priority-based request queue (used by default)
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
# Other available queues: PriorityQueue (sorted set), FifoQueue (list), LifoQueue (list)
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.FifoQueue'  # breadth-first
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.LifoQueue'  # depth-first

# Maximum idle time, so the distributed spider is not closed just because it is waiting:
# how long to wait when the scheduler queue is empty and nothing is fetched.
# SCHEDULER_IDLE_BEFORE_CLOSE = 10

# Use the scrapy-redis pipeline to process items
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 300,
}

# Redis key under which the pipeline stores serialized items
# REDIS_ITEMS_KEY = '%(spider)s:items'

# Items are serialized with ScrapyJSONEncoder by default.
# You can use any importable path to a callable object.
# REDIS_ITEMS_SERIALIZER = 'json.dumps'

# Host and port used to connect to Redis (optional)
# REDIS_HOST = 'localhost'
# REDIS_PORT = 6379

# URL used to connect to Redis (optional).
# If set, it takes precedence over REDIS_HOST and REDIS_PORT.
# If no user is given, root is assumed.
# Example: REDIS_URL = "redis://root:12345678@192.168.0.100:6379"
# REDIS_URL = 'redis://user:pass@hostname:9001'

# Redis connection
REDIS_HOST = '100.100.100.100'       # host
REDIS_PORT = 9999                    # port
REDIS_PARAMS = {'password': 'xxx'}   # Redis connection parameters
REDIS_ENCODING = "utf-8"             # Redis encoding, default 'utf-8'
# or:
# REDIS_URL = 'redis://user:pass@hostname:9001'  # connection URL (takes precedence over the settings above)

# Custom Redis client class
# REDIS_PARAMS['redis_cls'] = 'myproject.RedisClient'

# If True, use Redis 'spop' when fetching start URLs.
# Useful if you need to avoid duplicates in the start URL list.
# With this enabled, URLs must be added with sadd, otherwise you get a type error.
# REDIS_START_URLS_AS_SET = False

# Default start_urls key for RedisSpider and RedisCrawlSpider
# REDIS_START_URLS_KEY = '%(name)s:start_urls'

Pick whichever of the settings above you need and write them into your project's settings.py.
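As a small sketch of how start URLs are usually seeded into Redis for a crawl configured this way (the key 'myspider:start_urls' and the local connection details are placeholders, not values from this post):

import redis

r = redis.StrictRedis(host='localhost', port=6379)

# Default case (REDIS_START_URLS_AS_SET = False): the key is a list, so push with lpush.
r.lpush('myspider:start_urls', 'https://example.com/page1')

# With REDIS_START_URLS_AS_SET = True the key is a set, so URLs must go in via sadd,
# which also deduplicates the seed list.
# r.sadd('myspider:start_urls', 'https://example.com/page1')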
If you would rather not dig through the source above, the official scrapy-redis documentation is here: http://scrapy-redis.readthedocs.io/en/stable/readme.html
繼續(xù)在我們上一篇博文中的爬蟲程序修改:
First, write the Redis settings we need into settings.py.
If your Redis database was configured as described in the previous post, you need at least the following three items:
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
REDIS_URL = 'redis://root:password@host-ip:port'
Fill in the third item according to your actual setup.
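With those three settings in place, the spider side typically only needs to read its start URLs from Redis. Below is a minimal, illustrative sketch; MySpider, the redis_key and the parse logic are made-up names for demonstration, not the spider from the previous post:

from scrapy_redis.spiders import RedisSpider


class MySpider(RedisSpider):
    name = 'myspider'
    # requests start from URLs pushed to this Redis key (lpush by default)
    redis_key = 'myspider:start_urls'

    def parse(self, response):
        # yield a trivial item so RedisPipeline has something to store
        yield {'url': response.url, 'title': response.css('title::text').get()}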