# Common configuration reference for Scrapy's settings.py

# Scrapy settings for scrapy_demo project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

# Project name, chosen when you ran `scrapy startproject <name>`.
BOT_NAME = "scrapy_demo"

# Module path where the spider files live.
SPIDER_MODULES = ["scrapy_demo.spiders"]
# Module where `scrapy genspider` places newly generated spider files.
NEWSPIDER_MODULE = "scrapy_demo.spiders"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# The UA can be set here, or via the default headers in DEFAULT_REQUEST_HEADERS below.
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36 SE 2.X MetaSr 1.0'

# Obey robots.txt rules
# Defaults to True. (If you obeyed it, there would be nothing left to crawl...)
# ROBOTSTXT_OBEY = True
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# This cap applies to the whole Scrapy process.
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# Delay (seconds) between requests to the same website (default: 0).
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also the AutoThrottle settings and docs below.
DOWNLOAD_DELAY = 5

# The download delay setting will honor only one of:
# Only one of the two concurrency settings below takes effect.
#CONCURRENT_REQUESTS_PER_DOMAIN = 16   # Max concurrent requests per domain.
#CONCURRENT_REQUESTS_PER_IP = 16   # Max concurrent requests per IP; when both are enabled, this one takes precedence.

# Disable cookies (enabled by default)
# Cookies are enabled by default. Some sites can be crawled without cookies;
# disabling them there improves efficiency.
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# (the UA may also be set here)
#DEFAULT_REQUEST_HEADERS = {
#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#    "Accept-Language": "en",
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    "scrapy_demo.middlewares.ScrapyDemoSpiderMiddleware": 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# Downloader middlewares are a central place to handle the UA and proxy IPs,
# e.g. rotating a random UA or proxy per request.
#DOWNLOADER_MIDDLEWARES = {
#    "scrapy_demo.middlewares.ScrapyDemoDownloaderMiddleware": 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    "scrapy.extensions.telnet.TelnetConsole": None,
#}
# Configure item pipelines
# Pipelines you write must be registered here to run — much like Django apps
# must be added to INSTALLED_APPS.
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "scrapy_demo.pipelines.ScrapyDemoPipeline": 300,  # value is the priority: lower number = higher priority.
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# Automatic rate-limiting extension.
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"

# Default encoding used for FEED (exported data) output.
FEED_EXPORT_ENCODING = "utf-8"

# Default downloader timeout in seconds (Scrapy's default is 180).
# This is the per-spider default; a per-request timeout can also be set via
# request.meta["download_timeout"].
DOWNLOAD_TIMEOUT = 20

##### Custom settings ###
# Proxy IP pool. (In production a proxy pool is usually kept in Redis —
# because it's fast!)
# NOTE(review): all four entries are identical placeholders — replace with
# real, distinct proxy addresses before use.
IP_PROXY_POOL = (
    "127.0.0.1:6789",
    "127.0.0.1:6789",
    "127.0.0.1:6789",
    "127.0.0.1:6789",
)

# (Source: blog post published 2023-07-17 11:35 — platform footer retained as a comment.)