Web crawling: Scrapy middleware

Scrapy middleware

- Downloader middleware
    - Role:
        - Sits between the engine and the downloader, so it can intercept, in one place, every request and response issued anywhere in the project
    - Operations available when intercepting requests (a minimal process_request sketch follows this list)
        - Set a proxy IP
            - request.meta['proxy'] = 'http://ip:port'
        - Spoof the User-Agent (UA)
            - request.headers['User-Agent'] = 'xxxx'
    - Operations available when intercepting responses
        - Tamper with the response data (rarely needed)
        - Replace the response object
            - Using Selenium inside Scrapy (a minimal process_response sketch follows this list)
                - Define a bro attribute on the spider class (a browser object instantiated with Selenium)
                - Override the parent-class method closed(self, spider) in the spider class and close the browser object there
                - In the middleware's process_response, access the spider's bro attribute through the spider parameter
                - In the middleware, drive the browser to obtain the rendered page source
                - Use that page source as the body of a new response object
                - Return the new response object
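A minimal sketch of the request-side interception described above (the proxy address and the User-Agent pool here are placeholders, not values taken from the examples below):

```python
import random


class ProxyUAMiddleware(object):
    # Placeholder pools: fill in real proxy addresses and User-Agent strings
    proxies = ['http://127.0.0.1:8888']          # hypothetical proxy
    user_agents = ['Mozilla/5.0 (placeholder)']  # hypothetical UA strings

    def process_request(self, request, spider):
        # UA spoofing: pick a random User-Agent for every outgoing request
        request.headers['User-Agent'] = random.choice(self.user_agents)
        # Route the request through a proxy
        request.meta['proxy'] = random.choice(self.proxies)
        return None  # None lets the request continue through the middleware chain
```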
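And a minimal sketch of the response-side swap just described (it assumes the spider exposes the browser as spider.bro and lists the pages that need rendering in a hypothetical target_urls attribute):

```python
from scrapy.http import HtmlResponse


class SeleniumRenderMiddleware(object):
    def process_response(self, request, response, spider):
        # Only re-render the pages that need JavaScript; everything else passes through
        if request.url in getattr(spider, 'target_urls', []):
            bro = spider.bro              # browser object defined on the spider class
            bro.get(request.url)          # let Selenium load and render the page
            page_text = bro.page_source   # rendered page source
            # Wrap the rendered source in a new response object and return it instead
            return HtmlResponse(url=request.url, body=page_text,
                                encoding='utf-8', request=request)
        return response
```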

Full-site data crawling with CrawlSpider

- How are CrawlSpider and Spider related?
    - CrawlSpider is a subclass of Spider
- Create a spider file based on CrawlSpider
    - scrapy genspider -t crawl xxx www.xxx.com
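For orientation, the file generated by the crawl template looks roughly like this (a sketch; the exact boilerplate varies a little between Scrapy versions):

```python
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class XxxSpider(CrawlSpider):
    name = 'xxx'
    allowed_domains = ['www.xxx.com']
    start_urls = ['http://www.xxx.com/']

    rules = (
        # Each Rule pairs a LinkExtractor (which links to follow) with a callback (how to parse those pages)
        Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        item = {}
        return item
```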

A simple proxy IP test

- The spider (run) file
```python
import scrapy


class MidSpider(scrapy.Spider):
    name = 'mid'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://www.baidu.com/s?wd=ip']

    def parse(self, response):
        page_text = response.text
        with open('./ip.html', 'w', encoding='utf-8') as fp:
            fp.write(page_text)
```
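Running `scrapy crawl mid` writes the Baidu result page to ./ip.html; if the proxy and the random User-Agent set by the middleware below take effect, the IP shown on that page should be the proxy's rather than your own.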
- Nothing needs to be done in items
- Code in the pipelines file
```python
class MiddleproPipeline(object):
    def process_item(self, item, spider):
        return item
```
- Code in the middlewares file
```python
from scrapy import signals
import random

# Pool of proxy IPs to choose from
PROXY_http = [
    '153.180.102.104:80',
    '195.208.131.189:56055',
]
PROXY_https = [
    '120.83.49.90:9000',
    '95.189.112.214:35508',
]
# Pool of User-Agent strings
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
]


class MiddleproDownloaderMiddleware(object):
    # Intercepts normal requests
    def process_request(self, request, spider):
        # UA spoofing: pick a random UA from the pool for every request
        request.headers['User-Agent'] = random.choice(user_agent_list)
        # Send the request through a proxy IP
        request.meta['proxy'] = 'http://39.137.77.66:8080'

    # Intercepts every response
    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    # Intercepts requests that raised an exception
    def process_exception(self, request, exception, spider):
        # Switch to a random proxy matching the request scheme
        # (the scheme prefix follows the 'http://ip:port' format noted above)
        if request.url.split(':')[0] == 'http':
            request.meta['proxy'] = 'http://' + random.choice(PROXY_http)
        else:
            request.meta['proxy'] = 'https://' + random.choice(PROXY_https)
        return request  # resend the corrected request
```
- At this point, the middleware must be enabled in the settings file
```python
DOWNLOADER_MIDDLEWARES = {
    'middlePro.middlewares.MiddleproDownloaderMiddleware': 543,
}
```
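Besides registering the middleware, the later examples in these notes also switch off robots.txt, set a default UA, and quiet the log; the same is assumed here. A sketch:

```python
# Do not obey robots.txt, otherwise the request may be filtered out
ROBOTSTXT_OBEY = False
# Default UA for requests the middleware does not touch
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
# Only show error-level log output
LOG_LEVEL = 'ERROR'
```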

Crawling NetEase news content (the world-news section)

- Contents of the spider (run) file
```python
import scrapy
from selenium import webdriver


class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://news.163.com/world/']

    # Instantiate one browser object; the chromedriver executable is required
    bro = webdriver.Chrome(executable_path=r'D:\爬虫\谷歌访问助手\chromedriver.exe')

    def parse(self, response):
        div_list = response.xpath('/html/body/div/div[3]/div[4]/div[1]/div/div/ul/li/div/div')
        for div in div_list:
            # // is used in the XPath to skip tags (such as tbody) that only the browser inserts
            title = div.xpath('.//div[@class="news_title"]//a/text()').extract_first()
            detail_url = div.xpath('.//div[@class="news_title"]//a/@href').extract_first()
            yield scrapy.Request(detail_url, callback=self.parse_detail)
            print(title, detail_url)

    def parse_detail(self, response):
        content = response.xpath('//*[@id="endText"]//text()').extract()
        # Join the list of text nodes into one string
        content = ''.join(content)
        print(content)

    def closed(self, spider):
        self.bro.quit()
```
- The items file needs no changes
- Contents of the pipelines file
```python
class WangyiproPipeline(object):
    def process_item(self, item, spider):
        return item
```
- What to do in the middlewares file
```python
from scrapy import signals
from scrapy.http import HtmlResponse
from time import sleep


class WangyiproDownloaderMiddleware(object):

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    # spider is the instantiated spider object
    def process_response(self, request, response, spider):
        # Replace the response that does not meet our needs with one that does
        # body: the response data
        # The browser object created on the spider class is reached through spider
        if request.url == 'https://news.163.com/world/':
            bro = spider.bro
            bro.get('https://news.163.com/world/')
            sleep(2)
            # Scroll down with JS so the dynamically loaded items get rendered
            bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            sleep(1)
            bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            sleep(1)
            bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            sleep(1)
            page_text = bro.page_source
            new_response = HtmlResponse(url=bro.current_url, body=page_text,
                                        encoding='utf-8', request=request)
            return new_response
        else:
            return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass
```
- Finally, register the middleware in the settings file
```python
DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}
```

CrawlSpider full-site crawl example

- Contents of the spider file
```python
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from sunShinePro.items import SunshineproItem, sunConetent


# http://wz.sun0769.com/index.php/question/questionType?type=4&page=30
class SunSpider(CrawlSpider):
    name = 'sun'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=']

    # Link extractors
    # Purpose: extract the links in the page source that match the given rule (allow: a regex);
    # one picks up the pagination links, the other the detail-page links
    link = LinkExtractor(allow=r'type=4&page=\d+')
    link_detail = LinkExtractor(allow=r'question/\d+/\d+\.shtml')

    rules = (
        # Rule parsers: the pages behind the extracted links are parsed by the given callback
        Rule(link, callback='parse_item', follow=False),
        Rule(link_detail, callback='parse_detail'),
    )

    def parse_detail(self, response):
        content = response.xpath('/html/body/div[9]/table[2]//tr[1]/td/div[2]//text()').extract()
        content = ''.join(content)
        item = sunConetent()
        item['content'] = content
        yield item

    def parse_item(self, response):
        # Note: if the element located by the XPath contains a tbody, the tbody must be skipped
        tr_list = response.xpath('//*[@id="morelist"]/div/table[2]//tr/td/table//tr')
        for tr in tr_list:
            title = tr.xpath('./td[2]/a[2]/text()').extract_first()
            status = tr.xpath('./td[3]/span/text()').extract_first()
            item = SunshineproItem()
            item['title'] = title
            item['status'] = status
            yield item
```
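Note that `follow=False` on the pagination rule only follows the page links found on the start page itself; changing it to `Rule(link, callback='parse_item', follow=True)` would keep applying the link extractor to every newly crawled listing page, which is usually what a full-site crawl of all pagination wants.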
- Contents of the items file
```python
import scrapy


class SunshineproItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    status = scrapy.Field()


class sunConetent(scrapy.Item):
    content = scrapy.Field()
```
- Contents of the pipelines file
```python
class SunshineproPipeline(object):
    def process_item(self, item, spider):
        # Both item types go through the same pipeline, so dispatch on the item's class name
        if item.__class__.__name__ == 'SunshineproItem':
            print(item['title'], item['status'])
        else:
            print(item['content'])
        return item
```
- This example uses no downloader middleware, so the settings file only needs the same basic configuration as before (see the sketch below) and the middlewares file is left untouched
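A sketch of that basic configuration for this project (module path and class name taken from the code above; the UA string is a placeholder):

```python
ROBOTSTXT_OBEY = False
USER_AGENT = 'Mozilla/5.0 (placeholder browser UA)'
LOG_LEVEL = 'ERROR'
# Enable the pipeline that prints the parsed items
ITEM_PIPELINES = {
    'sunShinePro.pipelines.SunshineproPipeline': 300,
}
```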

Crawling all the content in five sections of NetEase news
- Create a project: scrapy startproject wangyiPro
- cd wangyiPro
- Create a spider file: scrapy genspider wangyi www.xxx.com (www.xxx.com is a stand-in for the target url; you must cd into the project first so the spider file ends up in the spiders directory)
- Contents of the wangyi spider file
```python
import scrapy
from selenium import webdriver

from wangyiPro.items import WangyiproItem


class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xxx.com']
    # The urls of the five section pages are parsed out of the start url
    start_urls = ['https://news.163.com/']
    model_detail_urls = []  # urls of the five section pages
    bro = webdriver.Chrome(executable_path=r'D:\爬虫\谷歌访问助手\chromedriver.exe')

    def parse(self, response):
        # Parse out the urls of the five section pages
        model_list = []  # holds the li tags of the five sections
        indexs = [3, 4, 6, 7, 8]
        li_list = response.xpath('//*[@id="index2016_wrap"]/div[1]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        for index in indexs:
            model_list.append(li_list[index])
        for li in model_list:
            # url of one section page
            detail_url = li.xpath('./a/@href').extract_first()
            self.model_detail_urls.append(detail_url)
            yield scrapy.Request(detail_url, callback=self.parse_detail)

    # Parse the news titles and the detail-page urls of one section
    def parse_detail(self, response):
        div_list = response.xpath('/html/body/div/div[3]/div[4]/div[1]/div/div/ul/li/div/div')
        for div in div_list:
            title = div.xpath('.//div[@class="news_title"]/h3/a/text()').extract_first()
            new_detail_url = div.xpath('.//div[@class="news_title"]/h3/a/@href').extract_first()
            item = WangyiproItem()
            item['title'] = title
            # item is handed to news_parse by passing it through the request's meta
            yield scrapy.Request(new_detail_url, callback=self.news_parse, meta={'item': item})

    # Parse the news body
    def news_parse(self, response):
        content = response.xpath('//*[@id="endText"]//text()').extract()
        content = ''.join(content)
        item = response.meta['item']
        item['content'] = content
        yield item

    def closed(self, spider):
        self.bro.quit()
```
- Define the fields in items
```python
import scrapy


class WangyiproItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    content = scrapy.Field()
```
- Contents of the pipelines file
```python
class WangyiproPipeline(object):
    fp = None

    # Called once when the spider starts: open the output file
    def open_spider(self, spider):
        self.fp = open('./wangyi.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        self.fp.write(item['title'] + ':' + item['content'] + '\n')
        return item

    # Called once when the spider finishes: close the file
    def close_spider(self, spider):
        self.fp.close()
```
- Contents of the middlewares file
```python
from scrapy.http import HtmlResponse
from time import sleep


class WangyiproDownloaderMiddleware(object):

    def process_request(self, request, spider):
        return None

    # Sees every response; only the five section responses are replaced,
    # every other response passes through untouched
    def process_response(self, request, response, spider):
        # Pick out the five target responses
        if request.url in spider.model_detail_urls:
            print(request.url)
            bro = spider.bro
            bro.get(request.url)
            sleep(2)
            bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            sleep(2)
            bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            sleep(2)
            bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            sleep(2)
            page_text = bro.page_source
            new_response = HtmlResponse(url=request.url, body=page_text,
                                        encoding='utf-8', request=request)
            return new_response
        else:
            return response  # return the original response unchanged

    def process_exception(self, request, exception, spider):
        pass
```
- Finally, add the following configuration to the settings file
```python
# Do not obey the site's robots.txt
ROBOTSTXT_OBEY = False
# UA spoofing
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
# Enable the downloader middleware
DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}
# Enable the item pipeline
ITEM_PIPELINES = {
    'wangyiPro.pipelines.WangyiproPipeline': 300,
}
# Only show error-level log output
LOG_LEVEL = 'ERROR'
```
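With all of the above in place, `scrapy crawl wangyi` runs the whole flow: the middleware swaps in the Selenium-rendered section pages and the pipeline writes each title/content pair to ./wangyi.txt.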