1. Create a CrawlSpider
scrapy genspider -t crawl wx_spider 'wxapp-union.com'
# import the rule machinery
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
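For reference, the template produced by genspider -t crawl looks roughly like this (the exact boilerplate varies slightly across Scrapy versions):

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

class WxSpiderSpider(CrawlSpider):
    name = 'wx_spider'
    allowed_domains = ['wxapp-union.com']
    start_urls = ['http://wxapp-union.com/']

    rules = (
        # placeholder rule: replace allow/callback with your own
        Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        item = {}
        return item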
2. Rule and LinkExtractor
class scrapy.spiders.Rule(
    link_extractor,  # a LinkExtractor object that defines which links this rule matches
    callback,        # callback invoked for each response whose URL matches the rule
                     # [Note]: don't use parse as the callback; CrawlSpider uses parse internally
    follow,          # whether to keep following matching links found in responses extracted by this rule;
                     # defaults to True when callback is None, otherwise False
    process_links,   # callable (or name of a spider method) that receives the list of links extracted
                     # by link_extractor; mainly used to filter out links you don't want to crawl
)
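A minimal sketch of filtering with process_links (drop_ads is an illustrative name, not part of Scrapy):

def drop_ads(links):
    # receives the list of Link objects extracted by the LinkExtractor;
    # return only the ones that should actually be crawled
    return [link for link in links if '/ad/' not in link.url]

rules = (
    Rule(LinkExtractor(allow=r'.+mod=list.+'), follow=True, process_links=drop_ads),
)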
class scrapy.linkextractors.LinkExtractor(
    allow=(),            # regexes; URLs matching any of them are extracted
    deny=(),             # regexes; matching URLs are excluded
    allow_domains=(),    # domains links may come from
    deny_domains=(),     # domains links must not come from
    restrict_xpaths=(),  # only extract links from page regions matched by these XPaths; applied together with allow
    unique=True,         # whether extracted links are de-duplicated (default True)
)
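A LinkExtractor can also be tried out on its own, e.g. inside scrapy shell (here on the list page from the example below):

# scrapy shell 'http://www.wxapp-union.com/portal.php?mod=list&catid=2&page=1'
from scrapy.linkextractors import LinkExtractor

le = LinkExtractor(allow=r'.+mod=list&catid=2&page=\d')
for link in le.extract_links(response):  # returns de-duplicated Link objects
    print(link.url, link.text)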
Example (crawling the WeChat Mini Program community)

class WxSpiderSpider(CrawlSpider):
    name = 'wx_spider'
    allowed_domains = ['wxapp-union.com']
    start_urls = ['http://www.wxapp-union.com/portal.php?mod=list&catid=2&page=1']

    rules = (
        # list-page rule: keep following the pagination links
        Rule(LinkExtractor(allow=r'.+mod=list&catid=2&page=\d'), follow=True),
        # detail-page rule: parse each article, but don't follow links found on article pages
        Rule(LinkExtractor(allow=r'.+/article-.+\.html'), callback='parse_detail_page', follow=False),
    )

    def parse_detail_page(self, response):
        title = response.xpath('//h1[@class="ph"]/text()').get()
        content = response.xpath('//td[@id="article_content"]//text()').getall()
        content = ''.join(content)
        author_p = response.xpath('//p[@class="authors"]')
        author = author_p.xpath('.//a/text()').get()
        pub_time = author_p.xpath('.//span/text()').get()
        yield WxappItem(title=title, author=author, pub_time=pub_time, content=content)
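The spider yields a WxappItem, which is assumed to be declared in the project's items.py along these lines (a sketch; define whatever fields your project actually needs) and imported into the spider module:

import scrapy

class WxappItem(scrapy.Item):
    title = scrapy.Field()
    author = scrapy.Field()
    pub_time = scrapy.Field()
    content = scrapy.Field()

Run the spider with scrapy crawl wx_spider.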