Python crawlers with the Scrapy framework

Scrapy framework: levelling up

Follow the WeChat official account "轻松学编程" to learn more.

1. Create a launcher script for the spider

Create a start.py file in the project root directory:


Add the following code:

# From now on, just run start.py to launch the spider
import scrapy.cmdline

def main():
    # 'mytencent' is the spider name in this project
    scrapy.cmdline.execute(['scrapy', 'crawl', 'mytencent'])

if __name__ == '__main__':
    main()
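
If you would rather not shell out to the scrapy CLI, a roughly equivalent launcher can be built on Scrapy's CrawlerProcess. This is only a sketch (the file name start_alt.py is just an example) and assumes it is run from the project root so that the project's settings.py is picked up:

# start_alt.py - an alternative launcher built on CrawlerProcess (a sketch)
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

def main():
    process = CrawlerProcess(get_project_settings())  # load the project's settings.py
    process.crawl('mytencent')  # spider name, same as passed to `scrapy crawl`
    process.start()             # blocks until the crawl finishes

if __name__ == '__main__':
    main()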

2. Crawl multiple pages automatically

In mytencent.py under the spiders folder, make the MytencentSpider class inherit from CrawlSpider and then add the crawl rules:

import scrapy
from tencent.items import TencentItem

from scrapy.spiders import CrawlSpider, Rule  # crawl rules
from scrapy.linkextractors import LinkExtractor  # link extraction

# The spider class inherits from CrawlSpider
class MytencentSpider(CrawlSpider):
    name = 'mytencent'
    allowed_domains = ['hr.tencent.com']
    start_urls = ['https://hr.tencent.com/position.php?keywords=&tid=0&start=10#a']

    # Crawl rule: follow any URL matching the regex start=(\d+)#a and parse it with get_parse
    rules = (Rule(LinkExtractor(allow=r"start=(\d+)#a"), callback='get_parse', follow=True),)

    # Do not name the callback parse(): CrawlSpider uses parse() internally
    def get_parse(self, response):
        jobList = response.xpath('//tr[@class="even"] | //tr[@class="odd"]')

        for job in jobList:
            # Create a fresh item for every row so yielded items don't share state
            item = TencentItem()
            # .extract() returns the matched text nodes as a list of strings
            jobName = job.xpath('./td[1]/a/text()').extract()[0]
            jobType = job.xpath('./td[2]/text()').extract()[0]
            item['jobName'] = jobName
            item['jobType'] = jobType

            yield item
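
TencentItem is imported from tencent/items.py but never shown; a minimal sketch of what it would contain, with the two fields the spider fills in:

# tencent/items.py (a sketch)
import scrapy

class TencentItem(scrapy.Item):
    jobName = scrapy.Field()  # job title
    jobType = scrapy.Field()  # job category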

3. Build requests with the framework's own Request()

In mysina.py under the spiders folder:

import scrapy
from scrapy.spiders import CrawlSpider, Rule  # crawl rules
from scrapy.linkextractors import LinkExtractor  # link extraction
from sina.items import SinaItem  # assuming the project package is named "sina"; adjust to match your project

class MysinaSpider(CrawlSpider):
    name = 'mysina'
    allowed_domains = ['sina.com.cn']
    start_urls = ['http://roll.news.sina.com.cn/news/gnxw/gdxw1/index_1.shtml']
    # Crawl rules: an iterable, so multiple rules can be set
    rules = [Rule(LinkExtractor(allow=r"index_(\d+)\.shtml"), callback='get_parse', follow=True)]

    def get_parse(self, response):
        newsList = response.xpath('//ul[@class="list_009"]/li')
        for news in newsList:
            # News headline
            title = news.xpath('./a/text()').extract()[0]
            # Publication time
            newsTime = news.xpath('./span/text()').extract()[0]
            # print('***********', title, '****', newsTime)
            # URL of the article body
            contentsUrl = news.xpath('./a/@href').extract()[0]
            # Build the request with the framework's own Request() and pass data via meta
            '''
            Main scrapy.Request() parameters:
            url,
            callback=None,  callback function
            meta=None,      data handed over to the callback
            '''
            request = scrapy.Request(url=contentsUrl, callback=self.get_article)
            # meta is a dict used to pass extra data along with the request
            request.meta['title'] = title
            request.meta['newsTime'] = newsTime

            yield request

    def get_article(self, response):
        contents = response.xpath('//div[@id="article"]//text()')
        # Article body
        newsContent = ""
        for content in contents:
            newsContent += content.extract().strip() + '\n'
        print('*****article body*****', newsContent, '*****article body*****')
        item = SinaItem()
        # Read the values passed via meta
        item['title'] = response.meta['title']
        item['newsTime'] = response.meta['newsTime']

        item['newsContent'] = newsContent

        yield item
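
Likewise, SinaItem is assumed to be declared in the project's items.py; a minimal sketch with the three fields used above:

# items.py (a sketch)
import scrapy

class SinaItem(scrapy.Item):
    title = scrapy.Field()        # news headline
    newsTime = scrapy.Field()     # publication time
    newsContent = scrapy.Field()  # article body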

4. Template for saving into a MySQL database

Create the database and table in MySQL first (a table-creation sketch follows the pipeline code below), then write the following in pipelines.py:

import pymysql

class TencentPipeline(object):
    def __init__(self):
        # database connection
        self.conn = None
        # cursor
        self.cur = None

    # Called when the spider is opened; runs only once
    def open_spider(self,spider):
        self.conn = pymysql.connect(host='127.0.0.1',
                                    user='root',
                                    password="123456",
                                    database='tjob',  # database name: tjob
                                    port=3306,
                                    charset='utf8')
        self.cur = self.conn.cursor()

    def process_item(self, item, spider):
        # Split the item into column names and values
        cols, values = zip(*item.items())
        sql = "INSERT INTO `%s`(%s) VALUES (%s)" % ('tencentjob',  # table name: tencentjob
                                                    ','.join(cols),
                                                    ','.join(['%s'] * len(values)))

        self.cur.execute(sql, values)

        self.conn.commit()
        return item

    def close_spider(self, spider):
        self.cur.close()
        self.conn.close()
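
For reference, the tjob database and the tencentjob table have to exist before the first crawl. A one-off setup sketch using pymysql, assuming the two columns simply mirror the item fields jobName and jobType:

# create_table.py - one-off setup script (a sketch; run once before crawling)
import pymysql

conn = pymysql.connect(host='127.0.0.1', user='root', password='123456',
                       port=3306, charset='utf8')
cur = conn.cursor()
cur.execute("CREATE DATABASE IF NOT EXISTS tjob DEFAULT CHARACTER SET utf8")
cur.execute("""
    CREATE TABLE IF NOT EXISTS tjob.tencentjob (
        id INT AUTO_INCREMENT PRIMARY KEY,
        jobName VARCHAR(255),
        jobType VARCHAR(255)
    ) DEFAULT CHARSET=utf8
""")
conn.commit()
cur.close()
conn.close()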

The pipeline also has to be enabled in settings.py:

ITEM_PIPELINES = {
   'tencent.pipelines.TencentPipeline': 300,
}

5. Use middlewares for User-Agent and proxy IP rotation

Add the following to middlewares.py:

from scrapy import signals
import random

# IP proxy support
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
# User-Agent support
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware

from weixinsougou.settings import USER_AGENTS, PROXIES


class RandomUAMiddleware(UserAgentMiddleware):
    '''
    Middleware that picks a random User-Agent for every request
    '''
    def process_request(self, request, spider):
        '''
        Every outgoing request passes through process_request
        :param request: the request being processed
        :param spider: the spider that issued it
        :return:
        '''
        ua = random.choice(USER_AGENTS)
        # Overwrite the header so the rotation takes effect even if a default UA was already set
        request.headers['User-Agent'] = ua

class RandomIPMiddleware(HttpProxyMiddleware):
    '''
    Middleware that picks a random proxy IP for every request
    '''

    def process_request(self, request, spider):
        proxy = random.choice(PROXIES)
        request.meta['proxy'] = 'http://' + proxy['ip_port']
        

# class RandomCookieMiddleware(CookiesMiddleware):
#     '''
#     Middleware that picks a random cookie from a cookie pool
#     '''
#
#     def process_request(self, request, spider):
#         cookie = random.choice(COOKIES)
#         request.cookies = cookie

Then add the following to settings.py:

# -*- coding: utf-8 -*-


# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Disable cookies (enabled by default)
COOKIES_ENABLED = False


# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
  'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# Enable the custom middlewares
DOWNLOADER_MIDDLEWARES = {
   # 'weixinsougou.middlewares.WeixinsougouDownloaderMiddleware': 543,
   'weixinsougou.middlewares.RandomUAMiddleware': 543,
   'weixinsougou.middlewares.RandomIPMiddleware': 544,

}

# User-Agent pool
USER_AGENTS = [
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5"
]

# Proxy IP pool
PROXIES = [
    {'ip_port': '171.38.85.93:8123'},
    {'ip_port': '113.67.227.143:8118'},
    {'ip_port': '101.236.19.165:8866'},
    {'ip_port': '101.236.21.22:8866'},
]

# Cookie pool
COOKIES = []

# Reactor thread pool size (default: 10)
REACTOR_THREADPOOL_MAXSIZE = 20

# Concurrent requests (default: 16)
CONCURRENT_REQUESTS = 16

# Items processed concurrently in the pipelines (default: 100)
CONCURRENT_ITEMS = 50

# Maximum crawl depth (default: 0, i.e. no depth limit)
DEPTH_LIMIT = 4
# Download timeout in seconds
DOWNLOAD_TIMEOUT = 180
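
A quick way to confirm that the UA and proxy middlewares are actually applied is a throwaway spider pointed at a service that echoes the requester's address. The sketch below uses httpbin.org/ip purely as an example endpoint, and the spider name ipcheck is made up for illustration:

import scrapy

class IPCheckSpider(scrapy.Spider):
    '''Throwaway spider: logs the IP each request goes out with.'''
    name = 'ipcheck'
    # start_requests() sends these with dont_filter=True, so duplicate URLs are allowed
    start_urls = ['https://httpbin.org/ip'] * 5

    def parse(self, response):
        self.logger.info('Outgoing IP: %s', response.text)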

6. Distributed crawling with Redis

https://blog.csdn.net/lm_is_dc/article/details/81866275
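
The linked post has the full walkthrough; at its core, a scrapy-redis setup swaps Scrapy's scheduler and duplicate filter for the Redis-backed versions in settings.py. A minimal sketch, assuming scrapy-redis is installed and Redis runs locally:

# settings.py additions for scrapy-redis (a sketch)
SCHEDULER = "scrapy_redis.scheduler.Scheduler"               # Redis-backed request scheduler
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"   # shared duplicate filter
SCHEDULER_PERSIST = True                                     # keep the queue between runs
REDIS_URL = 'redis://127.0.0.1:6379'                         # Redis connection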

7. Deployment

https://blog.csdn.net/lm_is_dc/article/details/81869508

8. Managing spiders with Gerapy

https://blog.csdn.net/lm_is_dc/article/details/81869508

Afterword

[Afterword] To make it easy for everyone to learn programming, I created the WeChat official account 轻松学编程. It has articles that help you pick up programming quickly, practical tips to raise your level, and some programming projects suitable for course designs and similar assignments.

You can also add me on WeChat (1257309054) and I will invite you into the group so we can learn together.
If this article helped you, buy me a coffee!


Follow me and let's grow together~~
