A custom Python distributed framework dedicated to crawling. It supports resuming a crawl from where it stopped and guarantees 100% that no message is lost, even if the program is killed or the machine loses power at any point during the crawl.

0. This framework can only be used for crawling: the framework schedules the url requests, development must follow that pattern, it does not provide celery-style general-purpose distribution, and it is not convenient to test. For that there is another, function-oriented distributed framework that schedules any function, stays compatible with any new or old code, and covers any scenario that needs distribution.

A distributed spider framework that is much simpler than scrapy. There is no jumping back and forth between item, pipeline, middleware, spider, settings and run files; everything lives in a single file, which saves a lot of development time. The structure is very loose: you override a single method and parse and persist the data however you like, with no item definitions and no pipeline storage code. The built-in RequestClient offers simple cookie handling and one-line switching of ip proxies, so no middleware is needed for that either.
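To give a feel for how little code a spider needs, here is a minimal usage sketch. The class name NewsListSpider, the redis key 'news:list_urls' and the parsing body are made up for illustration; BaseCustomSpider, _dispacth_request and the chained setters come from the framework code further down, and the sketch assumes that code is importable.

class NewsListSpider(BaseCustomSpider):
    def request_and_extract(self, url, meta):
        # Always go through _dispacth_request so retries, per-minute statistics and proxy switching keep working.
        response = self._dispacth_request(url)
        if response is None:
            return  # the request still failed after the maximum number of retries
        print(response.text)  # parse and store however you like; no item or pipeline is needed


NewsListSpider('news:list_urls', threads_num=100).set_request_timeout(60).set_request_proxy('kuai').start_craw()  # non-blocking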


rabbitmq is the recommended message broker: a task is only acknowledged after it has been parsed and stored, so consumption is reliable and the program can be stopped at any time. With redis as the broker, stopping the program at an arbitrary moment loses the tasks that are currently being requested or have not yet been parsed and stored, and the more threads and processes there are, the more tasks are lost.

# coding=utf-8
import abc
import math
import json
import queue
import time
from collections import OrderedDict
# noinspection PyUnresolvedReferences
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from threading import Lock
from pika import BasicProperties
# noinspection PyUnresolvedReferences
from app.utils_ydf import LoggerMixin, LogManager, MongoMixin, RedisMixin, RequestClient, decorators, RedisBulkWriteHelper, RedisOperation, MongoBulkWriteHelper, MysqlBulkWriteHelper, RabbitMqHelper


class BoundedThreadPoolExecutor(ThreadPoolExecutor):
    """A ThreadPoolExecutor with a bounded work queue, so submit() blocks instead of queueing tasks without limit."""

    def __init__(self, max_workers=None, thread_name_prefix=''):
        super().__init__(max_workers, thread_name_prefix)
        self._work_queue = queue.Queue(max_workers * 2)  # cap the internal queue at twice the worker count


class StatusError(Exception):
    pass


class VolunteerErrorForSpiderRetry(Exception):
    """
    If __request_and_extract catches this error type, no error log is written; it exists only to trigger a retry.
    """


# noinspection PyBroadException
class BaseCustomSpider(LoggerMixin, MongoMixin, RedisMixin, metaclass=abc.ABCMeta):
    """
    A lean, custom distributed base spider framework driven by redis task scheduling ("distributed" meaning it scales horizontally: one machine can run multiple processes without changing code or repeatedly starting the python program, and several machines can run the program at the same time). A subclass only needs a few lines overriding the request_and_extract method to get a concurrent, distributed crawler, which is far simpler than scrapy.
    Usage: BookingListPageSpider inherits BaseCustomSpider and overrides request_and_extract to do the parsing and storage. It is started like this:
    BookingListPageSpider('booking:listpage_urls', threads_num=500).set_request_timeout(100).set_request_proxy('kuai').start_craw()  # start_craw is non-blocking, so a detail-page spider can be started right after it in the same main thread

    """
    lock = Lock()
    pool_schedu_task = BoundedThreadPoolExecutor(200)  # redis over the public network can be slow, so the seed-popping work is spread across several scheduler threads (10 are submitted in start_craw).

    def __init__(self, seed_key: str = None, request_method='get', threads_num=100, proxy_name='kuai', log_level=1):
        """
        :param seed_key: the redis key that holds the seeds
        :param request_method: 'get' or 'post'
        :param threads_num: number of concurrent requests
        :param proxy_name: one of None, 'kuai', 'abuyun', 'crawlera'; None disables the proxy
        """
        self.__check_proxy_name(proxy_name)
        self._seed_key = seed_key
        self._request_metohd = request_method
        self._proxy_name = proxy_name
        self._threads_num = threads_num
        self.theadpool = BoundedThreadPoolExecutor(threads_num)
        self.logger.setLevel(log_level * 10)
        LogManager('RequestClient').get_logger_and_add_handlers(log_level)
        self._initialization_count()
        self._request_headers = None
        self._request_timeout = 60
        self._max_request_retry_times = 5  # how many times a failed request is retried
        self._max_parse_retry_times = 3  # how many times a parse error triggers a fresh request
        self._is_print_detail_exception = False
        self.logger.info(f'{self.__class__} was instantiated')

    @staticmethod
    def __check_proxy_name(proxy_name):
        if proxy_name not in (None, 'kuai', 'abuyun', 'crawlera'):
            raise ValueError('invalid proxy name')

    def _initialization_count(self):
        self._t1 = time.time()
        self._request_count = 0
        self._request_success_count = 0

    def set_max_request_retry_times(self, max_request_retry_times):
        self._max_request_retry_times = max_request_retry_times
        return self

    def set_request_headers(self, headers: dict):
        """
        e.g. self.set_request_headers({'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'})
        """
        self._request_headers = headers
        return self  # allows chained calls

    def set_request_timeout(self, timeout: float):
        self._request_timeout = timeout
        return self

    def set_request_proxy(self, proxy_name):
        self.__check_proxy_name(proxy_name)
        self._proxy_name = proxy_name
        return self

    def set_print_detail_exception(self, is_print_detail_exception: bool):
        self._is_print_detail_exception = is_print_detail_exception
        return self

    def _calculate_count_per_minute(self, flag):
        with self.lock:
            if time.time() - self._t1 > 60:
                # _request_count, _request_success_count = self._request_count, self._request_success_count
                self.logger.info(f'{self.__class__} made {self._request_count} requests in the last minute, {self._request_success_count} of which succeeded; the redis key {self._seed_key} still holds {self.redis_db7.scard(self._seed_key)} seeds')
                self._initialization_count()
            if flag == 0:
                self._request_count += 1
            if flag == 1:
                self._request_success_count += 1

    def start_craw(self):
        # self._schedu_a_task()
        for _ in range(10):  # connecting to the broker over the public network adds transfer latency, so several scheduler threads keep the overall speed up.
            self.pool_schedu_task.submit(self._schedu_a_task)

    # @decorators.tomorrow_threads(300)
    @decorators.keep_circulating(time_sleep=1)  # keeps looping even if redis raises an error, so the program does not stop and need a manual restart.
    def _schedu_a_task(self):
        while True:
            seed_bytes = self.redis_db7.spop(self._seed_key)
            if seed_bytes:
                seed_dict = json.loads(seed_bytes)
                # noinspection PyProtectedMember
                self.logger.debug(f'current number of threads is {len(self.theadpool._threads)}, seed: {seed_dict}')
                self.theadpool.submit(self.__request_and_extract, seed_dict['url'], meta=seed_dict)
            else:
                self.logger.warning(f'the redis key {self._seed_key} is empty')
                time.sleep(2)

    # @decorators.handle_exception(50, )
    def _dispacth_request(self, url, data: dict = None, current_url_request_times=0, ):
        # self.__calculate_count_per_minute(0)
        """
        :param url: the url to request
        :param current_url_request_times: how many times this url has already been attempted
        :param data: data for a POST request
        :return:
        """
        if current_url_request_times < self._max_request_retry_times:
            if current_url_request_times > 0:
                pass
                # self.logger.debug(current_url_request_times)
            # noinspection PyBroadException
            try:
                resp = RequestClient(self._proxy_name, timeout=self._request_timeout).request_with_proxy(method=self._request_metohd, url=url, headers=self._request_headers, data=data)  # send the request through RequestClient so the proxy setting is applied
            except Exception as e:
                self.logger.error(f'network error on request attempt {current_url_request_times + 1}: {e}', exc_info=0)
                self._calculate_count_per_minute(0)
                return self._dispacth_request(url, data, current_url_request_times + 1)
            else:
                if resp.status_code == 200:
                    self._calculate_count_per_minute(0)
                    self._calculate_count_per_minute(1)
                    return resp
                else:
                    self.logger.critical(f'response status was {resp.status_code}  --> {url}')
                    self._calculate_count_per_minute(0)
                    return self._dispacth_request(url, data, current_url_request_times + 1)
        else:
            self.logger.critical(f'request for {url} still failed after the maximum of {self._max_request_retry_times} attempts')
            return None

    def put_seed_task_to_broker(self, seed_key: str, seed_dict: OrderedDict):
        seed_str = json.dumps(seed_dict)
        # self.redis_db7.sadd(redis_key, seed_str)
        RedisBulkWriteHelper(self.redis_db7, threshold=50).add_task(RedisOperation('sadd', seed_key, seed_str))

    def __request_and_extract(self, url, meta: OrderedDict, current_retry_times=0):  # needed because ThreadPoolExecutor silently swallows exceptions when a future's result is never retrieved, so errors would otherwise stay hidden
        # noinspection PyBroadException
        if current_retry_times < self._max_parse_retry_times:
            try:
                self.request_and_extract(url, meta)
            except Exception as e:
                if isinstance(e, VolunteerErrorForSpiderRetry):
                    pass
                else:
                    self.logger.error(f'parse error on attempt {current_retry_times + 1} for url {url}  \n {e}', exc_info=self._is_print_detail_exception)
                self.__request_and_extract(url, meta, current_retry_times + 1)
        else:
            self.logger.critical(f'parsing the page at {url} still failed after the maximum of {self._max_parse_retry_times} attempts')

    # noinspection PyUnusedLocal
    @abc.abstractmethod
    def request_and_extract(self, url, meta: OrderedDict):
        """
        子类需要重写此方法,完成解析和数据入库或者加入提取的url二次链接和传递的参数到redis的某个键。爬虫需要多层级页面提取的,重新实例化一个此类运行即可。
        :param url:
        :param meta:
        :return:
        """
        """
        必须使用_dispacth_request方法来请求url,不要直接使用requests,否则不能够对请求错误成自动重试和每分钟请求数量统计和代理ip设置无效
        response = self._dispacth_request(url)
        print(response.text)
        """
        raise NotImplementedError


# noinspection PyUnresolvedReferences
class RabbitmqBrokerForSpiderMixin(metaclass=abc.ABCMeta):
    """
    rabbitmq is the recommended message broker.
    Using rabbitmq instead of redis as the broker means the program can be stopped at any point of the crawl without losing the tasks that are in flight. Inherit from both this class and BaseCustomSpider, and list this class first among the bases.
    """
    # noinspection PyArgumentEqualDefault
    LogManager('pika.heartbeat').get_logger_and_add_handlers(1)
    lock_channel = Lock()

    @property
    @decorators.cached_method_result
    def _channel_publish(self):
        channel = RabbitMqHelper().creat_a_channel()
        return channel

    @decorators.cached_property
    def _channel_statistics(self):
        channel = RabbitMqHelper().creat_a_channel()
        channel.queue_declare(queue=self._seed_key, durable=True)
        return channel

    @decorators.keep_circulating(time_sleep=1)  # if the rabbitmq server goes down, consuming resumes automatically once it is repaired.
    def _schedu_a_task(self):
        channel = RabbitMqHelper().creat_a_channel()
        channel.queue_declare(queue=self._seed_key, durable=True)
        channel.basic_qos(prefetch_count=int(math.ceil(self._threads_num / 10)))

        def callback(ch, method, properties, body):
            seed_dict = json.loads(body)
            # noinspection PyProtectedMember
            self.logger.debug(f'rabbitmq seed: {seed_dict}')
            # self.__request_and_extract(ch, method, properties, seed_dict['url'], meta=seed_dict)
            self.theadpool.submit(self.__request_and_extract, ch, method, properties, seed_dict['url'], meta=seed_dict)

        channel.basic_consume(callback,  # pika 0.x positional callback; pika 1.x renamed it to on_message_callback and reordered the arguments
                              queue=self._seed_key,
                              # no_ack=True                        # skipping acknowledgements means that stopping the spider at will loses some tasks.
                              )
        channel.start_consuming()

    def put_seed_task_to_broker(self, seed_key: str, seed_dict: OrderedDict):
        """
        Publish a seed/task to the broker.
        :param seed_key: the queue (or redis key) the seed/task goes to
        :param seed_dict: the task. It must be an OrderedDict rather than a plain dict, otherwise the same task can serialize to different strings and be inserted into the broker more than once. It must contain at least a key named url; any other keys can carry extra initial task information.
        :return:
        """
        with self.lock_channel:
            channel = self._channel_publish
            seed_str = json.dumps(seed_dict)
            channel.queue_declare(queue=seed_key, durable=True)
            channel.basic_publish(exchange='',
                                  routing_key=seed_key,
                                  body=seed_str,
                                  properties=BasicProperties(
                                      delivery_mode=2,  # make message persistent
                                  )
                                  )

    # noinspection PyArgumentEqualDefault
    def _calculate_count_per_minute(self, flag):
        with self.lock:
            if time.time() - self._t1 > 60:
                rabbitmq_queue = self._channel_statistics.queue_declare(
                    queue=self._seed_key, durable=True,
                    exclusive=False, auto_delete=False
                )
                self.logger.info(f'{self.__class__} made {self._request_count} requests in the last minute, {self._request_success_count} of which succeeded; the rabbitmq queue {self._seed_key} still holds {rabbitmq_queue.method.message_count} messages')
                self._initialization_count()
            if flag == 0:
                self._request_count += 1
            if flag == 1:
                self._request_success_count += 1

    # noinspection PyMethodOverriding
    def __request_and_extract(self, ch, method, properties, url, meta: OrderedDict, current_retry_times=0):
        # the page sometimes comes back with the wrong content and parsing fails, so allow a few retries.
        if current_retry_times < self._max_parse_retry_times:
            # noinspection PyBroadException
            try:
                self.request_and_extract(url, meta)
                ch.basic_ack(delivery_tag=method.delivery_tag)
            except Exception as e:
                if isinstance(e, VolunteerErrorForSpiderRetry):
                    pass
                else:
                    self.logger.error(f'parse error on attempt {current_retry_times + 1} for url {url}  \n {e}', exc_info=self._is_print_detail_exception)
                self.__request_and_extract(ch, method, properties, url, meta, current_retry_times + 1)
        else:
            self.logger.critical(f'parsing the page at {url} still failed after the maximum of {self._max_parse_retry_times} attempts')
            ch.basic_ack(delivery_tag=method.delivery_tag)


class BaseRabbitmqSpider(RabbitmqBrokerForSpiderMixin, BaseCustomSpider, metaclass=abc.ABCMeta):
    """
    This class can also be inherited directly instead of combining the mixin and the base class by hand.
    """



In testing, a single process on a single core can issue 20,000 requests per minute; the actual per-minute maximum depends on network bandwidth, how fast the target site responds, and the size of the pages.


Alternatively, the general-purpose consuming framework described in another post can also provide concurrency, resumable runs, and tasks that are never lost.
posted @ 2018-10-15 09:56  北风之神0509