Multi-task Crawlers
1. How to use multithreading
In Python 3, when the main thread or main process finishes, its child threads and child processes do not finish with it by default.
To let the main thread take its child threads down with it, set the child threads as daemon threads. A daemon thread is treated as unimportant: as soon as the main thread ends, the daemon threads end too.
```python
import threading

t1 = threading.Thread(target=func, args=(arg,))  # args must be a tuple, e.g. (arg,)
t1.setDaemon(True)  # mark the thread as a daemon
t1.start()          # the thread only starts running here
```
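A minimal runnable sketch of this daemon behavior (the worker function and the timings are made up for illustration):

```python
import threading
import time

def worker():
    # an infinite loop: as a non-daemon thread this would keep the process alive forever
    while True:
        print("working...")
        time.sleep(0.5)

t = threading.Thread(target=worker)
t.setDaemon(True)   # comment this line out and the program never exits
t.start()
time.sleep(1.2)     # the main thread does its own work...
print("main thread done")  # ...then exits, taking the daemon worker down with it
```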
2. How to use the queue module
```python
from queue import Queue

q = Queue(maxsize=100)
item = {}
q.put_nowait(item)  # put without waiting; raises queue.Full if the queue is full
q.put(item)         # put an item; blocks while the queue is full
q.get_nowait()      # get without waiting; raises queue.Empty if the queue is empty
q.get()             # get an item; blocks while the queue is empty
q.qsize()           # number of items currently in the queue
q.join()            # the queue keeps an unfinished-task counter; join() blocks the
                    # calling thread until that counter drops to 0
q.task_done()       # put() increments the counter; get() alone does not decrement
                    # it, so pair every get() with a task_done() call
```
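To make the counter mechanics concrete, here is a minimal producer/consumer sketch (the consumer function and the item count are made up for illustration):

```python
from queue import Queue
import threading

q = Queue()

def consumer():
    while True:
        item = q.get()     # get() by itself does not change the counter
        print("consumed", item)
        q.task_done()      # counter -1; without this, q.join() would block forever

t = threading.Thread(target=consumer)
t.setDaemon(True)
t.start()

for i in range(5):
    q.put(i)               # counter +1 for each put()
q.join()                   # returns once all 5 items have been task_done()
print("all items processed")
```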
3. A breakdown of the multithreaded approach
- Wrap each step of the crawler in its own function and run each function in a thread
- Let the functions communicate with each other through queues, so they stay decoupled
The code is as follows:
```python
# coding=utf-8
import requests
from lxml import etree
from queue import Queue
import threading
import time


class QiuBai:
    def __init__(self):
        self.temp_url = "http://www.qiushibaike.com/8hr/page/{}"
        self.headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"}
        self.url_queue = Queue()
        self.html_queue = Queue()
        self.content_list_queue = Queue()

    def get_url_list(self):
        for i in range(1, 14):
            self.url_queue.put(self.temp_url.format(i))

    def parse_url(self):
        while True:
            url = self.url_queue.get()
            response = requests.get(url, headers=self.headers)
            print(response)
            if response.status_code != 200:
                self.url_queue.put(url)  # retry: put the failed url back
            else:
                self.html_queue.put(response.content.decode())
            self.url_queue.task_done()  # decrement the queue's counter

    def get_content_list(self):  # extract the data
        while True:
            html_str = self.html_queue.get()
            html = etree.HTML(html_str)
            div_list = html.xpath("//div[@id='content-left']/div")
            content_list = []
            for div in div_list:
                item = {}
                item["user_name"] = div.xpath(".//h2/text()")[0].strip()
                item["content"] = [i.strip() for i in div.xpath(".//div[@class='content']/span/text()")]
                content_list.append(item)
            self.content_list_queue.put(content_list)
            self.html_queue.task_done()

    def save_content_list(self):  # save the data
        while True:
            content_list = self.content_list_queue.get()
            for content in content_list:
                pass  # saving is stubbed out; print(content) to inspect
            self.content_list_queue.task_done()

    def run(self):  # the main logic
        thread_list = []
        # 1. prepare the url list
        t_url = threading.Thread(target=self.get_url_list)
        thread_list.append(t_url)
        # 2. send the requests and collect the responses
        for i in range(3):
            t_parse = threading.Thread(target=self.parse_url)
            thread_list.append(t_parse)
        # 3. extract the data
        t_content = threading.Thread(target=self.get_content_list)
        thread_list.append(t_content)
        # 4. save the data
        t_save = threading.Thread(target=self.save_content_list)
        thread_list.append(t_save)
        for t in thread_list:
            t.setDaemon(True)  # make each worker a daemon thread
            t.start()
        for q in [self.url_queue, self.html_queue, self.content_list_queue]:
            q.join()  # block the main thread until each queue's counter reaches 0


if __name__ == '__main__':
    t1 = time.time()
    qiubai = QiuBai()
    qiubai.run()
    print("total cost:", time.time() - t1)
```
How to use multiprocessing
```python
from multiprocessing import Process

t1 = Process(target=func, args=(arg,))  # args must be a tuple
t1.daemon = True  # mark as a daemon process
t1.start()        # the process only starts running here
```
Using queues with multiple processes
With multiple processes, the ordinary queue module blocks, because a plain queue.Queue is not shared across processes; use the JoinableQueue provided by multiprocessing instead. It is used the same way as the queue module is used with threads.
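Because the two APIs match, the threaded version can be converted with little more than an import alias. A minimal sketch of the swap (the url is a placeholder for illustration):

```python
from multiprocessing import Process
from multiprocessing import JoinableQueue as Queue  # drop-in replacement for queue.Queue

def worker(q):
    url = q.get()
    print("fetched", url)
    q.task_done()                    # same counter protocol as the threaded queue

if __name__ == '__main__':
    q = Queue()
    q.put("http://example.com")      # placeholder url
    p = Process(target=worker, args=(q,))
    p.daemon = True
    p.start()
    q.join()                         # blocks until every put() has a matching task_done()
```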
The complete code is as follows:
```python
# coding=utf-8
import requests
from lxml import etree
from multiprocessing import Process
from multiprocessing import JoinableQueue as Queue
import time


class QiuBai:
    def __init__(self):
        self.temp_url = "http://www.qiushibaike.com/8hr/page/{}"
        self.headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"}
        self.url_queue = Queue()
        self.html_queue = Queue()
        self.content_list_queue = Queue()
        self.proxies = {"http": "http://58.247.179.94:8060"}

    def get_url_list(self):
        for i in range(1, 14):
            self.url_queue.put(self.temp_url.format(i))

    def parse_url(self):
        while True:
            url = self.url_queue.get()
            response = requests.get(url, headers=self.headers, proxies=self.proxies)
            print(response)
            if response.status_code != 200:
                self.url_queue.put(url)  # retry: put the failed url back
            else:
                self.html_queue.put(response.content.decode())
            self.url_queue.task_done()  # decrement the queue's counter

    def get_content_list(self):  # extract the data
        while True:
            html_str = self.html_queue.get()
            html = etree.HTML(html_str)
            div_list = html.xpath("//div[@id='content-left']/div")
            content_list = []
            for div in div_list:
                item = {}
                item["user_name"] = div.xpath(".//h2/text()")[0].strip()
                item["content"] = [i.strip() for i in div.xpath(".//div[@class='content']/span/text()")]
                content_list.append(item)
            self.content_list_queue.put(content_list)
            self.html_queue.task_done()

    def save_content_list(self):  # save the data
        while True:
            content_list = self.content_list_queue.get()
            for content in content_list:
                pass  # saving is stubbed out; print(content) to inspect
            self.content_list_queue.task_done()

    def run(self):  # the main logic
        process_list = []
        # 1. prepare the url list
        t_url = Process(target=self.get_url_list)
        process_list.append(t_url)
        # 2. send the requests and collect the responses
        for i in range(13):
            t_parse = Process(target=self.parse_url)
            process_list.append(t_parse)
        # 3. extract the data
        t_content = Process(target=self.get_content_list)
        process_list.append(t_content)
        # 4. save the data
        t_save = Process(target=self.save_content_list)
        process_list.append(t_save)
        for process in process_list:
            process.daemon = True  # make each worker a daemon process
            process.start()
        for q in [self.url_queue, self.html_queue, self.content_list_queue]:
            q.join()  # block the main process until each queue's counter reaches 0


if __name__ == '__main__':
    t1 = time.time()
    qiubai = QiuBai()
    qiubai.run()
    print("total cost:", time.time() - t1)
```
A faster crawler with a thread pool
1. An introduction to using the thread pool
1. Instantiate the thread pool object
```python
from multiprocessing.dummy import Pool  # a thread-backed Pool with the multiprocessing API

pool = Pool(processes=5)  # defaults to the number of CPUs
```
2. Merge sending the request, extracting the data, and saving it into a single function, and hand that function to the thread pool for asynchronous execution
This is done with pool.apply_async(func):
```python
def execute_requests_item_save(self):
    url = self.queue.get()
    html_str = self.parse_url(url)
    content_list = self.get_content_list(html_str)
    self.save_content_list(content_list)
    self.total_response_num += 1

pool.apply_async(self.execute_requests_item_save)
```
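To see what apply_async does on its own, here is a minimal self-contained sketch (the fetch function and the pool size are made up for illustration). Note that each apply_async call schedules exactly one execution:

```python
from multiprocessing.dummy import Pool
import time

def fetch(n):
    time.sleep(0.1)        # stand-in for a network request
    return n * n

pool = Pool(3)
results = [pool.apply_async(fetch, args=(i,)) for i in range(5)]
for r in results:
    print(r.get())         # .get() blocks until that particular task finishes
pool.close()
pool.join()
```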
3. Add a callback function
apply_async lets a function execute asynchronously, but each call executes it only once.
To have the task run repeatedly, add a callback: _callback re-submits the task and registers itself as the callback again, so it effectively calls itself recursively. The recursion also needs an explicit exit condition.
```python
def _callback(self, temp):  # temp receives the return value of the finished task
    if self.is_running:     # the exit condition for the recursion
        pool.apply_async(self.execute_requests_item_save, callback=self._callback)

# submit the first task to start the cycle
pool.apply_async(self.execute_requests_item_save, callback=self._callback)
```
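A self-contained toy version of this recursive-callback pattern, stripped of the crawler details (the task body, counters, and pool size are all made up for illustration):

```python
from multiprocessing.dummy import Pool
import time

class Runner:
    def __init__(self):
        self.pool = Pool(2)
        self.is_running = True
        self.done = 0

    def task(self):
        time.sleep(0.05)        # stand-in for request + extract + save
        self.done += 1

    def _callback(self, temp):  # fires each time one task finishes
        if self.is_running:     # exit condition for the recursion
            self.pool.apply_async(self.task, callback=self._callback)

    def run(self):
        for i in range(2):      # seed the cycle with two concurrent tasks
            self.pool.apply_async(self.task, callback=self._callback)
        while self.done < 10:   # wait until enough tasks have completed
            time.sleep(0.001)
        self.is_running = False

if __name__ == '__main__':
    Runner().run()
    print("finished 10 tasks")
```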
4. Decide the condition for ending the program
The program can end once the number of responses received equals the number of requests made.
```python
while True:  # keep the main thread alive
    time.sleep(0.0001)  # avoid a tight loop that wastes CPU
    if self.total_response_num >= self.total_request_num:
        self.is_running = False
        break
self.pool.close()  # close the pool so no new tasks can be submitted
# self.pool.join()  # optionally wait for all worker threads to finish
```
2. The concrete implementation of the crawler using a thread pool
```python
# coding=utf-8
import requests
from lxml import etree
import time
from queue import Queue
from multiprocessing.dummy import Pool


class QiuBai:
    def __init__(self):
        self.temp_url = "http://www.qiushibaike.com/8hr/page/{}"
        self.headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"}
        self.queue = Queue()
        self.pool = Pool(5)
        self.is_running = True
        self.total_request_num = 0
        self.total_response_num = 0
        self.proxies = {"http": "http://58.247.179.94:8060"}

    def get_url_list(self):
        for i in range(1, 14):
            self.queue.put(self.temp_url.format(i))
            self.total_request_num += 1

    def parse_url(self, url):
        # response = requests.get(url, headers=self.headers, proxies=self.proxies)
        response = requests.get(url, headers=self.headers)
        print(response)
        return response.content.decode()

    def get_content_list(self, html_str):  # extract the data
        html = etree.HTML(html_str)
        div_list = html.xpath("//div[@id='content-left']/div")
        content_list = []
        for div in div_list:
            item = {}
            item["user_name"] = div.xpath(".//h2/text()")[0].strip()
            item["content"] = [i.strip() for i in div.xpath(".//div[@class='content']/span/text()")]
            content_list.append(item)
        return content_list

    def save_content_list(self, content_list):  # save the data
        for content in content_list:
            pass  # saving is stubbed out; print(content) to inspect

    def _execute_request_content_save(self):  # request, extract, and save a single url
        url = self.queue.get()
        html_str = self.parse_url(url)
        # 3. extract the data
        content_list = self.get_content_list(html_str)
        # 4. save the data
        self.save_content_list(content_list)
        self.total_response_num += 1

    def _callback(self, temp):
        if self.is_running:
            self.pool.apply_async(self._execute_request_content_save, callback=self._callback)

    def run(self):  # the main logic
        # 1. prepare the url list
        self.get_url_list()
        for i in range(3):  # set the concurrency to 3
            self.pool.apply_async(self._execute_request_content_save, callback=self._callback)
        while True:
            time.sleep(0.0001)
            if self.total_response_num >= self.total_request_num:
                self.is_running = False
                break


if __name__ == '__main__':
    t1 = time.time()
    qiubai = QiuBai()
    qiubai.run()
    print("total cost:", time.time() - t1)
```
3. The concrete implementation of the crawler using a coroutine pool
```python
# coding=utf-8
import gevent.monkey
gevent.monkey.patch_all()  # patch the standard library before requests is imported

from gevent.pool import Pool
import requests
from lxml import etree
import time
from queue import Queue


class QiuBai:
    def __init__(self):
        self.temp_url = "http://www.qiushibaike.com/8hr/page/{}"
        self.headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"}
        self.queue = Queue()
        self.pool = Pool(5)
        self.is_running = True
        self.total_request_num = 0
        self.total_response_num = 0
        self.proxies = {"http": "http://58.247.179.94:8060"}

    def get_url_list(self):
        for i in range(1, 14):
            self.queue.put(self.temp_url.format(i))
            self.total_request_num += 1

    def parse_url(self, url):
        # response = requests.get(url, headers=self.headers, proxies=self.proxies)
        response = requests.get(url, headers=self.headers)
        print(response)
        return response.content.decode()

    def get_content_list(self, html_str):  # extract the data
        html = etree.HTML(html_str)
        div_list = html.xpath("//div[@id='content-left']/div")
        content_list = []
        for div in div_list:
            item = {}
            item["user_name"] = div.xpath(".//h2/text()")[0].strip()
            item["content"] = [i.strip() for i in div.xpath(".//div[@class='content']/span/text()")]
            content_list.append(item)
        return content_list

    def save_content_list(self, content_list):  # save the data
        for content in content_list:
            pass  # saving is stubbed out; print(content) to inspect

    def _execute_request_content_save(self):  # request, extract, and save a single url
        url = self.queue.get()
        html_str = self.parse_url(url)
        # 3. extract the data
        content_list = self.get_content_list(html_str)
        # 4. save the data
        self.save_content_list(content_list)
        self.total_response_num += 1

    def _callback(self, temp):
        if self.is_running:
            self.pool.apply_async(self._execute_request_content_save, callback=self._callback)

    def run(self):  # the main logic
        # 1. prepare the url list
        self.get_url_list()
        for i in range(3):  # set the concurrency to 3
            self.pool.apply_async(self._execute_request_content_save, callback=self._callback)
        while True:
            time.sleep(0.0001)
            if self.total_response_num >= self.total_request_num:
                self.is_running = False
                break


if __name__ == '__main__':
    t1 = time.time()
    qiubai = QiuBai()
    qiubai.run()
    print("total cost:", time.time() - t1)
```