随笔 - 31  文章 - 0  评论 - 0  阅读 - 2444

<6> 简易分布式

复制代码
# produce端
# Producer side: seed the shared Redis queue with the crawl start URLs.
from loguru import logger
from redis import StrictRedis


class QuotesProduce:
    """Push paginated start URLs onto a Redis list for distributed consumers.

    The consumer side pops from the same list key, so producer and consumer
    form a minimal distributed crawl setup.
    """

    # Redis list key shared with the consumer side.
    name = "start_urls"

    def __init__(self):
        # decode_responses=True makes the client return str instead of bytes.
        # NOTE(review): host/password are placeholders — fill in real values
        # (ideally from environment/config, not hardcoded) before running.
        self.redis_cli = StrictRedis(
            host="",
            port=6379,
            db=0,
            password="password",
            decode_responses=True,
        )

    def produce(self):
        """Generate page URLs 1-10 and LPUSH each onto the queue."""
        for index in range(1, 11):
            url = "https://quotes.toscrape.com/page/{}/".format(index)
            logger.info(url)
            # Fix: use the class-level key instead of repeating the string
            # literal, so the queue name is defined in exactly one place.
            self.redis_cli.lpush(self.name, url)


if __name__ == '__main__':
    crawler = QuotesProduce()
    crawler.produce()
复制代码
复制代码
# consume端
# Consumer side: pop start URLs from Redis and scrape each page concurrently.
import requests
from loguru import logger
from redis import StrictRedis
from parsel import Selector
from concurrent.futures import ThreadPoolExecutor


class QuotesConsume:
    """Blocking consumer that pops URLs from a shared Redis list and scrapes
    quote text/author pairs from each page in a thread pool.
    """

    # Redis list key shared with the producer side.
    name = "start_urls"

    def __init__(self):
        # decode_responses=True makes the client return str instead of bytes.
        # NOTE(review): host/password are placeholders — fill in real values
        # (ideally from environment/config, not hardcoded) before running.
        self.redis_cli = StrictRedis(
            host="",
            port=6379,
            db=0,
            password="password",
            decode_responses=True,
        )
        self.executor = ThreadPoolExecutor(max_workers=10)

    def consume(self):
        """Loop forever, blocking on the queue and dispatching scrape tasks."""
        while True:
            # brpop blocks until an item is available and returns a
            # (key, value) tuple; index 1 is the popped URL.
            url = self.redis_cli.brpop(self.name)
            logger.info("消费者-这是当前弹出的网址: {}", url[1])
            self.executor.submit(self.spider_task, url[1])

    def spider_task(self, url):
        """Fetch one page and log each scraped quote.

        Errors are handled here because the Future returned by submit() is
        discarded in consume() — an unhandled exception in this task would
        otherwise vanish silently inside the pool.
        """
        try:
            response = requests.get(url, timeout=10)
            # Fix: surface non-2xx responses instead of parsing error pages.
            response.raise_for_status()
        except requests.RequestException:
            logger.exception("request failed: {}", url)
            return
        selectors = Selector(text=response.text)
        for selector in selectors.css(".col-md-8 .quote"):
            text = selector.css(".text::text").get()
            author = selector.css(".author::text").get()
            items = {
                "text": text,
                "author": author
            }
            logger.info("这是采集的数据: {}", items)


if __name__ == '__main__':
    crawler = QuotesConsume()
    crawler.consume()
复制代码

 

posted on   不是霉蛋  阅读(15)  评论(0编辑  收藏  举报
相关博文:
阅读排行:
· TypeScript + Deepseek 打造卜卦网站:技术与玄学的结合
· Manus的开源复刻OpenManus初探
· 三行代码完成国际化适配,妙~啊~
· .NET Core 中如何实现缓存的预热?
· 如何调用 DeepSeek 的自然语言处理 API 接口并集成到在线客服系统
< 2025年3月 >
23 24 25 26 27 28 1
2 3 4 5 6 7 8
9 10 11 12 13 14 15
16 17 18 19 20 21 22
23 24 25 26 27 28 29
30 31 1 2 3 4 5

点击右上角即可分享
微信分享提示