<6> 简易分布式
# Producer side: seeds the shared Redis queue with the URLs to crawl
from loguru import logger
from redis import StrictRedis


class QuotesProduce:
    """Producer half of a minimal Redis-backed distributed crawler.

    Pushes quotes.toscrape.com listing-page URLs onto a shared Redis
    list; the consumer side BRPOPs from the same list key.
    """

    # Redis list key shared with the consumer side.
    name = "start_urls"

    def __init__(self):
        # decode_responses=True so values round-trip as str, not bytes.
        self.redis_cli = StrictRedis(
            host="",  # TODO(review): fill in the Redis host before running
            port=6379,
            db=0,
            password="password",
            decode_responses=True,
        )

    def produce(self):
        """Push the URLs of listing pages 1-10 onto the Redis queue."""
        for index in range(1, 11):
            url = "https://quotes.toscrape.com/page/{}/".format(index)
            logger.info(url)
            # Use the class-level key rather than repeating the literal,
            # so producer and consumer stay in sync through `name`.
            self.redis_cli.lpush(self.name, url)


if __name__ == '__main__':
    crawler = QuotesProduce()
    crawler.produce()
# Consumer side: pops URLs from the Redis queue and scrapes each page
import requests
from loguru import logger
from redis import StrictRedis
from parsel import Selector
from concurrent.futures import ThreadPoolExecutor


class QuotesConsume:
    """Consumer half of a minimal Redis-backed distributed crawler.

    Blocking-pops URLs that the producer pushed onto the shared Redis
    list and scrapes each page concurrently in a thread pool.
    """

    # Redis list key shared with the producer side.
    name = "start_urls"

    def __init__(self):
        self.redis_cli = StrictRedis(
            host="",  # TODO(review): fill in the Redis host before running
            port=6379,
            db=0,
            password="password",
            decode_responses=True,
        )
        # I/O-bound work: up to 10 pages fetched concurrently.
        self.executor = ThreadPoolExecutor(max_workers=10)

    def consume(self):
        """Loop forever, handing each popped URL to the thread pool."""
        while True:
            # brpop blocks until an item is available and returns a
            # (key, value) tuple; value is the URL string.
            url = self.redis_cli.brpop(self.name)
            logger.info("消费者-这是当前弹出的网址: {}", url[1])
            self.executor.submit(self.spider_task, url[1])

    def spider_task(self, url):
        """Fetch one listing page and log every quote's text/author.

        Errors are logged rather than raised: exceptions inside a
        submitted task are otherwise silently swallowed because the
        returned Future is never inspected.
        """
        try:
            response = requests.get(url, timeout=10)
            # Fail loudly on HTTP errors instead of parsing an error page.
            response.raise_for_status()
            selectors = Selector(text=response.text)
            for selector in selectors.css(".col-md-8 .quote"):
                text = selector.css(".text::text").get()
                author = selector.css(".author::text").get()
                items = {
                    "text": text,
                    "author": author
                }
                logger.info("这是采集的数据: {}", items)
        except Exception:
            logger.exception("spider_task failed for url: {}", url)


if __name__ == '__main__':
    crawler = QuotesConsume()
    crawler.consume()
分类:
01-基础篇
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· TypeScript + Deepseek 打造卜卦网站:技术与玄学的结合
· Manus的开源复刻OpenManus初探
· 三行代码完成国际化适配,妙~啊~
· .NET Core 中如何实现缓存的预热?
· 如何调用 DeepSeek 的自然语言处理 API 接口并集成到在线客服系统