Loading

A Python implementation of a Baidu Tieba (百度贴吧) page scraper

import requests


class TiebaSpider:
    """Baidu Tieba crawler: downloads a forum's list pages and saves each as an HTML file."""

    def __init__(self, tieba_name, page_count=10) -> None:
        """
        tieba_name: name of the tieba (forum) to crawl.
        page_count: number of list pages to fetch (Tieba paginates 50 posts per page).
        """
        self.tieba_name = tieba_name
        self.page_count = page_count
        # Bug fix: the original template omitted the "&" between the kw value
        # and "ie=utf-8", yielding malformed queries like "kw=李毅ie=utf-8&pn=0".
        self.url_temp = "https://tieba.baidu.com/f?kw=" + tieba_name + "&ie=utf-8&pn={}"
        # Browser-like User-Agent so the request is less likely to be rejected as a bot.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"}

    def get_url_list(self) -> list:
        """Build and return the list of page URLs (pn grows by 50 per page)."""
        return [self.url_temp.format(i * 50) for i in range(self.page_count)]

    def parse_url(self, url) -> str:
        """
        Send a GET request and return the decoded response body.

        url: one element of the list returned by get_url_list.

        Raises requests.HTTPError on non-2xx responses instead of
        silently saving an error page.
        """
        print(url)  # log the URL currently being fetched
        response = requests.get(url, headers=self.headers)
        response.raise_for_status()  # fail fast on HTTP errors
        return response.content.decode()

    def seve_html(self, html_str, page_num):
        """Save one page's HTML as "<tieba_name>吧_第<page_num>页.html".

        NOTE: method name keeps the original spelling ("seve") for
        backward compatibility with existing callers.
        """
        file_path = "{}吧_第{}页.html".format(self.tieba_name, page_num)
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(html_str)

    def run(self):
        """Main loop: fetch every list page and save it to disk."""
        url_list = self.get_url_list()
        # enumerate replaces the original's O(n) url_list.index(url) lookup
        for page_num, url in enumerate(url_list, start=1):
            html_str = self.parse_url(url)
            self.seve_html(html_str, page_num)


if __name__ == "__main__":
    # Bug fix: the original constructed the spider but never called run(),
    # so executing the script fetched nothing.
    t = TiebaSpider("李毅")
    t.run()

思维导图

posted @ 2023-02-09 22:00  ThankCAT  阅读(47)  评论(0编辑  收藏  举报