爬取自己的博客园呵呵

复制代码
import urllib
import io
from bs4 import BeautifulSoup
import requests
import os
import re


def GetessayContent(essayUrl):
    """Download one essay page and save its text body to essay/<title>.txt.

    essayUrl: absolute URL of a single blog post. Skips the post (with a
    message) if the expected title/body elements are missing.
    """
    html_content = requests.get(essayUrl).text
    # Explicit parser avoids BeautifulSoup's "no parser specified" warning
    # and keeps parsing consistent across machines.
    bs = BeautifulSoup(html_content, "html.parser")
    title_tag = bs.find("a", attrs={"id": "cb_post_title_url"})
    body_tag = bs.find("div", attrs={"id": "cnblogs_post_body"})
    if title_tag is None or body_tag is None:
        # Page layout changed or post unavailable -- skip instead of crashing.
        print("解析失败: " + essayUrl)
        return
    # Strip characters that are illegal in file names (Windows-safe set),
    # since the post title becomes the file name.
    safe_title = re.sub(r'[\\/:*?"<>|]', "_", title_tag.text).strip()
    essay_dir = os.path.abspath("essay")
    os.makedirs(essay_dir, exist_ok=True)  # create output dir on first run
    # Explicit utf-8 so the write does not depend on the platform encoding.
    with open(os.path.join(essay_dir, safe_title + ".txt"),
              "w", encoding="utf-8", errors="ignore") as r:
        r.write(body_tag.get_text())
    print("下载成功")


def GetessayList(SideHrefUrl):
    """Fetch a category listing page and download every essay linked on it.

    SideHrefUrl: URL of one category page whose entries are
    div.entrylistPosttitle blocks each containing an <a href> to a post.
    """
    html_content = requests.get(SideHrefUrl).text
    # Explicit parser avoids BeautifulSoup's "no parser specified" warning.
    bs = BeautifulSoup(html_content, "html.parser")
    divs = bs.find_all("div", attrs={"class": "entrylistPosttitle"})
    for div in divs:
        link = div.find("a")
        # Guard: a malformed entry without an anchor/href would otherwise
        # raise TypeError/KeyError and abort the whole crawl.
        if link is not None and link.has_attr("href"):
            GetessayContent(link["href"])


def GetSideList():
    """Fetch the blog's side column and crawl every category listed in it."""
    blog_url = "https://www.cnblogs.com/zaranet/mvc/blog/sidecolumn.aspx"
    html_content = requests.get(blog_url).text
    # Explicit parser avoids BeautifulSoup's "no parser specified" warning.
    bs = BeautifulSoup(html_content, "html.parser")
    side_divs = bs.find_all("div", attrs={"class": "catListPostCategory"})
    # Pull every category-page URL out of the matched divs' HTML.
    side_pattern = re.compile(r'https:.*?\.html')
    html_text = "".join(str(div) for div in side_divs)
    for side_url in side_pattern.findall(html_text):
        GetessayList(side_url)


# Entry-point guard so importing this module does not start the crawl.
if __name__ == "__main__":
    GetSideList()
复制代码

呵呵

posted @   ZaraNet  阅读(117)  评论(0编辑  收藏  举报
编辑推荐:
· 从 HTTP 原因短语缺失研究 HTTP/2 和 HTTP/3 的设计差异
· AI与.NET技术实操系列:向量存储与相似性搜索在 .NET 中的实现
· 基于Microsoft.Extensions.AI核心库实现RAG应用
· Linux系列:如何用heaptrack跟踪.NET程序的非托管内存泄露
· 开发者必知的日志记录最佳实践
阅读排行:
· TypeScript + Deepseek 打造卜卦网站:技术与玄学的结合
· Manus的开源复刻OpenManus初探
· AI 智能体引爆开源社区「GitHub 热点速览」
· C#/.NET/.NET Core技术前沿周刊 | 第 29 期(2025年3.1-3.9)
· 从HTTP原因短语缺失研究HTTP/2和HTTP/3的设计差异
点击右上角即可分享
微信分享提示