Crawling my own cnblogs blog, heh
import os
import re
import requests
from bs4 import BeautifulSoup

def GetessayContent(essayUrl):
    # Fetch a single post, extract its title and body, and save the body to disk.
    html_content = requests.get(essayUrl).text
    bs = BeautifulSoup(html_content, "html.parser")
    title = bs.find("a", attrs={"id": "cb_post_title_url"}).text
    body = bs.find("div", attrs={"id": "cnblogs_post_body"}).get_text()
    os.makedirs("essay", exist_ok=True)  # make sure the output folder exists
    with open(os.path.join("essay", title + ".txt"), "w", errors="ignore") as f:
        f.write(body)
    print("Download succeeded")

def GetessayList(SideHrefUrl):
    # Fetch a category page, collect the href of every post listed on it,
    # and download each post.
    html_content = requests.get(SideHrefUrl).text
    bs = BeautifulSoup(html_content, "html.parser")
    divs = bs.find_all("div", attrs={"class": "entrylistPosttitle"})
    hrefs = [div.find("a")["href"] for div in divs]
    for essayUrl in hrefs:
        GetessayContent(essayUrl)

def GetSideList():
    # Fetch the blog's sidebar and pull the category-page URLs out of it.
    blog_url = "https://www.cnblogs.com/zaranet/mvc/blog/sidecolumn.aspx"
    html_content = requests.get(blog_url).text
    bs = BeautifulSoup(html_content, "html.parser")
    side_divs = bs.find_all("div", attrs={"class": "catListPostCategory"})
    html_list = str(side_divs)
    side_pattern = re.compile(r'https:.*?\.html')  # category links end in .html
    side_list = side_pattern.findall(html_list)
    for side_url in side_list:
        GetessayList(side_url)

GetSideList()
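The script works top-down: GetSideList finds each category page in the sidebar, GetessayList collects the post links on one category page, and GetessayContent saves a single post. For a quick test before crawling everything, a single category URL can be passed to GetessayList directly; a minimal sketch, where the URL below is a hypothetical placeholder rather than a real category on this blog:

# Hypothetical category URL; substitute a real one from your own sidebar.
GetessayList("https://www.cnblogs.com/zaranet/category/1234567.html")

One caveat: the output filename comes straight from the post title, so a title containing characters that are illegal in filenames (such as /) will make open() fail; sanitizing the title first is left out here for brevity.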
Heh.
Category: Python Crawler