Here is what it looks like in action:
No pointless chatter, straight to the code:
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
from urllib import request
from urllib import error
import re
import time
def html_request(url):
    if url is None:
        return None
    print("downloading html from: {0}".format(url))
    # Use a random User-Agent so successive requests don't all look identical.
    headers = {'User-Agent': str(UserAgent().random)}
    req = request.Request(url, headers=headers)
    try:
        html = request.urlopen(req).read().decode('utf-8')
    except error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
        return None
    return html
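# html_request returns the page source as a str, or None when the request
# fails. A quick way to try it on its own (the user name below is just a
# placeholder, not a real account):
#   html = html_request("https://blog.csdn.net/your_csdn_username/article/list/1?")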
def html_parser(url, html):
    if url is None or html is None:
        return
    # Each article entry on the list page sits in an "article-item-box" div.
    pattern_art = '<div class="article-item-box csdn-tracking-statistics" data(.+?)</div>'
    articles = re.compile(pattern_art, re.S).findall(html.replace('\n', ''))
    print(len(articles))
    for article in articles:
        soup = BeautifulSoup(article, 'html.parser')
        title = soup.find('a', attrs={'target': '_blank'})
        # Strip spaces and the CSDN "原"/"转" (original/repost) labels from the title.
        print("Article title: {0}\nArticle type: {1}".format(
            title.text.replace(' ', '').replace("原", "").replace("转", ""), title.span.text))
        print("Article link: {0}".format(title.attrs['href']))
        # Request each article page once.
        html_request(title.attrs['href'])
        # Info box (reads, comments, date); fetched but not used further here.
        infors = soup.find('div', attrs={'class': 'info-box d-flex align-content-center'})
    # On the last list page the "next page" button is rendered as disabled.
    pattern_next = '<li class="js-page-next js-page-action ui-pager ui-pager-disabled">'
    next = re.compile(pattern_next).findall(html)
    print("Is this the last page: {0}----{1}".format(len(next), next))
    if len(next) == 0:
        return 0
    else:
        return 1
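# Note: with the return values above, html_parser yields 1 when the disabled
# "next page" button is present (i.e. the current list page is the last one)
# and 0 otherwise. The main loop below stores this value but does not act on it.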
if __name__ == '__main__':
    name = 'your_csdn_username'  # replace with your own CSDN user name
    page = 1
    url = "https://blog.csdn.net/" + name + "/article/list/" + str(page) + '?'
    while page < 7:
        html = html_request(url)
        next = html_parser(url, html)
        page += 1
        # Wrap back to page 1 after page 6, so the loop keeps cycling
        # through the first six list pages.
        if page > 6:
            page = 1
        url = "https://blog.csdn.net/" + name + "/article/list/" + str(page) + '?'