Fetching All Campus News
1. Wrap the extraction of every news item on a single list page into a function.
2. Get the total number of news articles and work out the total number of list pages (a small worked example of this calculation follows the list).
3. Fetch the details of every news item on every list page.
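To make step 2 concrete, here is a minimal sketch of the page-count arithmetic. The article total of 9,763 is a made-up figure for illustration only; the full script below reads the real total from the '.a1' element of the index page.

total_articles = 9763                           # hypothetical figure, for illustration only
per_page = 10                                   # each list page on news.gzcc.cn shows 10 items
total_pages = total_articles // per_page + 1    # 977, same formula the script uses below
print(total_pages)

Note that integer division plus one counts one page too many when the total is an exact multiple of 10; the formula is kept as-is here to match the script. The full script implementing all three steps: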
import requests
import re
from bs4 import BeautifulSoup
from datetime import datetime

# Get the click count of one article from the counter API
def getClickCount(newsUrl):
    newId = re.search(r'_(.*).html', newsUrl).group(1).split('/')[1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId)
    clickStr = requests.get(clickUrl).text
    return re.search(r"hits'\).html\('(.*)'\);", clickStr).group(1)

# Get the details of one news article
def getNewDetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    title = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    # The info line starts with '发布时间:YYYY-MM-DD HH:MM:SS'
    t = info[0:24].lstrip('发布时间:')
    dt = datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
    if info.find('来源:') > 0:
        source = info[info.find('来源:'):].split()[0].lstrip('来源:')
    else:
        source = 'none'
    if info.find('作者:') > 0:
        author = info[info.find('作者:'):].split()[0].lstrip('作者:')
    else:
        author = 'none'
    # Article body (kept commented out, as in the original)
    # content = soupd.select('.show-content')[0].text.strip()
    # Call getClickCount to fetch the click count of this article
    click = getClickCount(url)
    print('链接:', url)
    print('标题:', title)
    # print('正文:', content)
    print('发布时间:', dt)
    print('来源:', source)
    print('作者:', author)
    print('点击次数:', click)

# Extract every news item on one list page
def getListPage(listPageUrl):
    res = requests.get(listPageUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            # Link of this news item
            a = news.select('a')[0].attrs['href']
            # Fetch the detail page of this item
            getNewDetail(a)

# Compute the total number of list pages from the article count
resn = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
resn.encoding = 'utf-8'
soupn = BeautifulSoup(resn.text, 'html.parser')
n = int(soupn.select('.a1')[0].text.rsplit('条')[0]) // 10 + 1

# Crawl every list page: the index page first, then pages 2..n
# (n may overshoot by one page when the total is an exact multiple of 10)
getListPage('http://news.gzcc.cn/html/xiaoyuanxinwen/')
for i in range(2, n + 1):
    pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(pageUrl)
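For reference, a minimal sketch of what the regular expression in getClickCount extracts. The response string here is only a hypothetical stand-in shaped like the counter API's output, not text captured from oa.gzcc.cn; the same pattern is applied to the live response in the script above.

import re
sample = "$('#todaydowns').html('12');$('#hits').html('4238');"   # hypothetical response shape
print(re.search(r"hits'\).html\('(.*)'\);", sample).group(1))      # prints 4238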