Crawl all the campus news

Assignment source: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE1/homework/3002

Assignment requirements:

0. Get the click count from a news URL and wrap it up as a function

  • newsUrl
  • newsId(re.search())
  • clickUrl(str.format())
  • requests.get(clickUrl)
  • re.search()/.split()
  • str.lstrip(),str.rstrip()
  • int
  • wrap it up as a function
def click(url):
    # Extract the news id from the detail-page URL and query the click-count API
    newsId = re.findall(r'(\d{1,5})', url)[-1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    resClick = requests.get(clickUrl)
    # The response is a JS snippet; strip everything around the number
    newsClick = int(resClick.text.split('.html')[-1].lstrip("('").rstrip("');"))
    return newsClick
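For reference, the counter API is assumed to return a small jQuery snippet that writes the hit count into the page; the chained split/lstrip/rstrip above peels the number out of that snippet. A minimal offline sketch of just the parsing (the sample response text below is an assumption, not captured from the real API):

# Hypothetical response body of the click-count API
sample = "$('#hits').html('415');"
# Same string handling as in click(): keep only the digits between the quotes
print(int(sample.split('.html')[-1].lstrip("('").rstrip("');")))  # -> 415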
  • Getting the news publish time and converting its type is also wrapped up as a function
def newsdate(showinfo):
    # showinfo is the text of the '.show-info' block on a detail page
    newsDate = showinfo.split()[0].split(':')[1]
    newsTime = showinfo.split()[1]
    # Convert to a datetime object, then format back to a normalized string
    newsDateTime = datetime.strptime(newsDate + ' ' + newsTime, '%Y-%m-%d %H:%M:%S')
    return newsDateTime.strftime('%Y-%m-%d %H:%M:%S')
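A quick sketch of how newsdate() slices the info line; the showinfo string below is hypothetical, modeled on the '.show-info' block of a detail page (date and time first, other fields after):

# Hypothetical '.show-info' text; real pages append author/unit/click fields after the time
showinfo = '发布时间:2019-03-23 09:03:00  作者:新闻中心'
print(newsdate(showinfo))  # -> 2019-03-23 09:03:00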

1. Get the news details from a news URL: a dict, anews

def anews(url):
    # Fetch one detail page and collect title, publish time and click count
    newsDetail = {}
    res = requests.get(url)
    res.encoding = 'utf8'
    soup = BeautifulSoup(res.text, 'html.parser')
    showinfo = soup.select('.show-info')[0].text
    newsDetail['新闻标题'] = soup.select('.show-title')[0].text
    newsDetail['发布时间'] = newsdate(showinfo)
    newsDetail['点击量'] = click(url)  # fixed: use the function argument, not the global newsUrl
    return newsDetail
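Called on a single detail page (the sample URL is the one defined in the full code below), it should return a dict shaped like this; the values shown in the comment are illustrative, not real output:

# Sketch: crawl one detail page
newsUrl = 'http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0323/11052.html'
detail = anews(newsUrl)
print(detail)
# e.g. {'新闻标题': '...', '发布时间': '2019-03-23 09:03:00', '点击量': 415}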

2. Get the news URLs from a list-page URL: list.append(dict), alist

def alist(listUrl):
    # Fetch one list page and build a dict for every news item on it
    res = requests.get(listUrl)
    res.encoding = 'utf8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            newsDesc = news.select('.news-list-description')[0].text
            newsDict = anews(newsUrl)
            newsDict['新闻链接'] = newsUrl
            newsDict['新闻描述'] = newsDesc
            newsList.append(newsDict)
    return newsList

3. Generate the URLs of all list pages and fetch all the news: list.extend(list), allnews

*Each student crawls the 10 list pages starting at the last digits of their student ID

# Index page plus list pages 37-45: 10 list pages in total
allnews = alist(listUrl)
for i in range(37, 46):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))

4. Set a reasonable crawl interval

import time
import random

time.sleep(random.random() * 3)
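A single sleep only helps if it actually runs between requests. One way to fold it into the page loop from step 3 (same URLs and functions as above; a sketch, not the original code):

import time
import random

allnews = alist(listUrl)
for i in range(37, 46):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))
    time.sleep(random.random() * 3)  # pause 0-3 seconds between list pages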

5. Do simple data processing with pandas and save the result

Save to a CSV or Excel file

newsdf.to_csv(r'F:\duym\爬虫\gzccnews.csv')

newsdf = pd.DataFrame(allnews)
newsdf.to_csv(r'E:\gzccnews.csv')
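If the CSV will be opened in Excel, writing it with a BOM-carrying UTF-8 codec usually keeps the Chinese column names readable; this is an optional tweak, not part of the original code:

newsdf = pd.DataFrame(allnews)
newsdf.to_csv(r'E:\gzccnews.csv', encoding='utf_8_sig')  # BOM so Excel detects UTF-8
# newsdf.to_excel(r'E:\gzccnews.xlsx')  # alternative; needs an Excel writer such as openpyxl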

Full code:

import re
import time
import random

import requests
import pandas as pd
from bs4 import BeautifulSoup
from datetime import datetime

newsUrl = "http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0323/11052.html"
clickUrl = "http://oa.gzcc.cn/api.php?op=count&id=11052&modelid=80"
listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'


def click(url):
    # Extract the news id from the detail-page URL and query the click-count API
    newsId = re.findall(r'(\d{1,5})', url)[-1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    resClick = requests.get(clickUrl)
    # The response is a JS snippet; strip everything around the number
    newsClick = int(resClick.text.split('.html')[-1].lstrip("('").rstrip("');"))
    return newsClick


def newsdate(showinfo):
    # showinfo is the text of the '.show-info' block on a detail page
    newsDate = showinfo.split()[0].split(':')[1]
    newsTime = showinfo.split()[1]
    # Convert to a datetime object, then format back to a normalized string
    newsDateTime = datetime.strptime(newsDate + ' ' + newsTime, '%Y-%m-%d %H:%M:%S')
    return newsDateTime.strftime('%Y-%m-%d %H:%M:%S')


def anews(url):
    # Fetch one detail page and collect title, publish time and click count
    newsDetail = {}
    res = requests.get(url)
    res.encoding = 'utf8'
    soup = BeautifulSoup(res.text, 'html.parser')
    showinfo = soup.select('.show-info')[0].text
    newsDetail['新闻标题'] = soup.select('.show-title')[0].text
    newsDetail['发布时间'] = newsdate(showinfo)
    newsDetail['点击量'] = click(url)  # fixed: use the function argument, not the global newsUrl
    return newsDetail


def alist(listUrl):
    # Fetch one list page and build a dict for every news item on it
    res = requests.get(listUrl)
    res.encoding = 'utf8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            newsDesc = news.select('.news-list-description')[0].text
            newsDict = anews(newsUrl)
            newsDict['新闻链接'] = newsUrl
            newsDict['新闻描述'] = newsDesc
            newsList.append(newsDict)
    return newsList


# Index page plus list pages 37-45: 10 list pages in total
allnews = alist(listUrl)
for i in range(37, 46):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))
    time.sleep(random.random() * 3)  # crawl interval from step 4

print(len(allnews))  # how many articles were collected

newsdf = pd.DataFrame(allnews)
newsdf.to_csv(r'E:\gzccnews.csv')

Run result:

 

posted @ 2019-04-12 19:28  ALiuYu