Fetching all the campus news
1. From a news URL, get the news details as a dictionary: anews
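A minimal sketch of this step, assuming (as on news.gzcc.cn) the detail page keeps its title in a .show-title element and its metadata line in .show-info; the full version below also parses the datetime and the click count:

import requests
from bs4 import BeautifulSoup

def anews(url):  # return the details of one news page as a dict
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    return {
        'newsTitle': soup.select('.show-title')[0].text,
        'showInfo': soup.select('.show-info')[0].text,
    }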
2. From a list-page URL, get the news URLs and append one dictionary per item to a list: alist
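A sketch under the same assumptions: each real news item on the list page is an li containing a .news-list-title element, and its first a tag links to the detail page:

def alist(url):  # collect one dict per news item on a list page
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            newsList.append(anews(newsUrl))
    return newsList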
3. Generate the URLs of all the list pages and fetch all the news, extending one list with another: allnews
* Each student crawls the 10 list pages starting from the digits at the end of their student ID, e.g.:
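For a student ID ending in 15, the site's {}.html pagination gives:

allnews = []
for i in range(15, 25):  # 10 list pages: 15.html ... 24.html
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))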
4. Set a reasonable crawl interval
import time
import random
time.sleep(random.random() * 3)  # pause for a random interval of up to 3 seconds
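The sleep belongs inside the crawl loop, between consecutive requests, so that each page fetch is delayed by up to three seconds rather than the run as a whole (see the full program below).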
5. Do simple data processing with pandas and save the result
Save to a CSV or Excel file:
newsdf.to_csv(r'F:\duym\爬虫\gzccnews.csv', encoding='utf_8_sig')  # utf_8_sig so Excel displays Chinese text correctly
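A quick round-trip check, reading the file back with the same encoding:

import pandas as pd
newsdf = pd.read_csv(r'F:\duym\爬虫\gzccnews.csv', encoding='utf_8_sig')
print(newsdf.head())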
Save to a database:
import sqlite3
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    newsdf.to_sql('gzccnewsdb', db)  # pass if_exists='replace' to overwrite on re-runs
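To verify the write, the table can be read straight back into a DataFrame:

import pandas as pd
import sqlite3
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    print(pd.read_sql_query('SELECT * FROM gzccnewsdb', db))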
Full program (two bugs fixed: anews() now counts clicks for its own URL instead of the global newsUrl, and alist() now requests the URL it is given instead of the global listUrl; the crawl interval is applied between list pages):

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas as pd
import time
import random
import sqlite3


# Query the click-count API with the news id extracted from the URL
def click(url):
    id = re.findall(r'(\d{1,5})', url)[-1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(id)
    resClick = requests.get(clickUrl)
    newsClick = int(resClick.text.split('.html')[-1].lstrip("('").rstrip("');"))
    return newsClick


# Parse the publication datetime out of the show-info text
def newsdt(showinfo):
    newsDate = showinfo.split()[0].split(':')[1]
    newsTime = showinfo.split()[1]
    newsDT = newsDate + ' ' + newsTime
    dt = datetime.strptime(newsDT, '%Y-%m-%d %H:%M:%S')
    return dt


# Get the news details from a news URL as a dictionary: anews
def anews(url):
    newsDetail = {}
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsDetail['newsTitle'] = soup.select('.show-title')[0].text
    showinfo = soup.select('.show-info')[0].text
    newsDetail['newsDT'] = newsdt(showinfo)
    newsDetail['newsClick'] = click(url)  # was click(newsUrl), which always counted the same page
    return newsDetail


# Get the news URLs from a list-page URL, appending one dict per item: alist
def alist(url):
    res = requests.get(url)  # was requests.get(listUrl), which ignored the argument
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            newsDesc = news.select('.news-list-description')[0].text
            newsDict = anews(newsUrl)
            newsDict['description'] = newsDesc
            newsList.append(newsDict)
    return newsList


if __name__ == '__main__':
    # Preview the items on the front list page
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(listUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            print(anews(newsUrl))

    # Crawl the 10 list pages starting from the last digits of the student ID
    allnews = []
    for i in range(15, 25):
        listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
        allnews.extend(alist(listUrl))
        time.sleep(random.random() * 3)  # a random pause between list pages

    print("allnewsLength={}".format(len(allnews)))
    print(allnews)

    # A throwaway pandas Series, just to show the structure
    s1 = pd.Series([100, 23, 'bugingcode'])
    print(s1)

    newsdf = pd.DataFrame(allnews)
    print(newsdf)

    # Save as CSV; utf_8_sig so the file opens correctly in Excel
    newsdf.to_csv(r'F:\test1\gzcc.csv', encoding='utf_8_sig')

    # Save to a SQLite database
    with sqlite3.connect(r'F:\test1\gzccnewsdb.sqlite') as db:
        newsdf.to_sql('gzccnewsdb', db)