Structuring and Saving the Data
1. Save the body text of the news articles to a text file.
def save(content):
    # Append one article's body text to content.txt (UTF-8 so the Chinese text is written correctly)
    f = open("content.txt", "a", encoding="utf-8")
    f.write(content)
    f.close()
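A quick sanity check of the helper; the sample strings below are only placeholders, not real article text:

# hypothetical sample calls: each call appends one more piece of text to content.txt
save("first article body...\n")
save("second article body...\n")
print(open("content.txt", encoding="utf-8").read())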
2. Structure the news data as a list of dictionaries:
- the details of a single news item --> a dict, news
- all news items on one listing page collected --> a list, newsls.append(news)
- all news from every listing page collected --> a list, newstotal.extend(newsls) (a paging sketch follows the code below)
# Get the details of one news article
import requests
from bs4 import BeautifulSoup

def getNewDetail(newsUrl):
    res2 = requests.get(newsUrl)
    res2.encoding = 'utf-8'
    soup2 = BeautifulSoup(res2.text, 'html.parser')
    info = soup2.select('.show-info')[0].text
    # strip the leading '发布时间:' label and the trailing click-count text
    info = info.lstrip('发布时间:').rstrip('点击:次')
    news = {}
    news['title'] = soup2.select('.show-title')[0].text                        # title (selector assumed analogous to .show-info / .show-content)
    news['time'] = info[:info.find('作者')].rstrip()                            # publish time
    news['author'] = info[info.find('作者:') + 3:info.find('审核')].rstrip()    # author
    news['check'] = info[info.find('审核:') + 3:info.find('来源')].rstrip()     # reviewer
    news['clickCount'] = click(newsUrl)                                         # click count, via the click() helper from the earlier exercise
    news['detail'] = soup2.select('.show-content')[0].text.strip()              # body text
    save(news['detail'])                                                        # step 1: append the body text to content.txt
    if info.find('来源:') > 0:
        news['source'] = info[info.find('来源:') + 3:].split()[0]               # source
    else:
        news['source'] = 'none'
    if info.find('摄影:') > 0:
        news['photogra'] = info[info.find('摄影:') + 3:].split()[0]             # photographer
    else:
        news['photogra'] = 'none'
    return news
newstotal = []
pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
newstotal.extend(getListPage(pageUrl))
# Parse one listing page and return a list of news dicts
def getListPage(url):
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-info')) > 0:
            newsUrl = news.select('a')[0].attrs['href']
            newsList.append(getNewDetail(newsUrl))
    return newsList
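Step 2 also calls for aggregating every listing page, while the driver above only crawls the front page. A hedged sketch of the remaining pages, assuming the site's paging URL pattern .../xiaoyuanxinwen/N.html and a placeholder page count (both are assumptions, not shown in the snippets above):

# Assumption: listing pages 2..n follow the pattern xiaoyuanxinwen/2.html, 3.html, ...
# n is a hypothetical last-page number; the real value would come from the
# page-count logic of the earlier listing-page exercise.
n = 5
for i in range(2, n + 1):
    listPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newstotal.extend(getListPage(listPageUrl))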
3. Install pandas and create a DataFrame object df with pandas.DataFrame(newstotal).
import pandas
df = pandas.DataFrame(newstotal)
print(df)
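Before analysing, it can help to confirm the shape and column names produced by the dicts; a minimal check (the expected columns are simply the keys set in getNewDetail above):

print(df.shape)    # (number of news items, number of columns)
print(df.columns)  # expect: title, time, author, check, clickCount, detail, source, photogra
print(df.head())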
4. Use df to save the extracted data to a CSV or Excel file.
import openpyxl
df.to_excel('gzccnews.xlsx')
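Step 4 also mentions CSV; a minimal sketch (the file name is arbitrary):

# utf_8_sig adds a BOM so Excel opens the Chinese text correctly
df.to_csv('gzccnews.csv', encoding='utf_8_sig')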
5. Use pandas functions and methods to analyse the data:
- Select the click count, title and source columns of the first 6 rows.
- Select the news published by '学校综合办' with a click count above 3000.
- Select the news published by '国际学院' and '学生工作处'.
- Select the news from March 2018.
print(df[['title', 'clickCount', 'source']][:6])
print(df[(df['clickCount'] > 3000) & (df['source'] == '学校综合办')])
sou = ['国际学院', '学生工作处']
print(df[df['source'].isin(sou)])
# partial-string selection like '2018-03' needs a DatetimeIndex, so convert 'time' first
df['time'] = pandas.to_datetime(df['time'])
dftime = df.set_index('time')
print(dftime.loc['2018-03'])
6. Save to an sqlite3 database
import sqlite3
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    df.to_sql('gzccnews05', con=db, if_exists='replace')
7. Read the data back from sqlite3
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    df2 = pandas.read_sql_query('SELECT * FROM gzccnews05', con=db)
print(df2)
8. Save df to a MySQL database
Install SQLAlchemy
Install PyMySQL
Create the database in MySQL: create database gzccnews charset utf8;
import pymysql
from sqlalchemy import create_engine
conn = create_engine('mysql+pymysql://root:root@localhost:3306/gzccnews?charset=utf8')
df.to_sql('gzccnews', con=conn, if_exists='replace')
Check in MySQL that the data has been saved (via the MySQL command-line client or Navicat).
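Besides a GUI client, the table can also be read back from Python through the same engine; a minimal sketch reusing the conn engine above (df_mysql is just an illustrative name):

# read the table back to confirm the rows were written
df_mysql = pandas.read_sql_query('SELECT * FROM gzccnews', con=conn)
print(df_mysql.head())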