1. Save the body text of each news article to a text file.

def writeNewsDetails(contents):
    # Append the article body to a local text file.
    with open('gzccnews.txt', 'a', encoding='utf-8') as f:
        f.write(contents)
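The call site for this helper is in getNewDetail below (step 2), where the article body is extracted. A minimal standalone check, with a made-up sample string:

writeNewsDetails('示例正文\n')  # appends the sample text to gzccnews.txt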

2. Structure the news data as a list of dictionaries:

  • The details of a single news item --> a dictionary: news
  • All items on one list page --> a list: newsls.append(news)
  • All news from every list page --> newstotal.extend(newsls)

The functions below implement this:
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime

def getClickCount(newsUrl):
    # Extract the numeric news id (the last path segment before .html) from the detail URL.
    newId = re.search(r'_(.*).html', newsUrl).group(1).split('/')[-1]
    res = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId))
    res.encoding = 'utf-8'
    count = int(res.text.split('.html')[-1].lstrip("('").rstrip("');"))
    return count
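# For reference: the count API is assumed to return a JavaScript snippet ending in
# something like ".html('5421');", so split('.html')[-1] leaves "('5421');" and the
# lstrip/rstrip calls above reduce it to the bare number 5421.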
def getNewDetail(newsUrl):
    res = requests.get(newsUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    news = {}
    news['title'] = soup.select('.show-title')[0].text
    info = soup.select('.show-info')[0].text
    content = soup.select('#content')[0].text
    writeNewsDetails(content)  # step 1: append the article body to gzccnews.txt
    # info holds whitespace-separated fields: 发布时间 (publish time), 作者 (author),
    # 审核 (reviewer), 来源 (source).
    s = info.split()[0].lstrip('发布时间:') + " " + info.split()[1]  # publication date + time
    if info.find('来源:') > 0:
        news['source'] = info[info.find('来源:'):].split()[0].lstrip('来源:')
    else:
        news['source'] = 'none'
    news['time'] = datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
    news['clickCount'] = getClickCount(newsUrl)
    news['newsUrl'] = newsUrl
    print(news)
    return news
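# Shape of the returned dictionary (all field values here are hypothetical):
# {'title': '...', 'source': '学校综合办', 'time': datetime(2018, 4, 11, 9, 12),
#  'clickCount': 5421, 'newsUrl': 'http://news.gzcc.cn/html/...'}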
     
def getListPage(pageUrl):
    res = requests.get(pageUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newslist = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            a = news.select('a')[0].attrs['href']  # the news detail URL
            newslist.append(getNewDetail(a))
    return newslist
      
      
def getPageN():
    res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    # '.a1' reads like '257条' (total item count); 10 items per list page,
    # so e.g. 257 // 10 + 1 = 26 pages.
    pagenumber = int(soup.select('.a1')[0].text.rstrip('条'))
    page = pagenumber // 10 + 1
    return page
      
      
n = getPageN()
print(n)

firstUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen'  # the first list page has no page number

newstotal = []
# Test run over pages 2-3 only; a full-crawl sketch follows below.
for i in range(2, 4):
    print(i)
    listPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    temp = getListPage(listPageUrl)
    newstotal.extend(temp)
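A sketch of a full crawl, assuming the unnumbered first list page uses the same markup as the numbered ones (the test run above only covers pages 2-3):

newstotal = getListPage(firstUrl)  # the first page has no page number in its URL
for i in range(2, n + 1):
    listPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newstotal.extend(getListPage(listPageUrl))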

3. Install pandas and use pandas.DataFrame(newstotal) to create a DataFrame object df.

import pandas

df = pandas.DataFrame(newstotal)
print(df)

4. Save the extracted data to a CSV or Excel file through df.

df.to_csv('gzccnews.csv')
df.to_excel('gzccnews.xlsx')  # requires an Excel writer package such as openpyxl
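If the exported CSV shows garbled Chinese when opened directly in Excel, writing UTF-8 with a BOM usually fixes it (an optional tweak, same file name as above):

df.to_csv('gzccnews.csv', encoding='utf_8_sig')  # BOM lets Excel detect UTF-8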

5. Analyze the data with the functions and methods pandas provides:

  • Extract the first 6 rows, with the title, click count and source columns
  • Extract the news published by '学校综合办' whose click count exceeds 3000
  • Extract the news published by '国际学院' and '学生工作处'
  • Extract the news from March 2018
print(df[['title', 'clickCount', 'source']][:6])

print(df[(df['clickCount'] > 3000) & (df['source'] == '学校综合办')])

sou = ['国际学院', '学生工作处']
print(df[df['source'].isin(sou)])

# Extract the news from March 2018: index by time, then select by partial date string.
df1 = df.set_index('time')
print(df1.loc['2018-03'])
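One more pandas one-liner that fits here (an extra example, not part of the original requirements): sorting by click count to surface the most-read articles.

print(df.sort_values(by='clickCount', ascending=False).head(10))  # 10 most-clicked items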

6. Save the data to a sqlite3 database.

import sqlite3

with sqlite3.connect('gzccnewsdb.sqlite') as db:
    df.to_sql('gzccnews05', con=db, if_exists='replace')


7. Read the data back from sqlite3.

with sqlite3.connect('gzccnewsdb.sqlite') as db:
    df2 = pandas.read_sql_query('SELECT * FROM gzccnews05', con=db)
print(df2)


8. Save df to a MySQL database.

Install SQLAlchemy
Install PyMySQL
Create the database in MySQL: create database gzccnews charset utf8;

import pymysql
from sqlalchemy import create_engine

conn = create_engine('mysql+pymysql://root:root@localhost:3306/gzccnews?charset=utf8')
df.to_sql('gzccnews', con=conn, if_exists='replace')
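To verify from Python as well, the table can be read back through the same engine (a quick check, assuming the write above succeeded):

df_check = pandas.read_sql('SELECT * FROM gzccnews', con=conn)
print(df_check.head())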

Check in MySQL that the data has been saved (via the MySQL client or Navicat).

 
