Scraping the Campus News Site

1. Use the requests and BeautifulSoup libraries to crawl the title, link, and body text of each news item on the campus news index page.

import requests
from bs4 import BeautifulSoup

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

news = soup.select('li')
for new in news:
    if len(new.select('.news-list-title')) > 0:
        title = new.select('.news-list-title')[0].text  # title
        aurl = new.select('a')[0].attrs['href']  # link (URL)
        text = new.select('.news-list-description')[0].text  # body / summary text
        print(title+'\n'+text+'\n'+aurl+'\n')

Output:
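
A note on the filter above: only the <li> items that actually contain a .news-list-title element are kept, which skips the other list items on the page. A minimal sketch of the same idea against a made-up HTML snippet (not the real page markup):

from bs4 import BeautifulSoup

demo_html = '''
<li><a href="http://example.com/a1"><div class="news-list-title">News A</div></a>
    <div class="news-list-description">Summary A</div></li>
<li><a href="http://example.com/page2">Next page</a></li>
'''
demo_soup = BeautifulSoup(demo_html, 'html.parser')
for li in demo_soup.select('li'):
    if len(li.select('.news-list-title')) > 0:  # the second <li> has no title node, so it is skipped
        print(li.select('.news-list-title')[0].text, li.select('a')[0].attrs['href'])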

 

2. Parse the detail-page string to extract each article's publish time, author, source, photographer, and related information.

res2 = requests.get(aurl)  # aurl comes from the loop in step 1
res2.encoding = 'utf-8'
soup2 = BeautifulSoup(res2.text, 'html.parser')
info = soup2.select('.show-info')[0].text
info = info.lstrip('发布时间:').rstrip('点击:次')
# print(info)
time = info[:info.find('作者')]  # publish time
author = info[info.find('作者:')+3:info.find('审核')]  # author
check = info[info.find('审核:')+3:info.find('来源')]  # reviewer
source = info[info.find('来源:')+3:info.find('摄影')]  # source
print(time, author, check, source, end="")
if info.find('摄影:') > 0:
    photogra = info[info.find('摄影:')+3:]  # photographer
    print(photogra)
else:
    print()

Output:
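
One caveat about the slicing above: str.lstrip and str.rstrip remove any of the listed characters rather than a literal prefix or suffix, so lstrip('发布时间:') only behaves as intended because the date that follows contains none of those characters. A regular-expression approach is a bit more robust; here is a rough sketch with a hypothetical field() helper, assuming the fields in the show-info text are separated by whitespace:

import re

def field(label, text):
    # hypothetical helper: grab the run of non-space characters that follows "label:", if present
    m = re.search(label + r':(\S+)', text)
    return m.group(1) if m else ''

author = field('作者', info)
check = field('审核', info)
source = field('来源', info)
photogra = field('摄影', info)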

 

3. Convert the publish time from a str to a datetime object.

from datetime import datetime

time = '2018-04-01 11:57:00'
dt = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')  # parse the string into a datetime object
print(type(dt))
print(dt, dt.strftime('%Y/%m/%d'))  # a datetime can be formatted back into other string styles

Output:
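
The same call can be applied to the publish-time slice produced in step 2, as long as surrounding whitespace is stripped first; a small sketch with a stand-in value:

from datetime import datetime

raw_time = '2018-04-01 11:57:00 '  # stand-in for the publish-time slice from step 2
dt = datetime.strptime(raw_time.strip(), '%Y-%m-%d %H:%M:%S')
print(dt.year, dt.month, dt.day)      # individual components are now available
print(dt.strftime('%Y/%m/%d %H:%M'))  # or re-format the value as needed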

 

4. Post the complete code and screenshots of the output with the assignment.

import requests
from bs4 import BeautifulSoup

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

news = soup.select('li')
for new in news:
    if len(new.select('.news-list-title')) > 0:
        title = new.select('.news-list-title')[0].text  # title
        aurl = new.select('a')[0].attrs['href']  # link (URL)
        text = new.select('.news-list-description')[0].text  # body / summary text
        print(title + '\n' + text + '\n' + aurl + '\n')

        res2 = requests.get(aurl)
        res2.encoding = 'utf-8'
        soup2 = BeautifulSoup(res2.text, 'html.parser')
        info = soup2.select('.show-info')[0].text
        info = info.lstrip('发布时间:').rstrip('点击:次')
        # print(info)
        time = info[:info.find('作者')]  # publish time
        author = info[info.find('作者:')+3:info.find('审核')]  # author
        check = info[info.find('审核:')+3:info.find('来源')]  # reviewer
        source = info[info.find('来源:')+3:info.find('摄影')]  # source
        print(time, author, check, source, end="")
        if info.find('摄影:') > 0:
            photogra = info[info.find('摄影:')+3:]  # photographer
            print(photogra)
        else:
            print()
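
To tie step 3 into the crawler, the conversion can go inside the same loop; a short sketch assuming the sliced value is exactly a 'YYYY-MM-DD HH:MM:SS' timestamp once whitespace is stripped:

from datetime import datetime

# inside the loop above, after `time` has been sliced out of the show-info text:
dtime = datetime.strptime(time.strip(), '%Y-%m-%d %H:%M:%S')
print(dtime, type(dtime))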


posted @ 2018-04-04 00:00  247李嘉嘉