Fetching all campus news

1. Extract all the news items on a single news-list page, wrapped up as a function.

2. Get the total number of news articles and work out the total number of list pages.

3. Fetch the full details of every news item on every list page.

import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime

newsurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(newsurl)  # returns a Response object
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

def getClickCount(newsUrl):
    # Pull the article id out of a URL like .../xiaoyuanxinwen_0404/9183.html
    newId = re.search(r'_(.*)\.html', newsUrl).group(1).split('/')[1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId)
    # The count API returns a snippet of JavaScript, not JSON; slice the number out of it
    return int(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))
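Because the click-count API answers with JavaScript, the string slicing above is doing the real parsing work. A quick sanity check against a hypothetical response body of the shape that slicing implies:

# Hypothetical response body, inferred from the slicing logic above
sample = "$('#hits').html('5423');"
count = int(sample.split('.html')[-1].lstrip("('").rstrip("');"))
print(count)  # 5423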



def getNewsDetail(newsurl):
    resd = requests.get(newsurl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    title = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    # The info line starts with the publication time: 19 characters of date/time
    dt = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    if info.find('来源') > 0:
        source = info[info.find('来源:'):].split()[0].lstrip('来源:')
    else:
        source = 'none'
    content = soupd.select('.show-content')[0].text.strip()
    click = getClickCount(newsurl)
    print(click, title, newsurl, source, dt)
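For reference, the slicing in getNewsDetail assumes the .show-info line looks roughly like the reconstructed sample below (hypothetical values; the real byline carries more fields):

from datetime import datetime

# Reconstructed, hypothetical .show-info text matching the slicing above
info = '发布时间:2018-04-04 09:00:00 作者: 来源:新闻网 点击:'
dt = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
source = info[info.find('来源:'):].split()[0].lstrip('来源:')
print(dt, source)  # 2018-04-04 09:00:00 新闻网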

def getListPage(listPageUrl):
    res = requests.get(listPageUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        # only <li> entries that carry a news title are real news items
        if len(news.select('.news-list-title')) > 0:
            a = news.select('a')[0].attrs['href']
            getNewsDetail(a)



resn = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
resn.encoding = 'utf-8'
soupn = BeautifulSoup(resn.text, 'html.parser')
# The .a1 element shows the total article count, e.g. '9873条';
# with 10 items per list page, derive the total page count from it
n = int(soupn.select('.a1')[0].text.rstrip('条')) // 10 + 1

for i in range(n, n + 1):  # fetch only the last list page as a test
    pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(pageUrl)
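The loop above only exercises the last list page as a test. To actually walk every page (task 3), a minimal sketch, assuming page 1 is served as the index and pages 2 through n follow the {page}.html pattern used above:

getListPage(newsurl)  # page 1 is the index page itself
for i in range(2, n + 1):
    getListPage('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i))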

4. Scrape the comments under a NetEase news article.

import requests
import json

# pretend to be a desktop browser so the comment API does not reject the request
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'}
commentUrl='http://comment.news.163.com/news2_bbs/CPISM0FT000189FH.html'

def createUrl(commentUrl, offset, limit):
    s1 = 'http://comment.news.163.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/threads/'
    s2 = '/comments/newList?offset='
    name = commentUrl.split('/')[-1].split('.')[0]  # article id, e.g. CPISM0FT000189FH
    return s1 + str(name) + s2 + str(offset) + '&limit=' + str(limit)
res = requests.get(url=createUrl(commentUrl, 1, 40), headers=headers).content
data = json.loads(res.decode())
for key in data['comments'].keys():
    print(data['comments'][key]['content'])
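To pull more than one page of comments, the same URL builder can be stepped through offsets (a minimal sketch, assuming the API pages by the offset/limit pair the query string suggests):

for offset in range(0, 120, 40):  # first three pages of 40 comments each
    res = requests.get(url=createUrl(commentUrl, offset, 40), headers=headers).content
    data = json.loads(res.decode())
    for key in data['comments'].keys():
        print(data['comments'][key]['content'])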

 
