Fetching all campus news with Python

Fetch all campus news:

import requests
from bs4 import BeautifulSoup
import re
from datetime import datetime


def gzcc_content_clicks(content_url):
    # The article id is the number right before ".html" in the article URL.
    content_id = re.search(r'(\d+)\.html', content_url).group(1)
    click_url = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(content_id)
    resp = requests.get(click_url)
    resp.encoding = 'utf-8'
    # The counter API returns a JS snippet; pull the number out of $('#hits').html('...').
    click_number = re.search(r"\('#hits'\)\.html\('(\d+)'\)", resp.text).group(1)
    return int(click_number)
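
# A quick sanity check for the extraction above. Judging from the regex, the
# counter API seems to return a small jQuery snippet along the lines of
# "$('#hits').html('1234')" (the exact response shape is an assumption here):
#
#   sample = "$('#hits').html('1234')"
#   re.search(r"\('#hits'\)\.html\('(\d+)'\)", sample).group(1)  # -> '1234'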


def gzcc_content_info(content_url):
    content_info = {}
    resp = requests.get(content_url)
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'html.parser')
    # Patterns for the metadata fields in the "show-info" line.
    match_str = {'author': r'作者:(.*)\s+[审核]?', 'examine': r'审核:(.*)\s+[来源]?',
                 'source': r'来源:(.*)\s+[摄影]?', 'photography': r'摄影:(.*)\s+[点击]'}
    remarks = soup.select('.show-info')[0].text
    for i in match_str:
        if re.match('.*' + match_str[i], remarks):
            # Fields are separated by non-breaking spaces (\xa0); keep the first chunk.
            content_info[i] = re.search(match_str[i], remarks).group(1).split('\xa0')[0]
        else:
            content_info[i] = ' '
    time = re.search(r'\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}', remarks).group()
    content_info['time'] = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
    content_info['title'] = soup.select('.show-title')[0].text
    content_info['url'] = content_url
    content_info['clicks'] = gzcc_content_clicks(content_url)
    # content_info['content'] = soup.select('#content')[0].text
    # with open('test.txt', 'a', encoding='UTF-8') as story:
    #     story.write(content_info['content'])
    return content_info
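
# For reference, the ".show-info" line parsed above looks roughly like the
# following (reconstructed from the regexes and the \xa0 split, not verified):
#
#   2018-04-01 11:57:00  作者:xxx 审核:xxx 来源:xxx 摄影:xxx 点击: 次
#
# with the fields separated by non-breaking spaces (\xa0).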


def gzcc_list_page(page_url):
    page_news = []
    res = requests.get(page_url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    news_list = soup.select('.news-list')[0]
    news_point = news_list.select('li')
    for i in news_point:
        a = i.select('a')[0]['href']  # link to the article detail page
        page_news.append(gzcc_content_info(a))
    return page_news


all_news = []
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
# The second-to-last link in the pager holds the total page count.
n = int(soup.select('#pages')[0].select('a')[-2].text)
all_news.extend(gzcc_list_page(url))  # page 1 is the index page itself
for i in range(2, n + 1):  # pages 2..n
    all_news.extend(gzcc_list_page('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)))
for i in all_news:
    print(i)
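
The commented-out lines in gzcc_content_info hint at persisting the scraped text to a file. If the structured records are wanted on disk as well, a minimal sketch with pandas (not part of the original script; assumes pandas is installed) could be appended after the loop:

import pandas as pd

# Each dict in all_news becomes one row: author, examine, source,
# photography, time, title, url, clicks.
df = pd.DataFrame(all_news)
df.to_csv('gzcc_news.csv', index=False, encoding='utf-8-sig')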

Crawl the articles from the NetEase Renjian (人间) "Jishi" (记事) column:

import requests
from bs4 import BeautifulSoup
import demjson
import jieba
import re


def get_content_info(content_url):
    res = requests.get(content_url)
    res.encoding = 'gbk'
    soup = BeautifulSoup(res.text, 'html.parser')
    return soup.select('#endText')[0].text


def get_page_info(page_url):
    res = requests.get(page_url)
    res.encoding = 'gbk'
    soup = BeautifulSoup(res.text, 'html.parser')
    # The article list is embedded in the page as "var data_list = [...]".
    json_str = soup.select('script')[4].text.replace('var data_list =', '')
    data = demjson.decode(json_str)
    text = ''
    for i in data:  # collect every article on the page, not just the first one
        text += get_content_info(i['url'])
    return text
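
# For reference, the embedded script decoded above presumably looks like
# (an assumption based on the replace() call and the i['url'] access):
#
#   var data_list = [ {"url": "http://renjian.163.com/...", "title": "...", ...}, ... ]
#
# demjson is used because the embedded literal need not be strict JSON.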


def print_words_count(text, top):
    # Stop words (function words, pronouns, etc.) to drop before counting.
    miss_word = "了|他|说|我|你|就|着|又|的|在|是|有|把|到|也|不|都|她|这|便|去|们|还|但|一个|和|却|里|来|要|没|很|\"" \
                "|那|么|一|将|呢|起|于|上|只|得|而|而且|对|所以|见|些|才|从|过|被|并|时|且|给|道|虽然|可以|出"
    # Strip punctuation and whitespace, then the stop words.
    text = re.sub(r"[\s+\.\!\/_\",$%^*+—()?【】“《;》”!\-:,。?、~@#¥%……&*()]+", "", text)
    text = re.sub('(?:' + miss_word + ')', "", text)
    words = list(jieba.cut(text))
    key_words = {}
    for i in set(words):  # count word frequencies
        key_words[i] = words.count(i)
    sort_word = sorted(key_words.items(), key=lambda d: d[1], reverse=True)  # sort by frequency
    for j in range(top):  # print the top N words
        print(sort_word[j])


cn = ''
url = 'http://renjian.163.com/special/renjian_jishi/'
res = requests.get(url)
res.encoding = 'gbk'
soup = BeautifulSoup(res.text, 'html.parser')
# The second-to-last link in the pager holds the total page count.
page = int(soup.select('.list_page')[0].select('a')[-2].text)
cn += get_page_info(url)  # page 1 is the index page itself
for p in range(2, page + 1):  # pages 2..page; sub-page URLs zero-pad to two digits
    cn += get_page_info('http://renjian.163.com/special/renjian_jishi_{:02d}/'.format(p))
print_words_count(cn, 10)
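
The frequency count in print_words_count calls words.count() once per distinct word, rescanning the whole list each time. A shorter one-pass equivalent for the same top-N output (a sketch using collections.Counter, applied to text that has already been cleaned the same way) would be:

from collections import Counter

import jieba


def top_words(text, top):
    # Counter tallies every token in one pass; most_common sorts by frequency.
    for word, count in Counter(jieba.cut(text)).most_common(top):
        print((word, count))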
