Using regular expressions: getting the click count and factoring the code into functions

1. Use a regular expression to check whether an email address is entered correctly.

import re

# Email pattern: a local part and a domain, each made of up to five
# dot-separated segments (underscore/hyphen allowed after the first local segment)
pattern = r'^[a-zA-Z0-9]+(\.[a-zA-Z0-9_-]+){0,4}@[a-zA-Z0-9]+(\.[a-zA-Z0-9]+){0,4}$'
email = '361850205@qq.com'
if re.match(pattern, email):
    print('success')
else:
    print('please input a valid email address')
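A quick way to exercise the pattern is to loop it over a few sample addresses (the addresses below are made up for illustration):

import re

pattern = r'^[a-zA-Z0-9]+(\.[a-zA-Z0-9_-]+){0,4}@[a-zA-Z0-9]+(\.[a-zA-Z0-9]+){0,4}$'
for addr in ['361850205@qq.com', 'first.last@example.com', 'not-an-email']:
    # re.match anchors at the start; the pattern's $ anchors the end
    print(addr, '->', 'success' if re.match(pattern, addr) else 'rejected')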

2. Use a regular expression to find all phone numbers on a page.

import re
import requests

newurl = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
res = requests.get(newurl)
res.encoding = 'utf-8'
# Match an area code (3-4 digits) and a local number (7-8 digits) joined by a hyphen
tel = re.findall(r'(\d{3,4}) *- *(\d{7,8})', res.text)
print(tel)
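Because the pattern has two capture groups, re.findall returns a list of (area code, number) tuples; a one-liner joins them back into printable numbers (the tuples below are a hypothetical findall result):

tel = [('020', '82876130'), ('020', '82876131')]  # hypothetical findall result
numbers = ['-'.join(t) for t in tel]
print(numbers)  # ['020-82876130', '020-82876131']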

3. Use a regular expression to split English text into words with re.split(pattern, news).

import re
news = '''Lee is on a five-day working visit to China starting Sunday, his second visit to China since September. During the visit, he will attend the Boao Forum for Asia's annual conference in Hainan. It will be his first time at the conference and he will deliver a speech at the opening session of the forum.'''
# Split on runs of whitespace and common punctuation
print(re.split(r'[\s,.?!]+', news))
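Once the text is tokenized, counting word frequencies is a short step with collections.Counter; a minimal sketch (the sample sentence is made up, and the empty string that re.split leaves after trailing punctuation is filtered out):

import re
from collections import Counter

news = 'to be or not to be.'  # hypothetical sample text
words = [w for w in re.split(r'[\s,.?!]+', news) if w]  # drop empty strings
print(Counter(words).most_common(2))  # [('to', 2), ('be', 2)]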

4. Use a regular expression to extract the news ID

import re

def getNewsId(newsUrl):
    # The news ID is the run of digits just before '.html' at the end of the URL
    newsId = re.search(r'/(\d+)\.html$', newsUrl).group(1)
    print(newsId)

getNewsId('http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html')  # prints 9183
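re.search returns None when the URL does not end in digits plus .html, so calling .group(1) directly would raise AttributeError on, say, a list page. A guarded variant (a sketch; the list URL is used as the non-matching case):

import re

def getNewsIdSafe(newsUrl):
    # Return the numeric ID, or None when the URL has no '<digits>.html' tail
    m = re.search(r'/(\d+)\.html$', newsUrl)
    return m.group(1) if m else None

print(getNewsIdSafe('http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'))  # 9183
print(getNewsIdSafe('http://news.gzcc.cn/html/xiaoyuanxinwen/'))  # None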

5. Build the request URL for the click count

import re

url = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
# Capture the news ID between the date folder and '.html', then splice it into the count API URL
newsId = re.match(r'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/(.*)\.html', url).group(1)
clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
print(clickUrl)
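The query string can also be assembled with urllib.parse.urlencode instead of string formatting; a minimal sketch with the same endpoint and parameters as above:

from urllib.parse import urlencode

params = {'op': 'count', 'id': '9183', 'modelid': '80'}
clickUrl = 'http://oa.gzcc.cn/api.php?' + urlencode(params)
print(clickUrl)  # http://oa.gzcc.cn/api.php?op=count&id=9183&modelid=80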

6. Get the click count.

import re
import requests

a = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
# group(1) is '0404/9183'; the part after '/' is the news ID
newId = re.search(r'_(.*).html', a).group(1).split('/')[1]
clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId)
# The response wraps the count like ....html('NNNN'); strip the wrapper characters
print(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))
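String splitting is fragile if the response format ever shifts; a regex pulls the number out in one step. A sketch, assuming the response embeds the count as .html('NNNN'); the way the splitting code above implies (the sample body is hypothetical):

import re

text = "$('#hits').html('2345');"  # hypothetical response body
m = re.search(r"\.html\('(\d+)'\);", text)
print(int(m.group(1)))  # 2345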

7. Combine steps 4, 5 and 6 into a single function def getClickCount(newsUrl): (it appears in the full script under step 8 below).

8. Wrap the code that fetches the news details into a function def getNewsDetail(newsUrl):

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re

def getClickCount(newsUrl):
    # group(1) is e.g. '0404/9183'; the part after '/' is the news ID
    newId = re.search(r'_(.*).html', newsUrl).group(1).split('/')[1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId)
    # The response wraps the count like ....html('NNNN'); strip the wrapper characters
    return int(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))

def getNewsDetail(newsUrl):
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')

    info = soupd.select('.show-info')[0].text
    d = re.search('发布时间:(.*) \xa0\xa0 \xa0\xa0作者:', info).group(1)
    dt = datetime.strptime(d, '%Y-%m-%d %H:%M:%S')
    print('发布时间:{}'.format(dt))
    print('作者:' + re.search('作者:(.*)审核:', info).group(1))
    print('审核:' + re.search('审核:(.*)来源:', info).group(1))
    print('来源:' + re.search('来源:(.*)摄影:', info).group(1))
    print('摄影:' + re.search('摄影:(.*)点击', info).group(1))
    print('点击次数:{}'.format(getClickCount(newsUrl)))
    print('正文:' + soupd.select('.show-content')[0].text)


res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
for news in soup.select('li'):
    if len(news.select('.news-list-title')) > 0:
        a = news.select('a')[0].attrs['href']
        getNewsDetail(a)
        break  # stop after the first news item while testing
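One fragility in getNewsDetail: re.search(...).group(1) raises AttributeError on pages where a field such as 摄影 is missing from the info line. A minimal sketch of a guard helper (the helper name and the sample info string are made up for illustration):

import re

def matchOrEmpty(pattern, text):
    # Return group(1) when the pattern matches, else an empty string
    m = re.search(pattern, text)
    return m.group(1) if m else ''

info = '发布时间:2018-04-04 09:28:00  作者:示例'  # hypothetical info line
print(matchOrEmpty('作者:(.*)', info))  # 示例
print(matchOrEmpty('摄影:(.*)', info))  # empty string instead of a crash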

 
