Using regular expressions, getting the click count, and factoring the code into functions

1. Use a regular expression to check whether an email address is entered correctly
2. Use a regular expression to find all the telephone numbers (landline format here)
3. Use a regular expression to split English text into words
 
import re
'''
1. Use a regular expression to check whether an email address is entered correctly
2. Use a regular expression to find all the telephone numbers (landline format here)
3. Use a regular expression to split English text into words
'''
 
 
def em_match(e):
    # Accept addresses of the form name@domain.com or name@domain.cn,
    # matching the whole input rather than just a fragment of it
    if re.fullmatch(r'[0-9a-zA-Z][0-9a-zA-Z_]*@[a-z]+\.(com|cn)', str(e)):
        print('Success!')
    else:
        print('Wrong layout')
 
 
def tel_match(n):
    # Accept landline numbers such as 020-82876130:
    # a 3-5 digit area code, a hyphen, then a 6-8 digit local number
    if re.fullmatch(r'[0-9]{3,5}-[0-9]{6,8}', str(n)):
        print('Success!')
    else:
        print('Wrong layout')
 
 
def word_split(mm):
    # Split the text into words on whitespace and common punctuation,
    # dropping the empty strings the split leaves behind
    mess = [w for w in re.split(r'[\s,.!?]+', str(mm)) if w]
    print(mess)
 
 
e = input("Please enter your email: ")
em_match(e)
 
n = input("Please enter your telephone number: ")
tel_match(n)
 
m = '''
Five score years ago, a great American, in whose symbolic shadow we stand today, signed the Emancipation Proclamation.
 This momentous decree came as a great beacon light of hope to millions of Negro slaves who had been seared in the
 flames of withering injustice. It came as a joyous daybreak to end the long night of bad captivity.
   '''
word_split(m)
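For a quick check without typing anything at the prompts, the three functions can also be driven with hard-coded sample values; the addresses and numbers below are made up purely for illustration:

# Hypothetical test values, assuming em_match, tel_match and word_split above are defined
em_match('zhang_3@example.com')    # Success!
em_match('zhang 3@example')        # Wrong layout
tel_match('020-82876130')          # Success!
tel_match('1234-56')               # Wrong layout
word_split('It came as a joyous daybreak.')  # ['It', 'came', 'as', 'a', 'joyous', 'daybreak']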

  

4. Use a regular expression to get the news number

5. Build the Request URL for the click count (a small worked example follows this list)

6. Get the click count

7. Wrap steps 4-6 into a single function: def getClickCount(newsUrl):

8. Wrap the code that fetches the news details into a function: def getNewDetail(newsUrl):
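As a worked illustration of steps 4 and 5 (the article URL below is hypothetical and only chosen to make the string handling visible; the real URLs come from the list page scraped in the program that follows):

import re

# Hypothetical article URL in the layout used by the news site
newsUrl = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
# Step 4: the capture group is '0404/9183'; the last four characters are the
# news number (this slice assumes a four-digit number, as the code below does)
newsId = re.findall(r'\_(.*)\.html', newsUrl)[0][-4:]   # '9183'
# Step 5: splice the number into the click-count Request URL
clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)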

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')


# Get the click count of a news article (steps 4-6 wrapped into one function)
def getClickCount(newsUrl):
    # Step 4: extract the news number from the article URL (assumes a four-digit number)
    newsId = re.findall(r'\_(.*)\.html', newsUrl)[0][-4:]
    # Step 5: build the Request URL of the click-count API
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    clickRes = requests.get(clickUrl)
    # Step 6: the response is a short JavaScript snippet; pull the number out of .html('...')
    clickCount = int(re.search(r"hits'\)\.html\('(.*)'\);", clickRes.text).group(1))
    return clickCount



def getNewDetail(newsUrl):
    # Read the news detail page
    resDescript = requests.get(newsUrl)
    resDescript.encoding = "utf-8"
    soupDescript = BeautifulSoup(resDescript.text, 'html.parser')

    content = soupDescript.select(".show-content")[0].text  # article body
    info = soupDescript.select(".show-info")[0].text  # info line (time, author, source, ...)
    # Option 1: split the info line on whitespace, e.g. message = info.split()
    # Option 2: pick each field out with a regular expression
    time = re.search("发布时间:(.*) \xa0\xa0 \xa0\xa0作者:", info).group(1)
    author = re.search("作者:(.*)\xa0\xa0审核:", info).group(1)
    right = re.search("审核:(.*)\xa0\xa0来源:", info).group(1)
    resource = re.search('来源:(.*)\xa0\xa0\xa0\xa0摄影:', info).group(1)
    video = re.search("摄影:(.*)\xa0\xa0\xa0\xa0点击:", info).group(1)
    count = getClickCount(newsUrl)
    dateTime = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')

    # title, description and a are set by the loop below before this function is called
    print('Title: ' + title)
    print('Summary: ' + description)
    print('Link: ' + a)
    print('Body: ' + content)
    print('Publish time: {0}\nAuthor: {1}\nReviewer: {2}\nSource: {3}\nPhotographer: {4}\nClick count: {5}'.format(dateTime, author, right, resource, video, count))
    print("\n")



for s in soup.select("li"):
    if len(s.select(".news-list-title")) > 0:
        title = s.select(".news-list-title")[0].text  # news title
        description = s.select(".news-list-description")[0].text  # news summary
        a = s.a.attrs["href"]  # link to the news detail page
        getNewDetail(a)
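The click-count API does not return JSON; judging from the regex in getClickCount, the body is a small JavaScript snippet that writes the number into the page. The fragment below is a made-up response of that shape, showing what the regex pulls out of it:

import re

# A made-up response body in the shape the regex in getClickCount expects
sample = "$('#hits').html('166');"
print(int(re.search(r"hits'\)\.html\('(.*)'\);", sample).group(1)))  # prints 166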

  

 
