Using regular expressions, getting the click count, and extracting functions
1. Use a regular expression to check whether an email address is entered correctly.
import re

def checkEmail(Email):
    # Require a minimum overall length, then match a local part, a domain,
    # and one or two dot-separated suffixes (e.g. .com, .com.cn).
    # Lower bounds are 1, not 0, so an empty local part or domain is rejected.
    if len(Email) > 6:
        if re.match(r'^[0-9a-zA-Z_]{1,16}@[0-9a-zA-Z]{1,8}(?:\.\w{2,3}){1,2}$', Email):
            print('success')
        else:
            print('fail')
    else:
        print('fail')

Email = '1271191709@qq.com'
checkEmail(Email)
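A quick sanity check of the pattern (these sample addresses are invented for illustration):

checkEmail('abc_123@example.com')   # expected: success
checkEmail('@qq.com')               # empty local part, expected: fail
checkEmail('a@b.c')                 # too short, expected: fail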
2. Use a regular expression to find all phone numbers in a string.
import re

def checknumber(number):
    # Match a 2-3 digit area code starting with 0, optionally wrapped in
    # parentheses or followed by '-' or a space, then a 7-8 digit number.
    n = re.findall(r"\(?0\d{2,3}[) -]?\d{7,8}", number)
    if n:
        print(n)
    else:
        print('not match')

nb = "(021)88734543 010-44767890"
checknumber(nb)
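If the matched numbers need to be stored or compared in a uniform form, one possible follow-up (a sketch, not part of the original exercise; the helper name is ours) is to strip everything except digits with re.sub:

def normalize(num):
    # \D matches any non-digit, so this drops parentheses, dashes, spaces.
    return re.sub(r'\D', '', num)

print(normalize('(021)88734543'))  # -> '02188734543'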
3. Use a regular expression to split English text into words with re.split().
import re

article = '''new233old fake help what '''
# Split on runs of digits, whitespace, and punctuation.
print(re.split(r'[\d\s,.?!""]+', article))
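Note that re.split can leave empty strings at the edges (the sample article ends with a space, so the result ends with ''). A sketch of an alternative that extracts the words directly instead of splitting on separators:

# Pull out runs of letters; this avoids the trailing '' that
# re.split produces on this input.
print(re.findall(r'[a-zA-Z]+', article))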
4. Use a regular expression to extract the news ID.
5. Build the Request URL for the click count.
6. Fetch the click count.
7. Wrap steps 4-6 into one function: def getClickCount(newsUrl):
8. Wrap the code that fetches the news details into one function: def getNewDetail(newsUrl):
import re
import requests

def getClickCount(newsUrl):
    # Step 4: the news ID is the part between '_' and '.html';
    # split('/')[-1] keeps only the trailing numeric segment.
    newsId = re.search(r'\_(.*).html', newsUrl).group(1).split('/')[-1]
    # Step 5: fill the ID into the click-count API's Request URL.
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    # Step 6: fetch the count and strip the jsonp-style wrapper,
    # then return it so callers can reuse the value.
    resc = requests.get(clickUrl)
    return resc.text.split('.html')[-1].lstrip("('").rstrip("');")
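Breaking the same function down, steps 4-6 on a single URL might look like this (the sample URL follows the site's pattern but is made up, and the regex parse of the count assumes the API body embeds it as ('N'); both are assumptions worth verifying):

# Step 4: extract the news ID (sample URL is hypothetical).
newsUrl = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
newsId = re.search(r'\_(.*).html', newsUrl).group(1).split('/')[-1]

# Step 5: build the count API's Request URL.
clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)

# Step 6: fetch, then pull the number out with a regex instead of
# string splits, assuming the response wraps the count as ('N').
resc = requests.get(clickUrl)
m = re.search(r"\('(\d+)'\)", resc.text)
if m:
    print(m.group(1))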
from datetime import datetime
import re
import requests
from bs4 import BeautifulSoup

def getNewDetail(newsUrl):
    # Fetch and parse one news-detail page.
    res1 = requests.get(newsUrl)
    res1.encoding = 'utf-8'
    soup1 = BeautifulSoup(res1.text, 'html.parser')
    title = soup1.select('.show-title')[0].text
    content = soup1.select('#content')[0].text
    about = soup1.select('.show-info')[0].text
    # The info line starts with '发布时间:' followed by a 19-character timestamp.
    time = about.lstrip('发布时间:')[:19]
    time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
    # Optional fields default to 'null' so the print below never hits a NameError.
    origin = writer = audit = photograph = 'null'
    if about.find('来源:') > 0:
        origin = about[about.find('来源:'):].split()[0]
    if about.find('作者:') > 0:
        writer = about[about.find('作者:'):].split()[0]
    if about.find('审核:') > 0:
        audit = about[about.find('审核:'):].split()[0]
    if about.find('摄影:') > 0:
        photograph = about[about.find('摄影:'):].split()[0]
    # Reuse the function from steps 4-7 instead of rebuilding the count URL here.
    clickCount = getClickCount(newsUrl)
    print('标题:' + title, '发布时间:', time, origin, audit, writer, photograph,
          '点击次数:' + clickCount)

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
resurl = requests.get(url)
resurl.encoding = 'utf-8'
soup = BeautifulSoup(resurl.text, 'html.parser')
for news in soup.select('li'):
    # Only <li> items that carry a news title are real entries.
    if len(news.select('.news-list-title')) > 0:
        a1 = news.select('a')[0].attrs['href']
        getNewDetail(a1)
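In the spirit of the function extraction in the title, one further sketch (our variant, not the original code) returns the fields as a dict instead of printing them, so a later step could store or reuse them:

def getNewDetailDict(newsUrl):
    # Hypothetical variant of getNewDetail: same parsing,
    # but the caller decides what to do with the result.
    res1 = requests.get(newsUrl)
    res1.encoding = 'utf-8'
    soup1 = BeautifulSoup(res1.text, 'html.parser')
    about = soup1.select('.show-info')[0].text
    return {
        'title': soup1.select('.show-title')[0].text,
        'time': datetime.strptime(about.lstrip('发布时间:')[:19], '%Y-%m-%d %H:%M:%S'),
        'clickCount': getClickCount(newsUrl),
    }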