# Python practice exercises
# 1: Email validation
import re

import requests
from bs4 import BeautifulSoup
# Validate an email address entered by the user.
# Bug fix: the original pattern used a character class, [com,cn,net]{1,3},
# which matches any 1-3 characters drawn from {c,o,m,',',n,e,t} (so e.g.
# "x@y.tt" passed); it is replaced by the alternation (com|cn|net).
# The local part now requires at least one character ({1,19}, was {0,19}).
EMAIL_PATTERN = re.compile(r'^[0-9a-zA-Z_]{1,19}@[0-9a-zA-Z]{1,13}\.(com|cn|net)$')
email = input("请输入你的邮箱:")
if EMAIL_PATTERN.match(email):
    print('你输入的邮箱正确')
else:
    print('你输入的邮箱不正确,请重新输入')
# 2: Find every telephone number with a regular expression
# Extract every area-code/number pair from the school's contact text.
tellstr = '''版权所有:广州商学院 地址:广州市黄埔区九龙大道206号
学校办公室:020-82876130 招生电话:020-82872773
粤公网安备 44011602000060号 粤ICP备15103669号'''
# Raw string fixes the invalid "\d" escape in the original plain literal.
tell = re.findall(r'(\d{3,4})-(\d{6,8})', tellstr)
print(tell)
# 3. Split English text into words with a regular expression (re.split)
# Tokenize the news sentence on whitespace and common punctuation.
# Fixes: the variable no longer shadows the built-in str type, and the
# pattern is a raw string (avoids the invalid "\s" escape warning).
news_text = '''Yasuo Fukuda, chairman of the Boao Forum_for Asia, delivers a welcome speech at the BFA 2018 opening ceremony on April 10, 2018.[Photo by Zou Hong/China Daily]'''
print(re.split(r'[\s?,_-]+', news_text))
# 4. Extract the news id with a regular expression
# Pull the numeric news id out of a detail-page URL.
newUrl = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
# Raw-string pattern; the dot before "html" is escaped so it cannot
# accidentally match an arbitrary character.
newNum = re.match(r'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_(.*)\.html', newUrl).group(1).split("/")[-1]
print(newNum)
# 5: Build the Request URL for the click count
# Build the click-count API URL from the id embedded in the page URL.
# Fixes: raw-string pattern ("\_" was an invalid escape) and the final
# dot is escaped so only a literal ".html" suffix matches.
requesId = re.findall(r'_(.*)\.html', newUrl)[0].split('/')[-1]
requesURL = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(requesId)
print(requesURL)
# 6. Fetch the click count
# Request the click-count endpoint and print the count parsed out of the
# JavaScript snippet it returns (regex fixes as in step 5).
newClickCount = re.findall(r'_(.*)\.html', newUrl)[0].split('/')[-1]
res = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newClickCount))
# The response body looks like "...html('<count>');" — keep the part
# between the quotes after ".html".
print(res.text.split(".html")[-1].lstrip("('").rsplit("')")[0])
# 7. Wrap steps 4-6 in a function: def getClickCount(newsUrl):
def getClickCount(newUrl):
    """Return the click count (as a string) for one news detail page.

    Bug fix: the original only printed intermediate values and fell off
    the end, returning None — but getNewDetail concatenates the result
    into a message, so the parsed count is now returned.
    """
    # News id: the path segment between "xiaoyuanxinwen_" and ".html".
    # NOTE(review): the pattern hard-codes the /2018/ path — URLs from
    # other years would make re.match return None; confirm inputs.
    newNum = re.match(r'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_(.*)\.html', newUrl).group(1).split("/")[-1]
    print(newNum)
    # Click-count API URL for this news id.
    requesId = re.findall(r'_(.*)\.html', newUrl)[0].split('/')[-1]
    requesURL = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(requesId)
    print(requesURL)
    # The endpoint returns a JS snippet like "...html('<count>');".
    res = requests.get(requesURL)
    clickCount = res.text.split(".html")[-1].lstrip("('").rsplit("')")[0]
    print(clickCount)
    return clickCount


getClickCount(newUrl)
# 8. Wrap the news-detail extraction in a function: def getNewDetail(newsUrl):
def getNewDetail(newsUrl):
    """Fetch one news detail page and print its metadata and body text.

    Reconstructed from a garbled paste. Bug fixes: the page is downloaded
    and parsed *before* any fields are selected, and every field is read
    from the parsed page (the original referenced undefined names such as
    "new", "ti", "tim" and "url").
    """
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    # Title, publication time and source from the page header.
    # NOTE(review): selectors mirror the original ('.news-list-title',
    # 'span'[0], 'span'[1]) — confirm against the real detail-page markup.
    title = soupd.select('.news-list-title')[0].text
    dtime = soupd.select('span')[0].text
    source = soupd.select('span')[1].text
    # Click count comes from the count API (step 7).
    clickCount = getClickCount(newsUrl)
    print('标题:' + title + ' 时间:' + dtime + ' 来源:' + source +
          ' 点击次数:' + clickCount + ' 链接: ' + newsUrl)
    # Body text, one whitespace-separated chunk per line.
    print('正文:')
    content = soupd.select('#content')[0].text.split()
    for chunk in content:
        print(chunk)
# Crawl the campus-news list page and print details for every article.
mynewsUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(mynewsUrl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
for n in soup.select('.news-list')[0].select('li'):
    # Bug fix: the link is read from the current list item "n"
    # (the original referenced an undefined name "new").
    a = n.select('a')[0].attrs['href']
    getNewDetail(a)
# 9. Wrap "all news on one list page" in a function: def getListPage(pageUrl):
def getListPage(newsUrl):
    """Fetch one news list page and process every news item on it.

    Bug fixes: "fori in" (missing space) is repaired, and the title test
    and link lookup now use the current list item — the original queried
    the whole page ("soup"), so every iteration yielded the same link.
    """
    res = requests.get(newsUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for item in soup.select('li'):
        # Only genuine news items carry a title element.
        if len(item.select('.news-list-title')) > 0:
            a = item.select('a')[0].attrs['href']
            getNewDetail(a)
# Read the total article count from the pager and crawl list pages.
# Bug fix: the original fetched an undefined global "newsUrl"; the list
# index URL is used directly.
res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
# The ".a1" element holds text like "123条"; strip the unit character.
count = int(soup.select('.a1')[0].text.rstrip('条'))
# NOTE(review): range(count, count + 1) visits a single page number, as
# in the original; the full crawl is done properly in step 11.
for i in range(count, count + 1):
    # Bug fix: the template used "[]" instead of "{}", so .format()
    # never substituted the page number into the URL.
    pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(pageUrl)
# 10. Get the total article count and compute the page count: def getPageN():
def getPageN():
    """Return the total number of list pages (10 articles per page).

    Bug fix: the original read an undefined global "newsUrl"; the list
    index URL is now used directly.
    """
    res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    # The ".a1" element holds text like "<total>条"; drop the unit.
    count = int(soup.select('.a1')[0].text.rstrip('条'))
    return count // 10 + 1
# 11. Crawl every news detail on every list page.
# Crawl ALL list pages: the first page is the index URL itself; pages
# 2..N use the "<n>.html" naming scheme.
pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
count = getPageN()
# Bug fix: range(count, count + 1) visited only the last page number,
# contradicting the goal of fetching ALL pages; the crawl now covers the
# index page plus pages 2 through count.
getListPage(pageUrl)
for i in range(2, count + 1):
    pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(pageUrl)