import re

import requests
from bs4 import BeautifulSoup
# 1. Use a regular expression to validate an email address.
# The address may contain only letters, digits, underscores, dots, and
# hyphens; the domain must be ASCII.
mail = r'^[a-zA-Z0-9_.-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$'
address = '403882658@163.com'
if re.match(mail, address):
    print('valid email')
else:
    print('invalid email')
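# A quick sanity check of the pattern against a few hand-picked samples
# (illustrative addresses, not part of the original exercise):
for sample in ['first.last@example.com', 'bad@@example.com', 'no-at-sign.com']:
    print(sample, '->', bool(re.match(mail, sample)))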

# 2. Use a regular expression to find every phone number.
# Sample footer text from the school site, kept verbatim:
footer = '''版权所有:广州商学院   地址:广州市黄埔区九龙大道206号
 学校办公室:020-82876130   招生电话:020-82872773
 粤公网安备 44011602000060号    粤ICP备15103669号'''
phone = r'\d{3,4}-\d{6,8}'
print(re.findall(phone, footer))
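# The same search with a precompiled pattern, handy if it is reused
# (identical output, just an idiom):
phone_re = re.compile(r'\d{3,4}-\d{6,8}')
print(phone_re.findall(footer))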

# 3. Use a regular expression to split English text into words. re.split('', news)
sentence = '''In many ways, there are lots of people like the buzzard, the bat and the bumblebee. They are struggling about with all their problems and frustrations, not realizing that the answer is right there above them. '''
print(re.split(r"[\s,.?!\"':]+", sentence))
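# The trailing space still leaves an empty string at the end; filtering
# the result gives a clean token list (same delimiters as above):
words = [w for w in re.split(r"[\s,.?!\"':]+", sentence) if w]
print(words)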
# 4. Use a regular expression to extract the news ID from a URL.
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
newsUrl = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
print(re.match(r"http://news.gzcc.cn/html/\d{4}/xiaoyuanxinwen_(.*)\.html", newsUrl).group(1).split('/')[-1])
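# An equivalent one-step extraction (a sketch, assuming the ID is always
# the run of digits just before ".html"):
print(re.search(r'/(\d+)\.html$', newsUrl).group(1))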
# 5. Build the Request URL for the click count.
newsID = re.match(r"http://news.gzcc.cn/html/\d{4}/xiaoyuanxinwen_(.*)\.html", newsUrl).group(1).split('/')[-1]
clickUrl = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(newsID)
print(clickUrl)
# 6. Fetch the click count. The API answers with a small JS snippet of the
# form "...html('NNN');", so peel off the wrapper and keep the digits.
clickcount = int(requests.get(clickUrl).text.split('.html')[-1].lstrip("(')").rstrip("');"))
print(clickcount)
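# Parsing the same payload with a regex is a bit sturdier than chained
# strips (a sketch, assuming the count sits inside .html('...')):
counts = re.findall(r"\.html\('(\d+)'\)", requests.get(clickUrl).text)
if counts:
    print(int(counts[-1]))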
# 7. Wrap steps 4-6 into one function: def getClickCount(newsUrl):
def getClickCount(newsUrl):
    newsId = re.findall(r"_(.*)\.html", newsUrl)[0].split("/")[-1]
    requestUrl = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(newsId)
    res = requests.get(requestUrl)
    times = int(res.text.split('.html')[-1].lstrip("(')").rstrip("');"))
    return times
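# Example call, reusing the sample article URL from step 4:
print(getClickCount('http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'))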
# 8. Wrap the news-detail scraping into one function: def getNewDetail(newsUrl):
def getNewDetail(newsUrl):
    res = requests.get(newsUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    # print(soup.select("#content")[0].text)  # article body
    info = soup.select(".show-info")[0].text
    # The info line begins with the "发布时间:" (publish time) marker,
    # followed by a 19-character timestamp like "2018-04-04 09:12:00".
    time = info[info.find('发布时间:') + len('发布时间:'):][:19]
    # Author: the text between the "作者:" (author) and "审核:" (reviewer) markers.
    if info.find('作者:') >= 0:
        author = info[info.find('作者:') + len('作者:'):info.find('审核:')].split()[0]
    else:
        author = 'none'
    print("author: " + author + "  published: " + time)
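# Example call against the same sample article:
getNewDetail('http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html')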
# 9. Extract every news item from one list page: def getListPage(pageUrl):
def getListPage(pageUrl):
    res = requests.get(pageUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select("li"):
        if len(news.select(".news-list-title")) > 0:
            time = news.select(".news-list-info")[0].contents[0].text
            title = news.select(".news-list-title")[0].text
            description = news.select(".news-list-description")[0].text
            url = news.select('a')[0].attrs['href']
            print(time + " " + title + " " + description + " " + url)
            clicks = getClickCount(url)
            print("clicks: " + str(clicks))
            getNewDetail(url)
# 10. Get the total article count and derive the page count: def getPageN():
def getPageN():
    res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
    res.encoding = "utf-8"
    soup = BeautifulSoup(res.text, 'html.parser')
    # The first link under #pages reads like "NNN条" (NNN items); drop the unit.
    n = int(soup.select('#pages')[0].select('a')[0].text.rstrip('条'))
    # Ceiling division: each list page holds 10 items.
    return (n + 9) // 10
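# Quick check of the paging arithmetic (assuming 10 items per page):
for total in (95, 100, 101):
    print(total, 'items ->', (total + 9) // 10, 'pages')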
# 11. Crawl every news detail from every list page.
n = getPageN()
for i in range(1, n + 1):
    if i == 1:
        newsurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    else:
        newsurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(newsurl)