Python Day04

I. Web Scraping

1. Scraping all videos from the Pearvideo homepage:

'''
Scrape Pearvideo:
Request URL:
    https://www.pearvideo.com/

Request method:
    GET

Request headers:
    user-agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36
'''
  
# import requests
# import re  # regular-expression module
#
# # 1. Send a request to the Pearvideo homepage and get the response data
# response = requests.get(url='https://www.pearvideo.com/')
# print(response.status_code)
# print(response.text)
#
# # re.findall('pattern', 'text to parse', flags)
# # re.S (DOTALL): lets '.' also match newlines, so the pattern can span lines
# # .  matches any single character
# # .* matches any run of characters (greedy); .*? is the non-greedy form
# '''
# <a href="video_1543373"
# <a href="video_(.*?)"  # extracts 1543373
# '''
#
# # 2. Extract the detail-page IDs from the homepage
# res = re.findall('<a href="video_(.*?)"', response.text, re.S)
# print(res)
#
#
# for m_id in res:
#     # Build the detail-page url
#     detail_url = 'https://www.pearvideo.com/video_' + m_id
#     print(detail_url)
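
The ID-extraction pattern can be sanity-checked on its own before wiring it into the crawler; a minimal, self-contained sketch (the HTML fragment below is made up for illustration):

import re

sample = '''
<a href="video_1543373" class="vervideo-lilink">
<a href="video_1566073" class="vervideo-lilink">
'''

# re.S lets '.' match across newlines; (.*?) captures the ID non-greedily
print(re.findall('<a href="video_(.*?)"', sample, re.S))  # ['1543373', '1566073']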
  
  
  
import requests
import re  # regular-expression module
# uuid.uuid4() generates a random UUID; here it is used to build unique filenames
import uuid
  
# The three steps of a crawler

# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response
  
# 2. Parse the data
# Parse the homepage to get the detail-page IDs
def parse_index(text):
    res = re.findall('<a href="video_(.*?)"', text, re.S)
    # print(res)
  
    detail_url_list = []
    for m_id in res:
        # Build the detail-page url
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        # print(detail_url)
        detail_url_list.append(detail_url)
  
    # print(detail_url_list)
  
    return detail_url_list
  
# Parse a detail page to get the video url
def parse_detail(text):
    '''
    (.*?): captures the content inside the parentheses
    .*?: matches without capturing
    <video webkit-playsinline="" playsinline="" x-webkit-airplay="" autoplay="autoplay" src="https://video.pearvideo.com/mp4/adshort/20190613/cont-1566073-14015522_adpkg-ad_hd.mp4" style="width: 100%; height: 100%;"></video>

    First pattern from the analysis: <video.*?src="(.*?)"

    # The line above was only the analysis process and is not what we use;
    # the raw page source carries the link as srcUrl="..."

    Final pattern: srcUrl="(.*?)"
    '''
    movie_url = re.findall('srcUrl="(.*?)"', text, re.S)[0]
    return movie_url
  
  
# 3. Save the data
def save_movie(movie_url):
    response = requests.get(movie_url)
    # Write the video to a local file
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(response.content)
        f.flush()
  
if __name__ == '__main__':  # PyCharm shortcut: type "main" then Enter
  
    # 1. Send a request to the homepage
    index_res = get_page(url='https://www.pearvideo.com/')
  
    # 2. Parse the homepage and get the detail-page ids
    detail_url_list = parse_index(index_res.text)
    # print(detail_url_list)
  
    # 3. Send a request to each detail-page url
    for detail_url in detail_url_list:
        detail_res = get_page(url=detail_url)
        print(detail_res.text)
  
        # 4. Parse the detail page to get the video url
        movie_url = parse_detail(detail_res.text)
        print(movie_url)
  
        # 5. Save the video
        save_movie(movie_url)

2. Asynchronous scraping

import requests
import re  # regular-expression module
# uuid.uuid4() generates a random UUID; used to build unique filenames
import uuid
# Import the thread-pool module
from concurrent.futures import ThreadPoolExecutor
# Limit the pool to 50 threads
pool = ThreadPoolExecutor(50)
  
# The three steps of a crawler

# 1. Send the request
def get_page(url):
    print(f'Starting async task: {url}')
    response = requests.get(url)
    return response
  
  
# 2. Parse the data
# Parse the homepage to get the detail-page IDs
def parse_index(res):
  
    response = res.result()
    # Extract all IDs from the homepage
    id_list = re.findall('<a href="video_(.*?)"', response.text, re.S)
    # print(id_list)
  
    # Loop over the id list
    for m_id in id_list:
        # Build the detail-page url
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        # print(detail_url)
        # Submit the detail-page url to get_page
        pool.submit(get_page, detail_url).add_done_callback(parse_detail)
  
  
# Parse a detail page to get the video url
def parse_detail(res):
    response = res.result()
    movie_url = re.findall('srcUrl="(.*?)"', response.text, re.S)[0]
    # Submit the video url to get_page asynchronously and pass the result on to save_movie
    pool.submit(get_page, movie_url).add_done_callback(save_movie)
  
  
# 3. Save the data
def save_movie(res):
  
    movie_res = res.result()
  
    # Write the video to a local file
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(movie_res.content)
        print(f'Finished downloading video: {movie_res.url}')
        f.flush()
  
  
if __name__ == '__main__':  # PyCharm shortcut: type "main" then Enter
  
    # 1. Send an async request via get_page and hand the result to the parse_index callback
    url = 'https://www.pearvideo.com/'
    pool.submit(get_page, url).add_done_callback(parse_index)

3. Using requests in detail:

GET requests explained
'''
User-Agent
# Visit Zhihu "explore"
Request URL:
    https://www.zhihu.com/explore

Request method:
    GET

Request headers:
    user-agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36

    cookies
'''
  
# Visit Zhihu without a user-agent
# import requests
# response = requests.get(url='https://www.zhihu.com/explore')
# print(response.status_code)  # 400
# print(response.text)  # returns an error page
  
  
# Visit Zhihu carrying request headers:
import requests
  
# Request-header dict
# headers = {
#     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
# }
# Add the user-agent inside the get request
# response = requests.get(url='https://www.zhihu.com/explore', headers=headers)
# print(response.status_code)  # 200
# # print(response.text)
# with open('zhihu.html', 'w', encoding='utf-8') as f:
#     f.write(response.text)
  
  
'''
The params request parameter
Baidu search urls for 安徽工程大学 (Anhui Polytechnic University):
https://www.baidu.com/s?wd=安徽工程大学&pn=10
https://www.baidu.com/s?wd=安徽工程大学&pn=20

'''
from urllib.parse import urlencode
# url = 'https://www.baidu.com/s?wd=%E8%94%A1%E5%BE%90%E5%9D%A4'
# url = 'https://www.baidu.com/s?' + urlencode({"wd": "蔡徐坤"})
url = 'https://www.baidu.com/s?'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# print(url)
# Add the params argument to the get method
# response = requests.get(url, headers=headers, params={"wd": "安徽工程大学"})
response = requests.get(url, headers=headers, params={"wd": "安徽工程大学", "pn": "20"})
# print(response.text)
with open('gongcheng2.html', 'w', encoding='utf-8') as f:
    f.write(response.text)
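
To confirm what requests built from the params dict, the final url can be inspected on the response; the query values come back percent-encoded:

print(response.url)  # e.g. https://www.baidu.com/s?wd=%E5%AE%89%E5%BE%BD%E5%B7%A5%E7%A8%8B%E5%A4%A7%E5%AD%A6&pn=20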
  
  
'''
Carrying cookies
Use logged-in cookies to get past GitHub's login check

Request URL:
    https://github.com/settings/emails

Request method:
    GET

Request headers:
    User-Agent
      
    Cookie: has_recent_activity=1; _ga=GA1.2.1416117396.1560496852; _gat=1; tz=Asia%2FShanghai; _octo=GH1.1.1728573677.1560496856; _device_id=1cb66c9a9599576a3b46df2455810999; user_session=1V8n9QfKpbgB-DhS4A7l3Tb3jryARZZ02NDdut3J2hy-8scm; __Host-user_session_same_site=1V8n9QfKpbgB-DhS4A7l3Tb3jryARZZ02NDdut3J2hy-8scm; logged_in=yes; dotcom_user=TankJam; _gh_sess=ZS83eUYyVkpCWUZab21lN29aRHJTUzgvWjRjc2NCL1ZaMHRsdGdJeVFQM20zRDdPblJ1cnZPRFJjclZKNkcrNXVKbTRmZ3pzZzRxRFExcUozQWV4ZG9kOUQzZzMwMzA2RGx5V2dSaTMwaEZ2ZDlHQ0NzTTBtdGtlT2tVajg0c0hYRk5IOU5FelYxanY4T1UvVS9uV0YzWmF0a083MVVYVGlOSy9Edkt0aXhQTmpYRnVqdFAwSFZHVHZQL0ZyQyt0ZjROajZBclY4WmlGQnNBNTJpeEttb3RjVG1mM0JESFhJRXF5M2IwSlpHb1Mzekc5M0d3OFVIdGpJaHg3azk2aStEcUhPaGpEd2RyMDN3K2pETmZQQ1FtNGNzYnVNckR4aWtibkxBRC8vaGM9LS1zTXlDSmFnQkFkWjFjanJxNlhCdnRRPT0%3D--04f6f3172b5d01244670fc8980c2591d83864f60
      
'''
import requests
  
# Request url
url = 'https://github.com/settings/emails'
  
# Request headers
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36',
    # Option 1: splice the cookies into the request headers
    # 'Cookie': 'has_recent_activity=1; _ga=GA1.2.1416117396.1560496852; _gat=1; tz=Asia%2FShanghai; _octo=GH1.1.1728573677.1560496856; _device_id=1cb66c9a9599576a3b46df2455810999; user_session=1V8n9QfKpbgB-DhS4A7l3Tb3jryARZZ02NDdut3J2hy-8scm; __Host-user_session_same_site=1V8n9QfKpbgB-DhS4A7l3Tb3jryARZZ02NDdut3J2hy-8scm; logged_in=yes; dotcom_user=TankJam; _gh_sess=ZS83eUYyVkpCWUZab21lN29aRHJTUzgvWjRjc2NCL1ZaMHRsdGdJeVFQM20zRDdPblJ1cnZPRFJjclZKNkcrNXVKbTRmZ3pzZzRxRFExcUozQWV4ZG9kOUQzZzMwMzA2RGx5V2dSaTMwaEZ2ZDlHQ0NzTTBtdGtlT2tVajg0c0hYRk5IOU5FelYxanY4T1UvVS9uV0YzWmF0a083MVVYVGlOSy9Edkt0aXhQTmpYRnVqdFAwSFZHVHZQL0ZyQyt0ZjROajZBclY4WmlGQnNBNTJpeEttb3RjVG1mM0JESFhJRXF5M2IwSlpHb1Mzekc5M0d3OFVIdGpJaHg3azk2aStEcUhPaGpEd2RyMDN3K2pETmZQQ1FtNGNzYnVNckR4aWtibkxBRC8vaGM9LS1zTXlDSmFnQkFkWjFjanJxNlhCdnRRPT0%3D--04f6f3172b5d01244670fc8980c2591d83864f60'
}
# github_res = requests.get(url, headers=headers)
  
# Option 2: pass the cookies separately via the cookies argument
cookies = {
    'Cookie': 'has_recent_activity=1; _ga=GA1.2.1416117396.1560496852; _gat=1; tz=Asia%2FShanghai; _octo=GH1.1.1728573677.1560496856; _device_id=1cb66c9a9599576a3b46df2455810999; user_session=1V8n9QfKpbgB-DhS4A7l3Tb3jryARZZ02NDdut3J2hy-8scm; __Host-user_session_same_site=1V8n9QfKpbgB-DhS4A7l3Tb3jryARZZ02NDdut3J2hy-8scm; logged_in=yes; dotcom_user=TankJam; _gh_sess=ZS83eUYyVkpCWUZab21lN29aRHJTUzgvWjRjc2NCL1ZaMHRsdGdJeVFQM20zRDdPblJ1cnZPRFJjclZKNkcrNXVKbTRmZ3pzZzRxRFExcUozQWV4ZG9kOUQzZzMwMzA2RGx5V2dSaTMwaEZ2ZDlHQ0NzTTBtdGtlT2tVajg0c0hYRk5IOU5FelYxanY4T1UvVS9uV0YzWmF0a083MVVYVGlOSy9Edkt0aXhQTmpYRnVqdFAwSFZHVHZQL0ZyQyt0ZjROajZBclY4WmlGQnNBNTJpeEttb3RjVG1mM0JESFhJRXF5M2IwSlpHb1Mzekc5M0d3OFVIdGpJaHg3azk2aStEcUhPaGpEd2RyMDN3K2pETmZQQ1FtNGNzYnVNckR4aWtibkxBRC8vaGM9LS1zTXlDSmFnQkFkWjFjanJxNlhCdnRRPT0%3D--04f6f3172b5d01244670fc8980c2591d83864f60'
}
  
github_res = requests.get(url, headers=headers, cookies=cookies)
  
# If the cookies worked, this account-specific string appears in the logged-in page
print('15622792660' in github_res.text)
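
As an alternative to copying cookies by hand, requests.Session stores the cookies from each response and sends them on later requests automatically; a rough sketch (logging in for real would still require posting credentials or reusing a browser session):

import requests

session = requests.Session()
session.headers.update({'user-agent': 'Mozilla/5.0'})
session.get('https://github.com/login')                   # cookies set here are remembered
res = session.get('https://github.com/settings/emails')   # and sent again here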

4. Example: scraping Douban Top 250 movie info:

'''
Homepage:
    https://movie.douban.com/top250
    GET
    User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36

re pattern:
    # detail-page url, image link, title, rating, number of raters
    <div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人评价
'''
import requests
import re
url = 'https://movie.douban.com/top250'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# 1. Send a request to the Douban Top 250 page and get the response data
response = requests.get(url, headers=headers)
  
# print(response.text)
  
# 2. Extract the data with the regex
# detail-page url, image link, title, rating, number of raters
movie_content_list = re.findall(
    # regex pattern
    '<div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人评价',

    # text to parse
    response.text,

    # match flags
    re.S)
  
for movie_content in movie_content_list:
    # Unpack one movie per tuple
    detail_url, movie_jpg, name, point, num = movie_content
    data = f'Title: {name},   detail url: {detail_url}, image url: {movie_jpg}, rating: {point}, raters: {num} \n'
    print(data)

    # 3. Save the data: append each movie's info to a file
    with open('douban.txt', 'a', encoding='utf-8') as f:
        f.write(data)

5. Homework: scraping all Douban Top 250 movie info:

'''
Scrape Douban movie Top 250

Request URL:
    https://movie.douban.com/top250

Request Method:
    GET
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36

'''

import requests
import re
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
           "Host": "movie.douban.com",
           }

# Build the url of every page (start=0, 25, ..., 225)
def get_pageurl():
    i = 0
    pageurl_list = []
    while i < 250:
        pageurl_list.append(f'https://movie.douban.com/top250?start={i}&filter=')
        i += 25
    return pageurl_list

# 1. Send the requests
# Fetch every page
def get_page(pageurl_list):
    pagecontent = []
    for page_url in pageurl_list:
        res = requests.get(page_url, headers=headers)
        pagecontent.append(res.text)
    return pagecontent

# 2. Extract the data with a regex
# detail-page url, image link, title, rating, raters, director, cast,
# release year, region, genre, one-line review
# Parse the data
def page_detail(pagecontent):
    '''
    Sample entry the pattern was derived from:
    <li>
        <div class="item">
            <div class="pic">
                <em class="">1</em>
                <a href="https://movie.douban.com/subject/1292052/">
                    <img width="100" alt="肖申克的救赎" src="https://img3.doubanio.com/view/photo/s_ratio_poster/public/p480747492.webp" class="">
                </a>
            </div>
            <div class="info">
                <div class="hd">
                    <a href="https://movie.douban.com/subject/1292052/" class="">
                        <span class="title">肖申克的救赎</span>
                        <span class="title">&nbsp;/&nbsp;The Shawshank Redemption</span>
                        <span class="other">&nbsp;/&nbsp;月黑高飞(港)  /  刺激1995(台)</span>
                    </a>
                    <span class="playable">[可播放]</span>
                </div>
                <div class="bd">
                    <p class="">
                        导演: 弗兰克·德拉邦特 Frank Darabont&nbsp;&nbsp;&nbsp;主演: 蒂姆·罗宾斯 Tim Robbins /...<br>
                        1994&nbsp;/&nbsp;美国&nbsp;/&nbsp;犯罪 剧情
                    </p>
                    <div class="star">
                        <span class="rating5-t"></span>
                        <span class="rating_num" property="v:average">9.6</span>
                        <span property="v:best" content="10.0"></span>
                        <span>1450669人评价</span>
                    </div>
                    <p class="quote">
                        <span class="inq">希望让人自由。</span>
                    </p>
                </div>
            </div>
        </div>
    </li>

    Pattern:
    <div class="item">.*?href="(.*?)">.*?src="(.*?)" .*?<span class="title">(.*?)</span>.*?<div class="bd">.*?导演:(.*?)<br>(.*?)</p>.*?<span class="rating_num" property="v:average">(.*?)</span>.*?<span>(.*?)人评价</span>.*?<span class="inq">(.*?)</span>
    '''
    data_list = []
    for text in pagecontent:
        movie_detail_list = re.findall(
            # regex pattern
            '<div class="item">.*?href="(.*?)">.*?src="(.*?)" .*?<span class="title">(.*?)</span>.*?<div class="bd">.*?导演:(.*?)<br>(.*?)</p>.*?<span class="rating_num" property="v:average">(.*?)</span>.*?<span>(.*?)人评价</span>.*?<span class="inq">(.*?)</span>',
            # text to parse
            text,
            # match flags
            re.S)
        for movie_detail in movie_detail_list:
            # Unpack one movie per tuple
            detail_url, image_url, name, director_and_actor, year_and_type, rate, num, movie_review = movie_detail
            data = f'Title:{name}, url:{detail_url}, image:{image_url}, director/cast:{director_and_actor}, year/region/genre:{year_and_type}, rating:{rate}, raters:{num}, review:{movie_review}\n\n'
            # Strip the first two newlines and all literal spaces,
            # then turn the &nbsp; entities back into plain spaces
            data = data.replace('\n', '', 2)
            data = data.replace(' ', '')
            data = data.replace('&nbsp;&nbsp;&nbsp;', '  ')
            data = data.replace('&nbsp;/&nbsp;', ' ')
            data_list.append(data)
    return data_list

# Save the data
def save_data(data_list):
    for res in data_list:
        print(res)
        with open('douban_TOP250.txt', 'a', encoding='utf-8') as f:
            f.write(res)

if __name__ == '__main__':
    pageurl = get_pageurl()
    pagecontent = get_page(pageurl)
    detail = page_detail(pagecontent)
    save_data(detail)

II. Summary

  1. What is a crawler?
     Crawling means fetching data programmatically.
  2. What is the Internet?
     A collection of network devices that wire individual computers together.
  3. Why was the Internet built?
     To transfer and share data.
  4. The full process of going online:
  - An ordinary user:
     opens a browser -> sends a request to the target site -> receives the response data -> renders it on the page.
  - A crawler program:
     simulates a browser -> sends a request to the target site -> receives the response data -> extracts the useful data -> saves it locally / to a database.

  5. What kind of request does the browser send? (see the sketch below)
     An HTTP request, consisting of:
     - the request url
     - the request method:
       GET, POST
     - the request headers:
       cookies
       user-agent
       host
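
  To see concretely what goes into such a request, requests can build and inspect one without sending it; a small sketch (the url and a shortened user-agent are just the ones used earlier in these notes):

import requests

# Build a GET request and prepare it without sending it
req = requests.Request('GET', 'https://www.zhihu.com/explore',
                       headers={'user-agent': 'Mozilla/5.0'})
prepared = req.prepare()
print(prepared.method, prepared.url)  # GET https://www.zhihu.com/explore
print(prepared.headers)               # the headers that would go on the wire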

  6. The full crawling workflow (a minimal end-to-end sketch follows this list):
     1. Send the request (request libraries)
        - requests
        - selenium
     2. Receive the response data (returned by the server)
     3. Parse and extract the data (parsing libraries)
        - bs4 (BeautifulSoup4)
        - Xpath
     4. Save the data (databases)
        - MongoDB
     Steps 1, 3 and 4 are the ones you write by hand.
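
  A minimal sketch of steps 1, 3 and 4, using requests plus bs4 (named above but never shown in these notes) and saving to a plain file instead of MongoDB, which would need a running server:

import requests
from bs4 import BeautifulSoup  # pip install beautifulsoup4

# 1. Send the request
headers = {'User-Agent': 'Mozilla/5.0'}
response = requests.get('https://movie.douban.com/top250', headers=headers)

# 3. Parse and extract the data: every <span class="title"> on the page
soup = BeautifulSoup(response.text, 'html.parser')
titles = [span.text for span in soup.select('span.title')]

# 4. Save the data
with open('titles.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(titles))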

  7. Crawler frameworks
     Scrapy (object-oriented)
 