Day04

Class Notes

 

Scraping Pearvideo:

'''
Scrape Pearvideo
Request URL:
    https://www.pearvideo.com/
Request method:
    GET
Request headers:
    User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36
'''

import requests
import re  # regex module
# uuid.uuid4() generates a random UUID that is unique for all practical
# purposes; handy for collision-free file names
import uuid
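# e.g. str(uuid.uuid4()) -> '1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed'
# (a fresh random value on every call; used below to name the .mp4 files)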


# The crawler's three steps
# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response

# 2. Parse the data
# Parse the homepage to extract the video detail-page IDs
def parse_index(text):
    res = re.findall('<a href="video_(.*?)"', text, re.S)

    # print(res)

    detail_url_list = []

    for m_id in res:
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        # print(detail_url)
        detail_url_list.append(detail_url)

        # print(detail_url_list)

    return detail_url_list

# Parse a detail page to extract the video URL
def parse_detail(text):
    # re.findall(pattern, text, flags)
    # re.S: lets . match newlines too, so the pattern can span the whole text
    # .  matches any single character
    # *  means "zero or more of the preceding token" (with ? it is non-greedy)
    # (see the standalone regex demo after this script)

    movie_url = re.findall('srcUrl="(.*?)"', text, re.S)[0]
    return movie_url

# 3. Save the data
def save_movie(movie_url):
    response = requests.get(movie_url)
    # write the video to a local file
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(response.content)
        f.flush()
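
# Alternative sketch: response.content buffers the whole video in memory,
# while a streamed download writes it to disk in chunks instead
def save_movie_streamed(movie_url):
    response = requests.get(movie_url, stream=True)
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        # iter_content yields the body piece by piece
        for chunk in response.iter_content(chunk_size=1024 * 64):
            f.write(chunk)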

if __name__ == '__main__':
    # 1. Send a request to the homepage
    index_res = get_page(url='https://www.pearvideo.com/')

    # 2. Parse the homepage to get the detail-page IDs
    detail_url_list = parse_index(index_res.text)
    # print(detail_url_list)

    # 3. Send a request to each detail-page url
    for detail_url in detail_url_list:
        detail_res = get_page(url=detail_url)
        # print(detail_res.text)

        # 4. Parse the detail page to get the video url
        movie_url = parse_detail(detail_res.text)
        print(movie_url)

        # 5. Save the video
        # save_movie(movie_url)
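
The regex notes in parse_detail are easier to see on a toy string. A minimal, self-contained demo of re.S and non-greedy matching (the HTML here is made up for illustration):

import re

html = '<a href="video_100">\n<a href="video_200">'

print(re.findall('href="video_(.*?)"', html))  # ['100', '200']
print(re.findall('href="(.*)"', html, re.S))   # greedy: one match spanning both tags
print(re.findall('href="(.*?)"', html, re.S))  # non-greedy: ['video_100', 'video_200']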

requests in detail:

import requests

headers={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36'
}

# # add the User-Agent header to the GET request
# response=requests.get(url='https://www.zhihu.com/explore',headers=headers)
#
# print(response.status_code)
#
# with open('zhihu.html','w',encoding='utf-8')as f:
#     f.write(response.text)
#
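# Without the header, many sites reject the default python-requests User-Agent.
# A quick comparison (the actual codes depend on the site's current anti-bot rules):
# print(requests.get('https://www.zhihu.com/explore').status_code)                   # e.g. 400
# print(requests.get('https://www.zhihu.com/explore', headers=headers).status_code)  # e.g. 200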

'''
The params argument:
build the Baidu search URL for 王嘉尔
'''
#
# from urllib.parse import urlencode
# # url='https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&wd=%E7%8E%8B%E5%98%89%E5%B0%94&oq=%25E7%259F%25A5%25E4%25B9%258E&rsv_pq=847b40850001732b&rsv_t=4a75X49edBkcU8OcSUPfEvxeRVv0RD12dGNsJ7AmeAQVqP6ZFqOzLS1gjXs&rqlang=cn&rsv_enter=1&rsv_sug3=9&rsv_sug1=7&rsv_sug7=101&bs=%E7%9F%A5%E4%B9%8E/'
# url='https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&sv_sug1=7&'+urlencode({"wd":"王嘉尔"})
# print(url)
# response=requests.get(url)
#
# with open('z','w',encoding='utf-8')as f:
#     f.write(response.text)


url = 'https://www.baidu.com/s'
# params is a dict of query-string parameters; 'wd' is Baidu's search keyword
response = requests.get(url, headers=headers, params={'wd': '安徽工程大学'})
with open('gongcheng.html', 'w', encoding='utf-8') as f:
    f.write(response.text)
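
# requests percent-encodes the params dict into the query string by itself,
# matching what urlencode produced by hand above; response.url shows the final
# URL (it may differ if the site redirects):
print(response.url)  # e.g. https://www.baidu.com/s?wd=%E5%AE%89%E5%BE%BD%E5%B7%A5%E7%A8%8B%E5%A4%A7%E5%AD%A6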

 

Scraping Douban Top 250 movie info:

'''
Homepage:
    https://movie.douban.com/top250
    GET
    User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36

regex:
    # detail-page url, image link, title, rating, number of raters
    <div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人评价
'''
import requests
import re
url = 'https://movie.douban.com/top250'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# 1. Send a request to the Douban Top 250 page and get the response
response = requests.get(url, headers=headers)

# print(response.text)

# 2. Extract the data with a regex
# detail-page url, image link, title, rating, number of raters
movie_content_list = re.findall(
    # the regex pattern
    '<div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人评价',

    # the text to parse
    response.text,

    # match flags
    re.S)

for movie_content in movie_content_list:
    # unpack each movie tuple
    detail_url, movie_jpg, name, point, num = movie_content
    data = f'Title: {name}, Detail URL: {detail_url}, Image URL: {movie_jpg}, Rating: {point}, Raters: {num}\n'
    print(data)

    # 3. Save the data: append each movie's info to a file
    with open('douban.txt', 'a', encoding='utf-8') as f:
        f.write(data)
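
# The request above only covers the first page (25 movies). Douban pages the
# Top 250 with a "start" offset (start=0, 25, ..., 225); a sketch of fetching
# the remaining pages with the same pattern:
for start in range(25, 250, 25):
    page = requests.get(url, headers=headers, params={'start': start})
    for movie_content in re.findall(
            '<div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人评价',
            page.text, re.S):
        detail_url, movie_jpg, name, point, num = movie_content
        print(f'Title: {name}, Rating: {point}')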

A higher-performance crawler:

import requests
import re  # regex module
# uuid.uuid4() generates a random UUID that is unique for all practical purposes
import uuid
# import the thread pool
from concurrent.futures import ThreadPoolExecutor
# cap the pool at 50 threads
pool = ThreadPoolExecutor(50)

# The crawler's three steps

# 1. Send the request
def get_page(url):
    print(f'Starting async task: {url}')
    response = requests.get(url)
    return response


# 2. Parse the data
# Parse the homepage to extract the video detail-page IDs
def parse_index(res):

    response = res.result()
    # extract all the video IDs on the homepage
    id_list = re.findall('<a href="video_(.*?)"', response.text, re.S)
    # print(id_list)

    # loop over the ID list
    for m_id in id_list:
        # build the detail-page url
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        # print(detail_url)
        # submit the detail-page url to get_page; parse_detail receives the result
        pool.submit(get_page, detail_url).add_done_callback(parse_detail)


# Parse a detail page to extract the video url
def parse_detail(res):
    response = res.result()
    movie_url = re.findall('srcUrl="(.*?)"', response.text, re.S)[0]
    # submit the video url to get_page; the result is passed on to save_movie
    pool.submit(get_page, movie_url).add_done_callback(save_movie)


# 3. Save the data
def save_movie(res):

    movie_res = res.result()

    # write the video to a local file
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(movie_res.content)
        print(f'Finished downloading: {movie_res.url}')
        f.flush()


if __name__ == '__main__':  # PyCharm shortcut: type main and press Enter

    # 1. Submit an async request via get_page and hand the result to parse_index
    url = 'https://www.pearvideo.com/'
    pool.submit(get_page, url).add_done_callback(parse_index)
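
Each callback receives the finished Future object, not the response itself, which is why every parse_*/save_* function above starts with res.result(). A minimal, self-contained illustration of that mechanism:

from concurrent.futures import ThreadPoolExecutor

demo_pool = ThreadPoolExecutor(2)

def work(x):
    return x * 2

def on_done(fut):
    # the callback is handed the Future; .result() unwraps the return value
    # (and re-raises any exception the task raised)
    print(fut.result())  # -> 42

demo_pool.submit(work, 21).add_done_callback(on_done)
demo_pool.shutdown(wait=True)  # block until the task and its callback finish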

 
