Scraping Mzitu images with requests + Beautiful Soup

1. Requests: HTTP for Humans

Requests inherits all the features of urllib2. It supports HTTP keep-alive and connection pooling, session persistence via cookies, file uploads, automatic detection of the response body's encoding, and internationalized URLs with automatic encoding of POST data.

Chinese API documentation: http://docs.python-requests.org/zh_CN/latest/index.html

Installation

Either of these two commands works:

pip install requests

easy_install requests
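
To see a few of these features at work, here is a minimal sketch of a GET request (the URL and User-Agent are placeholders borrowed from the scraper further down):

import requests

url = 'http://www.mzitu.com/'  # placeholder target page
headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)'}

resp = requests.get(url, headers=headers)
print(resp.status_code)  # HTTP status, e.g. 200
print(resp.encoding)     # the encoding requests detected for the body
print(resp.text[:200])   # first 200 characters of the decoded HTML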

2. BeautifulSoup

Beautiful Soup is likewise an HTML/XML parser; its main job is parsing HTML/XML documents and extracting data from them.

2.1 Installation

pip install beautifulsoup4

2.2 Import

from bs4 import BeautifulSoup
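
A small self-contained sketch of the find/find_all calls the scraper below relies on; the HTML fragment here is invented purely for illustration. Note that the 'lxml' parser used throughout this post is a separate dependency (pip install lxml); the standard library's 'html.parser' also works:

from bs4 import BeautifulSoup

# Invented HTML fragment for illustration only
html = '''
<h2 class="main-title">Sample title</h2>
<img class="lazy" alt="Sample title" src="http://example.com/1.jpg">
'''

soup = BeautifulSoup(html, 'lxml')
print(soup.find('h2', class_='main-title').text)    # Sample title
print(soup.find('img', alt='Sample title')['src'])  # http://example.com/1.jpg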

3. The scraper code


import requests
from bs4 import BeautifulSoup
import os
import re

# Two header sets: the site checks the Referer and refuses
# image requests without a matching one (anti-hotlinking).
Hostreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://www.mzitu.com'
}
Picreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://i.meizitu.net'
}


def get_page_name(url):  # get the atlas's page count and title
    html = get_html(url)
    # Create a BeautifulSoup object with the lxml parser
    soup = BeautifulSoup(html, 'lxml')
    span = soup.find_all('span')
    title = soup.find('h2', class_="main-title")
    # The last page number happens to sit in the 10th <span>; position-dependent
    return span[9].text, title.text


def get_html(url):  # fetch the HTML source of a page
    req = requests.get(url, headers=Hostreferer)
    html = req.text
    return html


def get_img_url(url, name):  # get the image URL on one page of the atlas
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    img_url = soup.find('img', alt=name)
    return img_url['src']


def save_img(img_url, count, name):  # save one image of the atlas
    req = requests.get(img_url, headers=Picreferer)
    new_name = rename(name)
    with open(new_name + '/' + str(count) + '.jpg', 'wb') as f:
        f.write(req.content)


def rename(name):  # strip characters that are illegal in file names
    rstr = r'[\/\\\:\*\?\<\>\|]'
    new_name = re.sub(rstr, "", name)
    return new_name


def save_one_atlas(old_url):  # save an entire atlas
    page, name = get_page_name(old_url)
    new_name = rename(name)
    os.makedirs(new_name, exist_ok=True)  # don't crash if the folder exists

    print("Atlas --" + name + "-- starting to save")
    for i in range(1, int(page) + 1):
        url = old_url + "/" + str(i)
        img_url = get_img_url(url, name)
        # print(img_url)
        save_img(img_url, i, name)
        print('Saving image ' + str(i))
    print("Atlas --" + name + " saved successfully")


def get_atlas_list(url):  # collect the URLs of all atlases on a listing page
    req = requests.get(url, headers=Hostreferer)
    soup = BeautifulSoup(req.text, 'lxml')
    atlas = soup.find_all(attrs={'class': 'lazy'})
    atlas_list = []
    for atla in atlas:
        atlas_list.append(atla.parent['href'])
    return atlas_list


def save_one_page(start_url):  # save every atlas on a listing page via save_one_atlas
    atlas_url = get_atlas_list(start_url)
    for url in atlas_url:
        save_one_atlas(url)


if __name__ == '__main__':
    start_url = "http://www.mzitu.com/"
    for count in range(1, 3):  # listing pages 1 and 2
        url = start_url + "page/" + str(count) + "/"
        save_one_page(url)
    print("Scraping finished")
