
py3+requests+urllib+bs4+threading: scraping Doutula meme images

For the underlying principles and approach, see my other hands-on scraping posts:

py3+urllib+bs4+anti-scraping measures, scrape Douban girl pics in 20-odd lines of code: http://www.cnblogs.com/uncleyong/p/6892688.html
py3+requests+json+xlwt, scraping Lagou job listings: http://www.cnblogs.com/uncleyong/p/6960044.html
py3+urllib+re, easily scrape the winning numbers of the last 100 Double Color Ball lottery draws: http://www.cnblogs.com/uncleyong/p/6958242.html

The implementation is as follows:

#-*- coding:utf-8 -*-
import requests, threading, time
from lxml import etree # only used by the commented-out XPath variant in get_img below
from bs4 import BeautifulSoup

# Fetch the page source
def get_html(url):
    # url = 'http://www.doutula.com/article/list/?page=1'
    headers = {'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
    response = requests.get(url=url, headers=headers) # send a GET request to the URL
    html = response.content.decode('utf-8') # decode the raw bytes into the page source
    # print(html)
    return html
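
# (Note, my suggestion rather than the original post's code: requests.get has no
# timeout here, so a stalled connection can hang the script indefinitely. Passing
# e.g. timeout=10 and calling response.raise_for_status() before decoding would
# make the fetch more robust against dead links and error pages.)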

# Match the image URLs
def get_img_html(html):
    # soup = BeautifulSoup(html,'html.parser')
    soup = BeautifulSoup(html,'lxml') # parse the page
    all_a = soup.find_all('a',class_='list-group-item') # grab the <a> tags; when a tag is named with a class or id, be sure to include it in the query
    # class="list-group-item" is the class on the <a> tags we want
    # <a class="list-group-item" href="http://www.doutula.com/article/detail/7536783">
    # print(type(all_a)) # <class 'bs4.element.ResultSet'>
    # print(all_a)
    for i in all_a:
        # print(i['href'])
        img_html = get_html(i['href']) # fetch the detail page's source; i['href'] reads the attribute value
        # print(img_html)
        get_img(img_html)
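
# (Side note, not from the original post: soup.select('a.list-group-item') is the
# CSS-selector equivalent of the find_all call above and returns the same tags.)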
# Collect the image URLs from a detail page
def get_img(html):
    # XPath variant (kept for reference):
    # soup = etree.HTML(html) # parse the source
    # items = soup.xpath('//div[@class="artile_des"]') # // selects matching nodes anywhere in the document, regardless of their position; [] is a filter condition
    # for item in items:
    #     imgurl_list = item.xpath('table/tbody/tr/td/a/img/@onerror')
    #     # print(imgurl_list)
    #     # start_save_img(imgurl_list)
    soup = BeautifulSoup(html, 'lxml')
    items = soup.find('div',class_='swiper-slide').find_all('div',class_='artile_des')
    # Don't chain find after find_all: find returns a single tag, find_all returns a set of tags, and picking one out of the set that way is wrong
    # items = soup.find('div',class_='swiper-slide').find_all('div',class_='artile_des').find('img')['src']
    # print(items)
    imgurl_list = []
    for i in items:
        imgurl = i.find('img')['src'] # the src attribute of the <img> tag
        # print(type(imgurl)) # <class 'str'>
        # print(imgurl)
        imgurl_list.append(imgurl)
    start_save_img(imgurl_list) # download each set of images with threads

# Download an image
x = 1
def save_img(img_url):
    # Earlier variant (kept for reference):
    # global x
    # x += 1
    # img_url = img_url.split('=')[-1][1:-2].replace('jp','jpg') # split on =
    # print('Downloading '+'http:'+img_url)
    # img_content = requests.get('http:'+img_url).content
    # with open('doutu/%s.jpg'%x, 'wb') as f: # urllib's urlretrieve would also work
    #     f.write(img_content)
    global x # shared counter used to number the files
    x += 1
    print('Downloading: '+img_url)
    ext = img_url.split('.')[-1] # image formats vary, so slice the extension off the URL for the filename below
    img_content = requests.get(img_url).content
    with open('doutu/%s.%s'%(x,ext), 'wb') as f: # urllib's urlretrieve would also work
        f.write(img_content)

def start_save_img(imgurl_list):
    for i in imgurl_list:
        # print(i)
        th = threading.Thread(target=save_img,args=(i,)) # the trailing comma makes args a one-element tuple
        # target is a callable (a function name) that runs once the thread starts
        th.start()
        th.join()
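
# A note on the threading above (my observation, not from the original post):
# because th.join() sits inside the loop, each thread is started and then
# immediately waited on, so the images in a set are actually fetched one at a
# time. A sketch of truly concurrent downloads with a thread pool -- here the
# filename comes from the URL's last path segment instead of the shared global
# counter, to avoid the race on x:
import os
from concurrent.futures import ThreadPoolExecutor

def start_save_img_pooled(imgurl_list):
    def fetch(img_url):
        filename = img_url.split('/')[-1] # name the file after the URL's last segment
        with open(os.path.join('doutu', filename), 'wb') as f:
            f.write(requests.get(img_url).content)
    with ThreadPoolExecutor(max_workers=8) as pool: # up to 8 downloads in flight
        list(pool.map(fetch, imgurl_list)) # consuming the results waits for every download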
# Main function
def main():
    start_url = 'http://www.doutula.com/article/list/?page={}'
    for i in range(1,2):
        # print(start_url.format(i))
        start_html = get_html(start_url.format(i))
        get_img_html(start_html) # pull the image URLs out of each detail page

if __name__ == '__main__': # script entry point
    start_time = time.time()
    main()
    end_time = time.time()
    print(start_time)
    print(end_time)
    print(end_time-start_time)
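
One practical caveat: save_img writes into a doutu/ directory relative to the working directory, and open() raises FileNotFoundError if that folder does not exist. A minimal guard, assuming you want the folder created automatically (this is my addition, not part of the original script):

import os
os.makedirs('doutu', exist_ok=True) # create the output folder if it's missing

Dropping these two lines at the top of the script, before main() runs, is enough.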
