Py Crawler: downloading all the images on a page into a specified folder by having the crawler access the server through a randomly chosen proxy (Jason niu)

# Py Crawler: download all images from a page into a specified folder, accessing the server through a randomly chosen proxy
import urllib.request
import os  
import random

def open_url(url):
    req = urllib.request.Request(url)
    # Spoof a desktop-browser User-Agent so the site does not reject the crawler.
    req.add_header("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.2.1.17116")

    # Sample HTTP proxies; these are almost certainly stale and must be
    # replaced with live ones before the script will work.
    proxies = ['61.135.217.7:80', '122.114.31.177:808', '27.19.80.110:808']
    # Pick one proxy at random and install it as the global opener, so every
    # subsequent urlopen() call goes through it.
    proxy_support = urllib.request.ProxyHandler({'http': random.choice(proxies)})
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)

    # Open the Request object (not the bare URL), so the User-Agent header is sent.
    response = urllib.request.urlopen(req)
    html = response.read()
    return html

def get_page(url):
    html = open_url(url).decode("utf-8")
    # The page embeds its current page number as: current-comment-page">[2397]
    # Offset 23 skips the 23 characters of 'current-comment-page">[' to land on
    # the first digit; the closing ']' marks the end of the number.
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    print(html[a:b])
    return html[a:b]
    
def find_imgs(url):
    html = open_url(url).decode("utf-8")
    img_addrs = []
    # Scan for every 'img src=' occurrence and collect the addresses ending in .jpg.
    a = html.find("img src=")
    while a != -1:
        # Look for '.jpg' within the next 255 characters of the match.
        b = html.find(".jpg", a, a + 255)
        if b != -1:
            # a+9 skips the 9 characters of 'img src="'; b+4 includes '.jpg'.
            img_addrs.append(html[a + 9:b + 4])
        else:
            b = a + 9
        a = html.find("img src=", b)
    return img_addrs

def save_imgs(folder, img_addrs):
    # download_mm() has already chdir'ed into the target folder,
    # so each file is written under its bare name.
    for each in img_addrs:
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            # The scraped addresses are protocol-relative (//host/...),
            # so prepend 'http:' before fetching.
            img = open_url('http:' + each)
            f.write(img)

def download_mm(folder="imgfile",pages=10):
    os.mkdir(folder)
    os.chdir(folder)
    url="http://jandan.net/ooxx/"
    page_num=int(get_page(url)) 
    
    for i in range(pages):
        page_num -= i  
        page_url = url+"page"+str(page_num)+"#comments"   
        img_addres = find_imgs(page_url) 
        save_imgs(folder,img_addres)
        
if __name__ == "__main__":
    download_mm()
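
The script above installs one randomly chosen proxy as the global opener on every call. As a variant on the same technique, here is a minimal sketch (not part of the original post) that picks a fresh random proxy per request and retries with another proxy when one fails; the function name open_url_with_retry, the timeout value, and the proxy list are illustrative assumptions, and live proxy addresses would have to be substituted.

# A minimal sketch, not the original author's code: choose a fresh random
# proxy for each request and retry with another proxy on failure. The
# function name and proxy list are illustrative placeholders.
import random
import urllib.error
import urllib.request

def open_url_with_retry(url, proxies, retries=3):
    for _ in range(retries):
        proxy = random.choice(proxies)  # rotate proxies across attempts
        opener = urllib.request.build_opener(
            urllib.request.ProxyHandler({'http': proxy}))
        opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
        try:
            # Use the opener directly instead of install_opener(), so the
            # proxy choice stays local to this request.
            with opener.open(url, timeout=10) as response:
                return response.read()
        except (urllib.error.URLError, OSError):
            continue  # this proxy failed; try the next random pick
    raise RuntimeError('all proxy attempts failed for ' + url)

Keeping the opener local per request (rather than calling install_opener()) means a dead proxy only affects one attempt, and different requests can go out through different proxies within the same run.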

  

posted @ 2018-03-17 11:30  一个处女座的程序猿