Python 爬虫 七夕福利
祝大家七夕愉快
妹子图
import os

import requests
from lxml import etree


def headers(refere):
    """Build request headers with a per-URL Referer.

    The image host rejects downloads whose Referer does not match the
    referring gallery page, so the Referer is varied per request to get
    past that anti-hotlinking check.

    :param refere: URL to send as the Referer header value.
    :return: dict of HTTP headers for ``requests.get``.
    """
    # NOTE: the original code misspelled the header key as 'Refere';
    # the server's anti-hotlink check only honors the real 'Referer'.
    return {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
        'Referer': '{}'.format(refere),
    }


def Tuji(pag):
    """Return the list of gallery URLs found on listing page *pag*.

    :param pag: 1-based listing page number.
    :return: list of gallery page URLs (one per photo set).
    """
    fullurl = 'http://www.mzitu.com/page/{}/'.format(pag)
    # Send browser-like headers here too, consistent with every other
    # request in this script (the site blocks the default requests UA).
    shouye_html_text = requests.get(fullurl, headers=headers(fullurl)).text
    shouye_ele = etree.HTML(shouye_html_text)
    # Each <li> under #pins links to one photo gallery; xpath already
    # returns a list, so no manual append loop is needed.
    return list(shouye_ele.xpath('//*[@id="pins"]/li/a/@href'))


def gettuji_info(tj_url_list):
    """Download every image of every gallery in *tj_url_list*.

    Creates one directory per gallery (named after its title) and saves
    the images inside it as 1.jpg, 2.jpg, ...

    :param tj_url_list: iterable of gallery page URLs,
        e.g. ``http://www.mzitu.com/146823``.
    """
    for tj_url_1 in tj_url_list:
        tj_html_text = requests.get(tj_url_1, headers=headers(tj_url_1)).text
        tj_ele = etree.HTML(tj_html_text)
        # Gallery title doubles as the output directory name.
        img_title = tj_ele.xpath('//h2[@class="main-title"]/text()')[0]
        # The pager's 5th <a> holds the last page number; this absolute
        # xpath is fragile and breaks if the site's markup changes.
        max_pag = int(tj_ele.xpath(
            '/html/body/div[2]/div[1]/div[4]/a[5]/span/text()')[0])
        # exist_ok avoids the exists()/mkdir() race of the original.
        os.makedirs(img_title, exist_ok=True)
        for i in range(1, max_pag + 1):
            # Per-image page: gallery URL + '/' + page index.
            tj_url_2 = tj_url_1 + '/' + str(i)
            tj_html = requests.get(tj_url_2, headers=headers(tj_url_1))
            page_ele = etree.HTML(tj_html.text)
            img_url = page_ele.xpath('//div[@class="main-image"]/p/a/img/@src')[0]
            print('正在下载' + img_title + '第' + str(i) + '张')
            # 'with' closes the file; no explicit close() needed.
            with open(os.path.join(img_title, str(i) + '.jpg'), 'wb') as jpg:
                jpg.write(requests.get(img_url, headers=headers(tj_url_2)).content)


if __name__ == '__main__':
    pags = int(input('你想搞几页的嘿嘿?'))
    for pag in range(1, pags + 1):
        gettuji_info(Tuji(pag))
- -疏影横斜水清浅,暗香浮动月黄昏。- -