import requests
from bs4 import BeautifulSoup
import io
from selenium import webdriver
import time
import sys

 

def get_img_list(url):
    """Scrape the gallery listing page and return (detail hrefs, titles).

    A real browser (Selenium Chrome) is used because the listing appears to
    be rendered client-side; the fixed sleep waits for that rendering.

    Parameters
    ----------
    url : str
        Listing-page URL to load in the browser.

    Returns
    -------
    tuple[list[str], list[str]]
        Parallel lists: gallery detail-page hrefs and their titles.
    """
    img_list = []
    title_list = []
    browser = webdriver.Chrome()
    try:
        browser.get(url)
        time.sleep(5)  # crude wait for JS content; TODO: prefer WebDriverWait
        soup = BeautifulSoup(browser.page_source, 'html.parser')
        # Title spans and figure divs are parallel lists on the page —
        # assumed to be same length and same order; verify against the site.
        spans = soup.find_all(
            name='span',
            attrs={"class": "absolute block align-center ellipsis pic-bone-imgname-list"},
        )
        for span in spans:
            title_list.append(span.find('a').text)
        for div in soup.find_all(name="div", attrs={"class": "figure figure-img"}):
            img_list.append(div.find('a').get('href'))
    finally:
        # Always release the browser, even if the page fails to parse;
        # the original leaked the Chrome process on any exception.
        browser.quit()
    return img_list, title_list
def get_img(url_1):
    """Fetch one gallery detail page and return the image hrefs it lists.

    The detail page is fetched with plain ``requests`` (no browser needed).

    Parameters
    ----------
    url_1 : str
        Full URL of a gallery detail page.

    Returns
    -------
    list[str]
        ``href`` values of every ``<a>`` inside the ``<ul id="grid">``
        element (presumably links to full-size images — verify on site).
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0)Gecko/20100101 Firefox/61.0"}
    requests.adapters.DEFAULT_RETRIES = 5  # increase the retry count
    s = requests.session()
    s.keep_alive = False  # close connections eagerly to avoid exhaustion
    # s.proxies = {'http': '117.69.152.142:9999'}
    r = s.get(url_1, headers=headers)  # send browser-like headers
    r.encoding = "utf-8"
    soup = BeautifulSoup(r.text, 'html.parser')
    ul = soup.find('ul', id='grid')
    return [anchor.get('href') for anchor in ul.find_all('a')]

def main():
    """Download every image from each gallery linked on the tag page.

    Flow: scrape the listing once with Selenium, then for each gallery
    fetch its detail page and save each image to the working directory
    as '{title}_{num}.jpg'.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0)Gecko/20100101 Firefox/61.0"}
    requests.adapters.DEFAULT_RETRIES = 5  # increase the retry count
    s = requests.session()
    s.keep_alive = False
    url = 'https://www.plmm.com.cn/tags-234-0.html'
    # BUG FIX: the original called get_img_list(url) twice (once per tuple
    # element), launching two Chrome sessions and scraping the page twice.
    # One call with tuple unpacking is enough.
    img_list, title_list = get_img_list(url)
    for href, title in zip(img_list, title_list):
        links = get_img('https:' + href)  # hrefs are protocol-relative
        print(title)
        for num, link in enumerate(links, start=1):
            res = s.get('https:' + link, headers=headers)
            filename = u'{title}_{num}.jpg'.format(title=title, num=num)
            with io.open(filename, 'wb') as f:
                f.write(res.content)
        print('complete')
    print('DONE!')

 

# Standard script-entry guard; the original had main() unindented under
# the if, which is a syntax error.
if __name__ == '__main__':
    main()