Qiushibaike (糗事百科) image crawler code

# In the folder where you normally write code, create an image_spider folder as the project directory, and inside it create an image folder to store the downloaded pictures.
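
If you would rather create the image folder from the script instead of by hand, a minimal sketch using the standard library (the path below is only an example, copied from the download code further down; adjust it to your own setup):

import os

# Create the folder for downloaded images if it does not exist yet.
os.makedirs("D:/Program Files/python/image_spider/image", exist_ok=True)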

# Scrape images from Qiushibaike.
# The page uses protocol-relative image URLs, so "http:" must be prepended before requesting them.
import requests
import re

def crawl_image(image_url, image_local_path):
    # Prepend the scheme: the src attributes on the page start with "//".
    image_url = "http:" + image_url
    r = requests.get(image_url, stream=True)
    # Write the downloaded bytes to the local file.
    with open(image_local_path, "wb") as f:
        f.write(r.content)

def crawl(page):
    url = "https://www.qiushibaike.com/imgrank/page/" + str(page)
    res = requests.get(url)
    # Each image post sits inside a <div class="thumb"> block.
    content_list = re.findall("<div class=\"thumb\">(.*?)</div>", res.content.decode("utf-8"), re.S)
    for content in content_list:
        # Pull the src attribute of every <img> tag inside the block.
        image_list = re.findall("<img src=\"(.*?)\"", content)
        # print(image_list)
        for image_url in image_list:
            # Save each image under its original file name in the image folder.
            crawl_image(image_url, "D:/Program Files/python/image_spider/image/" + image_url.strip().split('/')[-1])
            # print("./image/" + image_url.strip().split('/')[-1])

if __name__ == '__main__':
    crawl(1)
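
To grab more than one page, the entry point can loop over page numbers instead of calling crawl(1) once. A minimal sketch (the range 1 to 5 is an arbitrary example, and the commented User-Agent line is only an assumption about how you might handle a site that rejects the default requests client; it is not part of the original post):

if __name__ == '__main__':
    for page in range(1, 6):  # pages 1 through 5; pick whatever range you need
        crawl(page)
    # If the site rejects the default client, a browser-like header can be passed
    # inside crawl(), e.g. requests.get(url, headers={"User-Agent": "Mozilla/5.0"})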
posted @ 2018-04-21 16:23  lili414