Image crawling and saving

Using Pillow
The Pillow image-processing library is required: pip install pillow
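A quick sanity check that the install worked (ImagesPipeline relies on Pillow internally to decode and save images):

import PIL
print(PIL.__version__)  # prints the installed Pillow version if the install succeeded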

When using ImagesPipeline to download image data, we need to override three methods of the pipeline class.

get_media_requests: issues a download request for each image URL
def get_media_requests(self, item, info):
    urls = ItemAdapter(item).get(self.images_urls_field, [])
    return [Request(u) for u in urls]

file_path: specifies the path where the image is persisted
def file_path(self, request, response=None, info=None, *, item=None):
    image_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
    return f'full/{image_guid}.jpg'

item_completed: returns the item, handing it on to the next pipeline class to be executed
def item_completed(self, results, item, info):
    return item
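The snippets above mirror the default implementations and assume imports along these lines:

import hashlib
from itemadapter import ItemAdapter
from scrapy import Request
from scrapy.utils.python import to_bytes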

Add the image save location in settings:
set IMAGES_STORE to the directory where downloaded images should be stored; it can be a relative path such as ./图片 or an absolute path,
e.g. C:\\Users\\Administrator\\PycharmProjects\\untitled\\pacongke\\13_crapy存图片\\zhanshi\\img
IMAGES_STORE = './图片'
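The custom pipeline class also has to be enabled in ITEM_PIPELINES (the full settings.py further below does exactly this):

ITEM_PIPELINES = {
    'zhanshi.pipelines.ZhanshiPipeline': 300,
}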

Run from the terminal: scrapy crawl zz (must be run inside the project directory)
Force-stop in the terminal with Ctrl+C
'''
===================================
'''
urllib is split into four submodules:
1. request - the request module
2. error - the exception-handling module
3. parse - the URL-parsing module
4. robotparser - the robots.txt-parsing module

'''
from urllib import request
'''
Request module
'''
url = 'http://httpbin.org/get'
# urlopen sends a simple network request; url can be a string or a Request object
resp = request.urlopen(url)

# read() returns the response body
# data = resp.read()
# print(data)

# info() returns the response headers
# print(resp.info())

# geturl() returns the URL that was actually requested
# print(resp.geturl())

# getcode() returns the HTTP status code
# print(resp.getcode())
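urlopen can also send a POST request by passing bytes through its data parameter; a minimal sketch (httpbin.org/post simply echoes the request back):

from urllib import parse
post_data = parse.urlencode({'key': 'value'}).encode()
post_resp = request.urlopen('http://httpbin.org/post', data=post_data)
print(post_resp.read().decode())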
=================================
Adding request headers
import urllib.request

test_url ='http://httpbin.org/get'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}

# build the request object
req = urllib.request.Request(test_url,headers=headers)

response = urllib.request.urlopen(req)
print(response.read().decode())
====================================
Adding a proxy
import urllib.request

url = 'http://httpbin.org/get'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}

req = urllib.request.Request(url=url,headers=headers)
proxies = {'http':'30.168.206.199:1133'}

# proxy handler
handler = urllib.request.ProxyHandler(proxies=proxies)

# build the opener object
opener = urllib.request.build_opener(handler)

# call the opener's open() method
response = opener.open(req)

res = response.read()
print(res)
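If the proxy should apply to plain urlopen calls as well, the opener can be installed globally; a short sketch:

# after install_opener, urllib.request.urlopen() routes through this opener
urllib.request.install_opener(opener)
response = urllib.request.urlopen(req)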
============================
# The parse module is a utility module: it provides methods for processing and parsing URLs

from urllib import parse

# single parameter
# name = '猫咪'
# asc_name = parse.quote(name)  # percent-encode the Chinese characters (URL encoding)
# print(asc_name)
#
# new_name = '%E7%8C%AB%E5%92%AA'
# name = parse.unquote(new_name)  # decode back to the original string
# print(name)

# multiple parameters
name = {'name1':'爬虫','name2':'哈哈'}
a = parse.urlencode(name)
print(a)
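Besides encoding, parse can also split a URL into its components and resolve relative links; a small sketch (the example URLs are just illustrative):

parts = parse.urlparse('http://httpbin.org/get?name=spider')
print(parts.scheme, parts.netloc, parts.path, parts.query)  # http httpbin.org /get name=spider
print(parse.urljoin('https://sc.chinaz.com/tupian/', 'index_2.html'))  # https://sc.chinaz.com/tupian/index_2.html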




=============================
# URLError wraps errors that are generally caused by the network, including a bad URL
# HTTPError wraps errors where the server returned an error status code
# (HTTPError is a subclass of URLError, so it has to be caught first)
from urllib import request
from urllib import error

url = 'http://baidaxuuuuu.com'

try:
    res = request.Request(url)
    response = request.urlopen(res)
    print(response.read().decode())


except error.HTTPError as f1:
    print(f1, 'http')

except error.URLError as f2:
    print(f2, 'url')

except Exception as e:
    print(e)
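HTTPError also exposes the status code and reason of the failed response; a minimal sketch (httpbin's /status/404 endpoint is used here just to force a 404):

try:
    request.urlopen('http://httpbin.org/status/404')
except error.HTTPError as e:
    print(e.code, e.reason)  # e.g. 404 NOT FOUND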


======================
import scrapy
from zhanshi.items import ZhanshiItem

class ZzSpider(scrapy.Spider):
    name = 'zz'
    allowed_domains = ['chinaz.com']
    start_urls = ['https://sc.chinaz.com/tupian/']

    def parse(self, response, **kwargs):
        # find all the image divs
        data_list = response.xpath('/html/body/div[3]/div[2]/div')
        # print(data_list)
        for data in data_list:
            item = ZhanshiItem()
            src = "https:" + str(data.xpath('./img/@data-original').extract_first())
            item['src'] = src
            # print(src)
            # print(item)
            yield item

        # crawl the following pages
        url = response.xpath('//a[@class="nextpage"]/@href').extract_first()
        # build the next-page URL by concatenation
        next_url = "https://sc.chinaz.com/tupian/" + str(url)
        yield scrapy.Request(url=next_url, callback=self.parse)
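As a side note, response.urljoin would resolve the relative href against the current page and is a bit more robust than manual concatenation:

# next_url = response.urljoin(url)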

=============================
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class ZhanshiItem(scrapy.Item):
    # define the fields for your item here like:
    src = scrapy.Field()
==============================
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import scrapy
from itemadapter import ItemAdapter
import requests
from scrapy.pipelines.images import ImagesPipeline


# Plain (manual) approach:
# class ZhanshiPipeline:
#     def process_item(self, item, spider):
#         src = item['src']
#         # get the image file name (with its extension)
#         name = src.split('/')[-1]
#         file_path = "C:\\Users\\Administrator\\PycharmProjects\\untitled\\pacongke\\13_crapy存图片\\zhanshi\\img"
#         res = requests.get(src).content
#         with open(file_path + "/" + name, 'wb') as f1:
#             print('saving', name)
#             f1.write(res)
#
#         return item

# Framework (ImagesPipeline) approach:

class ZhanshiPipeline(ImagesPipeline):
    # issue a request for each image URL
    def get_media_requests(self, item, info):
        yield scrapy.Request(item['src'])

    def file_path(self, request, response=None, info=None, *, item=None):
        # the image file name; the path returned here is relative to IMAGES_STORE
        img_name = request.url.split('/')[-1]
        print('saving', img_name)
        return img_name

    # return the item so that later pipeline classes can use it
    def item_completed(self, results, item, info):
        return item
========================================
# Scrapy settings for zhanshi project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'zhanshi'

SPIDER_MODULES = ['zhanshi.spiders']
NEWSPIDER_MODULE = 'zhanshi.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False
LOG_LEVEL = "ERROR"
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'zhanshi.middlewares.ZhanshiSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'zhanshi.middlewares.ZhanshiDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'zhanshi.pipelines.ZhanshiPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


IMAGES_STORE ="C:\\Users\\Administrator\\PycharmProjects\\untitled\\pacongke\\13_crapy存图片\\zhanshi\\img"


 

 













