Item Pipeline Example: Scraping 360 Photography Images

 

Generating the Project

scrapy startproject images360

cd images360 && scrapy genspider images images.so.com

 

I. Constructing the Requests

1. In settings.py, add MAX_PAGE = 5 so the spider crawls 5 pages

2. In images.py, define a start_requests() method that generates the 5 requests (see the URL sketch below)
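For reference, this is roughly what the five generated request URLs look like (a minimal sketch; the /zj endpoint and its parameters are taken from the images.py code in section IV):

from urllib.parse import urlencode

base_url = 'https://image.so.com/zj?'
data = {'ch': 'photography', 'listtype': 'new'}
for page in range(1, 6):               # MAX_PAGE = 5
    data['sn'] = page * 30             # sn is the result offset; each page holds 30 images
    print(base_url + urlencode(data))
# first URL: https://image.so.com/zj?ch=photography&listtype=new&sn=30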

 

II. Extracting the Data

1. Define ImageItem in items.py to hold the fields we need

2. Define the parse() method in images.py to extract the data (a sample of the JSON it parses follows)
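For orientation, the JSON returned by the /zj endpoint is roughly shaped like this (a simplified sketch; the field names are inferred from the parse() code in section IV, and the real response carries additional fields):

{
    "list": [
        {
            "imageid": "...",
            "group_title": "...",
            "qhimg_url": "...",
            "qhimg_thumb_url": "..."
        }
    ]
}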

 

 

III. Storing the Data

In pipelines.py, define three classes that store each image in a local directory, in MongoDB, and in MySQL respectively. The integers in ITEM_PIPELINES are priorities: lower values run first, so ImagePipeline (300) downloads the image and drops failed items before MongoPipeline (301) and MysqlPipeline (302) write to the databases.

 

 

IV. Complete Code for Each File

 

1. settings.py

# -*- coding: utf-8 -*-


BOT_NAME = 'images360'

SPIDER_MODULES = ['images360.spiders']
NEWSPIDER_MODULE = 'images360.spiders'

# Must be False so the spider can fetch the site's backend API data
ROBOTSTXT_OBEY = False

# Register the pipelines from pipelines.py so they take effect
ITEM_PIPELINES = {
    'images360.pipelines.ImagePipeline': 300,
    'images360.pipelines.MongoPipeline': 301,
    'images360.pipelines.MysqlPipeline': 302,
}

# Local directory where downloaded images are stored
IMAGES_STORE = './images'

# Crawl 5 pages
MAX_PAGE = 5

# MongoDB settings
MONGO_URI = 'localhost'
MONGO_DB = 'images360'

# MySQL settings
MYSQL_HOST = 'localhost'
MYSQL_DATABASE = 'images360'
MYSQL_USER = 'root'
MYSQL_PASSWORD = '123'
MYSQL_PORT = 3306

 

2. items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

from scrapy import Item, Field


class ImageItem(Item):

    # Name of the MongoDB collection and of the MySQL table; both are the string 'images'
    collection = table = 'images'
    
    # The image's ID, URL, title, and thumbnail URL
    id = Field()
    url = Field()
    title = Field()
    thumb = Field()

 

3. images.py

# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from urllib.parse import urlencode
import json

from images360.items import ImageItem


class ImagesSpider(Spider):
    name = 'images'
    allowed_domains = ['images.so.com']
    start_urls = ['http://images.so.com/']


    def start_requests(self):
        data = {'ch': 'photography', 'listtype': 'new'}
        base_url = 'https://image.so.com/zj?'  # Backend API endpoint that returns the image list as JSON
        for page in range(1, self.settings.get('MAX_PAGE') + 1):
            data['sn'] = page * 30
            params = urlencode(data)
            url = base_url + params
            yield Request(url, self.parse)

    def parse(self, response):
        result = json.loads(response.text)
        for image in result.get('list'):
            item = ImageItem()
            item['id'] = image.get('imageid')
            item['url'] = image.get('qhimg_url')
            item['title'] = image.get('group_title')
            item['thumb'] = image.get('qhimg_thumb_url')
            yield item

 

4. pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html


import pymongo
import pymysql
from scrapy import Request
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline


class MongoPipeline(object):
    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
    
    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )
    
    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
    
    def process_item(self, item, spider):
        name = item.collection
        # insert_one() replaces the long-deprecated Collection.insert()
        self.db[name].insert_one(dict(item))
        return item
    
    def close_spider(self, spider):
        self.client.close()


class MysqlPipeline():
    def __init__(self, host, database, user, password, port):
        self.host = host
        self.database = database
        self.user = user
        self.password = password
        self.port = port
    
    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            host=crawler.settings.get('MYSQL_HOST'),
            database=crawler.settings.get('MYSQL_DATABASE'),
            user=crawler.settings.get('MYSQL_USER'),
            password=crawler.settings.get('MYSQL_PASSWORD'),
            port=crawler.settings.get('MYSQL_PORT'),
        )
    
    def open_spider(self, spider):
        # Use keyword arguments: positional connect() arguments were removed in PyMySQL 1.0
        self.db = pymysql.connect(host=self.host, user=self.user, password=self.password,
                                  database=self.database, charset='utf8', port=self.port)
        self.cursor = self.db.cursor()
    
    def close_spider(self, spider):
        self.db.close()
    
    def process_item(self, item, spider):
        print(item['title'])  # simple progress output
        data = dict(item)
        keys = ', '.join(data.keys())
        values = ', '.join(['%s'] * len(data))
        sql = 'insert into %s (%s) values (%s)' % (item.table, keys, values)
        self.cursor.execute(sql, tuple(data.values()))
        self.db.commit()
        return item


class ImagePipeline(ImagesPipeline):
    def file_path(self, request, response=None, info=None):
        url = request.url
        file_name = url.split('/')[-1]
        return file_name
    
    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem('Image Downloaded Failed')
        return item
    
    def get_media_requests(self, item, info):
        yield Request(item['url'])
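Note that MysqlPipeline does not create its table, so the images360 database and the images table must exist before the spider runs. A minimal one-off setup script (the column types here are assumptions, not part of the original post; adjust as needed):

import pymysql

# One-off setup: create the database and table that MysqlPipeline writes to.
db = pymysql.connect(host='localhost', user='root', password='123', charset='utf8')
cursor = db.cursor()
cursor.execute('CREATE DATABASE IF NOT EXISTS images360 DEFAULT CHARACTER SET utf8')
cursor.execute(
    'CREATE TABLE IF NOT EXISTS images360.images ('
    'id VARCHAR(64) NOT NULL PRIMARY KEY,'   # imageid from the API
    'url VARCHAR(255),'                      # full-size image URL
    'title VARCHAR(255),'
    'thumb VARCHAR(255))'                    # thumbnail URL
)
db.close()

With the table in place, run the spider from the project root:

scrapy crawl images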

 
