Scrapy: a custom pipeline class that saves scraped data to MongoDB

This is one of the snippets I keep around because I reach for it often. The code below is a custom Scrapy pipeline class that saves scraped data to MongoDB; it should be useful to many readers.
# 3rd-party modules
import pymongo

from scrapy.exceptions import DropItem


class MongoDBPipeline(object):

    def __init__(self, server, port, db, col):
        self.server = server
        self.port = port
        self.db = db
        self.col = col

    @classmethod
    def from_crawler(cls, crawler):
        # scrapy.conf has been removed; read the project settings
        # through from_crawler instead
        settings = crawler.settings
        return cls(
            server=settings['MONGODB_SERVER'],
            port=settings['MONGODB_PORT'],
            db=settings['MONGODB_DB'],
            col=settings['MONGODB_COLLECTION'],
        )

    def open_spider(self, spider):
        # MongoClient replaces pymongo.Connection, which was
        # removed in pymongo 3.0
        self.client = pymongo.MongoClient(self.server, self.port)
        self.collection = self.client[self.db][self.col]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # Reject the item if any of its fields came back empty
        err_msg = ''
        for field, data in item.items():
            if not data:
                err_msg += 'Missing %s of poem from %s\n' % (field, item['url'])
        if err_msg:
            raise DropItem(err_msg)
        self.collection.insert_one(dict(item))
        # scrapy.log is deprecated; use the spider's logger instead
        spider.logger.debug('Item written to MongoDB database %s/%s',
                            self.db, self.col)
        return item
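
For the pipeline to run, Scrapy needs the MongoDB connection parameters in the project settings and the pipeline registered in ITEM_PIPELINES. A minimal sketch of the relevant settings.py entries follows; the host, port, database and collection names, and the myproject.pipelines module path are placeholder assumptions you would replace with your own values.

# settings.py (sketch; all values below are placeholders)
ITEM_PIPELINES = {
    'myproject.pipelines.MongoDBPipeline': 300,
}

MONGODB_SERVER = 'localhost'
MONGODB_PORT = 27017
MONGODB_DB = 'mydb'
MONGODB_COLLECTION = 'poems'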

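The validation loop in process_item drops any item with an empty field, and the error message assumes a url field is present. A hypothetical item definition that satisfies both expectations might look like this (PoemItem and its fields are illustrative, not from the original post):

import scrapy


class PoemItem(scrapy.Item):
    # Every declared field must be non-empty, or the pipeline drops the item
    url = scrapy.Field()
    title = scrapy.Field()
    text = scrapy.Field()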