Scrapy + Redis deduplication for incremental crawling

The downloader middleware below records the SHA-1 of every requested URL and drops any request whose URL has already been seen, so repeat runs of the spider only download new pages. This version uses MongoDB as the seen-URL store; a Redis-backed sketch of the same idea follows the code.

import hashlib

import pymongo
from scrapy import signals
from scrapy.exceptions import IgnoreRequest


class ProjectnameDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # Scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    def __init__(self):
        # MongoDB connection that stores every URL already crawled.
        self.Client = pymongo.MongoClient()
        self.Mydb = self.Client['jinrong']
        self.Table = self.Mydb['jinrog_table']

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def spider_opened(self, spider):
        # Signal handler referenced in from_crawler above.
        spider.logger.info('Spider opened: %s' % spider.name)

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        # Hash the URL so the lookup key has a fixed length.
        sha1 = hashlib.sha1(request.url.encode('utf-8'))
        sha1_url = sha1.hexdigest()
        My_Query = {'sha1_url': sha1_url}
        if self.Table.count_documents(My_Query) == 0:
            # New URL: record it and let the request be downloaded.
            self.Table.insert_one({'url': request.url, 'sha1_url': sha1_url})
            return None
        # Already crawled: drop the request instead of fetching it again.
        raise IgnoreRequest('duplicate url: %s' % request.url)
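
The title mentions Redis; a minimal sketch of the same SHA-1 deduplication backed by a Redis set rather than MongoDB might look like the following. The middleware name, the key dupe:sha1_urls, and the local Redis connection parameters are illustrative assumptions, not part of the original post.

import hashlib

import redis
from scrapy.exceptions import IgnoreRequest


class RedisDedupDownloaderMiddleware(object):
    # Sketch: keep the SHA-1 of every crawled URL in a Redis set so that
    # re-runs of the spider only download URLs not seen before.
    def __init__(self):
        # Assumed local Redis instance and key name.
        self.redis = redis.StrictRedis(host='localhost', port=6379, db=0)
        self.key = 'dupe:sha1_urls'

    def process_request(self, request, spider):
        sha1_url = hashlib.sha1(request.url.encode('utf-8')).hexdigest()
        # SADD returns 1 when the member is new and 0 when it already exists,
        # so a single round trip both records and checks the URL.
        if self.redis.sadd(self.key, sha1_url) == 0:
            raise IgnoreRequest('duplicate url: %s' % request.url)
        return None

Either middleware only takes effect once it is registered under DOWNLOADER_MIDDLEWARES in the project's settings.py, for example (the module path projectname.middlewares is an assumption that depends on your project layout):

DOWNLOADER_MIDDLEWARES = {
    'projectname.middlewares.ProjectnameDownloaderMiddleware': 543,
}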