Python code for deduplication with a BloomFilter backed by Redis
When crawling a large website we have to deduplicate tens of millions, even hundreds of millions, of URLs. A built-in Python set, or a Redis set, would eat an enormous amount of memory at that scale, while pushing the URLs into a database for deduplication keeps memory down but is far too slow. At this volume the usual tool is a BloomFilter, but if the machine goes down, a BloomFilter that lives only in process memory loses everything. Redis, however, can hold data in memory and also persist it to disk, so backing the BloomFilter's bit array with Redis gives us both speed and durability.
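The durability argument only holds if Redis persistence is actually turned on. Below is a minimal sketch of checking and enabling append-only persistence from redis-py; the host and port are assumptions, and in production you would normally set this in redis.conf rather than at runtime.

import redis

# Minimal sketch: make sure Redis will persist the filter's bitmap.
# host/port are assumptions; match whatever you pass to BloomFilter.
server = redis.Redis(host='localhost', port=6379, db=0)

# Enable the append-only file so every setbit is logged to disk and
# the filter survives a crash or restart.
server.config_set('appendonly', 'yes')
print(server.config_get('appendonly'))  # {'appendonly': 'yes'}

With persistence sorted out, the full filter follows: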
# encoding=utf-8
import redis
from hashlib import md5
class SimpleHash(object):
    """A polynomial string hash whose result is truncated to [0, cap)."""
    def __init__(self, cap, seed):
        self.cap = cap
        self.seed = seed

    def hash(self, value):
        ret = 0
        for ch in value:
            ret = self.seed * ret + ord(ch)  # Horner-style polynomial hash
        # cap is a power of two, so masking with cap - 1 keeps the
        # result inside the bitmap.
        return (self.cap - 1) & ret
class BloomFilter(object):
    def __init__(self, host='localhost', port=6379, db=0, blockNum=1, key='bloomfilter'):
        """
        :param host: the host of Redis
        :param port: the port of Redis
        :param db: which db in Redis
        :param blockNum: one block holds roughly 90,000,000 strings; if you
                         have more strings to filter, increase it.
        :param key: the key's name in Redis
        """
        self.server = redis.Redis(host=host, port=port, db=db)
        # << is a binary left shift: e.g. 2 << 2 shifts 000010 left by two
        # bits to 001000, i.e. decimal 8. A Redis String holds at most
        # 512 MB; 1 << 31 bits = 2**28 bytes = 256 MB per block.
        self.bit_size = 1 << 31
        self.seeds = [5, 7, 11, 13, 31, 37, 61]
        self.key = key
        self.blockNum = blockNum
        self.hashfunc = []
        for seed in self.seeds:
            self.hashfunc.append(SimpleHash(self.bit_size, seed))
    def isContains(self, str_input):
        if not str_input:
            return False
        m5 = md5()
        m5.update(str_input.encode('utf-8'))  # md5 requires bytes in Python 3
        str_input = m5.hexdigest()
        ret = True
        # Route each digest to one of the blockNum bitmaps by its first
        # two hex characters.
        name = self.key + str(int(str_input[0:2], 16) % self.blockNum)
        for f in self.hashfunc:
            loc = f.hash(str_input)
            ret = ret & self.server.getbit(name, loc)
        return bool(ret)
    def insert(self, str_input):
        m5 = md5()
        m5.update(str_input.encode('utf-8'))
        str_input = m5.hexdigest()
        name = self.key + str(int(str_input[0:2], 16) % self.blockNum)
        for f in self.hashfunc:
            loc = f.hash(str_input)
            self.server.setbit(name, loc, 1)
if __name__ == '__main__':
    # The first run prints "not exists!"; every run after that prints "exists!".
    bf = BloomFilter()
    url = 'http://www.example.com'  # any string works; URLs are the typical case
    if bf.isContains(url):
        print('exists!')
    else:
        print('not exists!')
        bf.insert(url)
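How trustworthy is the "roughly 90,000,000 strings per block" figure in the docstring? We can sanity-check it with the standard Bloom-filter false-positive estimate p ≈ (1 − e^(−kn/m))^k, where m is the number of bits, k the number of hash functions, and n the number of inserted items. A minimal sketch follows; it assumes idealized, independent hash functions, which SimpleHash over an md5 hexdigest only approximates.

import math

def false_positive_rate(n, m=1 << 31, k=7):
    """Theoretical false-positive rate for n items in an m-bit
    Bloom filter probed by k hash functions."""
    return (1 - math.exp(-k * n / m)) ** k

# One 256 MB block (m = 2**31 bits) with the 7 seeds used above:
print(false_positive_rate(90_000_000))  # ~7e-05, under 1 in 10,000

So even at 90 million URLs per block the theoretical error rate stays below 0.01%, and since raising blockNum spreads the items across more bitmaps, each block sees a smaller n and the rate drops further.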