This article collects typical usage examples of the Python pybloom.BloomFilter class, gathered from open-source projects. If you are unsure what BloomFilter is for or how to use it, the curated examples below should help.
The following 15 code examples of the BloomFilter class are presented, ordered by popularity.
Example 1: __init__
def __init__(self, server):
    # redis server
    self.server = server
    # used to check whether a domain has already been seen
    allowed = [
        "qq.com",
        "163.com",
        "people.com.cn",
        "xinhuanet.com",
        "cntv.cn",
        "ifeng.com",
        "hexun.com",
        "sina.com.cn",
        "sohu.com",
        "dbw.cn",
    ]
    self.bloom_domain_filter = BloomFilter(capacity=32)
    for a in allowed:
        self.bloom_domain_filter.add(a)
    # regex filters for some blog platforms
    self.qzone_filter = re.compile(r"^http://.*\.qzone\.qq\.com")
    self.wangyiblog_filter = re.compile(r"^http://.*\.blog\.163\.com")
    self.hexunblog_filter = re.compile(r"^http://.*\.blog\.hexun\.com")
    self.sohublog_filter = re.compile(r"http://.*\.blog\.sohu\.com")
    self.sohui_filter = re.compile(r"http://.*\.i\.sohu\.com")
    self.bloom_domain_vec = BloomFilter(capacity=1 << 16, error_rate=0.001)
    self.bloom_netloc_vec = BloomFilter(capacity=1 << 16, error_rate=0.001)
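For context, the allowed-domain filter is queried with Python's `in` operator. A minimal sketch, assuming an instance `spider` of the class above; the URL and the naive domain extraction are illustrative only:

from urlparse import urlparse  # Python 2; urllib.parse in Python 3

url = "http://news.qq.com/a/123.html"  # illustrative URL
netloc = urlparse(url).netloc
domain = ".".join(netloc.split(".")[-2:])  # naive; mishandles e.g. "sina.com.cn"
if domain in spider.bloom_domain_filter:
    pass  # allowed domain, subject to the filter's false-positive rate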
Example 2: main
def main():
    if os.path.isfile(nsrl_path):
        print "BUILDING: Reading in NSRL Database"
        with open(nsrl_path) as f_line:
            # Strip off header
            _ = f_line.readline()
            print "BUILDING: Calculating number of hashes in NSRL..."
            num_lines = sum(bl.count("\n") for bl in blocks(f_line))
        print "BUILDING: There are %s hashes in the NSRL Database" % num_lines
        with open(nsrl_path) as f_nsrl:
            # Strip off header
            _ = f_nsrl.readline()
            print "BUILDING: Creating bloomfilter"
            bf = BloomFilter(num_lines, error_rate)
            print "BUILDING: Inserting hashes into bloomfilter"
            for line in f_nsrl:
                md5_hash = line.split(",")[1].strip('"')
                if md5_hash:
                    try:
                        bf.add(md5_hash)
                    except Exception as e:
                        print "ERROR: %s" % e
        print "BUILDING: NSRL bloomfilter contains {} items.".format(len(bf))
        with open('nsrl.bloom', 'wb') as nb:
            bf.tofile(nb)
        print "BUILDING: Complete"
    else:
        print "ERROR: No such file or directory: %s" % nsrl_path
        return
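Both this example and the next call a blocks() helper that is not part of the excerpt. A plausible minimal implementation reads the file in fixed-size chunks so the newline count stays memory-efficient; the 64 KB block size is an assumption:

def blocks(f, size=65536):
    # Yield fixed-size chunks of f until EOF; used above to count
    # newlines without loading the whole NSRL file into memory.
    while True:
        b = f.read(size)
        if not b:
            break
        yield b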
Example 3: main
def main(argv):
    if argv:
        error_rate = float(argv[0])
        print "[BUILDING] Using error-rate: {}".format(error_rate)
    if os.path.isfile(nsrl_path):
        print "[BUILDING] Reading in NSRL Database"
        with open(nsrl_path) as f_line:
            # Strip off header
            _ = f_line.readline()
            print "[BUILDING] Calculating number of hashes in NSRL..."
            num_lines = sum(bl.count("\n") for bl in blocks(f_line))
        print "[BUILDING] There are %s hashes in the NSRL Database" % num_lines
        with open(nsrl_path) as f_nsrl:
            # Strip off header
            _ = f_nsrl.readline()
            print "[BUILDING] Creating bloomfilter"
            # error_rate falls back to a module-level default if not given
            bf = BloomFilter(num_lines, error_rate)
            print "[BUILDING] Inserting hashes into bloomfilter"
            for line in f_nsrl:
                sha1_hash = line.split(",")[0].strip('"')
                if sha1_hash:
                    try:
                        sha1 = binascii.unhexlify(sha1_hash)
                        bf.add(sha1)
                    except Exception as e:
                        print "[ERROR] %s" % e
        print "[BUILDING] NSRL bloomfilter contains {} items.".format(len(bf))
        with open('nsrl.bloom', 'wb') as nb:
            bf.tofile(nb)
        print "[BUILDING] Complete"
    else:
        print "[ERROR] No such file or directory: %s" % nsrl_path
        return
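The serialized filter can later be reloaded for lookups. A minimal sketch of the query side, assuming the nsrl.bloom file produced above; the digest value is illustrative:

import binascii
from pybloom import BloomFilter

with open('nsrl.bloom', 'rb') as f:
    bf = BloomFilter.fromfile(f)

# Hashes were inserted as raw bytes above, so queries must unhexlify too.
sha1_hex = "da39a3ee5e6b4b0d3255bfef95601890afd80709"  # illustrative digest
if binascii.unhexlify(sha1_hex) in bf:
    print "possibly in NSRL (false-positive rate applies)"
else:
    print "definitely not in NSRL"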
Example 4: generateBloomFilter
def generateBloomFilter(f):
    "Generates and returns the bloom filter for entries in f."
    # this probably isn't enough; the data formatting needs a more
    # thorough review
    d = BloomFilter(1000, 0.001)
    for line in f:
        d.add(line.split(None, 1)[0])  # first whitespace-separated field
    return d
Example 5: test_insert_then_test
def test_insert_then_test(self):
    result = create_index(
        '/tmp/fake.csv',  # input filename
        self.test_file,   # file-like object
        0.0001,           # error rate
        1,                # skip lines
        [1, 2],           # fields
        ',',              # delimiter
        False)            # recursive domain
    self.assertEqual(
        {'/tmp/fake.csv.2.bfindex': 6,
         '/tmp/fake.csv.1.bfindex': 5},
        result)
    b1 = BloomFilter.fromfile(open('/tmp/fake.csv.1.bfindex', 'rb'))
    b2 = BloomFilter.fromfile(open('/tmp/fake.csv.2.bfindex', 'rb'))
    self.assertEqual(False, 'FieldA' in b1)
    self.assertEqual(False, 'FieldB' in b2)
    for word in ('apple', 'banana', 'orange', 'pear', 'pineapple'):
        self.assertEqual(True, word in b1)
        self.assertEqual(False, word in b2)
    for word in ('carrot', 'potato', 'leek', 'cauliflower', 'bean'):
        self.assertEqual(True, word in b2)
        self.assertEqual(False, word in b1)
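The create_index function under test is not shown. A rough sketch of what it plausibly does, reconstructed only from the behavior the test asserts (one bloom filter per selected 1-based column, serialized to <filename>.<n>.bfindex, returning a name-to-count dict); all names and structure here are assumptions:

def create_index(filename, fileobj, error_rate, skip_lines, fields,
                 delimiter, recursive_domain):
    # Sketch: build one bloom filter per selected column.
    rows = [line.rstrip("\n").split(delimiter)
            for line in fileobj.readlines()[skip_lines:]]
    result = {}
    for n in fields:
        bf = BloomFilter(capacity=len(rows) + 1, error_rate=error_rate)
        for row in rows:
            bf.add(row[n - 1])  # columns appear to be 1-based
        index_name = '%s.%d.bfindex' % (filename, n)
        with open(index_name, 'wb') as out:
            bf.tofile(out)
        result[index_name] = len(bf)
    # recursive_domain handling omitted in this sketch
    return result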
Example 6: _build_filter
def _build_filter():
    bf = BloomFilter(capacity=10000, error_rate=0.001)
    worst = [w[:-2] for w in open(_WORST_DUMP).readlines()]  # strip trailing "\r\n"
    map(bf.add, worst)
    with open(_BLOOM_DUMP, 'wb') as f:  # tofile writes binary data
        bf.tofile(f)
    print "Serialized bloom filter to", _BLOOM_DUMP
Example 7: __init__
class UrlBloom:
    '''BloomFilter wrapper: checks elements for repetition'''
    def __init__(self, _capacity=1000000, _error_rate=0.00001):
        self.is_full = False
        # decide whether to load a timed backup of the bloom data
        if CONFIG.get('BACKUP', 0) == 1:
            self.bomb = TimeBomb(CONFIG['TMP_DIR'] + CONFIG['BLOOM_FILE'])
            self.filter = self.bomb.load()
            if self.filter is None:
                self.filter = BloomFilter(capacity=_capacity, error_rate=_error_rate)
            self.bomb.dump(self.filter)
        else:
            self.filter = BloomFilter(capacity=_capacity, error_rate=_error_rate)

    def add(self, links):
        if self.is_full:
            return
        try:
            for ele in links:
                self.filter.add(ele)
        except IndexError:
            # pybloom raises IndexError when the filter is at capacity
            self.is_full = True

    def clean(self, links):
        res = []
        for ele in links:
            if ele not in self.filter:
                res.append(ele)
        return res
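A minimal usage sketch, assuming CONFIG has backup disabled; the URLs are illustrative:

bloom = UrlBloom()
bloom.add(['http://a.example/1', 'http://b.example/2'])
fresh = bloom.clean(['http://a.example/1', 'http://c.example/3'])
# fresh == ['http://c.example/3'], modulo bloom false positives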
Example 8: BLOOMDupeFilter
class BLOOMDupeFilter(BaseDupeFilter):
    """Request Fingerprint duplicates filter"""
    def __init__(self, path=None):
        self.file = None
        # capacity
        #   this BloomFilter must be able to store at least *capacity*
        #   elements while maintaining no more than *error_rate* chance
        #   of false positives
        # error_rate
        #   the rate at which the filter returns false positives; this
        #   determines the filter's capacity. Inserting more than
        #   *capacity* elements greatly increases the chance of false
        #   positives.
        self.fingerprints = BloomFilter(capacity=2000000, error_rate=0.00001)
        # seed the filter with all the urls already in the database
        db = DynamoDBPipeline()
        urls = db.get_url_list()
        for url in urls:
            self.fingerprints.add(url)

    @classmethod
    def from_settings(cls, settings):
        return cls(job_dir(settings))

    def request_seen(self, request):
        fp = request.url
        if fp in self.fingerprints:
            return True
        self.fingerprints.add(fp)

    def close(self, reason):
        self.fingerprints = None
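To activate a dupe filter like this, Scrapy is pointed at it via the DUPEFILTER_CLASS setting; the module path below is an assumption:

# settings.py (module path is hypothetical)
DUPEFILTER_CLASS = 'myproject.dupefilters.BLOOMDupeFilter'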
Example 9: __init__
def __init__(self):
    try:
        with open(FILTER_FILE, 'rb') as f:  # fromfile expects binary mode
            self.f = BloomFilter.fromfile(f)
    except IOError:
        self.f = BloomFilter(capacity=10000000, error_rate=0.001)
    self.num = 0
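A persistence counterpart would serialize the filter back on shutdown so the next run resumes from it; a minimal sketch under the same FILTER_FILE assumption:

def save(self):
    # Write the current filter so the next run's __init__ can reload it.
    with open(FILTER_FILE, 'wb') as f:
        self.f.tofile(f)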
Example 10: create_empty_bloomfilter
def create_empty_bloomfilter(self):
    """Create an empty bloom filter with byte alignment."""
    bf = BloomFilter(capacity=self.cache.quota, error_rate=self.error_rate)
    bs = bf.bitarray.tostring()
    bf.bitarray = bitarray()
    bf.bitarray.fromstring(bs)
    return bf
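The tostring()/fromstring() round trip appears to be a byte-alignment trick: bitarray serializes to whole bytes, padding with zero bits, so the reloaded bitarray has a length that is a multiple of 8. That keeps later byte-level operations on the filter (serialization, comparison) well defined even when the computed bit count is not byte-aligned.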
Example 11: user_init
def user_init():
    import re  # unused in this excerpt; kept from the original
    users = BloomFilter(10000000, 0.001)
    with open(u"D:/工作/数据美化/data/简书用户id1.txt") as f:
        for line in f:
            users.add(line.strip())
    return users
Example 12: add
def add(self, key):
    """Adds a key to this bloom filter.
    If the key already exists in this filter it will return True.
    Otherwise False.

    >>> b = ScalableBloomFilter(initial_capacity=100, error_rate=0.001, \
                                mode=ScalableBloomFilter.SMALL_SET_GROWTH)
    >>> b.add("hello")
    False
    >>> b.add("hello")
    True

    """
    if key in self:
        return True
    if not self.filters:
        filter = BloomFilter(
            capacity=self.initial_capacity,
            error_rate=self.error_rate * (1.0 - self.ratio))
        self.filters.append(filter)
    else:
        filter = self.filters[-1]
        if filter.count >= filter.capacity:
            filter = BloomFilter(
                capacity=filter.capacity * self.scale,
                error_rate=filter.error_rate * self.ratio)
            self.filters.append(filter)
    filter.add(key, skip_check=True)
    return False
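The two error_rate adjustments implement the scalable filter's error budget: with target error e and tightening ratio r, the first filter gets e(1 - r) and each subsequent filter gets r times the previous rate, so by the union bound the total false-positive probability is at most the geometric series e(1 - r)(1 + r + r^2 + ...) = e.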
Example 13: determine_lookup_speed_threshold
def determine_lookup_speed_threshold(self):
    from time import time
    # time each collection size over several repetitions
    bf = BloomFilter(capacity=self.bloom_size, error_rate=self.bloom_error)
    count = 1
    repetitions = 5
    self_bf_holder = self.bf
    self.bf = bf
    while True:
        bf.add('andrew_' + str(count))
        bin_faster_count = 0
        for j in xrange(repetitions):
            # Linear scan
            t1 = time()
            self.linear_scan_count('andrew')
            t2 = time()
            linear_time = t2 - t1
            # Binary search
            t1 = time()
            self.binsearch_count('andrew')
            t2 = time()
            bin_time = t2 - t1
            bin_faster_count += int(bin_time < linear_time)
        if 1. * bin_faster_count / repetitions >= 0.75:
            del bf
            self.bf = self_bf_holder
            return count
        count += 1
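In effect, the method grows a scratch filter one element at a time and returns the smallest collection size at which binary search beats a linear scan in at least 75% of the timed repetitions, restoring the original self.bf before returning.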
Example 14: UrlSpider
class UrlSpider(CrawlSpider):
    name = "urlspider"
    allowed_domains = ["tianya.cn"]
    start_urls = ("http://www.hao123.com", )
    rules = (
        Rule(SgmlLinkExtractor(allow=()), callback="parse_resp", follow=True),
    )

    def __init__(self, *args, **kwargs):
        # run using: scrapy crawl urlspider -a url='http://example.com'
        super(UrlSpider, self).__init__(*args, **kwargs)
        self.start_urls = [kwargs.get('url')]
        hostname = urlparse(self.start_urls[0]).hostname
        # wrapping the hostname in a list lets the spider crawl its subdomains
        self.allowed_domains = [hostname]
        self.fingerprints = BloomFilter(3000000, 0.0001)

    def parse_start_url(self, response):
        print "start: " + response.url
        return

    def parse_resp(self, response):
        fp = response.url
        new_fp = obtain_key(fp)
        if new_fp in self.fingerprints:
            return
        self.fingerprints.add(new_fp)
        item = SiteCrawlItem()
        item["url"] = response.url
        yield item
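obtain_key() is not part of the excerpt; it evidently reduces a URL to a canonical fingerprint before the bloom check. A hypothetical sketch, whose normalization rules are assumptions:

from urlparse import urlparse, urlunparse  # Python 2

def obtain_key(url):
    # Hypothetical canonicalizer: drop the query string and fragment and
    # lowercase the host, so trivially different URLs dedupe together.
    p = urlparse(url)
    return urlunparse((p.scheme, p.netloc.lower(), p.path, '', '', ''))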
Example 15: vacuum_all
def vacuum_all(self, limit=None, time_limit=None, unupdated=False):
    logger.debug('Begin vacuum_all(limit=%s, time_limit=%s, unupdated=%s)', limit, time_limit, unupdated)
    ## TODO: delete SCIFields with SCFilterId not found in SCFilter
    self.plugins = self.load_plugins()
    self.ts = self.term_stat('SupplierCatalogItemFields Vacuum', len(self.plugins))
    now = start_time = datetime.now()
    try:
        transaction.begin()
        for plug in self.plugins.itervalues():
            supplier_catalog_filter_id = plug.supplier_catalog_filter_id()

            ### Generate a bloom filter set of SCIF ids in VersionModel
            model_name = plug.version_model() + 'Model'
            VersionModel = getattr(model, model_name)
            query = DBSession.query(VersionModel.supplier_catalog_item_field_id)
            s = BloomFilter(capacity=query.count() + 1)
            self.ts['sub_total'] = query.count()
            for (supplier_catalog_item_field_id, ) in query.yield_per(100):
                s.add(supplier_catalog_item_field_id)
                self.ts['sub_done'] += 1
            del query

            ### Iterate through SCIFields, deleting any that don't appear in the bloom filter.
            query = DBSession.query(SupplierCatalogItemFieldModel)
            query = query.filter(SupplierCatalogItemFieldModel.supplier_catalog_filter_id == supplier_catalog_filter_id)
            if unupdated is not True:
                query = query.filter(SupplierCatalogItemFieldModel.updated != None)
            if limit is not None:
                query = query.order_by(SupplierCatalogItemFieldModel.vacuumed.nullsfirst())
                query = query.limit(limit)
                logger.debug("LIMIT %i, supplier_catalog_filter_id %s", limit, supplier_catalog_filter_id)
            self.ts['sub_done'] = 0
            self.ts['sub_total'] = query.count()
            for supplier_catalog_item_field in query.yield_per(100):
                if supplier_catalog_item_field.id not in s:
                    logger.debug("Deleting SupplierCatalogItemField %s", supplier_catalog_item_field.id)
                    DBSession.delete(supplier_catalog_item_field)
                else:
                    supplier_catalog_item_field.vacuumed = now
                if self.ts['sub_done'] % 1000 == 0:
                    DBSession.flush()
                self.ts['sub_done'] += 1
            del query
            DBSession.flush()
            if time_limit is not None:
                if datetime.now() > start_time + time_limit:
                    logger.info("Reached Time Limit at %i of %i", self.ts['done'], self.ts['total'])
                    transaction.commit()
                    break
            self.ts['done'] += 1
        transaction.commit()
    except Exception:
        logger.exception("Caught Exception: ")
        transaction.abort()
    finally:
        self.ts.finish()
    logger.debug('End vacuum_all()')
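Note the direction of the bloom filter's one-sided error here: a false positive makes a stale SupplierCatalogItemField look live, so it merely survives until a later pass. Rows are deleted only on definite misses, which means the filter can never cause a live row to be deleted.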