本文整理汇总了Python中w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter类的典型用法代码示例。如果您正苦于以下问题:Python ScalableBloomFilter类的具体用法?Python ScalableBloomFilter怎么用?Python ScalableBloomFilter使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ScalableBloomFilter类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: filtered_freq_generator
def filtered_freq_generator(freq_list):
    """Yield each item of freq_list at most once.

    A ScalableBloomFilter remembers everything that was already yielded,
    so duplicates in freq_list are silently skipped.
    """
    seen = ScalableBloomFilter()

    for freq in freq_list:
        if freq in seen:
            continue

        seen.add(freq)
        yield freq
示例2: __init__
def __init__(self):
    """Set up the plugin's internal state and user-configurable settings."""
    CrawlPlugin.__init__(self)

    # Internal state: bloom filters that remember what was already
    # crawled / verified so work is not repeated
    self._already_crawled = ScalableBloomFilter()
    self._already_verified = ScalableBloomFilter()

    # User configured parameter
    self._max_depth = 3
示例3: test_bloom_filter
def test_bloom_filter(self):
    """Smoke-test ScalableBloomFilter: every item that was added must be
    reported as present afterwards.

    The original version evaluated ``data in f`` and discarded the
    result, so the membership check could never fail; assert it instead.
    """
    f = ScalableBloomFilter()

    for i in xrange(20000):
        f.add((i, i))

    for i in xrange(20000):
        self.assertIn((i, i), f)
示例4: __init__
def __init__(self):
    """Initialize user settings and the plugin's internal state."""
    CrawlPlugin.__init__(self)

    # User configured parameters
    self._wordlist = os.path.join(ROOT_PATH, "plugins", "crawl",
                                  "content_negotiation",
                                  "common_filenames.db")

    # Internal variables
    self._already_tested_dir = ScalableBloomFilter()
    self._already_tested_resource = ScalableBloomFilter()
    self._content_negotiation_enabled = None
    self._to_bruteforce = Queue.Queue()

    # Retry budget: detection is not that accurate, so probe the remote
    # host up to three times before giving up
    self._tries_left = 3
示例5: __init__
def __init__(self):
    """Initialize the DVCS resource table and the plugin's internal state."""
    CrawlPlugin.__init__(self)

    # Internal variables
    self._analyzed_dirs = ScalableBloomFilter()
    self._analyzed_filenames = ScalableBloomFilter()

    # Map each DVCS artifact we probe for to the path that identifies it
    # and the handler that analyzes a positive response.
    self._dvcs = {
        'git repository': {'filename': '.git/index',
                           'function': self.git_index},
        'git ignore': {'filename': '.gitignore',
                       'function': self.ignore_file},
        'hg repository': {'filename': '.hg/dirstate',
                          'function': self.hg_dirstate},
        'hg ignore': {'filename': '.hgignore',
                      'function': self.ignore_file},
        'bzr repository': {'filename': '.bzr/checkout/dirstate',
                           'function': self.bzr_checkout_dirstate},
        'bzr ignore': {'filename': '.bzrignore',
                       'function': self.ignore_file},
        'svn repository': {'filename': '.svn/entries',
                           'function': self.svn_entries},
        'svn ignore': {'filename': '.svnignore',
                       'function': self.ignore_file},
        'cvs repository': {'filename': 'CVS/Entries',
                           'function': self.cvs_entries},
        'cvs ignore': {'filename': '.cvsignore',
                       'function': self.ignore_file},
    }
示例6: __init__
def __init__(self):
    """Initialize internal state and the user-configurable options."""
    CrawlPlugin.__init__(self)

    # Bloom filter that remembers which URLs were already visited
    self._already_visited = ScalableBloomFilter()

    # User options
    self._fuzz_images = False
    self._max_digit_sections = 4
示例7: __init__
def __init__(self):
    """Initialize the plugin's internal state."""
    InfrastructurePlugin.__init__(self)

    # Internal variables
    self._first_exec = True
    # Remembers which names were already queried, to avoid duplicates
    self._already_queried = ScalableBloomFilter()
    self._can_resolve_domain_names = False
示例8: __init__
def __init__(self, grep_plugins, w3af_core):
    """
    :param grep_plugins: Instances of grep plugins in a list
    :param w3af_core: The w3af core that we'll use for status reporting
    """
    # max_in_queue_size is the number of items that will be stored
    # in-memory in the consumer queue.
    #
    # Any items exceeding max_in_queue_size will be stored on-disk, which
    # is slow but will prevent any high memory usage imposed by this part
    # of the framework.
    max_in_queue_size = 20

    # NOTE: the original code also computed thread_pool_size (2) and
    # max_pool_queued_tasks (thread_pool_size * 3) but only passed them
    # to the base class in commented-out kwargs; the dead computation
    # and commented-out arguments were removed. create_pool=False means
    # no pool is created, so those settings had no effect.
    super(grep, self).__init__(grep_plugins,
                               w3af_core,
                               create_pool=False,
                               thread_name='Grep',
                               max_in_queue_size=max_in_queue_size)

    # Remembers which items were already analyzed by the grep plugins
    self._already_analyzed = ScalableBloomFilter()
示例9: __init__
def __init__(self):
    """Initialize internal state, database paths and user settings."""
    CrawlPlugin.__init__(self)

    # internal variables
    self._exec = True
    self._already_analyzed = ScalableBloomFilter()

    # User configured parameters: locations of the scan databases
    self._db_file = os.path.join(ROOT_PATH, 'plugins', 'crawl', 'pykto',
                                 'scan_database.db')
    self._extra_db_file = os.path.join(ROOT_PATH, 'plugins', 'crawl',
                                       'pykto', 'w3af_scan_database.db')

    # Candidate directories and user names used during the scan
    self._cgi_dirs = ['/cgi-bin/']
    self._admin_dirs = ['/admin/', '/adm/']
    self._users = ['adm', 'bin', 'daemon', 'ftp', 'guest', 'listen',
                   'lp', 'mysql', 'noaccess', 'nobody', 'nobody4',
                   'nuucp', 'operator', 'root', 'smmsp', 'smtp',
                   'sshd', 'sys', 'test', 'unknown']
    self._nuke = ['/', '/postnuke/', '/postnuke/html/', '/modules/',
                  '/phpBB/', '/forum/']

    self._mutate_tests = False
示例10: __init__
def __init__(self):
    """Initialize the plugin's internal state."""
    GrepPlugin.__init__(self)

    # Internal variables
    # On-disk storage for the comments found during the scan
    self._comments = DiskDict(table_prefix='html_comments')
    # Remembers what was already reported, to avoid duplicates
    self._already_reported = ScalableBloomFilter()
    self._end_was_called = False
示例11: __init__
def __init__(self):
    """Initialize the plugin's internal state."""
    GrepPlugin.__init__(self)

    # Remembers what was already reported, to avoid duplicates
    self._already_reported = ScalableBloomFilter()

    # Regex used to split text into words. Written as a raw string so the
    # backslash in \w is passed through to the regex engine untouched
    # (non-raw '\w' only works because it is not a recognized Python
    # escape, and is flagged as an invalid escape by newer Pythons).
    self._split_re = re.compile(r'[^\w]')
示例12: __init__
def __init__(self):
    """Initialize internal state and the user-configurable options."""
    CrawlPlugin.__init__(self)

    # Internal variables
    self._headers = None
    self._first_time = True
    # User option
    self._fuzz_images = False
    # Remembers what was already processed, to avoid duplicates
    self._seen = ScalableBloomFilter()
示例13: frontpage_version
class frontpage_version(InfrastructurePlugin):
"""
Search FrontPage Server Info file and if it finds it will determine its version.
:author: Viktor Gazdag ( [email protected] )
"""
VERSION_RE = re.compile('FPVersion="(.*?)"', re.IGNORECASE)
ADMIN_URL_RE = re.compile('FPAdminScriptUrl="(.*?)"', re.IGNORECASE)
AUTHOR_URL_RE = re.compile('FPAuthorScriptUrl="(.*?)"', re.IGNORECASE)
def __init__(self):
InfrastructurePlugin.__init__(self)
# Internal variables
self._analyzed_dirs = ScalableBloomFilter()
@runonce(exc_class=RunOnce)
def discover(self, fuzzable_request):
"""
For every directory, fetch a list of files and analyze the response.
:param fuzzable_request: A fuzzable_request instance that contains
(among other things) the URL to test.
"""
for domain_path in fuzzable_request.get_url().get_directories():
if domain_path in self._analyzed_dirs:
continue
# Save the domain_path so I know I'm not working in vane
self._analyzed_dirs.add(domain_path)
# Request the file
frontpage_info_url = domain_path.url_join("_vti_inf.html")
try:
response = self._uri_opener.GET(frontpage_info_url,
cache=True)
except BaseFrameworkException, w3:
fmt = 'Failed to GET Frontpage Server _vti_inf.html file: "%s"'\
'. Exception: "%s".'
om.out.debug(fmt % (frontpage_info_url, w3))
else:
# Check if it's a Frontpage Info file
if not is_404(response):
fr = FuzzableRequest(response.get_uri())
self.output_queue.put(fr)
self._analyze_response(response)
示例14: __init__
def __init__(self):
    """Initialize the plugin's internal state."""
    InfrastructurePlugin.__init__(self)

    # Internal variables
    self._already_tested = ScalableBloomFilter()

    # On real web applications, if we can't trigger an error in the first
    # MAX_TESTS tests, it simply won't happen and we have to stop testing.
    self.MAX_TESTS = 25
示例15: __init__
def __init__(self):
    """Initialize the plugin's internal state and the listing parser."""
    CrawlPlugin.__init__(self)

    # Internal variables
    self._analyzed_dirs = ScalableBloomFilter()

    # Matches one line of a unix-style directory listing, e.g.:
    # -rw-r--r-- 1 andresr w3af 8139 Apr 12 13:23 foo.zip
    #
    # Written as a raw string so the \s, \d and \w escapes reach the
    # regex engine untouched (in a non-raw literal they only survive
    # because they are not recognized Python escapes, and newer Pythons
    # warn about them).
    regex_str = r'[a-z-]{10}\s*\d+\s*(.*?)\s+(.*?)\s+\d+\s+\w+\s+\d+\s+[0-9:]{4,5}\s+(.*)'
    self._listing_parser_re = re.compile(regex_str)