本文整理汇总了Python中w3af.core.data.db.disk_dict.DiskDict.cleanup方法的典型用法代码示例。如果您正苦于以下问题:Python DiskDict.cleanup方法的具体用法?Python DiskDict.cleanup怎么用?Python DiskDict.cleanup使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类w3af.core.data.db.disk_dict.DiskDict
的用法示例。
在下文中一共展示了DiskDict.cleanup方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_remove_table
# 需要导入模块: from w3af.core.data.db.disk_dict import DiskDict [as 别名]
# 或者: from w3af.core.data.db.disk_dict.DiskDict import cleanup [as 别名]
def test_remove_table(self):
    """cleanup() must drop the DiskDict's backing table from the temp DB."""
    dd = DiskDict()
    backing_table = dd.table_name
    temp_db = get_default_temp_db_instance()

    # The table is created as a side effect of constructing the DiskDict
    self.assertTrue(temp_db.table_exists(backing_table))

    dd.cleanup()

    # After cleanup the backing table must be gone
    self.assertFalse(temp_db.table_exists(backing_table))
示例2: test_table_with_prefix
# 需要导入模块: from w3af.core.data.db.disk_dict import DiskDict [as 别名]
# 或者: from w3af.core.data.db.disk_dict.DiskDict import cleanup [as 别名]
def test_table_with_prefix(self):
    """A prefix passed to DiskDict appears in the table name, and cleanup()
    still removes the prefixed table."""
    prefix = 'unittest'
    dd = DiskDict(prefix)

    self.assertIn(prefix, dd.table_name)

    temp_db = get_default_temp_db_instance()
    self.assertTrue(temp_db.table_exists(dd.table_name))

    dd.cleanup()
    self.assertFalse(temp_db.table_exists(dd.table_name))
示例3: ssi
# 需要导入模块: from w3af.core.data.db.disk_dict import DiskDict [as 别名]
# 或者: from w3af.core.data.db.disk_dict.DiskDict import cleanup [as 别名]
#.........这里部分代码省略.........
:return: A string, see above.
"""
yield '<!--#exec cmd="echo -n %s;echo -n %s" -->' % (rand_alpha(5),
rand_alpha(5))
# TODO: Add mod_perl ssi injection support
# http://www.sens.buffalo.edu/services/webhosting/advanced/perlssi.shtml
#yield <!--#perl sub="sub {print qq/If you see this, mod_perl is working!/;}" -->
def _extract_result_from_payload(self, payload):
"""
Extract the expected result from the payload we're sending.
"""
match = self._extract_results_re.search(payload)
return match.group(1) + match.group(2)
def _analyze_result(self, mutant, response):
    """
    Analyze the result of the previously sent request.

    :return: None, save the vuln to the kb.
    """
    # Only analyze mutants for which no bug was reported yet
    if not self._has_no_bug(mutant):
        return

    expected = self._extract_result_from_payload(mutant.get_token_value())

    # The payload echo must appear in this response but not in the
    # original one, otherwise it is a false positive
    if expected not in response:
        return

    if expected in mutant.get_original_response_body():
        return

    desc = 'Server side include (SSI) was found at: %s'
    desc = desc % mutant.found_at()

    vuln = Vuln.from_mutant('Server side include vulnerability', desc,
                            severity.HIGH, response.id, self.get_name(),
                            mutant)
    vuln.add_to_highlight(expected)
    self.kb_append_uniq(self, 'ssi', vuln)
def end(self):
    """
    Called when the plugin won't be used anymore; used to find persistent
    SSI vulnerabilities by re-requesting every stored fuzzable request and
    searching the responses for payload echoes recorded during the scan.

    Example where a persistent SSI can be found:

    Say you have a "guestbook" (a CGI application that allows visitors
    to leave messages for everyone to see) on a server that has SSI
    enabled. Most such guestbooks around the Net actually allow visitors
    to enter HTML code as part of their comments. If the guestbook CGI
    program was not designed carefully to strip SSI commands from the
    input, a malicious visitor can leave <!--#exec cmd="ls" --> as a
    comment and have it executed for every subsequent visitor.

    For a working example please see moth VM.
    """
    matcher = multi_in(self._expected_res_mutant.keys())

    def unique_freqs(freq_list):
        # Yield each fuzzable request only once; the bloom filter keeps
        # the memory usage of the "seen" set bounded
        seen = ScalableBloomFilter()
        for freq in freq_list:
            if freq in seen:
                continue
            seen.add(freq)
            yield freq

    def report_persistent(freq, response):
        # Any stored payload echo found in the response maps back to the
        # mutant that injected it
        for found in matcher.query(response.get_body()):
            mutant = self._expected_res_mutant[found]

            desc = ('Server side include (SSI) was found at: %s'
                    ' The result of that injection is shown by browsing'
                    ' to "%s".')
            desc = desc % (mutant.found_at(), freq.get_url())

            vuln = Vuln.from_mutant('Persistent server side include vulnerability',
                                    desc, severity.HIGH, response.id,
                                    self.get_name(), mutant)
            vuln.add_to_highlight(found)
            self.kb_append(self, 'ssi', vuln)

    self._send_mutants_in_threads(self._uri_opener.send_mutant,
                                  unique_freqs(self._freq_list),
                                  report_persistent,
                                  cache=False)

    self._expected_res_mutant.cleanup()
    self._freq_list.cleanup()
def get_long_desc(self):
"""
:return: A DETAILED description of the plugin functions and features.
"""
return """
示例4: VariantDB
# 需要导入模块: from w3af.core.data.db.disk_dict import DiskDict [as 别名]
# 或者: from w3af.core.data.db.disk_dict.DiskDict import cleanup [as 别名]
class VariantDB(object):
    """
    See the notes on PARAMS_MAX_VARIANTS and PATH_MAX_VARIANTS above. Also
    understand that we'll keep "dirty" versions of the references/fuzzable
    requests in order to be able to answer "False" to a call for
    need_more_variants in a situation like this:

        need_more_variants('http://foo.com/abc?id=32') --> True
        append('http://foo.com/abc?id=32')
        need_more_variants('http://foo.com/abc?id=32') --> False
    """
    HASH_IGNORE_HEADERS = ('referer',)
    TAG = '[variant_db]'

    def __init__(self, params_max_variants=PARAMS_MAX_VARIANTS,
                 path_max_variants=PATH_MAX_VARIANTS):
        # Exact-match store: request hash -> True
        self._variants_eq = DiskDict(table_prefix='variant_db_eq')
        # Similar-match store: cleaned fuzzable request -> variant count
        self._variants = DiskDict(table_prefix='variant_db')
        self.params_max_variants = params_max_variants
        self.path_max_variants = path_max_variants
        self._db_lock = threading.RLock()

    def cleanup(self):
        """Remove both disk-backed tables."""
        self._variants_eq.cleanup()
        self._variants.cleanup()

    def append(self, fuzzable_request):
        """
        :return: True if we added a new fuzzable request variant to the DB,
                 False if no more variants are required for this fuzzable
                 request.
        """
        with self._db_lock:
            #
            # Is the fuzzable request already known to us? (exactly the same)
            #
            request_hash = fuzzable_request.get_request_hash(self.HASH_IGNORE_HEADERS)
            if self._variants_eq.get(request_hash, False):
                return False

            # Store it to avoid duplicated fuzzable requests in our framework
            self._variants_eq[request_hash] = True

            #
            # Do we need more variants for the fuzzable request? (similar match)
            #
            pattern = clean_fuzzable_request(fuzzable_request)
            seen_count = self._variants.get(pattern, None)

            if seen_count is None:
                # First time we see this pattern
                self._variants[pattern] = 1
                return True

            # Requests with parameters get a different budget than bare paths
            uri = fuzzable_request.get_uri()
            if uri.has_query_string() or fuzzable_request.get_raw_data():
                budget = self.params_max_variants
            else:
                budget = self.path_max_variants

            if seen_count >= budget:
                return False

            self._variants[pattern] = seen_count + 1
            return True
示例5: html_comments
# 需要导入模块: from w3af.core.data.db.disk_dict import DiskDict [as 别名]
# 或者: from w3af.core.data.db.disk_dict.DiskDict import cleanup [as 别名]
#.........这里部分代码省略.........
if (word, response.get_url()) not in self._already_reported_interesting:
desc = 'A comment with the string "%s" was found in: "%s".'\
' This could be interesting.'
desc = desc % (word, response.get_url())
i = Info('Interesting HTML comment', desc,
response.id, self.get_name())
i.set_dc(request.get_dc())
i.set_uri(response.get_uri())
i.add_to_highlight(word)
kb.kb.append(self, 'interesting_comments', i)
om.out.information(i.get_desc())
self._already_reported_interesting.add((word,
response.get_url()))
def _html_in_comment(self, comment, request, response):
    """
    Find HTML code in HTML comments.

    Reports an informational finding, once per (comment, URL) pair, when an
    HTML comment contains HTML tags.

    :param comment: The comment text extracted from the response body
    :param request: The HTTP request that generated the response
    :param response: The HTTP response where the comment was found
    :return: None, findings are stored in the kb
    """
    html_in_comment = self.HTML_RE.search(comment)
    if html_in_comment is None:
        return

    # Use one, stable, de-duplication key. The previous code checked the
    # raw comment for membership but stored the *truncated* comment, so
    # long comments were reported again on every response.
    dedup_key = (comment, response.get_url())
    if dedup_key in self._already_reported_interesting:
        return

    # There is HTML code in the comment; normalize it for display only
    printable = comment.strip()
    printable = printable.replace('\n', '')
    printable = printable.replace('\r', '')
    printable = printable[:40]

    desc = 'A comment with the string "%s" was found in: "%s".'\
           ' This could be interesting.'
    desc = desc % (printable, response.get_url())

    i = Info('HTML comment contains HTML code', desc,
             response.id, self.get_name())
    i.set_dc(request.get_dc())
    i.set_uri(response.get_uri())
    i.add_to_highlight(html_in_comment.group(0))

    kb.kb.append(self, 'html_comment_hides_html', i)
    om.out.information(i.get_desc())

    self._already_reported_interesting.add(dedup_key)
def _is_new(self, comment, response):
"""
Make sure that we perform a thread safe check on the self._comments dict,
in order to avoid duplicates.
"""
with self._plugin_lock:
#pylint: disable=E1103
comment_data = self._comments.get(comment, None)
if comment_data is None:
self._comments[comment] = [(response.get_url(), response.id), ]
return True
else:
if response.get_url() not in [x[0] for x in comment_data]:
comment_data.append((response.get_url(), response.id))
self._comments[comment] = comment_data
return True
#pylint: enable=E1103
return False
def end(self):
    """
    This method is called when the plugin wont be used anymore.

    Prints a summary of every comment found, grouped by comment text, and
    then frees the disk-backed dict.

    :return: None
    """
    summary_lines = []

    # iterkeys(): Python 2, avoids materializing the full key list
    for comment in self._comments.iterkeys():
        locations = self._comments[comment]
        normalized = ' '.join(comment.split())

        if len(normalized) > 40:
            msg = 'A comment with the string "%s..." (and %s more bytes)'\
                  ' was found on these URL(s):'
            om.out.information(
                msg % (normalized[:40], str(len(normalized) - 40)))
        else:
            msg = 'A comment containing "%s" was found on these URL(s):'
            om.out.information(msg % (normalized))

        for url, request_id in locations:
            summary_lines.append('- ' + url +
                                 ' (request with id: ' + str(request_id) + ')')

    # All URL lines are printed sorted, after all the headers
    summary_lines.sort()
    for line in summary_lines:
        om.out.information(line)

    self._comments.cleanup()
def get_long_desc(self):
"""
:return: A DETAILED description of the plugin functions and features.
"""
return """
示例6: DiskDeque
# 需要导入模块: from w3af.core.data.db.disk_dict import DiskDict [as 别名]
# 或者: from w3af.core.data.db.disk_dict.DiskDict import cleanup [as 别名]
class DiskDeque(object):
"""
The base code for this file comes from [0], I've modified it to use a
DiskDict which stores the "self.data" dictionary to disk in order to save
memory.
[0] https://code.activestate.com/recipes/259179/
"""
def __init__(self, iterable=(), maxsize=-1):
    """
    :param iterable: Initial items to append to the deque
    :param maxsize: Max number of items to hold; -1 means unbounded
    """
    # Only initialize the storage once; presumably this guards against
    # re-running __init__ on an already-built instance - TODO confirm
    if not hasattr(self, 'data'):
        self.left = self.right = 0
        # Disk-backed storage; keys are virtual indexes in [left, right)
        self.data = DiskDict(table_prefix='deque')
        self.maxsize = maxsize
    self.extend(iterable)
def append(self, x):
    """Append x to the right side, evicting from the left if over maxsize."""
    self.data[self.right] = x
    self.right += 1

    # maxsize of -1 means "unbounded"
    if self.maxsize != -1 and len(self) > self.maxsize:
        self.popleft()
def appendleft(self, x):
    """Append x to the left side, evicting from the right if over maxsize."""
    self.left -= 1
    self.data[self.left] = x

    # maxsize of -1 means "unbounded"
    if self.maxsize != -1 and len(self) > self.maxsize:
        self.pop()
def pop(self):
    """Remove and return the rightmost item; raise IndexError when empty."""
    if self.left == self.right:
        raise IndexError('cannot pop from empty deque')

    self.right -= 1
    value = self.data[self.right]
    del self.data[self.right]
    return value
def popleft(self):
    """Remove and return the leftmost item; raise IndexError when empty."""
    if self.left == self.right:
        raise IndexError('cannot pop from empty deque')

    value = self.data[self.left]
    del self.data[self.left]
    self.left += 1
    return value
def clear(self):
    """Drop all items (and the backing disk table), then reset the indexes."""
    self.data.cleanup()
    self.left = self.right = 0
def extend(self, iterable):
    """Append every item of iterable to the right side, in order."""
    for item in iterable:
        self.append(item)
def extendleft(self, iterable):
    """Append every item of iterable to the left side; note this reverses
    the relative order of the items, like collections.deque.extendleft."""
    for item in iterable:
        self.appendleft(item)
def rotate(self, n=1):
    """Rotate the deque n steps to the right (pop right, push left)."""
    if self:
        # Normalize n into [0, len) so negative / oversized values work
        n %= len(self)
        for _ in xrange(n):
            self.appendleft(self.pop())
def __getitem__(self, i):
    """Return the item at index i; negative indexes count from the right."""
    if i < 0:
        i += len(self)
    try:
        # Translate the public index into the internal key space
        return self.data[i + self.left]
    except KeyError:
        raise IndexError
def __setitem__(self, i, value):
    """Set the item at index i; negative indexes count from the right."""
    if i < 0:
        i += len(self)
    try:
        # Translate the public index into the internal key space
        self.data[i + self.left] = value
    except KeyError:
        raise IndexError
def __delitem__(self, i):
    """Delete the item at index i by shifting the tail one slot left."""
    size = len(self)
    if not (-size <= i < size):
        raise IndexError

    if i < 0:
        i += size

    # Shift every item after i one slot to the left, then drop the
    # (now duplicated) rightmost item
    storage = self.data
    for j in xrange(self.left + i, self.right - 1):
        storage[j] = storage[j + 1]
    self.pop()
def __len__(self):
    """Return the number of items currently stored in the deque."""
    return self.right - self.left
def __cmp__(self, other):
    """
    Python 2 three-way comparison: instances of different types order by
    their type objects; same-type deques compare element-wise, like lists.
    """
    if type(self) != type(other):
        return cmp(type(self), type(other))
    return cmp(list(self), list(other))
def __repr__(self, _track=[]):
if id(self) in _track:
return '...'
#.........这里部分代码省略.........
示例7: ssi
# 需要导入模块: from w3af.core.data.db.disk_dict import DiskDict [as 别名]
# 或者: from w3af.core.data.db.disk_dict.DiskDict import cleanup [as 别名]
#.........这里部分代码省略.........
"""
# Store the mutants in order to be able to analyze the persistent case
# later
expected_results = self._get_expected_results(mutant)
for expected_result in expected_results:
self._expected_mutant_dict[expected_result] = mutant
# Now we analyze the "reflected" case
if self._has_bug(mutant):
return
for expected_result in expected_results:
if expected_result not in response:
continue
if expected_result in mutant.get_original_response_body():
continue
desc = "Server side include (SSI) was found at: %s"
desc %= mutant.found_at()
v = Vuln.from_mutant(
"Server side include vulnerability", desc, severity.HIGH, response.id, self.get_name(), mutant
)
v.add_to_highlight(expected_result)
self.kb_append_uniq(self, "ssi", v)
def end(self):
    """
    This method is called when the plugin wont be used anymore and is used
    to find persistent SSI vulnerabilities.

    Re-requests every fuzzable request known to the framework and searches
    each response for payload echoes stored in self._expected_mutant_dict
    during the scan.

    Example where a persistent SSI can be found: a "guest book" CGI that
    stores visitor comments without stripping SSI commands will run
    <!--#exec cmd="ls" --> for every subsequent visitor.

    For a working example please see moth VM.
    """
    known_requests = kb.kb.get_all_known_fuzzable_requests()

    self._send_mutants_in_threads(
        self._uri_opener.send_mutant, known_requests, self._analyze_persistent, cache=False
    )

    self._expected_mutant_dict.cleanup()
def _analyze_persistent(self, freq, response):
    """
    Analyze the response of sending each fuzzable request found by the
    framework, trying to identify any locations where we might have injected
    a payload.

    :param freq: The fuzzable request
    :param response: The HTTP response
    :return: None, vulns are stored in KB
    """
    matcher = multi_in(self._expected_mutant_dict.keys())

    for echoed_payload in matcher.query(response.get_body()):
        # Map the echoed payload back to the mutant that injected it
        mutant = self._expected_mutant_dict[echoed_payload]

        desc = (
            "Server side include (SSI) was found at: %s"
            " The result of that injection is shown by browsing"
            ' to "%s".'
        )
        desc %= (mutant.found_at(), freq.get_url())

        vuln = Vuln.from_mutant(
            "Persistent server side include vulnerability",
            desc,
            severity.HIGH,
            response.id,
            self.get_name(),
            mutant,
        )
        vuln.add_to_highlight(echoed_payload)
        self.kb_append(self, "ssi", vuln)
def get_long_desc(self):
"""
:return: A DETAILED description of the plugin functions and features.
"""
return """
示例8: CachedDiskDict
# 需要导入模块: from w3af.core.data.db.disk_dict import DiskDict [as 别名]
# 或者: from w3af.core.data.db.disk_dict.DiskDict import cleanup [as 别名]
class CachedDiskDict(object):
"""
This data structure keeps the `max_in_memory` most frequently accessed
keys in memory and stores the rest on disk.
It is ideal for situations where a DiskDict is frequently accessed,
fast read / writes are required, and items can take considerable amounts
of memory.
"""
def __init__(self, max_in_memory=50, table_prefix=None):
    """
    :param max_in_memory: The max number of items to keep in memory
    :param table_prefix: Optional prefix for the disk-backed table name
    """
    assert max_in_memory > 0, 'In-memory items must be > 0'

    self._max_in_memory = max_in_memory
    self._disk_dict = DiskDict(table_prefix=self._get_table_prefix(table_prefix))
    # Hot items live here; everything else lives in self._disk_dict
    self._in_memory = dict()
    # key -> access count, used to decide which keys stay in memory
    self._access_count = dict()
def cleanup(self):
    # Drop the disk-backed table. NOTE(review): the in-memory caches
    # (_in_memory, _access_count) are not cleared here - confirm that is
    # intended before relying on cleanup() to fully reset the instance.
    self._disk_dict.cleanup()
def _get_table_prefix(self, table_prefix):
    """
    Build a unique table prefix for the backing DiskDict.

    :param table_prefix: User-supplied prefix or None
    :return: 'cached_disk_dict_<random>' when no prefix was given, or
             'cached_disk_dict_<prefix>_<random>' otherwise
    """
    if table_prefix is None:
        return 'cached_disk_dict_%s' % rand_alpha(16)

    args = (table_prefix, rand_alpha(16))
    return 'cached_disk_dict_%s_%s' % args
def get(self, key, default=-456):
    """
    Dict-like get().

    :param key: The key to look up
    :param default: Value to return when the key is missing. When not
                    supplied, a KeyError is raised instead (the -456
                    default is only a "no default given" marker).
    :return: The stored value, or `default` when the key is missing
    :raise: KeyError when the key is missing and no default was given
    """
    try:
        return self[key]
    except KeyError:
        # The original code compared the sentinel with `is not`, which
        # relies on CPython interning the -456 constant and is not a
        # guaranteed behavior; compare by value instead.
        if default != -456:
            return default
        # Include the key so callers get a useful error message
        raise KeyError(key)
def __getitem__(self, key):
    """
    Look the key up in memory first, falling back to disk, and record the
    access so that frequently used keys migrate to memory.
    """
    try:
        value = self._in_memory[key]
    except KeyError:
        # This will raise KeyError if the key is not found, and that is OK
        # because we don't need to increase the access count when the
        # key doesn't exist
        value = self._disk_dict[key]

    self._increase_access_count(key)
    return value
def _get_keys_for_memory(self):
    """
    :return: Generate the names of the keys that should be kept in memory.
             For example, if `max_in_memory` is set to 2 and:

                _in_memory: {1: None, 2: None}
                _access_count: {1: 10, 2: 20, 3: 5}
                _disk_dict: {3: None}

             Then the method will generate [1, 2].
    """
    # Python 2: items() returns a list, and sort() takes a cmp-style
    # comparator. sort_by_value is defined elsewhere in this module;
    # presumably it ranks by access count - TODO confirm its direction.
    items = self._access_count.items()
    items.sort(sort_by_value)

    # Yield at most max_in_memory of the best-ranked keys
    iterator = min(self._max_in_memory, len(items))

    for i in xrange(iterator):
        yield items[i][0]
def _belongs_in_memory(self, key):
"""
:param key: A key
:return: True if the key should be stored in memory
"""
if key in self._get_keys_for_memory():
return True
return False
def _increase_access_count(self, key):
    """Bump the access counter for key and rebalance the memory/disk tiers."""
    self._access_count[key] = self._access_count.get(key, 0) + 1

    # Rebalancing may move this (or another) key between the two tiers
    self._move_key_to_disk_if_needed(key)
    self._move_key_to_memory_if_needed(key)
def _move_key_to_disk_if_needed(self, key):
"""
Analyzes the current access count for the last accessed key and
checks if any if the keys in memory should be moved to disk.
:param key: The key that was last accessed
:return: The name of the key that was moved to disk, or None if
all the keys are still in memory.
#.........这里部分代码省略.........