

Python DiskList.cleanup method code examples

This article collects typical usage examples of the Python method w3af.core.data.db.disk_list.DiskList.cleanup. If you are wondering what DiskList.cleanup does, how to call it, or what real-world usage looks like, the hand-picked examples below should help. You can also explore further usage examples of w3af.core.data.db.disk_list.DiskList, the class this method belongs to.


The following shows 15 code examples of the DiskList.cleanup method, sorted by popularity by default.
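Before diving into the examples, here is a minimal usage sketch (written for this article, not taken from the w3af sources) that shows the typical DiskList lifecycle: create the list, use it like a regular Python list, then call cleanup() to drop its backing table. It assumes a working w3af checkout on the PYTHONPATH, since DiskList persists its items in w3af's temporary SQLite database.

# Minimal sketch (assumption: w3af is importable; DiskList persists its items
# in a table inside w3af's temporary SQLite database).
from w3af.core.data.db.disk_list import DiskList

urls = DiskList(table_prefix='example')   # the table name will contain 'example'
urls.append('http://target/a')
urls.append('http://target/b')

print('http://target/a' in urls)          # membership checks are supported
for url in urls:                          # iteration reads the items back from disk
    print(url)

# cleanup() drops the backing table; the DiskList must not be reused afterwards
# (as Example 1 shows, a later append() fails with an AssertionError).
urls.cleanup()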

Example 1: test_remove_table_then_add

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]
    def test_remove_table_then_add(self):
        disk_list = DiskList()
        disk_list.append(1)

        disk_list.cleanup()

        self.assertRaises(AssertionError, disk_list.append, 1)
Author: PatidarWeb, Project: w3af, Lines: 9, Source: test_disk_list.py

Example 2: test_remove_table

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]
    def test_remove_table(self):
        disk_list = DiskList()
        table_name = disk_list.table_name
        db = get_default_temp_db_instance()

        self.assertTrue(db.table_exists(table_name))

        disk_list.cleanup()

        self.assertFalse(db.table_exists(table_name))
Author: PatidarWeb, Project: w3af, Lines: 12, Source: test_disk_list.py

Example 3: test_table_name_with_prefix

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]
    def test_table_name_with_prefix(self):
        _unittest = 'unittest'
        disk_list = DiskList(_unittest)

        self.assertIn(_unittest, disk_list.table_name)
        db = get_default_temp_db_instance()

        self.assertTrue(db.table_exists(disk_list.table_name))

        disk_list.cleanup()

        self.assertFalse(db.table_exists(disk_list.table_name))
Author: PatidarWeb, Project: w3af, Lines: 14, Source: test_disk_list.py

Example 4: generic

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........

        ratio = self._diff_ratio + (1 - original_to_limit)

        #om.out.debug('original_to_error: ' +  str(original_to_error) )
        #om.out.debug('limit_to_error: ' +  str(limit_to_error) )
        #om.out.debug('original_to_limit: ' +  str(original_to_limit) )
        #om.out.debug('ratio: ' +  str(ratio) )

        if original_to_error < ratio and limit_to_error < ratio:
            # Maybe the limit I requested wasn't really a non-existent one
            # (and the error page really found the limit),
            # let's request a new limit (one that hopefully doesn't exist)
            # in order to remove some false positives
            limit_response2 = self._get_limit_response(mutant)

            id_list = [orig_resp.id, limit_response.id, error_response.id]

            if relative_distance(limit_response2.get_body(), limit_response.get_body()) > \
                    1 - self._diff_ratio:
                # The two limits are "equal"; It's safe to suppose that we have found the
                # limit here and that the error string really produced an error
                self._potential_vulns.append((mutant.get_url(),
                                              mutant.get_var(),
                                              mutant, id_list))


    def _get_limit_response(self, m):
        """
        We request the limit (something that doesn't exist)
            - If http://localhost/a.php?b=1 ; then I should request b=12938795
                                                                 (random number)
            - If http://localhost/a.php?b=abc ; then I should request b=hnv98yks
                                                                    (random alnum)

        :return: The limit response object
        """
        # Copy the dc, needed to make a good vuln report
        dc = copy.deepcopy(m.get_dc())

        if m.get_original_value().isdigit():
            m.set_mod_value(rand_number(length=8))
        else:
            m.set_mod_value(rand_alnum(length=8))
        limit_response = self._uri_opener.send_mutant(m)

        # restore the dc
        m.set_dc(dc)
        return limit_response

    def end(self):
        """
        This method is called when the plugin won't be used anymore.
        """
        all_vulns_and_infos = kb.kb.get_all_vulns()
        all_vulns_and_infos.extend(kb.kb.get_all_infos())

        for url, variable, mutant, id_list in self._potential_vulns:
            for info in all_vulns_and_infos:
                if info.get_var() == variable and info.get_url() == url:
                    break
            else:
                desc = 'An unidentified vulnerability was found at: %s'
                desc = desc % mutant.found_at()
                
                v = Vuln.from_mutant('Unidentified vulnerability', desc,
                                     severity.MEDIUM, id_list, self.get_name(),
                                     mutant)
        
                self.kb_append_uniq(self, 'generic', v)
        
        self._potential_vulns.cleanup()        
                
    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d = 'If two strings have a diff ratio less than diff_ratio, then they'\
            '  are really different.'
        o = opt_factory('diff_ratio', self._diff_ratio, d, 'float')
        ol.add(o)

        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user interface
        generated by the framework using the result of get_options().

        :param OptionList: A dictionary with the options for the plugin.
        :return: No value is returned.
        """
        self._diff_ratio = options_list['diff_ratio'].get_value()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Author: 3rdDegree, Project: w3af, Lines: 104, Source: generic.py

Example 5: click_jacking

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]
class click_jacking(GrepPlugin):
    """
    Grep every page for X-Frame-Options header.

    :author: Taras ([email protected])
    """

    def __init__(self):
        GrepPlugin.__init__(self)

        self._total_count = 0
        self._vuln_count = 0
        self._vulns = DiskList(table_prefix='click_jacking')
        self._ids = DiskList(table_prefix='click_jacking')

    def grep(self, request, response):
        """
        Check x-frame-options header
        """
        if not response.is_text_or_html():
            return

        self._total_count += 1

        headers = response.get_headers()
        x_frame_options, header_name = headers.iget('x-frame-options', '')

        if not x_frame_options.lower() in ('deny', 'sameorigin'):
            self._vuln_count += 1
            if response.get_url() not in self._vulns:
                self._vulns.append(response.get_url())
                self._ids.append(response.id)

    def end(self):
        # If all URLs implement protection, don't report anything.
        if not self._vuln_count:
            return

        response_ids = [_id for _id in self._ids]
        
        # If none of the URLs implement protection, simply report
        # ONE vulnerability that says that.
        if self._total_count == self._vuln_count:
            desc = ('The whole target has no protection (X-Frame-Options'
                    ' header) against Click-Jacking attacks')
        # If most of the URLs implement the protection but some
        # don't, report ONE vulnerability saying: "Most are protected,
        # but x, y are not.
        if self._total_count > self._vuln_count:
            desc = ('Some URLs have no protection (X-Frame-Options header)'
                    ' against Click-Jacking attacks. The list of vulnerable'
                    ' URLs is:\n\n - ')

            desc += ' - '.join([str(url) + '\n' for url in self._vulns])

        v = Vuln('Click-Jacking vulnerability', desc,
                 severity.MEDIUM, response_ids, self.get_name())
        
        self.kb_append(self, 'click_jacking', v)
        
        self._vulns.cleanup()
        self._ids.cleanup()

    def get_long_desc(self):
        return """
Author: batmanWjw, Project: w3af, Lines: 67, Source: click_jacking.py

Example 6: csp

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]
class csp(GrepPlugin):
    """
    Identifies incorrect or too permissive Content Security Policy headers.
    """
    VULN_NAME = 'CSP vulnerability'

    def __init__(self):
        """
        Class init
        """
        GrepPlugin.__init__(self)

        self._total_count = 0
        self._vulns = DiskList(table_prefix='csp')
        self._urls = DiskList(table_prefix='csp')
                
    def get_long_desc(self):
        return """
        This plugin identifies incorrect or too permissive CSP (Content Security Policy)
        headers returned by the web application under analysis.

        Additional information: 
         * https://www.owasp.org/index.php/Content_Security_Policy
         * http://www.w3.org/TR/CSP
        """        

    def grep(self, request, response):
        """
        Perform search on current HTTP request/response exchange.
        Store information about vulns for further global processing.
        
        @param request: HTTP request
        @param response: HTTP response  
        """
        # Check that current URL has not been already analyzed
        response_url = response.get_url().uri2url()
        if response_url in self._urls:
            return        

        self._urls.append(response_url)
                
        # Search issues using dedicated module
        csp_vulns = find_vulns(response)
        
        # Analyze issue list
        if len(csp_vulns) > 0:
            vuln_store_item = DiskCSPVulnStoreItem(response_url,
                                                   response.id,
                                                   csp_vulns)
            self._vulns.append(vuln_store_item)

            # Increment the vulnerabilities counter
            for csp_directive_name in csp_vulns:
                self._total_count += len(csp_vulns[csp_directive_name])
                
    def end(self):
        """
        Perform global analysis for all vulnerabilities found.
        """
        # Check if vulns have been found
        if self._total_count == 0:
            return
        
        # Parse vulns collection
        vuln_already_reported = []

        for vuln_store_item in self._vulns:
            for csp_directive_name, csp_vulns_list in vuln_store_item.csp_vulns.iteritems():
                for csp_vuln in csp_vulns_list:
                    # Check if the current vuln is common (shared) to several
                    # url processed and has been already reported
                    if csp_vuln.desc in vuln_already_reported:
                        continue

                    # Search for current vuln occurrences in order to know if
                    # the vuln is common (shared) to several url processed
                    occurrences = self._find_occurrences(csp_vuln.desc)

                    if len(occurrences) > 1:
                        # Shared vuln case
                        v = Vuln(self.VULN_NAME, csp_vuln.desc,
                                 csp_vuln.severity, occurrences,
                                 self.get_name())
                        v.set_url(vuln_store_item.url.base_url())

                        vuln_already_reported.append(csp_vuln.desc)
                    else:
                        # Isolated vuln case
                        v = Vuln(self.VULN_NAME, csp_vuln.desc,
                                 csp_vuln.severity, vuln_store_item.resp_id,
                                 self.get_name())
                        v.set_url(vuln_store_item.url)

                    # Report vuln
                    self.kb_append(self, 'csp', v)
                
        # Cleanup
        self._urls.cleanup()
        self._vulns.cleanup()

#......... some code omitted here .........
Author: RON313, Project: w3af, Lines: 103, Source: csp.py

Example 7: cache_control

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........
        elif response.body == '':
            return
        
        else:
            self._total_count += 1
    
            cache_control_settings = self._get_cache_control(response)
            self._analyze_cache_control(cache_control_settings, response)
    
    def _get_cache_control(self, response):
        """
        :param response: The http response we want to extract the information
                         from.
        :return: A list with the headers and meta tag information used to
                 configure the browser cache control.
        """
        res = []

        cache_control_headers = self.SAFE_CONFIG.keys()
        headers = response.get_headers()
        
        for _type in cache_control_headers:
            header_value, _ = headers.iget(_type, None)
            if header_value is not None:
                res.append(CacheSettings(_type, header_value.lower()))
                
        try:
            doc_parser = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            pass
        else:
            for meta_tag in doc_parser.get_meta_tags():
                header_name = meta_tag.get('http-equiv', None)
                header_value = meta_tag.get('content', None)
                if header_name is not None and header_value is not None:
                    header_name = header_name.lower()
                    header_value = header_value.lower()
                    if header_name in cache_control_headers:
                        res.append(CacheSettings(header_name, header_value))
        
        return res

    def _analyze_cache_control(self, cache_control_settings, response):
        """
        Analyze the cache control settings set in headers and meta tags,
        store the information to report the vulnerabilities.
        """
        received_headers = set()
        
        for cache_setting in cache_control_settings:
            expected_header = self.SAFE_CONFIG[cache_setting.type]
            received_header = cache_setting.value.lower()
            received_headers.add(cache_setting.type)
            if expected_header not in received_header:
                # The header has an incorrect value
                self.is_vuln(response)
                return
        
        if len(received_headers) != len(self.SAFE_CONFIG):
            # No cache control header found
            self.is_vuln(response)
    
    def is_vuln(self, response):
        self._vuln_count += 1
        if response.get_url() not in self._vulns:
            self._vulns.append(response.get_url())
            self._ids.append(response.id)
    
    def end(self):
        # If all URLs implement protection, don't report anything.
        if not self._vuln_count:
            return

        # If none of the URLs implement protection, simply report
        # ONE vulnerability that says that.
        if self._total_count == self._vuln_count:
            desc = 'The whole target web application has no protection (Pragma'\
                   ' and Cache-Control headers) against sensitive content'\
                   ' caching.'
            
        # If most of the URLs implement the protection but some
        # don't, report ONE vulnerability saying: "Most are protected, but x, y
        # are not.
        if self._total_count > self._vuln_count:
            desc = 'Some URLs have no protection (Pragma and Cache-Control'\
                   ' headers) against sensitive content caching. Among them:\n'
            desc += ' '.join([str(url) + '\n' for url in self._vulns])
        
        response_ids = [_id for _id in self._ids]
        
        v = Vuln('Missing cache control for HTTPS content', desc,
                 severity.LOW, response_ids, self.get_name())
        
        self.kb_append_uniq(self, 'cache_control', v, 'URL')
        
        self._vulns.cleanup()
        self._ids.cleanup()

    def get_long_desc(self):
        return """\
Author: 0x554simon, Project: w3af, Lines: 104, Source: cache_control.py

Example 8: path_disclosure

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........

                        v.set_url(realurl)
                        v['path'] = match
                        v.add_to_highlight(match)
                        
                        self.kb_append(self, 'path_disclosure', v)
                        return True
                    
        return False

    def _longest(self, a, b):
        """
        :param a: A string.
        :param b: Another string.
        :return: The cmp() result of comparing the two string lengths.
        """
        return cmp(len(a), len(b))

    def _attr_value(self, path_disclosure_string, response_body):
        """
        This method was created to remove some false positives.

        :return: True if path_disclosure_string is the value of an attribute inside a tag.

        Examples:
            path_disclosure_string = '/home/image.png'
            response_body = '....<img src="/home/image.png">...'
            return: True

            path_disclosure_string = '/home/image.png'
            response_body = '...<b>Error while processing /home/image.png</b>...'
            return: False
        """
        regex = '<.+?(["|\']%s["|\']).*?>' % re.escape(path_disclosure_string)
        regex_res = re.findall(regex, response_body)
        in_attr = path_disclosure_string in regex_res
        return in_attr

    def _update_KB_path_list(self):
        """
        If a path disclosure was found, I can create a list of full paths to
        all URLs ever visited. This method updates that list.
        """
        path_disc_vulns = kb.kb.get('path_disclosure', 'path_disclosure')
        url_list = kb.kb.get_all_known_urls()
        
        # Now I find the longest match between one of the URLs that w3af has
        # discovered, and one of the path disclosure strings that this plugin
        # has found. I use the longest match because with small match_list I
        # have more probability of making a mistake.
        longest_match = ''
        longest_path_disc_vuln = None
        for path_disc_vuln in path_disc_vulns:
            for url in url_list:
                path_and_file = url.get_path()

                if path_disc_vuln['path'].endswith(path_and_file):
                    if len(longest_match) < len(path_and_file):
                        longest_match = path_and_file
                        longest_path_disc_vuln = path_disc_vuln

        # Now I recalculate the place where all the resources are in disk, all
        # this is done taking the longest_match as a reference, so... if we
        # don't have a longest_match, then nothing is actually done
        if not longest_match:
            return

        # Get the webroot
        webroot = longest_path_disc_vuln['path'].replace(longest_match, '')

        #
        # This if fixes a strange case reported by Olle
        #         if webroot[0] == '/':
        #         IndexError: string index out of range
        # That seems to be because the webroot == ''
        #
        if not webroot:
            return
        
        # Check what path separator we should use (linux / windows)
        path_sep = '/' if webroot.startswith('/') else '\\'

        # Create the remote locations
        remote_locations = []
        for url in url_list:
            remote_path = url.get_path().replace('/', path_sep)
            remote_locations.append(webroot + remote_path)
        remote_locations = list(set(remote_locations))

        kb.kb.raw_write(self, 'list_files', remote_locations)
        kb.kb.raw_write(self, 'webroot', webroot)

    def end(self):
        self._already_added.cleanup()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Author: Adastra-thw, Project: Tortazo, Lines: 104, Source: path_disclosure.py

Example 9: ssi

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........

        :return: A string, see above.
        """
        yield '<!--#exec cmd="echo -n %s;echo -n %s" -->' % (rand_alpha(5),
                                                             rand_alpha(5))

        # TODO: Add mod_perl ssi injection support
        # http://www.sens.buffalo.edu/services/webhosting/advanced/perlssi.shtml
        #yield <!--#perl sub="sub {print qq/If you see this, mod_perl is working!/;}" -->

    def _extract_result_from_payload(self, payload):
        """
        Extract the expected result from the payload we're sending.
        """
        match = self._extract_results_re.search(payload)
        return match.group(1) + match.group(2)

    def _analyze_result(self, mutant, response):
        """
        Analyze the result of the previously sent request.
        :return: None, save the vuln to the kb.
        """
        if self._has_no_bug(mutant):
            e_res = self._extract_result_from_payload(mutant.get_token_value())
            if e_res in response and not e_res in mutant.get_original_response_body():
                
                desc = 'Server side include (SSI) was found at: %s'
                desc = desc % mutant.found_at()
                
                v = Vuln.from_mutant('Server side include vulnerability', desc,
                                     severity.HIGH, response.id, self.get_name(),
                                     mutant)

                v.add_to_highlight(e_res)
                self.kb_append_uniq(self, 'ssi', v)

    def end(self):
        """
        This method is called when the plugin won't be used anymore and is used
        to find persistent SSI vulnerabilities.

        Example where a persistent SSI can be found:

        Say you have a "guestbook" (a CGI application that allows visitors
        to leave messages for everyone to see) on a server that has SSI
        enabled. Most such guestbooks around the Net actually allow visitors
        to enter HTML code as part of their comments. Now, what happens if a
        malicious visitor decides to do some damage by entering the following:

        <!--#exec cmd="ls" -->

        If the guestbook CGI program was designed carefully, to strip SSI
        commands from the input, then there is no problem. But, if it was not,
        there exists the potential for a major headache!

        For a working example please see moth VM.
        """
        multi_in_inst = multi_in(self._expected_res_mutant.keys())

        def filtered_freq_generator(freq_list):
            already_tested = ScalableBloomFilter()

            for freq in freq_list:
                if freq not in already_tested:
                    already_tested.add(freq)
                    yield freq

        def analyze_persistent(freq, response):

            for matched_expected_result in multi_in_inst.query(response.get_body()):
                # We found one of the expected results, now we search the
                # self._persistent_data to find which of the mutants sent it
                # and create the vulnerability
                mutant = self._expected_res_mutant[matched_expected_result]
                
                desc = 'Server side include (SSI) was found at: %s' \
                       ' The result of that injection is shown by browsing'\
                       ' to "%s".' 
                desc = desc % (mutant.found_at(), freq.get_url())
                
                v = Vuln.from_mutant('Persistent server side include vulnerability',
                                     desc, severity.HIGH, response.id,
                                     self.get_name(), mutant)
                
                v.add_to_highlight(matched_expected_result)
                self.kb_append(self, 'ssi', v)

        self._send_mutants_in_threads(self._uri_opener.send_mutant,
                                      filtered_freq_generator(self._freq_list),
                                      analyze_persistent,
                                      cache=False)
        
        self._expected_res_mutant.cleanup()
        self._freq_list.cleanup()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Author: ElAleyo, Project: w3af, Lines: 104, Source: ssi.py

Example 10: xss

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........

    def _analyze_echo_result(self, mutant, response):
        """
        Do we have a reflected XSS?
        
        :return: None, record all the results in the kb.
        """
        # Add data for the persistent xss checking
        if self._check_persistent_xss:
            self._xss_mutants.append((mutant, response.id))

        with self._plugin_lock:
            
            if self._has_bug(mutant):
                return
            
            mod_value = mutant.get_mod_value()

            body_lower = response.get_body().lower()
            mod_value_lower = mod_value.lower()

            for context in get_context_iter(body_lower, mod_value_lower):
                if context.is_executable() or context.can_break(mod_value_lower):
                    self._report_vuln(mutant, response, mod_value)
                    return

    def end(self):
        """
        This method is called when the plugin won't be used anymore.
        """
        if self._check_persistent_xss:
            self._identify_persistent_xss()
        
        self._xss_mutants.cleanup()
    
    def _identify_persistent_xss(self):
        """
        This method is called to check for persistent xss. 
    
        Many times a xss isn't on the page we get after the GET/POST of
        the xss string. This method searches for the xss string on all
        the pages that are known to the framework.
        
        :return: None, Vuln (if any) are saved to the kb.
        """
        # Get all known fuzzable requests from the core
        fuzzable_requests = kb.kb.get_all_known_fuzzable_requests()
        
        self._send_mutants_in_threads(self._uri_opener.send_mutant,
                                      fuzzable_requests,
                                      self._analyze_persistent_result,
                                      grep=False, cache=False)    
    
    def _analyze_persistent_result(self, fuzzable_request, response):
        """
        After performing an HTTP request to "fuzzable_request" and getting
        "response" analyze if the response contains any of the information sent
        by any of the mutants.
        
        :return: None, Vuln (if any) are saved to the kb.
        """
        response_body = response.get_body()
        
        for mutant, mutant_response_id in self._xss_mutants:
            
            mod_value = mutant.get_mod_value()
Author: aricciard, Project: w3af, Lines: 70, Source: xss.py

Example 11: generic

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........
                                           error_response.get_body())
        original_to_limit = relative_distance(limit_response.get_body(),
                                              orig_resp.get_body())

        ratio = self._diff_ratio + (1 - original_to_limit)

        if original_to_error < ratio and limit_to_error < ratio:
            # Maybe the limit I requested wasn't really a non-existent one
            # (and the error page really found the limit),
            # let's request a new limit (one that hopefully doesn't exist)
            # in order to remove some false positives
            limit_response_2 = self._get_limit_response(mutant)
            limit_to_limit = relative_distance(limit_response_2.get_body(),
                                               limit_response.get_body())

            if limit_to_limit > 1 - self._diff_ratio:
                # The two limits are "equal"; It's safe to suppose that we have
                # found the limit here and that the error string really produced
                # an error
                id_list = [orig_resp.id, limit_response.id, error_response.id]
                self._add_potential_vuln(mutant, id_list)

    def _get_limit_response(self, mutant):
        """
        We request the limit (something that doesn't exist)
            - If http://localhost/a.php?b=1
                then I should request b=12938795 (random number)
            - If http://localhost/a.php?b=abc
                then I should request b=hnv98yks (random alnum)

        :return: The limit response object
        """
        mutant_copy = mutant.copy()

        is_digit = mutant.get_token_original_value().isdigit()
        value = rand_number(length=8) if is_digit else rand_alnum(length=8)
        mutant_copy.set_token_value(value)
        limit_response = self._uri_opener.send_mutant(mutant_copy)

        return limit_response

    def end(self):
        """
        This method is called when the plugin won't be used anymore.
        """
        all_findings = kb.kb.get_all_findings()

        for url, variable, mutant, id_list in self._potential_vulns:
            for info in all_findings:
                if info.get_token_name() == variable and info.get_url() == url:
                    break
            else:
                desc = ('An unhandled error, which could potentially translate'
                        ' to a vulnerability, was found at: %s')
                desc %= mutant.found_at()
                
                v = Vuln.from_mutant('Unhandled error in web application', desc,
                                     severity.LOW, id_list, self.get_name(),
                                     mutant)
        
                self.kb_append_uniq(self, 'generic', v)
        
        self._potential_vulns.cleanup()
                
    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d = ('Ratio to use when comparing two HTTP response bodies, if two'
             ' strings have a ratio less than diff_ratio, then they are'
             ' really different.')
        o = opt_factory('diff_ratio', self._diff_ratio, d, FLOAT)
        ol.add(o)

        d = ('When enabled this plugin will send an extended payload set which'
             ' might trigger bugs and vulnerabilities which are not found by'
             ' the default (reduced, fast) payload set.')
        o = opt_factory('extensive', self._extensive, d, BOOL)
        ol.add(o)

        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user
        interface generated by the framework using the result of get_options().

        :param options_list: A dictionary with the options for the plugin.
        :return: No value is returned.
        """
        self._diff_ratio = options_list['diff_ratio'].get_value()
        self._extensive = options_list['extensive'].get_value()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Author: foobarmonk, Project: w3af, Lines: 104, Source: generic.py

Example 12: xss

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........
    def _analyze_echo_result(self, mutant, response):
        """
        Do we have a reflected XSS?
        
        :return: None, record all the results in the kb.
        """
        # Add data for the persistent xss checking
        if self._check_persistent_xss:
            self._xss_mutants.append((mutant, response.id))

        with self._plugin_lock:
            
            if self._has_bug(mutant):
                return
            
            sent_payload = mutant.get_token_payload()

            # TODO: https://github.com/andresriancho/w3af/issues/12305
            body_lower = response.get_body().lower()
            sent_payload_lower = sent_payload.lower()

            for context in get_context_iter(body_lower, sent_payload_lower):
                if context.is_executable() or context.can_break():
                    self._report_vuln(mutant, response, sent_payload)
                    return

    def end(self):
        """
        This method is called when the plugin won't be used anymore.
        """
        if self._check_persistent_xss:
            self._identify_persistent_xss()
        
        self._xss_mutants.cleanup()
    
    def _identify_persistent_xss(self):
        """
        This method is called to check for persistent xss. 
    
        Many times a xss isn't on the page we get after the GET/POST of
        the xss string. This method searches for the xss string on all
        the pages that are known to the framework.
        
        :return: None, Vuln (if any) are saved to the kb.
        """
        # Get all known fuzzable requests from the core
        fuzzable_requests = kb.kb.get_all_known_fuzzable_requests()
        
        self._send_mutants_in_threads(self._uri_opener.send_mutant,
                                      fuzzable_requests,
                                      self._analyze_persistent_result,
                                      grep=False, cache=False)    
    
    def _analyze_persistent_result(self, fuzzable_request, response):
        """
        After performing an HTTP request to "fuzzable_request" and getting
        "response" analyze if the response contains any of the information sent
        by any of the mutants.
        
        :return: None, Vuln (if any) are saved to the kb.
        """
        body = response.get_body()

        for mutant, mutant_response_id in self._xss_mutants:

            sent_payload = mutant.get_token_payload()
Author: everping, Project: w3af, Lines: 70, Source: xss.py

Example 13: error_pages

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........

            data = (title, desc, response.id, response.get_url(), msg)
            self._potential_vulns.append(data)

            # Just report one instance for each HTTP response, no
            # matter if multiple strings match
            break

    def _avoid_report(self, request, response, msg):
        # We should avoid multiple reports for the same error message
        # the idea here is that the root cause for the same error
        # message might be the same, and fixing one will fix all.
        #
        # So the user receives the first report with MAX_REPORTED_PER_MSG
        # vulnerabilities, fixes the root cause, scans again and then
        # all those instances go away.
        #
        # Without this code, the scanner will potentially report
        # thousands of issues for the same error message. Which will
        # overwhelm the user.
        count = 0

        for title, desc, _id, url, highlight in self._potential_vulns:
            if highlight == msg:
                count += 1

        if count < self.MAX_REPORTED_PER_MSG:
            return False

        if msg not in self._already_reported_max_msg_exceeded:
            self._already_reported_max_msg_exceeded.append(msg)

            desc = ('The application returned multiple HTTP responses'
                    ' containing detailed error pages containing exceptions'
                    ' and internal information. The maximum number of'
                    ' vulnerabilities for this issue type was reached'
                    ' and no more issues will be reported.')

            i = Info('Multiple descriptive error pages', desc, [], self.get_name())
            self.kb_append_uniq(self, 'error_page', i)

        return True

    def end(self):
        """
        This method is called when the plugin won't be used anymore.
        """
        all_findings = kb.kb.get_all_findings()

        for title, desc, _id, url, highlight in self._potential_vulns:
            for info in all_findings:
                # This makes sure that if the sqli plugin found a vulnerability
                # in the same URL as we found a detailed error, we won't report
                # the detailed error.
                #
                # If the user fixes the sqli vulnerability and runs the scan again
                # most likely the detailed error will disappear too. If the sqli
                # vulnerability disappears and this one remains, it will appear
                # as a new vulnerability in the second scan.
                if info.get_url() == url:
                    break
            else:
                i = Info(title, desc, _id, self.get_name())
                i.set_url(url)
                i.add_to_highlight(highlight)

                self.kb_append_uniq(self, 'error_page', i)

        self._potential_vulns.cleanup()

    def find_version_numbers(self, request, response):
        """
        Now I'll check if I can get a version number from the error page.
        This is common in apache, tomcat, etc...
        """
        if 400 < response.get_code() < 600:

            for match, _, _, server in self._multi_re.query(response.body):
                match_string = match.group(0)
                if match_string not in self._already_reported_versions:
                    # Save the info obj
                    desc = 'An error page sent this %s version: "%s".'
                    desc %= (server, match_string)

                    i = Info('Error page with information disclosure',
                             desc, response.id, self.get_name())
                    i.set_url(response.get_url())
                    i.add_to_highlight(server)
                    i.add_to_highlight(match_string)
                    
                    kb.kb.append(self, 'server', i)
                    kb.kb.raw_write(self, 'server', match_string)
                    
                    self._already_reported_versions.append(match_string)

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Author: everping, Project: w3af, Lines: 104, Source: error_pages.py

Example 14: xss

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........
    def _analyze_echo_result(self, mutant, response):
        """
        Do we have a reflected XSS?
        
        :return: None, record all the results in the kb.
        """
        # Add data for the persistent xss checking
        if self._check_persistent_xss:
            self._xss_mutants.append((mutant, response.id))

        with self._plugin_lock:
            
            if self._has_bug(mutant):
                return
            
            sent_payload = mutant.get_token_payload()

            # TODO: https://github.com/andresriancho/w3af/issues/12305
            body_lower = response.get_body().lower()
            sent_payload_lower = sent_payload.lower()

            for context in get_context_iter(body_lower, sent_payload_lower):
                if context.is_executable() or context.can_break():
                    self._report_vuln(mutant, response, sent_payload)
                    return

    def end(self):
        """
        This method is called when the plugin won't be used anymore.
        """
        if self._check_persistent_xss:
            self._identify_persistent_xss()
        
        self._xss_mutants.cleanup()
    
    def _identify_persistent_xss(self):
        """
        This method is called to check for persistent xss. 
    
        Many times a xss isn't on the page we get after the GET/POST of
        the xss string. This method searches for the xss string on all
        the pages that are known to the framework.
        
        :return: None, Vuln (if any) are saved to the kb.
        """
        # Get all known fuzzable requests from the core
        fuzzable_requests = kb.kb.get_all_known_fuzzable_requests()
        
        self._send_mutants_in_threads(self._uri_opener.send_mutant,
                                      fuzzable_requests,
                                      self._analyze_persistent_result,
                                      grep=False, cache=False)    
    
    def _analyze_persistent_result(self, fuzzable_request, response):
        """
        After performing an HTTP request to "fuzzable_request" and getting
        "response" analyze if the response contains any of the information sent
        by any of the mutants.
        
        :return: None, Vuln (if any) are saved to the kb.
        """
        body = response.get_body()

        for mutant, mutant_response_id in self._xss_mutants:

            sent_payload = mutant.get_token_payload()
Author: batmanWjw, Project: w3af, Lines: 70, Source: xss.py

Example 15: path_disclosure

# Required import: from w3af.core.data.db.disk_list import DiskList [as alias]
# Alternatively: from w3af.core.data.db.disk_list.DiskList import cleanup [as alias]

#......... some code omitted here .........
        #   >>> re.findall('/htdocs/.*','/var/www/foobar/htdocs/article.php')
        #   ['/htdocs/article.php']
        #   >>>
        #
        #   What I need to do here, is to keep the longest match.
        for real_url_reported, match_reported in self._reported:
            if match_reported.endswith(match):
                break
        else:
            # Note to self: I get here when "break" is NOT executed.
            # It's a new one, report!
            return False

        return True

    def _is_attr_value(self, path_disclosure_string, response):
        """
        This method was created to remove some false positives.

        :return: True if path_disclosure_string is the value of an attribute
                 inside a tag.

        Examples:
            path_disclosure_string = '/home/image.png'
            response_body = '....<img src="/home/image.png">...'
            return: True

            path_disclosure_string = '/home/image.png'
            response_body = '...<b>Error while checking /home/image.png</b>...'
            return: False
        """
        for tag in mp_doc_parser.get_tags_by_filter(response, None):
            for value in tag.attrib.itervalues():
                if path_disclosure_string in value:
                    return True

        return False

    def _update_kb_path_list(self):
        """
        If a path disclosure was found, I can create a list of full paths to
        all URLs ever visited. This method updates that list.
        """
        path_disc_vulns = kb.kb.get('path_disclosure', 'path_disclosure')
        url_list = kb.kb.get_all_known_urls()
        
        # Now I find the longest match between one of the URLs that w3af has
        # discovered, and one of the path disclosure strings that this plugin
        # has found. I use the longest match because with small match_list I
        # have more probability of making a mistake.
        longest_match = ''
        longest_path_disc_vuln = None
        for path_disc_vuln in path_disc_vulns:
            for url in url_list:
                path_and_file = url.get_path()

                if path_disc_vuln['path'].endswith(path_and_file):
                    if len(longest_match) < len(path_and_file):
                        longest_match = path_and_file
                        longest_path_disc_vuln = path_disc_vuln

        # Now I recalculate the place where all the resources are in disk, all
        # this is done taking the longest_match as a reference, so... if we
        # don't have a longest_match, then nothing is actually done
        if not longest_match:
            return

        # Get the webroot
        webroot = longest_path_disc_vuln['path'].replace(longest_match, '')

        #
        # This if fixes a strange case reported by Olle
        #         if webroot[0] == '/':
        #         IndexError: string index out of range
        # That seems to be because the webroot == ''
        #
        if not webroot:
            return
        
        # Check what path separator we should use (linux / windows)
        path_sep = '/' if webroot.startswith('/') else '\\'

        # Create the remote locations
        remote_locations = []
        for url in url_list:
            remote_path = url.get_path().replace('/', path_sep)
            remote_locations.append(webroot + remote_path)
        remote_locations = list(set(remote_locations))

        kb.kb.raw_write(self, 'list_files', remote_locations)
        kb.kb.raw_write(self, 'webroot', webroot)

    def end(self):
        self._reported.cleanup()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Author: batmanWjw, Project: w3af, Lines: 104, Source: path_disclosure.py


Note: The w3af.core.data.db.disk_list.DiskList.cleanup examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the source code copyright remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce without permission.