

Python ScalableBloomFilter.add Method Code Examples

This article collects typical usage examples of the Python method w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter.add. If you have been asking yourself what ScalableBloomFilter.add does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help. You can also explore further usage examples of the containing class, w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter.


Fifteen code examples of the ScalableBloomFilter.add method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
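Before looking at the examples, here is a minimal sketch of the pattern that nearly all of them follow: create a filter, record items with add(), and test membership with the in operator. This is only an illustrative sketch (it assumes a w3af source checkout is importable; the URL strings are made up). Keep in mind that a Bloom filter is probabilistic: a membership test may very rarely report an item that was never added, but it never misses an item that was.

from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter

seen = ScalableBloomFilter()

# Typical de-duplication loop, the same idiom used by the w3af plugins below
for url in ('/index1.html', '/index2.html', '/index1.html'):
    if url not in seen:   # probabilistic lookup (rare false positives are possible)
        seen.add(url)     # remember the item for later membership checks
        print(url)        # '/index1.html' and '/index2.html' are handled only once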

Example 1: filtered_freq_generator

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
        def filtered_freq_generator(freq_list):
            already_tested = ScalableBloomFilter()

            for freq in freq_list:
                if freq not in already_tested:
                    already_tested.add(freq)
                    yield freq
Developer: ElAleyo, Project: w3af, Lines: 9, Source: ssi.py

Example 2: test_bloom_filter

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
    def test_bloom_filter(self):
        f = ScalableBloomFilter()

        for i in xrange(20000):
            data = (i, i)
            f.add(data)

        for i in xrange(20000):
            data = (i, i)
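            # Membership check only; the boolean result is discarded because
            # this test just exercises lookup performance.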
            data in f
Developer: andresriancho, Project: w3af, Lines: 12, Source: test_scalable_performance.py

Example 3: frontpage_version

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class frontpage_version(InfrastructurePlugin):
    """
    Search for the FrontPage Server Info file and, if it is found, determine its version.
    :author: Viktor Gazdag ( [email protected] )
    """
    VERSION_RE = re.compile('FPVersion="(.*?)"', re.IGNORECASE)
    ADMIN_URL_RE = re.compile('FPAdminScriptUrl="(.*?)"', re.IGNORECASE)
    AUTHOR_URL_RE = re.compile('FPAuthorScriptUrl="(.*?)"', re.IGNORECASE)

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()

    @runonce(exc_class=RunOnce)
    def discover(self, fuzzable_request):
        """
        For every directory, fetch a list of files and analyze the response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        for domain_path in fuzzable_request.get_url().get_directories():

            if domain_path in self._analyzed_dirs:
                continue

            # Save the domain_path so I know I'm not working in vain
            self._analyzed_dirs.add(domain_path)

            # Request the file
            frontpage_info_url = domain_path.url_join("_vti_inf.html")
            try:
                response = self._uri_opener.GET(frontpage_info_url,
                                                cache=True)
            except BaseFrameworkException, w3:
                fmt = 'Failed to GET Frontpage Server _vti_inf.html file: "%s"'\
                      '. Exception: "%s".'
                om.out.debug(fmt % (frontpage_info_url, w3))
            else:
                # Check if it's a Frontpage Info file
                if not is_404(response):
                    fr = FuzzableRequest(response.get_uri())
                    self.output_queue.put(fr)

                    self._analyze_response(response)
Developer: 0x554simon, Project: w3af, Lines: 49, Source: frontpage_version.py

Example 4: blank_body

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class blank_body(GrepPlugin):
    """
    Find responses with empty body.

    :author: Andres Riancho ([email protected])
    """

    METHODS = ('GET', 'POST')
    HTTP_CODES = (401, 304, 302, 301, 204, 405)
    
    def __init__(self):
        GrepPlugin.__init__(self)
        self.already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point, find the blank bodies and report them.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if response.get_body() == '' and request.get_method() in self.METHODS\
        and response.get_code() not in self.HTTP_CODES\
        and not response.get_headers().icontains('location')\
        and response.get_url().uri2url() not in self.already_reported:

            self.already_reported.add(response.get_url().uri2url())

            desc = 'The URL: "%s" returned an empty body, this could indicate'\
                   ' an application error.'
            desc = desc % response.get_url()

            i = Info('Blank http response body', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())
            
            self.kb_append(self, 'blank_body', i)

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Developer: batmanWjw, Project: w3af, Lines: 46, Source: blank_body.py

Example 5: archive_dot_org

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class archive_dot_org(CrawlPlugin):
    """
    Search archive.org to find new pages in the target site.

    :author: Andres Riancho ([email protected])
    :author: Darren Bilby, thanks for the good idea!
    """

    ARCHIVE_START_URL = 'http://web.archive.org/web/*/%s'
    INTERESTING_URLS_RE = '<a href="(http://web\.archive\.org/web/\d*?/https?://%s/.*?)"'
    NOT_IN_ARCHIVE = '<p>Wayback Machine doesn&apos;t have that page archived.</p>'

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._already_crawled = ScalableBloomFilter()
        self._already_verified = ScalableBloomFilter()

        # User configured parameters
        self._max_depth = 3

    def crawl(self, fuzzable_request):
        """
        Does a search in archive.org and searches for links on the html. Then
        searches those URLs in the target site. This is a time machine !

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        domain = fuzzable_request.get_url().get_domain()

        if is_private_site(domain):
            msg = 'There is no point in searching archive.org for "%s"'\
                  ' because it is a private site that will never be indexed.'
            om.out.information(msg % domain)
            raise RunOnce(msg)

        # Initial check to verify if domain in archive
        start_url = self.ARCHIVE_START_URL % fuzzable_request.get_url()
        start_url = URL(start_url)
        http_response = self._uri_opener.GET(start_url, cache=True)

        if self.NOT_IN_ARCHIVE in http_response.body:
            msg = 'There is no point in searching archive.org for "%s"'
            msg += ' because they are not indexing this site.'
            om.out.information(msg % domain)
            raise RunOnce(msg)

        references = self._spider_archive(
            [start_url, ], self._max_depth, domain)
        self._analyze_urls(references)

    def _analyze_urls(self, references):
        """
        Analyze which references are cached by archive.org

        :return: A list of query string objects for the URLs that are in
                 the cache AND are in the target web site.
        """
        real_urls = []

        # Translate archive.org URL's to normal URL's
        for url in references:
            url = url.url_string[url.url_string.index('http', 1):]
            real_urls.append(URL(url))

        real_urls = list(set(real_urls))

        if len(real_urls):
            om.out.debug('Archive.org cached the following pages:')
            for u in real_urls:
                om.out.debug('- %s' % u)
        else:
            om.out.debug('Archive.org did not find any pages.')

        # Verify if they exist in the target site and add them to
        # the result if they do. Send the requests using threads:
        self.worker_pool.map(self._exists_in_target, real_urls)

    def _spider_archive(self, url_list, max_depth, domain):
        """
        Perform a classic web spidering process.

        :param url_list: The list of URL strings
        :param max_depth: The max link depth that we have to follow.
        :param domain: The domain name we are checking
        """
        # Start the recursive spidering
        res = []

        def spider_worker(url, max_depth, domain):
            if url in self._already_crawled:
                return []

            self._already_crawled.add(url)

            try:
                http_response = self._uri_opener.GET(url, cache=True)
            except:
#......... part of the code omitted here .........
Developer: andresriancho, Project: w3af-kali, Lines: 103, Source: archive_dot_org.py

Example 6: get_emails

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class get_emails(GrepPlugin):
    """
    Find email accounts.

    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        GrepPlugin.__init__(self)

        # User configured variables
        self._only_target_domain = True
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point, get the emails and save them to the kb.

        :param request: The HTTP request
        :param response: The HTTP response
        :return: None
        """
        self._grep_worker(request, response, 'emails',
                          response.get_url().get_root_domain())

        if not self._only_target_domain:
            self._grep_worker(request, response, 'external_emails')

    def _grep_worker(self, request, response, kb_key, domain=None):
        """
        Helper method for using in self.grep()

        :param request: The HTTP request
        :param response: The HTTP response
        :param kb_key: Knowledge base dict key
        :param domain: Target domain for get_emails filter
        :return: None
        """
        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            msg = 'Failed to get document parser for "%s" at get_emails.'
            om.out.debug(msg % response.get_url())
            return

        emails = set(dp.get_emails(domain))

        for mail_address in emails:
            # Reduce false positives
            if request.sent(mail_address):
                continue

            # Email address are case insensitive
            mail_address = mail_address.lower()
            url = response.get_url()
            uniq_key = (mail_address, url)

            if uniq_key in self._already_reported:
                continue

            # Avoid dups
            self._already_reported.add(uniq_key)

            # Create a new info object, and report it
            desc = 'The mail account: "%s" was found at "%s".'
            desc = desc % (mail_address, url)

            i = Info('Email address disclosure', desc, response.id,
                     self.get_name())
            i.add_to_highlight(mail_address)
            i.set_url(url)
            i[EmailInfoSet.ITAG] = mail_address
            i['user'] = mail_address.split('@')[0]

            self.kb_append_uniq_group('emails', kb_key, i,
                                      group_klass=EmailInfoSet)

    def set_options(self, options_list):
        self._only_target_domain = options_list['only_target_domain'].get_value()

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d1 = 'Only search emails for domain of target'
        o1 = opt_factory('only_target_domain', self._only_target_domain,
                         d1, 'boolean')
        ol.add(o1)

        return ol

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Developer: 0x554simon, Project: w3af, Lines: 100, Source: get_emails.py

Example 7: digit_sum

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class digit_sum(CrawlPlugin):
    """
    Take a URL with a number (index2.asp) and try to find related files
    (index1.asp, index3.asp).

    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        CrawlPlugin.__init__(self)
        self._already_visited = ScalableBloomFilter()

        # User options
        self._fuzz_images = False
        self._max_digit_sections = 4

    def crawl(self, fuzzable_request):
        """
        Searches for new URLs by adding and subtracting numbers in the file
        name and the parameters.

        :param fuzzable_request: A fuzzable_request instance that contains
                                     (among other things) the URL to test.
        """
        url = fuzzable_request.get_url()
        headers = Headers([("Referer", url.url_string)])

        original_response = self._uri_opener.GET(fuzzable_request.get_uri(), cache=True, headers=headers)

        if original_response.is_text_or_html() or self._fuzz_images:

            fr_generator = self._mangle_digits(fuzzable_request)
            response_repeater = repeat(original_response)
            header_repeater = repeat(headers)

            args = izip(fr_generator, response_repeater, header_repeater)

            self.worker_pool.map_multi_args(self._do_request, args)

            # I add myself so the next call to this plugin won't find me ...
            # Example: index1.html ---> index2.html --!!--> index1.html
            self._already_visited.add(fuzzable_request.get_uri())

    def _do_request(self, fuzzable_request, original_resp, headers):
        """
        Send the request.

        :param fuzzable_request: The modified fuzzable request
        :param original_resp: The response for the original request that was
                              sent.
        """
        response = self._uri_opener.GET(fuzzable_request.get_uri(), cache=True, headers=headers)

        add = False

        if not is_404(response):
            # We have different cases:
            #    - If the URLs are different, then there is nothing to think
            #      about, we simply found something new!
            if response.get_url() != original_resp.get_url():
                add = True

            #    - If the content type changed, then there is no doubt that
            #      we've found something new!
            elif response.doc_type != original_resp.doc_type:
                add = True

            #    - If we changed the query string parameters, we have to check
            #      the content
            elif fuzzy_not_equal(response.get_clear_text_body(), original_resp.get_clear_text_body(), 0.8):
                # In this case what might happen is that the number we changed
                # is "out of range" and when requesting that it will trigger an
                # error in the web application, or show us a non-interesting
                # response that holds no content.
                #
                # We choose to return these to the core because they might help
                # with the code coverage efforts. Think about something like:
                #     foo.aspx?id=OUT_OF_RANGE&foo=inject_here
                # vs.
                #     foo.aspx?id=IN_RANGE&foo=inject_here
                #
                # This relates to the EXPECTED_URLS in test_digit_sum.py
                add = True

        if add:
            for fr in self._create_fuzzable_requests(response):
                self.output_queue.put(fr)

    def _mangle_digits(self, fuzzable_request):
        """
        Mangle the digits (if any) in the fr URL.

        :param fuzzable_request: The original FuzzableRequest
        :return: A generator which returns mangled fuzzable requests
        """
        # First i'll mangle the digits in the URL file
        filename = fuzzable_request.get_url().get_file_name()
        domain_path = fuzzable_request.get_url().get_domain_path()
        for fname in self._do_combinations(filename):
            fr_copy = fuzzable_request.copy()
#......... part of the code omitted here .........
Developer: jatkatz, Project: w3af, Lines: 103, Source: digit_sum.py

Example 8: web_spider

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class web_spider(CrawlPlugin):
    """
    Crawl the web application.

    :author: Andres Riancho ([email protected])
    """
    UNAUTH_FORBID = {http_constants.UNAUTHORIZED, http_constants.FORBIDDEN}

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._compiled_ignore_re = None
        self._compiled_follow_re = None
        self._broken_links = DiskSet(table_prefix='web_spider')
        self._first_run = True
        self._target_urls = []
        self._target_domain = None
        self._already_filled_form = ScalableBloomFilter()
        self._variant_db = VariantDB()

        # User configured variables
        self._ignore_regex = ''
        self._follow_regex = '.*'
        self._only_forward = False
        self._compile_re()

    def crawl(self, fuzzable_req):
        """
        Searches for links on the html.

        :param fuzzable_req: A fuzzable_req instance that contains
                             (among other things) the URL to test.
        """
        self._handle_first_run()

        #
        # If it is a form, then smart_fill the parameters to send something that
        # makes sense and will allow us to cover more code.
        #
        data_container = fuzzable_req.get_raw_data()
        if isinstance(data_container, Form):

            if fuzzable_req.get_url() in self._already_filled_form:
                return

            self._already_filled_form.add(fuzzable_req.get_url())
            data_container.smart_fill()

        # Send the HTTP request
        resp = self._uri_opener.send_mutant(fuzzable_req)

        # Nothing to do here...
        if resp.get_code() == http_constants.UNAUTHORIZED:
            return

        # Nothing to do here...
        if resp.is_image():
            return

        # And we don't trust what comes from the core, check if 404
        if is_404(resp):
            return

        self._extract_html_forms(resp, fuzzable_req)
        self._extract_links_and_verify(resp, fuzzable_req)

    def _extract_html_forms(self, resp, fuzzable_req):
        """
        Parses the HTTP response body and extract HTML forms, resulting forms
        are put() on the output queue.
        """
        # Try to find forms in the document
        try:
            dp = parser_cache.dpc.get_document_parser_for(resp)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            return

        same_domain = lambda f: f.get_action().get_domain() == \
                                resp.get_url().get_domain()

        # Create one FuzzableRequest for each form variant
        mode = cf.cf.get('form_fuzzing_mode')
        for form_params in dp.get_forms():

            if not same_domain(form_params):
                continue

            headers = fuzzable_req.get_headers()

            for form_params_variant in form_params.get_variants(mode):
                data_container = dc_from_form_params(form_params_variant)

                # Now data_container is one of Multipart of URLEncoded form
                # instances, which is a DataContainer. Much better than the
                # FormParameters instance we had before in form_params_variant
                r = FuzzableRequest.from_form(data_container, headers=headers)
                self.output_queue.put(r)

#......... part of the code omitted here .........
Developer: image-tester, Project: w3af-module, Lines: 103, Source: web_spider.py

Example 9: find_vhosts

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class find_vhosts(InfrastructurePlugin):
    """
    Modify the HTTP Host header and try to find virtual hosts.
    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._first_exec = True
        self._already_queried = ScalableBloomFilter()
        self._can_resolve_domain_names = False

    def discover(self, fuzzable_request):
        """
        Find virtual hosts.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        analysis_result = self._analyze(fuzzable_request)
        self._report_results(fuzzable_request, analysis_result)

    def _analyze(self, fuzzable_request):
        vhost_list = []
        if self._first_exec:
            self._first_exec = False
            vhost_list.extend(self._generic_vhosts(fuzzable_request))

        # I also test for "dead links" that the web programmer left in the
        # page. For example, if w3af finds a link to
        # "http://corporative.intranet.corp/" it will try to resolve the dns
        # name, if it fails, it will try to request that page from the server
        vhost_list.extend(self._get_dead_links(fuzzable_request))
        return vhost_list

    def _report_results(self, fuzzable_request, analysis_result):
        """
        Report our findings
        """
        reported = set()
        for vhost, request_id in analysis_result:
            if vhost in reported:
                continue

            reported.add(vhost)

            domain = fuzzable_request.get_url().get_domain()
            desc = 'Found a new virtual host at the target web server, the'\
                   ' virtual host name is: "%s". To access this site' \
                   ' you might need to change your DNS resolution settings'\
                   ' in order to point "%s" to the IP address of "%s".'
            desc = desc % (vhost, vhost, domain)

            v = Vuln.from_fr('Virtual host identified', desc, severity.LOW,
                             request_id, self.get_name(), fuzzable_request)

            kb.kb.append(self, 'find_vhosts', v)
            om.out.information(v.get_desc())

    def _get_dead_links(self, fuzzable_request):
        """
        Find every link on an HTML document and verify if the domain is reachable
        or not; after that, verify if we found a different name for the target
        site or if we found a new site that is linked. If the link points to a
        dead site then report it (it could be pointing to some private address
        or something...)
        """
        # Get some responses to compare later
        base_url = fuzzable_request.get_url().base_url()
        original_response = self._uri_opener.GET(fuzzable_request.get_uri(),
                                                 cache=True)
        base_response = self._uri_opener.GET(base_url, cache=True)
        base_resp_body = base_response.get_body()

        try:
            dp = parser_cache.dpc.get_document_parser_for(original_response)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            return []

        # Set the non existent response
        non_existent_response = self._get_non_exist(fuzzable_request)
        nonexist_resp_body = non_existent_response.get_body()

        # Note:
        # - With parsed_references I'm 100% that it's really something in the
        #   HTML that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in
        #   some cases are just false positives.
        #
        # In this case, and because I'm only going to use the domain name of the
        # URL I'm going to trust the re_references also.
        parsed_references, re_references = dp.get_references()
        parsed_references.extend(re_references)

        res = []

#......... part of the code omitted here .........
Developer: EnDe, Project: w3af, Lines: 103, Source: find_vhosts.py

Example 10: dot_net_errors

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class dot_net_errors(InfrastructurePlugin):
    """
    Request specially crafted URLs that generate ASP.NET errors in order
    to gather information.

    :author: Andres Riancho (([email protected]))
    """

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._already_tested = ScalableBloomFilter()
        # On real web applications, if we can't trigger an error in the first
        # MAX_TESTS tests, it simply won't happen and we have to stop testing.
        self.MAX_TESTS = 25

    def discover(self, fuzzable_request):
        """
        Requests the special filenames.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        if len(self._already_tested) < self.MAX_TESTS \
                and fuzzable_request.get_url() not in self._already_tested:
            self._already_tested.add(fuzzable_request.get_url())

            test_generator = self._generate_URLs(fuzzable_request.get_url())

            self.worker_pool.map(self._send_and_check,
                                    test_generator,
                                    chunksize=1)

    def _generate_URLs(self, original_url):
        """
        Generate new URLs based on original_url.

        :param original_url: The original url that has to be modified in
                                 order to trigger errors in the remote application.
        """
        special_chars = ['|', '~']

        filename = original_url.get_file_name()
        if filename != '' and '.' in filename:
            splitted_filename = filename.split('.')
            extension = splitted_filename[-1:][0]
            name = '.'.join(splitted_filename[0:-1])

            for char in special_chars:
                new_filename = name + char + '.' + extension
                new_url = original_url.url_join(new_filename)
                yield new_url

    def _send_and_check(self, url):
        """
        :param response: The HTTPResponse object that holds the content of
                             the response to analyze.
        """
        response = self._uri_opener.GET(url, cache=True)

        viewable_remote_machine = '<b>Details:</b> To enable the details of this'
        viewable_remote_machine += ' specific error message to be viewable on'
        viewable_remote_machine += ' remote machines'

        if viewable_remote_machine not in response.body\
        and '<h2> <i>Runtime Error</i> </h2></span>' in response.body:

            desc = 'Detailed information about ASP.NET error messages can be'\
                   ' viewed from remote sites. The URL: "%s" discloses'\
                   ' detailed error messages.'
            desc = desc % response.get_url()
        
            v = Vuln('Information disclosure via .NET errors', desc,
                     severity.LOW, response.id, self.get_name())
        
            kb.kb.append(self, 'dot_net_errors', v)

    def get_plugin_deps(self):
        """
        :return: A list with the names of the plugins that should be run before the
        current one.
        """
        return ['grep.error_pages']

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Developer: 3rdDegree, Project: w3af, Lines: 92, Source: dot_net_errors.py

Example 11: websocket_hijacking

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class websocket_hijacking(AuditPlugin):
    """
    Detect Cross-Site WebSocket hijacking vulnerabilities.
    :author: Dmitry Roshchin ([email protected])
    """
    W3AF_DOMAIN = 'w3af.org'
    W3AF_ORIGIN = 'http://www.w3af.org/'

    def __init__(self):
        super(websocket_hijacking, self).__init__()
        self.already_tested_websockets = ScalableBloomFilter()

    def audit(self, freq, orig_response, debugging_id):
        """
        Detect websockets for Cross-Site WebSocket hijacking vulnerabilities.

        This plugin works really well and can be improved in two different ways:

            * Add new check_* methods to this class which detect websocket
              vulnerabilities and then add them to known_checks

            * Extend the websocket link detection in grep.websockets_links,
              which is the weak part of the process, this is because we're doing
              a very trivial regular expression match to find WS links, which
              will most likely fail in "complex" web applications

        :param freq: A FuzzableRequest
        :param orig_response: The HTTP response associated with the fuzzable request
        :param debugging_id: A unique identifier for this call to audit()
        """
        # We can only work if there are known web sockets
        ws_links = kb.kb.get('websockets_links', 'websockets_links')

        for web_socket_info_set in ws_links:
            web_socket_url = web_socket_info_set['ws_link']

            # Checking if we already tested this web socket URL
            if web_socket_url in self.already_tested_websockets:
                continue

            self.already_tested_websockets.add(web_socket_url)

            web_socket_url = URL(web_socket_url)
            web_socket_version = negotiate_websocket_version(self._uri_opener,
                                                             web_socket_url)
            self.check_websocket_security(web_socket_url,
                                          web_socket_version)

    def check_websocket_security(self, web_socket_url, web_socket_version):
        """
        Analyze the security of a web socket

        :param web_socket_url: The URL of the web socket
        :param web_socket_version: The protocol version
        :return: None, results (if any) are stored to the KB
        """
        known_checks = (self.check_is_open_web_socket,
                        self.check_is_restricted_by_origin_with_match_bug,
                        self.check_is_restricted_by_origin,
                        self.check_need_basic_auth_origin_not_restricted,
                        self.check_need_cookie_origin_not_restricted)

        for check in known_checks:
            if check(web_socket_url, web_socket_version):
                break

    def check_is_open_web_socket(self, web_socket_url, web_socket_version):
        """
        Note that this method only makes sense if called in a loop with the
        other check_* methods.

        :param web_socket_url: The URL of the web socket
        :param web_socket_version: The protocol version

        :return: True if the web socket is open:
                    * Any Origin can connect
                    * No cookies required for authentication
                    * No basic auth required for authentication
        """
        upgrade_request = build_ws_upgrade_request(web_socket_url,
                                                   web_socket_version=web_socket_version,
                                                   origin=self.W3AF_ORIGIN)
        upgrade_response = self._uri_opener.send_mutant(upgrade_request,
                                                        cookies=False,
                                                        use_basic_auth=False)

        if not is_successful_upgrade(upgrade_response):
            return False

        msg = ('An HTML5 WebSocket which allows connections from any origin'
               ' without authentication was found at "%s"')
        msg %= web_socket_url

        v = Vuln.from_fr('Open WebSocket', msg, severity.LOW,
                         upgrade_response.id, self.get_name(), upgrade_request)
        self.kb_append_uniq(self, 'websocket_hijacking', v)
        return True

    def check_is_restricted_by_origin_with_match_bug(self, web_socket_url,
                                                     web_socket_version):
#......... part of the code omitted here .........
Developer: foobarmonk, Project: w3af, Lines: 103, Source: websocket_hijacking.py

Example 12: find_vhosts

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class find_vhosts(InfrastructurePlugin):
    """
    Modify the HTTP Host header and try to find virtual hosts.
    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._first_exec = True
        self._already_queried = ScalableBloomFilter()
        self._can_resolve_domain_names = False

    def discover(self, fuzzable_request):
        """
        Find virtual hosts.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        analysis_result = self._analyze(fuzzable_request)
        self._report_results(fuzzable_request, analysis_result)

    def _analyze(self, fuzzable_request):
        vhost_list = []
        if self._first_exec:
            self._first_exec = False
            vhost_list.extend(self._generic_vhosts(fuzzable_request))

        # I also test for "dead links" that the web developer left in the
        # page. For example, if w3af finds a link to
        # "http://corporative.intranet.corp/" it will try to resolve the dns
        # name, if it fails, it will try to request that page from the server
        vhost_list.extend(self._get_dead_links(fuzzable_request))
        return vhost_list

    def _report_results(self, fuzzable_request, analysis_result):
        """
        Report our findings
        """
        reported = set()
        for vhost, request_id in analysis_result:
            if vhost in reported:
                continue

            reported.add(vhost)

            domain = fuzzable_request.get_url().get_domain()
            desc = (
                u"Found a new virtual host at the target web server, the"
                u' virtual host name is: "%s". To access this site'
                u" you might need to change your DNS resolution settings"
                u' in order to point "%s" to the IP address of "%s".'
            )
            desc %= (vhost, vhost, domain)

            v = Vuln.from_fr(
                "Virtual host identified", desc, severity.LOW, request_id, self.get_name(), fuzzable_request
            )

            kb.kb.append(self, "find_vhosts", v)
            om.out.information(v.get_desc())

    def _get_dead_links(self, fuzzable_request):
        """
        Find every link on an HTML document and verify if the domain is reachable
        or not; after that, verify if we found a different name for the target
        site or if we found a new site that is linked. If the link points to a
        dead site then report it (it could be pointing to some private address
        or something...)
        """
        # Get some responses to compare later
        base_url = fuzzable_request.get_url().base_url()
        original_response = self._uri_opener.GET(fuzzable_request.get_uri(), cache=True)
        base_response = self._uri_opener.GET(base_url, cache=True)
        base_resp_body = base_response.get_body()

        try:
            dp = parser_cache.dpc.get_document_parser_for(original_response)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            return []

        # Set the non existent response
        non_existent_response = self._get_non_exist(fuzzable_request)
        nonexist_resp_body = non_existent_response.get_body()

        # Note:
        # - With parsed_references I'm 100% that it's really something in the
        #   HTML that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in
        #   some cases are just false positives.
        #
        # In this case, and because I'm only going to use the domain name of the
        # URL I'm going to trust the re_references also.
        parsed_references, re_references = dp.get_references()
        parsed_references.extend(re_references)

#......... part of the code omitted here .........
Developer: ZionOps, Project: w3af, Lines: 103, Source: find_vhosts.py

Example 13: html_comments

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class html_comments(GrepPlugin):
    """
    Extract and analyze HTML comments.

    :author: Andres Riancho ([email protected])
    """

    HTML_RE = re.compile('<[a-zA-Z]*.*?>.*?</[a-zA-Z]>')

    INTERESTING_WORDS = (
        # In English
        'user', 'pass', 'xxx', 'fix', 'bug', 'broken', 'oops', 'hack',
        'caution', 'todo', 'note', 'warning', '!!!', '???', 'shit',
        'pass', 'password', 'passwd', 'pwd', 'secret', 'stupid',
        
        # In Spanish
        'tonto', 'porqueria', 'cuidado', 'usuario', u'contraseña',
        'puta', 'email', 'security', 'captcha', 'pinga', 'cojones',
        
        # some in Portuguese
        'banco', 'bradesco', 'itau', 'visa', 'bancoreal', u'transfêrencia',
        u'depósito', u'cartão', u'crédito', 'dados pessoais'
    )

    _multi_in = MultiIn([' %s ' % w for w in INTERESTING_WORDS])

    def __init__(self):
        GrepPlugin.__init__(self)

        # Internal variables
        self._comments = DiskDict(table_prefix='html_comments')
        self._already_reported = ScalableBloomFilter()
        self._end_was_called = False

    def grep(self, request, response):
        """
        Plugin entry point, parse those comments!

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return
        
        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            return
        
        for comment in dp.get_comments():
            # These next two lines fix this issue:
            # audit.ssi + grep.html_comments + web app with XSS = false positive
            if request.sent(comment):
                continue

            if self._is_new(comment, response):

                self._interesting_word(comment, request, response)
                self._html_in_comment(comment, request, response)

    def _interesting_word(self, comment, request, response):
        """
        Find interesting words in HTML comments
        """
        comment = comment.lower()

        for word in self._multi_in.query(comment):
            if (word, response.get_url()) in self._already_reported:
                continue

            desc = ('A comment with the string "%s" was found in: "%s".'
                    ' This could be interesting.')
            desc %= (word, response.get_url())

            i = Info.from_fr('Interesting HTML comment', desc, response.id,
                             self.get_name(), request)
            i.add_to_highlight(word)

            kb.kb.append(self, 'interesting_comments', i)
            om.out.information(i.get_desc())
                
            self._already_reported.add((word, response.get_url()))

    def _html_in_comment(self, comment, request, response):
        """
        Find HTML code in HTML comments
        """
        html_in_comment = self.HTML_RE.search(comment)

        if html_in_comment is None:
            return

        if (comment, response.get_url()) in self._already_reported:
            return

        # There is HTML code in the comment.
        comment = comment.strip()
        comment = comment.replace('\n', '')
        comment = comment.replace('\r', '')
#......... part of the code omitted here .........
Developer: foobarmonk, Project: w3af, Lines: 103, Source: html_comments.py

Example 14: find_backdoors

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class find_backdoors(CrawlPlugin):
    """
    Find web backdoors and web shells.

    :author: Andres Riancho ([email protected])
    """
    WEBSHELL_DB = os.path.join(ROOT_PATH, 'plugins', 'crawl', 'find_backdoors',
                               'web_shells.txt')

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()

    def crawl(self, fuzzable_request):
        """
        For every directory, fetch a list of shell files and analyze the
        response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        domain_path = fuzzable_request.get_url().get_domain_path()

        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.add(domain_path)

            # Read the web shell database
            web_shells = self._iter_web_shells()

            # Send the requests using threads:
            args_iter = (domain_path.url_join(fname) for fname in web_shells)
            self.worker_pool.map(self._check_if_exists, args_iter)

    def _iter_web_shells(self):
        """
        :yield: lines from the web shell DB
        """
        for line in file(self.WEBSHELL_DB):
            if line.startswith('#'):
                continue

            if not line:
                continue

            yield line.strip()

    def _check_if_exists(self, web_shell_url):
        """
        Check if the file exists.

        :param web_shell_url: The URL to check
        """
        try:
            response = self._uri_opener.GET(web_shell_url, cache=True)
        except BaseFrameworkException:
            om.out.debug('Failed to GET webshell:' + web_shell_url)
        else:
            if self._is_possible_backdoor(response):
                desc = 'A web backdoor was found at: "%s"; this could ' \
                       'indicate that the server has been compromised.'
                desc = desc % response.get_url()

                v = Vuln('Potential web backdoor', desc, severity.HIGH,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'backdoors', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())

                fr = FuzzableRequest.from_http_response(response)
                self.output_queue.put(fr)

    def _is_possible_backdoor(self, response):
        """
        Heuristic to infer if the content of <response> has the pattern of a
        backdoor response.

        :param response: HTTPResponse object
        :return: A bool value
        """
        if not is_404(response):
            body_text = response.get_body()
            dom = response.get_dom()
            if dom is not None:
                for ele, attrs in BACKDOOR_COLLECTION.iteritems():
                    for attrname, attr_vals in attrs.iteritems():
                        # Set of lowered attribute values
                        dom_attr_vals = \
                            set(n.get(attrname).lower() for n in
                                (dom.xpath('//%s[@%s]' % (ele, attrname))))
                        # If at least one elem in intersection return True
                        if dom_attr_vals.intersection(attr_vals):
                            return True

            # If no regex matched then try with keywords. At least 2 should be
            # contained in 'body_text' to succeed.
            times = 0
            for back_kw in KNOWN_OFFENSIVE_WORDS:
#......... part of the code omitted here .........
Developer: ElAleyo, Project: w3af, Lines: 103, Source: find_backdoors.py

Example 15: hash_analysis

# Required module import: from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter [as alias]
# Or: from w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter import add [as alias]
class hash_analysis(GrepPlugin):
    """
    Identify hashes in HTTP responses.

    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        GrepPlugin.__init__(self)

        self._already_reported = ScalableBloomFilter()

        # regex to split between words
        self._split_re = re.compile('[^\w]')

    def grep(self, request, response):
        """
        Plugin entry point, identify hashes in the HTTP response.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        # I know that by doing this I lose the chance of finding hashes in
        # PDF files, but... this is much faster
        if not response.is_text_or_html():
            return

        body = response.get_body()
        splitted_body = self._split_re.split(body)
        for possible_hash in splitted_body:

            #    This is a performance enhancement that cuts the execution
            #    time of this plugin in half.
            if len(possible_hash) < 31 or len(possible_hash) > 129:
                return
            
            hash_type = self._get_hash_type(possible_hash)
            if not hash_type:
                return

            possible_hash = possible_hash.lower()
            if self._has_hash_distribution(possible_hash):
                if (possible_hash, response.get_url()) not in self._already_reported:
                    desc = 'The URL: "%s" returned a response that may contain'\
                          ' a "%s" hash. The hash string is: "%s". This is'\
                          ' uncommon and requires human verification.'
                    desc = desc % (response.get_url(), hash_type, possible_hash)
                    
                    i = Info('Hash string in HTML content', desc,
                             response.id, self.get_name())
                    i.set_url(response.get_url())
                    i.add_to_highlight(possible_hash)
                    
                    self.kb_append(self, 'hash_analysis', i)

                    self._already_reported.add( (possible_hash,
                                                 response.get_url()) )

    def _has_hash_distribution(self, possible_hash):
        """
        :param possible_hash: A string that may be a hash.
        :return: True if the possible_hash has an equal (approx.) distribution
        of numbers and letters and only has hex characters (0-9, a-f)
        """
        numbers = 0
        letters = 0
        for char in possible_hash:
            if char.isdigit():
                numbers += 1
            elif char in 'abcdef':
                letters += 1
            else:
                return False

        if numbers in range(letters - len(possible_hash) / 2, letters + len(possible_hash) / 2):
            # Seems to be a hash, let's make a final test to avoid false positives with
            # strings like:
            # 2222222222222222222aaaaaaaaaaaaa
            is_hash = True
            for char in possible_hash:
                if possible_hash.count(char) > len(possible_hash) / 5:
                    is_hash = False
                    break
            return is_hash

        else:
            return False

    def _get_hash_type(self, possible_hash):
        """
        :param possible_hash: A string that may be a hash.
        :return: The hash type if the string seems to be a md5 / sha1 hash.
        None otherwise.
        """
        # When adding something here, please review the code above where
        # we also check the length.
        hash_type_len = {
                         'MD5': 32,
#......... part of the code omitted here .........
Developer: 0x554simon, Project: w3af, Lines: 103, Source: hash_analysis.py


Note: The w3af.core.data.bloomfilter.scalable_bloom.ScalableBloomFilter.add method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.