当前位置: 首页>>代码示例>>Python>>正文


Python ScalableBloomFilter.add方法代码示例

本文整理汇总了Python中core.data.bloomfilter.pybloom.ScalableBloomFilter.add方法的典型用法代码示例。如果您正苦于以下问题:Python ScalableBloomFilter.add方法的具体用法?Python ScalableBloomFilter.add怎么用?Python ScalableBloomFilter.add使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在core.data.bloomfilter.pybloom.ScalableBloomFilter的用法示例。


在下文中一共展示了ScalableBloomFilter.add方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: dotNetErrors

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class dotNetErrors(baseDiscoveryPlugin):
    '''
    Request specially crafted URLs that generate ASP.NET errors in order to gather information.
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseDiscoveryPlugin.__init__(self)

        # Internal variables
        self._already_tested = ScalableBloomFilter()

    def discover(self, fuzzableRequest ):
        '''
        Requests the special filenames.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        if fuzzableRequest.getURL() not in self._already_tested:
            self._already_tested.add( fuzzableRequest.getURL() )

            # Generate the URLs to GET
            to_test = self._generate_URLs( fuzzableRequest.getURL() )
            for url in to_test:
                try:
                    response = self._urlOpener.GET( url, useCache=True )
                except KeyboardInterrupt,e:
                    raise e
                except w3afException,w3:
                    om.out.error( str(w3) )
                else:
                    self._analyze_response( response )
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:34,代码来源:dotNetErrors.py

示例2: frontpage_version

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class frontpage_version(baseDiscoveryPlugin):
    '''
    Search FrontPage Server Info file and if it finds it will determine its version.
    @author: Viktor Gazdag ( [email protected] )
    '''

    def __init__(self):
        baseDiscoveryPlugin.__init__(self)
        
        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()
        self._exec = True

    def discover(self, fuzzableRequest ):
        '''
        For every directory, fetch a list of files and analyze the response.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        fuzzable_return_value = []
        
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be runned.
            raise w3afRunOnce()
            
        else:
            # Run the plugin.
            self._exec = False

        for domain_path in urlParser.getDirectories(fuzzableRequest.getURL() ):

            if domain_path not in self._analyzed_dirs:

                # Save the domain_path so I know I'm not working in vane
                self._analyzed_dirs.add( domain_path )

                # Request the file
                frontpage_info_url = urlParser.urlJoin(  domain_path , "_vti_inf.html" )
                try:
                    response = self._urlOpener.GET( frontpage_info_url, useCache=True )
                    om.out.debug( '[frontpage_version] Testing "' + frontpage_info_url + '".' )
                except w3afException,  w3:
                    msg = 'Failed to GET Frontpage Server _vti_inf.html file: "'
                    msg += frontpage_info_url + '". Exception: "' + str(w3) + '".'
                    om.out.debug( msg )
                else:
                    # Check if it's a Fronpage Info file
                    if not is_404( response ):
                        fuzzable_return_value.extend( self._createFuzzableRequests( response ) )
                        self._analyze_response( response )
                        return fuzzable_return_value
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:53,代码来源:frontpage_version.py

示例3: directoryIndexing

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class directoryIndexing(baseGrepPlugin):
    '''
    Grep every response for directory indexing problems.

    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)

        # Domain paths that were already analyzed; bloom filter keeps
        # membership tests cheap on large scans.
        self._already_visited = ScalableBloomFilter()

        # Added performance by compiling all the regular expressions
        # before using them. The setup time of the whole plugin raises,
        # but the execution time is lowered *a lot*.
        self._compiled_regex_list = [ re.compile(regex, re.IGNORECASE | re.DOTALL)
                                      for regex in self._get_indexing_regex() ]

    def grep(self, request, response):
        '''
        Plugin entry point, search for directory indexing.
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        # Hoist the domain path; it was previously computed twice.
        domain_path = urlParser.getDomainPath(response.getURL())

        if domain_path in self._already_visited:
            # Already worked for this URL, no reason to work twice
            return

        # Save it,
        self._already_visited.add( domain_path )

        # Work,
        if response.is_text_or_html():
            html_string = response.getBody()
            for indexing_regex in self._compiled_regex_list:
                if indexing_regex.search( html_string ):
                    v = vuln.vuln()
                    v.setPluginName(self.getName())
                    v.setURL( response.getURL() )
                    msg = 'The URL: "' + response.getURL() + '" has a directory '
                    msg += 'indexing vulnerability.'
                    v.setDesc( msg )
                    v.setId( response.id )
                    v.setSeverity(severity.LOW)
                    path = urlParser.getPath( response.getURL() )
                    v.setName( 'Directory indexing - ' + path )

                    kb.kb.append( self , 'directory' , v )
                    # One matching regex is enough to report this URL.
                    break

    def setOptions( self, OptionList ):
        '''
        This plugin has no user configurable options.
        '''
        pass

    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''
        ol = optionList()
        return ol

    def _get_indexing_regex(self):
        '''
        @return: A list of the regular expression strings, in order to be
                 compiled in __init__
        '''
        ### TODO: verify if I need to add more values here, IIS !!!
        # A literal list instead of ten append() calls; same strings, same order.
        return [
            "<title>Index of /",
            '<a href="\\?C=N;O=D">Name</a>',
            "Last modified</a>",
            "Parent Directory</a>",
            "Directory Listing for",
            "<TITLE>Folder Listing.",
            '<table summary="Directory Listing" ',
            "- Browsing directory ",
            # IIS 6.0 and 7.0
            '">\\[To Parent Directory\\]</a><br><br>',
            # IIS 5.0
            '<A HREF=".*?">.*?</A><br></pre><hr></body></html>',
        ]

    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        self.printUniq( kb.kb.getData( 'directoryIndexing', 'directory' ), 'URL' )

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:99,代码来源:directoryIndexing.py

示例4: findvhost

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class findvhost(baseDiscoveryPlugin):
    '''
    Modify the HTTP Host header and try to find virtual hosts.
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseDiscoveryPlugin.__init__(self)

        # Internal variables
        self._first_exec = True                        # generic vhost checks run only once
        self._already_queried = ScalableBloomFilter()  # vhost names already tried
        self._can_resolve_domain_names = False
        self._non_existant_response = None             # baseline response for a bogus Host header

    def discover(self, fuzzableRequest ):
        '''
        Find virtual hosts.

        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                    (among other things) the URL to test.
        @return: An empty list; findings are reported through the knowledge
                 base and om.out instead of as fuzzable requests.
        '''
        vhost_list = []
        if self._first_exec:
            # Only run the generic vhost checks once
            self._first_exec = False
            vhost_list = self._generic_vhosts( fuzzableRequest )

            # Set this for later
            self._can_resolve_domain_names = self._can_resolve_domains()

        # I also test for "dead links" that the web programmer left in the page.
        # For example, if w3af finds a link to "http://corporative.intranet.corp/"
        # it will try to resolve the dns name; if that fails, it will try to
        # request that page from the server.
        vhost_list.extend( self._get_dead_links( fuzzableRequest ) )

        # Hoist loop invariants: the URL and its domain do not change between
        # iterations (previously both were recomputed for every vhost found).
        url = fuzzableRequest.getURL()
        domain = urlParser.getDomain(url)

        # Report our findings
        for vhost, request_id in vhost_list:
            v = vuln.vuln()
            v.setPluginName(self.getName())
            v.setURL( url )
            v.setMethod( 'GET' )
            v.setName( 'Shared hosting' )
            v.setSeverity(severity.LOW)

            msg = 'Found a new virtual host at the target web server, the virtual host name is: "'
            msg += vhost + '". To access this site you might need to change your DNS resolution'
            msg += ' settings in order to point "' + vhost + '" to the IP address of "'
            msg += domain + '".'
            v.setDesc( msg )
            v.setId( request_id )
            kb.kb.append( self, 'findvhost', v )
            om.out.information( v.getDesc() )

        return []
        
    def _get_dead_links(self, fuzzableRequest):
        '''
        Find every link on a HTML document verify if the domain is reachable or not; after that,
        verify if the web found a different name for the target site or if we found a new site that
        is linked. If the link points to a dead site then report it (it could be pointing to some 
        private address or something...)
        '''
        res = []
        
        # Get some responses to compare later
        base_url = urlParser.baseUrl(fuzzableRequest.getURL())
        original_response = self._urlOpener.GET(fuzzableRequest.getURI(), useCache=True)
        base_response = self._urlOpener.GET(base_url, useCache=True)
        base_resp_body = base_response.getBody()
        
        try:
            dp = dpCache.dpc.getDocumentParserFor(original_response)
        except w3afException:
            # Failed to find a suitable parser for the document
            return []
        
        # Set the non existant response
        non_existant = 'iDoNotExistPleaseGoAwayNowOrDie' + createRandAlNum(4) 
        self._non_existant_response = self._urlOpener.GET(base_url, 
                                                useCache=False, headers={'Host': non_existant})
        nonexist_resp_body = self._non_existant_response.getBody()
        
        # Note:
        # - With parsed_references I'm 100% that it's really something in the HTML
        # that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in some cases
        # are just false positives.
        #
        # In this case, and because I'm only going to use the domain name of the URL
        # I'm going to trust the re_references also.
        parsed_references, re_references = dp.getReferences()
        parsed_references.extend(re_references)
        
        for link in parsed_references:
            domain = urlParser.getDomain(link)
#.........这里部分代码省略.........
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:103,代码来源:findvhost.py

示例5: fileUpload

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class fileUpload(baseGrepPlugin):
    '''
    Find HTML forms with file upload capabilities.

    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)

        # Internal variables
        self._already_inspected = ScalableBloomFilter()   # URLs already grepped

    def grep(self, request, response):
        '''
        Plugin entry point, verify if the HTML has a form with file uploads.

        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        url = response.getURL()

        # "url not in ..." instead of "not url in ..." (PEP 8); behavior unchanged.
        if response.is_text_or_html() and url not in self._already_inspected:

            self._already_inspected.add(url)
            dom = response.getDOM()

            # In some strange cases, we fail to normalize the document
            if dom is not None:

                # Loop through file inputs tags
                for input_file in dom.xpath(FILE_INPUT_XPATH):
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('File upload form')
                    i.setURL(url)
                    i.setId(response.id)
                    msg = 'The URL: "%s" has form with file upload ' \
                          'capabilities.' % url
                    i.setDesc(msg)
                    to_highlight = etree.tostring(input_file)
                    i.addToHighlight(to_highlight)
                    kb.kb.append(self, 'fileUpload', i)

    def setOptions( self, OptionList ):
        '''
        This plugin has no user configurable options.
        '''
        pass

    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''
        ol = optionList()
        return ol

    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        self.printUniq( kb.kb.getData( 'fileUpload', 'fileUpload' ), 'URL' )

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:76,代码来源:fileUpload.py

示例6: blankBody

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class blankBody(baseGrepPlugin):
    '''
    Find responses with empty body.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)
        # URLs already reported; each URL is reported at most once.
        self._already_reported = ScalableBloomFilter()
        
    def grep(self, request, response):
        '''
        Plugin entry point, find the blank bodies and report them.

        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None

        NOTE: these doctests previously asserted against the 'ssn' knowledge
        base key (a copy/paste from the ssn plugin), which made them pass
        vacuously; they now check the 'blankBody' key this plugin writes to.

        Init
        >>> from core.data.url.httpResponse import httpResponse
        >>> from core.data.request.fuzzableRequest import fuzzableRequest
        >>> from core.controllers.misc.temp_dir import create_temp_dir
        >>> o = create_temp_dir()

        Simple test, empty string.
        >>> body = ''
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> b = blankBody()
        >>> b.grep(request, response)
        >>> assert len(kb.kb.getData('blankBody', 'blankBody')) == 1

        With some content.
        >>> kb.kb.save('blankBody','blankBody',[])
        >>> body = 'header body footer'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> b.grep(request, response)
        >>> assert len(kb.kb.getData('blankBody', 'blankBody')) == 0

        Strange method, empty body.
        >>> kb.kb.save('blankBody','blankBody',[])
        >>> body = ''
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'ARGENTINA' )
        >>> b.grep(request, response)
        >>> assert len(kb.kb.getData('blankBody', 'blankBody')) == 0

        Response codes,
        >>> kb.kb.save('blankBody','blankBody',[])
        >>> body = ''
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(401, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> b.grep(request, response)
        >>> len(kb.kb.getData('blankBody', 'blankBody'))
        0
        '''
        # Report only GET/POST responses with an empty body, skipping status
        # codes where an empty body is expected (401, 304, 204) and redirects
        # (any response carrying a "location" header).
        if response.getBody() == '' and request.getMethod() in ['GET', 'POST']\
        and response.getCode() not in [401, 304, 204] and 'location' not in response.getLowerCaseHeaders()\
        and response.getURL() not in self._already_reported:
            
            #   report these informations only once
            self._already_reported.add( response.getURL() )
            
            #   append the info object to the KB.
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('Blank body')
            i.setURL( response.getURL() )
            i.setId( response.id )
            msg = 'The URL: "'+ response.getURL()  + '" returned an empty body. '
            msg += 'This could indicate an error.'
            i.setDesc(msg)
            kb.kb.append( self, 'blankBody', i )
        
    def setOptions( self, OptionList ):
        '''
        Nothing to do here, no options.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''    
        ol = optionList()
        return ol
        
    def end(self):
#.........这里部分代码省略.........
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:103,代码来源:blankBody.py

示例7: metaTags

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class metaTags(baseGrepPlugin):
    '''
    Grep every page for interesting meta tags.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)
        
        # NOTE(review): _comments and _search404 are not used by any method
        # visible in this chunk; they may belong to methods omitted here.
        self._comments = {}
        self._search404 = False
        
        # Words that make a META tag interesting. A non-None value is a
        # human-readable explanation of what the tag is used for (see the
        # note on "verify-v1" below).
        self._interesting_words = {'user':None, 'pass':None, 'microsoft':None,
        'visual':None, 'linux':None, 'source':None, 'author':None, 'release':None,
        'version':None, 'verify-v1':'Google Sitemap' }
        # URIs already inspected, so each one is processed only once.
        self._already_inspected = ScalableBloomFilter()
        
        '''
        Can someone explain what this meta tag does?
        <meta name="verify-v1" content="/JBoXnwT1d7TbbWCwL8tXe+Ts2I2LXYrdnnK50g7kdY=" /> 
        
        Answer:
        That's one of the verification elements used by Google Sitemaps. When you sign up
        for Sitemaps you have to add that element to a root page to demonstrate to Google that
        you're the site owner. So there is probably a Sitemaps account for the site, if you 
        haven't found it already. 
        '''
        
    def grep(self, request, response):
        '''
        Plugin entry point, search for meta tags.

        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        uri = response.getURI()
        
        # Only look at real (non-404) HTML/text responses, once per URI.
        if response.is_text_or_html() and not is_404( response ) and \
            uri not in self._already_inspected:

            self._already_inspected.add(uri)
            
            try:
                dp = dpCache.dpc.getDocumentParserFor( response )
            except w3afException:
                # No suitable parser for this response; nothing to grep.
                pass
            else:
                meta_tag_list = dp.getMetaTags()
                
                # Each tag is iterated as (attr_name, attr_value) pairs:
                # attr[0] is the attribute name, attr[1] its value
                # (same convention as _find_name below).
                for tag in meta_tag_list:
                    name = self._find_name( tag )
                    for attr in tag:
                        for word in self._interesting_words:

                            # Check if we have something interesting
                            # and WHERE that thing actually is
                            where = value = None
                            if ( word in attr[0].lower() ):
                                where = 'name'
                                value = attr[0].lower()
                            elif ( word in attr[1].lower() ):
                                where = 'value'
                                value = attr[1].lower()
                            
                            # Now... if we found something, report it =)
                            if where:
                                # The atribute is interesting!
                                i = info.info()
                                i.setPluginName(self.getName())
                                i.setName('Interesting META tag')
                                i.setURI( response.getURI() )
                                i.setId( response.id )
                                msg = 'The URI: "' +  i.getURI() + '" sent a META tag with '
                                msg += 'attribute '+ where +' "'+ value +'" which'
                                msg += ' looks interesting.'
                                i.addToHighlight( where, value )
                                if self._interesting_words.get(name, None):
                                    msg += ' The tag is used for '
                                    msg += self._interesting_words[name] + '.'
                                i.setDesc( msg )
                                kb.kb.append( self , 'metaTags' , i )

                            else:
                                # The attribute is not interesting
                                pass
    
    def _find_name( self, tag ):
        '''
        Find the value of the "name" attribute of a meta tag.

        @parameter tag: An iterable of (attribute_name, attribute_value) pairs.
        @return: the tag name, or '' when no "name" attribute exists.
        '''
        for attr in tag:
            if attr[0].lower() == 'name':
                return attr[1]
        return ''
        
    def setOptions( self, optionsMap ):
        # Only the search404 option is consumed here.
        self._search404 = optionsMap['search404'].getValue()
    
    
#.........这里部分代码省略.........
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:103,代码来源:metaTags.py

示例8: phpEggs

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]

#.........这里部分代码省略.........
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.5-3"] = [ 
                ("b7e4385bd7f07e378d92485b4722c169", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("0152ed695f4291488741d98ba066d280", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.6"] = [ 
                ("bbd44c20d561a0fc5a4aa76093d5400f", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.6RC4-pl0-gentoo"] = [ 
                ("d03b2481f60d9e64cb5c0f4bd0c87ec1", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB['5.2.8-pl1-gentoo'] = [
                ('c48b07899917dfb5d591032007041ae3', 'PHP Logo'), 
                ('40410284d460552a6c9e10c1f5ae7223', 'PHP Credits'), 
                ('50caaf268b4f3d260d720a1a29c5fe21', 'PHP Logo 2'), 
                ('7675f1d01c927f9e6a4752cf182345a2', 'Zend Logo')]
        
    def discover(self, fuzzableRequest ):
        '''
        Nothing strange, just do some GET requests to the eggs and analyze the response.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        @raise w3afRunOnce: When the plugin has already reported its findings
                            and must be removed from the discovery loop.
        '''
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be runned.
            raise w3afRunOnce()
        else:
            # Get the extension of the URL (.html, .php, .. etc)
            ext = urlParser.getExtension( fuzzableRequest.getURL() )
            
            # Only perform this analysis if we haven't already analyzed this type of extension
            # OR if we get an URL like http://f00b5r/4/     (Note that it has no extension)
            # This logic will perform some extra tests... but we won't miss some special cases
            # Also, we aren't doing something like "if 'php' in ext:" because we never depend
            # on something so changable as extensions to make decisions.
            if ext == '' or ext not in self._already_analyzed_ext:
                
                # Init some internal variables
                GET_results = []
                # NOTE(review): original_response is only referenced by the
                # commented-out relative_distance heuristic below — confirm it
                # is still needed.
                original_response = self._urlOpener.GET( fuzzableRequest.getURL(), useCache=True )
                
                # Perform the GET requests to see if we have a phpegg
                for egg, egg_desc in self._get_eggs():
                    egg_URL = urlParser.uri2url( fuzzableRequest.getURL() ) + egg
                    try:
                        response = self._urlOpener.GET( egg_URL, useCache=True )
                    except KeyboardInterrupt,e:
                        raise e
                    except w3afException, w3:
                        raise w3
                    else:
                        GET_results.append( (response, egg_desc, egg_URL) )
                        
                #
                #   Now I analyze if this is really a PHP eggs thing, or simply a response that
                #   changes a lot on each request. Before, I had something like this:
                #
                #       if relative_distance(original_response.getBody(), response.getBody()) < 0.1:
                #
                #   But I got some reports about false positives with this approach, so now I'm
                #   changing it to something a little bit more specific.
                images = 0
                not_images = 0
                for response, egg_desc, egg_URL in GET_results:
                    if 'image' in response.getContentType():
                        images += 1
                    else:
                        not_images += 1
                
                # The egg list is expected to contain exactly four entries:
                # three image eggs (logos) plus the non-image PHP Credits page
                # — presumably matching the four-tuple entries in _egg_DB;
                # confirm against _get_eggs().
                if images == 3 and not_images == 1:
                    #
                    #   The remote web server has expose_php = On. Report all the findings.
                    #
                    for response, egg_desc, egg_URL in GET_results:
                        i = info.info()
                        i.setPluginName(self.getName())
                        i.setName('PHP Egg - ' + egg_desc)
                        i.setURL( egg_URL )
                        desc = 'The PHP framework running on the remote server has a "'
                        desc += egg_desc +'" easter egg, access to the PHP egg is possible'
                        desc += ' through the URL: "'+  egg_URL + '".'
                        i.setDesc( desc )
                        kb.kb.append( self, 'eggs', i )
                        om.out.information( i.getDesc() )
                        
                        #   Only run once.
                        # NOTE(review): the run-once flag is only cleared when
                        # eggs were actually found; otherwise the plugin keeps
                        # running for new extensions — confirm intended.
                        self._exec = False
                
                    # analyze the info to see if we can identify the version
                    self._analyze_egg( GET_results )
                
                # Now we save the extension as one of the already analyzed
                if ext != '':
                    self._already_analyzed_ext.add(ext)
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:104,代码来源:phpEggs.py

示例9: allowedMethods

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class allowedMethods(baseDiscoveryPlugin):
    '''
    Enumerate the allowed methods of an URL.
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''
        Initialize the plugin state and build the list of HTTP methods to probe.
        '''
        baseDiscoveryPlugin.__init__(self)

        # Internal variables
        self._exec = True
        self._already_tested = ScalableBloomFilter()
        # Response codes that indicate the method is NOT usable on the server
        self._bad_codes = [ httpConstants.UNAUTHORIZED, httpConstants.NOT_IMPLEMENTED,
                                    httpConstants.METHOD_NOT_ALLOWED, httpConstants.FORBIDDEN]
        
        # Methods
        self._dav_methods = [ 'DELETE', 'PROPFIND', 'PROPPATCH', 'COPY', 'MOVE', 'LOCK', 
                                        'UNLOCK', 'MKCOL']
        self._common_methods = [ 'OPTIONS', 'GET', 'HEAD', 'POST', 'TRACE', 'PUT']
        self._uncommon_methods = ['*', 'SUBSCRIPTIONS', 'NOTIFY', 'DEBUG', 'TRACK', 'POLL', 'PIN', 
                                                    'INVOKE', 'SUBSCRIBE', 'UNSUBSCRIBE']
        
        # Methods taken from http://www.w3.org/Protocols/HTTP/Methods.html 
        self._proposed_methods = [ 'CHECKOUT', 'SHOWMETHOD', 'LINK', 'UNLINK', 'CHECKIN', 
                                                'TEXTSEARCH', 'SPACEJUMP', 'SEARCH', 'REPLY']
        self._extra_methods = [ 'CONNECT', 'RMDIR', 'MKDIR', 'REPORT', 'ACL', 'DELETE', 'INDEX', 
                                        'LABEL', 'INVALID']
        self._version_control = [ 'VERSION_CONTROL', 'CHECKIN', 'UNCHECKOUT', 'PATCH', 'MERGE', 
                                            'MKWORKSPACE', 'MKACTIVITY', 'BASELINE_CONTROL']       
        
        # Bugfix: 'DELETE' and 'CHECKIN' appear in two of the lists above, so the
        # plain concatenation used before probed those methods twice. Build the
        # combined list without duplicates, preserving the original ordering.
        all_methods = self._dav_methods + self._common_methods + self._uncommon_methods
        all_methods += self._proposed_methods + self._extra_methods
        all_methods += self._version_control
        seen = set()
        self._supported_methods = []
        for method in all_methods:
            if method not in seen:
                seen.add(method)
                self._supported_methods.append(method)

        # User configured variables
        self._exec_one_time = True
        self._report_dav_only = True
        
    def discover(self, fuzzableRequest):
        '''
        Uses several technics to try to find out what methods are allowed for an URL.

        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                    (among other things) the URL to test.
        @return: An empty list; findings are stored in the knowledge base.
        '''
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be runned.
            raise w3afRunOnce()

        # When configured to run a single time, disable any further execution.
        if self._exec_one_time:
            self._exec = False

        target = urlParser.getDomainPath(fuzzableRequest.getURL())
        if target not in self._already_tested:
            self._already_tested.add(target)
            self._check_methods(target)

        return []
    
    def _check_methods( self, url ):
        '''
        Find out what methods are allowed.
        @parameter url: Where to check.
        '''
        allowed_methods = []
        with_options = False
        id_list = []
        
        # First, try to check available methods using OPTIONS, if OPTIONS isn't 
        # enabled, do it manually
        res = self._urlOpener.OPTIONS( url )
        headers = res.getLowerCaseHeaders()
        for header_name in ['allow', 'public']:
            if header_name in headers:
                allowed_methods.extend( headers[header_name].split(',') )
                allowed_methods = [ x.strip() for x in allowed_methods ]
                with_options = True
                allowed_methods = list(set(allowed_methods))

        # Save the ID for later
        if with_options:
            id_list.append( res.id )

        else:
            #
            #   Before doing anything else, I'll send a request with a non-existant method
            #   If that request succeds, then all will...
            #
            try:
                non_exist_response = self._urlOpener.ARGENTINA( url )
                get_response = self._urlOpener.GET( url )
            except:
                pass
            else:
                if non_exist_response.getCode() not in self._bad_codes\
                and get_response.getBody() == non_exist_response.getBody():
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName( 'Non existent methods default to GET' )
#.........这里部分代码省略.........
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:103,代码来源:allowedMethods.py

示例10: ajax

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]

#.........这里部分代码省略.........
        >>> a.grep(request, response)
        >>> assert len(kb.kb.getData('ajax', 'ajax')) == 1

        Discover ajax with a broken script, head and html tags.
        >>> kb.kb.save('ajax','ajax',[])
        >>> body = '<html><head><script>xhr = new XMLHttpRequest(); xhr.open(GET, "data.txt",  true);'
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> a = ajax()
        >>> a.grep(request, response)
        >>> assert len(kb.kb.getData('ajax', 'ajax')) == 1

        Another ajax function, no broken html.
        >>> kb.kb.save('ajax','ajax',[])
        >>> body = '<html><head><script> ... xhr = new ActiveXObject("Microsoft.XMLHTTP"); ... </script></head><html>'
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> a = ajax()
        >>> a.grep(request, response)
        >>> assert len(kb.kb.getData('ajax', 'ajax')) == 1

        Two functions, I only want one report for this page.
        >>> kb.kb.save('ajax','ajax',[])
        >>> body = '<script> ... xhr = new XMLHttpRequest(); ... xhr = new ActiveXObject("Microsoft.XMLHTTP"); ... </script>'
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> a = ajax()
        >>> a.grep(request, response)
        >>> len(kb.kb.getData('ajax', 'ajax'))
        1
        '''
        url = response.getURL()
        if response.is_text_or_html() and url not in self._already_inspected:
            
            # Don't repeat URLs
            self._already_inspected.add(url)
            
            dom = response.getDOM()
            # In some strange cases, we fail to normalize the document
            if dom is not None:

                script_elements = dom.xpath('.//script')
                for element in script_elements:
                    # returns the text between <script> and </script>
                    script_content = element.text
                    
                    if script_content is not None:
                        
                        res = self._ajax_regex_re.search(script_content)
                        if res:
                            i = info.info()
                            i.setPluginName(self.getName())
                            i.setName('AJAX code')
                            i.setURL(url)
                            i.setDesc('The URL: "%s" has an AJAX code.' % url)
                            i.setId(response.id)
                            i.addToHighlight(res.group(0))
                            kb.kb.append(self, 'ajax', i)

    
    def setOptions( self, OptionList ):
        '''
        This plugin has no user configurable options, so there is nothing to set.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin. The list is empty
                 because this plugin has no user configurable options.
        '''    
        ol = optionList()
        return ol
        
    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        # Print the AJAX findings, grouping/uniquifying them by URL.
        self.printUniq( kb.kb.getData( 'ajax', 'ajax' ), 'URL' )

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one. This plugin has no dependencies, so the list is empty.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:104,代码来源:ajax.py

示例11: strangeParameters

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class strangeParameters(baseGrepPlugin):
    '''
    Grep the HTML response and find URIs that have strange parameters.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)
        
        # Internal variables
        # Bloom filter holding the references that were already reported, used
        # to avoid creating duplicate findings for the same URI.
        self._already_reported = ScalableBloomFilter()
        
    def grep(self, request, response):
        '''
        Plugin entry point. Inspect the references found in the response and
        report parameters whose value looks strange or looks like a SQL sentence.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None, all results are saved in the kb.
        '''
        try:
            dp = dpCache.dpc.getDocumentParserFor( response )
        except w3afException:
            # No suitable document parser for this response; nothing to analyze.
            pass
        else:
            # Note:
            # - With parsed_references I'm 100% that it's really something in the HTML
            # that the developer intended to add.
            #
            # - The re_references are the result of regular expressions, which in some cases
            # are just false positives.
            parsed_references, re_references = dp.getReferences()
            
            for ref in parsed_references:
                
                qs = urlParser.getQueryString( ref )
                
                for param_name in qs:
                    # This for loop is to address the repeated parameter name issue
                    for element_index in xrange(len(qs[param_name])):
                        param_value = qs[param_name][element_index]

                        if self._is_strange( request, param_name, param_value )\
                        and ref not in self._already_reported:
                            # Don't repeat findings
                            self._already_reported.add(ref)

                            i = info.info()
                            i.setPluginName(self.getName())
                            i.setName('Strange parameter')
                            i.setURI( ref )
                            i.setId( response.id )
                            msg = 'The URI: "' +  i.getURI() + '" has a parameter named: "' + param_name
                            msg += '" with value: "' + param_value + '", which is quite odd.'
                            i.setDesc( msg )
                            i.setVar( param_name )
                            i['parameterValue'] = param_value
                            i.addToHighlight(param_value)

                            kb.kb.append( self , 'strangeParameters' , i )
                            
                        # To find this kind of vulns
                        # http://thedailywtf.com/Articles/Oklahoma-
                        # Leaks-Tens-of-Thousands-of-Social-Security-Numbers,-Other-
                        # Sensitive-Data.aspx
                        if self._is_SQL( request, param_name, param_value )\
                        and ref not in self._already_reported:
                            
                            # Don't repeat findings
                            self._already_reported.add(ref)
                            
                            v = vuln.vuln()
                            v.setPluginName(self.getName())
                            v.setName('Parameter has SQL sentence')
                            v.setURI( ref )
                            v.setId( response.id )
                            msg = 'The URI: "' +  v.getURI() + '" has a parameter named: "' + param_name
                            msg +='" with value: "' + param_value + '", which is a SQL sentence.'
                            v.setDesc( msg )
                            v.setVar( param_name )
                            v['parameterValue'] = param_value
                            # Bugfix: highlight on the vuln object "v". The original
                            # code called i.addToHighlight() here, which raised a
                            # NameError whenever the "strange parameter" branch above
                            # had not run (and otherwise highlighted the wrong object).
                            v.addToHighlight(param_value)
                            kb.kb.append( self , 'strangeParameters' , v )
    
    def setOptions( self, OptionList ):
        '''
        This plugin has no user configurable options, so there is nothing to set.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin. The list is empty
                 because this plugin has no user configurable options.
        '''    
        ol = optionList()
        return ol

    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        # Print the findings, grouping/uniquifying them by the affected variable name.
        self.printUniq( kb.kb.getData( 'strangeParameters', 'strangeParameters' ), 'VAR' )

    def _is_SQL(self, request, parameter, value):
#.........这里部分代码省略.........
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:103,代码来源:strangeParameters.py

示例12: pykto

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class pykto(baseDiscoveryPlugin):
    '''
    A nikto port to python. 
    @author: Andres Riancho ( [email protected] )  
    '''

    def __init__(self):
        '''
        Set up the plugin's internal state and the user configurable defaults.
        '''
        baseDiscoveryPlugin.__init__(self)
        
        # internal variables
        self._exec = True
        self._already_visited = ScalableBloomFilter()
        self._first_time = True
        self._show_remote_server = True
        
        # User configured parameters
        # Both scan databases live in the plugin's own directory.
        pykto_dir = 'plugins' + os.path.sep + 'discovery' + os.path.sep + 'pykto'
        self._db_file = pykto_dir + os.path.sep + 'scan_database.db'
        self._extra_db_file = pykto_dir + os.path.sep + 'w3af_scan_database.db'
        
        self._cgi_dirs = ['/cgi-bin/']
        self._admin_dirs = ['/admin/', '/adm/']
        self._users = ['adm', 'bin', 'daemon', 'ftp', 'guest', 'listen', 'lp',
                       'mysql', 'noaccess', 'nobody', 'nobody4', 'nuucp', 'operator',
                       'root', 'smmsp', 'smtp', 'sshd', 'sys', 'test', 'unknown']
        self._nuke = ['/', '/postnuke/', '/postnuke/html/', '/modules/', '/phpBB/', '/forum/']

        self._mutate_tests = False
        self._generic_scan = False
        self._update_scandb = False
        self._source = ''
        
    def discover(self, fuzzableRequest ):
        '''
        Runs pykto to the site.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                                      (among other things) the URL to test.
        @return: The list of new fuzzable requests found during this run.
        '''
        self._new_fuzzable_requests = []
        
        if not self._exec:
            # dont run anymore
            raise w3afRunOnce()
            
        else:
            # run!
            if self._update_scandb:
                self._update_db()
            
            # Run the basic scan (only once)
            if self._first_time:
                self._first_time = False
                url = urlParser.baseUrl( fuzzableRequest.getURL() )
                # Assume we are done after this run; re-enabled below when
                # mutations are configured.
                self._exec = False
                self.__run( url )
            
            # And now mutate if the user configured it...
            if self._mutate_tests:
                
                # If mutations are enabled, I should keep running
                self._exec = True
                
                # Tests are to be mutated
                url = urlParser.getDomainPath( fuzzableRequest.getURL() )
                if url not in self._already_visited:
                    # Save the directories I already have tested
                    self._already_visited.add( url )
                    self.__run( url )

        return self._new_fuzzable_requests
                
    def __run( self, url ):
        '''
        Really run the plugin.
        
        @parameter url: The URL I have to test.
        '''
        try:
            # read the nikto database.
            db_file_1 = open(self._db_file, "r")
            # read the w3af scan database.
            db_file_2 = open(self._extra_db_file, "r")
        except Exception, e:
            raise w3afException('Failed to open the scan databases. Exception: "' + str(e) + '".')
        else:
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:90,代码来源:pykto.py

示例13: feeds

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class feeds(baseGrepPlugin):
    '''
    Grep every page and finds rss, atom, opml feeds.
      
    @author: Andres Riancho ( [email protected] )
    '''
    
    def __init__(self):
        baseGrepPlugin.__init__(self)
        # (tag name, version attribute, feed type label) triplets used to
        # detect feed documents by their root tag.
        # NOTE(review): <feed> is the Atom root element, yet it is labeled
        # 'OPML' here — looks like a copy/paste slip; confirm upstream.
        self._rss_tag_attr = [('rss', 'version', 'RSS'),# <rss version="...">
                              ('feed', 'version', 'OPML'),# <feed version="..."
                              ('opml', 'version', 'OPML') # <opml version="...">
                              ]
        # URIs that were already analyzed, to avoid duplicated findings
        self._already_inspected = ScalableBloomFilter()
                
    def grep(self, request, response):
        '''
        Plugin entry point, find feeds.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        uri = response.getURI()
        dom = response.getDOM()

        # In some strange cases, we fail to normalize the document
        if dom is None or uri in self._already_inspected:
            return

        self._already_inspected.add(uri)

        for tag_name, attr_name, feed_type in self._rss_tag_attr:

            # Find all tags with tag_name
            for element in dom.xpath('//%s' % tag_name):

                if attr_name not in element.attrib:
                    continue

                version = element.attrib[attr_name]
                feed_info = info.info()
                feed_info.setPluginName(self.getName())
                feed_info.setName(feed_type + ' feed')
                feed_info.setURI(uri)
                msg = 'The URL: "' + uri + '" is a ' + feed_type + ' version "'
                msg += version + '" feed.'
                feed_info.setDesc(msg)
                feed_info.setId(response.id)
                feed_info.addToHighlight(feed_type)
                kb.kb.append(self, 'feeds', feed_info)
    
    def setOptions( self, OptionList ):
        '''
        This plugin has no user configurable options, so there is nothing to set.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin. The list is empty
                 because this plugin has no user configurable options.
        '''    
        ol = optionList()
        return ol

    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        # Print the feed findings, grouping/uniquifying them by URL.
        self.printUniq( kb.kb.getData( 'feeds', 'feeds' ), 'URL' )

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one. This plugin has no dependencies, so the list is empty.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:82,代码来源:feeds.py

示例14: archiveDotOrg

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]

#.........这里部分代码省略.........
                url = url[url.index('http', 1):]
            except Exception:
                pass
            else:
                real_URLs.append( url )
        real_URLs = list(set(real_URLs))
        
        if len( real_URLs ):
            om.out.debug('Archive.org cached the following pages:')
            for i in real_URLs:
                om.out.debug('- ' + i )
        else:
            om.out.debug('Archive.org did not find any pages.')
        
        # Verify if they exist in the target site and add them to the result if they do.
        for real_url in real_URLs:
            if self._exists_in_target( real_url ):
                QSObject = urlParser.getQueryString( real_url )
                qsr = httpQsRequest()
                qsr.setURI( real_url )
                qsr.setDc( QSObject )
                res.append( qsr )

        if len( res ):
            msg = 'The following pages are in Archive.org cache and also in'
            msg += ' the target site:'
            om.out.debug(msg)
            for i in res:
                om.out.debug('- ' + i.getURI() )
        else:
            om.out.debug('All pages found in archive.org cache are missing in the target site.')
            
        return res
    
    def _spider_archive( self, url_list, max_depth, domain ):
        '''
        Perform a classic web spidering process over the archive.org cache.
        
        @parameter url_list: The list of URL strings
        @parameter max_depth: The max link depth that we have to follow.
        @parameter domain: The domain name we are checking
        @return: The list of archive.org cache URLs found for the target domain.
        '''
        # Matches archive.org cached copies of pages that belong to the target
        # domain. Bugfix: re.escape() makes the dots in the domain match
        # literally instead of acting as the "any character" regex operator.
        url_regex = 'http://web\\.archive\\.org/web/.*/http[s]?://' + re.escape(domain) + '/.*'

        res = []
        depth_limit_hit = False

        for url in url_list:
            if url in self._already_visited:
                continue
            self._already_visited.add( url )

            try:
                http_response = self._urlOpener.GET( url, useCache=True )
            except Exception:
                # Best effort: a single failed request must not abort the spidering.
                continue

            # Get the references
            try:
                document_parser = dpc.getDocumentParserFor( http_response )
            except w3afException:
                # Failed to find a suitable document parser
                continue

            # Note:
            # - With parsed_references I'm 100% that it's really something in the HTML
            # that the developer intended to add.
            #
            # - The re_references are the result of regular expressions, which in some cases
            # are just false positives.
            parsed_references, re_references = document_parser.getReferences()

            # Filter the ones I want
            new_urls = [ u for u in parsed_references if re.match(url_regex, u) ]
            res.extend( new_urls )

            # Go recursive while there is depth budget left.
            # Bugfix: the original code returned from *inside* the loop when the
            # depth limit was reached, silently dropping the remaining URLs in
            # url_list and the results accumulated so far.
            if max_depth - 1 > 0:
                res.extend( self._spider_archive( new_urls, max_depth - 1, domain ) )
            else:
                depth_limit_hit = True

        if depth_limit_hit:
            msg = 'Some sections of the archive.org site were not analyzed because'
            msg += ' of the configured max_depth.'
            om.out.debug(msg)

        return res
    
    def _exists_in_target( self, url ):
        '''
        Check if a resource still exists in the target web site.
        
        @parameter url: The resource.
        '''
        res = False
        
        try:
            response = self._urlOpener.GET( url, useCache=True )
        except KeyboardInterrupt,e:
            raise e
        except w3afException,e:
            pass
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:104,代码来源:archiveDotOrg.py

示例15: dotNetEventValidation

# 需要导入模块: from core.data.bloomfilter.pybloom import ScalableBloomFilter [as 别名]
# 或者: from core.data.bloomfilter.pybloom.ScalableBloomFilter import add [as 别名]
class dotNetEventValidation(baseGrepPlugin):
    '''
    Grep every page and identify the ones that have viewstate and don't have event validation.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''
        Compile the regular expressions used to detect the __VIEWSTATE,
        __EVENTVALIDATION and __VIEWSTATEENCRYPTED hidden form fields.
        '''
        baseGrepPlugin.__init__(self)

        flags = re.IGNORECASE | re.DOTALL
        self._viewstate = re.compile(
            r'<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value=".*?" />',
            flags)
        self._eventvalidation = re.compile(
            r'<input type="hidden" name="__EVENTVALIDATION" '
            r'id="__EVENTVALIDATION" value=".*?" />',
            flags)
        self._encryptedVs = re.compile(
            r'<input type="hidden" name="__VIEWSTATEENCRYPTED" '
            r'id="__VIEWSTATEENCRYPTED" value=".*?" />',
            flags)

        # URIs that were already analyzed, to avoid duplicated findings
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        If I find __VIEWSTATE and empty __EVENTVALIDATION => vuln.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        '''
        if not response.is_text_or_html():
            return

        # First verify if we havent greped this yet
        uri = request.getURI()
        if uri in self._already_reported:
            return
        self._already_reported.add(uri)

        body = response.getBody()
        viewstate_mo = self._viewstate.search(body)
        if not viewstate_mo:
            return

        # I have __viewstate!, verify if event validation is enabled
        if not self._eventvalidation.search(body):
            # Nice! We found a possible bug =)
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('.NET Event Validation is disabled')
            i.setURL( response.getURL() )
            i.setId( response.id )
            i.addToHighlight(viewstate_mo.group())
            msg = 'The URL: "' + i.getURL() + '" has .NET Event Validation disabled. '
            msg += 'This programming/configuration error should be manually verified.'
            i.setDesc( msg )
            kb.kb.append( self, 'dotNetEventValidation', i )

        if not self._encryptedVs.search(body):
            # Nice! We can decode the viewstate! =)
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('.NET ViewState encryption is disabled')
            i.setURL( response.getURL() )
            i.setId( response.id )
            msg = 'The URL: "' + i.getURL() + '" has .NET ViewState encryption disabled. '
            msg += 'This programming/configuration error could be exploited '
            msg += 'to decode the viewstate contents.'
            i.setDesc( msg )
            kb.kb.append( self, 'dotNetEventValidation', i )

    
    def setOptions( self, OptionList ):
        '''
        Do nothing, I don't have any options; OptionList is ignored.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin. The list is empty
                 because this plugin has no user configurable options.
        '''    
        ol = optionList()
        return ol

    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        # Print alerts, grouping/uniquifying the findings by URL.
        self.printUniq( kb.kb.getData( 'dotNetEventValidation', 'dotNetEventValidation' ), 'URL' )
        
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one. This plugin has no dependencies, so the list is empty.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
开发者ID:DavisHevin,项目名称:sqli_benchmark,代码行数:102,代码来源:dotNetEventValidation.py


注:本文中的core.data.bloomfilter.pybloom.ScalableBloomFilter.add方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。