本文整理汇总了Python中golismero.api.text.wordlist.WordListLoader类的典型用法代码示例。如果您正苦于以下问题:Python WordListLoader类的具体用法?Python WordListLoader怎么用?Python WordListLoader使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了WordListLoader类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: recv_info
def recv_info(self, info):
# Analyze one URL resource: flag suspicious path components and
# high-entropy (random-looking) hostname labels, then fetch the page
# to scan it for links to known malware sites.
# NOTE(review): this listing appears truncated -- m_results is built but
# never returned or used after the download call; confirm against the
# original plugin source.
m_parsed_url = info.parsed_url
m_results = []
#------------------------------------------------------------------
# Find suspicious URLs by matching against known substrings.
# Load wordlists
m_wordlist_middle = WordListLoader.get_wordlist(Config.plugin_config['middle'])
m_wordlist_extensions = WordListLoader.get_wordlist(Config.plugin_config['extensions'])
# Add matching keywords at any positions of URL.
# A "middle" keyword matches a directory component, the file base
# name, or the extension.
m_results.extend([SuspiciousURLPath(info, x)
for x in m_wordlist_middle
if x in m_parsed_url.directory.split("/") or
x == m_parsed_url.filebase or
x == m_parsed_url.extension])
# Add matching keywords at any positions of URL.
# (this second pass checks the extensions wordlist against the
# file extension only)
m_results.extend([SuspiciousURLPath(info, x)
for x in m_wordlist_extensions
if m_parsed_url.extension == x])
#------------------------------------------------------------------
# Find suspicious URLs by calculating the Shannon entropy of the hostname.
# Idea from: https://github.com/stricaud/urlweirdos/blob/master/src/urlw/plugins/shannon/__init__.py
# TODO: test with unicode enabled hostnames!
# Check the Shannon entropy for the hostname.
# entropy > 4.0 is the heuristic threshold for "random-looking" names.
hostname = info.parsed_url.hostname
entropy = calculate_shannon_entropy(hostname)
if entropy > 4.0:
m_results.append( SuspiciousURLPath(info, hostname) )
# Check the Shannon entropy for the subdomains.
# Labels of 3 characters or less (e.g. "www") are skipped: entropy
# on very short strings is too noisy.
for subdomain in info.parsed_url.hostname.split('.'):
if len(subdomain) > 3:
entropy = calculate_shannon_entropy(subdomain)
if entropy > 4.0:
m_results.append( SuspiciousURLPath(info, subdomain) )
#------------------------------------------------------------------
#
#
#
# Get malware suspicious links
#
#
#
#------------------------------------------------------------------
p = None
m_url = info.url
Logger.log_more_verbose("Looking for output links to malware sites")
try:
# Honor the audit's redirect policy: always follow, or follow only
# the first redirect when this is a depth-0 (entry point) target.
allow_redirects = Config.audit_config.follow_redirects or \
(info.depth == 0 and Config.audit_config.follow_first_redirect)
p = download(m_url, self.check_download, allow_redirects=allow_redirects)
except NetworkException,e:
Logger.log_more_verbose("Error while processing %r: %s" % (m_url, str(e)))
示例2: test_all_wordlist_property
def test_all_wordlist_property(self):
    """all_wordlists must list exactly the wordlists loaded from W_DIR."""
    # Point the plugin path at the test wordlist directory.
    LocalFile._LocalFile__plugin_path = os.path.abspath(W_DIR)

    # Generate the plugin metadata files on disk.
    _create_plugin_info()

    # Reset the private store, then reload wordlists from W_DIR.
    WordListLoader._WordListLoader__store = {}
    WordListLoader._WordListLoader__load_wordlists(W_DIR)

    try:
        assert WordListLoader.all_wordlists == ["test_wordlist.txt"]
    finally:
        # Always remove the generated plugin files, even on failure.
        _destroy_plugin_info()
示例3: load_wordlists
def load_wordlists(wordlists):
    """
    Load the wordlists whose names are passed as parameter.

    This function receives a list of wordlist names, as defined in the
    plugin configuration file, and returns a dict mapping each requested
    name to its loaded wordlist instances.

    :param wordlists: list with wordlists names
    :type wordlists: list

    :returns: A dict with wordlists
    :rtype: dict
    """
    m_tmp_wordlist = {}

    # Match each requested name against the configured wordlist families.
    for l_w in wordlists:
        for wordlist_family, l_wordlists in Config.plugin_extra_config.iteritems():
            if wordlist_family.lower() in l_w.lower():
                m_tmp_wordlist[l_w] = l_wordlists

    # Load every wordlist path of each matched family.
    m_return = {}
    for k, w_paths in m_tmp_wordlist.iteritems():
        m_return[k] = [WordListLoader.get_wordlist(w) for w in w_paths]

    return m_return
示例4: analyze_url
def analyze_url(self, info):
    """
    Flag suspicious components of a URL and random-looking hostnames.

    :param info: URL resource to analyze.
    :return: list of SuspiciousURLPath results found.
    """
    parsed = info.parsed_url
    results = []
    Logger.log_more_verbose("Processing URL: %s" % parsed)

    #----------------------------------------------------------------------
    # Match the URL against the known suspicious substring wordlists.
    middle_words = WordListLoader.get_wordlist(Config.plugin_config['middle'])
    extension_words = WordListLoader.get_wordlist(Config.plugin_config['extensions'])

    # A "middle" keyword may appear as a directory component, the file
    # base name, or the extension.
    path_parts = parsed.directory.split("/")
    for word in middle_words:
        if word in path_parts or word == parsed.filebase or word == parsed.extension:
            results.append(SuspiciousURLPath(info, word))

    # "Extension" keywords only match the file extension.
    for word in extension_words:
        if parsed.extension == word:
            results.append(SuspiciousURLPath(info, word))

    #----------------------------------------------------------------------
    # Flag random-looking hostnames via Shannon entropy (> 4.0 heuristic).
    # Idea from: https://github.com/stricaud/urlweirdos/blob/master/src/urlw/plugins/shannon/__init__.py
    # TODO: test with unicode enabled hostnames!
    hostname = info.parsed_url.hostname
    if calculate_shannon_entropy(hostname) > 4.0:
        results.append(SuspiciousURLPath(info, hostname))

    # Repeat the entropy check per subdomain label, skipping very short
    # labels (e.g. "www") whose entropy would be noise.
    for label in info.parsed_url.hostname.split('.'):
        if len(label) > 3 and calculate_shannon_entropy(label) > 4.0:
            results.append(SuspiciousURLPath(info, label))

    return results
示例5: __detect_wordpress_installation
def __detect_wordpress_installation(self, url, wordpress_urls):
    """
    Try to detect a wordpress instalation in the current path.

    :param url: URL where try to find the WordPress installation.
    :type url: str

    :param wordpress_urls: string with wordlist name with WordPress URLs.
    :type wordpress_urls: str

    :return: True if wordpress installation found. False otherwise.
    :rtype: bool
    """
    Logger.log_more_verbose("Detecting Wordpress instalation in URI: '%s'." % url)
    total_urls = 0
    urls_found = 0

    # Baseline error page, used to detect custom "200 OK" error pages.
    error_page = get_error_page(url).raw_data

    for u in WordListLoader.get_wordlist(wordpress_urls):
        total_urls += 1
        tmp_url = urljoin(url, u)

        r = HTTP.get_url(tmp_url, use_cache=False)
        if r.status == "200":
            # Only count hits that differ enough from the error page.
            ratio = get_diff_ratio(r.raw_response, error_page)
            if ratio < 0.35:
                urls_found += 1
        discard_data(r)

    # If more than 85% of the well-known WordPress URLs answered, the
    # installation is considered detected.
    # BUGFIX: the original fell through and implicitly returned None here;
    # also guard against an empty wordlist (ZeroDivisionError).
    if total_urls and (urls_found / float(total_urls)) >= 0.85:
        return True

    # If all fails, make another last test: on a real WordPress site,
    # "wp-admin/" redirects to the login page.
    url_wp_admin = urljoin(url, "wp-admin/")
    p = None
    try:
        p = HTTP.get_url(url_wp_admin, use_cache=False, allow_redirects=False)
        if p:
            discard_data(p)
    except Exception:  # BUGFIX: obsolete "except Exception, e" syntax
        return False

    # BUGFIX: guard against a falsy response before reading attributes.
    if p and p.status == "302" and "wp-login.php?redirect_to=" in p.headers.get("Location", ""):
        return True
    else:
        return False
示例6: test__get_wordlist_descriptor_exits_in_plugin_path
def test__get_wordlist_descriptor_exits_in_plugin_path(self):
    """__get_wordlist_descriptor must resolve wordlists under the plugin path."""
    # Config plugin
    LocalFile._LocalFile__plugin_path = os.path.abspath(W_DIR)
    _create_plugin_info()
    try:
        wordlist_file = WordListLoader._WordListLoader__get_wordlist_descriptor(W_PATH)
        # BUGFIX: the original line was a no-op chained comparison
        # ("wordlist_file == wordlist_file == open(...)") that asserted
        # nothing and leaked a file handle. Compare the descriptor's
        # content against the file on disk instead.
        # NOTE(review): assumes the descriptor supports read() — confirm
        # against WordListLoader's implementation.
        with open(W_PATH, "rU") as expected:
            assert wordlist_file.read() == expected.read()
        # A non-file path must raise WordlistNotFound.
        pytest.raises(WordlistNotFound, WordListLoader._WordListLoader__get_wordlist_descriptor, "plugin_tmp_dir")
    finally:
        _destroy_plugin_info()
示例7: recv_info
def recv_info(self, info):
    """
    Check a CNAME DNS register against a wordlist of known malware
    domains and report any poisoned entries found.

    :param info: DNS register to check.
    :return: list of DNSPoisoning results, or None on skip/error.
    """
    # Only CNAME records are relevant. A plugin cannot ask for a specific
    # DNS register type — all types are received together — so filter here.
    if info.type != "CNAME":
        return

    root = info.target
    Logger.log_verbose(
        "Looking for poisoned domains at: *.%s" % root)

    # Load the malware URLs list, bailing out verbosely on failure.
    wordlist_filename = Config.plugin_args["wordlist"]
    try:
        wordlist = WordListLoader.get_advanced_wordlist_as_list(
            wordlist_filename)
    except WordlistNotFound:
        Logger.log_error_verbose(
            "Wordlist not found: " + wordlist_filename)
        return
    except TypeError:
        Logger.log_error_verbose(
            "Wordlist is not a file: " + wordlist_filename)
        return

    # Report every wordlist entry that matches the root domain.
    results = []
    for poisoned in {root}.intersection(set(wordlist)):
        vuln = DNSPoisoning(poisoned)
        vuln.add_information(info)
        results.append(vuln)

    # Log how many results we got.
    if results:
        Logger.log_verbose(
            "Discovered %s poisoned domains." % len(results))
    else:
        Logger.log_verbose("No poisoned domains found.")

    return results
示例8: get_list_from_wordlist
def get_list_from_wordlist(wordlist):
    """
    Load the content of the wordlist and return a set with the content.

    :param wordlist: wordlist name.
    :type wordlist: str

    :return: a set with the results.
    :rtype: set
    """
    try:
        # Merge every wordlist configured under this family name.
        m_commom_wordlists = set()
        for v in Config.plugin_extra_config[wordlist].itervalues():
            m_commom_wordlists.update(WordListLoader.get_advanced_wordlist_as_list(v))
        return m_commom_wordlists
    except KeyError as e:  # BUGFIX: obsolete "except KeyError,e" syntax
        # Wordlist family not present in the plugin configuration.
        Logger.log_error_more_verbose(str(e))
        return set()
示例9: recv_info
def recv_info(self, info):
    """
    Report poisoned domains: CNAME targets appearing in a wordlist of
    known malware domains.

    :param info: DNS register to check (only CNAME is processed).
    :return: list of DNSPoisoning results, or None when skipped.
    """
    # Defensive check: this plugin should only ever receive CNAMEs.
    if info.type != "CNAME":
        Logger.log_error_verbose("No CNAME found, skipped.")
        return

    root = info.target
    Logger.log_verbose("Looking for poisoned domains at: *.%s" % root)

    # Load the malware URLs list, bailing out verbosely on failure.
    wordlist_filename = Config.plugin_args["wordlist"]
    try:
        wordlist = WordListLoader.get_advanced_wordlist_as_list(wordlist_filename)
    except WordlistNotFound:
        Logger.log_error_verbose("Wordlist not found: " + wordlist_filename)
        return
    except TypeError:
        Logger.log_error_verbose("Wordlist is not a file: " + wordlist_filename)
        return

    # The root domain is poisoned when it appears in the wordlist.
    results = []
    if root in set(wordlist):
        v = DNSPoisoning(root)
        v.add_information(info)
        results.append(v)

    # Log the outcome.
    if results:
        Logger.log_verbose("Discovered %s poisoned domains." % len(results))
    else:
        Logger.log_verbose("No poisoned domains found.")

    return results
示例10: ttl_platform_detection
def ttl_platform_detection(self, main_url):
    """
    This function tries to recognize the remote platform doing a ping and
    analyzing the TTL of IP header response.

    :param main_url: Base url to test.
    :type main_url: str

    :return: Possible platforms as (OS, version) tuples; empty list when
             nothing could be detected or on error.
    :rtype: list(tuple(OS, version))
    """
    try:
        # Ping the host and read the TTL of the response.
        m_ttl = do_ping_and_receive_ttl(ParsedURL(main_url).hostname, 2)

        # Load the TTL -> platform wordlist.
        l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(
            Config.plugin_extra_config["Wordlist_ttl"]["ttl"])

        # Look up the platforms matching the observed TTL.
        l_matches = l_wordlist_instance.matches_by_value(m_ttl)
        if not l_matches:
            return []

        # Each match is "OS|version"; deduplicate by OS name.
        m_ret = {}
        for v in l_matches:
            sp = v.split("|")
            m_ret[sp[0].strip()] = sp[1].strip()
        return [(k, v) for k, v in m_ret.iteritems()]

    except EnvironmentError:
        # Raw ICMP sockets require root privileges.
        Logger.log_error("[!] You can't run the platform detection plugin if you're not root.")
        # BUGFIX: error paths returned {} although the documented return
        # type is a list; return [] (still falsy / empty-iterable).
        return []
    except Exception as e:  # BUGFIX: obsolete "except Exception, e" syntax
        Logger.log_error("[!] Platform detection failed, reason: %s" % e)
        return []
示例11: get_fingerprinting_wordlist
def get_fingerprinting_wordlist(wordlist):
    """
    Load the wordlist of fingerprints and prepare the info in a dict.

    Keys are web server family names; values are iterables with the
    keywords related to that web server.

    :return: The results of load of webservers keywords info and related webservers.
    :rtype: tuple(WEBSERVER_KEYWORDS, RELATED_SERVES) <=> (dict(SERVERNAME: set(str(KEYWORDS))), dict(SERVER_NAME, set(str(RELATED_SERVERS)))
    """
    # Load the raw fingerprint wordlist as a dict.
    raw_wordlist = WordListLoader.get_advanced_wordlist_as_dict(
        wordlist, separator=";", smart_load=True)

    # Resolve references between entries ("#"-prefixed in the wordlist),
    # collecting the related-servers map as a side product.
    seen = set()
    related_servers = defaultdict(set)
    server_keywords = extend_items(raw_wordlist, seen, related_servers)

    return (server_keywords, related_servers)
示例12: http_analyzers
#.........这里部分代码省略.........
# Update the status
update_status_func((float(i) * 100.0) / float(m_data_len))
Logger.log_more_verbose("Making '%s' test." % (l_wordlist))
i += 1
# Analyze for each wordlist
#
# Store the server banner
try:
m_banners_counter[l_response.headers["Server"]] += l_weight
except KeyError:
pass
#
# =====================
# HTTP directly related
# =====================
#
#
for l_http_header_name, l_header_wordlist in m_wordlists_HTTP_fields.iteritems():
# Check if HTTP header field is in response
if l_http_header_name not in l_response.headers:
continue
l_curr_header_value = l_response.headers[l_http_header_name]
# Generate concrete wordlist name
l_wordlist_path = Config.plugin_extra_config[l_wordlist][l_header_wordlist]
# Load words for the wordlist
l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(l_wordlist_path)
# Looking for matches
l_matches = l_wordlist_instance.matches_by_value(l_curr_header_value)
m_counters.inc(l_matches, l_action, l_weight, l_http_header_name, message="HTTP field: " + l_curr_header_value)
#
# =======================
# HTTP INdirectly related
# =======================
#
#
#
# Status code
# ===========
#
l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["statuscode"])
# Looking for matches
l_matches = l_wordlist_instance.matches_by_value(l_response.status)
m_counters.inc(l_matches, l_action, l_weight, "statuscode", message="Status code: " + l_response.status)
#
# Status text
# ===========
#
l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["statustext"])
# Looking for matches
l_matches = l_wordlist_instance.matches_by_value(l_response.reason)
m_counters.inc(l_matches, l_action, l_weight, "statustext", message="Status text: " + l_response.reason)
示例13: test__load_wordlists_input
def test__load_wordlists_input(self):
    """Reloading from the wordlist directory must populate the store."""
    # Reload the wordlists from the repository directory.
    WordListLoader._WordListLoader__load_wordlists("../../wordlist")
    # The private store must no longer be empty.
    assert len(WordListLoader._WordListLoader__store) != 0
示例14: recv_info
def recv_info(self, info):
    """
    Brute force predictable resource locations on a web server, choosing
    the wordlists according to the server's fingerprint when available.

    :param info: URL resource to test.
    :return: results from the matching analysis, or None when there is
             no information to analyze.
    """
    m_url = info.url
    Logger.log_more_verbose("Start to process URL: %r" % m_url)

    # Get the remote web server fingerprint.
    m_webserver_finger = info.get_associated_informations_by_category(WebServerFingerprint.information_type)

    m_wordlist = set()

    # There is fingerprinting information?
    if m_webserver_finger:
        m_webserver_finger = m_webserver_finger.pop()
        m_server_canonical_name = m_webserver_finger.name_canonical
        m_servers_related = m_webserver_finger.related  # Set with related web servers

        m_wordlist_update = m_wordlist.update

        # Common wordlist
        try:
            w = Config.plugin_extra_config["common"]
            m_wordlist_update([l_w for l_w in w.itervalues()])
        except KeyError:
            Logger.log_error("Can't load wordlists")

        # Wordlist of server name
        try:
            w = Config.plugin_extra_config["%s_predictables" % m_server_canonical_name]
            m_wordlist_update([l_w for l_w in w.itervalues()])
        except KeyError:
            Logger.log_error("Can't load wordlists")

        # Wordlists of the servers related with the one found.
        try:
            for l_servers_related in m_servers_related:
                # BUGFIX: the original looked up the canonical server name
                # again here instead of the related server, so related
                # wordlists were never actually loaded.
                w = Config.plugin_extra_config["%s_predictables" % l_servers_related]
                m_wordlist_update([l_w for l_w in w.itervalues()])
        except KeyError:
            Logger.log_error("Can't load wordlists")
    else:
        # No fingerprint: fall back to the common wordlists only.
        try:
            w = Config.plugin_extra_config["common"]
            m_wordlist.update([l_w for l_w in w.itervalues()])
        except KeyError:
            Logger.log_error("Can't load wordlists")

    # Load the content of the wordlists and build the candidate URLs.
    m_urls = set()
    m_urls_update = m_urls.update

    # Make sure the base URL ends with a slash before joining.
    m_url_fixed = m_url if m_url.endswith("/") else "%s/" % m_url

    for l_w in m_wordlist:
        # Use a copy of wordlist to avoid modify the original source.
        l_loaded_wordlist = WordListLoader.get_advanced_wordlist_as_list(l_w)
        m_urls_update((urljoin(m_url_fixed, (l_wo[1:] if l_wo.startswith("/") else l_wo)) for l_wo in l_loaded_wordlist))

    # Generates the error page used as baseline for comparisons.
    m_error_response = get_error_page(m_url)

    # Create the matching analyzer.
    try:
        m_store_info = MatchingAnalyzer(m_error_response, min_ratio=0.65)
    except ValueError:
        # There is no information to analyze.
        return

    # Partially apply the URL processor with its fixed arguments.
    _f = partial(process_url,
                 severity_vectors['predictables'],
                 get_http_method(m_url),
                 m_store_info,
                 self.update_status,
                 len(m_urls))

    # Process the URLs.
    for i, l_url in enumerate(m_urls):
        _f((i, l_url))

    # Generate and return the results.
    return generate_results(m_store_info.unique_texts)
示例15: recv_info
def recv_info(self, info):
    """
    Brute force subdomains of the target domain and return the unique
    domains and IP addresses discovered.

    :param info: domain resource to analyze.
    :return: set with the discovered DNS registers, or None when skipped.
    """
    m_domain = info.root

    # Skips localhost
    if m_domain == "localhost":
        return

    m_return = None

    # Process each hostname only once.
    if not self.state.check(m_domain):

        # Candidate subdomain labels.
        m_subdomains = WordListLoader.get_advanced_wordlist_as_list("subs_small.txt")

        # Resolve the candidates in parallel.
        self.base_domain = m_domain
        self.completed = Counter(0)
        self.total = len(m_subdomains)
        r = pmap(self.get_subdomains_bruteforcer, m_subdomains, pool_size=10)

        # Deduplicate the results, tracking seen names/addresses in
        # side lists (bound methods hoisted for the hot loop).
        m_domains = set()
        m_domains_add = m_domains.add
        m_domains_already = []
        m_domains_already_append = m_domains_already.append

        m_ips = set()
        m_ips_add = m_ips.add
        m_ips_already = []
        m_ips_already_append = m_ips_already.append

        if r:
            for doms in r:
                for dom in doms:
                    # CNAME registers: keep only unseen, in-scope targets.
                    if dom.type == "CNAME":
                        if not dom.target in m_domains_already:
                            m_domains_already_append(dom.target)
                            if dom.target in Config.audit_scope:
                                m_domains_add(dom)
                            else:
                                discard_data(dom)
                    # A registers: keep unique addresses.
                    if dom.type == "A":
                        if dom.address not in m_ips_already:
                            m_ips_already_append(dom.address)
                            m_ips_add(dom)

        # Unify domains and IPs into a single result set.
        m_domains.update(m_ips)
        m_return = m_domains

        # Associate the discoveries with the input resource.
        map(info.add_information, m_return)

        # Set the domain as processed.
        self.state.set(m_domain, True)

        Logger.log_verbose("DNS analyzer plugin found %d subdomains" % len(m_return))

        # Write the info as more user friendly
        if Logger.MORE_VERBOSE:
            m_tmp = []
            m_tmp_append = m_tmp.append
            for x in m_return:
                if getattr(x, "address", False):
                    m_tmp_append("%s (%s)" % (getattr(x, "address"), str(x)))
                elif getattr(x, "target", False):
                    m_tmp_append("%s (%s)" % (getattr(x, "target"), str(x)))
                else:
                    m_tmp_append(str(x))
            Logger.log_more_verbose("Subdomains found: \n\t+ %s" % "\n\t+ ".join(m_tmp))

    return m_return