This article collects typical usage examples of the Python class golismero.api.net.http.HTTP, drawn from real projects. If you have been wondering what the HTTP class does and how to use it, the hand-picked examples below should help.
The following 13 code examples of the HTTP class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: __detect_wordpress_installation
def __detect_wordpress_installation(self, url, wordpress_urls):
    """
    Try to detect a WordPress installation in the current path.

    :param url: URL where to look for the WordPress installation.
    :type url: str

    :param wordpress_urls: name of the wordlist containing WordPress URLs.
    :type wordpress_urls: str

    :return: True if a WordPress installation was found, False otherwise.
    :rtype: bool
    """
    Logger.log_more_verbose("Detecting WordPress installation in URI: '%s'." % url)
    total_urls = 0
    urls_found = 0

    error_page = get_error_page(url).raw_data

    for u in WordListLoader.get_wordlist(wordpress_urls):
        total_urls += 1
        tmp_url = urljoin(url, u)

        r = HTTP.get_url(tmp_url, use_cache=False)
        if r.status == "200":
            # Try to detect non-default error pages
            ratio = get_diff_ratio(r.raw_response, error_page)
            if ratio < 0.35:
                urls_found += 1

        discard_data(r)

    # If fewer than 85% of the probed URLs look real, run one last test
    if (urls_found / float(total_urls)) < 0.85:
        # Final check: wp-admin/ should redirect to the login page
        url_wp_admin = urljoin(url, "wp-admin/")
        try:
            p = HTTP.get_url(url_wp_admin, use_cache=False, allow_redirects=False)
            if p:
                discard_data(p)
        except Exception:
            return False

        if not p:
            return False
        return (p.status == "302" and
                "wp-login.php?redirect_to=" in p.headers.get("Location", ""))

    # 85% or more of the probed URLs answered: assume WordPress is present
    return True
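Usage note: a minimal, hedged sketch of how this detector might be driven from a plugin's run method. The plugin wiring, the info.url attribute, and the "wordpress_urls" wordlist name are illustrative assumptions, not part of the original snippet.

def run(self, info):
    # Hypothetical caller; the wordlist name below is an assumption.
    if self.__detect_wordpress_installation(info.url, "wordpress_urls"):
        Logger.log_verbose("WordPress installation found at: %s" % info.url)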
Example 2: process_url
def process_url(risk_level, method, matcher, updater_func, total_urls, url):
    """
    Checks whether a URL exists.

    :param risk_level: risk level of the tested URL, if discovered.
    :type risk_level: int

    :param method: HTTP method to use.
    :type method: str

    :param matcher: instance of a MatchingAnalyzer object.
    :type matcher: `MatchingAnalyzer`

    :param updater_func: update_status function used to report progress.
    :type updater_func: update_status

    :param total_urls: total number of URLs to process.
    :type total_urls: int

    :param url: a tuple with data: (index, the URL to process).
    :type url: tuple(int, str)
    """
    i, url = url

    updater_func((float(i) * 100.0) / float(total_urls))
    # Logger.log_more_verbose("Trying to discover URL %s" % url)

    # Get the URL
    p = None
    try:
        p = HTTP.get_url(url, use_cache=False, method=method)
        if p:
            discard_data(p)
    except Exception, e:
        Logger.log_error_more_verbose("Error while processing: '%s': %s" % (url, str(e)))
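Usage note: process_url is designed to be mapped over an enumerated URL list, which is why the last parameter is an (index, url) tuple. A hedged sketch of one way to drive it; the URL list, risk level, matcher and update_status below are illustrative assumptions.

from functools import partial

urls   = ["http://www.example.com/admin/", "http://www.example.com/backup.zip"]
worker = partial(process_url, 1, "GET", matcher, update_status, len(urls))
for item in enumerate(urls):
    worker(item)   # each item is an (index, url) tuple, as the docstring expects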
Example 3: get_http_method
def get_http_method(url):
    """
    Determines whether the HEAD method is available. To do that, it compares
    two responses:
    - one made with the GET method
    - one made with the HEAD method

    If they are more than 90% similar, the responses are effectively the same,
    so the HEAD method is considered unsupported and GET is used instead.
    """
    m_head_response = HTTP.get_url(url, method="HEAD")  # FIXME handle exceptions!
    discard_data(m_head_response)

    m_get_response = HTTP.get_url(url)  # FIXME handle exceptions!
    discard_data(m_get_response)

    # Check whether the HEAD response differs from the GET response,
    # to ensure that the results are valid
    return "HEAD" if HTTP_response_headers_analyzer(m_head_response.headers, m_get_response.headers) < 0.90 else "GET"
Example 4: find_htm_file
def find_htm_file(url):
    new_file = []
    for file_name in ['DeveloperMenu.htm']:
        url_check = url[1:] if url.startswith("/") else url
        tmp_u = urljoin(url_check, file_name)

        p = HTTP.get_url(tmp_u, use_cache=False, method="GET")
        if p.status == "200":
            file_save = download(tmp_u)
            new_file = re.findall(r'href=[\'"]?([^\'" >]+)', file_save.raw_data)

    return new_file
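Side note: the href regular expression above is a quick-and-dirty link extractor. A small self-contained demonstration with illustrative input:

import re

html = '<a href="execute.xml">run</a> <a href=\'DeveloperMenu.xml\'>menu</a>'
print re.findall(r'href=[\'"]?([^\'" >]+)', html)
# -> ['execute.xml', 'DeveloperMenu.xml']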
Example 5: find_xml_files
def find_xml_files(url):
    new_file = []
    for file_name in ['execute.xml', 'DeveloperMenu.xml']:
        url_check = url[1:] if url.startswith("/") else url
        tmp_u = urljoin(url_check, file_name)

        p = HTTP.get_url(tmp_u, use_cache=False, method="GET")
        if p.status == "200":
            file_save = download(tmp_u)

            tree = ET.fromstring(file_save.raw_data)
            try:
                for links in tree.findall('Object'):
                    Logger.log(links.find('ObjLink').text)
                    new_file.append(links.find('ObjLink').text)
            except Exception:
                ##raise # XXX DEBUG
                pass

    return new_file
Example 6: get_error_page
def get_error_page(url):
    """
    Forces the server to generate an error page and returns its content.

    :param url: string with the base URL.
    :type url: str

    :return: a string with the content of the response.
    :rtype: str
    """
    #
    # Generate an error in the server to get an error page, using a random string
    #
    # Make the URL
    m_error_url = "%s%s" % (url, generate_random_string())

    # Get the response
    m_error_response = HTTP.get_url(m_error_url)  # FIXME handle exceptions!
    discard_data(m_error_response)
    m_error_response = m_error_response.data
    return m_error_response
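Usage note: a hedged sketch of the diff-against-error-page idiom this helper supports, reusing the 0.35 threshold from Example 1; the URLs are illustrative.

error_page = get_error_page("http://www.example.com/")
r = HTTP.get_url("http://www.example.com/wp-login.php", use_cache=False)
if r:
    discard_data(r)
    if r.status == "200" and get_diff_ratio(r.raw_response, error_page) < 0.35:
        print "Looks like a real page, not a customized error page."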
Example 7: recv_info
def recv_info(self, info):

    # Get the response page.
    response = HTTP.get_url(info.url, callback = self.check_response)

    if response:
        try:

            # Look for a match.
            page_text = response.data

            total = float(len(signatures))
            for step, (server_name, server_page) in enumerate(signatures.iteritems()):

                # Update status
                progress = float(step) / total
                self.update_status(progress=progress)

                level = get_diff_ratio(page_text, server_page)
                if level > 0.95:  # magic number :)

                    # Match found.
                    vulnerability = DefaultErrorPage(info, server_name)
                    vulnerability.add_information(response)
                    return [vulnerability, response]

            # Discard the response if no match was found.
            discard_data(response)

        except Exception:
            # Discard the response on error.
            discard_data(response)
            raise
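Implementation note: the callback= argument lets the plugin veto uninteresting responses before they are fully downloaded and stored. A hedged sketch of what such a check_response filter might look like; the exact callback signature is an assumption, not taken from the original snippet.

@staticmethod
def check_response(request, url, status_code, content_length, content_type):
    # Assumed signature: accept only text responses.
    return content_type is not None and content_type.lower().startswith("text/")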
Example 8: http_analyzers
def http_analyzers(main_url, update_status_func, number_of_entries=4):
    """
    Analyze HTTP headers to detect the web server. Returns a list with the most
    likely web servers.

    :param main_url: base URL to test.
    :type main_url: str

    :param update_status_func: function used to update the status of the process.
    :type update_status_func: function

    :param number_of_entries: number of results to return for the most probable web servers detected.
    :type number_of_entries: int

    :return: web server family, web server version, complete web server description,
        related web servers (as a dict('SERVER_RELATED' : set(RELATED_NAMES))), and
        other web servers with their probabilities as a dict(CONCRETE_WEB_SERVER, PROBABILITY).
    """

    # Load the wordlists directly related to HTTP header fields.
    # { HTTP_HEADER_FIELD : [wordlists] }
    m_wordlists_HTTP_fields = {
        "Accept-Ranges"    : "accept-range",
        "Server"           : "banner",
        "Cache-Control"    : "cache-control",
        "Connection"       : "connection",
        "Content-Type"     : "content-type",
        "WWW-Authenticate" : "htaccess-realm",
        "Pragma"           : "pragma",
        "X-Powered-By"     : "x-powered-by"
    }

    m_actions = {
        'GET'       : { 'wordlist' : 'Wordlist_get'           , 'weight' : 1 , 'protocol' : 'HTTP/1.1', 'method' : 'GET'     , 'payload': '/' },
        'LONG_GET'  : { 'wordlist' : 'Wordlist_get_long'      , 'weight' : 1 , 'protocol' : 'HTTP/1.1', 'method' : 'GET'     , 'payload': '/%s' % ('a' * 200) },
        'NOT_FOUND' : { 'wordlist' : 'Wordlist_get_notfound'  , 'weight' : 2 , 'protocol' : 'HTTP/1.1', 'method' : 'GET'     , 'payload': '/404_NOFOUND__X02KAS' },
        'HEAD'      : { 'wordlist' : 'Wordlist_head'          , 'weight' : 3 , 'protocol' : 'HTTP/1.1', 'method' : 'HEAD'    , 'payload': '/' },
        'OPTIONS'   : { 'wordlist' : 'Wordlist_options'       , 'weight' : 2 , 'protocol' : 'HTTP/1.1', 'method' : 'OPTIONS' , 'payload': '/' },
        'DELETE'    : { 'wordlist' : 'Wordlist_delete'        , 'weight' : 5 , 'protocol' : 'HTTP/1.1', 'method' : 'DELETE'  , 'payload': '/' },
        'TEST'      : { 'wordlist' : 'Wordlist_attack'        , 'weight' : 5 , 'protocol' : 'HTTP/1.1', 'method' : 'TEST'    , 'payload': '/' },
        'INVALID'   : { 'wordlist' : 'Wordlist_wrong_method'  , 'weight' : 5 , 'protocol' : 'HTTP/9.8', 'method' : 'GET'     , 'payload': '/' },
        'ATTACK'    : { 'wordlist' : 'Wordlist_wrong_version' , 'weight' : 2 , 'protocol' : 'HTTP/1.1', 'method' : 'GET'     , 'payload': "/etc/passwd?format=%%%%&xss=\x22><script>alert('xss');</script>&traversal=../../&sql='%20OR%201;"}
    }

    # Parse the base URL to get the hostname and port
    m_d        = ParsedURL(main_url)
    m_hostname = m_d.hostname
    m_port     = m_d.port
    m_debug    = False  # Only for development

    # Counter of banners. Used when the other methods fail.
    m_banners_counter = Counter()

    # Score counter
    m_counters = HTTPAnalyzer(debug=m_debug)

    # Vars used to update the status
    m_data_len = len(m_actions)
    i          = 1  # element in process

    for l_action, v in m_actions.iteritems():
        if m_debug:
            print "###########"
        l_method   = v["method"]
        l_payload  = v["payload"]
        l_proto    = v["protocol"]
        l_wordlist = v["wordlist"]

        # Each type of probe has a different weight.
        #
        # Weights go from 0 - 5
        #
        l_weight = v["weight"]

        # Make the URL
        l_url = urljoin(main_url, l_payload)

        # Make the raw request
        #l_raw_request = "%(method)s %(payload)s %(protocol)s\r\nHost: %(host)s:%(port)s\r\nConnection: Close\r\n\r\n" % (
        l_raw_request = "%(method)s %(payload)s %(protocol)s\r\nHost: %(host)s\r\n\r\n" % (
            {
                "method"   : l_method,
                "payload"  : l_payload,
                "protocol" : l_proto,
                "host"     : m_hostname,
                "port"     : m_port
            }
        )
        if m_debug:
            print "REQUEST"
            print l_raw_request

        # Do the connection
        l_response = None
        try:
            m_raw_request = HTTP_Raw_Request(l_raw_request)
            discard_data(m_raw_request)
            l_response = HTTP.make_raw_request(
                host        = m_hostname,
                port        = m_port,
                raw_request = m_raw_request,
#......... the rest of this code has been omitted .........
Example 9: http_simple_analyzer
def http_simple_analyzer(main_url, update_status_func, number_of_entries=4):
    """
    Simple method to fingerprint a web server.

    :param main_url: base URL to test.
    :type main_url: str

    :param update_status_func: function used to update the status of the process.
    :type update_status_func: function

    :param number_of_entries: number of results to return for the most probable web servers detected.
    :type number_of_entries: int

    :return: a tuple in the format: web server family, web server version, complete
        web server description, related web servers (as a dict('SERVER_RELATED' : set(RELATED_NAMES))),
        and other web servers with their probabilities as a dict(CONCRETE_WEB_SERVER, PROBABILITY).
    """
    m_actions = {
        'GET'       : { 'wordlist' : 'Wordlist_get'           , 'weight' : 1 , 'protocol' : 'HTTP/1.1', 'method' : 'GET'     , 'payload': '/' },
        'LONG_GET'  : { 'wordlist' : 'Wordlist_get_long'      , 'weight' : 1 , 'protocol' : 'HTTP/1.1', 'method' : 'GET'     , 'payload': '/%s' % ('a' * 200) },
        'NOT_FOUND' : { 'wordlist' : 'Wordlist_get_notfound'  , 'weight' : 2 , 'protocol' : 'HTTP/1.1', 'method' : 'GET'     , 'payload': '/404_NOFOUND__X02KAS' },
        'HEAD'      : { 'wordlist' : 'Wordlist_head'          , 'weight' : 3 , 'protocol' : 'HTTP/1.1', 'method' : 'HEAD'    , 'payload': '/' },
        'OPTIONS'   : { 'wordlist' : 'Wordlist_options'       , 'weight' : 2 , 'protocol' : 'HTTP/1.1', 'method' : 'OPTIONS' , 'payload': '/' },
        'DELETE'    : { 'wordlist' : 'Wordlist_delete'        , 'weight' : 5 , 'protocol' : 'HTTP/1.1', 'method' : 'DELETE'  , 'payload': '/' },
        'TEST'      : { 'wordlist' : 'Wordlist_attack'        , 'weight' : 5 , 'protocol' : 'HTTP/1.1', 'method' : 'TEST'    , 'payload': '/' },
        'INVALID'   : { 'wordlist' : 'Wordlist_wrong_method'  , 'weight' : 5 , 'protocol' : 'HTTP/9.8', 'method' : 'GET'     , 'payload': '/' },
        'ATTACK'    : { 'wordlist' : 'Wordlist_wrong_version' , 'weight' : 2 , 'protocol' : 'HTTP/1.1', 'method' : 'GET'     , 'payload': "/etc/passwd?format=%%%%&xss=\x22><script>alert('xss');</script>&traversal=../../&sql='%20OR%201;"}
    }

    m_d        = ParsedURL(main_url)
    m_hostname = m_d.hostname
    m_port     = m_d.port
    m_debug    = False  # Only for development
    i          = 0

    m_counters = HTTPAnalyzer()
    m_data_len = len(m_actions)  # Var used to update the status
    m_banners_counter = Counter()

    for l_action, v in m_actions.iteritems():
        if m_debug:
            print "###########"
        l_method  = v["method"]
        l_payload = v["payload"]
        l_proto   = v["protocol"]
        #l_wordlist = v["wordlist"]

        # Each type of probe has a different weight.
        #
        # Weights go from 0 - 5
        #
        l_weight = v["weight"]

        # Make the raw request
        l_raw_request = "%(method)s %(payload)s %(protocol)s\r\nHost: %(host)s\r\n\r\n" % (
            {
                "method"   : l_method,
                "payload"  : l_payload,
                "protocol" : l_proto,
                "host"     : m_hostname,
                "port"     : m_port
            }
        )
        if m_debug:
            print "REQUEST"
            print l_raw_request

        # Do the connection
        l_response = None
        try:
            m_raw_request = HTTP_Raw_Request(l_raw_request)
            discard_data(m_raw_request)
            l_response = HTTP.make_raw_request(
                host        = m_hostname,
                port        = m_port,
                raw_request = m_raw_request,
                callback    = check_raw_response)
            if l_response:
                discard_data(l_response)
        except NetworkException, e:
            Logger.log_error_more_verbose("Server-Fingerprint plugin: no response for URL (%s) with method '%s'. Message: %s" % (m_hostname, l_method, str(e)))
            continue

        if not l_response:
            Logger.log_error_more_verbose("No response for host '%s' with method '%s'." % (m_hostname, l_method))
            continue

        if m_debug:
            print "RESPONSE"
            print l_response.raw_headers

        # Update the status
        update_status_func((float(i) * 100.0) / float(m_data_len))
        Logger.log_more_verbose("Making '%s' test." % l_method)
        i += 1

        # Analyze for each wordlist
        #
        # Store the server banner
        try:
            m_banners_counter[l_response.headers["Server"]] += l_weight
        except KeyError:
#......... the rest of this code has been omitted .........
Example 10: set
            # If it is a Disallow URL, it must be considered suspicious
            if m_key.lower() == "disallow":
                m_discovered_suspicious.append(tmp_discovered)

        except Exception, e:
            continue

    #
    # Filter the results
    #

    # Remove duplicates
    m_discovered_urls = set(m_discovered_urls)

    # Generate the error page
    m_error_page = generate_error_page_url(m_url_robots_txt)
    m_response_error_page = HTTP.get_url(m_error_page, callback=self.check_response)

    if m_response_error_page:
        m_return.append(m_response_error_page)

        # Analyze the results
        match = {}
        m_analyzer = MatchingAnalyzer(m_response_error_page.data)

        m_total = len(m_discovered_urls)
        for m_step, l_url in enumerate(m_discovered_urls):

            # Update the status only on odd iterations
            if m_step % 2:
                progress = (float(m_step * 100) / m_total)
                self.update_status(progress=progress)

            l_url = fix_url(l_url, m_url)
Example 11: str
    except Exception, e:
        Logger.log_more_verbose("Error while processing: '%s': %s" % (url, str(e)))

    # Check if the URL is acceptable by comparing
    # the response content.
    #
    # If the matching level between the error page
    # and this URL is greater than 52%, it is
    # considered the same page and is discarded.
    #
    if p and p.status == "200":

        # If the method used to get the URL was HEAD, fetch the complete URL with GET
        if method != "GET":
            try:
                p = HTTP.get_url(url, use_cache=False, method="GET")
                if p:
                    discard_data(p)
            except Exception, e:
                Logger.log_more_verbose("Error while processing: '%s': %s" % (url, str(e)))

        # Queue it for analysis and display info if it is accepted
        if matcher.analyze(p.raw_response, url=url, risk=risk_level):
            updater_func(text="Discovered partial url: '%s'" % url)
#----------------------------------------------------------------------
#
# Aux functions
#
#----------------------------------------------------------------------
Example 12: is_URL_in_windows
def is_URL_in_windows(self, main_url):
    """
    Detect whether the remote platform is Windows or \*NIX. To do this, take
    the first in-scope link and request it twice: once as-is and once in
    uppercase. If both responses are the same, the platform is Windows
    (case-insensitive paths); otherwise it is \*NIX.

    :returns: True if the remote host is a Windows system, False if it is
        \*NIX, or None if unknown.
    :rtype: bool
    """
    m_forbidden = (
        "logout",
        "logoff",
        "exit",
        "sigout",
        "signout",
    )

    # Get the main web page
    m_r = download(main_url, callback=self.check_download)
    if not m_r:
        return None
    discard_data(m_r)

    # Get the links
    if m_r.information_type == Information.INFORMATION_HTML:
        m_links = extract_from_html(m_r.raw_data, main_url)
    else:
        m_links = extract_from_text(m_r.raw_data, main_url)
    if not m_links:
        return None

    # Get the first link of the page that is in the scope of the audit
    m_first_link = None
    for u in m_links:
        if u in Config.audit_scope and not any(x in u for x in m_forbidden):
            m_first_link = u
            break
    if not m_first_link:
        return None

    # Now make two requests for the link: one with the original URL and
    # another with the URL in uppercase.

    # Original
    m_response_orig = HTTP.get_url(m_first_link, callback=self.check_response)  # FIXME handle exceptions!
    discard_data(m_response_orig)

    # Uppercase
    m_response_upper = HTTP.get_url(m_first_link.upper(), callback=self.check_response)  # FIXME handle exceptions!
    discard_data(m_response_upper)

    # Compare them
    m_orig_data  = m_response_orig.raw_response if m_response_orig else ""
    m_upper_data = m_response_upper.raw_response if m_response_upper else ""
    m_match_level = get_diff_ratio(m_orig_data, m_upper_data)

    # If the responses are more than 95% similar, the two URLs point to the
    # same resource => Windows; otherwise => *NIX.
    return m_match_level > 0.95
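Usage note: the method returns a tri-state value, so callers should branch on all three outcomes. A hedged sketch; the URL and log messages are illustrative.

is_windows = self.is_URL_in_windows("http://www.example.com/")
if is_windows is None:
    Logger.log_more_verbose("Could not determine the target platform.")
elif is_windows:
    Logger.log_more_verbose("Target looks like Windows (case-insensitive URLs).")
else:
    Logger.log_more_verbose("Target looks like a *NIX system.")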
Example 13: __find_plugins
def __find_plugins(self, url, plugins_wordlist, update_func):
    """
    Try to find installed plugins.

    :param url: base URL to test.
    :type url: str

    :param plugins_wordlist: path to the wordlist with the plugin list.
    :type plugins_wordlist: str

    :param update_func: function to update the plugin status.
    :type update_func: function

    :return: list of lists in the format:
        list([PLUGIN_NAME, PLUGIN_URL, PLUGIN_INSTALLED_VERSION, PLUGIN_LAST_VERSION, [CVE1, CVE2...]])
    :rtype: list(list())
    """
    results = []
    urls_to_test = {
        "readme.txt": r"(Stable tag:[\svV]*)([0-9\.]+)",
        "README.txt": r"(Stable tag:[\svV]*)([0-9\.]+)",
    }

    # Generate the error page
    error_response = get_error_page(url).raw_data

    # Load the plugin info
    plugins = []
    plugins_append = plugins.append
    with open(plugins_wordlist, "rU") as f:
        for x in f:
            plugins_append(x.replace("\n", ""))

    # Calculate the sizes
    total_plugins = len(plugins)

    # Load the CSV info
    csv_info = csv.reader(plugins)

    # Process the URLs
    for i, plugin_row in enumerate(csv_info):

        # Plugin properties
        plugin_URI          = plugin_row[0]
        plugin_name         = plugin_row[1]
        plugin_last_version = plugin_row[2]
        plugin_CVEs         = [] if plugin_row[3] == "" else plugin_row[3].split("|")

        # Update the status
        update_func((float(i) * 100.0) / float(total_plugins))

        # Make the plugin URL
        partial_plugin_url = "%s/%s" % (url, "wp-content/plugins/%s" % plugin_URI)

        # Test each URL that may carry plugin version info
        for target, regex in urls_to_test.iteritems():
            plugin_url = "%s/%s" % (partial_plugin_url, target)

            # Try to get the plugin
            p = None
            try:
                p = HTTP.get_url(plugin_url, use_cache=False)
                if p:
                    discard_data(p)
            except Exception, e:
                Logger.log_error_more_verbose("Error while downloading: '%s': %s" % (plugin_url, str(e)))
                continue
            if not p:
                continue

            plugin_installed_version = None

            if p.status == "403":  # Installed, but inaccessible
                plugin_installed_version = "Unknown"

            elif p.status == "200":
                # Check that this is not a customized "not found" page served with a 200 code
                if get_diff_ratio(error_response, p.raw_response) < 0.52:
                    # Find the version
                    tmp_version = re.search(regex, p.raw_response)
                    if tmp_version is not None:
                        plugin_installed_version = tmp_version.group(2)

            # Store the info
            if plugin_installed_version is not None:
                Logger.log("Discovered plugin: '%s (installed version: %s)' (latest version: %s)" %
                           (plugin_name, plugin_installed_version, plugin_last_version))
                results.append([
                    plugin_name,
                    plugin_url,
                    plugin_installed_version,
                    plugin_last_version,
                    plugin_CVEs
                ])

                # Plugin found -> no more URL tests for this plugin
                break

    return results
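Usage note: a hedged sketch of consuming the result rows, whose layout is documented in the docstring above; the wordlist path and update function are illustrative assumptions.

found = self.__find_plugins("http://www.example.com", "plugins_wordlist.csv", update_status)
for name, purl, installed, latest, cves in found:
    Logger.log("Plugin %s at %s: installed %s, latest %s, CVEs: %s" %
               (name, purl, installed, latest, ", ".join(cves)))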