This article collects typical usage examples of the Python method core.colors.run. If you have been wondering what colors.run does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore the module core.colors, where the method is defined, for further usage examples.
Below are 5 code examples of the colors.run method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
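All five examples format their console output with colored status prefixes imported from core.colors: run marks an operation in progress, and the snippets also rely on good, bad, info, que, green and end. For orientation, here is a minimal sketch of what a colors module along these lines typically looks like; the exact escape sequences are an assumption for illustration, not the verbatim source:

# a minimal sketch of a core/colors.py of this kind; the exact
# escape sequences are an assumption, not the verbatim module
white = '\033[97m'
green = '\033[92m'
red = '\033[91m'
yellow = '\033[93m'
blue = '\033[94m'
end = '\033[0m'               # reset sequence that closes any color
info = yellow + '[!]' + end   # informational message prefix
que = blue + '[?]' + end      # question/prompt prefix
bad = red + '[-]' + end       # error prefix
good = green + '[+]' + end    # success prefix
run = white + '[~]' + end     # "operation in progress" prefix

if __name__ == '__main__':
    print('%s Checking for updates' % run)  # white [~] followed by the message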
Example 1: getQuark
# Required module: from core import colors [as alias]
# Or: from core.colors import run [as alias]
import os
import shutil

from core.colors import bad, run, info  # colored status prefixes

def getQuark():
    if 'quark.html' not in os.listdir():
        cwd = os.getcwd()
        print('%s Quark is necessary to view graphs generated by Orbit.' % bad)
        print('%s Downloading Quark [2.37 MB]' % run)
        os.system('git clone https://github.com/s0md3v/Quark %s/Quark -q' % cwd)
        os.system('mv ' + cwd + '/Quark/libs ' + cwd)
        os.system('mv ' + cwd + '/Quark/quark.html ' + cwd)
        os.remove(cwd + '/Quark/README.md')
        shutil.rmtree(cwd + '/Quark')
        print('%s Quark was installed successfully' % info)
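getQuark shells out via os.system with interpolated paths, which works but breaks on directories containing spaces and silently ignores failures. For comparison, here is a sketch of the same download using subprocess with argument lists; get_quark_safely is a hypothetical name, not part of Orbit:

import os
import shutil
import subprocess

def get_quark_safely(cwd=None):
    """Hypothetical variant of getQuark; not part of Orbit."""
    cwd = cwd or os.getcwd()
    quark_dir = os.path.join(cwd, 'Quark')
    # check=True raises CalledProcessError if git exits non-zero
    subprocess.run(['git', 'clone', '-q',
                    'https://github.com/s0md3v/Quark', quark_dir], check=True)
    shutil.move(os.path.join(quark_dir, 'libs'), cwd)
    shutil.move(os.path.join(quark_dir, 'quark.html'), cwd)
    shutil.rmtree(quark_dir)  # discards README.md along with the clone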
Example 2: updater
# Required module: from core import colors [as alias]
# Or: from core.colors import run [as alias]
import os
import re

from core.colors import run, good, info, que, green, end
from core.requester import requester  # Photon's HTTP helper

def updater():
    """Update the current installation.

    git clones the latest version and merges it with the current directory.
    """
    print('%s Checking for updates' % run)
    # Changes must be separated by ;
    changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''
    latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com')
    # If the changes string above no longer appears in the latest updater.py,
    # a newer version must be available
    if changes not in latest_commit:
        changelog = re.search(r"changes = '''(.*?)'''", latest_commit)
        # Split the changes to form a list
        changelog = changelog.group(1).split(';')
        print('%s A new version of Photon is available.' % good)
        print('%s Changes:' % info)
        for change in changelog:  # print the changes
            print('%s>%s %s' % (green, end, change))
        current_path = os.getcwd().split('/')  # if you know it, you know it
        folder = current_path[-1]  # current directory name
        path = '/'.join(current_path)  # current directory path
        choice = input('%s Would you like to update? [Y/n] ' % que).lower()
        if choice != 'n':
            print('%s Updating Photon' % run)
            os.system('git clone --quiet https://github.com/s0md3v/Photon %s'
                      % (folder))
            os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'
                      % (path, folder, path, path, folder))
            print('%s Update successful!' % good)
    else:
        print('%s Photon is up to date!' % good)
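The version check is a simple trick: the local changes string is searched for in the latest updater.py fetched from GitHub, and if it no longer appears there, a newer release must have rewritten it. Here is a self-contained sketch of the same idea using only the standard library (requester is a project helper, so urllib.request stands in for it; new_version_available is a hypothetical name):

import re
from urllib.request import urlopen

# the same marker string that the installed copy of updater.py carries
LOCAL_CHANGES = ('major bug fixes;removed ninja mode;dropped python < 3.2 '
                 'support;fixed unicode output;proxy support;more intels')
RAW_URL = ('https://raw.githubusercontent.com/s0md3v/Photon/master/'
           'core/updater.py')

def new_version_available():
    latest = urlopen(RAW_URL).read().decode()
    if LOCAL_CHANGES in latest:
        return None  # the remote changelog matches ours: already up to date
    match = re.search(r"changes = '''(.*?)'''", latest)
    # return the remote changelog as a list of individual changes
    return match.group(1).split(';') if match else []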
Example 3: photon
# Required module: from core import colors [as alias]
# Or: from core.colors import run [as alias]
import concurrent.futures
from re import findall
from urllib.parse import urlparse

from core.colors import run
# getUrl, getParams, requester, zetanize and remove_file are helpers
# from the host project

def photon(seedUrl, headers, depth, threadCount):
    forms = []  # web forms
    processed = set()  # urls that have been crawled
    storage = set()  # urls that belong to the target i.e. in-scope
    scheme = urlparse(seedUrl).scheme
    host = urlparse(seedUrl).netloc
    main_url = scheme + '://' + host
    storage.add(seedUrl)

    def rec(url):
        processed.add(url)
        urlPrint = (url + (' ' * 60))[:60]
        print('%s Parsing %-40s' % (run, urlPrint), end='\r')
        url = getUrl(url, '', True)
        params = getParams(url, '', True)
        if '=' in url:  # a query string means there are GET parameters
            inps = []
            for name, value in params.items():
                inps.append({'name': name, 'value': value})
            forms.append(
                {url: {0: {'action': url, 'method': 'get', 'inputs': inps}}})
        response = requester(url, params, headers, True, 0).text
        forms.append({url: zetanize(url, response)})
        matches = findall(
            r'<[aA][^>]*?(href|HREF)=["\']{0,1}(.*?)["\']', response)
        for link in matches:  # iterate over the matches
            # remove everything after a "#" to deal with in-page anchors
            link = link[1].split('#')[0].lstrip(' ')
            if link[:4] == 'http':
                if link.startswith(main_url):
                    storage.add(link)
            elif link[:2] == '//':
                if link.split('/')[2].startswith(host):
                    # 'https:' + '//host/path' resolves a protocol-relative link
                    storage.add(scheme + ':' + link)
            elif link[:1] == '/':
                storage.add(remove_file(url) + link)
            else:
                usable_url = remove_file(url)
                if usable_url.endswith('/'):
                    storage.add(usable_url + link)
                elif link.startswith('/'):
                    storage.add(usable_url + link)
                else:
                    storage.add(usable_url + '/' + link)

    for x in range(depth):
        urls = storage - processed  # frontier: urls not yet crawled
        # note: threadCount is accepted but this snippet hardcodes 10 workers
        threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=10)
        futures = (threadpool.submit(rec, url) for url in urls)
        for i in concurrent.futures.as_completed(futures):
            pass
    return [forms, len(processed)]
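The crawl loop is the heart of this function: on every depth iteration the frontier is computed as storage - processed, and the whole level is fetched concurrently through a thread pool before the next frontier is derived. A self-contained sketch of that pattern, with a toy link graph standing in for real HTTP requests (crawl and fetch_links are illustrative names, not Photon APIs):

import concurrent.futures

def crawl(seed, fetch_links, depth=2, workers=10):
    """Level-by-level crawl: fetch_links(url) -> iterable of discovered URLs."""
    storage, processed = {seed}, set()

    def visit(url):
        processed.add(url)
        storage.update(fetch_links(url))

    for _ in range(depth):
        frontier = storage - processed  # only URLs not yet crawled
        with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as pool:
            # wait for the whole level before computing the next frontier
            list(pool.map(visit, frontier))
    return processed

# usage with a toy link graph instead of real HTTP requests
graph = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
print(crawl('a', lambda u: graph.get(u, []), depth=3))  # {'a', 'b', 'c', 'd'}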
Example 4: photon
# Required module: from core import colors [as alias]
# Or: from core.colors import run [as alias]
import concurrent.futures
import re
from urllib.parse import urlparse

from core.colors import run
# getUrl, getParams, requester, zetanize, js_extractor, script_extractor,
# retirejs, wappalyzer, handle_anchor and is_link are helpers from the
# host project

def photon(seedUrl):
    forms = []  # web forms
    processed = set()  # urls that have been crawled
    storage = set()  # urls that belong to the target i.e. in-scope
    schema = urlparse(seedUrl).scheme  # extract the scheme e.g. http or https
    host = urlparse(seedUrl).netloc  # extract the host e.g. example.com
    main_url = schema + '://' + host  # join scheme and host to make the root url
    storage.add(seedUrl)  # add the seed url to storage
    checkedScripts = set()
    all_techs = []
    all_outdated_js = []

    def rec(target):
        processed.add(target)
        urlPrint = (target + (' ' * 60))[:60]
        print('%s Parsing %-40s' % (run, urlPrint), end='\r')
        url = getUrl(target, True)
        params = getParams(target, '', True)
        if '=' in target:  # if there's a = in the url, there should be GET parameters
            inps = []
            for name, value in params.items():
                inps.append({'name': name, 'value': value})
            forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
        raw_response = requester(url, params, True)
        response = raw_response.text
        js = js_extractor(response)
        scripts = script_extractor(response)
        for each in retirejs(url, response, checkedScripts):
            all_outdated_js.append(each)
        all_techs.extend(wappalyzer(raw_response, js, scripts))
        parsed_response = zetanize(response)
        forms.append(parsed_response)
        matches = re.finditer(
            r'<[aA][^>]*?(?:href|HREF)=[\'"`]?([^\s>]*?)[\'"`]?>', response)
        for link in matches:  # iterate over the matches
            # handle_anchor resolves the link against the current page and
            # removes everything after a "#" to deal with in-page anchors
            this_url = handle_anchor(target, link.group(1))
            if is_link(this_url, processed):
                if urlparse(this_url).netloc == host:
                    storage.add(this_url.split('#')[0])

    for x in range(2):  # crawl 2 levels deep
        urls = storage - processed  # urls to crawl = all urls - urls that have been crawled
        threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=10)
        futures = (threadpool.submit(rec, url) for url in urls)
        for i in concurrent.futures.as_completed(futures):
            pass
    return [forms, processed, set(all_techs), all_outdated_js]
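This variant delegates URL normalization to handle_anchor and scope filtering to is_link, but link extraction itself is a single regex pass over the raw HTML. A quick self-contained demonstration of that exact pattern on made-up markup:

import re

html = '''<a href="/about">About</a>
<A HREF='https://example.com/blog#latest'>Blog</A>
<a href=contact.html>Contact</a>'''

# the same pattern used in the example above
pattern = r'<[aA][^>]*?(?:href|HREF)=[\'"`]?([^\s>]*?)[\'"`]?>'
for m in re.finditer(pattern, html):
    # strip in-page anchors exactly as the crawler does
    print(m.group(1).split('#')[0])
# prints: /about, https://example.com/blog, contact.html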
Example 5: zap
# Required module: from core import colors [as alias]
# Or: from core.colors import run [as alias]
import random
import re

import requests

from core.colors import run, good
# time_machine, verb and xml_parser are helpers from the host project

def zap(input_url, archive, domain, host, internal, robots, proxies):
    """Extract links from robots.txt and sitemap.xml."""
    if archive:
        print('%s Fetching URLs from archive.org' % run)
        if False:  # the domain-wide lookup is disabled in this snippet
            archived_urls = time_machine(domain, 'domain')
        else:
            archived_urls = time_machine(host, 'host')
        print('%s Retrieved %i URLs from archive.org' % (
            good, len(archived_urls) - 1))
        for url in archived_urls:
            verb('Internal page', url)
            internal.add(url)
    # Make a request to robots.txt
    response = requests.get(input_url + '/robots.txt',
                            proxies=random.choice(proxies)).text
    # Make sure robots.txt isn't some fancy 404 page
    if '<body' not in response:
        # If you know it, you know it
        matches = re.findall(r'Allow: (.*)|Disallow: (.*)', response)
        if matches:
            # Iterate over the matches; match is a tuple here
            for match in matches:
                # One item in match will always be empty, so combine both items
                match = ''.join(match)
                # If the URL doesn't use a wildcard
                if '*' not in match:
                    url = input_url + match
                    # Add the URL to the internal list for crawling
                    internal.add(url)
                    # Add the URL to the robots list
                    robots.add(url)
            print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
    # Make a request to sitemap.xml
    response = requests.get(input_url + '/sitemap.xml',
                            proxies=random.choice(proxies)).text
    # Make sure sitemap.xml isn't some fancy 404 page
    if '<body' not in response:
        matches = xml_parser(response)
        if matches:  # if there are any matches
            print('%s URLs retrieved from sitemap.xml: %s' % (
                good, len(matches)))
            for match in matches:
                verb('Internal page', match)
                # Clean up the URL and add it to the internal list for crawling
                internal.add(match)
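The robots.txt parsing hinges on one detail: re.findall with two alternated groups returns tuples in which exactly one element is non-empty, so joining the tuple recovers the path regardless of whether the rule was Allow or Disallow. A self-contained demonstration on a made-up robots.txt:

import re

robots_txt = '''User-agent: *
Disallow: /admin
Allow: /public
Disallow: /tmp/*'''

for match in re.findall(r'Allow: (.*)|Disallow: (.*)', robots_txt):
    path = ''.join(match)  # one group is always empty
    if '*' not in path:  # skip wildcard rules, as zap does
        print('https://example.com' + path)
# prints: https://example.com/admin, https://example.com/public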