本文整理汇总了Python中core.requester.requester方法的典型用法代码示例。如果您正苦于以下问题:Python requester.requester方法的具体用法?Python requester.requester怎么用?Python requester.requester使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类core.requester
的用法示例。
在下文中一共展示了requester.requester方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: quickBruter
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def quickBruter(params, originalResponse, originalCode, reflections, factors, include, delay, headers, url, GET):
    """Probe the target with a whole batch of candidate parameters at once.

    Returns the batch (``params``) when the response differs from the
    baseline in any tracked factor (status code, HTML length, plain-text
    length, or reflection count), i.e. the batch contains at least one
    valid parameter worth bisecting; returns False when nothing changed.
    Raises ConnectionError on rate limiting unless --stable is active.
    """
    joined = joiner(params, include)
    newResponse = requester(url, joined, headers, GET, delay)
    if newResponse.status_code == 429:
        if core.config.globalVariables['stable']:
            # FIX: the original left the '%s' placeholder unfilled
            print('%s Hit rate limit, stabilizing the connection..' % bad)
            time.sleep(30)
            return params
        else:
            print('%s Target has rate limiting in place, please use --stable switch' % bad)
            raise ConnectionError
    if newResponse.status_code != originalCode:
        return params
    elif factors['sameHTML'] and len(newResponse.text) != (len(originalResponse)):
        return params
    elif factors['samePlainText'] and len(removeTags(originalResponse)) != len(removeTags(newResponse.text)):
        return params
    else:  # FIX: was 'elif True:', an always-true condition
        for param, value in joined.items():
            # a changed reflection count means this parameter is being echoed
            if param not in include and newResponse.text.count(value) != reflections:
                return params
        return False
示例2: getTransactions
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def getTransactions(address, processed, database, limit):
    """Collect counterparty addresses for *address* from its transaction pages.

    Populates database[address] with {counterparty: occurrence_count},
    marks *address* as processed, and returns the list of addresses seen
    (with repeats) for further crawling.
    """
    addresses = []
    increment = 0
    database[address] = {}
    pages = pageLimit(limit)
    for i in range(pages):
        trail = ''
        if pages > 1 and increment != 0:
            trail = '?offset=%i' % increment
        # FIX: the original computed 'trail' but never appended it, so
        # every iteration re-fetched page one instead of paginating
        response = requester(address + trail)
        matches = findall(r'"addr":".*?"', response)
        for match in matches:
            found = match.split('"')[3]
            if found not in database[address]:
                database[address][found] = 0
            database[address][found] += 1
            addresses.append(found)
        increment += 50  # the API serves 50 transactions per page
    processed.add(address)
    return addresses
示例3: vulners
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def vulners(software, version, cpe=False):
    """Ask the vulners.com Burp API whether *software*/*version* is vulnerable.

    Results are memoised via query_cache/cache. Returns True when a
    vulnerability is reported, False otherwise (including on empty input).
    """
    import json  # local import: used only to serialise the request body

    if not (software and version):
        return False
    cached = query_cache(software, version, cpe)
    if cached:
        return cached == 'vulnerable'
    kind = 'cpe' if cpe else 'software'
    # FIX: build the JSON body with json.dumps instead of %-interpolation,
    # which produced invalid JSON for names containing quotes/backslashes
    data = json.dumps({
        'software': software,
        'version': version,
        'type': kind,
        'maxVulnerabilities': 1,
    })
    response = requester('https://vulners.com/api/v3/burp/software/', get=False, data=data).text
    cache(software, version, response, cpe)
    if 'Nothing found for Burpsuite search request' in response:
        return False
    return True
示例4: retireJs
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def retireJs(url, response):
    """Scan every not-yet-checked script referenced by *response* with retire.js data."""
    for script in js_extractor(response):
        if script in getVar('checkedScripts'):
            continue  # already analysed in an earlier call
        updateVar('checkedScripts', script, 'add')
        uri = handle_anchor(url, script)
        # note: a fresh name so the 'response' argument isn't clobbered
        scriptBody = requester(uri, '', getVar('headers'), True, getVar('delay'), getVar('timeout')).text
        result = main_scanner(uri, scriptBody)
        if not result:
            continue
        logger.red_line()
        logger.good('Vulnerable component: ' + result['component'] + ' v' + result['version'])
        logger.info('Component location: %s' % uri)
        details = result['vulnerabilities']
        logger.info('Total vulnerabilities: %i' % len(details))
        for detail in details:
            logger.info('%sSummary:%s %s' % (green, end, detail['identifiers']['summary']))
            logger.info('Severity: %s' % detail['severity'])
            logger.info('CVE: %s' % detail['identifiers']['CVE'][0])
        logger.red_line()
示例5: jscanner
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def jscanner(url):
    """Extract endpoints from JavaScript code."""
    response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
    for groups in rendpoint.findall(response):
        # the pattern has two capture groups and one of them is always empty
        candidate = groups[0] + groups[1]
        # skip bare '/' and anything that still looks like JavaScript code
        if candidate != '/' and not re.search(r'[}{><"\']', candidate):
            verb('JS endpoint', candidate)
            endpoints.add(candidate)
# Records the time at which crawling started
示例6: extractForms
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def extractForms(url):
    """Fetch *url*, parse its forms and collect high-entropy token-like input values.

    Appends qualifying values to the module-level simTokens list.
    """
    response = requester(url, {}, headers, True, 0).text
    forms = zetanize(url, response)
    for form in forms.values():
        # FIX: dropped 'localTokens = set()', which was assigned but never used
        for inp in form['inputs']:
            value = inp['value']
            # only word characters / dashes qualify as token candidates
            if value and match(r'^[\w\-_]+$', value):
                if strength(value) > 10:  # entropy threshold for a real token
                    simTokens.append(value)
示例7: retirejs
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def retirejs(url, response, checkedScripts):
    """Return retire.js findings for every new script referenced by *response*."""
    findings = []
    for script in script_extractor(response):
        if script in checkedScripts:
            continue  # skip scripts already analysed
        checkedScripts.add(script)
        uri = handle_anchor(url, script)
        body = requester(uri).text
        scan = main_scanner(uri, body)
        if scan:
            findings.append(scan)
    return findings
示例8: security_trails
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def security_trails(domain):
    """Scrape securitytrails.com and return full subdomains of *domain*."""
    page = requester('https://securitytrails.com/list/apex_domain/' + domain).text
    # the page embeds a JSON array of subdomain prefixes
    prefixes = json.loads(re.search(r'(?m)"subdomains":(\[.*?\])', page).group(1))
    results = []
    for prefix in prefixes:
        results.append(prefix + '.' + domain)
    return results
示例9: whatcms
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def whatcms(domain):
    """Query whatcms.org for *domain*; return the CMS name or None if not detected."""
    response = requester('https://whatcms.org/?gpreq=json&jsoncallback=jQuery1124008091494457806547_1554361369057&s=%s&na=&nb=1cg805dlm7d7e5eickf67rzxrn12mju6bnch3a99hrt88v7n8rhf0lovwr8d0zm1&verified=&_=1554361369059' % domain).text
    # the JSONP payload escapes '/' as '\/', hence the doubled backslashes
    hit = re.search(r'uses<\\/div>[^>]+>(.*?)<\\/a>', response)
    return hit.group(1) if hit else None
示例10: findsubdomains
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def findsubdomains(host):
    """Scrape findsubdomains.com and return the subdomains listed for *host*."""
    page = requester('https://findsubdomains.com/subdomains-of/' + host).text
    results = []
    for hit in re.finditer(r'(?s)<div class="domains js-domain-name">(.*?)</div>', page):
        # strip the leading newline and surrounding spaces left by the markup
        results.append(hit.group(1).lstrip('\n').rstrip(' ').lstrip(' '))
    return results
示例11: bruteforcer
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def bruteforcer(target, paramData, payloadList, encoding, headers, delay, timeout):
    """Fire every payload at every parameter of *target* and report reflections."""
    GET, POST = (False, True) if paramData else (True, False)
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Parsed host to bruteforce: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Parsed url to bruteforce: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Bruteforcer params:', params)
    if not params:
        logger.error('No parameters to test.')
        quit()
    total = len(payloadList)  # hoisted: constant across all iterations
    for paramName in params.keys():
        progress = 1
        paramsCopy = copy.deepcopy(params)
        for payload in payloadList:
            logger.run('Bruteforcing %s[%s%s%s]%s: %i/%i\r' %
                       (green, end, paramName, green, end, progress, total))
            if encoding:
                payload = encoding(unquote(payload))
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers,
                                 GET, delay, timeout).text
            if encoding:
                # re-encode before searching, since the server reflects the encoded form
                payload = encoding(payload)
            if payload in response:
                logger.info('%s %s' % (good, payload))
            progress += 1
    logger.no_format('')
示例12: singleFuzz
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def singleFuzz(target, paramData, encoding, headers, delay, timeout):
    """Fuzz every parameter of *target* once, after detecting any WAF in front of it."""
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {},
                                 headers, GET, delay, timeout)
            target = 'https://' + target
        except Exception:  # FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit
            target = 'http://' + target
    logger.debug('Single Fuzz target: {}'.format(target))
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Single fuzz host: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Single fuzz url: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Single fuzz params:', params)
    if not params:
        logger.error('No parameters to test.')
        quit()
    # probe with the first parameter only — enough to trigger a WAF response
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        logger.error('WAF detected: %s%s%s' % (green, WAF, end))
    else:
        logger.good('WAF Status: %sOffline%s' % (green, end))
    for paramName in params.keys():
        logger.info('Fuzzing parameter: %s' % paramName)
        paramsCopy = copy.deepcopy(params)
        paramsCopy[paramName] = xsschecker
        fuzzer(url, paramsCopy, headers, GET,
               delay, timeout, WAF, encoding)
示例13: checker
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def checker(url, params, headers, GET, delay, payload, positions, timeout, encoding):
    """Measure how faithfully *payload* is reflected at each expected position.

    Wraps the payload in 'st4r7s'/'3nd' markers, injects it in place of
    xsschecker, and fuzzy-compares each reflection in the response against
    the injected string. Returns a list of non-zero efficiency scores
    (0-100, one per reflected position).
    """
    checkString = 'st4r7s' + payload + '3nd'
    if encoding:
        checkString = encoding(unquote(checkString))
    # lower-cased so marker search and comparisons are case-insensitive
    response = requester(url, replaceValue(
        params, xsschecker, checkString, copy.deepcopy), headers, GET, delay, timeout).text.lower()
    reflectedPositions = []
    # locate every occurrence of the start marker in the response
    for match in re.finditer('st4r7s', response):
        reflectedPositions.append(match.start())
    # align actual reflections with the positions recorded earlier
    filledPositions = fillHoles(positions, reflectedPositions)
    # Iterating over the reflections
    num = 0
    efficiencies = []
    for position in filledPositions:
        allEfficiencies = []
        try:
            # score the reflection found at the num-th detected marker
            reflected = response[reflectedPositions[num]
                :reflectedPositions[num]+len(checkString)]
            efficiency = fuzz.partial_ratio(reflected, checkString.lower())
            allEfficiencies.append(efficiency)
        except IndexError:
            # fewer reflections than expected positions — nothing to score here
            pass
        if position:
            # also score the reflection at the originally expected position
            reflected = response[position:position+len(checkString)]
            if encoding:
                # NOTE(review): checkString is re-encoded in-place here, which
                # affects later loop iterations — preserved as-is
                checkString = encoding(checkString.lower())
            efficiency = fuzz.partial_ratio(reflected, checkString)
            # a backslash-escaped payload counts as a near-hit (score 90)
            if reflected[:-2] == ('\\%s' % checkString.replace('st4r7s', '').replace('3nd', '')):
                efficiency = 90
            allEfficiencies.append(efficiency)
            efficiencies.append(max(allEfficiencies))
        else:
            # position 0/None means the reflection vanished entirely
            efficiencies.append(0)
        num += 1
    # drop zero scores; callers only care about live reflections
    return list(filter(None, efficiencies))
示例14: checky
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def checky(param, paraNames, url, headers, GET, delay, timeout):
    """Test whether *param* is reflected by the target; record it in paraNames if so."""
    if param not in paraNames:
        logger.debug('Checking param: {}'.format(param))
        response = requester(url, {param: xsschecker},
                             headers, GET, delay, timeout).text
        # quoted or space-delimited reflection means the parameter is live
        if '\'%s\'' % xsschecker in response or '"%s"' % xsschecker in response or ' %s ' % xsschecker in response:
            paraNames[param] = ''
            # FIX: the message was passed with extra positional args instead of
            # %-formatting, unlike every other logger call in this codebase
            logger.good('Valid parameter found: %s%s' % (green, param))
示例15: wafDetector
# 需要导入模块: from core import requester [as 别名]
# 或者: from core.requester import requester [as 别名]
def wafDetector(url, params, headers, GET, delay, timeout):
    """Provoke the target with a noisy XSS payload and match the reply against
    known WAF signatures. Returns the best-matching WAF name, or None."""
    with open(sys.path[0] + '/db/wafSignatures.json', 'r') as file:
        wafSignatures = json.load(file)
    # a payload which is noisy enough to provoke the WAF
    noise = '<script>alert("XSS")</script>'
    params['xss'] = noise
    # Opens the noise injected payload
    response = requester(url, params, headers, GET, delay, timeout)
    page = response.text
    code = str(response.status_code)
    headers = str(response.headers)
    logger.debug('Waf Detector code: {}'.format(code))
    logger.debug_json('Waf Detector headers:', response.headers)
    if int(code) < 400:
        return None  # no error status — the probe was not blocked
    bestScore = 0
    bestName = None
    for wafName, wafSignature in wafSignatures.items():
        score = 0
        if wafSignature['page'] and re.search(wafSignature['page'], page, re.I):
            score += 1
        if wafSignature['code'] and re.search(wafSignature['code'], code, re.I):
            # smaller weight because http codes aren't strong indicators
            score += 0.5
        if wafSignature['headers'] and re.search(wafSignature['headers'], headers, re.I):
            score += 1
        # keep whichever signature scores highest overall
        if score > bestScore:
            bestScore, bestName = score, wafName
    return bestName if bestScore != 0 else None