本文整理汇总了Python中googlesearch.search方法的典型用法代码示例。如果您正苦于以下问题:Python googlesearch.search方法的具体用法?Python googlesearch.search怎么用?Python googlesearch.search使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类googlesearch
的用法示例。
在下文中一共展示了googlesearch.search方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: getposts
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def getposts(web):
    """Search Google for pastebin.com posts that mention the target *web*.

    Strips any leading "user@" so only the domain is dorked, then prints
    every matching result title. Best-effort: HTTP errors are reported
    (503 == Google captcha) but never propagated to the caller.
    """
    web0 = web
    # Keep only the domain part of an e-mail-style target.
    if "@" in web0:
        web0 = web0.split("@")[1]
    site = str(web0)

    def clear_cookie():
        # Truncate the cookie file googlesearch maintains; 'with'
        # guarantees the handle is closed (original left it to GC).
        with open(".google-cookie", "w"):
            pass

    def google_it(dork):
        clear_cookie()
        for title in search(dork, stop=30):
            print(B + ' [!] Post Found :> ' + C + title)
            time.sleep(0.5)  # throttle to stay under Google's rate limit

    try:
        print(C + " [*] Finding Pastebin posts ...\n")
        google_it("site:pastebin.com intext:" + site)
    except urllib.error.HTTPError as err:
        # Google serves its captcha page with HTTP 503 when rate-limited.
        if err.code == 503:
            print(R + ' [-] Captcha appeared...\n')
        # Other HTTP errors are deliberately swallowed (best-effort scan).
示例2: googleSearch
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def googleSearch():
    """Interactively prompt for a query plus a result limit and print hits.

    Reads the query and limit from stdin, runs googlesearch.search()
    (lang='es' as in the original tool) and prints each result URL.
    Rate-limit errors from Google are caught and reported, not raised.
    """
    try:
        time.sleep(0.4)
        #print(R+'\n ===========================')
        print(R+'\n G O O G L E S E A R C H')
        print(R+' ––·‹›·––·‹›·––·‹›·––·‹›·––·\n')
        lol = input(O+ " [§] QUERY :> " + color.END)
        time.sleep(0.8)
        m = input(C+' [§] Search limit (not recommended above 30) :> ')
        print(C+ " [!] Below are the list of websites with info on '" +lol+ "'")
        x = search(lol, tld='com', lang='es', stop=int(m))
        for url in x:
            print(O+" [!] Site Found :>"+C+color.TR3+C+G + url+C+color.TR2+C)
        # Truncate googlesearch's cookie file; 'with' guarantees the
        # handle closes (original used open()/close() manually).
        with open('.google-cookie', 'w'):
            pass
    except urllib.error.HTTPError:
        print(R+' [-] You have used google many times.')
        print(R+' [-] Service temporarily unavailable.')
示例3: d0rkit
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def d0rkit(site, dork, filename):
    """Run *dork* through Google and append every result URL to *filename*.

    *site* is unused here but kept for interface compatibility with callers.
    The output file is opened with 'with' so it is closed even if the
    search raises mid-iteration (the original leaked the handle then).
    """
    clear_cookie()
    # "a" so repeated runs accumulate results instead of overwriting.
    with open(filename, "a") as out:
        for title in search(
            dork,
            tld='com',    # The top level domain
            lang='en',    # The language
            num=10,       # Number of results per page
            start=0,      # First result to retrieve
            stop=50,      # Last result to retrieve
            pause=2.0,    # Required to bypass Google's rate limiting
        ):
            print(title)
            out.write(title)
            out.write("\n")
示例4: statistical_report
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def statistical_report(url, hostname):
    """Phishing feature: -1 when *url*/*hostname* matches known-bad hosts.

    Resolves *hostname* to an IP, then checks the URL against a blacklist
    of phishing domains and the IP against a blacklist of phishing IPs.
    Returns -1 (suspicious) on any match or if resolution fails, else 1.
    """
    try:
        ip_address = socket.gethostbyname(hostname)
    except (OSError, UnicodeError):
        # Narrowed from a bare except: gethostbyname raises socket.gaierror
        # (an OSError) for unresolvable hosts and UnicodeError for hostnames
        # it cannot encode. A bare except also ate KeyboardInterrupt.
        return -1
    url_match = re.search(
        r'at\.ua|usa\.cc|baltazarpresentes\.com\.br|pe\.hu|esy\.es|hol\.es|sweddy\.com|myjino\.ru|96\.lt|ow\.ly', url)
    ip_match = re.search(
        '146\.112\.61\.108|213\.174\.157\.151|121\.50\.168\.88|192\.185\.217\.116|78\.46\.211\.158|181\.174\.165\.13|46\.242\.145\.103|121\.50\.168\.40|83\.125\.22\.219|46\.242\.145\.98|'
        '107\.151\.148\.44|107\.151\.148\.107|64\.70\.19\.203|199\.184\.144\.27|107\.151\.148\.108|107\.151\.148\.109|119\.28\.52\.61|54\.83\.43\.69|52\.69\.166\.231|216\.58\.192\.225|'
        '118\.184\.25\.86|67\.208\.74\.71|23\.253\.126\.58|104\.239\.157\.210|175\.126\.123\.219|141\.8\.224\.221|10\.10\.10\.10|43\.229\.108\.32|103\.232\.215\.140|69\.172\.201\.153|'
        '216\.218\.185\.162|54\.225\.104\.146|103\.243\.24\.98|199\.59\.243\.120|31\.170\.160\.61|213\.19\.128\.77|62\.113\.226\.131|208\.100\.26\.234|195\.16\.127\.102|195\.16\.127\.157|'
        '34\.196\.13\.28|103\.224\.212\.222|172\.217\.4\.225|54\.72\.9\.51|192\.64\.147\.141|198\.200\.56\.183|23\.253\.164\.103|52\.48\.191\.26|52\.214\.197\.72|87\.98\.255\.18|209\.99\.17\.27|'
        '216\.38\.62\.18|104\.130\.124\.96|47\.89\.58\.141|78\.46\.211\.158|54\.86\.225\.156|54\.82\.156\.19|37\.157\.192\.102|204\.11\.56\.48|110\.34\.231\.42',
        ip_address)
    if url_match:
        return -1
    elif ip_match:
        return -1
    else:
        return 1
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:23,代码来源:features_extraction.py
示例5: cli
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def cli(google, wiki):
    """Command-line helper: Google-search and/or Wikipedia lookup.

    *google* and *wiki* are truthy flags selecting which mode(s) to run.
    Ported to Python 3 (the original used Python-2 `print`/`raw_input`,
    inconsistent with the rest of this file).
    """
    browser = mechanize.Browser()
    browser.set_handle_robots(False)  # Allows everything to be written
    cookies = mechanize.CookieJar()
    browser.set_cookiejar(cookies)
    browser.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.517.41 Safari/534.7')]
    browser.set_handle_refresh(False)  # Sometimes hangs without this
    if google:
        query = input("Enter the topic you want to search about: ")
        for link in search(query, tld="co.in", num=10, stop=1, pause=2):
            print(link)
    if wiki:
        wiki_topic = input('Enter the topic you want to read about: ')
        result = wikipedia.page(title=wiki_topic, auto_suggest=True, redirect=True, preload=False)
        wiki_content = result.content
        print(wiki_content)
示例6: main
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def main():
    """Run a fixed set of Google dorks against args.domain and print hits.

    Exits with code -1 when no domain was supplied on the command line.
    """
    if args.domain is None:  # `is None`, not `== None` (PEP 8)
        # Use sys.argv[0] here so we never have to update this line if we change the script name
        print("Please enter the domain you wish to target: " + sys.argv[0] + " -d target.com")
        # -1 exit code error
        exit(-1)
    else:
        site = args.domain
        # This is where the results will be stored
        results_list = []
        # A keyed list of dorks
        dorks = {
            'site': 'site:"'+site+'" inurl:"wp-" OR inurl:"plugin" OR inurl:"upload" OR inurl:"download"',
            'php': 'inurl:"?id=" AND filetype:"php"',
            'loginPage': "loginpage.txt"
        }
        # Loop over the dict of dorks (items() gives key and query at once)
        for name, query in dorks.items():
            # Verbose output
            if args.verbose:
                print("Currently Running : " + query)
            # Perform the google search; pause=2.0 throttles requests
            search_results = search(query, tld='com', lang='en', num=10, start=0, stop=None, pause=2.0)
            # Collect every result URL
            results_list.extend(search_results)
        # Print the results_list
        print(results_list)
示例7: having_ip_address
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def having_ip_address(url):
    """Feature: -1 when *url* embeds a literal IP address, else 1.

    Relies on the module-level `ipv4_pattern` / `ipv6_pattern` regexes.
    """
    combined_pattern = ipv4_pattern + "|" + ipv6_pattern
    if re.search(combined_pattern, url):
        return -1
    return 1
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:6,代码来源:features_extraction.py
示例8: shortening_service
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def shortening_service(url):
    """Feature: -1 when *url* uses a known URL-shortening service, else 1.

    Relies on the module-level `shortening_services` regex.
    """
    if re.search(shortening_services, url):
        return -1
    return 1
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:5,代码来源:features_extraction.py
示例9: having_at_symbol
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def having_at_symbol(url):
    """Feature: -1 when *url* contains an '@' (phishing trick), else 1."""
    return -1 if '@' in url else 1
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:5,代码来源:features_extraction.py
示例10: double_slash_redirecting
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def double_slash_redirecting(url):
    """Feature: -1 when '//' appears after the scheme (redirect trick), else 1.

    Position 6 (0-based) is where the scheme's own '//' sits, so only a
    LATER '//' is suspicious. Plain str.rfind is simpler than a regex here.
    """
    last_occurrence = url.rfind('//')
    if last_occurrence > 6:
        return -1
    return 1
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:7,代码来源:features_extraction.py
示例11: prefix_suffix
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def prefix_suffix(domain):
    """Feature: -1 when *domain* contains a hyphen (common in phishing), else 1."""
    return -1 if '-' in domain else 1
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:5,代码来源:features_extraction.py
示例12: https_token
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def https_token(url):
    """Feature: -1 when 'http'/'https' appears AFTER the real scheme, else 1.

    First strips a leading scheme (module-level `http_https` regex), then
    flags any remaining http token in the rest of the URL.
    """
    scheme = re.search(http_https, url)
    if scheme and scheme.start() == 0:
        url = url[scheme.end():]
    # Note: in 'http|https' the 'http' branch already covers 'https';
    # behavior is unchanged since only match existence matters.
    remainder = re.search('http|https', url)
    return -1 if remainder else 1
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:8,代码来源:features_extraction.py
示例13: abnormal_url
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def abnormal_url(domain, url):
    """Feature: 1 when the registered hostname appears in *url*, else -1.

    *domain* is a WHOIS-style object exposing `.name`.
    NOTE(review): the hostname is used as a regex pattern unescaped, so
    dots match any character — preserved from the original.
    """
    registered = domain.name
    found = re.search(registered, url)
    if found:
        return 1
    return -1
# IFrame Redirection
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:9,代码来源:features_extraction.py
示例14: google_index
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def google_index(url):
    """Feature: 1 when Google returns any results for *url*, else -1.

    Bug fix: the original called `search(url, 5)`, which passes 5 as the
    second positional parameter of googlesearch.search — the `tld` string —
    not a result limit. The intent is to cap results at 5 via `stop=`.
    """
    site = search(url, stop=5)
    return 1 if site else -1
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:5,代码来源:features_extraction.py
示例15: get_hostname_from_url
# 需要导入模块: import googlesearch [as 别名]
# 或者: from googlesearch import search [as 别名]
def get_hostname_from_url(url):
    """Return the hostname part of *url*.

    Drops a leading scheme/'www.' prefix (whichever alternative matches
    first) and everything from the first '/' after it.
    """
    # TODO: Put this pattern in patterns.py as something like - get_hostname_pattern.
    prefix_pattern = "https://|http://|www.|https://www.|http://www."
    host = url
    prefix = re.search(prefix_pattern, host)
    if prefix:
        host = host[prefix.end():]
    path_start = re.search("/", host)
    if path_start:
        host = host[:path_start.start()]
    return host
# TODO: Put the DNS and domain code into a function.
开发者ID:philomathic-guy,项目名称:Malicious-Web-Content-Detection-Using-Machine-Learning,代码行数:17,代码来源:features_extraction.py