

Python google.search Method Code Examples

This article collects typical code examples of the google.search method in Python. If you are wondering how to use google.search, how it is called in practice, or what real examples look like, the hand-picked code examples below may help. You can also explore further usage examples of the google module that the method belongs to.


The following shows 14 code examples of the google.search method, sorted by popularity by default.
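Before the examples, here is a minimal usage sketch, assuming the third-party google package (pip install google, also distributed as googlesearch) is installed. The num, stop, pause, tld, and lang parameters mirror the ones that appear in the examples below; exact defaults may vary between package versions, so treat this as an illustration rather than a reference.

# Minimal sketch, assuming the third-party "google"/"googlesearch" package is installed.
from google import search

def first_results(query, limit=10):
    """Collect the first `limit` result URLs for a Google query."""
    urls = []
    # num: results requested per page; stop: total number of results to collect;
    # pause: delay (seconds) between HTTP requests to reduce the chance of a captcha.
    for url in search(query, num=limit, stop=limit, pause=2.0):
        urls.append(url)
    return urls

if __name__ == '__main__':
    for u in first_results('python google search example', limit=5):
        print(u)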

Example 1: get_candidates

# Required import: import google [as alias]
# Or: from google import search [as alias]
def get_candidates(qatp):

    wk = wiki.Wiki(prm.pages_path)
    titles_pos = wk.get_titles_pos()

    candidates = []
    n = 0
    for q,a,t,p in qatp:
        if n % 100 == 0:
            print('finding candidates sample', n)
        n += 1

        c = []

        for page in google.search(q.lower() + ' site:wikipedia.org', num=prm.max_candidates, stop=prm.max_candidates, pause=45):
            title = page.replace('https://en.wikipedia.org/wiki/','').replace('_',' ').lower()
            if title in titles_pos:
                c.append(titles_pos[title])

        candidates.append(c)
        
    return candidates 
Developer: nyu-dl, Project: dl4ir-webnav, Lines: 24, Source file: google_search.py

Example 2: getposts

# Required import: import google [as alias]
# Or: from google import search [as alias]
def getposts(web):
    web0 = web
    if "@" in web0:
        web0 = web0.split("@")[1]
    site = str(web0)
    def clear_cookie():
        fo = open(".google-cookie", "w")
        fo.close()


    def google_it(dork):
        clear_cookie()
        for title in search(dork, stop=30):
            print(B+' [!] Post Found :> '+C+title)
            time.sleep(0.5)

    try:
        print(C+" [*] Finding Pastebin posts ...\n")
        google_it("site:pastebin.com intext:"+site+"")

    except urllib.error.HTTPError as err:
        if err.code == 503:
            print(R+' [-] Captcha appeared...\n')
            pass 
Developer: VainlyStrain, Project: Vaile, Lines: 26, Source file: pastebin.py

Example 3: googleSearch

# Required import: import google [as alias]
# Or: from google import search [as alias]
def googleSearch():

    try:
        time.sleep(0.4)
        #print(R+'\n   ===========================')
        print(R+'\n    G O O G L E   S E A R C H')
        print(R+'   ––·‹›·––·‹›·––·‹›·––·‹›·––·\n')
                    
        lol = input(O+ " [§] QUERY :> " + color.END)
        time.sleep(0.8)
        m = input(C+' [§] Search limit (not recommended above 30) :> ')
        print(C+ " [!] Below are the list of websites with info on '" +lol+ "'")
        x = search(lol, tld='com', lang='es', stop=int(m))
        for url in x:
            print(O+"   [!] Site Found :>"+C+color.TR3+C+G + url+C+color.TR2+C)
            q = open('.google-cookie','w')
            q.close()
    except urllib.error.HTTPError:
        print(R+' [-] You have used google many times.')
        print(R+' [-] Service temporarily unavailable.') 
Developer: VainlyStrain, Project: Vaile, Lines: 22, Source file: googleSearch.py

Example 4: is_search

# Required import: import google [as alias]
# Or: from google import search [as alias]
def is_search(event):
    '''Determine whether it's a search command'''
    command = event["command"]
    if "search" in event["verbs"]:
        return True
    question_words = [
        "what",
        "when",
        "why",
        "how",
        "who",
        "are",
        "is"
    ]
    first_word = command.split(" ")[0].lower()
    log.debug("First word in command is {0}".format(first_word))
    if first_word in question_words:
        return True
    return False 
Developer: ironman5366, Project: W.I.L.L, Lines: 21, Source file: search.py

Example 5: main

# Required import: import google [as alias]
# Or: from google import search [as alias]
def main(data):
    '''Start the search'''
    response = {"text": None, "data":{}, "type": "success"}
    query = data["command"]
    log.info("In main search function with query {0}".format(query))
    db = data["db"]
    answer = False
    wolfram_key = tools.load_key("wolfram", db)
    wolfram_response = search_wolfram(query, wolfram_key)
    # If it found an answer answer will be set to that, if not it'll still be false
    answer = wolfram_response
    if answer:
        response["text"] = answer
    else:
        response["text"]=search_google(query)
    return response 
Developer: ironman5366, Project: W.I.L.L, Lines: 18, Source file: search.py

Example 6: getposts

# Required import: import google [as alias]
# Or: from google import search [as alias]
def getposts(web):
    web0 = web
    if "@" in web0:
        web0 = web0.split("@")[1]
    site = str(web0)
    def clear_cookie():
        fo = open(".google-cookie", "w")
        fo.close()


    def google_it(dork):
        clear_cookie()
        for title in search(dork, stop=30):
            print(B+' [!] Profile Found :> '+C+title)
            time.sleep(0.5)

    try:
        print(GR+" [*] Finding LinkedIn Employees ...\n")
        google_it("site:linkedin.com employees "+site+"")
        print(O+' [!] Pausing to avoid captcha...'+C)
        time.sleep(10)

        print(GR+' [*] Finding Linkedin company profiles...\n')
        google_it("site:linkedin.com comapany "+site+"")

    except urllib.error.HTTPError as err:
        if err.code == 503:
            print(R+' [-] Captcha appeared...\n')
            pass 
Developer: VainlyStrain, Project: Vaile, Lines: 31, Source file: linkedin.py

Example 7: setsearch

# Required import: import google [as alias]
# Or: from google import search [as alias]
def setsearch(self, ctx: Context):
        """Set search settings."""
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx) 
Developer: smlbiobot, Project: SML-Cogs, Lines: 6, Source file: search.py

Example 8: search

# Required import: import google [as alias]
# Or: from google import search [as alias]
def search(self, ctx: Context):
        """Google."""
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx) 
Developer: smlbiobot, Project: SML-Cogs, Lines: 6, Source file: search.py

Example 9: search_google

# Required import: import google [as alias]
# Or: from google import search [as alias]
def search_google(
            self, ctx: Context, search_str: str, lang='english', stop=1):
        """Google search and return URL results."""
        out = []
        await self.bot.send_typing(ctx.message.channel)
        for url in google.search(search_str, num=5, stop=stop):
            await self.bot.send_typing(ctx.message.channel)
            async with aiohttp.get(url) as response:
                soup = BeautifulSoup(await response.text(), "html.parser")
                out.append(soup.title.string)
            out.append("<{}>\n".format(url))
            # out.append(gout)
        for page in pagify('\n'.join(out)):
            await self.bot.say(page) 
Developer: smlbiobot, Project: SML-Cogs, Lines: 16, Source file: search.py

Example 10: search_google_images

# Required import: import google [as alias]
# Or: from google import search [as alias]
def search_google_images(
            self, ctx: Context, search_str: str, stop=1):
        """Google search images."""
        out = []
        await self.bot.send_typing(ctx.message.channel)
        for url in google.search_images(search_str, num=5, stop=stop):
            await self.bot.send_typing(ctx.message.channel)
            async with aiohttp.get(url) as response:
                soup = BeautifulSoup(await response.text(), "html.parser")
                out.append(soup.title.string)
            out.append("<{}>\n".format(url))
            # out.append(gout)
        for page in pagify('\n'.join(out)):
            await self.bot.say(page) 
Developer: smlbiobot, Project: SML-Cogs, Lines: 16, Source file: search.py

Example 11: search_imgur

# Required import: import google [as alias]
# Or: from google import search [as alias]
def search_imgur(self, ctx: Context, *, query: str):
        """Imgur search."""
        search_id = 0

        await self.bot.send_typing(ctx.message.channel)

        try:
            client_id = self.settings["imgur"]["id"]
            client_secret = self.settings["imgur"]["secret"]
        except KeyError:
            await self.bot.say("Please set imgur id and secret.")
            return

        try:
            search_id = self.settings["imgur"]["search_id"]
        except KeyError:
            self.settings["imgur"]["search_id"] = 0

        # count = 0
        client = ImgurClient(client_id, client_secret)
        results = client.gallery_search(query)

        try:
            result = next(islice(results, search_id, None))
            if result.is_album:
                img = client.get_image(result.cover)
            else:
                img = result
            await self.bot.say(str(img.link))
            search_id += 1
        except StopIteration:
            search_id = 0

        self.settings["imgur"]["search_id"] = search_id
        dataIO.save_json(JSON, self.settings) 
Developer: smlbiobot, Project: SML-Cogs, Lines: 37, Source file: search.py

Example 12: search_links

# Required import: import google [as alias]
# Or: from google import search [as alias]
def search_links(self):
        """Print the first 30 links from a Web search

        We limit the results to 30 links because this script serves as a demonstration and is
        not intended to be used for personal purposes.
        """
        for url in google.search(self.searchString, num=30, stop=1):
            parsed = urlparse(url)
            self.parsedUrls.append(parsed.scheme + "://" + parsed.netloc) 
Developer: Pirate-Crew, Project: IPTV, Lines: 11, Source file: Crawler.py

Example 13: run

# Required import: import google [as alias]
# Or: from google import search [as alias]
def run(self, args, lookup, reportDir):

        self.args = args

        #C58EA28C-18C0-4a97-9AF2-036E93DDAFB3 is string for open OWA attachments, for example
        #init lists

        #iterate the lookup list
        for i, l in enumerate(lookup):
            for d in self.args.dorks:

                #add header to result
                self.google_result.append('[i] Google query for: "%s site:%s"' % (str(d),str(l)))

                #open a file for each domain searched
                googleFile=open(reportDir+l+'/'+l+'_google_dork.txt','w')

                #show user whiat is being searched
                print ('[+] Google query %s for %s site:%s' % (str(i + 1),str(d),str(l)))
                print('[+] Results:')
                
                try:
                    #iterate url results from a search for the current dork and site:<current domain>
                    for url in search(str(d) + ' site:' + str(l), stop=20):
                        #append results together
                        self.google_result.append(url)

                        #rate limit with 2 second delay
                        time.sleep(2)
                #catch exceptions
                except Exception as e:
                    print ('[!] Error encountered: %s' % e)
                    pass
        #iterate results
        for r in self.google_result:
            #write results on newlines
            googleFile.writelines(r + '\r\n')

        #close the report file once all results are written
        googleFile.close()

        #verbosity flag
        if self.args.verbose is True:
            for r in self.google_result: print (''.join(r))
                
        #return results list
        return self.google_result 
Developer: bharshbarger, Project: AutOSINT, Lines: 46, Source file: googledork.py

Example 14: search_google

# Required import: import google [as alias]
# Or: from google import search [as alias]
def search_google(query):
    '''Search google and determine if wikipedia is in it'''
    search_object = google.search(query)
    #Determine if a wikipedia url is in the first four search results
    urls = []
    for i in range(0, 4):
        url = search_object.__next__()
        urls.append(url)
        if "wikipedia.org/wiki" in url:
            wikipedia_search = wikipedia.search(query)[0]
            url = wikipedia.page(wikipedia_search).url
            response = wikipedia.summary(wikipedia_search) + " ({0})".format(url)
            return response
    #If there were no wikipedia pages
    first_url = urls[0]
    try:
        article = Article(first_url)
        article.download()
        article.parse()
        article.nlp()
        article_summary = article.summary
        article_title = article.title
        return "{0}\n{1} - ({2})".format(
            article_summary, article_title, first_url
        )

    except Exception as article_exception:
        try:
            log.debug("Got error {0}, {1} while using newspaper, switching to bs4".format(
                article_exception, article_exception.args
            ))
            html = requests.get(first_url).text
            #Parse the html using bs4
            soup = BeautifulSoup(html, "html.parser")
            [s.extract() for s in soup(['style', 'script', '[document]', 'head', 'title'])]
            text = soup.getText()
            # break into lines and remove leading and trailing space on each
            lines = (line.strip() for line in text.splitlines())
            # break multi-headlines into a line each
            chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
            # drop blank lines
            soup_text = '\n'.join(chunk for chunk in chunks if " " in chunk)
            response = format(soup_text) + " ({0})".format(first_url)
            return response
        except Exception as search_exception:
            log.info("Error {0},{1} occurred while searching query {2}".format(
                search_exception, search_exception.args, query
            ))
            return "Error encountered on query {0}".format(query) 
Developer: ironman5366, Project: W.I.L.L, Lines: 51, Source file: search.py


Note: The google.search method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code; do not reproduce without permission.