当前位置: 首页>>代码示例>>Python>>正文


Python link_finder.LinkFinder类代码示例

本文整理汇总了Python中link_finder.LinkFinder的典型用法代码示例。如果您正苦于以下问题:Python LinkFinder类的具体用法?Python LinkFinder怎么用?Python LinkFinder使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了LinkFinder类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: gather_links

 def gather_links(page_url):
     """Fetch *page_url* via CustomConnection and return the links found.

     Returns an empty set when fetching or parsing fails.
     """
     try:
         finder = LinkFinder(Spider.base_url, page_url)
         finder.feed(CustomConnection.URL(page_url))
     except Exception as e:
         # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
         # propagate; surface the cause instead of swallowing it silently.
         print(e)
         return set()
     return finder.page_links()
开发者ID:tutu86,项目名称:Spider,代码行数:7,代码来源:spider.py

示例2: gather_links

    def gather_links(page_url):
        """Download *page_url* (plus Spider.suffix), archive the page as
        .html and converted .txt under Spider.project_name, and return the
        set of links discovered on it.

        Returns an empty set on any fetch/parse failure.
        """
        html_string = ''
        try:
            print("urlopen(" + page_url + Spider.suffix + ")")
            response = urlopen(page_url + Spider.suffix)
            html_bytes = response.read()
            html_string = html_bytes.decode("utf-8")
            print('page_url = ' + page_url)
            url_elems = page_url.split('/')
            base_name = Spider.project_name + '/' + url_elems[-1]
            html_file = base_name + '.html'
            print("save to " + html_file)
            # Explicit encoding so saving never depends on the platform
            # default (which can raise UnicodeEncodeError on non-ASCII pages).
            with open(html_file, 'w', encoding='utf-8') as f:
                f.write(html_string)
            finder = LinkFinder(Spider.base_url, page_url, Spider.ahref_class)
            finder.feed(html_string)

            converter = HTMLToTXTConverter()
            converter.feed(html_string)
            txt_file = base_name + '.txt'
            print("save to " + txt_file)
            with open(txt_file, 'w', encoding='utf-8') as f:
                f.write(converter.getText())

        except Exception as e:
            # Was a bare `except:` printing sys.exc_info()[0] (just the
            # exception class); print the exception itself instead.
            print(e)
            print('Error: can not crawl page')
            return set()
        return finder.page_links()
开发者ID:hbdhj,项目名称:python,代码行数:32,代码来源:spider.py

示例3: gather_links

    def gather_links(page_url):
        """Fetch *page_url*, transparently handling gzip compression and
        utf-8/gbk decoding, and return the set of links found.

        Returns an empty set on failure; the HTTP response is closed on
        every path.
        """
        html_string = ""
        response = None
        try:
            response = urlopen(page_url)

            if "text/html" in response.getheader("content-Type"):
                zipped_html_bytes = response.read()
                if Spider.html_gzipped:
                    try:
                        html_bytes = gzip.decompress(zipped_html_bytes)
                    except IOError:
                        # Server stopped gzipping: remember it and fall back
                        # to the raw bytes.
                        Spider.html_gzipped = False
                        html_bytes = zipped_html_bytes
                else:
                    html_bytes = zipped_html_bytes
                try:
                    html_string = html_bytes.decode("utf-8")
                except UnicodeDecodeError:
                    try:
                        html_string = html_bytes.decode("gbk")
                    except Exception as e:
                        print(e)
            finder = LinkFinder(Spider.base_url, page_url)
            finder.feed(html_string)
        except Exception as e:
            print(e)
            print("Error: can not craw page.")
            return set()
        finally:
            # Bug fix: the original only closed the response on the success
            # path, leaking the connection whenever an exception was raised
            # after urlopen() had succeeded.
            if response is not None:
                response.close()
        return finder.page_links()
开发者ID:safetychinese,项目名称:link_crawler,代码行数:30,代码来源:spider.py

示例4: gather_links

 def gather_links(page_url):
     """Collect external links reachable from *page_url* and return the
     page's internal links; empty set when the page cannot be crawled.
     """
     try:
         finder = LinkFinder(Spider.base_url, page_url)
         finder.getAllExternalLinks(page_url)
     except Exception as e:
         # Narrowed from a bare `except:`; surface the cause for debugging.
         print(e)
         print('Error : can not crawl page')
         return set()
     return finder.page_internalLink()
开发者ID:everfree19,项目名称:ProjectLexicon,代码行数:8,代码来源:spider.py

示例5: gather_links

 def gather_links(page_url):
     """Fetch *page_url* and return the set of links on it.

     Returns an empty set when the page cannot be fetched or parsed.
     """
     html_string = ''
     try:
         response = urlopen(page_url)
         if 'text/html' in response.getheader('Content-Type'):
             html_string = response.read().decode('utf-8')
         # Bug fix: the original passed Spider.page_url (a class attribute
         # that may not exist or may be stale) instead of the page_url
         # argument actually being crawled.
         finder = LinkFinder(Spider.base_url, page_url)
         finder.feed(html_string)
     except Exception as e:
         print('Error: can not crawl page| ', e)
         return set()
     return finder.page_links()
开发者ID:suqingdong,项目名称:Sources,代码行数:12,代码来源:spider.py

示例6: gather_links

 def gather_links(page_url):
     """Fetch *page_url* and return the set of links found on it.

     Returns an empty set on any failure.
     """
     html_string = ''
     try:
         response = urlopen(page_url)
         if 'text/html' in response.getheader('Content-Type'):
             html_bytes = response.read()
             html_string = html_bytes.decode('utf-8')
         finder = LinkFinder(Spider.base_url, page_url)
         finder.feed(html_string)
     except Exception as e:
         # Narrowed from a bare `except:` so interrupts propagate; print
         # the cause so failures are not silent.
         print(e)
         print("Error : Can't crawl page")
         return set()
     return finder.page_links()
开发者ID:Agham,项目名称:Spidey,代码行数:13,代码来源:spider.py

示例7: gather_links

 def gather_links(page_url):
     """Return the set of links discovered on *page_url*.

     Any failure while fetching or parsing yields an empty set.
     """
     html_string = ''
     try:
         response = urlopen(page_url)
         content_type = response.getheader('Content-Type')
         if 'text/html' in content_type:
             # Decode the body in one step; only HTML pages are parsed.
             html_string = response.read().decode("utf-8")
         finder = LinkFinder(Spider.base_url, page_url)
         finder.feed(html_string)
     except Exception as e:
         print(str(e))
         return set()
     return finder.page_links()
开发者ID:deviantdear,项目名称:Python_Webscraper,代码行数:13,代码来源:spider.py

示例8: gather_links

 def gather_links(page_url):
     """Fetch *page_url* and return the set of links on it (empty on error)."""
     html_string = ''
     try:
         response = urlopen(page_url)
         # NOTE(review): this exact-match header test only accepts
         # 'text/html; charset=utf-8'; pages with any other charset are
         # skipped. Preserved as-is — confirm whether substring matching
         # ('text/html' in ...) was intended.
         if response.getheader('Content-type') == 'text/html; charset=utf-8':
             html_bytes = response.read()
             html_string = html_bytes.decode('utf-8')
         finder = LinkFinder(Spider.base_url, page_url)
         finder.feed(html_string)
     except Exception as e:
         # Narrowed from a bare `except:`; print the cause for debugging.
         print(e)
         print('Error: can not crawl page')
         return set()
     return finder.page_links()
开发者ID:parkchul72,项目名称:Crawler,代码行数:13,代码来源:spider.py

示例9: gather_links

 def gather_links(page_url):
     """Fetch *page_url* with requests and return the set of links found.

     Returns an empty set when the page cannot be fetched or parsed.
     """
     html_string = ''
     try:
         response = requests.get(page_url)
         if 'text/html' in response.headers['Content-Type']:
             # Bug fix: str(response.content) produced the literal text
             # "b'...'" around the bytes repr; response.text decodes the
             # body using the response's declared encoding.
             html_string = response.text
         finder = LinkFinder(Spider.base_url, page_url)
         finder.feed(html_string)
     except Exception as e:
         print(e)
         print('Error: can not crawl page')
         return set()
     return finder.page_links()
开发者ID:andreisid,项目名称:python,代码行数:13,代码来源:spider.py

示例10: gather_links

 def gather_links(page_url):
     """Fetch *page_url* and return the links extracted by LinkFinder.

     NOTE(review): error handling was deliberately commented out in the
     original, so network or parse errors propagate to the caller; the
     dead commented-out code and the inert triple-quoted string (a no-op
     expression statement) have been removed.
     """
     response = urlopen(page_url)
     # Raw bytes are handed to the parser unchanged, exactly as before;
     # presumably parseAndGetLinks accepts bytes — confirm against LinkFinder.
     html_bytes = response.read()
     finder = LinkFinder(Spider.base_url, page_url)
     links = finder.parseAndGetLinks(html_bytes)
     return links
开发者ID:zangree,项目名称:Spider,代码行数:13,代码来源:spider.py

示例11: gather_links

 def gather_links(page__url):
     """Fetch *page__url* and return the set of links on it.

     Returns an empty set when the page cannot be crawled.
     """
     html_string = ""
     try:
         response = urlopen(page__url)
         if response.getheader("Content-Type") == "text/html":
             html_bytes = response.read()
             html_string = html_bytes.decode("utf-8")
         finder = LinkFinder(Spider.base_url, page__url)
         finder.feed(html_string)
     except Exception as e:
         # Narrowed from a bare `except:`; report what actually failed.
         print(e)
         print("Error: cannot crawl page")
         return set()
     return finder.page_links()
开发者ID:keegaz,项目名称:Python,代码行数:13,代码来源:spider.py

示例12: gather_link

	def gather_link(page_rul):
		"""Fetch the given URL and return the set of links on the page.

		Returns an empty set on failure. (The parameter name is kept as
		``page_rul`` for backward compatibility with existing callers.)
		"""
		html_string = ''
		try:
			# Bug fix: the body referenced an undefined name ``page_url``,
			# which raised NameError on every call and made the function
			# always return an empty set.
			response = urlopen(page_rul)
			# Bug fix: the original called getheader('content-type' == 'text/html'),
			# i.e. getheader(False); compare the header's value instead.
			if response.getheader('content-type') == 'text/html':
				html_bytes = response.read()
				html_string = html_bytes.decode("utf-8")
			# Bug fixes: Spider.base_rul -> Spider.base_url (typo), pass the
			# URL being crawled, and feed the decoded string (html_bytes is
			# unbound for non-HTML responses).
			finder = LinkFinder(Spider.base_url, page_rul)
			finder.feed(html_string)
		except Exception as e:
			print("error")
			print(e)
			return set()

		# Bug fix: call page_links(); the original returned the bound method
		# object itself rather than the links.
		return finder.page_links()
开发者ID:yuqingwang15,项目名称:pythonproblempractices,代码行数:14,代码来源:spider.py

示例13: gather_links

 def gather_links(page_url):
     """Fetch *page_url* using the crawler's custom request headers and
     return the set of links found; empty set on failure.
     """
     html_str = ''
     try:
         request = Request(page_url, headers=Spider.headers)
         response = urlopen(request)
         if 'text/html' in response.getheader('Content-Type'):
             html_bytes = response.read()
             html_str = html_bytes.decode('utf-8')
         finder = LinkFinder(Spider.base_url, page_url)
         finder.feed(html_str)
     except Exception as e:
         # Narrowed from a bare `except:`; include the cause in the output.
         print(e)
         print('Cannot access ' + page_url)
         return set()
     return finder.page_links()
开发者ID:macctown,项目名称:Crawler,代码行数:14,代码来源:spider.py

示例14: gather_links

    def gather_links(page_url):
        """Fetch *page_url* and return the set of crawled links.

        Returns an empty set when the page cannot be fetched or parsed.
        """
        html_string = ''
        try:
            response = urlopen(page_url)

            # Bug fixes: the original initialised ``html_str`` but fed the
            # never-assigned ``html_string`` (NameError for any non-HTML
            # response), and used the Python 2 idiom
            # ``response.info().getheader(...)`` which does not exist on
            # Python 3 responses.
            if 'text/html' in response.getheader('Content-Type'):
                html_bytes = response.read()
                html_string = html_bytes.decode("utf-8")
            finder = LinkFinder(Spider.base_url)
            finder.feed(html_string)
            # Return the set of URLs harvested from the page.
            return finder.get_links()
        except Exception as e:
            # Narrowed from a bare `except:`; show what failed.
            print(e)
            print('Error:can not crawl page.')
            return set()

示例15: gather_links

 def gather_links(page_url):
     """Fetch *page_url* with a browser User-Agent and return its links.

     Returns an empty set when the page cannot be fetched or parsed.
     """
     html_string = ''
     try:
         header = {
             'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36 SE 2.X MetaSr 1.0'
         }
         # Bug fix: the second positional argument of requests.get() is
         # ``params`` (query string), not the request headers; the original
         # sent the User-Agent dict as URL parameters.
         response = requests.get(page_url, headers=header)
         content_type = response.headers['Content-Type']
         # NOTE(review): exact match means other charsets yield an empty
         # page; preserved as in the original.
         if content_type == 'text/html; charset=utf-8':
             html_string = response.text
         finder = LinkFinder(Spider.base_url, page_url)
         finder.feed(html_string)
     except Exception as e:
         # Narrowed from a bare `except:`; show the underlying error.
         print(e)
         print('Error: can not crawl page')
         return set()
     return finder.page_links()
开发者ID:lq08025107,项目名称:pyspider,代码行数:16,代码来源:spider.py


注:本文中的link_finder.LinkFinder类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。