

Python spiders.CrawlSpider Class Code Examples

This article collects typical usage examples of the Python class scrapy.spiders.CrawlSpider. If you have been wondering how the CrawlSpider class is used in practice, or looking for concrete examples of it, the curated class code examples below should help.


Fifteen code examples of the CrawlSpider class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
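Every snippet below subclasses CrawlSpider and overrides one of its hooks (__init__, parse, _requests_to_follow, or set_crawler). For orientation, here is a minimal, self-contained sketch of the pattern those snippets build on; the spider name, domain, and link pattern are illustrative placeholders, not taken from any project below.

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

class ExampleSpider(CrawlSpider):
    name = "example"                      # placeholder spider name
    allowed_domains = ["example.com"]
    start_urls = ["http://example.com/"]

    # Rules tell CrawlSpider which links to follow and which callback handles them.
    rules = (
        Rule(LinkExtractor(allow=r"/items/"), callback="parse_item", follow=True),
    )

    def parse_item(self, response):
        yield {"url": response.url, "title": response.css("title::text").get()}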

Example 1: __init__

    def __init__(self):
        # General-purpose data dict / list
        self.generalData_dict = dict()
        self.generalData_list = list()
        self.setup_hooks()  # set up hooks
        self.setup_domains()  # set up domain names

        # Initialize the CrawlSpider (doing this last is the key point)
        CrawlSpider.__init__(self)
Author: henpin, Project: module, Lines: 9, Source: basic_crawler.py
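The "doing this last is the key point" comment reflects how CrawlSpider works: CrawlSpider.__init__ calls self._compile_rules(), so any instance-level state the rules depend on must exist before the base initializer runs. A minimal sketch of that ordering, with an illustrative rule and spider name:

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

class OrderedInitSpider(CrawlSpider):
    name = "ordered_init"  # placeholder spider name

    def __init__(self, *args, **kwargs):
        # Instance-level rules must be assigned before CrawlSpider.__init__,
        # which compiles self.rules and resolves their string callbacks.
        self.rules = (
            Rule(LinkExtractor(allow=r"/pages/"), callback="parse_item"),
        )
        CrawlSpider.__init__(self, *args, **kwargs)

    def parse_item(self, response):
        yield {"url": response.url}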

Example 2: __init__

 def __init__(self):
     self.domain = "www.gsmarena.com"
     self.name = "gsmarena"
     self.custom_settings = {}
     self.allowed_domains = ["www.gsmarena.com"]
     CrawlSpider.__init__(self)
     self.start_urls = ["http://www.gsmarena.com/","http://www.gsmarena.com/makers.php3"]
     self.count = 0
     self.deny = ""
     self.crawl_limt = 0
     self.real_count = 0
Author: karanisverma, Project: devicefinder, Lines: 11, Source: gsm_sypder.py

Example 3: __init__

 def __init__(self):
     self.domain = "www.gsmarena.com"
     self.name = "gsmarena"
     self.custom_settings = {}
     self.allowed_domains = ["www.gsmarena.com"]
     CrawlSpider.__init__(self)
     self.start_urls = ["http://www.gsmarena.com/makers.php3",
                        "http://www.gsmarena.com/acer-phones-59.php",
                        "http://www.gsmarena.com/alcatel-phones-5.php"]
     self.count = 0
     self.deny = ""
     self.crawl_limt = 0
     self.real_count = 0
     self.batch_size = 300
     self.mobile_product = []
Author: karanisverma, Project: devicefinder, Lines: 15, Source: gsm_sypder.py

Example 4: __init__

 def __init__(self, rule, worksheet, logging):
     CrawlSpider.__init__(self)
     # use any browser you wish
     self.browser = webdriver.Firefox()
     self.logging = logging
     self.rule = rule
     self.name = self.rule["ranking_name"]
     self.logging.info("==============================")
     self.logging.info("self.rule[start_urls]: %s" % self.rule["start_urls"])
     self.start_urls = self.rule["start_urls"]
     # self.next_page is a defined array.
     self.next_page = self.rule["next_page"] \
                         if ("next_page" in self.rule) else ["NONE"]
     self.logging.info("#### self.next_page %s" % self.next_page)
     self.flag = self.rule["flag"] \
                         if ("flag" in self.rule) else ["NONE"]
     self.logging.info("#### self.flag %s" % self.flag)
     self.worksheet = worksheet
     self.logging.info("Finish the __init__ method ... ")
Author: JoySnow, Project: GraduationProject_PythonCrawler, Lines: 19, Source: timesranking.py

Example 5: __init__

    def __init__(self, **kwargs):
        '''
        :param kwargs:
         Read user arguments and initialize variables
        '''
        CrawlSpider.__init__(self)
        self.outDir = kwargs['outDir']
        self.startYear = kwargs['startYear']
        self.endYear = kwargs['endYear']
        print('startYear: ', self.startYear)
        print('self.endYear: ', self.endYear)
        print('self.outDir: ', self.outDir)

        self.headers = ({'User-Agent': 'Mozilla/5.0',
                         'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                         'X-Requested-With': 'XMLHttpRequest'})
        self.payload = {'username': '[user name for The Globe and Mail]', 'password': '[password for The Globe and Mail]'}
        self.apikey = '[API Key for Gigya]'
        self.categoryID = 'Production'
Author: hanhanwu, Project: Hanhan_NLP, Lines: 19, Source: my_news_spider.py
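The kwargs in Example 5 typically arrive from command-line spider arguments (e.g. scrapy crawl my_news_spider -a outDir=/tmp/out -a startYear=2010). A short sketch of the same pattern, with hypothetical names, using defaults so a missing argument does not raise KeyError:

from scrapy.spiders import CrawlSpider

class ArgsSpider(CrawlSpider):
    name = "args_spider"  # placeholder spider name

    def __init__(self, **kwargs):
        CrawlSpider.__init__(self)
        # kwargs.get() supplies a default when an -a argument is omitted.
        self.outDir = kwargs.get('outDir', 'output')
        self.startYear = kwargs.get('startYear', '2000')
        self.endYear = kwargs.get('endYear', '2020')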

Example 6: __init__

    def __init__(self):
        CrawlSpider.__init__(self)
        # create the database
        conn = None
        try:
            dbfile = '%s/%s' % (conf.PROJECT_PATH['data'], conf.SQLITE['file'])
            if os.path.exists(dbfile):
                moveto = '%s.%d' % (dbfile, int(time.time()))
                shutil.move(dbfile, moveto)
                print('old db file %s is moved to %s.' % (dbfile, moveto))

            conn = sqlite3.connect(dbfile)
            cursor = conn.cursor()
            for table in conf.SQLITE['tables']:
                cursor.execute(table['sql'])

            conn.commit()
            print('db initialization complete!')

        finally:
            if conn is not None:
                conn.close()
Author: Marcnuth, Project: Websites-Keywords-Generator, Lines: 20, Source: keywords.py
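As published, Example 6's try/finally could raise NameError if sqlite3.connect itself failed, since conn would never be bound (pre-binding conn to None, as above, fixes this). An alternative sketch using context managers; init_db and its parameters are illustrative, not part of the original project:

import sqlite3
from contextlib import closing

def init_db(dbfile, table_sqls):
    # closing() guarantees conn.close(); the inner 'with conn' commits on
    # success and rolls back if any DDL statement raises.
    with closing(sqlite3.connect(dbfile)) as conn:
        with conn:
            for sql in table_sqls:
                conn.execute(sql)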

Example 7: __init__

    def __init__(self):
        CrawlSpider.__init__(self)
        self.verificationErrors = []
        # self.selenium = selenium('localhost', 4444, "*chrome")

        self.driver = webdriver.Firefox()
Author: swh123456, Project: cnkiCrawl, Lines: 6, Source: cnkiSpider.py
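Examples 4 and 7 open a Selenium browser in __init__ but never close it, so Firefox processes can be left behind after each run. A sketch of one common remedy: Scrapy calls a spider's closed() method when the crawl ends, so the driver can be quit there. The spider name and URL are placeholders.

from scrapy.spiders import CrawlSpider
from selenium import webdriver

class SeleniumSpider(CrawlSpider):
    name = "selenium_spider"  # placeholder spider name
    start_urls = ["http://example.com/"]

    def __init__(self, *args, **kwargs):
        CrawlSpider.__init__(self, *args, **kwargs)
        self.driver = webdriver.Firefox()

    def closed(self, reason):
        # Called by Scrapy when the spider finishes; quit the browser
        # so no orphaned Firefox process remains.
        self.driver.quit()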

Example 8: __init__

 def __init__(self, *arg, **karg):
     self.name = karg['name']
     self.init_yaml('scrapy_service/templates/product.yaml', self.name)
     CrawlSpider.__init__(self, *arg)
Author: nhat2008, Project: vietnam-ecommerce-crawler, Lines: 4, Source: product_spider_object_type_html.py

Example 9: parse

 def parse(self, response):
     for res in CrawlSpider.parse(self, response):
         yield self.get_request(res.url)
Author: hsh075623201, Project: scrapy_tutorial, Lines: 3, Source: spider.py

Example 10: __init__

 def __init__(self, *args, **kwargs):
     CrawlSpider.__init__(self, *args, **kwargs)
     self.proxy_pool = proxy_list
Author: hsh075623201, Project: scrapy_tutorial, Lines: 3, Source: spider.py
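Examples 9 and 10 come from the same project: parse delegates each followed URL to a get_request helper, and __init__ stores a proxy pool. The helper itself is not shown; below is a plausible sketch of how the two pieces might fit together, with a hypothetical proxy list, using the request.meta['proxy'] key that Scrapy's built-in HttpProxyMiddleware reads:

import random

from scrapy import Request
from scrapy.spiders import CrawlSpider

proxy_list = ["http://127.0.0.1:8118", "http://127.0.0.1:8119"]  # hypothetical pool

class ProxySpider(CrawlSpider):
    name = "proxy_spider"  # placeholder spider name

    def __init__(self, *args, **kwargs):
        CrawlSpider.__init__(self, *args, **kwargs)
        self.proxy_pool = proxy_list

    def get_request(self, url):
        # HttpProxyMiddleware routes the request through meta['proxy'].
        return Request(url, meta={"proxy": random.choice(self.proxy_pool)})

    def parse(self, response):
        for res in CrawlSpider.parse(self, response):
            yield self.get_request(res.url)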

Example 11: _requests_to_follow

 def _requests_to_follow(self, response):
     if getattr(response, "encoding", None) is not None:
         return CrawlSpider._requests_to_follow(self, response)
     else:
         return []
Author: alexchung1233, Project: HezbollahScrapper, Lines: 5, Source: aljazeera_spider.py
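A note on Example 11: only text responses (TextResponse and subclasses such as HtmlResponse) expose an encoding attribute; plain binary Response objects do not. The guard therefore returns an empty list for non-text responses, presumably to skip downloads that link extraction could not handle, and falls through to CrawlSpider's normal rule-driven link following otherwise.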

Example 12: __init__

 def __init__(self, *arg, **karg):
     self.init_yaml('scrapy_service/templates/product.yaml', 'lazada_sitemap')
     CrawlSpider.__init__(self, *arg)
Author: nhat2008, Project: vietnam-ecommerce-crawler, Lines: 3, Source: product_spider_object_type_xml.py

Example 13: __init__

	def __init__(self):
		CrawlSpider.__init__(self)
		self.verificationErrors = []
		self.selenium = selenium("localhost", 4444, "*chrome", "http://www.domain.com")
		self.selenium.start()
Author: gsduong, Project: ScrapyPython, Lines: 5, Source: test_selenium.py

Example 14: __init__

 def __init__(self, *args, **kwargs):
     CrawlSpider.__init__(self, *args, **kwargs)
Author: clab, Project: gv-crawl, Lines: 2, Source: nolink-crawler.py

Example 15: set_crawler

 def set_crawler(self, crawler):
     print('call set_crawler')
     CrawlSpider.set_crawler(self, crawler)
Author: cfhb, Project: crawl_youtube, Lines: 3, Source: video_spider.py


Note: The scrapy.spiders.CrawlSpider class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Consult each project's License before distributing or using the code. Do not reproduce without permission.