

Python Request.meta['fobj'] Code Examples

This article collects typical usage examples of Request.meta['fobj'] from scrapy.http.request in Python, i.e. storing a value under the 'fobj' key of a request's meta dict. If you are wondering what Request.meta['fobj'] is for, how it is used, or what real code that uses it looks like, the curated examples here may help. You can also explore further usage examples of the scrapy.http.request.Request class that this attribute belongs to.


The following shows 2 code examples of Request.meta['fobj'], sorted by popularity by default.
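Background for the examples below: Request.meta is an ordinary dict attached to a request, and Scrapy exposes the same dict on the downloaded response as response.meta, so an object stored under a key such as 'fobj' when the request is built can be read back inside the callback. A minimal, self-contained sketch of that pattern follows; the spider name, URLs and file name are illustrative and not taken from the examples below.

import scrapy
from scrapy.http.request import Request


class MetaDemoSpider(scrapy.Spider):
    # Hypothetical spider used only to illustrate passing an object via Request.meta
    name = "meta_demo"
    start_urls = ["https://example.com/"]

    def parse(self, response):
        # Open an output file and hand the file object to the next callback
        fobj = open("output.txt", "w")
        request = Request("https://example.com/page2", callback=self.parse_page)
        request.meta['fobj'] = fobj
        yield request

    def parse_page(self, response):
        # response.meta is the meta dict of the request that produced this
        # response, so the file object stored in parse() is available here
        fobj = response.meta['fobj']
        fobj.write(response.url + "\n")
        fobj.close()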

Example 1: parse

# Required import: from scrapy.http.request import Request [as alias]
# Or: from scrapy.http.request.Request import meta['fobj'] [as alias]
def parse(self, response):
    dirname = os.getcwd()

    tab_selector = response.xpath('//div[contains(@id, "SMWrapr")]')
    # Loop over all tabs
    for tab in tab_selector.xpath('.//div[contains(@id, "Tab")]'):
        tabNameSel = tab.xpath('div/span[2]/text()').extract()
        if tabNameSel:
            tabName = tabNameSel[0]

        # One directory and one output file per tab
        os.chdir(dirname)
        if not os.path.exists(dirname + "/" + tabName):
            os.makedirs(dirname + "/" + tabName)
        fobj = open(dirname + "/" + tabName + ".txt", 'w')

        cat_selector = tab.xpath('div[2]/div[contains(@class, "SMSubCat")]')
        # Loop over all categories
        for category in cat_selector.xpath('div'):
            catNameSel = category.xpath('div/a/@title').extract()
            if catNameSel:
                catName = catNameSel[0]

            subcat_selector = category.xpath('.//ul')
            # Loop over all subcategories
            for subcat in subcat_selector.xpath('.//li'):
                subcatNameSel = subcat.xpath('.//a/@title').extract()
                if subcatNameSel:
                    subcatName = subcatNameSel[0]
                subcatLinkSel = subcat.xpath('.//a/@href').extract()
                if subcatLinkSel:
                    subcatLink = subcatLinkSel[0] + "?sort=plrty"

                # Hand the open file object and the tab/category/subcategory
                # names to the subcategory callback through the request's meta dict
                request = Request(subcatLink, callback=self.parse_subcatpage)
                request.meta['fobj'] = fobj
                request.meta['tabName'] = tabName
                request.meta['catName'] = catName
                request.meta['subcatName'] = subcatName
                yield request

        os.chdir(dirname)

    fobj.close()
Developer ID: ank-26, Project: Ecomm, Lines of code: 61, Source file: snapdeal_spider.py
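The parse_subcatpage callback that Example 1 yields requests to is not part of this excerpt. Below is a hypothetical sketch of how it might read the values passed through meta; the product selector and output format are assumptions, not the original project's code.

def parse_subcatpage(self, response):
    # Retrieve the file object and names that parse() attached to the request
    fobj = response.meta['fobj']
    tabName = response.meta['tabName']
    catName = response.meta['catName']
    subcatName = response.meta['subcatName']

    # Hypothetical extraction: write one line per product title on the page
    for title in response.xpath('//div[@class="product"]//a/@title').extract():
        fobj.write(tabName + ":" + catName + ":" + subcatName + ":" + title + "\n")

Note that Scrapy schedules the yielded requests asynchronously, so parse() normally finishes (and reaches fobj.close()) before these callbacks run; sharing an open file handle through meta, as Example 1 does, is therefore fragile.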

Example 2: parse

# Required import: from scrapy.http.request import Request [as alias]
# Or: from scrapy.http.request.Request import meta['fobj'] [as alias]
    def parse(self, response):
        # Open the output file in append mode; the handle is passed to the
        # per-link callback via request.meta
        fobj = open("abus.txt", 'a+')

        link_selector = response.xpath('//div[@class="detrow"]')
        for link in link_selector.xpath('.//ul/li/a/@href'):
            url = link.extract()
            request = Request(url, callback=self.parse_link)
            request.meta['fobj'] = fobj
            yield request

        # Follow the "Next" pagination link, if any, with this same callback
        pagenav = response.xpath('//div[@class="pagenav"]')
        nextSel = pagenav.xpath('a[contains(text(),"Next")]')
        if nextSel:
            nextLink = nextSel.xpath('@href').extract()[0]
            print(nextLink)
            request2 = Request(nextLink, callback=self.parse)
            yield request2
Developer ID: ank-26, Project: Ecomm, Lines of code: 24, Source file: abus_spider.py
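As in Example 1, the parse_link callback is not shown in this excerpt; it would presumably retrieve the shared handle with response.meta['fobj'] and write to abus.txt. Here the file is opened in append mode and never closed in parse(), so, unlike Example 1, the handle remains usable when the callbacks eventually run.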


Note: The scrapy.http.request.Request.meta['fobj'] examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.