This page collects typical usage examples of the Python scrapy FormRequest.meta["mid_category"] pattern. If you have been wondering what FormRequest.meta["mid_category"] does, how to use it, or where to find working examples, the curated code samples below may help. You can also read further about the enclosing class, scrapy.FormRequest.
Six code examples of the FormRequest.meta["mid_category"] pattern are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
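Before the examples, here is a minimal self-contained sketch of the mechanism they all rely on: every Scrapy Request (FormRequest included) carries a meta dict that is copied onto the resulting response, so values assigned on the request are readable inside the callback. The spider name, URLs, and category value below are hypothetical placeholders, not taken from the examples on this page.

import scrapy
from scrapy import FormRequest

class CategorySketchSpider(scrapy.Spider):
    # Hypothetical spider, for illustration only.
    name = "category_sketch"
    start_urls = ["https://example.com/categories"]

    def parse(self, response):
        # Style used in the examples below: build the request first,
        # then assign individual meta keys on it.
        request = FormRequest("https://example.com/list-p1.html",
                              callback=self.parse_list, dont_filter=True)
        request.meta["mid_category"] = "placeholder-category"
        yield request

        # Equivalent alternative: pass the whole meta dict to the constructor.
        yield FormRequest("https://example.com/list-p2.html",
                          callback=self.parse_list,
                          meta={"mid_category": "placeholder-category"},
                          dont_filter=True)

    def parse_list(self, response):
        # The value set on the request is available again here.
        self.logger.info(response.meta["mid_category"])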
Example 1: _parse_page_research
# Required import: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import meta["mid_category"] [as alias]
def _parse_page_research(self, response):
    # The next-to-last pager link holds the total page count.
    total_pages = int(clean_text(response.xpath(".//*[@class='pages']//a//text()").extract()[-2].strip()))
    first_url = response.meta["first_url"]
    request = FormRequest(first_url, callback=self._parse_research, dont_filter=True)
    request.meta["large_category"] = response.meta["large_category"]
    request.meta["mid_category"] = response.meta["mid_category"]
    yield request
    if total_pages > 1:
        for i in xrange(1, total_pages):
            # Strip the trailing ".html" and append "-pN.html" to build page N's URL.
            next_page = first_url[:-5] + '-p' + str(i + 1) + '.html'
            request = FormRequest(next_page, callback=self._parse_research, dont_filter=True)
            request.meta["large_category"] = response.meta["large_category"]
            request.meta["mid_category"] = response.meta["mid_category"]
            yield request
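A design note on Example 1: it re-requests first_url, the very page that produced the current response, just with a different callback. Scrapy's duplicate filter would normally drop such a repeated request, which is why these examples pass dont_filter=True on every FormRequest.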
Example 2: parse
# Required import: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import meta["mid_category"] [as alias]
def parse(self, response):
    url = response.url
    if "research" in url:
        categories = response.xpath(".//*[@class='catec']")
        # Iterate over all but the last 'catec' block.
        for i in xrange(len(categories) - 1):
            large_categories = categories[i].xpath(".//*[@class='fl']")
            large_category_name = clean_text(large_categories.xpath(".//text()").extract()[0].strip())
            mid_categories = categories[i].xpath(".//span")
            for mid_category in mid_categories:
                mid_category_name = clean_text(mid_category.xpath(".//text()").extract()[0].strip())
                page_url = mid_category.xpath(".//@href").extract()[0]
                request = FormRequest(page_url, callback=self._parse_page_research, dont_filter=True)
                request.meta["large_category"] = large_category_name
                request.meta["mid_category"] = mid_category_name
                request.meta["first_url"] = page_url
                yield request
    elif "free" in url:
        large_categories = response.xpath(".//*[@class='tul2']//h2//a")
        for i in xrange(len(large_categories)):
            large_category_name = clean_text(large_categories[i].xpath(".//text()").extract()[0].strip())
            page_url = large_categories[i].xpath("./@href").extract()[0]
            request = FormRequest(page_url, callback=self._parse_page_free, dont_filter=True)
            request.meta["large_category"] = large_category_name
            request.meta["first_url"] = page_url
            yield request
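Note that this parse() routes by inspecting response.url, and also stores the listing URL itself in meta as first_url, so the downstream _parse_page_research callback (Example 1) can rebuild the per-page URLs from it.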
Example 3: parse_middle_category
# Required import: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import meta["mid_category"] [as alias]
def parse_middle_category(self, response):
    mid_categories = response.xpath(".//*[@class='report2']//h2//a")
    for mid_category in mid_categories:
        mid_category_name = clean_text(mid_category.xpath("./text()").extract()[0].strip())
        page_url = mid_category.xpath("./@href").extract()[0]
        url = urljoin(self.base_url, page_url)
        request = FormRequest(url, callback=self._parse_item, dont_filter=True)
        request.meta["large_category"] = response.meta["large_category"]
        request.meta["mid_category"] = mid_category_name
        request.meta["first_url"] = url
        yield request
Example 4: parse
# Required import: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import meta["mid_category"] [as alias]
def parse(self, response):
    large_categories = response.xpath(".//*[@class='shopleft_bt']//a")
    middle_categories = response.xpath(".//*[@class='shopnav2']")
    # Large-category links and middle-category blocks are parallel lists matched by index.
    for i in xrange(len(large_categories)):
        large_category_name = clean_text(large_categories[i].xpath("./text()").extract()[0].strip())
        middle_category_list = middle_categories[i].xpath(".//*[@class='shopleft_wt']")
        for middle_category in middle_category_list:
            middle_category_name = clean_text(middle_category.xpath(".//a/text()").extract())
            page_url = middle_category.xpath(".//a//@href").extract()[0]
            url = urljoin(self.base_url, page_url)
            request = FormRequest(url, callback=self._parse_item, dont_filter=True)
            request.meta["large_category"] = large_category_name
            request.meta["mid_category"] = middle_category_name
            yield request
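Examples 3 and 4 only seed the meta dict; the values are consumed in the _parse_item callbacks shown next, where they are copied into item fields.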
Example 5: _parse_item
# Required import: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import meta["mid_category"] [as alias]
def _parse_item(self, response):
    reports = response.xpath(".//*[@class='info']")
    if len(reports) > 0:
        for report in reports:
            item = IndustryReportSpiderItem()
            # Category names travel with the request chain via meta.
            item["industry_large_category"] = response.meta["large_category"]
            item["industry_mid_category"] = response.meta["mid_category"]
            item["report_name"] = clean_text(report.xpath(".//h3//a/text()").extract()[0].strip())
            industry_small_chs_name = parseIndustryName(item["report_name"])
            if industry_small_chs_name is not None:
                item["industry_small_chs_name"] = industry_small_chs_name
            page_url = report.xpath(".//@href").extract()[0]
            url = urljoin(self.base_url, page_url)
            item["report_link"] = url
            string = clean_text(report.xpath(".//*[@class='rdate']//span/text()").extract()[0].strip())
            temp = self.parseItem(string)
            if len(temp) == 1:
                item["report_revision_time"] = temp[0][0]
                item["report_page_count"] = temp[0][1]
                item["report_graph_count"] = temp[0][2]
                date, date_precision = parse_date(item["report_revision_time"])
                item["report_revision_time_standard"] = date.replace(tzinfo=pytz.timezone('Asia/Shanghai'))
            item["source_domain"] = self.allowed_domains[0]
            item["source_name"] = u"中国投资咨询网"
            item["price_free"] = False
            yield item
    if_nextpage = response.xpath(".//*[@class='zw']")
    if len(if_nextpage) > 0:
        # The last link reads u'下一页' ("next page") when another page exists.
        if if_nextpage.xpath(".//text()").extract()[-1] == u'下一页':
            page_url = if_nextpage.xpath(".//@href").extract()[-1]
            url = urljoin(self.base_url, page_url)
            request = FormRequest(url, callback=self._parse_item, dont_filter=True)
            request.meta["large_category"] = response.meta["large_category"]
            request.meta["mid_category"] = response.meta["mid_category"]
            yield request
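For comparison, here is a sketch of the same next-page hop written with response.follow, which newer Scrapy versions provide and which resolves relative URLs on its own. The selector and meta keys mirror Example 5, but this is an illustration under those assumptions, not the original author's code:

def _parse_item(self, response):
    # ... item extraction as in Example 5 ...
    next_links = response.xpath(".//*[@class='zw']//@href").extract()
    if next_links:
        # response.follow joins the relative URL against response.url itself.
        yield response.follow(next_links[-1],
                              callback=self._parse_item,
                              dont_filter=True,
                              meta={"large_category": response.meta["large_category"],
                                    "mid_category": response.meta["mid_category"]})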
Example 6: _parse_item
# Required import: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import meta["mid_category"] [as alias]
def _parse_item(self, response):
    reports = response.xpath(".//*[@class='img_des']/a")
    if len(reports) > 0:
        for report in reports:
            item = IndustryReportSpiderItem()
            item["industry_large_category"] = response.meta["large_category"]
            item["industry_mid_category"] = response.meta["mid_category"]
            item["report_name"] = clean_text(report.xpath("./text()").extract()[0].strip())
            industry_small_chs_name = parseIndustryName(item["report_name"])
            if industry_small_chs_name is not None:
                item["industry_small_chs_name"] = industry_small_chs_name
            page_url = report.xpath(".//@href").extract()[0]
            url = urljoin(self.base_url, page_url)
            item["report_link"] = url
            report_time = self.parseTime(item["report_link"])
            if report_time is not None:
                item["report_revision_time"] = report_time
                # Parse the date only when a revision time was actually found.
                date, date_precision = parse_date(item["report_revision_time"])
                item["report_revision_time_standard"] = date.replace(tzinfo=pytz.timezone('Asia/Shanghai'))
            item["source_domain"] = self.allowed_domains[0]
            item["source_name"] = u"欧咨网"
            item["price_free"] = False
            yield item
    if len(response.xpath(".//*[@class='page']//@href")) > 1:  # pagination links exist
        page_len = clean_text(response.xpath(".//*[@class='page']//*[@class='fl_l']/text()").extract()[0].strip())
        nextPageurl = response.xpath(".//*[@class='page']//@href").extract()[-1]
        finds = self.pattern_page.findall(page_len)
        currentPage = finds[0][0]
        totalPage = finds[0][1]
        if currentPage != totalPage:
            url = urljoin(self.base_url, nextPageurl)
            request = FormRequest(url, callback=self._parse_item, dont_filter=True)
            request.meta["large_category"] = response.meta["large_category"]
            request.meta["mid_category"] = response.meta["mid_category"]
            yield request
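One caveat in Example 6: pattern_page.findall returns strings, so the currentPage != totalPage check is a string comparison. That is fine for this equality test, but converting both values with int() would be more robust if a numeric comparison were ever needed.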