This article collects typical usage examples of FormRequest.meta["large_category_name"] from Python's Scrapy framework. If you have been wondering how meta["large_category_name"] is set on a scrapy.FormRequest, or how to read it back in a callback, the curated code examples below may help. Note that meta is not a method but a dict attribute inherited from scrapy.Request: the examples assign category context into it on the outgoing request and read it from response.meta in the callback. You can also read further about the enclosing class, scrapy.FormRequest.
The following shows 4 code examples of FormRequest.meta["large_category_name"], sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
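All four examples share one pattern: crawl context is attached to an outgoing request through its meta dict, and the callback reads it back from response.meta. The minimal sketch below illustrates just that pattern; the spider name, URLs, and category value are hypothetical, and it uses the documented alternative of passing the dict through the constructor's meta keyword instead of assigning keys afterwards.

from urllib.parse import urljoin

import scrapy
from scrapy import FormRequest

class MetaDemoSpider(scrapy.Spider):
    # Hypothetical spider, used only to illustrate the meta-passing pattern.
    name = "meta_demo"
    base_url = "https://example.com/"  # placeholder, not the original site
    start_urls = [base_url]

    def parse(self, response):
        # Passing meta= at construction is equivalent to assigning
        # request.meta["large_category_name"] = ... after construction.
        yield FormRequest(
            urljoin(self.base_url, "reports/"),
            callback=self.parse_item,
            meta={"large_category_name": "finance"},
        )

    def parse_item(self, response):
        # The value set on the request is available on the response.
        self.logger.info(response.meta["large_category_name"])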
Example 1: _parse_list
# Required import: from scrapy import FormRequest
# Also needed by this snippet: from urllib.parse import urljoin
def _parse_list(self, response):
    report_list = response.xpath('//div[@class="reportlist bluelink"]/ul//a/@href')
    for report_url in report_list:
        request = FormRequest(
            urljoin(self.base_url, report_url.extract()),
            callback=self.parse_item,
            dont_filter=False,
        )
        # Forward the category context from the current response to the new request.
        request.meta["large_category_name"] = response.meta["large_category_name"]
        request.meta["mid_category_name"] = response.meta["mid_category_name"]
        request.meta["report_type"] = response.meta["report_type"]
        yield request
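On Scrapy 1.4 or newer, the same loop can be written with response.follow, which accepts an attribute Selector directly and joins relative URLs against response.url; this sketch assumes that resolves to the same absolute URLs as joining against self.base_url did in the original.

def _parse_list(self, response):
    # Alternative sketch using response.follow (Scrapy >= 1.4).
    for href in response.xpath('//div[@class="reportlist bluelink"]/ul//a/@href'):
        yield response.follow(
            href,  # follow() extracts the URL from the attribute Selector
            callback=self.parse_item,
            meta={
                "large_category_name": response.meta["large_category_name"],
                "mid_category_name": response.meta["mid_category_name"],
                "report_type": response.meta["report_type"],
            },
        )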
Example 2: parse_middle_category
# Required imports: from scrapy import FormRequest; from urllib.parse import urljoin
# clean_text is a helper defined elsewhere in this project
def parse_middle_category(self, response):
    # '报告' means "report"; this selects the per-type list entries.
    report_types = response.xpath(u"//li[contains(text(),'报告')]")
    for report_type in report_types:
        mid_category_url = urljoin(
            self.base_url,
            report_type.xpath(u"./preceding-sibling::span[1]/a/@href").extract()[0],
        )
        request = FormRequest(mid_category_url, callback=self.parse_page, dont_filter=True)
        request.meta["large_category_name"] = response.meta["large_category_name"]
        request.meta["mid_category_name"] = response.meta["mid_category_name"]
        request.meta["report_type"] = clean_text(report_type.xpath("./text()").extract()[0].strip())
        request.meta["page_base_url"] = mid_category_url
        yield request
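Because two of the keys are copied verbatim from the parent response, a possible condensation (a sketch, not part of the original code) builds the child meta by unpacking response.meta and overriding only what changes. Worth verifying before adopting: response.meta also carries Scrapy-internal keys such as depth or download_slot, which this variant copies onto the new request as well.

def parse_middle_category(self, response):
    report_types = response.xpath(u"//li[contains(text(),'报告')]")  # '报告' = "report"
    for report_type in report_types:
        mid_category_url = urljoin(
            self.base_url,
            report_type.xpath(u"./preceding-sibling::span[1]/a/@href").extract()[0],
        )
        yield FormRequest(
            mid_category_url,
            callback=self.parse_page,
            dont_filter=True,
            meta={
                **response.meta,  # inherit the category names (and internal keys)
                "report_type": clean_text(report_type.xpath("./text()").extract()[0].strip()),
                "page_base_url": mid_category_url,
            },
        )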
Example 3: parse
# Required imports: from scrapy import FormRequest; from urllib.parse import urljoin
# clean_text is a helper defined elsewhere in this project
def parse(self, response):
    large_categories = response.xpath("//*[@class='tabContent bluelink']//*[contains(@style, 'padding')]/a")
    for large_category in large_categories:
        large_category_name = clean_text(large_category.xpath(".//text()").extract()[0].strip())
        mid_categories = large_category.xpath("./parent::*/following-sibling::*[1]/a")
        for mid_category in mid_categories:
            mid_category_name = clean_text(mid_category.xpath("./text()").extract()[0])
            mid_category_url = urljoin(self.base_url, mid_category.xpath("./@href").extract()[0])
            request = FormRequest(mid_category_url, callback=self.parse_middle_category, dont_filter=True)
            # Seed the meta chain that the later callbacks consume.
            request.meta["large_category_name"] = large_category_name
            request.meta["mid_category_name"] = mid_category_name
            yield request
Example 4: parse_page
# Required imports: from scrapy import FormRequest; from urllib.parse import urljoin
def parse_page(self, response):
    # Emit one request per report on the current page.
    request_list = self._parse_list(response)
    for r in request_list:
        yield r
    # '下一页' means "next page".
    next_page = response.xpath(u'//*[@id="AspNetPager1"]/a[text()="下一页"]/@href')
    if len(next_page) > 0:
        next_page_url = urljoin(self.base_url, next_page.extract()[0])
        if not next_page_url.startswith(response.meta["page_base_url"]):
            if next_page_url.endswith("html"):
                # Re-anchor the pager link onto the category's base URL.
                next_page_url = response.meta["page_base_url"] + next_page_url[next_page_url.rindex("/") + 1:]
        request = FormRequest(next_page_url, callback=self.parse_page, dont_filter=True)
        request.meta["large_category_name"] = response.meta["large_category_name"]
        request.meta["mid_category_name"] = response.meta["mid_category_name"]
        request.meta["report_type"] = response.meta["report_type"]
        request.meta["page_base_url"] = response.meta["page_base_url"]
        yield request
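On Scrapy 1.7 or newer, data meant only for the callback, like these category names, can travel in cb_kwargs instead of meta, which keeps it out of middleware-visible state. A hedged sketch of the pagination tail rewritten that way follows; the URL-correction branch is elided here, and _parse_list would need the same signature change:

def parse_page(self, response, large_category_name, mid_category_name,
               report_type, page_base_url):
    # Sketch using cb_kwargs (Scrapy >= 1.7): values arrive as plain
    # callback arguments rather than via response.meta.
    next_page = response.xpath(u'//*[@id="AspNetPager1"]/a[text()="下一页"]/@href')
    if len(next_page) > 0:
        yield FormRequest(
            urljoin(self.base_url, next_page.extract()[0]),
            callback=self.parse_page,
            dont_filter=True,
            cb_kwargs={
                "large_category_name": large_category_name,
                "mid_category_name": mid_category_name,
                "report_type": report_type,
                "page_base_url": page_base_url,
            },
        )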