This article collects typical usage examples of the Python method pyvirtualdisplay.Display.close. If you are wondering what Display.close does and how to use it in practice, the curated code examples below should help; you can also read more about the containing class, pyvirtualdisplay.Display.
Three code examples of Display.close are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code samples.
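Before the examples, here is a minimal sketch (not taken from the examples below) of the start/close lifecycle that all three snippets rely on. Note that the examples on this page call close(); in recent pyvirtualdisplay releases the equivalent teardown method is named stop().

from pyvirtualdisplay import Display

display = Display(visible=0, size=(800, 600))  # backed by Xvfb by default
display.start()                                # spawn the virtual X server
try:
    pass  # run browser automation (Selenium, etc.) here
finally:
    display.close()  # tear the display down; newer releases use display.stop()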
Example 1: Spider
# Required import: from pyvirtualdisplay import Display [as alias]
# Or: from pyvirtualdisplay.Display import close [as alias]
import re

import scrapy
from pyvirtualdisplay import Display
from selenium import webdriver


class Spider(scrapy.Spider):
    name = "mayors"
    allowed_domains = ["www.cec.gov.tw"]
    start_urls = ["https://www.cec.gov.tw/pc/zh_TW/IDX/indexC.html"]
    download_delay = 1

    def __init__(self, ad=None, *args, **kwargs):
        super(Spider, self).__init__(*args, **kwargs)
        # Start a headless X display so Chrome can run without a real screen
        self.display = Display(visible=0, size=(800, 600))
        self.display.start()
        self.driver = webdriver.Chrome("/var/chromedriver/chromedriver")

    def spider_closed(self, spider):
        # Tear down the virtual display when the spider finishes
        self.display.close()

    def parse(self, response):
        self.driver.get(response.url)
        nodes = scrapy.Selector(text=self.driver.page_source).xpath('//a[@target="_top"]')
        for node in nodes:
            county = node.xpath('text()').extract_first()
            print(county)
            yield response.follow(node, callback=self.parse_list, meta={'meta': county})

    def parse_list(self, response):
        for tr in response.css(u'table.tableT tr.trT'):
            d = {}
            d['type'] = 'mayors'
            d['county'] = response.meta['meta']
            d['constituency'] = 0
            d['elected'] = tr.xpath('td[1]/text()').extract_first().strip()
            d['number'] = int(tr.xpath('td[2]/text()').extract_first())
            d['votes'] = int(re.sub(r'\D', '', tr.xpath('td[5]/text()').extract_first()))
            d['votes_percentage'] = tr.xpath('td[6]/text()').extract_first()
            yield d
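Note that spider_closed above is only defined, not wired up, so Display.close would never actually run as written. A minimal sketch of one common way to connect it to Scrapy's spider_closed signal follows (the from_crawler override is an assumption, not part of the original example):

from scrapy import signals

class Spider(scrapy.Spider):
    # ... fields and methods from the example above ...

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(Spider, cls).from_crawler(crawler, *args, **kwargs)
        # Invoke spider_closed (and therefore Display.close) when the spider finishes
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider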
Example 2: Xvfb
# Required import: from pyvirtualdisplay import Display [as alias]
# Or: from pyvirtualdisplay.Display import close [as alias]
from pyvirtualdisplay import Display


class Xvfb(object):
    def __init__(self, width=1366, height=768, visible=0):
        self.__virtual_display = None
        self.width = width
        self.height = height
        self.visible = visible

    def __init_display(self):
        # Lazily create and start the virtual display on first use
        if self.__virtual_display is None:
            self.__virtual_display = Display(visible=self.visible, size=(self.width, self.height))
            self.__virtual_display.start()

    def __enter__(self):
        self.__init_display()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._close_display()

    def _close_display(self):
        if self.__virtual_display:
            try:
                self.__virtual_display.close()
            except Exception:
                # Ignore errors raised while shutting the display down
                pass
            self.__virtual_display = None

    @staticmethod
    def run(func, *args, **kwargs):
        # Execute func inside a temporary virtual display
        runner = Xvfb()
        with runner:
            return func(*args, **kwargs)
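As a hypothetical usage example (take_screenshot and the Firefox driver below are assumptions, not part of the original snippet), the wrapper can be used either through Xvfb.run or directly as a context manager:

from selenium import webdriver

def take_screenshot():
    driver = webdriver.Firefox()
    try:
        driver.get("https://example.com")
        driver.save_screenshot("example.png")
    finally:
        driver.quit()

Xvfb.run(take_screenshot)            # run once inside a temporary virtual display

with Xvfb(width=1920, height=1080):  # or manage the display size explicitly
    take_screenshot()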
Example 3: Spider
# Required import: from pyvirtualdisplay import Display [as alias]
# Or: from pyvirtualdisplay.Display import close [as alias]
from random import randint
from time import sleep
from urllib.parse import urljoin

import scrapy
from scrapy import FormRequest, Request, Selector
from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# first_or_list is a project-specific helper defined elsewhere in the original project.


class Spider(scrapy.Spider):
    name = "lis_by_ad"
    allowed_domains = ["lis.ly.gov.tw"]
    start_urls = [
        "http://lis.ly.gov.tw/lylgmeetc/lgmeetkm_lgmem",
    ]
    download_delay = 1

    def __init__(self, ad=None, *args, **kwargs):
        super(Spider, self).__init__(*args, **kwargs)
        # Start a headless X display so Chrome can run without a real screen
        self.display = Display(visible=0, size=(800, 600))
        self.display.start()
        self.driver = webdriver.Chrome("/var/chromedriver/chromedriver")
        self.ad = ad

    def spider_closed(self, spider):
        # Tear down the virtual display when the spider finishes
        self.display.close()

    def parse(self, response):
        yield FormRequest.from_response(
            response,
            formdata={
                '_20_8_T': str(self.ad).zfill(2),
                'INFO': response.xpath('//input[@name="INFO"]/@value').extract_first()
            },
            callback=self.parse_max_per_page
        )

    def parse_max_per_page(self, response):
        href = response.xpath(r'//select[@onchange="instback(this)"]/option[re:test(text(), "^\d+$")]/@value').extract()
        yield Request(urljoin(response.url, href[-1]), callback=self.parse_law_bill_list, dont_filter=True)

    def parse_law_bill_list(self, response):
        self.driver.get(response.url)
        while True:
            try:
                WebDriverWait(self.driver, 10).until(
                    EC.presence_of_element_located((By.ID, "block30"))
                )
            except Exception:
                # The results block has not rendered yet; keep waiting
                continue
            sleep(randint(1, 2))
            nodes = Selector(text=self.driver.page_source).xpath('//a[@class="link02"]')
            for node in nodes[1::2]:
                href = node.xpath('@href').extract_first()
                yield Request(urljoin(response.url, href), callback=self.parse_law_bill, dont_filter=True)
            try:
                # "次頁" is the site's "next page" button
                next_page = self.driver.find_element_by_xpath('//input[@name="_IMG_次頁"]')
                next_page.click()
            except Exception:
                break
        self.driver.close()

    def parse_law_bill(self, response):
        trs = response.xpath('//tr[@class="rectr"]')
        item = {
            tr.xpath('td[1]/nobr/text()').extract_first():
                first_or_list(tr.xpath('td[1]/nobr/text()').extract_first(), tr.xpath('td[2]//text()').extract())
            for tr in trs
        }
        item.pop(u"關係文書", None)  # "關係文書" (related documents) is not usable as-is; it is rebuilt below
        has_motions = response.xpath(u'//img[@src="/lylegis/images/ref4.png"]/parent::a/@href').extract_first()
        bill_ref_pdf = response.xpath(u'//img[@src="/lylgmeet/img/view.png"]/parent::a/@href').extract_first()
        bill_ref_doc = response.xpath(u'//img[@src="/lylgmeet/img/doc_icon.png"]/parent::a/@href').extract_first()
        if bill_ref_pdf:
            bill_ref = urljoin(response.url, '/lgcgi/lgmeetimage?%s' % bill_ref_pdf.split('^')[-1])
        elif bill_ref_doc:
            bill_ref = urljoin(response.url, bill_ref_doc)
        else:
            bill_ref = ''
        item['links'] = {
            u'關係文書': bill_ref,
            u'審議進度': urljoin(response.url, has_motions) if has_motions else None
        }
        if has_motions:
            yield Request(item['links'][u'審議進度'], callback=self.parse_law_bill_motions, dont_filter=True, meta={'item': item})
        else:
            item['motions'] = []
            yield item

    def parse_law_bill_motions(self, response):
        item = response.request.meta['item']
        motions = []
        for node in response.xpath('//tr[@class="onetr0"]/parent::table'):
            motion = {}
            for tr in node.xpath('.//tr[@class="onetr1"]'):
                motion[tr.xpath('td[1]/text()').extract_first()] = first_or_list(tr.xpath('td[2]//text()').extract())
            motion.pop(u"影像", None)  # drop the "影像" (image) column
            motions.append(motion)
        item['motions'] = motions
        yield item