当前位置: 首页>>代码示例>>Python>>正文


Python Firefox.maximize_window方法代码示例

本文整理汇总了Python中selenium.webdriver.Firefox.maximize_window方法的典型用法代码示例。如果您正苦于以下问题:Python Firefox.maximize_window方法的具体用法?Python Firefox.maximize_window怎么用?Python Firefox.maximize_window使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在selenium.webdriver.Firefox的用法示例。


在下文中一共展示了Firefox.maximize_window方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: temp_main

# 需要导入模块: from selenium.webdriver import Firefox [as 别名]
# 或者: from selenium.webdriver.Firefox import maximize_window [as 别名]
def temp_main():
    """Visit every contact-info URL (with tracking query) in one maximized Firefox."""
    driver = Firefox()
    driver.maximize_window()
    # gen_url_contactinfo() yields pairs; the URL is the second element.
    for _, base_url in gen_url_contactinfo():
        target = base_url + "?spm=a2615.7691481.0.0.OCyk7j"
        print(target)
        get_parser(target, driver)
开发者ID:yangmingsong,项目名称:python,代码行数:10,代码来源:spider_third_page.py

示例2: FunctionalTests

# 需要导入模块: from selenium.webdriver import Firefox [as 别名]
# 或者: from selenium.webdriver.Firefox import maximize_window [as 别名]
class FunctionalTests(LiveServerTestCase):
    """Base para os testes funcionais.

    Starts one Firefox WebDriver per test with a 5-second implicit wait,
    and exposes ``get_live_url`` to resolve named URLs against the live
    test server.
    """

    def setUp(self):
        """Inicializa serviços necessários para execução dos testes funcionais."""
        self.driver = Firefox()
        self.driver.maximize_window()
        self.driver.implicitly_wait(5)

    def tearDown(self):
        """Finaliza serviços."""
        # quit() (not close()) ends the whole WebDriver session; close() only
        # closes the current window and can leak the browser/geckodriver
        # process between tests.
        self.driver.quit()

    def get_live_url(self, url_name):
        """Obtém url_name em relação ao servidor de testes."""
        return '{}{}'.format(self.live_server_url, reverse(url_name))
开发者ID:Igor-Carvalho,项目名称:django-aws-template,代码行数:18,代码来源:histories.py

示例3: browser

# 需要导入模块: from selenium.webdriver import Firefox [as 别名]
# 或者: from selenium.webdriver.Firefox import maximize_window [as 别名]
def browser(request):
    """Pytest fixture: a maximized Firefox that saves CSV downloads silently.

    The profile routes downloads into DOWNLOAD_DIR with no save dialog so
    tests can verify exported files on disk.  The browser is quit at
    teardown; the trailing sleep gives in-flight downloads time to finish.
    """
    fx_profile = FirefoxProfile()
    for pref_name, pref_value in (
        ("browser.download.folderList", 2),
        ("browser.download.manager.showWhenStarting", False),
        ("browser.download.dir", DOWNLOAD_DIR),
        ("browser.helperApps.neverAsk.saveToDisk", "text/csv"),
    ):
        fx_profile.set_preference(pref_name, pref_value)

    driver = Firefox(firefox_profile=fx_profile)
    driver.implicitly_wait(10)
    driver.maximize_window()
    request.addfinalizer(lambda *args: driver.quit())
    yield driver
    time.sleep(10)
开发者ID:hoelsner,项目名称:product-database,代码行数:18,代码来源:conftest.py

示例4: read_url

# 需要导入模块: from selenium.webdriver import Firefox [as 别名]
# 或者: from selenium.webdriver.Firefox import maximize_window [as 别名]
def read_url(url):
    """Load *url* in Firefox and scroll to the bottom in human-like steps.

    Scrolls in random 50-500px increments with pauses proportional to the
    distance, mimicking a human reader.  ``options`` is a module-level
    Options object configured elsewhere in this file.
    """
    driver = Firefox(options=options)
    try:
        driver.maximize_window()
        driver.get(url)
        time.sleep(4)  # let the initial render settle before measuring height
        height = driver.execute_script("return document.body.scrollHeight")
        print(height)

        position = 0
        while position < height:
            driver.execute_script(f"window.scrollTo(0, {position});")
            delta = random.randint(50, 500)
            position += delta
            duration = delta // 20
            # print(height, position, delta, duration)
            time.sleep(duration)
    finally:
        # quit() (not close()) ends the WebDriver session even if the page
        # load or a script raises, so no browser/driver process is leaked.
        driver.quit()
开发者ID:ChillarAnand,项目名称:01,代码行数:21,代码来源:selenium_exp.py

示例5: __init__

# 需要导入模块: from selenium.webdriver import Firefox [as 别名]
# 或者: from selenium.webdriver.Firefox import maximize_window [as 别名]
class BaiDuWaiMaiCrawler:
    def __init__(self):
        """Start a Firefox session and load the set of already-crawled shop ids."""
        # Comment-page endpoint; a shop id is appended to form the full URL.
        self.comment_root_url = "http://waimai.baidu.com/shopui/?qt=shopcomment&shop_id="
        # Output file for today's scraped comments (one file per day).
        self.comment_root_path = "files/baiduwaimai_comments-%s.json" % datetime.now().strftime("%Y-%m-%d")
        self.browser = Firefox()
        # shop_id -> list of address indices where the shop was seen.
        self.ids = defaultdict(list)
        # Shop ids already processed; persisted across runs in a text file.
        self.crawled_ids = []
        self.crawled_id_filepath = "files/crawled_ids.txt"
        self.get_crawled_ids()

    def __del__(self):
        """Terminate the Firefox session when the crawler is garbage-collected."""
        self.browser.quit()

    def get_crawled_ids(self):
        """Populate self.crawled_ids from the persistence file, if present."""
        if not exists(self.crawled_id_filepath):
            return
        with open(self.crawled_id_filepath, encoding="utf-8") as record_file:
            self.crawled_ids.extend(line.strip() for line in record_file)

    def record_crawled_id(self, shop_id):
        """Append *shop_id* to the persistence file so later runs skip it."""
        with open(self.crawled_id_filepath, mode="a", encoding="utf-8") as record_file:
            record_file.write("%s\n" % shop_id)

    @staticmethod
    def get_address_urls_from_file():
        """Return the address URLs (first whitespace-separated field per line).

        Lines with fewer than two fields are ignored.
        """
        splitter = re.compile(r"\s+")
        address_urls = []
        with open("files/baiduwaimai_address_urls.txt") as source:
            for raw_line in source:
                fields = splitter.split(raw_line.strip())
                if len(fields) >= 2:
                    address_urls.append(fields[0])
        print("从文件内得到所有地址的url")
        return address_urls

    def get_shop_ids_from_file(self, filepath, encoding="utf-8"):
        """Fill self.ids from a file of 'shop_id  addr1,addr2,...' lines.

        Lines with fewer than two whitespace-separated fields are skipped.
        """
        splitter = re.compile(r"\s+")
        with open(filepath, encoding=encoding) as source:
            for raw_line in source:
                fields = splitter.split(raw_line.strip())
                if len(fields) < 2:
                    continue
                self.ids[fields[0]] = fields[1].split(",")

    def get_shop_ids_from_net(self):
        """Collect shop ids by visiting every address URL listed in the file."""
        for line_index, address_url in enumerate(self.get_address_urls_from_file()):
            self.shop_urls_at_a_address(address_url, line_index)

    def shop_urls_at_a_address(self, url, line_index):
        """Visit one address page and record which shops are listed there.

        For every shop card found, appends this address's line index (as a
        string) to self.ids[shop_id].
        """
        self.browser.get(url)
        self.browser.maximize_window()
        for i in range(10):
            # Click the element with id 'baiducopy' ten times, pausing 2s each
            # time -- NOTE(review): presumably a "load more" control that
            # reveals additional shop cards; confirm against the live page.
            self.browser.find_element_by_id("baiducopy").click()
            time.sleep(2)
        page_source = self.browser.page_source
        # self.browser.close()

        soup = BeautifulSoup(page_source, "html.parser")
        if soup.find("ul", class_="shopcards-list"):
            for li in soup.find("ul", class_="shopcards-list").find_all("li", class_="list-item"):
                # The shop id is embedded in the card's third CSS class,
                # after a 4-character prefix.
                key = li.get("class")[2][4:]
                address_id = str(line_index)
                self.ids[key].append(address_id)

    def get_comments_in_one_shop(self, shop_id):
        """Crawl all comment pages of one shop, then mark the shop as crawled.

        Pages are walked via the pagination "next" link; the loop stops when
        a no-result marker appears or the "next" link is missing.
        """
        self.browser.get("%s%s" % (self.comment_root_url, shop_id))
        self.browser.maximize_window()
        while True:
            # Hover over the footer twice (1s apart) to trigger lazy loading
            # of the comment list before reading the page source.
            footer = self.browser.find_element_by_xpath("//div[@class='footer-items']")
            for i in range(2):
                ActionChains(self.browser).move_to_element(footer).perform()
                time.sleep(1)

            page_source = self.browser.page_source
            soup = BeautifulSoup(page_source, "html.parser")
            div = soup.find("section", "comment-list").find("div", "comment-con")
            if div.find("div", class_="no-result") is not None:
                # No comments on this page (or past the last page): stop.
                break
            else:
                for a_div in div.find_all("div", class_="list clearfix"):
                    self.get_one_comment(a_div, shop_id)
            try:
                the_next = self.browser.find_element_by_xpath(
                    "//div[@class='pagination']//a[@class='mod-page-item mod-page-item-next']")
                the_next.click()
                time.sleep(2)
            except NoSuchElementException:
                # No "next" link: this was the last page.
                break
        # self.browser.close()
        print("爬完ID为 '", shop_id, "' 的餐厅的评论信息。")
        self.record_crawled_id(shop_id)
        self.crawled_ids.append(shop_id)

    def get_one_comment(self, div, shop_id):
        try:
            comment_info = {"shop_id": shop_id}
            top_sec = div.find("div", class_="top-section").get_text("|", strip=True).split("|")
            comment_info["user_name"] = top_sec[0]  # a_div.find("span", class_="user-name").string.strip()
            comment_info["mark"] = top_sec[1][:-1]
            comment_info["delivery_time"] = top_sec[2]  # a_div.find("span", class_="delivery-time").string.strip()
#.........这里部分代码省略.........
开发者ID:chaoming0625,项目名称:FusionOfMultipleClassifers,代码行数:103,代码来源:cralwer.py

示例6: getTestDates

# 需要导入模块: from selenium.webdriver import Firefox [as 别名]
# 或者: from selenium.webdriver.Firefox import maximize_window [as 别名]
                    # Run the test with json output
                    runner = unittest.TextTestRunner()
                    runner.run(singleTestCase('_test_json_out'))

        elif argv[1] == '--dump-dates':
            testUsers = getTestDates()
            for user, dates in testUsers.items():
                print 'Test dates for user %s:' % user
                print '    ' + ', '.join([str(date) for date in dates])

        elif argv[1] == '--debug':

            # Scratch area where you can put whatever debug code
            # Best used with python's -i option so you can poke around.
            d = Firefox()
            d.maximize_window()
            m = mainMyuwHandler(d, testconfig.testUrl)

            m.setUser('seagrad')
            m.setDate('2013-3-27 3:4:5')
            try:
                m.browseLanding()
            except Exception as e:
                print e
            #el = d.find_element_by_id('SummerRegStatusCard1')
            #print el.is_displayed()
            #print repr(el.text)
            #time.sleep(4)
            a = m.cards
            #
            #e = myuwExpected.getExpectedResults('javerage', '2013-06-10')
开发者ID:mattventura,项目名称:myuw-new-selenium,代码行数:33,代码来源:main.py


注:本文中的selenium.webdriver.Firefox.maximize_window方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。