

Python Firefox.close Method Code Examples

This article collects typical usage examples of the selenium.webdriver.Firefox.close method in Python. If you have been wondering what Firefox.close does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the class this method belongs to, selenium.webdriver.Firefox.


A total of 11 code examples of the Firefox.close method are shown below, sorted by popularity by default.
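All of the examples follow the same basic pattern: create a Firefox WebDriver, interact with a page, and finally release the browser with close(). As a quick orientation, here is a minimal sketch of that pattern (the URL is only a placeholder); note that close() closes the current window, whereas quit() ends the whole WebDriver session.

from selenium.webdriver import Firefox

driver = Firefox()                     # launches a local Firefox session via geckodriver
try:
    driver.get("https://example.com")  # placeholder URL
    print(driver.title)
finally:
    driver.close()                     # closes the current browser window
    driver.quit()                      # ends the session and stops the driver process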

Example 1: CaptchaPage

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
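# Note: this example targets Python 2 (print statements, the ConfigParser module, os.getcwdu)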
class CaptchaPage():
    def __init__(self):
        print "Captcha Page Initializing"

        parser = ConfigParser.ConfigParser()
        base_path = os.path.join(os.environ['HOME'], '.mozilla/firefox/')
        parser.read(os.path.join(base_path, "profiles.ini"))
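        # Look up the 'Path' entry of the first profile listed in profiles.ini (Python 2: filter() returns a list)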
        profile_path = os.path.join(base_path, filter(lambda x: x[0].lower() == 'path', parser.items('Profile0'))[0][1])
        try:
            profile = FirefoxProfile(profile_path)
        except OSError:
            raise Exception("You must execute the following command:\nsudo chmod +r -R %s" % profile_path)
        self.driver = Firefox(profile)

        self.driver.get("file://%s/index.html" % os.getcwdu())

    def get_url_sound(self):
        self.driver.find_element_by_xpath('//*[@id="recaptcha_switch_audio"]').click()
        return self.driver.find_element_by_xpath('//*[@id="recaptcha_audio_download"]').get_attribute('href')

    def get_recaptcha_challenge_field(self):
        return self.driver.find_element_by_xpath('//*[@id="recaptcha_challenge_field"]').get_attribute('value')

    def get_captcha_textbox(self):
        print "Getting Captcha Textbox"
        return Textbox(self.driver.find_element_by_xpath('//*[@id="recaptcha_response_field"]'))

    def get_submit_button(self):
        print "Getting Submit Form Button"
        return Button(self.driver.find_element_by_xpath("/html/body/form/input"))

    def close(self):
        print "Closing Captcha Page"
        self.driver.close()
Developer: nuxlic, Project: flask-restful-recaptcha, Lines: 36, Source: robot.py

Example 2: main

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser()
    parser.add_argument('--url', default='http://127.0.0.1:8000/static/index.html')
    args = parser.parse_args(argv)

    url = args.url

    browser = WebDriver()
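    # (WebDriver is presumably selenium.webdriver.Firefox imported under an alias; see the import note above)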
    browser.get(url)
    tags = browser.find_elements_by_css_selector('li')
    for tag in tags:
        print(tag.text)
    browser.close()
Developer: shimizukawa, Project: happy-scraping, Lines: 15, Source: can-scrape.py

Example 3: FunctionalTests

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
class FunctionalTests(LiveServerTestCase):
    """Base para os testes funcionais."""

    def setUp(self):
        """Inicializa serviços necessários para execução dos testes funcionais."""
        self.driver = Firefox()
        self.driver.maximize_window()
        self.driver.implicitly_wait(5)

    def tearDown(self):
        """Finaliza serviços."""
        self.driver.close()

    def get_live_url(self, url_name):
        """Obtém url_name em relação ao servidor de testes."""
        return '{}{}'.format(self.live_server_url, reverse(url_name))
Developer: Igor-Carvalho, Project: django-aws-template, Lines: 18, Source: histories.py

Example 4: __init__

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
class ContentRetrieverUsingSelenium:
    def __init__(self, timeout):
        self.browser = Firefox()
        self.timeout = timeout
    
    def getContentOfPage(self, url):
        self.browser.get(url)
        
        time.sleep(self.timeout)
        
        page_source = self.browser.page_source
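        # Re-encode the page source as GBK, silently dropping characters it cannot represent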
        page_source = page_source.encode('gbk', 'ignore')
        
        return (self.browser.current_url, BeautifulSoup(page_source))
    
    def close(self):
        self.browser.close()
Developer: Stromausfall, Project: AnkiFlashCardCreationTools, Lines: 19, Source: CrawlTools.py

Example 5: read_url

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
def read_url(url):
    driver = Firefox(options=options)
    driver.maximize_window()
    driver.get(url)
    time.sleep(4)
    height = driver.execute_script("return document.body.scrollHeight")
    print(height)

    position = 0

    while position < height:
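        # Scroll by a random step, then pause roughly in proportion to the step size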
        driver.execute_script(f"window.scrollTo(0, {position});")
        delta = random.randint(50, 500)
        position += delta
        duration = delta // 20
        # print(height, position, delta, duration)
        time.sleep(duration)

    driver.close()
Developer: ChillarAnand, Project: 01, Lines: 21, Source: selenium_exp.py

Example 6: WeixinSelenium

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
class WeixinSelenium(Base):
    def __init__(self):
        self.start_page = START_PAGE
        self.end_page = END_PAGE
        self.weixin_url = REFER_FIRST

        self.driver = Firefox()

        self.client = MongoClient(HOST, PORT)
        self.collection = self.client[DB][COLLECTION]
        self.all_uids = self.uids

    def open_weixin_browser(self, word):
        try:
            self.driver.get(self.weixin_url)
            self.driver.set_page_load_timeout(3)

            self.driver.find_element_by_id('upquery').send_keys(word)
            self.driver.find_element_by_class_name('swz').click()
            self.driver.implicitly_wait(3)

            urls_uids = self.extract_urls_uids(word=word)
            Article(urls_uids=urls_uids, word=word).extract()
        except Exception as e:
            storage_word.append([word, 0])
            self.logger.info('Open weixin error: type <{}>, msg <{}>'.format(e.__class__, e))
            self.close_browser()
            return True
        return False

    def get_total_pages_to_word(self):
        pages = []
        page_id_css = 'pagebar_container'

        try:
            e = self.driver.find_element_by_id(page_id_css)
            for _p in e.text.split():
                _p = _p.strip()

                if not _p.isdigit():
                    return pages[-1]
                else:
                    pages.append(int(_p))
            return 1
        except (NoSuchElementException, NoSuchWindowException, TypeError, IndexError):
            pass

    def get_query_words(self):
        query_words = []

        for docs in self.collection.find({}, {'rel': 1, 'conp': 1}).sort([('_id', 1)]):
            w = docs['conp']

            if w not in query_words:
                query_words.append(w)

            for item in docs['rel']:
                if item not in query_words:
                    query_words.append(item)

        self.client.close()
        return query_words

    @property
    def uids(self):
        return {docs['uid'] for docs in in_collection.find({}, {'uid': 1}) if 'uid' in docs}

    def extract_urls_uids(self, word):
        urls_uids = []
        timestamp = [_t.get_attribute('t') for _t in self.driver.find_elements_by_css_selector('div.s-p')]
        urls_tits = [(t.get_attribute('href'), self.trim(t.text))
                     for t in self.driver.find_elements_by_css_selector('h4 a')]

        if len(urls_tits) != len(timestamp):
            return urls_uids

        for index, url_tit in enumerate(urls_tits):
            try:
                uid = self.md5(timestamp[index] + url_tit[1] + word)

                if uid not in self.all_uids:
                    self.all_uids.add(uid)
                    urls_uids.append({'url': url_tit[0], 'uid': uid})
            except (TypeError, IndexError):
                pass
        return urls_uids

    @staticmethod
    def query_index(words, cut_word):
        try:
            index = words.index(cut_word)
            return index
        except ValueError:
            pass
        return 0

    @property
    def is_forbidden(self):
        css_id = 'seccodeForm'

#......... some of the code is omitted here .........
Developer: xutaoding, Project: csf_scraper, Lines: 103, Source: wx_selenium.py

Example 7: raw_input

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
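# Fragment: the browser and button objects are created earlier in the original script; this excerpt begins mid-way through it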
button.click()

# # Identify all states in the list, read as text using Selenium
list_item = browser.find_element_by_class_name('drop-down-list')
states = list_item.text

# sel_st = raw_input('Type in 2 letter st abbreviation: ')
find_st = browser.find_element_by_link_text('CO')
find_st.click()
sleep(1)
list_region = browser.find_element_by_xpath('//*[@id="select-region"]/div[2]') # find and click Choose Region button
list_region.click()
cur_state = browser.find_element_by_xpath('//*[@id="select-region"]/div[3]')

# text_region = cur_state.find_elements_by_tag_name('a') # find all regions by <a> tag
l_regions = cur_state.text

# creates text file with all the region
regions = open('Regions.text', 'w')
regions.write(l_regions)

print l_regions
# Place region points on map
gmaps2.gmaps_mapit(l_regions)


# url = './mymap.html'
# webbrowser.open_new_tab(url)

browser.close()
Developer: killyouinhalf, Project: Map_Locations, Lines: 32, Source: charter_ch_lineup3.py

Example 8: RegistrationWebTest

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
class RegistrationWebTest(TestCase):
    """
    Test all facets of the registration process
    """

    @classmethod
    def clear_database(cls):
        """
        Clear the database before and after use
        """
        collection = cls.mongo.collection
        for user in ['UnittestExistingTestUser', 'UnittestNonExistingTestUser']:
            test_user = collection.find_one({
                'username': user,
            })
            if test_user:
                collection.remove(test_user)

    @classmethod
    def setUpClass(cls):
        """
        Setup test data, browser and server
        """
        cls.mongo = UserDatabaseConnectivity()
        cls.clear_database()
        test_user = {
            'username': 'UnittestExistingTestUser',
            'salt': '000',
            'password': '000',
            'enabled': False,
        }
        cls.mongo.collection.save(test_user)
        cls.config = dict()
        prepare_test(cls)
        cls.base_url = 'http://{:s}:{:d}/static/index.xhtml'.format(cls.config['bind_ip'], cls.config['bind_port'])

    @classmethod
    def tearDownClass(cls):
        """
        Disconnect from mongo and cleanup browser, server, etc.
        """
        cls.clear_database()
        del cls.mongo
        cleanup(cls)

    def setUp(self):
        """
        Force a page refresh between tests
        """
        self.webdriver = Firefox()
        self.webdriver.implicitly_wait(10)

    def tearDown(self):
        """
        Throw test user out of database
        """
        self.webdriver.close()
        self.webdriver.quit()
        collection = self.mongo.collection
        test_user = collection.find_one({
            'username': 'UnittestNonExistingTestUser',
        })
        if test_user:
            collection.remove(test_user)

    def __util_get_reg_button(self):
        """
        Get the registration form button
        """
        self.webdriver.get(self.base_url)
        self.webdriver.implicitly_wait(10)
        sleep(3)
        button = self.webdriver.find_element_by_xpath('//xhtml:button[@data-formaction="registrationForm"]')
        return button

    def __util_open_dialog(self):
        """
        Open the registration dialog
        """
        button = self.__util_get_reg_button()
        button.click()
        self.webdriver.implicitly_wait(10)
        sleep(5)

    def test_find_button(self):
        """
        Is the button there?
        """
        self.assertIsNotNone(self.__util_get_reg_button())

    def test_open_dialog(self):
        """
        Can we open the dialog?
        """
        dialog_xpath = '//xhtml:div[contains(@class, "bootstrap-dialog")]'
        # Test that there is no dialog open at the moment
        self.assertRaises(NoSuchElementException, self.webdriver.find_element_by_xpath, dialog_xpath)
        self.__util_open_dialog()
        dialog = self.webdriver.find_element_by_xpath(dialog_xpath)
        self.assertIsNotNone(dialog)
#......... some of the code is omitted here .........
Developer: timetraq, Project: tt-server, Lines: 103, Source: test_registration_web.py

Example 9: len

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
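# Fragment: this excerpt starts inside a loop over pricing tables parsed from an Azure VM pricing page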
            # Which table we are processing
            title = table.find_previous_sibling('h3')
            if title:
                title = title.string
            else:
                title = table.parent.find_previous_sibling('h3')
                if title:
                    title = title.string
                else:
                    title = 'No inmediate name'

            df['region'] = pd.Series(data=[current_region] * len(df.index))
            df['description'] = pd.Series(data=[title] * len(df.index))

            # Decide in which list to put the extracted table
            if 'SLES Premium' in df.columns:
                suse_list.append(df)
            else:
                pricing_list.append(df)

            print('{0}: {1}'.format(title, df.shape))

    if not os.path.exists('./azure/data'):
        os.mkdir('./azure/data')

    save_df_list(suse_list, './azure/data/azure_pricing_vm_suse.csv')
    save_df_list(pricing_list, './azure/data/azure_pricing_vm_common.csv')

    driver.close()
Developer: dominoFire, Project: sweeper-extractor, Lines: 31, Source: pricing_vm_parser.py

Example 10: Firefox()

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
#!/usr/bin/python3
import requests
from bs4 import BeautifulSoup
from selenium.webdriver import Firefox

b = Firefox()
b.get("https://service.cloud.teu.ac.jp/moodle/course/view.php?id=7661")

soup = BeautifulSoup(b.page_source, "html.parser")
print(soup.find_all("a"))
b.close()
Developer: ctare, Project: kogi, Lines: 13, Source: main.py

Example 11: __init__

# Required import: from selenium.webdriver import Firefox [as alias]
# Or: from selenium.webdriver.Firefox import close [as alias]
class Scraper:
    """ A Simple Scraper Example using Selenium """

    def __init__(self, base_url, query_params):
        self.__take_results_backup()
        options = Options()
        options.add_argument("--headless")
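        # Prefer a headless Chrome driver; fall back to Firefox if Chrome fails to start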
        try:
            self.driver=Chrome(options=options)
        except Exception as e:
            print(f'Error occured during Chrome driver : {e}')
            self.driver=Firefox()
        self.driver.get(base_url + query_params)
        # set up the next page element
        self.nextpage_element=self.driver.find_element_by_css_selector(
                ".pager-next a")


    def __take_results_backup(self):
        if os.path.exists('outfile.csv'):
            stamp=f'outfile{time.asctime().replace(":", "-").replace(" ","_")}'
            shutil.move('outfile.csv', stamp)

    def __save_info(self, lines):
        """
        This method saves the recently collected information line from webpage
        """

        with open('outfile.csv', 'a') as f:
            for line in lines:
                f.write(line)

    def nextpage(self, css_locator):
        self.driver.find_element_by_css_selector(
                css_locator).click()

    def scrape_page(self):
        providers = self.driver.find_elements_by_css_selector(".provider-row")

        for provider in providers:
            try:
                name = provider.find_element_by_css_selector(
                        ".provider-base-info h3 a").text
                email = provider.find_element_by_css_selector(
                        ".provider-link-details .icon-mail+a").get_attribute(
                                'href').replace('mailto:','')
                website = provider.find_element_by_css_selector(
                        ".provider-link-details .website-link a").get_attribute('href')
                location = provider.find_element_by_css_selector(
                        ".provider-info__details div.list-item:nth-of-type(4)").text

                lineitem=f'{name.replace(",","-")},{email},{website},{location.replace(",","-")}'

                # append the results
                self.__save_info(lineitem + "\n")

            except NoSuchElementException:
                # skip information and continue scraping the page
                continue

            except Exception as e:
                # discontinue in case of unknown error
                raise ScrapePageError(f"Error occured during scrape page : {e}")

    def scrape(self):
        # scrape until nextpage function doesn't fail
        while True:
            print(f"scraping the website... ")
            try:
                self.scrape_page()
                self.nextpage(".pager-next a")

            except ScrapePageError as e:
                print(e)
                self.nextpage(".pager-next a")
                continue

            except Exception as e:
                print("Something went wrong: ", e)
                self.driver.close()
                break
Developer: stupidnetizen, Project: expat, Lines: 83, Source: selenium_scraper.py


Note: The selenium.webdriver.Firefox.close examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and the source code copyright remains with those authors. Refer to each project's License before distributing or reusing the code, and do not republish this article without permission.