

Python WebDriverWait.find_element_by_link_text Method Code Examples

This article collects typical usage examples of the Python method selenium.webdriver.support.ui.WebDriverWait.find_element_by_link_text. If you are wondering how WebDriverWait.find_element_by_link_text is used in practice, or what real-world examples of it look like, the selected code examples below may help. You can also explore further usage examples of the containing class, selenium.webdriver.support.ui.WebDriverWait.


Four code examples of the WebDriverWait.find_element_by_link_text method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
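
Strictly speaking, find_element_by_link_text is a method of the WebElement that WebDriverWait(...).until(...) returns when the expected condition locates an element, rather than of WebDriverWait itself; all four examples below follow that pattern. Here is a minimal, self-contained sketch of the pattern (the URL and the link text 'Home' are placeholders, not taken from any of the projects on this page); note that find_element_by_link_text is deprecated in Selenium 4 in favor of find_element(By.LINK_TEXT, ...).

# A minimal sketch of the pattern used in the examples below.
# The URL and the link text 'Home' are placeholders for illustration only.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
driver.get('http://example.com/')
# until() returns the located WebElement once the condition is satisfied...
nav = WebDriverWait(driver, 10).until(
    EC.visibility_of_element_located((By.TAG_NAME, 'nav')))
# ...and find_element_by_link_text is then called on that element.
nav.find_element_by_link_text('Home').click()
driver.quit()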

Example 1: test_add_report_to_open_ticket_with_put_on_pending_action

# Required import: from selenium.webdriver.support.ui import WebDriverWait [as alias]
# Or: from selenium.webdriver.support.ui.WebDriverWait import find_element_by_link_text [as alias]
def test_add_report_to_open_ticket_with_put_on_pending_action(
        browser_o, opened_ticket):
    content = 'foo ' * 10
    action = 'put_on_pending'
    days_after_today = 4
    now = timezone.now()
    browser_o.get(reverse(admin_urlname(Report._meta, 'add')) +
                  '?ticket={}'.format(opened_ticket.id,))
    browser_o.driver.find_element_by_id('id_content').send_keys(content)
    visible_from_req = browser_o.driver.find_element_by_id(
        'id_visible_from_requester')
    if visible_from_req.is_selected():
        visible_from_req.click()
    browser_o.driver.find_element_by_css_selector(
        'input[value="{}"]'.format(action)).click()
    estimated_end_pending_date = WebDriverWait(browser_o.driver, 10).until(
        ec.visibility_of_element_located(
            (By.ID, 'id_estimated_end_pending_date')))
    estimated_end_pending_date.click()
    datepicker = WebDriverWait(browser_o.driver, 10).until(
        ec.visibility_of_element_located(
            (By.CSS_SELECTOR, 'table.ui-datepicker-calendar')))
    datepicker.find_element_by_link_text(
        str((now + timezone.timedelta(days=days_after_today)).day)).click()
    browser_o.driver.find_element_by_name('_save').click()
    browser_o.driver.find_element_by_id("ticket_form")
    report = Report.objects.filter(ticket__id=opened_ticket.id).latest()
    assert report.ticket.id == opened_ticket.id
    assert report.content == content
    assert report.sender.pk == browser_o.user.pk
    assert report.recipient.pk == opened_ticket.requester.pk
    assert report.action_on_ticket == action
    assert report.visible_from_requester is False
    ticket = Ticket.objects.get(id=opened_ticket.id)
    assert ticket.status == Ticket.STATUS.pending
    statuschangelog = ticket.status_changelogs.latest()
    assert statuschangelog.before == Ticket.STATUS.open
    assert statuschangelog.after == Ticket.STATUS.pending
    assert statuschangelog.changer.pk == browser_o.user.pk
    pending_range = ticket.pending_ranges.all().latest()
    expected_estimated_end_date = now + timezone.timedelta(
        days=days_after_today)
    assert pending_range.estimated_end.year == expected_estimated_end_date.year
    assert (pending_range.estimated_end.month ==
            expected_estimated_end_date.month)
    assert pending_range.estimated_end.day == expected_estimated_end_date.day
    assert pending_range.start == statuschangelog.created
Developer: TicketHelpdesk, Project: TicketHelpdesk, Lines of code: 49, Source file: test_operator_stories.py

Example 2: clickThruCheckpoints

# Required import: from selenium.webdriver.support.ui import WebDriverWait [as alias]
# Or: from selenium.webdriver.support.ui.WebDriverWait import find_element_by_link_text [as alias]
try:
    while driver.find_element_by_id('checkpointSubmitButton'):
        clickThruCheckpoints(driver)
except Exception:
    pass

# Scroll until we can scroll no more!
friends_seen = 0
while True:
    page_height = getPageHeight(driver)
    friends_shown = driver.find_elements_by_css_selector('.uiProfileBlockContent a:not(.uiLinkSubtle)')
    for friend in friends_shown[friends_seen:]:
        webdriver.ActionChains(driver).move_to_element(friend).perform()
        time.sleep(1) # Let Facebook load this hovercard, remove the old one.
        try:
            el = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CLASS_NAME, 'HovercardFollowButton')))
            btn = el.find_element_by_link_text('Following')
            btn.click()
        except Exception:
            pass # We're not following this person so forget it.
        finally:
            friends_seen += 1
    time.sleep(3) # Give it a few seconds to load.
    new_page_height = getPageHeight(driver)
    if new_page_height == page_height:
        break

# And we're done! :)
driver.quit()
Developer: meitar, Project: fb-unfollow, Lines of code: 31, Source file: unfollow_facebook_users.py

Example 3: click_navbar_element

# Required import: from selenium.webdriver.support.ui import WebDriverWait [as alias]
# Or: from selenium.webdriver.support.ui.WebDriverWait import find_element_by_link_text [as alias]
def click_navbar_element(self, name, sleep=1):
    target = WebDriverWait(self.selenium, self.default_timeout).until(
        EC.visibility_of_element_located((By.TAG_NAME, 'nav'))
    )
    target.find_element_by_link_text(name).click()
    time.sleep(sleep)
Developer: NAVADMC, Project: ADSM, Lines of code: 8, Source file: test_functional.py
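
A hedged usage sketch: inside the same test class, a call would look like the line below; the link text 'Population' is purely hypothetical and not taken from the ADSM test suite.

# 'Population' is a hypothetical link text used only for illustration
self.click_navbar_element('Population', sleep=2)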

Example 4: main

# Required import: from selenium.webdriver.support.ui import WebDriverWait [as alias]
# Or: from selenium.webdriver.support.ui.WebDriverWait import find_element_by_link_text [as alias]
def main(OutputFileName="ITTCourseList.csv", FileDelimiter=";", GetCoursesFromURL='http://courses.it-tallaght.ie/',DeptListDivID='homeProgrammes', WebPageLoadDelay=10):
    # Function Parameters for IT-Tallaght  OutputFileName="ITTCourseList.csv", FileDelimiter=";", GetCoursesFromURL='http://courses.it-tallaght.ie/',DeptListDivID='homeProgrammes', WebPageLoadDelay=10
    # Function Parameters for IT-Blanch Course: OutputFileName="ITBlanchCourseList.csv", FileDelimiter=";", GetCoursesFromURL='http://courses.itb.ie/',DeptListDivID='homeProgrammesWide', WebPageLoadDelay=10
    Spacer ="\n------File Writer------\n"
    TextContentsFileName ="Text/"
    # Create files to store the output in (w)rite mode and add the header to the FileDelimiter specified in the function parameters 
    MyCSVFile = open(OutputFileName, "wb")    
    CourseList = csv.writer(MyCSVFile, delimiter=FileDelimiter)
    # Write the 1st row to give the column names
    CourseList.writerow(['Dept', 'link', 'CourseName','CourseAward', 'CourseCode', 'CourseDelivery', 'SemesterCount', 'CourseNFQLevel', 'CourseDepartment']) 
    URLToParse = GetCoursesFromURL
    # Open the webpage using urllib2
    WebContent = urllib2.urlopen(URLToParse)
    # Parse the content with BeautifulSoup's html.parser
    soup = BeautifulSoup(WebContent, "html.parser")
    # Open the webpage using selenium
    driver = webdriver.Firefox()
    # Give the page time to load before continuing by waiting 5 seconds
    driver.implicitly_wait(WebPageLoadDelay)  # seconds
    print('Trying to parse ', URLToParse ,' now')
    driver.get(URLToParse)
    subset = driver.find_element_by_id(DeptListDivID)
    # Just get the part of the document that contains the list of department #  xpath //*[(@id = "homeProgrammes")] contains the list of departments but just need the id field here
    print('Finding the DIV Id', DeptListDivID, " on the webpage")
    Depts = soup.find(id=DeptListDivID)
    # print("Print out the nicely formatted Unicode with tags on their own line")
    #print(soup.prettify())
    # print("Print just the part of the doc where the id homeProgrammes was found")
    # print(Depts)
    for links in Depts.findAll('a'): 
            print(links)
            # print("--------SPACER-----------------")
            print('Processing Department ',links.string,' link(s) now')
            # Using selenium find the link to the depts list of courses that matches the link string from beautiful soup and click it
            FollowLink = subset.find_element_by_link_text(links.string)
            FollowLink.click()
            # Wait up to WebPageLoadDelay seconds until the element with ID 'ProgrammeListForDepartment' is available
            try: 
                # Get the Overlay, i.e. the list of courses in the div ProgrammeListForDepartment (it could also be homeProgrammesWide, so check the webpage source and pass the appropriate parameter)
                Overlay = WebDriverWait(driver, WebPageLoadDelay).until(EC.presence_of_element_located((By.ID, "ProgrammeListForDepartment")))
                # Get it as a Beautiful soup object too as its easier to read
                SoupOverlay = BeautifulSoup(Overlay.get_attribute('outerHTML'), "html.parser")
                #print(Soup.prettify())
                # close the overlay
                Overlay.find_element_by_link_text("close").click()
            except NoSuchElementException as err:
                print(err.msg)
                # Exit now
                sys.exit(1)
            # Loop over the course links in the overlay
            for courselink in SoupOverlay.findAll('a'): 
                if courselink.get('href') != "":  
                    FullLink = URLToParse + courselink.get('href')
                    # Add them to the file
                    # = [links.string, courselink.get_text(), FullLink];
                    print("--Found these non blank urls--")
                    print("Dept: ", links.string, " link ",FullLink," Course Name", courselink.getText())
                    #Parse the course link itself and its child modules
                    print('Getting the course details and module text for ',courselink.getText()," now")
                    CourseContentsDictionary = ParseCoursePage.main(FullLink, URLToParse)
                    print("Got the following keys", CourseContentsDictionary.keys(), " back from the parsing function")
                    #Use the Coursecode as the unique filename
                    TextContentsFileName = CourseContentsDictionary['CourseCode']
                    # Get the plain string value so the u'...' prefix doesn't corrupt the html when it is written to file
                    TextContentsFileName = str(TextContentsFileName.strip())
                    #Create a file with utf-8 encoding
                    MyHTMLFile = codecs.open(TextContentsFileName+".html", "w",encoding='utf-8')
                    HeaderText = "<h1> Course Outcomes for "+ TextContentsFileName +"</h1>"
                    MyHTMLFile.write(HeaderText)
                    # Wrap the CourseOutcomes text in html div tags whose id is the course code
                    EncasedCourseOutcomes = u'<div id="{0}">{1}</div>'.format(
                        TextContentsFileName, CourseContentsDictionary['CourseOutcomes'])
                    MyHTMLFile.write(EncasedCourseOutcomes)
                    MyHTMLFile.write("<h1> Module Content </h1>")
                    MyHTMLFile.write(CourseContentsDictionary['CourseModuleText']) 
                    print("Writing the Module contents for ",TextContentsFileName," to file")
                    # Write the results to the file after calling the ParseCoursePage function to pull the data from that page and the module pages linked to it
                    print('Writing ', courselink.getText(), ' to the file ', TextContentsFileName)
                    #CourseList. Row Structure (['Dept', 'link', 'CourseName','CourseAward', 'CourseCode', 'CourseDelivery', 'SemesterCount', 'CourseNFQLevel', 'CourseDepartment', 'CourseOutcomes', 'CourseModuleText']) 
                    CourseList.writerow([links.string, FullLink, courselink.getText(),CourseContentsDictionary['CourseAward'] ,CourseContentsDictionary['CourseCode'],CourseContentsDictionary['CourseDelivery'], CourseContentsDictionary['SemesterCount'] ,CourseContentsDictionary['CourseNFQLevel'] ,CourseContentsDictionary['CourseDepartment']])
                    MyCSVFile.flush()       
    # Close the csv file
    MyCSVFile.close()
    print('File', MyCSVFile.name, 'closed')
    MyHTMLFile.close()
    driver.close()
    print('External Web browser closed')
    # Exit successfully
    sys.exit(0)
Developer: mobcdi, Project: Scrape, Lines of code: 90, Source file: CourseListToCSV.py
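
The comment block at the top of main already lists a second, ready-made parameter set for the IT-Blanchardstown site; a call using those values (copied from that comment, not re-verified against the live site) would look like this:

# Parameter values taken from the IT-Blanch comment at the top of main()
main(OutputFileName="ITBlanchCourseList.csv", FileDelimiter=";",
     GetCoursesFromURL='http://courses.itb.ie/',
     DeptListDivID='homeProgrammesWide', WebPageLoadDelay=10)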


Note: The examples of the selenium.webdriver.support.ui.WebDriverWait.find_element_by_link_text method in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.