当前位置: 首页>>代码示例>>Python>>正文


Python Browser.back方法代码示例

本文整理汇总了Python中splinter.Browser.back方法的典型用法代码示例。如果您正苦于以下问题:Python Browser.back方法的具体用法?Python Browser.back怎么用?Python Browser.back使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在splinter.Browser的用法示例。


在下文中一共展示了Browser.back方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: twitterMagic

# 需要导入模块: from splinter import Browser [as 别名]
# 或者: from splinter.Browser import back [as 别名]
def twitterMagic():
    print "Twitter Magic Time!!!"
    browser = Browser("firefox")
    browser.visit("https://twitter.com/signup")
    nameslist = grabNames()
    emaillist = grabEmails()
    passlist = grabPasses()
    userlist = grabUsers()
    # for each name in the list, fill the form in with data from the text files
    # note to self - you have to set variables to loop through and pick the next name after the first is signed up
    # STEPS!!!
    # fill name field
    # fill email
    # fill password
    # uncheck check mark
    # click signup button
    # (NEXT PAGE)
    # fill username?
    # profit
    x = 0
    for x in nameslist:
        browser.fill(nameslist[x], "full-name")
        browser.fill(emaillist[x], "email")
        browser.fill(passlist[x], "password")
        browser.fill(userlist[x], "username")
        browser.uncheck("checkbox")
        browser.find_by_name("Sign Up").first.click()
        browser.back()
        x = x + 1
开发者ID:gh054,项目名称:TwitterCreate,代码行数:31,代码来源:twitter-bot.py

示例2: splinter

# 需要导入模块: from splinter import Browser [as 别名]
# 或者: from splinter.Browser import back [as 别名]
def splinter(url):
    """Query exam scores for every (name, exam-no) row of ./chafen.xlsx
    via the web form at `url` and insert one record per person into the
    `info` table of the MySQL database `gwycf`.

    NOTE(review): DB host/credentials and the xlsx path are hard-coded;
    the scraper assumes the result page lays the scores out in fixed
    <td> positions — verify against the target site.
    """
    # ---------------- MySQL connection setup ----------------
    conn = MySQLdb.connect(host='192.168.1.8',user='root',passwd='123123',db='gwycf')
    cursor = conn.cursor()  # cursor used for the INSERTs below
    # ---------------------------------------------------------
    data = xlrd.open_workbook('./chafen.xlsx')
    table = data.sheets()[0]
    nrows = table.nrows 
    ncols = table.ncols
    print nrows
    
    browser = Browser('firefox')
#    browser = Browser('chrome')
    dir(browser)
    browser.visit(url)
    time.sleep(5)
    count = 0
    # One form submission (and one INSERT) per spreadsheet row.
    for i in range(nrows):
        #HaoMa = str(table.row_values(i)[1]).split(".")[0]
        name = table.row_values(i)[0]
        HaoMa = table.row_values(i)[1]
#        epost = table.row_values(i)[2]

        browser.find_by_name('TxtName').fill(name)
        browser.find_by_name('TxtHaoMa').fill(HaoMa)
        browser.find_by_id('btnSubmit').click()
        # ---- scrape score fields from fixed cells of the result page ----
        epost = browser.find_by_tag('td')[10].value
        ecode = browser.find_by_tag('td')[14].value
        xingce = browser.find_by_tag('td')[16].value
        shenlun = browser.find_by_tag('td')[18].value
        jiafen = browser.find_by_tag('td')[20].value
        zongfen = browser.find_by_tag('td')[22].value
        # -----------------------------------------------------------------
        query = u"insert into info values('%s','%s','%s','%s','%s','%s','%s','%s',0)" % (name,HaoMa,epost,ecode,xingce,shenlun,jiafen,zongfen)
        print count,query
        cursor.execute(query.encode('utf-8')) # source data ran fine as gbk; now encoded as utf-8
        conn.commit()
        browser.back()
        count = count +1
    cursor.close()
    conn.commit()
    conn.close()
开发者ID:ourzizz,项目名称:gwycf,代码行数:46,代码来源:FetchInfoByBrowser.py

示例3: str

# 需要导入模块: from splinter import Browser [as 别名]
# 或者: from splinter.Browser import back [as 别名]
browser.visit('https://egov.uscis.gov/cris/Dashboard/CaseStatus.do')
receipt_search = '000'
total_num = 0
while True:
    input = browser.find_by_id('receipt')
    button = browser.find_by_id('dashboardForm').find_by_name('submit')
    receipt_pre = 'EAC1490146'
    input.first.fill(receipt_pre + receipt_search)
    button.first.click()
    status = browser.find_by_id('caseStatus').find_by_xpath('//div/div/h4')
    details = browser.find_by_id('caseStatus').find_by_xpath('//div/div/p')
    target = False
    index_end = 3
    date = ""
    for detail in details:
        if 'we received this I765 APPLICATION FOR EMPLOYMENT AUTHORIZATION' in detail.value:
            target = True
            index_end = detail.value.index('we received this I765 APPLICATION FOR EMPLOYMENT AUTHORIZATION')
            date = detail.value[3:index_end-2]
            break
    #time.sleep(60)
    if target and 'Initial Review' in status[0].value:
        print receipt_pre+str(receipt_search)+"    "+date
	total_num = total_num + 1
    receipt_search = str(int(receipt_search) + 1).zfill(3)
    if int(receipt_search) >= 999:
        break
    browser.back()
print 'done'
print str(total_num)
开发者ID:BraveHorseGogogo,项目名称:save_labor,代码行数:32,代码来源:I765_autocheck.py

示例4: SplinterBrowserDriver

# 需要导入模块: from splinter import Browser [as 别名]
# 或者: from splinter.Browser import back [as 别名]

#.........这里部分代码省略.........

    @property
    def page_source(self):
        """Full HTML of the current page, as splinter exposes it."""
        return self._browser.html

    @property
    def page_title(self):
        """Title of the current page."""
        return self._browser.title

    def open_url(self, url):
        """Navigate to `url` via the raw webdriver (bypasses splinter's visit())."""
        self._browser.driver.get(url)

    def quit(self):
        """Shut the browser down and release its resources."""
        return self._browser.quit()

    def is_element_visible(self, element):
        """True if the splinter element is currently visible."""
        return element.visible

    def get_element_text(self, element):
        """Visible text content of the element."""
        return element.text

    def get_element_by_xpath(self, selector):
        """Element list matching the given XPath expression."""
        return self._browser.find_by_xpath(selector)

    def get_element_by_css(self, selector):
        """Element list matching the given CSS selector."""
        return self._browser.find_by_css(selector)

    def get_element_by_id(self, selector):
        """Element list matching the given DOM id."""
        return self._browser.find_by_id(selector)

    def get_element_by_tag(self, selector):
        """Element list matching the given tag name."""
        return self._browser.find_by_tag(selector)

    @element_action
    def type(self, element, text, slowly=False):
        """Type `text` into the element; `slowly=True` sends one key at a time."""
        return element.type(text, slowly)

    @element_action
    def fill(self, element, text):
        """Replace the element's current value with `text`.

        NOTE(review): body re-indented to the file's 4-space convention
        (the original used a 2-space body here, unlike every sibling).
        """
        return element.fill(text)

    @element_action
    def clear(self, element):
        """Clear the element by filling it with an empty string.

        NOTE(review): body re-indented to the file's 4-space convention
        (the original used a 2-space body here, unlike every sibling).
        """
        self.fill(element, '')

    @element_action
    def click(self, element):
        """Click the element."""
        return element.click()

    @element_action
    def check(self, element):
        """Tick a checkbox element."""
        return element.check()

    @element_action
    def uncheck(self, element):
        """Untick a checkbox element."""
        return element.uncheck()

    @element_action
    def mouse_over(self, element):
        """Move the mouse pointer over the element (hover)."""
        return element.mouse_over()

    @element_action
    def mouse_out(self, element):
        """Move the mouse pointer off the element."""
        return element.mouse_out()

    def reload(self):
        """Reload the current page."""
        return self._browser.reload()

    def go_back(self):
        """Navigate one step back in the browser history."""
        return self._browser.back()

    def go_forward(self):
        """Navigate one step forward in the browser history."""
        return self._browser.forward()

    def execute_script(self, script):
        """Evaluate a JavaScript expression in the page and return its value."""
        return self._browser.evaluate_script(script)

    def get_iframe(self, iframe_id):
        """Context manager / handle for the iframe with the given id."""
        return self._browser.get_iframe(iframe_id)

    def get_alert(self):
        """Handle for the currently open browser alert."""
        return self._browser.get_alert()

    def attach_file(self, input_name, file_path):
        """Attach the file at `file_path` to the <input> named `input_name`."""
        return self._browser.attach_file(input_name, file_path)

    def wait_pageload(self, timeout=30):
        """Poll document.readyState until the page reports 'complete'.

        Raises PageNotLoadedException once more than `timeout` seconds
        have been spent waiting.
        """
        poll_every = 0.05
        waited = 0

        while self.execute_script('document.readyState') != 'complete':
            self.wait(poll_every)
            waited += poll_every
            if waited > timeout:
                raise PageNotLoadedException

    def click_and_wait(self, element, timeout=30):
        """Click the element, then block until the resulting page load finishes."""
        self.click(element)
        self.wait_pageload(timeout)
开发者ID:leogamas,项目名称:pyfunct,代码行数:104,代码来源:splinter_driver.py

示例5: DownPatent

# 需要导入模块: from splinter import Browser [as 别名]
# 或者: from splinter.Browser import back [as 别名]
class DownPatent(object):
    def __init__(self, db, down_url):
        """Keep the DB handle and query-page URL, and start a headless
        PhantomJS browser with a 10-second implicit wait."""
        self.db = db
        self.down_url = down_url
        self.browser = Browser("phantomjs", wait_time=10)
        # visible-browser alternative for debugging:
        #self.browser = Browser()

    # Download the patent document for the given patent number.
    def download(self, patentno):
        #访问网页
        #网页加载超时
        #down_flag, 0:未下载,1:不存在,2:下载失败
        download_link = ""
        down_flag = 0
        if True:
            print "打开网页"
            self.browser.visit(self.down_url)
            if not self.browser.is_element_not_present_by_value("查询", wait_time=10):
                #填写专利号
                self.browser.fill("cnpatentno", patentno)
                self.browser.find_by_value("查询").first.click()
                print "填写专利号"
                #连接超时,404
                if self.browser:
                    print "打开验证码网页"
                    #一个最多循环20次
                    code_handler = CodeHandler()
                    #填写验证码
                    list_fill_text = []
                    #验证码路径
                    list_code_path = []
                    #验证码分割标志
                    list_split_flag = []
                    #验证码识别标志
                    list_reg_flag = []
                    for code_num in xrange(20):
                        print code_num
                        #查找验证码
                        if not self.browser.is_element_not_present_by_id("getcode", wait_time=5):
                            print "查找验证码"
                            #截图
                            #self.browser.driver.maximize_window()
                            self.browser.driver.save_screenshot("screenshot.png")
                            #获取验证码图片
                            image = Image.open("screenshot.png")
                            image_location = self.find_location(image)
                            image_code = image.crop((image_location[0], image_location[1], image_location[0]+52, image_location[1]+21))
                            save_path = "static/images/onlinecode/" + time.ctime() + ".png"
                            save_path_temp = "../%s" % save_path
                            image_code.save(save_path_temp)
                            list_code_path.append(save_path)

                            #分割图片
                            list_split_image = self.deal_split(code_handler, image_code)
                            
                            #识别,如果能正确识别,则识别,不能,则重新获取验证码      
                            if len(list_split_image) == 4:
                                print "正确分割"
                                list_split_flag.append(1)
                                reg_plain_text = self.reg_code(list_split_image)
                                fill_text = "".join(reg_plain_text)
                                list_fill_text.append(fill_text)
                                #填写验证码
                                #hand_fill_text = raw_input("Enter fill text:")
                                self.browser.fill("ValidCode", fill_text)
                                self.browser.find_by_value("确定").first.click()

                                print self.browser.html.encode("utf-8").find("验证码输入错误") 
                                if self.browser.html.encode("utf-8").find("验证码输入错误") == -1:
                                    list_reg_flag.append(1)
                                    if self.browser.html.encode("utf-8").find("没有找到该专利") == -1:
                                        down_link_one = self.browser.find_link_by_text("申请公开说明书图形下载(标准版)")
                                        down_link_two = self.browser.find_link_by_text("申请公开说明书图形下载(极速版)")
                                        if down_link_one or down_link_two:
                                            print "查找说明书图形下载链接"
                                            list_reg_flag.append(1)
                                            if down_link_one:
                                                self.browser.click_link_by_text("申请公开说明书图形下载(标准版)")
                                            else:
                                                self.browser.click_link_by_text("申请公开说明书图形下载(极速版)")
                                            
                                            print "查找下载链接"
                                            #查找下载链接
                                            download_a = self.browser.find_link_by_text("下载专利")
                                            if download_a:
                                                download_link = download_a["href"]
                                            
                                                #找到下载链接
                                                down_flag = 3
                                                break
                                            else:
                                                print "下载失败"
                                                #下载失败
                                                down_flag = 2
                                                break
                                        '''
                                        else:
                                            print "识别正确,未找到链接"
                                            list_reg_flag.append(0)
                                            self.browser.back()
#.........这里部分代码省略.........
开发者ID:happyin3,项目名称:captchaimgs,代码行数:103,代码来源:patentclass.py

示例6: scrape

# 需要导入模块: from splinter import Browser [as 别名]
# 或者: from splinter.Browser import back [as 别名]
def scrape(chromedriver_path='/Users/joshchung/Bootcamp/chromedriver'):
    """Scrape a bundle of Mars data and return it as one dictionary.

    Sources: NASA Mars news (latest article), JPL featured image URL,
    the @marswxreport Twitter feed (latest tweet), a Mars facts HTML
    table, and the four USGS hemisphere image pages.

    chromedriver_path: location of the chromedriver binary.  The default
    preserves the original hard-coded path, so existing callers are
    unaffected.

    NOTE(review): the original never quit the browser, leaking a Chrome
    process per call; the scraping now runs inside try/finally.
    """
    # Dictionary accumulated from every scrape step below.
    return_dict = {}

    executable_path = {'executable_path': chromedriver_path}
    browser = Browser('chrome', **executable_path, headless=False)
    try:
        # Scrape NASA Mars news: date, title and teaser of the top slide.
        url = 'https://mars.nasa.gov/news/'
        browser.visit(url)
        html = browser.html
        soup = bs(html, 'lxml')
        results = soup.find('li', class_="slide")
        article_date = results.find('div', class_="list_date").text
        article_title = results.find('div', class_="content_title").text
        article_teaser = results.find('div', class_="article_teaser_body").text
        return_dict.update({'article_date':article_date,
                            'article_title':article_title,
                            'article_teaser':article_teaser})

        # Scrape JPL featured image: URL lives inside the style attribute.
        url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
        browser.visit(url)
        html = browser.html
        soup = bs(html, 'lxml')
        results = soup.find_all('article', class_="carousel_item")
        url_string = results[0].get('style')
        url_string = url_string.split("url('")
        url_string = url_string[1].split("');")
        url_string = url_string[0]
        img_url = 'https://www.jpl.nasa.gov' + url_string
        return_dict.update({'img_url':img_url})

        # Scrape Twitter: most recent Mars weather tweet.
        url = 'https://twitter.com/marswxreport'
        browser.visit(url)
        html = browser.html
        soup = bs(html, 'lxml')
        last_tweet = soup.find('p', class_="tweet-text").text
        last_tweet = last_tweet.replace('\n', ' ')
        return_dict.update({'last_tweet':last_tweet})

        # Scrape Mars facts table straight into HTML via pandas.
        url = 'https://space-facts.com/mars/'
        tables = pd.read_html(url)
        mars_df = tables[0]
        mars_df.columns = ['Statistic','Values']
        mars_df = mars_df.set_index('Statistic')
        mars_table = mars_df.to_html()
        mars_table = mars_table.replace('\n', '')
        return_dict.update({'mars_table':mars_table})

        # Scrape the four Mars hemisphere pages: follow each h3 link,
        # collect image/download URLs and the title, then go back.
        url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
        mars_urls = {}
        for x in range(0,4):
            browser.visit(url)
            links = browser.find_by_tag('h3')
            links[x].click()
            html = browser.html
            soup = bs(html, 'lxml')
            downloads = soup.find('div', class_="downloads")
            dl_links = downloads.find_all('a')
            img_link = dl_links[0].get('href')
            dld_link = dl_links[1].get('href')
            title = soup.find('h2', class_="title").text
            mars_urls.update({
                f"marsimg_{x}" : img_link,
                f"marstitle_{x}": title,
                f"marsdld_{x}": dld_link
            })
            browser.back()
        return_dict.update(mars_urls)
    finally:
        # Always release the Chrome process, even if a scrape step fails.
        browser.quit()

    # Return dictionary when function is run
    return return_dict
开发者ID:jchung243,项目名称:homework-ten-webscrape,代码行数:79,代码来源:scrape_mars.py

示例7: scrape

# 需要导入模块: from splinter import Browser [as 别名]
# 或者: from splinter.Browser import back [as 别名]

#.........这里部分代码省略.........
    # # In[17]:


    tables = pd.read_html(url)
    tables


    # # In[18]:


    df = tables[0]
    df.head()


    # # In[19]:


    df.set_index(0, inplace=True)
    clean_df = df
    clean_df


    # # In[20]:


    html_table = clean_df.to_html()
    html_table


    # # In[21]:


    html_table.replace('\n', '')


    # # In[22]:


    df.to_html('mars_table.html')


    # # In[23]:


    # #Visit the USGS Astrogeology site here to obtain high resolution images for each of Mar's hemispheres.
    # #You will need to click each of the links to the hemispheres in order to find the image url to the full resolution image.
    # #Save both the image url string for the full resolution hemipshere image, and the Hemisphere title containing the hemisphere name. Use a Python dictionary to store the data using the keys img_url and title.
    # #Append the dictionary with the image url string and the hemisphere title to a list. This list will contain one dictionary for each hemisphere.
    executable_path = {'executable_path': 'chromedriver.exe'}
    browser = Browser('chrome', **executable_path, headless=False)


    # # In[24]:


    # #opening browser
    url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url)


    # # In[25]:


    # #clicking into Cerberbus Hemisphere Enhanced page
    # #this needs to be modified to click into new hyperlink each time (store hyperlinks in a list to access?)
    hemisphere_info = []
    hyperlinks = ['Cerberus Hemisphere Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis Major Hemisphere Enhanced', 'Valles Marineris Hemisphere Enhanced']

    for hyperlink in hyperlinks:
        browser.click_link_by_partial_text(hyperlink)
        html = browser.html
        soup = bs(html, 'html.parser')
        image = soup.find('img', class_='wide-image')
        url = image.get('src')
        image_url = 'https://astrogeology.usgs.gov' + url
        results = soup.find('h2', class_="title").text
        hemisphere_info.append({'title':results, 'img_url': image_url})
        time.sleep(1)
        browser.back()



    # # In[26]:


    # #print(hemisphere_info)


    # # In[ ]:


    browser.quit()
    mars_info = {
        "image_URL": featured_image_url,
        "Mars_weather": mars_tweet,
        "Mars_table": mars_table(),
       # 'mars_facts': 'foo bar baz', 
        "Hemisphere_info": hemisphere_info
    }
    return mars_info
开发者ID:sach7x,项目名称:Sach,代码行数:104,代码来源:scrape_mars.py

示例8: SurfThread

# 需要导入模块: from splinter import Browser [as 别名]
# 或者: from splinter.Browser import back [as 别名]
class SurfThread(threading.Thread):

   
    def __init__(self, hoehe, breite, _format):
        """Prepare a surfing thread: empty page/keyword lists, per-visit
        random parameters (filled in later by __generateRandom), and a
        Firefox browser using the profile from `constants`.

        hoehe/breite/_format: screen height/width and capture format,
        stored as class attributes (shared across instances).
        """
        threading.Thread.__init__(self) 
        self.seiten = []
        self.words = []
        self.toWait = None
        self.elemNo = None
        self.wordNo = None
        self.clickNo = None
        self.clickX = None
        self.clickY = None
        self.back = None
        self.changeTabs = None
        self.__browser = Browser("firefox", profile=constants.profile)
        time.sleep(5)
        #self.__maximizeWindow()
        #time.sleep(5)        
        SurfThread.timer = False
        SurfThread.hoehe = hoehe
        SurfThread.breite = breite 
        SurfThread._format = _format


    def __readData(self):
        # read homepages to visit 
        surfListe = open("/home/steffi/Dokumente/surfListe.txt", "rb")
        for line in surfListe: 
            self.seiten.append(line)
        surfListe.close()
        # read words for search in google, wikipedia, amazon, youtube
        keyWords = open("/home/steffi/Dokumente/keyWords.txt", "rb").readlines()
        for line in keyWords: 
            self.words.append(line.decode("utf-8"))
        #keyWords.close(), 
    print "data read"
    
    
    def run(self):
        
        self.__readData()    
       
        rand = random.randint(2,5)
        for i in range(0, rand):
            print "noch "+ str(i) +" mal"
	    print "TIMER:" +str(SurfThread.timer)
            if SurfThread.timer == False :
            
                self.__generateRandom()
                    
                print "visit: "+self.seiten[self.elemNo]
                self.__visitHomepage( self.seiten[self.elemNo].strip())
                print "clickNo: "+ str(self.clickNo)
		print "towait = "+ str(self.toWait)
                time.sleep(self.toWait)
                for i in range(self.clickNo):
                    time.sleep(random.randrange(5,10))
                    if i % 2 == 0:
                        self.__generateRandomClick()
                    if i == 2:
                        self.__pageDown()
                        time.sleep(random.randrange(1,5))
                    if i == (self.clickNo-1):
                        self.__pageBottom()
                        time.sleep(random.randrange(2,10))
                    if i%2 == 0 and self.back == 1:
                        self.__goBack()
                        time.sleep(random.randrange(2,10))  

    	path = self.__browser.driver.firefox_profile.profile_dir
    	print path
    	os.remove(constants.profile+'/places.sqlite')
    	shutil.copyfile(path+'/places.sqlite', constants.profile+'/places.sqlite')
        self.__closeWindow()
    	shutil.rmtree(path)
    	#os.rmdir(path)
        print "Firefox beendet"
        
        
    def starte(self):
        """Run the surf loop synchronously in the calling thread
        (instead of going through Thread.start())."""
        self.run()
    
    def __generateRandom(self):
        """Draw fresh random browsing parameters for the next page visit.

        Note: the order of the randrange calls determines the RNG stream,
        so it must not be reordered.
        """
        self.toWait = random.randrange(5,45)    # seconds to linger on the page
        self.elemNo = random.randrange(0,len(self.seiten))    # which URL to visit
        self.clickNo = random.randrange(2,7)    # how many click rounds
        self.back = random.randrange(0,10)      # 1 => occasionally go back
        self.wordNo = random.randrange(0, len(self.words))    # which search keyword
    
    def __generateRandomClick(self):
        """Move the pointer to a random on-screen position and left-click,
        driven through the external `xte` tool (X test extension)."""
        self.clickX = random.randrange(100,constants.BREITE - 50) #1366
        self.clickY = random.randrange(50,constants.HOEHE-50) #768
        command = "mousemove "+ str(self.clickX) + " "+ str(self.clickY)
        print command
        subprocess.call(["xte", command])
        subprocess.call(["xte", "mouseclick 1"])
      
    def __followLink(self, text, index=0):
        if index == None:
#.........这里部分代码省略.........
开发者ID:mmulazzani,项目名称:alibiFramework,代码行数:103,代码来源:surfThread.py


注:本文中的splinter.Browser.back方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。