

Python Browser.back Method Code Examples

This article collects typical usage examples of the Python mechanize.Browser.back method, gathered from open-source projects. If you are wondering what Browser.back does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, mechanize.Browser.


The following presents 14 code examples of the Browser.back method, sorted by popularity by default.
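
Before the collected examples, here is a minimal sketch of the method itself: mechanize keeps a history of visited pages, and Browser.back() pops that history and returns the previous response (reloading it if it was not fully read). The URLs below are placeholders, not taken from any of the examples.

from mechanize import Browser

br = Browser()
br.set_handle_robots(False)            # skip robots.txt for this sketch
r1 = br.open("http://example.com/")    # placeholder URL
r2 = br.open("http://example.com/a")   # placeholder URL
prev = br.back()                       # back to the first page
print(prev.geturl())                   # -> http://example.com/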

Example 1: authorize

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
# Also required: from itertools import permutations
def authorize(n):

	number = "0"

	# try code lengths from 1 up to n
	for k in range(1, n + 1):

		# generates permutations of the string

		# add n copies of each digit to the pool so permutations can repeat digits (e.g. 0000 or 1111)

		perms = [''.join(p) for p in permutations('0123456789' * n, k)]
		print "Printing permutations for k = " + str(k)

		# create a set to remove any possible duplicates that result from having multiple copies of the same number
		perms = set(perms)

		for permutation in perms:

			br = Browser()
			br.open("<URL_GOES_HERE>")

			# if a page has multiple forms, change the index appropriately
			# e.g. the 4th form would have index 3
			br.form = list(br.forms())[0]

			print "Trying permutation: " + permutation

			# copy and paste this line to fill in all the fields
			br.form["<FIELD_NAME>"] = "<VALUE_FOR_FIELD>"

			# the line that guesses at the field
			br.form["<NAME_OF_CODE_FIELD>"] = permutation

			# prints the finished form, can remove to reduce I/O costs
			print br.form

			# submits the form and grabs the html page after the submit
			response = br.submit()
			htmlFile = response.get_data()

			# most websites display a message if the code is not successful, replace the field below with this
			# searches for the error/failure message in the returned html page
			# if it doesn't find it, the permutation worked! otherwise resets the form

			if "<FAILURE_MESSAGE>" not in htmlFile:
				number = perm
				break
			else:
				br.back()
	return number
Author: bsmarimon, Project: form_field_guesser, Lines: 53, Source file: formIntegerGuesser.py

Example 2: __init__

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
# This example also uses urllib, re, BeautifulSoup and mechanize.LinkNotFoundError
class Lockerz:
    def __init__( self, name, password ):
        self.name = name
        self.password = password
        self.br = Browser()

    def connect( self ):
        self.br.open( "http://www.lockerz.com" )
        self.br.select_form( nr=0 )
        self.br["handle"] = self.name
        self.br["password"] = self.password

        self.br.submit()
        return "Lockerz : My Locker" in self.br.title()

    def answer_all( self, generator, recursive=False ):
        page = self.br.open( "http://www.lockerz.com/dailies" )
        self._answer_all( page, generator )
        # optionally walk back through older pages of dailies
        if recursive:
            i = 0
            while True:
                try:
                    i+=1
                    page = self.br.follow_link( text_regex="< Previous Posts" )
                    print "-- page %d" % i
                    self._answer_all( page, generator )
                except LinkNotFoundError:
                    break

    def answer( self, id, answer ):
        d = urllib.urlencode( { "id": id, "a": answer, "o": None } )
        r = self.br.open( "http://www.lockerz.com/daily/answer", d )
        print r.read()
        self.br.back()

    def getPTZ( self ):
        s = BeautifulSoup( self.br.open( "http://www.lockerz.com" ).read() )
        return s.find( "span", attrs={ "class": "ptz_value" } ).string
 
    def _answer_all( self, page, generator ):
        s = BeautifulSoup( page.read() )
        e = s.findAll( "div", attrs={ "class": re.compile( "dailiesEntry dailyIndex*" ) } )
        for i in e:
            try:
                self.answer( i["id"], generator.getRandomSentence() )
            except KeyError:
                print "Already answered ..."    
Author: jackMort, Project: mechLockerz, Lines: 50, Source file: lockerz.py

Example 3: test_reload_read_incomplete

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
def test_reload_read_incomplete(self):
    from mechanize import Browser
    browser = Browser()
    r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
    # if we don't do anything and go straight to another page, most of the
    # last page's response won't be .read()...
    r2 = browser.open(urljoin(self.uri, "mechanize"))
    self.assert_(len(r1.get_data()) < 4097)  # we only .read() a little bit
    # ...so if we then go back, .follow_link() for a link near the end (a
    # few kb in, past the point that always gets read in HTML files because
    # of HEAD parsing) will only work if it causes a .reload()...
    r3 = browser.back()
    browser.follow_link(text="near the end")
    # ... good, no LinkNotFoundError, so we did reload.
    # we have .read() the whole file
    self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
Author: Almad, Project: Mechanize, Lines: 18, Source file: functional_tests.py

Example 4: get_br

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
# This example also uses cookielib, mechanize.RobustFactory and mechanize.HTTPRefreshProcessor,
# plus project-local NoHistory, back_func and USER_AGENT helpers
def get_br():
    # TODO (low priority): set additional request headers, e.g.:
    #   Accept-Encoding: identity
    #   Host: _login.weibo.cn
    #   Referer: http://weibo.cn/
    #   Connection: close
    #   User-Agent: Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)
    br = Browser(factory=RobustFactory(), history=NoHistory(),)
    cj = cookielib.LWPCookieJar()
    br.back = back_func  # replace back() with the project's back_func (history is disabled via NoHistory above)
    br.set_cookiejar(cj)
    br.set_handle_equiv(True)
    #br.set_handle_gzip(True)  # gzip handling is not yet an official mechanize feature
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(HTTPRefreshProcessor(), max_time=10)
    br.addheaders = [('User-agent', USER_AGENT)]
    return br
Author: hackrole, Project: scrapy-utils, Lines: 22, Source file: mechanize_br.py
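
Example 4 passes history=NoHistory() and rebinds br.back to back_func, but neither helper appears in the excerpt. As a hedged sketch of what such helpers might look like (the names come from the excerpt; the bodies below are assumptions, not the scrapy-utils source): mechanize accepts any object with the History interface, so a do-nothing history keeps memory flat during long crawls, and back() can be stubbed to match.

# Hypothetical sketch only -- the real NoHistory/back_func live elsewhere
# in the source project.
class NoHistory(object):
    # mechanize calls these to record visited responses; recording
    # nothing keeps memory usage flat during long crawls
    def add(self, request, response):
        pass
    def clear(self):
        pass
    def close(self):
        pass

def back_func(*args, **kwargs):
    # with history disabled, there is nothing to go back to
    return None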

Example 5: __init__

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]

#......... some code omitted here .........
        if not response:
            return False
        else:
            return True
         
    def getLabelByValueDict(self, control):
        """
        From control items, create a dictionary by values
        """   
        d = {}
        for item in control.items:
            value = item.attrs['value']
            label = item.attrs['label']
            d[value] = label
                
        return d
    
    def getValueByLabelDict(self, control):
        """
        From control items, create a dictionary by labels
        """
        d = {}
        for item in control.items:
            value = item.attrs['value']
            label = item.attrs['label']
            d[label] = value

        return d
    
    def getRequests(self,**kargs):
        """
        getRequests actually goes through all the Savannah requests and creates
        JSON files if the ticket is not Closed and the status of the item is Done.
        It also reports back a summary of the requests in Savannah.
        """
        requests = []
        
        # Open Browser and login into Savannah
        self.br=Browser()
        self.isLoggedIn = self.login2Savannah()
        
        if self.isLoggedIn:
            if not kargs:
                self.selectQueryForm(approval_status='1',task_status='0')
            else:
                self.selectQueryForm(**kargs)
            self.createValueDicts()
        
            self.br.select_form(name="bug_form")
            response = self.br.submit()

            html_output = response.read()
            
            scramArchByCMSSW = self.getScramArchByCMSSW()
            self.nodeMappings = self.phedex.getNodeMap()
            
            for link in self.br.links(text_regex="#[0-9]+"):
                response = self.br.follow_link(link)
                
                try:
                    ## Get Information
                    self.br.select_form(name="item_form")

                    ## remove leading &nbsp and # from task
                    task = link.text.replace('#','').decode('utf-8').strip()
                    print("Processing ticket: %s" % task)
Author: BrunoCoimbra, Project: WMCore, Lines: 70, Source file: SavannahRequestQuery.py

Example 6: sleep

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
            
        moredeets = fonts[5].findAll('b')
        warranttype = moredeets[1].get_text(strip=True)
        court = moredeets[2].get_text(strip=True)
            
        agency = fonts[6].findAll('b')[0].get_text(strip=True)
           
        due = fonts[7].findAll('b')[0].get_text(strip=True)
            
        charges = []
            
        for row in table2.findAll('tr')[1:]:
            col = row.findAll('td')
            crime = col[0].get_text(strip=True)
            charges.append(crime) 
                
        problems = ' and '.join(charges)
            
        fullrecord = (warrant_number, rest, last, dob, eyes, hair, race, sex, height, weight, address, apt, city, state, issued, status, warranttype, court, agency, due, problems, "\n")
        print rest.upper() + " " + last.upper()
            
        f.write("|".join(fullrecord))
        count = count + 1
        
        # navigate back
        mech.back()
        sleep(1)

f.flush()
f.close()
Author: cjwinchester, Project: tripwire, Lines: 32, Source file: sarpywarrants.py

Example 7: Downloader

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
# This example also uses the time module
class Downloader(object):
    """ Downloads all words.
    """

    def __init__(self):

        self.browser = Browser()
        self.browser.set_handle_robots(False)
        self.words = open('tmp.dict', 'ab')

    def parse_word(self, url):
        """ Downloads word description.
        """

        print 'Parsing:', url
        # follow the word link, then its 'taisyti' (edit) link; only the
        # edit page's HTML is parsed below
        page = self.browser.follow_link(tag="a", url=url).read()
        page = self.browser.follow_link(text_regex=r'taisyti').read()

        self.browser.back()
        self.browser.back()

        word, meaning = page.split('<textarea')
        word = word.split('<h2>')[-1].split('</h2>')[0]
        meaning = meaning.split('>', 1)[1]
        meaning = meaning.split('</textarea>')[0]

        for search, replace in [
                ('\n', '',),
                #('\x8d', u'\u2013\u0308'.encode('utf-8'),),
                ]:
            word = word.replace(search, replace)
            meaning = meaning.replace(search, replace)

        self.words.write(word)
        self.words.write('=')
        self.words.write(meaning)
        self.words.write('\n')

    def parse_page(self, url):
        """ Downloads all words from single page.
        """

        print 'Parsing:', url
        page = self.browser.open(url).read()

        page = page.split('<table cellpadding="6"><tr valign="top"><td>')[1]
        page = page.split('</td></tr></table>')[0]
        page = page.replace('</td>\n<td>', '')

        open('tmp.html', 'wb').write(page)

        for a in page.split('\n'):
            try:
                word_url = a.split('\"')[1]
            except IndexError:
                continue
            oldurl = self.browser.geturl()
            try:
                self.parse_word(word_url)
            except Exception as e:
                print "Error:", e
                self.browser.open(oldurl)

        time.sleep(10)

    def parse_letter(self, url):
        """ Downloads all words from given letter page.
        """

        print 'Parsing:', url

        page = self.browser.open(url).read()
        page = page.split('</a></p></td></tr></table>')[0]
        open('tmp.html', 'wb').write(page)
        try:
            pages_count = int(page.split('\">')[-1])
        except ValueError:
            pages_count = 1

        for i in range(pages_count):
            self.parse_page(url + str(i + 1) + '/')

        time.sleep(60)

    def parse(self, url, skip):
        """ Downloads all words from given url.
        """

        page = self.browser.open(url).read()
        page = page.split('bgcolor="#FFD780" colspan="2">')[1]
        page = page.split('</td></tr><tr>', 1)[0]

        for i, a in enumerate(page.split(' | ')):
            if i < skip:
                continue
            letter_url = a.split('\"')[1]
            self.words.write('#LETTER ({1}):{0}\n'.format(letter_url, i))
            self.parse_letter(url + letter_url)
Author: vakaras, Project: mopendict, Lines: 100, Source file: download.py

Example 8: main

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
# This example also uses getpass, re and BeautifulSoup
def main(ra, senha, arquivos):

    if not ra:
        ra = raw_input("Digite seu RA: ")

    if not senha:
        senha = getpass.getpass("Senha: ")

    br = Browser()
    br.set_handle_equiv(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)

    br.addheaders = [('User-agent',
        'Mozilla/5.0 (X11; Linux x86_64; rv:9.0.1) Gecko/20100101 Firefox/9.0.1')]

    #link = 'https://progradweb.ufscar.br/progradweb/servlet/Superior'
    link = 'http://progradweb.ufscar.br:8080/progradweb/servlet/Superior'
    br.open(link)
    br.select_form(name="fSuperior")
    br.form["Usuario"] = ra
    br.form["sess"] = senha
    br.submit()

    br.select_form(name="fPrincipal")
    resp = br.submit()
    # Fix nested FORMs (re-serialize the broken HTML with BeautifulSoup)
    soup = BeautifulSoup(resp.get_data())
    resp.set_data(soup.prettify())
    br.set_response(resp)

    br.select_form(name="fHistorico")
    pagina = br.submit()

    data = pagina.get_data()

    # Does the transcript have more than one emphasis (track)?
    if data.find("Clique em uma das &ecirc;nfases abaixo para ver o") != -1:
        links = list(br.links(url_regex=re.compile(r"^javascript:submita")))

        print 'Enfases:'
        for index, link in enumerate(links, start=1):
            print '({}) - {}'.format(index, link.text)

        n = int(raw_input("Digite o numero da enfase: "))

        pattern = re.compile(r'''
        javascript:submita\(\'
        (\d*)\',%20\'
        (\d*)\',%20\'
        (\d)
        ''', re.VERBOSE)

        enfase, ano, semestre = pattern.search(links[n - 1].url).groups()

        br.back()
        br.select_form(name="fHistorico")

        br.form.new_control('text', 'RA', {'value': ra})
        br.form.new_control('text', 'Enfase', {'value': enfase})
        br.form.new_control('text', 'AnoIni', {'value': ano})
        br.form.new_control('text', 'SemIni', {'value': semestre})
        br.form.new_control('text', 'Tipo', {'value': '1'})
        br.form.new_control('text', 'MaisEnfase', {'value': 'S'})
        br.form.new_control('text', 'Modo', {'value': '2'})
        br.form.new_control('text', 'CodigoCurso', {'value': ''})
        br.form.new_control('text', 'Certificado', {'value': '0'})
        br.form.new_control('text', 'Consulta', {'value': '0'})
        br.form.new_control('text', 'sC', {'value': '51'})
        br.form.fixup()
        pagina = br.submit()

    html = BeautifulSoup(pagina.get_data())
    linhas = html.findAll('tr')

    creditos_aprovados = 0
    creditos_solicitados = 0
    creditos_desistentes = 0
    creditos_trancados = 0
    creditos_reprovados = 0
    nota_ponderada = 0

    if arquivos:
        materias = []

    for lin in linhas:
        if len(lin) == 21:
            materia = lin.findAll('td')

            nome = materia[2].text.encode('utf-8')
            nota = materia[3].text
            if nota == '&nbsp;':
                nota = 0
            nota = float(nota)

            resultado = materia[5].text
            creditos = int(materia[6].text)

            if arquivos:
#......... some code omitted here .........
Author: agnaldoneto, Project: juliobs, Lines: 103, Source file: ira.py

Example 9: list

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
    #print "rad 159"
    #print response10.read()
    #print list(br.links())
    root = lxml.html.fromstring(response10.read())
    namn = root.cssselect("td.reportTitle h1")[0] 
    namn = namn.text_content()
    #print type(namn)
    namn = namn.encode('utf-8')  # namn has type ElementUnicodeResult; this converts it to a UTF-8 string
    #print type(namn)
    print namn


    oms = root.cssselect("tr.bgLightPink td")
    print oms[14].text_content()

    br.back()

    br.select_form(name="f_search")
    br["what"]=namn
    response11 = br.submit()

    #print response11.read()

    root = lxml.html.fromstring(response11.read())
    print root.cssselect("td.text11grey6 span")  # this sometimes fails here, i.e. the returned list is empty. Why? 22 May 2013
    if len(root.cssselect("td.text11grey6 span"))>1:

        verksamhet = root.cssselect("td.text11grey6 span")[1]
        verksamhet =  verksamhet.tail
    else:
        verksamhet = "Verksamhet ej funnen"    
Author: carriercomm, Project: scraperwiki-scraper-vault, Lines: 33, Source file: styrelseandringar.py

Example 10: __init__

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]

#......... some code omitted here .........
                    ## Get current status
                    control = self.br.find_control("resolution_id",type="select")
                    status_id = control.value
                
                    ## Get current request status
                    control = self.br.find_control("status_id",type="select")
                    request_status_id = control.value
                    RequestStatusByValueDict = self.getLabelByValueDict(control)

                    ## Get assigned to
                    control = self.br.find_control("assigned_to",type="select")
                    AssignedToByValueDict = self.getLabelByValueDict(control)
                    assignedTo_id = control.value

                    ##Assign task to the physics group squad
                    if AssignedToByValueDict[assignedTo_id[0]]!=group_squad:
                        control.value = [self.getValueByLabelDict(control)[group_squad]]
                        self.br.submit()

                    ## Construction of the new dataset name
                    ## remove leading hypernews or physics group name and StoreResults+Version

                    if len(dataset_version)>0:
                        dataset_prefix = "StoreResults-"+dataset_version
                    else:
                        dataset_prefix = "StoreResults"
                    
                    if input_processed_dataset.find(self.GroupByValueDict[group_id])==0:
                        new_dataset = input_processed_dataset.replace(self.GroupByValueDict[group_id],dataset_prefix,1)
                    else:
                        stripped_dataset = input_processed_dataset.split("-")[1:]
                        new_dataset = dataset_prefix+'-'+'-'.join(stripped_dataset)
                
                    self.br.back()

                    ## remove leading &nbsp and # from task
                    task = link.text.replace('#','').decode('utf-8').strip()

                    infoDict = {}
                
                    infoDict["primaryDataset"] = input_primary_dataset
                    infoDict["processedDataset"] = input_processed_dataset
                    infoDict["outputDataset"] = new_dataset
                    infoDict["physicsGroup"] = self.GroupByValueDict[group_id]
                    infoDict["inputDBSURL"] = dbs_url

                    # close the request if deprecated release was used
                    try:
                        infoDict["cmsswRelease"] = self.ReleaseByValueDict[release_id[0]]
                    except:
                        if len(self.ReleaseByValueDict)>0 and RequestStatusByValueDict[request_status_id[0]] != "Closed":
                            msg = "Your request is not valid anymore, since the given CMSSW release is deprecated. If your request should be still processed, please reopen the request and update the CMSSW release to a more recent *working* release.\n"
                            msg+= "\n"
                            msg+= "Thanks,\n"
                            msg+= "Your StoreResults team"
                            self.closeRequest(task,msg)
            
                    
                    #Fill json file, if status is done
                    if self.StatusByValueDict[status_id[0]]=='Done' and RequestStatusByValueDict[request_status_id[0]] != "Closed":
                        self.writeJSONFile(task, infoDict)

                    infoDict["task"] = int(task)
                    infoDict["ticketStatus"] = self.StatusByValueDict[status_id[0]]
                    infoDict["assignedTo"] = AssignedToByValueDict[assignedTo_id[0]]
Author: giffels, Project: PRODAGENT, Lines: 69, Source file: RequestQuery.py

Example 11: str

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
# login successful, home page redirect
print "\n***", rsp.geturl()
print "Logged in properly on home page; click Account link"
assert rsp.geturl() == "http://us.pycon.org/2011/home/", rsp.geturl()
page = rsp.read()
assert "Logout" in page, "Logout not in page"
rsp = br.follow_link(text_regex="Account")

# account page
print "\n***", rsp.geturl()
print "Email address parseable on Account page; go back"
assert rsp.geturl() == "http://us.pycon.org/2011/account/email/", rsp.geturl()
page = rsp.read()
assert "Email Addresses" in page, "Missing email addresses"
print "    Primary e-mail: %r" % str(BS(page).find("table").find("tr").find("td").find("b").string)
rsp = br.back()

# back to home page
print "\n***", rsp.geturl()
print "Back works, on home page again; click Logout link"
assert rsp.geturl() == "http://us.pycon.org/2011/home/", rsp.geturl()
rsp = br.follow_link(url_regex="logout")

# logout page
print "\n***", rsp.geturl()
print "Confirm on Logout page and Log in link at the top"
assert rsp.geturl() == "http://us.pycon.org/2011/account/logout/", rsp.geturl()
page = rsp.read()
assert "Log in" in page, "Log in not in page"
print "\n*** DONE"
Author: trenton3983, Project: Core_Python_Applictions_Programming, Lines: 32, Source file: mech.py

Example 12: RegPublDownloader

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
class RegPublDownloader(LegalSource.Downloader):
    
    def __init__(self,baseDir="data"):
        self.dir = baseDir + "/regpubl/downloaded"
        if not os.path.exists(self.dir):
            Util.mkdir(self.dir)
        self.config = ConfigObj("%s/%s.ini" % (self.dir, __moduledir__))

        # super() raises "super() argument 1 must be type, not classobj"
        # here because LegalSource.Downloader is an old-style class
        # super(RegPublDownloader,self).__init__()
        self.browser = Browser()
    
    def DownloadAll(self):
        # we use mechanize instead of our own Robot class to list
        # available documents since we can't get the POST/cookie based
        # search to work.
        doctype = '160'
        log.info(u'Selecting documents of type %s' % doctype)
        self.browser.open("http://www.regeringen.se/sb/d/108/action/browse/c/%s" % doctype)
        log.info(u'Posting search form')
        self.browser.select_form(nr=1)
        self.browser.submit()

        pagecnt = 1
        done = False
        while not done:
            log.info(u'Result page #%s' % pagecnt)
            for l in self.browser.links(url_regex=r'/sb/d/108/a/\d+'):
                self._downloadSingle(l.absolute_url)
                self.browser.back()
            try:
                self.browser.find_link(text='N\xe4sta sida')
                self.browser.follow_link(text='N\xe4sta sida')
            except LinkNotFoundError:
                log.info(u'No next page link found, this was the last page')
                done = True
            pagecnt += 1
        self.config['last_update'] = datetime.date.today()    
        self.config.write()
        
    def DownloadNew(self):
        if 'last_update' in self.config:
            then = datetime.datetime.strptime(self.config['last_update'], '%Y-%m-%d')
        else:
            # assume last update was more than a year ago
            # note: timedelta(-367) puts `then` in the future, so
            # (now - then).days is negative and the full-query branch below
            # always runs; the two dated queries are unimplemented stubs
            then = datetime.datetime.now() - datetime.timedelta(-367)

        now = datetime.datetime.now()
        if (now - then).days > 30:
            pass
            # post a "last 30 days" query
        elif (now - then).days > 365:
            pass
            # post a "last 12 months" query
        else:
            # post a full query
            self.DownloadAll()        
        
    def _downloadSingle(self,url):
        docid = re.match(r'http://www.regeringen.se/sb/d/108/a/(\d+)', url).group(1)

        fname = "%s/%s/index.html" % (self.dir, docid)
        log.info(u'    Loading docidx %s' % url)
        self.browser.open(url)
        if not os.path.exists(fname):
            Util.ensureDir(fname)
            self.browser.retrieve(url,fname)
        
        for l in self.browser.links(url_regex=r'/download/(\w+\.pdf).*'):
            filename = re.match(r'http://www.regeringen.se/download/(\w+\.pdf).*',l.absolute_url).group(1)
            # note; the url goes to a redirect script; however that
            # part of the URL tree (/download/*) is off-limits for
            # robots. But we can figure out the actual URL anyway!
            if len(docid) > 4:
                path = "c6/%02d/%s/%s" % (int(docid[:-4]),docid[-4:-2],docid[-2:])
            else:
                path = "c4/%02d/%s" % (int(docid[:-2]),docid[-2:])
            fileurl = "http://regeringen.se/content/1/%s/%s" % (path,filename)
            
            df = "%s/%s/%s" % (self.dir,docid, filename)
            if not os.path.exists(df):
                log.info(u'        Downloading %s' % (fileurl))
                self.browser.retrieve(fileurl, df)
            else:
                log.info(u'        Already downloaded %s' % (fileurl))
Author: staffanm, Project: legacy.lagen.nu, Lines: 87, Source file: RegPubl.py

Example 13: open

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
			flag = 1
	else:
		if '</SELECT>' not in line:
			x = line.split('"')
			try:
				classtype.write(x[1])
				classtype.write('\n')
				class_type.append(x[1])
			except:
				break

#make sure the directory for this semester exists
if not os.path.exists(os.getcwd()+"/html/%s" % semester):
	os.makedirs(os.getcwd()+"/html/%s" % semester)

for c in class_type:
	time.sleep(5)
	f = ''.join(c)
	path=os.getcwd()+"/html/%s/%s.html" % (semester, f.replace('&', ''))
	temp = open(path, "w")
	browser.select_form(nr=0)
	item = browser.find_control(id="subj_id").get("%s" % c)
	item.selected = True
	response = browser.submit()
	content = response.read()
	temp.write(content)
	print "Wrote %s" % path
	temp.close()
	time.sleep(5)
	browser.back()
Author: fengyuhan, Project: school-assignments, Lines: 32, Source file: getClassBySemester.py

Example 14: attributes

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import back [as alias]
assert br.viewing_html()
print br.title()
print br.geturl()
#print br.info()  # headers
#print br.read()  # body
#br.close()  # (shown for clarity; in fact Browser does this for you)

br.select_form(name="vb_login_username=User Name")
# Browser passes through unknown attributes (including methods)
# to the selected HTMLForm (from ClientForm).
br["vb_login_username"] = ["sleven"]  # (the method here is __setitem__)
response2 = br.submit()  # submit current form

# print currently selected form (don't call .submit() on this, use br.submit())
print br.form

response3 = br.back()  # back to cheese shop (same data as response1)
# the history mechanism returns cached response objects
# we can still use the response, even though we closed it:
response3.seek(0)
response3.read()
response4 = br.reload()  # fetches from server

for form in br.forms():
    print form
# .links() optionally accepts the keyword args of .follow_/.find_link()
for link in br.links(url_regex="python.org"):
    print link
    br.follow_link(link)  # takes EITHER Link instance OR keyword args
    br.back()
Author: sleven, Project: ProxeScrape, Lines: 32, Source file: logintest2.py


Note: The mechanize.Browser.back examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright in the code remains with the original authors, and any distribution or use must follow the corresponding project's license. Do not reproduce without permission.