本文整理汇总了Python中mechanize.Browser.read方法的典型用法代码示例。如果您正苦于以下问题:Python Browser.read方法的具体用法?Python Browser.read怎么用?Python Browser.read使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mechanize.Browser
的用法示例。
在下文中一共展示了Browser.read方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: download_with_mech
# 需要导入模块: from mechanize import Browser [as 别名]
# 或者: from mechanize.Browser import read [as 别名]
def download_with_mech(email, destination, file):
'''
download data from nrao archive. Now it only works for filling the form. It cannot
submit by clicking "Get my data" buttom
'''
br = Browser()
br.set_handle_robots(False) # ignore robots
br.open(url)
br.select_form(nr=0)
br["PROJECT_CODE"] = "14A-425"
submit_response = br.submit(name = "SUBMIT", label = "Submit Query")
content = submit_response.read()
#print br.read()
'''redirect to the download page'''
br.select_form(name = "Form1")
br["EMAILADDR"] = "[email protected]" #replace by email
br["COPYFILEROOT"] = "/lustre/aoc/projects/fasttransients/moving" #replace by destination
br["CONVERT2FORMAT"] = ["SDM"]
achive_files = br.form.find_control(name = "FTPCHECKED")
for v in range(0, len(achive_files.items)):
# file name should be replaced by FILE
if "14A-425_sb29260830_1_000.56825.290659375" in str(achive_files.items[v]):
achive_files.items[v].selected = True
break
print str(br.read())
示例2: Browser
# 需要导入模块: from mechanize import Browser [as 别名]
# 或者: from mechanize.Browser import read [as 别名]
# MechanizeTests.py
import re
from mechanize import Browser
br = Browser()
br.open("http://www.google.com/")
print br.title()
print br.read()
示例3: Browser
# 需要导入模块: from mechanize import Browser [as 别名]
# 或者: from mechanize.Browser import read [as 别名]
'-c', '--critical', type=int, default=15,
help='critical threshold. If the datanode usage differs from average \
usage to more than this threshold, raise a critical. Defaults to 15.'
)
# Parse the command-line options (the parser is built above this fragment;
# it provides at least `namenode`, `port`, and a `--critical` threshold).
args = parser.parse_args()
# Get the live-datanode status page from the namenode web interface.
url = "http://%s:%d/dfsnodelist.jsp?whatNodes=LIVE" % (args.namenode, args.port)
try:
    page = Browser().open(url)
except IOError:
    # Nagios convention: exit status 2 signals CRITICAL.
    print 'CRITICAL: Cannot access namenode interface on %s:%d!' % (args.namenode, args.port)
    sys.exit(2)
# Parse the page, storing the {datanode: pct_usage} hash.
html = page.read()
soup = BeautifulSoup(html)
# Name cells and percent-used cells are assumed to appear in matching
# order in the table -- TODO confirm against the namenode page layout.
datanodes = soup.findAll('td', {'class' : 'name'})
pcused = soup.findAll('td', {'class' : 'pcused', 'align' : 'right'})
nodes_pct = {}
for (idx, node) in enumerate(datanodes):
    pct = float(pcused[idx].contents[0].strip())
    # The node's name is the text of the first <a> inside the name cell.
    node = datanodes[idx].findChildren('a')[0].contents[0].strip()
    nodes_pct[node] = pct
# Each node's variation against the average pct must stay under the threshold.
w_msg = ''
c_msg = ''
perfdata = ''
avg = 0
if len(nodes_pct) > 0: