本文整理汇总了Python中urllib.FancyURLopener类的典型用法代码示例。如果您正苦于以下问题:Python FancyURLopener类的具体用法?Python FancyURLopener怎么用?Python FancyURLopener使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了FancyURLopener类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: unicode_urlopen
def unicode_urlopen(url, accept_lang=None):
    """Return a *Unicode* file-like object for non-local documents.

    Caller must ensure that the URL points to non-binary data.  Pass an
    Accept-Language value to configure the FancyURLopener we use.

    :param url: URL of the textual document to fetch
    :param accept_lang: optional value for the Accept-Language header
    :return: StringIO wrapping the decoded page text
    """
    opener = FancyURLopener()
    if accept_lang:
        opener.addheader("Accept-Language", accept_lang)
    # urllib returns a bytes file-like object in both Python 2 and 3;
    # decode it and hand back a Unicode file-like object via StringIO.
    page = opener.open(url)
    content_type = page.headers['content-type']
    parts = content_type.split('charset=')
    if len(parts) > 1:
        # BUG FIX: the charset value may be followed by further
        # parameters ("; boundary=...") or wrapped in quotes, which made
        # .decode() fail; keep only the bare codec name.
        encoding = parts[-1].split(';')[0].strip().strip('"\'')
        text = page.read().decode(encoding)
    else:
        # No charset in the header: sniff the page itself, default UTF-8.
        raw = page.read()
        encoding = meta_encoding(raw) or 'utf8'
        text = raw.decode(encoding)
    return StringIO(text)
示例2: __init__
def __init__(self):
    # Opt out of certificate verification when this Python exposes
    # ssl._create_unverified_context (2.7.9+ / 3.4.3+); on older
    # versions the attribute is missing and we pass context=None,
    # exactly as the AttributeError fallback did.
    maker = getattr(ssl, '_create_unverified_context', None)
    context = maker() if maker is not None else None
    FancyURLopener.__init__(self, context=context)
示例3: POST
def POST(self):
# Stream deployment progress for an application to the client, contacting
# each of the app's configured deploy servers in turn.
# NOTE(review): indentation was lost when this snippet was extracted; the
# statements below are kept byte-identical and only comments were added.
# disable nginx buffering so each yielded line reaches the client immediately
web.header('X-Accel-Buffering', 'no')
i = web.input(fast=False)
# Get the app's deploy-server list; if none exists, fall back to the
# single server 'deploy' and persist that choice as the app option.
servers = get_servers(i.app_name)
if not servers:
servers = ['deploy']
save_app_option(i.app_name, 'deploy_servers', 'deploy')
# Each yielded chunk is "<log-level>:<rendered message>".
yield "%d:%s" % (logging.INFO, render_ok("Application allowed to deploy those servers"))
yield "%d:%s" % (logging.INFO, render_ok(','.join(servers)))
servers = escape_servers(servers)
result = {}
data = {'app_name': i.app_name, 'app_url': i.app_url}
for server in servers:
url = SUFFIX % server
try:
# POST the deploy request to this server and relay its output
# to the client line by line as it arrives.
opener = FancyURLopener()
f = opener.open(url, urlencode(data))
line = '' # to avoid NameError for line if f has no output at all.
for line in iter(f.readline, ''):
logger.info(line)
yield line
# After the stream ends, `line` holds the server's last output line;
# NOTE(review): presumably the last line is expected to state the
# outcome — a line containing 'failed' is still recorded as
# 'Succeeded' here, which looks suspicious; confirm against the
# deploy server's protocol.
if not any(word in line for word in ['succeeded', 'failed']):
result[server] = 'Failed'
else:
result[server] = 'Succeeded'
# Python 2 except syntax kept as in the original snippet.
except Exception, e:
yield "%d:%s" % (logging.ERROR, render_err(str(e)))
result[server] = 'Failed'
示例4: get
def get(self, url, headers=None):
    """Fetch `url`, optionally adding extra request headers, store the
    open response object on self.req, and return self for chaining."""
    opener = FancyURLopener()
    # An absent/empty mapping simply adds no headers.
    for name, value in (headers or {}).items():
        opener.addheader(name, value)
    self.req = opener.open(url)
    return self
示例5: ensureFileLocal
def ensureFileLocal(self, inFilePathOrURL):
    '''
    Ensure a local copy of inFilePathOrURL, recording it in
    self.localFilePath.  Uncompressed files (local or remote) are left
    where they are.  A compressed 'file://' URL is unwrapped to its
    filesystem path.  A compressed remote file is downloaded into a
    temporary file, and self.deleteTempFile is set to True so the
    caller knows to clean it up.
    :param inFilePathOrURL: file path or URL to file
    :type inFilePathOrURL: String
    '''
    self.localFilePath = inFilePathOrURL
    self.deleteTempFile = False
    if self.compression == COMPRESSION_TYPE.NO_COMPRESSION:
        return
    # Compressed file: a file:// URL is already local, just unwrap it.
    parsed = urlparse(inFilePathOrURL)
    if parsed.scheme == 'file':
        self.localFilePath = parsed.path
        return
    # Compressed and remote: retrieve it (raises IOError if the URL
    # does not exist) and remember to delete the temp copy later.
    self.localFilePath = FancyURLopener().retrieve(inFilePathOrURL)[0]
    self.deleteTempFile = True
示例6: getNaturalRandom
def getNaturalRandom(self, min=1, max=49, nbNumbers=6):
    """Draw nbNumbers integers in [min, max] from random.org, retrying
    until all returned numbers are distinct; return them sorted."""
    while True:
        opener = FancyURLopener()
        reply = opener.open("http://www.random.org/integers/?num=%s&min=%s&max=%s&col=%s&base=10&format=plain&rnd=new" % (nbNumbers, min, max, nbNumbers))
        # One tab-separated line of numbers, e.g. "3\t17\t22\t...\n".
        drawn = reply.readlines()[0].rstrip('\n').split('\t')
        if len(set(drawn)) == len(drawn):
            return sorted(int(n) for n in drawn)
示例7: utOpen
def utOpen(file):
    """Open `file`, which may be a local path or an http(s) URL, and
    return the open file-like object (binary read/write for local files)."""
    # BUG FIX: the old test (`'http' in file`) wrongly treated any local
    # path containing the substring "http" as a URL; check the scheme
    # prefix instead.
    if file.startswith(('http://', 'https://')):
        return FancyURLopener().open(file)
    return open(file, 'rb+')
示例8: utRead
def utRead(file):
    """Open file on local or remote system.

    `file` may be a local path or an http(s) URL; returns the open
    file-like object (binary read/write for local files)."""
    # BUG FIX (kept consistent with utOpen): `'http' in file` wrongly
    # treated any local path containing "http" as a URL; check the
    # scheme prefix instead.
    if file.startswith(('http://', 'https://')):
        return FancyURLopener().open(file)
    return open(file, 'rb+')
示例9: http_error_default
def http_error_default(self, url, fp, errcode, errmsg, headers):
    """Handle otherwise-unclassified HTTP errors: raise HTTPError for
    404 so callers can catch missing resources, and defer everything
    else to the base-class behaviour."""
    if errcode == 404:
        raise urllib2.HTTPError(url, errcode, errmsg, headers, fp)
    else:
        # BUG FIX: the base-class method was called unbound without
        # `self`, which raised a TypeError instead of handling the error.
        return FancyURLopener.http_error_default(
            self,
            url,
            fp,
            errcode,
            errmsg,
            headers)
示例10: __load_photo_page
def __load_photo_page(self, photo_id):
# Fetch the HTML page for `photo_id`, using the module-level
# photo_page_template to build the link.
# NOTE(review): indentation was lost in extraction and the example looks
# truncated — `body` is read but never returned in the visible code;
# confirm against the original source before relying on this snippet.
opener = FancyURLopener()
res = None
body = None
link = photo_page_template % photo_id
try:
res = opener.open(link)
body = res.read()
# Network/HTTP failures surface as IOError (Python 2 except syntax).
except IOError, error:
print "[!] {0}".format(error.strerror)
示例11: fill_hot_cache
def fill_hot_cache( self ):
    """Fetch fresh data from the configured URL and cache it.

    Builds the query URL from self.url and self.query, downloads the
    page, stores the raw body in self.hot_cache and returns it.
    """
    # Removed the unused local `bases = ['a','g','c','t']` (dead code).
    url = self.url + urlencode( self.query )
    url_opener = FancyURLopener( )
    fh = url_opener.open( url )
    # NOTE(review): this extractor wraps fh but its result is never used;
    # kept in case construction has side effects — confirm and drop if not.
    hot_rand_handle = SGMLExtractorHandle( fh, [ 'pre', ] )
    self.hot_cache = fh.read()
    fh.close()
    return self.hot_cache
示例12: __init__
def __init__(self, *args, **kwargs):
    """Set up the opener with a browser User-agent and the IMDb cookie."""
    self._last_url = u''
    FancyURLopener.__init__(self, *args, **kwargs)
    # IMDb's web server dislikes urllib-based programs, so present
    # ourselves as Mozilla on every request.
    self.set_header('User-agent', 'Mozilla/5.0')
    # This class also performs "Exact Primary [Title|Name]" searches,
    # so the cookie is installed by default.
    self.set_header('Cookie', 'id=%s; uu=%s' % (_cookie_id, _cookie_uu))
示例13: download
def download (self, download_dir):
result = path.join (download_dir, self.package_basename)
if path.exists (result):
print 'Found install', self.package_basename
else:
dir_util.mkpath (download_dir)
url = "http://www.eiffel-loop.com/download/" + self.package_basename
print 'Downloading:', url
web = FancyURLopener ()
web.retrieve (url, result, display_progress)
return result
示例14: __init__
def __init__(self, ftpproxy=''):
    """RebaseUpdate([ftpproxy]) -> new RebaseUpdate instance.

    If ftpproxy is not given, RebaseUpdate uses the corresponding
    variable from RanaConfig.  ftpproxy is the proxy to use, if any.
    """
    # Prefer the explicit argument, then the RanaConfig fallback.
    proxy_setting = ftpproxy or ftp_proxy
    if not Rebase_name:
        raise FtpNameError('Rebase')
    # With no proxy at all, hand the base class an empty mapping.
    FancyURLopener.__init__(
        self, {'ftp': proxy_setting} if proxy_setting else {})
示例15: __init__
def __init__(self, *args, **kwargs):
    """Initialize the opener, faking a Mozilla User-Agent (IMDb's web
    server dislikes urllib-based programs) and installing the cookie
    used for "Exact Primary [Title|Name]" searches."""
    self._last_url = u""
    FancyURLopener.__init__(self, *args, **kwargs)
    # Drop any User-Agent header the base class installed, whatever its
    # capitalization, before setting our own.
    for variant in ("User-Agent", "User-agent", "user-agent"):
        self.del_header(variant)
    self.set_header("User-Agent", "Mozilla/5.0")
    self.set_header("Accept-Language", "en-us,en;q=0.5")
    # The cookie is set by default for the exact-search use case.
    self.set_header("Cookie", "uu=%s; id=%s" % (_cookie_uu, _cookie_id))