This article collects typical usage examples of the URLError exception class from Python's six.moves.urllib.error. If you are unsure what error.URLError does, how to use it, or what real code that uses it looks like, the hand-picked examples below should help. You can also explore the module it lives in, six.moves.urllib.error, for related functionality.
A total of 15 code examples using error.URLError are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code examples.
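Before looking at the individual examples, here is a minimal sketch of the pattern they all share: import the exception through six.moves so the same code runs unchanged on Python 2 and 3, and catch HTTPError before URLError (HTTPError is a subclass of URLError). The fetch helper below is illustrative only and is not taken from any of the projects quoted here.

from six.moves.urllib import request as urlreq
from six.moves.urllib.error import HTTPError, URLError

def fetch(url, timeout=5):
    """Return the response body, or None if the URL cannot be fetched."""
    try:
        return urlreq.urlopen(url, timeout=timeout).read()
    except HTTPError as e:
        # The server responded, but with an error status code.
        print('HTTP error %d for %s' % (e.code, url))
    except URLError as e:
        # The server could not be reached at all (DNS failure, connection refused, ...).
        print('Failed to reach %s: %s' % (url, e.reason))
    return None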
Example 1: download
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def download(self):
    """Downloads the archive from one of the mirrors"""
    if not os.path.exists(self.archive_path):
        for turn in xrange(self.trials_404):
            for i, link in enumerate(self.download_links()):
                try:
                    return self.do_download(link, self.archive_path)
                except HTTPError as e:
                    if e.code != 404:
                        raise
                    print("HTTP 404 while trying to get the archive using link"
                          " '%s' (trial %d/%d)" % (link, turn+1, self.trials_404))
                except URLError as e:
                    print("Error while trying to get the archive using link"
                          " '%s' (trial %d/%d)" % (link, turn+1, self.trials_404))
                    raise e
Example 2: _is_pingable
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def _is_pingable(self, mgmt_ip='', retry=5, timeout=5, port=80, **kwargs):
    """Checks whether the server is reachable by using urllib.

    Waits for connectivity for `timeout` seconds,
    and if connection refused, it will retry `retry` times.

    :param mgmt_ip: IP to check
    :param retry: times to reconnect if connection refused
    :param timeout: seconds to wait for connection
    :param port: port number to check connectivity
    :return: True if reachable, the string 'failure' otherwise
    """
    url = 'http://' + mgmt_ip + ':' + str(port)
    if netaddr.valid_ipv6(mgmt_ip):
        url = 'http://[' + mgmt_ip + ']:' + str(port)
    for retry_index in range(int(retry)):
        try:
            urlreq.urlopen(url, timeout=timeout)
            return True
        except urlerr.URLError:
            LOG.warning('Unable to reach to the url %s', url)
    return 'failure'
Example 3: DetectGce
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def DetectGce():
    """Determine whether or not we're running on GCE.

    This is based on:
    https://cloud.google.com/compute/docs/metadata#runninggce

    Returns:
        True iff we're running on a GCE instance.
    """
    metadata_url = 'http://{}'.format(
        os.environ.get('GCE_METADATA_ROOT', 'metadata.google.internal'))
    try:
        o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open(
            urllib_request.Request(
                metadata_url, headers={'Metadata-Flavor': 'Google'}))
    except urllib_error.URLError:
        return False
    return (o.getcode() == http_client.OK and
            o.headers.get('metadata-flavor') == 'Google')
Example 4: FetchDiscoveryDoc
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def FetchDiscoveryDoc(discovery_url, retries=5):
    """Fetch the discovery document at the given url."""
    discovery_urls = _NormalizeDiscoveryUrls(discovery_url)
    discovery_doc = None
    last_exception = None
    for url in discovery_urls:
        for _ in range(retries):
            try:
                content = _GetURLContent(url)
                if isinstance(content, bytes):
                    content = content.decode('utf8')
                discovery_doc = json.loads(content)
                if discovery_doc:
                    return discovery_doc
            except (urllib_error.HTTPError, urllib_error.URLError) as e:
                logging.info(
                    'Attempting to fetch discovery doc again after "%s"', e)
                last_exception = e
    if discovery_doc is None:
        raise CommunicationError(
            'Could not find discovery doc at any of %s: %s' % (
                discovery_urls, last_exception))
Example 5: run
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def run(self):
    print('Polyglot Downloader')
    while True:
        self._simple_interactive_menu(
            'd) Download', 'l) List', ' u) Update', 'c) Config', 'h) Help', 'q) Quit')
        user_input = unicode(input('Downloader> ').strip())
        if not user_input: print(); continue
        command = user_input.lower().split()[0]
        args = user_input.split()[1:]
        try:
            if command == 'l':
                print()
                self._ds.list(self._ds.download_dir, header=False,
                              more_prompt=True)
            elif command == 'h':
                self._simple_interactive_help()
            elif command == 'c':
                self._simple_interactive_config()
            elif command in ('q', 'x'):
                return
            elif command == 'd':
                self._simple_interactive_download(args)
            elif command == 'u':
                self._simple_interactive_update()
            else:
                print('Command %r unrecognized' % user_input)
        except HTTPError as e:
            print('Error reading from server: %s' % e)
        except URLError as e:
            print('Error connecting to server: %s' % e.reason)
        # try checking if user_input is a package name, &
        # downloading it?
        print()
Example 6: pdb_downloader_and_metadata
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
    """Download ALL mapped experimental structures to the protein structures directory.

    Args:
        outdir (str): Path to output directory, if protein structures directory not set
            or other output directory is desired
        pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
        force_rerun (bool): If files should be re-downloaded if they already exist

    Returns:
        list: List of PDB IDs that were downloaded

    """
    if not outdir:
        outdir = self.structure_dir
        if not outdir:
            raise ValueError('Output directory must be specified')
    if not pdb_file_type:
        pdb_file_type = self.pdb_file_type

    # Check if we have any PDBs
    if self.num_structures_experimental == 0:
        log.debug('{}: no structures available - nothing will be downloaded'.format(self.id))
        return

    downloaded_pdb_ids = []
    # Download the PDBs
    for s in self.get_experimental_structures():
        log.debug('{}: downloading structure file from the PDB...'.format(s.id))
        try:
            s.download_structure_file(outdir=outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)
            downloaded_pdb_ids.append(s.id)
        except URLError:
            log.error('{}: PDB not available to download'.format(s.id))

    return downloaded_pdb_ids
Example 7: read_url_content
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def read_url_content(url):
    try:
        content = request.urlopen(url).read()
    except error.URLError:
        raise exc.CommandError(_('Could not fetch contents for %s') % url)

    if content:
        try:
            content.decode('utf-8')
        except ValueError:
            content = base64.encodestring(content)
    return content
Example 8: process_template_path
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def process_template_path(template_path, object_request=None, existing=False):
    """Read template from template path.

    Attempt to read the template first as a file or URL. If that is
    unsuccessful, try again assuming the path refers to a template object.
    """
    try:
        return get_template_contents(template_file=template_path,
                                     existing=existing)
    except error.URLError:
        return get_template_contents(template_object=template_path,
                                     object_request=object_request,
                                     existing=existing)
Example 9: download
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def download(baseurl, parameters={}, headers={}):
    '''Download data from a URL and return it as a string.

    @param baseurl Url to download from (e.g. http://www.google.com)
    @param parameters Parameter dict to be encoded with url
    @param headers Headers dict to pass with Request
    @returns String of data from URL
    '''
    url = '?'.join([baseurl, urlencode(parameters)])
    log.debug('Downloading: ' + url)
    data = ""
    for _ in range(MAX_RETRIES):
        try:
            req = Request(url, headers=headers)
            req.add_header(USER_AGENT, USER_AGENT_STRING)
            response = urlopen(req)
            if six.PY2:
                data = response.read()
            else:
                data = response.read().decode('utf-8')
            response.close()
            break
        except Exception as err:
            if not isinstance(err, URLError):
                log.debug("Error %s during HTTP Request, abort", repr(err))
                raise  # propagate non-URLError
            log.debug("Error %s during HTTP Request, retrying", repr(err))
    else:
        # every retry failed with URLError; re-raise the last one
        # (a bare raise here relies on Python 2 exception semantics)
        raise
    return data
Example 10: read_geonames_csv
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def read_geonames_csv():
    print("Downloading geoname data from: " + GEONAMES_ZIP_URL)
    try:
        url = request.urlopen(GEONAMES_ZIP_URL)
    except URLError:
        print("If you are operating behind a firewall, try setting the HTTP_PROXY/HTTPS_PROXY environment variables.")
        raise
    zipfile = ZipFile(BytesIO(url.read()))
    print("Download complete")
    # Loading geonames data may cause errors without setting csv.field_size_limit:
    if sys.platform == "win32":
        max_c_long_on_windows = (2**32 // 2) - 1  # integer division; field_size_limit requires an int
        csv.field_size_limit(max_c_long_on_windows)
    else:
        csv.field_size_limit(sys.maxint if six.PY2 else six.MAXSIZE)
    with zipfile.open('allCountries.txt') as f:
        reader = unicodecsv.DictReader(f,
                                       fieldnames=[
                                           k for k, v in geonames_field_mappings],
                                       encoding='utf-8',
                                       delimiter='\t',
                                       quoting=csv.QUOTE_NONE)
        for d in reader:
            d['population'] = parse_number(d['population'], 0)
            d['latitude'] = parse_number(d['latitude'], 0)
            d['longitude'] = parse_number(d['longitude'], 0)
            if len(d['alternatenames']) > 0:
                d['alternatenames'] = d['alternatenames'].split(',')
            else:
                d['alternatenames'] = []
            yield d
Example 11: test_check_connectivity
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def test_check_connectivity(self):
    with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
            as r, mock.patch('six.moves.urllib.request.urlopen') as patch:
        r.side_effect = MockRestore(
            {'historyfile': dict(), 'identityfile': dict()})
        farmer = Farmer(self.test_args)
        patch.side_effect = URLError('Problem')
        with self.assertRaises(DownstreamError):
            farmer.check_connectivity()

    with mock.patch('six.moves.urllib.request.urlopen') as patch:
        farmer.check_connectivity()
        self.assertTrue(patch.called)
Example 12: runningOnEC2
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def runningOnEC2():
    def file_begins_with(path, prefix):
        with open(path) as f:
            return f.read(len(prefix)) == prefix

    hv_uuid_path = '/sys/hypervisor/uuid'
    if os.path.exists(hv_uuid_path) and file_begins_with(hv_uuid_path, 'ec2'):
        return True
    # Some instances do not have the /sys/hypervisor/uuid file, so check the identity document instead.
    # See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
    try:
        urlopen('http://169.254.169.254/latest/dynamic/instance-identity/document', timeout=1)
        return True
    except URLError:
        return False
Example 13: wait_api_port_ready
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def wait_api_port_ready(api_port, host='127.0.0.1'):
    """Wait until an HTTP service becomes available

    :param api_port: api service port
    :type api_port: integer
    :param host: host running the service (default: 127.0.0.1)
    :type host: string
    :return boolean
    """
    log = logging.getLogger(__name__ + ".wait_api_port_ready")
    urlopen_timeout = 1
    max_retries = 30
    count = 0
    while count < max_retries:
        time.sleep(1)
        count += 1
        try:
            request.urlopen(
                "http://%s:%s/" % (host, api_port), timeout=urlopen_timeout)
            return False
        except url_error.HTTPError as he:
            if he.code == 300:
                return True
            pass
        except url_error.URLError:
            pass
        except socket.timeout:
            log.warning(
                "Timeout at attempt {} of {} after {}s waiting for API port..."
                .format(count, max_retries, urlopen_timeout))
            pass
    raise RuntimeError(
        "wait_api_port_ready: Max retries {} reached".format(max_retries))
Example 14: test_monitor_call_for_failure
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def test_monitor_call_for_failure(self, mock_urlopen):
    mock_urlopen.side_effect = urlerr.URLError("MOCK Error")
    test_vnf = {}
    test_kwargs = {
        'mgmt_ip': 'a.b.c.d'
    }
    monitor_return = self.monitor_http_ping.monitor_call(test_vnf,
                                                         test_kwargs)
    self.assertEqual('failure', monitor_return)
Example 15: site_reachable
# Required import: from six.moves.urllib import error [as alias]
# Or: from six.moves.urllib.error import URLError [as alias]
def site_reachable(url, timeout=3):
    """Checks if the given URL is accessible."""
    try:
        urlopen(url, timeout=timeout)
    except (URLError, HTTPError):
        return False
    return True

# Create lazily evaluated, cached site checks for JSOC and KIS.
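The trailing comment in Example 15 points at building lazily evaluated, cached site checks on top of site_reachable. A minimal sketch of that idea is shown below; the memo dict, function name, and placeholder URL are illustrative and not taken from the original project.

# Probe each site at most once, and only when the result is first requested.
_site_checks = {}

def cached_site_reachable(url, timeout=3):
    """Evaluate site_reachable lazily and remember the result per URL."""
    if url not in _site_checks:
        _site_checks[url] = site_reachable(url, timeout=timeout)
    return _site_checks[url]

# Hypothetical usage:
# is_up = cached_site_reachable('http://example.com')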