本文整理汇总了Python中urllib.error.URLError方法的典型用法代码示例。如果您正苦于以下问题:Python error.URLError方法的具体用法?Python error.URLError怎么用?Python error.URLError使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块urllib.error的用法示例。
在下文中一共展示了error.URLError方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_plugin_config
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def get_plugin_config(config_uri):
    """Download or open a YAML configuration file and return its plugins.

    Args:
        config_uri: An http(s) URL or a local file path pointing to the
            YAML configuration.

    Returns:
        The ``plugins`` entry of the parsed YAML document, or ``None`` when
        the resource could not be fetched or parsed.
    """
    # Try to open the URI as a URL or fall back to opening local file
    try:
        config_uri_parsed = urlparse(config_uri)
        if config_uri_parsed.scheme in ['https', 'http']:
            url = urlopen(config_uri)
            yaml_data = url.read()
        else:
            with open(config_uri, 'r') as file_data:
                yaml_data = file_data.read()
    except URLError as e:
        print(e)
        # Bail out: without data, yaml.safe_load below would hit an
        # unbound `yaml_data` and raise NameError.
        return None
    # Parse the YAML configuration
    try:
        plugin_data = yaml.safe_load(yaml_data)
        return plugin_data['plugins']
    except yaml.YAMLError as e:
        print(e)
示例2: get_sdf
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def get_sdf(self):
    """Return the SDF (structure-data file) text for this PubChem record.

    The file is fetched from the PubChem REST service on first use and
    cached on the instance; later calls return the cached text.

    Raises:
        ValidationError: when the download fails (e.g. no 3D record exists).
    """
    from urllib.request import urlopen, Request
    from urllib.parse import quote
    from urllib.error import URLError
    # Serve the cached copy when a previous call already downloaded it.
    if len(self.dataSDF) != 0:
        return self.dataSDF
    target = "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{}/SDF?record_type=3d".format(
        quote(str(self.cid))
    )
    request = Request(target, headers={"Accept": "chemical/x-mdl-sdfile"})
    try:
        self.dataSDF = urlopen(request).read().decode("utf-8")
    except URLError as err:
        msg = "Unable to open\n\n%s\n\ndue to the error\n\n%s\n\n" % (target, err)
        msg += "It is possible that 3D information does not exist for this molecule in the PubChem database\n"
        print(msg)
        raise ValidationError(msg)
    return self.dataSDF
示例3: _http_request
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def _http_request(url, headers=None, time_out=10):
    """Perform an HTTP request and return the open response object.

    On a network/HTTP error the user is asked via a yes/no dialog whether
    to retry; returns None when the user declines.

    NOTE(review): HTTPError is raised here with a ``response=`` keyword,
    which ``urllib.error.HTTPError`` does not accept -- presumably HTTPError
    is a project-local class; confirm against this module's imports.
    """
    log(0, 'Request URL: {url}', url=url)
    try:
        if headers:
            request = Request(url, headers=headers)
        else:
            request = Request(url)
        req = urlopen(request, timeout=time_out)
        log(0, 'Response code: {code}', code=req.getcode())
        # urlopen normally raises HTTPError for 4xx/5xx itself; this guards
        # openers that return the error response instead of raising.
        if 400 <= req.getcode() < 600:
            raise HTTPError('HTTP %s Error for url: %s' % (req.getcode(), url), response=req)
    except (HTTPError, URLError) as err:
        log(2, 'Download failed with error {}'.format(err))
        if yesno_dialog(localize(30004), '{line1}\n{line2}'.format(line1=localize(30063), line2=localize(30065))):  # Internet down, try again?
            # Recursive retry with the same arguments.
            return _http_request(url, headers, time_out)
        return None
    return req
示例4: _checkout
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def _checkout(self, local_dir):
    """Download a tarball of a GitHub repo and unpack it into *local_dir*.

    Reads user/repo/version from self.config (version defaults to 'master'),
    downloads the archive, extracts it next to *local_dir*, and renames the
    extracted top-level directory to the final name.

    Raises:
        RuntimeError: when the download fails.

    NOTE(review): ``urllib.urlretrieve`` is the Python 2 spelling; under
    Python 3 this requires the name ``urllib`` to be bound to
    ``urllib.request`` at import time -- confirm this module's imports.
    """
    user = self.config.get("user")
    repo = self.config.get("repo")
    version = self.config.get("version", "master")
    # TODO : Sanitize URL
    url = URL.format(user=user, repo=repo, version=version)
    logger.info("Downloading {}/{} from github".format(user, repo))
    try:
        (filename, headers) = urllib.urlretrieve(url)
    except URLError as e:
        raise RuntimeError("Failed to download '{}'. '{}'".format(url, e.reason))
    t = tarfile.open(filename)
    (cache_root, core) = os.path.split(local_dir)
    # Ugly hack to get the first part of the directory name of the extracted files
    tmp = t.getnames()[0]
    t.extractall(cache_root)
    # Move the extracted tree to the expected cache location.
    os.rename(os.path.join(cache_root, tmp), os.path.join(cache_root, core))
示例5: validate_
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def validate_(self, value, context=None):
    """Validate that *value* is a well-formed URL, optionally probing it.

    Raises:
        StopValidationError: when the URL is malformed, or -- when
            self.verify_exists is set -- when fetching it fails.
    """
    url = self.valid_url(value)
    if not url:
        raise StopValidationError(self.messages['invalid_url'])
    if self.verify_exists:
        # Rebuild a normalized, percent-quoted URL from the parsed parts
        # before probing it with a real request.
        url_string = urlquote(urlunsplit((
            url['scheme'],
            (url['host6'] or url['host4'] or url['hostn_enc']) + ':' + (url['port'] or ''),
            url['path'],
            url['query'],
            url['frag'])
            ).encode('utf-8'), safe=VALID_CHAR_STRING)
        try:
            urlopen(url_string)
        except URLError:
            raise StopValidationError(self.messages['not_found'])
示例6: get_cf_ranges
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def get_cf_ranges(cf_url):
    """Fetch the AWS IP-ranges JSON at *cf_url* and return CloudFront CIDRs.

    Retries forever on URLError, pausing briefly between attempts so the
    endpoint is not hammered; any other error is logged and re-raised.

    Args:
        cf_url: URL of the AWS ip-ranges JSON document.

    Returns:
        List of ``ip_prefix`` strings whose ``service`` is CLOUDFRONT.
    """
    import time  # local import so this fix does not touch the file header
    response = None
    ranges = []
    while response is None:
        try:
            response = urlopen(cf_url)
        except URLError:
            print(' [?] Got URLError trying to get CloudFront IP ranges. Retrying...')
            time.sleep(1)  # avoid a busy retry loop
        except:
            print(' [?] Got an unexpected error trying to get CloudFront IP ranges. Exiting...')
            raise
    cf_data = json.load(response)
    for item in cf_data['prefixes']:
        service = item.get('service')
        if service == 'CLOUDFRONT':
            ranges.append(item.get('ip_prefix'))
    return ranges
# find more domains and correct for CloudFront
示例7: find_cf_issues
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def find_cf_issues(domains):
    """Return the subset of *domains* that look like dangling CloudFront hosts.

    A domain is flagged when the HTTP probe gets 403 'Bad request' and the
    HTTPS probe then fails with a TLS handshake error or the same 403.

    NOTE(review): ``e.fp.read()`` returns bytes on Python 3, so the
    ``'Bad request' in ...`` membership test presumably expects Python 2
    (or a decoding opener) -- confirm the target runtime.
    """
    error_domains = []
    for domain in domains:
        try:
            response = urlopen('http://' + domain)
        except HTTPError as e:
            if e.code == 403 and 'Bad request' in e.fp.read():
                try:
                    response = urlopen('https://' + domain)
                except URLError as e:
                    # NOTE(review): `.code` only exists on HTTPError; for a
                    # plain URLError the `or` must short-circuit on the
                    # handshake test or this raises AttributeError.
                    if 'handshake' in str(e).lower() or e.code == 403 and 'Bad request' in e.fp.read():
                        error_domains.append(domain)
                except:
                    pass
        except:
            pass
    return error_domains
# add a domain to CloudFront
示例8: start
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def start(self):
    """Launch the application in a child process and wait until it serves.

    Polls the server URL once per second, giving up after five attempts
    (~5 seconds maximum wait).
    """
    def _serve(app, host, port):
        # Runs inside the child process.
        app.run(host=host, port=port, use_reloader=False, threaded=True)

    self._process = multiprocessing.Process(
        target=_serve,
        args=(self.app, self.host, self.port),
    )
    self._process.start()
    # Poll until the server starts listening, at most ~5 seconds.
    attempts_left = 5
    while attempts_left > 0:
        time.sleep(1)
        try:
            urlopen(self.url())
        except URLError:
            attempts_left -= 1
        else:
            attempts_left = 0
示例9: wait_until_status
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def wait_until_status(url, status_code=200):
    """Pause the program until the given url returns the required status.

    Args:
        url (str): The url to query.
        status_code (int, optional): The required status code. Defaults to 200.
    """
    sleep_time = 1
    while True:
        try:
            if urlopen(url).getcode() == status_code:
                return
        except HTTPError as err:
            # Error statuses (4xx/5xx) surface as HTTPError; one of them
            # may still be exactly the status we are waiting for.
            if err.code == status_code:
                return
            LOGGER.debug('failed to read url: %s', str(err))
        except URLError as err:
            LOGGER.debug('failed to read url: %s', str(err))
        LOGGER.debug('Retrying in %s secs', sleep_time)
        time.sleep(sleep_time)
示例10: getLinks
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def getLinks(articleUrl):
    """Fetch a Wikipedia article and return its internal /wiki/ links.

    Returns None on an HTTP error; on a URLError the request is retried
    once after a sleep, with both outcomes written to the server log.
    """
    target = "http://en.wikipedia.org" + articleUrl
    try:
        page = urlopen(target)
    except HTTPError:
        ServerLog.writeLog("HTTPError")
        return None
    except URLError:
        ServerLog.writeLog("URLError")
        print("Sleeping!")
        time.sleep(URLERROR_SLEEP_TIME)
        page = urlopen(target)
    soup = BeautifulSoup(page, "lxml")
    body = soup.find("div", {"id":"bodyContent"})
    return body.findAll("a", href=re.compile("^(/wiki/)((?!:).)*$"))
# 抓取IP
示例11: getHistoryIPs
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def getHistoryIPs(pageUrl):
    """Return the set of anonymous-editor IPs on a Wikipedia history page.

    Returns None on an HTTP error; on a URLError the request is retried
    once after a sleep.
    """
    title = pageUrl.replace("/wiki/", "")
    historyUrl = "http://en.wikipedia.org/w/index.php?title="+title+"&action=history"
    print("history url:", historyUrl)
    # Throttle before every request.
    time.sleep(SLEEP_TIME)
    try:
        page = urlopen(historyUrl)
    except HTTPError:
        return None
    except URLError:
        print("Sleeping!")
        time.sleep(URLERROR_SLEEP_TIME)
        page = urlopen(historyUrl)
    soup = BeautifulSoup(page, "lxml")
    # Anonymous editors are rendered as mw-anonuserlink anchors whose text
    # is the editor's IP address.
    anon_links = soup.findAll("a", {"class":"mw-anonuserlink"})
    addresses = set()
    for link in anon_links:
        print(title+": "+link.get_text())
        addresses.add(link.get_text())
    return addresses  # set of IP address strings
# 得到所有IP的国家代号
示例12: getLinks
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def getLinks(articleUrl):
    '''
    Parse the article page and return its internal wiki links.
    '''
    full_url = "http://en.wikipedia.org" + articleUrl
    try:
        document = urlopen(full_url)
    except HTTPError:
        return None
    except URLError:
        # Back off once, then retry the same URL.
        print("Sleeping!")
        time.sleep(URLERROR_SLEEP_TIME)
        document = urlopen(full_url)
    parsed = BeautifulSoup(document, "lxml")
    content = parsed.find("div", {"id": "bodyContent"})
    return content.findAll("a", href=re.compile("^(/wiki/)((?!:).)*$"))
# 设置缓冲队列
示例13: getCountry
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def getCountry(ipAddress):
    """Look up the country code for *ipAddress* via the freegeoip service.

    On a URLError the request is retried once after a sleep; if the retry
    also fails -- or any other error occurs -- 'Unknown' is returned
    instead of letting the exception crash the caller.
    """
    try:
        response = urlopen("http://freegeoip.net/json/" +
                           ipAddress).read().decode('utf-8')
    except URLError:
        print("Sleeping!")
        time.sleep(URLERROR_SLEEP_TIME)
        try:
            # Retry once; previously a second failure propagated out of the
            # handler, bypassing the 'Unknown' fallback below.
            response = urlopen("http://freegeoip.net/json/" +
                               ipAddress).read().decode('utf-8')
        except Exception:
            return 'Unknown'
    except:
        return 'Unknown'
    responseJson = json.loads(response)
    return responseJson.get("country_code")  # country code, e.g. 'US'
示例14: download_http
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def download_http(url, timeout=60):
    """Download *url* over HTTP and return the raw response bytes.

    Every transport-level failure is normalized into error.ConnectionError
    carrying the url and a human-readable message.

    NOTE(review): ``URL(...)`` and ``error.ConnectionError`` are
    project-local names; confirm their semantics against this module's
    imports.
    """
    try:
        response = urllib.request.urlopen(URL(url), timeout=timeout).read()
    except urllib.error.URLError as e:
        # Dig out the most specific message available on the exception.
        try:
            msg = e.args[0].strerror
        except (AttributeError, IndexError):
            msg = (getattr(e, 'msg', None) or
                   getattr(e, 'strerror', None) or
                   'Failed')
        raise error.ConnectionError(url, msg)
    except socket.timeout:
        raise error.ConnectionError(url, 'Timed out')
    except http.client.HTTPException:
        raise error.ConnectionError(url, 'No HTTP response')
    except (OSError, IOError):
        raise error.ConnectionError(url, 'Unknown error')
    else:
        return response
示例15: check_ds_link
# 需要导入模块: from urllib import error [as 别名]
# 或者: from urllib.error import URLError [as 别名]
def check_ds_link(url):
    """HEAD-check a datasheet *url*, caching the result in datasheet_links.

    Stores the HTTP status code on success, or the error code (falling back
    to the error's string form) on failure. Hosts that reject HEAD with 405
    are probed again with GET.

    Returns:
        The cached status code / error string for *url*.
    """
    def _probe(method, timeout):
        # Issue one request with the given verb, recording status or error.
        request.get_method = lambda: method
        try:
            response = urlopen(request, timeout=timeout)
            datasheet_links[url] = response.getcode()
        except URLError as e:
            # HTTPError carries .code; a plain URLError does not.
            datasheet_links[url] = getattr(e, 'code', str(e))
        except Exception as e:
            datasheet_links[url] = str(e)

    if url not in datasheet_links:
        request = Request(url)
        request.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0")
        _probe('HEAD', 5)
        # Some 'special' hosts dont implement the HEAD method
        if datasheet_links[url] == 405:
            _probe('GET', 3)
    return datasheet_links[url]