This article collects typical usage examples of Python's rfc3987.parse method. If you are unsure what exactly rfc3987.parse does, how to use it, or where to find working examples, the curated code samples below may help. You can also read further about the rfc3987 module that the method belongs to.
The following presents 9 code examples of the rfc3987.parse method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
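For orientation before the examples: rfc3987.parse splits an IRI/URI string into a dict of components and raises ValueError when the string does not match the requested grammar rule. A minimal sketch of typical output (the sample URL is illustrative, not taken from the examples below):

import rfc3987

d = rfc3987.parse('http://example.com/path?q=1', rule='URI')
# d is a plain dict, e.g.:
# {'scheme': 'http', 'authority': 'example.com',
#  'path': '/path', 'query': 'q=1', 'fragment': None}
print(d['scheme'], d['authority'])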
Example 1: _apply_icon
# Required module: import rfc3987 [as alias]
# Or: from rfc3987 import parse [as alias]
def _apply_icon(app, icon):
    app_path = osx.path_for_app(app)
    if not app_path:
        raise AppMissingError("Application not found: '{0}'".format(app))
    try:
        # urlparse here is rfc3987.parse imported under an alias (see the
        # import hint above); it returns a dict, hence the key lookups below.
        components = urlparse(icon)
        if not components["scheme"] or components["scheme"] == "file":
            icon_path = components["path"]
        else:
            tmpdir = mkdtemp()
            icon_path = os.path.join(tmpdir,
                                     os.path.basename(components["path"]))
            print(tty.progress("Downloading {0} icon: {1}".format(app, icon)))
            curl(icon, icon_path)
    except ValueError:
        icon_path = icon
    osx.set_icon(app_path, os.path.expanduser(icon_path))
Example 2: ld_object
# Required module: import rfc3987 [as alias]
# Or: from rfc3987 import parse [as alias]
def ld_object(attribute_name, entity_id):
    out = entity_id
    try:
        d = parse(entity_id, rule='URI')
        scheme = d['scheme']
        if scheme not in ('urn', 'http', 'https'):
            raise ValueError
    except ValueError:
        # Not a usable URI: build an NGSI-LD URI from the attribute's
        # implied entity type (e.g. "refDevice" -> "Device").
        entity_type = ''
        if attribute_name.startswith('ref'):
            entity_type = attribute_name[3:]
        out = ngsild_uri(entity_type, entity_id)
    return out
# Do all the transformation work
Example 3: is_uri
# Required module: import rfc3987 [as alias]
# Or: from rfc3987 import parse [as alias]
def is_uri(instance):
    # Per the jsonschema format-checker convention, non-strings are
    # treated as valid so the check only constrains string instances.
    if not isinstance(instance, str_types):
        return True
    return rfc3987.parse(instance, rule="URI")
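For strings, the truthy dict returned by parse signals success, while the ValueError it raises on malformed input signals failure. A hedged usage sketch (the inputs are illustrative):

is_uri(42)                    # True: non-strings pass by convention
is_uri("http://example.com")  # truthy dict of URI components
is_uri("not a uri")           # raises ValueError; callers treat this as invalid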
Example 4: uri_validator
# Required module: import rfc3987 [as alias]
# Or: from rfc3987 import parse [as alias]
def uri_validator(value, **kwargs):
    try:
        parts = rfc3987.parse(value, rule='URI')
    except ValueError:
        raise ValidationError(MESSAGES['format']['invalid_uri'].format(value))
    if not parts['scheme'] or not parts['authority']:
        raise ValidationError(MESSAGES['format']['invalid_uri'].format(value))
Example 5: checker
# Required module: import rfc3987 [as alias]
# Or: from rfc3987 import parse [as alias]
def checker(url):
    '''
    Check whether the url is valid or not.
    '''
    try:
        parse(url)
        return True
    except ValueError:
        return False
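A quick illustration of checker's behavior (illustrative inputs). Note that parse is called here with its default rule (IRI_reference, per the rfc3987 documentation), so bare relative references also count as valid; passing rule='URI' would be stricter:

checker('https://example.com/page')  # True
checker('not a url')                 # False: spaces are not allowed in IRIs
checker('example')                   # True: parses as a relative reference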
Example 6: _uri
# Required module: import rfc3987 [as alias]
# Or: from rfc3987 import parse [as alias]
def _uri(s):
    # Accept only absolute http(s) URIs; parse raises ValueError on
    # malformed input, and non-http(s) schemes are rejected explicitly.
    if rfc3987.parse(s).get("scheme") in ["http", "https"]:
        return s
    raise ValueError
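Because _uri returns the value on success and raises ValueError otherwise, it fits naturally as an argparse type converter. A hedged sketch of that wiring (the --endpoint flag and parser setup are assumptions, not shown in the source):

import argparse

ap = argparse.ArgumentParser()
ap.add_argument('--endpoint', type=_uri)  # non-http(s) values become a usage error
args = ap.parse_args(['--endpoint', 'https://example.com'])
print(args.endpoint)  # 'https://example.com'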
Example 7: ld_id
# Required module: import rfc3987 [as alias]
# Or: from rfc3987 import parse [as alias]
def ld_id(entity_id, entity_type):
    out = entity_id
    try:
        d = parse(entity_id, rule='URI')
        scheme = d['scheme']
        if scheme not in ('urn', 'http', 'https'):
            raise ValueError
    except ValueError:
        out = ngsild_uri(entity_type, entity_id)
    return out
# Generates a Relationship's object as a URI
Example 8: IsUri
# Required module: import rfc3987 [as alias]
# Or: from rfc3987 import parse [as alias]
def IsUri(text):
    try:
        rfc3987.parse(text, rule='URI')
    except ValueError:  # parse signals invalid URIs with ValueError
        return False
    return True
Example 9: spider
# Required module: import rfc3987 [as alias]
# Or: from rfc3987 import parse [as alias]
def spider(base_urls, target):
    '''
    Loop through the initial links found in the given page. Each newly
    discovered link is added to the list if it is not already there, and is
    then crawled as well in search of more links.
    wannabe is the placeholder list for the urls that are yet to be crawled.
    base_urls is a list of all the urls crawled so far.
    '''
    global target_
    target_ = parse(target)
    p = Pool(arguments.process)
    wannabe = [url for url in base_urls if target_['authority'] in parse(url)['authority']]
    while True:
        # retrieve all the urls returned by the workers
        new_urls = p.map(worker, wannabe)
        # flatten them and remove duplicates
        new_urls = list(set(itertools.chain(*new_urls)))
        wannabe = []
        i = 0
        # if new_urls is empty, no more urls are being discovered: exit the loop
        if new_urls == []:
            break
        else:
            for url in new_urls:
                if url not in base_urls:
                    '''
                    For each new url, check that it has not been crawled yet.
                    If it is indeed new and contains the target domain, it is
                    appended to the wannabe list so that it is crawled in the
                    next iteration.
                    '''
                    i += 1
                    if target_['authority'] in parse(url)['authority']:
                        wannabe.append(url)
                    base_urls.append(url)
            print(colored('\nNew urls appended: {}\n'.format(i), 'green', attrs=['bold']))
    # once all the links for the given depth have been analyzed, execute the parser
    parser(base_urls)
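The authority comparison is the core same-site filter in spider. Stripped of the crawling machinery, the check reduces to the following (hypothetical URLs):

from rfc3987 import parse

target_ = parse('http://example.com')
url = 'http://sub.example.com/about'
# True when the target authority is a substring of the candidate's authority
print(target_['authority'] in parse(url)['authority'])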