本文整理汇总了Python中werkzeug._internal._decode_unicode函数的典型用法代码示例。如果您正苦于以下问题:Python _decode_unicode函数的具体用法?Python _decode_unicode怎么用?Python _decode_unicode使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了_decode_unicode函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: uri_to_iri
def uri_to_iri(uri, charset = 'utf-8', errors = 'ignore'):
    """Convert a URI in the given charset back into an IRI (unicode).

    The scheme is decoded as ascii, the hostname via the ``idna`` codec,
    and userinfo, path and query are percent-unquoted then decoded with
    *charset*.  The fragment is passed through unchanged.
    """
    uri = url_fix(str(uri), charset)
    scheme, auth, hostname, port, path, query, fragment = _uri_split(uri)
    scheme = _decode_unicode(scheme, 'ascii', errors)
    try:
        hostname = hostname.decode('idna')
    except UnicodeError:
        # the idna codec supports no error handling of its own, so we
        # emulate it: re-raise unless a lenient mode was requested.
        if errors not in ('ignore', 'replace'):
            raise
        hostname = hostname.decode('ascii', errors)
    if auth:
        password = None
        if ':' in auth:
            auth, password = auth.split(':', 1)
        auth = _decode_unicode(_unquote(auth), charset, errors)
        if password:
            auth += u':' + _decode_unicode(_unquote(password), charset, errors)
        hostname = auth + u'@' + hostname
    if port:
        hostname += u':' + port.decode(charset, errors)
    path = _decode_unicode(_unquote(path, '/;?'), charset, errors)
    query = _decode_unicode(_unquote(query, ';/?:@&=+,$'), charset, errors)
    return urlparse.urlunsplit([scheme, hostname, path, query, fragment])
示例2: uri_to_iri
def uri_to_iri(uri, charset='utf-8', errors='replace'):
    r"""Converts a URI in a given charset to a IRI.

    Examples for URI versus IRI

    >>> uri_to_iri('http://xn--n3h.net/')
    u'http://\u2603.net/'
    >>> uri_to_iri('http://%C3%BCser:p%C3%[email protected]/p%C3%A5th')
    u'http://\xfcser:p\[email protected]\u2603.net/p\xe5th'

    Query strings are left unchanged:

    >>> uri_to_iri('/?foo=24&x=%26%2f')
    u'/?foo=24&x=%26%2f'

    .. versionadded:: 0.6

    :param uri: the URI to convert
    :param charset: the charset of the URI
    :param errors: the error handling on decode
    """
    uri = url_fix(str(uri), charset)
    scheme, auth, hostname, port, path, query, fragment = _uri_split(uri)
    scheme = _decode_unicode(scheme, 'ascii', errors)
    try:
        hostname = hostname.decode('idna')
    except UnicodeError:
        # dammit, that codec raised an error. Because it does not support
        # any error handling we have to fake it.... badly
        if errors not in ('ignore', 'replace'):
            raise
        hostname = hostname.decode('ascii', errors)
    # an IPv6 literal must be re-wrapped in brackets for urlunsplit
    if ':' in hostname:
        hostname = '[' + hostname + ']'
    if auth:
        password = None
        if ':' in auth:
            auth, password = auth.split(':', 1)
        auth = _decode_unicode(_unquote(auth), charset, errors)
        if password:
            auth += u':' + _decode_unicode(_unquote(password),
                                           charset, errors)
        hostname = auth + u'@' + hostname
    if port:
        # port should be numeric, but you never know...
        hostname += u':' + port.decode(charset, errors)
    path = _decode_unicode(_unquote(path, '/;?'), charset, errors)
    query = _decode_unicode(_unquote(query, ';/?:@&=+,$'), charset, errors)
    return urlparse.urlunsplit([scheme, hostname, path, query, fragment])
示例3: from_file
def from_file(cls, file, charset='utf-8', errors='strict',
              unicode_mode=True, encoding=None):
    """Load a template from a file.

    .. versionchanged:: 0.5
        The encoding parameter was renamed to charset.

    :param file: a filename or file object to load the template from.
    :param charset: the charset of the template to load.
    :param errors: the error behavior of the charset decoding.
    :param unicode_mode: set to `False` to disable unicode mode.
    :param encoding: deprecated alias for `charset`.
    :return: a template
    """
    if encoding is not None:
        from warnings import warn
        warn(DeprecationWarning('the encoding parameter is deprecated. '
                                'use charset instead.'), stacklevel=2)
        charset = encoding
    # BUG FIX: `f` was previously assigned only in the filename branch,
    # so passing an already-open file object raised NameError at f.read().
    f = file
    close = False
    if isinstance(file, basestring):
        f = open(file, 'r')
        close = True
    try:
        data = _decode_unicode(f.read(), charset, errors)
    finally:
        # only close what we opened ourselves; caller-owned file objects
        # stay open
        if close:
            f.close()
    return cls(data, getattr(f, 'name', '<template>'), charset,
               errors, unicode_mode)
示例4: parse_cookie
def parse_cookie(header, charset="utf-8", errors="replace", cls=None):
    """Parse a cookie. Either from a string or WSGI environ.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    :exc:`HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
        This function now returns a :class:`TypeConversionDict` instead of a
        regular dict. The `cls` parameter was added.

    :param header: the header to be used to parse the cookie. Alternatively
                   this can be a WSGI environment.
    :param charset: the charset for the cookie values.
    :param errors: the error behavior for the charset decoding.
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`TypeConversionDict` is
                used.
    """
    if isinstance(header, dict):
        header = header.get("HTTP_COOKIE", "")
    cls = TypeConversionDict if cls is None else cls
    cookie = _ExtendedCookie()
    cookie.load(header)
    # the extended morsel/cookie classes turn CookieErrors into `None`
    # values; those broken items are skipped while decoding to unicode
    result = dict(
        (name, _decode_unicode(unquote_header_value(morsel.value),
                               charset, errors))
        for name, morsel in cookie.iteritems()
        if morsel.value is not None)
    return cls(result)
示例5: path
def path(self):
    """Requested path as unicode. This works a bit like the regular path
    info in the WSGI environment but will always include a leading slash,
    even if the URL root is accessed.
    """
    raw = self.environ.get('PATH_INFO') or ''
    return _decode_unicode('/' + raw.lstrip('/'), self.charset,
                           self.encoding_errors)
示例6: from_file
def from_file(cls, file, charset='utf-8', errors='strict',
              unicode_mode=True):
    """Load a template from a file.

    .. versionchanged:: 0.5
        The encoding parameter was renamed to charset.

    :param file: a filename or file object to load the template from.
    :param charset: the charset of the template to load.
    :param errors: the error behavior of the charset decoding.
    :param unicode_mode: set to `False` to disable unicode mode.
    :return: a template
    """
    opened_here = False
    f = file
    if isinstance(file, basestring):
        f = open(file, 'r')
        opened_here = True
    try:
        data = _decode_unicode(f.read(), charset, errors)
    finally:
        # close only files we opened ourselves
        if opened_here:
            f.close()
    return cls(data, getattr(f, 'name', '<template>'), charset,
               errors, unicode_mode)
示例7: __init__
def __init__(self, source, filename = '<template>', charset = 'utf-8', errors = 'strict', unicode_mode = True):
    """Compile *source* into template code.

    Byte-string sources are decoded with *charset*/*errors* first;
    a unicode filename is encoded to utf-8 for the parser.
    """
    if isinstance(source, str):
        source = _decode_unicode(source, charset, errors)
    if isinstance(filename, unicode):
        filename = filename.encode('utf-8')
    # normalize all newline styles to '\n' before tokenizing
    normalized = u'\n'.join(source.splitlines())
    tree = Parser(tokenize(normalized, filename), filename).parse()
    self.code = TemplateCodeGenerator(tree, filename).getCode()
    self.filename = filename
    self.charset = charset
    self.errors = errors
    self.unicode_mode = unicode_mode
示例8: start_file_streaming
def start_file_streaming(self, filename, headers, total_content_length):
    """Prepare a container for an uploaded file part.

    Decodes and sanitizes the client-supplied filename, then asks the
    stream factory for a container.  Returns ``(filename, container)``.
    """
    name = _decode_unicode(filename, self.charset, self.errors)
    name = self._fix_ie_filename(name)
    # a missing or malformed content-length header counts as 0
    try:
        content_length = int(headers['content-length'])
    except (KeyError, ValueError):
        content_length = 0
    container = self.stream_factory(total_content_length,
                                    headers.get('content-type'),
                                    name, content_length)
    return name, container
示例9: __init__
def __init__(self, source, filename="<template>", charset="utf-8", errors="strict", unicode_mode=True):
    """Compile *source* into template code.

    A byte-string source is decoded using *charset*/*errors*; a unicode
    filename is encoded to utf-8 before it reaches the parser.
    """
    if isinstance(source, str):
        source = _decode_unicode(source, charset, errors)
    if isinstance(filename, unicode):
        filename = filename.encode("utf-8")
    # collapse all newline conventions to '\n' for the tokenizer
    lines = source.splitlines()
    node = Parser(tokenize(u"\n".join(lines), filename), filename).parse()
    self.code = TemplateCodeGenerator(node, filename).getCode()
    self.filename = filename
    self.charset = charset
    self.errors = errors
    self.unicode_mode = unicode_mode
示例10: from_file
def from_file(cls, file, charset = 'utf-8', errors = 'strict', unicode_mode = True):
    """Load a template from a file.

    :param file: a filename or file object to load the template from.
    :param charset: the charset of the template to load.
    :param errors: the error behavior of the charset decoding.
    :param unicode_mode: set to `False` to disable unicode mode.
    :return: a template
    """
    # BUG FIX: `f` was previously assigned only in the filename branch,
    # so passing an already-open file object raised NameError at f.read().
    f = file
    close = False
    if isinstance(file, basestring):
        f = open(file, 'r')
        close = True
    try:
        data = _decode_unicode(f.read(), charset, errors)
    finally:
        # only close files we opened ourselves
        if close:
            f.close()
    return cls(data, getattr(f, 'name', '<template>'), charset, errors, unicode_mode)
示例11: parse_cookie
def parse_cookie(header, charset = 'utf-8', errors = 'ignore', cls = None):
    """Parse a cookie from a header string or a WSGI environ dict.

    Returns the decoded key/value pairs wrapped in *cls*
    (:class:`TypeConversionDict` when not given).
    """
    if isinstance(header, dict):
        header = header.get('HTTP_COOKIE', '')
    cls = TypeConversionDict if cls is None else cls
    cookie = _ExtendedCookie()
    cookie.load(header)
    # morsels whose value is None were broken cookie items; skip them
    result = dict(
        (name, _decode_unicode(unquote_header_value(morsel.value),
                               charset, errors))
        for name, morsel in cookie.iteritems()
        if morsel.value is not None)
    return cls(result)
示例12: url_unquote_plus
def url_unquote_plus(s, charset='utf-8', errors='ignore'):
    """URL decode a single string with the given decoding and decode
    a "+" to whitespace.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    `HTTPUnicodeError` is raised.

    :param s: the string to unquote.
    :param charset: the charset to be used.
    :param errors: the error handling for the charset decoding.
    """
    unquoted = _unquote_plus(s)
    return _decode_unicode(unquoted, charset, errors)
示例13: url_unquote
def url_unquote(s, charset='utf-8', errors='replace'):
    """URL decode a single string with a given decoding.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    `HTTPUnicodeError` is raised.

    :param s: the string to unquote.
    :param charset: the charset to be used.
    :param errors: the error handling for the charset decoding.
    """
    # unquoting operates on bytes, so encode unicode input first
    raw = s.encode(charset) if isinstance(s, unicode) else s
    return _decode_unicode(_unquote(raw), charset, errors)
示例14: _url_decode_impl
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty,
                     errors):
    """Yield decoded ``(key, value)`` pairs from raw querystring pairs.

    Pairs without a ``=`` are skipped unless *include_empty* is true, in
    which case they yield an empty value.  Keys are only decoded to
    unicode when *decode_keys* is set.
    """
    for pair in pair_iter:
        if not pair:
            continue
        key, sep, value = pair.partition('=')
        # partition leaves value == '' when no '=' was present
        if not sep and not include_empty:
            continue
        key = _unquote_plus(key)
        if decode_keys:
            key = _decode_unicode(key, charset, errors)
        yield key, url_unquote_plus(value, charset, errors)
示例15: url_decode
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
               errors='ignore', separator='&', cls=None):
    """Parse a querystring and return it as :class:`MultiDict`. Per default
    only values are decoded into unicode strings. If `decode_keys` is set to
    `True` the same will happen for keys.

    Per default a missing value for a key will default to an empty key. If
    you don't want that behavior you can set `include_empty` to `False`.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    `HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
        In previous versions ";" and "&" could be used for url decoding.
        This changed in 0.5 where only "&" is supported. If you want to
        use ";" instead a different `separator` can be provided.
        The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string.
    :param decode_keys: set to `True` if you want the keys to be decoded
                        as well.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    """
    if cls is None:
        cls = MultiDict
    result = []
    for pair in str(s).split(separator):
        if not pair:
            continue
        if '=' in pair:
            key, value = pair.split('=', 1)
        else:
            # BUG FIX: `include_empty` was documented but never checked;
            # honor it here, matching the behavior of _url_decode_impl.
            if not include_empty:
                continue
            key = pair
            value = ''
        key = _unquote_plus(key)
        if decode_keys:
            key = _decode_unicode(key, charset, errors)
        result.append((key, url_unquote_plus(value, charset, errors)))
    return cls(result)