本文整理汇总了Python中werkzeug._compat.to_native函数的典型用法代码示例。如果您正苦于以下问题:Python to_native函数的具体用法?Python to_native怎么用?Python to_native使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了to_native函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: dump_cookie
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=None, httponly=False, sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix.

    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.  Key and
    value are encoded with the module-level ``_cookie_charset``.

    :param key: the cookie name; must be ``bytes`` or text.
    :param value: the cookie value; must be ``bytes`` or text.
    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc.  Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    :return: a unicode ``Set-Cookie`` header value (without the prefix).
    :raises TypeError: if key or value is neither bytes nor text.
    """
    if not isinstance(key, (bytes, text_type)):
        raise TypeError('invalid key %r' % key)
    if not isinstance(value, (bytes, text_type)):
        raise TypeError('invalid value %r' % value)
    key, value = to_native(key, _cookie_charset), to_native(value, _cookie_charset)
    value = quote_header_value(value)
    morsel = _ExtendedMorsel(key, value)
    if isinstance(max_age, timedelta):
        # Convert to whole seconds; sub-second precision is dropped.
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
        morsel['expires'] = expires
    elif max_age is not None and sync_expires:
        morsel['expires'] = cookie_date(time() + max_age)
    if domain and ':' in domain:
        # The port part of the domain should NOT be used. Strip it
        domain = domain.split(':', 1)[0]
    if domain:
        assert '.' in domain, (
            "Setting \"domain\" for a cookie on a server running locally (ex: "
            "localhost) is not supported by complying browsers. You should "
            "have something like: \"127.0.0.1 localhost dev.localhost\" on "
            "your hosts file and then point your server to run on "
            "\"dev.localhost\" and also set \"domain\" for \"dev.localhost\""
        )
    for k, v in (('path', path), ('domain', domain), ('secure', secure),
                 ('max-age', max_age), ('httponly', httponly)):
        # None/False attributes are omitted from the header entirely.
        if v is not None and v is not False:
            morsel[k] = str(v)
    return to_unicode(morsel.output(header='').lstrip(), _cookie_charset)
示例2: sourcelines
def sourcelines(self):
    """The sourcecode of the file as list of unicode strings."""
    source = None
    # Prefer the loader's view of the source over reading from disk.
    if self.loader is not None:
        try:
            if hasattr(self.loader, 'get_source'):
                source = self.loader.get_source(self.module)
            elif hasattr(self.loader, 'get_source_by_code'):
                source = self.loader.get_source_by_code(self.code)
        except Exception:
            # A broken loader must not take the debugger down with it.
            pass
    if source is None:
        try:
            fp = open(to_native(self.filename, get_filesystem_encoding()),
                      mode='rb')
        except IOError:
            return []
        with fp:
            source = fp.read()
    if isinstance(source, text_type):
        # Already unicode -- nothing to decode.
        return source.splitlines()
    # Bytes: determine the charset.  It should be ascii, but we don't
    # want to reject too many characters in the debugger if something
    # breaks, so decode with 'replace' below.
    charset = 'utf-8'
    if source.startswith(UTF8_COOKIE):
        source = source[3:]
    else:
        # Scan the first two lines for a PEP 263 coding declaration.
        for idx, line_match in enumerate(_line_re.finditer(source)):
            coding = _coding_re.search(line_match.group())
            if coding is not None:
                charset = coding.group(1)
                break
            if idx > 1:
                break
    # On broken cookies we fall back to utf-8 too.
    charset = to_native(charset)
    try:
        codecs.lookup(charset)
    except LookupError:
        charset = 'utf-8'
    return source.decode(charset, 'replace').splitlines()
示例3: test_shared_data_middleware
def test_shared_data_middleware(self):
    """SharedDataMiddleware serves disk files and package data, 404s pass through."""
    def null_application(environ, start_response):
        start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
        yield b'NOT FOUND'

    test_dir = get_temporary_directory()
    with open(path.join(test_dir, to_native(u'äöü', 'utf-8')), 'w') as test_file:
        test_file.write(u'FOUND')

    app = wsgi.SharedDataMiddleware(null_application, {
        '/': path.join(path.dirname(__file__), 'res'),
        '/sources': path.join(path.dirname(__file__), 'res'),
        '/pkg': ('werkzeug.debug', 'shared'),
        '/foo': test_dir
    })

    # Existing files are served with their contents.
    for request_path in ('/test.txt', '/sources/test.txt', '/foo/äöü'):
        app_iter, status, headers = run_wsgi_app(
            app, create_environ(request_path))
        self.assert_equal(status, '200 OK')
        with closing(app_iter) as app_iter:
            self.assert_equal(b''.join(app_iter).strip(), b'FOUND')

    # Package resources are served as well.
    app_iter, status, headers = run_wsgi_app(
        app, create_environ('/pkg/debugger.js'))
    with closing(app_iter) as app_iter:
        self.assert_in(b'$(function() {', b''.join(app_iter))

    # Unknown paths fall through to the wrapped application.
    app_iter, status, headers = run_wsgi_app(
        app, create_environ('/missing'))
    self.assert_equal(status, '404 NOT FOUND')
    self.assert_equal(b''.join(app_iter).strip(), b'NOT FOUND')
示例4: test_shared_data_middleware
def test_shared_data_middleware(tmpdir):
    """SharedDataMiddleware accepts both list and dict export mappings."""
    def null_application(environ, start_response):
        start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
        yield b'NOT FOUND'

    test_dir = str(tmpdir)
    with open(path.join(test_dir, to_native(u'äöü', 'utf-8')), 'w') as test_file:
        test_file.write(u'FOUND')

    exports = [
        ('/', path.join(path.dirname(__file__), 'res')),
        ('/sources', path.join(path.dirname(__file__), 'res')),
        ('/pkg', ('werkzeug.debug', 'shared')),
        ('/foo', test_dir)
    ]
    for container in (list, dict):
        app = wsgi.SharedDataMiddleware(null_application, container(exports))

        # Existing files are found regardless of mapping type.
        for request_path in ('/test.txt', '/sources/test.txt', '/foo/äöü'):
            app_iter, status, headers = run_wsgi_app(
                app, create_environ(request_path))
            assert status == '200 OK'
            with closing(app_iter) as app_iter:
                assert b''.join(app_iter).strip() == b'FOUND'

        # Package resources work too.
        app_iter, status, headers = run_wsgi_app(
            app, create_environ('/pkg/debugger.js'))
        with closing(app_iter) as app_iter:
            assert b'$(function() {' in b''.join(app_iter)

        # Unknown paths hit the wrapped 404 application.
        app_iter, status, headers = run_wsgi_app(
            app, create_environ('/missing'))
        assert status == '404 NOT FOUND'
        assert b''.join(app_iter).strip() == b'NOT FOUND'
示例5: test_shared_data_middleware
def test_shared_data_middleware(tmpdir):
    """SharedDataMiddleware serves disk files, package data, and 404s unknowns."""
    def null_application(environ, start_response):
        start_response("404 NOT FOUND", [("Content-Type", "text/plain")])
        yield b"NOT FOUND"

    test_dir = str(tmpdir)
    with open(path.join(test_dir, to_native(u"äöü", "utf-8")), "w") as test_file:
        test_file.write(u"FOUND")

    res_dir = path.join(path.dirname(__file__), "res")
    app = wsgi.SharedDataMiddleware(
        null_application,
        {
            "/": res_dir,
            "/sources": res_dir,
            "/pkg": ("werkzeug.debug", "shared"),
            "/foo": test_dir,
        },
    )

    # Files present on disk are served with their contents.
    for request_path in ("/test.txt", "/sources/test.txt", "/foo/äöü"):
        app_iter, status, headers = run_wsgi_app(app, create_environ(request_path))
        assert status == "200 OK"
        with closing(app_iter) as app_iter:
            assert b"".join(app_iter).strip() == b"FOUND"

    # Package resources are served too.
    app_iter, status, headers = run_wsgi_app(app, create_environ("/pkg/debugger.js"))
    with closing(app_iter) as app_iter:
        assert b"$(function() {" in b"".join(app_iter)

    # Unknown paths fall through to the wrapped 404 application.
    app_iter, status, headers = run_wsgi_app(app, create_environ("/missing"))
    assert status == "404 NOT FOUND"
    assert b"".join(app_iter).strip() == b"NOT FOUND"
示例6: url_encode_stream
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False,
                      sort=False, key=None, separator=b'&'):
    """Like :meth:`url_encode` but writes the results to a stream
    object.  If the stream is `None` a generator over all encoded
    pairs is returned.

    .. versionadded:: 0.8

    :param obj: the object to encode into a query string.
    :param stream: a stream to write the encoded object into or `None` if
                   an iterator over the encoded pairs should be returned.  In
                   that case the separator argument is ignored.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
                        Python 3.x)
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting.  For more details
                check out the :func:`sorted` documentation.
    """
    separator = to_native(separator, 'ascii')
    pairs = _url_encode_impl(obj, charset, encode_keys, sort, key)
    if stream is None:
        return pairs
    # Interleave the separator between pairs, never before the first one.
    first = True
    for pair in pairs:
        if not first:
            stream.write(separator)
        stream.write(pair)
        first = False
示例7: parse_multipart_headers
def parse_multipart_headers(iterable):
    """Parses multipart headers from an iterable that yields lines (including
    the trailing newline symbol).  The iterable has to be newline terminated.

    The iterable will stop at the line where the headers ended so it can be
    further consumed.

    :param iterable: iterable of strings that are newline terminated
    :raises ValueError: if a line is not newline terminated.
    """
    result = []
    for raw_line in iterable:
        line, terminated = _line_parse(to_native(raw_line))
        if not terminated:
            raise ValueError('unexpected end of line in multipart header')
        if not line:
            # A blank line terminates the header block.
            break
        if result and line[0] in ' \t':
            # Continuation line: fold it into the previous header value.
            prev_key, prev_value = result[-1]
            result[-1] = (prev_key, prev_value + '\n ' + line[1:])
        else:
            parts = line.split(':', 1)
            if len(parts) == 2:
                result.append((parts[0].strip(), parts[1].strip()))
    # we link the list to the headers, no need to create a copy, the
    # list was not shared anyways.
    return Headers(result)
示例8: iri_to_uri
def iri_to_uri(iri, charset='utf-8', errors='strict'):
    r"""
    Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug always
    uses utf-8 URLs internally because this is what browsers and HTTP do as
    well. In some places where it accepts an URL it also accepts a unicode IRI
    and converts it into a URI.

    Examples for IRI versus URI:

    >>> iri_to_uri(u'http://\u2603.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th')
    'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'

    .. versionadded:: 0.6

    :param iri: The IRI to convert.
    :param charset: The charset for the URI.
    :param errors: codec error handling passed on to the quoting helpers.
    :return: the URI as a native string.
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)
    iri = url_parse(to_unicode(iri, charset, errors))
    # The host is IDNA-encoded; the remaining components are percent-quoted
    # individually, each with its own set of safe characters.
    netloc = iri.encode_netloc().decode('ascii')
    path = url_quote(iri.path, charset, errors, '/:~+%')
    query = url_quote(iri.query, charset, errors, '%&[]:;$*()+,!?*/=')
    fragment = url_quote(iri.fragment, charset, errors, '=%&[]:;$()+,!?*/')
    return to_native(url_unparse((iri.scheme, netloc,
                                  path, query, fragment)))
示例9: url_quote
def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
    """URL encode a single string with a given encoding.

    :param string: the string to quote.
    :param charset: the charset to be used.
    :param errors: codec error handling used when encoding text input.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    # Normalize everything to bytes first; non-string input is stringified.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    # "unsafe" wins over "safe" and the built-in always-safe set.
    safe_bytes = (frozenset(bytearray(safe) + _always_safe)
                  - frozenset(bytearray(unsafe)))
    encoded = bytearray()
    for byte in bytearray(string):
        if byte in safe_bytes:
            encoded.append(byte)
        else:
            encoded.extend(('%%%02X' % byte).encode('ascii'))
    return to_native(bytes(encoded))
示例10: url_fix
def url_fix(s, charset='utf-8'):
    r"""Sometimes you get an URL by a user that just isn't a real URL because
    it contains unsafe characters like ' ' and so on. This function can fix
    some of the problems in a similar way browsers handle data entered by the
    user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given as
                    unicode string.
    """
    # Switch to unicode processing and convert backslashes (invalid in
    # URLs anyways) to slashes.  This is consistent with what Chrome does.
    s = to_unicode(s, charset, 'replace').replace('\\', '/')

    # Repair malformed windows file URLs such as ``file://C:/...``.
    looks_like_windows_path = (
        s.startswith('file://')
        and s[7:8].isalpha()
        and s[8:10] in (':/', '|/')
    )
    if looks_like_windows_path:
        s = 'file:///' + s[7:]

    url = url_parse(s)
    path = url_quote(url.path, charset, safe='/%+$!*\'(),')
    qs = url_quote_plus(url.query, charset, safe=':&%=+$!*\'(),')
    anchor = url_quote_plus(url.fragment, charset, safe=':&%=+$!*\'(),')
    return to_native(url_unparse((url.scheme, url.encode_netloc(),
                                  path, qs, anchor)))
示例11: port
def port(self):
    """The port in the URL as an integer if it was present, `None`
    otherwise.  This does not fill in default ports.
    """
    try:
        # _split_host() yields (host, port); the port part may be None
        # (TypeError) or non-numeric (ValueError).
        value = int(to_native(self._split_host()[1]))
    except (ValueError, TypeError):
        return None
    # Only values inside the valid port range are reported.
    return value if 0 <= value <= 65535 else None
示例12: ascii_host
def ascii_host(self):
    """Works exactly like :attr:`host` but will return a result that
    is restricted to ASCII.  If it finds a netloc that is not ASCII
    it will attempt to idna decode it.  This is useful for socket
    operations when the URL might include internationalized characters.
    """
    host = self.host
    # Only text hosts need IDNA encoding; bytes (or None) pass through
    # to to_native unchanged.
    if isinstance(host, text_type):
        host = _encode_idna(host)
    return to_native(host, 'ascii', 'ignore')
示例13: unserialize
def unserialize(cls, string, secret_key):
    """Load the secure cookie from a serialized string.

    :param string: the cookie value to unserialize.
    :param secret_key: the secret key used to serialize the cookie.
    :return: a new :class:`SecureCookie`.  On any parsing or signature
             failure an empty, unmodified cookie is returned instead of
             raising.
    """
    # Work on bytes throughout; lenient 'replace' so bad input cannot raise.
    if isinstance(string, text_type):
        string = string.encode('utf-8', 'replace')
    if isinstance(secret_key, text_type):
        secret_key = secret_key.encode('utf-8', 'replace')
    try:
        # Serialized format: "<base64 mac>?<key=value&key=value...>"
        base64_hash, data = string.split(b'?', 1)
    except (ValueError, IndexError):
        # Malformed payload: fall through with empty items.
        items = ()
    else:
        items = {}
        mac = hmac(secret_key, None, cls.hash_method)
        for item in data.split(b'&'):
            # The MAC covers every pair prefixed with '|', matching the
            # serializer's construction.
            mac.update(b'|' + item)
            if not b'=' in item:
                # A pair without '=' invalidates the whole cookie.
                items = None
                break
            key, value = item.split(b'=', 1)
            # try to make the key a string
            key = url_unquote_plus(key.decode('ascii'))
            try:
                key = to_native(key)
            except UnicodeError:
                pass
            items[key] = value

        # no parsing error and the mac looks okay, we can now
        # securely unpickle our cookie.
        try:
            client_hash = base64.b64decode(base64_hash)
        except TypeError:
            # NOTE(review): on Python 3, invalid base64 raises
            # binascii.Error, a ValueError subclass -- confirm this
            # except clause matches the targeted Python version.
            items = client_hash = None
        if items is not None and safe_str_cmp(client_hash, mac.digest()):
            # Signature verified; only now is it safe to unquote values.
            try:
                for key, value in iteritems(items):
                    items[key] = cls.unquote(value)
            except UnquoteError:
                items = ()
            else:
                # Honor the reserved '_expires' key, then hide it from
                # the caller.
                if '_expires' in items:
                    if time() > items['_expires']:
                        items = ()
                    else:
                        del items['_expires']
        else:
            # Bad signature: discard everything.
            items = ()
    # 'False' marks the cookie as not modified.
    return cls(items, secret_key, False)
示例14: __init__
def __init__(self, servers=None, default_timeout=300, key_prefix=None):
    """Set up the memcached-backed cache.

    :param servers: a list/tuple of server addresses (defaults to the
                    local memcached at 127.0.0.1:11211), or an already
                    initialized memcache client object.
    :param default_timeout: default expiration passed to :class:`BaseCache`.
    :param key_prefix: optional prefix prepended to every cache key.
    """
    BaseCache.__init__(self, default_timeout)
    if servers is None or isinstance(servers, (list, tuple)):
        # Address list (or nothing): build our own client.
        addresses = ['127.0.0.1:11211'] if servers is None else servers
        self._client = self.import_preferred_memcache_lib(addresses)
        if self._client is None:
            raise RuntimeError('no memcache module found')
    else:
        # NOTE: servers is actually an already initialized memcache
        # client.
        self._client = servers
    self.key_prefix = to_native(key_prefix)
示例15: encode_netloc
def encode_netloc(self):
    """Encodes the netloc part to an ASCII safe URL as bytes.

    NOTE(review): the return goes through ``to_native``, so on Python 3
    this is presumably a native ``str`` rather than ``bytes`` -- confirm
    against the werkzeug version in use.
    """
    rv = self.ascii_host or ''
    if ':' in rv:
        # A bare IPv6 address must be bracketed inside a netloc.
        rv = '[%s]' % rv
    port = self.port
    if port is not None:
        rv = '%s:%d' % (rv, port)
    # Quote user and password separately, drop empty parts, and join
    # them with ':' to form the userinfo section.
    auth = ':'.join(filter(None, [
        url_quote(self.raw_username or '', 'utf-8', 'strict', '/:%'),
        url_quote(self.raw_password or '', 'utf-8', 'strict', '/:%'),
    ]))
    if auth:
        # Fixed: the format string had been mangled by an email-obfuscation
        # filter ('%[email protected]%s') and would raise ValueError at runtime.
        rv = '%s@%s' % (auth, rv)
    return to_native(rv)