本文整理汇总了Python中werkzeug.wsgi.make_line_iter函数的典型用法代码示例。如果您正苦于以下问题:Python make_line_iter函数的具体用法?Python make_line_iter怎么用?Python make_line_iter使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了make_line_iter函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_multi_part_line_breaks
def test_multi_part_line_breaks():
    """Lines split across buffer reads are reassembled with their CRLF kept.

    ``buffer_size`` is deliberately smaller than the input so every line
    straddles at least one internal read; the trailing segment without a
    newline must still be yielded as the final item.
    """
    test_stream = StringIO('abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK')
    lines = list(make_line_iter(test_stream, limit=1024, buffer_size=16))
    assert lines == ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK']

    # A single line longer than buffer_size must not be split.
    test_stream = StringIO('abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz')
    lines = list(make_line_iter(test_stream, limit=1024, buffer_size=24))
    assert lines == ['abc\r\n', 'This line is broken by the buffer length.\r\n', 'Foo bar baz']
示例2: test_multi_part_line_breaks_bytes
def test_multi_part_line_breaks_bytes(self):
    """make_line_iter splits byte streams on CRLF across buffer boundaries.

    Same scenario as the text variant, but with ``BytesIO`` input and byte
    string expectations; ``limit`` is the exact payload length.
    """
    data = b"abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK"
    test_stream = BytesIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
    self.assert_equal(lines, [b"abcdef\r\n", b"ghijkl\r\n", b"mnopqrstuvwxyz\r\n", b"ABCDEFGHIJK"])

    # A line longer than buffer_size must still come back in one piece.
    data = b"abc\r\nThis line is broken by the buffer length." b"\r\nFoo bar baz"
    test_stream = BytesIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
    self.assert_equal(lines, [b"abc\r\n", b"This line is broken by the " b"buffer length.\r\n", b"Foo bar baz"])
示例3: test_multi_part_line_breaks
def test_multi_part_line_breaks(self):
    """make_line_iter yields whole CRLF-terminated lines from a text stream.

    ``buffer_size`` is chosen smaller than the data so that reads cut
    through lines; the iterator must reassemble them.
    """
    data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
    test_stream = StringIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
    self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])

    # A line longer than buffer_size must not be split.
    data = 'abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz'
    test_stream = StringIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
    self.assert_equal(lines, ['abc\r\n', 'This line is broken by the buffer length.\r\n', 'Foo bar baz'])
示例4: test_multi_part_line_breaks
def test_multi_part_line_breaks():
    """make_line_iter reassembles CRLF lines that span internal reads.

    Uses ``NativeStringIO`` (str on the running Python version) so the same
    test covers both Python 2 and 3 string types.
    """
    data = "abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK"
    test_stream = NativeStringIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
    assert lines == ["abcdef\r\n", "ghijkl\r\n", "mnopqrstuvwxyz\r\n", "ABCDEFGHIJK"]

    # A line longer than buffer_size must still be yielded whole.
    data = "abc\r\nThis line is broken by the buffer length." "\r\nFoo bar baz"
    test_stream = NativeStringIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
    assert lines == ["abc\r\n", "This line is broken by the buffer " "length.\r\n", "Foo bar baz"]
示例5: test_multi_part_line_breaks_bytes
def test_multi_part_line_breaks_bytes():
    """Byte-stream variant: CRLF lines spanning buffers come back intact."""
    data = b'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
    test_stream = BytesIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                     buffer_size=16))
    assert lines == [b'abcdef\r\n', b'ghijkl\r\n', b'mnopqrstuvwxyz\r\n',
                     b'ABCDEFGHIJK']

    # A line longer than buffer_size must not be split.
    data = b'abc\r\nThis line is broken by the buffer length.' \
        b'\r\nFoo bar baz'
    test_stream = BytesIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                     buffer_size=24))
    assert lines == [b'abc\r\n', b'This line is broken by the buffer '
                     b'length.\r\n', b'Foo bar baz']
示例6: test_multi_part_line_breaks_problematic
def test_multi_part_line_breaks_problematic(self):
    """A lone ``\\r`` directly before a ``\\r\\n`` must not confuse the splitter.

    ``buffer_size=4`` places the read boundary right at the ambiguous
    ``\\r``; repeated runs guard against any state leaking between calls.
    """
    data = 'abc\rdef\r\nghi'
    # Loop variable is unused: each iteration repeats the identical check.
    for _ in range(1, 10):
        test_stream = NativeStringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                         buffer_size=4))
        self.assert_equal(lines, ['abc\r', 'def\r\n', 'ghi'])
示例7: test_lines_longer_buffer_size
def test_lines_longer_buffer_size():
    """Lines longer than any buffer size 1..14 are still yielded whole."""
    data = "1234567890\n1234567890\n"
    for bufsize in range(1, 15):
        lines = list(
            wsgi.make_line_iter(
                NativeStringIO(data), limit=len(data), buffer_size=bufsize
            )
        )
        assert lines == ["1234567890\n", "1234567890\n"]
示例8: test_lines_longer_buffer_size_cap
def test_lines_longer_buffer_size_cap():
    """With ``cap_at_buffer=True`` no yielded chunk exceeds ``buffer_size``.

    Each chunk is either exactly ``bufsize`` long (a capped partial line) or
    ends the logical line with ``\\n``.
    """
    data = "1234567890\n1234567890\n"
    for bufsize in range(1, 15):
        lines = list(
            wsgi.make_line_iter(
                NativeStringIO(data),
                limit=len(data),
                buffer_size=bufsize,
                cap_at_buffer=True,
            )
        )
        assert len(lines[0]) == bufsize or lines[0].endswith("\n")
示例9: parse_lines
def parse_lines(self, file, boundary, content_length):
    """Generate parts of
    ``('begin_form', (headers, name))``
    ``('begin_file', (headers, name, filename))``
    ``('cont', bytestring)``
    ``('end', None)``

    Always obeys the grammar
    parts = ( begin_form cont* end |
              begin_file cont* end )*
    """
    next_part = b'--' + boundary
    last_part = next_part + b'--'

    # Append an endless empty-string tail so running off the real data is
    # detected as "unexpected end of stream" instead of StopIteration.
    iterator = chain(make_line_iter(file, limit=content_length,
                                    buffer_size=self.buffer_size),
                     _empty_string_iter)

    terminator = self._find_terminator(iterator)

    if terminator == last_part:
        # Body contained only the closing boundary: nothing to emit.
        return
    elif terminator != next_part:
        self.fail('Expected boundary at start of multipart data')

    while terminator != last_part:
        headers = parse_multipart_headers(iterator)

        disposition = headers.get('content-disposition')
        if disposition is None:
            self.fail('Missing Content-Disposition header')
        disposition, extra = parse_options_header(disposition)
        transfer_encoding = self.get_part_encoding(headers)
        name = extra.get('name')
        filename = extra.get('filename')

        # if no content type is given we stream into memory. A list is
        # used as a temporary container.
        if filename is None:
            yield _begin_form, (headers, name)

        # otherwise we parse the rest of the headers and ask the stream
        # factory for something we can write in.
        else:
            yield _begin_file, (headers, name, filename)

        buf = b''
        for line in iterator:
            if not line:
                self.fail('unexpected end of stream')

            if line[:2] == b'--':
                terminator = line.rstrip()
                if terminator in (next_part, last_part):
                    break

            if transfer_encoding is not None:
                if transfer_encoding == 'base64':
                    # codecs knows this encoding as 'base64_codec'.
                    transfer_encoding = 'base64_codec'
                try:
                    line = codecs.decode(line, transfer_encoding)
                except Exception:
                    self.fail('could not decode transfer encoded chunk')

            # we have something in the buffer from the last iteration.
            # this is usually a newline delimiter.
            if buf:
                yield _cont, buf
                buf = b''

            # If the line ends with windows CRLF we write everything except
            # the last two bytes. In all other cases however we write
            # everything except the last byte. If it was a newline, that's
            # fine, otherwise it does not matter because we will write it
            # the next iteration. this ensures we do not write the
            # final newline into the stream. That way we do not have to
            # truncate the stream. However we do have to make sure that
            # if something else than a newline is in there we write it
            # out.
            if line[-2:] == b'\r\n':
                buf = b'\r\n'
                cutoff = -2
            else:
                buf = line[-1:]
                cutoff = -1
            yield _cont, line[:cutoff]

        else:  # pragma: no cover
            # The for loop ran off the padded iterator without hitting a
            # boundary line: the part was never terminated.
            raise ValueError('unexpected end of part')

        # if we have a leftover in the buffer that is not a newline
        # character we have to flush it, otherwise we will chop of
        # certain values.
        if buf not in (b'', b'\r', b'\n', b'\r\n'):
            yield _cont, buf

        yield _end, None
示例10: test_lines_longer_buffer_size
def test_lines_longer_buffer_size(self):
    """Lines longer than the read buffer are still returned whole.

    Bug fix: the loop variable ``bufsize`` was computed but ``buffer_size``
    was hard-coded to 4, so iterations 1..14 all ran the identical check.
    The expected output holds for every buffer size below the line length.
    """
    data = '1234567890\n1234567890\n'
    for bufsize in xrange(1, 15):
        lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
                                         buffer_size=bufsize))
        self.assert_equal(lines, ['1234567890\n', '1234567890\n'])
示例11: test_iter_functions_support_iterators
def test_iter_functions_support_iterators(self):
    """make_line_iter accepts a plain iterable of chunks, not only streams.

    The chunks deliberately split lines mid-way (including between ``\\r``
    and ``\\n``) to prove the chunks are re-joined before line splitting.
    """
    data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
    lines = list(wsgi.make_line_iter(data))
    self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
                              'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
示例12: test_lines_longer_buffer_size_cap
def test_lines_longer_buffer_size_cap():
    """With ``cap_at_buffer=True`` output is chopped into buffer-sized chunks.

    ``buffer_size`` stays fixed at 4 because the expected chunk list below
    assumes exactly 4-byte caps; the loop only re-runs the identical check.
    (The former loop variable ``bufsize`` was unused — renamed to ``_``.)
    """
    data = '1234567890\n1234567890\n'
    for _ in range(1, 15):
        lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
                                         buffer_size=4, cap_at_buffer=True))
        assert lines == ['1234', '5678', '90\n', '1234', '5678', '90\n']
示例13: test_multi_part_line_breaks_problematic
def test_multi_part_line_breaks_problematic():
    """A bare ``\\r`` immediately before ``\\r\\n`` must split correctly.

    ``buffer_size=4`` puts the read boundary at the ambiguous ``\\r``;
    repeating the run guards against state leaking between invocations.
    """
    data = "abc\rdef\r\nghi"
    for _ in range(1, 10):
        test_stream = NativeStringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=4))
        assert lines == ["abc\r", "def\r\n", "ghi"]
示例14: parse_multipart
def parse_multipart(file, boundary, content_length, stream_factory=None,
                    charset='utf-8', errors='ignore', buffer_size=10240,
                    max_form_memory_size=None):
    """Parse a ``multipart/form-data`` body into form fields and files.

    :param file: input stream positioned at the start of the multipart body.
    :param boundary: the multipart boundary string (without leading ``--``).
    :param content_length: total length of the body in bytes.
    :param stream_factory: callable producing a writable container for file
        parts; defaults to ``default_stream_factory``.
    :param charset: charset used to decode text form values and filenames.
    :param errors: decoding error policy for *charset*.
    :param buffer_size: read-buffer size; must be >= ``len(boundary)``.
    :param max_form_memory_size: optional cap on bytes buffered in memory
        for non-file fields; exceeding it raises ``RequestEntityTooLarge``.
    :return: ``(form, files)`` — lists of ``(name, value)`` /
        ``(name, FileStorage)`` tuples.
    :raises ValueError: on a missing/invalid boundary or malformed body.
    """
    if stream_factory is None:
        stream_factory = default_stream_factory
    if not boundary:
        raise ValueError('Missing boundary')
    if not is_valid_multipart_boundary(boundary):
        raise ValueError('Invalid boundary: %s' % boundary)
    if len(boundary) > buffer_size:
        # The boundary must fit in one buffered read to be detectable.
        raise ValueError('Boundary longer than buffer size')

    total_content_length = content_length
    next_part = '--' + boundary
    last_part = next_part + '--'

    form = []
    files = []
    in_memory = 0

    # LimitedStream stops reads at content_length; the endless empty-string
    # tail turns premature exhaustion into a detectable empty line.
    file = LimitedStream(file, content_length)
    iterator = chain(make_line_iter(file, buffer_size=buffer_size), _empty_string_iter)

    try:
        terminator = _find_terminator(iterator)
        if terminator != next_part:
            raise ValueError('Expected boundary at start of multipart data')

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get('content-disposition')
            if disposition is None:
                raise ValueError('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            name = extra.get('name')
            transfer_encoding = headers.get('content-transfer-encoding')
            try_decode = (transfer_encoding is not None and
                          transfer_encoding in _supported_multipart_encodings)
            filename = extra.get('filename')

            # Without a filename the part is a plain form field buffered in
            # memory; a list is used as a temporary container.
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = max_form_memory_size is not None
            # Otherwise ask the stream factory for a writable sink.
            else:
                content_type = headers.get('content-type')
                content_type = parse_options_header(content_type)[0] or 'text/plain'
                is_file = True
                guard_memory = False
                if filename is not None:
                    filename = _fix_ie_filename(_decode_unicode(filename, charset, errors))
                try:
                    content_length = int(headers['content-length'])
                except (KeyError, ValueError):
                    content_length = 0
                container = stream_factory(total_content_length, content_type,
                                           filename, content_length)
                _write = container.write

            buf = ''
            for line in iterator:
                if not line:
                    # Hit the empty-string tail: body ended mid-part.
                    raise ValueError('unexpected end of stream')
                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break
                if try_decode:
                    try:
                        line = line.decode(transfer_encoding)
                    except Exception:
                        # was a bare ``except:`` — narrowed so that
                        # KeyboardInterrupt/SystemExit still propagate.
                        raise ValueError('could not decode transfer encoded chunk')

                # Flush the newline held back from the previous iteration.
                if buf:
                    _write(buf)
                    buf = ''

                # Hold back the trailing newline (CRLF or single byte) so the
                # final newline before the boundary never reaches the sink.
                if line[-2:] == '\r\n':
                    buf = '\r\n'
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])

                if guard_memory:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge
                        raise RequestEntityTooLarge()
            else:
                # Iterator exhausted without reaching a boundary line.
                raise ValueError('unexpected end of part')

            if is_file:
                container.seek(0)
                files.append((name, FileStorage(container, filename, name,
                                                content_type, content_length,
                                                headers)))
            else:
                form.append((name, _decode_unicode(''.join(container), charset, errors)))
    finally:
        # Drain any unread remainder so the underlying stream stays usable.
        file.exhaust()

    return (form, files)
示例15: parse
def parse(self, file, boundary, content_length):
next_part = '--' + boundary
last_part = next_part + '--'
form = []
files = []
in_memory = 0
iterator = chain(make_line_iter(file, limit=content_length,
buffer_size=self.buffer_size),
_empty_string_iter)
terminator = self._find_terminator(iterator)
if terminator != next_part:
self.fail('Expected boundary at start of multipart data')
while terminator != last_part:
headers = parse_multipart_headers(iterator)
disposition = headers.get('content-disposition')
if disposition is None:
self.fail('Missing Content-Disposition header')
disposition, extra = parse_options_header(disposition)
transfer_encoding = self.get_part_encoding(headers)
name = extra.get('name')
filename = extra.get('filename')
part_charset = self.get_part_charset(headers)
# if no content type is given we stream into memory. A list is
# used as a temporary container.
if filename is None:
is_file = False
container = []
_write = container.append
guard_memory = self.max_form_memory_size is not None
# otherwise we parse the rest of the headers and ask the stream
# factory for something we can write in.
else:
is_file = True
guard_memory = False
filename, container = self.start_file_streaming(
filename, headers, content_length)
_write = container.write
buf = ''
for line in iterator:
if not line:
self.fail('unexpected end of stream')
if line[:2] == '--':
terminator = line.rstrip()
if terminator in (next_part, last_part):
break
if transfer_encoding is not None:
try:
line = line.decode(transfer_encoding)
except Exception:
self.fail('could not decode transfer encoded chunk')
# we have something in the buffer from the last iteration.
# this is usually a newline delimiter.
if buf:
_write(buf)
buf = ''
# If the line ends with windows CRLF we write everything except
# the last two bytes. In all other cases however we write
# everything except the last byte. If it was a newline, that's
# fine, otherwise it does not matter because we will write it
# the next iteration. this ensures we do not write the
# final newline into the stream. That way we do not have to
# truncate the stream. However we do have to make sure that
# if something else than a newline is in there we write it
# out.
if line[-2:] == '\r\n':
buf = '\r\n'
cutoff = -2
else:
buf = line[-1]
cutoff = -1
_write(line[:cutoff])
# if we write into memory and there is a memory size limit we
# count the number of bytes in memory and raise an exception if
# there is too much data in memory.
if guard_memory:
in_memory += len(line)
if in_memory > self.max_form_memory_size:
self.in_memory_threshold_reached(in_memory)
else: # pragma: no cover
raise ValueError('unexpected end of part')
# if we have a leftover in the buffer that is not a newline
# character we have to flush it, otherwise we will chop of
# certain values.
if buf not in ('', '\r', '\n', '\r\n'):
_write(buf)
#.........这里部分代码省略.........