This article collects typical usage examples of the Python class bson.py3compat.StringIO. If you have been wondering what StringIO does and how it is used in practice, the curated class examples below may help.
The following 15 code examples of the StringIO class are shown, sorted by popularity by default.
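Before the examples, a brief note on what this class actually is: in the PyMongo source, bson.py3compat.StringIO is a compatibility alias which, to the best of my understanding, points at io.BytesIO on Python 3 (and cStringIO.StringIO on Python 2), so it is an in-memory byte buffer rather than a text buffer. A minimal sketch, assuming PyMongo 3.x is installed (the py3compat module was removed in PyMongo 4):

from bson.py3compat import StringIO  # assumption: available in PyMongo 3.x only

buf = StringIO()
buf.write(b"hello, ")    # bytes in ...
buf.write(b"GridFS")
print(buf.getvalue())    # ... bytes out: b'hello, GridFS'
buf.seek(0)
print(buf.read(5))       # b'hello'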
Example 1: surcor_callback
def surcor_callback(res, error):
    data = StringIO()
    for item in res:
        data.write(item['data'])
    fileobj['data'] = data.getvalue()
    callback(fileobj)
Example 2: _encode_batched_write_command
def _encode_batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Encode the next batched insert, update, or delete command.
    """
    buf = StringIO()

    to_send, _ = _batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx, buf)
    return buf.getvalue(), to_send
Example 3: readline
def readline(self, size=-1):
    """Read one line or up to `size` bytes from the file.

    :Parameters:
     - `size` (optional): the maximum number of bytes to read

    .. versionadded:: 1.9
    """
    if size == 0:
        return b('')

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        pos = chunk_data.find(NEWLN, 0, size)
        if pos != -1:
            size = received + pos + 1

        received += len(chunk_data)
        data.write(chunk_data)
        if pos != -1:
            break

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
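Example 3 is GridOut.readline from PyMongo's gridfs.grid_file: it accumulates whole chunks in a StringIO, stops at the first newline inside the requested window, and stashes the surplus bytes for the next call. A rough usage sketch against the public gridfs API, assuming a MongoDB server is reachable on localhost (the database name is made up):

import gridfs
from pymongo import MongoClient

db = MongoClient().stringio_demo          # hypothetical database name
fs = gridfs.GridFS(db)

file_id = fs.put(b"first line\nsecond line\n", filename="lines.txt")
grid_out = fs.get(file_id)
print(grid_out.readline())  # b'first line\n'
print(grid_out.readline())  # b'second line\n'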
Example 4: read
def read(self, size=-1):
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read
    """
    self._ensure_file()

    if size == 0:
        return EMPTY

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
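The last four lines of Example 4 (shared by the other read variants below) are easy to misread: the chunk loop may overshoot the requested size, so the code seeks to `size`, saves the overshoot into `self.__buffer` for the next call, rewinds, and returns exactly `size` bytes. A standalone sketch of that split, using io.BytesIO in place of the GridOut state:

from io import BytesIO

data = BytesIO()
data.write(b"chunk-1-")        # pretend these two writes came from readchunk()
data.write(b"chunk-2-")
size = 10                      # the caller asked for 10 bytes, but 16 were buffered

data.seek(size)
leftover = data.read()         # b'unk-2-'  -> would be stored in self.__buffer
data.seek(0)
result = data.read(size)       # b'chunk-1-ch' -> returned to the caller
print(result, leftover)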
Example 5: write_me
def write_me(s, chunk_size):
    buf = StringIO(s)
    infile = GridIn(self.db.fs)
    while True:
        to_write = buf.read(chunk_size)
        if to_write == b(''):
            break
        infile.write(to_write)
    infile.close()
    buf.close()

    outfile = GridOut(self.db.fs, infile._id)
    data = outfile.read()
    self.assertEqual(s, data)
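Example 5 is a helper from PyMongo's GridFS tests: it round-trips a byte string through GridIn/GridOut while feeding the writer in fixed-size slices. Outside the test class it could look roughly like this (the test's self.db is replaced by an explicit client; names are illustrative):

from io import BytesIO
from gridfs.grid_file import GridIn, GridOut
from pymongo import MongoClient

db = MongoClient().stringio_demo          # hypothetical database

def write_me(s, chunk_size):
    buf = BytesIO(s)
    infile = GridIn(db.fs)
    while True:
        to_write = buf.read(chunk_size)
        if not to_write:
            break
        infile.write(to_write)
    infile.close()
    buf.close()
    outfile = GridOut(db.fs, infile._id)
    assert outfile.read() == s

write_me(b"hello world" * 100, 16)        # write in 16-byte slices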
Example 6: test_zip
def test_zip(self):
    zf = StringIO()
    z = zipfile.ZipFile(zf, "w")
    z.writestr("test.txt", b"hello world")
    z.close()
    zf.seek(0)

    f = GridIn(self.db.fs, filename="test.zip")
    f.write(zf)
    f.close()
    self.assertEqual(1, self.db.fs.files.count_documents({}))
    self.assertEqual(1, self.db.fs.chunks.count_documents({}))

    g = GridOut(self.db.fs, f._id)
    z = zipfile.ZipFile(g)
    self.assertSequenceEqual(z.namelist(), ["test.txt"])
    self.assertEqual(z.read("test.txt"), b"hello world")
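Example 6 shows that a GridOut object is file-like enough to hand straight to zipfile.ZipFile. A hedged standalone version of the same idea using the public GridFS API rather than the test fixtures (database name is illustrative, local MongoDB assumed):

import zipfile
from io import BytesIO

import gridfs
from pymongo import MongoClient

db = MongoClient().stringio_demo                  # hypothetical database
fs = gridfs.GridFS(db)

# Build a zip archive in memory, then store it in GridFS.
zf = BytesIO()
with zipfile.ZipFile(zf, "w") as z:
    z.writestr("test.txt", b"hello world")
zf.seek(0)
file_id = fs.put(zf, filename="test.zip")

# Read it back by passing the GridOut object directly to ZipFile.
with zipfile.ZipFile(fs.get(file_id)) as z:
    print(z.read("test.txt"))                     # b'hello world'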
Example 7: read
def read(self, size=-1):
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read

    .. versionchanged:: 3.8
       This method now only checks for extra chunks after reading the
       entire file. Previously, this method would check for extra chunks
       on every call.
    """
    self._ensure_file()

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    if size == 0:
        return EMPTY

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)

    # Detect extra chunks after reading the entire file.
    if size == remainder and self.__chunk_iter:
        try:
            self.__chunk_iter.next()
        except StopIteration:
            pass

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
Example 8: read
def read(self, size=-1):
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read
    """
    self._ensure_file()

    if size == 0:
        return EMPTY

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)

    # Detect extra chunks.
    max_chunk_n = math.ceil(self.length / float(self.chunk_size))
    chunk = self.__chunks.find_one({"files_id": self._id,
                                    "n": {"$gte": max_chunk_n}},
                                   session=self._session)
    # According to spec, ignore extra chunks if they are empty.
    if chunk is not None and len(chunk['data']):
        raise CorruptGridFile(
            "Extra chunk found: expected %i chunks but found "
            "chunk with n=%i" % (max_chunk_n, chunk['n']))

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
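Example 8 (an older variant of GridOut.read) queries the chunks collection directly to catch orphaned chunks beyond the expected count and raises CorruptGridFile. A rough sketch of how that guard could be exercised, assuming a local MongoDB server and an illustrative database name; depending on the driver version the error may surface at a slightly different point:

import gridfs
from gridfs.errors import CorruptGridFile
from pymongo import MongoClient

db = MongoClient().stringio_demo                  # hypothetical database
fs = gridfs.GridFS(db)

file_id = fs.put(b"hello world")                  # fits in a single chunk (n=0)
# Simulate corruption: insert a non-empty chunk beyond the expected range.
db.fs.chunks.insert_one({"files_id": file_id, "n": 5, "data": b"junk"})

try:
    fs.get(file_id).read()
except CorruptGridFile as exc:
    print("corruption detected:", exc)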
Example 9: __init__
def __init__(self, client, root_collection, **kwargs):
    self.client = client
    self.root_collection = root_collection
    self._files = self.client.connection(files_coll(self.root_collection))
    self._chunks = self.client.connection(chunks_coll(self.root_collection))

    # Handle alternative naming
    if "content_type" in kwargs:
        kwargs["contentType"] = kwargs.pop("content_type")
    if "chunk_size" in kwargs:
        kwargs["chunkSize"] = kwargs.pop("chunk_size")

    # Defaults
    kwargs["_id"] = kwargs.get("_id", ObjectId())
    kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)

    self._file = kwargs
    self._chunk_number = 0
    self._position = 0
    self._buffer = StringIO()
Example 10: _do_batched_write_command
def _do_batched_write_command(namespace, operation, command,
                              docs, check_keys, opts, ctx):
    """Execute a batch of insert, update, or delete commands.
    """
    max_bson_size = ctx.max_bson_size
    max_write_batch_size = ctx.max_write_batch_size
    # Max BSON object size + 16k - 2 bytes for ending NUL bytes.
    # Server guarantees there is enough room: SERVER-10643.
    max_cmd_size = max_bson_size + _COMMAND_OVERHEAD

    ordered = command.get('ordered', True)

    buf = StringIO()
    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00")
    # No options
    buf.write(_ZERO_32)
    # Namespace as C string
    buf.write(b(namespace))
    buf.write(_ZERO_8)
    # Skip: 0, Limit: -1
    buf.write(_SKIPLIM)

    # Where to write command document length
    command_start = buf.tell()
    buf.write(bson.BSON.encode(command))

    # Start of payload
    buf.seek(-1, 2)
    # Work around some Jython weirdness.
    buf.truncate()
    try:
        buf.write(_OP_MAP[operation])
    except KeyError:
        raise InvalidOperation('Unknown command')

    if operation in (_UPDATE, _DELETE):
        check_keys = False

    # Where to write list document length
    list_start = buf.tell() - 4
    to_send = []

    def send_message():
        """Finalize and send the current OP_QUERY message.
        """
        # Close list and command documents
        buf.write(_ZERO_16)

        # Write document lengths and request id
        length = buf.tell()
        buf.seek(list_start)
        buf.write(struct.pack('<i', length - list_start - 1))
        buf.seek(command_start)
        buf.write(struct.pack('<i', length - command_start))
        buf.seek(4)
        request_id = _randint()
        buf.write(struct.pack('<i', request_id))
        buf.seek(0)
        buf.write(struct.pack('<i', length))
        return ctx.write_command(request_id, buf.getvalue(), to_send)

    # If there are multiple batches we'll
    # merge results in the caller.
    results = []

    idx = 0
    idx_offset = 0
    has_docs = False
    for doc in docs:
        has_docs = True
        # Encode the current operation
        key = b(str(idx))
        value = bson.BSON.encode(doc, check_keys, opts)
        # Send a batch?
        enough_data = (buf.tell() + len(key) + len(value) + 2) >= max_cmd_size
        enough_documents = (idx >= max_write_batch_size)
        if enough_data or enough_documents:
            if not idx:
                write_op = "insert" if operation == _INSERT else None
                _raise_document_too_large(
                    write_op, len(value), max_bson_size)
            result = send_message()
            results.append((idx_offset, result))
            if ordered and "writeErrors" in result:
                return results

            # Truncate back to the start of list elements
            buf.seek(list_start + 4)
            buf.truncate()
            idx_offset += idx
            idx = 0
            key = b'0'
            to_send = []
        buf.write(_BSONOBJ)
        buf.write(key)
        buf.write(_ZERO_8)
        # ... (the remainder of this example is omitted) ...
Example 11: GridIn
class GridIn(object):
    """Class to write data to GridFS.
    """
    def __init__(self, root_collection, **kwargs):
        """Write a file to GridFS

        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.

        Raises :class:`TypeError` if `root_collection` is not an
        instance of :class:`~pymongo.collection.Collection`.

        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:

          - ``"_id"``: unique ID for this file (default:
            :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
            not have already been used for another file
          - ``"filename"``: human name for the file
          - ``"contentType"`` or ``"content_type"``: valid mime-type
            for the file
          - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
            chunks, in bytes (default: 255 kb)
          - ``"encoding"``: encoding used for this file. In Python 2,
            any :class:`unicode` that is written to the file will be
            converted to a :class:`str`. In Python 3, any :class:`str`
            that is written to the file will be converted to
            :class:`bytes`.

        :Parameters:
          - `root_collection`: root collection to write to
          - `**kwargs` (optional): file level options (see above)

        .. versionchanged:: 3.0
           `root_collection` must use an acknowledged
           :attr:`~pymongo.collection.Collection.write_concern`
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an "
                            "instance of Collection")

        # With w=0, 'filemd5' might run before the final chunks are written.
        if not root_collection.write_concern.acknowledged:
            raise ConfigurationError('root_collection must use '
                                     'acknowledged write_concern')

        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")

        kwargs['md5'] = md5()
        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
        object.__setattr__(self, "_coll", root_collection)
        object.__setattr__(self, "_chunks", root_collection.chunks)
        object.__setattr__(self, "_file", kwargs)
        object.__setattr__(self, "_buffer", StringIO())
        object.__setattr__(self, "_position", 0)
        object.__setattr__(self, "_chunk_number", 0)
        object.__setattr__(self, "_closed", False)
        object.__setattr__(self, "_ensured_index", False)

    def __create_index(self, collection, index, unique):
        doc = collection.find_one(projection={"_id": 1})
        if doc is None:
            try:
                indexes = list(collection.list_indexes())
            except OperationFailure:
                indexes = []
            if index not in indexes:
                collection.create_index(index, unique=unique)

    def __ensure_indexes(self):
        if not object.__getattribute__(self, "_ensured_index"):
            self.__create_index(self._coll.files, _F_INDEX, False)
            self.__create_index(self._coll.chunks, _C_INDEX, True)
            object.__setattr__(self, "_ensured_index", True)

    def abort(self):
        """Remove all chunks/files that may have been uploaded and close.
        """
        self._coll.chunks.delete_many({"files_id": self._file['_id']})
        self._coll.files.delete_one({"_id": self._file['_id']})
        object.__setattr__(self, "_closed", True)

    @property
    def closed(self):
        """Is this file closed?
        # ... (the remainder of this example is omitted) ...
Example 12: __flush_buffer
def __flush_buffer(self):
    """Flush the buffer contents out to a chunk.
    """
    self.__flush_data(self._buffer.getvalue())
    self._buffer.close()
    self._buffer = StringIO()
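Example 12 is the flush-and-reset idiom GridIn uses internally: drain the buffer into a chunk, close it, and replace it with a fresh StringIO. A minimal self-contained sketch of the same pattern (BufferedWriter and flush_to are illustrative names, not part of PyMongo):

from io import BytesIO   # stands in for bson.py3compat.StringIO on Python 3

class BufferedWriter:
    def __init__(self, flush_to, chunk_size=8):
        self._flush_to = flush_to            # sink that receives a full "chunk"
        self._chunk_size = chunk_size
        self._buffer = BytesIO()

    def write(self, data):
        self._buffer.write(data)
        if self._buffer.tell() >= self._chunk_size:
            self._flush_buffer()

    def _flush_buffer(self):
        self._flush_to(self._buffer.getvalue())
        self._buffer.close()
        self._buffer = BytesIO()             # start over with an empty buffer

chunks = []
writer = BufferedWriter(chunks.append)
writer.write(b"hello world!")
print(chunks)                                # [b'hello world!']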
Example 13: test_download_to_stream_by_name
def test_download_to_stream_by_name(self):
    file1 = StringIO(b"hello world")
    # Test with one chunk.
    oid = self.fs.upload_from_stream("one_chunk", file1)
    self.assertEqual(1, self.db.fs.chunks.count())
    file2 = StringIO()
    self.fs.download_to_stream_by_name("one_chunk", file2)
    file1.seek(0)
    file2.seek(0)
    self.assertEqual(file1.read(), file2.read())

    # Test with many chunks.
    self.db.drop_collection("fs.files")
    self.db.drop_collection("fs.chunks")
    file1.seek(0)
    self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1)
    self.assertEqual(11, self.db.fs.chunks.count())

    file2 = StringIO()
    self.fs.download_to_stream_by_name("many_chunks", file2)
    file1.seek(0)
    file2.seek(0)
    self.assertEqual(file1.read(), file2.read())
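Example 13 exercises the GridFSBucket API, where StringIO/BytesIO objects serve as the source and destination streams. A hedged standalone sketch of the same flow (database and file names are illustrative, local MongoDB assumed):

from io import BytesIO

import gridfs
from pymongo import MongoClient

db = MongoClient().stringio_demo                   # hypothetical database
bucket = gridfs.GridFSBucket(db)

source = BytesIO(b"hello world")
bucket.upload_from_stream("greeting.txt", source, chunk_size_bytes=4)

dest = BytesIO()
bucket.download_to_stream_by_name("greeting.txt", dest)
print(dest.getvalue())                             # b'hello world'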
Example 14: GridIn
class GridIn(object):
    """Class to write data to GridFS.
    """
    def __init__(self, root_collection, **kwargs):
        """Write a file to GridFS

        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.

        Raises :class:`TypeError` if `root_collection` is not an
        instance of :class:`~pymongo.collection.Collection`.

        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:

          - ``"_id"``: unique ID for this file (default:
            :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
            not have already been used for another file
          - ``"filename"``: human name for the file
          - ``"contentType"`` or ``"content_type"``: valid mime-type
            for the file
          - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
            chunks, in bytes (default: 256 kb)
          - ``"encoding"``: encoding used for this file. In Python 2,
            any :class:`unicode` that is written to the file will be
            converted to a :class:`str`. In Python 3, any :class:`str`
            that is written to the file will be converted to
            :class:`bytes`.

        If you turn off write-acknowledgment for performance reasons, it is
        critical to wrap calls to :meth:`write` and :meth:`close` within a
        single request:

          >>> from pymongo import MongoClient
          >>> from gridfs import GridFS
          >>> client = MongoClient(w=0)  # turn off write acknowledgment
          >>> fs = GridFS(client.database)
          >>> gridin = fs.new_file()
          >>> request = client.start_request()
          >>> try:
          ...     for i in range(10):
          ...         gridin.write('foo')
          ...     gridin.close()
          ... finally:
          ...     request.end()

        In Python 2.5 and later this code can be simplified with a
        with-statement, see :doc:`/examples/requests` for more information.

        :Parameters:
          - `root_collection`: root collection to write to
          - `**kwargs` (optional): file level options (see above)
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an "
                            "instance of Collection")

        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")

        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
        object.__setattr__(self, "_coll", root_collection)
        object.__setattr__(self, "_chunks", root_collection.chunks)
        object.__setattr__(self, "_file", kwargs)
        object.__setattr__(self, "_buffer", StringIO())
        object.__setattr__(self, "_position", 0)
        object.__setattr__(self, "_chunk_number", 0)
        object.__setattr__(self, "_closed", False)
        object.__setattr__(self, "_ensured_index", False)

    def _ensure_index(self):
        if not object.__getattribute__(self, "_ensured_index"):
            self._coll.chunks.ensure_index(
                [("files_id", ASCENDING), ("n", ASCENDING)],
                unique=True)
            object.__setattr__(self, "_ensured_index", True)

    @property
    def closed(self):
        """Is this file closed?
        """
        return self._closed

    _id = _create_property("_id", "The ``'_id'`` value for this file.",
                           read_only=True)
    filename = _create_property("filename", "Name of this file.")
    name = _create_property("filename", "Alias for `filename`.")
    # ... (the remainder of this example is omitted) ...
Example 15: _do_batched_write_command
def _do_batched_write_command(namespace, operation, command,
                              docs, check_keys, opts, ctx):
    """Create the next batched insert, update, or delete command.
    """
    max_bson_size = ctx.max_bson_size
    max_write_batch_size = ctx.max_write_batch_size
    # Max BSON object size + 16k - 2 bytes for ending NUL bytes.
    # Server guarantees there is enough room: SERVER-10643.
    max_cmd_size = max_bson_size + _COMMAND_OVERHEAD

    buf = StringIO()
    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00")
    # No options
    buf.write(_ZERO_32)
    # Namespace as C string
    buf.write(b(namespace))
    buf.write(_ZERO_8)
    # Skip: 0, Limit: -1
    buf.write(_SKIPLIM)

    # Where to write command document length
    command_start = buf.tell()
    buf.write(bson.BSON.encode(command))

    # Start of payload
    buf.seek(-1, 2)
    # Work around some Jython weirdness.
    buf.truncate()
    try:
        buf.write(_OP_MAP[operation])
    except KeyError:
        raise InvalidOperation('Unknown command')

    if operation in (_UPDATE, _DELETE):
        check_keys = False

    # Where to write list document length
    list_start = buf.tell() - 4
    to_send = []

    idx = 0
    for doc in docs:
        # Encode the current operation
        key = b(str(idx))
        value = bson.BSON.encode(doc, check_keys, opts)
        # Is there enough room to add this document? max_cmd_size accounts for
        # the two trailing null bytes.
        enough_data = (buf.tell() + len(key) + len(value)) >= max_cmd_size
        enough_documents = (idx >= max_write_batch_size)
        if enough_data or enough_documents:
            if not idx:
                write_op = "insert" if operation == _INSERT else None
                _raise_document_too_large(
                    write_op, len(value), max_bson_size)
            break
        buf.write(_BSONOBJ)
        buf.write(key)
        buf.write(_ZERO_8)
        buf.write(value)
        to_send.append(doc)
        idx += 1

    # Finalize the current OP_QUERY message.
    # Close list and command documents
    buf.write(_ZERO_16)

    # Write document lengths and request id
    length = buf.tell()
    buf.seek(list_start)
    buf.write(struct.pack('<i', length - list_start - 1))
    buf.seek(command_start)
    buf.write(struct.pack('<i', length - command_start))
    buf.seek(4)
    request_id = _randint()
    buf.write(struct.pack('<i', request_id))
    buf.seek(0)
    buf.write(struct.pack('<i', length))

    return request_id, buf.getvalue(), to_send
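Examples 10 and 15 both build a raw wire-protocol message with the reserve-then-patch idiom: write placeholder zero bytes where the message length and request id belong, append the payload, then seek back and overwrite the placeholders once the final length is known. A minimal standalone sketch of that idiom:

import struct
from io import BytesIO

buf = BytesIO()
buf.write(b"\x00\x00\x00\x00")            # placeholder for the 4-byte message length
buf.write(b"payload bytes go here")       # the actual body

length = buf.tell()
buf.seek(0)
buf.write(struct.pack("<i", length))      # patch in the real little-endian length
message = buf.getvalue()
print(length, message[:4])                # 25 b'\x19\x00\x00\x00'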