This article collects typical usage examples of the Python method bson.py3compat.StringIO.write. If you have been wondering what StringIO.write does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore further examples of the containing class, bson.py3compat.StringIO.
The following shows 15 code examples of StringIO.write, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
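Before the examples, one note on what StringIO means here: in PyMongo 3.x, bson.py3compat aliases a bytes buffer (io.BytesIO on Python 3, cStringIO.StringIO on Python 2), so StringIO.write appends raw bytes to an in-memory buffer. A minimal sketch, assuming a PyMongo 3.x install where bson.py3compat still exists:

from bson.py3compat import StringIO

buf = StringIO()
buf.write(b"hello ")   # write() appends bytes to the in-memory buffer
buf.write(b"world")
print(buf.getvalue())  # b'hello world'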
Example 1: readline
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def readline(self, size=-1):
    """Read one line or up to `size` bytes from the file.

    :Parameters:
      - `size` (optional): the maximum number of bytes to read

    .. versionadded:: 1.9
    """
    if size == 0:
        return b('')

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        pos = chunk_data.find(NEWLN, 0, size)
        if pos != -1:
            size = received + pos + 1

        received += len(chunk_data)
        data.write(chunk_data)
        if pos != -1:
            break

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)

    return data.read(size)
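The tail of readline shows the buffering idiom that recurs throughout these examples: write everything received into a StringIO, seek to size, stash the overflow for the next call, then seek back and return exactly size bytes. A self-contained sketch of that idiom using io.BytesIO (the Python 3 object behind bson.py3compat.StringIO):

from io import BytesIO

def split_at(data_bytes, size):
    """Return the first `size` bytes and the remainder, via one buffer."""
    data = BytesIO()
    data.write(data_bytes)
    data.seek(size)
    leftover = data.read()   # everything past `size` is kept for later
    data.seek(0)
    return data.read(size), leftover

head, rest = split_at(b"one\ntwo\n", 4)
print(head, rest)  # b'one\n' b'two\n'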
Example 2: surcor_callback
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def surcor_callback(res, error):
    # `fileobj` and `callback` are captured from the enclosing scope.
    data = StringIO()
    for item in res:
        data.write(item['data'])
    fileobj['data'] = data.getvalue()
    callback(fileobj)
Example 3: read
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def read(self, size=-1):
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read
    """
    self._ensure_file()

    if size == 0:
        return EMPTY

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
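To see the accumulate-then-trim loop from read in isolation, here is a hypothetical stand-in that serves pre-split chunks from a list instead of querying MongoDB; the buffer handling mirrors the method above:

from io import BytesIO

class FakeGridOut:
    """Hypothetical stand-in: serves pre-split chunks like GridOut.readchunk."""
    def __init__(self, chunks):
        self._chunks = iter(chunks)
        self.length = sum(len(c) for c in chunks)
        self._position = 0
        self._buffer = b""

    def readchunk(self):
        if self._buffer:
            chunk, self._buffer = self._buffer, b""
            return chunk
        return next(self._chunks)

    def read(self, size=-1):
        remainder = self.length - self._position
        if size < 0 or size > remainder:
            size = remainder
        received = 0
        data = BytesIO()
        while received < size:
            chunk_data = self.readchunk()
            received += len(chunk_data)
            data.write(chunk_data)
        self._position += size          # advance by what we hand back
        data.seek(size)
        self._buffer = data.read()      # stash the overshoot for next time
        data.seek(0)
        return data.read(size)

f = FakeGridOut([b"abcd", b"efgh"])
print(f.read(6), f.read())  # b'abcdef' b'gh'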
Example 4: read
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def read(self, size=-1):
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read

    .. versionchanged:: 3.8
       This method now only checks for extra chunks after reading the
       entire file. Previously, this method would check for extra chunks
       on every call.
    """
    self._ensure_file()

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    if size == 0:
        return EMPTY

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)

    # Detect extra chunks after reading the entire file.
    if size == remainder and self.__chunk_iter:
        try:
            self.__chunk_iter.next()
        except StopIteration:
            pass

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
Example 5: read
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def read(self, size=-1):
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read
    """
    self._ensure_file()

    if size == 0:
        return EMPTY

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)

    # Detect extra chunks.
    max_chunk_n = math.ceil(self.length / float(self.chunk_size))
    chunk = self.__chunks.find_one({"files_id": self._id,
                                    "n": {"$gte": max_chunk_n}},
                                   session=self._session)
    # According to spec, ignore extra chunks if they are empty.
    if chunk is not None and len(chunk['data']):
        raise CorruptGridFile(
            "Extra chunk found: expected %i chunks but found "
            "chunk with n=%i" % (max_chunk_n, chunk['n']))

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
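The extra-chunk check rests on simple arithmetic: a file of length bytes stored in chunk_size-byte chunks should occupy exactly ceil(length / chunk_size) chunks, so any chunk numbered n >= max_chunk_n is surplus. A worked sketch:

import math

# A 600-byte file stored in 255-byte chunks needs chunks n=0, 1, 2;
# any stored chunk with n >= 3 is surplus.
length, chunk_size = 600, 255
max_chunk_n = math.ceil(length / float(chunk_size))
print(max_chunk_n)  # 3 -> valid chunk numbers are 0..2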
Example 6: _do_batched_write_command
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def _do_batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Create the next batched insert, update, or delete command.
    """
    buf = StringIO()

    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00")

    # Write OP_QUERY write command
    to_send, length = _batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx, buf)

    # Header - request id and message length
    buf.seek(4)
    request_id = _randint()
    buf.write(struct.pack('<i', request_id))
    buf.seek(0)
    buf.write(struct.pack('<i', length))

    return request_id, buf.getvalue(), to_send
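The _ZERO_64 placeholder and the two seek/write calls implement a common wire-protocol idiom: reserve the header bytes up front, build the body, then patch the real message length and request id in place. (The literal b"\x00\x00\x00\x00\xd4\x07\x00\x00" is responseTo 0 followed by opcode 2004, OP_QUERY, in little-endian.) A minimal sketch of the idiom with plain io.BytesIO and hypothetical values:

import struct
from io import BytesIO

buf = BytesIO()
buf.write(b"\x00" * 8)              # placeholder: messageLength + requestID
buf.write(b"\x00\x00\x00\x00")      # responseTo
buf.write(struct.pack("<i", 2004))  # opCode (OP_QUERY)
buf.write(b"body goes here")        # pretend this is the encoded command

length = buf.tell()
buf.seek(4)
buf.write(struct.pack("<i", 12345))   # patch in the request id
buf.seek(0)
buf.write(struct.pack("<i", length))  # patch in the total message length
msg = buf.getvalue()
print(len(msg) == length)  # True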
Example 7: GridIn
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
class GridIn(object):
    """Class to write data to GridFS.
    """
    def __init__(self, root_collection, **kwargs):
        """Write a file to GridFS

        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.

        Raises :class:`TypeError` if `root_collection` is not an
        instance of :class:`~pymongo.collection.Collection`.

        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:

          - ``"_id"``: unique ID for this file (default:
            :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
            not have already been used for another file

          - ``"filename"``: human name for the file

          - ``"contentType"`` or ``"content_type"``: valid mime-type
            for the file

          - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
            chunks, in bytes (default: 256 kb)

          - ``"encoding"``: encoding used for this file. In Python 2,
            any :class:`unicode` that is written to the file will be
            converted to a :class:`str`. In Python 3, any :class:`str`
            that is written to the file will be converted to
            :class:`bytes`.

        :Parameters:
          - `root_collection`: root collection to write to
          - `**kwargs` (optional): file level options (see above)
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an "
                            "instance of Collection")

        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")

        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)

        root_collection.chunks.ensure_index([("files_id", ASCENDING),
                                             ("n", ASCENDING)],
                                            unique=True)
        object.__setattr__(self, "_coll", root_collection)
        object.__setattr__(self, "_chunks", root_collection.chunks)
        object.__setattr__(self, "_file", kwargs)
        object.__setattr__(self, "_buffer", StringIO())
        object.__setattr__(self, "_position", 0)
        object.__setattr__(self, "_chunk_number", 0)
        object.__setattr__(self, "_closed", False)

    @property
    def closed(self):
        """Is this file closed?
        """
        return self._closed

    _id = _create_property("_id", "The ``'_id'`` value for this file.",
                           read_only=True)
    filename = _create_property("filename", "Name of this file.")
    content_type = _create_property("contentType", "Mime-type for this file.")
    length = _create_property("length", "Length (in bytes) of this file.",
                              closed_only=True)
    chunk_size = _create_property("chunkSize", "Chunk size for this file.",
                                  read_only=True)
    upload_date = _create_property("uploadDate",
                                   "Date that this file was uploaded.",
                                   closed_only=True)
    md5 = _create_property("md5", "MD5 of the contents of this file "
                           "(generated on the server).",
                           closed_only=True)

    def __getattr__(self, name):
        if name in self._file:
            return self._file[name]
        raise AttributeError("GridIn object has no attribute '%s'" % name)

    def __setattr__(self, name, value):
        object.__setattr__(self, name, value)
        if self._closed:
            self._coll.files.update({"_id": self._file["_id"]},
                                    {"$set": {name: value}}, safe=True)

    def __flush_data(self, data):
        """Flush `data` to a chunk.
# ... (the rest of this code is omitted) ...
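For completeness, a hedged usage sketch of this class through the public gridfs API; it assumes a running MongoDB on localhost, and the database and file names are placeholders:

from pymongo import MongoClient
import gridfs

db = MongoClient().my_database
fs = gridfs.GridFS(db)

grid_in = fs.new_file(filename="example.txt", contentType="text/plain")
grid_in.write(b"hello gridfs")  # buffered in a StringIO until chunkSize
grid_in.close()                 # flushes the buffer, writes the file doc
print(fs.get(grid_in._id).read())  # b'hello gridfs'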
Example 8: _do_batched_write_command
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def _do_batched_write_command(namespace, operation, command,
                              docs, check_keys, uuid_subtype, client):
    """Execute a batch of insert, update, or delete commands.
    """
    max_bson_size = client.max_bson_size
    # Max BSON object size + 16k - 2 bytes for ending NUL bytes
    # XXX: This should come from the server - SERVER-10643
    max_cmd_size = max_bson_size + 16382

    ordered = command.get('ordered', True)

    buf = StringIO()
    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b("\x00\x00\x00\x00\xd4\x07\x00\x00"))
    # No options
    buf.write(_ZERO_32)
    # Namespace as C string
    buf.write(b(namespace))
    buf.write(_ZERO_8)
    # Skip: 0, Limit: -1
    buf.write(_SKIPLIM)

    # Where to write command document length
    command_start = buf.tell()
    buf.write(bson.BSON.encode(command))

    # Start of payload
    buf.seek(-1, 2)
    # Work around some Jython weirdness.
    buf.truncate()
    try:
        buf.write(_OP_MAP[operation])
    except KeyError:
        raise InvalidOperation('Unknown command')

    if operation in (_UPDATE, _DELETE):
        check_keys = False

    # Where to write list document length
    list_start = buf.tell() - 4

    def send_message():
        """Finalize and send the current OP_QUERY message.
        """
        # Close list and command documents
        buf.write(_ZERO_16)

        # Write document lengths and request id
        length = buf.tell()
        buf.seek(list_start)
        buf.write(struct.pack('<i', length - list_start - 1))
        buf.seek(command_start)
        buf.write(struct.pack('<i', length - command_start))
        buf.seek(4)
        request_id = random.randint(MIN_INT32, MAX_INT32)
        buf.write(struct.pack('<i', request_id))
        buf.seek(0)
        buf.write(struct.pack('<i', length))

        return client._send_message((request_id, buf.getvalue()),
                                    with_last_error=True,
                                    command=True)

    # If there are multiple batches we'll
    # merge results in the caller.
    results = []

    idx = 0
    idx_offset = 0
    has_docs = False
    for doc in docs:
        has_docs = True
        # Encode the current operation
        key = b(str(idx))
        value = bson.BSON.encode(doc, check_keys, uuid_subtype)
        # Send a batch?
        if (buf.tell() + len(key) + len(value) + 2) >= max_cmd_size:
            if not idx:
                if operation == _INSERT:
                    raise InvalidDocument("BSON document too large (%d bytes)"
                                          " - the connected server supports"
                                          " BSON document sizes up to %d"
                                          " bytes." % (len(value),
                                                       max_bson_size))
                # There's nothing intelligent we can say
                # about size for update and remove
                raise InvalidDocument("command document too large")
            result = send_message()
            results.append((idx_offset, result))

            if ordered and "writeErrors" in result:
                return results

            # Truncate back to the start of list elements
            buf.seek(list_start + 4)
            buf.truncate()
            idx_offset += idx
            idx = 0
            key = b('0')
# ... (the rest of this code is omitted) ...
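Stripped of the wire format, the batching policy above is: append encoded documents until adding the next one would cross max_cmd_size, flush, and start a new batch. A simplified, self-contained analogue:

def batch_by_size(encoded_docs, max_batch_bytes):
    """Split encoded documents into batches, flushing before a batch
    would exceed max_batch_bytes (a simplified analogue of the size
    check in _do_batched_write_command)."""
    batches, current, current_size = [], [], 0
    for doc in encoded_docs:
        if current and current_size + len(doc) >= max_batch_bytes:
            batches.append(current)
            current, current_size = [], 0
        current.append(doc)
        current_size += len(doc)
    if current:
        batches.append(current)
    return batches

docs = [b"x" * 40, b"y" * 40, b"z" * 40]
print([len(batch) for batch in batch_by_size(docs, 100)])  # [2, 1]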
Example 9: _do_batched_insert
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def _do_batched_insert(collection_name, docs, check_keys,
                       safe, last_error_args, continue_on_error, opts,
                       sock_info):
    """Insert `docs` using multiple batches.
    """
    def _insert_message(insert_message, send_safe):
        """Build the insert message with header and GLE.
        """
        request_id, final_message = __pack_message(2002, insert_message)
        if send_safe:
            request_id, error_message, _ = __last_error(collection_name,
                                                        last_error_args)
            final_message += error_message
        return request_id, final_message

    send_safe = safe or not continue_on_error
    last_error = None
    data = StringIO()
    data.write(struct.pack("<i", int(continue_on_error)))
    data.write(bson._make_c_string(collection_name))
    message_length = begin_loc = data.tell()
    has_docs = False
    for doc in docs:
        encoded = bson.BSON.encode(doc, check_keys, opts)
        encoded_length = len(encoded)
        too_large = (encoded_length > sock_info.max_bson_size)

        message_length += encoded_length
        if message_length < sock_info.max_message_size and not too_large:
            data.write(encoded)
            has_docs = True
            continue

        if has_docs:
            # We have enough data, send this message.
            try:
                request_id, msg = _insert_message(data.getvalue(), send_safe)
                sock_info.legacy_write(request_id, msg, 0, send_safe)
            # Exception type could be OperationFailure or a subtype
            # (e.g. DuplicateKeyError)
            except OperationFailure as exc:
                # Like it says, continue on error...
                if continue_on_error:
                    # Store exception details to re-raise after the
                    # final batch.
                    last_error = exc
                # With unacknowledged writes just return at the first error.
                elif not safe:
                    return
                # With acknowledged writes raise immediately.
                else:
                    raise

        if too_large:
            raise DocumentTooLarge("BSON document too large (%d bytes)"
                                   " - the connected server supports"
                                   " BSON document sizes up to %d"
                                   " bytes." %
                                   (encoded_length, sock_info.max_bson_size))

        message_length = begin_loc + encoded_length
        data.seek(begin_loc)
        data.truncate()
        data.write(encoded)

    if not has_docs:
        raise InvalidOperation("cannot do an empty bulk insert")

    request_id, msg = _insert_message(data.getvalue(), safe)
    sock_info.legacy_write(request_id, msg, 0, safe)

    # Re-raise any exception stored due to continue_on_error
    if last_error is not None:
        raise last_error
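The first two writes build the OP_INSERT preamble: an int32 of flags (here just continue_on_error) followed by the namespace as a NUL-terminated C string. bson._make_c_string is a private helper, so the sketch below is an approximation of its behavior, with a placeholder namespace:

import struct

def make_c_string(name):
    """Encode a namespace as a BSON C string: UTF-8 bytes plus a
    trailing NUL (approximating bson._make_c_string)."""
    encoded = name.encode("utf-8")
    if b"\x00" in encoded:
        raise ValueError("BSON C strings cannot contain a NUL byte")
    return encoded + b"\x00"

# The OP_INSERT body starts with the continue_on_error flag and the
# namespace, exactly as in _do_batched_insert above.
header = struct.pack("<i", 1) + make_c_string("test.coll")
print(header)  # b'\x01\x00\x00\x00test.coll\x00'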
Example 10: GridIn
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
class GridIn(object):
    """Class to write data to GridFS.
    """
    def __init__(self, root_collection, **kwargs):
        """Write a file to GridFS

        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.

        Raises :class:`TypeError` if `root_collection` is not an
        instance of :class:`~pymongo.collection.Collection`.

        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:

          - ``"_id"``: unique ID for this file (default:
            :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
            not have already been used for another file

          - ``"filename"``: human name for the file

          - ``"contentType"`` or ``"content_type"``: valid mime-type
            for the file

          - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
            chunks, in bytes (default: 256 kb)

          - ``"encoding"``: encoding used for this file. In Python 2,
            any :class:`unicode` that is written to the file will be
            converted to a :class:`str`. In Python 3, any :class:`str`
            that is written to the file will be converted to
            :class:`bytes`.

        If you turn off write-acknowledgment for performance reasons, it is
        critical to wrap calls to :meth:`write` and :meth:`close` within a
        single request:

          >>> from pymongo import MongoClient
          >>> from gridfs import GridFS
          >>> client = MongoClient(w=0) # turn off write acknowledgment
          >>> fs = GridFS(client.database)
          >>> gridin = fs.new_file()
          >>> request = client.start_request()
          >>> try:
          ...     for i in range(10):
          ...         gridin.write('foo')
          ...     gridin.close()
          ... finally:
          ...     request.end()

        In Python 2.5 and later this code can be simplified with a
        with-statement, see :doc:`/examples/requests` for more information.

        :Parameters:
          - `root_collection`: root collection to write to
          - `**kwargs` (optional): file level options (see above)
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an "
                            "instance of Collection")

        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")

        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)

        object.__setattr__(self, "_coll", root_collection)
        object.__setattr__(self, "_chunks", root_collection.chunks)
        object.__setattr__(self, "_file", kwargs)
        object.__setattr__(self, "_buffer", StringIO())
        object.__setattr__(self, "_position", 0)
        object.__setattr__(self, "_chunk_number", 0)
        object.__setattr__(self, "_closed", False)
        object.__setattr__(self, "_ensured_index", False)

    def _ensure_index(self):
        if not object.__getattribute__(self, "_ensured_index"):
            self._coll.chunks.ensure_index(
                [("files_id", ASCENDING), ("n", ASCENDING)],
                unique=True)
            object.__setattr__(self, "_ensured_index", True)

    @property
    def closed(self):
        """Is this file closed?
        """
        return self._closed

    _id = _create_property("_id", "The ``'_id'`` value for this file.",
                           read_only=True)
    filename = _create_property("filename", "Name of this file.")
    name = _create_property("filename", "Alias for `filename`.")
# ... (the rest of this code is omitted) ...
Example 11: _do_batched_write_command
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def _do_batched_write_command(namespace, operation, command,
                              docs, check_keys, opts, ctx):
    """Execute a batch of insert, update, or delete commands.
    """
    max_bson_size = ctx.max_bson_size
    max_write_batch_size = ctx.max_write_batch_size
    # Max BSON object size + 16k - 2 bytes for ending NUL bytes.
    # Server guarantees there is enough room: SERVER-10643.
    max_cmd_size = max_bson_size + _COMMAND_OVERHEAD

    ordered = command.get('ordered', True)

    buf = StringIO()
    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00")
    # No options
    buf.write(_ZERO_32)
    # Namespace as C string
    buf.write(b(namespace))
    buf.write(_ZERO_8)
    # Skip: 0, Limit: -1
    buf.write(_SKIPLIM)

    # Where to write command document length
    command_start = buf.tell()
    buf.write(bson.BSON.encode(command))

    # Start of payload
    buf.seek(-1, 2)
    # Work around some Jython weirdness.
    buf.truncate()
    try:
        buf.write(_OP_MAP[operation])
    except KeyError:
        raise InvalidOperation('Unknown command')

    if operation in (_UPDATE, _DELETE):
        check_keys = False

    # Where to write list document length
    list_start = buf.tell() - 4
    to_send = []

    def send_message():
        """Finalize and send the current OP_QUERY message.
        """
        # Close list and command documents
        buf.write(_ZERO_16)

        # Write document lengths and request id
        length = buf.tell()
        buf.seek(list_start)
        buf.write(struct.pack('<i', length - list_start - 1))
        buf.seek(command_start)
        buf.write(struct.pack('<i', length - command_start))
        buf.seek(4)
        request_id = _randint()
        buf.write(struct.pack('<i', request_id))
        buf.seek(0)
        buf.write(struct.pack('<i', length))

        return ctx.write_command(request_id, buf.getvalue(), to_send)

    # If there are multiple batches we'll
    # merge results in the caller.
    results = []

    idx = 0
    idx_offset = 0
    has_docs = False
    for doc in docs:
        has_docs = True
        # Encode the current operation
        key = b(str(idx))
        value = bson.BSON.encode(doc, check_keys, opts)
        # Send a batch?
        enough_data = (buf.tell() + len(key) + len(value) + 2) >= max_cmd_size
        enough_documents = (idx >= max_write_batch_size)
        if enough_data or enough_documents:
            if not idx:
                write_op = "insert" if operation == _INSERT else None
                _raise_document_too_large(
                    write_op, len(value), max_bson_size)
            result = send_message()
            results.append((idx_offset, result))

            if ordered and "writeErrors" in result:
                return results

            # Truncate back to the start of list elements
            buf.seek(list_start + 4)
            buf.truncate()
            idx_offset += idx
            idx = 0
            key = b'0'
            to_send = []
        buf.write(_BSONOBJ)
        buf.write(key)
        buf.write(_ZERO_8)
# ... (the rest of this code is omitted) ...
Example 12: GridIn
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
class GridIn(object):
    """Class to write data to GridFS.
    """
    def __init__(self, root_collection, **kwargs):
        """Write a file to GridFS

        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.

        Raises :class:`TypeError` if `root_collection` is not an
        instance of :class:`~pymongo.collection.Collection`.

        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:

          - ``"_id"``: unique ID for this file (default:
            :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
            not have already been used for another file

          - ``"filename"``: human name for the file

          - ``"contentType"`` or ``"content_type"``: valid mime-type
            for the file

          - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
            chunks, in bytes (default: 255 kb)

          - ``"encoding"``: encoding used for this file. In Python 2,
            any :class:`unicode` that is written to the file will be
            converted to a :class:`str`. In Python 3, any :class:`str`
            that is written to the file will be converted to
            :class:`bytes`.

        :Parameters:
          - `root_collection`: root collection to write to
          - `**kwargs` (optional): file level options (see above)

        .. versionchanged:: 3.0
           `root_collection` must use an acknowledged
           :attr:`~pymongo.collection.Collection.write_concern`
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an "
                            "instance of Collection")

        # With w=0, 'filemd5' might run before the final chunks are written.
        if not root_collection.write_concern.acknowledged:
            raise ConfigurationError('root_collection must use '
                                     'acknowledged write_concern')

        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")

        kwargs['md5'] = md5()

        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
        object.__setattr__(self, "_coll", root_collection)
        object.__setattr__(self, "_chunks", root_collection.chunks)
        object.__setattr__(self, "_file", kwargs)
        object.__setattr__(self, "_buffer", StringIO())
        object.__setattr__(self, "_position", 0)
        object.__setattr__(self, "_chunk_number", 0)
        object.__setattr__(self, "_closed", False)
        object.__setattr__(self, "_ensured_index", False)

    def __create_index(self, collection, index, unique):
        doc = collection.find_one(projection={"_id": 1})
        if doc is None:
            try:
                indexes = list(collection.list_indexes())
            except OperationFailure:
                indexes = []
            if index not in indexes:
                collection.create_index(index, unique=unique)

    def __ensure_indexes(self):
        if not object.__getattribute__(self, "_ensured_index"):
            self.__create_index(self._coll.files, _F_INDEX, False)
            self.__create_index(self._coll.chunks, _C_INDEX, True)
            object.__setattr__(self, "_ensured_index", True)

    def abort(self):
        """Remove all chunks/files that may have been uploaded and close.
        """
        self._coll.chunks.delete_many({"files_id": self._file['_id']})
        self._coll.files.delete_one({"_id": self._file['_id']})
        object.__setattr__(self, "_closed", True)

    @property
    def closed(self):
        """Is this file closed?
# ... (the rest of this code is omitted) ...
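The __create_index guard above avoids touching indexes on collections that already contain data: it only creates the unique (files_id, n) index when the collection looks empty. A simplified, hedged sketch of the same guard; it assumes a local MongoDB and uses placeholder database and collection names:

from pymongo import MongoClient, ASCENDING

chunks = MongoClient().my_database.fs.chunks
if chunks.find_one(projection={"_id": 1}) is None:
    # Only an empty collection gets the unique (files_id, n) index here,
    # mirroring the intent of GridIn.__create_index above.
    chunks.create_index([("files_id", ASCENDING), ("n", ASCENDING)],
                        unique=True)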
Example 13: GridIn
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
class GridIn(object):
    def __init__(self, client, root_collection, **kwargs):
        self.client = client
        self.root_collection = root_collection
        self._files = self.client.connection(files_coll(self.root_collection))
        self._chunks = self.client.connection(chunks_coll(self.root_collection))

        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")

        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)

        self._file = kwargs
        self._chunk_number = 0
        self._position = 0
        self._buffer = StringIO()

    def __flush_data(self, data):
        def no_check(*arg, **kwargs):
            pass
        chunk = {"files_id": self._file['_id'],
                 "n": self._chunk_number,
                 "data": Binary(data)}
        self._chunks.insert(chunk, callback=no_check)
        self._chunk_number += 1
        self._position += len(data)

    def __flush_buffer(self):
        self.__flush_data(self._buffer.getvalue())
        self._buffer.close()
        self._buffer = StringIO()

    def write(self, data, callback=None, **kwargs):
        self._file['length'] = len(data)
        self._file.update(kwargs)

        try:
            read = data.read
        except AttributeError:
            read = StringIO(data).read

        if self._buffer.tell() > 0:
            # Fill out the last partial chunk before starting a new one.
            space = self._file['chunkSize'] - self._buffer.tell()
            if space:
                to_write = read(space)
                self._buffer.write(to_write)
                if len(to_write) < space:
                    return  # EOF
            self.__flush_buffer()

        to_write = read(self._file['chunkSize'])
        while to_write and len(to_write) == self._file['chunkSize']:
            self.__flush_data(to_write)
            to_write = read(self._file['chunkSize'])
        self._buffer.write(to_write)

    def __flush(self):
        self.__flush_buffer()

        def no_check(*arg, **kwargs):
            pass

        def cb_md5(*arg, **kwargs):
            try:
                self._file['md5'] = arg[0]['md5']
            except Exception:
                pass

        self._file["uploadDate"] = datetime.datetime.utcnow()
        self._files.insert(self._file, callback=no_check)
        self.client.command('filemd5', self._file['_id'],
                            root=self.root_collection, callback=cb_md5)

    def close(self):
        self.__flush()
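The write loop above has three phases: top up a partial chunk left in the buffer, flush every full chunkSize slice, then buffer the tail for the next call or the final flush. A simplified synchronous analogue of the slicing logic:

from io import BytesIO

def iter_chunks(data, chunk_size):
    """Yield full chunk_size pieces of `data`, then the partial tail -
    a simplified, synchronous analogue of the write() loop above."""
    read = BytesIO(data).read
    while True:
        piece = read(chunk_size)
        if len(piece) == chunk_size:
            yield piece
        else:
            if piece:
                yield piece  # final partial chunk (the buffer flush)
            return

print(list(iter_chunks(b"abcdefghij", 4)))
# [b'abcd', b'efgh', b'ij']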
Example 14: _do_batched_write_command
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def _do_batched_write_command(namespace, operation, command,
                              docs, check_keys, opts, ctx):
    """Create the next batched insert, update, or delete command.
    """
    max_bson_size = ctx.max_bson_size
    max_write_batch_size = ctx.max_write_batch_size
    # Max BSON object size + 16k - 2 bytes for ending NUL bytes.
    # Server guarantees there is enough room: SERVER-10643.
    max_cmd_size = max_bson_size + _COMMAND_OVERHEAD

    buf = StringIO()
    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00")
    # No options
    buf.write(_ZERO_32)
    # Namespace as C string
    buf.write(b(namespace))
    buf.write(_ZERO_8)
    # Skip: 0, Limit: -1
    buf.write(_SKIPLIM)

    # Where to write command document length
    command_start = buf.tell()
    buf.write(bson.BSON.encode(command))

    # Start of payload
    buf.seek(-1, 2)
    # Work around some Jython weirdness.
    buf.truncate()
    try:
        buf.write(_OP_MAP[operation])
    except KeyError:
        raise InvalidOperation('Unknown command')

    if operation in (_UPDATE, _DELETE):
        check_keys = False

    # Where to write list document length
    list_start = buf.tell() - 4
    to_send = []
    idx = 0
    for doc in docs:
        # Encode the current operation
        key = b(str(idx))
        value = bson.BSON.encode(doc, check_keys, opts)
        # Is there enough room to add this document? max_cmd_size accounts for
        # the two trailing null bytes.
        enough_data = (buf.tell() + len(key) + len(value)) >= max_cmd_size
        enough_documents = (idx >= max_write_batch_size)
        if enough_data or enough_documents:
            if not idx:
                write_op = "insert" if operation == _INSERT else None
                _raise_document_too_large(
                    write_op, len(value), max_bson_size)
            break
        buf.write(_BSONOBJ)
        buf.write(key)
        buf.write(_ZERO_8)
        buf.write(value)
        to_send.append(doc)
        idx += 1

    # Finalize the current OP_QUERY message.
    # Close list and command documents
    buf.write(_ZERO_16)

    # Write document lengths and request id
    length = buf.tell()
    buf.seek(list_start)
    buf.write(struct.pack('<i', length - list_start - 1))
    buf.seek(command_start)
    buf.write(struct.pack('<i', length - command_start))
    buf.seek(4)
    request_id = _randint()
    buf.write(struct.pack('<i', request_id))
    buf.seek(0)
    buf.write(struct.pack('<i', length))

    return request_id, buf.getvalue(), to_send
Example 15: _do_batched_insert
# Required import: from bson.py3compat import StringIO [as alias]
# Or: from bson.py3compat.StringIO import write [as alias]
def _do_batched_insert(collection_name, docs, check_keys,
                       safe, last_error_args, continue_on_error, opts,
                       ctx):
    """Insert `docs` using multiple batches.
    """
    def _insert_message(insert_message, send_safe):
        """Build the insert message with header and GLE.
        """
        request_id, final_message = __pack_message(2002, insert_message)
        if send_safe:
            request_id, error_message, _ = __last_error(collection_name,
                                                        last_error_args)
            final_message += error_message
        return request_id, final_message

    send_safe = safe or not continue_on_error
    last_error = None
    data = StringIO()
    data.write(struct.pack("<i", int(continue_on_error)))
    data.write(_make_c_string(collection_name))
    message_length = begin_loc = data.tell()
    has_docs = False
    to_send = []
    encode = _dict_to_bson  # Make local
    compress = ctx.compress and not (safe or send_safe)
    for doc in docs:
        encoded = encode(doc, check_keys, opts)
        encoded_length = len(encoded)
        too_large = (encoded_length > ctx.max_bson_size)

        message_length += encoded_length
        if message_length < ctx.max_message_size and not too_large:
            data.write(encoded)
            to_send.append(doc)
            has_docs = True
            continue

        if has_docs:
            # We have enough data, send this message.
            try:
                if compress:
                    rid, msg = None, data.getvalue()
                else:
                    rid, msg = _insert_message(data.getvalue(), send_safe)
                ctx.legacy_bulk_insert(
                    rid, msg, 0, send_safe, to_send, compress)
            # Exception type could be OperationFailure or a subtype
            # (e.g. DuplicateKeyError)
            except OperationFailure as exc:
                # Like it says, continue on error...
                if continue_on_error:
                    # Store exception details to re-raise after the
                    # final batch.
                    last_error = exc
                # With unacknowledged writes just return at the first error.
                elif not safe:
                    return
                # With acknowledged writes raise immediately.
                else:
                    raise

        if too_large:
            _raise_document_too_large(
                "insert", encoded_length, ctx.max_bson_size)

        message_length = begin_loc + encoded_length
        data.seek(begin_loc)
        data.truncate()
        data.write(encoded)
        to_send = [doc]

    if not has_docs:
        raise InvalidOperation("cannot do an empty bulk insert")

    if compress:
        request_id, msg = None, data.getvalue()
    else:
        request_id, msg = _insert_message(data.getvalue(), safe)
    ctx.legacy_bulk_insert(request_id, msg, 0, safe, to_send, compress)

    # Re-raise any exception stored due to continue_on_error
    if last_error is not None:
        raise last_error