本文整理汇总了Python中bson.binary.Binary.read方法的典型用法代码示例。如果您正苦于以下问题:Python Binary.read方法的具体用法?Python Binary.read怎么用?Python Binary.read使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类bson.binary.Binary的用法示例。
在下文中一共展示了Binary.read方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run_operation
# 需要导入模块: from bson.binary import Binary [as 别名]
# 或者: from bson.binary.Binary import read [as 别名]
def run_operation(self, sessions, collection, operation):
original_collection = collection
name = camel_to_snake(operation['name'])
if name == 'run_command':
name = 'command'
elif name == 'download_by_name':
name = 'open_download_stream_by_name'
elif name == 'download':
name = 'open_download_stream'
def parse_options(opts):
if 'readPreference' in opts:
opts['read_preference'] = parse_read_preference(
opts.pop('readPreference'))
if 'writeConcern' in opts:
opts['write_concern'] = WriteConcern(
**dict(opts.pop('writeConcern')))
if 'readConcern' in opts:
opts['read_concern'] = ReadConcern(
**dict(opts.pop('readConcern')))
return opts
database = collection.database
collection = database.get_collection(collection.name)
if 'collectionOptions' in operation:
collection = collection.with_options(
**dict(parse_options(operation['collectionOptions'])))
object_name = self.get_object_name(operation)
if object_name == 'gridfsbucket':
# Only create the GridFSBucket when we need it (for the gridfs
# retryable reads tests).
obj = GridFSBucket(
database, bucket_name=collection.name,
disable_md5=True)
else:
objects = {
'client': database.client,
'database': database,
'collection': collection,
'testRunner': self
}
objects.update(sessions)
obj = objects[object_name]
# Combine arguments with options and handle special cases.
arguments = operation.get('arguments', {})
arguments.update(arguments.pop("options", {}))
parse_options(arguments)
cmd = getattr(obj, name)
for arg_name in list(arguments):
c2s = camel_to_snake(arg_name)
# PyMongo accepts sort as list of tuples.
if arg_name == "sort":
sort_dict = arguments[arg_name]
arguments[arg_name] = list(iteritems(sort_dict))
# Named "key" instead not fieldName.
if arg_name == "fieldName":
arguments["key"] = arguments.pop(arg_name)
# Aggregate uses "batchSize", while find uses batch_size.
elif arg_name == "batchSize" and name == "aggregate":
continue
# Requires boolean returnDocument.
elif arg_name == "returnDocument":
arguments[c2s] = arguments.pop(arg_name) == "After"
elif c2s == "requests":
# Parse each request into a bulk write model.
requests = []
for request in arguments["requests"]:
bulk_model = camel_to_upper_camel(request["name"])
bulk_class = getattr(operations, bulk_model)
bulk_arguments = camel_to_snake_args(request["arguments"])
requests.append(bulk_class(**dict(bulk_arguments)))
arguments["requests"] = requests
elif arg_name == "session":
arguments['session'] = sessions[arguments['session']]
elif name == 'command' and arg_name == 'command':
# Ensure the first key is the command name.
ordered_command = SON([(operation['command_name'], 1)])
ordered_command.update(arguments['command'])
arguments['command'] = ordered_command
elif name == 'open_download_stream' and arg_name == 'id':
arguments['file_id'] = arguments.pop(arg_name)
elif name == 'with_transaction' and arg_name == 'callback':
callback_ops = arguments[arg_name]['operations']
arguments['callback'] = lambda _: self.run_operations(
sessions, original_collection, copy.deepcopy(callback_ops),
in_with_transaction=True)
else:
arguments[c2s] = arguments.pop(arg_name)
result = cmd(**dict(arguments))
if name == "aggregate":
if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
# Read from the primary to ensure causal consistency.
#.........这里部分代码省略.........