本文整理汇总了Python中bson.binary.Binary类的典型用法代码示例。如果您正苦于以下问题:Python Binary类的具体用法?Python Binary怎么用?Python Binary使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Binary类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_binary
def test_binary(self):
    """Binary wraps bytes: bytes methods work on it, and it is its own type."""
    raw = b("hello world")
    wrapped = Binary(raw)
    # Binary subclasses the bytes type, so prefix/suffix checks apply directly.
    self.assertTrue(wrapped.startswith(b("hello")))
    self.assertTrue(wrapped.endswith(b("world")))
    self.assertTrue(isinstance(wrapped, Binary))
    # A plain text string is never a Binary instance.
    self.assertFalse(isinstance("hello world", Binary))
示例2: post
def post(self, *args):
    # Handle an S3-style POST on a bucket object: when ``self.uploadId`` is
    # set, combine previously stored parts into one document (completing a
    # multipart upload); always answer with an InitiateMultipartUploadResult
    # XML body.
    # NOTE(review): indentation reconstructed from a flat scrape -- the final
    # render_xml is assumed to run unconditionally at method level; confirm
    # against the original handler source.
    # Add entry into bucket and flag as multipart upload
    if self.bucket_name and self.object_name:
        bucket_name = self.bucket_name
        object_name = self.object_name
    else:
        # Fall back to positional route arguments: (bucket, object).
        bucket_name,object_name = args
    if bucket_name not in self._get_bucket_names():
        # 404 / NSB: presumably "NoSuchBucket" -- verify against _error().
        self._error(code=404,s3code='NSB')
        return
    # urllib.unquote here implies Python 2 (moved to urllib.parse in Py3).
    original_name = urllib.unquote(object_name)
    bucket_object = Binary(self.request.body)
    # NOTE(review): object_size/object_md5 of the raw body are computed but
    # not used below -- only the aggregated multipart md5/size are stored.
    object_size = bucket_object.__len__()
    object_md5 = self._object_md5(bucket_object)
    if self.uploadId:
        # We have a multipart upload, so iterate over the parts to generate the md5 hash and calculate size
        # This is the last call made after the mutlipart upload with the uploadId
        mupmd5 = hashlib.md5()
        mupsize = 0
        for mup in self.application.S3[bucket_name].find({'object_name':object_name}):
            mupmd5.update(mup['object'])
            mupsize += mup['size']
        # Store the combined object flagged as multipart, with aggregate
        # md5/size and both timestamps set to now.
        self.application.S3[bucket_name].insert_one({'object_name':object_name,'object':bucket_object,'multipart':True,'md5':mupmd5.hexdigest(),'size':mupsize,'added':datetime.datetime.utcnow(),'updated':datetime.datetime.utcnow(),})
    self.render_xml({"InitiateMultipartUploadResult": {
        "Bucket": bucket_name,
        "Prefix": self.prefix,
        "Key":object_name,
        "UploadId":object_name
    }})
示例3: put
def put(self, *args):
    """Store an object (or one part of a multipart upload) in a bucket.

    Computes the MD5 of the stored body, records size and timestamps in the
    backing Mongo collection, and echoes the MD5 back as the ``etag`` header,
    mirroring S3 semantics. The bucket/object names come either from handler
    attributes or from the positional route arguments ``(bucket, object)``.
    """
    if self.bucket_name and self.object_name:
        bucket_name = self.bucket_name
        object_name = self.object_name
    else:
        bucket_name, object_name = args
    # urllib.unquote implies Python 2 (urllib.parse.unquote in Py3).
    original_name = urllib.unquote(object_name)
    if bucket_name not in self._get_bucket_names():
        # 404 / NSB: presumably "NoSuchBucket" -- verify against _error().
        self._error(code=404, s3code='NSB')
        return
    # Insert object and then calculate computed md5 of stored object, size, then update and return
    # If the object already exists, delete contents and add updated timestamp and update
    existing = self.application.S3[bucket_name].find({"object_name": original_name})
    # A plain PUT on an existing object overwrites it in place; a multipart
    # part (partNumber set) is always inserted as a new document instead.
    if existing.count() > 0 and self.partNumber is None:
        existing_id = next(existing)['_id']
        update_object = Binary(self.request.body)
        object_size = len(update_object)
        object_md5 = self._object_md5(update_object)
        self.application.S3[bucket_name].update(
            {"_id": existing_id},
            {'$set': {'object': update_object,
                      'md5': object_md5,
                      'updated': datetime.datetime.utcnow(),
                      'size': object_size}})
        self.set_header('etag', '"%s"' % object_md5)
        self.finish()
        return
    if self.partNumber:
        tobeinserted = {'object_name': original_name,
                        'object': Binary(self.request.body),
                        'partNumber': self.partNumber}
    else:
        tobeinserted = {'object_name': original_name,
                        'object': Binary(self.request.body)}
    inserted_object_id = self.application.S3[bucket_name].insert_one(tobeinserted).inserted_id
    # Read the stored copy back so md5/size reflect what was actually persisted.
    inserted_object = self._get_bucket_object(bucket_name=bucket_name, _id=inserted_object_id)
    object_size = len(inserted_object['object'])
    object_md5 = self._object_md5(inserted_object['object'])
    self.application.S3[bucket_name].update(
        {'_id': inserted_object_id},
        {'$set': {'md5': object_md5,
                  'updated': datetime.datetime.utcnow(),
                  'added': datetime.datetime.utcnow(),
                  'size': object_size}})
    self.set_header('etag', '"%s"' % object_md5)
    self.finish()
示例4: _element_to_bson
def _element_to_bson(key, value, check_keys, uuid_subtype):
if not isinstance(key, basestring):
raise InvalidDocument("documents must have only string keys, "
"key was %r" % key)
if check_keys:
if key.startswith("$"):
raise InvalidDocument("key %r must not start with '$'" % key)
if "." in key:
raise InvalidDocument("key %r must not contain '.'" % key)
name = _make_c_string(key, True)
if isinstance(value, float):
return BSONNUM + name + struct.pack("<d", value)
if _use_uuid:
if isinstance(value, uuid.UUID):
# Java Legacy
if uuid_subtype == JAVA_LEGACY:
# Python 3.0(.1) returns a bytearray instance for bytes (3.1
# and newer just return a bytes instance). Convert that to
# binary_type (here and below) for compatibility.
from_uuid = binary_type(value.bytes)
as_legacy_java = from_uuid[0:8][::-1] + from_uuid[8:16][::-1]
value = Binary(as_legacy_java, subtype=OLD_UUID_SUBTYPE)
# C# legacy
elif uuid_subtype == CSHARP_LEGACY:
# Microsoft GUID representation.
value = Binary(binary_type(value.bytes_le),
subtype=OLD_UUID_SUBTYPE)
# Python
else:
value = Binary(binary_type(value.bytes), subtype=uuid_subtype)
if isinstance(value, Binary):
subtype = value.subtype
if subtype == 2:
value = struct.pack("<i", len(value)) + value
return (BSONBIN + name +
struct.pack("<i", len(value)) + b(chr(subtype)) + value)
if isinstance(value, Code):
cstring = _make_c_string(value)
if not value.scope:
length = struct.pack("<i", len(cstring))
return BSONCOD + name + length + cstring
scope = _dict_to_bson(value.scope, False, uuid_subtype, False)
full_length = struct.pack("<i", 8 + len(cstring) + len(scope))
length = struct.pack("<i", len(cstring))
return BSONCWS + name + full_length + length + cstring + scope
if isinstance(value, binary_type):
if PY3:
# Python3 special case. Store 'bytes' as BSON binary subtype 0.
return (BSONBIN + name +
struct.pack("<i", len(value)) + ZERO + value)
cstring = _make_c_string(value)
length = struct.pack("<i", len(cstring))
return BSONSTR + name + length + cstring
if isinstance(value, unicode):
cstring = _make_c_string(value)
length = struct.pack("<i", len(cstring))
return BSONSTR + name + length + cstring
if isinstance(value, dict):
return BSONOBJ + name + _dict_to_bson(value, check_keys, uuid_subtype, False)
if isinstance(value, (list, tuple)):
as_dict = SON(zip([str(i) for i in range(len(value))], value))
return BSONARR + name + _dict_to_bson(as_dict, check_keys, uuid_subtype, False)
if isinstance(value, ObjectId):
return BSONOID + name + value.binary
if value is True:
return BSONBOO + name + ONE
if value is False:
return BSONBOO + name + ZERO
if isinstance(value, int):
# TODO this is an ugly way to check for this...
if value > MAX_INT64 or value < MIN_INT64:
raise OverflowError("BSON can only handle up to 8-byte ints")
if value > MAX_INT32 or value < MIN_INT32:
return BSONLON + name + struct.pack("<q", value)
return BSONINT + name + struct.pack("<i", value)
# 2to3 will convert long to int here since there is no long in python3.
# That's OK. The previous if block will match instead.
if isinstance(value, long):
if value > MAX_INT64 or value < MIN_INT64:
raise OverflowError("BSON can only handle up to 8-byte ints")
return BSONLON + name + struct.pack("<q", value)
if isinstance(value, datetime.datetime):
if value.utcoffset() is not None:
value = value - value.utcoffset()
millis = int(calendar.timegm(value.timetuple()) * 1000 +
value.microsecond / 1000)
return BSONDAT + name + struct.pack("<q", millis)
if isinstance(value, Timestamp):
time = struct.pack("<I", value.time)
inc = struct.pack("<I", value.inc)
return BSONTIM + name + inc + time
if value is None:
return BSONNUL + name
if isinstance(value, (RE_TYPE, Regex)):
pattern = value.pattern
flags = ""
#.........这里部分代码省略.........
示例5: _element_to_bson
def _element_to_bson(key, value, check_keys, uuid_subtype):
if not isinstance(key, basestring):
raise InvalidDocument("documents must have only string keys, "
"key was %r" % key)
if check_keys:
if key.startswith("$"):
raise InvalidDocument("key %r must not start with '$'" % key)
if "." in key:
raise InvalidDocument("key %r must not contain '.'" % key)
name = _make_c_string(key, True)
if isinstance(value, float):
return "\x01" + name + struct.pack("<d", value)
# Use Binary w/ subtype 3 for UUID instances
if _use_uuid:
if isinstance(value, uuid.UUID):
value = Binary(value.bytes, subtype=uuid_subtype)
if isinstance(value, Binary):
subtype = value.subtype
if subtype == 2:
value = struct.pack("<i", len(value)) + value
return "\x05%s%s%s%s" % (name, struct.pack("<i", len(value)),
chr(subtype), value)
if isinstance(value, Code):
cstring = _make_c_string(value)
if not value.scope:
length = struct.pack("<i", len(cstring))
return "\x0D" + name + length + cstring
scope = _dict_to_bson(value.scope, False, uuid_subtype, False)
full_length = struct.pack("<i", 8 + len(cstring) + len(scope))
length = struct.pack("<i", len(cstring))
return "\x0F" + name + full_length + length + cstring + scope
if isinstance(value, str):
cstring = _make_c_string(value)
length = struct.pack("<i", len(cstring))
return "\x02" + name + length + cstring
if isinstance(value, unicode):
cstring = _make_c_string(value)
length = struct.pack("<i", len(cstring))
return "\x02" + name + length + cstring
if isinstance(value, dict):
return "\x03" + name + _dict_to_bson(value, check_keys, uuid_subtype, False)
if isinstance(value, (list, tuple)):
as_dict = SON(zip([str(i) for i in range(len(value))], value))
return "\x04" + name + _dict_to_bson(as_dict, check_keys, uuid_subtype, False)
if isinstance(value, ObjectId):
return "\x07" + name + value.binary
if value is True:
return "\x08" + name + "\x01"
if value is False:
return "\x08" + name + "\x00"
if isinstance(value, int):
# TODO this is an ugly way to check for this...
if value > MAX_INT64 or value < MIN_INT64:
raise OverflowError("BSON can only handle up to 8-byte ints")
if value > MAX_INT32 or value < MIN_INT32:
return "\x12" + name + struct.pack("<q", value)
return "\x10" + name + struct.pack("<i", value)
if isinstance(value, long):
# XXX No long type in Python 3
if value > MAX_INT64 or value < MIN_INT64:
raise OverflowError("BSON can only handle up to 8-byte ints")
return "\x12" + name + struct.pack("<q", value)
if isinstance(value, datetime.datetime):
if value.utcoffset() is not None:
value = value - value.utcoffset()
millis = int(calendar.timegm(value.timetuple()) * 1000 +
value.microsecond / 1000)
return "\x09" + name + struct.pack("<q", millis)
if isinstance(value, Timestamp):
time = struct.pack("<I", value.time)
inc = struct.pack("<I", value.inc)
return "\x11" + name + inc + time
if value is None:
return "\x0A" + name
if isinstance(value, RE_TYPE):
pattern = value.pattern
flags = ""
if value.flags & re.IGNORECASE:
flags += "i"
if value.flags & re.LOCALE:
flags += "l"
if value.flags & re.MULTILINE:
flags += "m"
if value.flags & re.DOTALL:
flags += "s"
if value.flags & re.UNICODE:
flags += "u"
if value.flags & re.VERBOSE:
flags += "x"
return "\x0B" + name + _make_c_string(pattern, True) + \
_make_c_string(flags)
if isinstance(value, DBRef):
return _element_to_bson(key, value.as_doc(), False, uuid_subtype)
if isinstance(value, MinKey):
return "\xFF" + name
if isinstance(value, MaxKey):
#.........这里部分代码省略.........
示例6: run_operation
#.........这里部分代码省略.........
if 'writeConcern' in opts:
opts['write_concern'] = WriteConcern(
**dict(opts.pop('writeConcern')))
if 'readConcern' in opts:
opts['read_concern'] = ReadConcern(
**dict(opts.pop('readConcern')))
return opts
database = collection.database
collection = database.get_collection(collection.name)
if 'collectionOptions' in operation:
collection = collection.with_options(
**dict(parse_options(operation['collectionOptions'])))
object_name = self.get_object_name(operation)
if object_name == 'gridfsbucket':
# Only create the GridFSBucket when we need it (for the gridfs
# retryable reads tests).
obj = GridFSBucket(
database, bucket_name=collection.name,
disable_md5=True)
else:
objects = {
'client': database.client,
'database': database,
'collection': collection,
'testRunner': self
}
objects.update(sessions)
obj = objects[object_name]
# Combine arguments with options and handle special cases.
arguments = operation.get('arguments', {})
arguments.update(arguments.pop("options", {}))
parse_options(arguments)
cmd = getattr(obj, name)
for arg_name in list(arguments):
c2s = camel_to_snake(arg_name)
# PyMongo accepts sort as list of tuples.
if arg_name == "sort":
sort_dict = arguments[arg_name]
arguments[arg_name] = list(iteritems(sort_dict))
# Named "key" instead not fieldName.
if arg_name == "fieldName":
arguments["key"] = arguments.pop(arg_name)
# Aggregate uses "batchSize", while find uses batch_size.
elif arg_name == "batchSize" and name == "aggregate":
continue
# Requires boolean returnDocument.
elif arg_name == "returnDocument":
arguments[c2s] = arguments.pop(arg_name) == "After"
elif c2s == "requests":
# Parse each request into a bulk write model.
requests = []
for request in arguments["requests"]:
bulk_model = camel_to_upper_camel(request["name"])
bulk_class = getattr(operations, bulk_model)
bulk_arguments = camel_to_snake_args(request["arguments"])
requests.append(bulk_class(**dict(bulk_arguments)))
arguments["requests"] = requests
elif arg_name == "session":
arguments['session'] = sessions[arguments['session']]
elif name == 'command' and arg_name == 'command':
# Ensure the first key is the command name.
ordered_command = SON([(operation['command_name'], 1)])
ordered_command.update(arguments['command'])
arguments['command'] = ordered_command
elif name == 'open_download_stream' and arg_name == 'id':
arguments['file_id'] = arguments.pop(arg_name)
elif name == 'with_transaction' and arg_name == 'callback':
callback_ops = arguments[arg_name]['operations']
arguments['callback'] = lambda _: self.run_operations(
sessions, original_collection, copy.deepcopy(callback_ops),
in_with_transaction=True)
else:
arguments[c2s] = arguments.pop(arg_name)
result = cmd(**dict(arguments))
if name == "aggregate":
if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
# Read from the primary to ensure causal consistency.
out = collection.database.get_collection(
arguments["pipeline"][-1]["$out"],
read_preference=ReadPreference.PRIMARY)
return out.find()
if name == "map_reduce":
if isinstance(result, dict) and 'results' in result:
return result['results']
if 'download' in name:
result = Binary(result.read())
if isinstance(result, Cursor) or isinstance(result, CommandCursor):
return list(result)
return result