This article collects typical usage examples of bson.binary.Binary in Python. If you are unsure what bson.binary.Binary does, how to call it, or simply want to see it used in real code, the hand-picked examples below should help. You can also explore the enclosing module, bson.binary, for further usage examples.
The following 15 code examples of bson.binary.Binary are presented below, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the site recommend better Python examples.
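Before diving in, here is a minimal sketch of the pattern every example below shares: wrapping raw bytes in bson.binary.Binary so PyMongo can store them inside a document. The database and collection names are placeholders, and a local mongod is assumed:

from bson.binary import Binary
from pymongo import MongoClient

coll = MongoClient().example_db.example_coll  # placeholder names

raw = b'\x00\x01binary payload'
coll.insert_one({'payload': Binary(raw)})     # default subtype 0 (generic binary)

doc = coll.find_one()
assert bytes(doc['payload']) == raw           # Binary is a bytes subclass on Python 3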
Example 1: test_pickle_chunk_V1_read
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def test_pickle_chunk_V1_read():
    data = {'foo': b'abcdefghijklmnopqrstuvwxyz'}
    version = {'_id': sentinel._id,
               'blob': '__chunked__'}
    coll = Mock()
    arctic_lib = Mock()
    datap = compressHC(cPickle.dumps(data, protocol=cPickle.HIGHEST_PROTOCOL))
    data_1 = datap[0:5]
    data_2 = datap[5:]
    coll.find.return_value = [{'data': Binary(data_1),
                               'symbol': 'sentinel.symbol',
                               'segment': 0},
                              {'data': Binary(data_2),
                               'symbol': 'sentinel.symbol',
                               'segment': 1},
                              ]
    arctic_lib.get_top_level_collection.return_value = coll

    ps = PickleStore()
    assert(data == ps.read(arctic_lib, version, sentinel.symbol))
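To make the mechanics of this test concrete, here is a self-contained sketch of what a chunked read has to do: join the segments in order, decompress, and unpickle. It assumes compressHC is an LZ4 high-compression wrapper and substitutes the python-lz4 block API for arctic's internal helper:

import pickle
import lz4.block

data = {'foo': b'abcdefghijklmnopqrstuvwxyz'}
blob = lz4.block.compress(pickle.dumps(data), mode='high_compression')
segments = [blob[0:5], blob[5:]]  # as held by the two mocked documents

restored = pickle.loads(lz4.block.decompress(b''.join(segments)))
assert restored == data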
Example 2: test_pickle_store_future_version
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def test_pickle_store_future_version():
    data = {'foo': b'abcdefghijklmnopqrstuvwxyz'}
    version = {'_id': sentinel._id,
               'blob': '__chunked__VERSION_ONE_MILLION'}
    coll = Mock()
    arctic_lib = Mock()
    datap = compressHC(cPickle.dumps(data, protocol=cPickle.HIGHEST_PROTOCOL))
    data_1 = datap[0:5]
    data_2 = datap[5:]
    coll.find.return_value = [{'data': Binary(data_1),
                               'symbol': 'sentinel.symbol',
                               'segment': 0},
                              {'data': Binary(data_2),
                               'symbol': 'sentinel.symbol',
                               'segment': 1},
                              ]
    arctic_lib.get_top_level_collection.return_value = coll

    ps = PickleStore()
    with pytest.raises(UnsupportedPickleStoreVersion) as e:
        ps.read(arctic_lib, version, sentinel.symbol)
    assert('unsupported version of pickle store' in str(e.value))
Example 3: _authenticate_cram_md5
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def _authenticate_cram_md5(credentials, sock_info, cmd_func):
    """Authenticate using CRAM-MD5 (RFC 2195)
    """
    source, username, password = credentials
    # The password used as the mac key is the
    # same as what we use for MONGODB-CR
    passwd = _password_digest(username, password)
    cmd = SON([('saslStart', 1),
               ('mechanism', 'CRAM-MD5'),
               ('payload', Binary(b(''))),
               ('autoAuthorize', 1)])
    response, _ = cmd_func(sock_info, source, cmd)
    # MD5 as implicit default digest for digestmod is deprecated
    # in python 3.4
    mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=_DMOD)
    mac.update(response['payload'])
    challenge = username.encode('utf-8') + b(' ') + b(mac.hexdigest())
    cmd = SON([('saslContinue', 1),
               ('conversationId', response['conversationId']),
               ('payload', Binary(challenge))])
    cmd_func(sock_info, source, cmd)
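The saslStart/saslContinue exchange above is just RFC 2195 CRAM-MD5: the client proves knowledge of a secret by sending an HMAC-MD5 of the server's challenge. A standalone sketch with made-up inputs (in MongoDB the HMAC key is the MONGODB-CR password digest, not the raw password):

import hashlib
import hmac

def cram_md5_response(username, secret, server_challenge):
    # RFC 2195: the response is b'<username> <hex HMAC-MD5(secret, challenge)>'
    mac = hmac.HMAC(key=secret, digestmod=hashlib.md5)
    mac.update(server_challenge)
    return username + b' ' + mac.hexdigest().encode('ascii')

print(cram_md5_response(b'user', b'secret', b'<1896.697170952@example.com>'))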
Example 4: __flush_data
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def __flush_data(self, data):
    """Flush `data` to a chunk.
    """
    # Ensure the index, even if there's nothing to write, so
    # the filemd5 command always succeeds.
    self._ensure_index()
    if not data:
        return
    assert(len(data) <= self.chunk_size)
    chunk = {"files_id": self._file["_id"],
             "n": self._chunk_number,
             "data": Binary(data)}
    try:
        self._chunks.insert(chunk)
    except DuplicateKeyError:
        self._raise_file_exists(self._file['_id'])
    self._chunk_number += 1
    self._position += len(data)
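You rarely call __flush_data directly; the public GridFS API drives it as you write. A minimal usage sketch, assuming a local mongod and placeholder names:

import gridfs
from pymongo import MongoClient

fs = gridfs.GridFS(MongoClient().example_db)  # placeholder database

payload = b'a' * (255 * 1024 + 1)             # spans two chunks at the default 255 kB chunk size
file_id = fs.put(payload, filename='example.bin')
assert fs.get(file_id).read() == payload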
Example 5: _authenticate_cram_md5
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def _authenticate_cram_md5(credentials, sock_info):
    """Authenticate using CRAM-MD5 (RFC 2195)
    """
    source = credentials.source
    username = credentials.username
    password = credentials.password
    # The password used as the mac key is the
    # same as what we use for MONGODB-CR
    passwd = _password_digest(username, password)
    cmd = SON([('saslStart', 1),
               ('mechanism', 'CRAM-MD5'),
               ('payload', Binary(b'')),
               ('autoAuthorize', 1)])
    response = sock_info.command(source, cmd)
    # MD5 as implicit default digest for digestmod is deprecated
    # in python 3.4
    mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=hashlib.md5)
    mac.update(response['payload'])
    challenge = username.encode('utf-8') + b' ' + mac.hexdigest().encode('utf-8')
    cmd = SON([('saslContinue', 1),
               ('conversationId', response['conversationId']),
               ('payload', Binary(challenge))])
    sock_info.command(source, cmd)
Example 6: __flush_data
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def __flush_data(self, data):
    """Flush `data` to a chunk.
    """
    self.__ensure_indexes()
    if 'md5' in self._file:
        self._file['md5'].update(data)
    if not data:
        return
    assert(len(data) <= self.chunk_size)
    chunk = {"files_id": self._file["_id"],
             "n": self._chunk_number,
             "data": Binary(data)}
    try:
        self._chunks.insert_one(chunk, session=self._session)
    except DuplicateKeyError:
        self._raise_file_exists(self._file['_id'])
    self._chunk_number += 1
    self._position += len(data)
Example 7: _authenticate_cram_md5
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def _authenticate_cram_md5(credentials, sock_info):
    """Authenticate using CRAM-MD5 (RFC 2195)
    """
    source = credentials.source
    username = credentials.username
    password = credentials.password
    # The password used as the mac key is the
    # same as what we use for MONGODB-CR
    passwd = _password_digest(username, password)
    cmd = SON([('saslStart', 1),
               ('mechanism', 'CRAM-MD5'),
               ('payload', Binary(b'')),
               ('autoAuthorize', 1)])
    response = sock_info.command(source, cmd)
    # MD5 as implicit default digest for digestmod is deprecated
    # in python 3.4
    mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=md5)
    mac.update(response['payload'])
    challenge = username.encode('utf-8') + b' ' + b(mac.hexdigest())
    cmd = SON([('saslContinue', 1),
               ('conversationId', response['conversationId']),
               ('payload', Binary(challenge))])
    sock_info.command(source, cmd)
Example 8: __flush_data
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def __flush_data(self, data):
    """Flush `data` to a chunk.
    """
    # Ensure the index, even if there's nothing to write, so
    # the filemd5 command always succeeds.
    self.__ensure_indexes()
    self._file['md5'].update(data)
    if not data:
        return
    assert(len(data) <= self.chunk_size)
    chunk = {"files_id": self._file["_id"],
             "n": self._chunk_number,
             "data": Binary(data)}
    try:
        self._chunks.insert_one(chunk)
    except DuplicateKeyError:
        self._raise_file_exists(self._file['_id'])
    self._chunk_number += 1
    self._position += len(data)
Example 9: test_to_db_fmt
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def test_to_db_fmt():
    import cPickle
    import numpy as np
    from bson.binary import Binary

    # standard types should be passed through
    assert to_db_fmt(1) == 1
    assert to_db_fmt(4.2) == 4.2
    assert to_db_fmt('foobarbaz') == 'foobarbaz'
    assert to_db_fmt(1 + 2j) == 1 + 2j

    # lists and dicts should be recursively formatted
    assert to_db_fmt([1, 1 + 2j, 'foo', [1, 2.3]]) == [1, 1 + 2j, 'foo', [1, 2.3]]
    assert to_db_fmt({'a': 1, 'b': ['foo', 2]}) == {'a': 1, 'b': ['foo', 2]}

    # numpy arrays should be converted to lists
    assert to_db_fmt(np.array([1, 2, 3])) == [1, 2, 3]

    # objects should be pickled
    x = object()
    assert to_db_fmt(x) == Binary(cPickle.dumps(x, protocol=2))
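These assertions pin down to_db_fmt's contract; a minimal implementation consistent with them might look like the following (a sketch written against Python 3's pickle, not the project's actual code):

import pickle
import numpy as np
from bson.binary import Binary

def to_db_fmt_sketch(x):
    # primitive types pass through untouched
    if isinstance(x, (type(None), bool, int, float, complex, str, bytes)):
        return x
    # lists and dicts are formatted recursively
    if isinstance(x, list):
        return [to_db_fmt_sketch(v) for v in x]
    if isinstance(x, dict):
        return {k: to_db_fmt_sketch(v) for k, v in x.items()}
    # numpy arrays become plain lists
    if isinstance(x, np.ndarray):
        return x.tolist()
    # anything else is pickled into a BSON Binary
    return Binary(pickle.dumps(x, protocol=2))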
Example 10: from_db_fmt
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def from_db_fmt(x):
    # recursive descent through lists
    if isinstance(x, list):
        return [from_db_fmt(v) for v in x]
    # recursive descent through dicts
    if isinstance(x, dict):
        return {k: from_db_fmt(v) for k, v in x.items()}
    # further code occasionally serializes `ObjectId`s to json, so stringify them now
    if isinstance(x, ObjectId):
        return str(x)
    if isinstance(x, Binary):
        # this might be pickled data; let's attempt to deserialize it
        try:
            return cPickle.loads(x)
        except cPickle.UnpicklingError:
            # this wasn't pickled data. just return it.
            return x
    # not a datatype we need to deserialize! just pass it out
    return x
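A quick round trip through the two helpers shows the intent; this usage sketch assumes to_db_fmt behaves as pinned down in Example 9 and that the class is importable wherever it is unpickled:

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

encoded = to_db_fmt({'tag': 'origin', 'pt': Point(1, 2)})  # 'pt' becomes a pickled Binary
decoded = from_db_fmt(encoded)                             # ...and is unpickled back into a Point
assert decoded['tag'] == 'origin' and decoded['pt'].x == 1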
Example 11: test_binary
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def test_binary(self):
    bin_type_dict = {"bin": Binary(b"\x00\x01\x02\x03\x04")}
    md5_type_dict = {
        "md5": Binary(b" n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac",
                      MD5_SUBTYPE)
    }
    custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)}
    self.round_trip(bin_type_dict)
    self.round_trip(md5_type_dict)
    self.round_trip(custom_type_dict)

    json_bin_dump = bsonjs_dumps(md5_type_dict)
    # Order should be $binary then $type.
    self.assertEqual(
        ('{ "md5" : { "$binary" : "IG43GK8JL9HRL4DK53HMrA==", '
         '"$type" : "05" } }'),
        json_bin_dump)

    json_bin_dump = bsonjs_dumps(custom_type_dict)
    self.assertTrue('"$type" : "80"' in json_bin_dump)

    # Check loading invalid binary
    self.assertRaises(ValueError, bsonjs.loads,
                      '{"a": {"$binary": "invalid", "$type": "80"}}')
Example 12: _encode_value
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def _encode_value(self, value):
    """ Encodes the value such that it can be stored into MongoDB.

    Any primitive types are stored directly into MongoDB, while non-primitive types
    are pickled and stored as GridFS objects. The id pointing to a GridFS object
    replaces the original value.

    Args:
        value (object): The object that should be encoded for storing in MongoDB.

    Returns:
        object: The encoded value ready to be stored in MongoDB.
    """
    if isinstance(value, (int, float, str, bool, datetime)):
        return value
    elif isinstance(value, list):
        return [self._encode_value(item) for item in value]
    elif isinstance(value, dict):
        result = {}
        for key, item in value.items():
            result[key] = self._encode_value(item)
        return result
    else:
        return self._gridfs.put(Binary(pickle.dumps(value)),
                                workflow_id=self._workflow_id)
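The matching decode step is not shown in the excerpt; here is a sketch of what it could plausibly look like, assuming non-primitive values were stored via gridfs.put as above and that the returned ObjectId replaced them in the document:

import pickle
from bson.objectid import ObjectId

def _decode_value_sketch(fs, value):
    # lists and dicts mirror the recursive descent in _encode_value
    if isinstance(value, list):
        return [_decode_value_sketch(fs, v) for v in value]
    if isinstance(value, dict):
        return {k: _decode_value_sketch(fs, v) for k, v in value.items()}
    # an ObjectId is assumed to point at a pickled GridFS blob
    if isinstance(value, ObjectId):
        return pickle.loads(fs.get(value).read())
    return value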
Example 13: store
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def store(self, thing):
    """
    Simple persistence method
    """
    to_store = {'field1': thing.field1,
                'date_field': thing.date_field,
                }
    to_store['stuff'] = Binary(cPickle.dumps(thing.stuff))
    # Respect any soft-quota on write - raises if stats().totals.size > quota
    self._arctic_lib.check_quota()
    self._collection.insert_one(to_store)
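The corresponding read path would be the mirror image. A sketch, reusing the field names from store above; the Thing constructor is hypothetical:

def load(self, field1):
    doc = self._collection.find_one({'field1': field1})
    thing = Thing(doc['field1'], doc['date_field'])  # hypothetical constructor
    thing.stuff = cPickle.loads(doc['stuff'])        # Binary -> original object
    return thing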
Example 14: _pandas_to_bucket
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def _pandas_to_bucket(df, symbol, initial_image):
    rtn = {SYMBOL: symbol, VERSION: CHUNK_VERSION_NUMBER, COLUMNS: {}, COUNT: len(df)}
    end = to_dt(df.index[-1].to_pydatetime())
    if initial_image:
        if 'index' in initial_image:
            start = min(to_dt(df.index[0].to_pydatetime()), initial_image['index'])
        else:
            start = to_dt(df.index[0].to_pydatetime())
        image_start = initial_image.get('index', start)
        rtn[IMAGE_DOC] = {IMAGE_TIME: image_start, IMAGE: initial_image}
        final_image = TickStore._pandas_compute_final_image(df, initial_image, end)
    else:
        start = to_dt(df.index[0].to_pydatetime())
        final_image = {}
    rtn[END] = end
    rtn[START] = start

    logger.warning("NB treating all values as 'exists' - no longer sparse")
    rowmask = Binary(lz4_compressHC(np.packbits(np.ones(len(df), dtype='uint8')).tostring()))

    index_name = df.index.names[0] or "index"

    if PD_VER < '0.23.0':
        recs = df.to_records(convert_datetime64=False)
    else:
        recs = df.to_records()

    for col in df:
        array = TickStore._ensure_supported_dtypes(recs[col])
        col_data = {
            DATA: Binary(lz4_compressHC(array.tostring())),
            ROWMASK: rowmask,
            DTYPE: TickStore._str_dtype(array.dtype),
        }
        rtn[COLUMNS][col] = col_data

    # The index is stored as LZ4-compressed millisecond deltas: the first value
    # is absolute, each subsequent value is the diff from its predecessor.
    rtn[INDEX] = Binary(
        lz4_compressHC(np.concatenate(
            ([recs[index_name][0].astype('datetime64[ms]').view('uint64')],
             np.diff(
                 recs[index_name].astype('datetime64[ms]').view('uint64')))).tostring()))
    return rtn, final_image
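Reading the index back reverses that encoding: decompress, reinterpret the bytes as uint64 millisecond values, and cumulative-sum the deltas. A sketch, again substituting python-lz4's block API (with its default size header) for the project's lz4 helpers:

import numpy as np
import lz4.block

def decode_index(index_binary):
    raw = lz4.block.decompress(bytes(index_binary))
    deltas = np.frombuffer(raw, dtype='uint64')
    return np.cumsum(deltas).astype('datetime64[ms]')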
Example 15: _checksum
# Required import: from bson import binary [as alias]
# Or: from bson.binary import Binary [as alias]
def _checksum(self, fields, data):
    """
    Checksum the passed in dictionary
    """
    sha = hashlib.sha1()
    for field in fields:
        sha.update(field)
    sha.update(data)
    return Binary(sha.digest())
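Usage is straightforward: fold each field into the digest, then the payload. An equivalent standalone computation (the field values are placeholders and must already be bytes, as in the method above):

import hashlib
from bson.binary import Binary

sha = hashlib.sha1()
for field in [b'symbol', b'2020-01-01']:  # placeholder field values
    sha.update(field)
sha.update(b'compressed payload bytes')   # placeholder data
checksum = Binary(sha.digest())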