本文整理汇总了Python中pickle.loads方法的典型用法代码示例。如果您正苦于以下问题:Python pickle.loads方法的具体用法?Python pickle.loads怎么用?Python pickle.loads使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pickle
的用法示例。
在下文中一共展示了pickle.loads方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _deserialize
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def _deserialize(self, data, type_):
    """Decode a serialized payload back into a Python object.

    Optionally LZ4-decompresses first (when ``self.compress`` is set),
    then dispatches on ``type_``: ``_NUMPY`` payloads are restored with
    ``np.load``, ``_PICKLE`` payloads with ``pickle.loads``, and any
    other type marker is returned as raw bytes.
    """
    payload = lz4.frame.decompress(data) if self.compress else data
    if type_ == _NUMPY:
        # np.load needs a file-like object, so wrap the bytes in a buffer
        return np.load(io.BytesIO(payload))
    if type_ == _PICKLE:
        # NOTE(review): pickle.loads is unsafe on untrusted input
        return pickle.loads(payload)
    # unknown marker: hand back the (possibly decompressed) bytes as-is
    return payload
示例2: run
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def run(self):
    """Accept one video client and display its zlib+pickle frame stream.

    Frames arrive length-prefixed with a native ``struct "L"`` header;
    each payload is a zlib-compressed pickled image shown via OpenCV
    until ESC (key code 27) is pressed.
    """
    print("VEDIO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VEDIO client success connected...")
    header_len = struct.calcsize("L")
    buf = "".encode("utf-8")
    cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
    while True:
        # accumulate until the fixed-size length header is complete
        while len(buf) < header_len:
            buf += conn.recv(81920)
        header, buf = buf[:header_len], buf[header_len:]
        body_len = struct.unpack("L", header)[0]
        # accumulate until the whole frame body has arrived
        while len(buf) < body_len:
            buf += conn.recv(81920)
        body, buf = buf[:body_len], buf[body_len:]
        # NOTE(review): pickle.loads on network data trusts the peer, and
        # native "L" size differs across platforms — sender must match.
        frame = pickle.loads(zlib.decompress(body))
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
示例3: recv_data
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def recv_data(self):
    """Receive one length-prefixed pickled message from ``self.conn``.

    Wire format: a 4-byte native unsigned int giving the payload length,
    followed by that many bytes of pickled data. Sends an ACK back unless
    the message itself is an ACK. Returns the unpickled message (a dict
    with at least a "cmd" key), or None when the advertised length is 0.
    """
    data_remaining = struct.unpack("I", self.conn.recv(4))[0]
    if not data_remaining:
        log.debug("no data?!")
        return None
    log.debug("<- recving %d bytes", data_remaining)
    chunks = []
    while data_remaining:
        recv_bytes = data_remaining if data_remaining < self.SOCK_BUF else self.SOCK_BUF
        chunks.append(self.conn.recv(recv_bytes))
        chunk_len = len(chunks[-1])
        if chunk_len == 0:
            # peer closed the connection mid-message; unpickle what we have
            break
        data_remaining -= chunk_len
    # BUG FIX: the chunks are bytes, so the join separator must be bytes;
    # "".join(chunks) raises TypeError on Python 3.
    # NOTE(review): pickle.loads on socket data is unsafe if the peer is
    # untrusted — it can execute arbitrary code.
    data = pickle.loads(b"".join(chunks))
    if data["cmd"] != self.ACK:
        self.send_ack()
    return data
示例4: _controller
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def _controller(self):
    """Return the server controller callback for the kvstore."""
    def server_controller(cmd_id, cmd_body, _):
        """Server controller: dispatch on ``cmd_id``.

        cmd_id 0 carries a pickled optimizer to install on the kvstore;
        any other id is reported as an unknown command.
        """
        if not self.init_logginig:
            # the reason put the codes here is because we cannot get
            # kvstore.rank earlier
            head = '%(asctime)-15s Server[' + str(
                self.kvstore.rank) + '] %(message)s'
            logging.basicConfig(level=logging.DEBUG, format=head)
            self.init_logginig = True
        if cmd_id == 0:
            # A no-op "try: ... except: raise" wrapper was removed here —
            # it had no effect. NOTE(review): unpickling a command body is
            # unsafe if the sender is untrusted.
            optimizer = pickle.loads(cmd_body)
            self.kvstore.set_optimizer(optimizer)
        else:
            print("server %d, unknown command (%d, %s)" % (
                self.kvstore.rank, cmd_id, cmd_body))
    return server_controller
示例5: test_attr_basic
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def test_attr_basic():
    """Check that AttrScope defaults and explicit attrs land on symbols."""
    with mx.AttrScope(group='4', data='great'):
        sym = mx.symbol.Variable('data',
                                 attr={'dtype': 'data',
                                       'group': '1',
                                       'force_mirroring': 'True'},
                                 lr_mult=1)
        scoped_sym = mx.symbol.Variable('data2')
    # the scope default applies only where no explicit attr was given
    assert scoped_sym.attr('group') == '4'
    assert sym.attr('group') == '1'
    for key in ('lr_mult', '__lr_mult__'):
        assert sym.attr(key) == '1'
    for key in ('force_mirroring', '__force_mirroring__'):
        assert sym.attr(key) == 'True'
    # attrs must survive a pickle round trip
    clone = pkl.loads(pkl.dumps(sym))
    assert sym.attr('dtype') == clone.attr('dtype')
示例6: test_sparse_nd_pickle
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def test_sparse_nd_pickle():
    """Round-trip sparse NDArrays through pickle and compare contents."""
    dim0, dim1 = 40, 40
    stype_dict = {'row_sparse': RowSparseNDArray, 'csr': CSRNDArray}
    shape = rand_shape_2d(dim0, dim1)
    for stype, klass in stype_dict.items():
        for density in (0, 0.5):
            original, _ = rand_sparse_ndarray(shape, stype, density)
            assert isinstance(original, klass)
            # type and numeric content must both survive the round trip
            restored = pkl.loads(pkl.dumps(original))
            assert isinstance(restored, klass)
            assert same(original.asnumpy(), restored.asnumpy())
# @kalyc: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11741
示例7: load
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
# BUG FIX: the body awaits a coroutine, so this must be "async def" —
# "await" inside a plain "def" is a SyntaxError.
async def load(self, ctx, save=None):
    """loads current team's save. defaults to most recent"""
    server = ctx.message.server
    author = ctx.message.author
    channel = ctx.message.channel
    try:
        team = self.splayers[server.id][channel.id][author.id]
    except (KeyError, AttributeError):
        # no team recorded for this user/channel, or no server (e.g. DMs,
        # where server is None) — fall back to no team; was a bare except
        team = None
    await self.embark.callback(self, ctx, team, save)
# @adventure.command(pass_context=True)
# async def save(self, ctx, file):
# pass
# if no team and no save, if user doesn't have a save, new game. otherwise new game must specify team and save
示例8: _load_data
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def _load_data(self):
    """Fetch and unpickle this job's input data from internal storage.

    Honors an optional byte range (``self.data_byte_range``) via an
    HTTP-style Range header, and records the total download+unpickle
    time under the 'data_download_time' stat.
    """
    extra_get_args = {}
    if self.data_byte_range is not None:
        extra_get_args['Range'] = 'bytes={}-{}'.format(*self.data_byte_range)
    logger.debug("Getting function data")
    t_start = time.time()
    data_obj = self.internal_storage.get_data(self.data_key, extra_get_args=extra_get_args)
    logger.debug("Finished getting Function data")
    logger.debug("Unpickle Function data")
    loaded_data = pickle.loads(data_obj)
    logger.debug("Finished unpickle Function data")
    # the timer deliberately stops after unpickling, matching the stat name
    t_end = time.time()
    self.stats.write('data_download_time', round(t_end - t_start, 8))
    return loaded_data
示例9: testPickle
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
示例10: test_pickle
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def test_pickle(self):
    """Images (proxied or not) must survive a pickle round trip."""
    img_klass = self.image_class
    # Image that is not proxied can pickle
    source = img_klass(np.zeros((2,3,4)), None)
    copy = pickle.loads(pickle.dumps(source))
    assert_array_equal(source.get_data(), copy.get_data())
    assert_equal(source.get_header(), copy.get_header())
    # Save / reload using bytes IO objects, then pickle the proxied image
    for key, value in source.file_map.items():
        value.fileobj = BytesIO()
    source.to_file_map()
    proxied = source.from_file_map(source.file_map)
    proxied_copy = pickle.loads(pickle.dumps(proxied))
    assert_array_equal(source.get_data(), proxied_copy.get_data())
示例11: handle_read
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def handle_read(self):
    """Read from the socket, assembling length-prefixed pickled log records.

    State machine driven by self.rlen/self.dlen: first a 4-byte '>L'
    record length is accumulated, then the record body; each complete
    body is unpickled into a LogRecord and handed to globalLogger.
    NOTE(review): this mixes '' (str) resets with socket data and indexes
    socket.error with e[0] — it appears to be Python 2 code; on Python 3
    both would break. Confirm target interpreter before changing.
    """
    try:
        data = self.recv(self.dlen)
        if len(data) == 0:
            # nothing available / EOF
            return
    except socket.error as e:
        if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
            # non-blocking socket had no data ready; try again later
            return
    self.data += data
    self.dlen -= len(data)
    if self.dlen > 0:
        # don't have complete record yet. wait for more data to read
        return
    if self.rlen == 0:
        # header phase complete: '>L' big-endian length of the next record
        self.dlen = self.rlen = struct.unpack('>L', self.data)[0]
        self.data = ''
        # got record length. now read record
        return
    # got complete record
    # NOTE(review): pickle.loads on network data trusts the sender —
    # anyone who can reach this port can execute arbitrary code.
    obj = pickle.loads(self.data)
    record = logging.makeLogRecord(obj)
    # Note: EVERY record gets logged. This is because Logger.handle
    # is normally called AFTER logger-level filtering.
    # Filter (e.g., only WARNING or higher)
    # at the sender to save network bandwidth.
    globalLogger.handle(record)
    # reset for next record
    self.data = ''
    self.rlen = 0
    self.dlen = 4
示例12: collect_results_gpu
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def collect_results_gpu(result_part, size):
    """Gather per-rank result lists onto rank 0 via CUDA all_gather.

    Each rank pickles its partial results into a uint8 CUDA tensor, pads
    it to the max length across ranks, and all-gathers. Rank 0 unpickles
    every part, re-interleaves them into dataset order, trims padded
    samples down to ``size``, and returns the list; other ranks return None.
    """
    rank, world_size = get_dist_info()
    # serialize this rank's partial results into a byte tensor on the GPU
    local_bytes = torch.tensor(
        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # exchange byte lengths so every rank knows the required padding
    local_shape = torch.tensor(local_bytes.shape, device='cuda')
    all_shapes = [local_shape.clone() for _ in range(world_size)]
    dist.all_gather(all_shapes, local_shape)
    max_len = torch.tensor(all_shapes).max()
    padded = torch.zeros(max_len, dtype=torch.uint8, device='cuda')
    padded[:local_shape[0]] = local_bytes
    gathered = [local_bytes.new_zeros(max_len) for _ in range(world_size)]
    dist.all_gather(gathered, padded)
    if rank != 0:
        return
    # rank 0: strip each part's padding before unpickling
    parts = [
        pickle.loads(buf[:shp[0]].cpu().numpy().tobytes())
        for buf, shp in zip(gathered, all_shapes)
    ]
    # samples were distributed round-robin, so re-interleave them
    ordered_results = []
    for group in zip(*parts):
        ordered_results.extend(list(group))
    # the dataloader may pad some samples; drop the excess
    return ordered_results[:size]
示例13: test_pickle
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def test_pickle():
    """An OrderedSet must survive a pickle round trip unchanged."""
    original = OrderedSet('abracadabra')
    assert pickle.loads(pickle.dumps(original)) == original
示例14: test_empty_pickle
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def test_empty_pickle():
    """An empty OrderedSet must also round-trip through pickle."""
    blank = OrderedSet()
    assert pickle.loads(pickle.dumps(blank)) == blank
示例15: deserialize
# 需要导入模块: import pickle [as 别名]
# 或者: from pickle import loads [as 别名]
def deserialize(pickle_serialized=''):
    """Deserialize a pickled byte string back into a Python object.

    NOTE(review): pickle.loads must never be fed untrusted input — it can
    execute arbitrary code. The '' default is also odd: it always raises,
    since pickle cannot load an empty str payload; it is kept unchanged
    for backward compatibility with existing callers.
    """
    return pickle.loads(pickle_serialized)