本文整理匯總了Python中sys.getsizeof方法的典型用法代碼示例。如果您正苦於以下問題:Python sys.getsizeof方法的具體用法?Python sys.getsizeof怎麽用?Python sys.getsizeof使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類sys
的用法示例。
在下文中一共展示了sys.getsizeof方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _sizeof
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def _sizeof(obj, seen=None):
'''Recursively finds size of objects'''
size = sys.getsizeof(obj)
if seen is None:
seen = set()
obj_id = id(obj)
if obj_id in seen:
return 0
# Important mark as seen *before* entering recursion to gracefully handle
# self-referential objects
seen.add(obj_id)
if isinstance(obj, dict):
size += sum([_sizeof(v, seen) for v in obj.values()])
size += sum([_sizeof(k, seen) for k in obj.keys()])
elif hasattr(obj, '__dict__'):
size += _sizeof(obj.__dict__, seen)
elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
size += sum([_sizeof(i, seen) for i in obj])
return size
示例2: _nbytes
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def _nbytes(self, deep=False):
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
*this is in internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
示例3: divide_message
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def divide_message(msg):
    """Split *msg* into chunks that Kafka will accept.

    Kafka won't accept more than 1Mb messages, therefore too big
    messages need to be divided into smaller chunks.  The message is
    Prometheus-style text: '#'-prefixed comment lines describe the
    metric line that follows them, and each comment block is kept
    together with its metric line when splitting.

    Returns a list of chunk strings (a single-element list when the
    message is already small enough).
    """
    MAX_SIZE = 10 ** 5
    if sys.getsizeof(msg) < MAX_SIZE:
        return [msg]
    lines = msg.split('\n')
    divided_message = []
    current_chunk = ''
    i = 0
    while i < len(lines):
        # Gather one metric: its leading '#' comment lines plus the metric
        # line itself.  (The original bumped ``i`` inside a
        # ``for i in range(...)`` body, which does not advance the loop —
        # comment lines were re-processed and the index could run past the
        # end of the list; an explicit while-loop indexes correctly.)
        metric = ''
        while i < len(lines) and lines[i].startswith('#'):
            metric += lines[i] + '\n'
            i += 1
        if i < len(lines):
            metric += lines[i] + '\n'
            i += 1
        if sys.getsizeof(current_chunk + metric) > MAX_SIZE and current_chunk:
            divided_message.append(current_chunk)
            current_chunk = metric
        else:
            current_chunk += metric
    # The original dropped the final partially-filled chunk; flush it.
    if current_chunk:
        divided_message.append(current_chunk)
    return divided_message
示例4: get_size
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def get_size(self, msg, seen=None):
    """Recursively compute the total size in bytes of *msg*.

    Follows dict keys/values, instance ``__dict__`` attributes and
    generic iterables (except strings/bytes), counting every distinct
    object exactly once via the ``seen`` id set so cycles terminate.
    """
    if seen is None:
        seen = set()
    if id(msg) in seen:
        return 0
    # Record the object before recursing so self-referential structures
    # do not loop forever.
    seen.add(id(msg))
    total = sys.getsizeof(msg)
    if isinstance(msg, dict):
        total += sum(self.get_size(value, seen) for value in msg.values())
        total += sum(self.get_size(key, seen) for key in msg.keys())
    elif hasattr(msg, '__dict__'):
        total += self.get_size(msg.__dict__, seen)
    elif hasattr(msg, '__iter__') and not isinstance(msg, (str, bytes, bytearray)):
        total += sum(self.get_size(item, seen) for item in msg)
    return total
示例5: send_photo
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def send_photo(self, user_peer, image, caption_text="", name="", file_storage_version=1, mime_type="image/jpeg",
               success_callback=None, failure_callback=None, **kwargs):
    """Send an image to *user_peer*: upload the file first and, on
    success, send a PhotoMessage describing it as a follow-up message.
    """
    image_buffer = get_file_buffer(file=image)
    # NOTE(review): sys.getsizeof() on a buffer includes Python object
    # overhead, so this slightly overstates the raw payload length
    # (len(image_buffer)) — confirm which value the server expects.
    file_size = sys.getsizeof(image_buffer)
    im = Image.open(io.BytesIO(image_buffer))
    width, height = im.size
    thumb = get_image_thumbnails(im)

    def success_upload_image(user_data, server_response):
        # Runs after the upload completes; builds the photo message from
        # the server's response and sends it to the peer.
        file_id = str(server_response.get("file_id", None))
        # NOTE(review): the access hash is read from the "user_id" key —
        # looks suspicious; verify against the server response schema.
        access_hash = str(server_response.get("user_id", None))
        photo_message = PhotoMessage(file_id=file_id, access_hash=access_hash, name=name, file_size=file_size,
                                     mime_type=mime_type, file_storage_version=file_storage_version, width=width,
                                     height=height, caption_text=TextMessage(text=caption_text), thumb=thumb)
        # NOTE(review): kwargs is forwarded as a single keyword argument
        # named "kwargs" rather than expanded with **kwargs — confirm
        # send_message() expects it this way.
        self.send_message(message=photo_message, peer=user_peer, success_callback=success_callback,
                          failure_callback=failure_callback, kwargs=kwargs)

    self.upload_file(file=image, file_type="file", success_callback=success_upload_image,
                     failure_callback=failure_callback)
示例6: calc_data_size
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def calc_data_size(dt):
    """Estimate the in-memory size in bytes of a chunk's data.

    Handles None, tuples of chunks, ndarray-like objects (``nbytes``),
    zero-dimensional shapes, pandas-like objects, dtype-carrying
    array-likes, and falls back to ``sys.getsizeof`` for plain Python
    objects.  Check order matters and is preserved.
    """
    if dt is None:
        return 0
    if isinstance(dt, tuple):
        # Composite chunk: size is the sum of its parts.
        return sum(calc_data_size(part) for part in dt)
    if hasattr(dt, 'nbytes'):
        # ndarray-like: whichever of object size and raw buffer size is larger.
        return max(sys.getsizeof(dt), dt.nbytes)
    if hasattr(dt, 'shape') and len(dt.shape) == 0:
        # Zero-dimensional data carries no payload.
        return 0
    if hasattr(dt, 'memory_usage') or hasattr(dt, 'groupby_obj'):
        # pandas-like objects (DataFrame/Series/GroupBy wrappers).
        return sys.getsizeof(dt)
    if hasattr(dt, 'dtypes') and hasattr(dt, 'shape'):
        # DataFrame-like without nbytes: rows * sum of per-column item sizes.
        return dt.shape[0] * sum(dtype.itemsize for dtype in dt.dtypes)
    if hasattr(dt, 'dtype') and hasattr(dt, 'shape'):
        # Series/array-like with a single dtype.
        return dt.shape[0] * dt.dtype.itemsize
    # object chunk
    return sys.getsizeof(dt)
示例7: get_chunk_metas
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def get_chunk_metas(self, chunk_keys, filter_fields=None):
    """Build a ChunkMeta (size, shape, workers) for every key in *chunk_keys*.

    Keys with no stored data yield ``None`` so the result stays
    positionally aligned with the input.  ``filter_fields`` is not
    supported by the local context.
    """
    if filter_fields is not None:  # pragma: no cover
        raise NotImplementedError("Local context doesn't support filter fields now")
    metas = []
    for key in chunk_keys:
        data = self.get(key)
        if data is None:
            metas.append(None)
            continue
        if hasattr(data, 'nbytes'):
            # ndarray
            size, shape = data.nbytes, data.shape
        elif hasattr(data, 'memory_usage'):
            # DataFrame
            size, shape = data.memory_usage(deep=True).sum(), data.shape
        else:
            # other
            size, shape = sys.getsizeof(data), ()
        metas.append(ChunkMeta(chunk_size=size, chunk_shape=shape, workers=None))
    return metas
示例8: _get_size
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def _get_size(item, seen):
known_types = {dict: lambda d: chain.from_iterable(d.items())}
default_size = getsizeof(0)
def size_walk(item):
if id(item) in seen:
return 0
seen.add(id(item))
s = getsizeof(item, default_size)
for _type, fun in known_types.iteritems():
if isinstance(item, _type):
s += sum(map(size_walk, fun(item)))
break
return s
return size_walk(item)
示例9: test_errors
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def test_errors(self):
    # Exercises sys.getsizeof error paths.  NOTE(review): this block uses
    # the ``long`` builtin, so it targets Python 2 — confirm interpreter
    # before reuse.

    class BadSizeof(object):
        # An exception raised in __sizeof__ propagates out of getsizeof.
        def __sizeof__(self):
            raise ValueError
    self.assertRaises(ValueError, sys.getsizeof, BadSizeof())

    class InvalidSizeof(object):
        # A non-integer __sizeof__ result triggers TypeError...
        def __sizeof__(self):
            return None
    self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
    # ...unless a default is supplied, which is then returned unchanged
    # (checked by identity, not just equality).
    sentinel = ["sentinel"]
    self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)

    class OverflowSizeof(long):
        # __sizeof__ results up to sys.maxsize are accepted (plus GC
        # header); larger values overflow, negatives are rejected.
        def __sizeof__(self):
            return int(self)
    self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
                     sys.maxsize + self.gc_headsize)
    with self.assertRaises(OverflowError):
        sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
    with self.assertRaises(ValueError):
        sys.getsizeof(OverflowSizeof(-1))
    with self.assertRaises((ValueError, OverflowError)):
        sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
示例10: get_size
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def get_size(obj, seen=None):
    """Return the deep size in bytes of *obj*.

    Recurses into dict keys/values, instance ``__dict__`` attributes and
    generic iterables (strings and byte types are treated as leaves).
    ``seen`` holds ids of objects already counted so shared references
    and cycles contribute only once.
    """
    if seen is None:
        seen = set()
    marker = id(obj)
    if marker in seen:
        return 0
    # Mark before recursing so self-referential objects terminate.
    seen.add(marker)
    total = sys.getsizeof(obj)
    if isinstance(obj, dict):
        children = list(obj.keys()) + list(obj.values())
        total += sum(get_size(child, seen) for child in children)
    elif hasattr(obj, '__dict__'):
        total += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        for element in obj:
            total += get_size(element, seen)
    return total
示例11: test_memory_usage
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def test_memory_usage(self):
    # For every fixture object compare shallow vs deep memory_usage and
    # check consistency with sys.getsizeof.  Assumes self.objs holds
    # pandas Index/Series fixtures — set up outside this block.
    for o in self.objs:
        res = o.memory_usage()
        res_deep = o.memory_usage(deep=True)
        if (is_object_dtype(o) or (isinstance(o, Series) and
                                   is_object_dtype(o.index))):
            # if there are objects, only deep will pick them up
            assert res_deep > res
        else:
            assert res == res_deep
        if isinstance(o, Series):
            # Series memory usage decomposes into values + index parts.
            assert ((o.memory_usage(index=False) +
                     o.index.memory_usage()) ==
                    o.memory_usage(index=True))
            # sys.getsizeof will call the .memory_usage with
            # deep=True, and add on some GC overhead
            diff = res_deep - sys.getsizeof(o)
            assert abs(diff) < 100
示例12: _nbytes
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def _nbytes(self, deep=False):
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
*this is in internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.labels)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
示例13: gz_decompress
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def gz_decompress(driver, data):
    """
    Params:
        driver (PersistenceDriver)
        data (Bytes): Compressed data
    Return: Bytes
    """
    try:
        # wbits=47 (32 + 15) auto-detects zlib or gzip framing.
        data = zlib.decompress(data, 47)
    except zlib.error:
        # Not compressed: fall through and return the original payload.
        LOGGER.warning(
            'LookupTable (%s): Data is not compressed; defaulting to original payload',
            driver.id
        )
    else:
        LOGGER.debug(
            'LookupTable (%s): Object decompressed to %d byte payload',
            driver.id,
            sys.getsizeof(data)
        )
    return data
示例14: gz_compress
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
def gz_compress(driver, data):
    """
    Params:
        driver (PersistenceDriver)
        data (Bytes): Uncompressed data
    Return: Bytes
    """
    size_before = sys.getsizeof(data)
    try:
        compressed = zlib.compress(data, level=zlib.Z_BEST_COMPRESSION)
    except zlib.error:
        # Compression failed: log with traceback; callers receive None,
        # matching the original's implicit fall-through.
        LOGGER.exception('LookupTable (%s): Data compression error.', driver.id)
        return None
    LOGGER.debug(
        'LookupTable (%s): Successfully compressed input data from %d to %d bytes',
        driver.id,
        size_before,
        sys.getsizeof(compressed)
    )
    return compressed
示例15: receive
# 需要導入模塊: import sys [as 別名]
# 或者: from sys import getsizeof [as 別名]
async def receive(self):
    """Await one message from this stream's websocket and account for it.

    Returns the received payload (or ``None``).  Fatal errors mark the
    stream as stopping, optionally request a restart, and exit the
    process.

    NOTE(review): the original text had ``def receive`` containing
    ``await``, which is a SyntaxError — the ``async`` keyword was
    evidently lost in transcription and is restored here.
    """
    self.handler_binance_websocket_api_manager.set_heartbeat(self.stream_id)
    try:
        received_data_json = await self.handler_binance_websocket_api_manager.websocket_list[self.stream_id].recv()
        try:
            # A completed restart: count the reconnect and clear the request.
            if self.handler_binance_websocket_api_manager.restart_requests[self.stream_id]['status'] == "restarted":
                self.handler_binance_websocket_api_manager.increase_reconnect_counter(self.stream_id)
                del self.handler_binance_websocket_api_manager.restart_requests[self.stream_id]
        except KeyError:
            # No restart request pending for this stream.
            pass
        if received_data_json is not None:
            # Update receive statistics with the payload's (shallow) size.
            size = sys.getsizeof(received_data_json)
            self.handler_binance_websocket_api_manager.increase_processed_receives_statistic(self.stream_id)
            self.handler_binance_websocket_api_manager.add_total_received_bytes(size)
            self.handler_binance_websocket_api_manager.increase_received_bytes_per_second(self.stream_id,
                                                                                          size)
        return received_data_json
    except RuntimeError as error_msg:
        logging.debug("binance_websocket_api_connection->receive(" +
                      str(self.stream_id) + ") - RuntimeError - error_msg: " + str(error_msg))
        sys.exit(1)
    except ssl.SSLError as error_msg:
        logging.debug("binance_websocket_api_connection->receive(" +
                      str(self.stream_id) + ") - ssl.SSLError - error_msg: " + str(error_msg))
    except KeyError as error_msg:
        logging.debug("binance_websocket_api_connection->receive(" +
                      str(self.stream_id) + ") - KeyError - error_msg: " + str(error_msg))
        self.handler_binance_websocket_api_manager.stream_is_stopping(self.stream_id)
        if self.handler_binance_websocket_api_manager.is_stop_request(self.stream_id) is False:
            self.handler_binance_websocket_api_manager.set_restart_request(self.stream_id)
        sys.exit(1)
    except asyncio.base_futures.InvalidStateError as error_msg:
        logging.critical("binance_websocket_api_connection->receive(" +
                         str(self.stream_id) + ") - asyncio.base_futures.InvalidStateError - error_msg: " +
                         str(error_msg) + " - Extra info: https://github.com/oliver-zehentleitner/unicorn-binance-"
                         "websocket-api/issues/18 - open an own issue if needed!")
        self.handler_binance_websocket_api_manager.stream_is_stopping(self.stream_id)
        if self.handler_binance_websocket_api_manager.is_stop_request(self.stream_id) is False:
            self.handler_binance_websocket_api_manager.set_restart_request(self.stream_id)
        sys.exit(1)
開發者ID:oliver-zehentleitner,項目名稱:unicorn-binance-websocket-api,代碼行數:42,代碼來源:unicorn_binance_websocket_api_connection.py