This page collects typical usage examples of oslo_utils.units.Mi in Python. If you are wondering what units.Mi is, how to use it, or want to see it in real code, the curated examples below may help. You can also look further into usage examples for the containing module, oslo_utils.units.
The following presents 15 code examples that use units.Mi, sorted by popularity by default.
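Before the examples, a quick orientation: units.Mi is not a method but a plain integer constant, 1024 ** 2 = 1048576, defined alongside Ki, Gi and Ti in oslo_utils.units. Code multiplies by it to go from MiB to bytes and divides by it to go the other way. A minimal sketch (the memory figure below is made-up illustration data):

from oslo_utils import units

print(units.Ki)  # 1024
print(units.Mi)  # 1048576 (1024 ** 2)
print(units.Gi)  # 1073741824

memory_mb = 2048                      # hypothetical flavor memory, in MiB
memory_bytes = memory_mb * units.Mi   # MiB -> bytes
memory_gib = memory_bytes / units.Gi  # bytes -> GiB (2.0)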
Example 1: test_cinder_get_size_with_metadata
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def test_cinder_get_size_with_metadata(self):
    fake_client = FakeObject(auth_token=None, management_url=None)
    fake_volume_uuid = str(uuid.uuid4())
    # Image size stored in the volume metadata, in bytes (4500 MiB).
    expected_image_size = 4500 * units.Mi
    fake_volume = FakeObject(size=5,
                             metadata={'image_size': expected_image_size})
    fake_volumes = {fake_volume_uuid: fake_volume}

    with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
        mocked_cc.return_value = FakeObject(client=fake_client,
                                            volumes=fake_volumes)
        uri = 'cinder://%s' % fake_volume_uuid
        loc = location.get_location_from_uri(uri, conf=self.conf)
        image_size = self.store.get_size(loc, context=self.context)
        self.assertEqual(expected_image_size, image_size)
Example 2: test_cinder_get_size_with_metadata
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def test_cinder_get_size_with_metadata(self):
    fake_client = FakeObject(auth_token=None, management_url=None)
    fake_volume_uuid = str(uuid.uuid4())
    expected_image_size = 4500 * units.Mi
    fake_volume = FakeObject(size=5,
                             metadata={'image_size': expected_image_size})
    fake_volumes = {fake_volume_uuid: fake_volume}

    with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
        mocked_cc.return_value = FakeObject(client=fake_client,
                                            volumes=fake_volumes)
        uri = 'cinder://%s' % fake_volume_uuid
        # Same as Example 1, but resolves the location against a named backend.
        loc = location.get_location_from_uri_and_backend(uri,
                                                         "cinder1",
                                                         conf=self.conf)
        image_size = self.store.get_size(loc, context=self.context)
        self.assertEqual(expected_image_size, image_size)
Example 3: get_available_resource
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def get_available_resource(self, nodename):
    if not hasattr(self, '_nodename'):
        self._nodename = nodename
    if nodename != self._nodename:
        LOG.error('Hostname has changed from %(old)s to %(new)s. '
                  'A restart is required to take effect.',
                  {'old': self._nodename, 'new': nodename})

    memory = hostinfo.get_memory_usage()
    disk = hostinfo.get_disk_usage()
    stats = {
        'vcpus': hostinfo.get_total_vcpus(),
        'vcpus_used': hostinfo.get_vcpus_used(self.list_instances(True)),
        # hostinfo reports bytes; scale down to the MB/GB values Nova expects.
        'memory_mb': memory['total'] / units.Mi,
        'memory_mb_used': memory['used'] / units.Mi,
        'local_gb': disk['total'] / units.Gi,
        'local_gb_used': disk['used'] / units.Gi,
        'disk_available_least': disk['available'] / units.Gi,
        'hypervisor_type': 'docker',
        'hypervisor_version': versionutils.convert_version_to_int('1.0'),
        'hypervisor_hostname': self._nodename,
        'cpu_info': '?',
        'numa_topology': None,
        'supported_instances': [
            (fields.Architecture.I686, fields.HVType.DOCKER,
             fields.VMMode.EXE),
            (fields.Architecture.X86_64, fields.HVType.DOCKER,
             fields.VMMode.EXE)
        ]
    }
    return stats
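This example converts in the opposite direction from most of the others: raw byte counts reported by the host are divided by units.Mi and units.Gi to produce the MB/GB figures the stats dictionary expects. A tiny illustration with made-up host numbers:

from oslo_utils import units

memory = {'total': 8 * units.Gi, 'used': 3 * units.Gi}  # hypothetical byte counts
print(memory['total'] / units.Mi)  # 8192.0 MB
print(memory['used'] / units.Gi)   # 3.0 GB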
Example 4: _get_memory_limit_bytes
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def _get_memory_limit_bytes(self, instance):
    if isinstance(instance, objects.Instance):
        # Flavor memory is stored in MB; convert to bytes.
        return instance.get_flavor().memory_mb * units.Mi
    else:
        system_meta = utils.instance_sys_meta(instance)
        return int(system_meta.get(
            'instance_type_memory_mb', 0)) * units.Mi
Example 5: test_configure_remotefx
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def test_configure_remotefx(self):
    self.flags(enable_remotefx=True, group='hyperv')
    mock_instance = self._setup_remotefx_mocks()

    self._vmops._hostutils.check_server_feature.return_value = True
    self._vmops._vmutils.vm_gen_supports_remotefx.return_value = True
    extra_specs = mock_instance.flavor.extra_specs

    self._vmops.configure_remotefx(mock_instance, constants.VM_GEN_1)
    mock_enable_remotefx = (
        self._vmops._vmutils.enable_remotefx_video_adapter)
    mock_enable_remotefx.assert_called_once_with(
        mock_instance.name, int(extra_specs['os:monitors']),
        extra_specs['os:resolution'],
        # The 'os:vram' extra spec is in MB; the adapter expects bytes.
        int(extra_specs['os:vram']) * units.Mi)
Example 6: configure_add
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def configure_add(self):
    """Configure the Store to use the stored configuration options.

    Any store that needs special configuration should implement
    this method. If the store was not able to successfully configure
    itself, it should raise `exceptions.BadStoreConfiguration`.
    """
    try:
        if self.backend_group:
            chunk = getattr(self.conf,
                            self.backend_group).rbd_store_chunk_size
            pool = getattr(self.conf, self.backend_group).rbd_store_pool
            user = getattr(self.conf, self.backend_group).rbd_store_user
            conf_file = getattr(self.conf,
                                self.backend_group).rbd_store_ceph_conf
            connect_timeout = getattr(
                self.conf, self.backend_group).rados_connect_timeout
        else:
            chunk = self.conf.glance_store.rbd_store_chunk_size
            pool = self.conf.glance_store.rbd_store_pool
            user = self.conf.glance_store.rbd_store_user
            conf_file = self.conf.glance_store.rbd_store_ceph_conf
            connect_timeout = self.conf.glance_store.rados_connect_timeout

        # rbd_store_chunk_size is configured in MB; convert to bytes.
        self.chunk_size = chunk * units.Mi
        self.READ_CHUNKSIZE = self.chunk_size
        self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE

        # these must not be unicode since they will be passed to a
        # non-unicode-aware C library
        self.pool = str(pool)
        self.user = str(user)
        self.conf_file = str(conf_file)
        self.connect_timeout = connect_timeout
    except cfg.ConfigFileValueError as e:
        reason = _("Error in store configuration: %s") % e
        LOG.error(reason)
        raise exceptions.BadStoreConfiguration(store_name='rbd',
                                               reason=reason)
    if self.backend_group:
        self._set_url_prefix()
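Here the chunk size option is expressed in megabytes, so multiplying by units.Mi turns it into the byte count the RBD client actually reads and writes. For instance (8 MB is just an assumed configured value, not necessarily your deployment's setting):

from oslo_utils import units

rbd_store_chunk_size = 8                      # MB, hypothetical configured value
chunk_size = rbd_store_chunk_size * units.Mi  # 8388608 bytes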
Example 7: _unit_convert
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def _unit_convert(self, capacity):
    """Convert a capacity string of any unit to GB."""
    capacity = str(capacity).upper()
    try:
        unit_of_used = ''.join(re.findall(r'[A-Z]', capacity))
    except BaseException:
        unit_of_used = ''
    capacity = float(capacity.replace(unit_of_used, ''))
    if unit_of_used in ['B', '']:
        capacity = capacity / units.Gi    # bytes -> GB
    elif unit_of_used in ['K', 'KB']:
        capacity = capacity / units.Mi    # KB -> GB
    elif unit_of_used in ['M', 'MB']:
        capacity = capacity / units.Ki    # MB -> GB
    elif unit_of_used in ['G', 'GB']:
        capacity = capacity
    elif unit_of_used in ['T', 'TB']:
        capacity = capacity * units.Ki    # TB -> GB
    elif unit_of_used in ['E', 'EB']:
        capacity = capacity * units.Mi
    # Round to a whole number of GB before returning.
    capacity = '%.0f' % capacity
    return float(capacity)
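As a rough usage sketch of the ladder above (drv is a hypothetical instance of the driver class defining _unit_convert; the capacity strings are made up):

# Illustrative only; `drv` stands for an instance of the class above.
assert drv._unit_convert('2097152KB') == 2.0    # KB / units.Mi -> GB
assert drv._unit_convert('20480MB') == 20.0     # MB / units.Ki -> GB
assert drv._unit_convert('3TB') == 3072.0       # TB * units.Ki -> GB
assert drv._unit_convert(str(units.Gi)) == 1.0  # plain bytes / units.Gi -> GB

Note that the final '%.0f' formatting rounds to a whole number of GB, so a value such as '512MB' comes back as 0.0 or 1.0 rather than 0.5.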
Example 8: size_to_gb
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def size_to_gb(self, size):
    """Convert a size string such as '2TB' or '500MB' to whole GB."""
    new_size = 0
    if 'P' in size:
        new_size = int(float(size.rstrip('PB')) * units.Mi)   # PB -> GB
    elif 'T' in size:
        new_size = int(float(size.rstrip('TB')) * units.Ki)   # TB -> GB
    elif 'G' in size:
        new_size = int(float(size.rstrip('GB')) * 1)
    elif 'M' in size:
        mb_size = float(size.rstrip('MB'))
        # Round MB up to the next whole GB.
        new_size = int((mb_size + units.Ki - 1) / units.Ki)
    return new_size
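A quick sanity check of size_to_gb with hypothetical inputs (again, drv is assumed to be an instance of the class above):

# Illustrative only.
assert drv.size_to_gb('2TB') == 2048       # 2 * units.Ki
assert drv.size_to_gb('1.5PB') == 1572864  # 1.5 * units.Mi
assert drv.size_to_gb('500MB') == 1        # rounded up to a whole GB
assert drv.size_to_gb('10') == 0           # no recognized unit -> default 0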
Example 9: _update_share_stats
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def _update_share_stats(self):
    stats = self.volume_client.rados.get_cluster_stats()

    # Cluster stats are reported in KB; divide by units.Mi to get GB.
    total_capacity_gb = stats['kb'] / units.Mi
    free_capacity_gb = stats['kb_avail'] / units.Mi

    data = {
        'vendor_name': 'Ceph',
        'driver_version': '1.0',
        'share_backend_name': self.backend_name,
        'storage_protocol': self.configuration.safe_get(
            'cephfs_protocol_helper_type'),
        'pools': [
            {
                'pool_name': 'cephfs',
                'total_capacity_gb': total_capacity_gb,
                'free_capacity_gb': free_capacity_gb,
                'qos': 'False',
                'reserved_percentage': 0,
                'dedupe': [False],
                'compression': [False],
                'thin_provisioning': [False]
            }
        ],
        'total_capacity_gb': total_capacity_gb,
        'free_capacity_gb': free_capacity_gb,
        'snapshot_support': self.configuration.safe_get(
            'cephfs_enable_snapshots'),
    }
    super(  # pylint: disable=no-member
        CephFSDriver, self)._update_share_stats(data)
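Since the rados cluster stats are keyed in kilobytes ('kb', 'kb_avail'), dividing by units.Mi (1024 ** 2) is what yields gigabytes. A quick check with made-up numbers:

from oslo_utils import units

stats = {'kb': 50 * units.Mi, 'kb_avail': 20 * units.Mi}  # hypothetical: 50 GB total, 20 GB free, in KB
print(stats['kb'] / units.Mi)        # 50.0 GB
print(stats['kb_avail'] / units.Mi)  # 20.0 GB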
Example 10: extend_share
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def extend_share(self, share, new_size, share_server):
    share_proto = share['share_proto']
    share_name = share['name']

    # The backend expects the size in 512-byte sectors:
    # 1 GB = 2 * units.Mi sectors.
    size = int(new_size) * units.Mi * 2

    share_url_type = self.helper._get_share_url_type(share_proto)
    share = self.helper._get_share_by_name(share_name, share_url_type)
    if not share:
        err_msg = (_("Can not get share ID by share %s.")
                   % share_name)
        LOG.error(err_msg)
        raise exception.InvalidShareAccess(reason=err_msg)

    fsid = share['FSID']
    fs_info = self.helper._get_fs_info_by_id(fsid)
    current_size = int(fs_info['CAPACITY']) / units.Mi / 2

    if current_size >= new_size:
        err_msg = (_("New size for extend must be bigger than "
                     "current size on array. (current: %(size)s, "
                     "new: %(new_size)s).")
                   % {'size': current_size, 'new_size': new_size})
        LOG.error(err_msg)
        raise exception.InvalidInput(reason=err_msg)

    self.helper._change_share_size(fsid, size)
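The factor of units.Mi * 2 used here (and in the next two examples) comes from 512-byte sectors: one GB is units.Gi / 512 = 2 * units.Mi sectors. A quick check:

from oslo_utils import units

SECTOR_SIZE = 512                         # bytes per sector (assumed)
sectors_per_gb = units.Gi // SECTOR_SIZE  # 2097152
assert sectors_per_gb == 2 * units.Mi

new_size_gb = 10                          # hypothetical new share size in GB
assert new_size_gb * units.Mi * 2 == new_size_gb * sectors_per_gb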
Example 11: _get_capacity
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def _get_capacity(self, pool_name, result):
    """Get free capacity and total capacity of the pools."""
    poolinfo = self.helper._find_pool_info(pool_name, result)

    if poolinfo:
        # The array reports capacities in 512-byte sectors; convert to GB.
        total = float(poolinfo['TOTALCAPACITY']) / units.Mi / 2
        free = float(poolinfo['CAPACITY']) / units.Mi / 2
        consumed = float(poolinfo['CONSUMEDCAPACITY']) / units.Mi / 2
        poolinfo['TOTALCAPACITY'] = total
        poolinfo['CAPACITY'] = free
        poolinfo['CONSUMEDCAPACITY'] = consumed
        poolinfo['PROVISIONEDCAPACITYGB'] = round(
            float(total) - float(free), 2)

    return poolinfo
Example 12: _init_filesys_para
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def _init_filesys_para(self, share, poolinfo, extra_specs):
    """Init basic filesystem parameters."""
    name = share['name']
    # Share size is given in GB; the array expects 512-byte sectors.
    size = int(share['size']) * units.Mi * 2
    fileparam = {
        "NAME": name.replace("-", "_"),
        "DESCRIPTION": "",
        "ALLOCTYPE": extra_specs['LUNType'],
        "CAPACITY": size,
        "PARENTID": poolinfo['ID'],
        "INITIALALLOCCAPACITY": units.Ki * 20,
        "PARENTTYPE": 216,
        "SNAPSHOTRESERVEPER": 20,
        "INITIALDISTRIBUTEPOLICY": 0,
        "ISSHOWSNAPDIR": True,
        "RECYCLESWITCH": 0,
        "RECYCLEHOLDTIME": 15,
        "RECYCLETHRESHOLD": 0,
        "RECYCLEAUTOCLEANSWITCH": 0,
        "ENABLEDEDUP": extra_specs['dedupe'],
        "ENABLECOMPRESSION": extra_specs['compression'],
    }

    if fileparam['ALLOCTYPE'] == constants.ALLOC_TYPE_THICK_FLAG:
        if (extra_specs['dedupe'] or
                extra_specs['compression']):
            err_msg = _(
                'The filesystem type is "Thick",'
                ' so dedupe or compression cannot be set.')
            LOG.error(err_msg)
            raise exception.InvalidInput(reason=err_msg)

    if extra_specs['sectorsize']:
        fileparam['SECTORSIZE'] = extra_specs['sectorsize'] * units.Ki

    return fileparam
Example 13: test_rotate_log
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def test_rotate_log(self, path_mock, handler_mock):
    rotation_type = 'size'
    max_logfile_size_mb = 100
    # The option is in MB; the rotating file handler expects bytes.
    maxBytes = max_logfile_size_mb * units.Mi
    backup_count = 2
    self.config(log_rotation_type=rotation_type,
                max_logfile_size_mb=max_logfile_size_mb,
                max_logfile_count=backup_count)
    log._setup_logging_from_conf(self.CONF, 'test', 'test')
    handler_mock.assert_called_once_with(path_mock.return_value,
                                         maxBytes=maxBytes,
                                         backupCount=backup_count)
    self.assertEqual(self.log_handlers[0], handler_mock.return_value)
Example 14: _generate_random_telemetry
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def _generate_random_telemetry(self):
    processor_time = random.randint(0, 100)
    mem_total_bytes = 4 * units.Gi
    mem_avail_bytes = random.randint(1 * units.Gi, 4 * units.Gi)
    mem_page_reads = random.randint(0, 2000)
    mem_page_writes = random.randint(0, 2000)
    disk_read_bytes = random.randint(0 * units.Mi, 200 * units.Mi)
    disk_write_bytes = random.randint(0 * units.Mi, 200 * units.Mi)
    net_bytes_received = random.randint(0 * units.Mi, 20 * units.Mi)
    net_bytes_sent = random.randint(0 * units.Mi, 10 * units.Mi)
    return jsonutils.dumps([
        processor_time, mem_total_bytes, mem_avail_bytes,
        mem_page_reads, mem_page_writes, disk_read_bytes,
        disk_write_bytes, net_bytes_received, net_bytes_sent])
Example 15: _copy_volume
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Mi [as alias]
def _copy_volume(volume, disk_image_reader, backup_writer, event_manager):
    disk_id = volume["disk_id"]
    # for now we assume it is a local file
    path = volume["disk_image_uri"]
    skip_zeroes = volume.get("zeroed", False)

    with backup_writer.open("", disk_id) as writer:
        with disk_image_reader.open(path) as reader:
            disk_size = reader.disk_size

            perc_step = event_manager.add_percentage_step(
                disk_size,
                message_format="Disk copy progress for %s: "
                               "{:.0f}%%" % disk_id)

            offset = 0
            max_block_size = 10 * units.Mi  # copy in chunks of at most 10 MB
            while offset < disk_size:
                allocated, zero_block, block_size = reader.get_block_status(
                    offset, max_block_size)
                if not allocated or zero_block and skip_zeroes:
                    if not allocated:
                        LOG.debug("Unallocated block detected: %s", block_size)
                    else:
                        LOG.debug("Skipping zero block: %s", block_size)
                    # Skip the hole/zero block: just advance the write position.
                    offset += block_size
                    writer.seek(offset)
                else:
                    buf = reader.read(offset, block_size)
                    writer.write(buf)
                    offset += len(buf)
                    buf = None
                    gc.collect()

                event_manager.set_percentage_step(
                    perc_step, offset)