This article collects typical usage examples of oslo_utils.units.Gi in Python. If you have been wondering what units.Gi is and how it is used in practice, the curated examples below should help; you can also explore the module it lives in, oslo_utils.units, for the related unit constants.
The following shows 15 code examples of units.Gi, ordered by popularity by default.
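Before the examples, here is a minimal standalone sketch (not taken from any of the projects quoted below; the numeric values are illustrative) of what units.Gi is and of the two conversions that recur throughout the examples: multiplying a GiB count by units.Gi to get bytes, and dividing a byte count by units.Gi to report capacity in GiB.

# Minimal sketch: units.Gi is the binary gigabyte (GiB) constant, 1024 ** 3.
from oslo_utils import units

assert units.Gi == 2 ** 30 == 1073741824

# GiB -> bytes, e.g. sizing a disk from a flavor's root_gb value.
root_gb = 20                                      # illustrative flavor size
new_size_bytes = root_gb * units.Gi               # 21474836480

# bytes -> GiB, e.g. reporting backend capacity in driver stats.
free_bytes = 5 * units.Gi + 123                   # illustrative backend value
free_capacity_gb = float(free_bytes) / units.Gi   # ~5.0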
Example 1: _check_and_update_disks
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def _check_and_update_disks(self, context, instance, vm_gen, image_meta,
                            block_device_info, resize_instance=False):
    self._block_dev_man.validate_and_update_bdi(instance, image_meta,
                                                vm_gen, block_device_info)
    root_device = block_device_info['root_disk']

    if root_device['type'] == constants.DISK:
        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        root_device['path'] = root_vhd_path
        if not root_vhd_path:
            base_vhd_path = self._pathutils.get_instance_dir(instance.name)
            raise exception.DiskNotFound(location=base_vhd_path)

        root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path)
        src_base_disk_path = root_vhd_info.get("ParentPath")
        if src_base_disk_path:
            self._check_base_disk(context, instance, root_vhd_path,
                                  src_base_disk_path)

        if resize_instance:
            new_size = instance.flavor.root_gb * units.Gi
            self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size)
Example 2: test_resize_and_cache_vhd_smaller
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def test_resize_and_cache_vhd_smaller(self, mock_get_vhd_size_gb):
    self.imagecache._vhdutils.get_vhd_size.return_value = {
        'VirtualSize': (self.FAKE_VHD_SIZE_GB + 1) * units.Gi
    }
    mock_get_vhd_size_gb.return_value = self.FAKE_VHD_SIZE_GB
    mock_internal_vhd_size = (
        self.imagecache._vhdutils.get_internal_vhd_size_by_file_size)
    mock_internal_vhd_size.return_value = self.FAKE_VHD_SIZE_GB * units.Gi

    self.assertRaises(exception.FlavorDiskSmallerThanImage,
                      self.imagecache._resize_and_cache_vhd,
                      mock.sentinel.instance,
                      mock.sentinel.vhd_path)

    self.imagecache._vhdutils.get_vhd_size.assert_called_once_with(
        mock.sentinel.vhd_path)
    mock_get_vhd_size_gb.assert_called_once_with(mock.sentinel.instance)
    mock_internal_vhd_size.assert_called_once_with(
        mock.sentinel.vhd_path, self.FAKE_VHD_SIZE_GB * units.Gi)
Example 3: test_cache_rescue_image_bigger_than_flavor
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def test_cache_rescue_image_bigger_than_flavor(self):
    fake_rescue_image_id = 'fake_rescue_image_id'
    self.imagecache._vhdutils.get_vhd_info.return_value = {
        'VirtualSize': (self.instance.flavor.root_gb + 1) * units.Gi}
    (expected_path,
     expected_vhd_path) = self._prepare_get_cached_image(
        rescue_image_id=fake_rescue_image_id)

    self.assertRaises(exception.ImageUnacceptable,
                      self.imagecache.get_cached_image,
                      self.context, self.instance,
                      fake_rescue_image_id)

    self._mock_fetch.assert_called_once_with(
        self.context, fake_rescue_image_id, expected_path,
        self.instance.trusted_certs)
    self.imagecache._vhdutils.get_vhd_info.assert_called_once_with(
        expected_vhd_path)
Example 4: test_cinder_get_size
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def test_cinder_get_size(self):
    fake_client = FakeObject(auth_token=None, management_url=None)
    fake_volume_uuid = str(uuid.uuid4())
    fake_volume = FakeObject(size=5, metadata={})
    fake_volumes = {fake_volume_uuid: fake_volume}

    with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
        mocked_cc.return_value = FakeObject(client=fake_client,
                                            volumes=fake_volumes)

        uri = 'cinder://%s' % fake_volume_uuid
        loc = location.get_location_from_uri_and_backend(uri,
                                                         "cinder1",
                                                         conf=self.conf)
        image_size = self.store.get_size(loc, context=self.context)
        self.assertEqual(fake_volume.size * units.Gi, image_size)
Example 5: get_size
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def get_size(self, location, context=None):
    """
    Takes a `glance_store.location.Location` object that indicates
    where to find the image file and returns the image size

    :param location: `glance_store.location.Location` object, supplied
                     from glance_store.location.get_location_from_uri()
    :raises: `glance_store.exceptions.NotFound` if image does not exist
    :rtype: int
    """
    loc = location.store_location

    try:
        self._check_context(context)
        volume = self.get_cinderclient(context).volumes.get(loc.volume_id)
        return int(volume.metadata.get('image_size',
                                       volume.size * units.Gi))
    except cinder_exception.NotFound:
        raise exceptions.NotFound(image=loc.volume_id)
    except Exception:
        LOG.exception(_LE("Failed to get image size due to "
                          "internal error."))
        return 0
Example 6: _update_share_stats
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def _update_share_stats(self):
    """Retrieve stats info from share group."""
    (free_capacity_bytes, physical_capacity_bytes,
     provisioned_capacity_gb) = self._get_available_capacity()

    max_over_subscription_ratio = (
        self.configuration.max_over_subscription_ratio)

    data = dict(
        share_backend_name=self._backend_name,
        vendor_name='INFINIDAT',
        driver_version=self.VERSION,
        storage_protocol='NFS',
        total_capacity_gb=float(physical_capacity_bytes) / units.Gi,
        free_capacity_gb=float(free_capacity_bytes) / units.Gi,
        reserved_percentage=self.configuration.reserved_share_percentage,
        thin_provisioning=self.configuration.infinidat_thin_provision,
        max_over_subscription_ratio=max_over_subscription_ratio,
        provisioned_capacity_gb=provisioned_capacity_gb,
        snapshot_support=True,
        create_share_from_snapshot_support=True,
        mount_snapshot_support=True,
        revert_to_snapshot_support=True)
    super(InfiniboxShareDriver, self)._update_share_stats(data)
Example 7: shrink_share
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def shrink_share(self, share, new_size, share_server=None):
    """Shrink a share to new_size."""
    lcfg = self.configuration
    details = self.zfssa.get_share(lcfg.zfssa_pool,
                                   lcfg.zfssa_project,
                                   share['id'])
    used_space = details['space_data']
    new_size_byte = int(new_size) * units.Gi
    if used_space > new_size_byte:
        LOG.error('%(used).1fGB of share %(id)s is already used. '
                  'Cannot shrink to %(newsize)dGB.',
                  {'used': float(used_space) / units.Gi,
                   'id': share['id'],
                   'newsize': new_size})
        raise exception.ShareShrinkingPossibleDataLoss(
            share_id=share['id'])

    arg = self.create_arg(new_size)
    self.zfssa.modify_share(lcfg.zfssa_pool, lcfg.zfssa_project,
                            share['id'], arg)
Example 8: extend_share
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def extend_share(self, share, new_size, share_server=None):
    """Extend a share to new_size."""
    lcfg = self.configuration
    free_space = self.zfssa.get_project_stats(lcfg.zfssa_pool,
                                              lcfg.zfssa_project)
    diff_space = int(new_size - share['size']) * units.Gi

    if diff_space > free_space:
        msg = (_('There is not enough free space in project %s')
               % (lcfg.zfssa_project))
        LOG.error(msg)
        raise exception.ShareExtendingError(share_id=share['id'],
                                            reason=msg)
    arg = self.create_arg(new_size)
    self.zfssa.modify_share(lcfg.zfssa_pool, lcfg.zfssa_project,
                            share['id'], arg)
Example 9: _update_share_stats
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def _update_share_stats(self):
    """Retrieve stats info from share volume group."""
    data = dict(
        share_backend_name=self.backend_name,
        vendor_name='IBM',
        storage_protocol='NFS',
        reserved_percentage=self.configuration.reserved_share_percentage)

    free, capacity = self._get_available_capacity(
        self.configuration.gpfs_mount_point_base)

    data['total_capacity_gb'] = math.ceil(capacity / units.Gi)
    data['free_capacity_gb'] = math.ceil(free / units.Gi)

    super(GPFSShareDriver, self)._update_share_stats(data)
Example 10: _get_capacities
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def _get_capacities(self):
    result = self.rpc.call('getSystemStatistics', {})

    total = float(result['total_physical_capacity'])
    used = float(result['total_physical_usage'])
    LOG.info('Read capacity of %(cap)s bytes and '
             'usage of %(use)s bytes from backend. ',
             {'cap': total, 'use': used})
    free = total - used
    if free < 0:
        free = 0  # no space available

    free_replicated = free / self._get_qb_replication_factor()
    # floor numbers to nine digits (bytes)
    total = math.floor((total / units.Gi) * units.G) / units.G
    free = math.floor((free_replicated / units.Gi) * units.G) / units.G

    return total, free
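The two flooring expressions at the end of example 10 are easy to misread, so here is a standalone sketch (the byte count is illustrative, not a value from the Quobyte driver) showing what they do: convert a byte count to GiB, then truncate the result to nine decimal places using the decimal units.G constant (10 ** 9).

import math

from oslo_utils import units

total_bytes = 12000000000.0                     # illustrative backend value
gib = total_bytes / units.Gi                    # 11.175870895385742 GiB
floored = math.floor(gib * units.G) / units.G   # 11.175870895, truncated to 9 decimals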
Example 11: manage_existing_snapshot
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def manage_existing_snapshot(self, snapshot, driver_options):
    volume_name = self._get_volume_name(context.get_admin_context(),
                                        snapshot['share'])
    snapshot_path = self._get_snapshot_path(snapshot)
    try:
        snapshot_list = self._maprfs_util.get_snapshot_list(
            volume_name=volume_name)
        snapshot_name = snapshot['provider_location']
        if snapshot_name not in snapshot_list:
            msg = _("Snapshot %s not found") % snapshot_name
            LOG.error(msg)
            raise exception.ManageInvalidShareSnapshot(reason=msg)
        size = math.ceil(float(self._maprfs_util.maprfs_du(
            snapshot_path)) / units.Gi)
        return {'size': size}
    except exception.ProcessExecutionError:
        msg = _("Manage existing share snapshot failed.")
        LOG.exception(msg)
        raise exception.MapRFSException(msg=msg)
Example 12: _update_share_stats
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def _update_share_stats(self):
    """Retrieves stats info of share directories group."""
    try:
        total, free = self._maprfs_util.fs_capacity()
    except exception.ProcessExecutionError:
        msg = _('Failed to check MapRFS capacity info.')
        LOG.exception(msg)
        raise exception.MapRFSException(msg=msg)
    total_capacity_gb = int(math.ceil(float(total) / units.Gi))
    free_capacity_gb = int(math.floor(float(free) / units.Gi))
    data = {
        'share_backend_name': self.backend_name,
        'storage_protocol': 'MAPRFS',
        'driver_handles_share_servers': self.driver_handles_share_servers,
        'vendor_name': 'MapR Technologies',
        'driver_version': '1.0',
        'total_capacity_gb': total_capacity_gb,
        'free_capacity_gb': free_capacity_gb,
        'snapshot_support': True,
        'create_share_from_snapshot_support': True,
    }
    super(MapRFSNativeShareDriver, self)._update_share_stats(data)
Example 13: create_share
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def create_share(self, context, share, share_server):
    """Is called to create share."""
    if share['share_proto'] == 'NFS':
        location = self._create_nfs_share(share)
    elif share['share_proto'] == 'CIFS':
        location = self._create_cifs_share(share)
    else:
        message = (_('Unsupported share protocol: %(proto)s.') %
                   {'proto': share['share_proto']})
        LOG.error(message)
        raise exception.InvalidShare(reason=message)

    # apply directory quota based on share size
    max_share_size = share['size'] * units.Gi
    self._isilon_api.quota_create(
        self._get_container_path(share), 'directory', max_share_size)

    return location
Example 14: extend_share
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def extend_share(self, share, new_size, share_server=None):
    LOG.debug("Extending share in HSP: %(shr_id)s.",
              {'shr_id': share['id']})
    old_size = share['size']
    hsp_cluster = self.hsp.get_cluster()
    free_space = hsp_cluster['properties']['total-storage-available']
    free_space = free_space / units.Gi

    if (new_size - old_size) < free_space:
        filesystem_id = self.hsp.get_file_system(share['id'])['id']
        self.hsp.resize_file_system(filesystem_id, new_size * units.Gi)
    else:
        msg = (_("Share %s cannot be extended due to insufficient space.")
               % share['id'])
        raise exception.HSPBackendException(msg=msg)

    LOG.info("Share %(shr_id)s successfully extended to "
             "%(shr_size)sG.",
             {'shr_id': share['id'],
              'shr_size': new_size})
Example 15: shrink_share
# Required import: from oslo_utils import units [as alias]
# Or: from oslo_utils.units import Gi [as alias]
def shrink_share(self, share, new_size, share_server=None):
    LOG.debug("Shrinking share in HSP: %(shr_id)s.",
              {'shr_id': share['id']})
    file_system = self.hsp.get_file_system(share['id'])
    usage = file_system['properties']['used-capacity'] / units.Gi

    LOG.debug("Usage for share %(shr_id)s in HSP: %(usage)sG.",
              {'shr_id': share['id'], 'usage': usage})

    if new_size > usage:
        self.hsp.resize_file_system(file_system['id'], new_size * units.Gi)
    else:
        raise exception.ShareShrinkingPossibleDataLoss(
            share_id=share['id'])

    LOG.info("Share %(shr_id)s successfully shrunk to "
             "%(shr_size)sG.",
             {'shr_id': share['id'],
              'shr_size': new_size})