本文整理汇总了Python中marvin.lib.base.StoragePool类的典型用法代码示例。如果您正苦于以下问题:Python StoragePool类的具体用法?Python StoragePool怎么用?Python StoragePool使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了StoragePool类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: tearDown
def tearDown(self):
    """Per-test cleanup: strip pool tags, detach any created data disk,
    then delete all resources registered in ``self.cleanup``."""
    try:
        # Remove the tags this test placed on the storage pools.
        for pool in self.pools:
            StoragePool.update(self.apiclient, id=pool.id, tags="")

        if hasattr(self, "data_volume_created"):
            listed_volumes = Volume.list(
                self.userapiclient,
                id=self.data_volume_created.id,
                virtualmachineid=self.vm.id
            )
            # Detach only if the disk is still attached to the VM.
            if listed_volumes:
                self.vm.detach_volume(
                    self.userapiclient,
                    listed_volumes[0]
                )
            self.assertEqual(
                validateList(listed_volumes)[0],
                PASS,
                "DATA Volume List Validation Failed")

        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
示例2: test13_update_primary_storage_capacityIops_to_zero
def test13_update_primary_storage_capacityIops_to_zero(self):
    """Setting capacityiops to 0 must be reflected both in CloudStack
    and in the backing Datera app instance."""
    updated_iops = 0
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacityiops=updated_iops,
                       tags=self.primary_tag)

    # Verify on the CloudStack side.
    for pool in list_storage_pools(self.apiClient, clusterid=self.cluster.id):
        if pool.id == self.primary_storage_id:
            storage_pool = pool
    self.assertEqual(
        storage_pool.capacityiops, updated_iops,
        "Primary storage capacityiops not updated")

    # Verify on the Datera side; the app instance is named after the pool id.
    expected_name = "cloudstack-" + self.primary_storage_id
    for app_instance in self.datera_api.app_instances.list():
        if app_instance['name'] == expected_name:
            datera_instance = app_instance
    reported_iops = (datera_instance['storage_instances']
                     ['storage-1']['volumes']['volume-1']
                     ['performance_policy']['total_iops_max'])
    self.assertEqual(
        reported_iops, updated_iops,
        "app-instance capacityiops not updated")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
示例3: tearDownClass
def tearDownClass(cls):
    """Class-level cleanup: revert vmware global settings changed by the
    tests, restart management, then release all class resources."""
    try:
        if cls.updateclone:
            # Restore the full-clone setting (per-storage, then global)
            # and the root-disk-controller default we overrode.
            Configurations.update(cls.api_client,
                                  "vmware.create.full.clone",
                                  value="false", storageid=cls.storageID)
            Configurations.update(cls.api_client,
                                  "vmware.create.full.clone",
                                  value="false")
            Configurations.update(cls.api_client,
                                  "vmware.root.disk.controller",
                                  value=cls.defaultdiskcontroller)
            StoragePool.update(cls.api_client, id=cls.storageID,
                               tags="")
            cls.restartServer()
            # Give the management server 30 seconds to warm up; deploying
            # a VM right after restart has been observed to fail.
            time.sleep(30)
        cleanup_resources(cls.api_client, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
示例4: test_validateState_succeeds_at_retry_limit
def test_validateState_succeeds_at_retry_limit(self):
    """validateState must report PASS when the pool reaches the target
    state exactly on the last allowed retry."""
    retries = 3
    mock_client = MockApiClient(retries, 'initial state', 'final state')
    pool = StoragePool({'id': 'snapshot_id'})
    outcome = pool.validateState(mock_client, 'final state', timeout=3, interval=1)
    self.assertEqual(outcome, [PASS, None])
    # The mock must have been polled exactly `retries` times.
    self.assertEqual(retries, mock_client.retry_counter)
示例5: test_validateState_fails_after_retry_limit
def test_validateState_fails_after_retry_limit(self):
    """validateState must time out with FAIL when the target state is not
    reached before the timeout expires."""
    retries = 3
    mock_client = MockApiClient(retries, 'initial state', 'final state')
    pool = StoragePool({'id': 'snapshot_id'})
    outcome = pool.validateState(mock_client, 'final state', timeout=2, interval=1)
    self.assertEqual(outcome, [FAIL, 'StoragePool state not transited to final state, operation timed out'])
    # The mock must still have been polled exactly `retries` times.
    self.assertEqual(retries, mock_client.retry_counter)
示例6: tearDown
def tearDown(self):
    """Per-test cleanup: clear pool tags, then delete created resources."""
    try:
        for pool in self.pools:
            StoragePool.update(self.apiclient, id=pool.id, tags="")
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
示例7: test_add_remove_host_with_solidfire_plugin_3
def test_add_remove_host_with_solidfire_plugin_3(self):
    """Create two SolidFire-backed primary storages, deploy a VM on the
    first, then exercise XenServer host add/remove against it."""
    # Scenario only applies to XenServer.
    if TestData.hypervisor_type != TestData.xenServer:
        return

    storage_config = self.testdata[TestData.primaryStorage]
    primary_storage = StoragePool.create(
        self.apiClient,
        storage_config,
        scope=storage_config[TestData.scope],
        zoneid=self.zone.id,
        provider=storage_config[TestData.provider],
        tags=storage_config[TestData.tags],
        capacityiops=storage_config[TestData.capacityIops],
        capacitybytes=storage_config[TestData.capacityBytes],
        hypervisor=storage_config[TestData.hypervisor]
    )
    self.cleanup.append(primary_storage)

    self.virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True
    )

    # The VM's root volume determines the iSCSI target (IQN) to test with.
    root_volume = self._get_root_volume(self.virtual_machine)
    sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)

    storage_config_2 = self.testdata[TestData.primaryStorage2]
    primary_storage_2 = StoragePool.create(
        self.apiClient,
        storage_config_2,
        scope=storage_config_2[TestData.scope],
        zoneid=self.zone.id,
        clusterid=self.cluster.id,
        provider=storage_config_2[TestData.provider],
        tags=storage_config_2[TestData.tags],
        capacityiops=storage_config_2[TestData.capacityIops],
        capacitybytes=storage_config_2[TestData.capacityBytes],
        hypervisor=storage_config_2[TestData.hypervisor]
    )
    self.cleanup.append(primary_storage_2)

    self._perform_add_remove_xenserver_host(primary_storage.id, sf_iscsi_name)
示例8: setUpClass
def setUpClass(cls):
    """One-time setup: resolve zone/domain/template, then create the
    account, service offering and a custom cluster-tagged disk offering.

    Sets ``cls.skiptest`` when no CLUSTER-scope storage pool is available.
    """
    testClient = super(TestAttachDataDiskOnCWPS, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.testdata = testClient.getParsedTestDataConfig()
    cls.hypervisor = cls.testClient.getHypervisorInfo()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls._cleanup = []
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        cls.testdata["ostype"])

    cls.skiptest = False
    try:
        # These tests require at least one cluster-wide primary storage.
        cls.pools = StoragePool.list(
            cls.apiclient,
            zoneid=cls.zone.id,
            scope="CLUSTER")
    except Exception as e:
        cls.skiptest = True
        return

    try:
        # Create an account
        cls.account = Account.create(
            cls.apiclient,
            cls.testdata["account"],
            domainid=cls.domain.id
        )
        cls._cleanup.append(cls.account)

        # Create user api client of the account
        cls.userapiclient = testClient.getUserApiClient(
            UserName=cls.account.name,
            DomainName=cls.account.domain
        )

        # Create Service offering
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.testdata["service_offering"],
        )
        cls._cleanup.append(cls.service_offering)

        # Custom disk offering tagged to land on the cluster-wide pool.
        cls.disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.testdata["disk_offering"],
            custom=True,
            tags=CLUSTERTAG1,
        )
        cls._cleanup.append(cls.disk_offering)
    except Exception as e:
        # Roll back whatever was created before re-raising.
        cls.tearDownClass()
        raise e
    return
示例9: test09_add_vm_with_datera_storage
def test09_add_vm_with_datera_storage(self):
    """Create a Datera-backed primary storage and verify a VM can be
    deployed and lands on it."""
    primarystorage = self.testdata[TestData.primaryStorage]
    primary_storage = StoragePool.create(
        self.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=self.zone.id,
        clusterid=self.cluster.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor]
    )

    # Check the created pool against the Datera URL it was defined with.
    self._verify_attributes(
        primary_storage.id, primarystorage[TestData.url])
    self.cleanup.append(primary_storage)

    self.virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True
    )
    self._validate_storage(primary_storage, self.virtual_machine)
示例10: test12_primary_storage_with_zero_iops
def test12_primary_storage_with_zero_iops(self):
    """A pool created with zero capacityiops must still get a backing
    Datera app instance with the expected attributes."""
    primarystorage5 = self.testdata[TestData.primaryStorage5]
    primary_storage5 = StoragePool.create(
        self.apiClient,
        primarystorage5,
        scope=primarystorage5[TestData.scope],
        zoneid=self.zone.id,
        clusterid=self.cluster.id,
        provider=primarystorage5[TestData.provider],
        tags=primarystorage5[TestData.tags],
        capacityiops=primarystorage5[TestData.capacityIops],
        capacitybytes=primarystorage5[TestData.capacityBytes],
        hypervisor=primarystorage5[TestData.hypervisor]
    )
    self.cleanup.append(primary_storage5)

    # Datera names the backing app instance "cloudstack-<pool id>".
    expected_name = "cloudstack-" + primary_storage5.id
    self.assertEqual(
        any(expected_name == app_instance['name']
            for app_instance in self.datera_api.app_instances.list()),
        True, "app instance not created")

    self._verify_attributes(
        primary_storage5.id, primarystorage5[TestData.url])
示例11: test10_add_vm_with_datera_storage_and_volume
def test10_add_vm_with_datera_storage_and_volume(self):
    """Create a Datera-backed primary storage, deploy a VM on it, attach
    a data volume, and compare used-capacity reporting between CloudStack
    and the XenServer SR.

    Bug fixed: the attach call used a bare ``virtual_machine`` name that
    is never defined in this scope (NameError at runtime); the VM is held
    in ``self.virtual_machine``.
    """
    primarystorage = self.testdata[TestData.primaryStorage]
    primary_storage = StoragePool.create(
        self.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=self.zone.id,
        clusterid=self.cluster.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor]
    )
    primary_storage_url = primarystorage[TestData.url]
    self._verify_attributes(
        primary_storage.id, primary_storage_url)
    self.cleanup.append(primary_storage)

    self.virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True
    )
    self._validate_storage(primary_storage, self.virtual_machine)

    volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_1],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id
    )
    # FIX: was ``virtual_machine.attach_volume`` (undefined local name).
    self.virtual_machine.attach_volume(
        self.apiClient,
        volume
    )

    storage_pools_response = list_storage_pools(
        self.apiClient, id=primary_storage.id)

    # Find the XenServer SR whose name_description matches the pool id.
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == primary_storage.id:
            xen_server_response = value

    # NOTE(review): asserts the figures differ — presumably because the
    # newly attached volume is counted by CloudStack before the SR's
    # physical utilisation catches up; confirm against plugin behavior.
    self.assertNotEqual(
        int(storage_pools_response[0].disksizeused),
        int(xen_server_response['physical_utilisation']))
示例12: test07_update_primary_storage_capacityBytes
def test07_update_primary_storage_capacityBytes(self):
    """Updating capacitybytes must be reflected both in CloudStack and
    in the backing Datera volume size."""
    updated_size = self.testdata[TestData.newCapacityBytes]
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacitybytes=updated_size,
                       tags=self.primary_tag)

    # Verify on the CloudStack side.
    for pool in list_storage_pools(self.apiClient, clusterid=self.cluster.id):
        if pool.id == self.primary_storage_id:
            storage_pool = pool
    self.assertEqual(
        storage_pool.disksizetotal, updated_size,
        "Primary storage not updated")

    # Verify on the Datera side; Datera reports the size in GiB.
    expected_name = "cloudstack-" + self.primary_storage_id
    for app_instance in self.datera_api.app_instances.list():
        if app_instance['name'] == expected_name:
            datera_instance = app_instance
    reported_size = (datera_instance['storage_instances']
                     ['storage-1']['volumes']['volume-1']['size'] * 1073741824)
    self.assertEqual(
        reported_size, updated_size,
        "app-instance not updated")

    # XenServer-side verification is disabled pending a Xen fix:
    # for key, value in self.xen_session.xenapi.SR.get_all_records().items():
    #     if value['name_description'] == self.primary_storage_id:
    #         xen_sr = value
    # self.assertEqual(
    #     int(xen_sr['physical_size']) + 12582912, updated_size,
    #     "Xen server physical storage not updated")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
示例13: setUpClass
def setUpClass(cls):
    """One-time setup: resolve zone/domain/template, create the disk and
    service offerings, an account, a VM, and one data volume."""
    testClient = super(TestVolumes, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.services = testClient.getParsedTestDataConfig()
    cls._cleanup = []

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.services["mode"] = cls.zone.networktype
    cls.hypervisor = testClient.getHypervisorInfo()
    cls.invalidStoragePoolType = False

    # Disk offerings: a fixed one, a resized one, and a custom-size one.
    cls.disk_offering = DiskOffering.create(cls.apiclient, cls.services["disk_offering"])
    cls.resized_disk_offering = DiskOffering.create(cls.apiclient, cls.services["resized_disk_offering"])
    cls.custom_resized_disk_offering = DiskOffering.create(
        cls.apiclient, cls.services["resized_disk_offering"], custom=True
    )

    template = get_template(cls.apiclient, cls.zone.id, cls.services["ostype"])
    if template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

    # Stash the resolved ids in the shared services dict for the tests.
    cls.services["domainid"] = cls.domain.id
    cls.services["zoneid"] = cls.zone.id
    cls.services["template"] = template.id
    cls.services["diskofferingid"] = cls.disk_offering.id
    cls.services["resizeddiskofferingid"] = cls.resized_disk_offering.id
    cls.services["customresizeddiskofferingid"] = cls.custom_resized_disk_offering.id

    # Create VMs, VMs etc
    cls.account = Account.create(cls.apiclient, cls.services["account"], domainid=cls.domain.id)
    cls.service_offering = ServiceOffering.create(cls.apiclient, cls.services["service_offerings"]["tiny"])
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services["mode"],
    )
    pools = StoragePool.list(cls.apiclient)
    # cls.assertEqual(
    #     validateList(pools)[0],
    #     PASS,
    #     "storage pool list validation failed")
    cls.volume = Volume.create(cls.apiclient, cls.services, account=cls.account.name, domainid=cls.account.domainid)

    # Cleanup order matters: offerings and volume before the account.
    cls._cleanup = [
        cls.resized_disk_offering,
        cls.custom_resized_disk_offering,
        cls.service_offering,
        cls.disk_offering,
        cls.volume,
        cls.account,
    ]
示例14: test_01_migrateVolume
def test_01_migrateVolume(self):
    """
    @Desc:Volume is not retaining same uuid when migrating from one
    storage to another.
    Step1:Create a volume/data disk
    Step2:Verify UUID of the volume
    Step3:Migrate the volume to another primary storage within
    the cluster
    Step4:Migrating volume to new primary storage should succeed
    Step5:volume UUID should not change even after migration
    """
    vol = Volume.create(
        self.apiclient,
        self.services["volume"],
        diskofferingid=self.disk_offering.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.assertIsNotNone(vol, "Failed to create volume")

    vol_res = Volume.list(self.apiclient, id=vol.id)
    self.assertEqual(validateList(vol_res)[0], PASS, "Invalid response returned for list volumes")
    vol_uuid = vol_res[0].id

    try:
        self.virtual_machine.attach_volume(self.apiclient, vol)
    except Exception as e:
        self.fail("Attaching data disk to vm failed with error %s" % e)

    # Only pools the volume can actually migrate to are returned here.
    pools = StoragePool.listForMigration(self.apiclient, id=vol.id)
    if not pools:
        self.skipTest(
            "No suitable storage pools found for volume migration.\
            Skipping"
        )
    self.assertEqual(validateList(pools)[0], PASS, "invalid pool response from findStoragePoolsForMigration")

    target_pool = pools[0]
    self.debug("Migrating Volume-ID: %s to Pool: %s" % (vol.id, target_pool.id))
    try:
        Volume.migrate(self.apiclient, volumeid=vol.id, storageid=target_pool.id, livemigrate="true")
    except Exception as e:
        self.fail("Volume migration failed with error %s" % e)

    # The data disk must keep its uuid across the migration.
    migrated_vols = Volume.list(
        self.apiclient, virtualmachineid=self.virtual_machine.id, listall="true", type="DATADISK"
    )
    self.assertEqual(validateList(migrated_vols)[0], PASS, "invalid volumes response after migration")
    migrated_vol_uuid = migrated_vols[0].id
    self.assertEqual(
        vol_uuid,
        migrated_vol_uuid,
        "Volume is not retaining same uuid when migrating from one\
        storage to another",
    )

    self.virtual_machine.detach_volume(self.apiclient, vol)
    self.cleanup.append(vol)
    return
示例15: test06_primary_storage_cancel_maintenance_mode
def test06_primary_storage_cancel_maintenance_mode(self):
    """Enable then cancel maintenance on the pool; it must come back
    online in CloudStack, Datera, and XenServer."""
    StoragePool.enableMaintenance(self.apiClient,
                                  id=self.primary_storage_id)
    StoragePool.cancelMaintenance(self.apiClient,
                                  id=self.primary_storage_id)

    # Verify on the CloudStack side.
    for pool in list_storage_pools(self.apiClient, clusterid=self.cluster.id):
        if pool.id == self.primary_storage_id:
            storage_pool = pool
    self.assertEqual(
        storage_pool.state, "Up",
        "Primary storage not in up mode")

    # Verify on the Datera side.
    expected_name = "cloudstack-" + self.primary_storage_id
    for app_instance in self.datera_api.app_instances.list():
        if app_instance['name'] == expected_name:
            datera_instance = app_instance
    self.assertEqual(
        datera_instance["admin_state"], "online",
        "app-instance not in online mode")

    # Verify on the XenServer side: an online SR must not allow both
    # "forget" and "destroy".
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == self.primary_storage_id:
            xen_sr = value
    self.assertEqual(
        set(["forget", "destroy"]).issubset(xen_sr["allowed_operations"]),
        False, "Xenserver SR in offline mode")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []