本文整理汇总了Python中marvin.lib.base.StoragePool.update方法的典型用法代码示例。如果您正苦于以下问题:Python StoragePool.update方法的具体用法?Python StoragePool.update怎么用?Python StoragePool.update使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类marvin.lib.base.StoragePool
的用法示例。
在下文中一共展示了StoragePool.update方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: tearDown
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def tearDown(self):
    """Per-test cleanup: strip tags from every storage pool, detach the
    data volume created by the test (if any), and release all tracked
    resources.

    Raises:
        Exception: wraps any error hit during cleanup in a warning message.
    """
    try:
        # Clear the tags this test applied so later tests see untagged pools.
        for storagePool in self.pools:
            StoragePool.update(self.apiclient, id=storagePool.id, tags="")

        # Only tests that created a data volume set this attribute.
        if hasattr(self, "data_volume_created"):
            data_volumes_list = Volume.list(
                self.userapiclient,
                id=self.data_volume_created.id,
                virtualmachineid=self.vm.id
            )
            if data_volumes_list:
                # Volume is still attached to the VM -- detach before
                # cleanup_resources tries to delete it.
                self.vm.detach_volume(
                    self.userapiclient,
                    data_volumes_list[0]
                )

            status = validateList(data_volumes_list)
            self.assertEqual(
                status[0],
                PASS,
                "DATA Volume List Validation Failed")
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
示例2: test13_update_primary_storage_capacityIops_to_zero
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def test13_update_primary_storage_capacityIops_to_zero(self):
    """Update the primary storage pool's capacityiops to 0 and verify the
    new value both in CloudStack and on the Datera backend, then delete
    the pool."""
    updatedIops = 0
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacityiops=updatedIops,
                       tags=self.primary_tag)
    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)
    for data in storage_pools_response:
        if data.id == self.primary_storage_id:
            storage_pool = data
    # NOTE(review): storage_pool is unbound (NameError) if the pool id is
    # missing from the listing -- confirm the pool is always present here.
    self.assertEqual(
        storage_pool.capacityiops, updatedIops,
        "Primary storage capacityiops not updated")
    # Verify in Datera: app instances are named "cloudstack-<pool id>".
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primary_storage_name:
            datera_instance = instance
    app_instance_response_iops = (
        datera_instance['storage_instances']
        ['storage-1']['volumes']['volume-1']['performance_policy']
        ['total_iops_max'])
    self.assertEqual(
        app_instance_response_iops, updatedIops,
        "app-instance capacityiops not updated")
    # Pool deleted here; empty the cleanup list so tearDown skips it.
    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
示例3: tearDownClass
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def tearDownClass(cls):
    """Class-level cleanup: revert the vmware configuration changes made
    during setup, untag the storage pool, restart the management server,
    and release all accumulated resources.

    Raises:
        Exception: wraps any error hit during cleanup in a warning message.
    """
    try:
        # Cleanup resources used
        if cls.updateclone:
            # Revert full-clone at storage-pool scope, then globally.
            Configurations.update(cls.api_client,
                                  "vmware.create.full.clone",
                                  value="false", storageid=cls.storageID)
            Configurations.update(cls.api_client,
                                  "vmware.create.full.clone",
                                  value="false")
            # Restore the disk controller captured before setup changed it.
            Configurations.update(cls.api_client,
                                  "vmware.root.disk.controller",
                                  value=cls.defaultdiskcontroller)
            StoragePool.update(cls.api_client, id=cls.storageID,
                               tags="")
            cls.restartServer()
            # Giving 30 seconds to management to warm-up,
            # Experienced failures when trying to deploy a VM exactly
            # when management came up
            time.sleep(30)
        cleanup_resources(cls.api_client, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
示例4: tearDown
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def tearDown(self):
    """Reset storage-pool tags and release every resource this test
    created; any cleanup failure is surfaced as a warning exception."""
    try:
        # Remove any tags the test applied to the storage pools.
        for pool in self.pools:
            StoragePool.update(self.apiclient, id=pool.id, tags="")
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
示例5: test07_update_primary_storage_capacityBytes
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def test07_update_primary_storage_capacityBytes(self):
    """Update the primary storage pool's capacitybytes and verify the new
    size both in CloudStack and on the Datera backend, then delete the
    pool."""
    updatedDiskSize = self.testdata[TestData.newCapacityBytes]
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacitybytes=updatedDiskSize,
                       tags=self.primary_tag)
    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)
    for data in storage_pools_response:
        if data.id == self.primary_storage_id:
            storage_pool = data
    # NOTE(review): storage_pool is unbound (NameError) if the pool id is
    # missing from the listing -- confirm the pool is always present here.
    self.assertEqual(
        storage_pool.disksizetotal, updatedDiskSize,
        "Primary storage not updated")
    # Verify in Datera; the volume size is stored in GiB, so convert to
    # bytes (* 1073741824) before comparing.
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primary_storage_name:
            datera_instance = instance
    app_instance_response_disk_size = (
        datera_instance['storage_instances']
        ['storage-1']['volumes']['volume-1']['size'] * 1073741824)
    self.assertEqual(
        app_instance_response_disk_size, updatedDiskSize,
        "app-instance not updated")
    # Verify in xenserver
    #for key, value in self.xen_session.xenapi.SR.get_all_records().items():
    #    if value['name_description'] == self.primary_storage_id:
    #        xen_sr = value
    #Uncomment after xen fix
    #print xen_sr
    #print xen_sr['physical_size'], updatedDiskSize
    #self.assertEqual(
    #    int(xen_sr['physical_size']) + 12582912, updatedDiskSize,
    #    "Xen server physical storage not updated")
    # Pool deleted here; empty the cleanup list so tearDown skips it.
    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
示例6: tearDownClass
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def tearDownClass(cls):
    """Class-level cleanup: revert the vmware configuration changes made
    during setup, untag the storage pool, and release all accumulated
    resources.

    Raises:
        Exception: wraps any error hit during cleanup in a warning message.
    """
    try:
        # Cleanup resources used
        if cls.updateclone:
            # Restore the disk controller captured before setup changed it.
            Configurations.update(cls.api_client,
                                  "vmware.root.disk.controller",
                                  value=cls.defaultdiskcontroller)
            # Revert full-clone globally, then at storage-pool scope.
            Configurations.update(cls.api_client,
                                  "vmware.create.full.clone",
                                  value="false")
            Configurations.update(cls.api_client,
                                  "vmware.create.full.clone",
                                  value="false", storageid=cls.storageID)
            # storageID stays None when no VMFS/NFS pool was found in setup.
            if cls.storageID:
                StoragePool.update(cls.api_client, id=cls.storageID,
                                   tags="")
        cleanup_resources(cls.api_client, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
示例7: test_01_recover_VM
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def test_01_recover_VM(self):
    """ Test Restore VM on VMWare
    1. Deploy a VM without datadisk
    2. Restore the VM
    3. Verify that VM comes up in Running state
    """
    try:
        self.pools = StoragePool.list(
            self.apiclient,
            zoneid=self.zone.id,
            scope="CLUSTER")
        status = validateList(self.pools)
        # Step 3
        self.assertEqual(
            status[0],
            PASS,
            "Check: Failed to list cluster wide storage pools")
        # Need two CWPS so restore can land the ROOT disk elsewhere.
        if len(self.pools) < 2:
            self.skipTest("There must be at atleast two cluster wide\
 storage pools available in the setup")
    except Exception as e:
        self.skipTest(e)
    # Adding tags to Storage Pools
    cluster_no = 1
    StoragePool.update(
        self.apiclient,
        id=self.pools[0].id,
        tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
    self.vm = VirtualMachine.create(
        self.apiclient,
        self.testdata["small"],
        accountid=self.account.name,
        templateid=self.template.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_cwps.id,
        zoneid=self.zone.id,
    )
    # Step 2
    volumes_root_list = list_volumes(
        self.apiclient,
        virtualmachineid=self.vm.id,
        type=ROOT,
        listall=True
    )
    root_volume = volumes_root_list[0]
    # Restore VM till its ROOT disk is recreated on another Primary Storage
    # NOTE(review): this loop has no iteration cap -- it spins forever if
    # restore keeps landing on pools[0]; consider bounding it.
    while True:
        self.vm.restore(self.apiclient)
        volumes_root_list = list_volumes(
            self.apiclient,
            virtualmachineid=self.vm.id,
            type=ROOT,
            listall=True
        )
        root_volume = volumes_root_list[0]
        if root_volume.storage != self.pools[0].name:
            break
    # Step 3
    # Poll up to ~10 x 10s for the restored VM to reach Running.
    vm_list = list_virtual_machines(
        self.apiclient,
        id=self.vm.id)
    state = vm_list[0].state
    i = 0
    while(state != "Running"):
        vm_list = list_virtual_machines(
            self.apiclient,
            id=self.vm.id)
        time.sleep(10)
        i = i + 1
        state = vm_list[0].state
        if i >= 10:
            self.fail("Restore VM Failed")
            # NOTE(review): unreachable -- self.fail raises above.
            break
    return
示例8: test_01_attach_datadisk_to_vm_on_zwps
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def test_01_attach_datadisk_to_vm_on_zwps(self):
    """ Attach Data Disk To VM on ZWPS
    1. Check if zwps storage pool exists.
    2. Adding tag to zone wide primary storage
    3. Launch a VM on ZWPS
    4. Attach data disk to vm which is on zwps.
    5. Verify disk is attached.
    """
    # Step 1
    if len(list(storagePool for storagePool in self.pools
                if storagePool.scope == "ZONE")) < 1:
        self.skipTest("There must be at least one zone wide \
storage pools available in the setup")
    # Adding tags to Storage Pools
    # Each ZONE-scope pool gets a distinct tag: ZONETAG1 with its last
    # character replaced by a running number.
    zone_no = 1
    for storagePool in self.pools:
        if storagePool.scope == "ZONE":
            StoragePool.update(
                self.apiclient,
                id=storagePool.id,
                tags=[ZONETAG1[:-1] + repr(zone_no)])
            zone_no += 1
    self.vm = VirtualMachine.create(
        self.apiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_zone1.id,
        zoneid=self.zone.id
    )
    self.data_volume_created = Volume.create(
        self.userapiclient,
        self.testdata["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id
    )
    self.cleanup.append(self.data_volume_created)
    # Step 2
    self.vm.attach_volume(
        self.userapiclient,
        self.data_volume_created
    )
    data_volumes_list = Volume.list(
        self.userapiclient,
        id=self.data_volume_created.id,
        virtualmachineid=self.vm.id
    )
    data_volume = data_volumes_list[0]
    # NOTE(review): validateList normally takes the list itself (as the
    # sibling tearDown does); passing a single element looks unintended --
    # confirm data_volumes_list was meant here.
    status = validateList(data_volume)
    # Step 3
    self.assertEqual(
        status[0],
        PASS,
        "Check: Data if Disk is attached to VM")
    return
示例9: setUpClass
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def setUpClass(cls):
    """One-time setup for TestResizeVolume.

    Resolves zone/domain/template, creates a child domain with an admin
    account, enables vmware full-clone on the first VMFS/NFS pool (tagging
    it "scsi") when running on vmware, and creates the service offerings
    used by the tests. Sets ``unsupportedHypervisorType`` /
    ``unsupportedStorageType`` flags so individual tests can skip.

    Raises:
        Exception: when storage pools or configurations cannot be listed,
            or when any resource creation fails (re-raised after cleanup).
    """
    cls.testClient = super(TestResizeVolume, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.hypervisor = (cls.testClient.getHypervisorInfo()).lower()
    cls.storageID = None
    # Fill services from the external config file
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(
        cls.api_client,
        cls.testClient.getZoneForTests())
    cls.services["mode"] = cls.zone.networktype
    cls._cleanup = []
    cls.unsupportedStorageType = False
    cls.unsupportedHypervisorType = False
    cls.updateclone = False
    if cls.hypervisor not in ['xenserver', "kvm", "vmware"]:
        # Unsupported hypervisor: flag it and bail; the tests will skip.
        cls.unsupportedHypervisorType = True
        return
    cls.template = get_template(
        cls.api_client,
        cls.zone.id
    )
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = cls.template.id
    cls.services["volume"]["zoneid"] = cls.zone.id
    try:
        cls.parent_domain = Domain.create(cls.api_client,
                                          services=cls.services["domain"],
                                          parentdomainid=cls.domain.id)
        cls.parentd_admin = Account.create(cls.api_client,
                                           cls.services["account"],
                                           admin=True,
                                           domainid=cls.parent_domain.id)
        cls._cleanup.append(cls.parentd_admin)
        cls._cleanup.append(cls.parent_domain)
        list_pool_resp = list_storage_pools(
            cls.api_client,
            account=cls.parentd_admin.name, domainid=cls.parent_domain.id)
        res = validateList(list_pool_resp)
        if res[2] == INVALID_INPUT:
            raise Exception("Failed to list storage pool-no storagepools found ")
        # Identify the storage pool type and set vmware fullclone to true
        # if storage is VMFS
        if cls.hypervisor == 'vmware':
            for strpool in list_pool_resp:
                if strpool.type.lower() == "vmfs" or strpool.type.lower() == "networkfilesystem":
                    list_config_storage_response = list_configurations(
                        cls.api_client,
                        name="vmware.create.full.clone", storageid=strpool.id)
                    res = validateList(list_config_storage_response)
                    if res[2] == INVALID_INPUT:
                        raise Exception("Failed to list configurations ")
                    if list_config_storage_response[0].value == "false":
                        Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="true", storageid=strpool.id)
                        cls.updateclone = True
                        # Tag the pool so the "scsi" offerings land on it.
                        StoragePool.update(cls.api_client, id=strpool.id,
                                           tags="scsi")
                        cls.storageID = strpool.id
                        cls.unsupportedStorageType = False
                        break
                else:
                    cls.unsupportedStorageType = True
        # Creating service offering with normal config
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"])
        cls.services_offering_vmware = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"], tags="scsi")
        cls._cleanup.extend([cls.service_offering,
                             cls.services_offering_vmware])
    except Exception as e:
        # FIX: the original swallowed the exception (bare return), letting
        # the suite run against a half-initialised class. Re-raise after
        # cleanup, matching the sibling setUpClass implementations.
        cls.tearDownClass()
        raise e
    return
示例10: setUpClass
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def setUpClass(cls):
    """One-time setup for TestPathVolume.

    Creates the test account, service/disk offerings, tags the zone-wide
    storage pool (when present) and builds tagged offerings for it, adds
    local-storage offerings when the zone enables local storage, and
    verifies the new account can log in.

    Raises:
        Exception: when the template lookup fails, or when any resource
            creation fails (re-raised after cleanup).
    """
    testClient = super(TestPathVolume, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.testdata = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient)
    cls.testdata["mode"] = cls.zone.networktype
    cls.template = get_template(cls.apiclient, cls.zone.id, cls.testdata["ostype"])
    # FIX: check for FAILED *before* dereferencing the template. The
    # original read cls.template.ostypeid first, which would raise an
    # AttributeError instead of the intended failure message; it also
    # called cls.fail(), which is not valid on a classmethod.
    if cls.template == FAILED:
        raise Exception(
            "get_template() failed to return template with description %s"
            % cls.testdata["ostype"])
    cls.testdata["template"]["ostypeid"] = cls.template.ostypeid
    cls._cleanup = []
    try:
        cls.account = Account.create(cls.apiclient,
                                     cls.testdata["account"],
                                     domainid=cls.domain.id
                                     )
        cls._cleanup.append(cls.account)
        # create two service offerings
        cls.service_offering_1 = ServiceOffering.create(
            cls.apiclient, cls.testdata["service_offerings"]["small"])
        cls._cleanup.append(cls.service_offering_1)
        # Create Disk offerings
        cls.disk_offering_1 = DiskOffering.create(
            cls.apiclient, cls.testdata["disk_offering"])
        cls._cleanup.append(cls.disk_offering_1)
        # check if zone wide storage is enabled
        cls.list_storage = StoragePool.list(cls.apiclient,
                                            scope="ZONE"
                                            )
        if cls.list_storage:
            cls.zone_wide_storage = cls.list_storage[0]
            cls.debug("zone wide storage id is %s" % cls.zone_wide_storage.id)
            # Tag the ZWPS so the tagged offerings below land on it.
            cls.testdata["tags"] = "zp"
            update1 = StoragePool.update(cls.apiclient,
                                         id=cls.zone_wide_storage.id,
                                         tags=cls.testdata["tags"]
                                         )
            cls.debug("Storage %s pool tag%s" % (cls.zone_wide_storage.id,
                                                 update1.tags))
            cls.testdata["service_offerings"]["tags"] = "zp"
            cls.tagged_so = ServiceOffering.create(
                cls.apiclient, cls.testdata["service_offerings"])
            # Reset the shared testdata so later offerings are untagged.
            cls.testdata["service_offerings"]["tags"] = " "
            cls._cleanup.append(cls.tagged_so)
            # create tagged disk offerings
            cls.testdata["disk_offering"]["tags"] = "zp"
            cls.disk_offering_tagged = DiskOffering.create(
                cls.apiclient, cls.testdata["disk_offering"])
            cls._cleanup.append(cls.disk_offering_tagged)
        else:
            cls.debug("No zone wide storage found")
        # check if local storage is enabled
        if cls.zone.localstorageenabled:
            cls.testdata["disk_offering"]["tags"] = " "
            cls.testdata["service_offerings"]["storagetype"] = 'local'
            cls.service_offering_2 = ServiceOffering.create(
                cls.apiclient, cls.testdata["service_offerings"])
            cls._cleanup.append(cls.service_offering_2)
            # create a disk offering with local storage
            cls.testdata["disk_offering"]["storagetype"] = 'local'
            cls.disk_offering_local = DiskOffering.create(
                cls.apiclient, cls.testdata["disk_offering"])
            cls._cleanup.append(cls.disk_offering_local)
            cls.testdata["disk_offering"]["storagetype"] = ' '
        else:
            cls.debug("No local storage found")
        cls.userapiclient = testClient.getUserApiClient(
            UserName=cls.account.name,
            DomainName=cls.account.domain
        )
        # Check if login is successful with new account
        response = User.login(cls.userapiclient,
                              username=cls.account.name,
                              password=cls.testdata["account"]["password"]
                              )
        # response should have non null value
        assert response.sessionkey is not None
    except Exception as e:
        cls.tearDownClass()
        raise e
    return
示例11: setUpClass
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def setUpClass(cls):
    """One-time setup for TestDeployVmRootSize (listing truncated at the
    end -- see the trailing marker comment).

    Creates the test account, switches the vmware root-disk controller to
    scsi and enables full-clone where needed, registers and downloads an
    .ova template, tags the first VMFS/NFS pool "scsi", restarts the
    management server when configuration changed, and creates a service
    offering.
    """
    cls.cloudstacktestclient = super(TestDeployVmRootSize,
                                     cls).getClsTestClient()
    cls.api_client = cls.cloudstacktestclient.getApiClient()
    cls.hypervisor = cls.cloudstacktestclient.getHypervisorInfo().lower()
    cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
    # Get Zone, Domain and Default Built-in template
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client,
                        cls.cloudstacktestclient.getZoneForTests())
    cls.services = cls.testClient.getParsedTestDataConfig()
    cls.services["mode"] = cls.zone.networktype
    cls._cleanup = []
    cls.updateclone = False
    cls.restartreq = False
    cls.defaultdiskcontroller = "ide"
    cls.template = get_template(cls.api_client, cls.zone.id)
    if cls.template == FAILED:
        assert False, "get_template() failed to return template "
    # create a user account
    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=cls.domain.id, admin=True
    )
    cls._cleanup.append(cls.account)
    list_pool_resp = list_storage_pools(cls.api_client,
                                        account=cls.account.name,
                                        domainid=cls.domain.id)
    # Identify the storage pool type and set vmware fullclone to
    # true if storage is VMFS
    if cls.hypervisor == 'vmware':
        # please make sure url of templateregister dictionary in
        # test_data.config pointing to .ova file
        list_config_storage_response = list_configurations(
            cls.api_client,
            name="vmware.root.disk.controller")
        # Remember the current controller so tearDownClass can restore it.
        cls.defaultdiskcontroller = list_config_storage_response[0].value
        if list_config_storage_response[0].value == "ide" or \
                list_config_storage_response[0].value == \
                "osdefault":
            Configurations.update(cls.api_client,
                                  "vmware.root.disk.controller",
                                  value="scsi")
            cls.updateclone = True
            cls.restartreq = True
        list_config_fullclone_global_response = list_configurations(
            cls.api_client,
            name="vmware.create.full.clone")
        if list_config_fullclone_global_response[0].value == "false":
            Configurations.update(cls.api_client,
                                  "vmware.create.full.clone",
                                  value="true")
            cls.updateclone = True
            cls.restartreq = True
        cls.tempobj = Template.register(cls.api_client,
                                        cls.services["templateregister"],
                                        hypervisor=cls.hypervisor,
                                        zoneid=cls.zone.id,
                                        account=cls.account.name,
                                        domainid=cls.domain.id
                                        )
        cls.tempobj.download(cls.api_client)
        # Enable full-clone on the first VMFS/NFS pool and tag it "scsi".
        for strpool in list_pool_resp:
            if strpool.type.lower() == "vmfs" or strpool.type.lower() == "networkfilesystem":
                list_config_storage_response = list_configurations(
                    cls.api_client,
                    name="vmware.create.full.clone", storageid=strpool.id)
                res = validateList(list_config_storage_response)
                if res[2] == INVALID_INPUT:
                    raise Exception("Failed to list configurations ")
                if list_config_storage_response[0].value == "false":
                    Configurations.update(cls.api_client,
                                          "vmware.create.full.clone",
                                          value="true",
                                          storageid=strpool.id)
                    cls.updateclone = True
                StoragePool.update(cls.api_client, id=strpool.id,
                                   tags="scsi")
                cls.storageID = strpool.id
                break
        if cls.restartreq:
            cls.restartServer()
    # create a service offering
    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"]
    )
    # ......... remainder of this method omitted in the source listing .........
示例12: test_01_attach_datadisk_to_vm_on_zwps
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def test_01_attach_datadisk_to_vm_on_zwps(self):
    """ Attach Data Disk on CWPS To VM
    1. Check if zwps storage pool exists.
    2. Adding tag to zone wide primary storage
    3. Launch a VM
    4. Attach data disk to vm.
    5. Verify disk is attached and in correct storage pool.
    """
    # Step 1
    if len(list(self.pools)) < 1:
        self.skipTest("There must be at least one zone wide \
storage pools available in the setup")
    # Step 2
    # Adding tags to Storage Pools
    StoragePool.update(
        self.apiclient,
        id=self.pools[0].id,
        tags=[CLUSTERTAG1])
    # Launch VM
    self.vm = VirtualMachine.create(
        self.apiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_zone1.id,
        zoneid=self.zone.id
    )
    # Create a 1 GB custom-size data disk.
    self.testdata["volume"]["zoneid"] = self.zone.id
    self.testdata["volume"]["customdisksize"] = 1
    self.data_volume_created = Volume.create_custom_disk(
        self.userapiclient,
        self.testdata["volume"],
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id,
    )
    self.cleanup.append(self.data_volume_created)
    # Step 4
    self.vm.attach_volume(
        self.userapiclient,
        self.data_volume_created
    )
    data_volumes_list = Volume.list(
        self.userapiclient,
        virtualmachineid=self.vm.id,
        type="DATA",
        listall=True
    )
    self.debug("list volumes using vm id %s" % dir(data_volumes_list[0]))
    data_volumes_list = Volume.list(self.apiclient,
                                    id=self.data_volume_created.id,
                                    listall=True)
    data_volume = data_volumes_list[0]
    # NOTE(review): validateList normally takes the list itself; passing a
    # single element looks unintended -- confirm data_volumes_list was
    # meant here.
    status = validateList(data_volume)
    # Step 5
    self.assertEqual(
        status[0],
        PASS,
        "Check: volume list is valid")
    self.assertEqual(
        data_volume.state,
        "Ready",
        "Check: Data volume is attached to VM")
    if data_volume.storage != self.pools[0].name:
        self.fail("check if volume is created in correct storage pool")
    return
示例13: test_11_migrate_volume_and_change_offering
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def test_11_migrate_volume_and_change_offering(self):
    """Migrate a volume to another primary storage while switching its
    disk offering, and verify the new offering is applied."""
    # Validates the following
    #
    # 1. Creates a new Volume with a small disk offering
    #
    # 2. Migrates the Volume to another primary storage and changes the offering
    #
    # 3. Verifies the Volume has new offering when migrated to the new storage.
    small_offering = list_disk_offering(
        self.apiclient,
        name="Small"
    )[0]
    large_offering = list_disk_offering(
        self.apiclient,
        name="Large"
    )[0]
    # NOTE(review): this call uses self.apiClient while the rest of the
    # test uses self.apiclient -- confirm both attributes exist on this
    # test class.
    volume = Volume.create(
        self.apiClient,
        self.services,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=small_offering.id
    )
    self.debug("Created a small volume: %s" % volume.id)
    self.virtual_machine.attach_volume(self.apiclient, volume=volume)
    # On KVM the VM is stopped before the migration and restarted after.
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.stop(self.apiclient)
    pools = StoragePool.listForMigration(
        self.apiclient,
        id=volume.id
    )
    pool = None
    if pools and len(pools) > 0:
        pool = pools[0]
    else:
        # skipTest raises SkipTest itself, so the surrounding `raise` never
        # executes; kept as-is for byte-identical behavior.
        raise self.skipTest("Not enough storage pools found, skipping test")
    # Clear tags on the target pool so the untagged Large offering fits.
    if hasattr(pool, 'tags'):
        StoragePool.update(self.apiclient, id=pool.id, tags="")
    self.debug("Migrating Volume-ID: %s to Pool: %s" % (volume.id, pool.id))
    Volume.migrate(
        self.apiclient,
        volumeid=volume.id,
        storageid=pool.id,
        newdiskofferingid=large_offering.id
    )
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.start(self.apiclient
                                   )
    migrated_vol = Volume.list(
        self.apiclient,
        id=volume.id
    )[0]
    self.assertEqual(
        migrated_vol.diskofferingname,
        large_offering.name,
        "Offering name did not match with the new one "
    )
    return
示例14: test_01_multiple_snapshot_in_zwps
# 需要导入模块: from marvin.lib.base import StoragePool [as 别名]
# 或者: from marvin.lib.base.StoragePool import update [as 别名]
def test_01_multiple_snapshot_in_zwps(self):
    """ Test multiple volume snapshot in zwps
    # 1. Verify if setup has a ZWPS and 2 CWPS
    # 2. Deploy a VM with data disk in ZWPS
    # 1. Verify ROOT and DATA Disk of the VM is in ZWPS.
    # 2. Take a snapshot of VM.
    # 3. Create Multiple Snapshots till operation fails.
    """
    # (Listing truncated at the end -- see the trailing marker comment.)
    try:
        self.pools = StoragePool.list(self.apiclient, zoneid=self.zone.id)
        status = validateList(self.pools)
        self.assertEqual(
            status[0],
            PASS,
            "Check: Failed to list storage pools due to %s" %
            status[2])
        zonepoolList = list(storagePool for storagePool in self.pools
                            if storagePool.scope == "ZONE")
        if len(zonepoolList) < 1:
            self.skipTest("There must be at least one zone wide\
 storage pools available in the setup")
        if len(list(storagePool for storagePool in self.pools
                    if storagePool.scope == "CLUSTER")) < 2:
            self.skipTest("There must be at atleast two cluster wide\
 storage pools available in the setup")
    except Exception as e:
        self.skipTest(e)
    # Adding tags to Storage Pools
    zone_no = 1
    StoragePool.update(
        self.apiclient,
        id=zonepoolList[0].id,
        tags=[ZONETAG1[:-1] + repr(zone_no)])
    # Deploy the VM with a data disk; both should land on the tagged ZWPS.
    self.vm_zwps = VirtualMachine.create(
        self.apiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_zwps.id,
        diskofferingid=self.disk_offering_zwps.id,
        zoneid=self.zone.id,
    )
    self.cleanup.append(self.vm_zwps)
    # Step 1
    volumes_root_list = list_volumes(
        self.apiclient,
        virtualmachineid=self.vm_zwps.id,
        type=ROOT,
        listall=True
    )
    status = validateList(volumes_root_list)
    self.assertEqual(
        status[0],
        PASS,
        "Check: Failed to list root vloume due to %s" %
        status[2])
    root_volume = volumes_root_list[0]
    if root_volume.storage != zonepoolList[0].name:
        self.fail("Root Volume not in Zone-Wide Storage Pool !")
    volumes_data_list = list_volumes(
        self.apiclient,
        virtualmachineid=self.vm_zwps.id,
        type=DATA,
        listall=True
    )
    status = validateList(volumes_data_list)
    self.assertEqual(
        status[0],
        PASS,
        "Check: Failed to list data vloume due to %s" %
        status[2])
    data_volume = volumes_data_list[0]
    if data_volume.storage != zonepoolList[0].name:
        self.fail("Data Volume not in Zone-Wide Storage Pool !")
    # Step 2
    self.vm_zwps.stop(self.apiclient)
    self.debug(
        "Creation of Snapshot of Data Volume after VM is stopped.....")
    Snapshot.create(
        self.apiclient,
        data_volume.id)
    # ......... remainder of this method omitted in the source listing .........