This page collects typical usage examples of the pool_destroy function from the Python module virttest.virsh. If you have been wondering what pool_destroy does and how to use it, the curated code samples below should help.
The following 15 code examples of pool_destroy are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
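For orientation, here is a minimal sketch of the call itself. The pool name "mypool" is hypothetical; as the examples below rely on, pool_destroy returns a truthy value on success:

from virttest import virsh

# Stop (destroy) an active pool named "mypool" (hypothetical name).
# pool-destroy only stops the pool: a persistent definition survives and
# the pool can be started again with pool-start, while a transient pool
# disappears entirely.
if virsh.pool_destroy("mypool"):
    print("pool stopped")
else:
    print("pool-destroy failed")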
Example 1: cleanup_pool
def cleanup_pool(pool_name, pool_type, pool_target):
    """
    Delete vols, destroy the created pool and restore the env
    """
    if pool_type in ["dir", "netfs"]:
        vols = get_vol_list(pool_name)
        for vol in vols:
            result = virsh.vol_delete(vol, pool_name)
            if result.exit_status:
                raise error.TestFail("Command virsh vol-delete failed:\n%s"
                                     % result.stderr)
            else:
                logging.debug("Delete volume %s from pool %s", vol, pool_name)
    if not virsh.pool_destroy(pool_name):
        raise error.TestFail("Command virsh pool-destroy failed")
    else:
        logging.debug("Destroy pool %s", pool_name)
    if pool_type == "netfs":
        shutil.move("/etc/exports.virt", "/etc/exports")
        utils.run("service nfs restart")
        nfs_path = os.path.join(test.tmpdir, nfs_server_dir)
        if os.path.exists(nfs_path):
            shutil.rmtree(nfs_path)
    if pool_type == "logical":
        cmd = "pvs |grep vg_logical|awk '{print $1}'"
        pv = utils.system_output(cmd)
        utils.run("vgremove -f vg_logical")
        utils.run("pvremove %s" % pv)
    if pool_type in ["dir", "fs", "netfs"]:
        pool_target = os.path.join(test.tmpdir, pool_target)
        if os.path.exists(pool_target):
            shutil.rmtree(pool_target)
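Note the ordering in this helper: volumes are deleted with vol-delete while the pool is still active (volume operations require a running pool), and only afterwards is the pool itself destroyed.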
Example 2: state_test
def state_test():
    states = [ServiceState(), FileState(), DirState(), DomainState(),
              NetworkState(), PoolState(), SecretState(), MountState()]
    for state in states:
        state.backup()
    utils.run('echo hello > /etc/exports')
    virsh.start('virt-tests-vm1')
    virsh.net_autostart('default', '--disable')
    virsh.pool_destroy('mount')
    utils.run('rm /var/lib/virt_test/images/hello')
    utils.run('mkdir /var/lib/virt_test/images/hi')
    utils_libvirtd.Libvirtd().stop()
    utils_selinux.set_status('permissive')
    for state in states:
        lines = state.check(recover=True)
        for line in lines:
            print(line)
Example 3: create_iscsi_pool
def create_iscsi_pool():
    """
    Set up an iSCSI target and create one iSCSI pool.
    """
    libvirt.setup_or_cleanup_iscsi(is_setup=False)
    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                           is_login=False,
                                                           image_size='1G',
                                                           chap_user="",
                                                           chap_passwd="",
                                                           portal_ip=disk_src_host)
    # Define an iSCSI pool xml to create it
    pool_src_xml = pool_xml.SourceXML()
    pool_src_xml.host_name = pool_src_host
    pool_src_xml.device_path = iscsi_target
    poolxml = pool_xml.PoolXML(pool_type=pool_type)
    poolxml.name = pool_name
    poolxml.set_source(pool_src_xml)
    poolxml.target_path = "/dev/disk/by-path"
    # Create iSCSI pool.
    virsh.pool_destroy(pool_name, **virsh_dargs)
    cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
    libvirt.check_exit_status(cmd_result)
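Because the pool above is created with pool-create, it is transient, and destroying it removes it completely. A possible cleanup counterpart, sketched under the assumption that the same module-level names (pool_name, virsh_dargs) are in scope:

def cleanup_iscsi_pool():
    """Tear down the transient iSCSI pool and the emulated target."""
    # A transient pool vanishes once destroyed; no pool-undefine is needed.
    virsh.pool_destroy(pool_name, **virsh_dargs)
    # Remove the emulated iSCSI target that was set up earlier.
    libvirt.setup_or_cleanup_iscsi(is_setup=False)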
Example 4: remove
def remove(self, name):
    """
    Remove target pool.

    :param name: Target pool to be removed.
    """
    pool = name
    if pool['state'] == 'running':
        res = virsh.pool_destroy(pool['name'])
        if not res:
            raise Exception(str(res))
    if pool['persistent'] == 'yes':
        res = virsh.pool_undefine(pool['name'])
        if res.exit_status:
            raise Exception(str(res))
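The two checks mirror libvirt's pool lifecycle: an active ("running") pool must first be stopped with pool-destroy, while pool-undefine applies only to pools that have a persistent definition.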
Example 5: cleanup_pool
def cleanup_pool(pool_name, pool_target):
    """
    Destroy and undefine the pool, then delete the pool target
    """
    result = virsh.pool_destroy(pool_name, ignore_status=True)
    if not result:
        raise error.TestFail("Command virsh pool-destroy failed")
    result = virsh.pool_undefine(pool_name, ignore_status=True)
    if result.exit_status != 0:
        raise error.TestFail("Command virsh pool-undefine failed:\n%s" %
                             result.stderr.strip())
    try:
        logging.debug("Deleting the pool target: %s directory", pool_target)
        shutil.rmtree(pool_target)
    except OSError as detail:
        raise error.TestFail("Failed to delete the pool target directory "
                             "%s:\n %s" % (pool_target, detail))
Example 6: run
#.........some code omitted here.........
        vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()
        # start vm with the virtual disk
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))
        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")
        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))
        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")
        # snapshot-revert doesn't support external snapshots for now, so
        # only check this with internal snapshots.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s",
                          vm_name, snapshot_name)
            utlv.check_exit_status(virsh.snapshot_revert(vm_name,
                                                         snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if (not file_existence) or (file_content.strip() != "before"):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s"
                                      % snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
Example 7: list
        if virsh.domain_exists(vm_name):
            if active_snap or with_shallow:
                option = "--snapshots-metadata"
            else:
                option = None
            original_xml.sync(option)
        else:
            original_xml.define()
    except Exception as e:
        logging.error(e)
    for disk in snapshot_external_disks:
        if os.path.exists(disk):
            os.remove(disk)
    # Clean up libvirt pool, which may be created by 'set_vm_disk'
    if disk_type == 'volume':
        virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
    # Clean up NFS
    try:
        if replace_vm_disk and disk_source_protocol == "netfs":
            utl.setup_or_cleanup_nfs(is_setup=False)
    except Exception as e:
        logging.error(e)
    # Clean up iSCSI
    try:
        for iscsi_n in list(set(emulated_iscsi)):
            utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=iscsi_n)
    except Exception as e:
        logging.error(e)
    if os.path.exists(dest_path):
        os.remove(dest_path)
    if os.path.exists(snap_path):
Example 8: run
#.........some code omitted here.........
    pool_name = new_pool_name
    if pool_type != "logical":
        result = virsh.pool_build(pool_name, build_option, ignore_status=True)
        utlv.check_exit_status(result)
    # Step (7)
    # Pool start
    result = virsh.pool_start(pool_name, debug=True, ignore_status=True)
    utlv.check_exit_status(result)
    # Step (8)
    # Pool list
    option = "--persistent --type %s" % pool_type
    check_pool_list(pool_name, option)
    # Step (9)
    # Pool autostart
    result = virsh.pool_autostart(pool_name, ignore_status=True)
    utlv.check_exit_status(result)
    # Step (10)
    # Pool list
    option = "--autostart --type %s" % pool_type
    check_pool_list(pool_name, option)
    # Step (11)
    # Restart libvirtd and check the autostart pool
    utils_libvirtd.libvirtd_restart()
    option = "--autostart --persistent"
    check_pool_list(pool_name, option)
    # Step (12)
    # Pool destroy
    if virsh.pool_destroy(pool_name):
        logging.debug("Pool %s destroyed.", pool_name)
    else:
        test.fail("Destroy pool %s failed." % pool_name)
    # Step (13)
    # Pool autostart disable
    result = virsh.pool_autostart(pool_name, "--disable",
                                  ignore_status=True)
    utlv.check_exit_status(result)
    # Step (14)
    # Repeat step (11)
    utils_libvirtd.libvirtd_restart()
    option = "--autostart"
    check_pool_list(pool_name, option, True)
    # Step (15)
    # Pool start
    # When libvirtd starts up, it'll check to see if any of the storage
    # pools have been activated externally. If so, then it'll mark the
    # pool as active. This is independent of autostart.
    # So a directory based storage pool is thus pretty much always active,
    # and so is the SCSI pool.
    if pool_type not in ["dir", 'scsi']:
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
    # Step (16)
    # Pool info
    pool_info = _pool.pool_info(pool_name)
    logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)
Example 9: run
#.........some code omitted here.........
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)
            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)
            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            snapshot_file = os.path.join(data_dir.get_tmp_dir(),
                                         snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)
            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)
        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)
        # Check disk inside the VM
        find_attach_disk(False)
    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
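The cleanup deliberately wraps pool-destroy and secret-undefine in a bare try/except: by the time the finally clause runs, the pool or secret may never have been created, and a failing cleanup step should not mask the actual test result.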
Example 10: run
#.........some code omitted here.........
        if update_policy:
            new_device = utlv.setup_or_cleanup_iscsi(True)
            lv_utils.vg_create(vg_name, new_device)
            new_device = utlv.create_local_disk(disk_type, size="0.5",
                                                vgname=vg_name,
                                                lvname=lv_name)
            new_path = new_device
            if invalid_source_path:
                new_path = invalid_source_path
            if pool_type == "fs":
                utlv.mkfs(new_device, source_format)
            n_poolxml = change_source_path(new_path, update_policy)
            p_xml = n_poolxml.xml
            if not virsh.pool_undefine(pool_name):
                test.fail("Undefine pool %s failed" % pool_name)
            if not virsh.pool_define(p_xml):
                test.fail("Define pool %s from %s failed" % (pool_name, p_xml))
        logging.debug("Start pool %s" % pool_name)
        result = virsh.pool_start(pool_name, ignore_status=True, debug=True)
        utlv.check_exit_status(result, status_error)
        # Mount a valid fs to pool target
        if pool_type == "fs":
            source_list = []
            mnt_cmd = ""
            pool_target = n_poolxml.target_path
            if invalid_source_path:
                source_list.append(new_device)
            else:
                s_devices = n_poolxml.xmltreefile.findall("//source/device")
                for dev in s_devices:
                    source_list.append(dev.get('path'))
            try:
                for src in source_list:
                    mnt_cmd = "mount %s %s" % (src, pool_target)
                    if not process.system(mnt_cmd, shell=True):
                        clean_mount = True
            except process.CmdError:
                test.error("Failed to run %s" % mnt_cmd)
        # Step(2)
        # Pool autostart
        logging.debug("Try to mark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool, readonly=ro_flag,
                                      ignore_status=True, debug=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name, pool_type, checkpoint='Autostart',
                       expect_value="yes", expect_error=status_error)
        # Step(3)
        # Restart libvirtd and check pool status
        logging.info("Try to restart libvirtd")
        libvirtd = utils_libvirtd.Libvirtd()
        libvirtd.restart()
        check_pool(pool_name, pool_type, checkpoint="State",
                   expect_value="active", expect_error=status_error)
        # Step(4)
        # Pool destroy
        if pool_ins.is_pool_active(pool_name):
            virsh.pool_destroy(pool_name)
            logging.debug("Pool %s destroyed" % pool_name)
        # Step(5)
        # Pool autostart disable
        logging.debug("Try to unmark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool, extra="--disable", debug=True,
                                      ignore_status=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name, pool_type, checkpoint='Autostart',
                       expect_value="no", expect_error=status_error)
        # Repeat step (3)
        logging.debug("Try to restart libvirtd")
        libvirtd = utils_libvirtd.Libvirtd()
        libvirtd.restart()
        check_pool(pool_name, pool_type, checkpoint='State',
                   expect_value="inactive", expect_error=status_error)
    finally:
        # Clean up
        logging.debug("Try to clean up env")
        try:
            if clean_mount is True:
                for src in source_list:
                    process.system("umount %s" % pool_target)
            if pre_def_pool:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, **kwargs)
            if new_device:
                utlv.delete_local_disk(disk_type, vgname=vg_name,
                                       lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utlv.setup_or_cleanup_iscsi(False)
            if os.path.exists(p_xml):
                os.remove(p_xml)
        except test.fail as details:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.error(str(details))
Example 11: run
#.........some code omitted here.........
                                        "nodedev_parent": online_hbas_list[0],
                                        "scsi_wwnn": vhba_wwnn,
                                        "scsi_wwpn": vhba_wwpn})
    utils_misc.wait_for(
        lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
    if not new_vhba:
        raise exceptions.TestFail("vHBA not successfully generated.")
    new_vhbas.append(new_vhba)
    utils_misc.wait_for(
        lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
        timeout=_DELAY_TIME*2)
    if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
        raise exceptions.TestFail("mpath dev not generated.")
    cur_mpath_devs = utils_npiv.find_mpath_devs()
    new_mpath_devs = list(set(cur_mpath_devs).difference(
        set(old_mpath_devs)))
    logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
    mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
    try:
        cmd = "parted %s mklabel msdos -s" % mpath_vol_path
        cmd_result = process.run(cmd, shell=True)
    except Exception as e:
        raise exceptions.TestError("Error occurred when parted mklabel")
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when preparing pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(
                pool_name, pool_type,
                pool_target, pool_extra_args,
                ignore_status=True, debug=True
            )
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as only needs to "
                                               "be covered by scsi pool for "
                                               "NPIV test.")
            cmd = "virsh pool-create-as %s %s \
                  --adapter-wwnn %s --adapter-wwpn %s \
                  --adapter-parent %s --target %s" \
                  % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                     online_hbas_list[0], pool_target)
Example 12: run
#.........some code omitted here.........
    # Add disk xml.
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = device
    if device_type == "file":
        dev_attrs = "file"
    elif device_type == "dir":
        dev_attrs = "dir"
    else:
        dev_attrs = "dev"
    disk_source = disk_xml.new_disk_source(
        **{"attrs": {dev_attrs: volume_target_path}})
    disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                       "cache": "none"}
    disk_xml.target = {"dev": device_target, "bus": device_bus}
    v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
    sec_uuids.append(v_xml.encryption.secret["uuid"])
    if not status_error:
        logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                      v_xml.encryption.format,
                      v_xml.encryption.secret["type"],
                      v_xml.encryption.secret["uuid"])
        encryption_dict = {"encryption": v_xml.encryption.format,
                           "secret": {"type": v_xml.encryption.secret["type"],
                                      "uuid": v_xml.encryption.secret["uuid"]}}
        if encryption_in_source:
            disk_source.encryption = disk_xml.new_encryption(
                **encryption_dict)
        if encryption_out_source:
            disk_xml.encryption = disk_xml.new_encryption(
                **encryption_dict)
    disk_xml.source = disk_source
    logging.debug("disk xml is:\n%s" % disk_xml)
    if not hotplug:
        # Sync VM xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()
    try:
        # Start the VM and do disk hotplug if required,
        # then check disk status in vm.
        # Note that LUKS encrypted virtual disk without <encryption>
        # can be normally started or attached since qemu will just treat
        # it as RAW, so we don't test LUKS with status_error=TRUE.
        vm.start()
        vm.wait_for_login()
        if status_error:
            if hotplug:
                logging.debug("attaching disk, expecting error...")
                result = virsh.attach_device(vm_name, disk_xml.xml)
                libvirt.check_exit_status(result, status_error)
            else:
                test.fail("VM started unexpectedly.")
        else:
            if hotplug:
                result = virsh.attach_device(vm_name, disk_xml.xml,
                                             debug=True)
                libvirt.check_exit_status(result)
                if not check_in_vm(vm, device_target, old_parts):
                    test.fail("Check encryption disk in VM failed")
                result = virsh.detach_device(vm_name, disk_xml.xml,
                                             debug=True)
                libvirt.check_exit_status(result)
            else:
                if not check_in_vm(vm, device_target, old_parts):
                    test.fail("Check encryption disk in VM failed")
    except virt_vm.VMStartError as e:
        if status_error:
            if hotplug:
                test.fail("In hotplug scenario, VM should "
                          "start successfully but did not. "
                          "Error: %s" % str(e))
            else:
                logging.debug("VM failed to start as expected. "
                              "Error: %s", str(e))
        else:
            # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images are
            # no longer supported.
            err_msgs = ("AES-CBC encrypted qcow2 images is"
                        " no longer supported in system emulators")
            if str(e).count(err_msgs):
                test.cancel(err_msgs)
            else:
                test.fail("VM failed to start. "
                          "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
        virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
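Here the cleanup guard is "pool_name in virsh.pool_state_dict()": destroy and undefine are only attempted while libvirt still knows about the pool, which keeps the cleanup idempotent if an earlier failure already removed it.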
Example 13: open
    try:
        cmd = "parted %s mklabel msdos -s" % mpath_vol_path
        cmd_result = process.run(cmd, shell=True)
    except Exception as e:
        raise exceptions.TestError("Error occurred when parted mklabel")
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when preparing pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            try:
                f = open(pool_xml_f, 'r')
                logging.debug("Create pool from file: %s", f.read())
            finally:
                f.close()
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
Example 14: run
#.........some code omitted here.........
    # if pool_type == "fs":
    #     option = '--overwrite'
    result = virsh.pool_build(pool_name, option, ignore_status=True)
    check_exit_status(result)
    # Step (7)
    # Pool start
    result = virsh.pool_start(pool_name, ignore_status=True)
    check_exit_status(result)
    # Step (8)
    # Pool list
    option = "--persistent --type %s" % pool_type
    check_pool_list(pool_name, option)
    # Step (9)
    # Pool autostart
    result = virsh.pool_autostart(pool_name, ignore_status=True)
    check_exit_status(result)
    # Step (10)
    # Pool list
    option = "--autostart --type %s" % pool_type
    check_pool_list(pool_name, option)
    # Step (11)
    # Restart libvirtd and check the autostart pool
    utils_libvirtd.libvirtd_restart()
    option = "--autostart --persistent"
    check_pool_list(pool_name, option)
    # Step (12)
    # Pool destroy
    if virsh.pool_destroy(pool_name):
        logging.debug("Pool %s destroyed.", pool_name)
    else:
        raise error.TestFail("Destroy pool %s failed." % pool_name)
    # Step (13)
    # Pool autostart disable
    result = virsh.pool_autostart(pool_name, "--disable",
                                  ignore_status=True)
    check_exit_status(result)
    # Step (14)
    # Repeat step (11)
    utils_libvirtd.libvirtd_restart()
    option = "--autostart"
    check_pool_list(pool_name, option, True)
    # Step (15)
    # Pool start
    # When libvirtd starts up, it'll check to see if any of the storage
    # pools have been activated externally. If so, then it'll mark the
    # pool as active. This is independent of autostart.
    # So a directory based storage pool is thus pretty much always active,
    # and so is the SCSI pool.
    if pool_type != "scsi" and (pool_type != "dir" or
                                libvirt_version.version_compare(1, 2, 15)):
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)
    # Step (16)
    # Pool info
    pool_info = _pool.pool_info(pool_name)
    logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)
    # Step (17)
Example 15: run
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file could be given by the
    tester or generated by dumping the XML of a pre-defined pool.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")
    no_disk_label = "yes" == params.get("no_disk_label", "no")
    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")
    if "/PATH/TO/POOL.XML" in pool_xml_f:
        test.cancel("Please replace %s with valid pool xml file" %
                    pool_xml_f)
    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exists" % pool_name)
    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'source_path': source_path,
              'source_name': source_name, 'source_format': source_format}
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    new_device_name = None
    if pre_def_pool:
        try:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if no_disk_label:
                # Update <device_path>
                logging.debug("Try to update device path")
                new_device_name = utlv.setup_or_cleanup_iscsi(True)
                p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
                s_xml = pool_xml.SourceXML()
                s_xml.device_path = new_device_name
                p_xml.set_source(s_xml)
                pool_xml_f = p_xml.xml
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                pass
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
                cmd = ("sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s"
                       % (new_pool_name, pool_xml_f))
                process.run(cmd, shell=True)
            else:
                # The transient pool will be gone once destroyed
                virsh.pool_destroy(pool_name)
                new_source_format = params.get("new_pool_src_format")
                if new_source_format:
                    cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                        source_format, new_source_format, pool_xml_f)
                    process.run(cmd, shell=True)
                # Remove uuid
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
        except Exception as details:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **kwargs)
            if new_device_name:
                utlv.setup_or_cleanup_iscsi(False)
            test.error("Error occurred when preparing pool xml:\n %s"
                       % details)
    # Create an invalid pool xml file
    if pool_xml_f == "invalid-pool-xml":
        tmp_xml_f = xml_utils.TempXMLFile()
        tmp_xml_f.write('"<pool><<<BAD>>><\'XML</name\>'
                        '[email protected]#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml_f.flush()
        pool_xml_f = tmp_xml_f.name
    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True
    # Run virsh test
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())
    try:
        #.........some code omitted here.........