本文整理汇总了Python中virttest.virsh.remove_domain函数的典型用法代码示例。如果您正苦于以下问题:Python remove_domain函数的具体用法?Python remove_domain怎么用?Python remove_domain使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了remove_domain函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: recover
def recover(self, params=None):
    """
    Recover the test environment.

    Restores SELinux enforcing mode if it was enforcing before the
    test, removes the test domain, deletes the test image file and its
    directory, and unregisters the VM from the test environment.

    :param params: optional test parameter dict; when provided, its
                   "image_name" entry names the image file to delete.
    """
    if self.selinux_enforcing:
        utils_selinux.set_status("enforcing")
    if virsh.domain_exists(self.vm_name):
        virsh.remove_domain(self.vm_name)
    # BUG FIX: params defaults to None, so the original crashed with
    # AttributeError on params.get() (and os.path.exists(None) raises
    # TypeError when "image_name" is unset). Guard both cases.
    image_file = params.get("image_name") if params else None
    if image_file and os.path.exists(image_file):
        os.remove(image_file)
    if os.path.isdir(self.image_path):
        os.rmdir(self.image_path)
    self.env.unregister_vm(self.vm_name)
示例2: run
def run(test, params, env):
    """
    Test svirt in virt-install.
    (1). Init variables.
    (2). Set selinux on host.
    (3). Set label of image.
    (4). run unattended install.
    (5). clean up.

    :param test: test object.
    :param params: test parameter dict.
    :param env: test environment object.
    :raises error.TestFail: when the install result contradicts the
                            expected (positive/negative) outcome.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Set selinux status on host, remembering the original mode so it
    # can be restored in the finally block.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the image label.
    disk_label = params.get("disk_label", None)
    vm_name = params.get("main_vm", None)
    vm_params = params.object_params(vm_name)
    base_dir = params.get("images_base_dir", data_dir.get_data_dir())
    image_filename = storage.get_image_filename(vm_params, base_dir)
    utils_selinux.set_context_of_file(image_filename, disk_label)
    try:
        try:
            unattended_install.run(test, params, env)
            # Install completed.
            if status_error:
                # BUG FIX: message typo "successed" -> "succeeded".
                raise error.TestFail('Test succeeded in negative case.')
        # BUG FIX: use Py3-compatible "except ... as e" syntax.
        except error.CmdError as e:
            # Install failed.
            if not status_error:
                # BUG FIX: the two string literals were concatenated
                # without a separator ("positive case.error: ...").
                raise error.TestFail("Test failed in positive case. "
                                     "error: %s" % e)
    finally:
        # cleanup: restore SELinux mode and remove the test domain.
        utils_selinux.set_status(backup_sestatus)
        if virsh.domain_exists(vm_name):
            virsh.remove_domain(vm_name)
示例3: result_confirm
def result_confirm(self, params):
    """
    Confirm if VM installation is succeed

    Behaviour depends on flags set on self:
    - twice_execute + kill_first: kill the first install's qemu-kvm
      process, then (per read_only) check the domain's existence.
    - twice_execute or read_only: run the installation a second time
      in a background thread and verify the domain exists afterwards.

    :param params: test parameters forwarded to the second
                   unattended_install.run invocation when needed.
    """
    if self.twice_execute and self.kill_first:
        # Locate the qemu-kvm process of the first installation.
        get_pid_cmd = "ps -ef | grep '%s' | grep qemu-kvm | grep -v grep"\
                      % self.vm_name
        result = process.run(get_pid_cmd, ignore_status=True, shell=True)
        if result.exit_status:
            self.test.fail("First install failed!")
        # Second whitespace-separated field of `ps -ef` output is the PID.
        install_pid = result.stdout_text.strip().split()[1]
        utils_misc.safe_kill(int(install_pid), signal.SIGKILL)
        # Wait for the first installation thread to finish.
        self.td.join()
    if self.read_only:
        if virsh.domain_exists(self.vm_name):
            self.test.fail("Domain '%s' should not exist"
                           % self.vm_name)
        # Restore full permissions on the image made read-only earlier.
        os.chmod(self.image_path,
                 stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
    else:
        if not virsh.domain_exists(self.vm_name):
            self.test.fail("Domain '%s' should exists, no matter its"
                           " installation is succeed or failed!"
                           % self.vm_name)
        else:
            if not self.kill_first:
                # First install was allowed to run; make sure the guest
                # boots and is reachable before removing it.
                if self.vm.is_dead():
                    self.vm.start()
                try:
                    self.vm.wait_for_login()
                except remote.LoginTimeoutError as detail:
                    self.test.fail(str(detail))
            virsh.remove_domain(self.vm_name)
    if self.twice_execute or self.read_only:
        # Run the installation a second time in a background thread and
        # wait for it to complete before the final existence check.
        self.td1 = threading.Thread(target=unattended_install.run,
                                    args=(self.test, params, self.env))
        self.td1.start()
        self.td1.join()
        if not virsh.domain_exists(self.vm_name):
            self.test.fail("Domain '%s' installation failed!"
                           % self.vm_name)
示例4: vm_undefine_check
elif test_undefine:
vm_undefine_check(vm_name)
elif autostart_bypass_cache:
libvirtd.stop()
virsh_cmd = ("(service libvirtd start)")
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"0"), flags)
elif test_loop_cmd:
loop_range = params.get("loop_range", "20")
vm_managedsave_loop(vm_name, loop_range, libvirtd)
else:
vm_recover_check(option, libvirtd, check_shutdown)
finally:
# Restore test environment.
# Restart libvirtd.service
qemu_config.restore()
libvirt_guests_config.restore()
libvirtd.restart()
if autostart_bypass_cache:
virsh.autostart(vm_name, "--disable",
ignore_status=True, debug=True)
vm.destroy(gracefully=False)
virsh.managedsave_remove(vm_name, debug=True)
vmxml_backup.sync()
if multi_guests:
for i in range(int(multi_guests)):
virsh.remove_domain("%s_%s" % (vm_name, i),
"--remove-all-storage",
debug=True)
示例5: run
def run(test, params, env):
    """
    Test for virt-clone, it is used to clone a guest
    to another with given name.
    (1). Get source guest and destination guest name.
    (2). Clone source guest to dest guest with virt-clone.
    (3). Check the dest guest.

    :param test: test object.
    :param params: test parameter dict.
    :param env: test environment object.
    :raises error.TestFail: when virt-clone fails or the cloned
                            guest cannot be started.
    """
    # Get the full path of virt-clone command.
    VIRT_CLONE = os_dep.command("virt-clone")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    dest_guest_name = params.get("dest_vm", "test-clone")
    dest_guest_file = params.get("dest_image")
    dest_guest_path = None
    # Destroy and undefine the test-clone guest before
    # executing virt-clone command.
    virsh.remove_domain(dest_guest_name)
    cmd = "%s --connect=%s -o %s -n %s" % (VIRT_CLONE,
                                           vm.connect_uri,
                                           vm_name,
                                           dest_guest_name)
    domblklist_result = virsh.domblklist(vm_name)
    if len(domblklist_result.stdout.strip().splitlines()) >= 3:
        # We need a file for destination if guest has block devices.
        dest_guest_path = os.path.join(data_dir.get_data_dir(),
                                       dest_guest_file)
        if os.path.exists(dest_guest_path):
            os.remove(dest_guest_path)
        cmd = "%s -f %s" % (cmd, dest_guest_path)
    try:
        cmd_result = utils.run(cmd, ignore_status=True)
        if cmd_result.exit_status:
            raise error.TestFail("command of virt-clone failed.\n"
                                 "output: %s." % cmd_result)
        # BUG FIX: the original assigned start_result locally inside
        # _start_success, so the outer start_result stayed None and the
        # failure message never showed the real error. Use a mutable
        # holder the closure can write through.
        start_holder = [None]
        # We will get an error of "error: monitor socket did not show up:"
        # when start vm immediately after virt-clone, so poll via wait_for.
        def _start_success():
            start_holder[0] = virsh.start(dest_guest_name)
            if start_holder[0].exit_status:
                return False
            return True
        if not utils_misc.wait_for(_start_success, timeout=5):
            raise error.TestFail("command virt-clone exit successfully.\n"
                                 "but start it failed.\n Detail: %s." %
                                 start_holder[0])
    finally:
        # cleanup remove the dest guest.
        virsh.remove_domain(dest_guest_name)
        # remove image file if we created it.
        if dest_guest_path and os.path.exists(dest_guest_path):
            os.remove(dest_guest_path)
示例6: run
#.........这里部分代码省略.........
# Check dnsmasq settings if take affect in guest
if guest_ipv4:
check_name_ip(session)
# Run bandwidth test for interface
if test_qos_bandwidth:
run_bandwidth_test(check_iface=True)
# Run bandwidth test for portgroup
if test_pg_bandwidth:
pg_bandwidth_inbound = params.get(
"portgroup_bandwidth_inbound", "").split()
pg_bandwidth_outbound = params.get(
"portgroup_bandwidth_outbound", "").split()
pg_name = params.get("portgroup_name", "").split()
pg_default = params.get("portgroup_default", "").split()
iface_inbound = ast.literal_eval(iface_bandwidth_inbound)
iface_outbound = ast.literal_eval(iface_bandwidth_outbound)
iface_name = libvirt.get_ifname_host(vm_name, iface_mac)
if_source = ast.literal_eval(iface_source)
if "portgroup" in if_source:
pg = if_source["portgroup"]
else:
pg = "default"
for (name, df, bw_ib, bw_ob) in zip(pg_name, pg_default,
pg_bandwidth_inbound,
pg_bandwidth_outbound):
if pg == name:
inbound = ast.literal_eval(bw_ib)
outbound = ast.literal_eval(bw_ob)
elif pg == "default" and df == "yes":
inbound = ast.literal_eval(bw_ib)
outbound = ast.literal_eval(bw_ob)
else:
continue
# Interface bandwidth settings will
# overwriting portgroup settings
if iface_inbound:
inbound = iface_inbound
if iface_outbound:
outbound = iface_outbound
check_class_rules(iface_name, "1:1", inbound)
check_filter_rules(iface_name, outbound)
if test_qos_remove:
# Remove the bandwidth settings in network xml
logging.debug("Removing network bandwidth settings...")
netxml_backup.sync()
vm.destroy(gracefully=False)
# Should fail to start vm
vm.start()
if restart_error:
test.fail("VM started unexpectedly")
if test_ipv6_address:
ipt6_rules = check_ipt_rules(check_ipv4=False, check_ipv6=True)
if not ("mode" in forward and forward["mode"] == "open"):
run_ip_test(session, "ipv6")
if test_ipv4_address:
ipt_rules = check_ipt_rules(check_ipv4=True)
if not ("mode" in forward and forward["mode"] == "open"):
run_ip_test(session, "ipv4")
if test_guest_libvirt:
run_guest_libvirt(session)
session.close()
except virt_vm.VMStartError as details:
logging.info(str(details))
if not (start_error or restart_error):
test.fail('VM failed to start:\n%s' % details)
# Destroy created network and check iptable rules
if net_name != "default":
virsh.net_destroy(net_name)
if ipt_rules:
output_des = to_text(process.system_output('iptables-save'))
for ipt in ipt_rules:
if re.search(r"%s" % ipt, output_des, re.M):
test.fail("Find iptable rule %s after net destroyed" % ipt)
if ipt6_rules:
output_des = to_text(process.system_output('ip6tables-save'))
for ipt in ipt6_rules:
if re.search(r"%s" % ipt, output_des, re.M):
test.fail("Find ip6table rule %s after net destroyed" % ipt)
finally:
# Recover VM.
if vm.is_alive():
vm.destroy(gracefully=False)
for vms in vms_list:
virsh.remove_domain(vms.name, "--remove-all-storage")
logging.info("Restoring network...")
if net_name == "default":
netxml_backup.sync()
else:
# Destroy and undefine new created network
virsh.net_destroy(net_name)
virsh.net_undefine(net_name)
vmxml_backup.sync()
if test_ipv6_address and original_accept_ra != '2':
process.system(sysctl_cmd + "=%s" % original_accept_ra)
示例7: run
def run(test, params, env):
    """
    Test svirt in virt-install.
    (1). Init variables.
    (2). Set selinux on host.
    (3). Set label of image.
    (4). run a virt-install command.
    (5). clean up.
    As this test only care whether the qemu-kvm process
    can access the image. It is not necessary to install
    a full os in a vm. Just verify the vm is alive after
    virt-install command is enough. Then we can save a lot
    of time and make this test independent from unattended_install.

    :param test: test object.
    :param params: test parameter dict.
    :param env: test environment object.
    :raises error.TestFail: when VM liveness contradicts the expected
                            (positive/negative) outcome.
    :raises ValueError: on an invalid seclabel configuration.
    """
    # Get general variables.
    status_error = "yes" == params.get("status_error", "no")
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_install_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_install_vm_sec_model", "selinux")
    sec_label = params.get("svirt_install_vm_sec_label", None)
    sec_relabel = params.get("svirt_install_vm_sec_relabel", "yes")
    # Set selinux status on host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the image label.
    disk_label = params.get("svirt_install_disk_label", None)
    vm_name = params.get("main_vm", None)
    # svirt will prevent accessing via a symbolic link,
    # so resolve the data dir to its real path first.
    data_path = data_dir.get_data_dir()
    real_data_path = os.path.realpath(data_path)
    image_path = os.path.join(real_data_path, "svirt_image")
    if virsh.domain_exists(vm_name):
        virsh.remove_domain(vm_name)
    if not os.path.exists(image_path):
        utils_test.libvirt.create_local_disk("file", path=image_path)
    try:
        utils_selinux.set_context_of_file(image_path, disk_label)
        cmd = "virt-install --name %s --import --disk path=%s --ram '1024' " % (vm_name, image_path)
        cmd += " --security"
        if sec_type == "static":
            if sec_label is None:
                raise ValueError("Seclabel is not setted for static.")
            cmd += " type=static,label=%s" % (sec_label)
        elif sec_type == "dynamic":
            cmd += " type=dynamic"
        else:
            raise ValueError("Security type %s is not supported." % sec_type)
        if sec_relabel is not None:
            cmd += ",relabel=%s" % sec_relabel
        # Run virt-install in the background; we only poll for VM
        # liveness below instead of waiting for a full install.
        cmd += "&"
        utils.run(cmd, ignore_status=True)

        def _vm_alive():
            return virsh.is_alive(vm_name)
        if utils_misc.wait_for(_vm_alive, timeout=5):
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
        else:
            if not status_error:
                raise error.TestFail("Test failed in positive case.")
    finally:
        # cleanup
        utils_selinux.set_status(backup_sestatus)
        if virsh.domain_exists(vm_name):
            virsh.remove_domain(vm_name)
        # BUG FIX: the original tested "if not os.path.exists(image_path)"
        # which skipped deletion exactly when the image existed, leaking
        # the test disk. Delete it when it is present.
        if os.path.exists(image_path):
            utils_test.libvirt.delete_local_disk("file", path=image_path)
示例8: vm_recover_check
else:
vm_recover_check(option, libvirtd, check_shutdown)
finally:
# Restore test environment.
# Ensure libvirtd is started
if not libvirtd.is_running():
libvirtd.start()
if vm.is_paused():
virsh.resume(vm_name)
elif vm.is_dead():
vm.start()
# Wait for VM in running state
wait_for_state("running")
if autostart_bypass_cache:
virsh.autostart(vm_name, "--disable",
ignore_status=True)
if vm.is_alive():
vm.destroy(gracefully=False)
# Wait for VM to be in shut off state
utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
virsh.managedsave_remove(vm_name)
vmxml_backup.sync()
if multi_guests:
for i in range(int(multi_guests)):
virsh.remove_domain("%s_%s" % (vm_name, i),
"--remove-all-storage")
qemu_config.restore()
libvirt_guests_config.restore()
libvirtd.restart()
示例9: str
test.fail("VM failed to start."
"Error: %s" % str(details))
finally:
# Delete snapshots.
snapshot_lists = virsh.snapshot_list(vm_name)
if len(snapshot_lists) > 0:
libvirt.clean_up_snapshots(vm_name, snapshot_lists)
for snap in snapshot_lists:
virsh.snapshot_delete(vm_name, snap, "--metadata")
# Recover VM.
if vm.is_alive():
vm.destroy(gracefully=False)
if additional_guest:
virsh.remove_domain(guest_name,
"--remove-all-storage",
ignore_stauts=True)
# Remove the snapshot.
if create_snapshot:
cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
" purge {2} && rbd -m {0} {1} rm {2}"
"".format(mon_host, key_opt, disk_src_name))
process.run(cmd, ignore_status=True, shell=True)
elif create_volume:
cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
"".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name)))
process.run(cmd, ignore_status=True, shell=True)
cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
"".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume)))
process.run(cmd, ignore_status=True, shell=True)
clean_up_volume_snapshots()
示例10: run
#.........这里部分代码省略.........
block_device = params.get("block_device", "/DEV/EXAMPLE")
if application == "install":
cdrom_path = os.path.join(data_dir.get_data_dir(),
params.get("cdrom_cd1"))
if not os.path.exists(cdrom_path):
raise error.TestNAError("Can't find installation cdrom:%s"
% cdrom_path)
# Get a nonexist domain name
vm_name = "vol_install_test"
try:
pvtest = utlv.PoolVolumeTest(test, params)
pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
image_size=emulated_size, pre_disk_vol=[volume_size],
device_name=block_device)
logging.debug("Current pools:\n%s",
libvirt_storage.StoragePool().list_pools())
new_pool = libvirt_storage.PoolVolume(pool_name)
if pool_type == "disk":
volumes = new_pool.list_volumes()
logging.debug("Current volumes:%s", volumes)
else:
volumes = create_volumes(new_pool, volume_count, volume_size)
if application == "attach":
vm = env.get_vm(vm_name)
session = vm.wait_for_login()
virsh.attach_disk(vm_name, volumes.values()[volume_count - 1],
disk_target)
vm_attach_device = "/dev/%s" % disk_target
if session.cmd_status("which parted"):
# No parted command, check device only
if session.cmd_status("ls %s" % vm_attach_device):
raise error.TestFail("Didn't find attached device:%s"
% vm_attach_device)
return
# Test if attached disk can be used normally
utlv.mk_part(vm_attach_device, session=session)
session.cmd("mkfs.ext4 %s1" % vm_attach_device)
session.cmd("mount %s1 /mnt" % vm_attach_device)
session.cmd("echo %s > /mnt/test" % test_message)
output = session.cmd_output("cat /mnt/test").strip()
if output != test_message:
raise error.TestFail("%s cannot be used normally!"
% vm_attach_device)
elif application == "install":
# Get a nonexist domain name anyway
while virsh.domain_exists(vm_name):
vm_name += "_test"
# Prepare installation parameters
params["main_vm"] = vm_name
vm = env.create_vm("libvirt", None, vm_name, params,
test.bindir)
env.register_vm(vm_name, vm)
params["image_name"] = volumes.values()[volume_count - 1]
params["image_format"] = "raw"
params['force_create_image'] = "yes"
params['remove_image'] = "yes"
params['shutdown_cleanly'] = "yes"
params['shutdown_cleanly_timeout'] = 120
params['guest_port_unattended_install'] = 12323
params['inactivity_watcher'] = "error"
params['inactivity_treshold'] = 1800
params['image_verify_bootable'] = "no"
params['unattended_delivery_method'] = "cdrom"
params['drive_index_unattended'] = 1
params['drive_index_cd1'] = 2
params['boot_once'] = "d"
params['medium'] = "cdrom"
params['wait_no_ack'] = "yes"
params['image_raw_device'] = "yes"
params['backup_image_before_testing'] = "no"
params['kernel_params'] = ("ks=cdrom nicdelay=60 "
"console=ttyS0,115200 console=tty0")
params['cdroms'] = "unattended cd1"
params['redirs'] += " unattended_install"
selinux_mode = None
try:
selinux_mode = utils_selinux.get_status()
utils_selinux.set_status("permissive")
try:
unattended_install.run(test, params, env)
except process.CmdError, detail:
raise error.TestFail("Guest install failed:%s" % detail)
finally:
if selinux_mode is not None:
utils_selinux.set_status(selinux_mode)
env.unregister_vm(vm_name)
finally:
try:
if application == "install":
if virsh.domain_exists(vm_name):
virsh.remove_domain(vm_name)
elif application == "attach":
virsh.detach_disk(vm_name, disk_target)
finally:
pvtest.cleanup_pool(pool_name, pool_type,
pool_target, emulated_img,
device_name=block_device)
示例11: run
#.........这里部分代码省略.........
# Prepare step for different hypervisor
if hypervisor == "esx":
source_ip = vpx_ip
source_pwd = vpx_pwd
# Create password file to access ESX hypervisor
with open(vpx_pwd_file, 'w') as f:
f.write(vpx_pwd)
elif hypervisor == "xen":
source_ip = xen_ip
source_pwd = xen_pwd
# Set up ssh access using ssh-agent and authorized_keys
ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
try:
utils_misc.add_identities_into_ssh_agent()
except:
process.run("ssh-agent -k")
raise exceptions.TestError("Fail to setup ssh-agent")
else:
raise exceptions.TestSkipError("Unspported hypervisor: %s" % hypervisor)
# Create libvirt URI for the source node
v2v_uri = utils_v2v.Uri(hypervisor)
remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
logging.debug("Remote host uri for converting: %s", remote_uri)
# Make sure the VM exist before convert
virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
'remote_user': source_user, 'remote_pwd': source_pwd,
'debug': True}
remote_virsh = virsh.VirshPersistent(**virsh_dargs)
try:
if not remote_virsh.domain_exists(vm_name):
raise exceptions.TestError("VM '%s' not exist" % vm_name)
finally:
remote_virsh.close_session()
# Prepare libvirt storage pool
pool_type = params.get("pool_type")
pool_name = params.get("pool_name")
target_path = params.get("target_path")
libvirt_pool = utlv.PoolVolumeTest(test, params)
libvirt_pool.pre_pool(pool_name, pool_type, target_path, '')
# Preapre libvirt virtual network
network = params.get("network")
net_kwargs = {'net_name': network,
'address': params.get('network_addr'),
'dhcp_start': params.get('network_dhcp_start'),
'dhcp_end': params.get('network_dhcp_end')}
libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
net_info = virsh.net_info(network).stdout.strip()
bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
# Maintain a single params for v2v to avoid duplicate parameters
v2v_params = {'target': target, 'hypervisor': hypervisor,
'main_vm': vm_name, 'input_mode': input_mode,
'network': network, 'bridge': bridge,
'storage': pool_name, 'hostname': source_ip}
if vpx_dc:
v2v_params.update({"vpx_dc": vpx_dc})
if esx_ip:
v2v_params.update({"esx_ip": esx_ip})
if v2v_opts:
v2v_params.update({"v2v_opts": v2v_opts})
# Set libguestfs environment
os.environ['LIBGUESTFS_BACKEND'] = 'direct'
try:
# Execute virt-v2v command
ret = utils_v2v.v2v_cmd(v2v_params)
logging.debug("virt-v2v verbose messages:\n%s", ret)
if ret.exit_status != 0:
raise exceptions.TestFail("Convert VM failed")
logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
vm.start()
# Check all checkpoints after convert
vmchecker = VMChecker(test, params, env)
ret = vmchecker.run()
vmchecker.cleanup()
if ret == 0:
logging.info("All checkpoints passed")
else:
raise exceptions.TestFail("%s checkpoints failed" % ret)
finally:
if hypervisor == "esx":
os.remove(vpx_pwd_file)
if hypervisor == "xen":
process.run("ssh-agent -k")
# Clean libvirt VM
virsh.remove_domain(vm_name)
# Clean libvirt pool
if libvirt_pool:
libvirt_pool.cleanup_pool(pool_name, pool_type, target_path, '')
# Clean libvirt network
if libvirt_net:
libvirt_net.cleanup()
示例12: run
#.........这里部分代码省略.........
elif vm_ref == "name":
vm_ref = vm_name
# Ignore exception with "ignore_status=True"
if progress:
option += " --verbose"
option += extra_param
# For bypass_cache test. Run a shell command to check fd flags while
# excuting managedsave command
software_mgr = software_manager.SoftwareManager()
if not software_mgr.check_installed('lsof'):
logging.info('Installing lsof package:')
software_mgr.install('lsof')
bash_cmd = ("let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
"/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/*%s* |"
"grep 'flags:.*') && break; else sleep 0.05; fi; done;")
# Flags to check bypass cache take effect
flags = os.O_DIRECT
if test_bypass_cache:
# Drop caches.
drop_caches()
virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"1"), flags)
# Wait for VM in shut off state
wait_for_state("shut off")
virsh_cmd = "virsh start %s %s" % (option, vm_name)
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"0"), flags)
# Wait for VM in running state
wait_for_state("running")
elif test_libvirt_guests:
logging.debug("libvirt-guests status: %s", libvirt_guests.status())
if multi_guests:
check_multi_guests(multi_guests,
start_delay, libvirt_guests)
if check_flags:
check_guest_flags(bash_cmd, flags)
else:
# Ensure VM is running
utils_misc.wait_for(lambda: vm.state() == "running", 10)
ret = virsh.managedsave(vm_ref, options=option,
ignore_status=True, debug=True)
status = ret.exit_status
# The progress information outputed in error message
error_msg = ret.stderr.strip()
if move_saved_file:
cmd = "echo > %s" % managed_save_file
process.run(cmd, shell=True)
# recover libvirtd service start
if libvirtd_state == "off":
libvirtd.start()
if status_error:
if not status:
test.fail("Run successfully with wrong command!")
else:
if status:
test.fail("Run failed with right command")
if progress:
if not error_msg.count("Managedsave:"):
test.fail("Got invalid progress output")
if remove_after_cmd:
vm_msave_remove_check(vm_name)
elif test_undefine:
vm_undefine_check(vm_name)
elif autostart_bypass_cache:
libvirtd.stop()
virsh_cmd = ("(service libvirtd start)")
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"0"), flags)
elif test_loop_cmd:
loop_range = params.get("loop_range", "20")
vm_managedsave_loop(vm_name, loop_range, libvirtd)
else:
vm_recover_check(option, libvirtd, check_shutdown)
finally:
# Restore test environment.
# Restart libvirtd.service
qemu_config.restore()
libvirt_guests_config.restore()
libvirtd.restart()
if autostart_bypass_cache:
virsh.autostart(vm_name, "--disable",
ignore_status=True, debug=True)
vm.destroy(gracefully=False)
virsh.managedsave_remove(vm_name, debug=True)
vmxml_backup.sync()
if multi_guests:
for i in range(int(multi_guests)):
virsh.remove_domain("%s_%s" % (vm_name, i),
"--remove-all-storage",
debug=True)
示例13: run
#.........这里部分代码省略.........
(i, i, install_root), shell=True)
# Config basic network
net_file = install_root + '/etc/sysconfig/network'
with open(net_file, 'w') as f:
f.write('NETWORKING=yes\nHOSTNAME=%s\n' % vm_name)
net_script = install_root + '/etc/sysconfig/network-scripts/ifcfg-eth0'
with open(net_script, 'w') as f:
f.write('DEVICE=eth0\nBOOTPROTO=dhcp\nONBOOT=yes\n')
# Set root password and enable sshd
session = aexpect.ShellSession("chroot %s" % install_root)
session.sendline('echo %s|passwd root --stdin' % passwd)
session.sendline('chkconfig sshd on')
session.close()
# Create
result = virsh.create(vmxml.xml, **virsh_args)
utlv.check_exit_status(result)
check_state('running')
# Destroy
result = virsh.destroy(vm_name, **virsh_args)
utlv.check_exit_status(result)
if not virsh.domain_exists(vm_name, **virsh_args):
logging.info("Destroy transient LXC domain successfully")
else:
raise TestFail("Transient LXC domain still exist after destroy")
# Define
result = virsh.define(vmxml.xml, **virsh_args)
utlv.check_exit_status(result)
check_state('shut off')
# List
result = virsh.dom_list('--inactive', **virsh_args)
utlv.check_exit_status(result)
if re.findall("(%s)\s+shut off" % vm_name, result.stdout):
logging.info("Find %s in virsh list output", vm_name)
else:
raise TestFail("Not find %s in virsh list output")
# Dumpxml
result = virsh.dumpxml(vm_name, uri=uri, debug=False)
utlv.check_exit_status(result)
# Edit
edit_vcpu = '2'
logging.info("Change vcpu of LXC container to %s", edit_vcpu)
edit_cmd = [r":%s /[0-9]*<\/vcpu>/" + edit_vcpu + r"<\/vcpu>"]
if not utlv.exec_virsh_edit(vm_name, edit_cmd, connect_uri=uri):
raise TestFail("Run edit command fail")
else:
result = virsh.dumpxml(vm_name, **virsh_args)
new_vcpu = re.search(r'(\d*)</vcpu>', result.stdout).group(1)
if new_vcpu == edit_vcpu:
logging.info("vcpu number is expected after do edit")
else:
raise TestFail("vcpu number is unexpected after do edit")
# Start
result = virsh.start(vm_name, **virsh_args)
utlv.check_exit_status(result)
check_state('running')
# Suspend
result = virsh.suspend(vm_name, **virsh_args)
utlv.check_exit_status(result)
check_state('paused')
# Resume
result = virsh.resume(vm_name, **virsh_args)
utlv.check_exit_status(result)
check_state('running')
# Reboot(not supported on RHEL6)
result = virsh.reboot(vm_name, **virsh_args)
supported_err = 'not supported by the connection driver: virDomainReboot'
if supported_err in result.stderr.strip():
logging.info("Reboot is not supported")
else:
utlv.check_exit_status(result)
# Destroy
result = virsh.destroy(vm_name, **virsh_args)
utlv.check_exit_status(result)
check_state('shut off')
# Undefine
result = virsh.undefine(vm_name, **virsh_args)
utlv.check_exit_status(result)
if not virsh.domain_exists(vm_name, **virsh_args):
logging.info("Undefine LXC domain successfully")
else:
raise TestFail("LXC domain still exist after undefine")
finally:
virsh.remove_domain(vm_name, **virsh_args)
if full_os and os.path.exists(install_root):
shutil.rmtree(install_root)
示例14: run
def run(test, params, env):
    """
    Test storage pool and volumes with applications such as:
    install vms, attached to vms...

    Creates a storage pool with several volumes, then either attaches
    the last volume as a disk to a running guest and verifies it is
    usable (partition, mkfs, mount, write/read), or uses it as the
    target image for an unattended guest installation. The pool is
    cleaned up in the finally block.

    :param test: test object.
    :param params: test parameter dict.
    :param env: test environment object.
    """
    pool_type = params.get("pool_type")
    pool_name = "test_%s_app" % pool_type
    pool_target = params.get("pool_target")
    emulated_img = params.get("emulated_img", "emulated_img")
    volume_count = int(params.get("volume_count", 1))
    volume_size = params.get("volume_size", "1G")
    # Emulated backing image must hold all volumes plus 1G headroom;
    # volume_size[:-1] strips the trailing unit letter ("G").
    emulated_size = "%sG" % (volume_count * int(volume_size[:-1]) + 1)
    application = params.get("application", "install")
    disk_target = params.get("disk_target", "vdb")
    test_message = params.get("test_message", "")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    if application == "install":
        vm_name = params.get("vm_name", "vm1")
    try:
        pvtest = utlv.PoolVolumeTest(test, params)
        pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                        emulated_size)
        logging.debug("Current pools:\n%s",
                      libvirt_storage.StoragePool().list_pools())
        new_pool = libvirt_storage.PoolVolume(pool_name)
        volumes = create_volumes(new_pool, volume_count, volume_size)
        if application == "attach":
            vm = env.get_vm(vm_name)
            session = vm.wait_for_login()
            # The attach-disk action based on running guest,
            # so no need to recover the guest, it will be
            # recovered automatically after shutdown/reboot
            virsh.attach_disk(vm_name, volumes.values()[volume_count-1],
                              disk_target)
            vm_attach_device = "/dev/%s" % disk_target
            # Test if attached disk can be used normally:
            # partition it, create a filesystem, mount it and verify
            # a written message reads back unchanged.
            utlv.mk_part(vm_attach_device, session=session)
            session.cmd("mkfs.ext4 %s1" % vm_attach_device)
            session.cmd("mount %s1 /mnt" % vm_attach_device)
            session.cmd("echo %s > /mnt/test" % test_message)
            output = session.cmd_output("cat /mnt/test").strip()
            if output != test_message:
                raise error.TestFail("%s cannot be used normally!"
                                     % vm_attach_device)
        elif application == "install":
            # Get a nonexist domain name
            while virsh.domain_exists(vm_name):
                vm_name += "_test"
            # Install onto the last created volume as a raw image.
            params["image_name"] = volumes.values()[volume_count-1]
            params["image_format"] = "raw"
            try:
                unattended_install.run(test, params, env)
            except error.CmdError, detail:
                raise error.TestFail("Guest install failed:%s" % detail)
    finally:
        try:
            if application == "install":
                if virsh.domain_exists(vm_name):
                    virsh.remove_domain(vm_name)
        finally:
            # Always tear down the storage pool, even if domain
            # removal above failed.
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img)
示例15: run
#.........这里部分代码省略.........
# Detach the device.
if attach_device:
xml_file = libvirt.create_disk_xml(params)
ret = virsh.detach_device(vm_name, xml_file)
libvirt.check_exit_status(ret)
if additional_guest:
ret = virsh.detach_device(guest_name, xml_file)
libvirt.check_exit_status(ret)
elif attach_disk:
ret = virsh.detach_disk(vm_name, targetdev)
libvirt.check_exit_status(ret)
# Check disk in vm after detachment.
if attach_device or attach_disk:
session = vm.wait_for_login()
new_parts = libvirt.get_parts_list(session)
if len(new_parts) != len(old_parts):
test.fail("Disk still exists in vm"
" after detachment")
session.close()
except virt_vm.VMStartError as details:
for msg in unsupported_err:
if msg in str(details):
test.cancel(str(details))
else:
test.fail("VM failed to start."
"Error: %s" % str(details))
finally:
# Remove /etc/ceph/ceph.conf file if exists.
if os.path.exists('/etc/ceph/ceph.conf'):
os.remove('/etc/ceph/ceph.conf')
# Delete snapshots.
snapshot_lists = virsh.snapshot_list(vm_name)
if len(snapshot_lists) > 0:
libvirt.clean_up_snapshots(vm_name, snapshot_lists)
for snap in snapshot_lists:
virsh.snapshot_delete(vm_name, snap, "--metadata")
# Recover VM.
if vm.is_alive():
vm.destroy(gracefully=False)
if additional_guest:
virsh.remove_domain(guest_name,
"--remove-all-storage",
ignore_stauts=True)
# Remove the snapshot.
if create_snapshot:
cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
" purge {2} && rbd -m {0} {1} rm {2}"
"".format(mon_host, key_opt, disk_src_name))
process.run(cmd, ignore_status=True, shell=True)
elif create_volume:
cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
"".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name)))
process.run(cmd, ignore_status=True, shell=True)
cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
"".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume)))
process.run(cmd, ignore_status=True, shell=True)
clean_up_volume_snapshots()
else:
cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
"".format(mon_host, key_opt, disk_src_name))
process.run(cmd, ignore_status=True, shell=True)
# Delete tmp files.
if os.path.exists(key_file):
os.remove(key_file)
if os.path.exists(img_file):
os.remove(img_file)
# Clean up volume, pool
if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
virsh.vol_delete(vol_name, pool_name)
if pool_name and pool_name in virsh.pool_state_dict():
virsh.pool_destroy(pool_name, **virsh_dargs)
virsh.pool_undefine(pool_name, **virsh_dargs)
# Clean up secret
secret_list = get_secret_list()
if secret_list:
for secret_uuid in secret_list:
virsh.secret_undefine(secret_uuid)
logging.info("Restoring vm...")
vmxml_backup.sync()
if disk_snapshot_with_sanlock:
# Restore virt_use_sanlock setting.
process.run("setsebool -P virt_use_sanlock 0", shell=True)
# Restore qemu config
qemu_config.restore()
utils_libvirtd.Libvirtd().restart()
# Force shutdown sanlock service.
process.run("sanlock client shutdown -f 1", shell=True)
# Clean up lockspace folder
process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True)
if snapshot_path is not None:
for snapshot in snapshot_path:
if os.path.exists(snapshot):
os.remove(snapshot)