本文整理汇总了Python中virttest.virsh.managedsave函数的典型用法代码示例。如果您正苦于以下问题:Python managedsave函数的具体用法?Python managedsave怎么用?Python managedsave使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了managedsave函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: do_operation
def do_operation():
    """
    Do operation in guest os with vf and check the os behavior after operation.

    The operation is selected by the enclosing scope's ``operation`` variable:
      - "resume_suspend": suspend then resume the guest and verify the VF
        network recovers by resolving the guest IP from its MAC address.
      - "reboot": reboot the guest (recreating the serial console first so
        post-reboot login works) and verify connectivity the same way.
      - "save": managedsave is expected to FAIL for a guest with an assigned
        VF, so a non-zero exit status is asserted.
    """
    if operation == "resume_suspend":
        try:
            virsh.suspend(vm.name, debug=True, ignore_status=False)
            # BUGFIX: keyword was misspelled "ignore_statue", so a resume
            # failure was never propagated as intended.
            virsh.resume(vm.name, debug=True, ignore_status=False)
            get_ip_by_mac(mac_addr, timeout=120)
        except process.CmdError as detail:
            err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail)
            test.fail(err_msg)
    if operation == "reboot":
        try:
            # Tear down any stale serial console before rebooting so the
            # IP check below can log in cleanly afterwards.
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            virsh.reboot(vm.name, ignore_status=False)
            get_ip_by_mac(mac_addr, timeout=120)
        except process.CmdError as detail:
            err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
            test.fail(err_msg)
    if operation == "save":
        result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
        # A guest with a passthrough VF cannot be managedsaved; expect error.
        utils_test.libvirt.check_exit_status(result, expect_error=True)
示例2: vm_managedsave_loop
def vm_managedsave_loop(vm_name, loop_range, libvirtd):
    """
    Repeatedly managedsave and restart a domain, checking daemon and
    guest health after every cycle.

    :param vm_name: name of the domain to exercise
    :param loop_range: number of save/start cycles to run
    :param libvirtd: libvirtd service handle used for the health check
    :raises error.TestFail: if libvirtd stops or the guest is not running
    """
    # Make sure the guest is up before the first save.
    if vm.is_dead():
        virsh.start(vm_name)
    total_loops = int(loop_range)
    for loop_idx in range(total_loops):
        logging.debug("Test loop: %s" % loop_idx)
        virsh.managedsave(vm_name)
        virsh.start(vm_name)
        # The daemon must survive every managedsave/start cycle.
        if not libvirtd.is_running():
            raise error.TestFail("libvirtd is stopped after cmd")
        # And the guest must come back in the running state.
        if vm.state() != "running":
            raise error.TestFail("Guest isn't in running state")
示例3: manipulate_domain
def manipulate_domain(vm_name, action, recover=False):
    """
    Put a domain into a saved/suspended state, or bring it back.

    :param vm_name: name of the target domain
    :param action: one of "save", "managedsave", "s3", "s4"
    :param recover: False to perform the action, True to undo it
    """
    save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
    if not recover:
        # Forward direction: push the domain into the requested state.
        if action == "save":
            outcome = virsh.save(vm_name, save_file, "",
                                 ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
        elif action == "managedsave":
            outcome = virsh.managedsave(vm_name, "",
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
        elif action in ("s3", "s4"):
            # s3 suspends to memory, s4 suspends to disk.
            pm_target = "mem" if action == "s3" else "disk"
            outcome = virsh.dompmsuspend(vm_name, pm_target,
                                         ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
            if action == "s4":
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
        else:
            logging.debug("No operation for the domain")
    else:
        # Reverse direction: restore, start or wake the domain.
        if action == "save":
            if not os.path.exists(save_file):
                raise error.TestError("No save file for domain restore")
            outcome = virsh.restore(save_file, ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
            os.remove(save_file)
        elif action in ["managedsave", "s4"]:
            # Both states resume via a plain start.
            outcome = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
        elif action == "s3":
            outcome = virsh.dompmwakeup(vm_name, ignore_status=True,
                                        debug=True)
            libvirt.check_exit_status(outcome)
        else:
            logging.debug("No need recover the domain")
示例4: managedsave_hook
def managedsave_hook():
    """
    Do managedsave operation and check the results.

    Exercises the libvirt hook around ``virsh managedsave``:
      - domainxml_test: the hook script is built to swap the first disk
        source to a copied image; the disk source reported by libvirt is
        compared before/after save and after restore.
      - basic_test: the hook only logs; after restart, the hook log is
        checked for the "restore begin" invocation.

    NOTE(review): relies on names from the enclosing run() scope
    (vm, vm_name, hook_file, hook_script, hook_log, virsh_dargs,
    prepare_hook_file, check_hooks, vmxml_backup, domainxml_test,
    basic_test) — confirm against the full test module.
    """
    # Prefix expected at the start of each hook-log line: "<hook_file> <vm_name>".
    hook_para = "%s %s" % (hook_file, vm_name)
    save_file = os.path.join(test.tmpdir,
                             "%s.save" % vm_name)
    disk_src = vm.get_first_disk_devices()['source']
    if domainxml_test:
        # Copy the disk image; the hook rewrites the XML to point at it.
        disk_dist = "/tmp/%s.move" % vm_name
        shutil.copy(disk_src, disk_dist)
        script = (hook_script %
                  (vm_name, disk_src, disk_dist))
        prepare_hook_file(script)
    elif basic_test:
        prepare_hook_file(hook_script %
                          (vm_name, hook_log))
    ret = virsh.managedsave(vm_name, **virsh_dargs)
    libvirt.check_exit_status(ret)
    if domainxml_test:
        # The save-time hook must NOT have changed the live disk source.
        disk_src_save = vm.get_first_disk_devices()['source']
        if disk_src != disk_src_save:
            raise error.TestFail("Failed to check hooks for"
                                 " managedsave operation")
    # Starting the saved domain triggers the restore-phase hook; a
    # successful restore consumes the managedsave image.
    vm.start()
    if os.path.exists(save_file):
        os.remove(save_file)
    if domainxml_test:
        # After restore the hook-rewritten XML should use the copied image.
        disk_src_restore = vm.get_first_disk_devices()['source']
        if disk_dist != disk_src_restore:
            raise error.TestFail("Failed to check hooks for"
                                 " managedsave operation")
        vm.destroy()
        if os.path.exists(disk_dist):
            os.remove(disk_dist)
        # Restore the original domain XML.
        vmxml_backup.sync()
    if basic_test:
        # Verify the hook was invoked with "restore begin -".
        hook_str = hook_para + " restore begin -"
        if not check_hooks(hook_str):
            raise error.TestFail("Failed to check "
                                 "managedsave hooks.")
示例5: run
def run(test, params, env):
"""
Test command: virsh dompmsuspend <domain> <target>
The command suspends a running domain using guest OS's power management.
"""
# MAIN TEST CODE ###
# Process cartesian parameters
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
vm_state = params.get("vm_state", "running")
suspend_target = params.get("pm_suspend_target", "mem")
pm_enabled = params.get("pm_enabled", "not_set")
pm_enabled_disk = params.get("pm_enabled_disk", "no")
pm_enabled_mem = params.get("pm_enabled_mem", "no")
test_managedsave = "yes" == params.get("test_managedsave", "no")
test_save_restore = "yes" == params.get("test_save_restore", "no")
test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')
pmsuspend_error_msg = params.get("pmsuspend_error_msg")
agent_error_test = 'yes' == params.get("agent_error_test", 'no')
arch = platform.processor()
duration_value = int(params.get("duration", "0"))
# Libvirt acl test related params
uri = params.get("virsh_uri")
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
test.cancel("API acl test not supported in current"
" libvirt version.")
# A backup of original vm
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
vmxml_backup = vmxml.copy()
# Expected possible fail patterns.
# Error output should match one of these patterns.
# An empty list mean test should succeed.
fail_pat = []
virsh_dargs = {'debug': True, 'ignore_status': True}
if params.get('setup_libvirt_polkit') == 'yes':
virsh_dargs_copy = virsh_dargs.copy()
virsh_dargs_copy['uri'] = uri
virsh_dargs_copy['unprivileged_user'] = unprivileged_user
if pmsuspend_error:
fail_pat.append('access denied')
# Setup possible failure patterns excluding ppc
if "ppc64" not in arch:
if pm_enabled == 'not_set':
fail_pat.append('not supported')
if pm_enabled == 'no':
fail_pat.append('disabled')
if vm_state == 'paused':
# For older version
fail_pat.append('not responding')
# For newer version
fail_pat.append('not running')
elif vm_state == 'shutoff':
fail_pat.append('not running')
if agent_error_test:
fail_pat.append('not running')
fail_pat.append('agent not available')
if pmsuspend_error_msg:
fail_pat.append(pmsuspend_error_msg)
# RHEL6 or older releases
unsupported_guest_err = 'suspend mode is not supported by the guest'
try:
if vm.is_alive():
vm.destroy()
# Set pm tag in domain's XML if needed.
if "ppc64" not in arch:
if pm_enabled == 'not_set':
try:
if vmxml.pm:
del vmxml.pm
except xcepts.LibvirtXMLNotFoundError:
pass
else:
pm_xml = vm_xml.VMPMXML()
pm_xml.mem_enabled = pm_enabled_mem
pm_xml.disk_enabled = pm_enabled_disk
vmxml.pm = pm_xml
vmxml.sync()
try:
vm.prepare_guest_agent()
except virt_vm.VMStartError as info:
if "not supported" in str(info).lower():
test.cancel(info)
#.........这里部分代码省略.........
示例6: run
def run(test, params, env):
"""
Test virsh undefine command.
Undefine an inactive domain, or convert persistent to transient.
1.Prepare test environment.
2.Backup the VM's information to a xml file.
3.When the libvirtd == "off", stop the libvirtd service.
4.Perform virsh undefine operation.
5.Recover test environment.(libvirts service,VM)
6.Confirm the test result.
"""
vm_ref = params.get("undefine_vm_ref", "vm_name")
extra = params.get("undefine_extra", "")
option = params.get("undefine_option", "")
libvirtd_state = params.get("libvirtd", "on")
status_error = ("yes" == params.get("status_error", "no"))
undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
local_pwd = params.get("local_pwd", "password")
remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
remote_user = params.get("remote_user", "user")
remote_pwd = params.get("remote_pwd", "password")
remote_prompt = params.get("remote_prompt", "#")
pool_type = params.get("pool_type")
pool_name = params.get("pool_name", "test")
pool_target = params.get("pool_target")
volume_size = params.get("volume_size", "1G")
vol_name = params.get("vol_name", "test_vol")
emulated_img = params.get("emulated_img", "emulated_img")
emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
disk_target = params.get("disk_target", "vdb")
wipe_data = "yes" == params.get("wipe_data", "no")
if wipe_data:
option += " --wipe-storage"
nvram_o = None
if platform.machine() == 'aarch64':
nvram_o = " --nvram"
option += nvram_o
vm_name = params.get("main_vm", "avocado-vt-vm1")
vm = env.get_vm(vm_name)
vm_id = vm.get_id()
vm_uuid = vm.get_uuid()
# polkit acl related params
uri = params.get("virsh_uri")
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
test.cancel("API acl test not supported in current"
" libvirt version.")
# Back up xml file.Xen host has no guest xml file to define a guset.
backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
# Confirm how to reference a VM.
if vm_ref == "vm_name":
vm_ref = vm_name
elif vm_ref == "id":
vm_ref = vm_id
elif vm_ref == "hex_vm_id":
vm_ref = hex(int(vm_id))
elif vm_ref == "uuid":
vm_ref = vm_uuid
elif vm_ref.find("invalid") != -1:
vm_ref = params.get(vm_ref)
volume = None
pvtest = None
status3 = None
elems = backup_xml.xmltreefile.findall('/devices/disk/source')
existing_images = [elem.get('file') for elem in elems]
# Backup images since remove-all-storage could remove existing libvirt
# managed guest images
if existing_images and option.count("remove-all-storage"):
for img in existing_images:
backup_img = img + '.bak'
logging.info('Backup %s to %s', img, backup_img)
shutil.copyfile(img, backup_img)
try:
save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
if option.count("managedsave") and vm.is_alive():
virsh.managedsave(vm_name)
if not vm.is_lxc():
snp_list = virsh.snapshot_list(vm_name)
if option.count("snapshot"):
snp_file_list = []
if not len(snp_list):
virsh.snapshot_create(vm_name)
logging.debug("Create a snapshot for test!")
#.........这里部分代码省略.........
示例7: run
def run(test, params, env):
"""
Test command: virsh managedsave.
This command can save and destroy a
running domain, so it can be restarted
from the same state at a later time.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
# define function
def vm_recover_check(guest_name, option):
"""
Check if the vm can be recovered correctly.
:param guest_name : Checked vm's name.
:param option : managedsave command option.
"""
# This time vm not be shut down
if vm.is_alive():
raise error.TestFail("Guest should be inactive")
virsh.start(guest_name)
# This time vm should be in the list
if vm.is_dead():
raise error.TestFail("Guest should be active")
if option:
if option.count("running"):
if vm.is_dead() or vm.is_paused():
raise error.TestFail("Guest state should be"
" running after started"
" because of '--running' option")
elif option.count("paused"):
if not vm.is_paused():
raise error.TestFail("Guest state should be"
" paused after started"
" because of '--paused' option")
else:
if params.get("paused_after_start_vm") == "yes":
if not vm.is_paused():
raise error.TestFail("Guest state should be"
" paused after started"
" because of initia guest state")
domid = vm.get_id()
domuuid = vm.get_uuid()
status_error = ("yes" == params.get("status_error", "no"))
vm_ref = params.get("managedsave_vm_ref")
libvirtd = params.get("libvirtd", "on")
extra_param = params.get("managedsave_extra_param", "")
progress = ("yes" == params.get("managedsave_progress", "no"))
option = params.get("managedsave_option", "")
if option:
if not virsh.has_command_help_match('managedsave', option):
# Older libvirt does not have this option
raise error.TestNAError("Older libvirt does not"
" handle arguments consistently")
# run test case
if vm_ref == "id":
vm_ref = domid
elif vm_ref == "uuid":
vm_ref = domuuid
elif vm_ref == "hex_id":
vm_ref = hex(int(domid))
elif vm_ref.count("invalid"):
vm_ref = params.get(vm_ref)
elif vm_ref == "name":
vm_ref = vm_name
# stop the libvirtd service
if libvirtd == "off":
utils_libvirtd.libvirtd_stop()
# Ignore exception with "ignore_status=True"
if progress:
option += " --verbose"
option += extra_param
ret = virsh.managedsave(vm_ref, options=option, ignore_status=True)
status = ret.exit_status
# The progress information outputed in error message
error_msg = ret.stderr.strip()
# recover libvirtd service start
if libvirtd == "off":
utils_libvirtd.libvirtd_start()
# check status_error
try:
if status_error:
if not status:
raise error.TestFail("Run successfully with wrong command!")
else:
if status:
raise error.TestFail("Run failed with right command")
if progress:
if not error_msg.count("Managedsave:"):
raise error.TestFail("Got invalid progress output")
#.........这里部分代码省略.........
示例8: set_condition
def set_condition(vm_name, condn, reset=False, guestbt=None):
"""
Set domain to given state or reset it.
"""
bt = None
if not reset:
if condn == "avocadotest":
bt = utils_test.run_avocado_bg(vm, params, test)
if not bt:
test.cancel("guest stress failed to start")
# Allow stress to start
time.sleep(condn_sleep_sec)
return bt
elif condn == "stress":
utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
elif condn in ["save", "managedsave"]:
# No action
pass
elif condn == "suspend":
result = virsh.suspend(vm_name, ignore_status=True, debug=True)
libvirt.check_exit_status(result)
elif condn == "hotplug":
result = virsh.setvcpus(vm_name, max_vcpu, "--live",
ignore_status=True, debug=True)
libvirt.check_exit_status(result)
exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
'cur_config': current_vcpu, 'cur_live': max_vcpu,
'guest_live': max_vcpu}
result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
option="--live")
elif condn == "host_smt":
if cpu.get_cpu_arch() == 'power9':
result = process.run("ppc64_cpu --smt=4", shell=True)
else:
test.cancel("Host SMT changes not allowed during guest live")
else:
logging.debug("No operation for the domain")
else:
if condn == "save":
save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
result = virsh.save(vm_name, save_file,
ignore_status=True, debug=True)
libvirt.check_exit_status(result)
time.sleep(condn_sleep_sec)
if os.path.exists(save_file):
result = virsh.restore(save_file, ignore_status=True,
debug=True)
libvirt.check_exit_status(result)
os.remove(save_file)
else:
test.error("No save file for domain restore")
elif condn == "managedsave":
result = virsh.managedsave(vm_name,
ignore_status=True, debug=True)
libvirt.check_exit_status(result)
time.sleep(condn_sleep_sec)
result = virsh.start(vm_name, ignore_status=True, debug=True)
libvirt.check_exit_status(result)
elif condn == "suspend":
result = virsh.resume(vm_name, ignore_status=True, debug=True)
libvirt.check_exit_status(result)
elif condn == "avocadotest":
guestbt.join(ignore_status=True)
elif condn == "stress":
utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
elif condn == "hotplug":
result = virsh.setvcpus(vm_name, current_vcpu, "--live",
ignore_status=True, debug=True)
libvirt.check_exit_status(result)
exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu,
'cur_config': current_vcpu, 'cur_live': current_vcpu,
'guest_live': current_vcpu}
result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
option="--live")
elif condn == "host_smt":
result = process.run("ppc64_cpu --smt=2", shell=True)
# Change back the host smt
result = process.run("ppc64_cpu --smt=4", shell=True)
# Work around due to known cgroup issue after cpu hot(un)plug
# sequence
root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
machine_cpuset_paths = []
if os.path.isdir(os.path.join(root_cpuset_path,
"machine.slice")):
machine_cpuset_paths.append(os.path.join(root_cpuset_path,
"machine.slice"))
if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
machine_cpuset_paths.append(os.path.join(root_cpuset_path,
"machine"))
if not machine_cpuset_paths:
logging.warning("cgroup cpuset might not recover properly "
"for guests after host smt changes, "
"restore it manually")
root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
for path in machine_cpuset_paths:
machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
# check if file content differs
cmd = "diff %s %s" % (root_cpuset_cpus,
machine_cpuset_cpus)
#.........这里部分代码省略.........
示例9: run
#.........这里部分代码省略.........
if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
logging.info('PASS on qemu cmd line check.')
else:
error += 'Fail on qemu cmd line check.'
if error:
test.fail(error)
def check_mtu_in_vm(fn_login, mtu_size):
"""
Check if mtu meets expectations in vm
"""
session = fn_login()
check_cmd = 'ifconfig'
output = session.cmd(check_cmd)
session.close()
logging.debug(output)
if 'mtu %s' % mtu_size not in output:
test.fail('MTU check inside vm failed.')
else:
logging.debug("MTU check inside vm passed.")
try:
bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
if add_pkg:
add_pkg = add_pkg.split()
utils_package.package_install(add_pkg)
if 'openvswitch' in add_pkg:
br = 'ovsbr0' + utils_misc.generate_random_string(3)
process.run('systemctl start openvswitch.service', shell=True, verbose=True)
process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
process.run('ovs-vsctl show', shell=True, verbose=True)
if not check or check in ['save', 'managedsave', 'hotplug_save']:
# Create bridge or network and set mtu
iface_type = 'network'
if net_type in ('bridge', 'openvswitch'):
if net_type == 'bridge':
params['con_name'], br = create_bridge()
if mtu_type == 'network':
test_net = create_network_xml(
bridge_name, net_type,
bridge_name=br
)
virsh.net_create(test_net, debug=True)
virsh.net_dumpxml(bridge_name, debug=True)
if mtu_type == 'interface':
iface_type = net_type
bridge_name = br
elif net_type == 'network':
if mtu_type == 'network':
set_network(mtu_size)
iface_mtu = 0
if mtu_type == 'interface':
iface_mtu = mtu_size
if mtu_type == 'network' and with_iface:
mtu_size = str(int(mtu_size)//2)
iface_mtu = mtu_size
source_net = bridge_name if net_type in ('bridge', 'openvswitch') else 'default'
# set mtu in vm interface
set_interface(iface_mtu, source_network=source_net, iface_type=iface_type, iface_model=model)
vm.start()
vm_login = vm.wait_for_serial_login if net_type in ('bridge', 'openvswitch') else vm.wait_for_login
示例10: run
#.........这里部分代码省略.........
# get remote session
session = remote.wait_for_login("ssh", remote_ip, "22", "root",
remote_pwd, "#")
# get uri of local
uri = libvirt_vm.complete_uri(local_ip)
cmd = "virsh -c %s start %s" % (uri, vm_ref)
status, output = session.cmd_status_output(cmd)
if status:
raise error.TestError(vm_ref, output)
elif opt.count("console"):
# With --console, start command will print the
# dmesg of guest in starting and turn into the
# login prompt. In this case, we start it with
# --console and login vm in console by
# remote.handle_prompts().
cmd = "start %s --console" % vm_ref
virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
virsh_session.sendline(cmd)
remote.handle_prompts(virsh_session, params.get("username", ""),
params.get("password", ""), r"[\#\$]\s*$",
timeout=60, debug=True)
elif opt.count("autodestroy"):
# With --autodestroy, vm will be destroyed when
# virsh session closed. Then we execute start
# command in a virsh session and start vm with
# --autodestroy. Then we closed the virsh session,
# and check the vm is destroyed or not.
virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
cmd = "start %s --autodestroy" % vm_ref
status = virsh_session.cmd_status(cmd)
if status:
raise error.TestFail("Failed to start vm with --autodestroy.")
# Close the session, then the vm shoud be destroyed.
virsh_session.close()
elif opt.count("force-boot"):
# With --force-boot, VM will be stared from boot
# even we have saved it with virsh managedsave.
# In this case, we start vm and execute sleep 1000&,
# then save it with virsh managedsave. At last, we
# start vm with --force-boot. To verify the result,
# we check the sleep process. If the process exists,
# force-boot failed, else case pass.
vm.start()
session = vm.wait_for_login()
status = session.cmd_status("sleep 1000&")
if status:
raise error.TestError("Can not execute command in guest.")
sleep_pid = session.cmd_output("echo $!").strip()
virsh.managedsave(vm_ref)
virsh.start(vm_ref, options=opt)
else:
cmd_result = virsh.start(vm_ref, options=opt)
if cmd_result.exit_status:
if status_error == "no":
raise error.TestFail("Start vm failed.\n Detail: %s"
% cmd_result)
else:
# start vm successfully
if status_error == "yes":
raise error.TestFail("Run successfully with wrong "
"command!\n Detail:%s"
% cmd_result)
if opt.count("paused"):
if not (vm.state() == "paused"):
raise error.TestFail("VM is not paused when started with "
"--paused.")
elif opt.count("autodestroy"):
if vm.is_alive():
raise error.TestFail("VM was started with --autodestroy,"
"but not destroyed when virsh session "
"closed.")
elif opt.count("force-boot"):
session = vm.wait_for_login()
status = session.cmd_status("ps %s |grep '[s]leep 1000'"
% sleep_pid)
if not status:
raise error.TestFail("VM was started with --force-boot,"
"but it is restored from a"
" managedsave.")
else:
if status_error == "no" and not vm.is_alive():
raise error.TestFail("VM was started but it is not alive.")
except remote.LoginError, detail:
raise error.TestFail("Failed to login guest.")
finally:
# clean up
if libvirtd_state == "off":
utils_libvirtd.libvirtd_start()
elif pre_operation == "rename":
libvirt_xml.VMXML.vm_rename(vm, backup_name)
if vm and vm.is_paused():
vm.resume()
# Restore VM
vmxml_backup.sync()
示例11: run
def run(test, params, env):
"""
Test virsh undefine command.
Undefine an inactive domain, or convert persistent to transient.
1.Prepare test environment.
2.Backup the VM's information to a xml file.
3.When the libvirtd == "off", stop the libvirtd service.
4.Perform virsh undefine operation.
5.Recover test environment.(libvirts service,VM)
6.Confirm the test result.
"""
vm_ref = params.get("undefine_vm_ref", "vm_name")
extra = params.get("undefine_extra", "")
option = params.get("undefine_option", "")
libvirtd_state = params.get("libvirtd", "on")
status_error = ("yes" == params.get("status_error", "no"))
undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
remote_user = params.get("remote_user", "user")
remote_pwd = params.get("remote_pwd", "password")
remote_prompt = params.get("remote_prompt", "#")
pool_type = params.get("pool_type")
pool_name = params.get("pool_name", "test")
pool_target = params.get("pool_target")
volume_size = params.get("volume_size", "1G")
vol_name = params.get("vol_name", "test_vol")
emulated_img = params.get("emulated_img", "emulated_img")
emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
disk_target = params.get("disk_target", "vdb")
wipe_data = "yes" == params.get("wipe_data", "no")
if wipe_data:
option += " --wipe-storage"
vm_name = params.get("main_vm", "virt-tests-vm1")
vm = env.get_vm(vm_name)
vm_id = vm.get_id()
vm_uuid = vm.get_uuid()
# polkit acl related params
uri = params.get("virsh_uri")
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
raise error.TestNAError("API acl test not supported in current"
" libvirt version.")
# Back up xml file.Xen host has no guest xml file to define a guset.
backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
# Confirm how to reference a VM.
if vm_ref == "vm_name":
vm_ref = vm_name
elif vm_ref == "id":
vm_ref = vm_id
elif vm_ref == "hex_vm_id":
vm_ref = hex(int(vm_id))
elif vm_ref == "uuid":
vm_ref = vm_uuid
elif vm_ref.find("invalid") != -1:
vm_ref = params.get(vm_ref)
volume = None
pvtest = None
status3 = None
try:
save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
if option.count("managedsave") and vm.is_alive():
virsh.managedsave(vm_name)
if not vm.is_lxc():
snp_list = virsh.snapshot_list(vm_name)
if option.count("snapshot"):
snp_file_list = []
if not len(snp_list):
virsh.snapshot_create(vm_name)
logging.debug("Create a snapshot for test!")
else:
# Backup snapshots for domain
for snp_item in snp_list:
tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
snp_file_list.append(tmp_file)
else:
if len(snp_list):
raise error.TestNAError("This domain has snapshot(s), "
"cannot be undefined!")
if option.count("remove-all-storage"):
pvtest = utlv.PoolVolumeTest(test, params)
pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
emulated_size=emulated_size)
new_pool = libvirt_storage.PoolVolume(pool_name)
if not new_pool.create_volume(vol_name, volume_size):
#.........这里部分代码省略.........
示例12: manipulate_domain
def manipulate_domain(vm_name, vm_operation, recover=False):
    """
    Operate domain to given state or recover it.

    :param vm_name: Name of the VM domain
    :param vm_operation: Operation to be performed on the VM domain,
                         e.g. save, managedsave, s3, s4, suspend, reboot
    :param recover: False to apply the operation, True to undo it
    """
    save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
    if recover:
        # Undo a previously applied operation.
        if vm_operation == "save":
            if os.path.exists(save_file):
                outcome = virsh.restore(save_file, ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(outcome)
                os.remove(save_file)
            else:
                test.error("No save file for domain restore")
        elif vm_operation in ["managedsave", "s4"]:
            # Both states resume via a plain start.
            outcome = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
        elif vm_operation == "s3":
            outcome = virsh.dompmwakeup(vm_name, ignore_status=True,
                                        debug=True)
            libvirt.check_exit_status(outcome)
        elif vm_operation == "suspend":
            outcome = virsh.resume(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
        elif vm_operation == "reboot":
            # Nothing to undo after a reboot.
            pass
        else:
            logging.debug("No need recover the domain")
    else:
        # Apply the requested operation.
        if vm_operation == "save":
            outcome = virsh.save(vm_name, save_file, "",
                                 ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
        elif vm_operation == "managedsave":
            outcome = virsh.managedsave(vm_name, "",
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
        elif vm_operation in ("s3", "s4"):
            # s3 suspends to memory, s4 suspends to disk.
            pm_target = "mem" if vm_operation == "s3" else "disk"
            outcome = virsh.dompmsuspend(vm_name, pm_target,
                                         ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
            if vm_operation == "s4":
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
        elif vm_operation == "suspend":
            outcome = virsh.suspend(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(outcome)
        elif vm_operation == "reboot":
            vm.reboot()
            # NOTE(review): assigned but never used in this scope; presumably
            # meant to be read by the enclosing test -- confirm against caller.
            vm_uptime_init = vm.uptime()
        else:
            logging.debug("No operation for the domain")
示例13: run_virsh_domjobinfo
def run_virsh_domjobinfo(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domjobinfo operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    pre_vm_state = params.get("domjobinfo_pre_vm_state", "null")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    tmp_file = os.path.join(test.tmpdir, '%s.tmp' % vm_name)

    # Put the guest into the requested pre-test state so that a job
    # (dump/save/restore/managedsave) has run before domjobinfo is queried.
    if pre_vm_state == "dump":
        virsh.dump(vm_name, tmp_file)
    elif pre_vm_state in ("save", "restore"):
        virsh.save(vm_name, tmp_file)
        if pre_vm_state == "restore":
            virsh.restore(tmp_file)
    elif pre_vm_state == "managedsave":
        virsh.managedsave(vm_name)

    # Resolve the domain reference used on the command line.
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif "invalid" in vm_ref:
        vm_ref = params.get(vm_ref)

    # Optionally run the command with the daemon stopped.
    daemon_stopped = (libvirtd == "off")
    if daemon_stopped:
        utils_libvirtd.libvirtd_stop()
    status = virsh.domjobinfo(vm_ref, ignore_status=True).exit_status
    if daemon_stopped:
        utils_libvirtd.libvirtd_start()

    # Compare the exit status against the expectation.
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
示例14: run_virsh_managedsave
def run_virsh_managedsave(test, params, env):
    """
    Test command: virsh managedsave.

    managedsave saves and destroys a running domain so that it can be
    restarted later from the same state.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])

    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        @param: guest_name : Checked vm's name.
        """
        listing = virsh.dom_list()
        # After managedsave the guest must be gone from the active list.
        if re.search(guest_name, listing.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
        # Now it must be back in the active list.
        listing = virsh.dom_list()
        if not re.search(guest_name, listing.stdout):
            raise error.TestFail("virsh list output invalid")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd = params.get("managedsave_libvirtd", "on")

    # Resolve the domain reference used on the command line.
    vm_ref = params.get("managedsave_vm_ref")
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref in ("managedsave_invalid_id", "managedsave_invalid_uuid"):
        vm_ref = params.get(vm_ref)
    elif vm_ref in ("name", "extra_parame"):
        vm_ref = "%s %s" % (vm_name, params.get("managedsave_extra_parame"))

    # Optionally run the command while libvirtd is stopped.
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()
    # Ignore exception with "ignore_status=True".
    status = virsh.managedsave(vm_ref, ignore_status=True).exit_status
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # Compare the exit status against the expectation.
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if not virsh.has_command_help_match('managedsave', r'\s+--running\s+'):
                # Older libvirt does not have --running parameter
                raise error.TestNAError("Older libvirt does not handle arguments consistently")
            else:
                raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        vm_recover_check(vm_name)
示例15: run
#.........这里部分代码省略.........
cpu_xml.fallback = model_fallback
cpu_xml.numa_cell = cells
vmxml.cpu = cpu_xml
# Delete memory and currentMemory tag,
# libvirt will fill it automatically
del vmxml.max_mem
del vmxml.current_mem
# hugepages setting
if huge_pages:
membacking = vm_xml.VMMemBackingXML()
hugepages = vm_xml.VMHugepagesXML()
pagexml_list = []
for i in range(len(huge_pages)):
pagexml = hugepages.PageXML()
pagexml.update(huge_pages[i])
pagexml_list.append(pagexml)
hugepages.pages = pagexml_list
membacking.hugepages = hugepages
vmxml.mb = membacking
logging.debug("vm xml: %s", vmxml)
vmxml.sync()
pre_vm_state = params.get("pre_vm_state", "running")
attach_device = "yes" == params.get("attach_device", "no")
detach_device = "yes" == params.get("detach_device", "no")
attach_error = "yes" == params.get("attach_error", "no")
start_error = "yes" == params.get("start_error", "no")
detach_error = "yes" == params.get("detach_error", "no")
maxmem_error = "yes" == params.get("maxmem_error", "no")
attach_option = params.get("attach_option", "")
test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
test_managedsave = "yes" == params.get("test_managedsave", "no")
test_save_restore = "yes" == params.get("test_save_restore", "no")
test_mem_binding = "yes" == params.get("test_mem_binding", "no")
restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
add_mem_device = "yes" == params.get("add_mem_device", "no")
test_dom_xml = "yes" == params.get("test_dom_xml", "no")
max_mem = params.get("max_mem")
max_mem_rt = params.get("max_mem_rt")
max_mem_slots = params.get("max_mem_slots", "16")
#cur_mem = params.get("current_mem")
numa_cells = params.get("numa_cells", "").split()
set_max_mem = params.get("set_max_mem")
# params for attached device
tg_size = params.get("tg_size")
tg_sizeunit = params.get("tg_sizeunit", 'KiB')
tg_node = params.get("tg_node", 0)
pg_size = params.get("page_size")
pg_unit = params.get("page_unit", "KiB")
node_mask = params.get("node_mask", "0")
mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
huge_pages = [ast.literal_eval(x)
for x in params.get("huge_pages", "").split()]
numa_memnode = [ast.literal_eval(x)
for x in params.get("numa_memnode", "").split()]
# Back up xml file.
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
try:
# Drop caches first for host has enough memory
drop_caches()
# Destroy domain first