This article collects and summarizes typical usage examples of the Python function virttest.virsh.detach_device. If you have been wondering how to use detach_device, what it does, or where to find concrete examples, the hand-picked code samples below may help.
The following presents 15 code examples of the detach_device function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
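Before the examples, here is a minimal, hypothetical sketch of the typical call pattern. The guest name and device XML path below are placeholders, and the snippet assumes an avocado-vt environment where virttest is importable; detach_device returns a CmdResult whose exit_status is non-zero on failure.

from virttest import virsh

# Detach the device described by /tmp/detach-disk.xml (placeholder path) from
# the running guest "avocado-vt-vm1" (placeholder name). "--live" affects only
# the running domain; debug=True logs the virsh command and its output.
result = virsh.detach_device("avocado-vt-vm1", "/tmp/detach-disk.xml",
                             flagstr="--live", debug=True)
if result.exit_status:
    raise RuntimeError("detach-device failed: %s" % result.stderr.strip())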
Example 1: unhotplug_serial_device
def unhotplug_serial_device(hotplug_type, char_dev, index=1):
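    """Hot-unplug a serial device via QEMU monitor commands or virsh detach-device."""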
if hotplug_type == "qmp":
del_dev_opt = "device_del %s%s" % (char_dev, index)
del_char_opt = "chardev-remove %s%s" % (char_dev, index)
virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
elif hotplug_type == "attach":
xml_file = "%s/xml_%s%s" % (tmp_dir, char_dev, index)
virsh.detach_device(vm_name, xml_file, flagstr="--live")
Example 2: unhotplug_serial_device
def unhotplug_serial_device(type, char_dev):
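    """Hot-unplug a serial device via QEMU monitor commands or virsh detach-device, reporting failures."""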
if type == "qmp":
del_dev_opt = "device_del %s" % char_dev
del_char_opt = "chardev-remove %s" % char_dev
result = virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
if result.exit_status:
raise error.TestError('Failed to del device %s from %s.Result:\n%s'
% (char_dev, vm_name, result))
result = virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
elif type == "attach":
result = virsh.detach_device(vm_name, xml_file)
Example 3: device_hotunplug
def device_hotunplug():
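    """Hot-unplug the device live; on libvirt older than 3.10.0 also reattach the host PCI devices."""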
result = virsh.detach_device(vm_name, dev.xml,
flagstr="--live", debug=True)
if result.exit_status:
test.fail(result.stdout.strip())
else:
logging.debug(result.stdout.strip())
    # FIXME: after detaching the device from the guest, it needs some time
    # before any other operation can be performed on the device
time.sleep(timeout)
if not libvirt_version.version_compare(3, 10, 0):
pci_devs.sort()
reattach_device(pci_devs, pci_ids)
Example 4: detach_interface
def detach_interface():
"""
Detach interface:
1.Detach interface from xml;
2.Check the live xml after detach interface;
3.Check the vf driver after detach interface.
"""
def _detach_completed():
result = virsh.domiflist(vm_name, "", ignore_status=True)
return result.stdout.find(mac_addr) == -1
result = virsh.detach_device(vm_name, new_iface.xml)
utils_test.libvirt.check_exit_status(result, expect_error=False)
utils_misc.wait_for(_detach_completed, timeout=60)
live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
device = live_xml.devices
logging.debug("Domain xml after detach interface:\n %s", live_xml)
if vf_type == "vf" or vf_type == "vf_pool":
for interface in device.by_device_tag("interface"):
if interface.type_name == "hostdev":
if interface.hostdev_address.attrs == vf_addr_attrs:
test.fail("The hostdev interface still in the guest xml after detach\n")
break
driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
logging.debug("The driver after vf detached from guest is %s\n", driver)
if managed == "no":
if driver != "vfio-pci":
test.fail("The vf pci driver is not vfio-pci after detached from guest with managed as no\n")
result = virsh.nodedev_reattach(nodedev_pci_addr)
utils_test.libvirt.check_exit_status(result, expect_error=False)
elif driver != origin_driver:
test.fail("The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n" % (driver, origin_driver))
else:
for interface in device.by_device_tag("interface"):
if interface.type_name == "direct":
if interface.source["dev"] == vf_name:
test.fail("The macvtap interface still exist in the guest xml after detach\n")
break
Example 5: run
def run(test, params, env):
"""
    Test interface xml options.
    1. Prepare the test environment, destroy or suspend a VM.
    2. Edit the xml and start the domain.
    3. Perform the test operation.
    4. Recover the test environment.
    5. Confirm the test result.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
if vm.is_alive():
vm.wait_for_login()
def create_iface_xml(mac):
"""
Create interface xml file
"""
iface = Interface(type_name=iface_type)
iface.source = iface_source
iface.model = iface_model if iface_model else "virtio"
if iface_target:
iface.target = {'dev': iface_target}
iface.mac_address = mac
if iface_rom:
iface.rom = eval(iface_rom)
logging.debug("Create new interface xml: %s", iface)
return iface
# Interface specific attributes.
iface_num = params.get("iface_num", '1')
iface_type = params.get("iface_type", "network")
iface_source = eval(params.get("iface_source",
"{'network':'default'}"))
iface_model = params.get("iface_model")
iface_target = params.get("iface_target")
iface_mac = params.get("iface_mac")
iface_rom = params.get("iface_rom")
attach_device = "yes" == params.get("attach_device", "no")
attach_iface = "yes" == params.get("attach_iface", "no")
attach_option = params.get("attach_option", "")
detach_device = "yes" == params.get("detach_device")
stress_test = "yes" == params.get("stress_test")
restart_libvirtd = "yes" == params.get("restart_libvirtd",
"no")
start_vm = "yes" == params.get("start_vm", "yes")
options_test = "yes" == params.get("options_test", "no")
username = params.get("username")
password = params.get("password")
poll_timeout = int(params.get("poll_timeout", 10))
err_msgs1 = params.get("err_msgs1")
err_msgs2 = params.get("err_msgs2")
err_msg_rom = params.get("err_msg_rom")
    # stress_test requires a detach operation
stress_test_detach_device = False
stress_test_detach_interface = False
if stress_test:
if attach_device:
stress_test_detach_device = True
if attach_iface:
stress_test_detach_interface = True
    # The following detach-device step also uses the attach option
detach_option = attach_option
# Back up xml file.
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
#iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
libvirtd = utils_libvirtd.Libvirtd()
# Check virsh command option
check_cmds = []
sep_options = attach_option.split()
logging.debug("sep_options: %s" % sep_options)
for sep_option in sep_options:
if attach_device and sep_option:
check_cmds.append(('attach-device', sep_option))
if attach_iface and sep_option:
check_cmds.append(('attach-interface', sep_option))
if (detach_device or stress_test_detach_device) and sep_option:
check_cmds.append(('detach-device', sep_option))
if stress_test_detach_interface and sep_option:
check_cmds.append(('detach-device', sep_option))
for cmd, option in check_cmds:
libvirt.virsh_cmd_has_option(cmd, option)
try:
try:
# Attach an interface when vm is running
iface_list = []
err_msgs = ("No more available PCI slots",
"No more available PCI addresses")
if not start_vm:
virsh.destroy(vm_name)
for i in range(int(iface_num)):
if attach_device:
logging.info("Try to attach device loop %s" % i)
#......... part of the code is omitted here .........
Example 6: run
#......... part of the code is omitted here .........
libvirt.check_exit_status(result)
xml_after_attach = VMXML.new_from_dumpxml(vm_name)
logging.debug(virsh.dumpxml(vm_name))
# Check if the iface with given mac address is successfully
# attached with address bus equal to pcie/pci bridge's index
iface_list = [
iface for iface in xml_after_attach.get_devices('interface')
if iface.mac_address == mac and
int(iface.address['attrs']['bus'], 16) == int(pci_br_index, 16)
]
logging.debug('iface list after attach: %s', iface_list)
if not iface_list:
test.error('Failed to attach interface %s' % iface)
# Check inside vm
def check_inside_vm(session, expect=True):
ip_output = session.cmd('ip a')
logging.debug('output of "ip a": %s', ip_output)
return expect if mac in ip_output else not expect
session = vm.wait_for_serial_login()
if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
timeout=60, step=5):
test.fail('Check interface inside vm failed,'
'interface not successfully attached:'
'not found mac address %s' % mac)
session.close()
# Test hotunplug
result = virsh.detach_device(vm_name, iface.xml, debug=True)
libvirt.check_exit_status(result)
logging.debug(virsh.dumpxml(vm_name))
# Check if the iface with given mac address has been
# successfully detached
xml_after_detach = VMXML.new_from_dumpxml(vm_name)
iface_list_after_detach = [
iface for iface in xml_after_detach.get_devices('interface')
if iface.mac_address == mac
]
logging.debug('iface list after detach: %s', iface_list_after_detach)
if iface_list_after_detach:
test.fail('Failed to detach device: %s' % iface)
# Check again inside vm
session = vm.wait_for_serial_login()
if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
timeout=60, step=5):
test.fail('Check interface inside vm failed,'
'interface not successfully detached:'
'found mac address %s' % mac)
session.close()
# Other test scenarios of pci/pcie
if case:
logging.debug('iface_kwargs: %s', iface_kwargs)
# Setting pcie-to-pci-bridge model name !=pcie-pci-bridge.
# or Invalid controller index for pcie-to-pci-bridge.
if case in ('wrong_model_name', 'invalid_index'):
Example 7: run
#......... part of the code is omitted here .........
vm_xml.VMXML.set_pm_suspend(vm_name)
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
logging.debug("Attempting to set guest agent channel")
vmxml.set_agent_channel()
vmxml.sync()
if gluster_disk:
# Setup glusterfs and disk xml.
disk_img = "gluster.%s" % disk_format
host_ip = prepare_gluster_disk(disk_img, disk_format)
mnt_src = "%s:%s" % (host_ip, vol_name)
global custom_disk
custom_disk = build_disk_xml(disk_img, disk_format, host_ip)
start_vm = "yes" == params.get("start_vm", "yes")
# set domain options
if dom_iothreads:
try:
vmxml.iothreads = int(dom_iothreads)
vmxml.sync()
except ValueError:
                # 'iothreads' may not be a valid number in negative tests
logging.debug("Can't convert '%s' to integer type",
dom_iothreads)
if default_pool:
disks_dev = vmxml.get_devices(device_type="disk")
for disk in disks_dev:
vmxml.del_device(disk)
vmxml.sync()
# If hot plug, start VM first, otherwise stop VM if running.
if start_vm:
if vm.is_dead():
vm.start()
else:
if not vm.is_dead():
vm.destroy()
# If gluster_disk is True, use attach_device.
attach_option = params.get("attach_option", "")
if gluster_disk:
cmd_result = virsh.attach_device(domainarg=vm_name, filearg=custom_disk.xml,
flagstr=attach_option,
dargs=virsh_dargs, debug=True)
libvirt.check_exit_status(cmd_result)
# Turn VM into certain state.
if pre_vm_state == "running":
logging.info("Starting %s...", vm_name)
if vm.is_dead():
vm.start()
elif pre_vm_state == "transient":
logging.info("Creating %s...", vm_name)
vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
vm.undefine()
if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
vmxml_backup.define()
test.skip("can't create the domain")
# Run the tests.
if pm_enabled:
            # Make sure the guest agent is started
try:
vm.prepare_guest_agent()
except (remote.LoginError, virt_vm.VMError) as detail:
test.fail("failed to prepare agent: %s" % detail)
# Run dompmsuspend command.
test_pmsuspend(vm_name)
if test_qemu_cmd:
# Check qemu-kvm command line
cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
if transport == "rdma":
cmd += " | grep gluster+%s.*format=%s" % (transport, disk_format)
else:
cmd += " | grep gluster.*format=%s" % disk_format
if driver_iothread:
cmd += " | grep iothread=iothread%s" % driver_iothread
if process.run(cmd, ignore_status=True, shell=True).exit_status:
test.fail("Can't see gluster option '%s' "
"in command line" % cmd)
# Detach hot plugged device.
if start_vm and not default_pool:
if gluster_disk:
ret = virsh.detach_device(vm_name, custom_disk.xml,
flagstr=attach_option, dargs=virsh_dargs)
libvirt.check_exit_status(ret)
finally:
# Recover VM.
if vm.is_alive():
vm.destroy(gracefully=False)
logging.info("Restoring vm...")
vmxml_backup.sync()
if utils_misc.is_mounted(mnt_src, default_pool, 'glusterfs'):
process.run("umount %s" % default_pool,
ignore_status=True, shell=True)
if gluster_disk:
libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
Example 8: run
#......... part of the code is omitted here .........
try:
vm.start()
except virt_vm.VMStartError as e:
logging.debug(e)
if pool_type == "mpath":
raise exceptions.TestSkipError("'mpath' pools for backing "
"'volume' disks isn't "
"supported for now")
else:
raise exceptions.TestFail("Failed to start vm")
session = vm.wait_for_login()
else:
pass
# checking attached disk in vm
logging.info("Checking disk availability in domain")
if not vmxml.get_disk_count(vm_name):
raise exceptions.TestFail("No disk in domain %s." % vm_name)
new_count = vmxml.get_disk_count(vm_name)
if new_count <= old_count:
raise exceptions.TestFail(
"Failed to attach disk %s" % lun_disk_xml)
logging.debug("Disks before attach: %s", bf_disks)
af_disks = libvirt_vm.get_disks()
logging.debug("Disks after attach: %s", af_disks)
mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
if not mount_disk:
raise exceptions.TestFail("Can not get attached device in vm.")
logging.debug("Attached device in vm:%s", mount_disk)
logging.debug("Creating file system for %s", mount_disk)
output = session.cmd_status_output(
'echo yes | mkfs.ext4 %s' % mount_disk)
logging.debug("%s", output[1])
if mount_disk:
mount_success = mount_and_dd(session, mount_disk)
if not mount_success:
raise exceptions.TestFail("Mount failed")
else:
raise exceptions.TestFail("Partition not available for disk")
logging.debug("Unmounting disk")
session.cmd_status_output('umount %s' % mount_disk)
output = session.cmd_status_output('mount')
logging.debug("%s", output[1])
mount_success = mount_and_dd(session, mount_disk)
if not mount_success:
raise exceptions.TestFail("Mount failed")
logging.debug("Unmounting disk")
session.cmd_status_output('umount %s' % mount_disk)
session.close()
# detach disk from vm
dev_detach_status = virsh.detach_device(vm_name, disk_xml,
debug=True)
utlv.check_exit_status(dev_detach_status)
finally:
vm.destroy(gracefully=False)
vmxml_backup.sync()
logging.debug('Destroying pool %s', pool_name)
virsh.pool_destroy(pool_name)
logging.debug('Undefining pool %s', pool_name)
virsh.pool_undefine(pool_name)
if os.path.exists(pool_xml_f):
os.remove(pool_xml_f)
if os.path.exists(disk_xml):
data_dir.clean_tmp_files()
logging.debug("Cleanup disk xml")
if pre_def_pool == "yes":
# Do not apply cleanup_pool for logical pool, logical pool will
# be cleaned below
pvt.cleanup_pool(pool_name, pool_type, pool_target,
emulated_image, **pool_kwargs)
if (test_unit and
(need_vol_create == "yes" and (pre_def_pool == "no")) and
(pool_type == "logical")):
process.system('lvremove -f %s/%s' % (pool_name, test_unit),
verbose=True)
process.system('vgremove -f %s' % pool_name, verbose=True)
process.system('pvremove -f %s' % source_dev, verbose=True)
if new_vhbas:
utils_npiv.vhbas_cleanup(new_vhbas)
# Restart multipathd, this is to avoid bz1399075
if source_dev:
utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(source_dev),
_DELAY_TIME*5, 0.0, 5.0)
elif mpath_vol_path:
utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(mpath_vol_path),
_DELAY_TIME*5, 0.0, 5.0)
else:
utils_npiv.restart_multipathd()
if old_mpath_conf:
utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
conf_content=old_mpath_conf,
replace_existing=True)
if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
os.remove(mpath_conf_path)
Example 9: run
#......... part of the code is omitted here .........
vm_session = vm.wait_for_login()
libvirt.check_qemu_cmd_line('mem-path=/tmp/nvdimm,share=no')
private_str = 'This is a test for foo-private!'
vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')
file_private = 'foo-private'
vm_session.cmd("echo '%s' >/mnt/%s" % (private_str, file_private))
if private_str not in vm_session.cmd('cat /mnt/%s' % file_private):
test.fail('"%s" should be in output' % private_str)
            # Shut down the guest, then start it, and check that the
            # file foo-private no longer exists
vm_session.close()
vm.destroy()
vm.start()
vm_session = vm.wait_for_login()
vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')
if file_private in vm_session.cmd('ls /mnt/'):
test.fail('%s should not exist, for it was '
'created when access=private' % file_private)
if check == 'label_back_file':
# Create an xfs file system on /dev/pmem0
vm_session.cmd('mkfs.xfs -f -b size=4096 /dev/pmem0')
# Mount the file system with DAX enabled for page cache bypass
output = vm_session.cmd_output('mount -o dax /dev/pmem0 /mnt/')
logging.info(output)
# Create a file on the nvdimm device.
test_str = 'This is a test with label'
vm_session.cmd('echo "%s" >/mnt/foo-label' % test_str)
if test_str not in vm_session.cmd('cat /mnt/foo-label '):
test.fail('"%s" should be in output' % test_str)
# Reboot the guest, and remount the nvdimm device in the guest.
            # Check that the file foo-label still exists
vm_session.close()
virsh.reboot(vm_name, debug=True)
vm_session = vm.wait_for_login()
vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
if test_str not in vm_session.cmd('cat /mnt/foo-label '):
test.fail('"%s" should be in output' % test_str)
if check == 'hot_plug':
# Create file for 2nd nvdimm device
nvdimm_file_2 = params.get('nvdimm_file_2')
process.run('truncate -s 512M %s' % nvdimm_file_2)
# Add 2nd nvdimm device to vm xml
nvdimm2_params = {k.replace('nvdimmxml2_', ''): v
for k, v in params.items() if k.startswith('nvdimmxml2_')}
nvdimm2_xml = create_nvdimm_xml(**nvdimm2_params)
ori_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')
logging.debug('Starts with %d memory devices', len(ori_devices))
result = virsh.attach_device(vm_name, nvdimm2_xml.xml, debug=True)
libvirt.check_exit_status(result)
# After attach, there should be an extra memory device
devices_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')
            logging.debug('After attach, vm has %d memory devices',
len(devices_after_attach))
if len(ori_devices) != len(devices_after_attach) - 1:
test.fail('Number of memory devices after attach is %d, should be %d'
% (len(devices_after_attach), len(ori_devices) + 1))
time.sleep(5)
check_file_in_vm(vm_session, '/dev/pmem1')
nvdimm_detach = alive_vmxml.get_devices('memory')[-1]
logging.debug(nvdimm_detach)
# Hot-unplug nvdimm device
result = virsh.detach_device(vm_name, nvdimm_detach.xml, debug=True)
libvirt.check_exit_status(result)
vm_session.close()
vm_session = vm.wait_for_login()
virsh.dumpxml(vm_name, debug=True)
left_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')
logging.debug(left_devices)
if len(left_devices) != len(ori_devices):
test.fail('Number of memory devices after detach is %d, should be %d'
% (len(left_devices), len(ori_devices)))
time.sleep(5)
check_file_in_vm(vm_session, '/dev/pmem1', expect=False)
finally:
if vm.is_alive():
vm.destroy(gracefully=False)
bkxml.sync()
os.remove(nvdimm_file)
Example 10: run
#......... part of the code is omitted here .........
old_mpath_conf = ""
mpath_conf_path = "/etc/multipath.conf"
original_mpath_conf_exist = os.path.exists(mpath_conf_path)
vm = env.get_vm(vm_name)
try:
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
vmxml_backup = vmxml.copy()
old_disk_count = vmxml.get_disk_count(vm_name)
# Prepare vHBA
online_hbas = utils_npiv.find_hbas("hba")
old_vhbas = utils_npiv.find_hbas("vhba")
if not online_hbas:
raise exceptions.TestSkipError("Host doesn't have online hba!")
old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
replace_existing=True)
first_online_hba = online_hbas[0]
new_vhba = utils_npiv.nodedev_create_from_xml(
{"nodedev_parent": first_online_hba,
"scsi_wwnn": wwnn,
"scsi_wwpn": wwpn})
utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
timeout=_TIMEOUT)
if not utils_npiv.is_vhbas_added(old_vhbas):
raise exceptions.TestFail("vHBA is not successfully created.")
new_vhbas.append(new_vhba)
new_vhba_scsibus = re.sub("\D", "", new_vhba)
# Get the new block device generated by the new vHBA
utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
timeout=_TIMEOUT)
blk_devs = get_blks_by_scsi(new_vhba_scsibus)
if not blk_devs:
raise exceptions.TestFail("block device not found with scsi_%s",
new_vhba_scsibus)
first_blk_dev = blk_devs[0]
# Get the symbolic link of the device in /dev/disk/by-[path|uuid|id]
logging.debug("first_blk_dev = %s, lun_dir_method = %s"
% (first_blk_dev, lun_dir_method))
utils_misc.wait_for(
lambda: get_symbols_by_blk(first_blk_dev, lun_dir_method),
timeout=_TIMEOUT)
lun_sl = get_symbols_by_blk(first_blk_dev, lun_dir_method)
if not lun_sl:
raise exceptions.TestFail("lun symbolic links not found under "
"/dev/disk/%s/ for block device %s." %
                                      (lun_dir_method, first_blk_dev))
lun_dev = lun_sl[0]
lun_dev_path = "/dev/disk/" + lun_dir_method + "/" + lun_dev
# Prepare xml of virtual disk
disk_params = {'type_name': device_type, 'device': disk_device,
'driver_name': driver_name, 'driver_type': driver_type,
'source_file': lun_dev_path,
'target_dev': device_target, 'target_bus': target_bus,
'readonly': readonly}
disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
lun_disk_xml = utlv.create_disk_xml(disk_params)
copyfile(lun_disk_xml, disk_xml)
if not vm.is_alive():
vm.start()
session = vm.wait_for_login()
libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
vm.address_cache)
old_disks = libvirt_vm.get_disks()
# Attach disk
dev_attach_status = virsh.attach_device(
vm_name, disk_xml, debug=True)
utlv.check_exit_status(dev_attach_status)
cur_disk_count = vmxml.get_disk_count(vm_name)
cur_disks = libvirt_vm.get_disks()
if cur_disk_count <= old_disk_count:
raise exceptions.TestFail(
"Failed to attach disk: %s" % lun_disk_xml)
new_disk = "".join(list(set(old_disks) ^ set(cur_disks)))
logging.debug("Attached device in vm:%s", new_disk)
# Check disk in VM
output = session.cmd_status_output('mkfs.ext4 -F %s' % new_disk)
logging.debug("mkfs.ext4 the disk in vm, result: %s", output[1])
if not check_vm_disk(session, new_disk, readonly):
raise exceptions.TestFail("Failed check the disk in vm.")
session.cmd_status_output('umount %s' % new_disk)
# Detach disk
dev_detach_status = virsh.detach_device(vm_name, disk_xml, debug=True)
utlv.check_exit_status(dev_detach_status)
cur_disks = libvirt_vm.get_disks()
if cur_disks != old_disks:
raise exceptions.TestFail("Detach disk failed.")
session.close()
finally:
utils_npiv.vhbas_cleanup(new_vhbas)
if vm.is_alive():
vm.destroy(gracefully=False)
vmxml_backup.sync()
process.system('service multipathd restart', verbose=True)
if old_mpath_conf:
utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
conf_content=old_mpath_conf,
replace_existing=True)
if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
os.remove(mpath_conf_path)
Example 11: run
#......... part of the code is omitted here .........
if result.exit_status:
raise process.CmdError(result.command, result)
else:
if disk:
utils_test.libvirt.create_local_disk("file", path, size="1M")
                os.chmod(path, 0o666)
disk_xml = Disk(type_name="file")
disk_xml.device = "disk"
disk_xml.source = disk_xml.new_disk_source(**{"attrs": {"file": path}})
disk_xml.driver = {"name": "qemu", "type": "raw", "cache": "none"}
disk_xml.target = {"dev": "sdb", "bus": "usb"}
attributes = {"type_name": "usb", "bus": "1", "port": "0"}
disk_xml.address = disk_xml.new_disk_address(**{"attrs": attributes})
result = virsh.attach_device(vm_name, disk_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if mouse:
mouse_xml = Input("mouse")
mouse_xml.input_bus = "usb"
attributes = {"type_name": "usb", "bus": "1", "port": "0"}
mouse_xml.address = mouse_xml.new_input_address(**{"attrs": attributes})
result = virsh.attach_device(vm_name, mouse_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if tablet:
tablet_xml = Input("tablet")
tablet_xml.input_bus = "usb"
attributes = {"type_name": "usb", "bus": "1", "port": "0"}
tablet_xml.address = tablet_xml.new_input_address(**{"attrs": attributes})
result = virsh.attach_device(vm_name, tablet_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if keyboard:
kbd_xml = Input("keyboard")
kbd_xml.input_bus = "usb"
attributes = {"type_name": "usb", "bus": "1", "port": "0"}
kbd_xml.address = kbd_xml.new_input_address(**{"attrs": attributes})
result = virsh.attach_device(vm_name, kbd_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if attach_type == "qemu_monitor":
options = "--hmp"
if disk:
attach_cmd = "drive_del"
attach_cmd += " drive-usb-disk"
result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
if result.exit_status:
raise process.CmdError(result.command, result)
if mouse:
attach_cmd = "device_del"
attach_cmd += " mouse"
result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
if result.exit_status:
raise process.CmdError(result.command, result)
if keyboard:
attach_cmd = "device_del"
attach_cmd += " keyboard"
result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
if result.exit_status:
raise process.CmdError(result.command, result)
if tablet:
attach_cmd = "device_del"
attach_cmd += " tablet"
result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
if result.exit_status:
raise process.CmdError(result.command, result)
else:
if disk:
result = virsh.detach_device(vm_name, disk_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if mouse:
result = virsh.detach_device(vm_name, mouse_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if keyboard:
result = virsh.detach_device(vm_name, kbd_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if tablet:
result = virsh.detach_device(vm_name, tablet_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
    except process.CmdError as e:
if not status_error:
raise error.TestFail("failed to attach device.\n" "Detail: %s." % result)
finally:
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
vm_xml_backup.sync()
Example 12: run
#......... part of the code is omitted here .........
# Wait for vm is running
vm.wait_for_login(timeout=600).close()
if additional_guest:
if additional_vm.is_dead():
additional_vm.start()
# Check qemu command line
if test_qemu_cmd:
check_qemu_cmd()
# Check partitions in vm
if test_vm_parts:
if not check_in_vm(vm, targetdev, old_parts,
read_only=create_snapshot):
test.fail("Failed to check vm partitions")
if additional_guest:
if not check_in_vm(additional_vm, targetdev, old_parts):
test.fail("Failed to check vm partitions")
# Save and restore operation
if test_save_restore:
check_save_restore()
if test_snapshot:
snap_option = params.get("snapshot_option", "")
check_snapshot(snap_option)
if test_blockcopy:
check_blockcopy(targetdev)
if test_disk_readonly:
snap_option = params.get("snapshot_option", "")
check_snapshot(snap_option, 'vdb')
if test_disk_internal_snapshot:
snap_option = params.get("snapshot_option", "")
check_snapshot(snap_option, targetdev)
# Detach the device.
if attach_device:
xml_file = libvirt.create_disk_xml(params)
ret = virsh.detach_device(vm_name, xml_file)
libvirt.check_exit_status(ret)
if additional_guest:
ret = virsh.detach_device(guest_name, xml_file)
libvirt.check_exit_status(ret)
elif attach_disk:
ret = virsh.detach_disk(vm_name, targetdev)
libvirt.check_exit_status(ret)
# Check disk in vm after detachment.
if attach_device or attach_disk:
session = vm.wait_for_login()
new_parts = libvirt.get_parts_list(session)
if len(new_parts) != len(old_parts):
test.fail("Disk still exists in vm"
" after detachment")
session.close()
except virt_vm.VMStartError as details:
for msg in unsupported_err:
if msg in str(details):
test.cancel(str(details))
else:
test.fail("VM failed to start."
"Error: %s" % str(details))
finally:
# Remove /etc/ceph/ceph.conf file if exists.
if os.path.exists('/etc/ceph/ceph.conf'):
os.remove('/etc/ceph/ceph.conf')
# Delete snapshots.
snapshot_lists = virsh.snapshot_list(vm_name)
if len(snapshot_lists) > 0:
libvirt.clean_up_snapshots(vm_name, snapshot_lists)
Example 13: run
def run(test, params, env):
"""
    Test interface xml options.
    1. Prepare the test environment, destroy or suspend a VM.
    2. Edit the xml and start the domain.
    3. Perform the test operation.
    4. Recover the test environment.
    5. Confirm the test result.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
if vm.is_alive():
vm.wait_for_login()
def create_iface_xml(mac):
"""
Create interface xml file
"""
iface = Interface(type_name=iface_type)
iface.source = iface_source
iface.model = iface_model if iface_model else "virtio"
if iface_target:
iface.target = {'dev': iface_target}
iface.mac_address = mac
logging.debug("Create new interface xml: %s", iface)
return iface
status_error = "yes" == params.get("status_error", "no")
# Interface specific attributes.
iface_num = params.get("iface_num", '1')
iface_type = params.get("iface_type", "network")
iface_source = eval(params.get("iface_source",
"{'network':'default'}"))
iface_model = params.get("iface_model")
iface_target = params.get("iface_target")
iface_mac = params.get("iface_mac")
attach_device = "yes" == params.get("attach_device", "no")
attach_iface = "yes" == params.get("attach_iface", "no")
attach_option = params.get("attach_option", "")
detach_device = "yes" == params.get("detach_device")
stress_test = "yes" == params.get("stress_test")
restart_libvirtd = "yes" == params.get("restart_libvirtd",
"no")
username = params.get("username")
password = params.get("password")
poll_timeout = int(params.get("poll_timeout", 10))
    # stress_test requires a detach operation
stress_test_detach_device = False
stress_test_detach_interface = False
if stress_test:
if attach_device:
stress_test_detach_device = True
if attach_iface:
stress_test_detach_interface = True
    # The following detach-device step also uses the attach option
detach_option = attach_option
# Back up xml file.
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
#iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
libvirtd = utils_libvirtd.Libvirtd()
# Check virsh command option
check_cmds = []
if not status_error:
if attach_device and attach_option:
check_cmds.append(('attach-device', attach_option))
if attach_iface and attach_option:
check_cmds.append(('attach-interface', attach_option))
if (detach_device or stress_test_detach_device) and detach_option:
check_cmds.append(('detach-device', detach_option))
if stress_test_detach_interface and detach_option:
check_cmds.append(('detach-device', detach_option))
for cmd, option in check_cmds:
libvirt.virsh_cmd_has_option(cmd, option)
try:
try:
# Attach an interface when vm is running
iface_list = []
err_msgs = ("No more available PCI slots",
"No more available PCI addresses")
if attach_device:
for i in range(int(iface_num)):
logging.info("Try to attach interface loop %s" % i)
if iface_mac:
mac = iface_mac
else:
mac = utils_net.generate_mac_address_simple()
iface_xml_obj = create_iface_xml(mac)
iface_xml_obj.xmltreefile.write()
ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
flagstr=attach_option,
ignore_status=True)
if ret.exit_status:
if any([msg in ret.stderr for msg in err_msgs]):
#......... part of the code is omitted here .........
Example 14: run
#......... part of the code is omitted here .........
tmp_dir = data_dir.get_tmp_dir()
image = qemu_storage.QemuImg(params, tmp_dir, img_name)
# Create a image.
img_path, result = image.create(params)
# Set the context of the image.
if sec_relabel == "no":
utils_selinux.set_context_of_file(filename=img_path, context=img_label)
disk_xml.target = {"dev": device_target, "bus": device_bus}
disk_xml.driver = {"name": "qemu", "type": vol_format}
if disk_seclabel == "yes":
source_seclabel = []
sec_xml = seclabel.Seclabel()
sec_xml.update(sec_disk_dict)
source_seclabel.append(sec_xml)
disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path},
"seclabels": source_seclabel})
else:
disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path}})
# Set the context of the VM.
vmxml.set_seclabel([sec_dict])
vmxml.sync()
disk_xml.source = disk_source
logging.debug(disk_xml)
# Do the attach action.
cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml.xml, flagstr='--persistent')
libvirt.check_exit_status(cmd_result, expect_error=False)
logging.debug("the domain xml is: %s" % vmxml.xmltreefile)
# Start VM to check the VM is able to access the image or not.
try:
vm.start()
# Start VM successfully.
# VM with set seclabel can access the image with the
# set context.
if status_error:
test.fail('Test succeeded in negative case.')
if check_cap_rawio:
cap_list = ['CapPrm', 'CapEff', 'CapBnd']
cap_dict = {}
pid = vm.get_pid()
pid_status_path = "/proc/%s/status" % pid
with open(pid_status_path) as f:
for line in f:
val_list = line.split(":")
if val_list[0] in cap_list:
cap_dict[val_list[0]] = int(val_list[1].strip(), 16)
                # bitwise AND with the rawio capability value to check that
                # cap_sys_rawio is set
cap_rawio_val = 0x0000000000020000
for i in cap_list:
if not cap_rawio_val & cap_dict[i]:
err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
err_msg += " lack cap_sys_rawio capabilities"
test.fail(err_msg)
else:
inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
inf_msg += " have cap_sys_rawio capabilities"
logging.debug(inf_msg)
if pool_type == "disk":
if libvirt_version.version_compare(3, 1, 0) and enable_namespace:
vm_pid = vm.get_pid()
output = process.system_output(
"nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
else:
output = process.system_output('ls -Z %s' % img_path)
logging.debug("The default label is %s", default_label)
logging.debug("The label after guest started is %s", to_text(output.strip().split()[-2]))
if default_label not in to_text(output.strip().split()[-2]):
test.fail("The label is wrong after guest started\n")
except virt_vm.VMStartError as e:
# Starting VM failed.
# VM with set seclabel can not access the image with the
# set context.
if not status_error:
test.fail("Test failed in positive case."
"error: %s" % e)
cmd_result = virsh.detach_device(domainarg=vm_name, filearg=disk_xml.xml)
libvirt.check_exit_status(cmd_result, status_error)
finally:
# clean up
vm.destroy()
if not with_pool_vol:
image.remove()
if pvt:
try:
pvt.cleanup_pool(pool_name, pool_type, pool_target,
emulated_image)
except exceptions.TestFail as detail:
logging.error(str(detail))
backup_xml.sync()
utils_selinux.set_status(backup_sestatus)
if check_cap_rawio:
qemu_conf.restore()
libvirtd.restart()
Example 15: run
#......... part of the code is omitted here .........
return True
except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as detail:
logging.error(str(detail))
return False
try:
status_error = "yes" == params.get("status_error", "no")
vm_name = params.get("main_vm", "avocado-vt-vm1")
device_target = params.get("hostdev_disk_target", "hdb")
scsi_wwnn = params.get("scsi_wwnn", "ENTER.YOUR.WWNN")
scsi_wwpn = params.get("scsi_wwpn", "ENTER.YOUR.WWPN")
attach_method = params.get('attach_method', 'hot')
vm = env.get_vm(vm_name)
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
virsh_dargs = {'debug': True, 'ignore_status': True}
new_vhbas = []
if scsi_wwnn.count("ENTER.YOUR.WWNN") or \
scsi_wwpn.count("ENTER.YOUR.WWPN"):
test.cancel("You didn't provide proper wwpn/wwnn")
if vm.is_dead():
vm.start()
session = vm.wait_for_login()
old_parts = libvirt.get_parts_list(session)
# find first online hba
online_hbas = []
online_hbas = utils_npiv.find_hbas("hba")
if not online_hbas:
test.cancel("NO ONLINE HBAs!")
first_online_hba = online_hbas[0]
# create vhba based on the first online hba
old_vhbas = utils_npiv.find_hbas("vhba")
logging.debug("Original online vHBAs: %s", old_vhbas)
new_vhba = utils_npiv.nodedev_create_from_xml(
{"nodedev_parent": first_online_hba,
"scsi_wwnn": scsi_wwnn,
"scsi_wwpn": scsi_wwpn})
if not utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
timeout=_TIMEOUT):
test.fail("vhba not successfully created")
new_vhbas.append(new_vhba)
# find first available lun of the newly created vhba
lun_dicts = []
first_lun = {}
if not utils_misc.wait_for(lambda: utils_npiv.find_scsi_luns(new_vhba),
timeout=_TIMEOUT):
test.fail("There is no available lun storage for "
"wwpn: %s, please check your wwns or "
"contact IT admins" % scsi_wwpn)
lun_dicts = utils_npiv.find_scsi_luns(new_vhba)
logging.debug("The luns discovered are: %s", lun_dicts)
first_lun = lun_dicts[0]
# prepare hostdev xml for the first lun
kwargs = {'addr_bus': first_lun['bus'],
'addr_target': first_lun['target'],
'addr_unit': first_lun['unit']}
new_hostdev_xml = utils_npiv.create_hostdev_xml(
adapter_name="scsi_host"+first_lun['scsi'],
**kwargs)
logging.info("New hostdev xml as follow:")
logging.info(new_hostdev_xml)
new_hostdev_xml.xmltreefile.write()
if attach_method == "hot":
# attach-device the lun's hostdev xml to guest vm
result = virsh.attach_device(vm_name, new_hostdev_xml.xml)
libvirt.check_exit_status(result, status_error)
elif attach_method == "cold":
if vm.is_alive():
vm.destroy(gracefully=False)
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
vmxml.devices = vmxml.devices.append(new_hostdev_xml)
vmxml.sync()
vm.start()
session = vm.wait_for_login()
logging.debug("The new vm's xml is: \n%s", vmxml)
# login vm and check the disk
check_result = check_in_vm(vm, device_target, old_parts)
if not check_result:
test.fail("check disk in vm failed")
result = virsh.detach_device(vm_name, new_hostdev_xml.xml)
libvirt.check_exit_status(result, status_error)
# login vm and check disk actually removed
parts_after_detach = libvirt.get_parts_list(session)
old_parts.sort()
parts_after_detach.sort()
if parts_after_detach == old_parts:
logging.info("hostdev successfully detached.")
else:
test.fail("Device not successfully detached. "
"Still existing in vm's /proc/partitions")
finally:
utils_npiv.vhbas_cleanup(new_vhbas)
# recover vm
if vm.is_alive():
vm.destroy(gracefully=False)
logging.info("Restoring vm...")
vmxml_backup.sync()
process.system('service multipathd restart', verbose=True)