本文整理汇总了Python中virttest.libvirt_xml.devices.disk.Disk.driver方法的典型用法代码示例。如果您正苦于以下问题:Python Disk.driver方法的具体用法?Python Disk.driver怎么用?Python Disk.driver使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类virttest.libvirt_xml.devices.disk.Disk
的用法示例。
在下文中一共展示了Disk.driver方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: build_disk_xml
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
def build_disk_xml(disk_img, disk_format, host_ip):
    """
    Rebuild the domain's disk xml: delete every existing disk, then
    attach a gluster-backed one — a file disk on a local glusterfs fuse
    mount when ``default_pool`` is set, otherwise a network disk using
    the gluster protocol.

    :param disk_img: image file name inside the pool/volume
    :param disk_format: qemu driver type, e.g. "raw" or "qcow2"
    :param host_ip: gluster server address
    """
    # Delete existed disks first.
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    disks_dev = vmxml.get_devices(device_type="disk")
    for disk in disks_dev:
        vmxml.del_device(disk)
    # default_pool set -> local fuse-mounted file disk;
    # unset -> direct gluster network disk.
    if default_pool:
        disk_xml = Disk(type_name="file")
    else:
        disk_xml = Disk(type_name="network")
    disk_xml.device = "disk"
    driver_dict = {"name": "qemu",
                   "type": disk_format,
                   "cache": "none"}
    if driver_iothread:
        driver_dict.update({"iothread": driver_iothread})
    disk_xml.driver = driver_dict
    disk_xml.target = {"dev": "vda", "bus": "virtio"}
    if default_pool:
        utils_misc.mount("%s:%s" % (host_ip, vol_name),
                         default_pool, "glusterfs")
        process.run("setsebool virt_use_fusefs on", shell=True)
        virsh.pool_refresh("default")
        source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": source_dict})
    else:
        source_dict = {"protocol": "gluster",
                       "name": "%s/%s" % (vol_name, disk_img)}
        host_dict = {"name": host_ip, "port": "24007"}
        if transport:
            host_dict.update({"transport": transport})
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": source_dict, "hosts": [host_dict]})
    # set domain options
    if dom_iothreads:
        try:
            vmxml.iothreads = int(dom_iothreads)
        except ValueError:
            # 'iothreads' may not be a valid number in negative tests;
            # use lazy %-args so formatting only happens when logged.
            logging.debug("Can't convert '%s' to integer type",
                          dom_iothreads)
    # Add the new disk xml.
    vmxml.add_device(disk_xml)
    vmxml.sync()
示例2: get_vm_disk_xml
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
def get_vm_disk_xml(dev_type, dev_name, **options):
    """
    Create a disk xml object and return it.

    :param dev_type: Disk type ("block" or anything else, treated as file).
    :param dev_name: Disk device name (source dev/file path).
    :param options: Disk options ("disk_device", "target", "bus",
                    optional "sgio", "driver", "share", "readonly").
    :return: Disk xml object.
    """
    # Create disk xml
    disk_xml = Disk(type_name=dev_type)
    disk_xml.device = options["disk_device"]
    # A non-empty "sgio" option implies a pass-through LUN device.
    # (dict.has_key() was removed in Python 3 — use the `in` operator.)
    if "sgio" in options and options["sgio"] != "":
        disk_xml.sgio = options["sgio"]
        disk_xml.device = "lun"
        disk_xml.rawio = "no"
    if dev_type == "block":
        disk_attr = "dev"
    else:
        disk_attr = "file"
    disk_xml.target = {'dev': options["target"],
                       'bus': options["bus"]}
    disk_xml.source = disk_xml.new_disk_source(
        **{'attrs': {disk_attr: dev_name}})
    # Add driver options from parameters.
    driver_dict = {"name": "qemu"}
    if "driver" in options:
        # "driver" is a comma-separated list of key=value pairs.
        for driver_option in options["driver"].split(','):
            if driver_option != "":
                d = driver_option.split('=')
                logging.debug("disk driver option: %s=%s", d[0], d[1])
                driver_dict.update({d[0].strip(): d[1].strip()})
    disk_xml.driver = driver_dict
    if options.get("share") == "shareable":
        disk_xml.share = True
    if options.get("readonly") == "readonly":
        disk_xml.readonly = True
    # Lazy %-args instead of eager string interpolation.
    logging.debug("The disk xml is: %s", disk_xml.xmltreefile)
    return disk_xml
示例3: add_cdrom_device
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
def add_cdrom_device(v_xml, iso_file, target_dev, device_bus):
    """
    Attach a cdrom disk device backed by an iso file to the VM XML.

    :param v_xml: The instance of VMXML class
    :param iso_file: The iso file path
    :param target_dev: The target dev in Disk XML
    :param device_bus: The target bus in Disk XML
    :return: the same VMXML instance with the cdrom device added
    """
    cdrom = Disk(type_name="file")
    cdrom.device = "cdrom"
    cdrom.target = {"dev": target_dev, "bus": device_bus}
    cdrom.driver = {"name": "qemu", "type": "raw"}
    cdrom.source = cdrom.new_disk_source(**{"attrs": {"file": iso_file}})
    cdrom.readonly = False
    v_xml.add_device(cdrom)
    return v_xml
示例4: prepare_virt_disk_xml
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
def prepare_virt_disk_xml(image_path):
    """
    Prepare the virtual disk xml to be attached/detached.

    :param image_path: The path to the local image.
    :return: The virtual disk xml.
    """
    # Pull the disk properties from the test parameters, with defaults.
    device = params.get("virt_disk_device", "disk")
    device_type = params.get("virt_disk_device_type", "file")
    device_format = params.get("virt_disk_device_format", "raw")
    device_target = params.get("virt_disk_device_target", "sdb")
    device_bus = params.get("virt_disk_device_bus", "usb")
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = device
    source_dict = {'attrs': {'file': image_path, 'type_name': 'file'}}
    disk_xml.source = disk_xml.new_disk_source(**source_dict)
    disk_xml.driver = {"name": "qemu", "type": device_format}
    disk_xml.target = {"dev": device_target,
                       "bus": device_bus}
    return disk_xml
示例5: build_disk_xml
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
def build_disk_xml(disk_img, disk_format, host_ip):
    """
    Rebuild the domain's disk xml: drop all existing disks, then attach
    a gluster-backed one (file disk on a fuse mount when default_pool is
    set, otherwise a network disk using the gluster protocol).

    :param disk_img: image file name inside the pool/volume
    :param disk_format: qemu driver type, e.g. "raw" or "qcow2"
    :param host_ip: gluster server address
    """
    # Drop every disk currently present in the domain xml.
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    for old_disk in vmxml.get_devices(device_type="disk"):
        vmxml.del_device(old_disk)
    disk_xml = Disk(type_name="file" if default_pool else "network")
    disk_xml.device = "disk"
    disk_xml.driver = {"name": "qemu",
                       "type": disk_format,
                       "cache": "none"}
    disk_xml.target = {"dev": "vda", "bus": "virtio"}
    if default_pool:
        # Mount the gluster volume locally and allow qemu to use fuse fs.
        utils.run("mount -t glusterfs %s:%s %s; setsebool virt_use_fusefs on" %
                  (host_ip, vol_name, default_pool))
        virsh.pool_refresh("default")
        attrs = {"file": "%s/%s" % (default_pool, disk_img)}
        disk_xml.source = disk_xml.new_disk_source(**{"attrs": attrs})
    else:
        attrs = {"protocol": "gluster",
                 "name": "%s/%s" % (vol_name, disk_img)}
        host = {"name": host_ip, "port": "24007"}
        if transport:
            host["transport"] = transport
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": attrs, "hosts": [host]})
    # Attach the rebuilt disk and persist the change.
    vmxml.add_device(disk_xml)
    vmxml.sync()
示例6: build_disk_xml
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
def build_disk_xml(disk_img, disk_format, host_ip):
    """
    Build and return a gluster-backed disk xml: a file disk on a fuse
    mount when default_pool is set, otherwise a network disk that may
    carry several <host> entries.

    :param disk_img: image file name inside the pool/volume
    :param disk_format: qemu driver type, e.g. "raw" or "qcow2"
    :param host_ip: gluster server address
    :return: the prepared Disk xml object
    """
    disk_xml = Disk(type_name="file" if default_pool else "network")
    disk_xml.device = "disk"
    driver_attrs = {"name": "qemu",
                    "type": disk_format,
                    "cache": "none"}
    if driver_iothread:
        driver_attrs["iothread"] = driver_iothread
    disk_xml.driver = driver_attrs
    disk_xml.target = {"dev": "vdb", "bus": "virtio"}
    if default_pool:
        # Mount the gluster volume locally and allow qemu to use fuse fs.
        utils_misc.mount("%s:%s" % (host_ip, vol_name),
                         default_pool, "glusterfs")
        process.run("setsebool virt_use_fusefs on", shell=True)
        attrs = {"file": "%s/%s" % (default_pool, disk_img)}
        disk_xml.source = disk_xml.new_disk_source(**{"attrs": attrs})
    else:
        attrs = {"protocol": "gluster",
                 "name": "%s/%s" % (vol_name, disk_img)}
        hosts = [{"name": host_ip, "port": "24007"}]
        # If mutiple_hosts is True, attempt to add multiple hosts.
        if multiple_hosts:
            hosts.append({"name": params.get("dummy_host1"),
                          "port": "24007"})
            hosts.append({"name": params.get("dummy_host2"),
                          "port": "24007"})
        if transport:
            hosts[0]['transport'] = transport
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": attrs, "hosts": hosts})
    return disk_xml
示例7: run
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
#.........这里部分代码省略.........
attach_cmd += " 0 id=drive-usb-disk%s,if=none,file=%s" % (i, path)
result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
if result.exit_status:
raise process.CmdError(result.command, result)
if keyboard:
attach_cmd = "device_add"
attach_cmd += " usb-kdb,bus=usb1.0,id=kdb"
result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
if result.exit_status:
raise process.CmdError(result.command, result)
if mouse:
attach_cmd = "device_add"
attach_cmd += " usb-mouse,bus=usb1.0,id=mouse"
result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
if result.exit_status:
raise process.CmdError(result.command, result)
if tablet:
attach_cmd = "device_add"
attach_cmd += " usb-tablet,bus=usb1.0,id=tablet"
result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
if result.exit_status:
raise process.CmdError(result.command, result)
else:
if disk:
utils_test.libvirt.create_local_disk("file", path, size="1M")
os.chmod(path, 0666)
disk_xml = Disk(type_name="file")
disk_xml.device = "disk"
disk_xml.source = disk_xml.new_disk_source(**{"attrs": {"file": path}})
disk_xml.driver = {"name": "qemu", "type": "raw", "cache": "none"}
disk_xml.target = {"dev": "sdb", "bus": "usb"}
attributes = {"type_name": "usb", "bus": "1", "port": "0"}
disk_xml.address = disk_xml.new_disk_address(**{"attrs": attributes})
result = virsh.attach_device(vm_name, disk_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if mouse:
mouse_xml = Input("mouse")
mouse_xml.input_bus = "usb"
attributes = {"type_name": "usb", "bus": "1", "port": "0"}
mouse_xml.address = mouse_xml.new_input_address(**{"attrs": attributes})
result = virsh.attach_device(vm_name, mouse_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if tablet:
tablet_xml = Input("tablet")
tablet_xml.input_bus = "usb"
attributes = {"type_name": "usb", "bus": "1", "port": "0"}
tablet_xml.address = tablet_xml.new_input_address(**{"attrs": attributes})
result = virsh.attach_device(vm_name, tablet_xml.xml)
if result.exit_status:
raise process.CmdError(result.command, result)
if keyboard:
kbd_xml = Input("keyboard")
kbd_xml.input_bus = "usb"
attributes = {"type_name": "usb", "bus": "1", "port": "0"}
kbd_xml.address = kbd_xml.new_input_address(**{"attrs": attributes})
示例8: run
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
def run(test, params, env):
"""
Test virsh domblkerror in 2 types error
1. unspecified error
2. no space
"""
if not virsh.has_help_command('domblkerror'):
raise error.TestNAError("This version of libvirt does not support "
"domblkerror test")
vm_name = params.get("main_vm", "virt-tests-vm1")
error_type = params.get("domblkerror_error_type")
timeout = params.get("domblkerror_timeout", 240)
mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
tmp_file = params.get("domblkerror_tmp_file", "/tmp/fdisk-cmd")
export_file = params.get("nfs_export_file", "/etc/exports")
img_name = params.get("domblkerror_img_name", "libvirt-disk")
img_size = params.get("domblkerror_img_size")
target_dev = params.get("domblkerror_target_dev", "vdb")
pool_name = params.get("domblkerror_pool_name", "fs_pool")
vol_name = params.get("domblkerror_vol_name", "vol1")
vm = env.get_vm(vm_name)
# backup /etc/exports
shutil.copyfile(export_file, "%s.bak" % export_file)
selinux_bak = ""
# backup xml
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
try:
# Gerenate tmp dir
tmp_dir = data_dir.get_tmp_dir()
img_dir = os.path.join(tmp_dir, 'images')
if not os.path.exists(img_dir):
os.mkdir(img_dir)
# Generate attached disk
utils.run("qemu-img create %s %s" %
(os.path.join(img_dir, img_name), img_size))
# Get unspecified error
if error_type == "unspecified error":
# In this situation, guest will attach a disk on nfs, stop nfs
# service will cause guest paused and get unspecified error
nfs_dir = os.path.join(tmp_dir, 'mnt')
if not os.path.exists(nfs_dir):
os.mkdir(nfs_dir)
mount_opt = "rw,no_root_squash,async"
res = utils_test.libvirt.setup_or_cleanup_nfs(
is_setup=True, mount_dir=nfs_dir, is_mount=False,
export_options=mount_opt, export_dir=img_dir)
selinux_bak = res["selinux_status_bak"]
utils.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
"127.0.0.1:%s %s" % (img_dir, nfs_dir))
img_path = os.path.join(nfs_dir, img_name)
nfs_service = Factory.create_service("nfs")
elif error_type == "no space":
# Steps to generate no space block error:
# 1. Prepare a iscsi disk and build fs pool with it
# 2. Create vol with larger capacity and 0 allocation
# 3. Attach this disk in guest
# 4. In guest, create large image in the vol, which may cause
# guest paused
pool_target = os.path.join(tmp_dir, pool_name)
_pool_vol = utils_test.libvirt.PoolVolumeTest(test, params)
_pool_vol.pre_pool(pool_name, "fs", pool_target, img_name,
image_size=img_size)
_pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
img_path = os.path.join(pool_target, vol_name)
# Generate disk xml
# Guest will attach a disk with cache=none and error_policy=stop
img_disk = Disk(type_name="file")
img_disk.device = "disk"
img_disk.source = img_disk.new_disk_source(
**{'attrs': {'file': img_path}})
img_disk.driver = {'name': "qemu",
'type': "raw",
'cache': "none",
'error_policy': "stop"}
img_disk.target = {'dev': target_dev,
'bus': "virtio"}
logging.debug("disk xml is %s", img_disk.xml)
# Start guest and get session
if not vm.is_alive():
vm.start()
session = vm.wait_for_login()
# Get disk list before operation
get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
bef_list = session.cmd_output(get_disks_cmd).split("\n")
# Attach disk to guest
ret = virsh.attach_device(domain_opt=vm_name,
file_opt=img_disk.xml)
if ret.exit_status != 0:
raise error.TestFail("Fail to attach device %s" % ret.stderr)
time.sleep(2)
#.........这里部分代码省略.........
示例9: run
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
#.........这里部分代码省略.........
device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)
disk_src_dict = {"attrs": {"protocol": "rbd",
"name": ceph_disk_name},
"hosts": [{"name": ceph_host_ip,
"port": ceph_host_port}]}
elif backend_storage_type == "nfs":
pool_name = params.get("pool_name", "nfs_pool")
pool_target = params.get("pool_target", "nfs_mount")
pool_type = params.get("pool_type", "netfs")
nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
emulated_image = params.get("emulated_image")
image_name = params.get("nfs_image_name", "nfs.img")
tmp_dir = data_dir.get_tmp_dir()
pvt = libvirt.PoolVolumeTest(test, params)
pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
nfs_mount_dir = os.path.join(tmp_dir, pool_target)
device_source = nfs_mount_dir + image_name
disk_src_dict = {'attrs': {'file': device_source,
'type_name': 'file'}}
else:
test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.")
logging.debug("device source is: %s", device_source)
luks_sec_uuid = libvirt.create_secret(params)
logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
encode=True, debug=True)
encrypt_dev(device_source, params)
libvirt.check_exit_status(ret)
# Add disk xml.
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
disk_xml = Disk(type_name=device_type)
disk_xml.device = device
disk_xml.target = {"dev": device_target, "bus": device_bus}
driver_dict = {"name": "qemu", "type": device_format}
disk_xml.driver = driver_dict
disk_source = disk_xml.new_disk_source(**disk_src_dict)
if disk_auth_dict:
logging.debug("disk auth dict is: %s" % disk_auth_dict)
if auth_in_source:
disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
else:
disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
disk_encryption_dict = {"encryption": "luks",
"secret": {"type": "passphrase",
"uuid": luks_sec_uuid}}
disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
if encryption_in_source:
disk_source.encryption = disk_encryption
else:
disk_xml.encryption = disk_encryption
disk_xml.source = disk_source
logging.debug("new disk xml is: %s", disk_xml)
# Sync VM xml
if not hotplug_disk:
vmxml.add_device(disk_xml)
vmxml.sync()
try:
vm.start()
vm.wait_for_login()
except virt_vm.VMStartError as details:
# When use wrong password in disk xml for cold plug cases,
# VM cannot be started
if status_error and not hotplug_disk:
logging.info("VM failed to start as expected: %s" % str(details))
else:
test.fail("VM should start but failed: %s" % str(details))
示例10: run
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
def run(test, params, env):
"""
Test disk encryption option.
1.Prepare test environment, destroy or suspend a VM.
2.Prepare tgtd and secret config.
3.Edit disks xml and start the domain.
4.Perform test operation.
5.Recover test environment.
6.Confirm the test result.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
virsh_dargs = {'debug': True, 'ignore_status': True}
def check_save_restore(save_file):
    """
    Test domain save and restore.

    Uses the enclosing scope's vm_name and virsh_dargs; both virsh
    calls are checked for a zero exit status.

    :param save_file: path used to store the saved domain image
    """
    # Save the domain.
    ret = virsh.save(vm_name, save_file, **virsh_dargs)
    libvirt.check_exit_status(ret)
    # Restore the domain.
    ret = virsh.restore(save_file, **virsh_dargs)
    libvirt.check_exit_status(ret)
def check_snapshot():
    """
    Test domain snapshot operation.

    Creates one plain snapshot (must succeed), then a disk-only external
    snapshot and a memspec external snapshot.
    """
    snapshot1 = "s1"
    snapshot2 = "s2"
    # Plain snapshot — expected to succeed.
    ret = virsh.snapshot_create_as(vm_name, snapshot1)
    libvirt.check_exit_status(ret)
    # NOTE(review): the second argument True to check_exit_status
    # presumably flags these two external-snapshot attempts as expected
    # failures on this configuration — confirm against the helper's API.
    ret = virsh.snapshot_create_as(vm_name,
                                   "%s --disk-only --diskspec vda,"
                                   "file=/tmp/testvm-snap1"
                                   % snapshot2)
    libvirt.check_exit_status(ret, True)
    ret = virsh.snapshot_create_as(vm_name,
                                   "%s --memspec file=%s,snapshot=external"
                                   " --diskspec vda,file=/tmp/testvm-snap2"
                                   % (snapshot2, snapshot2))
    libvirt.check_exit_status(ret, True)
def check_in_vm(target, old_parts):
    """
    Check mount/read/write disk in VM.

    Logs into the enclosing scope's vm, diffs the guest partition list
    against old_parts, and exercises the single newly-added disk
    (fdisk/mkfs/mount/write/umount).

    :param target: Disk dev in VM (e.g. "vdb", "sdb", "hdb").
    :param old_parts: partition list captured before the disk attach.
    :return: True if check successfully.
    """
    try:
        session = vm.wait_for_login()
        new_parts = libvirt.get_parts_list(session)
        added_parts = list(set(new_parts).difference(set(old_parts)))
        logging.info("Added parts:%s", added_parts)
        # Exactly one new device must have appeared in the guest.
        if len(added_parts) != 1:
            logging.error("The number of new partitions is invalid in VM")
            return False
        added_part = None
        if target.startswith("vd"):
            if added_parts[0].startswith("vd"):
                added_part = added_parts[0]
        elif target.startswith("hd"):
            # An hd* target is expected to show up as sd* in the guest.
            if added_parts[0].startswith("sd"):
                added_part = added_parts[0]
        elif target.startswith("sd"):
            added_part = added_parts[0]
        if not added_part:
            logging.error("Cann't see added partition in VM")
            return False
        # Format, mount, write a file, then unmount the new disk; any
        # failing step makes the chained command return non-zero.
        cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
               "mkdir -p test && mount /dev/{0} test && echo"
               " teststring > test/testfile && umount test"
               .format(added_part))
        s, o = session.cmd_status_output(cmd)
        logging.info("Check disk operation in VM:\n%s", o)
        if s != 0:
            return False
        return True
    except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
        logging.error(str(e))
        return False
def check_qemu_cmd():
"""
Check qemu-kvm command line options
"""
cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
if driver_iothread:
cmd += " | grep iothread=iothread%s" % driver_iothread
#.........这里部分代码省略.........
示例11: vm_stress_events
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
def vm_stress_events(self, event, vm):
    """
    Stress events

    Repeats the selected event self.iterations times against the given
    VM; exit statuses are only verified when self.ignore_status is
    false.

    :param event: event name (vcpupin, emulatorpin, suspend, cpuhotplug,
                  reboot, nethotplug, diskhotplug)
    :param vm: vm object
    :raises NotImplementedError: for an unrecognized event name
    """
    dargs = {'ignore_status': True, 'debug': True}
    for itr in range(self.iterations):
        if "vcpupin" in event:
            # Pin each vcpu to a randomly chosen host cpu.
            for vcpu in range(int(self.current_vcpu)):
                result = virsh.vcpupin(vm.name, vcpu,
                                       random.choice(self.host_cpu_list),
                                       **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
        elif "emulatorpin" in event:
            # Re-pin the emulator thread once per vcpu iteration.
            for vcpu in range(int(self.current_vcpu)):
                result = virsh.emulatorpin(vm.name,
                                           random.choice(
                                               self.host_cpu_list),
                                           **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
        elif "suspend" in event:
            # Suspend, wait, then resume the domain.
            result = virsh.suspend(vm.name, **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
            time.sleep(self.event_sleep_time)
            result = virsh.resume(vm.name, **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
        elif "cpuhotplug" in event:
            # Hotplug vcpus up to max, verify the counts, then back
            # down to the current value and verify again.
            result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                    **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
            exp_vcpu = {'max_config': self.max_vcpu,
                        'max_live': self.max_vcpu,
                        'cur_config': self.current_vcpu,
                        'cur_live': self.max_vcpu,
                        'guest_live': self.max_vcpu}
            utils_hotplug.check_vcpu_value(
                vm, exp_vcpu, option="--live")
            time.sleep(self.event_sleep_time)
            result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                    **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
            exp_vcpu = {'max_config': self.max_vcpu,
                        'max_live': self.max_vcpu,
                        'cur_config': self.current_vcpu,
                        'cur_live': self.current_vcpu,
                        'guest_live': self.current_vcpu}
            utils_hotplug.check_vcpu_value(
                vm, exp_vcpu, option="--live")
        elif "reboot" in event:
            vm.reboot()
        elif "nethotplug" in event:
            # Attach (and optionally detach) bridge interfaces with
            # freshly generated MAC addresses.
            for iface_num in range(int(self.iface_num)):
                logging.debug("Try to attach interface %d" % iface_num)
                mac = utils_net.generate_mac_address_simple()
                options = ("%s %s --model %s --mac %s %s" %
                           (self.iface_type, self.iface_source['network'],
                            self.iface_model, mac, self.attach_option))
                logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                ret = virsh.attach_interface(vm.name, options,
                                             ignore_status=True)
                time.sleep(self.event_sleep_time)
                if not self.ignore_status:
                    libvirt.check_exit_status(ret)
                if self.detach_option:
                    options = ("--type %s --mac %s %s" %
                               (self.iface_type, mac, self.detach_option))
                    logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                    ret = virsh.detach_interface(vm.name, options,
                                                 ignore_status=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
        elif "diskhotplug" in event:
            # Create a local backing image per configured device, attach
            # it, then optionally detach and delete it.
            for disk_num in range(len(self.device_source_names)):
                disk = {}
                disk_attach_error = False
                disk_name = os.path.join(self.path, vm.name, self.device_source_names[disk_num])
                device_source = libvirt.create_local_disk(
                    self.disk_type, disk_name, self.disk_size, disk_format=self.disk_format)
                disk.update({"format": self.disk_format,
                             "source": device_source})
                # NOTE(review): disk_xml is built but attach_disk below
                # uses the raw source path, not this xml — confirm
                # whether the xml is intentionally unused.
                disk_xml = Disk(self.disk_type)
                disk_xml.device = self.disk_device
                disk_xml.driver = {"name": self.disk_driver, "type": self.disk_format}
                ret = virsh.attach_disk(vm.name, disk["source"], self.device_target[disk_num], self.attach_option, debug=True)
                if not self.ignore_status:
                    libvirt.check_exit_status(ret, disk_attach_error)
                if self.detach_option:
                    ret = virsh.detach_disk(vm.name, self.device_target[disk_num], extra=self.detach_option)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    libvirt.delete_local_disk(self.disk_type, disk_name)
        else:
            raise NotImplementedError
示例12: run
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
#.........这里部分代码省略.........
pass
if not guestmemory:
# assign default memory
guestmemory = default_mem
# Set the current and max memory params
vmxml.current_mem_unit = memunit
vmxml.max_mem_unit = memunit
vmxml.current_mem = int(guestmemory)
vmxml.max_mem = int(guestmemory)
vmxml.sync()
# Set vcpu and topology
libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
vm_sockets, vm_cores, vm_threads)
vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
# Set vnc display as needed
graphics = vmxml.get_device_class('graphics')()
if graphic:
if not vmxml.get_graphics_devices("vnc"):
graphics.add_graphic(vm_name, graphic="vnc")
else:
if vmxml.get_graphics_devices("vnc"):
graphics.del_graphic(vm_name)
vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
network_str = None
disk_str = None
# Set network devices
if max_network:
network_str = "ip link|grep ^[1-9]|wc -l"
for idx in range(num_network):
network = Interface(type_name="bridge")
network.mac_address = utils_net.generate_mac_address_simple()
network.source = {"bridge": netdst}
vmxml.add_device(network)
# Set disk devices
if max_disk:
for idx in range(num_disk):
disk_str = "lsblk|grep ^[s,v]|grep 1G|wc -l"
disk = Disk()
disk_path = os.path.join(data_dir.get_data_dir(), "images", "%s.qcow2" % idx)
if "scsi" in drive_format:
drive_format = "scsi"
disk_target = "sd%s" % letters[(idx % 51)+1]
else:
drive_format = "virtio"
disk_target = "vd%s" % letters[(idx % 51)+1]
disk_source = libvirt.create_local_disk("file", disk_path, '1', "qcow2")
disk.device = "disk"
disk.source = disk.new_disk_source(**{"attrs": {'file': disk_source}})
disk.target = {"dev": disk_target, "bus": drive_format}
disk.driver = {"name": "qemu", 'type': disk_format}
vmxml.add_device(disk)
vmxml.sync()
# Start VM
logging.debug("VM XML: \n%s", vmxml)
try:
vm.start()
except virt_vm.VMStartError, detail:
for msg in failures.items():
if msg[0] in detail:
test.cancel("%s", msg[1])
test.fail("%s" % detail)
# Check the memory and vcpu inside guest
memtotal = vm.get_totalmem_sys()
cpucount = vm.get_cpu_count()
session = vm.wait_for_login()
if network_str:
guestnetworks = int(session.cmd_output(network_str))
logging.debug("guestnet: %d", guestnetworks)
if (guestnetworks - 2) != num_network:
failed = True
logging.error("mismatch in guest network devices: \n"
"Expected: %d\nActual: %d", num_network,
guestnetworks)
if disk_str:
guestdisks = int(session.cmd_output(disk_str))
logging.debug("guestdisk: %d", guestdisks)
if guestdisks != num_disk:
failed = True
logging.error("mismatch in guest disk devices: \n"
"Expected: %d\nActual: %s", num_disk, guestdisks)
session.close()
guestmem = utils_misc.normalize_data_size("%s G" % guestmemory)
# TODO:512 MB threshold deviation value, need to normalize
if int(float(guestmem) - memtotal) > 512:
failed = True
logging.error("mismatch in guest memory: \nExpected: "
"%s\nActual: %s", float(guestmem), memtotal)
if cpucount != current_vcpu:
failed = True
logging.error("mismatch in guest vcpu:\nExpected: %d\nActual: "
"%d", current_vcpu, cpucount)
if failed:
test.fail("Consult previous failures")
示例13: Disk
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
# If we use qcow2 disk format, should format iscsi disk first.
if device_format == "qcow2":
cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/1 %s"
% (iscsi_host, iscsi_port, iscsi_target, emulated_size))
utils.run(cmd)
# Add disk xml.
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
disk_xml = Disk(type_name=device_type)
disk_xml.device = device
disk_xml.source = disk_xml.new_disk_source(
**{"attrs": {"protocol": "iscsi", "name": "%s/1" % iscsi_target},
"hosts": [{"name": iscsi_host, "port": iscsi_port}]})
disk_xml.target = {"dev": device_target, "bus": device_bus}
disk_xml.driver = {"name": "qemu", "type": device_format}
# Check if we want to use a faked uuid.
if not uuid:
uuid = secret_uuid
auth_dict = {}
if auth_uuid:
auth_dict = {"auth_user": chap_user,
"secret_type": secret_usage_type,
"secret_uuid": uuid}
elif auth_usage:
auth_dict = {"auth_user": chap_user,
"secret_type": secret_usage_type,
"secret_usage": secret_usage_target}
if auth_dict:
disk_xml.auth = disk_xml.new_auth(**auth_dict)
示例14: run
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
#.........这里部分代码省略.........
if pool_type in ["iscsi", "disk"]:
source_type = "dev"
if pool_type == "iscsi":
disk_xml.device = "lun"
disk_xml.rawio = "yes"
else:
if not enable_namespace:
qemu_conf.namespaces = ''
logging.debug("the qemu.conf content is: %s" % qemu_conf)
libvirtd.restart()
else:
source_type = "file"
# set host_sestatus as nfs pool will reset it
utils_selinux.set_status(host_sestatus)
# set virt_use_nfs
result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
shell=True)
if result.exit_status:
test.cancel("Failed to set virt_use_nfs value")
else:
source_type = "file"
# Init a QemuImg instance.
params['image_name'] = img_name
tmp_dir = data_dir.get_tmp_dir()
image = qemu_storage.QemuImg(params, tmp_dir, img_name)
# Create a image.
img_path, result = image.create(params)
# Set the context of the image.
if sec_relabel == "no":
utils_selinux.set_context_of_file(filename=img_path, context=img_label)
disk_xml.target = {"dev": device_target, "bus": device_bus}
disk_xml.driver = {"name": "qemu", "type": vol_format}
if disk_seclabel == "yes":
source_seclabel = []
sec_xml = seclabel.Seclabel()
sec_xml.update(sec_disk_dict)
source_seclabel.append(sec_xml)
disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path},
"seclabels": source_seclabel})
else:
disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path}})
# Set the context of the VM.
vmxml.set_seclabel([sec_dict])
vmxml.sync()
disk_xml.source = disk_source
logging.debug(disk_xml)
# Do the attach action.
cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml.xml, flagstr='--persistent')
libvirt.check_exit_status(cmd_result, expect_error=False)
logging.debug("the domain xml is: %s" % vmxml.xmltreefile)
# Start VM to check the VM is able to access the image or not.
try:
vm.start()
# Start VM successfully.
# VM with set seclabel can access the image with the
# set context.
if status_error:
test.fail('Test succeeded in negative case.')
if check_cap_rawio:
cap_list = ['CapPrm', 'CapEff', 'CapBnd']
示例15: len
# 需要导入模块: from virttest.libvirt_xml.devices.disk import Disk [as 别名]
# 或者: from virttest.libvirt_xml.devices.disk.Disk import driver [as 别名]
disk_xml.vendor = vendor
if product != "":
disk_xml.product = product
disk_xml.target = {"dev": device_targets[i], "bus": device_bus[i]}
if len(device_readonly) > i:
disk_xml.readonly = "yes" == device_readonly[i]
# Add driver options from parameters
driver_dict = {"name": "qemu"}
if len(driver_options) > i:
for driver_option in driver_options[i].split(','):
if driver_option != "":
d = driver_option.split('=')
driver_dict.update({d[0].strip(): d[1].strip()})
disk_xml.driver = driver_dict
# Add disk address from parameters.
if len(device_address) > i:
addr_dict = {}
for addr_option in device_address[i].split(','):
if addr_option != "":
d = addr_option.split('=')
addr_dict.update({d[0].strip(): d[1].strip()})
disk_xml.address = disk_xml.new_disk_address(
**{"attrs": addr_dict})
logging.debug("disk xml: %s", disk_xml)
if hotplug:
disks_xml.append(disk_xml)
else: