This article collects typical usage examples of the Python function vdsm.supervdsm.getProxy. If you are wondering what getProxy does, how to call it, or want to see it used in context, the curated examples below should help.
Fifteen code examples of the getProxy function are shown below, ordered by popularity.
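All of the examples follow the same pattern: getProxy() returns a proxy object for the privileged supervdsm service, and privileged operations are then invoked on it as ordinary method calls. A minimal sketch of that pattern, assuming a host where the supervdsm daemon is running (the hbaRescan() call is borrowed from Example 4):

    from vdsm import supervdsm

    # Obtain a proxy to the privileged supervdsm service.
    proxy = supervdsm.getProxy()

    # Any exported supervdsm method is invoked through the proxy like this.
    proxy.hbaRescan()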
Example 1: _alloc
def _alloc(count, size, path):
    """Helper to actually (de)allocate hugepages, called by public facing
    methods.

    Args:
        count: Number of hugepages to allocate (can be negative)
        size: The target hugepage size (must be supported by the system)
        path: Path to the hugepages directory.

    Returns: The amount of allocated pages (can be negative,
        implicating deallocation).

    Raises:
    """
    if size is None:
        size = DEFAULT_HUGEPAGESIZE[cpuarch.real()]

    path = path.format(size)

    ret = supervdsm.getProxy().hugepages_alloc(count, path)
    if ret != count:
        supervdsm.getProxy().hugepages_alloc(-ret, path)
        raise NonContiguousMemory

    return ret
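A hedged usage sketch for the helper above; the sysfs path template and page count are assumptions for illustration, not values taken from the snippet:

    # Hypothetical nr_hugepages path template; '{}' is filled in with the page size.
    HUGEPAGES_PATH = '/sys/kernel/mm/hugepages/hugepages-{}kB/nr_hugepages'

    # Ask for 64 pages of the default hugepage size for this architecture.
    # Raises NonContiguousMemory if the kernel could not satisfy the request.
    allocated = _alloc(64, None, HUGEPAGES_PATH)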
Example 2: setRpFilterIfNeeded
def setRpFilterIfNeeded(netIfaceName, hostname, loose_mode):
    """
    Set rp_filter to loose or strict mode if there's no session using the
    netIfaceName device and it's not the device used by the OS to reach the
    'hostname'.
    loose mode is needed to allow multiple iSCSI connections in a multiple NIC
    per subnet configuration. strict mode is needed to avoid the security
    breach where an untrusted VM can DoS the host by sending it packets with
    spoofed random sources.
    Arguments:
        netIfaceName: the device used by the iSCSI session
        target: iSCSI target object containing the portal hostname
        loose_mode: boolean
    """
    if netIfaceName is None:
        log.debug("iface.net_ifacename not provided, skipping rp filter setup")
        return

    sessions = _sessionsUsingNetiface(netIfaceName)
    if not any(sessions) and netIfaceName != getRouteDeviceTo(hostname):
        if loose_mode:
            log.info("Setting loose mode rp_filter for device %r." %
                     netIfaceName)
            supervdsm.getProxy().set_rp_filter_loose(netIfaceName)
        else:
            log.info("Setting strict mode rp_filter for device %r." %
                     netIfaceName)
            supervdsm.getProxy().set_rp_filter_strict(netIfaceName)
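For orientation, the two proxy calls boil down to flipping the per-interface rp_filter sysctl on the host; the snippet below is a sketch of that kernel knob, not vdsm's implementation (1 means strict reverse-path filtering, 2 means loose):

    # Sketch only: write the kernel's per-interface rp_filter setting directly.
    RP_FILTER_STRICT = '1'
    RP_FILTER_LOOSE = '2'

    def _set_rp_filter(iface, value):
        with open('/proc/sys/net/ipv4/conf/%s/rp_filter' % iface, 'w') as f:
            f.write(value)

    _set_rp_filter('eth0', RP_FILTER_LOOSE)   # hypothetical interface name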
Example 3: reattach_detachable
def reattach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException
        supervdsm.getProxy().rmAppropriateIommuGroup(iommu_group)

    libvirt_device.reAttach()
Example 4: rescan
def rescan():
    """
    Rescan HBAs discovering new devices.
    """
    log.debug("Starting scan")
    try:
        supervdsm.getProxy().hbaRescan()
    except Error as e:
        log.error("Scan failed: %s", e)
    else:
        log.debug("Scan finished")
Example 5: detach_detachable
def detach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException('hostdev passthrough without iommu')
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)

    libvirt_device.detachFlags(None)

    return device_params
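Examples 3 and 5 are naturally used as a pair around a VM's lifetime. A hedged sketch with a hypothetical libvirt node-device name (assumes a host with IOMMU support and a vdsm environment):

    # Hypothetical PCI device in libvirt's nodedev naming scheme.
    DEVICE_NAME = 'pci_0000_02_00_0'

    params = detach_detachable(DEVICE_NAME)    # detach from the host before passthrough
    try:
        pass  # ... run the VM with the device assigned ...
    finally:
        reattach_detachable(DEVICE_NAME)       # hand the device back to the host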
Example 6: main
def main():
    setup_nets_config = hooking.read_json()

    in_rollback = setup_nets_config['request']['options'].get('_inRollback')

    if in_rollback:
        log('Configuration failed with _inRollback=True.')
    else:
        log('Configuration failed. At this point, non-OVS rollback should be '
            'done. Executing OVS rollback.')
        supervdsm.getProxy().setupNetworks(
            {}, {}, {'connectivityCheck': False, '_inRollback': True,
                     '_inOVSRollback': True})
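The hook only inspects the 'options' part of its JSON input. A minimal, made-up example of the shape this code expects back from hooking.read_json() (any other request keys are omitted here):

    setup_nets_config = {
        'request': {
            'options': {'connectivityCheck': False, '_inRollback': True},
        },
    }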
Example 7: validateDirAccess
def validateDirAccess(dirPath):
    try:
        getProcPool().fileUtils.validateAccess(dirPath)
        supervdsm.getProxy().validateAccess(
            constants.VDSM_USER,
            (constants.VDSM_GROUP,), dirPath,
            (os.R_OK | os.W_OK | os.X_OK))
        supervdsm.getProxy().validateAccess(
            constants.QEMU_PROCESS_USER,
            (constants.DISKIMAGE_GROUP, constants.METADATA_GROUP), dirPath,
            (os.R_OK | os.X_OK))
    except OSError as e:
        if e.errno == errno.EACCES:
            raise se.StorageServerAccessPermissionError(dirPath)
        raise

    return True
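A hedged standalone sketch of the same proxy call with a made-up mount path; it assumes a host with the supervdsm daemon running and that validateAccess raises OSError with EACCES on denial, as the example above implies:

    import errno
    import os

    from vdsm import constants, supervdsm

    def vdsm_user_can_use(path):
        """Return True if the vdsm user may read, write and traverse `path`."""
        try:
            supervdsm.getProxy().validateAccess(
                constants.VDSM_USER, (constants.VDSM_GROUP,), path,
                (os.R_OK | os.W_OK | os.X_OK))
        except OSError as e:
            if e.errno == errno.EACCES:
                return False
            raise
        return True

    print(vdsm_user_can_use('/rhev/data-center/mnt/example'))  # hypothetical path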
Example 8: testKsmAction
def testKsmAction(self):
    dropPrivileges()
    proxy = supervdsm.getProxy()
    ksmParams = {"run": 0,
                 "merge_across_nodes": 1,
                 "sleep_millisecs": 0xffff,
                 "pages_to_scan": 0xffff}
    proxy.ksmTune(ksmParams)

    for k, v in ksmParams.iteritems():
        self.assertEqual(str(v),
                         open("/sys/kernel/mm/ksm/%s" % k, "r").read())
Example 9: _resize_if_needed
def _resize_if_needed(guid):
    name = devicemapper.getDmId(guid)
    slaves = [(slave, getDeviceSize(slave))
              for slave in devicemapper.getSlaves(name)]

    if len(slaves) == 0:
        log.warning("Map %r has no slaves" % guid)
        return False

    if len(set(size for slave, size in slaves)) != 1:
        raise Error("Map %r slaves size differ %s" % (guid, slaves))

    map_size = getDeviceSize(name)
    slave_size = slaves[0][1]
    if map_size == slave_size:
        return False

    log.info("Resizing map %r (map_size=%d, slave_size=%d)",
             guid, map_size, slave_size)
    supervdsm.getProxy().resizeMap(name)
    return True
Example 10: getVmNumaNodeRuntimeInfo
def getVmNumaNodeRuntimeInfo(vm):
    """
    Collect vm numa nodes runtime pinning to which host numa nodes
    information.
    Host numa node topology:
    'numaNodes': {'<nodeIndex>': {'cpus': [int], 'totalMemory': 'str'},
                  ...}
    We can get each physical cpu core belongs to which host numa node.
    Vm numa node configuration:
    'guestNumaNodes': [{'cpus': 'str', 'memory': 'str'}, ...]
    We can get each vcpu belongs to which vm numa node.
    Vcpu runtime pinning to physical cpu core information:
    ([(0, 1, 19590000000L, 1), (1, 1, 10710000000L, 1)],
     [(True, True, True, True), (True, True, True, True)])
    The first list element of the above tuple describe each vcpu(list[0])
    runtime pinning to which physical cpu core(list[3]).

    Get the mapping info between vcpu and pid from
    /var/run/libvirt/qemu/<vmName>.xml
    Get each vcpu(pid) backed memory mapping to which host numa nodes info
    from /proc/<vm_pid>/<vcpu_pid>/numa_maps
    From all the above information, we can calculate each vm numa node
    runtime pinning to which host numa node.
    The output is a map like:
    '<vm numa node index>': [<host numa node index>, ...]
    """
    vmNumaNodeRuntimeMap = {}

    vcpu_to_pcpu = _get_mapping_vcpu_to_pcpu(
        _get_vcpu_positioning(vm))
    if vcpu_to_pcpu:
        vm_numa_placement = defaultdict(set)

        vcpu_to_pnode = supervdsm.getProxy().getVcpuNumaMemoryMapping(
            vm.conf['vmName'].encode('utf-8'))
        pcpu_to_pnode = _get_mapping_pcpu_to_pnode()
        vcpu_to_vnode = _get_mapping_vcpu_to_vnode(vm)

        for vcpu_id, pcpu_id in vcpu_to_pcpu.iteritems():
            vnode_index = str(vcpu_to_vnode[vcpu_id])
            vm_numa_placement[vnode_index].add(pcpu_to_pnode[pcpu_id])
            vm_numa_placement[vnode_index].update(
                vcpu_to_pnode.get(vcpu_id, ()))

        vmNumaNodeRuntimeMap = dict((k, list(v)) for k, v in
                                    vm_numa_placement.iteritems())

    return vmNumaNodeRuntimeMap
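To make the computation above concrete, here is a small self-contained sketch with made-up mappings (two vCPUs of one guest NUMA node, pinned across two host NUMA nodes):

    from collections import defaultdict

    # Made-up inputs mirroring the intermediate mappings built above.
    vcpu_to_pcpu = {0: 1, 1: 3}        # vCPU id -> physical CPU id
    pcpu_to_pnode = {1: 0, 3: 1}       # pCPU id -> host NUMA node
    vcpu_to_vnode = {0: 0, 1: 0}       # vCPU id -> guest NUMA node
    vcpu_to_pnode = {0: [0], 1: [1]}   # vCPU id -> host nodes backing its memory

    placement = defaultdict(set)
    for vcpu_id, pcpu_id in vcpu_to_pcpu.items():
        vnode_index = str(vcpu_to_vnode[vcpu_id])
        placement[vnode_index].add(pcpu_to_pnode[pcpu_id])
        placement[vnode_index].update(vcpu_to_pnode.get(vcpu_id, ()))

    print({k: sorted(v) for k, v in placement.items()})   # {'0': [0, 1]}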
Example 11: rollback
def rollback(running_config, initial_config):
    diff = running_config.diffFrom(initial_config)
    if diff:
        for libvirt_ovs_nets in (iter_ovs_nets(running_config.networks),
                                 iter_ovs_nets(initial_config.networks)):
            for net, attrs in libvirt_ovs_nets:
                with suppress(libvirtError):  # network not found
                    libvirt.removeNetwork(net)
        destroy_ovs_bridge()
        for net, attrs in running_config.networks.items():
            if is_ovs_network(attrs):
                running_config.networks.pop(net)
        for bond, attrs in running_config.bonds.items():
            if is_ovs_bond(attrs):
                running_config.bonds.pop(bond)
        running_config.save()
        supervdsm.getProxy().setupNetworks(
            initial_config.networks, initial_config.bonds,
            {'connectivityCheck': False, '_inRollback': True})
Example 12: _prepareVolumePathFromPayload
def _prepareVolumePathFromPayload(self, vmId, device, payload):
    """
    param vmId:
        VM UUID or None
    param device:
        either 'floppy' or 'cdrom'
    param payload:
        a dict formed like this:
        {'volId': 'volume id',   # volId is optional
         'file': {'filename': 'content', ...}}
    """
    funcs = {'cdrom': 'mkIsoFs', 'floppy': 'mkFloppyFs'}
    if device not in funcs:
        raise vm.VolumeError("Unsupported 'device': %s" % device)
    func = getattr(supervdsm.getProxy(), funcs[device])
    return func(vmId, payload['file'], payload.get('volId'))
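A hedged sketch of what a 'cdrom' call resolves to through the proxy; the VM UUID, file names and contents below are made up for illustration and assume a running supervdsm daemon:

    from vdsm import supervdsm

    vm_id = '12345678-1234-1234-1234-123456789abc'    # hypothetical VM UUID
    payload = {
        'volId': 'config-1',                          # optional volume id
        'file': {'user-data': '#cloud-config\n',      # filename -> content
                 'meta-data': 'instance-id: vm01\n'},
    }

    # For device='cdrom' the method above ends up calling the proxy's mkIsoFs:
    iso_path = supervdsm.getProxy().mkIsoFs(vm_id, payload['file'],
                                            payload.get('volId'))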
Example 13: getVmVolumeInfo
def getVmVolumeInfo(self):
    """
    Send info to represent Gluster volume as a network block device
    """
    rpath = sdCache.produce(self.sdUUID).getRealPath()
    volfileServer, volname = rpath.rsplit(":", 1)
    volname = volname.strip('/')

    # Volume transport to Libvirt transport mapping
    VOLUME_TRANS_MAP = {'TCP': 'tcp', 'RDMA': 'rdma'}

    # Extract the volume's transport using gluster cli
    svdsmProxy = svdsm.getProxy()
    try:
        volInfo = svdsmProxy.glusterVolumeInfo(volname, volfileServer)
        volTrans = VOLUME_TRANS_MAP[volInfo[volname]['transportType'][0]]
    except GlusterException:
        # In case of issues with finding transport type, default to tcp
        self.log.warning("Unable to find transport type for GlusterFS"
                         " volume %s. GlusterFS server = %s."
                         " Defaulting to tcp",
                         volname, volfileServer, exc_info=True)
        volTrans = VOLUME_TRANS_MAP['TCP']

    # Use default port
    volPort = "0"

    imgFilePath = self.getVolumePath()
    imgFilePath_list = imgFilePath.rsplit("/")
    # Extract path to the image, relative to the gluster mount
    imgFileRelPath = "/".join(imgFilePath_list[-4:])

    glusterPath = volname + '/' + imgFileRelPath

    return {'volType': VmVolumeInfo.TYPE_NETWORK, 'path': glusterPath,
            'protocol': 'gluster', 'volPort': volPort,
            'volTransport': volTrans,
            'volfileServer': volfileServer}
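A small worked example of the path handling above, with made-up values, showing how the returned gluster path is derived:

    # Hypothetical remote path of the storage domain and of one volume image.
    rpath = 'gluster1.example.com:/data'
    volfileServer, volname = rpath.rsplit(':', 1)   # 'gluster1.example.com', '/data'
    volname = volname.strip('/')                    # 'data'

    imgFilePath = ('/rhev/data-center/mnt/glusterSD/gluster1.example.com:_data/'
                   'sd-uuid/images/img-uuid/vol-uuid')
    imgFileRelPath = '/'.join(imgFilePath.rsplit('/')[-4:])   # last four components
    glusterPath = volname + '/' + imgFileRelPath
    print(glusterPath)   # data/sd-uuid/images/img-uuid/vol-uuid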
Example 14: restoreNetConfig
def restoreNetConfig(self):
    supervdsm.getProxy().restoreNetworks()
Example 15: str
vm_name = str(
    domxml.getElementsByTagName('name')[0].firstChild.nodeValue
)
target_mdev_type = os.environ['mdev_type']

# Sufficient as the hook only supports single mdev instance per VM.
mdev_uuid = str(uuid.uuid3(_OVIRT_MDEV_NAMESPACE, vm_name))

device = _suitable_device_for_mdev_type(target_mdev_type)
if device is None:
    sys.stderr.write('vgpu: No device with type {} is available.\n'.format(
        target_mdev_type)
    )
    sys.exit(1)

try:
    supervdsm.getProxy().mdev_create(device, target_mdev_type, mdev_uuid)
except IOError:
    sys.stderr.write('vgpu: Failed to create mdev type {}.\n'.format(
        target_mdev_type)
    )
    sys.exit(1)

supervdsm.getProxy().appropriateIommuGroup(
    os.path.basename(os.path.realpath(
        os.path.join(_MDEV_PATH, device, mdev_uuid, 'iommu_group')
    ))
)

hostdev = domxml.createElement('hostdev')
hostdev.setAttribute('mode', 'subsystem')
hostdev.setAttribute('type', 'mdev')