This page collects typical usage examples of the supervdsm.getProxy function in Python. If you are wondering exactly how getProxy is used, how to call it, or what real-world usage looks like, the curated examples below may help.
A total of 15 code examples of the getProxy function are shown, sorted by popularity by default.
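Every example below follows the same basic pattern: supervdsm.getProxy() returns a proxy object, and each method called on that proxy is forwarded to the privileged supervdsmd service, which performs the root-only operation on behalf of the unprivileged vdsm process. A minimal sketch of the pattern, reusing the setSafeNetworkConfig verb from Example 7 (the wrapper function itself is illustrative and not part of vdsm):

import supervdsm  # inside the vdsm source tree; the import path may differ between versions

def persist_network_config():
    # Attribute access on the proxy is forwarded to supervdsmd, which
    # executes the call with root privileges.
    proxy = supervdsm.getProxy()
    proxy.setSafeNetworkConfig()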
Example 1: addNetwork
def addNetwork(self, bridge, vlan=None, bond=None, nics=None, options={}):
    """Add a new network to this vds.

    Network topology is bridge--[vlan--][bond--]nics.
    vlan (a number) and bond are optional - pass the empty string to
    discard them.
    """
    self._translateOptionsToNew(options)
    if not self._cif._networkSemaphore.acquire(blocking=False):
        self.log.warn('concurrent network verb already executing')
        return errCode['unavail']
    try:
        self._cif._netConfigDirty = True
        if vlan:
            options['vlan'] = vlan
        if bond:
            options['bonding'] = bond
        if nics:
            options['nics'] = list(nics)

        try:
            supervdsm.getProxy().addNetwork(bridge, options)
        except configNetwork.ConfigNetworkError as e:
            self.log.error(e.message, exc_info=True)
            return {'status': {'code': e.errCode, 'message': e.message}}
        return {'status': doneCode}
    finally:
        # Release the semaphore taken above so later network verbs can run
        # (same pattern as Example 7).
        self._cif._networkSemaphore.release()
Example 2: setRpFilterIfNeeded
def setRpFilterIfNeeded(netIfaceName, hostname, loose_mode):
    """
    Set rp_filter to loose or strict mode if there's no session using the
    netIfaceName device and it's not the device used by the OS to reach
    'hostname'.

    Loose mode is needed to allow multiple iSCSI connections in a
    multiple-NICs-per-subnet configuration. Strict mode is needed to avoid
    the security breach where an untrusted VM can DoS the host by sending it
    packets with spoofed random sources.

    Arguments:
        netIfaceName: the device used by the iSCSI session
        hostname: the hostname of the iSCSI target portal
        loose_mode: boolean
    """
    if netIfaceName is None:
        log.info("iSCSI iface.net_ifacename not provided. Skipping.")
        return

    sessions = _sessionsUsingNetiface(netIfaceName)

    if not any(sessions) and netIfaceName != getRouteDeviceTo(hostname):
        if loose_mode:
            log.info("Setting loose mode rp_filter for device %r." %
                     netIfaceName)
            supervdsm.getProxy().set_rp_filter_loose(netIfaceName)
        else:
            log.info("Setting strict mode rp_filter for device %r." %
                     netIfaceName)
            supervdsm.getProxy().set_rp_filter_strict(netIfaceName)
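The set_rp_filter_loose and set_rp_filter_strict verbs are implemented inside supervdsmd and are not shown on this page. For context, the kernel's reverse-path filter is controlled per interface through a sysctl that accepts 0 (off), 1 (strict) and 2 (loose). A minimal sketch of what such a helper could boil down to - an illustrative assumption, not the actual vdsm implementation:

RP_FILTER_STRICT = '1'
RP_FILTER_LOOSE = '2'

def _set_rp_filter(netIfaceName, value):
    # Hypothetical helper: write the requested mode into the per-interface
    # rp_filter sysctl. Writing here requires root, which is why vdsm
    # delegates the call to supervdsmd through getProxy().
    path = '/proc/sys/net/ipv4/conf/%s/rp_filter' % netIfaceName
    with open(path, 'w') as f:
        f.write(value)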
Example 3: delNetwork
def delNetwork(self, bridge, vlan=None, bond=None, nics=None, options={}):
    """Delete a network from this vds."""
    self._translateOptionsToNew(options)
    try:
        if not self._cif._networkSemaphore.acquire(blocking=False):
            self.log.warn('concurrent network verb already executing')
            return errCode['unavail']

        if vlan or bond or nics:
            # Backwards compatibility
            self.log.warn('Specifying vlan, bond or nics to delNetwork '
                          'is deprecated')
            _netinfo = netinfo.NetInfo()
            try:
                if bond:
                    configNetwork.validateBondingName(bond)
                if vlan:
                    configNetwork.validateVlanId(vlan)
                if nics and bond and \
                        set(nics) != set(_netinfo.bondings[bond]["slaves"]):
                    self.log.error('delNetwork: not all nics specified are '
                                   'enslaved (%s != %s)' %
                                   (nics, _netinfo.bondings[bond]["slaves"]))
                    raise configNetwork.ConfigNetworkError(
                        configNetwork.ne.ERR_BAD_NIC,
                        "not all nics are enslaved")
            except configNetwork.ConfigNetworkError as e:
                self.log.error(e.message, exc_info=True)
                return {'status': {'code': e.errCode, 'message': e.message}}

        self._cif._netConfigDirty = True

        try:
            supervdsm.getProxy().delNetwork(bridge, options)
        except configNetwork.ConfigNetworkError as e:
            self.log.error(e.message, exc_info=True)
            return {'status': {'code': e.errCode, 'message': e.message}}
        return {'status': doneCode}
    finally:
        # The try: block above needs this clause; release the semaphore so
        # later network verbs can run (same pattern as Example 7).
        self._cif._networkSemaphore.release()
Example 4: reattach_detachable
def reattach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        iommu_group = device_params['iommu_group']
        supervdsm.getProxy().rmAppropriateIommuGroup(iommu_group)

    libvirt_device.reAttach()
Example 5: detach_detachable
def detach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        iommu_group = device_params['iommu_group']
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)

    libvirt_device.detachFlags(None)

    return device_params
Example 6: reattach_detachable
def reattach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException
        supervdsm.getProxy().rmAppropriateIommuGroup(iommu_group)

    libvirt_device.reAttach()
Example 7: setSafeNetworkConfig
def setSafeNetworkConfig(self):
    """Declare current network configuration as 'safe'"""
    if not self._cif._networkSemaphore.acquire(blocking=False):
        self.log.warn('concurrent network verb already executing')
        return errCode['unavail']
    try:
        self._cif._netConfigDirty = False
        supervdsm.getProxy().setSafeNetworkConfig()
        return {'status': doneCode}
    finally:
        self._cif._networkSemaphore.release()
Example 8: detach_detachable
def detach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException('hostdev passthrough without iommu')
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)

    libvirt_device.detachFlags(None)

    return device_params
Example 9: validateDirAccess
def validateDirAccess(dirPath):
    try:
        getProcPool().fileUtils.validateAccess(dirPath)
        supervdsm.getProxy().validateAccess(
            constants.QEMU_PROCESS_USER,
            (constants.DISKIMAGE_GROUP, constants.METADATA_GROUP), dirPath,
            (os.R_OK | os.X_OK))
    except OSError as e:
        if e.errno == errno.EACCES:
            raise se.StorageServerAccessPermissionError(dirPath)
        raise

    return True
Example 10: rescan
def rescan():
    """
    Forces multipath daemon to rescan the list of available devices and
    refresh the mapping table. New devices can be found under /dev/mapper.

    Should only be called from hsm._rescanDevices()
    """
    # First rescan iSCSI and FCP connections
    iscsi.rescan()
    supervdsm.getProxy().hbaRescan()

    # Now let multipath daemon pick up new devices
    misc.execCmd([constants.EXT_MULTIPATH], sudo=True)
Example 11: rescan
def rescan():
    """
    Forces multipath daemon to rescan the list of available devices and
    refresh the mapping table. New devices can be found under /dev/mapper.

    Should only be called from hsm._rescanDevices()
    """
    # First ask iSCSI to rescan all its sessions
    iscsi.rescan()
    supervdsm.getProxy().forceIScsiScan()

    # Now let multipath daemon pick up new devices
    misc.execCmd([constants.EXT_MULTIPATH])
Example 12: setupNetworks
def setupNetworks(self, networks={}, bondings={}, options={}):
    """Add a new network to this vds, replacing an old one."""
    self._translateOptionsToNew(options)
    if not self._cif._networkSemaphore.acquire(blocking=False):
        self.log.warn('concurrent network verb already executing')
        return errCode['unavail']
    try:
        self._cif._netConfigDirty = True

        try:
            supervdsm.getProxy().setupNetworks(networks, bondings, options)
        except configNetwork.ConfigNetworkError as e:
            self.log.error(e.message, exc_info=True)
            return {'status': {'code': e.errCode, 'message': e.message}}
        return {'status': doneCode}
    finally:
        # Release the semaphore taken above (same pattern as Example 7).
        self._cif._networkSemaphore.release()
Example 13: doUnmountMaster
def doUnmountMaster(cls, masterdir):
    """
    Unmount the master metadata file system. Should be called only by SPM.
    """
    # Deal with processes holding the mount point (reported by fuser) and
    # validate that the umount succeeded
    cls.__handleStuckUmount(masterdir)
    try:
        masterMount = mount.getMountFromTarget(masterdir)
    except OSError as ex:
        if ex.errno == errno.ENOENT:
            return
        raise
    if masterMount.isMounted():
        # Try umount, take 1
        try:
            masterMount.umount()
        except mount.MountError:
            # umount failed, try to kill the processes holding the mount
            # point
            svdsmp = svdsm.getProxy()
            pids = svdsmp.fuser(masterMount.fs_file, mountPoint=True)

            # It was unmounted while we were checking, no need to do
            # anything
            if not masterMount.isMounted():
                return

            if len(pids) == 0:
                cls.log.warn("Unmount failed because of errors that fuser "
                             "can't solve")
            else:
                for pid in pids:
                    try:
                        cls.log.debug("Trying to kill pid %d", pid)
                        os.kill(pid, signal.SIGKILL)
                    except OSError as e:
                        if e.errno == errno.ESRCH:  # No such process
                            pass
                        elif e.errno == errno.EPERM:  # Op. not permitted
                            cls.log.warn("Could not kill pid %d because "
                                         "operation was not permitted", pid)
                        else:
                            cls.log.warn("Could not kill pid %d because of "
                                         "an unexpected error", pid,
                                         exc_info=True)
                    except:
                        cls.log.warn("Could not kill pid %d because of an "
                                     "unexpected error", pid, exc_info=True)

            # Try umount, take 2
            try:
                masterMount.umount()
            except mount.MountError:
                pass

            if masterMount.isMounted():
                # We failed to umount the masterFS.
                # Forcibly rebooting the SPM host would be safer. ???
                raise se.StorageDomainMasterUnmountError(masterdir, 1)
Example 14: _resize_if_needed
def _resize_if_needed(guid):
    name = devicemapper.getDmId(guid)
    slaves = [(slave, getDeviceSize(slave))
              for slave in devicemapper.getSlaves(name)]

    if len(slaves) == 0:
        log.warning("Map %r has no slaves" % guid)
        return False

    if len(set(size for slave, size in slaves)) != 1:
        raise Error("Map %r slaves size differ %s" % (guid, slaves))

    map_size = getDeviceSize(name)
    slave_size = slaves[0][1]
    if map_size == slave_size:
        return False

    log.info("Resizing map %r (map_size=%d, slave_size=%d)",
             guid, map_size, slave_size)
    supervdsm.getProxy().resizeMap(name)
    return True
Example 15: getVmNumaNodeRuntimeInfo
def getVmNumaNodeRuntimeInfo(vm):
    """
    Collect the runtime pinning of this VM's NUMA nodes to host NUMA nodes.

    Host NUMA node topology:
    'numaNodes': {'<nodeIndex>': {'cpus': [int], 'totalMemory': 'str'},
                  ...}
    tells us which host NUMA node each physical CPU core belongs to.

    VM NUMA node configuration:
    'guestNumaNodes': [{'cpus': 'str', 'memory': 'str'}, ...]
    tells us which VM NUMA node each vCPU belongs to.

    The runtime pinning of vCPUs to physical CPU cores looks like:
    ([(0, 1, 19590000000L, 1), (1, 1, 10710000000L, 1)],
     [(True, True, True, True), (True, True, True, True)])
    The first list of the tuple describes, for each vCPU (element [0]),
    which physical CPU core it is currently pinned to (element [3]).

    The mapping between vCPU and pid comes from
    /var/run/libvirt/qemu/<vmName>.xml, and the host NUMA nodes backing each
    vCPU's (pid's) memory come from /proc/<vm_pid>/<vcpu_pid>/numa_maps.

    From all of the above we can calculate the runtime pinning of each VM
    NUMA node to host NUMA nodes. The output is a map like:
    '<vm numa node index>': [<host numa node index>, ...]
    """
    vmNumaNodeRuntimeMap = {}

    vcpu_to_pcpu = _get_mapping_vcpu_to_pcpu(
        _get_vcpu_positioning(vm))
    if vcpu_to_pcpu:
        vm_numa_placement = defaultdict(set)

        vcpu_to_pnode = supervdsm.getProxy().getVcpuNumaMemoryMapping(
            vm.conf['vmName'].encode('utf-8'))
        pcpu_to_pnode = _get_mapping_pcpu_to_pnode()
        vcpu_to_vnode = _get_mapping_vcpu_to_vnode(vm)

        for vcpu_id, pcpu_id in vcpu_to_pcpu.iteritems():
            vnode_index = str(vcpu_to_vnode[vcpu_id])
            vm_numa_placement[vnode_index].add(pcpu_to_pnode[pcpu_id])
            vm_numa_placement[vnode_index].update(
                vcpu_to_pnode.get(vcpu_id, ()))

        vmNumaNodeRuntimeMap = dict((k, list(v)) for k, v in
                                    vm_numa_placement.iteritems())

    return vmNumaNodeRuntimeMap
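For a concrete picture of the return value, a hypothetical guest with two NUMA nodes whose vCPUs end up running on host NUMA nodes 0 and 1 would yield a mapping like the following (values invented purely for illustration):

# vm numa node index -> host numa node indices its vcpus currently run on
{'0': [0], '1': [0, 1]}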