This article collects typical usage examples of the Python class ovs.extensions.generic.volatilemutex.VolatileMutex. If you are unsure what VolatileMutex is for or how it is used, the selected class examples below should help.
Fifteen code examples of the VolatileMutex class are shown, sorted by popularity by default.
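All of the examples follow the same basic pattern: construct a VolatileMutex with a key string identifying the resource to protect, acquire it (optionally with a wait timeout in seconds), do the critical work, and release it in a finally block. A minimal sketch of that pattern, using only the constructor, acquire(wait=...) and release() calls that appear in the examples below (the key name and the rename operation are made up for illustration):

from ovs.extensions.generic.volatilemutex import VolatileMutex

def rename_resource(resource, new_name):
    # The key is an arbitrary string; every code path touching the same resource must use the same key.
    mutex = VolatileMutex('my_resource_{0}'.format(resource.guid))
    try:
        mutex.acquire(wait=5)  # wait up to 5 seconds for the lock (timeout behaviour is not shown in these examples)
        resource.name = new_name
        resource.save()
    finally:
        mutex.release()  # always release, even when the critical section raises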
Example 1: update_vdisk_name
def update_vdisk_name(volume_id, old_name, new_name):
    """
    Update a vDisk name using Management Center: set new name
    """
    vdisk = None
    for mgmt_center in MgmtCenterList.get_mgmtcenters():
        mgmt = Factory.get_mgmtcenter(mgmt_center=mgmt_center)
        try:
            disk_info = mgmt.get_vdisk_device_info(volume_id)
            device_path = disk_info['device_path']
            vpool_name = disk_info['vpool_name']
            vp = VPoolList.get_vpool_by_name(vpool_name)
            file_name = os.path.basename(device_path)
            vdisk = VDiskList.get_by_devicename_and_vpool(file_name, vp)
            if vdisk:
                break
        except Exception as ex:
            logger.info('Trying to get mgmt center failed for disk {0} with volume_id {1}. {2}'.format(old_name, volume_id, ex))
    if not vdisk:
        logger.error('No vdisk found for name {0}'.format(old_name))
        return
    vpool = vdisk.vpool
    mutex = VolatileMutex('{}_{}'.format(old_name, vpool.guid if vpool is not None else 'none'))
    try:
        mutex.acquire(wait=5)
        vdisk.name = new_name
        vdisk.save()
    finally:
        mutex.release()
Example 2: sync_with_reality
def sync_with_reality(storagerouter_guid=None, max_attempts=3):
    """
    Try to run sync_with_reality, retrying in case of failure.
    The sync always runs, as the tasks calling this expect it to be synchronous.
    :param storagerouter_guid: Guid of the StorageRouter to synchronise
    :param max_attempts: Maximum number of attempts before giving up
    :return:
    """
    cache = VolatileFactory.get_client()
    mutex = VolatileMutex('ovs_disk_sync_with_reality_{0}'.format(storagerouter_guid))
    key = 'ovs_dedupe_sync_with_reality_{0}'.format(storagerouter_guid)
    attempt = 1
    while attempt <= max_attempts:
        task_id = cache.get(key)
        if task_id:
            revoke(task_id)
        try:
            mutex.acquire(wait=120)
            return DiskController._sync_with_reality(storagerouter_guid)
        except Exception as ex:
            logger.warning('Sync with reality failed. {0}'.format(ex))
            attempt += 1
            time.sleep(attempt * 30)
        finally:
            mutex.release()
    raise RuntimeError('Sync with reality failed after {0} attempts'.format(max_attempts))
Example 3: resize_from_voldrv
def resize_from_voldrv(volumename, volumesize, volumepath, storagedriver_id):
    """
    Resize a disk
    Triggered by volumedriver messages on the queue
    @param volumepath: path on hypervisor to the volume
    @param volumename: volume id of the disk
    @param volumesize: size of the volume
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    hypervisor = Factory.get(pmachine)
    volumepath = hypervisor.clean_backing_disk_filename(volumepath)
    mutex = VolatileMutex('{}_{}'.format(volumename, volumepath))
    try:
        mutex.acquire(wait=30)
        disk = VDiskList.get_vdisk_by_volume_id(volumename)
        if disk is None:
            disk = VDiskList.get_by_devicename_and_vpool(volumepath, storagedriver.vpool)
            if disk is None:
                disk = VDisk()
    finally:
        mutex.release()
    disk.devicename = volumepath
    disk.volume_id = volumename
    disk.size = volumesize
    disk.vpool = storagedriver.vpool
    disk.save()
    VDiskController.sync_with_mgmtcenter(disk, pmachine, storagedriver)
    MDSServiceController.ensure_safety(disk)
Example 4: _backend_property
def _backend_property(self, function, dynamic):
    """
    Handles the internal caching of dynamic properties
    """
    caller_name = dynamic.name
    cache_key = '{0}_{1}'.format(self._key, caller_name)
    mutex = VolatileMutex(cache_key)
    try:
        cached_data = self._volatile.get(cache_key)
        if cached_data is None:
            if dynamic.locked:
                mutex.acquire()
                cached_data = self._volatile.get(cache_key)
            if cached_data is None:
                function_info = inspect.getargspec(function)
                if 'dynamic' in function_info.args:
                    cached_data = function(dynamic=dynamic)  # Load data from backend
                else:
                    cached_data = function()
                if cached_data is not None:
                    correct, allowed_types, given_type = Toolbox.check_type(cached_data, dynamic.return_type)
                    if not correct:
                        raise TypeError('Dynamic property {0} allows types {1}. {2} given'.format(
                            caller_name, str(allowed_types), given_type
                        ))
                if dynamic.timeout > 0:
                    self._volatile.set(cache_key, cached_data, dynamic.timeout)
        return cached_data
    finally:
        mutex.release()
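Example 4 is a double-checked locking pattern: the cache is read once without the lock and, only on a miss, the mutex is acquired and the cache read again before the expensive backend call, so concurrent callers do not all recompute the value. A stripped-down sketch of the same idea, assuming a generic volatile cache client with get/set and a caller-supplied compute function (all names here are hypothetical placeholders, not OVS API):

from ovs.extensions.generic.volatilemutex import VolatileMutex

def get_or_compute(volatile, cache_key, compute, timeout=300):
    value = volatile.get(cache_key)          # first check, without the lock
    if value is not None:
        return value
    mutex = VolatileMutex(cache_key)
    try:
        mutex.acquire()                      # serialize the miss path
        value = volatile.get(cache_key)      # second check: another worker may have filled the cache meanwhile
        if value is None:
            value = compute()                # only one worker runs the expensive call
            volatile.set(cache_key, value, timeout)
        return value
    finally:
        mutex.release()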
Example 5: delete_from_voldrv
def delete_from_voldrv(volumename, storagedriver_id):
    """
    Delete a disk
    Triggered by volumedriver messages on the queue
    @param volumename: volume id of the disk
    """
    _ = storagedriver_id  # For logging purposes
    disk = VDiskList.get_vdisk_by_volume_id(volumename)
    if disk is not None:
        mutex = VolatileMutex('{}_{}'.format(volumename, disk.devicename))
        try:
            mutex.acquire(wait=20)
            pmachine = None
            try:
                pmachine = PMachineList.get_by_storagedriver_id(disk.storagedriver_id)
            except RuntimeError as ex:
                if 'could not be found' not in str(ex):
                    raise
                # else: pmachine can't be loaded, because the volumedriver doesn't know about it anymore
            if pmachine is not None:
                limit = 5
                hypervisor = Factory.get(pmachine)
                exists = hypervisor.file_exists(disk.vpool, disk.devicename)
                while limit > 0 and exists is True:
                    time.sleep(1)
                    exists = hypervisor.file_exists(disk.vpool, disk.devicename)
                    limit -= 1
                if exists is True:
                    logger.info('Disk {0} still exists, ignoring delete'.format(disk.devicename))
                    return
            logger.info('Delete disk {}'.format(disk.name))
            disk.delete()
        finally:
            mutex.release()
Example 6: new_function
def new_function(self, request, *args, **kwargs):
    """
    Wrapped function
    """
    now = time.time()
    key = 'ovs_api_limit_{0}.{1}_{2}'.format(
        f.__module__, f.__name__,
        request.META['HTTP_X_REAL_IP']
    )
    client = VolatileFactory.get_client()
    mutex = VolatileMutex(key)
    try:
        mutex.acquire()
        rate_info = client.get(key, {'calls': [],
                                     'timeout': None})
        active_timeout = rate_info['timeout']
        if active_timeout is not None:
            if active_timeout > now:
                return HttpResponse, {'error_code': 'rate_limit_timeout',
                                      'error': 'Rate limit timeout ({0}s remaining)'.format(round(active_timeout - now, 2))}, 429
            else:
                rate_info['timeout'] = None
        rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
        calls = len(rate_info['calls'])
        if calls > amount:
            rate_info['timeout'] = now + timeout
            client.set(key, rate_info)
            return HttpResponse, {'error_code': 'rate_limit_reached',
                                  'error': 'Rate limit reached ({0} in last {1}s)'.format(calls, per)}, 429
        client.set(key, rate_info)
    finally:
        mutex.release()
    return f(self, request, *args, **kwargs)
Example 7: new_function
def new_function(self, request, *args, **kwargs):
    """
    Wrapped function
    """
    now = time.time()
    key = 'ovs_api_limit_{0}.{1}_{2}'.format(
        f.__module__, f.__name__,
        request.META['HTTP_X_REAL_IP']
    )
    client = VolatileFactory.get_client()
    mutex = VolatileMutex(key)
    try:
        mutex.acquire()
        rate_info = client.get(key, {'calls': [],
                                     'timeout': None})
        active_timeout = rate_info['timeout']
        if active_timeout is not None:
            if active_timeout > now:
                raise Throttled(wait=active_timeout - now)
            else:
                rate_info['timeout'] = None
        rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
        calls = len(rate_info['calls'])
        if calls > amount:
            rate_info['timeout'] = now + timeout
            client.set(key, rate_info)
            raise Throttled(wait=timeout)
        client.set(key, rate_info)
    finally:
        mutex.release()
    return f(self, request, *args, **kwargs)
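Examples 6 and 7 are the inner wrapped functions of a rate-limiting decorator: the names f, amount, per and timeout are free variables taken from the enclosing decorator factory, and the per-client call history lives in the volatile cache under a key guarded by a VolatileMutex so concurrent requests cannot corrupt it. A sketch of the shape such a factory could take (the name limit and its exact signature are assumptions, not the actual OVS decorator):

def limit(amount, per, timeout):
    def wrap(f):
        def new_function(self, request, *args, **kwargs):
            # body as in Example 7, using f, amount, per and timeout from this closure
            return f(self, request, *args, **kwargs)
        return new_function
    return wrap

# Applied to an API view method roughly like:
# @limit(amount=60, per=60.0, timeout=60)
# def list(self, request, *args, **kwargs):
#     ...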
Example 8: create_cluster
def create_cluster(cluster_name, ip, base_dir, plugins=None, locked=True):
    """
    Creates a cluster
    :param locked: Indicates whether the create should run in a locked context (e.g. to prevent port conflicts)
    :param plugins: Plugins that should be added to the configuration file
    :param base_dir: Base directory that should contain the data and tlogs
    :param ip: IP address of the first node of the new cluster
    :param cluster_name: Name of the cluster
    """
    logger.debug('Creating cluster {0} on {1}'.format(cluster_name, ip))
    base_dir = base_dir.rstrip('/')
    client = SSHClient(ip)
    if ArakoonInstaller.is_running(cluster_name, client):
        logger.info('Arakoon service running for cluster {0}'.format(cluster_name))
        config = ArakoonClusterConfig(cluster_name, plugins)
        config.load_config()
        for node in config.nodes:
            if node.ip == ip:
                return {'client_port': node.client_port,
                        'messaging_port': node.messaging_port}
    node_name = System.get_my_machine_id(client)
    home_dir = ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name)
    log_dir = ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name)
    tlog_dir = ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)
    ArakoonInstaller.clean_leftover_arakoon_data(ip, {log_dir: True,
                                                      home_dir: False,
                                                      tlog_dir: False})
    port_mutex = None
    try:
        if locked is True:
            from ovs.extensions.generic.volatilemutex import VolatileMutex
            port_mutex = VolatileMutex('arakoon_install_ports_{0}'.format(ip))
            port_mutex.acquire(wait=60)
        ports = ArakoonInstaller._get_free_ports(client)
        config = ArakoonClusterConfig(cluster_name, plugins)
        config.nodes.append(ArakoonNodeConfig(name=node_name,
                                              ip=ip,
                                              client_port=ports[0],
                                              messaging_port=ports[1],
                                              log_dir=log_dir,
                                              home=home_dir,
                                              tlog_dir=tlog_dir))
        ArakoonInstaller._deploy(config)
    finally:
        if port_mutex is not None:
            port_mutex.release()
    logger.debug('Creating cluster {0} on {1} completed'.format(cluster_name, ip))
    return {'client_port': ports[0],
            'messaging_port': ports[1]}
Example 9: new_function
def new_function(*args, **kw):
    """
    Executes the decorated function in a locked context
    """
    filemutex = FileMutex('messaging')
    try:
        filemutex.acquire(wait=5)
        mutex = VolatileMutex('messaging')
        try:
            mutex.acquire(wait=5)
            return f(*args, **kw)
        finally:
            mutex.release()
    finally:
        filemutex.release()
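Example 9 stacks two locks: a FileMutex, which serializes callers on the local host through a lock file, and a VolatileMutex, which serializes callers across the cluster through the volatile store; the file lock is taken first so each host competes cluster-wide with only one local caller at a time. The same stacking as a reusable helper, a minimal sketch (the helper name is an assumption, and the FileMutex import path is not shown in this excerpt):

from ovs.extensions.generic.volatilemutex import VolatileMutex

def call_with_messaging_locks(func, *args, **kw):
    # Host-local lock first, then the cluster-wide lock; released in reverse order.
    filemutex = FileMutex('messaging')
    try:
        filemutex.acquire(wait=5)
        mutex = VolatileMutex('messaging')
        try:
            mutex.acquire(wait=5)
            return func(*args, **kw)
        finally:
            mutex.release()
    finally:
        filemutex.release()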
Example 10: invalidate_dynamics
def invalidate_dynamics(self, properties=None):
    """
    Invalidates all dynamic property caches. Use with caution, as this action can introduce
    a short performance hit.
    """
    for dynamic in self._dynamics:
        if properties is None or dynamic.name in properties:
            key = '{0}_{1}'.format(self._key, dynamic.name)
            mutex = VolatileMutex(key)
            try:
                if dynamic.locked:
                    mutex.acquire()
                self._volatile.delete(key)
            finally:
                mutex.release()
Example 11: extend_cluster
def extend_cluster(master_ip, new_ip, cluster_name, base_dir):
    """
    Extends a cluster to a given new node
    :param base_dir: Base directory that will hold the db and tlogs
    :param cluster_name: Name of the cluster to be extended
    :param new_ip: IP address of the node to be added
    :param master_ip: IP of one of the already existing nodes
    """
    logger.debug('Extending cluster {0} from {1} to {2}'.format(cluster_name, master_ip, new_ip))
    base_dir = base_dir.rstrip('/')
    from ovs.extensions.generic.volatilemutex import VolatileMutex
    port_mutex = VolatileMutex('arakoon_install_ports_{0}'.format(new_ip))
    config = ArakoonClusterConfig(cluster_name)
    config.load_config()
    client = SSHClient(new_ip)
    node_name = System.get_my_machine_id(client)
    home_dir = ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name)
    log_dir = ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name)
    tlog_dir = ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)
    ArakoonInstaller.archive_existing_arakoon_data(new_ip, home_dir,
                                                   ArakoonInstaller.ARAKOON_BASE_DIR.format(base_dir), cluster_name)
    ArakoonInstaller.archive_existing_arakoon_data(new_ip, log_dir,
                                                   ArakoonInstaller.ARAKOON_LOG_DIR.format(''), cluster_name)
    ArakoonInstaller.archive_existing_arakoon_data(new_ip, tlog_dir,
                                                   ArakoonInstaller.ARAKOON_BASE_DIR.format(base_dir), cluster_name)
    try:
        port_mutex.acquire(wait=60)
        ports = ArakoonInstaller._get_free_ports(client)
        if node_name not in [node.name for node in config.nodes]:
            config.nodes.append(ArakoonNodeConfig(name=node_name,
                                                  ip=new_ip,
                                                  client_port=ports[0],
                                                  messaging_port=ports[1],
                                                  log_dir=log_dir,
                                                  home=home_dir,
                                                  tlog_dir=tlog_dir))
        ArakoonInstaller._deploy(config)
    finally:
        port_mutex.release()
    logger.debug('Extending cluster {0} from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
    return {'client_port': ports[0],
            'messaging_port': ports[1]}
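Examples 8 and 11 both wrap the free-port probing in a per-IP mutex named 'arakoon_install_ports_{ip}', so two concurrent Arakoon operations targeting the same host cannot both be handed the same "free" ports before either has claimed them. The pattern in isolation, as a minimal sketch (probe_free_ports is a hypothetical stand-in for ArakoonInstaller._get_free_ports):

from ovs.extensions.generic.volatilemutex import VolatileMutex

def allocate_ports(ip, probe_free_ports):
    # Serialize port probing per target host.
    port_mutex = VolatileMutex('arakoon_install_ports_{0}'.format(ip))
    try:
        port_mutex.acquire(wait=60)
        return probe_free_ports()
    finally:
        port_mutex.release()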
Example 12: __init__
def __init__(self, *args, **kwargs):
    """
    Initializes the distributed scheduler
    """
    self._persistent = PersistentFactory.get_client()
    self._namespace = 'ovs_celery_beat'
    self._mutex = VolatileMutex('celery_beat')
    self._has_lock = False
    super(DistributedScheduler, self).__init__(*args, **kwargs)
    logger.debug('DS init')
Example 13: update_vmachine_name
def update_vmachine_name(instance_id, old_name, new_name):
    """
    Update a vMachine name: find vmachine by management center instance id, set new name
    :param instance_id: ID for the virtual machine known by management center
    :param old_name: Old name of the virtual machine
    :param new_name: New name for the virtual machine
    """
    vmachine = None
    for mgmt_center in MgmtCenterList.get_mgmtcenters():
        mgmt = Factory.get_mgmtcenter(mgmt_center=mgmt_center)
        try:
            machine_info = mgmt.get_vmachine_device_info(instance_id)
            file_name = machine_info['file_name']
            host_name = machine_info['host_name']
            vpool_name = machine_info['vpool_name']
            storage_router = StorageRouterList.get_by_name(host_name)
            machine_id = storage_router.machine_id
            device_name = '{0}/{1}'.format(machine_id, file_name)
            vp = VPoolList.get_vpool_by_name(vpool_name)
            vmachine = VMachineList.get_by_devicename_and_vpool(device_name, vp)
            if vmachine:
                break
            vmachine = VMachineList.get_by_devicename_and_vpool(device_name, None)
            if vmachine:
                break
        except Exception as ex:
            logger.info('Trying to get mgmt center failed for vmachine {0}. {1}'.format(old_name, ex))
    if not vmachine:
        logger.error('No vmachine found for name {0}'.format(old_name))
        return
    vpool = vmachine.vpool
    mutex = VolatileMutex('{0}_{1}'.format(old_name, vpool.guid if vpool is not None else 'none'))
    try:
        mutex.acquire(wait=5)
        vmachine.name = new_name
        vmachine.save()
    finally:
        mutex.release()
Example 14: get_relation_set
def get_relation_set(remote_class, remote_key, own_class, own_key, own_guid):
    """
    This method will get a DataList for a relation.
    On a cache miss, the relation DataList will be rebuilt and, due to the nature of the full table scan,
    it will update all relations in the meantime.
    """
    # Example:
    # * remote_class = vDisk
    # * remote_key = vmachine
    # * own_class = vMachine
    # * own_key = vdisks
    # Called to load the vMachine.vdisks list (resulting in a possible scan of vDisk objects)
    # * own_guid = this vMachine object's guid

    volatile = VolatileFactory.get_client()
    own_name = own_class.__name__.lower()
    datalist = DataList({}, '{0}_{1}_{2}'.format(own_name, own_guid, remote_key), load=False)
    reverse_key = 'ovs_reverseindex_{0}_{1}'.format(own_name, own_guid)

    # Check whether the requested information is available in cache
    reverse_index = volatile.get(reverse_key)
    if reverse_index is not None and own_key in reverse_index:
        Toolbox.log_cache_hit('datalist', True)
        datalist.data = reverse_index[own_key]
        datalist.from_cache = True
        return datalist

    Toolbox.log_cache_hit('datalist', False)
    mutex = VolatileMutex('reverseindex')
    remote_name = remote_class.__name__.lower()
    blueprint_object = remote_class()  # vDisk object
    foreign_guids = {}

    remote_namespace = blueprint_object._namespace
    for relation in blueprint_object._relations:  # E.g. vmachine or vpool relation
        if relation.foreign_type is None:
            classname = remote_name
            foreign_namespace = blueprint_object._namespace
        else:
            classname = relation.foreign_type.__name__.lower()
            foreign_namespace = relation.foreign_type()._namespace
        if classname not in foreign_guids:
            foreign_guids[classname] = DataList.get_pks(foreign_namespace, classname)
        try:
            mutex.acquire(60)
            for foreign_guid in foreign_guids[classname]:
                reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, foreign_guid)
                reverse_index = volatile.get(reverse_key)
                if reverse_index is None:
                    reverse_index = {}
                if relation.foreign_key not in reverse_index:
                    reverse_index[relation.foreign_key] = []
                    volatile.set(reverse_key, reverse_index)
        finally:
            mutex.release()

    remote_keys = DataList.get_pks(remote_namespace, remote_name)
    for guid in remote_keys:
        try:
            instance = remote_class(guid)
            for relation in blueprint_object._relations:  # E.g. vmachine or vpool relation
                if relation.foreign_type is None:
                    classname = remote_name
                else:
                    classname = relation.foreign_type.__name__.lower()
                key = getattr(instance, '{0}_guid'.format(relation.name))
                if key is not None:
                    try:
                        mutex.acquire(60)
                        reverse_index = volatile.get('ovs_reverseindex_{0}_{1}'.format(classname, key))
                        if reverse_index is None:
                            reverse_index = {}
                        if relation.foreign_key not in reverse_index:
                            reverse_index[relation.foreign_key] = []
                        if guid not in reverse_index[relation.foreign_key]:
                            if instance.updated_on_datastore():
                                raise ConcurrencyException()
                            reverse_index[relation.foreign_key].append(guid)
                            volatile.set('ovs_reverseindex_{0}_{1}'.format(classname, key), reverse_index)
                    finally:
                        mutex.release()
        except ObjectNotFoundException:
            pass
        except ConcurrencyException:
            pass

    try:
        mutex.acquire(60)
        reverse_key = 'ovs_reverseindex_{0}_{1}'.format(own_name, own_guid)
        reverse_index = volatile.get(reverse_key)
        if reverse_index is None:
            reverse_index = {}
        if own_key not in reverse_index:
            reverse_index[own_key] = []
            volatile.set(reverse_key, reverse_index)
        datalist.data = reverse_index[own_key]
        datalist.from_cache = False
    finally:
        mutex.release()
    return datalist
Example 15: clone
def clone(machineguid, timestamp, name):
    """
    Clone a vmachine using the disk snapshot based on a snapshot timestamp
    @param machineguid: guid of the machine to clone
    @param timestamp: timestamp of the disk snapshots to use for the clone
    @param name: name for the new machine
    """
    machine = VMachine(machineguid)
    timestamp = str(timestamp)
    if timestamp not in (snap['timestamp'] for snap in machine.snapshots):
        raise RuntimeError('Invalid timestamp provided, not a valid snapshot of this vmachine.')

    vpool = None
    storagerouter = None
    if machine.pmachine is not None and machine.pmachine.hvtype == 'VMWARE':
        for vdisk in machine.vdisks:
            if vdisk.vpool is not None:
                vpool = vdisk.vpool
                break
    for vdisk in machine.vdisks:
        if vdisk.storagerouter_guid:
            storagerouter = StorageRouter(vdisk.storagerouter_guid)
            break
    hv = Factory.get(machine.pmachine)
    vm_path = hv.get_vmachine_path(name, storagerouter.machine_id if storagerouter is not None else '')
    # mutex in sync_with_hypervisor uses "None" for KVM hvtype
    mutex = VolatileMutex('{0}_{1}'.format(hv.clean_vmachine_filename(vm_path), vpool.guid if vpool is not None else 'none'))

    disks = {}
    for snapshot in machine.snapshots:
        if snapshot['timestamp'] == timestamp:
            for diskguid, snapshotguid in snapshot['snapshots'].iteritems():
                disks[diskguid] = snapshotguid

    try:
        mutex.acquire(wait=120)
        new_machine = VMachine()
        new_machine.copy(machine)
        new_machine.name = name
        new_machine.devicename = hv.clean_vmachine_filename(vm_path)
        new_machine.pmachine = machine.pmachine
        new_machine.save()
    finally:
        mutex.release()

    new_disk_guids = []
    vm_disks = []
    mountpoint = None
    disks_by_order = sorted(machine.vdisks, key=lambda x: x.order)
    try:
        for currentDisk in disks_by_order:
            if machine.is_vtemplate and currentDisk.templatesnapshot:
                snapshotid = currentDisk.templatesnapshot
            else:
                snapshotid = disks[currentDisk.guid]
            prefix = '%s-clone' % currentDisk.name
            result = VDiskController.clone(diskguid=currentDisk.guid,
                                           snapshotid=snapshotid,
                                           devicename=prefix,
                                           pmachineguid=new_machine.pmachine_guid,
                                           machinename=new_machine.name,
                                           machineguid=new_machine.guid)
            new_disk_guids.append(result['diskguid'])
            mountpoint = StorageDriverList.get_by_storagedriver_id(currentDisk.storagedriver_id).mountpoint
            vm_disks.append(result)
    except Exception as ex:
        logger.error('Failed to clone disks. {0}'.format(ex))
        VMachineController.delete(machineguid=new_machine.guid)
        raise

    try:
        result = hv.clone_vm(machine.hypervisor_id, name, vm_disks, mountpoint)
    except Exception as ex:
        logger.error('Failed to clone vm. {0}'.format(ex))
        VMachineController.delete(machineguid=new_machine.guid)
        raise

    try:
        mutex.acquire(wait=120)
        new_machine.hypervisor_id = result
        new_machine.save()
    finally:
        mutex.release()
    return new_machine.guid