本文整理汇总了Python中service.cache.get_cached_driver函数的典型用法代码示例。如果您正苦于以下问题:Python get_cached_driver函数的具体用法?Python get_cached_driver怎么用?Python get_cached_driver使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_cached_driver函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _get_instance_owner_map
def _get_instance_owner_map(provider, users=None):
    """
    All keys == All identities
    Values = List of identities / username

    Builds a mapping of instance 'owner' (tenant name) -> that owner's
    instances, including identities that own no instances at all.
    NOTE: This is KEYSTONE && NOVA specific. the 'instance owner' here is the
    username // ex_tenant_name
    """
    from service.driver import get_account_driver
    # NOTE(review): admin_driver is only referenced by the commented-out
    # keystone call below; get_cached_driver() may be kept for its caching
    # side effect -- confirm before removing.
    admin_driver = get_cached_driver(provider=provider)
    accounts = get_account_driver(provider=provider)
    all_identities = _select_identities(provider, users)
    # When an AccountProvider exists, query as that account's identity and
    # drop the provider filter (the account identity already scopes lookups).
    acct_providers = AccountProvider.objects.filter(provider=provider)
    if acct_providers:
        account_identity = acct_providers[0].identity
        provider = None
    else:
        account_identity = None
    all_instances = get_cached_instances(
        provider=provider, identity=account_identity, force=True)
    #all_tenants = admin_driver._connection._keystone_list_tenants()
    all_tenants = accounts.list_projects()
    # Convert instance.owner from tenant-id to tenant-name all at once
    all_instances = _convert_tenant_id_to_names(all_instances, all_tenants)
    # Make a mapping of owner-to-instance
    instance_map = _make_instance_owner_map(all_instances, users=users)
    logger.info("Instance owner map created")
    # Ensure every identity appears as a key, even with zero instances.
    identity_map = _include_all_idents(all_identities, instance_map)
    logger.info("Identity map created")
    return identity_map
示例2: create_volume_snapshot
def create_volume_snapshot(identity_uuid, volume_id, name, description):
    """
    Snapshot the volume identified by `volume_id` on behalf of the Identity.

    Resolves a cached esh driver for the identity, then asks the cloud
    provider to create the snapshot. Retries on Celery soft-time-limit;
    re-raises when the Identity does not exist.
    """
    try:
        ident = Identity.objects.get(uuid=identity_uuid)
        esh_driver = get_cached_driver(identity=ident)
        connection = esh_driver._connection
        volume = connection.ex_get_volume(volume_id)
        if not volume:
            raise Exception("No volume found for id=%s." % volume_id)
        new_snapshot = connection.ex_create_snapshot(
            volume, name, description)
        if not new_snapshot:
            raise Exception("The snapshot could not be created.")
    except SoftTimeLimitExceeded as e:
        logger.info("Task too long to complete. Task will be retried")
        create_volume_snapshot.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.", identity_uuid)
        raise
示例3: provider_over_allocation_enforcement
def provider_over_allocation_enforcement(identity, user):
    """
    Enforce the provider's 'over_allocation_action' against every active
    instance owned by `identity`.

    Returns False when the provider defines no action; otherwise executes
    the action on each active instance and refreshes its DB record.
    """
    provider = identity.provider
    action = provider.over_allocation_action
    if not action:
        logger.debug("No 'over_allocation_action' provided for %s" % provider)
        return False
    driver = get_cached_driver(identity=identity)
    esh_instances = driver.list_instances()
    # TODO: Parallelize this operation so you don't wait for larger instances
    # to finish 'wait_for' task below..
    for instance in esh_instances:
        try:
            if driver._is_active_instance(instance):
                # Suspend active instances, update the task in the DB
                # NOTE: identity.created_by COULD BE the Admin User,
                # indicating that this action/InstanceHistory was executed
                # by the administrator.. Future Release Idea.
                _execute_provider_action(
                    identity, identity.created_by, instance, action.name)
                # NOTE: Intentionally added to allow time for the Cloud to
                # begin 'suspend' operation before querying for the
                # instance again.
                # TODO: Instead: Add "wait_for" change from active to any
                # terminal, non-active state?
                wait_time = random.uniform(2, 6)
                time.sleep(wait_time)
                updated_esh = driver.get_instance(instance.id)
                convert_esh_instance(
                    driver, updated_esh,
                    identity.provider.uuid,
                    identity.uuid,
                    user)
        # FIX: `except Exception, e:` is Python-2-only syntax; `as e` works
        # on Python 2.6+ and 3.x and matches the rest of this module.
        except Exception as e:
            # Raise ANY exception that doesn't say
            # 'This instance is already in the requested VM state'
            # NOTE: This is OpenStack specific
            # FIX: Exception.message was removed in Python 3; str(e) carries
            # the same text for standard exceptions.
            if 'in vm_state' not in str(e):
                raise
示例4: allocation_source_overage_enforcement_for
def allocation_source_overage_enforcement_for(allocation_source, user, identity):
    """
    Apply the provider's over-allocation action to every instance on
    `identity` that counts against `allocation_source`.

    Returns the list of CORE instances acted upon ([] when no action is
    configured or enforcement is disabled in settings).
    """
    logger.debug("allocation_source_overage_enforcement_for - allocation_source: %s, user: %s, identity: %s",
                 allocation_source, user, identity)
    provider = identity.provider
    action = provider.over_allocation_action
    logger.debug("allocation_source_overage_enforcement_for - provider.over_allocation_action: %s",
                 provider.over_allocation_action)
    if not action:
        logger.debug("No 'over_allocation_action' provided for %s", provider)
        return []  # Over_allocation was not attempted
    if not settings.ENFORCING:
        logger.info("Settings dictate that ENFORCING = False. Returning..")
        return []
    try:
        esh_driver = get_cached_driver(identity=identity)
        all_instances = esh_driver.list_instances()
    except LibcloudInvalidCredsError:
        raise Exception("User %s has invalid credentials on Identity %s" % (user, identity))
    enforceable = filter_allocation_source_instances(
        allocation_source, user, all_instances)
    # TODO: Parallelize this operation so you don't wait for larger instances
    # to finish 'wait_for' task below..
    return [
        execute_provider_action(user, esh_driver, identity, esh_instance, action)
        for esh_instance in enforceable
    ]
示例5: create_volume_from_snapshot
def create_volume_from_snapshot(identity_uuid, snapshot_id, size_id, name,
                                description, metadata):
    """
    Create a new volume for the snapshot
    NOTE: The size must be at least the same size as the original volume.
    """
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
        driver = get_cached_driver(identity=identity)
        snapshot = driver._connection.ex_get_snapshot(snapshot_id)
        size = driver._connection.ex_get_size(size_id)
        if not snapshot:
            raise Exception("No snapshot found for id=%s." % snapshot_id)
        if not size:
            raise Exception("No size found for id=%s." % size_id)
        # NOTE(review): `size` is fetched and validated above but never used
        # below -- create_volume() receives `snapshot.size`, unlike the
        # companion create_volume_from_image() which passes `size.id`.
        # Confirm whether the requested size should be honored here.
        success, esh_volume = driver._connection.create_volume(
            snapshot.size, name, description=description, metadata=metadata,
            snapshot=snapshot)
        if not success:
            raise Exception("Could not create volume from snapshot")
        # Save the new volume to the database
        convert_esh_volume(
            esh_volume, identity.provider.uuid, identity_uuid,
            identity.created_by)
    except SoftTimeLimitExceeded as e:
        # Celery soft time limit reached -- retry rather than fail outright.
        create_volume_from_snapshot.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.", identity_uuid)
        raise
示例6: launch_instance
def launch_instance(user, provider_uuid, identity_uuid,
                    size_alias, source_alias, deploy=True,
                    **launch_kwargs):
    """
    USE THIS TO LAUNCH YOUR INSTANCE FROM THE REPL!
    Initialization point --> launch_*_instance --> ..
    Required arguments will launch the instance, extras will do
    provider-specific modifications.
    1. Test for available Size (on specific driver!)
    2. Test user has Quota/Allocation (on our DB)
    3. Test user is launching appropriate size (Not below Thresholds)
    4. Perform an 'Instance launch' depending on Boot Source
    5. Return CORE Instance with new 'esh' objects attached.
    """
    request_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    status_logger.debug("%s,%s,%s,%s,%s,%s"
                        % (request_time, user, "No Instance", source_alias,
                           size_alias, "Request Received"))
    core_identity = CoreIdentity.objects.get(uuid=identity_uuid)
    esh_driver = get_cached_driver(identity=core_identity)
    # May raise Exception("Size not available")
    size = check_size(esh_driver, size_alias, provider_uuid)
    # May raise Exception("Volume/Machine not available")
    boot_source = get_boot_source(user.username, identity_uuid, source_alias)
    # Raise any other exceptions before launching here
    _pre_launch_validation(
        user.username, esh_driver, identity_uuid, boot_source, size)
    return _select_and_launch_source(
        user, identity_uuid, esh_driver, boot_source, size,
        deploy=deploy, **launch_kwargs)
示例7: create_volume_from_image
def create_volume_from_image(identity_uuid, image_id, size_id, name,
                             description, metadata):
    """
    Create a new volume, sized by `size_id` and pre-populated from `image_id`.

    Retries on Celery soft-time-limit; re-raises when the Identity is missing.
    """
    try:
        ident = Identity.objects.get(uuid=identity_uuid)
        owner = ident.created_by
        esh_driver = get_cached_driver(identity=ident)
        connection = esh_driver._connection
        image = connection.ex_get_image(image_id)
        size = connection.ex_get_size(size_id)
        if not image:
            raise Exception("No image found for id=%s." % image_id)
        if not size:
            raise Exception("No size found for id=%s." % size_id)
        success, esh_volume = connection.create_volume(
            size.id, name, description=description, metadata=metadata,
            image=image)
        if not success:
            raise Exception("Could not create volume from image")
        # Mirror the freshly created volume into the database.
        convert_esh_volume(
            esh_volume, ident.provider.uuid, identity_uuid, owner)
    except SoftTimeLimitExceeded as e:
        create_volume_from_image.retry(exc=e)
    except Identity.DoesNotExist:
        logger.info("An Identity for uuid=%s does not exist.", identity_uuid)
        raise
示例8: _resolve_history_conflict
def _resolve_history_conflict(
        identity, core_running_instance, bad_history, reset_time=None):
    """
    End-date each conflicting InstanceStatusHistory and create a fresh one
    reflecting the instance's current status and size.

    NOTE 1: This is a 'band-aid' fix until we are 100% that Transaction will
    not create conflicting un-end-dated objects.
    NOTE 2: It is EXPECTED that this instance has the 'esh' attribute
    Failure to add the 'esh' attribute will generate a ValueError!
    """
    # FIX: getattr() without a default raises AttributeError when 'esh' is
    # missing -- supply a default so the documented ValueError is raised.
    if not getattr(core_running_instance, 'esh', None):
        raise ValueError("Esh is missing from %s" % core_running_instance)
    esh_instance = core_running_instance.esh
    # Check for temporary status and fetch that
    tmp_status = esh_instance.extra.get('metadata', {}).get("tmp_status")
    new_status = tmp_status or esh_instance.extra['status']
    esh_driver = get_cached_driver(identity=identity)
    new_size = _esh_instance_size_to_core(
        esh_driver, esh_instance, identity.provider.uuid
    )
    if not reset_time:
        reset_time = timezone.now()
    # Close out every conflicting history record at the same instant.
    for history in bad_history:
        history.end_date = reset_time
        history.save()
    new_history = InstanceStatusHistory.create_history(
        new_status, core_running_instance, new_size, reset_time
    )
    return new_history
示例9: set_provider_quota
def set_provider_quota(identity_uuid, limit_dict=None):
    """
    Push the membership quota for `identity_uuid` up to OpenStack, capping
    each value at the provider's hard limits.
    """
    identity = Identity.objects.get(uuid=identity_uuid)
    if not identity.credential_set.all():
        # Can't update quota if credentials arent set
        return
    if not limit_dict:
        limit_dict = _get_hard_limits(identity.provider)
    if identity.provider.get_type_name().lower() == 'openstack':
        esh_driver = get_cached_driver(identity=identity)
        username = identity.created_by.username
        user_id = esh_driver._connection.key
        tenant_id = esh_driver._connection._get_tenant_id()
        membership = IdentityMembership.objects.get(
            identity__uuid=identity_uuid,
            member__name=username)
        user_quota = membership.quota
        if user_quota:
            # Don't go above the hard-set limits per provider.
            user_quota.cpu = min(user_quota.cpu, limit_dict['cpu'])
            user_quota.memory = min(user_quota.memory, limit_dict['ram'])
            # Use THESE values...
            values = {
                'cores': user_quota.cpu,
                'ram': user_quota.memory * 1024,
            }
            logger.info("Updating quota for %s to %s" % (username, values))
            account_driver = AccountDriver(identity.provider)
            os_admin = account_driver.admin_driver
            os_admin._connection.ex_update_quota_for_user(
                tenant_id, user_id, values)
    return True
示例10: set_provider_quota
def set_provider_quota(identity_id):
    """
    Push the membership quota for `identity_id` up to OpenStack.
    """
    identity = Identity.objects.get(id=identity_id)
    if not identity.credential_set.all():
        # Credentials are required before a quota can be applied.
        return
    if identity.provider.get_type_name().lower() == 'openstack':
        esh_driver = get_cached_driver(identity=identity)
        username = identity.created_by.username
        user_id = esh_driver._connection._get_user_id()
        tenant_id = esh_driver._connection._get_tenant_id()
        membership = IdentityMembership.objects.get(
            identity__id=identity_id,
            member__name=username)
        user_quota = membership.quota
        if user_quota:
            values = {
                'cores': user_quota.cpu,
                'ram': user_quota.memory * 1024,
            }
            logger.info("Updating quota for %s to %s" % (username, values))
            account_driver = AccountDriver(identity.provider)
            os_admin = account_driver.admin_driver
            os_admin._connection.ex_update_quota_for_user(
                tenant_id, user_id, values)
    return True
示例11: create_bootable_volume
def create_bootable_volume(
        user,
        provider_uuid,
        identity_uuid,
        name,
        size_alias,
        new_source_alias,
        source_hint=None,
        **kwargs):
    """
    Launch an instance that boots from a volume built from `new_source_alias`.

    **kwargs passed as data to boot_volume_instance
    """
    # FIX: Identity.objects.get() raises DoesNotExist rather than returning
    # None, so the old `if not identity:` branch was dead code. Catch the
    # ORM exception so the intended message is actually raised.
    try:
        identity = Identity.objects.get(uuid=identity_uuid)
    except Identity.DoesNotExist:
        raise Exception("Identity UUID %s does not exist." % identity_uuid)
    driver = get_cached_driver(identity=identity)
    if not driver:
        raise Exception(
            "Driver could not be initialized. Invalid Credentials?")
    size = driver.get_size(size_alias)
    if not size:
        raise Exception(
            "Size %s could not be located with this driver" % size_alias)
    # Return source or raises an Exception
    source = _retrieve_source(driver, new_source_alias, source_hint)
    core_instance = boot_volume_instance(driver, identity,
                                         source, size, name, **kwargs)
    return core_instance
示例12: monitor_instances_for
def monitor_instances_for(provider_id, users=None,
                          print_logs=False, check_allocations=False,
                          start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.

    Returns the total number of running instances seen across all users
    (None when the provider is not openstack-based).
    """
    provider = Provider.objects.get(id=provider_id)
    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)
    if print_logs:
        # Mirror celery logging to stdout for interactive debugging.
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)
    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    running_total = 0
    for username in sorted(instance_map.keys()):
        running_instances = instance_map[username]
        running_total += len(running_instances)
        identity = _get_identity_from_tenant_name(provider, username)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver,
                        inst,
                        identity.provider.uuid,
                        identity.uuid,
                        identity.created_by) for inst in running_instances]
            except Exception as exc:
                # One user's conversion failure should not stop the sweep.
                celery_logger.exception(
                    "Could not convert running instances for %s" %
                    username)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        core_instances = _cleanup_missing_instances(
            identity,
            core_running_instances)
        if check_allocations:
            allocation_result = user_over_allocation_enforcement(
                provider, username,
                print_logs, start_date, end_date)
    if print_logs:
        celery_logger.removeHandler(consolehandler)
    return running_total
示例13: _execute_provider_action
def _execute_provider_action(identity, user, instance, action_name):
    """
    Carry out the named over-allocation enforcement action on one instance.
    """
    driver = get_cached_driver(identity=identity)
    # NOTE: This flag is a HACK! It will be removed when IP management is
    # enabled in an upcoming version. -SG
    reclaim_ip = identity.provider.location != 'iPlant Cloud - Tucson'
    # NOTE: This metadata statement is a HACK! It should be removed when all
    # instances matching this metadata key have been removed.
    home_mount_flag = instance.extra['metadata'].get(
        'atmosphere_ephemeral_home_mount', 'false').lower()
    if home_mount_flag == 'true' and action_name == 'Shelve':
        logger.info("Instance %s will be suspended instead of shelved, because the ephemeral storage is in /home" % instance.id)
        action_name = 'Suspend'
    logger.info("User %s has gone over their allocation on Instance %s - Enforcement Choice: %s" % (user, instance, action_name))
    # The four 'state change' actions share a single calling convention.
    state_actions = {
        'Suspend': suspend_instance,
        'Stop': stop_instance,
        'Shelve': shelve_instance,
        'Shelve Offload': offload_instance,
    }
    try:
        if not action_name:
            logger.debug("No 'action_name' provided")
            return
        elif action_name in state_actions:
            state_actions[action_name](
                driver,
                instance,
                identity.provider.uuid,
                identity.uuid,
                user,
                reclaim_ip)
        elif action_name == 'Terminate':
            destroy_instance(user, identity.uuid, instance.id)
        else:
            raise Exception("Encountered Unknown Action Named %s" % action_name)
    except ObjectDoesNotExist:
        # This may be unreachable when null,blank = True
        logger.debug("Provider %s - 'Do Nothing' for Over Allocation" % identity.provider)
        return
示例14: get_core_instances
def get_core_instances(identity_uuid):
    """
    Return a CORE Instance for every esh instance visible to the identity.
    """
    identity = CoreIdentity.objects.get(uuid=identity_uuid)
    esh_driver = get_cached_driver(identity=identity)
    esh_list = esh_driver.list_instances()
    return [
        convert_esh_instance(esh_driver,
                             esh_instance,
                             identity.provider.uuid,
                             identity.uuid,
                             identity.created_by)
        for esh_instance in esh_list
    ]
示例15: monitor_instances_for
def monitor_instances_for(provider_id, users=None,
                          print_logs=False, start_date=None, end_date=None):
    """
    Run the set of tasks related to monitoring instances for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.
    """
    provider = Provider.objects.get(id=provider_id)
    # For now, lets just ignore everything that isn't openstack.
    if 'openstack' not in provider.type.name.lower():
        return
    instance_map = _get_instance_owner_map(provider, users=users)
    if print_logs:
        console_handler = _init_stdout_logging()
    seen_instances = []
    # DEVNOTE: Potential slowdown running multiple functions
    # Break this out when instance-caching is enabled
    if not settings.ENFORCING:
        celery_logger.debug('Settings dictate allocations are NOT enforced')
    for tenant_name in sorted(instance_map.keys()):
        running_instances = instance_map[tenant_name]
        identity = _get_identity_from_tenant_name(provider, tenant_name)
        if identity and running_instances:
            try:
                driver = get_cached_driver(identity=identity)
                core_running_instances = [
                    convert_esh_instance(
                        driver,
                        inst,
                        identity.provider.uuid,
                        identity.uuid,
                        identity.created_by) for inst in running_instances]
                seen_instances.extend(core_running_instances)
            except Exception as exc:
                # One tenant's conversion failure should not stop the sweep.
                celery_logger.exception(
                    "Could not convert running instances for %s" %
                    tenant_name)
                continue
        else:
            # No running instances.
            core_running_instances = []
        # Using the 'known' list of running instances, cleanup the DB
        core_instances = _cleanup_missing_instances(
            identity,
            core_running_instances)
    if print_logs:
        _exit_stdout_logging(console_handler)
    # return seen_instances NOTE: this has been commented out to avoid PicklingError!
    # TODO: Uncomment the above, Determine what _we can return_ and return that instead....
    return