本文整理汇总了Python中threepio.celery_logger.info函数的典型用法代码示例。如果您正苦于以下问题:Python info函数的具体用法?Python info怎么用?Python info使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了info函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _share_image
def _share_image(account_driver, cloud_machine, identity, members, dry_run=False):
    """
    Share `cloud_machine` with the tenant backing `identity`, unless that
    tenant is already listed in `members`.

    INPUT: use account_driver to share cloud_machine with identity (if not in 'members' list)

    :param account_driver: provider account driver with an `image_manager`
    :param cloud_machine: glance image object *or* dict-like image record
    :param identity: core Identity whose 'ex_tenant_name' credential names the tenant
    :param members: tenant names already sharing the image
    :param dry_run: if True, log intended actions but perform no cloud changes
    """
    # Skip tenant-names who are NOT in the DB, and tenants who are already included
    missing_tenant = identity.credential_set.filter(~Q(value__in=members), key='ex_tenant_name')
    if missing_tenant.count() == 0:
        # Tenant already a member (or has no DB credential) -- nothing to do.
        return
    elif missing_tenant.count() > 1:
        raise Exception("Safety Check -- You should not be here")
    tenant_name = missing_tenant[0]
    # `cloud_machine` may be a glance object (attribute access) or a dict (v2 API).
    cloud_machine_is_public = cloud_machine.is_public if hasattr(cloud_machine, 'is_public') else cloud_machine.get('visibility', '') == 'public'
    if cloud_machine_is_public:
        celery_logger.info("Making Machine %s private" % cloud_machine.id)
        if not dry_run:
            account_driver.image_manager.glance.images.update(cloud_machine.id, visibility='shared')
    celery_logger.info("Sharing image %s<%s>: %s with %s" % (cloud_machine.id, cloud_machine.name, identity.provider.location, tenant_name.value))
    if not dry_run:
        try:
            account_driver.image_manager.share_image(cloud_machine, tenant_name.value)
        except GlanceConflict as exc:
            # "already associated" is benign; any other conflict must surface.
            if 'already associated with image' not in exc.message:
                raise
        except GlanceForbidden as exc:
            # Any forbidden error other than the known public-image case must surface.
            if 'Public images do not have members' not in exc.message:
                raise
            celery_logger.warn("CONFLICT -- This image should have been marked 'shared'! %s" % cloud_machine)
    return
示例2: send_email
def send_email(subject, body, from_email, to, cc=None,
               fail_silently=False, html=False):
    """
    Use django.core.mail.EmailMessage to send and log an Atmosphere email.

    Returns True when the message was handled (sent, or skipped because
    settings.SEND_EMAILS is False); returns False on any exception.
    """
    try:
        message = EmailMessage(subject=subject, body=body,
                               from_email=from_email,
                               to=to,
                               cc=cc)
        if html:
            message.content_subtype = 'html'
        email_logger.info("\n> From:%s\n> To:%s\n> Cc:%s\n> Subject:%s\n> Body:\n%s", from_email, to, cc, subject, body)
        # SEND_EMAILS acts as a global kill-switch (defaults to sending).
        if getattr(settings, "SEND_EMAILS", True):
            message.send(fail_silently=fail_silently)
            note = "NOTE: Above message sent successfully"
        else:
            note = "NOTE: Above message not sent -- SEND_EMAILS was False"
        email_logger.info(note)
        celery_logger.info(note)
        return True
    except Exception as e:
        celery_logger.exception(e)
        return False
示例3: distribute_image_membership
def distribute_image_membership(account_driver, cloud_machine, provider):
    """
    Based on what we know about the DB, at a minimum, ensure that their projects are added to the image_members list for this cloud_machine.

    Returns the queryset of groups whose membership was processed.
    """
    provider_machine = ProviderMachine.objects.get(
        instance_source__provider=provider,
        instance_source__identifier=cloud_machine.id
    )
    membership_group_ids = ProviderMachineMembership.objects.filter(
        provider_machine=provider_machine
    ).values_list('group', flat=True)
    groups = Group.objects.filter(id__in=membership_group_ids)
    for group in groups:
        try:
            celery_logger.info(
                "Add %s to cloud membership for %s" % (group, provider_machine)
            )
            update_cloud_membership_for_machine(provider_machine, group)
        except TimeoutError:
            # A timeout for one group should not abort the remaining groups.
            celery_logger.warn(
                "Failed to add cloud membership for %s - Operation timed out" %
                group
            )
    return groups
示例4: _share_image
def _share_image(account_driver,
                 cloud_machine,
                 identity,
                 members,
                 dry_run=False):
    """
    Share `cloud_machine` with the tenant backing `identity`, unless that
    tenant is already listed in `members`.

    INPUT: use account_driver to share cloud_machine with identity (if not in 'members' list)

    :param dry_run: if True, log intended actions but perform no cloud changes
    """
    # Skip tenant-names who are NOT in the DB, and tenants who are already included
    missing_tenant = identity.credential_set.filter(
        ~Q(value__in=members), key='ex_tenant_name')
    if missing_tenant.count() == 0:
        # Tenant already a member (or has no DB credential) -- nothing to do.
        return
    elif missing_tenant.count() > 1:
        raise Exception("Safety Check -- You should not be here")
    tenant_name = missing_tenant[0]
    if cloud_machine.is_public:
        celery_logger.info("Making Machine %s private" % cloud_machine.id)
        # BUGFIX: the privacy update must also respect dry_run -- previously
        # this mutated the cloud image even on a dry run.
        if not dry_run:
            cloud_machine.update(is_public=False)
    celery_logger.info("Sharing image %s<%s>: %s with %s" %
                       (cloud_machine.id, cloud_machine.name,
                        identity.provider.location, tenant_name.value))
    if not dry_run:
        account_driver.image_manager.share_image(cloud_machine,
                                                 tenant_name.value)
示例5: enable_image_validation
def enable_image_validation(machine_request, init_task, final_task, original_status="", error_handler_task=None):
    """
    Wire image-validation steps into a machine-request celery chain:
    validate the new image by launching an instance, wait for it to reach
    'active', destroy it, then link `final_task`.

    Returns the first task of the chain (either `init_task`, or the
    validation task itself when resuming from a 'validating' status).
    """
    if not error_handler_task:
        error_handler_task = machine_request_error.s(machine_request.id)
    # Task 3 = Validate the new image by launching an instance
    admin_ident = machine_request.new_admin_identity()
    admin_driver = get_admin_driver(machine_request.new_machine_provider)
    if 'validating' == original_status:
        image_id = machine_request.new_machine.identifier
        celery_logger.info("Start with validating:%s" % image_id)
        # If validating, seed the image_id and start here..
        validate_task = validate_new_image.s(image_id, machine_request.id)
        init_task = validate_task
    else:
        # Fresh run: validate_new_image receives the image_id from the
        # preceding task in the chain.
        validate_task = validate_new_image.s(machine_request.id)
        init_task.link(validate_task)
    #Validate task returns an instance_id
    # Task 4 = Wait for new instance to be 'active'
    wait_for_task = wait_for_instance.s(
        # NOTE: 1st arg, instance_id, passed from last task.
        admin_driver.__class__,
        admin_driver.provider,
        admin_driver.identity,
        "active",
        test_tmp_status=True,
        return_id=True)
    validate_task.link(wait_for_task)
    validate_task.link_error(error_handler_task)
    # Task 5 = Terminate the new instance on completion
    destroy_task = destroy_instance.s(
        admin_ident.created_by, admin_ident.uuid)
    wait_for_task.link(destroy_task)
    wait_for_task.link_error(error_handler_task)
    destroy_task.link_error(error_handler_task)
    destroy_task.link(final_task)
    return init_task
示例6: mount_failed
def mount_failed(
    context,
    exception_msg,
    traceback,
    driverCls,
    provider,
    identity,
    volume_id,
    unmount=False,
    **celery_task_args
):
    """
    Record a failed (un)mount by tagging the volume's metadata with an
    error tmp_status. Retries the task on any unexpected exception.
    """
    from service import volume as volume_service
    try:
        celery_logger.debug("mount_failed task started at %s." % timezone.now())
        celery_logger.info("task context=%s" % context)
        error_message = "%s\nMount Error Traceback:%s" % (exception_msg, traceback)
        celery_logger.error(error_message)
        esh_driver = get_driver(driverCls, provider, identity)
        esh_volume = esh_driver.get_volume(volume_id)
        # Distinguish unmount failures from mount failures in the metadata tag.
        failure_status = 'umount_error' if unmount else 'mount_error'
        return volume_service._update_volume_metadata(
            esh_driver, esh_volume, metadata={'tmp_status': failure_status}
        )
    except Exception as exc:
        celery_logger.warn(exc)
        mount_failed.retry(exc=exc)
示例7: make_machines_private
def make_machines_private(application, identities, account_drivers=None, provider_tenant_mapping=None, image_maps=None, dry_run=False):
    """
    This method is called when the DB has marked the Machine/Application as PUBLIC
    But the CLOUD states that the machine is really private.
    GOAL: All versions and machines will be listed as PRIVATE on the cloud and include AS MANY identities as exist.

    :param account_drivers: optional per-call memoization cache {provider: driver}
    :param provider_tenant_mapping: optional memoization cache for tenant-name maps
    :param image_maps: optional memoization cache for cloud images
    :param dry_run: if True, log intended actions but make no changes
    """
    # BUGFIX: the caches were mutable default arguments ({}), silently shared
    # across *all* calls. Create fresh dicts per call instead.
    if account_drivers is None:
        account_drivers = {}
    if provider_tenant_mapping is None:
        provider_tenant_mapping = {}
    if image_maps is None:
        image_maps = {}
    for version in application.active_versions():
        for machine in version.active_machines():
            # For each *active* machine in app/version..
            # Loop over each identity and check the list of 'current tenants' as viewed by keystone.
            account_driver = memoized_driver(machine, account_drivers)
            tenant_name_mapping = memoized_tenant_name_map(account_driver, provider_tenant_mapping)
            current_tenants = get_current_members(
                account_driver, machine, tenant_name_mapping)
            provider = machine.instance_source.provider
            cloud_machine = memoized_image(account_driver, machine, image_maps)
            for identity in identities:
                if identity.provider == provider:
                    _share_image(account_driver, cloud_machine, identity, current_tenants, dry_run=dry_run)
                    add_application_membership(application, identity, dry_run=dry_run)
    # All the cloud work has been completed, so "lock down" the application.
    if not application.private:
        application.private = True
        celery_logger.info("Making Application %s private" % application.name)
        if not dry_run:
            application.save()
示例8: prep_instance_for_snapshot
def prep_instance_for_snapshot(identity_id, instance_id, **celery_task_args):
    """
    Run the prepare-snapshot playbooks against an active instance.
    Skips silently when the instance is not 'active'; retries on failure.
    """
    identity = Identity.objects.get(id=identity_id)
    try:
        celery_logger.debug("prep_instance_for_snapshot task started at %s." % timezone.now())
        # NOTE: FIXMEIF the assumption that the 'linux username'
        # is the 'created_by' AtmosphereUser changes.
        username = identity.created_by.username
        driver = get_esh_driver(identity)
        instance = driver.get_instance(instance_id)
        if instance.extra.get('status', '') != 'active':
            celery_logger.info("prep_instance_for_snapshot skipped")
            return
        playbooks = deploy_prepare_snapshot(
            instance.ip, username, instance_id)
        celery_logger.info(playbooks.__dict__)
        hostname = build_host_name(instance.id, instance.ip)
        # Succeed only when the playbook run had no failures and no
        # unreachable hosts.
        had_errors = (execution_has_failures(playbooks, hostname)
                      or execution_has_unreachable(playbooks, hostname))
        if had_errors:
            raise Exception(
                "Error encountered while preparing instance for snapshot: %s"
                % playbooks.stats.summarize(host=hostname))
    except Exception as exc:
        celery_logger.warn(exc)
        prep_instance_for_snapshot.retry(exc=exc)
示例9: mount_volume_task
def mount_volume_task(
    driverCls,
    provider,
    identity,
    instance_id,
    volume_id,
    device_location,
    mount_location,
    device_type,
    mount_prefix=None,
    *args,
    **kwargs
):
    """
    Mount an attached volume on an instance via the mount playbook.

    If `device_location` or `mount_location` are falsy they are inferred:
    the device from the volume's attachment data, the mount point from
    `mount_prefix` + the device's last letter (e.g. /dev/sdb -> /vol_b).

    Returns the mount_location used; retries the task on any exception.
    """
    try:
        celery_logger.debug("mount task started at %s." % timezone.now())
        celery_logger.debug("mount_location: %s" % (mount_location, ))
        driver = get_driver(driverCls, provider, identity)
        username = identity.get_username()
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)
        try:
            # Infer the device path from the cloud's attachment record
            # when the caller did not provide one.
            attach_data = volume.extra['attachments'][0]
            if not device_location:
                device_location = attach_data['device']
        except (KeyError, IndexError):
            celery_logger.warn(
                "Volume %s missing attachments in Extra" % (volume, )
            )
        if not device_location:
            raise Exception(
                "No device_location found or inferred by volume %s" % volume
            )
        if not mount_prefix:
            mount_prefix = "/vol_"
        last_char = device_location[-1]    # /dev/sdb --> b
        if not mount_location:
            mount_location = mount_prefix + last_char
        playbook_results = deploy_mount_volume(
            instance.ip,
            username,
            instance.id,
            device_location,
            mount_location=mount_location,
            device_type=device_type
        )
        celery_logger.info(playbook_results)
        if execution_has_failures(
            playbook_results
        ) or execution_has_unreachable(playbook_results):
            raise Exception(
                "Error encountered while mounting volume: instance_id: {}, volume_id: {}"
                .format(instance_id, volume_id)
            )
        return mount_location
    except Exception as exc:
        celery_logger.warn(exc)
        mount_volume_task.retry(exc=exc)
示例10: monitor_allocation_sources
def monitor_allocation_sources(usernames=()):
    """
    Monitor allocation sources, if a snapshot shows that all compute has been used, then enforce as necessary

    :param usernames: optional whitelist; when non-empty, only these users
        are considered for enforcement.
    """
    celery_logger.debug('monitor_allocation_sources - usernames: %s', usernames)
    allocation_sources = AllocationSource.objects.all()
    for allocation_source in allocation_sources.order_by('name'):
        celery_logger.debug(
            'monitor_allocation_sources - allocation_source: %s',
            allocation_source
        )
        for user in allocation_source.all_users.order_by('username'):
            celery_logger.debug('monitor_allocation_sources - user: %s', user)
            if usernames and user.username not in usernames:
                celery_logger.info(
                    "Skipping User %s - not in the list" % user.username
                )
                continue
            over_allocation = allocation_source.is_over_allocation(user)
            celery_logger.debug(
                'monitor_allocation_sources - user: %s, over_allocation: %s',
                user, over_allocation
            )
            # A per-user/source override can force or forbid enforcement
            # regardless of actual usage.
            enforcement_override_choice = AllocationSourcePluginManager.get_enforcement_override(
                user, allocation_source
            )
            celery_logger.debug(
                'monitor_allocation_sources - enforcement_override_choice: %s',
                enforcement_override_choice
            )
            # NEVER_ENFORCE wins even when the user is over allocation.
            if over_allocation and enforcement_override_choice == EnforcementOverrideChoice.NEVER_ENFORCE:
                celery_logger.debug(
                    'Allocation source is over allocation, but %s + user %s has an override of %s, '
                    'therefore not enforcing', allocation_source, user,
                    enforcement_override_choice
                )
                continue
            if not over_allocation and enforcement_override_choice == EnforcementOverrideChoice.ALWAYS_ENFORCE:
                celery_logger.debug(
                    'Allocation source is not over allocation, but %s + user %s has an override of %s, '
                    'therefore enforcing', allocation_source, user,
                    enforcement_override_choice
                )
                # Note: The enforcing happens in the next `if` statement.
            # Enforce when over allocation (and not NEVER_ENFORCE, handled
            # above) or when ALWAYS_ENFORCE is set.
            if over_allocation or enforcement_override_choice == EnforcementOverrideChoice.ALWAYS_ENFORCE:
                assert enforcement_override_choice in (
                    EnforcementOverrideChoice.NO_OVERRIDE,
                    EnforcementOverrideChoice.ALWAYS_ENFORCE
                )
                celery_logger.debug(
                    'monitor_allocation_sources - Going to enforce on user: %s',
                    user
                )
                allocation_source_overage_enforcement_for_user.apply_async(
                    args=(allocation_source, user)
                )
示例11: prune_machines_for
def prune_machines_for(
        provider_id, print_logs=False, dry_run=False, forced_removal=False):
    """
    Look at the list of machines (as seen by the AccountProvider)
    if a machine cannot be found in the list, remove it.
    NOTE: BEFORE CALLING THIS TASK you should ensure
    that the AccountProvider can see ALL images.
    Failure to do so will result in any image unseen by the admin
    to be prematurely end-dated and removed from the API/UI.

    :param print_logs: if True, mirror log output to stdout for this call
    :param dry_run: if True, compute but do not persist end-dates
    :param forced_removal: proceed with end-dating even when the cloud
        returned no machines (normally treated as an error and skipped)
    """
    provider = Provider.objects.get(id=provider_id)
    now = timezone.now()
    consolehandler = None
    if print_logs:
        import logging
        import sys
        consolehandler = logging.StreamHandler(sys.stdout)
        consolehandler.setLevel(logging.DEBUG)
        celery_logger.addHandler(consolehandler)
    # BUGFIX: the stdout handler must always be removed -- previously an
    # early return (empty cloud_machines) or an exception leaked it onto
    # celery_logger, duplicating output on every later task.
    try:
        celery_logger.info("Starting prune_machines for Provider %s @ %s"
                           % (provider, now))
        if provider.is_active():
            account_driver = get_account_driver(provider)
            db_machines = ProviderMachine.objects.filter(
                only_current_source(), instance_source__provider=provider)
            cloud_machines = account_driver.list_all_images()
        else:
            db_machines = ProviderMachine.objects.filter(
                source_in_range(),    # like 'only_current..' w/o active_provider
                instance_source__provider=provider)
            cloud_machines = []
        # Don't do anything if cloud machines == [None,[]]
        if not cloud_machines and not forced_removal:
            return
        # Loop 1 - End-date All machines in the DB that
        # can NOT be found in the cloud.
        mach_count = _end_date_missing_database_machines(
            db_machines, cloud_machines, now=now, dry_run=dry_run)
        # Loop 2 and 3 - Capture all (still-active) versions without machines,
        # and all applications without versions.
        # These are 'outliers' and mainly here for safety-check purposes.
        ver_count = _remove_versions_without_machines(now=now)
        app_count = _remove_applications_without_versions(now=now)
        # Loop 4 - All 'Application' DB objects require
        # >=1 Version with >=1 ProviderMachine (ACTIVE!)
        # Apps that don't meet this criteria should be end-dated.
        app_count += _update_improperly_enddated_applications(now)
        celery_logger.info(
            "prune_machines completed for Provider %s : "
            "%s Applications, %s versions and %s machines pruned."
            % (provider, app_count, ver_count, mach_count))
    finally:
        if consolehandler is not None:
            celery_logger.removeHandler(consolehandler)
示例12: monitor_volumes_for
def monitor_volumes_for(provider_id, print_logs=False):
    """
    Run the set of tasks related to monitoring sizes for a provider.
    Optionally, provide a list of usernames to monitor
    While debugging, print_logs=True can be very helpful.
    start_date and end_date allow you to search a 'non-standard' window of time.

    Returns the instance_source identifiers of all volumes seen in the cloud.
    """
    from service.driver import get_account_driver
    from core.models import Identity
    console_handler = None
    if print_logs:
        console_handler = _init_stdout_logging()
    # BUGFIX: stdout logging must be torn down even when an exception is
    # raised -- previously any error leaked the handler onto the logger.
    try:
        provider = Provider.objects.get(id=provider_id)
        account_driver = get_account_driver(provider)
        # Non-End dated volumes on this provider
        db_volumes = Volume.objects.filter(only_current_source(), instance_source__provider=provider)
        all_volumes = account_driver.admin_driver.list_all_volumes(timeout=30)
        seen_volumes = []
        for cloud_volume in all_volumes:
            try:
                core_volume = convert_esh_volume(cloud_volume, provider_uuid=provider.uuid)
                seen_volumes.append(core_volume)
            except ObjectDoesNotExist:
                # No DB record yet: try to attribute the volume to an
                # Identity via its OpenStack tenant before converting.
                tenant_id = cloud_volume.extra['object']['os-vol-tenant-attr:tenant_id']
                tenant = account_driver.get_project_by_id(tenant_id)
                tenant_name = tenant.name if tenant else tenant_id
                try:
                    if not tenant:
                        celery_logger.warn("Warning: tenant_id %s found on volume %s, but did not exist from the account driver perspective.", tenant_id, cloud_volume)
                        raise ObjectDoesNotExist()
                    identity = Identity.objects.filter(
                        contains_credential('ex_project_name', tenant_name), provider=provider
                    ).first()
                    if not identity:
                        raise ObjectDoesNotExist()
                    core_volume = convert_esh_volume(
                        cloud_volume,
                        provider.uuid, identity.uuid,
                        identity.created_by)
                except ObjectDoesNotExist:
                    celery_logger.info("Skipping Volume %s - No Identity for: Provider:%s + Project Name:%s" % (cloud_volume.id, provider, tenant_name))
        now_time = timezone.now()
        # Any DB volume not seen in the cloud is considered gone: end-date it.
        needs_end_date = [volume for volume in db_volumes if volume not in seen_volumes]
        for volume in needs_end_date:
            celery_logger.debug("End dating inactive volume: %s" % volume)
            volume.end_date = now_time
            volume.save()
    finally:
        if console_handler is not None:
            _exit_stdout_logging(console_handler)
    for vol in seen_volumes:
        vol.esh = None
    return [vol.instance_source.identifier for vol in seen_volumes]
示例13: add_application_membership
def add_application_membership(application, identity, dry_run=False):
    """
    Ensure every group that is a member of `identity` also has an
    ApplicationMembership for `application`.

    :param dry_run: if True, log intended additions but create nothing
    """
    for membership_obj in identity.identitymembership_set.all():
        # For every 'member' of this identity:
        group = membership_obj.member
        # Add an application membership if not already there.
        # (.exists() avoids the full COUNT query the old `.count() == 0` ran.)
        if not application.applicationmembership_set.filter(group=group).exists():
            celery_logger.info("Added ApplicationMembership %s for %s" % (group.name, application.name))
            if not dry_run:
                ApplicationMembership.objects.create(application=application, group=group)
        # else: group is already an ApplicationMember -- nothing to do.
示例14: export_request_task
def export_request_task(export_request_id):
    """
    Process an ExportRequest: mark it 'processing', run the export,
    and return the location of the exported file.
    """
    celery_logger.info("export_request_task task started at %s." % timezone.now())
    request = ExportRequest.objects.get(id=export_request_id)
    request.status = 'processing'
    request.save()
    manager_cls, manager_creds = request.prepare_manager()
    export_kwargs = request.get_export_args()
    exported_file = export_source(manager_cls, manager_creds, export_kwargs)
    celery_logger.info("export_request_task task finished at %s." % timezone.now())
    return exported_file
示例15: export_request_error
def export_request_error(task_uuid, export_request_id):
    """
    Error callback: capture the failed task's result/traceback and record
    it as the ExportRequest's status.
    """
    celery_logger.info("export_request_id=%s" % export_request_id)
    celery_logger.info("task_uuid=%s" % task_uuid)
    result = app.AsyncResult(task_uuid)
    # Resolve the result inside allow_join_result (we're inside a task);
    # propagate=False keeps the exception on `result` instead of raising.
    with allow_join_result():
        result.get(propagate=False)
    err_str = "ERROR - %r Exception:%r" % (result.result, result.traceback,)
    celery_logger.error(err_str)
    request = ExportRequest.objects.get(id=export_request_id)
    request.status = err_str
    request.save()