This page collects typical usage examples of the threepio.celery_logger.debug function in Python. If you have been wondering what the debug function does, or how to call it, the curated examples below may help.
The following presents 15 code examples of the debug function, sorted by popularity.
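Before diving in: the examples themselves call addHandler and removeHandler on celery_logger, so it behaves like a standard-library logging.Logger, and debug accepts both pre-formatted strings and lazy printf-style arguments. A minimal illustration (the timestamp value is a placeholder):

from threepio import celery_logger

# celery_logger behaves like a stdlib logging.Logger.
celery_logger.debug("task started at %s." % "2016-01-01 00:00:00")    # eager formatting
celery_logger.debug("task started at %s.", "2016-01-01 00:00:00")     # lazy formatting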
Example 1: update_mount_location
def update_mount_location(new_mount_location,
driverCls, provider, identity,
volume_alias):
"""
"""
from service import volume as volume_service
try:
celery_logger.debug(
"update_mount_location task started at %s." %
datetime.now())
driver = get_driver(driverCls, provider, identity)
volume = driver.get_volume(volume_alias)
if not volume:
return
if not new_mount_location:
return
        result = volume_service._update_volume_metadata(
            driver, volume,
            metadata={'mount_location': new_mount_location})
        celery_logger.debug(
            "update_mount_location task finished at %s." %
            datetime.now())
        return result
except Exception as exc:
celery_logger.exception(exc)
update_mount_location.retry(exc=exc)
Example 2: prep_instance_for_snapshot
def prep_instance_for_snapshot(identity_id, instance_id, **celery_task_args):
identity = Identity.objects.get(id=identity_id)
try:
celery_logger.debug("prep_instance_for_snapshot task started at %s." % timezone.now())
        # NOTE: FIXME if the assumption that the 'linux username'
        # is the 'created_by' AtmosphereUser ever changes.
username = identity.created_by.username
driver = get_esh_driver(identity)
instance = driver.get_instance(instance_id)
        if instance.extra.get('status', '') != 'active':
            celery_logger.info("prep_instance_for_snapshot skipped: instance %s is not active" % instance_id)
            return
playbooks = deploy_prepare_snapshot(
instance.ip, username, instance_id)
celery_logger.info(playbooks.__dict__)
hostname = build_host_name(instance.id, instance.ip)
        result = not (execution_has_failures(playbooks, hostname)
                      or execution_has_unreachable(playbooks, hostname))
if not result:
raise Exception(
"Error encountered while preparing instance for snapshot: %s"
% playbooks.stats.summarize(host=hostname))
except Exception as exc:
celery_logger.warn(exc)
prep_instance_for_snapshot.retry(exc=exc)
Example 3: monitor_sizes_for
def monitor_sizes_for(provider_id, print_logs=False):
"""
Run the set of tasks related to monitoring sizes for a provider.
Optionally, provide a list of usernames to monitor
While debugging, print_logs=True can be very helpful.
start_date and end_date allow you to search a 'non-standard' window of time.
"""
from service.driver import get_admin_driver
if print_logs:
import logging
import sys
consolehandler = logging.StreamHandler(sys.stdout)
consolehandler.setLevel(logging.DEBUG)
celery_logger.addHandler(consolehandler)
provider = Provider.objects.get(id=provider_id)
admin_driver = get_admin_driver(provider)
# Non-End dated sizes on this provider
db_sizes = Size.objects.filter(only_current(), provider=provider)
all_sizes = admin_driver.list_sizes()
seen_sizes = []
for cloud_size in all_sizes:
core_size = convert_esh_size(cloud_size, provider.uuid)
seen_sizes.append(core_size)
now_time = timezone.now()
needs_end_date = [size for size in db_sizes if size not in seen_sizes]
for size in needs_end_date:
celery_logger.debug("End dating inactive size: %s" % size)
size.end_date = now_time
size.save()
if print_logs:
celery_logger.removeHandler(consolehandler)
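The only_current() filter used above comes from the surrounding codebase and is not shown here. A hedged reconstruction, assuming it simply excludes end-dated rows (the real helper may differ):

from django.db.models import Q
from django.utils import timezone

def only_current(now=None):
    # Hypothetical sketch: keep rows whose end_date is unset or in the future.
    if now is None:
        now = timezone.now()
    return Q(end_date=None) | Q(end_date__gt=now)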
Example 4: mount_volume_task
def mount_volume_task(
driverCls,
provider,
identity,
instance_id,
volume_id,
device_location,
mount_location,
device_type,
mount_prefix=None,
*args,
**kwargs
):
try:
celery_logger.debug("mount task started at %s." % timezone.now())
celery_logger.debug("mount_location: %s" % (mount_location, ))
driver = get_driver(driverCls, provider, identity)
username = identity.get_username()
instance = driver.get_instance(instance_id)
volume = driver.get_volume(volume_id)
try:
attach_data = volume.extra['attachments'][0]
if not device_location:
device_location = attach_data['device']
except (KeyError, IndexError):
celery_logger.warn(
"Volume %s missing attachments in Extra" % (volume, )
)
if not device_location:
            raise Exception(
                "No device_location found or inferred for volume %s" % volume
            )
if not mount_prefix:
mount_prefix = "/vol_"
last_char = device_location[-1] # /dev/sdb --> b
if not mount_location:
mount_location = mount_prefix + last_char
playbook_results = deploy_mount_volume(
instance.ip,
username,
instance.id,
device_location,
mount_location=mount_location,
device_type=device_type
)
celery_logger.info(playbook_results)
if execution_has_failures(
playbook_results
) or execution_has_unreachable(playbook_results):
raise Exception(
"Error encountered while mounting volume: instance_id: {}, volume_id: {}"
.format(instance_id, volume_id)
)
return mount_location
except Exception as exc:
celery_logger.warn(exc)
mount_volume_task.retry(exc=exc)
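The mount-point inference above is plain string work; for illustration, with hypothetical values:

# /dev/sdb with the default prefix becomes /vol_b
device_location = '/dev/sdb'
mount_prefix = '/vol_'
mount_location = mount_prefix + device_location[-1]
assert mount_location == '/vol_b'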
Example 5: remove_empty_networks_for
def remove_empty_networks_for(provider_id):
provider = Provider.objects.get(id=provider_id)
os_driver = get_account_driver(provider)
all_instances = os_driver.admin_driver.list_all_instances()
project_map = os_driver.network_manager.project_network_map()
projects_with_networks = project_map.keys()
for project in projects_with_networks:
networks = project_map[project]['network']
if not isinstance(networks, list):
networks = [networks]
for network in networks:
network_name = network['name']
celery_logger.debug("Checking if network %s is in use" % network_name)
if running_instances(network_name, all_instances):
continue
# TODO: MUST change when not using 'usergroups' explicitly.
user = project
try:
celery_logger.debug("Removing project network for User:%s, Project:%s"
% (user, project))
os_driver.network_manager.delete_project_network(user, project)
            except (NeutronClientException, NeutronException):
                celery_logger.exception("Neutron unable to remove project "
                                        "network for %s-%s" % (user, project))
Example 6: mount_failed
def mount_failed(
context,
exception_msg,
traceback,
driverCls,
provider,
identity,
volume_id,
unmount=False,
**celery_task_args
):
from service import volume as volume_service
try:
celery_logger.debug("mount_failed task started at %s." % timezone.now())
celery_logger.info("task context=%s" % context)
        err_str = "%s\nMount Error Traceback: %s" % (exception_msg, traceback)
celery_logger.error(err_str)
driver = get_driver(driverCls, provider, identity)
volume = driver.get_volume(volume_id)
if unmount:
tmp_status = 'umount_error'
else:
tmp_status = 'mount_error'
return volume_service._update_volume_metadata(
driver, volume, metadata={'tmp_status': tmp_status}
)
except Exception as exc:
celery_logger.warn(exc)
mount_failed.retry(exc=exc)
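mount_failed reads like a Celery error callback: its leading (context, exception_msg, traceback) parameters match the (request, exc, traceback) arguments Celery passes to errbacks. A hedged sketch of how it might be wired to mount_volume_task (the call site is not shown in the source, and all argument values are placeholders):

# Celery calls the errback with (request, exc, traceback) prepended
# to the partial arguments given below.
mount_volume_task.si(
    driverCls, provider, identity, instance_id, volume_id,
    device_location, mount_location, device_type
).apply_async(
    link_error=mount_failed.s(driverCls, provider, identity, volume_id)
)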
Example 7: umount_task
def umount_task(driverCls, provider, identity, instance_id, volume_id, *args, **kwargs):
try:
celery_logger.debug("umount_task started at %s." % datetime.now())
driver = get_driver(driverCls, provider, identity)
instance = driver.get_instance(instance_id)
volume = driver.get_volume(volume_id)
attach_data = volume.extra["attachments"][0]
device = attach_data["device"]
# Check mount to find the mount_location for device
private_key = "/opt/dev/atmosphere/extras/ssh/id_rsa"
kwargs.update({"ssh_key": private_key})
kwargs.update({"timeout": 120})
mount_location = None
cm_script = check_mount()
kwargs.update({"deploy": cm_script})
driver.deploy_to(instance, **kwargs)
        regex = re.compile(r"(?P<device>[\w/]+) on (?P<location>.*) type")
for line in cm_script.stdout.split("\n"):
res = regex.search(line)
if not res:
continue
search_dict = res.groupdict()
dev_found = search_dict["device"]
if device == dev_found:
mount_location = search_dict["location"]
break
# Volume not mounted, move along..
if not mount_location:
return
um_script = umount_volume(device)
kwargs.update({"deploy": um_script})
driver.deploy_to(instance, **kwargs)
if "device is busy" in um_script.stdout:
# Show all processes that are making device busy..
lsof_script = lsof_location(mount_location)
kwargs.update({"deploy": lsof_script})
driver.deploy_to(instance, **kwargs)
            regex = re.compile(r"(?P<name>[\w]+)\s*(?P<pid>[\d]+)")
offending_processes = []
for line in lsof_script.stdout.split("\n"):
res = regex.search(line)
if not res:
continue
search_dict = res.groupdict()
offending_processes.append((search_dict["name"], search_dict["pid"]))
raise DeviceBusyException(mount_location, offending_processes)
# Return here if no errors occurred..
celery_logger.debug("umount_task finished at %s." % datetime.now())
except DeviceBusyException:
raise
except Exception as exc:
celery_logger.warn(exc)
umount_task.retry(exc=exc)
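For reference, here is the first regex run against a typical mount(8) output line (the sample line is illustrative):

import re

regex = re.compile(r"(?P<device>[\w/]+) on (?P<location>.*) type")
sample = "/dev/vdb on /vol_b type ext4 (rw,relatime)"
match = regex.search(sample)
assert match.groupdict() == {'device': '/dev/vdb', 'location': '/vol_b'}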
Example 8: add_membership_task
def add_membership_task(image_version, group):
celery_logger.debug("add_membership_task task started at %s." % timezone.now())
try:
add_membership(image_version, group)
celery_logger.debug("add_membership_task task finished at %s." % timezone.now())
except Exception as exc:
celery_logger.exception(exc)
add_membership_task.retry(exc=exc)
Example 9: running_instances
def running_instances(network_name, all_instances):
for instance in all_instances:
        if network_name in instance.extra['addresses']:
            # If the instance is not build/active, the network is assumed NOT to be in use.
            celery_logger.debug("Network %s is in use, Active Instance:%s"
                                % (network_name, instance.id))
return True
celery_logger.debug("Network %s is NOT in use" % network_name)
return False
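The membership test assumes the OpenStack/libcloud shape of instance.extra['addresses'], a dict keyed by network name. Roughly, with illustrative values:

# Hypothetical instance.extra['addresses'] payload:
addresses = {
    'my-project-net': [
        {'addr': '10.0.0.7', 'OS-EXT-IPS:type': 'fixed', 'version': 4},
    ],
}
assert 'my-project-net' in addresses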
Example 10: monitor_instances_for
def monitor_instances_for(provider_id, users=None,
print_logs=False, check_allocations=False, start_date=None, end_date=None):
"""
Run the set of tasks related to monitoring instances for a provider.
Optionally, provide a list of usernames to monitor
While debugging, print_logs=True can be very helpful.
start_date and end_date allow you to search a 'non-standard' window of time.
"""
provider = Provider.objects.get(id=provider_id)
# For now, lets just ignore everything that isn't openstack.
if 'openstack' not in provider.type.name.lower():
return
instance_map = _get_instance_owner_map(provider, users=users)
if print_logs:
console_handler = _init_stdout_logging()
# DEVNOTE: Potential slowdown running multiple functions
# Break this out when instance-caching is enabled
running_total = 0
if not settings.ENFORCING:
celery_logger.debug('Settings dictate allocations are NOT enforced')
for username in sorted(instance_map.keys()):
running_instances = instance_map[username]
running_total += len(running_instances)
identity = _get_identity_from_tenant_name(provider, username)
if identity and running_instances:
try:
driver = get_cached_driver(identity=identity)
core_running_instances = [
convert_esh_instance(
driver,
inst,
identity.provider.uuid,
identity.uuid,
identity.created_by) for inst in running_instances]
            except Exception:
celery_logger.exception(
"Could not convert running instances for %s" %
username)
continue
else:
# No running instances.
core_running_instances = []
# Using the 'known' list of running instances, cleanup the DB
core_instances = _cleanup_missing_instances(
identity,
core_running_instances)
if check_allocations:
            user_over_allocation_enforcement(
                provider, username,
                print_logs, start_date, end_date)
if print_logs:
_exit_stdout_logging(console_handler)
return running_total
Example 11: monitor_volumes_for
def monitor_volumes_for(provider_id, print_logs=False):
"""
Run the set of tasks related to monitoring sizes for a provider.
Optionally, provide a list of usernames to monitor
While debugging, print_logs=True can be very helpful.
start_date and end_date allow you to search a 'non-standard' window of time.
"""
from service.driver import get_account_driver
from core.models import Identity
if print_logs:
console_handler = _init_stdout_logging()
provider = Provider.objects.get(id=provider_id)
account_driver = get_account_driver(provider)
# Non-End dated volumes on this provider
db_volumes = Volume.objects.filter(only_current_source(), instance_source__provider=provider)
all_volumes = account_driver.admin_driver.list_all_volumes(timeout=30)
seen_volumes = []
for cloud_volume in all_volumes:
try:
core_volume = convert_esh_volume(cloud_volume, provider_uuid=provider.uuid)
seen_volumes.append(core_volume)
except ObjectDoesNotExist:
tenant_id = cloud_volume.extra['object']['os-vol-tenant-attr:tenant_id']
tenant = account_driver.get_project_by_id(tenant_id)
tenant_name = tenant.name if tenant else tenant_id
try:
if not tenant:
celery_logger.warn("Warning: tenant_id %s found on volume %s, but did not exist from the account driver perspective.", tenant_id, cloud_volume)
raise ObjectDoesNotExist()
identity = Identity.objects.filter(
contains_credential('ex_project_name', tenant_name), provider=provider
).first()
if not identity:
raise ObjectDoesNotExist()
core_volume = convert_esh_volume(
cloud_volume,
provider.uuid, identity.uuid,
identity.created_by)
except ObjectDoesNotExist:
celery_logger.info("Skipping Volume %s - No Identity for: Provider:%s + Project Name:%s" % (cloud_volume.id, provider, tenant_name))
pass
now_time = timezone.now()
needs_end_date = [volume for volume in db_volumes if volume not in seen_volumes]
for volume in needs_end_date:
celery_logger.debug("End dating inactive volume: %s" % volume)
volume.end_date = now_time
volume.save()
if print_logs:
_exit_stdout_logging(console_handler)
for vol in seen_volumes:
vol.esh = None
return [vol.instance_source.identifier for vol in seen_volumes]
Example 12: monitor_sizes_for
def monitor_sizes_for(provider_id, print_logs=False):
"""
Run the set of tasks related to monitoring sizes for a provider.
Optionally, provide a list of usernames to monitor
While debugging, print_logs=True can be very helpful.
start_date and end_date allow you to search a 'non-standard' window of time.
"""
from service.driver import get_admin_driver
if print_logs:
console_handler = _init_stdout_logging()
provider = Provider.objects.get(id=provider_id)
admin_driver = get_admin_driver(provider)
# Non-End dated sizes on this provider
db_sizes = Size.objects.filter(only_current(), provider=provider)
all_sizes = admin_driver.list_sizes()
seen_sizes = []
for cloud_size in all_sizes:
core_size = convert_esh_size(cloud_size, provider.uuid)
seen_sizes.append(core_size)
now_time = timezone.now()
needs_end_date = [size for size in db_sizes if size not in seen_sizes]
for size in needs_end_date:
celery_logger.debug("End dating inactive size: %s" % size)
size.end_date = now_time
size.save()
# Find home for 'Unknown Size'
unknown_sizes = Size.objects.filter(
provider=provider, name__contains='Unknown Size'
)
for size in unknown_sizes:
# Lookup sizes may not show up in 'list_sizes'
if size.alias == 'N/A':
            continue    # This is a sentinel value added for a separate purpose.
try:
libcloud_size = admin_driver.get_size(
size.alias, forced_lookup=True
)
        except BaseHTTPError as error:
            if error.code == 404:
                # The size may have been truly deleted
                continue
            raise
if not libcloud_size:
continue
cloud_size = OSSize(libcloud_size)
core_size = convert_esh_size(cloud_size, provider.uuid)
if print_logs:
_exit_stdout_logging(console_handler)
for size in seen_sizes:
size.esh = None
return seen_sizes
Example 13: monitor_instances_for
def monitor_instances_for(
provider_id, users=None, print_logs=False, start_date=None, end_date=None
):
"""
Run the set of tasks related to monitoring instances for a provider.
Optionally, provide a list of usernames to monitor
While debugging, print_logs=True can be very helpful.
start_date and end_date allow you to search a 'non-standard' window of time.
"""
provider = Provider.objects.get(id=provider_id)
# For now, lets just ignore everything that isn't openstack.
if 'openstack' not in provider.type.name.lower():
return
instance_map = _get_instance_owner_map(provider, users=users)
if print_logs:
console_handler = _init_stdout_logging()
seen_instances = []
# DEVNOTE: Potential slowdown running multiple functions
# Break this out when instance-caching is enabled
if not settings.ENFORCING:
celery_logger.debug('Settings dictate allocations are NOT enforced')
for tenant_name in sorted(instance_map.keys()):
running_instances = instance_map[tenant_name]
identity = _get_identity_from_tenant_name(provider, tenant_name)
if identity and running_instances:
try:
driver = get_cached_driver(identity=identity)
core_running_instances = [
convert_esh_instance(
driver, inst, identity.provider.uuid, identity.uuid,
identity.created_by
) for inst in running_instances
]
seen_instances.extend(core_running_instances)
except Exception:
celery_logger.exception(
"Could not convert running instances for %s" % tenant_name
)
continue
else:
# No running instances.
core_running_instances = []
# Using the 'known' list of running instances, cleanup the DB
_cleanup_missing_instances(identity, core_running_instances)
if print_logs:
_exit_stdout_logging(console_handler)
    # return seen_instances NOTE: this has been commented out to avoid PicklingError!
    # TODO: Uncomment the above; determine what _we can return_ and return that instead.
return
Example 14: allocation_source_overage_enforcement_for_user
def allocation_source_overage_enforcement_for_user(allocation_source, user):
celery_logger.debug('allocation_source_overage_enforcement_for_user - allocation_source: %s, user: %s',
allocation_source, user)
user_instances = []
for identity in user.current_identities:
try:
celery_logger.debug('allocation_source_overage_enforcement_for_user - identity: %s', identity)
affected_instances = allocation_source_overage_enforcement_for(allocation_source, user, identity)
user_instances.extend(affected_instances)
except Exception:
celery_logger.exception(
'allocation_source_overage_enforcement_for allocation_source: %s, user: %s, and identity: %s',
allocation_source, user, identity)
return user_instances
Example 15: update_volume_metadata
def update_volume_metadata(driverCls, provider, identity, volume_alias, metadata):
"""
"""
from service import volume as volume_service
try:
celery_logger.debug("update_volume_metadata task started at %s." % datetime.now())
driver = get_driver(driverCls, provider, identity)
volume = driver.get_volume(volume_alias)
if not volume:
return
        result = volume_service.update_volume_metadata(driver, volume, metadata=metadata)
        celery_logger.debug("update_volume_metadata task finished at %s." % datetime.now())
        return result
except Exception as exc:
celery_logger.exception(exc)
update_volume_metadata.retry(exc=exc)
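All fifteen examples share one shape: log at entry with celery_logger.debug, do the work, log again, and hand failures back to the task's retry. A minimal self-contained sketch of that pattern (the decorator and its retry policy are assumptions; the source never shows the task declarations):

from datetime import datetime

from celery import shared_task
from threepio import celery_logger

@shared_task(max_retries=2, default_retry_delay=15)    # retry policy is assumed
def example_task(value):
    try:
        celery_logger.debug("example_task started at %s." % datetime.now())
        result = value * 2    # stand-in for the real work
        celery_logger.debug("example_task finished at %s." % datetime.now())
        return result
    except Exception as exc:
        celery_logger.exception(exc)
        example_task.retry(exc=exc)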