This article collects typical usage examples of the debug function from Python's twitter.common.log module. If you are unsure what the debug function does or how to call it, the curated code samples below should help.
Fifteen code examples of the debug function are shown below, sorted by popularity by default.
Example 1: _construct_scheduler
def _construct_scheduler(self):
  """
    Populates:
      self._scheduler_client
      self._client
  """
  self._scheduler_client = SchedulerClient.get(self.cluster, verbose=self.verbose)
  assert self._scheduler_client, "Could not find scheduler (cluster = %s)" % self.cluster.name
  start = time.time()
  while (time.time() - start) < self.CONNECT_MAXIMUM_WAIT.as_(Time.SECONDS):
    try:
      # this can wind up generating any kind of error, because it turns into
      # a call to a dynamically set authentication module.
      self._client = self._scheduler_client.get_thrift_client()
      break
    except SchedulerClient.CouldNotConnect as e:
      log.warning('Could not connect to scheduler: %s' % e)
    except Exception as e:
      # turn any auth module exception into an auth error.
      log.debug('Warning: got an unknown exception during authentication:')
      log.debug(traceback.format_exc())
      raise self.AuthenticationError('Error connecting to scheduler: %s' % e)
  if not self._client:
    raise self.TimeoutError('Timed out trying to connect to scheduler at %s' % self.cluster.name)
  server_version = self._client.getVersion().result.getVersionResult
  if server_version != CURRENT_API_VERSION:
    raise self.APIVersionError("Client Version: %s, Server Version: %s" %
                               (CURRENT_API_VERSION, server_version))
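The loop above is a generic connect-until-deadline pattern: keep retrying, log each failure, and give up once the time budget is spent. A minimal standalone sketch of the same idea, using the stdlib logging module in place of twitter.common.log (the connect callable and the 60-second deadline are hypothetical):

import logging
import time

log = logging.getLogger(__name__)

def connect_with_deadline(connect, deadline_seconds=60):
  """Retry `connect` until it succeeds or `deadline_seconds` elapse."""
  start = time.time()
  while (time.time() - start) < deadline_seconds:
    try:
      return connect()
    except IOError as e:
      log.debug('Could not connect, retrying: %s', e)
      time.sleep(1)
  raise RuntimeError('Timed out connecting after %s seconds' % deadline_seconds)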
Example 2: _create_kill_add_lists
def _create_kill_add_lists(self, instance_ids, operation_configs):
  """Determines a particular action (kill or add) to use for every instance in instance_ids.

  Arguments:
  instance_ids -- current batch of IDs to process.
  operation_configs -- OperationConfigs with update details.

  Returns lists of instances to kill and to add.
  """
  to_kill = []
  to_add = []
  for instance_id in instance_ids:
    from_config = operation_configs.from_config.get(instance_id)
    to_config = operation_configs.to_config.get(instance_id)

    if from_config and to_config:
      diff_output = self._diff_configs(from_config, to_config)
      if diff_output:
        log.debug('Task configuration changed for instance [%s]:\n%s'
                  % (instance_id, diff_output))
        to_kill.append(instance_id)
        to_add.append(instance_id)
    elif from_config and not to_config:
      to_kill.append(instance_id)
    elif not from_config and to_config:
      to_add.append(instance_id)
    else:
      raise self.Error('Instance %s is outside of supported range' % instance_id)

  return to_kill, to_add
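The same classification logic is easy to exercise in isolation. A minimal sketch with plain dicts standing in for the OperationConfigs and equality standing in for _diff_configs (all names here are illustrative):

def kill_add_lists(instance_ids, from_configs, to_configs):
  """Classify instances: changed -> kill and add, removed -> kill, new -> add."""
  to_kill, to_add = [], []
  for instance_id in instance_ids:
    from_config = from_configs.get(instance_id)
    to_config = to_configs.get(instance_id)
    if from_config and to_config:
      if from_config != to_config:  # inequality stands in for _diff_configs
        to_kill.append(instance_id)
        to_add.append(instance_id)
    elif from_config:
      to_kill.append(instance_id)
    elif to_config:
      to_add.append(instance_id)
    else:
      raise ValueError('Instance %s is outside of supported range' % instance_id)
  return to_kill, to_add

# Instance 1 changed, instance 2 was removed, instance 3 is new:
print(kill_add_lists([1, 2, 3], {1: 'old', 2: 'b'}, {1: 'new', 3: 'c'}))
# -> ([1, 2], [1, 3])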
Example 3: _get_process_resource_consumption
def _get_process_resource_consumption(self, task_id, process_name):
  if task_id not in self.active_tasks:
    log.debug("Task %s not found in active tasks" % task_id)
    return ProcessSample.empty().to_dict()
  sample = self.active_tasks[task_id].resource_monitor.sample_by_process(process_name).to_dict()
  log.debug('Resource consumption (%s, %s) => %s' % (task_id, process_name, sample))
  return sample
Example 4: _maybe_update_health_check_count
def _maybe_update_health_check_count(self, is_healthy, reason):
  if not is_healthy:
    log.warning('Health check failure: %s' % reason)

    if self.current_consecutive_successes > 0:
      log.debug('Reset consecutive successes counter.')
      self.current_consecutive_successes = 0

    if self._should_ignore_failure():
      return

    if self._should_fail_fast():
      log.warning('Not enough attempts left to prove health, failing fast.')
      self.healthy = False
      self.reason = reason

    self.current_consecutive_failures += 1
    if self.current_consecutive_failures > self.max_consecutive_failures:
      log.warning('Reached consecutive failure limit.')
      self.healthy = False
      self.reason = reason
  else:
    self.current_consecutive_successes += 1

    if not self.running:
      if self.current_consecutive_successes >= self.min_consecutive_successes:
        log.info('Reached consecutive success limit.')
        self.running = True

    if self.current_consecutive_failures > 0:
      log.debug('Reset consecutive failures counter.')
      self.current_consecutive_failures = 0
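The bookkeeping amounts to a small state machine: a failure resets the success streak (and vice versa), and healthy/running flip once a streak crosses its threshold. A self-contained sketch of that core, with illustrative thresholds:

class StreakTracker(object):
  """Tracks consecutive health check results, mirroring the method above."""

  def __init__(self, max_failures=3, min_successes=2):
    self.max_failures = max_failures
    self.min_successes = min_successes
    self.failures = 0
    self.successes = 0
    self.healthy = True
    self.running = False

  def record(self, is_healthy):
    if not is_healthy:
      self.successes = 0  # a failure breaks the success streak
      self.failures += 1
      if self.failures > self.max_failures:
        self.healthy = False
    else:
      self.failures = 0  # a success breaks the failure streak
      self.successes += 1
      if self.successes >= self.min_successes:
        self.running = True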
Example 5: __init__
def __init__(self,
             task_monitor,
             sandbox,
             process_collector=ProcessTreeCollector,
             disk_collector=DiskCollector,
             process_collection_interval=Amount(20, Time.SECONDS),
             disk_collection_interval=Amount(1, Time.MINUTES),
             history_time=Amount(1, Time.HOURS)):
  """
    task_monitor: TaskMonitor object specifying the task whose resources should be monitored
    sandbox: Directory for which to monitor disk utilisation
  """
  self._task_monitor = task_monitor  # exposes PIDs, sandbox
  self._task_id = task_monitor._task_id
  log.debug('Initialising resource collection for task %s' % self._task_id)
  self._process_collectors = dict()  # ProcessStatus => ProcessTreeCollector
  # TODO(jon): sandbox is also available through task_monitor, but typically the first checkpoint
  # isn't written (and hence the header is not available) by the time we initialise here
  self._sandbox = sandbox
  self._process_collector_factory = process_collector
  self._disk_collector = disk_collector(self._sandbox)
  self._process_collection_interval = process_collection_interval.as_(Time.SECONDS)
  self._disk_collection_interval = disk_collection_interval.as_(Time.SECONDS)
  min_collection_interval = min(self._process_collection_interval, self._disk_collection_interval)
  history_length = int(history_time.as_(Time.SECONDS) / min_collection_interval)
  if history_length > self.MAX_HISTORY:
    raise ValueError("Requested history length too large")
  log.debug("Initialising ResourceHistory of length %s" % history_length)
  self._history = ResourceHistory(history_length)
  self._kill_signal = threading.Event()
  threading.Thread.__init__(self)
  self.daemon = True
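With the defaults above, the history sizing works out to 180 samples: the shortest collection interval is 20 seconds, and int(3600 / 20) = 180. The same arithmetic, assuming twitter.common.quantity is importable:

from twitter.common.quantity import Amount, Time

process_interval = Amount(20, Time.SECONDS).as_(Time.SECONDS)  # 20.0
disk_interval = Amount(1, Time.MINUTES).as_(Time.SECONDS)      # 60.0
history_time = Amount(1, Time.HOURS).as_(Time.SECONDS)         # 3600.0

history_length = int(history_time / min(process_interval, disk_interval))
print(history_length)  # 180: one hour of history sampled at the 20-second interval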
Example 6: terminal_state
def terminal_state(self):
  if self._terminal_state:
    log.debug('Forced terminal state: %s' %
              TaskState._VALUES_TO_NAMES.get(self._terminal_state, 'UNKNOWN'))
    return self._terminal_state
  else:
    return TaskState.SUCCESS if self.is_healthy() else TaskState.FAILED
Example 7: flush
def flush(self):
  if self.isOpen():
    self.close()
  self.open()

  data = self.__wbuf.getvalue()
  self.__wbuf = BytesIO()

  self._session.headers['Content-Type'] = 'application/x-thrift'
  self._session.headers['Content-Length'] = str(len(data))
  self._session.headers['Host'] = self.__urlparse.hostname

  response = None
  try:
    response = self._session.post(
        self.__uri,
        data=data,
        timeout=self.__timeout,
        auth=self.__auth)
    response.raise_for_status()
  except request_exceptions.Timeout:
    raise TTransportException(
        type=TTransportException.TIMED_OUT,
        message='Timed out talking to %s' % self.__uri)
  except request_exceptions.RequestException as e:
    if response:
      log.debug('Error connecting, logging response headers:')
      for field_name, field_value in response.headers.items():
        log.debug('  %s: %s' % (field_name, field_value))
    raise TTransportException(
        type=TTransportException.UNKNOWN,
        message='Unknown error talking to %s: %s' % (self.__uri, e))

  self.__rbuf = BytesIO(response.content)
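At bottom, flush() is an HTTP POST of the serialized thrift payload, with the response body becoming the read buffer. A stripped-down sketch of that transport step using the requests library directly (the URL and payload are illustrative):

import requests

def post_thrift_payload(session, uri, data, timeout=10):
  """POST raw thrift bytes and return the response body for the read buffer."""
  session.headers['Content-Type'] = 'application/x-thrift'
  session.headers['Content-Length'] = str(len(data))
  response = session.post(uri, data=data, timeout=timeout)
  response.raise_for_status()
  return response.content

session = requests.Session()
# body = post_thrift_payload(session, 'http://scheduler.example.com/api', b'...')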
Example 8: setup_child_subreaping
def setup_child_subreaping():
  """
  This uses the prctl(2) syscall to set the `PR_SET_CHILD_SUBREAPER` flag. This
  means that if any child processes need to be reparented, they will be reparented
  to this process.

  More documentation here: http://man7.org/linux/man-pages/man2/prctl.2.html
  and here: https://lwn.net/Articles/474787/

  Callers should reap terminal children to prevent zombies.
  """
  log.debug("Calling prctl(2) with PR_SET_CHILD_SUBREAPER")
  # This constant is taken from prctl.h
  PR_SET_CHILD_SUBREAPER = 36
  try:
    library_name = ctypes.util.find_library('c')
    if library_name is None:
      log.warning("libc is not found. Unable to call prctl!")
      log.warning("Child subreaping is disabled!")
      return
    libc = ctypes.CDLL(library_name, use_errno=True)
    # If we are on a system where prctl doesn't exist, this will throw an
    # attribute error.
    ret = libc.prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0)
    if ret != 0:
      errno = ctypes.get_errno()
      raise OSError(errno, os.strerror(errno))
  except Exception as e:
    log.error("Unable to call prctl %s" % e)
    log.error("Child subreaping is disabled!")
Example 9: select_binary
def select_binary(base_path, version, name, config=None):
  """Selects a binary matching the current os and architecture.

  :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
    and name could be found.
  """
  # TODO(John Sirois): finish doc of the path structure expected under base_path
  config = config or Config.load()
  bootstrap_dir = config.getdefault('pants_bootstrapdir')
  binary_path = select_binary_base_path(base_path, version, name)
  bootstrapped_binary_path = os.path.join(bootstrap_dir, binary_path)
  if not os.path.exists(bootstrapped_binary_path):
    downloadpath = bootstrapped_binary_path + '~'
    try:
      with select_binary_stream(base_path, version, name, config) as stream:
        with safe_open(downloadpath, 'wb') as bootstrapped_binary:
          bootstrapped_binary.write(stream())
        os.rename(downloadpath, bootstrapped_binary_path)
        chmod_plus_x(bootstrapped_binary_path)
    finally:
      safe_delete(downloadpath)

  log.debug('Selected {binary} binary bootstrapped to: {path}'
            .format(binary=name, path=bootstrapped_binary_path))
  return bootstrapped_binary_path
Example 10: write
def write(self, slice_, data):
  log.debug('Disk writing %s' % slice_)
  if len(data) != slice_.length:
    raise self.WriteError('Block must be of appropriate size!')
  with open(slice_._filename, 'r+b') as fp:
    fp.seek(slice_.start)
    fp.write(data)
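The slice_ object evidently carries a filename, a start offset, and a length. A self-contained demonstration with a stand-in Slice type (the namedtuple here is hypothetical; the real class lives elsewhere in the codebase):

import collections
import tempfile

Slice = collections.namedtuple('Slice', ['_filename', 'start', 'length'])

def write_slice(slice_, data):
  if len(data) != slice_.length:
    raise ValueError('Block must be of appropriate size!')
  with open(slice_._filename, 'r+b') as fp:
    fp.seek(slice_.start)
    fp.write(data)

with tempfile.NamedTemporaryFile(delete=False) as tmp:
  tmp.write(b'0' * 16)  # 16 bytes of filler
write_slice(Slice(tmp.name, 4, 4), b'ABCD')
with open(tmp.name, 'rb') as fp:
  print(fp.read())  # b'0000ABCD00000000'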
Example 11: select
def select(self):
  """
    Read and multiplex checkpoint records from all the forked off process coordinators.
    Checkpoint records can come from one of two places:
      in-process: checkpoint records synthesized for FORKED and LOST events
      out-of-process: checkpoint records from file descriptors of forked coordinators

    Returns a list of RunnerCkpt objects that were successfully read, or an empty
    list if none were read.
  """
  self._bind_processes()
  updates = []
  for handle in filter(None, self._processes.values()):
    try:
      fstat = os.fstat(handle.fileno())
    except OSError:
      log.error('Unable to fstat %s!' % handle.name)
      continue
    if handle.tell() > fstat.st_size:
      log.error('Truncated checkpoint record detected on %s!' % handle.name)
    elif handle.tell() < fstat.st_size:
      rr = ThriftRecordReader(handle, RunnerCkpt)
      while True:
        process_update = rr.try_read()
        if process_update:
          updates.append(process_update)
        else:
          break
  if len(updates) > 0:
    log.debug('select() returning %s updates:' % len(updates))
    for update in updates:
      log.debug('  = %s' % update)
  return updates
Example 12: run
def run(self):
  log.debug('Health checker thread started.')
  self._clock.sleep(self._initial_interval)
  log.debug('Initial interval expired.')
  while not self._dead.is_set():
    self._maybe_update_failure_count(*self._checker())
    self._clock.sleep(self._interval)
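Injecting _clock rather than calling time.sleep directly keeps the loop testable: production code can pass the stdlib time module, while tests pass a fake exposing the same sleep signature. A sketch of that test double (names are illustrative):

class FakeClock(object):
  """Test double exposing the same sleep interface the checker thread uses."""

  def __init__(self):
    self.slept = 0.0

  def sleep(self, seconds):
    self.slept += seconds  # record the request instead of blocking

# Production would pass the real module (clock = time); a test passes a fake:
fake = FakeClock()
fake.sleep(10)
print(fake.slept)  # 10.0, with no wall-clock time elapsed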
Example 13: genlang
def genlang(self, lang, targets):
  bases, sources = self._calculate_sources(targets)

  if lang == 'java':
    safe_mkdir(self.java_out)
    gen = '--java_out=%s' % self.java_out
  elif lang == 'python':
    safe_mkdir(self.py_out)
    gen = '--python_out=%s' % self.py_out
  else:
    raise TaskError('Unrecognized protobuf gen lang: %s' % lang)

  args = [
    self.protobuf_binary,
    gen
  ]

  for base in bases:
    args.append('--proto_path=%s' % base)

  args.extend(sources)
  log.debug('Executing: %s' % ' '.join(args))
  process = subprocess.Popen(args)
  result = process.wait()
  if result != 0:
    raise TaskError
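For orientation, the argument list assembled above amounts to an ordinary protoc invocation. A sketch of the equivalent call with illustrative paths (it assumes protoc is on the PATH and the .proto file exists):

import subprocess

# Equivalent protoc invocation for lang == 'python'; all paths illustrative.
args = [
  'protoc',
  '--python_out=gen/py',
  '--proto_path=src/protobuf',
  'src/protobuf/example.proto',
]
print(' '.join(args))  # what the log.debug('Executing: ...') line would record
if subprocess.call(args) != 0:
  raise RuntimeError('protoc failed')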
Example 14: _update_instances_in_parallel
def _update_instances_in_parallel(self, target, instances_to_update):
  """Processes instance updates in parallel and waits for completion.

  Arguments:
  target -- target method to handle instance update.
  instances_to_update -- list of InstanceData with update details.

  Returns Queue with non-updated instance data.
  """
  log.info('Processing in parallel with %s worker thread(s)' % self._update_config.batch_size)
  instance_queue = Queue()
  for instance_to_update in instances_to_update:
    instance_queue.put(instance_to_update)

  try:
    threads = []
    for _ in range(self._update_config.batch_size):
      threads.append(spawn_worker(target, kwargs={'instance_queue': instance_queue}))

    for thread in threads:
      thread.join_and_raise()
  except Exception as e:
    log.debug('Caught unhandled exception: %s' % e)
    self._terminate()
    raise

  return instance_queue
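The shape of the pattern is a shared Queue drained by a fixed pool of workers. A self-contained sketch using plain threading.Thread in place of Aurora's spawn_worker helper (note it does not re-raise worker failures the way join_and_raise does):

import threading
try:
  from queue import Queue, Empty  # Python 3
except ImportError:
  from Queue import Queue, Empty  # Python 2

def drain(instance_queue):
  """Worker in the spirit of `target`: pull work until the queue is empty."""
  while True:
    try:
      item = instance_queue.get_nowait()
    except Empty:
      return
    print('updating instance %s' % item)

instance_queue = Queue()
for instance in [1, 2, 3, 4]:
  instance_queue.put(instance)

threads = [threading.Thread(target=drain, args=(instance_queue,)) for _ in range(2)]
for t in threads:
  t.start()
for t in threads:
  t.join()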
Example 15: control
def control(self, force=False):
  """
    Bind to the checkpoint associated with this task, position to the end of the log if
    it exists, or create it if it doesn't. Fails if we cannot get "leadership", i.e. a
    file lock on the checkpoint stream.
  """
  if self.is_terminal():
    raise self.StateError('Cannot take control of a task in terminal state.')
  if self._sandbox:
    safe_mkdir(self._sandbox)
  ckpt_file = self._pathspec.getpath('runner_checkpoint')
  try:
    self._ckpt = TaskRunnerHelper.open_checkpoint(ckpt_file, force=force, state=self._state)
  except TaskRunnerHelper.PermissionError:
    raise self.PermissionError('Unable to open checkpoint %s' % ckpt_file)
  log.debug('Flipping recovery mode off.')
  self._recovery = False
  self._set_task_status(self.task_state())
  self._resume_task()
  try:
    yield
  except Exception as e:
    log.error('Caught exception in self.control(): %s', e)
    log.error('  %s', traceback.format_exc())
    self._ckpt.close()
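Since the body ends in a bare yield, this method is a generator intended to be driven as a context manager, presumably via contextlib.contextmanager at the definition site (not shown in this excerpt). A minimal runnable sketch of that acquire/yield/close-on-error shape, with prints standing in for the checkpoint bookkeeping; unlike the excerpt, the sketch re-raises after closing:

from contextlib import contextmanager

@contextmanager
def controlled(name):
  """Sketch of the acquire/yield/close-on-error shape of control()."""
  print('taking control of %s' % name)  # stands in for opening the checkpoint
  try:
    yield
  except Exception as e:
    print('caught %s; closing checkpoint' % e)  # stands in for self._ckpt.close()
    raise

with controlled('task-1'):
  pass  # body runs while "leadership" is held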