本文整理汇总了Python中twitter.common.log.warn函数的典型用法代码示例。如果您正苦于以下问题:Python warn函数的具体用法?Python warn怎么用?Python warn使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了warn函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: update
def update(self, instances=None):
  """Performs the job update, blocking until it completes.

  A rollback will be performed if the update was considered a failure based on the
  update configuration.

  Arguments:
  instances -- (optional) instances to update. If not specified, all instances will be updated.

  Returns a response object with update result status.
  """
  resp = self._start()
  if resp.responseCode != ResponseCode.OK:
    # Could not acquire the update lock / start the update; propagate as-is.
    return resp
  try:
    # Handle cron jobs separately from other jobs.
    if self._replace_template_if_cron():
      log.info('Cron template updated, next run will reflect changes')
      return self._finish()
    else:
      if not self._update(instances):
        # Too many failed instances: roll back, then report failure.
        log.warn('Update failures threshold reached')
        self._finish()
        return self._failed_response('Update reverted')
      else:
        log.info('Update successful')
        return self._finish()
  except self.Error as e:
    # Fatal error: abort in place without attempting a rollback.
    return self._failed_response('Aborting update without rollback! Fatal error: %s' % e)
示例2: wait_for_accept
def wait_for_accept(cls, port, tunnel_popen, timeout):
  """Wait until a TCP connection to ``port`` on localhost is accepted.

  Polls the port with exponential back-off until a connection succeeds,
  ``timeout`` elapses, or the tunnel subprocess exits.

  Arguments:
  port -- local port forwarded by the tunnel.
  tunnel_popen -- subprocess.Popen handle of the ssh tunnel process.
  timeout -- Amount of Time to wait before giving up.

  Returns True once the tunnel accepts a connection, False otherwise.
  """
  total_time = Amount(0, Time.SECONDS)
  sleep = cls.MIN_RETRY
  warned = False  # Did we log a warning that shows we're waiting for the tunnel?
  while total_time < timeout and tunnel_popen.returncode is None:
    try:
      accepted_socket = socket.create_connection(('localhost', port), timeout=5.0)
      accepted_socket.close()
      return True
    except socket.error:
      total_time += sleep
      time.sleep(sleep.as_(Time.SECONDS))
      # Increase sleep exponentially until MAX_INTERVAL is reached
      sleep = min(sleep * 2, cls.MAX_INTERVAL)
      if total_time > cls.WARN_THRESHOLD and not warned:
        # NOTE(review): the message says "timeout is %s" but interpolates
        # cls.DEFAULT_TIMEOUT rather than the `timeout` argument — confirm intent.
        log.warn('Still waiting for tunnel to be established after %s (timeout is %s)' % (
            total_time, cls.DEFAULT_TIMEOUT))
        warned = True
      tunnel_popen.poll()  # needed to update tunnel_popen.returncode
  # Loop exited without a connection: report why before failing.
  if tunnel_popen.returncode is not None:
    cls.log('SSH returned prematurely with code %s' % str(tunnel_popen.returncode))
  else:
    cls.log('timed out initializing tunnel')
  return False
示例3: start
def start(self, env=None):
  """Launch mysqld via the launcher script and block until it accepts requests.

  Arguments:
  env -- (optional) environment mapping passed to both subprocesses.

  Returns the Popen handle of the launched mysqld subprocess, or None if a
  subprocess is already running.
  """
  if self._process:
    log.warn("start() called when a running task subprocess already exists")
    return
  command = (
      "%(cmd)s %(framework_user)s %(host)s %(port)s %(server_id)s %(data_dir)s %(log_dir)s "
      "%(tmp_dir)s %(conf_file)s %(buffer_pool_size)s" % dict(
          cmd=os.path.join(self._scripts_dir, "mysos_launch_mysqld.sh"),
          framework_user=self._framework_user,
          host=self._host,
          port=self._port,
          server_id=self._server_id,
          data_dir=self._sandbox.mysql_data_dir,
          log_dir=self._sandbox.mysql_log_dir,
          tmp_dir=self._sandbox.mysql_tmp_dir,
          conf_file=self._conf_file,
          buffer_pool_size=self._buffer_pool_size))
  log.info("Executing command: %s" % command)
  # os.setpgrp puts the child in its own process group so signals delivered to
  # this process don't propagate to mysqld directly.
  self._process = subprocess.Popen(command, shell=True, env=env, preexec_fn=os.setpgrp)

  # There is a delay before mysqld becomes available to accept requests. Wait for it.
  command = "%(cmd)s %(pid_file)s %(port)s %(timeout)s" % dict(
      cmd=os.path.join(self._scripts_dir, "mysos_wait_for_mysqld.sh"),
      pid_file=os.path.join(self._sandbox.mysql_log_dir, "mysqld.pid"),
      port=self._port,
      timeout=60)
  log.info("Executing command: %s" % command)
  subprocess.check_call(command, shell=True, env=env)

  return self._process
示例4: initialize
def initialize(options):
  """Build the Thermos/Aurora executor from parsed command-line options.

  Arguments:
  options -- parsed executor options namespace.

  Returns a configured AuroraExecutor instance.
  """
  cwd_path = os.path.abspath(CWD)
  checkpoint_root = os.path.join(cwd_path, MesosPathDetector.DEFAULT_SANDBOX_PATH)

  # status providers:
  status_providers = [
      HealthCheckerProvider(),
      ResourceManagerProvider(checkpoint_root=checkpoint_root)
  ]

  if options.announcer_enable:
    log.warn('Please remove the deprecated and no-op --announcer-enable flag in scheduler config!')

  if options.announcer_ensemble is not None:
    status_providers.append(DefaultAnnouncerCheckerProvider(
        options.announcer_ensemble,
        options.announcer_serverset_path,
        options.announcer_allow_custom_serverset_path,
        options.announcer_hostname,
        make_zk_auth(options.announcer_zookeeper_auth_config)
    ))

  # Create executor stub
  if options.execute_as_user or options.nosetuid:
    # If nosetuid is set, execute_as_user is also None
    thermos_runner_provider = UserOverrideThermosTaskRunnerProvider(
        dump_runner_pex(),
        checkpoint_root,
        artifact_dir=cwd_path,
        process_logger_destination=options.runner_logger_destination,
        process_logger_mode=options.runner_logger_mode,
        rotate_log_size_mb=options.runner_rotate_log_size_mb,
        rotate_log_backups=options.runner_rotate_log_backups,
        preserve_env=options.preserve_env
    )
    thermos_runner_provider.set_role(None)

    thermos_executor = AuroraExecutor(
        runner_provider=thermos_runner_provider,
        status_providers=status_providers,
        sandbox_provider=UserOverrideDirectorySandboxProvider(options.execute_as_user)
    )
  else:
    thermos_runner_provider = DefaultThermosTaskRunnerProvider(
        dump_runner_pex(),
        checkpoint_root,
        artifact_dir=cwd_path,
        process_logger_destination=options.runner_logger_destination,
        process_logger_mode=options.runner_logger_mode,
        rotate_log_size_mb=options.runner_rotate_log_size_mb,
        rotate_log_backups=options.runner_rotate_log_backups,
        preserve_env=options.preserve_env
    )

    thermos_executor = AuroraExecutor(
        runner_provider=thermos_runner_provider,
        status_providers=status_providers
    )

  return thermos_executor
示例5: _run_task
def _run_task(self, task):
  """Run the task through self._runner, reporting state changes to Mesos.

  Sends TASK_RUNNING on successful start, waits for the runner to terminate,
  then reports a terminal state (KILLED/FAILED/LOST). Always tears down the
  executor afterwards.

  Arguments:
  task -- the Mesos task to execute.
  """
  assert self._runner, "_runner should be created before this method is called"
  try:
    self._runner.start()
    log.info("Task runner for task %s started" % task.task_id)
    self._send_update(task.task_id.value, mesos_pb2.TASK_RUNNING)
  except TaskError as e:
    log.error("Task runner for task %s failed to start: %s" % (task.task_id, str(e)))
    # Send TASK_FAILED if the task failed to start.
    self._send_update(task.task_id.value, mesos_pb2.TASK_FAILED)
  except Exception as e:
    log.error("Error occurred while executing the task: %s" % e)
    log.error(traceback.format_exc())
    # Send TASK_LOST for unknown errors.
    self._send_update(task.task_id.value, mesos_pb2.TASK_LOST)
  else:
    # Wait for the task's return code (when it terminates).
    try:
      returncode = self._runner.join()
      # If '_runner' terminates, it has either failed or been killed.
      log.warn("Task process terminated with return code %s" % returncode)
    except TaskError as e:
      log.error("Task terminated: %s" % e)
    finally:
      if self._killed:
        self._send_update(task.task_id.value, mesos_pb2.TASK_KILLED)
      else:
        self._send_update(task.task_id.value, mesos_pb2.TASK_FAILED)
      self._terminated.set()
  finally:
    # No matter what happens above, when we reach here the executor has no task to run so it
    # should just commit seppuku.
    self._kill()
示例6: __init__
def __init__(
    self,
    batch_size,
    restart_threshold,
    watch_secs,
    max_per_shard_failures,
    max_total_failures,
    rollback_on_failure=True,
    wait_for_batch_completion=False,
    pulse_interval_secs=None,
):
  """Validate and store job-update configuration.

  Arguments:
  batch_size -- number of instances updated per batch; must be > 0.
  restart_threshold -- deprecated; a truthy value only triggers a warning.
  watch_secs -- seconds to watch an updated instance; must be > 0.
  max_per_shard_failures -- per-instance failure tolerance.
  max_total_failures -- total failure tolerance across the job.
  rollback_on_failure -- whether a failed update is rolled back.
  wait_for_batch_completion -- whether batches run strictly sequentially.
  pulse_interval_secs -- optional heartbeat interval; when given, must be at
    least MIN_PULSE_INTERVAL_SECONDS.

  Raises ValueError on any invalid argument.
  """
  if batch_size <= 0:
    raise ValueError("Batch size should be greater than 0")
  if watch_secs <= 0:
    raise ValueError("Watch seconds should be greater than 0")
  if pulse_interval_secs is not None and pulse_interval_secs < self.MIN_PULSE_INTERVAL_SECONDS:
    raise ValueError("Pulse interval seconds must be at least %s seconds." % self.MIN_PULSE_INTERVAL_SECONDS)
  if restart_threshold:
    log.warn("restart_threshold has been deprecated and will be removed in a future release")
  self.batch_size = batch_size
  self.watch_secs = watch_secs
  self.max_total_failures = max_total_failures
  self.max_per_instance_failures = max_per_shard_failures
  self.rollback_on_failure = rollback_on_failure
  self.wait_for_batch_completion = wait_for_batch_completion
  self.pulse_interval_secs = pulse_interval_secs
示例7: _maybe_scrubbed_env
def _maybe_scrubbed_env(cls):
  """Yield with the variables in cls._SCRUBBED_ENV scrubbed from the environment.

  Logs a warning for each scrubbed variable that is currently set.
  (Presumably decorated with @contextmanager at the unseen definition site.)
  """
  for env_var in cls._SCRUBBED_ENV:
    value = os.getenv(env_var)
    if value:
      log.warn('Scrubbing {env_var}={value}'.format(env_var=env_var, value=value))
  with environment_as(**cls._SCRUBBED_ENV):
    yield
示例8: _request_agent_containers
def _request_agent_containers(self):
  """Fetch the container list from the agent HTTP API.

  Returns the decoded JSON response; on any request error, logs a warning and
  returns an empty list (best effort, never raises).
  """
  try:
    resp = requests.get(self._url, timeout=self._request_timeout)
    resp.raise_for_status()
    return resp.json()
  except requests.exceptions.RequestException as ex:
    # Lazy %-style args: the message is only formatted if the record is emitted.
    log.warn("MesosDiskCollector: Unexpected error talking to agent api: %s", ex)
    return []
示例9: add_to_queue
def add_to_queue(self, queue, item, label):
  """Queue items sent to us by the sniffer.

  Drops the item (with a warning) when the queue already exceeds its
  maximum length; otherwise prepends it.
  """
  count = len(queue)
  if count > queue.maxlength():  # pragma: no cover
    log.warn("Too many %s queued (%d)", label, count)
    return
  queue.appendleft(item)
示例10: __check_int
def __check_int(item):
  """Coerce ``item`` to int.

  Returns the int value, or None when ``item`` is None or not parseable
  (a failed parse is logged, not raised).
  """
  if item is not None:
    try:
      item = int(item)
    except ValueError:
      log.warn('Failed to deserialize value %r' % item)
      item = None
  return item
示例11: iterate
def iterate(self):
  """Collect one sample by re-reading the JSON sample file.

  Best effort: on read or parse failure the previous sample is kept and a
  warning is logged.
  """
  with self._lock:
    try:
      with open(self._filename, 'r') as fp:
        self._sample = json.load(fp)
    except (IOError, OSError, ValueError) as e:
      # `log` is guarded — presumably it can be None/unset during interpreter
      # teardown; confirm against module-level setup.
      if log:
        log.warn('Failed to collect sample: %s' % e)
示例12: run
def run(self, lock):
  """Execute the requested goal phases against the configured targets.

  Arguments:
  lock -- the build lock, passed through to the Context.

  Returns the result code of Phase.attempt (or of Phase.execute on the
  unknown-goals help path).
  """
  if self.options.dry_run:
    # Python-3 compatible call form (was a Python-2 print statement).
    print("****** Dry Run ******")

  logger = None
  if self.options.log or self.options.log_level:
    # Lazily configure twitter.common.log only when logging was requested.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
    logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init("goals")
    else:
      init()
    logger = log

  if self.options.recursive_directory:
    log.warn("--all-recursive is deprecated, use a target spec with the form [dir]:: instead")
    for directory in self.options.recursive_directory:
      self.add_target_recursive(directory)

  if self.options.target_directory:
    log.warn("--all is deprecated, use a target spec with the form [dir]: instead")
    for directory in self.options.target_directory:
      self.add_target_directory(directory)

  context = Context(
      self.config,
      self.options,
      self.targets,
      requested_goals=self.requested_goals,
      lock=lock,
      log=logger,
      timer=self.timer if self.options.time else None,
  )

  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print("Unknown goal(s): %s" % " ".join(phase.name for phase in unknown))
    print("")
    # Fall back to the 'goals' help phase to list what is available.
    return Phase.execute(context, "goals")

  if logger:
    logger.debug("Operating on targets: %s", self.targets)

  ret = Phase.attempt(context, self.phases)

  if self.options.time:
    print("Timing report")
    print("=============")
    self.timer.print_timings()

  return ret
示例13: _maybe_scrubbed_classpath
def _maybe_scrubbed_classpath(self):
  """Yield with CLASSPATH removed from the environment when scrubbing is enabled.

  When self._scrub_classpath is falsy, yields with the environment untouched.
  A currently-set CLASSPATH is logged before being scrubbed.
  """
  if self._scrub_classpath:
    classpath = os.getenv('CLASSPATH')
    if classpath:
      log.warn('Scrubbing CLASSPATH=%s' % classpath)
    with environment_as(CLASSPATH=None):
      yield
  else:
    yield
示例14: run
def run(self, lock):
  """Check targets for dependency cycles, then execute the requested phases.

  Arguments:
  lock -- the build lock, passed through to the Context.

  Returns the result of Phase.attempt (or of Phase.execute on the
  unknown-goals help path).
  """
  # Fail fast if any target graph contains a cycle.
  with self.check_errors("Target contains a dependency cycle") as error:
    for target in self.targets:
      try:
        InternalTarget.check_cycles(target)
      except InternalTarget.CycleException:
        error(target.id)

  timer = None
  if self.options.time:
    # Minimal timer object satisfying the now()/log() interface Phase expects.
    class Timer(object):
      def now(self):
        return time.time()
      def log(self, message):
        print(message)
    timer = Timer()

  logger = None
  if self.options.log or self.options.log_level:
    # Lazily configure twitter.common.log only when logging was requested.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init('goals')
    else:
      init()
    logger = log

  if self.options.recursive_directory:
    log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
    for directory in self.options.recursive_directory:
      self.add_target_recursive(directory)

  if self.options.target_directory:
    log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
    for directory in self.options.target_directory:
      self.add_target_directory(directory)

  context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
    print('')
    # Fall back to the 'goals' help phase to list what is available.
    return Phase.execute(context, 'goals')

  if logger:
    logger.debug('Operating on targets: %s', self.targets)

  return Phase.attempt(context, self.phases, timer=timer)
示例15: cpu_affinity
def cpu_affinity(self):
  """
  Get CPU affinity of this process.

  :return: a list() of CPU cores this process is pinned to, or None when the
    underlying process object does not expose cpu_affinity on this platform.
  """
  try:
    return self.process.cpu_affinity()
  except AttributeError:
    # Not every platform exposes cpu_affinity; degrade to a warning and
    # implicitly return None.
    log.warn('cpu affinity is not available on your platform')