This article collects typical usage examples of the twitter.common.app.error function in Python. If you are unsure how error is called, or what real-world uses of it look like, the curated examples below should help.
Fifteen code examples of the error function are shown below, sorted by popularity by default.
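Before the examples, here is a minimal sketch of the pattern they all share, assuming the usual twitter.common.app entry point: app.error(msg) prints the message and terminates the process with a non-zero exit status, so it is reserved for unrecoverable argument or configuration problems. The --config option and the checks around it are hypothetical and exist only to illustrate the call.

import os

from twitter.common import app

# Hypothetical option used only to demonstrate app.error.
app.add_option('--config', dest='config', help='Path to a configuration file.')

def main(args, options):
  # Validate inputs up front; app.error logs the message and exits non-zero.
  if not options.config:
    app.error('--config is required')
  if not os.path.isfile(options.config):
    app.error('Could not find %s' % options.config)
  print('Using config: %s' % options.config)

app.main()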
Example 1: add
def add(file_or_dir):
  if os.path.isfile(file_or_dir):
    add_file(file_or_dir)
  elif os.path.isdir(file_or_dir):
    add_dir(file_or_dir)
  else:
    app.error("Unknown or non-existent file: %s" % file_or_dir)
Example 2: from_assigned_task
def from_assigned_task(self, assigned_task, _):
  mesos_task = mesos_task_instance_from_assigned_task(assigned_task)
  if not mesos_task.has_announce():
    return None
  portmap = resolve_ports(mesos_task, assigned_task.assignedPorts)
  # assigned_task.slaveHost is the --hostname argument passed into the mesos slave.
  # Using this allows overriding the hostname published into ZK when announcing.
  # If no argument was passed to the mesos-slave, the slave falls back to gethostname().
  endpoint, additional = make_endpoints(
      assigned_task.slaveHost,
      portmap,
      mesos_task.announce().primary_port().get())
  client = self.make_zk_client()
  if mesos_task.announce().has_zk_path():
    if self.__allow_custom_serverset_path:
      path = mesos_task.announce().zk_path().get()
    else:
      app.error('Executor must be started with --announcer-allow-custom-serverset-path in order '
                'to use zk_path in the Announcer config')
  else:
    path = self.make_zk_path(assigned_task)
  initial_interval = mesos_task.health_check_config().initial_interval_secs().get()
  interval = mesos_task.health_check_config().interval_secs().get()
  consecutive_failures = mesos_task.health_check_config().max_consecutive_failures().get()
  timeout_secs = initial_interval + (consecutive_failures * interval)
  return AnnouncerChecker(
      client, path, timeout_secs, endpoint, additional=additional, shard=assigned_task.instanceId,
      name=self.name)
Example 3: tail
def tail(args, options):
  """Tail the logs of a task process.

  Usage: thermos tail task_name [process_name]
  """
  if len(args) == 0:
    app.error("Expected a task to tail, got nothing!")
  if len(args) not in (1, 2):
    app.error("Expected at most two arguments (task and optional process), got %d" % len(args))
  task_id = args[0]
  detector = TaskDetector(root=options.root)
  checkpoint = CheckpointDispatcher.from_file(detector.get_checkpoint(task_id))
  log_dir = checkpoint.header.log_dir
  process_runs = [(process, run) for (process, run) in detector.get_process_runs(task_id, log_dir)]
  if len(args) == 2:
    process_runs = [(process, run) for (process, run) in process_runs if process == args[1]]
  if len(process_runs) == 0:
    print("ERROR: No processes found.", file=sys.stderr)
    sys.exit(1)
  processes = set([process for process, _ in process_runs])
  if len(processes) != 1:
    print("ERROR: More than one process matches query.", file=sys.stderr)
    sys.exit(1)
  process = processes.pop()
  run = max([run for _, run in process_runs])
  logdir = TaskPath(root=options.root, task_id=args[0], process=process, run=run, log_dir=log_dir).getpath(
      "process_logdir"
  )
  logfile = os.path.join(logdir, "stderr" if options.use_stderr else "stdout")
  monitor = TaskMonitor(TaskPath(root=options.root), args[0])

  def log_is_active():
    active_processes = monitor.get_active_processes()
    for process_status, process_run in active_processes:
      if process_status.process == process and process_run == run:
        return True
    return False

  if not log_is_active():
    print("Tail of terminal log %s" % logfile)
    for line in tail_closed(logfile):
      print(line.rstrip())
    return
  now = time.time()
  next_check = now + 5.0
  print("Tail of active log %s" % logfile)
  for line in tail_f(logfile, include_last=True, forever=False):
    print(line.rstrip())
    if time.time() > next_check:
      if not log_is_active():
        break
      else:
        next_check = time.time() + 5.0
Example 4: create
def create(args, options):
  validate_common_options(options)
  if not options.num_nodes:
    app.error("--num_nodes is required")
  if not options.cluster_user:
    app.error("--cluster_user is required")
  url = 'http://%s:%s/clusters/%s' % (options.api_host, options.api_port, options.cluster_name)
  values = dict(
      num_nodes=int(options.num_nodes),
      cluster_user=options.cluster_user,
      size=options.size if options.size else '',
      backup_id=options.backup_id if options.backup_id else '')
  req = urllib2.Request(url, urllib.urlencode(values))
  try:
    response = urllib2.urlopen(req).read()
  except urllib2.HTTPError as e:
    log.error("POST request failed: %s, %s, %s" % (
        e.code, BaseHTTPServer.BaseHTTPRequestHandler.responses[e.code], e.read()))
    app.quit(1)
  try:
    result = json.loads(response)
    if not isinstance(result, dict):
      raise ValueError()
  except ValueError:
    log.error("Invalid response: %s" % response)
    app.quit(1)
  log.info("Cluster created. Cluster info: %s" % str(result))
  with open(options.password_file, 'w') as f:
    f.write(result["cluster_password"])
  log.info("Waiting for the master for this cluster to be elected...")
  master_endpoint = wait_for_master(result['cluster_url']).service_endpoint
  connection_str = "mysql://%s:%s@%s:%d/" % (
      options.cluster_user,
      result["cluster_password"],
      master_endpoint.host,
      master_endpoint.port)
  log.info("Connecting to the MySQL cluster master: %s" % connection_str)
  engine = create_engine(connection_str)
  for i in range(5):  # Loop for 5 times/seconds to wait for the master to be promoted.
    try:
      # TODO(jyx): Test writing to the master and reading from the slave.
      result = engine.execute("SELECT 1;").scalar()
      assert 1 == int(result), "Expecting result to be 1 but got %s" % result
      break
    except OperationalError:
      if i == 4:
        raise
      log.debug("MySQL master not ready yet. Sleep for 1 second...")
      time.sleep(1)
  log.info("Cluster successfully started")
Example 5: main
def main(args, options):
  thermos_runner_provider = DefaultThermosTaskRunnerProvider(
      dump_runner_pex(),
      artifact_dir=os.path.realpath('.'),
  )
  # status providers:
  status_providers = [HealthCheckerProvider()]
  if options.announcer_enable:
    if options.announcer_ensemble is None:
      app.error('Must specify --announcer-ensemble if the announcer is enabled.')
    status_providers.append(DefaultAnnouncerCheckerProvider(
        options.announcer_ensemble, options.announcer_serverset_path))
  # Create executor stub
  thermos_executor = AuroraExecutor(
      runner_provider=thermos_runner_provider,
      status_providers=status_providers,
  )
  # Create driver stub
  driver = MesosExecutorDriver(thermos_executor)
  # This is an ephemeral executor -- shutdown if we receive no tasks within a certain
  # time period
  ExecutorTimeout(thermos_executor.launched, driver).start()
  # Start executor
  driver.run()
  log.info('MesosExecutorDriver.run() has finished.')
Example 6: delete
def delete(args, options):
  validate_common_options(options)
  with open(options.password_file, 'r') as f:
    password = f.read().strip()
    if not password:
      app.error("Empty password file")
  url = 'http://%s:%s/clusters/%s' % (options.api_host, options.api_port, options.cluster_name)
  values = dict(password=password)
  req = urllib2.Request(url, urllib.urlencode(values))
  req.get_method = lambda: 'DELETE'
  try:
    response = urllib2.urlopen(req).read()
  except urllib2.HTTPError as e:
    log.error("DELETE request failed: %s, %s, %s" % (
        e.code, BaseHTTPServer.BaseHTTPRequestHandler.responses[e.code], e.read()))
    app.quit(1)
  try:
    result = json.loads(response)
    if not isinstance(result, dict):
      raise ValueError()
  except ValueError:
    log.error("Invalid response: %s" % response)
    app.quit(1)
  log.info("Cluster deletion result: %s" % result)
  log.info("Waiting for the cluster to terminate...")
  wait_for_termination(result['cluster_url'])
  log.info("Cluster terminated/deleted")
Example 7: get_task_from_options
def get_task_from_options(args, opts, **kw):
  loader = ThermosConfigLoader.load_json if opts.json else ThermosConfigLoader.load
  if len(args) != 1:
    app.error('Should specify precisely one config, instead got: %s' % args)
  tasks = loader(args[0], bindings=opts.bindings, **kw)
  task_list = list(tasks.tasks())
  if len(task_list) == 0:
    app.error("No tasks specified!")
  if opts.task is None and len(task_list) > 1:
    app.error("Multiple tasks in config but no task name specified!")
  task = None
  if opts.task is not None:
    for t in task_list:
      if t.task().name().get() == opts.task:
        task = t
        break
    if task is None:
      app.error("Could not find task %s!" % opts.task)
  else:
    task = task_list[0]
  if kw.get('strict', False):
    if not task.task.check().ok():
      app.error(task.task.check().message())
  return task
Example 8: generate_token_interactive
def generate_token_interactive():
  password = getpass('Enter your Subsonic password: ')
  salt = getpass('Enter a salt (an integer of at least six digits): ')
  if len(salt) < 6 or not salt.isdigit():
    app.error('Salt value is not an integer of at least six digits.')
  token = md5(password + salt).hexdigest()
  print 'Your API token is: {}'.format(token)
  print 'This must be used with the same salt value entered during this session.'
Example 9: initialize
def initialize(options):
  cwd_path = os.path.abspath(CWD)
  checkpoint_root = os.path.join(cwd_path, MesosPathDetector.DEFAULT_SANDBOX_PATH)
  # status providers:
  status_providers = [
      HealthCheckerProvider(),
      ResourceManagerProvider(checkpoint_root=checkpoint_root)
  ]
  if options.announcer_enable:
    if options.announcer_ensemble is None:
      app.error('Must specify --announcer-ensemble if the announcer is enabled.')
    status_providers.append(DefaultAnnouncerCheckerProvider(
        options.announcer_ensemble,
        options.announcer_serverset_path,
        options.announcer_allow_custom_serverset_path
    ))
  # Create executor stub
  if options.execute_as_user or options.nosetuid:
    # If nosetuid is set, execute_as_user is also None
    thermos_runner_provider = UserOverrideThermosTaskRunnerProvider(
        dump_runner_pex(),
        checkpoint_root,
        artifact_dir=cwd_path,
        process_logger_destination=options.runner_logger_destination,
        process_logger_mode=options.runner_logger_mode,
        rotate_log_size_mb=options.runner_rotate_log_size_mb,
        rotate_log_backups=options.runner_rotate_log_backups,
        preserve_env=options.preserve_env
    )
    thermos_runner_provider.set_role(None)
    thermos_executor = AuroraExecutor(
        runner_provider=thermos_runner_provider,
        status_providers=status_providers,
        sandbox_provider=UserOverrideDirectorySandboxProvider(options.execute_as_user)
    )
  else:
    thermos_runner_provider = DefaultThermosTaskRunnerProvider(
        dump_runner_pex(),
        checkpoint_root,
        artifact_dir=cwd_path,
        process_logger_destination=options.runner_logger_destination,
        process_logger_mode=options.runner_logger_mode,
        rotate_log_size_mb=options.runner_rotate_log_size_mb,
        rotate_log_backups=options.runner_rotate_log_backups,
        preserve_env=options.preserve_env
    )
    thermos_executor = AuroraExecutor(
        runner_provider=thermos_runner_provider,
        status_providers=status_providers
    )
  return thermos_executor
Example 10: read
def read(args, options):
  """Replay a thermos checkpoint.

  Usage: thermos read [options] checkpoint_filename

  Options:
    --simple   Do not replay the full task state machine. Only print out the contents of
               each checkpoint log message.
  """
  if len(args) != 1:
    app.error('Expected one checkpoint file, got %s' % len(args))
  if not os.path.exists(args[0]):
    app.error('Could not find %s' % args[0])
  dispatcher = CheckpointDispatcher()
  state = RunnerState(processes={})
  with open(args[0], 'r') as fp:
    try:
      for record in ThriftRecordReader(fp, RunnerCkpt):
        if not options.simple:
          dispatcher.dispatch(state, record)
        else:
          print('CKPT: %s' % record)
    except RecordIO.Error as err:
      print("Failed to recover from %s: %s" % (fp.name, err))
      return
  if not options.simple:
    if state is None or state.header is None:
      print('Checkpoint stream CORRUPT or outdated format')
      return
    print('Recovered Task Header:')
    print('  id:      %s' % state.header.task_id)
    print('  user:    %s' % state.header.user)
    print('  host:    %s' % state.header.hostname)
    print('  sandbox: %s' % state.header.sandbox)
    if state.header.ports:
      print('  ports:   %s' % ' '.join(
          '%s->%s' % (name, port) for (name, port) in state.header.ports.items()))
    print('Recovered Task States:')
    for task_status in state.statuses:
      print('  %s [pid: %d] => %s' % (
          time.asctime(time.localtime(task_status.timestamp_ms / 1000.0)),
          task_status.runner_pid,
          TaskState._VALUES_TO_NAMES[task_status.state]))
    print('Recovered Processes:')
    for process, process_history in state.processes.items():
      print('  %s runs: %s' % (process, len(process_history)))
      for k in reversed(range(len(process_history))):
        run = process_history[k]
        print('    %2d: pid=%d, rc=%s, finish:%s, state:%s' % (
            k,
            run.pid,
            run.return_code if run.return_code is not None else '',
            time.asctime(time.localtime(run.stop_time)) if run.stop_time else 'None',
            ProcessState._VALUES_TO_NAMES.get(run.state, 'Unknown')))
Example 11: pid_provider
def pid_provider():
  options = app.get_options()
  for path, _, pid in list_pids():
    if pid == options.pid:
      break
  else:
    # for/else: only reached if the loop completed without finding the pid.
    app.error('Could not find pid %s' % options.pid)

  def loader():
    with open(path, 'rb') as fp:
      return fp.read()

  return loader
Example 12: main
def main():
  if MesosExecutorDriver is None:
    app.error('Could not load MesosExecutorDriver!')
  thermos_gc_executor, metric_writer, driver = initialize()
  thermos_gc_executor.start()
  metric_writer.start()
  driver.run()
  log.info('MesosExecutorDriver.run() has finished.')
Example 13: main
def main(args, options):
  if MesosExecutorDriver is None:
    app.error('Could not load MesosExecutorDriver!')
  # status providers:
  status_providers = [
      HealthCheckerProvider(),
      ResourceManagerProvider(checkpoint_root=options.checkpoint_root)
  ]
  if options.announcer_enable:
    if options.announcer_ensemble is None:
      app.error('Must specify --announcer-ensemble if the announcer is enabled.')
    status_providers.append(DefaultAnnouncerCheckerProvider(
        options.announcer_ensemble, options.announcer_serverset_path))
  # Create executor stub
  if options.execute_as_user or options.nosetuid:
    # If nosetuid is set, execute_as_user is also None
    thermos_runner_provider = UserOverrideThermosTaskRunnerProvider(
        dump_runner_pex(),
        artifact_dir=os.path.abspath(CWD)
    )
    thermos_runner_provider.set_role(None)
    thermos_executor = AuroraExecutor(
        runner_provider=thermos_runner_provider,
        status_providers=status_providers,
        sandbox_provider=UserOverrideDirectorySandboxProvider(options.execute_as_user)
    )
  else:
    thermos_runner_provider = DefaultThermosTaskRunnerProvider(
        dump_runner_pex(),
        artifact_dir=os.path.abspath(CWD)
    )
    thermos_executor = AuroraExecutor(
        runner_provider=thermos_runner_provider,
        status_providers=status_providers
    )
  # Create driver stub
  driver = MesosExecutorDriver(thermos_executor)
  # This is an ephemeral executor -- shutdown if we receive no tasks within a certain
  # time period
  ExecutorTimeout(thermos_executor.launched, driver).start()
  # Start executor
  driver.run()
  log.info('MesosExecutorDriver.run() has finished.')
Example 14: main
def main(args):
  if len(args) != 1:
    app.error("Must supply a serverset path to monitor.")

  def on_join(endpoint):
    print("@ %s += %s" % (datetime.now(), endpoint))

  def on_leave(endpoint):
    print("@ %s -= %s" % (datetime.now(), endpoint))

  ss = ServerSet(ZooKeeper(), args[0], on_join=on_join, on_leave=on_leave)
  while True:
    time.sleep(100)
Example 15: to_acl
def to_acl(access):
  cred = access.credential().get()
  if access.scheme().get() == 'digest':
    cred_parts = access.credential().get().split(':')
    if len(cred_parts) != 2:
      app.error('Digest credential should be of the form <user>:<password>')
    cred = make_digest_acl_credential(cred_parts[0], cred_parts[1])
  return make_acl(access.scheme().get(),
                  cred,
                  read=access.permissions().read().get(),
                  write=access.permissions().write().get(),
                  create=access.permissions().create().get(),
                  delete=access.permissions().delete().get(),
                  admin=access.permissions().admin().get())