This article collects typical usage examples of the twitter.common.log.error function in Python. If you are unsure how to call error, what its arguments look like in practice, or simply want real-world examples of error in use, the hand-picked snippets below should help.
The 15 code examples that follow show the error function in use, sorted by popularity by default.
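All of the examples share one pattern: import the module-level log object from twitter.common and report failures with log.error. The sketch below is only a minimal illustration of that pattern; the copy_file helper, its arguments, and the message text are hypothetical, and in the projects excerpted below the logger is typically initialized elsewhere (for example by the twitter.common.app framework).

# Minimal sketch (hypothetical helper, not taken from the examples below).
import shutil

from twitter.common import log

def copy_file(src, dst):
  try:
    shutil.copy(src, dst)
    return True
  except (IOError, OSError) as e:
    # log.error mirrors the stdlib logging API: either pre-format the message
    # with %, or pass format arguments directly, as the examples below do.
    log.error("Failed to copy %s to %s: %s" % (src, dst, e))
    return False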
Example 1: run
def run(self):
  try:
    log.info("Setting filter: %s", self.config.filter)
    if self.config.iface == "any":  # pragma: no cover
      sniff(
        filter=self.config.filter,
        store=0,
        prn=self.handle_packet,
        stop_filter=self.wants_stop
      )
    else:
      sniff(
        filter=self.config.filter,
        store=0,
        prn=self.handle_packet,
        iface=self.config.iface,
        stop_filter=self.wants_stop
      )
  except socket.error as ex:
    if self._error_to_stderr:
      sys.stderr.write("Error: %s, device: %s\n" % (ex, self.config.iface))
    else:
      log.error("Error: %s, device: %s", ex, self.config.iface)
  finally:
    log.info("The sniff loop exited")
    os.kill(os.getpid(), signal.SIGINT)
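Note the formatting style in Example 1: log.error is given the format string and the arguments separately (lazy, stdlib-logging style), whereas most of the later examples build the message up front with the % operator. A tiny sketch of both forms with hypothetical ex and iface values:

from twitter.common import log

ex, iface = OSError("timed out"), "eth0"  # hypothetical values
# Lazy: the logger interpolates the arguments when the record is emitted.
log.error("Error: %s, device: %s", ex, iface)
# Eager: the message string is fully built before the call.
log.error("Error: %s, device: %s" % (ex, iface))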
Example 2: connect
def connect(self):
  try:
    redis_conn = redis.StrictRedis(host=self.host, port=self.port, db=self.db)
    self.redis_pipeline = redis_conn.pipeline()
    return redis_conn
  except Exception as _e:
    log.error("RedisSink: ConnectionError\n %s %s" % (self.config, str(_e)))
Example 3: _update_endpoints
def _update_endpoints(self, _1, event, state, _2):
  """Update endpoints from ZK.

  This function will block until the ZK servers respond or the retry limit is hit.

  :raises ReconnectFailed: If reconnection fails.
  """
  if not (state == zookeeper.CONNECTED_STATE and event == zookeeper.CHILD_EVENT) and not (
      state == zookeeper.EXPIRED_SESSION_STATE):
    return

  try:
    endpoints = []
    endpoint_names = self._zk.get_children(self._endpoint, self._update_endpoints)
    endpoint_names.sort()
    for endpoint in endpoint_names:
      data = self._zk.get(posixpath.join(self._endpoint, endpoint))
      service_endpoint = serverset_types.ServiceInstance()
      endpoints.append(codec.deserialize(service_endpoint, data[0]))

    old = set(map(_format_endpoint, self._endpoints))
    new = set(map(_format_endpoint, endpoints))
    log.debug("ServerSet endpoints at %r changed to: %s" % (self._endpoint, ", ".join(new)))
    log.debug("  Added: %s" % ", ".join(new - old))
    log.debug("  Removed: %s" % ", ".join(old - new))

    with self._lock:
      if self._watcher:
        self._watcher(self._endpoint, self._endpoints, endpoints)
      self._endpoints = endpoints
  except ZooKeeper.Error as e:
    log.error("Lost connection to ZooKeeper: %s, reestablishing." % e)
    self._reconnect()
Example 4: method_wrapper
def method_wrapper(*args):
  with self._lock:
    start = time.time()
    while not self._terminating.is_set() and (
        time.time() - start) < self.RPC_MAXIMUM_WAIT.as_(Time.SECONDS):
      try:
        method = getattr(self.client(), method_name)
        if not callable(method):
          return method

        resp = method(*args)
        if resp is not None and resp.responseCode == ResponseCode.ERROR_TRANSIENT:
          raise self.TransientError(", ".join(
              [m.message for m in resp.details] if resp.details else []))
        return resp
      except TRequestsTransport.AuthError as e:
        log.error(self.scheduler_client().get_failed_auth_message())
        raise self.AuthError(e)
      except (TTransport.TTransportException, self.TimeoutError, self.TransientError) as e:
        if not self._terminating.is_set():
          log.warning('Connection error with scheduler: %s, reconnecting...' % e)
          self.invalidate()
          self._terminating.wait(self.RPC_RETRY_INTERVAL.as_(Time.SECONDS))
      except Exception as e:
        # Take any error that occurs during the RPC call, and transform it
        # into something clients can handle.
        if not self._terminating.is_set():
          raise self.ThriftInternalError("Error during thrift call %s to %s: %s" %
                                         (method_name, self.cluster.name, e))

    if not self._terminating.is_set():
      raise self.TimeoutError('Timed out attempting to issue %s to %s' % (
          method_name, self.cluster.name))
Example 5: handle_process
def handle_process(self, task_id, process_id):
  all_processes = {}
  current_run = self._observer.process(task_id, process_id)
  if not current_run:
    HttpServer.abort(404, 'Invalid task/process combination: %s/%s' % (task_id, process_id))
  process = self._observer.process_from_name(task_id, process_id)
  if process is None:
    msg = 'Could not recover process: %s/%s' % (task_id, process_id)
    log.error(msg)
    HttpServer.abort(404, msg)

  current_run_number = current_run['process_run']
  all_processes[current_run_number] = current_run
  for run in range(current_run_number):
    all_processes[run] = self._observer.process(task_id, process_id, run)

  template = {
    'task_id': task_id,
    'process': {
      'name': process_id,
      'status': all_processes[current_run_number]["state"],
      'cmdline': process.cmdline().get()
    },
  }
  template['process'].update(**all_processes[current_run_number].get('used', {}))
  template['runs'] = all_processes
  log.debug('Rendering template is: %s' % template)
  return template
Example 6: get
def get(cls, task_id, checkpoint_root):
  """
  Get a TaskRunner bound to the task_id in checkpoint_root.
  """
  path = TaskPath(root=checkpoint_root, task_id=task_id, state="active")
  task_json = path.getpath("task_path")
  task_checkpoint = path.getpath("runner_checkpoint")
  if not os.path.exists(task_json):
    return None
  task = ThermosConfigLoader.load_json(task_json)
  if task is None:
    return None
  if len(task.tasks()) == 0:
    return None
  try:
    checkpoint = CheckpointDispatcher.from_file(task_checkpoint)
    if checkpoint is None or checkpoint.header is None:
      return None
    return cls(
        task.tasks()[0].task(),
        checkpoint_root,
        checkpoint.header.sandbox,
        log_dir=checkpoint.header.log_dir,
        task_id=task_id,
        portmap=checkpoint.header.ports,
        hostname=checkpoint.header.hostname,
    )
  except Exception as e:
    log.error("Failed to reconstitute checkpoint in TaskRunner.get: %s" % e, exc_info=True)
    return None
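Example 6 also shows how to attach a traceback: the exc_info=True keyword is forwarded to the underlying stdlib logging machinery, while Example 9 further down formats the traceback itself with traceback.format_exc(). A short sketch of both variants, using a hypothetical read_checkpoint helper and path:

import traceback

from twitter.common import log

def read_checkpoint(path):
  """Hypothetical helper: read a checkpoint file, raising on failure."""
  with open(path, 'rb') as fp:
    return fp.read()

try:
  checkpoint = read_checkpoint("/var/run/thermos/checkpoint")  # hypothetical path
except Exception as e:
  # Variant A (as in Example 6): let the logging backend render the active traceback.
  log.error("Failed to reconstitute checkpoint: %s" % e, exc_info=True)
  # Variant B (as in Example 9): format the traceback explicitly as the message.
  log.error(traceback.format_exc())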
Example 7: __iter__
def __iter__(self):
  """
  May raise:
    RecordIO.PrematureEndOfStream
  """
  fd = os.dup(self._fp.fileno())
  try:
    cur_fp = os.fdopen(fd, self._fp.mode)
    cur_fp.seek(0)
  except OSError as e:
    log.error('Failed to duplicate fd on %s, error = %s' % (self._fp.name, e))
    try:
      os.close(fd)
    except OSError as e:
      if e.errno != errno.EBADF:
        log.error('Failed to close duped fd on %s, error = %s' % (self._fp.name, e))
    return

  try:
    while True:
      blob = RecordIO.Reader.do_read(cur_fp, self._codec)
      if blob:
        yield blob
      else:
        break
  finally:
    cur_fp.close()
Example 8: _apply_states
def _apply_states(self):
  """
  os.stat() the corresponding checkpoint stream of this task and determine if there are new
  ckpt records.  Attempt to read those records and update the high watermark for that stream.
  Returns True if new states were applied, False otherwise.
  """
  ckpt_offset = None
  try:
    ckpt_offset = os.stat(self._runner_ckpt).st_size

    updated = False
    if self._ckpt_head < ckpt_offset:
      with open(self._runner_ckpt, "r") as fp:
        fp.seek(self._ckpt_head)
        rr = ThriftRecordReader(fp, RunnerCkpt)
        while True:
          runner_update = rr.try_read()
          if not runner_update:
            break
          try:
            self._dispatcher.dispatch(self._runnerstate, runner_update)
          except CheckpointDispatcher.InvalidSequenceNumber as e:
            log.error("Checkpoint stream is corrupt: %s" % e)
            break
        new_ckpt_head = fp.tell()
        updated = self._ckpt_head != new_ckpt_head
        self._ckpt_head = new_ckpt_head
    return updated
  except OSError as e:
    if e.errno == errno.ENOENT:
      # The log doesn't yet exist, will retry later.
      log.warning("Could not read from checkpoint %s" % self._runner_ckpt)
      return False
    else:
      raise
Example 9: _run
def _run(self, driver, assigned_task, mounted_volume_paths):
  """
  Commence running a Task.
    - Initialize the sandbox
    - Start the ThermosTaskRunner (fork the Thermos TaskRunner)
    - Set up necessary HealthCheckers
    - Set up StatusManager, and attach HealthCheckers
  """
  self.send_update(driver, self._task_id, mesos_pb2.TASK_STARTING)

  if not self._initialize_sandbox(driver, assigned_task, mounted_volume_paths):
    return

  # start the process on a separate thread and give the message processing thread back
  # to the driver
  try:
    self._runner = self._runner_provider.from_assigned_task(assigned_task, self._sandbox)
  except TaskError as e:
    self.runner_aborted.set()
    self._die(driver, mesos_pb2.TASK_FAILED, str(e))
    return

  if not isinstance(self._runner, TaskRunner):
    self._die(driver, mesos_pb2.TASK_FAILED, 'Unrecognized task!')
    return

  if not self._start_runner(driver, assigned_task):
    return

  try:
    self._start_status_manager(driver, assigned_task)
  except Exception:
    log.error(traceback.format_exc())
    self._die(driver, mesos_pb2.TASK_FAILED, "Internal error")
Example 10: launchTask
def launchTask(self, driver, task):
  """
  Invoked when a task has been launched on this executor (initiated via Scheduler::launchTasks).
  Note that this task can be realized with a thread, a process, or some simple computation,
  however, no other callbacks will be invoked on this executor until this callback has returned.
  """
  self.launched.set()
  self.log('launchTask got task: %s:%s' % (task.name, task.task_id.value))

  # TODO(wickman) Update the tests to call registered(), then remove this line and issue
  # an assert if self._driver is not populated.
  self._driver = driver

  if self._runner:
    log.error('Already running a task! %s' % self._task_id)
    self.send_update(driver, task.task_id.value, mesos_pb.TASK_LOST,
        "Task already running on this executor: %s" % self._task_id)
    return

  self._slave_id = task.slave_id.value
  self._task_id = task.task_id.value

  try:
    assigned_task = assigned_task_from_mesos_task(task)
    mesos_task = mesos_task_instance_from_assigned_task(assigned_task)
  except Exception as e:
    log.fatal('Could not deserialize AssignedTask')
    log.fatal(traceback.format_exc())
    self.send_update(
        driver, self._task_id, mesos_pb.TASK_FAILED, "Could not deserialize task: %s" % e)
    defer(driver.stop, delay=self.STOP_WAIT)
    return

  defer(lambda: self._run(driver, assigned_task, mesos_task))
Example 11: statusUpdate
def statusUpdate(self, driver, status):
  with self._lock:
    # Forward the status update to the corresponding launcher.
    task_id = status.task_id.value
    launcher = self._get_launcher_by_task_id(task_id)
    if not launcher:
      log.info("Cluster for task %s doesn't exist. It could have been removed" % task_id)
      return

    try:
      launcher.status_update(status)
    except MySQLClusterLauncher.Error as e:
      log.error("Status update failed due to launcher error: %s" % e.message)
      self._stop()

    # Update metrics.
    # TODO(xujyan): This doesn't rule out duplicates, etc. We can consider updating these
    # metrics in the launcher.
    if status.state == mesos_pb2.TASK_FINISHED:
      self._metrics.tasks_finished.increment()
    elif status.state == mesos_pb2.TASK_FAILED:
      self._metrics.tasks_failed.increment()
    elif status.state == mesos_pb2.TASK_KILLED:
      self._metrics.tasks_killed.increment()
    elif status.state == mesos_pb2.TASK_LOST:
      self._metrics.tasks_lost.increment()

    if launcher.terminated:
      log.info("Deleting the launcher for cluster %s because the cluster has terminated" %
               launcher.cluster_name)
      self._delete_launcher(launcher)
Example 12: launchTask
def launchTask(self, driver, task):
  """
  Invoked when a task has been launched on this executor (initiated via Scheduler::launchTasks).
  Note that this task can be realized with a thread, a process, or some simple computation,
  however, no other callbacks will be invoked on this executor until this callback has returned.
  """
  self.launched.set()
  self.log('TaskInfo: %s' % task)
  self.log('launchTask got task: %s:%s' % (task.name, task.task_id.value))

  # TODO(wickman) Update the tests to call registered(), then remove this line and issue
  # an assert if self._driver is not populated.
  self._driver = driver

  if self._runner:
    log.error('Already running a task! %s' % self._task_id)
    self.send_update(driver, task.task_id.value, mesos_pb2.TASK_LOST,
        "Task already running on this executor: %s" % self._task_id)
    return

  self._slave_id = task.slave_id.value
  self._task_id = task.task_id.value

  assigned_task = self.validate_task(task)
  self.log("Assigned task: %s" % assigned_task)
  if not assigned_task:
    self.send_update(driver, self._task_id, mesos_pb2.TASK_FAILED,
        'Could not deserialize task.')
    defer(driver.stop, delay=self.STOP_WAIT)
    return

  defer(lambda: self._run(driver, assigned_task, self.extract_mount_paths_from_task(task)))
Example 13: _check_sla
def _check_sla(self, hostnames, grouping_function, percentage, duration):
  """Check if the provided list of hosts passes the job uptime SLA check.

  This is an all-or-nothing check, meaning that all provided hosts must pass their job
  SLA check for the maintenance to proceed.

  :param hostnames: list of host names to check SLA for
  :type hostnames: list of strings
  :param grouping_function: grouping function to apply to the given hosts
  :type grouping_function: function
  :param percentage: SLA uptime percentage override
  :type percentage: float
  :param duration: SLA uptime duration override
  :type duration: twitter.common.quantity.Amount
  :rtype: set of unsafe hosts
  """
  vector = self._client.sla_get_safe_domain_vector(self.SLA_MIN_JOB_INSTANCE_COUNT, hostnames)
  host_groups = vector.probe_hosts(percentage, duration.as_(Time.SECONDS), grouping_function)

  unsafe_hostnames = set()
  # Given that maintenance is performed 1 group at a time, any result longer than 1 group
  # should be considered a batch failure.
  if host_groups:
    if len(host_groups) > 1:
      log.error("Illegal multiple groups detected in SLA results. Skipping hosts: %s" % hostnames)
      return set(hostnames)

    results, unsafe_hostnames = format_sla_results(host_groups, unsafe_only=True)
    if results:
      print_results(results)
      return unsafe_hostnames

  return unsafe_hostnames
Example 14: delete
def delete(args, options):
  validate_common_options(options)

  with open(options.password_file, 'r') as f:
    password = f.read().strip()
    if not password:
      app.error("Empty password file")

  url = 'http://%s:%s/clusters/%s' % (options.api_host, options.api_port, options.cluster_name)
  values = dict(password=password)

  req = urllib2.Request(url, urllib.urlencode(values))
  req.get_method = lambda: 'DELETE'

  try:
    response = urllib2.urlopen(req).read()
  except urllib2.HTTPError as e:
    log.error("DELETE request failed: %s, %s, %s" % (
        e.code, BaseHTTPServer.BaseHTTPRequestHandler.responses[e.code], e.read()))
    app.quit(1)

  try:
    result = json.loads(response)
    if not isinstance(result, dict):
      raise ValueError()
  except ValueError:
    log.error("Invalid response: %s" % response)
    app.quit(1)

  log.info("Cluster deletion result: %s" % result)

  log.info("Waiting for the cluster to terminate...")
  wait_for_termination(result['cluster_url'])

  log.info("Cluster terminated/deleted")
Example 15: setup_child_subreaping
def setup_child_subreaping():
  """
  This uses the prctl(2) syscall to set the `PR_SET_CHILD_SUBREAPER` flag. This
  means if any children processes need to be reparented, they will be reparented
  to this process.

  More documentation here: http://man7.org/linux/man-pages/man2/prctl.2.html
  and here: https://lwn.net/Articles/474787/

  Callers should reap terminal children to prevent zombies.
  """
  log.debug("Calling prctl(2) with PR_SET_CHILD_SUBREAPER")
  # This constant is taken from prctl.h
  PR_SET_CHILD_SUBREAPER = 36
  try:
    library_name = ctypes.util.find_library('c')
    if library_name is None:
      log.warning("libc is not found. Unable to call prctl!")
      log.warning("Children subreaping is disabled!")
      return
    libc = ctypes.CDLL(library_name, use_errno=True)
    # If we are on a system where prctl doesn't exist, this will throw an
    # attribute error.
    ret = libc.prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0)
    if ret != 0:
      errno = ctypes.get_errno()
      raise OSError(errno, os.strerror(errno))
  except Exception as e:
    log.error("Unable to call prctl %s" % e)
    log.error("Children subreaping is disabled!")