本文整理汇总了Python中util.Timer.step方法的典型用法代码示例。如果您正苦于以下问题:Python Timer.step方法的具体用法?Python Timer.step怎么用?Python Timer.step使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类util.Timer
的用法示例。
在下文中一共展示了Timer.step方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
# 需要导入模块: from util import Timer [as 别名]
# 或者: from util.Timer import step [as 别名]
#.........这里部分代码省略.........
check_status = CheckStatus(
check.name, instance_statuses, metric_count,
event_count, service_check_count, service_metadata=current_check_metadata,
library_versions=check.get_library_info(),
source_type_name=check.SOURCE_TYPE_NAME or check.name,
check_stats=check_stats
)
# Service check for Agent checks failures
service_check_tags = ["check:%s" % check.name]
if check_status.status == STATUS_OK:
status = AgentCheck.OK
elif check_status.status == STATUS_ERROR:
status = AgentCheck.CRITICAL
check.service_check('datadog.agent.check_status', status, tags=service_check_tags)
# Collect the service checks and save them in the payload
current_check_service_checks = check.get_service_checks()
if current_check_service_checks:
service_checks.extend(current_check_service_checks)
service_check_count = len(current_check_service_checks)
# Update the check status with the correct service_check_count
check_status.service_check_count = service_check_count
check_statuses.append(check_status)
check_run_time = time.time() - check_start_time
log.debug("Check %s ran in %.2f s" % (check.name, check_run_time))
# Instrument check run timings if enabled.
if self.check_timings:
metric = 'datadog.agent.check_run_time'
meta = {'tags': ["check:%s" % check.name]}
metrics.append((metric, time.time(), check_run_time, meta))
for check_name, info in self.init_failed_checks_d.iteritems():
if not self.continue_running:
return
check_status = CheckStatus(check_name, None, None, None, None,
init_failed_error=info['error'],
init_failed_traceback=info['traceback'])
check_statuses.append(check_status)
# Add a service check for the agent
service_checks.append(create_service_check('datadog.agent.up', AgentCheck.OK,
hostname=self.hostname))
# Store the metrics and events in the payload.
payload['metrics'] = metrics
payload['events'] = events
payload['service_checks'] = service_checks
# Populate metadata
self._populate_payload_metadata(payload, check_statuses, start_event)
collect_duration = timer.step()
if self._agent_metrics:
metric_context = {
'collection_time': collect_duration,
'emit_time': self.emit_duration,
}
if not Platform.is_windows():
metric_context['cpu_time'] = time.clock() - cpu_clock
self._agent_metrics.set_metric_context(payload, metric_context)
self._agent_metrics.run()
agent_stats = self._agent_metrics.get_metrics()
payload['metrics'].extend(agent_stats)
if self.agentConfig.get('developer_mode'):
log.debug("\n Agent developer mode stats: \n {0}".format(
Collector._stats_for_display(agent_stats))
)
# Flush metadata for the Agent Metrics check. Otherwise they'll just accumulate and leak.
self._agent_metrics.get_service_metadata()
# Let's send our payload
emitter_statuses = payload.emit(log, self.agentConfig, self.emitters,
self.continue_running)
self.emit_duration = timer.step()
# Persist the status of the collection run.
try:
CollectorStatus(check_statuses, emitter_statuses,
self.hostname_metadata_cache).persist()
except Exception:
log.exception("Error persisting collector status")
if self.run_count <= FLUSH_LOGGING_INITIAL or self.run_count % FLUSH_LOGGING_PERIOD == 0:
log.info("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
if self.run_count == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, next flushes will be logged every %s flushes." %
FLUSH_LOGGING_PERIOD)
else:
log.debug("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
return payload
示例2: run
# 需要导入模块: from util import Timer [as 别名]
# 或者: from util.Timer import step [as 别名]
#.........这里部分代码省略.........
has_resource = True
res_value = { 'snaps': snaps,
'format_version': resources_check.get_format_version() }
res_format = resources_check.describe_format_if_needed()
if res_format is not None:
res_value['format_description'] = res_format
payload['resources'][resources_check.RESOURCE_KEY] = res_value
if has_resource:
payload['resources']['meta'] = {
'api_key': self.agentConfig['api_key'],
'host': payload['internalHostname'],
}
# newer-style checks (not checks.d style)
for metrics_check in self._metrics_checks:
res = metrics_check.check(self.agentConfig)
if res:
metrics.extend(res)
# checks.d checks
check_statuses = []
for check in self.initialized_checks_d:
if not self.continue_running:
return
log.info("Running check %s" % check.name)
instance_statuses = []
metric_count = 0
event_count = 0
try:
# Run the check.
instance_statuses = check.run()
# Collect the metrics and events.
current_check_metrics = check.get_metrics()
current_check_events = check.get_events()
current_check_service_checks = check.get_service_checks()
# Save them for the payload.
metrics.extend(current_check_metrics)
if current_check_events:
if check.name not in events:
events[check.name] = current_check_events
else:
events[check.name] += current_check_events
if current_check_service_checks:
service_checks.extend(current_check_service_checks)
# Save the status of the check.
metric_count = len(current_check_metrics)
event_count = len(current_check_events)
service_check_count = len(current_check_service_checks)
except Exception:
log.exception("Error running check %s" % check.name)
check_status = CheckStatus(check.name, instance_statuses, metric_count, event_count, service_check_count,
library_versions=check.get_library_info())
check_statuses.append(check_status)
for check_name, info in self.init_failed_checks_d.iteritems():
if not self.continue_running:
return
check_status = CheckStatus(check_name, None, None, None, None,
init_failed_error=info['error'],
init_failed_traceback=info['traceback'])
check_statuses.append(check_status)
# Store the metrics and events in the payload.
payload['metrics'] = metrics
payload['events'] = events
payload['service_checks'] = service_checks
collect_duration = timer.step()
if self.os != 'windows':
payload['metrics'].extend(self._agent_metrics.check(payload, self.agentConfig,
collect_duration, self.emit_duration, time.clock() - cpu_clock))
else:
payload['metrics'].extend(self._agent_metrics.check(payload, self.agentConfig,
collect_duration, self.emit_duration))
emitter_statuses = self._emit(payload)
self.emit_duration = timer.step()
# Persist the status of the collection run.
try:
CollectorStatus(check_statuses, emitter_statuses, self.metadata_cache).persist()
except Exception:
log.exception("Error persisting collector status")
if self.run_count <= FLUSH_LOGGING_INITIAL or self.run_count % FLUSH_LOGGING_PERIOD == 0:
log.info("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
if self.run_count == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, next flushes will be logged every %s flushes." % FLUSH_LOGGING_PERIOD)
else:
log.debug("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
示例3: run
# 需要导入模块: from util import Timer [as 别名]
# 或者: from util.Timer import step [as 别名]
#.........这里部分代码省略.........
instance_statuses,
metric_count,
event_count,
service_check_count,
service_metadata=current_check_metadata,
library_versions=check.get_library_info(),
source_type_name=check.SOURCE_TYPE_NAME or check.name,
check_stats=check_stats,
)
# Service check for Agent checks failures
service_check_tags = ["check:%s" % check.name]
if check_status.status == STATUS_OK:
status = AgentCheck.OK
elif check_status.status == STATUS_ERROR:
status = AgentCheck.CRITICAL
check.service_check("datadog.agent.check_status", status, tags=service_check_tags)
# Collect the service checks and save them in the payload
current_check_service_checks = check.get_service_checks()
if current_check_service_checks:
service_checks.extend(current_check_service_checks)
service_check_count = len(current_check_service_checks)
# Update the check status with the correct service_check_count
check_status.service_check_count = service_check_count
check_statuses.append(check_status)
check_run_time = time.time() - check_start_time
log.debug("Check %s ran in %.2f s" % (check.name, check_run_time))
# Instrument check run timings if enabled.
if self.check_timings:
metric = "datadog.agent.check_run_time"
meta = {"tags": ["check:%s" % check.name]}
metrics.append((metric, time.time(), check_run_time, meta))
for check_name, info in self.init_failed_checks_d.iteritems():
if not self.continue_running:
return
check_status = CheckStatus(
check_name,
None,
None,
None,
None,
init_failed_error=info["error"],
init_failed_traceback=info["traceback"],
)
check_statuses.append(check_status)
# Add a service check for the agent
service_checks.append(create_service_check("datadog.agent.up", AgentCheck.OK, hostname=self.hostname))
# Store the metrics and events in the payload.
payload["metrics"] = metrics
payload["events"] = events
payload["service_checks"] = service_checks
# Populate metadata
self._populate_payload_metadata(payload, check_statuses, start_event)
collect_duration = timer.step()
if self._agent_metrics:
metric_context = {"collection_time": collect_duration, "emit_time": self.emit_duration}
if not Platform.is_windows():
metric_context["cpu_time"] = time.clock() - cpu_clock
self._agent_metrics.set_metric_context(payload, metric_context)
self._agent_metrics.run()
agent_stats = self._agent_metrics.get_metrics()
payload["metrics"].extend(agent_stats)
if self.agentConfig.get("developer_mode"):
log.debug("\n Agent developer mode stats: \n {0}".format(Collector._stats_for_display(agent_stats)))
# Let's send our payload
emitter_statuses = payload.emit(log, self.agentConfig, self.emitters, self.continue_running)
self.emit_duration = timer.step()
# Persist the status of the collection run.
try:
CollectorStatus(check_statuses, emitter_statuses, self.hostname_metadata_cache).persist()
except Exception:
log.exception("Error persisting collector status")
if self.run_count <= FLUSH_LOGGING_INITIAL or self.run_count % FLUSH_LOGGING_PERIOD == 0:
log.info(
"Finished run #%s. Collection time: %ss. Emit time: %ss"
% (self.run_count, round(collect_duration, 2), round(self.emit_duration, 2))
)
if self.run_count == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, next flushes will be logged every %s flushes." % FLUSH_LOGGING_PERIOD)
else:
log.debug(
"Finished run #%s. Collection time: %ss. Emit time: %ss"
% (self.run_count, round(collect_duration, 2), round(self.emit_duration, 2))
)
return payload
示例4: run
# 需要导入模块: from util import Timer [as 别名]
# 或者: from util.Timer import step [as 别名]
#.........这里部分代码省略.........
# Instrument check run timings if enabled.
if self.check_timings:
metric = 'datadog.agent.check_run_time'
meta = {'tags': ["check:%s" % check.name]}
metrics.append((metric, time.time(), check_run_time, meta))
for check_name, info in self.init_failed_checks_d.iteritems():
if not self.continue_running:
return
check_status = CheckStatus(check_name, None, None, None, None,
init_failed_error=info['error'],
init_failed_traceback=info['traceback'])
check_statuses.append(check_status)
# Add a service check for the agent
service_checks.append(create_service_check('datadog.agent.up', AgentCheck.OK,
hostname=self.hostname))
# Store the metrics and events in the payload.
payload['metrics'] = metrics
payload['events'] = events
payload['service_checks'] = service_checks
if self._should_send_additional_data('agent_checks'):
# Add agent checks statuses and error/warning messages
agent_checks = []
for check in check_statuses:
if check.instance_statuses is not None:
for instance_status in check.instance_statuses:
agent_checks.append(
(
check.name, check.source_type_name,
instance_status.instance_id,
instance_status.status,
# put error message or list of warning messages in the same field
# it will be handled by the UI
instance_status.error or instance_status.warnings or ""
)
)
else:
agent_checks.append(
(
check.name, check.source_type_name,
"initialization",
check.status, repr(check.init_failed_error)
)
)
payload['agent_checks'] = agent_checks
payload['meta'] = self.metadata_cache # add hostname metadata
collect_duration = timer.step()
if self.os != 'windows':
if self._agent_metrics is not None:
self._agent_metrics.set_metric_context(payload,
{
'collection_time': collect_duration,
'emit_time': self.emit_duration,
'cpu_time': time.clock() - cpu_clock
})
self._agent_metrics.run()
agent_stats = self._agent_metrics.get_metrics()
payload['metrics'].extend(agent_stats)
# Dump the metrics to log when in developer mode
if self.agentConfig.get('developer_mode', False):
log.info("\n AGENT STATS: \n {0}".format(Collector._stats_for_display(agent_stats)))
else:
if self._agent_metrics is not None:
self._agent_metrics.set_metric_context(payload,
{
'collection_time': collect_duration,
'emit_time': self.emit_duration,
})
self._agent_metrics.run()
agent_stats = self._agent_metrics.get_metrics()
payload['metrics'].extend(agent_stats)
# Dump the metrics to log when in developer mode
if self.agentConfig.get('developer_mode', False):
log.info("\n AGENT STATS: \n {0}".format(Collector._stats_for_display(agent_stats)))
# Let's send our payload
emitter_statuses = self._emit(payload)
self.emit_duration = timer.step()
# Persist the status of the collection run.
try:
CollectorStatus(check_statuses, emitter_statuses, self.metadata_cache).persist()
except Exception:
log.exception("Error persisting collector status")
if self.run_count <= FLUSH_LOGGING_INITIAL or self.run_count % FLUSH_LOGGING_PERIOD == 0:
log.info("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
if self.run_count == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, next flushes will be logged every %s flushes." % FLUSH_LOGGING_PERIOD)
else:
log.debug("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
return payload
示例5: run
# 需要导入模块: from util import Timer [as 别名]
# 或者: from util.Timer import step [as 别名]
#.........这里部分代码省略.........
if cassandraData is not False and cassandraData is not None:
payload['cassandra'] = cassandraData
# MySQL Status
if mysqlStatus:
payload.update(mysqlStatus)
# RabbitMQ
if rabbitmq:
payload['rabbitMQ'] = rabbitmq
# MongoDB
if mongodb:
if mongodb.has_key('events'):
events['Mongo'] = mongodb['events']['Mongo']
del mongodb['events']
payload['mongoDB'] = mongodb
# CouchDB
if couchdb:
payload['couchDB'] = couchdb
# dogstream
if dogstreamData:
dogstreamEvents = dogstreamData.get('dogstreamEvents', None)
if dogstreamEvents:
if 'dogstream' in payload['events']:
events['dogstream'].extend(dogstreamEvents)
else:
events['dogstream'] = dogstreamEvents
del dogstreamData['dogstreamEvents']
payload.update(dogstreamData)
# metrics about the forwarder
if ddforwarderData:
payload['datadog'] = ddforwarderData
# Process the event checks.
for event_check in self._event_checks:
event_data = event_check.check(checks_logger, self.agentConfig)
if event_data:
events[event_check.key] = event_data
# Resources checks
if self.os != 'windows':
has_resource = False
for resources_check in self._resources_checks:
resources_check.check()
snaps = resources_check.pop_snapshots()
if snaps:
has_resource = True
res_value = { 'snaps': snaps,
'format_version': resources_check.get_format_version() }
res_format = resources_check.describe_format_if_needed()
if res_format is not None:
res_value['format_description'] = res_format
payload['resources'][resources_check.RESOURCE_KEY] = res_value
if has_resource:
payload['resources']['meta'] = {
'api_key': self.agentConfig['api_key'],
'host': payload['internalHostname'],
}
# newer-style checks (not checks.d style)
for metrics_check in self._metrics_checks:
res = metrics_check.check(self.agentConfig)
if res:
metrics.extend(res)
# checks.d checks
checksd = checksd or []
for check in checksd:
check_cls = check['class']
for instance in check['instances']:
try:
# Run the check for each configuration
check_cls.check(instance)
metrics.extend(check_cls.get_metrics())
if check_cls.has_events():
if check['name'] not in events:
events[check['name']] = []
for ev in check_cls.get_events():
events[check['name']].append(ev)
except Exception:
logger.exception("Check %s failed" % check_cls.name)
# Store the metrics and events in the payload.
payload['metrics'] = metrics
payload['events'] = events
collect_duration = timer.step()
# Pass the payload along to the emitters.
for emitter in self.emitters:
emitter(payload, checks_logger, self.agentConfig)
emit_duration = timer.step()
logger.info("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(emit_duration, 2)))