This article collects typical usage examples of the Python util.Watchdog class, gathered from real projects. If you are wondering how the Watchdog class is used in practice, the curated examples below may help.
The following shows 15 code examples of the Watchdog class, ordered by popularity by default.
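For orientation, the examples only exercise a small surface of the class: a constructor taking a timeout in seconds (plus, in some agent versions, optional max_mem_mb and max_resets keyword arguments) and a reset() method that re-arms the timer. Below is a minimal sketch of that assumed call contract, reconstructed from the call sites in the examples rather than from the agent's util.py itself; it is not the real implementation.

# Sketch of the Watchdog surface the examples below rely on. The real util.Watchdog
# does more (it forcibly stops the process when the timer expires); this stub only
# documents the inferred constructor and reset() contract.
class Watchdog(object):
    def __init__(self, duration, max_mem_mb=None, max_resets=None):
        self._duration = int(duration)    # seconds allowed between reset() calls
        self._max_mem_mb = max_mem_mb     # optional memory ceiling (some versions)
        self._max_resets = max_resets     # optional cap on reset frequency (some versions)

    def reset(self):
        # Re-arm the timer; callers invoke this at every healthy iteration
        # (each collector loop, each transaction flush, each reporter flush).
        pass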
Example 1: _get_watchdog
def _get_watchdog(self, check_freq, agentConfig):
    watchdog = None
    if agentConfig.get("watchdog", True):
        watchdog = Watchdog(check_freq * WATCHDOG_MULTIPLIER,
                            max_mem_mb=agentConfig.get('limit_memory_consumption', None))
        watchdog.reset()
    return watchdog
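The timeout handed to the watchdog is the check frequency scaled by WATCHDOG_MULTIPLIER, so the collector gets several check intervals of slack before the watchdog acts. A small arithmetic sketch with hypothetical numbers (the multiplier value and memory cap below are assumptions, not taken from the example):

from util import Watchdog

# Hypothetical values: a 15-second check interval and a multiplier of 10
# give the collector a 150-second budget per cycle; 512 MB is an assumed cap.
check_freq = 15
WATCHDOG_MULTIPLIER = 10
watchdog = Watchdog(check_freq * WATCHDOG_MULTIPLIER, max_mem_mb=512)
watchdog.reset()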
Example 2: run
def run(self, agentConfig=None, run_forever=True):
    """Main loop of the collector"""
    agentLogger = logging.getLogger('agent')
    systemStats = get_system_stats()
    agentLogger.debug('System Properties: ' + str(systemStats))

    if agentConfig is None:
        agentConfig = get_config()

    # Load the checks.d checks
    checksd = load_check_directory(agentConfig)

    # Try to fetch the instance id from EC2 if no hostname has been set
    # in the config file.
    # DEPRECATED
    if agentConfig.get('hostname') is None and agentConfig.get('use_ec2_instance_id'):
        instanceId = EC2.get_instance_id()
        if instanceId is not None:
            agentLogger.info("Running on EC2, instanceId: %s" % instanceId)
            agentConfig['hostname'] = instanceId
        else:
            agentLogger.info('Not running on EC2, using hostname to identify this server')

    emitters = [http_emitter]
    for emitter_spec in [s.strip() for s in agentConfig.get('custom_emitters', '').split(',')]:
        if len(emitter_spec) == 0:
            continue
        emitters.append(modules.load(emitter_spec, 'emitter'))

    check_freq = int(agentConfig['check_freq'])

    # Checks instance
    c = checks(agentConfig, emitters)

    # Watchdog
    watchdog = None
    if agentConfig.get("watchdog", True):
        watchdog = Watchdog(check_freq * WATCHDOG_MULTIPLIER)
        watchdog.reset()

    # Run checks once, to get once-in-a-run data
    c.doChecks(True, systemStats, checksd)

    # Main loop
    while run_forever:
        if watchdog is not None:
            watchdog.reset()
        time.sleep(check_freq)
        c.doChecks(checksd=checksd)
Example 3: __init__
def __init__(self, interval, metrics_aggregator, api_host, api_key=None, use_watchdog=False, event_chunk_size=None):
    threading.Thread.__init__(self)
    self.interval = int(interval)
    self.finished = threading.Event()
    self.metrics_aggregator = metrics_aggregator
    self.flush_count = 0
    self.log_count = 0

    self.watchdog = None
    if use_watchdog:
        from util import Watchdog
        self.watchdog = Watchdog(WATCHDOG_TIMEOUT)

    self.api_key = api_key
    self.api_host = api_host
    self.event_chunk_size = event_chunk_size or EVENT_CHUNK_SIZE

    self.http_conn_cls = http_client.HTTPSConnection
    match = re.match('^(https?)://(.*)', api_host)
    if match:
        self.api_host = match.group(2)
        if match.group(1) == 'http':
            self.http_conn_cls = http_client.HTTPConnection
Example 4: __init__
def __init__(self, port, agentConfig, watchdog=True,
             skip_ssl_validation=False, use_simple_http_client=False):
    self._port = int(port)
    self._agentConfig = agentConfig
    self._metrics = {}
    AgentTransaction.set_application(self)
    AgentTransaction.set_endpoints(agentConfig['endpoints'])
    AgentTransaction.set_request_timeout(agentConfig['forwarder_timeout'])

    max_parallelism = self.NO_PARALLELISM
    # Multiple endpoints => enable parallelism
    if len(agentConfig['endpoints']) > 1:
        max_parallelism = self.DEFAULT_PARALLELISM

    self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                          MAX_QUEUE_SIZE, THROTTLING_DELAY,
                                          max_parallelism=max_parallelism)
    AgentTransaction.set_tr_manager(self._tr_manager)

    self._watchdog = None
    self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
    self.use_simple_http_client = use_simple_http_client
    if self.skip_ssl_validation:
        log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

    # Monitor activity
    if watchdog:
        watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
        self._watchdog = Watchdog(
            watchdog_timeout,
            max_mem_mb=agentConfig.get('limit_memory_consumption', None),
            max_resets=WATCHDOG_HIGH_ACTIVITY_THRESHOLD
        )
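Note the division by 1000, which the other forwarder examples in this article do not have; it suggests that in this agent version TRANSACTION_FLUSH_INTERVAL is expressed in milliseconds while Watchdog expects seconds. A small sketch of the arithmetic with hypothetical constant values (the real constants live in the forwarder module and may differ):

# Hypothetical values, for illustration only.
TRANSACTION_FLUSH_INTERVAL = 5000        # assumed to be in milliseconds in this version
WATCHDOG_INTERVAL_MULTIPLIER = 10        # hypothetical multiplier
watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
# => 50, i.e. a 50-second budget passed to Watchdog(watchdog_timeout, ...)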
Example 5: __init__
def __init__(self, port, agentConfig):
    self._port = port
    self._agentConfig = agentConfig
    self._metrics = {}
    self._watchdog = Watchdog(TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER)
    MetricTransaction.set_application(self)
    self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                          MAX_QUEUE_SIZE, THROTTLING_DELAY)
    MetricTransaction.set_tr_manager(self._tr_manager)
Example 6: __init__
def __init__(self, port, agentConfig, watchdog=True):
    self._port = int(port)
    self._agentConfig = agentConfig
    self._metrics = {}
    MetricTransaction.set_application(self)
    MetricTransaction.set_endpoints()
    self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY, MAX_QUEUE_SIZE, THROTTLING_DELAY)
    MetricTransaction.set_tr_manager(self._tr_manager)

    self._watchdog = None
    if watchdog:
        watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
        self._watchdog = Watchdog(watchdog_timeout)
Example 7: __init__
def __init__(self, interval, metrics_aggregator, api_host, api_key=None, use_watchdog=False, event_chunk_size=None):
    threading.Thread.__init__(self)
    self.interval = int(interval)
    self.finished = threading.Event()
    self.metrics_aggregator = metrics_aggregator
    self.flush_count = 0
    self.log_count = 0

    self.watchdog = None
    if use_watchdog:
        from util import Watchdog
        self.watchdog = Watchdog(WATCHDOG_TIMEOUT)

    self.api_key = api_key
    self.api_host = api_host
    self.event_chunk_size = event_chunk_size or EVENT_CHUNK_SIZE
Example 8: __init__
def __init__(self, port, agentConfig, watchdog=True, skip_ssl_validation=False):
    self._port = int(port)
    self._agentConfig = agentConfig
    self._metrics = {}
    MetricTransaction.set_application(self)
    MetricTransaction.set_endpoints()
    self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                          MAX_QUEUE_SIZE, THROTTLING_DELAY)
    MetricTransaction.set_tr_manager(self._tr_manager)

    self._watchdog = None
    self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
    if self.skip_ssl_validation:
        log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

    if watchdog:
        watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
        self._watchdog = Watchdog(watchdog_timeout,
                                  max_mem_mb=agentConfig.get('limit_memory_consumption', None))
Example 9: Application
class Application(tornado.web.Application):

    def __init__(self, port, agentConfig, watchdog=True):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints()
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY, MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(watchdog_timeout)

    def appendMetric(self, prefix, name, host, device, ts, value):
        if self._metrics.has_key(prefix):
            metrics = self._metrics[prefix]
        else:
            metrics = {}
            self._metrics[prefix] = metrics

        if metrics.has_key(name):
            metrics[name].append([host, device, ts, value])
        else:
            metrics[name] = [[host, device, ts, value]]

    def _postMetrics(self):
        if len(self._metrics) > 0:
            self._metrics["uuid"] = getUuid()
            self._metrics["internalHostname"] = gethostname(self._agentConfig)
            self._metrics["apiKey"] = self._agentConfig["api_key"]
            MetricTransaction(self._metrics, {})
            self._metrics = {}

    def run(self):
        handlers = [
            (r"/intake/?", AgentInputHandler),
            (r"/api/v1/series/?", ApiInputHandler),
            (r"/status/?", StatusHandler),
        ]

        settings = dict(cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=", xsrf_cookies=False, debug=True)

        tornado.web.Application.__init__(self, handlers, **settings)
        http_server = tornado.httpserver.HTTPServer(self)
        http_server.listen(self._port)
        logging.info("Listening on port %d" % self._port)

        # Register callbacks
        self.mloop = tornado.ioloop.IOLoop.instance()

        def flush_trs():
            if self._watchdog:
                self._watchdog.reset()
            self._postMetrics()
            self._tr_manager.flush()

        tr_sched = tornado.ioloop.PeriodicCallback(flush_trs, TRANSACTION_FLUSH_INTERVAL, io_loop=self.mloop)

        # Register optional Graphite listener
        gport = self._agentConfig.get("graphite_listen_port", None)
        if gport is not None:
            logging.info("Starting graphite listener on port %s" % gport)
            from graphite import GraphiteServer
            gs = GraphiteServer(self, gethostname(self._agentConfig), io_loop=self.mloop)
            gs.listen(gport)

        # Start everything
        if self._watchdog:
            self._watchdog.reset()
        tr_sched.start()
        self.mloop.start()

    def stop(self):
        self.mloop.stop()
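Here the periodic flush_trs callback doubles as the liveness signal: every flush resets the watchdog, so a stalled IOLoop stops the resets and lets the watchdog act. A hypothetical driver, sketched from the constructor above and assuming the rest of the forwarder module (handlers, MetricTransaction, constants) is importable; the port and config values are illustrative only:

# Hypothetical usage; run() blocks in the tornado IOLoop until stop() is called.
agentConfig = {'api_key': 'XXXXXXXX'}            # minimal hypothetical config
app = Application(17123, agentConfig, watchdog=True)
app.run()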
Example 10: Reporter
class Reporter(threading.Thread):
    """
    The reporter periodically sends the aggregated metrics to the
    server.
    """

    def __init__(self, interval, metrics_aggregator, api_host, api_key=None, use_watchdog=False):
        threading.Thread.__init__(self)
        self.interval = int(interval)
        self.finished = threading.Event()
        self.metrics_aggregator = metrics_aggregator
        self.flush_count = 0

        self.watchdog = None
        if use_watchdog:
            from util import Watchdog
            self.watchdog = Watchdog(WATCHDOG_TIMEOUT)

        self.api_key = api_key
        self.api_host = api_host

        self.http_conn_cls = http_client.HTTPSConnection
        match = re.match('^(https?)://(.*)', api_host)
        if match:
            self.api_host = match.group(2)
            if match.group(1) == 'http':
                self.http_conn_cls = http_client.HTTPConnection

    def stop(self):
        log.info("Stopping reporter")
        self.finished.set()

    def run(self):
        log.info("Reporting to %s every %ss" % (self.api_host, self.interval))
        log.debug("Watchdog enabled: %s" % bool(self.watchdog))

        # Persist a start-up message.
        DogstatsdStatus().persist()

        while not self.finished.isSet():  # Use camel case isSet for 2.4 support.
            self.finished.wait(self.interval)
            self.metrics_aggregator.send_packet_count('datadog.dogstatsd.packet.count')
            self.flush()
            if self.watchdog:
                self.watchdog.reset()

        # Clean up the status messages.
        log.debug("Stopped reporter")
        DogstatsdStatus.remove_latest_status()

    def flush(self):
        try:
            self.flush_count += 1
            packets_per_second = self.metrics_aggregator.packets_per_second(self.interval)
            packet_count = self.metrics_aggregator.total_count

            metrics = self.metrics_aggregator.flush()
            count = len(metrics)
            should_log = self.flush_count < LOGGING_INTERVAL or self.flush_count % LOGGING_INTERVAL == 0
            if not count:
                if should_log:
                    log.info("Flush #%s: No metrics to flush." % self.flush_count)
            else:
                if should_log:
                    log.info("Flush #%s: flushing %s metrics" % (self.flush_count, count))
                self.submit(metrics)

            events = self.metrics_aggregator.flush_events()
            event_count = len(events)
            if not event_count:
                if should_log:
                    log.info("Flush #%s: No events to flush." % self.flush_count)
                else:
                    log.debug("Flush #%s: No events to flush." % self.flush_count)
            else:
                if should_log:
                    log.info("Flush #%s: flushing %s events" % (self.flush_count, len(events)))
                else:
                    log.debug("Flush #%s: flushing %s events" % (self.flush_count, len(events)))
                self.submit_events(events)

            # Persist a status message.
            packet_count = self.metrics_aggregator.total_count
            DogstatsdStatus(
                flush_count=self.flush_count,
                packet_count=packet_count,
                packets_per_second=packets_per_second,
                metric_count=count,
                event_count=event_count
            ).persist()
        except Exception, e:
            log.exception("Error flushing metrics")
Example 11: Reporter
class Reporter(threading.Thread):
    """
    The reporter periodically sends the aggregated metrics to the
    server.
    """

    def __init__(self, interval, metrics_aggregator, api_host, api_key=None, use_watchdog=False):
        threading.Thread.__init__(self)
        self.interval = int(interval)
        self.finished = threading.Event()
        self.metrics_aggregator = metrics_aggregator
        self.flush_count = 0

        self.watchdog = None
        if use_watchdog:
            from util import Watchdog
            self.watchdog = Watchdog(WATCHDOG_TIMEOUT)

        self.api_key = api_key
        self.api_host = api_host

        self.http_conn_cls = http_client.HTTPSConnection
        match = re.match('^(https?)://(.*)', api_host)
        if match:
            self.api_host = match.group(2)
            if match.group(1) == 'http':
                self.http_conn_cls = http_client.HTTPConnection

    def stop(self):
        log.info("Stopping reporter")
        self.finished.set()

    def run(self):
        log.info("Reporting to %s every %ss" % (self.api_host, self.interval))
        log.debug("Watchdog enabled: %s" % bool(self.watchdog))

        # Persist a start-up message.
        DogstatsdStatus().persist()

        while not self.finished.isSet():  # Use camel case isSet for 2.4 support.
            self.finished.wait(self.interval)
            self.metrics_aggregator.send_packet_count('datadog.dogstatsd.packet.count')
            self.flush()
            if self.watchdog:
                self.watchdog.reset()

        # Clean up the status messages.
        log.debug("Stopped reporter")
        DogstatsdStatus.remove_latest_status()

    def flush(self):
        try:
            self.flush_count += 1
            packets_per_second = self.metrics_aggregator.packets_per_second(self.interval)
            packet_count = self.metrics_aggregator.total_count

            metrics = self.metrics_aggregator.flush()
            count = len(metrics)
            should_log = self.flush_count < LOGGING_INTERVAL or self.flush_count % LOGGING_INTERVAL == 0
            if not count:
                if should_log:
                    log.info("Flush #%s: No metrics to flush." % self.flush_count)
            else:
                if should_log:
                    log.info("Flush #%s: flushing %s metrics" % (self.flush_count, count))
                self.submit(metrics)

            # Persist a status message.
            packet_count = self.metrics_aggregator.total_count
            DogstatsdStatus(
                flush_count=self.flush_count,
                packet_count=packet_count,
                packets_per_second=packets_per_second,
                metric_count=count).persist()
        except:
            log.exception("Error flushing metrics")

    def submit(self, metrics):
        # HACK - Copy and pasted from dogapi, because it's a bit of a pain to distribute python
        # dependencies with the agent.
        body = serialize(metrics)
        headers = {'Content-Type': 'application/json'}
        method = 'POST'

        params = {}
        if self.api_key:
            params['api_key'] = self.api_key
        url = '/api/v1/series?%s' % urlencode(params)

        start_time = time()
        status = None
        conn = self.http_conn_cls(self.api_host)
        try:
            conn.request(method, url, body, headers)
            # FIXME: add timeout handling code here
            # ... (the rest of this example is omitted) ...
Example 12: normal_run
def normal_run(self):
    w = Watchdog(2)
    w.reset()
    for i in range(5):
        time.sleep(1)
        w.reset()
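This test exercises the happy path: every reset arrives within the 2-second window, so the watchdog never has to act. For contrast, a hypothetical counterpart is sketched below, assuming (this is an assumption about util.Watchdog's self-destruct behaviour, which these excerpts do not show) that an expired watchdog forcibly ends the process:

def hung_run(self):
    # Hypothetical counterpart to normal_run: the work takes longer than the
    # 2-second budget and reset() is never called again, so the watchdog is
    # expected to terminate the process before the sleep completes.
    w = Watchdog(2)
    w.reset()
    time.sleep(10)  # no reset() in the meantime -> watchdog should fire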
Example 13: Application
class Application(tornado.web.Application):

    def __init__(self, port, agentConfig, watchdog=True, skip_ssl_validation=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints()
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
        if self.skip_ssl_validation:
            log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(watchdog_timeout,
                                      max_mem_mb=agentConfig.get('limit_memory_consumption', None))

    def log_request(self, handler):
        """ Override the tornado logging method.
        If everything goes well, log level is DEBUG.
        Otherwise it's WARNING or ERROR depending on the response code. """
        if handler.get_status() < 400:
            log_method = log.debug
        elif handler.get_status() < 500:
            log_method = log.warning
        else:
            log_method = log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)

    def appendMetric(self, prefix, name, host, device, ts, value):
        if self._metrics.has_key(prefix):
            metrics = self._metrics[prefix]
        else:
            metrics = {}
            self._metrics[prefix] = metrics

        if metrics.has_key(name):
            metrics[name].append([host, device, ts, value])
        else:
            metrics[name] = [[host, device, ts, value]]

    def _postMetrics(self):
        if len(self._metrics) > 0:
            self._metrics['uuid'] = get_uuid()
            self._metrics['internalHostname'] = get_hostname(self._agentConfig)
            self._metrics['apiKey'] = self._agentConfig['api_key']
            MetricTransaction(json.dumps(self._metrics),
                              headers={'Content-Type': 'application/json'})
            self._metrics = {}

    def run(self):
        handlers = [
            (r"/intake/?", AgentInputHandler),
            (r"/api/v1/series/?", ApiInputHandler),
            (r"/status/?", StatusHandler),
        ]

        settings = dict(
            cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            xsrf_cookies=False,
            debug=False,
            log_function=self.log_request
        )

        non_local_traffic = self._agentConfig.get("non_local_traffic", False)

        tornado.web.Application.__init__(self, handlers, **settings)
        http_server = tornado.httpserver.HTTPServer(self)

        # non_local_traffic must be == True to match, not just some non-false value
        if non_local_traffic is True:
            http_server.listen(self._port)
        else:
            # localhost in lieu of 127.0.0.1 to support IPv6
            try:
                http_server.listen(self._port, address="localhost")
            except gaierror:
                log.warning("Warning localhost seems undefined in your host file, using 127.0.0.1 instead")
                http_server.listen(self._port, address="127.0.0.1")

        log.info("Listening on port %d" % self._port)

        # Register callbacks
        self.mloop = get_tornado_ioloop()
        logging.getLogger().setLevel(get_logging_config()['log_level'] or logging.INFO)

        def flush_trs():
            if self._watchdog:
                self._watchdog.reset()
            self._postMetrics()
            # ... (the rest of this example is omitted) ...
Example 14: Reporter
class Reporter(threading.Thread):
    """
    The reporter periodically sends the aggregated metrics to the
    server.
    """

    def __init__(self, interval, metrics_aggregator, api_host, api_key=None, use_watchdog=False, event_chunk_size=None):
        threading.Thread.__init__(self)
        self.interval = int(interval)
        self.finished = threading.Event()
        self.metrics_aggregator = metrics_aggregator
        self.flush_count = 0
        self.log_count = 0

        self.watchdog = None
        if use_watchdog:
            from util import Watchdog
            self.watchdog = Watchdog(WATCHDOG_TIMEOUT)

        self.api_key = api_key
        self.api_host = api_host
        self.event_chunk_size = event_chunk_size or EVENT_CHUNK_SIZE

        self.http_conn_cls = http_client.HTTPSConnection
        match = re.match('^(https?)://(.*)', api_host)
        if match:
            self.api_host = match.group(2)
            if match.group(1) == 'http':
                self.http_conn_cls = http_client.HTTPConnection

    def stop(self):
        log.info("Stopping reporter")
        self.finished.set()

    def run(self):
        log.info("Reporting to %s every %ss" % (self.api_host, self.interval))
        log.debug("Watchdog enabled: %s" % bool(self.watchdog))

        # Persist a start-up message.
        DogstatsdStatus().persist()

        while not self.finished.isSet():  # Use camel case isSet for 2.4 support.
            self.finished.wait(self.interval)
            self.metrics_aggregator.send_packet_count('datadog.dogstatsd.packet.count')
            self.flush()
            if self.watchdog:
                self.watchdog.reset()

        # Clean up the status messages.
        log.debug("Stopped reporter")
        DogstatsdStatus.remove_latest_status()

    def flush(self):
        try:
            self.flush_count += 1
            self.log_count += 1
            packets_per_second = self.metrics_aggregator.packets_per_second(self.interval)
            packet_count = self.metrics_aggregator.total_count

            metrics = self.metrics_aggregator.flush()
            count = len(metrics)
            if self.flush_count % FLUSH_LOGGING_PERIOD == 0:
                self.log_count = 0
            if count:
                self.submit(metrics)

            events = self.metrics_aggregator.flush_events()
            event_count = len(events)
            if event_count:
                self.submit_events(events)

            should_log = self.flush_count <= FLUSH_LOGGING_INITIAL or self.log_count <= FLUSH_LOGGING_COUNT
            log_func = log.info
            if not should_log:
                log_func = log.debug
            log_func("Flush #%s: flushed %s metric%s and %s event%s" % (self.flush_count, count, plural(count), event_count, plural(event_count)))
            if self.flush_count == FLUSH_LOGGING_INITIAL:
                log.info("First flushes done, %s flushes will be logged every %s flushes." % (FLUSH_LOGGING_COUNT, FLUSH_LOGGING_PERIOD))

            # Persist a status message.
            packet_count = self.metrics_aggregator.total_count
            DogstatsdStatus(
                flush_count=self.flush_count,
                packet_count=packet_count,
                packets_per_second=packets_per_second,
                metric_count=count,
                event_count=event_count,
            ).persist()
        except Exception:
            log.exception("Error flushing metrics")

    def submit(self, metrics):
        # Copy and pasted from dogapi, because it's a bit of a pain to distribute python
        # dependencies with the agent.
        body, headers = serialize_metrics(metrics)
        method = 'POST'
        # ... (the rest of this example is omitted) ...
Example 15: Application
class Application(tornado.web.Application):
    NO_PARALLELISM = 1
    DEFAULT_PARALLELISM = 5

    def __init__(self, port, agentConfig, watchdog=True,
                 skip_ssl_validation=False, use_simple_http_client=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        AgentTransaction.set_application(self)
        AgentTransaction.set_endpoints(agentConfig['endpoints'])
        AgentTransaction.set_request_timeout(agentConfig['forwarder_timeout'])

        max_parallelism = self.NO_PARALLELISM
        # Multiple endpoints => enable parallelism
        if len(agentConfig['endpoints']) > 1:
            max_parallelism = self.DEFAULT_PARALLELISM

        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE, THROTTLING_DELAY,
                                              max_parallelism=max_parallelism)
        AgentTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
        self.use_simple_http_client = use_simple_http_client
        if self.skip_ssl_validation:
            log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

        # Monitor activity
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
            self._watchdog = Watchdog(
                watchdog_timeout,
                max_mem_mb=agentConfig.get('limit_memory_consumption', None),
                max_resets=WATCHDOG_HIGH_ACTIVITY_THRESHOLD
            )

    def log_request(self, handler):
        """ Override the tornado logging method.
        If everything goes well, log level is DEBUG.
        Otherwise it's WARNING or ERROR depending on the response code. """
        if handler.get_status() < 400:
            log_method = log.debug
        elif handler.get_status() < 500:
            log_method = log.warning
        else:
            log_method = log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method(
            u"%d %s %.2fms",
            handler.get_status(),
            handler._request_summary(), request_time
        )

    def appendMetric(self, prefix, name, host, device, ts, value):
        if prefix in self._metrics:
            metrics = self._metrics[prefix]
        else:
            metrics = {}
            self._metrics[prefix] = metrics

        if name in metrics:
            metrics[name].append([host, device, ts, value])
        else:
            metrics[name] = [[host, device, ts, value]]

    def _postMetrics(self):
        if len(self._metrics) > 0:
            self._metrics['uuid'] = get_uuid()
            self._metrics['internalHostname'] = get_hostname(self._agentConfig)
            self._metrics['apiKey'] = self._agentConfig['api_key']
            MetricTransaction(json.dumps(self._metrics),
                              headers={'Content-Type': 'application/json'})
            self._metrics = {}

    def run(self):
        handlers = [
            (r"/intake/?", AgentInputHandler),
            (r"/intake/metrics?", MetricsAgentInputHandler),
            (r"/intake/metadata?", MetadataAgentInputHandler),
            (r"/api/v1/series/?", ApiInputHandler),
            (r"/api/v1/check_run/?", ApiCheckRunHandler),
            (r"/status/?", StatusHandler),
        ]

        settings = dict(
            cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            xsrf_cookies=False,
            debug=False,
            log_function=self.log_request
        )

        non_local_traffic = self._agentConfig.get("non_local_traffic", False)

        tornado.web.Application.__init__(self, handlers, **settings)
        # ... (the rest of this example is omitted) ...