本文整理汇总了Python中checks.datadog.Dogstreams类的典型用法代码示例。如果您正苦于以下问题:Python Dogstreams类的具体用法?Python Dogstreams怎么用?Python Dogstreams使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Dogstreams类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_dogstream_events
def test_dogstream_events(self):
    """Events parsed out of a log stream get the default event metadata attached."""
    log_data = [
        '2012-05-14 12:46:01 [ERROR] - host0 is down (broke its collarbone)',
        '2012-05-14 12:48:07 [ERROR] - host1 is down (got a bloody nose)',
        '2012-05-14 12:52:03 [RECOVERY] - host0 is up (collarbone healed)',
        '2012-05-14 12:59:09 [RECOVERY] - host1 is up (nose stopped bleeding)',
    ]
    # (timestamp, alert_type, host, msg_title) for each log line, in order.
    # msg_text is always the raw log line itself.
    event_specs = [
        (1336999561, "error", "host0", "host0 is down (broke its collarbone)"),
        (1336999687, "error", "host1", "host1 is down (got a bloody nose)"),
        (1336999923, "success", "host0", "host0 is up (collarbone healed)"),
        (1337000349, "success", "host1", "host1 is up (nose stopped bleeding)"),
    ]
    expected_output = {
        "dogstreamEvents": [
            {
                "timestamp": ts,
                "alert_type": alert_type,
                "host": host,
                "msg_title": title,
                "msg_text": raw_line,
                "event_type": EventDefaults.EVENT_TYPE,
                "aggregation_key": EventDefaults.EVENT_OBJECT,
                "event_object": EventDefaults.EVENT_OBJECT,
            }
            for (ts, alert_type, host, title), raw_line in zip(event_specs, log_data)
        ]
    }
    self._write_log(log_data)
    dogstream = Dogstreams.init(
        self.logger,
        {'dogstreams': '{0}:{1}:parse_events'.format(self.log_file.name, __name__)})
    actual_output = dogstream.check(self.config, move_end=False)
    self.assertEquals(expected_output, actual_output)
示例2: test_dogstream_events_validation
def test_dogstream_events_validation(self):
    """Parsed event dicts lacking both msg_title and msg_text are filtered out."""
    log_data = [
        {"msg_title": "title", "timestamp": 1336999561},
        {"msg_text": "body", "timestamp": 1336999561},
        # Neither msg_title nor msg_text: must not appear in the output.
        {"none of the above": "should get filtered out", "timestamp": 1336999561},
    ]
    # Metadata common to every surviving event.
    base_event = {
        "timestamp": 1336999561,
        "event_type": EventDefaults.EVENT_TYPE,
        "aggregation_key": EventDefaults.EVENT_OBJECT,
        "event_object": EventDefaults.EVENT_OBJECT,
    }
    title_event = dict(base_event)
    title_event["msg_title"] = "title"
    text_event = dict(base_event)
    text_event["msg_text"] = "body"
    expected_output = {"dogstreamEvents": [title_event, text_event]}
    self._write_log([repr(d) for d in log_data])
    dogstream = Dogstreams.init(
        self.logger,
        {'dogstreams': '{0}:{1}:repr_event_parser'.format(self.log_file.name, __name__)})
    actual_output = dogstream.check(self.config, move_end=False)
    self.assertEquals(expected_output, actual_output)
示例3: __init__
def __init__(self, agentConfig, emitters, systemStats):
    """Set up the collector: system checks, legacy metric checks and custom checks.

    agentConfig -- agent configuration dict; mutated in place to carry the
        system stats so checks can reach them through the config.
    emitters -- callables used to forward collected payloads.
    systemStats -- output of config.get_system_stats().
    """
    self.emit_duration = None
    self.agentConfig = agentConfig
    # system stats is generated by config.get_system_stats
    self.agentConfig["system_stats"] = systemStats
    # agent config is used during checks, system_stats can be accessed through the config
    self.os = get_os()
    self.plugins = None
    self.emitters = emitters
    self.metadata_interval = int(agentConfig.get("metadata_interval", 10 * 60))
    self.metadata_start = time.time()
    # Global default so no check can hang the collector on a dead socket.
    socket.setdefaulttimeout(15)
    self.run_count = 0
    self.continue_running = True
    self.metadata_cache = None
    self.initialized_checks_d = []
    self.init_failed_checks_d = []
    # Unix System Checks
    self._unix_system_checks = {
        "disk": u.Disk(log),
        "io": u.IO(log),
        "load": u.Load(log),
        "memory": u.Memory(log),
        "processes": u.Processes(log),
        "cpu": u.Cpu(log),
    }
    # Win32 System Checks
    self._win32_system_checks = {
        "disk": w32.Disk(log),
        "io": w32.IO(log),
        "proc": w32.Processes(log),
        "memory": w32.Memory(log),
        "network": w32.Network(log),
        "cpu": w32.Cpu(log),
    }
    # Old-style metric checks
    self._ganglia = Ganglia(log)
    self._dogstream = Dogstreams.init(log, self.agentConfig)
    self._ddforwarder = DdForwarder(log, self.agentConfig)
    # Agent Metrics
    self._agent_metrics = CollectorMetrics(log)
    self._metrics_checks = []
    # Custom metric checks: comma-separated module specs in the config.
    for module_spec in [s.strip() for s in self.agentConfig.get("custom_checks", "").split(",")]:
        if not module_spec:
            continue
        try:
            self._metrics_checks.append(modules.load(module_spec, "Check")(log))
            log.info("Registered custom check %s" % module_spec)
            log.warning(
                "Old format custom checks are deprecated. They should be moved to the checks.d interface as old custom checks will be removed in a next version"
            )
        # Fix: `except Exception, e` is Python-2-only syntax and `e` was
        # unused; a bad custom check must not abort collector startup.
        except Exception:
            log.exception("Unable to load custom check module %s" % module_spec)
示例4: __init__
def __init__(self, agentConfig, emitters, systemStats):
    """Set up the collector: system checks, legacy metric checks and custom checks.

    agentConfig -- agent configuration dict; mutated in place to carry the
        system stats so checks can reach them through the config.
    emitters -- callables used to forward collected payloads.
    systemStats -- output of config.get_system_stats().
    """
    self.emit_duration = None
    self.agentConfig = agentConfig
    # system stats is generated by config.get_system_stats
    self.agentConfig['system_stats'] = systemStats
    # agent config is used during checks, system_stats can be accessed through the config
    self.os = get_os()
    self.plugins = None
    self.emitters = emitters
    self.metadata_interval = int(agentConfig.get('metadata_interval', 10 * 60))
    self.metadata_start = time.time()
    # Global default so no check can hang the collector on a dead socket.
    socket.setdefaulttimeout(15)
    self.run_count = 0
    self.continue_running = True
    self.metadata_cache = None
    self.checks_d = []
    # Unix System Checks
    self._unix_system_checks = {
        'disk': u.Disk(log),
        'io': u.IO(log),
        'load': u.Load(log),
        'memory': u.Memory(log),
        'processes': u.Processes(log),
        'cpu': u.Cpu(log)
    }
    # Win32 System Checks
    self._win32_system_checks = {
        'disk': w32.Disk(log),
        'io': w32.IO(log),
        'proc': w32.Processes(log),
        'memory': w32.Memory(log),
        'network': w32.Network(log),
        'cpu': w32.Cpu(log)
    }
    # Old-style metric checks
    self._ganglia = Ganglia(log)
    self._cassandra = Cassandra()
    self._dogstream = Dogstreams.init(log, self.agentConfig)
    self._ddforwarder = DdForwarder(log, self.agentConfig)
    # Agent Metrics
    self._agent_metrics = CollectorMetrics(log)
    # Metric Checks
    self._metrics_checks = [
        Memcache(log),
    ]
    # Custom metric checks: comma-separated module specs in the config.
    for module_spec in [s.strip() for s in self.agentConfig.get('custom_checks', '').split(',')]:
        if not module_spec:
            continue
        try:
            self._metrics_checks.append(modules.load(module_spec, 'Check')(log))
            log.info("Registered custom check %s" % module_spec)
        # Fix: `except Exception, e` is Python-2-only syntax and `e` was
        # unused; a bad custom check must not abort collector startup.
        except Exception:
            log.exception('Unable to load custom check module %s' % module_spec)
示例5: setUp
def setUp(self):
    """Point a fresh Dogstreams instance at the per-test log file."""
    TailTestCase.setUp(self)
    self.config = {
        'dogstreams': self.log_file.name,
        'check_freq': 5,
    }
    log.info("Test config: %s" % self.config)
    self.dogstream = Dogstreams.init(self.logger, self.config)
    # Never truncate diffs on assertion failures.
    self.maxDiff = None
示例6: test_host_perfdata
def test_host_perfdata(self):
    """Nagios host perfdata written via an explicit template parses into gauge metrics."""
    from checks.datadog import NagiosHostPerfData
    # Point the nagios config at the test log file and declare a
    # tab-separated host-perfdata template matching the lines below.
    self._write_nagios_config([
        "host_perfdata_file=%s" % self.log_file.name,
        "host_perfdata_file_template=DATATYPE::HOSTPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tHOSTPERFDATA::$HOSTPERFDATA$\tHOSTCHECKCOMMAND::$HOSTCHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$",
    ])
    dogstream = Dogstreams.init(self.logger, self.agent_config)
    # Only the host-perfdata parser should have been configured.
    self.assertEquals([NagiosHostPerfData], [d.__class__ for d in dogstream.dogstreams])
    # One perfdata record; fields are joined with tabs when written out.
    log_data = [
        ("DATATYPE::HOSTPERFDATA",
         "TIMET::1000000010",
         "HOSTNAME::myhost1",
         "HOSTPERFDATA::" + " ".join([
             "rta=0.978000ms;5000.000000;5000.000000;0.000000",
             "pl=0%;100;100;0",
         ]),
         "HOSTCHECKCOMMAND::check-host-alive",
         "HOSTSTATE::UP",
         "HOSTSTATETYPE::HARD",
         ),
    ]
    # Each perfdata item becomes one gauge point:
    # (metric name, unix timestamp, value, attributes).
    expected_output = [
        ('nagios.host.rta', 1000000010, 0.978, {
            'metric_type': 'gauge',
            'host_name': 'myhost1',
            'unit': 'ms',
            'warn': '5000.000000',
            'crit': '5000.000000',
            'min': '0.000000'
        }),
        ('nagios.host.pl', 1000000010, 0., {
            'metric_type': 'gauge',
            'host_name': 'myhost1',
            'unit': '%',
            'warn': '100',
            'crit': '100',
            'min': '0'
        }),
    ]
    # Sort both sides so comparison is order-independent.
    expected_output.sort(key=point_sorter)
    self._write_log(('\t'.join(data) for data in log_data))
    actual_output = dogstream.check(self.agent_config, move_end=False)['dogstream']
    actual_output.sort(key=point_sorter)
    self.assertEquals(expected_output, actual_output)
示例7: __init__
def __init__(self, agentConfig, emitters):
    """Instantiate every legacy check with the shared 'checks' logger.

    agentConfig -- agent configuration dict, passed to config-aware checks.
    emitters -- callables used to forward collected payloads.
    """
    self.agentConfig = agentConfig
    self.plugins = None
    self.emitters = emitters
    self.os = None
    self.checksLogger = logging.getLogger("checks")
    # Global default so no check can hang the agent on a dead socket.
    socket.setdefaulttimeout(15)
    self._apache = Apache(self.checksLogger)
    self._nginx = Nginx(self.checksLogger)
    self._disk = Disk(self.checksLogger)
    self._io = IO()
    self._load = Load(self.checksLogger)
    self._memory = Memory(self.checksLogger)
    self._network = Network(self.checksLogger)
    self._processes = Processes()
    self._cpu = Cpu()
    self._couchdb = CouchDb(self.checksLogger)
    self._mongodb = MongoDb(self.checksLogger)
    self._mysql = MySql(self.checksLogger)
    self._pgsql = PostgreSql(self.checksLogger)
    self._rabbitmq = RabbitMq()
    self._ganglia = Ganglia(self.checksLogger)
    self._cassandra = Cassandra()
    self._redis = Redis(self.checksLogger)
    self._jvm = Jvm(self.checksLogger)
    self._tomcat = Tomcat(self.checksLogger)
    self._activemq = ActiveMQ(self.checksLogger)
    self._solr = Solr(self.checksLogger)
    self._memcache = Memcache(self.checksLogger)
    self._dogstream = Dogstreams.init(self.checksLogger, self.agentConfig)
    self._ddforwarder = DdForwarder(self.checksLogger, self.agentConfig)
    # All new checks should be metrics checks:
    self._metrics_checks = [
        Cacti(self.checksLogger),
        Redis(self.checksLogger),
        Varnish(self.checksLogger),
        ElasticSearch(self.checksLogger),
    ]
    # Custom metric checks: comma-separated module specs in the config.
    for module_spec in [s.strip() for s in self.agentConfig.get("custom_checks", "").split(",")]:
        if not module_spec:
            continue
        try:
            self._metrics_checks.append(modules.load(module_spec, "Check")(self.checksLogger))
            self.checksLogger.info("Registered custom check %s" % module_spec)
        # Fix: `except Exception, e` is Python-2-only syntax and `e` was
        # unused; a bad custom check must not abort startup.
        except Exception:
            self.checksLogger.exception("Unable to load custom check module %s" % module_spec)
示例8: test_alt_host_perfdata
def test_alt_host_perfdata(self):
    """Host perfdata from the canned fixture file parses into the expected metrics."""
    from checks.datadog import NagiosHostPerfData
    self._write_nagios_config([
        "host_perfdata_file=%s" % NAGIOS_TEST_HOST,
        "host_perfdata_file_template=%s" % NAGIOS_TEST_HOST_TEMPLATE,
    ])
    dogstream = Dogstreams.init(self.logger, self.agent_config)
    # Only the host-perfdata parser should be active.
    parser_classes = [d.__class__ for d in dogstream.dogstreams]
    self.assertEquals([NagiosHostPerfData], parser_classes)
    actual_output = dogstream.check(self.agent_config, move_end=False)
    expected_output = {
        'dogstream': [
            ('nagios.host.pl', 1339511440, 0.0,
             {'warn': '80', 'metric_type': 'gauge', 'host_name': 'localhost',
              'min': '0', 'crit': '100', 'unit': '%'}),
            ('nagios.host.rta', 1339511440, 0.048,
             {'warn': '3000.000000', 'metric_type': 'gauge', 'host_name': 'localhost',
              'min': '0.000000', 'crit': '5000.000000', 'unit': 'ms'}),
        ]
    }
    self.assertEquals(expected_output, actual_output)
示例9: test_dogstream_ancient_function_plugin
def test_dogstream_ancient_function_plugin(self):
    """Ensure that pre-stateful plugins still work."""
    log_data = [
        'test.metric.simple 1000000000 1 metric_type=gauge',
        'test.metric.simple 1100000000 1 metric_type=gauge'
    ]
    expected_output = {
        "dogstream": [
            ('test.metric.simple', 1000000000, 1, self.gauge),
            ('test.metric.simple', 1100000000, 1, self.gauge)]
    }
    self._write_log(log_data)
    plugdog = Dogstreams.init(self.logger, {'dogstreams': '{0}:{1}:parse_ancient_function_plugin'.format(self.log_file.name, __name__)})
    actual_output = plugdog.check(self.config, move_end=False)
    # Fix: the original computed actual_output but never asserted on it,
    # so the test could never fail. Compare like every sibling test does.
    self.assertEquals(expected_output, actual_output)
示例10: test_alt_service_perfdata
def test_alt_service_perfdata(self):
    """Service perfdata from the canned fixture file parses into the expected metrics."""
    from checks.datadog import NagiosServicePerfData
    self._write_nagios_config([
        "service_perfdata_file=%s" % NAGIOS_TEST_SVC,
        "service_perfdata_file_template=%s" % NAGIOS_TEST_SVC_TEMPLATE,
    ])
    dogstream = Dogstreams.init(self.logger, self.agent_config)
    # Only the service-perfdata parser should be active.
    parser_classes = [d.__class__ for d in dogstream.dogstreams]
    self.assertEquals([NagiosServicePerfData], parser_classes)
    actual_output = dogstream.check(self.agent_config, move_end=False)
    expected_output = {
        'dogstream': [
            ('nagios.current_users.users', 1339511440, 1.0,
             {'metric_type': 'gauge', 'warn': '20', 'host_name': 'localhost',
              'crit': '50', 'min': '0'}),
            ('nagios.ping.pl', 1339511500, 0.0,
             {'warn': '20', 'metric_type': 'gauge', 'host_name': 'localhost',
              'min': '0', 'crit': '60', 'unit': '%'}),
            ('nagios.ping.rta', 1339511500, 0.065,
             {'warn': '100.000000', 'metric_type': 'gauge', 'host_name': 'localhost',
              'min': '0.000000', 'crit': '500.000000', 'unit': 'ms'}),
            ('nagios.root_partition', 1339511560, 2470.0,
             {'min': '0', 'max': '7315', 'device_name': '/', 'warn': '5852',
              'metric_type': 'gauge', 'host_name': 'localhost', 'crit': '6583',
              'unit': 'MB'}),
        ]
    }
    self.assertEquals(expected_output, actual_output)
示例11: test_dogstream_stateful
def test_dogstream_stateful(self):
    """A stateful parser accumulates counter values across successive lines."""
    lines = [
        'test.metric.accumulator 1000000000 1 metric_type=counter',
        'test.metric.accumulator 1100000000 1 metric_type=counter'
    ]
    # The second point carries the running total (1 + 1 = 2).
    expected_output = {
        "dogstream": [
            ('test.metric.accumulator', 1000000000, 1, self.counter),
            ('test.metric.accumulator', 1100000000, 2, self.counter),
        ]
    }
    self._write_log(lines)
    stream_spec = '%s:tests.test_datadog:parse_stateful' % self.log_file.name
    statedog = Dogstreams.init(self.logger, {'dogstreams': stream_spec})
    actual_output = statedog.check(self.config, move_end=False)
    self.assertEquals(expected_output, actual_output)
示例12: test_dogstream_new_plugin
def test_dogstream_new_plugin(self):
    """Class-based stateful plugins work and receive extra init arguments."""
    lines = [
        'test.metric.accumulator 1000000000 1 metric_type=counter',
        'test.metric.accumulator 1100000000 1 metric_type=counter'
    ]
    # The plugin class gets 'foo' and 'bar' from the stream spec and
    # prefixes metric names with them; counter state accumulates (1, 2).
    expected_output = {
        "dogstream": [
            ('foo.bar:test.metric.accumulator', 1000000000, 1, self.counter),
            ('foo.bar:test.metric.accumulator', 1100000000, 2, self.counter),
        ]
    }
    self._write_log(lines)
    stream_spec = '{0}:{1}:ParseClassPlugin:foo:bar'.format(self.log_file.name, __name__)
    statedog = Dogstreams.init(self.logger, {'dogstreams': stream_spec})
    actual_output = statedog.check(self.config, move_end=False)
    self.assertEquals(expected_output, actual_output)
示例13: test_dogstream_function_plugin
def test_dogstream_function_plugin(self):
    """Non-class-based stateful plugins accumulate state across lines."""
    lines = [
        'test.metric.accumulator 1000000000 1 metric_type=counter',
        'test.metric.accumulator 1100000000 1 metric_type=counter'
    ]
    # The second point carries the running total (1 + 1 = 2).
    expected_output = {
        "dogstream": [
            ('test.metric.accumulator', 1000000000, 1, self.counter),
            ('test.metric.accumulator', 1100000000, 2, self.counter),
        ]
    }
    self._write_log(lines)
    stream_spec = '%s:tests.test_datadog:parse_function_plugin' % self.log_file.name
    statedog = Dogstreams.init(self.logger, {'dogstreams': stream_spec})
    actual_output = statedog.check(self.config, move_end=False)
    self.assertEquals(expected_output, actual_output)
示例14: __init__
def __init__(self, agentConfig, emitters):
    """Instantiate every legacy check with the shared 'checks' logger."""
    self.agentConfig = agentConfig
    self.plugins = None
    self.emitters = emitters
    self.os = None
    # Local alias: every check below takes the same logger instance.
    clog = logging.getLogger('checks')
    self.checksLogger = clog
    # Global default so no check can hang the agent on a dead socket.
    socket.setdefaulttimeout(15)
    self._apache = Apache(clog)
    self._nginx = Nginx(clog)
    self._disk = Disk(clog)
    self._io = IO()
    self._load = Load(clog)
    self._memory = Memory(clog)
    self._network = Network(clog)
    self._processes = Processes()
    self._cpu = Cpu()
    self._couchdb = CouchDb(clog)
    self._mongodb = MongoDb(clog)
    self._mysql = MySql(clog)
    self._pgsql = PostgreSql(clog)
    self._rabbitmq = RabbitMq()
    self._ganglia = Ganglia(clog)
    self._cassandra = Cassandra()
    self._redis = Redis(clog)
    self._jvm = Jvm(clog)
    self._tomcat = Tomcat(clog)
    self._activemq = ActiveMQ(clog)
    self._solr = Solr(clog)
    self._memcache = Memcache(clog)
    self._dogstream = Dogstreams.init(clog, self.agentConfig)
    self._ddforwarder = DdForwarder(clog, self.agentConfig)
    # All new checks should be metrics checks:
    self._metrics_checks = [
        Cacti(clog),
        Redis(clog),
        Varnish(clog),
        ElasticSearch(clog),
    ]
    self._event_checks = [Hudson(), Nagios(socket.gethostname())]
    self._resources_checks = [ResProcesses(clog, self.agentConfig)]
    self._ec2 = EC2(clog)
示例15: test_dogstream_log_path_globbing
def test_dogstream_log_path_globbing(self):
    """Make sure that globbed dogstream logfile matching works."""
    # One tmpfile supplies the common name prefix for all the others.
    anchor_file = NamedTemporaryFile()
    name_prefix = os.path.basename(anchor_file.name)
    expected_names = set([anchor_file.name])
    # Keep references to the tempfile objects so they are not
    # garbage-collected (and deleted on disk) before the glob runs.
    keep_alive = []
    for _ in range(3):
        extra = NamedTemporaryFile(prefix=name_prefix)
        keep_alive.append(extra)
        expected_names.add(extra.name)
    glob_pattern = os.path.join(gettempdir(), name_prefix + '*')
    matched_paths = Dogstreams._get_dogstream_log_paths(glob_pattern)
    self.assertEqual(set(matched_paths), expected_names)