本文整理汇总了Python中checks.ganglia.Ganglia类的典型用法代码示例。如果您正苦于以下问题:Python Ganglia类的具体用法?Python Ganglia怎么用?Python Ganglia使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Ganglia类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testSpeed
def testSpeed(self):
    """Round-trip test for Ganglia.check against a local gmetad stand-in.

    Spawns netcat to serve the TEST_FN fixture on port 8651, then verifies
    the check returns that payload byte-for-byte (compared via md5).
    """
    # Pretend to be gmetad and serve a large piece of content
    server = subprocess.Popen("nc -l 8651 < %s" % TEST_FN, shell=True)
    try:
        # Give nc a moment to bind the port before the check connects
        time.sleep(1)
        g = Ganglia(logging.getLogger(__file__))
        # Profiler invocation kept for ad-hoc investigation (the tempfile it
        # wrote to has been dropped along with the dead `pfile` local):
        # profile.runctx("g.check({'ganglia_host': 'localhost', 'ganglia_port': 8651})", {}, {"g": g}, pfile.name)
        # p = pstats.Stats(pfile.name)
        # p.sort_stats('time').print_stats()
        with open(TEST_FN) as fixture:
            expected = fixture.read()
        self.assertEquals(
            md5(g.check({'ganglia_host': 'localhost', 'ganglia_port': 8651})).hexdigest(),
            md5(expected).hexdigest())
    finally:
        # Reap the netcat helper so it doesn't linger holding port 8651
        server.terminate()
        server.wait()
示例2: testSpeed
def testSpeed(self):
    """Round-trip test for Ganglia.check against a local gmetad stand-in.

    Serves the ganglia.txt fixture over TCP with netcat, then checks that
    the parsed XML returned by the check has the same top-level structure
    as the fixture itself.
    """
    # Pretend to be gmetad and serve a large piece of content
    original_file = Fixtures.file('ganglia.txt')
    server = subprocess.Popen("nc -l 8651 < %s" % original_file, shell=True)
    try:
        # Give nc a moment to bind the port before the check connects
        time.sleep(1)
        g = Ganglia(logging.getLogger(__file__))
        parsed = StringIO(g.check({'ganglia_host': 'localhost', 'ganglia_port': 8651}))
        x1 = tree.parse(parsed)
        # Reuse the fixture path fetched above instead of resolving it twice
        x2 = tree.parse(original_file)
        # Cursory test: compare only the tags of the top-level XML children
        self.assertEquals([c.tag for c in x1.getroot()], [c.tag for c in x2.getroot()])
    finally:
        # Reap the netcat helper (the original discarded the Popen handle,
        # leaving an orphaned process holding port 8651)
        server.terminate()
        server.wait()
示例3: testSpeed
def testSpeed(self):
    """Round-trip test for Ganglia.check against a local gmetad stand-in.

    Spawns netcat to serve the TEST_FN fixture on port 8651 and verifies
    the XML returned by the check has the same top-level structure as the
    fixture.
    """
    # Pretend to be gmetad and serve a large piece of content
    server = subprocess.Popen("nc -l 8651 < %s" % TEST_FN, shell=True)
    try:
        # Give nc a moment to bind the port before the check connects
        time.sleep(1)
        g = Ganglia(logging.getLogger(__file__))
        # Profiler invocation kept for ad-hoc investigation (the dead
        # `pfile` tempfile local it needed has been removed):
        # profile.runctx("g.check({'ganglia_host': 'localhost', 'ganglia_port': 8651})", {}, {"g": g}, pfile.name)
        # p = pstats.Stats(pfile.name)
        # p.sort_stats('time').print_stats()
        parsed = StringIO(g.check({'ganglia_host': 'localhost', 'ganglia_port': 8651}))
        x1 = tree.parse(parsed)
        # Close the fixture handle the original version leaked
        with open(TEST_FN) as original:
            x2 = tree.parse(original)
        # Cursory test: compare only the tags of the top-level XML children
        self.assertEquals([c.tag for c in x1.getroot()], [c.tag for c in x2.getroot()])
    finally:
        # Reap the netcat helper so it doesn't linger holding port 8651
        server.terminate()
        server.wait()
示例4: __init__
def __init__(self, agentConfig, emitters):
    """Instantiate every old-style check with a shared logger and config.

    :param agentConfig: dict of agent settings handed to each check at run time
    :param emitters: callables used to ship collected payloads onward
    """
    self.agentConfig = agentConfig
    self.plugins = None
    self.emitters = emitters
    self.os = None
    # Single shared logger for all legacy checks
    self.checksLogger = logging.getLogger('checks')
    # Global socket timeout so a hung check endpoint cannot block forever
    socket.setdefaulttimeout(15)
    # One instance per old-style check; note IO/Processes/Cpu/RabbitMq/
    # Cassandra take no logger here, unlike the others
    self._apache = Apache(self.checksLogger)
    self._nginx = Nginx(self.checksLogger)
    self._disk = Disk(self.checksLogger)
    self._io = IO()
    self._load = Load(self.checksLogger)
    self._memory = Memory(self.checksLogger)
    self._network = Network(self.checksLogger)
    self._processes = Processes()
    self._cpu = Cpu()
    self._couchdb = CouchDb(self.checksLogger)
    self._mongodb = MongoDb(self.checksLogger)
    self._mysql = MySql(self.checksLogger)
    self._pgsql = PostgreSql(self.checksLogger)
    self._rabbitmq = RabbitMq()
    self._ganglia = Ganglia(self.checksLogger)
    self._cassandra = Cassandra()
    self._redis = Redis(self.checksLogger)
    self._jvm = Jvm(self.checksLogger)
    self._tomcat = Tomcat(self.checksLogger)
    self._activemq = ActiveMQ(self.checksLogger)
    self._solr = Solr(self.checksLogger)
    self._memcache = Memcache(self.checksLogger)
    self._dogstream = Dogstreams.init(self.checksLogger, self.agentConfig)
    self._ddforwarder = DdForwarder(self.checksLogger, self.agentConfig)
    # All new checks should be metrics checks:
    self._metrics_checks = [
        Cacti(self.checksLogger),
        Redis(self.checksLogger),
        Varnish(self.checksLogger),
        ElasticSearch(self.checksLogger),
    ]
    # Event-style and resource-style checks kept in separate lists
    self._event_checks = [Hudson(), Nagios(socket.gethostname())]
    self._resources_checks = [ResProcesses(self.checksLogger,self.agentConfig)]
    # EC2 metadata helper (used for instance identity/tags)
    self._ec2 = EC2(self.checksLogger)
示例5: __init__
def __init__(self, agentConfig, emitters, systemStats, hostname):
    """Wire up the collector: config, push schedules and legacy checks.

    :param agentConfig: dict of agent settings (also receives system_stats)
    :param emitters: callables that ship collected payloads onward
    :param systemStats: output of config.get_system_stats
    :param hostname: host identifier used when submitting metrics
    """
    self.emit_duration = None
    self.agentConfig = agentConfig
    self.hostname = hostname
    # system stats is generated by config.get_system_stats
    self.agentConfig['system_stats'] = systemStats
    # agent config is used during checks, system_stats can be accessed through the config
    self.os = get_os()
    self.plugins = None
    self.emitters = emitters
    self.check_timings = agentConfig.get('check_timings')
    # Per-task last-push timestamp and push interval (seconds)
    self.push_times = {
        'host_metadata': {
            'start': time.time(),
            'interval': int(agentConfig.get('metadata_interval', 4 * 60 * 60))
        },
        'external_host_tags': {
            'start': time.time() - 3 * 60,  # Wait for the checks to init
            'interval': int(agentConfig.get('external_host_tags', 5 * 60))
        },
        'agent_checks': {
            'start': time.time(),
            'interval': int(agentConfig.get('agent_checks_interval', 10 * 60))
        },
        'processes': {
            'start': time.time(),
            'interval': int(agentConfig.get('processes_interval', 60))
        }
    }
    # Global socket timeout so a hung check endpoint cannot block forever
    socket.setdefaulttimeout(15)
    self.run_count = 0
    self.continue_running = True
    self.hostname_metadata_cache = None
    self.initialized_checks_d = []
    self.init_failed_checks_d = {}
    # Unix System Checks
    self._unix_system_checks = {
        'io': u.IO(log),
        'load': u.Load(log),
        'memory': u.Memory(log),
        'processes': u.Processes(log),
        'cpu': u.Cpu(log),
        'system': u.System(log)
    }
    # Win32 System Checks
    self._win32_system_checks = {
        'io': w32.IO(log),
        'proc': w32.Processes(log),
        'memory': w32.Memory(log),
        'network': w32.Network(log),
        'cpu': w32.Cpu(log),
        'system': w32.System(log)
    }
    # Old-style metric checks
    self._ganglia = Ganglia(log)
    self._dogstream = Dogstreams.init(log, self.agentConfig)
    self._ddforwarder = DdForwarder(log, self.agentConfig)
    # Agent performance metrics check
    self._agent_metrics = None
    self._metrics_checks = []
    # Custom metric checks: comma-separated module specs in 'custom_checks',
    # each expected to expose a 'Check' class taking a logger
    for module_spec in [s.strip() for s in self.agentConfig.get('custom_checks', '').split(',')]:
        if len(module_spec) == 0:
            continue
        try:
            self._metrics_checks.append(modules.load(module_spec, 'Check')(log))
            log.info("Registered custom check %s" % module_spec)
            log.warning("Old format custom checks are deprecated. They should be moved to the checks.d interface as old custom checks will be removed in a next version")
        except Exception:
            # Keep going: one broken custom check must not kill the agent
            log.exception('Unable to load custom check module %s' % module_spec)
示例6: Collector
class Collector(object):
"""
The collector is responsible for collecting data from each check and
passing it along to the emitters, who send it to their final destination.
"""
def __init__(self, agentConfig, emitters, systemStats, hostname):
    """Wire up the collector: config, push schedules and legacy checks.

    :param agentConfig: dict of agent settings (also receives system_stats)
    :param emitters: callables that ship collected payloads onward
    :param systemStats: output of config.get_system_stats
    :param hostname: host identifier used when submitting metrics
    """
    self.emit_duration = None
    self.agentConfig = agentConfig
    self.hostname = hostname
    # system stats is generated by config.get_system_stats
    self.agentConfig['system_stats'] = systemStats
    # agent config is used during checks, system_stats can be accessed through the config
    self.os = get_os()
    self.plugins = None
    self.emitters = emitters
    self.check_timings = agentConfig.get('check_timings')
    # Per-task last-push timestamp and push interval (seconds)
    self.push_times = {
        'host_metadata': {
            'start': time.time(),
            'interval': int(agentConfig.get('metadata_interval', 4 * 60 * 60))
        },
        'external_host_tags': {
            'start': time.time() - 3 * 60,  # Wait for the checks to init
            'interval': int(agentConfig.get('external_host_tags', 5 * 60))
        },
        'agent_checks': {
            'start': time.time(),
            'interval': int(agentConfig.get('agent_checks_interval', 10 * 60))
        },
        'processes': {
            'start': time.time(),
            'interval': int(agentConfig.get('processes_interval', 60))
        }
    }
    # Global socket timeout so a hung check endpoint cannot block forever
    socket.setdefaulttimeout(15)
    self.run_count = 0
    self.continue_running = True
    self.hostname_metadata_cache = None
    self.initialized_checks_d = []
    self.init_failed_checks_d = {}
    # Unix System Checks
    self._unix_system_checks = {
        'io': u.IO(log),
        'load': u.Load(log),
        'memory': u.Memory(log),
        'processes': u.Processes(log),
        'cpu': u.Cpu(log),
        'system': u.System(log)
    }
    # Win32 System Checks
    self._win32_system_checks = {
        'io': w32.IO(log),
        'proc': w32.Processes(log),
        'memory': w32.Memory(log),
        'network': w32.Network(log),
        'cpu': w32.Cpu(log),
        'system': w32.System(log)
    }
    # Old-style metric checks
    self._ganglia = Ganglia(log)
    self._dogstream = Dogstreams.init(log, self.agentConfig)
    self._ddforwarder = DdForwarder(log, self.agentConfig)
    # Agent performance metrics check
    self._agent_metrics = None
    self._metrics_checks = []
    # Custom metric checks: comma-separated module specs in 'custom_checks',
    # each expected to expose a 'Check' class taking a logger
    for module_spec in [s.strip() for s in self.agentConfig.get('custom_checks', '').split(',')]:
        if len(module_spec) == 0:
            continue
        try:
            self._metrics_checks.append(modules.load(module_spec, 'Check')(log))
            log.info("Registered custom check %s" % module_spec)
            log.warning("Old format custom checks are deprecated. They should be moved to the checks.d interface as old custom checks will be removed in a next version")
        except Exception:
            # Keep going: one broken custom check must not kill the agent
            log.exception('Unable to load custom check module %s' % module_spec)
def stop(self):
"""
Tell the collector to stop at the next logical point.
"""
# This is called when the process is being killed, so
# try to stop the collector as soon as possible.
# Most importantly, don't try to submit to the emitters
# because the forwarder is quite possibly already killed
# in which case we'll get a misleading error in the logs.
# Best to not even try.
self.continue_running = False
for check in self.initialized_checks_d:
check.stop()
@staticmethod
def _stats_for_display(raw_stats):
return pprint.pformat(raw_stats, indent=4)
#.........这里部分代码省略.........
示例7: Collector
class Collector(object):
"""
The collector is responsible for collecting data from each check and
passing it along to the emitters, who send it to their final destination.
"""
def __init__(self, agentConfig, emitters, systemStats, hostname):
    """Wire up the collector: config, push schedules and legacy checks.

    :param agentConfig: dict of agent settings (also receives system_stats)
    :param emitters: callables that ship collected payloads onward
    :param systemStats: output of config.get_system_stats
    :param hostname: host identifier used when submitting metrics
    """
    self.emit_duration = None
    self.agentConfig = agentConfig
    self.hostname = hostname
    # system stats is generated by config.get_system_stats
    self.agentConfig['system_stats'] = systemStats
    # agent config is used during checks, system_stats can be accessed through the config
    self.os = get_os()
    self.plugins = None
    self.emitters = emitters
    self.check_timings = agentConfig.get('check_timings')
    # Per-task last-push timestamp and push interval (seconds)
    self.push_times = {
        'host_metadata': {
            'start': time.time(),
            'interval': int(agentConfig.get('metadata_interval', 4 * 60 * 60))
        },
        'external_host_tags': {
            'start': time.time() - 3 * 60,  # Wait for the checks to init
            'interval': int(agentConfig.get('external_host_tags', 5 * 60))
        },
        'agent_checks': {
            'start': time.time(),
            'interval': int(agentConfig.get('agent_checks_interval', 10 * 60))
        },
        'processes': {
            'start': time.time(),
            'interval': int(agentConfig.get('processes_interval', 60))
        }
    }
    # Global socket timeout so a hung check endpoint cannot block forever
    socket.setdefaulttimeout(15)
    self.run_count = 0
    self.continue_running = True
    self.hostname_metadata_cache = None
    self.initialized_checks_d = []
    self.init_failed_checks_d = {}
    # Unix System Checks
    self._unix_system_checks = {
        'io': u.IO(log),
        'load': u.Load(log),
        'memory': u.Memory(log),
        'processes': u.Processes(log),
        'cpu': u.Cpu(log),
        'system': u.System(log)
    }
    # Win32 System Checks
    self._win32_system_checks = {
        'io': w32.IO(log),
        'proc': w32.Processes(log),
        'memory': w32.Memory(log),
        'network': w32.Network(log),
        'cpu': w32.Cpu(log),
        'system': w32.System(log)
    }
    # Old-style metric checks
    self._ganglia = Ganglia(log)
    self._dogstream = Dogstreams.init(log, self.agentConfig)
    self._ddforwarder = DdForwarder(log, self.agentConfig)
    # Agent performance metrics check
    self._agent_metrics = None
    self._metrics_checks = []
    # Custom metric checks: comma-separated module specs in 'custom_checks',
    # each expected to expose a 'Check' class taking a logger
    for module_spec in [s.strip() for s in self.agentConfig.get('custom_checks', '').split(',')]:
        if len(module_spec) == 0:
            continue
        try:
            self._metrics_checks.append(modules.load(module_spec, 'Check')(log))
            log.info("Registered custom check %s" % module_spec)
            log.warning("Old format custom checks are deprecated. They should be moved to the checks.d interface as old custom checks will be removed in a next version")
        except Exception:
            # Keep going: one broken custom check must not kill the agent
            log.exception('Unable to load custom check module %s' % module_spec)
def stop(self):
"""
Tell the collector to stop at the next logical point.
"""
# This is called when the process is being killed, so
# try to stop the collector as soon as possible.
# Most importantly, don't try to submit to the emitters
# because the forwarder is quite possibly already killed
# in which case we'll get a misleading error in the logs.
# Best to not even try.
self.continue_running = False
for check in self.initialized_checks_d:
check.stop()
@staticmethod
def _stats_for_display(raw_stats):
return pprint.pformat(raw_stats, indent=4)
#.........这里部分代码省略.........
示例8: __init__
def __init__(self, agentConfig, emitters, systemStats, hostname):
    """Wire up the collector: config, push schedules and legacy checks.

    Unlike older variants, ganglia and dogstream checks are only created
    when the corresponding config keys are present.

    :param agentConfig: dict of agent settings (also receives system_stats)
    :param emitters: callables that ship collected payloads onward
    :param systemStats: output of config.get_system_stats
    :param hostname: host identifier used when submitting metrics
    """
    self.emit_duration = None
    self.agentConfig = agentConfig
    self.hostname = hostname
    # system stats is generated by config.get_system_stats
    self.agentConfig["system_stats"] = systemStats
    # agent config is used during checks, system_stats can be accessed through the config
    self.os = get_os()
    self.plugins = None
    self.emitters = emitters
    self.check_timings = agentConfig.get("check_timings")
    # Per-task last-push timestamp and push interval (seconds)
    self.push_times = {
        "host_metadata": {"start": time.time(), "interval": int(agentConfig.get("metadata_interval", 4 * 60 * 60))},
        "external_host_tags": {
            "start": time.time() - 3 * 60,  # Wait for the checks to init
            "interval": int(agentConfig.get("external_host_tags", 5 * 60)),
        },
        "agent_checks": {"start": time.time(), "interval": int(agentConfig.get("agent_checks_interval", 10 * 60))},
        "processes": {"start": time.time(), "interval": int(agentConfig.get("processes_interval", 60))},
    }
    # Global socket timeout so a hung check endpoint cannot block forever
    socket.setdefaulttimeout(15)
    self.run_count = 0
    self.continue_running = True
    self.hostname_metadata_cache = None
    self.initialized_checks_d = []
    self.init_failed_checks_d = {}
    # Unix System Checks
    self._unix_system_checks = {
        "io": u.IO(log),
        "load": u.Load(log),
        "memory": u.Memory(log),
        "processes": u.Processes(log),
        "cpu": u.Cpu(log),
        "system": u.System(log),
    }
    # Win32 System Checks
    self._win32_system_checks = {
        "io": w32.IO(log),
        "proc": w32.Processes(log),
        "memory": w32.Memory(log),
        "network": w32.Network(log),
        "cpu": w32.Cpu(log),
        "system": w32.System(log),
    }
    # Old-style metric checks, instantiated only when configured
    self._ganglia = Ganglia(log) if self.agentConfig.get("ganglia_host", "") != "" else None
    self._dogstream = None if self.agentConfig.get("dogstreams") is None else Dogstreams.init(log, self.agentConfig)
    # Agent performance metrics check
    self._agent_metrics = None
    self._metrics_checks = []
    # Custom metric checks: comma-separated module specs in 'custom_checks',
    # each expected to expose a 'Check' class taking a logger
    for module_spec in [s.strip() for s in self.agentConfig.get("custom_checks", "").split(",")]:
        if len(module_spec) == 0:
            continue
        try:
            self._metrics_checks.append(modules.load(module_spec, "Check")(log))
            log.info("Registered custom check %s" % module_spec)
            log.warning(
                "Old format custom checks are deprecated. They should be moved to the checks.d interface as old custom checks will be removed in a next version"
            )
        except Exception:
            # Keep going: one broken custom check must not kill the agent
            log.exception("Unable to load custom check module %s" % module_spec)
示例9: checks
class checks(object):
def __init__(self, agentConfig, emitters):
    """Instantiate every old-style check with a shared logger and config.

    :param agentConfig: dict of agent settings handed to each check at run time
    :param emitters: callables used to ship collected payloads onward
    """
    self.agentConfig = agentConfig
    self.plugins = None
    self.emitters = emitters
    self.os = None
    # Single shared logger for all legacy checks
    self.checksLogger = logging.getLogger('checks')
    # Global socket timeout so a hung check endpoint cannot block forever
    socket.setdefaulttimeout(15)
    # One instance per old-style check; note IO/Processes/Cpu/RabbitMq/
    # Cassandra take no logger here, unlike the others
    self._apache = Apache(self.checksLogger)
    self._nginx = Nginx(self.checksLogger)
    self._disk = Disk(self.checksLogger)
    self._io = IO()
    self._load = Load(self.checksLogger)
    self._memory = Memory(self.checksLogger)
    self._network = Network(self.checksLogger)
    self._processes = Processes()
    self._cpu = Cpu()
    self._couchdb = CouchDb(self.checksLogger)
    self._mongodb = MongoDb(self.checksLogger)
    self._mysql = MySql(self.checksLogger)
    self._pgsql = PostgreSql(self.checksLogger)
    self._rabbitmq = RabbitMq()
    self._ganglia = Ganglia(self.checksLogger)
    self._cassandra = Cassandra()
    self._redis = Redis(self.checksLogger)
    self._jvm = Jvm(self.checksLogger)
    self._tomcat = Tomcat(self.checksLogger)
    self._activemq = ActiveMQ(self.checksLogger)
    self._solr = Solr(self.checksLogger)
    self._memcache = Memcache(self.checksLogger)
    self._dogstream = Dogstreams.init(self.checksLogger, self.agentConfig)
    self._ddforwarder = DdForwarder(self.checksLogger, self.agentConfig)
    # All new checks should be metrics checks:
    self._metrics_checks = [
        Cacti(self.checksLogger),
        Redis(self.checksLogger),
        Varnish(self.checksLogger),
        ElasticSearch(self.checksLogger),
    ]
    # Event-style and resource-style checks kept in separate lists
    self._event_checks = [Hudson(), Nagios(socket.gethostname())]
    self._resources_checks = [ResProcesses(self.checksLogger,self.agentConfig)]
    # EC2 metadata helper (used for instance identity/tags)
    self._ec2 = EC2(self.checksLogger)
#
# Checks - FIXME migrating to the new Check interface is a WIP
#
@recordsize
def getApacheStatus(self):
    """Run the Apache check and return its metrics payload."""
    cfg = self.agentConfig
    return self._apache.check(cfg)
@recordsize
def getCouchDBStatus(self):
    """Run the CouchDB check and return its metrics payload."""
    cfg = self.agentConfig
    return self._couchdb.check(cfg)
@recordsize
def getDiskUsage(self):
    """Run the disk usage check and return its metrics payload."""
    cfg = self.agentConfig
    return self._disk.check(cfg)
@recordsize
def getIOStats(self):
    """Run the I/O stats check (takes the logger explicitly) and return its payload."""
    cfg = self.agentConfig
    return self._io.check(self.checksLogger, cfg)
@recordsize
def getLoadAvrgs(self):
    """Run the load-average check and return its metrics payload."""
    cfg = self.agentConfig
    return self._load.check(cfg)
@recordsize
def getMemoryUsage(self):
    """Run the memory usage check and return its metrics payload."""
    cfg = self.agentConfig
    return self._memory.check(cfg)
@recordsize
def getMongoDBStatus(self):
    """Run the MongoDB check and return its metrics payload."""
    cfg = self.agentConfig
    return self._mongodb.check(cfg)
@recordsize
def getMySQLStatus(self):
    """Run the MySQL check and return its metrics payload."""
    cfg = self.agentConfig
    return self._mysql.check(cfg)
@recordsize
def getPgSQLStatus(self):
    """Run the PostgreSQL check and return its metrics payload."""
    cfg = self.agentConfig
    return self._pgsql.check(cfg)
@recordsize
def getNetworkTraffic(self):
    """Run the network traffic check and return its metrics payload."""
    cfg = self.agentConfig
    return self._network.check(cfg)
@recordsize
def getNginxStatus(self):
    """Run the nginx check and return its metrics payload."""
    cfg = self.agentConfig
    return self._nginx.check(cfg)
@recordsize
def getProcesses(self):
    """Run the processes check (takes the logger explicitly) and return its payload."""
    cfg = self.agentConfig
    return self._processes.check(self.checksLogger, cfg)
@recordsize
def getRabbitMQStatus(self):
#.........这里部分代码省略.........
示例10: Collector
class Collector(object):
"""
The collector is responsible for collecting data from each check and
passing it along to the emitters, who send it to their final destination.
"""
def __init__(self, agentConfig, emitters, systemStats, hostname):
    """Wire up the collector: config, push schedules and legacy checks.

    This variant uses common.System for both platforms and has no
    'processes' entry in push_times, unlike the other constructors.

    :param agentConfig: dict of agent settings (also receives system_stats)
    :param emitters: callables that ship collected payloads onward
    :param systemStats: output of config.get_system_stats
    :param hostname: host identifier used when submitting metrics
    """
    self.emit_duration = None
    self.agentConfig = agentConfig
    self.hostname = hostname
    # system stats is generated by config.get_system_stats
    self.agentConfig["system_stats"] = systemStats
    # agent config is used during checks, system_stats can be accessed through the config
    self.os = get_os()
    self.plugins = None
    self.emitters = emitters
    self.check_timings = agentConfig.get("check_timings")
    # Per-task last-push timestamp and push interval (seconds)
    self.push_times = {
        "host_metadata": {"start": time.time(), "interval": int(agentConfig.get("metadata_interval", 4 * 60 * 60))},
        "external_host_tags": {
            "start": time.time() - 3 * 60,  # Wait for the checks to init
            "interval": int(agentConfig.get("external_host_tags", 5 * 60)),
        },
        "agent_checks": {"start": time.time(), "interval": int(agentConfig.get("agent_checks_interval", 10 * 60))},
    }
    # Global socket timeout so a hung check endpoint cannot block forever
    socket.setdefaulttimeout(15)
    self.run_count = 0
    self.continue_running = True
    self.hostname_metadata_cache = None
    self.initialized_checks_d = []
    self.init_failed_checks_d = {}
    # Unix System Checks
    self._unix_system_checks = {
        "io": u.IO(log),
        "load": u.Load(log),
        "memory": u.Memory(log),
        "processes": u.Processes(log),
        "cpu": u.Cpu(log),
        "system": common.System(log),
    }
    # Win32 System Checks
    self._win32_system_checks = {
        "io": w32.IO(log),
        "proc": w32.Processes(log),
        "memory": w32.Memory(log),
        "network": w32.Network(log),
        "cpu": w32.Cpu(log),
        "system": common.System(log),
    }
    # Old-style metric checks
    self._ganglia = Ganglia(log)
    self._dogstream = Dogstreams.init(log, self.agentConfig)
    self._ddforwarder = DdForwarder(log, self.agentConfig)
    # Agent performance metrics check
    self._agent_metrics = None
    self._metrics_checks = []
    # Custom metric checks: comma-separated module specs in 'custom_checks',
    # each expected to expose a 'Check' class taking a logger
    for module_spec in [s.strip() for s in self.agentConfig.get("custom_checks", "").split(",")]:
        if len(module_spec) == 0:
            continue
        try:
            self._metrics_checks.append(modules.load(module_spec, "Check")(log))
            log.info("Registered custom check %s" % module_spec)
            log.warning(
                "Old format custom checks are deprecated. They should be moved to the checks.d interface as old custom checks will be removed in a next version"
            )
        except Exception:
            # Keep going: one broken custom check must not kill the agent
            log.exception("Unable to load custom check module %s" % module_spec)
    # Resource Checks
    self._resources_checks = [ResProcesses(log, self.agentConfig)]
def stop(self):
    """
    Tell the collector to stop at the next logical point.
    """
    # Invoked while the process is being killed: shut down as fast as
    # possible and deliberately skip any final emit -- the forwarder is
    # most likely already dead, so attempting a submit would only leave
    # a misleading error in the logs.
    self.continue_running = False
    for active_check in self.initialized_checks_d:
        active_check.stop()
@staticmethod
def _stats_for_display(raw_stats):
    """Render raw collector stats as an indented, human-readable string."""
    formatted = pprint.pformat(raw_stats, indent=4)
    return formatted
@log_exceptions(log)
def run(self, checksd=None, start_event=True, configs_reloaded=False):
"""
Collect data from each check and submit their data.
#.........这里部分代码省略.........