本文整理汇总了Python中utils.kubernetes.KubeUtil类的典型用法代码示例。如果您正苦于以下问题:Python KubeUtil类的具体用法?Python KubeUtil怎么用?Python KubeUtil使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了KubeUtil类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, agentConfig):
    """Set up the docker-based service-discovery backend.

    Builds the config store client (falling back to auto-config only on
    failure), the docker client, the platform-specific helpers and the
    template-variable mapping used to fill check templates.
    """
    try:
        self.config_store = get_config_store(agentConfig=agentConfig)
    except Exception as e:
        log.error('Failed to instantiate the config store client. '
                  'Auto-config only will be used. %s' % str(e))
        # Drop the backend setting and retry: auto-config needs no backend.
        agentConfig['sd_config_backend'] = None
        self.config_store = get_config_store(agentConfig=agentConfig)

    self.dockerutil = DockerUtil(config_store=self.config_store)
    self.docker_client = self.dockerutil.client

    if Platform.is_k8s():
        # A failing kubelet must not abort backend creation entirely.
        try:
            self.kubeutil = KubeUtil()
        except Exception as ex:
            self.kubeutil = None
            log.error("Couldn't instantiate the kubernetes client, "
                      "subsequent kubernetes calls will fail as well. Error: %s" % str(ex))

    if Platform.is_nomad():
        self.nomadutil = NomadUtil()
    elif Platform.is_ecs_instance():
        self.ecsutil = ECSUtil()

    # Template variables supported by this backend.
    self.VAR_MAPPING = {
        'host': self._get_host_address,
        'port': self._get_port,
        'tags': self._get_additional_tags,
    }
    AbstractSDBackend.__init__(self, agentConfig)
示例2: KubeTestCase
class KubeTestCase(unittest.TestCase):
    """Base test case providing a KubeUtil whose kubelet discovery is mocked."""

    def setUp(self):
        # _locate_kubelet is patched so __init__ never probes the network.
        with patch.object(KubeUtil, '_locate_kubelet', return_value='http://localhost:10255'):
            self.kube = KubeUtil()
            # KubeUtil is a singleton: force re-initialization for each test.
            self.kube.__init__()

    def tearDown(self):
        self.kube = None

    @classmethod
    def _load_json_array(cls, names):
        """Load each named fixture from fixtures/kubeutil and return the parsed objects."""
        fixture_dir = os.path.join(os.path.dirname(__file__), 'fixtures', 'kubeutil')
        loaded = []
        for filename in names:
            with open(os.path.join(fixture_dir, filename)) as data_file:
                loaded.append(json.load(data_file))
        return loaded
示例3: __init__
def __init__(self, name, init_config, agentConfig, instances=None):
    """Initialize the Kubernetes check; only a single configured instance is supported."""
    if instances is not None and len(instances) > 1:
        raise Exception('Kubernetes check only supports one configured instance.')
    AgentCheck.__init__(self, name, init_config, agentConfig, instances)

    single_instance = instances[0] if instances is not None else None
    self.kubeutil = KubeUtil(instance=single_instance)
    # Without a resolvable host we cannot talk to the kubelet at all.
    if not self.kubeutil.host:
        raise Exception('Unable to retrieve Docker hostname and host parameter is not set')
示例4: init
def init(self):
    """One-time configuration of the docker check.

    Builds the docker/kube clients, reads cgroup mountpoints and all tagging
    and collection options from the first instance. On any failure only logs
    and warns, so the check retries at the next iteration; init_success is set
    only when everything went through.
    """
    try:
        instance = self.instances[0]

        self.docker_util = DockerUtil()
        self.docker_client = self.docker_util.client
        self.docker_gateway = DockerUtil.get_gateway()
        if Platform.is_k8s():
            self.kubeutil = KubeUtil()

        # We configure the check with the right cgroup settings for this host.
        # Just needs to be done once.
        self._mountpoints = self.docker_util.get_mountpoints(CGROUP_METRICS)
        self.cgroup_listing_retries = 0
        self._latest_size_query = 0
        self._filtered_containers = set()
        self._disable_net_metrics = False

        # Tagging options.
        self.custom_tags = instance.get("tags", [])
        self.collect_labels_as_tags = instance.get("collect_labels_as_tags", [])
        self.kube_labels = {}
        self.use_histogram = _is_affirmative(instance.get('use_histogram', False))
        self.tag_names = {
            CONTAINER: instance.get("container_tags", DEFAULT_CONTAINER_TAGS),
            PERFORMANCE: instance.get("performance_tags", DEFAULT_PERFORMANCE_TAGS),
            IMAGE: instance.get('image_tags', DEFAULT_IMAGE_TAGS),
        }

        # Filtering settings.
        if self.docker_util.filtering_enabled:
            self.tag_names[FILTERED] = self.docker_util.filtered_tag_names

        # Other collection options.
        self.collect_image_stats = _is_affirmative(instance.get('collect_images_stats', False))
        self.collect_container_size = _is_affirmative(instance.get('collect_container_size', False))
        self.collect_events = _is_affirmative(instance.get('collect_events', True))
        self.collect_image_size = _is_affirmative(instance.get('collect_image_size', False))
        self.collect_disk_stats = _is_affirmative(instance.get('collect_disk_stats', False))
        self.collect_ecs_tags = _is_affirmative(instance.get('ecs_tags', True)) and Platform.is_ecs_instance()
        self.ecs_tags = {}
    except Exception as e:
        self.log.critical(e)
        self.warning("Initialization failed. Will retry at next iteration")
    else:
        self.init_success = True
示例5: __init__
def __init__(self, name, init_config, agentConfig, instances=None):
    """Initialize the Kubernetes check (single instance only) and compile the
    optional namespace filter regexp."""
    if instances is not None and len(instances) > 1:
        raise Exception('Kubernetes check only supports one configured instance.')
    AgentCheck.__init__(self, name, init_config, agentConfig, instances)

    inst = instances[0] if instances is not None else None
    self.kubeutil = KubeUtil(instance=inst)
    if not self.kubeutil.host:
        raise Exception('Unable to retrieve Docker hostname and host parameter is not set')

    # A bad user-supplied regexp is ignored rather than fatal.
    self.k8s_namespace_regexp = None
    regexp = inst.get('namespace_name_regexp', None) if inst else None
    if regexp:
        try:
            self.k8s_namespace_regexp = re.compile(regexp)
        except re.error as e:
            self.log.warning('Invalid regexp for "namespace_name_regexp" in configuration (ignoring regexp): %s' % str(e))
示例6: __init__
def __init__(self, agentConfig):
    """Set up the docker-based SD backend: config store, docker client,
    optional kube client and the template-variable mapping.

    :param agentConfig: agent configuration dict; 'sd_config_backend' is
        cleared in place if the config store cannot be created.
    """
    try:
        self.config_store = get_config_store(agentConfig=agentConfig)
    except Exception as e:
        log.error('Failed to instantiate the config store client. '
                  'Auto-config only will be used. %s' % str(e))
        agentConfig['sd_config_backend'] = None
        self.config_store = get_config_store(agentConfig=agentConfig)
    self.docker_client = DockerUtil(config_store=self.config_store).client
    # Default so the attribute always exists, even off-k8s or after a failed init.
    self.kubeutil = None
    if Platform.is_k8s():
        # Fix: guard KubeUtil creation like the sibling SD backends do, so a
        # kubelet hiccup degrades k8s features instead of crashing backend init.
        try:
            self.kubeutil = KubeUtil()
        except Exception as ex:
            log.error("Couldn't instantiate the kubernetes client, "
                      "subsequent kubernetes calls will fail as well. Error: %s" % str(ex))
    # Template variables supported by this backend.
    self.VAR_MAPPING = {
        'host': self._get_host_address,
        'port': self._get_port,
        'tags': self._get_additional_tags,
    }
    AbstractSDBackend.__init__(self, agentConfig)
示例7: __init__
def __init__(self, name, init_config, agentConfig, instances=None):
    """Initialize the check: kubelet client, optional SD backend, namespace
    filter and the event retriever (full events, service-mapping events, or none)."""
    if instances is not None and len(instances) > 1:
        raise Exception('Kubernetes check only supports one configured instance.')
    AgentCheck.__init__(self, name, init_config, agentConfig, instances)

    inst = instances[0] if instances is not None else None
    self.kubeutil = KubeUtil(instance=inst)
    if not self.kubeutil.kubelet_api_url:
        raise Exception('Unable to reach kubelet. Try setting the host parameter.')

    # Service discovery must be enabled AND docker-backed to be usable here.
    sd_enabled = (agentConfig.get('service_discovery') and
                  agentConfig.get('service_discovery_backend') == 'docker')
    self._sd_backend = get_sd_backend(agentConfig) if sd_enabled else None

    self.k8s_namespace_regexp = None
    if inst:
        regexp = inst.get('namespace_name_regexp', None)
        if regexp:
            try:
                self.k8s_namespace_regexp = re.compile(regexp)
            except re.error as e:
                self.log.warning('Invalid regexp for "namespace_name_regexp" in configuration (ignoring regexp): %s' % str(e))

        self._collect_events = _is_affirmative(inst.get('collect_events', DEFAULT_COLLECT_EVENTS))
        if self._collect_events:
            self.event_retriever = self.kubeutil.get_event_retriever()
        elif self.kubeutil.collect_service_tag:
            # Only fetch service and pod events for service mapping
            event_delay = inst.get('service_tag_update_freq', DEFAULT_SERVICE_EVENT_FREQ)
            self.event_retriever = self.kubeutil.get_event_retriever(kinds=['Service', 'Pod'],
                                                                     delay=event_delay)
        else:
            self.event_retriever = None
    else:
        self._collect_events = None
        self.event_retriever = None
示例8: __init__
def __init__(self, agentConfig):
    """Set up the docker-based SD backend: config store, docker/kube clients,
    metadata collector, template-variable mapping and label-to-tag options."""
    try:
        self.config_store = get_config_store(agentConfig=agentConfig)
    except Exception as e:
        log.error('Failed to instantiate the config store client. '
                  'Auto-config only will be used. %s' % str(e))
        # Retry with no backend: auto-config alone still works.
        agentConfig['sd_config_backend'] = None
        self.config_store = get_config_store(agentConfig=agentConfig)

    self.dockerutil = DockerUtil(config_store=self.config_store)

    self.kubeutil = None
    if Platform.is_k8s():
        # A failing kubelet must not abort backend creation entirely.
        try:
            self.kubeutil = KubeUtil()
        except Exception as ex:
            log.error("Couldn't instantiate the kubernetes client, "
                      "subsequent kubernetes calls will fail as well. Error: %s" % str(ex))

    self.metadata_collector = MetadataCollector()

    # Template variables supported by this backend.
    self.VAR_MAPPING = {
        'host': self._get_host_address,
        'pid': self._get_container_pid,
        'port': self._get_port,
        'container-name': self._get_container_name,
        'tags': self._get_additional_tags,
    }

    # docker labels we'll add as tags to all instances SD configures
    raw_labels = agentConfig.get('docker_labels_as_tags', '')
    if raw_labels:
        self.docker_labels_as_tags = [label.strip() for label in raw_labels.split(',')]
    else:
        self.docker_labels_as_tags = []

    AbstractSDBackend.__init__(self, agentConfig)
示例9: SDDockerBackend
class SDDockerBackend(AbstractSDBackend):
"""Docker-based service discovery"""
def __init__(self, agentConfig):
    """Create the backend: config store (with auto-config fallback), docker
    client, platform helpers and the template-variable dispatch table."""
    try:
        self.config_store = get_config_store(agentConfig=agentConfig)
    except Exception as e:
        log.error('Failed to instantiate the config store client. '
                  'Auto-config only will be used. %s' % str(e))
        agentConfig['sd_config_backend'] = None
        self.config_store = get_config_store(agentConfig=agentConfig)

    self.dockerutil = DockerUtil(config_store=self.config_store)
    self.docker_client = self.dockerutil.client

    if Platform.is_k8s():
        try:
            self.kubeutil = KubeUtil()
        except Exception as ex:
            # Degrade gracefully: k8s lookups will fail but the backend works.
            self.kubeutil = None
            log.error("Couldn't instantiate the kubernetes client, "
                      "subsequent kubernetes calls will fail as well. Error: %s" % str(ex))

    if Platform.is_nomad():
        self.nomadutil = NomadUtil()
    elif Platform.is_ecs_instance():
        self.ecsutil = ECSUtil()

    self.VAR_MAPPING = {
        'host': self._get_host_address,
        'port': self._get_port,
        'tags': self._get_additional_tags,
    }
    AbstractSDBackend.__init__(self, agentConfig)
def _make_fetch_state(self):
    """Snapshot the data needed to resolve check configs: a container
    inspector plus the current pod list (empty outside k8s or on failure)."""
    pod_list = []
    if Platform.is_k8s():
        if self.kubeutil:
            try:
                pod_list = self.kubeutil.retrieve_pods_list().get('items', [])
            except Exception as ex:
                log.warning("Failed to retrieve pod list: %s" % str(ex))
        else:
            log.error("kubelet client not created, cannot retrieve pod list.")
    return _SDDockerBackendConfigFetchState(self.docker_client.inspect_container, pod_list)
def update_checks(self, changed_containers):
    """Mark for reload every check config touched by one of the changed containers."""
    state = self._make_fetch_state()
    if Platform.is_k8s():
        # Refresh the kube services cache before resolving checks against it.
        self.kubeutil.check_services_cache_freshness()

    to_reload = set()
    for c_id in changed_containers:
        checks = self._get_checks_to_refresh(state, c_id)
        if checks:
            to_reload.update(set(checks))

    if to_reload:
        self.reload_check_configs = to_reload
def _get_checks_to_refresh(self, state, c_id):
    """Get the list of checks applied to a container from the identifier_to_checks cache in the config store.
    Use the DATADOG_ID label or the image."""
    inspect = state.inspect_container(c_id)

    # If the container was removed we can't tell which check is concerned
    # so we have to reload everything.
    # Same thing if it's stopped and we're on Kubernetes in auto_conf mode
    # because the pod was deleted and its template could have been in the annotations.
    running = inspect and inspect.get('State', {}).get('Running')
    if not inspect or (not running and Platform.is_k8s()
                       and not self.agentConfig.get('sd_config_backend')):
        self.reload_check_configs = True
        return

    identifier = (inspect.get('Config', {}).get('Labels', {}).get(DATADOG_ID)
                  or self.dockerutil.image_name_extractor(inspect))

    platform_kwargs = {}
    if Platform.is_k8s():
        kube_metadata = state.get_kube_config(c_id, 'metadata') or {}
        platform_kwargs = {
            'kube_annotations': kube_metadata.get('annotations'),
            'kube_container_name': state.get_kube_container_name(c_id),
        }
    return self.config_store.get_checks_to_refresh(identifier, **platform_kwargs)
def _get_host_address(self, state, c_id, tpl_var):
"""Extract the container IP from a docker inspect object, or the kubelet API."""
c_inspect = state.inspect_container(c_id)
c_id, c_img = c_inspect.get('Id', ''), c_inspect.get('Config', {}).get('Image', '')
networks = c_inspect.get('NetworkSettings', {}).get('Networks') or {}
ip_dict = {}
for net_name, net_desc in networks.iteritems():
ip = net_desc.get('IPAddress')
if ip:
#.........这里部分代码省略.........
示例10: setUp
def setUp(self):
    """Give each test a fresh KubeUtil instance."""
    self.kubeutil = KubeUtil()
示例11: test_get_auth_token
def test_get_auth_token(self):
    """get_auth_token returns None for a missing token file and a value otherwise."""
    # Non-existent path: no token can be read.
    KubeUtil.AUTH_TOKEN_PATH = '/foo/bar'
    self.assertIsNone(KubeUtil.get_auth_token())
    # Any readable file does the trick: only file readability matters here.
    KubeUtil.AUTH_TOKEN_PATH = Fixtures.file('events.json')
    self.assertIsNotNone(KubeUtil.get_auth_token())
示例12: SDDockerBackend
class SDDockerBackend(AbstractSDBackend):
"""Docker-based service discovery"""
def __init__(self, agentConfig):
    """Set up the docker-based SD backend: config store (with auto-config
    fallback), docker client, optional kube client and the template-variable
    mapping.

    :param agentConfig: agent configuration dict; 'sd_config_backend' is
        cleared in place if the config store cannot be created.
    """
    try:
        self.config_store = get_config_store(agentConfig=agentConfig)
    except Exception as e:
        log.error('Failed to instantiate the config store client. '
                  'Auto-config only will be used. %s' % str(e))
        agentConfig['sd_config_backend'] = None
        self.config_store = get_config_store(agentConfig=agentConfig)
    self.docker_client = DockerUtil(config_store=self.config_store).client
    # Default so the attribute always exists, even off-k8s or after a failed init.
    self.kubeutil = None
    if Platform.is_k8s():
        # Fix: guard KubeUtil creation like the sibling SD backends do, so a
        # kubelet hiccup degrades k8s features instead of crashing backend init.
        try:
            self.kubeutil = KubeUtil()
        except Exception as ex:
            log.error("Couldn't instantiate the kubernetes client, "
                      "subsequent kubernetes calls will fail as well. Error: %s" % str(ex))
    # Template variables supported by this backend.
    self.VAR_MAPPING = {
        'host': self._get_host_address,
        'port': self._get_port,
        'tags': self._get_additional_tags,
    }
    AbstractSDBackend.__init__(self, agentConfig)
def update_checks(self, changed_containers):
    """Recompute which check configs need reloading after container changes."""
    to_reload = set()
    for id_ in changed_containers:
        try:
            inspect = self.docker_client.inspect_container(id_)
        except (NullResource, NotFound):
            # if the container was removed we can't tell which check is concerned
            # so we have to reload everything
            self.reload_check_configs = True
            return
        to_reload.update(set(self._get_checks_from_inspect(inspect)))

    if to_reload:
        self.reload_check_configs = to_reload
def _get_checks_from_inspect(self, inspect):
    """Get the list of checks applied to a container from the identifier_to_checks cache in the config store.
    Use the DATADOG_ID label or the image."""
    config = inspect.get('Config', {})
    identifier = config.get('Labels', {}).get(DATADOG_ID) or config.get('Image')

    annotations = None
    if Platform.is_k8s():
        # Pod annotations may carry a check template on k8s.
        annotations = (self._get_kube_config(inspect.get('Id'), 'metadata') or {}).get('annotations')
    return self.config_store.get_checks_to_refresh(identifier, kube_annotations=annotations)
def _get_host_address(self, c_inspect, tpl_var):
    """Extract the container IP from a docker inspect object, or the kubelet API."""
    c_id = c_inspect.get('Id', '')
    c_img = c_inspect.get('Config', {}).get('Image', '')

    # 1. Look through the per-network IPs first.
    networks = c_inspect.get('NetworkSettings', {}).get('Networks') or {}
    ip_dict = {}
    for net_name, net_desc in networks.iteritems():
        ip = net_desc.get('IPAddress')
        if ip:
            ip_dict[net_name] = ip
    ip_addr = self._extract_ip_from_networks(ip_dict, tpl_var)
    if ip_addr:
        return ip_addr

    # 2. try to get the bridge (default) IP address
    log.debug("No IP address was found in container %s (%s) "
              "networks, trying with the IPAddress field" % (c_id[:12], c_img))
    ip_addr = c_inspect.get('NetworkSettings', {}).get('IPAddress')
    if ip_addr:
        return ip_addr

    # 3. kubernetes case: match the container to a pod and use the pod IP.
    if Platform.is_k8s():
        log.debug("Couldn't find the IP address for container %s (%s), "
                  "using the kubernetes way." % (c_id[:12], c_img))
        pod_list = self.kubeutil.retrieve_pods_list().get('items', [])
        for pod in pod_list:
            pod_ip = pod.get('status', {}).get('podIP')
            if pod_ip is None:
                continue
            # compare the container id with those of containers in the current pod
            for status in pod.get('status', {}).get('containerStatuses', []):
                if c_id == status.get('containerID', '').split('//')[-1]:
                    return pod_ip

    log.error("No IP address was found for container %s (%s)" % (c_id[:12], c_img))
    return None
def _extract_ip_from_networks(self, ip_dict, tpl_var):
"""Extract a single IP from a dictionary made of network names and IPs."""
if not ip_dict:
return None
tpl_parts = tpl_var.split('_', 1)
# no specifier
if len(tpl_parts) < 2:
log.warning("No key was passed for template variable %s." % tpl_var)
return self._get_fallback_ip(ip_dict)
#.........这里部分代码省略.........
示例13: TestKubeutil
class TestKubeutil(unittest.TestCase):
def setUp(self):
    """Create a fresh KubeUtil for each test."""
    self.kubeutil = KubeUtil()
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list', side_effect=['foo'])
@mock.patch('utils.kubernetes.KubeUtil.extract_kube_labels')
def test_get_kube_labels(self, extract_kube_labels, retrieve_pods_list):
    """get_kube_labels fetches the pod list once and forwards it to extract_kube_labels."""
    self.kubeutil.get_kube_labels(excluded_keys='bar')
    retrieve_pods_list.assert_called_once()
    extract_kube_labels.assert_called_once_with('foo', excluded_keys='bar')
def test_extract_kube_labels(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    # Empty payload yields no labels.
    res = self.kubeutil.extract_kube_labels({}, ['foo'])
    self.assertEqual(len(res), 0)

    # (fixture, excluded keys, expected distinct label count)
    cases = [
        ("pods_list_1.1.json", ['foo'], 8),
        ("pods_list_1.1.json", ['k8s-app'], 6),
        ("pods_list_1.2.json", ['foo'], 3),
        ("pods_list_1.2.json", ['k8s-app'], 3),
    ]
    for fixture, excluded, expected in cases:
        pods = json.loads(Fixtures.read_file(fixture, string_escape=False))
        res = self.kubeutil.extract_kube_labels(pods, excluded)
        labels = set(inn for out in res.values() for inn in out)
        self.assertEqual(len(labels), expected)
def test_extract_meta(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    # Empty payload yields nothing.
    res = self.kubeutil.extract_meta({}, 'foo')
    self.assertEqual(len(res), 0)

    # (fixture, metadata field, expected count)
    cases = [
        ("pods_list_1.1.json", 'foo', 0),
        ("pods_list_1.1.json", 'uid', 6),
        ("pods_list_1.2.json", 'foo', 0),
        ("pods_list_1.2.json", 'uid', 4),
    ]
    for fixture, field, expected in cases:
        pods = json.loads(Fixtures.read_file(fixture, string_escape=False))
        self.assertEqual(len(self.kubeutil.extract_meta(pods, field)), expected)
@mock.patch('utils.kubernetes.kubeutil.retrieve_json')
def test_retrieve_pods_list(self, retrieve_json):
    """The pod list is fetched from the kubelet pods endpoint."""
    self.kubeutil.retrieve_pods_list()
    retrieve_json.assert_called_once_with(self.kubeutil.pods_list_url)
@mock.patch('utils.kubernetes.kubeutil.retrieve_json')
def test_retrieve_machine_info(self, retrieve_json):
    """Machine info is fetched from the machine-info endpoint."""
    self.kubeutil.retrieve_machine_info()
    retrieve_json.assert_called_once_with(self.kubeutil.machine_info_url)
@mock.patch('utils.kubernetes.kubeutil.retrieve_json')
def test_retrieve_metrics(self, retrieve_json):
    """Metrics are fetched from the metrics endpoint."""
    self.kubeutil.retrieve_metrics()
    retrieve_json.assert_called_once_with(self.kubeutil.metrics_url)
def test_filter_pods_list(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    # Empty payload: filter still returns an (empty) items list.
    res = self.kubeutil.filter_pods_list({}, 'foo')
    self.assertEqual(len(res.get('items')), 0)

    # (fixture, host IP to filter on, expected pod count)
    cases = [
        ("pods_list_1.1.json", '10.240.0.9', 5),
        ("pods_list_1.1.json", 'foo', 0),
        ("pods_list_1.2.json", '10.240.0.5', 1),
        ("pods_list_1.2.json", 'foo', 0),
    ]
    for fixture, host_ip, expected in cases:
        pods = json.loads(Fixtures.read_file(fixture, string_escape=False))
        res = self.kubeutil.filter_pods_list(pods, host_ip)
        self.assertEqual(len(res.get('items')), expected)
@mock.patch('utils.kubernetes.kubeutil.requests')
def test_retrieve_json_auth(self, r):
    """The bearer token is always sent; TLS verification follows CA_CRT_PATH."""
    # No CA cert configured: verification disabled.
    self.kubeutil.retrieve_json_auth('url', 'foo_tok')
    r.get.assert_called_once_with('url', verify=False, timeout=10, headers={'Authorization': 'Bearer foo_tok'})
    # With a CA cert path, it is used as the verify argument.
    self.kubeutil.CA_CRT_PATH = __file__
    self.kubeutil.retrieve_json_auth('url', 'foo_tok')
    r.get.assert_called_with('url', verify=__file__, timeout=10, headers={'Authorization': 'Bearer foo_tok'})
#.........这里部分代码省略.........
示例14: init
def init(self):
    """One-time configuration of the docker check.

    Builds the docker/kube clients and the metadata collector, reads cgroup
    mountpoints, then loads all tagging, filtering, health-check-whitelist and
    collection options from the first instance. On any failure it only logs
    and warns so initialization is retried at the next iteration; init_success
    is set only when everything went through.
    """
    try:
        instance = self.instances[0]

        self.docker_util = DockerUtil()
        self.docker_client = self.docker_util.client
        self.docker_gateway = DockerUtil.get_gateway()
        self.metadata_collector = MetadataCollector()

        if Platform.is_k8s():
            # Degrade gracefully if the kubelet is unreachable.
            try:
                self.kubeutil = KubeUtil()
            except Exception as ex:
                self.kubeutil = None
                self.log.error("Couldn't instantiate the kubernetes client, "
                               "subsequent kubernetes calls will fail as well. Error: %s" % str(ex))

        # We configure the check with the right cgroup settings for this host.
        # Just needs to be done once.
        self._mountpoints = self.docker_util.get_mountpoints(CGROUP_METRICS)
        self._latest_size_query = 0
        self._filtered_containers = set()
        self._disable_net_metrics = False

        # Tagging options.
        self.custom_tags = instance.get("tags", [])
        self.collect_labels_as_tags = instance.get("collect_labels_as_tags", DEFAULT_LABELS_AS_TAGS)
        self.kube_pod_tags = {}
        self.use_histogram = _is_affirmative(instance.get('use_histogram', False))
        self.tag_names = {
            CONTAINER: instance.get("container_tags", DEFAULT_CONTAINER_TAGS),
            PERFORMANCE: instance.get("performance_tags", DEFAULT_PERFORMANCE_TAGS),
            IMAGE: instance.get('image_tags', DEFAULT_IMAGE_TAGS),
        }

        # Filtering settings.
        if self.docker_util.filtering_enabled:
            self.tag_names[FILTERED] = self.docker_util.filtered_tag_names

        # Container network mapping cache.
        self.network_mappings = {}

        # Health check whitelist: compiled patterns plus the tags they expose.
        self.whitelist_patterns = None
        health_scs_whitelist = instance.get('health_service_check_whitelist', [])
        if health_scs_whitelist:
            patterns, whitelist_tags = compile_filter_rules(health_scs_whitelist)
            self.whitelist_patterns = set(patterns)
            self.tag_names[HEALTHCHECK] = set(whitelist_tags)

        # Other collection options.
        self.collect_image_stats = _is_affirmative(instance.get('collect_images_stats', False))
        self.collect_container_size = _is_affirmative(instance.get('collect_container_size', False))
        self.collect_container_count = _is_affirmative(instance.get('collect_container_count', False))
        self.collect_volume_count = _is_affirmative(instance.get('collect_volume_count', False))
        self.collect_events = _is_affirmative(instance.get('collect_events', True))
        self.event_attributes_as_tags = instance.get('event_attributes_as_tags', [])
        self.collect_image_size = _is_affirmative(instance.get('collect_image_size', False))
        self.collect_disk_stats = _is_affirmative(instance.get('collect_disk_stats', False))
        self.collect_exit_codes = _is_affirmative(instance.get('collect_exit_codes', False))
        self.collect_ecs_tags = _is_affirmative(instance.get('ecs_tags', True)) and Platform.is_ecs_instance()
        self.capped_metrics = instance.get('capped_metrics')
    except Exception as e:
        self.log.critical(e)
        self.warning("Initialization failed. Will retry at next iteration")
    else:
        self.init_success = True
示例15: Kubernetes
class Kubernetes(AgentCheck):
""" Collect metrics and events from kubelet """
pod_names_by_container = {}
def __init__(self, name, init_config, agentConfig, instances=None):
    """Initialize the Kubernetes check (single instance only) and compile the
    optional namespace filter regexp."""
    if instances is not None and len(instances) > 1:
        raise Exception('Kubernetes check only supports one configured instance.')
    AgentCheck.__init__(self, name, init_config, agentConfig, instances)

    inst = instances[0] if instances is not None else None
    self.kubeutil = KubeUtil(instance=inst)
    if not self.kubeutil.host:
        raise Exception('Unable to retrieve Docker hostname and host parameter is not set')

    # A bad user-supplied regexp is ignored rather than fatal.
    self.k8s_namespace_regexp = None
    regexp = inst.get('namespace_name_regexp', None) if inst else None
    if regexp:
        try:
            self.k8s_namespace_regexp = re.compile(regexp)
        except re.error as e:
            self.log.warning('Invalid regexp for "namespace_name_regexp" in configuration (ignoring regexp): %s' % str(e))
def _perform_kubelet_checks(self, url):
    """Query the kubelet health endpoint at `url` (verbose mode) and emit one
    service check per reported component, plus an overall kubelet.check status.

    :param url: kubelet health URL (e.g. the /healthz endpoint).
    """
    service_check_base = NAMESPACE + '.kubelet.check'
    is_ok = True
    try:
        r = requests.get(url, params={'verbose': True})
        for line in r.iter_lines():
            # avoid noise; this check is expected to fail since we override the container hostname
            if 'hostname' in line:
                continue
            # Fix: raw string for the regex — '\[' and '\s' are invalid string
            # escapes and raise DeprecationWarning on modern Pythons.
            # Lines look like "[+]componentName ok" or "[-]componentName failed".
            matches = re.match(r'\[(.)\]([^\s]+) (.*)?', line)
            if not matches or len(matches.groups()) < 2:
                continue
            service_check_name = service_check_base + '.' + matches.group(2)
            status = matches.group(1)
            if status == '+':
                self.service_check(service_check_name, AgentCheck.OK)
            else:
                self.service_check(service_check_name, AgentCheck.CRITICAL)
                is_ok = False
    except Exception as e:
        self.log.warning('kubelet check %s failed: %s' % (url, str(e)))
        self.service_check(service_check_base, AgentCheck.CRITICAL,
                           message='Kubelet check %s failed: %s' % (url, str(e)))
    else:
        # Overall status reflects whether every component reported healthy.
        if is_ok:
            self.service_check(service_check_base, AgentCheck.OK)
        else:
            self.service_check(service_check_base, AgentCheck.CRITICAL)
def check(self, instance):
    """Run one collection cycle: kubelet health checks, metrics and
    (optionally) events for this instance."""
    self.max_depth = instance.get('max_depth', DEFAULT_MAX_DEPTH)
    gauges = instance.get('enabled_gauges', DEFAULT_ENABLED_GAUGES)
    self.enabled_gauges = ["{0}.{1}".format(NAMESPACE, x) for x in gauges]
    rates = instance.get('enabled_rates', DEFAULT_ENABLED_RATES)
    self.enabled_rates = ["{0}.{1}".format(NAMESPACE, x) for x in rates]

    self.publish_aliases = _is_affirmative(instance.get('publish_aliases', DEFAULT_PUBLISH_ALIASES))
    self.use_histogram = _is_affirmative(instance.get('use_histogram', DEFAULT_USE_HISTOGRAM))
    # Pick histogram or plain publishers once per run.
    self.publish_rate = FUNC_MAP[RATE][self.use_histogram]
    self.publish_gauge = FUNC_MAP[GAUGE][self.use_histogram]
    # initialized by _filter_containers
    self._filtered_containers = set()

    pods_list = self.kubeutil.retrieve_pods_list()

    # kubelet health checks
    self._perform_kubelet_checks(self.kubeutil.kube_health_url)
    # kubelet metrics
    self._update_metrics(instance, pods_list)
    # kubelet events
    if _is_affirmative(instance.get('collect_events', DEFAULT_COLLECT_EVENTS)):
        try:
            self._process_events(instance, pods_list)
        except Exception as ex:
            self.log.error("Event collection failed: %s" % str(ex))
def _publish_raw_metrics(self, metric, dat, tags, depth=0):
if depth >= self.max_depth:
self.log.warning('Reached max depth on metric=%s' % metric)
return
if isinstance(dat, numbers.Number):
if self.enabled_rates and any([fnmatch(metric, pat) for pat in self.enabled_rates]):
self.publish_rate(self, metric, float(dat), tags)
elif self.enabled_gauges and any([fnmatch(metric, pat) for pat in self.enabled_gauges]):
self.publish_gauge(self, metric, float(dat), tags)
elif isinstance(dat, dict):
#.........这里部分代码省略.........