This article collects typical usage examples of the ApiResource.get_host method from the Python module cm_api.api_client. If you are wondering exactly how to use ApiResource.get_host in Python, or what it looks like in practice, the curated code samples below may help. You can also explore the containing class, cm_api.api_client.ApiResource, for further details.
The following shows 8 code examples of ApiResource.get_host, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
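Before the examples, here is a minimal sketch of the call pattern they all share: instantiate ApiResource against a Cloudera Manager host and resolve a hostId into a full host record with get_host. The hostname and credentials below are placeholders, not values taken from any example on this page.

from cm_api.api_client import ApiResource

# Placeholder CM endpoint and credentials -- substitute your own deployment's values.
api = ApiResource("cm.example.com", username="admin", password="admin")

for summary in api.get_all_hosts():
    # get_host(hostId) fetches the full ApiHost record; the examples below use the same
    # call to resolve a hostId (e.g. role.hostRef.hostId) into a hostname or IP address.
    host = api.get_host(summary.hostId)
    print(host.hostname)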
Example 1: ImpalaCluster

# Module required: from cm_api.api_client import ApiResource [as alias]
# Alternatively: from cm_api.api_client.ApiResource import get_host [as alias]
class ImpalaCluster(object):
    def __init__(self, cm_host, cm_cluster_name, username, password):
        self.cm_api = ApiResource(cm_host, username=username, password=password)
        self.hosts = dict()
        self.services = list()
        self.cluster = self.cm_api.get_cluster(cm_cluster_name)
        if self.cluster is None:
            raise RuntimeError('Cluster name "%s" not found' % cm_cluster_name)
        # Keep the cluster name around for host filtering in __load_hosts().
        self.cluster_name = cm_cluster_name
        self.__load_hosts()
        self.__impala_service = ImpalaService(self)

    def _get_all_services(self):
        return self.cluster.get_all_services()

    def get_impala_service(self):
        return self.__impala_service

    def __load_hosts(self):
        self.hosts = dict()
        # Search for all hosts that are in the target cluster.
        # There is no API that provides the list of hosts in a given cluster, so to find
        # them we must loop through all the hosts and check that the cluster name matches.
        for host_info in self.cm_api.get_all_hosts():
            # host_info doesn't include a link to the roleRef, so we need to do another
            # lookup based on the hostId.
            host = self.cm_api.get_host(host_info.hostId)
            for roleRef in host.roleRefs:
                if roleRef.get('clusterName') == self.cluster_name:
                    self.hosts[host_info.hostId] = Host(host)
                    break
Example 2: get_cluster_info

# Module required: from cm_api.api_client import ApiResource [as alias]
# Alternatively: from cm_api.api_client.ApiResource import get_host [as alias]
def get_cluster_info(manager_host, server_port=7180, username='admin',
                     password='admin'):
    cm_api = ApiResource(manager_host, username=username, password=password,
                         server_port=server_port, version=9)
    host = list(cm_api.get_all_hosts())[0]  # all hosts have the same instance type
    cluster = list(cm_api.get_all_clusters())[0]
    yarn = filter(lambda x: x.type == 'YARN',
                  list(cluster.get_all_services()))[0]
    hive = filter(lambda x: x.type == 'HIVE',
                  list(cluster.get_all_services()))[0]
    impala = filter(lambda x: x.type == 'IMPALA',
                    list(cluster.get_all_services()))[0]
    hive_hs2 = hive.get_roles_by_type('HIVESERVER2')[0]
    hive_host = cm_api.get_host(hive_hs2.hostRef.hostId).hostname
    hive_port = int(
        hive_hs2.get_config('full')['hs2_thrift_address_port'].default)
    impala_hs2 = impala.get_roles_by_type('IMPALAD')[0]
    impala_host = cm_api.get_host(impala_hs2.hostRef.hostId).hostname
    impala_port = int(impala_hs2.get_config('full')['hs2_port'].default)
    return {'num_worker_nodes': len(yarn.get_roles_by_type('NODEMANAGER')),
            'node_cores': host.numCores, 'node_memory': host.totalPhysMemBytes,
            'hive_host': hive_host, 'hive_port': hive_port,
            'impala_host': impala_host, 'impala_port': impala_port}
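A hypothetical call to the function above might look like the following (the Cloudera Manager hostname is a placeholder); the returned dict bundles the worker-node sizing together with the HiveServer2 and Impala daemon endpoints:

info = get_cluster_info('cm.example.com')
print(info['hive_host'], info['hive_port'])
print(info['impala_host'], info['impala_port'])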
Example 3: CmCluster

# Module required: from cm_api.api_client import ApiResource [as alias]
# Alternatively: from cm_api.api_client.ApiResource import get_host [as alias]
#......... part of the code omitted here .........
        if cluster_name:
            clusters_by_name = dict((c.name, c) for c in clusters)
            if cluster_name not in clusters_by_name:
                raise Exception(("No clusters named %s found in CM at %s."
                    " Available clusters are %s.")
                    % (cluster_name, host_name, ", ".join(sorted(clusters_by_name.keys()))))
            self.cm_cluster = clusters_by_name[cluster_name]
        else:
            if len(clusters) > 1:
                raise Exception(("Too many clusters found in CM at %s;"
                    " a cluster name must be provided")
                    % host_name)
            self.cm_cluster = clusters[-1]

        self.ssh_user = ssh_user
        self.ssh_port = ssh_port
        self.ssh_key_file = ssh_key_file
        self._ssh_client_lock = Lock()
        self._ssh_clients_by_host_name = defaultdict(list)

    def shell(self, cmd, host_name, timeout_secs=DEFAULT_TIMEOUT):
        with self._ssh_client(host_name) as client:
            return client.shell(cmd, timeout_secs=timeout_secs)

    @contextmanager
    def _ssh_client(self, host_name):
        """Returns an SSH client for use in a 'with' block. When the 'with' context exits,
        the client will be kept for reuse.
        """
        with self._ssh_client_lock:
            clients = self._ssh_clients_by_host_name[host_name]
            if clients:
                client = clients.pop()
            else:
                # IMPALA-7460: Insulate this import away from the global context so as to
                # avoid requiring Paramiko unless it's absolutely needed.
                from tests.util.ssh_util import SshClient
                LOG.debug("Creating new SSH client for %s", host_name)
                client = SshClient()
                client.connect(host_name, username=self.ssh_user, key_filename=self.ssh_key_file)
        error_occurred = False
        try:
            yield client
        except Exception:
            error_occurred = True
            raise
        finally:
            if not error_occurred:
                with self._ssh_client_lock:
                    self._ssh_clients_by_host_name[host_name].append(client)

    def _init_local_hadoop_conf_dir(self):
        self._local_hadoop_conf_dir = mkdtemp()
        data = StringIO(self.cm.get("/clusters/%s/services/%s/clientConfig"
            % (self.cm_cluster.name, self._find_service("HIVE").name)))
        zip_file = ZipFile(data)
        for name in zip_file.namelist():
            if name.endswith("/"):
                continue
            extract_path = os.path.join(self._local_hadoop_conf_dir, os.path.basename(name))
            with open(extract_path, "w") as conf_file:
                conf_file.write(zip_file.open(name).read())

    def _find_service(self, service_type):
        """Find a service by its CM API service type. An exception will be raised if no
        service is found or multiple services are found. See the CM API documentation for
        more details about the service type.
        """
        services = [s for s in self.cm_cluster.get_all_services() if s.type == service_type]
        if not services:
            raise Exception("No service of type %s found in cluster %s"
                % (service_type, self.cm_cluster.name))
        if len(services) > 1:
            raise Exception("Found %s services in cluster %s; only one is expected."
                % (len(services), self.cm_cluster.name))
        return services[0]

    def _find_role(self, role_type, service_type):
        """Find a role by its CM API role and service type. An exception will be raised if
        no roles are found. See the CM API documentation for more details about the
        service and role types.
        """
        service = self._find_service(service_type)
        roles = service.get_roles_by_type(role_type)
        if not roles:
            raise Exception("No roles of type %s found in service %s"
                % (role_type, service.name))
        return roles[0]

    def _init_hdfs(self):
        self._hdfs = Hdfs(self, "hdfs")

    def _init_hive(self):
        hs2 = self._find_role("HIVESERVER2", "HIVE")
        host = self.cm.get_host(hs2.hostRef.hostId)
        config = hs2.get_config(view="full")["hs2_thrift_address_port"]
        self._hive = Hive(self, str(host.hostname), int(config.value or config.default))

    def _init_impala(self):
        self._impala = CmImpala(self, self._find_service("IMPALA"))
Example 4: ApiResource

# Module required: from cm_api.api_client import ApiResource [as alias]
# Alternatively: from cm_api.api_client.ApiResource import get_host [as alias]
cm_host = "localhost"
api = ApiResource(cm_host, username="cloudera", password="cloudera")

print "*** CLUSTERS ***"
clusters = None
# List clusters
for c in api.get_all_clusters():
    print "Cluster \"%s\" is version %s" % (c.name, c.version)
    clusters = c

    print "*** HOSTS ***"
    for host_ref in c.list_hosts():
        host = api.get_host(host_ref.hostId)
        print host.hostname

print "*** SERVICES ***"
hdfs = None
# List services & health info
for s in clusters.get_all_services():
    print "Service \"%s\" -- state \"%s\" -- health \"%s\"" % (s.name, s.serviceState, s.healthSummary)
    # Get the HDFS service
    if 'hdfs' in s.type.lower():
        hdfs = s

print "*** HDFS Service checks (" + hdfs.serviceUrl + ") ***"
print "*** ROLES FOR HDFS ***"
Example 5: RemoteDataLoad

# Module required: from cm_api.api_client import ApiResource [as alias]
# Alternatively: from cm_api.api_client.ApiResource import get_host [as alias]
class RemoteDataLoad(object):
    """This is an implementation of the process to load a test-warehouse snapshot on
    a remote CM managed cluster. This script assumes that the warehouse snapshot was
    already downloaded and was either passed in as a parameter, or can be found by
    either inspecting the SNAPSHOT_DIR environment variable, or based on the WORKSPACE
    environment variable on a Jenkins build slave.

    The reason for the additional setup code is that in the local development
    environment it is assumed that $USER is the HDFS superuser, which is not the case
    for remote deployments.
    """

    def __init__(self, cm_host, options):
        logger.info("Starting remote data load...")
        self.options = options
        self.cm_host = cm_host
        # A gateway host can be used if the CM host is not configured as a Hadoop gateway.
        self.gateway = options.gateway if options.gateway else cm_host
        self.impala_home = os.environ["IMPALA_HOME"]
        self.api = ApiResource(self.cm_host, username=options.cm_user,
                               password=options.cm_pass)
        # The API returns a list of clusters managed by the CM host. We're assuming
        # that this CM host was set up for the purpose of Impala testing on one
        # cluster, so the list should only have one value.
        self.cluster = self.api.get_all_clusters()[0]
        self.services = self.get_services()
        self.config = self.get_service_client_configurations()
        logger.info("Retrieved service configuration")
        logger.info(str(self.config))
        self.prepare()
        logger.info("IMPALA_HOME: {0}".format(self.impala_home))

    def get_hostname_for_ref(self, host_ref):
        """Translate the HostRef instance into the hostname."""
        return self.api.get_host(host_ref.hostId).hostname

    @staticmethod
    def get_or_default(config):
        return config.value if config.value else config.default

    def get_services(self):
        """Confirm that all services are running, and return the service dict."""
        services = dict((s.type, s) for s in self.cluster.get_all_services())
        if set(REQUIRED_SERVICES) != set(services.keys()):
            missing_services = set(REQUIRED_SERVICES) - set(services.keys())
            logger.error("Services not installed: {0}".format(list(missing_services)))
            raise RuntimeError("Cluster not ready.")
        if not all(services[s].serviceState == 'STARTED' for s in services):
            stopped = [s for s in services if services[s].serviceState != "STARTED"]
            logger.error("Not all services started: {0}".format(stopped))
            raise RuntimeError("Cluster not ready.")
        return services

    @timing
    def download_client_config(self, cluster, service):
        """Download the client configuration zip for a particular cluster and service.

        Since cm_api does not provide a way to download the archive, we build the URL
        manually and download the file. Once the file is downloaded, the archive is
        extracted and its content is copied to the Hadoop configuration directories
        defined by Impala.
        """
        logger.info("Downloading client configuration for {0}".format(service.name))
        url = "http://{0}:7180/api/{1}/clusters/{2}/services/{3}/clientConfig".format(
            self.cm_host, CM_API_VERSION, urlquote(cluster.name), urlquote(service.name))
        path = mkdtemp()
        sh.curl(url, o=os.path.join(path, "clientConfig.zip"), _out=tee, _err=tee)
        current = os.getcwd()
        os.chdir(path)
        sh.unzip("clientConfig.zip")
        for root, _, file_names in os.walk("."):
            for filename in fnmatch.filter(file_names, "*.xml"):
                src = os.path.join(root, filename)
                dst = os.path.join(self.impala_home, "fe", "src", "test", "resources")
                logger.debug("Copying {0} to {1}".format(src, dst))
                shutil.copy(src, dst)
        os.chdir(current)

    # TODO: this may be available in tests/comparison/cluster.py
    def set_hive_warehouse_dir(self, cluster, service):
        logger.info("Setting the Hive Warehouse Dir")
        for service in self.api.get_all_clusters()[0].get_all_services():
            logger.info(service)
            if service.type == "HIVE":
                hive_config = {"hive_warehouse_directory": HIVE_WAREHOUSE_DIR}
                service.update_config(hive_config)

    # TODO: This functionality should be more generally available to other infrastructure
    # code, rather than being quarantined in this script. See IMPALA-4367.
    @timing
    def get_service_client_configurations(self):
        """Download the client configurations necessary to upload data to the remote
        cluster. Unfortunately, the CM API does not allow downloading it so we have to
        iterate over the services and download the config for all of them.
#......... part of the code omitted here .........
Example 6: main

# Module required: from cm_api.api_client import ApiResource [as alias]
# Alternatively: from cm_api.api_client.ApiResource import get_host [as alias]
def main():
    module = AnsibleModule(argument_spec=dict((argument, {'type': 'str'}) for argument in MODULE_ARGUMENTS))

    api = ApiResource('localhost', username=ADMIN_USER, password=ADMIN_PASS, version=10)
    cluster_name = CLUSTER_NAME

    manager = api.get_cloudera_manager()

    action_a = module.params.get('action', None)

    if action_a == 'create_cluster':
        license_a = module.params.get('license', None)
        version_a = module.params.get('version', None)

        cluster_list = [x.name for x in api.get_all_clusters()]
        if cluster_name in cluster_list:
            module.exit_json(changed=False, msg='Cluster exists')
        else:
            cluster = api.create_cluster(CLUSTER_NAME, fullVersion=version_a)
            if license_a == None:
                manager.begin_trial()
            else:
                manager.update_license(license_a.decode('base64'))
            module.exit_json(changed=True, msg='Cluster created')
    elif action_a in ['add_host', 'create_mgmt', 'deploy_parcel', 'deploy_hdfs_base', 'deploy_hdfs_httpfs', 'deploy_hdfs_dn', 'deploy_hdfs_ha', 'deploy_rm_ha', 'set_config', 'service', 'deploy_service', 'deploy_service_worker_nodes', 'deploy_base_roles', 'run_command', 'cluster', 'create_snapshot_policy']:
        # more complicated actions that need a created cluster go here
        cluster = api.get_cluster(cluster_name)
        host_map = dict((api.get_host(x.hostId).hostname, x.hostId) for x in cluster.list_hosts())

        # adds a host to the cluster
        # host_name should be in the internal DNS format, ip-xx-xx-xx.compute.internal
        if action_a == 'add_host':
            host_a = module.params.get('host', None)

            host_list = host_map.keys()
            if host_a in host_list:
                module.exit_json(changed=False, msg='Host already in cluster')
            else:
                try:
                    cluster.add_hosts([host_a])
                except ApiException:
                    # if a host isn't there, it could be because the agent didn't manage
                    # to connect yet, so let's wait a moment for it
                    sleep(120)
                    cluster.add_hosts([host_a])
                module.exit_json(changed=True, msg='Host added')

        # create the management service and set its basic configuration;
        # this needs a separate branch since management is handled
        # differently than the rest of the services
        elif action_a == 'create_mgmt':
            host_a = module.params.get('host', None)

            # getting the management service is the only way to check whether mgmt exists;
            # an exception means there isn't one
            try:
                mgmt = manager.get_service()
                module.exit_json(changed=False, msg='Mgmt service already exists')
            except ApiException:
                pass

            mgmt = manager.create_mgmt_service(ApiServiceSetupInfo())

            # this is ugly... and I see no good way to unuglify it
            firehose_passwd = Popen("sudo grep com.cloudera.cmf.ACTIVITYMONITOR.db.password /etc/cloudera-scm-server/db.mgmt.properties | awk -F'=' '{print $2}'", shell=True, stdout=PIPE).stdout.read().rstrip("\n")
            reports_passwd = Popen("sudo grep com.cloudera.cmf.REPORTSMANAGER.db.password /etc/cloudera-scm-server/db.mgmt.properties | awk -F'=' '{print $2}'", shell=True, stdout=PIPE).stdout.read().rstrip("\n")

            # since there is no easy way of configuring the manager... let's do it here :(
            role_conf = defaultdict(dict)
            role_conf['ACTIVITYMONITOR'] = {
                'firehose_database_host': '{0}:7432'.format(host_a),
                'firehose_database_user': 'amon',
                'firehose_database_password': firehose_passwd,
                'firehose_database_type': 'postgresql',
                'firehose_database_name': 'amon',
                'firehose_heapsize': '268435456',
            }
            role_conf['EVENTSERVER'] = {
                'event_server_heapsize': '215964392'
            }
            role_conf['REPORTSMANAGER'] = {
                'headlamp_database_host': '{0}:7432'.format(host_a),
                'headlamp_database_user': 'rman',
                'headlamp_database_password': reports_passwd,
                'headlamp_database_type': 'postgresql',
                'headlamp_database_name': 'rman',
                'headlamp_heapsize': '215964392',
            }

            roles = ['ACTIVITYMONITOR', 'ALERTPUBLISHER', 'EVENTSERVER', 'HOSTMONITOR', 'SERVICEMONITOR', 'REPORTSMANAGER']
            # create management roles
            for role in roles:
                mgmt.create_role('{0}-1'.format(role), role, host_map[host_a])
            # update the configuration of each role group
            for group in mgmt.get_all_role_config_groups():
                group.update_config(role_conf[group.roleType])

            mgmt.start().wait()
#......... part of the code omitted here .........
Example 7: api_data_collection

# Module required: from cm_api.api_client import ApiResource [as alias]
# Alternatively: from cm_api.api_client.ApiResource import get_host [as alias]
def api_data_collection(request):
    """
    RESTful API for application information collection. Queries completed application
    information matching the given conditions and aggregates it.
    @method: GET
    @param from_time: Application finish time after this time. format: "%d/%m/%Y %H:%M". time zone=UTC+8
    @param end_time: Application finish time before this time. format: "%d/%m/%Y %H:%M". time zone=UTC+8
    @param queue_name: Query completed application information for a specific queue name.
    @param app_type: Query completed application information for a specific application type.
    @param app_state: Query completed application information for specific application states, specified as a comma-separated list. ex: FINISHED,FAILED,KILLED
    @return: json data
        { "success": False, "message": "error message" }
        { "success": True, "message": { "queue_view": {...}, "group_view": {...} } }
    """
    if request.method == "GET":
        response = {'success': False, 'message': ''}
        filter_dict = {}
        if "queue_name" in request.GET:
            filter_dict['queue_name'] = request.GET.get('queue_name')
        if "app_type" in request.GET:
            filter_dict['app_type'] = request.GET.get('app_type')
        if "app_state" in request.GET:
            filter_dict['app_state'] = request.GET.get('app_state').split(',')

        # time zone = Asia/Taipei = UTC+8
        from_time = datetime.strptime(request.GET.get('from_time'), "%d/%m/%Y %H:%M") - timedelta(hours=8)
        to_time = datetime.strptime(request.GET.get('end_time'), "%d/%m/%Y %H:%M") - timedelta(hours=8)

        # get config
        config = ConfigParser.ConfigParser()
        config.read(os.path.join(settings.BASE_DIR, "cluster.ini"))
        cm_host = config.get("CM", "cm.host")
        cm_port = config.get("CM", "cm.port")
        cm_version = config.get("CM", "cm.version")
        cm_username = config.get("CM", "cm.username")
        cm_password = config.get("CM", "cm.password")

        cluster_name = config.get("Cluster", "cluster.name")
        yarn_name = config.get("Cluster", "cluster.yarn.name")

        ldap_host = config.get("Ldap", "ldap.host")
        ldap_username = config.get("Ldap", "ldap.username")
        ldap_password = config.get("Ldap", "ldap.password")

        # get active resource manager info
        try:
            cm_api = ApiResource(cm_host, int(cm_port), username=cm_username, password=cm_password, version=int(cm_version))
            cm_cluster_obj = cm_api.get_cluster(name=cluster_name)
            cm_yarn_obj = cm_cluster_obj.get_service(name=yarn_name)

            find_active_rm = False
            for rm in cm_yarn_obj.get_roles_by_type(role_type="RESOURCEMANAGER"):
                if rm.haStatus == "ACTIVE":
                    host = cm_api.get_host(rm.hostRef.hostId)
                    active_rm_ip = host.ipAddress
                    active_rm_port = 8088
                    find_active_rm = True

            if not find_active_rm:
                message = "can not find active rm"
                print("[ERROR] " + message)
                response['success'] = False
                response['message'] = message
                return HttpResponse(json.dumps(response))
        except Exception, e:
            message = "can not get cm yarn object"
            print("[ERROR] " + message + str(e))
            response['success'] = False
            response['message'] = message
            return HttpResponse(json.dumps(response))

        # all application statistics
        statistics_response = applications_statistics(active_rm_ip, active_rm_port, from_time, to_time, filter_dict)
        if statistics_response['success']:

            # create an ldap connection; access ldap to get the group of the account
            if create_ldap_connection(ldap_host, ldap_username, ldap_password):
                ldap_connection = create_ldap_connection(ldap_host, ldap_username, ldap_password)
            else:
                message = "can not connect to ldap://" + ldap_host
                response['success'] = False
                response['message'] = message
                return HttpResponse(json.dumps(response))

            # init queue view result & group view result
            queue_view_final_result = statistics_response['message']
            group_view_final_result = {}

            # add group information to the queue view result and accumulate the result by group
            for queue, queue_info in queue_view_final_result.items():
                queue_view_final_result[queue]['group'] = ''
                # queue naming: root.SYSTEM.<account>, root.PERSONAL.<account>
                m = re.match(r"(?P<root>\w+)\.(?P<second>\w+)\.(?P<third>\w+)", queue)
                if m and m.group('root') == 'root' and (m.group('second') == 'SYSTEM' or m.group('second') == 'PERSONAL'):
                    queue_view_final_result[queue]['account'] = m.group('third')
                    group_query_result = query_group_of_user(ldap_connection, queue_view_final_result[queue]['account'])
                    group = group_query_result['group']
                    project_name = group_query_result['name']
                    queue_view_final_result[queue]['group'] = group
#......... part of the code omitted here .........
Example 8: ClouderaManager

# Module required: from cm_api.api_client import ApiResource [as alias]
# Alternatively: from cm_api.api_client.ApiResource import get_host [as alias]
class ClouderaManager(object):
    """
    The complete orchestration of a cluster from start to finish, assuming all the hosts
    are configured and Cloudera Manager is installed with all the required databases set up.

    Handles all the steps required in creating a cluster. All the functions are built to
    run idempotently, so you should be able to resume from any failed step by re-running
    __class__.setup().
    """

    def __init__(self, module, config, trial=False, license_txt=None):
        self.api = ApiResource(config['cm']['host'], username=config['cm']['username'],
                               password=config['cm']['password'])
        self.manager = self.api.get_cloudera_manager()
        self.config = config
        self.module = module
        self.trial = trial
        self.license_txt = license_txt
        self.cluster = None

    def enable_license(self):
        """
        Enable the requested license: either trial mode, or a full license that is entered
        and registered.
        """
        try:
            _license = self.manager.get_license()
        except ApiException:
            print_json(type="LICENSE", msg="Enabling license")
            if self.trial:
                self.manager.begin_trial()
            else:
                if self.license_txt is not None:
                    self.manager.update_license(self.license_txt)
                else:
                    fail(self.module, 'License should be provided or trial should be specified')
            try:
                _license = self.manager.get_license()
            except ApiException:
                fail(self.module, 'Failed enabling license')
        print_json(type="LICENSE",
                   msg="Owner: {}, UUID: {}".format(_license.owner, _license.uuid))

    def create_cluster(self):
        """
        Create a cluster and add hosts to the cluster. A new cluster is only created
        if another one doesn't exist with the same name.
        """
        print_json(type="CLUSTER", msg="Creating cluster")
        cluster_config = self.config['cluster']
        try:
            self.cluster = self.api.get_cluster(cluster_config['name'])
        except ApiException:
            print_json(type="CLUSTER",
                       msg="Creating Cluster entity: {}".format(cluster_config['name']))
            self.cluster = self.api.create_cluster(cluster_config['name'],
                                                   cluster_config['version'],
                                                   cluster_config['fullVersion'])

        cluster_hosts = [self.api.get_host(host.hostId).hostname
                         for host in self.cluster.list_hosts()]
        hosts = []
        for host in cluster_config['hosts']:
            if host not in cluster_hosts:
                hosts.append(host)
        self.cluster.add_hosts(hosts)

    def activate_parcels(self):
        print_json(type="PARCELS", msg="Setting up parcels")
        for parcel_cfg in self.config['parcels']:
            parcel = Parcels(self.module, self.manager, self.cluster,
                             parcel_cfg.get('version'), parcel_cfg.get('repo'),
                             parcel_cfg.get('product', 'CDH'))
            parcel.download()
            parcel.distribute()
            parcel.activate()

    @retry(attempts=20, delay=5)
    def wait_inspect_hosts(self, cmd):
        """
        Inspect all the hosts. Basically wait till the check completes on all hosts.
        :param cmd: A command instance used for tracking the status of the command
        """
        print_json(type="HOSTS", msg="Inspecting hosts")
        cmd = cmd.fetch()
        if cmd.success is None:
            raise ApiException("Waiting on command {} to finish".format(cmd))
        elif not cmd.success:
            if (cmd.resultMessage is not None and
                    'is not currently available for execution' in cmd.resultMessage):
                raise ApiException('Retry Command')
            fail(self.module, 'Host inspection failed')
        print_json(type="HOSTS", msg="Host inspection completed: {}".format(cmd.resultMessage))

    def deploy_mgmt_services(self):
        """
        Configure, deploy and start all the Cloudera Management Services.
        """
#......... part of the code omitted here .........