本文整理汇总了Python中cm_api.api_client.ApiResource.get_all_hosts方法的典型用法代码示例。如果您正苦于以下问题:Python ApiResource.get_all_hosts方法的具体用法?Python ApiResource.get_all_hosts怎么用?Python ApiResource.get_all_hosts使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cm_api.api_client.ApiResource
的用法示例。
在下文中一共展示了ApiResource.get_all_hosts方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: ImpalaCluster
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
class ImpalaCluster(object):
    """Wrapper around a CM-managed cluster, focused on its Impala service.

    Connects to Cloudera Manager at `cm_host`, looks up the cluster named
    `cm_cluster_name`, indexes its hosts by hostId, and builds an
    ImpalaService handle for it.
    """

    def __init__(self, cm_host, cm_cluster_name, username, password):
        self.cm_api = ApiResource(cm_host, username=username, password=password)
        # Remember the cluster name: __load_hosts matches role refs against it.
        self.cluster_name = cm_cluster_name
        self.hosts = dict()
        self.services = list()
        self.cluster = self.cm_api.get_cluster(cm_cluster_name)
        if self.cluster is None:
            raise RuntimeError('Cluster name "%s" not found' % cm_cluster_name)
        self.__load_hosts()
        self.__impala_service = ImpalaService(self)

    def _get_all_services(self):
        return self.cluster.get_all_services()

    def get_impala_service(self):
        return self.__impala_service

    def __load_hosts(self):
        """Populate self.hosts with the hosts belonging to this cluster."""
        self.hosts = dict()
        # There is no API that lists the hosts in a given cluster, so loop
        # through all hosts and keep those whose role refs name this cluster.
        for host_info in self.cm_api.get_all_hosts():
            # host_info doesn't include a link to the roleRef so we need to do
            # another lookup based on the hostId.
            host = self.cm_api.get_host(host_info.hostId)
            # BUG FIX: the original line `for roleRef.get('clusterName') ==
            # self.cluster_name:` is not valid Python. Iterate the host's role
            # refs and match on cluster name instead.
            # NOTE(review): assumes role refs support dict-style .get() as the
            # original code implied — confirm against the cm_api version used.
            for roleRef in host.roleRefs:
                if roleRef.get('clusterName') == self.cluster_name:
                    self.hosts[host_info.hostId] = Host(host)
                    break
示例2: adjust_yarn_memory_limits
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
def adjust_yarn_memory_limits(region, stack_name, restart=True):
    """Set YARN scheduler/NodeManager limits to the host's full capacity.

    Tunnels to the Cloudera Manager instance for `stack_name` and points the
    ResourceManager and NodeManager memory/vcore limits at the physical
    memory and core count of one cluster host (all hosts are assumed to be
    the same instance type), then redeploys client configs and optionally
    restarts the cluster.
    """
    ec2_conn = create_ec2_connection(region)
    manager_instance = get_manager_instance(ec2_conn, stack_name)
    with cm_tunnel_ctx(manager_instance) as local_port:
        api = ApiResource('localhost', username='admin', password='admin',
                          server_port=local_port, version=9)
        cluster = list(api.get_all_clusters())[0]
        # Any host will do — all hosts share one instance type.
        sample_host = list(api.get_all_hosts())[0]
        yarn = [s for s in cluster.get_all_services() if s.type == 'YARN'][0]
        groups = list(yarn.get_all_role_config_groups())
        rm_group = [g for g in groups if g.roleType == 'RESOURCEMANAGER'][0]
        nm_group = [g for g in groups if g.roleType == 'NODEMANAGER'][0]
        mem_mb = int(sample_host.totalPhysMemBytes / 1024. / 1024.)
        rm_group.update_config({
            'yarn_scheduler_maximum_allocation_mb': mem_mb,
            'yarn_scheduler_maximum_allocation_vcores': sample_host.numCores})
        nm_group.update_config({
            'yarn_nodemanager_resource_memory_mb': mem_mb,
            'yarn_nodemanager_resource_cpu_vcores': sample_host.numCores})
        cluster.deploy_client_config().wait()
        if restart:
            cluster.restart().wait()
示例3: list_hosts
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
def list_hosts(host, username, password, cafile):
context = ssl.create_default_context(cafile=cafile)
api = ApiResource(host, username=username, password=password, use_tls=True,
ssl_context=context)
for h in api.get_all_hosts():
print h.hostname
示例4: get_cluster_specs
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
def get_cluster_specs():
    """Return worker count plus per-host cores and memory for the cluster.

    Reads the CM hostname from the MANAGER_HOST environment variable.
    """
    api = ApiResource(os.environ['MANAGER_HOST'], username='admin',
                      password='admin', server_port=7180, version=9)
    # Any host will do — all hosts share one instance type.
    sample_host = list(api.get_all_hosts())[0]
    cluster = list(api.get_all_clusters())[0]
    yarn = [s for s in cluster.get_all_services() if s.type == 'YARN'][0]
    return {
        'num_worker_nodes': len(yarn.get_roles_by_type('NODEMANAGER')),
        'num_cores': sample_host.numCores,
        'node_memory': sample_host.totalPhysMemBytes,
    }
示例5: get_cluster_info
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
def get_cluster_info(manager_host, server_port=7180, username='admin',
                     password='admin'):
    """Collect cluster sizing plus Hive and Impala endpoint details from CM.

    Returns a dict with worker-node count, per-host cores/memory, and the
    host/port of the first HiveServer2 and Impala daemon roles.
    """
    api = ApiResource(manager_host, username=username, password=password,
                      server_port=server_port, version=9)
    # Any host will do — all hosts share one instance type.
    sample_host = list(api.get_all_hosts())[0]
    cluster = list(api.get_all_clusters())[0]
    yarn = [s for s in cluster.get_all_services() if s.type == 'YARN'][0]
    hive = [s for s in cluster.get_all_services() if s.type == 'HIVE'][0]
    impala = [s for s in cluster.get_all_services() if s.type == 'IMPALA'][0]
    # Resolve the HiveServer2 endpoint from its role's host ref and config.
    hs2_role = hive.get_roles_by_type('HIVESERVER2')[0]
    hive_host = api.get_host(hs2_role.hostRef.hostId).hostname
    hive_port = int(
        hs2_role.get_config('full')['hs2_thrift_address_port'].default)
    # Same for the first Impala daemon.
    impalad_role = impala.get_roles_by_type('IMPALAD')[0]
    impala_host = api.get_host(impalad_role.hostRef.hostId).hostname
    impala_port = int(impalad_role.get_config('full')['hs2_port'].default)
    return {
        'num_worker_nodes': len(yarn.get_roles_by_type('NODEMANAGER')),
        'node_cores': sample_host.numCores,
        'node_memory': sample_host.totalPhysMemBytes,
        'hive_host': hive_host, 'hive_port': hive_port,
        'impala_host': impala_host, 'impala_port': impala_port,
    }
示例6: adjust_yarn_memory_limits
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
def adjust_yarn_memory_limits(region, stack_name):
    """Set YARN scheduler/NodeManager limits to the host's full capacity.

    Opens an HTTP tunnel to the stack's Cloudera Manager (7180 -> 64999),
    points the ResourceManager and NodeManager memory/vcore limits at the
    physical memory and core count of one host (all hosts are assumed to be
    the same instance type), then redeploys client configs and restarts.
    """
    ec2_conn = create_ec2_connection(region)
    manager_instance = get_manager_instance(ec2_conn, stack_name)
    # The ApiResource is lazy; it only connects once the tunnel is up.
    api = ApiResource("localhost", username="admin", password="admin",
                      server_port=64999, version=9)
    with http_tunnel_ctx(manager_instance, 7180, 64999):
        cluster = list(api.get_all_clusters())[0]
        # Any host will do — all hosts share one instance type.
        sample_host = list(api.get_all_hosts())[0]
        yarn = [s for s in cluster.get_all_services() if s.type == "YARN"][0]
        groups = list(yarn.get_all_role_config_groups())
        rm_group = [g for g in groups if g.roleType == "RESOURCEMANAGER"][0]
        nm_group = [g for g in groups if g.roleType == "NODEMANAGER"][0]
        mem_mb = int(sample_host.totalPhysMemBytes / 1024.0 / 1024.0)
        rm_group.update_config({
            "yarn_scheduler_maximum_allocation_mb": mem_mb,
            "yarn_scheduler_maximum_allocation_vcores": sample_host.numCores,
        })
        nm_group.update_config({
            "yarn_nodemanager_resource_memory_mb": mem_mb,
            "yarn_nodemanager_resource_cpu_vcores": sample_host.numCores,
        })
        cluster.deploy_client_config().wait()
        cluster.restart().wait()
示例7: create_cluster
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
def create_cluster(config_dict):
config.read(['./conf/hadrian.ini','./conf/cluster_specs.ini', './conf/cloudera-manager/cm.ini'])
cm_cluster_name = config_grabber("Globals")['cm.cluster.name']
cm_username = config_grabber("Globals")['cm.username']
cm_password = config_grabber("Globals")['cm.password']
cm_port = config_grabber("Globals")['cm.port']
version = config_grabber('Globals')['cdh.cluster.version']
cm_server = config_grabber(cm_cluster_name + '-en')['cm.server']
#Grab all configuration files in the directory with the CM Cluster Name.
for i in os.listdir('./conf/' + cm_cluster_name):
config.read('./conf/' + cm_cluster_name + '/' + i)
all_nodes = list()
while (get_cm_status(cm_server + ':' + cm_port) != 200):
print 'Waiting for CM Server to start... '
time.sleep(15)
api = ApiResource(cm_server, cm_port, cm_username, cm_password)
# create cluster
cluster = api.create_cluster(cm_cluster_name, version.upper())
#Config CM
print 'Applying any configuration changes to Cloudera Manager'
cmanager = api.get_cloudera_manager()
cmanager.update_config(config_grabber('cloudera-manager-updates'))
planned_nodes = config_grabber(cm_cluster_name + '-en')['full.list'].split(',')
for k, v in config_grabber(cm_cluster_name + '-dn').iteritems():
for j in v.split(','):
planned_nodes.append(j)
# TODO make this smarter. show which agents haven't checked in. Add the option to continue without them.
if len(api.get_all_hosts()) != len(planned_nodes):
print 'Waiting for all agents to check into the CM Server before continuing.'
while len(planned_nodes) > api.get_all_hosts():
print 'Waiting for the final set of CM Agent nodes to check in.'
time.sleep(5)
print 'Updating Rack configuration for data nodes.'
all_hosts = list()
for host in api.get_all_hosts():
all_hosts.append(host.hostId)
for k,v in config_grabber(cm_cluster_name + '-dn').iteritems():
if host.hostname in v:
print 'Setting host: ' + host.hostname + ' to rack /default/' + k
host.set_rack_id('/default/' + k)
print 'Adding all hosts to cluster.'
cluster.add_hosts(all_hosts)
# download CDH Parcels
# TODO add some logic here to make the parcel list something that's read from the hadrian.ini
# This will allow support for other CDH packages, Search, etc.
if config_grabber('Globals')['cdh.distribution.method'] == 'parcels':
distribute_parcel(cluster, 'CDH', config_grabber("Globals")['cdh.parcel.version'])
if config_dict.get('hdfs_ha') == True:
create_zookeeper_service(config_dict, cluster)
create_hdfs_service(config_dict, cluster)
cmd = cluster.deploy_client_config()
if not cmd.wait(CMD_TIMEOUT).success:
print 'Failed to deploy client configurations'
else:
print 'Client configuration deployment complete.'
create_mapred_service(config_dict, cluster, cm_server)
if config_dict.get('hbase') == True:
if config_dict.get('hdfs_ha') == False:
create_zookeeper_service(config_dict, cluster)
create_hbase_service(config_dict, cluster)
if config_dict.get('hive') == True:
create_hive_service(config_dict, cluster)
print 'Starting final client configuration deployment for all services.'
cmd = cluster.deploy_client_config()
if not cmd.wait(CMD_TIMEOUT).success:
print 'Failed to deploy client configuration.'
else:
print 'Client configuration deployment complete. The cluster is all yours. Happy Hadooping.'
示例8: raw_input
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
# Get a handle to the API client.
# Interactive Python 2 script: prompts for Cloudera Manager connection
# details, then lists clusters, hosts, and services. (The tail of this
# script, which iterates cdh5's services, is truncated in this excerpt.)
from cm_api.api_client import ApiResource
import time
import sys
cm_host = raw_input ("Enter IP address of CM: ")
cm_username = raw_input ("Enter username: ")
cm_password = raw_input ("Enter password: ")
api = ApiResource(cm_host,server_port=7180, username=cm_username, password=cm_password)
hosts = api.get_all_hosts()
# Get a list of all clusters; remember the one reporting version "CDH5".
print "Clusters:"
cdh5 = None
for c in api.get_all_clusters():
print c.name
if c.version == "CDH5":
cdh5 = c
# Print all hosts known to this CM server.
print "Hosts:"
for i in hosts:
print i
#cdh5.rolling_restart(stale_configs_only=1) works only in Enterprise version
# Get list of all services on the CDH5 cluster.
print "Services:"
for s in cdh5.get_all_services():
print s
if s.type == "HDFS":
示例9: main
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
#.........这里部分代码省略.........
# if version is just "latest", try to check everything
else:
version_substr = ".*"
try:
[version_parcel] = [x for x in available_versions if re.match(version_substr, x) != None]
except ValueError:
module.fail_json(msg='Specified version {0} doesnt appear in {1} or appears twice'.format(version_substr, available_versions))
else:
version_parcel = version_a
# we now go through various stages of getting the parcel
# as there is no built-in way of waiting for an operation to complete
# we use loops with sleep to get it done
parcel = cluster.get_parcel(name_a, version_parcel)
if parcel.stage == 'AVAILABLE_REMOTELY':
parcel.start_download()
while parcel.stage != 'DOWNLOADED':
parcel = cluster.get_parcel(name_a, version_parcel)
if parcel.state.errors:
raise Exception(str(parcel.state.errors))
sleep(10)
if parcel.stage == 'DOWNLOADED':
parcel.start_distribution()
while parcel.stage != 'DISTRIBUTED':
parcel = cluster.get_parcel(name_a, version_parcel)
if parcel.state.errors:
raise Exception(str(parcel.state.errors))
# sleep while hosts report problems after the download
for i in range(12):
sleep(10)
if sum([1 for x in api.get_all_hosts(view='Full') if x.healthSummary != 'GOOD']) == 0:
break
# since parcels are distributed automatically when a new host is added to a cluster
# we can encounter the ,,ACTIVATING'' stage then
if parcel.stage == 'DISTRIBUTED' or parcel.stage == 'ACTIVATING':
if parcel.stage == 'DISTRIBUTED':
parcel.activate()
while parcel.stage != 'ACTIVATED':
parcel = cluster.get_parcel(name_a, version_parcel)
# this sleep has to be large because although the operation is very fast
# it makes the management and cloudera hosts go bonkers, failing all of the health checks
sleep(10)
# sleep while hosts report problems after the distribution
for i in range(60):
sleep(10)
if sum([1 for x in api.get_all_hosts(view='Full') if x.healthSummary != 'GOOD']) == 0:
break
module.exit_json(changed=True, msg='Parcel activated')
if parcel.stage == 'ACTIVATED':
module.exit_json(changed=False, msg='Parcel already activated')
# if we get down here, something is not right
module.fail_json(msg='Invalid parcel state')
# deploy nodes for workers, according to SERVICE_WORKER_MAP
# also give them sane names and init zookeeper and kafka ones
# which need id's specified
elif action_a == 'deploy_service_worker_nodes':
示例10: get_rackID
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
#!/usr/bin/env python
#author Steven
#auto fill the Rack ID if it's /null
import simplejson as json
import urllib2, base64
import re
from cm_api.api_client import ApiResource
def get_rackID(host):
    """Look up a host's rack id from the cartographer inventory service.

    Queries the (internal) cartographer HTTP API by hostname and converts the
    returned `location_in_building` field (dot-separated, at least 7 parts)
    into a CM-style rack id of the form "/a.b.c.d.e.f/g".
    """
    url="https://cartographer.siri.apple.com/api/v2/hosts?host.hostname="+str(host)
    request = urllib2.Request(url)
    result = urllib2.urlopen(request)
    jsoncont=result.read()
    for i in json.loads(jsoncont):
        #print i
        # NOTE(review): the original indentation is mangled; this reconstruction
        # returns the rack id of the first matching inventory record. Confirm
        # against the upstream script whether the return belongs inside or
        # after the loop (they differ only when the query yields >1 record).
        f=i['asset']['location_in_building'].split(".")
        cm_rack_id="/"+f[0]+"."+f[1]+"."+f[2]+"."+f[3]+"."+f[4]+"."+f[5]+"/"+f[6]
        return cm_rack_id
#print get_rackID("flume001.sp07.siri.apple.com")
api = ApiResource('cm001.sp07.siri.apple.com',version=6,username='admin',password='admin')
for h in api.get_all_hosts():
if h.rackId=="/null":
# if h.hostname=="batch001.sp07.siri.apple.com":
# h.set_rack_id("/US.RMR.02.01.0903.06/010")
print get_rackID(h.hostname),h.hostname
h.set_rack_id(get_rackID(h.hostname))
#print h.hostname
示例11: main
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import get_all_hosts [as 别名]
def main():
global ec2con
global cwcon
ec2con = boto.ec2.connect_to_region('us-east-1')
cwcon = boto.ec2.cloudwatch.CloudWatchConnection()
api = ApiResource(CM_HOST, username="admin", password="admin")
displayName = None
for c in api.get_all_clusters():
displayName = c.displayName
print "Cluster: %s (%s)" % (displayName, c.name)
inst_cache = {}
insts = api.get_all_hosts('full')
print "Found %s in the cluster" % [inst.hostId for inst in insts.objects]
for inst in insts.objects:
clusterName = inst.roleRefs[0].clusterName
if clusterName <> c.name:
print 'Clusters do not correspond: %s vs %s' % (clusterName, c.name)
continue
cores = inst.numCores
inst_id = inst.hostId
inst_cache[inst_id] = my_cache = {}
# For later - we'll send in one data point for every TS query
# that has AWS data
my_cache['aws_info_recorded'] = False
# my_cache['healthSummary'] = inst.healthSummary
ress = ec2con.get_all_reservations(filters={'instance-id' : inst_id})
if len(ress) > 0:
print "Found %s reservations for %s: %s" % (len(ress), inst_id, ress)
res = ress[0]
instances = res.instances
if len(instances) > 1:
print "Found %s instances for %s %s" % (len(instances), inst_id, instances)
inst = instances[0]
if inst.id <> inst_id:
raise Exception("%s != %s" % (inst.id, inst_id))
platform = inst.platform
vpc_id = inst.vpc_id
if platform == 'windows':
product = 'Windows'
elif not platform:
product = 'Linux_UNIX'
else:
product = 'UNKNOWN'
if vpc_id:
product += "_Amazon_VPC"
ami = inst.image_id
my_cache['product'] = product
my_cache['region'] = inst.region.name
my_cache['zone'] = inst.placement
inst_type = inst.instance_type.replace('.','_')
my_cache['inst_type'] = inst_type
time_f = arrow.utcnow().replace(minutes=common.DEFAULT_LOOKBACK_MINUTES)
time_t = arrow.utcnow()
# TODO
# http://arr.gr/blog/2013/08/monitoring-ec2-instance-memory-usage-with-cloudwatch/
# http://blog.sciencelogic.com/netflix-steals-time-in-the-cloud-and-from-users/03/2011
# https://www.stackdriver.com/cpu-steal-why-aws-cloudwatch-metrics-are-different-than-agent-metrics/
stat = cwcon.get_metric_statistics(300,
time_f,
time_t,
'CPUUtilization',
'AWS/EC2',
['Average','Minimum','Maximum'],
{ 'InstanceId' : inst_id })
# [{u'Timestamp': datetime.datetime(2014, 4, 13, 6, 5), u'Average': 0.35250000000000004, u'Minimum': 0.33, u'Maximum': 0.42, u'Unit': u'Percent'}]
print 'Fetching stats for %s: %s' % (inst_id, stat)
if stat:
for s in stat:
ts = common.ts_from_aws(s)
my_cache['avg_cpu'] = float(s['Average'])
else:
print "No stats found for %s" % inst_id
print "Querying CDH."
series = api.query_timeseries('SELECT * WHERE clusterName = %s' % c.name)
for entry in series.objects[0].timeSeries:
# print entry.metadata.__dict__
metric = entry.metadata.metricName
# internal host
hostname = ""
if 'hostname' in entry.metadata.attributes:
host = entry.metadata.attributes['hostname']
inst_id = ""
my_cache = {}
if 'hostId' in entry.metadata.attributes:
#.........这里部分代码省略.........