

Python ApiResource.get_cloudera_manager Method Code Examples

This article collects and summarizes typical usage examples of the Python method cm_api.api_client.ApiResource.get_cloudera_manager. If you are wondering what ApiResource.get_cloudera_manager does, how to use it, or where to find examples of it, the curated code samples below should help. You can also explore further usage examples of cm_api.api_client.ApiResource, the class this method belongs to.


The sections below present 15 code examples of the ApiResource.get_cloudera_manager method, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
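
Before diving into the examples, here is a minimal sketch of the pattern they all share: construct an ApiResource pointing at the Cloudera Manager server, then call get_cloudera_manager() to obtain the ClouderaManager handle used for reading and updating CM configuration and for reaching the Cloudera Management Service. The host name, port, credentials, and API version below are placeholders chosen for illustration, not values taken from any particular example; the config key shown is one that several examples use.

# Minimal sketch (hypothetical host and credentials; adjust for your deployment)
from cm_api.api_client import ApiResource

api = ApiResource('cm-host.example.com', 7180, 'admin', 'admin', version=6)
cm = api.get_cloudera_manager()

# Read the full Cloudera Manager configuration and inspect one setting
cm_config = cm.get_config(view='full')
print(cm_config['REMOTE_PARCEL_REPO_URLS'].value)

# The same handle exposes the Cloudera Management Service and accepts config updates
mgmt_service = cm.get_service()
print(mgmt_service.name)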

Example 1: do_call

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def do_call(host, port, version, user, password, cluster_name, parcel_name, parcel_version, parcel_repo, init_pre_dir, init_post_dir):
    api = ApiResource(host, port, user, password, False, version)
    if not parcel_repo.endswith('/'):
        parcel_repo += '/'
    if re.match(REGEX_VERSION, parcel_version) is None or re.match(REGEX_VERSION, parcel_version).group() != parcel_version:
        raise Exception('Parcel [' + parcel_name + '] is qualified by invalid version [' + parcel_version + '] expected to match regular expression [' + REGEX_VERSION + ']')
    if not parcel_repo.endswith(parcel_version + '/'):
        raise Exception('Parcel [' + parcel_name + '] is qualified by invalid version [' + parcel_version + '] when compared with repository [' + parcel_repo + ']')    
    cm_config = api.get_cloudera_manager().get_config(view='full')
    repo_config = cm_config['REMOTE_PARCEL_REPO_URLS']
    repo_list = repo_config.value or repo_config.default
    if parcel_repo not in repo_list:     
        repo_list += ',' + parcel_repo
        api.get_cloudera_manager().update_config({'REMOTE_PARCEL_REPO_URLS': repo_list})
        time.sleep(POLL_SEC)  # The parcel synchronize end-point is not exposed via the API, so sleep instead
    cluster_names = []
    if cluster_name is None:
        for cluster in api.get_all_clusters():
            cluster_names.append(cluster.name)
    else:
        cluster_names.append(cluster_name)
    for cluster_name_itr in cluster_names:
        print 'Cluster [DEPLOYMENT] starting ... '
        cluster = api.get_cluster(cluster_name_itr)
        parcel = cluster.get_parcel(parcel_name, parcel_version)
        print 'Parcel [DEPLOYMENT] starting ... '
        do_parcel_op(cluster, parcel_name, parcel_version, 'DOWNLOAD', 'AVAILABLE_REMOTELY', 'DOWNLOADED', 'start_download')
        do_parcel_op(cluster, parcel_name, parcel_version, 'DISTRIBUTE', 'DOWNLOADED', 'DISTRIBUTED', 'start_distribution')
        do_parcel_op(cluster, parcel_name, parcel_version, 'ACTIVATE', 'DISTRIBUTED', 'ACTIVATED', 'activate')
        parcel = cluster.get_parcel(parcel_name, parcel_version)
        if parcel.stage != 'ACTIVATED':
            raise Exception('Parcel is currently mid-stage [' + parcel.stage + '], please wait for this to complete')
        print 'Parcel [DEPLOYMENT] finished'
        if init_pre_dir is not None and os.path.isdir(init_pre_dir):
            print 'Cluster [PRE_INIT] starting ... '
            for script in glob.glob(init_pre_dir + '/*.sh'):
                subprocess.call([script])
            print 'Cluster [PRE_INIT] finished'
        print 'Cluster [CONFIG_DEPLOYMENT] starting ... '
        cmd = cluster.deploy_client_config()
        if not cmd.wait(TIMEOUT_SEC).success:
            raise Exception('Failed to deploy client configs')
        print 'Cluster [CONFIG_DEPLOYMENT] finished'
        print 'Cluster [STOP] starting ... '
        cluster.stop().wait()
        print 'Cluster [STOP] finished'
        print 'Cluster [START] starting ... '
        cluster.start().wait()
        print 'Cluster [START] finished'
        if init_post_dir is not None and os.path.isdir(init_post_dir):
            print 'Cluster [POST_INIT] starting ... '
            for script in glob.glob(init_post_dir + '/*.sh'):
                subprocess.call([script])
            print 'Cluster [POST_INIT] finished'
        print 'Cluster [DEPLOYMENT] finished'
Developer ID: boghbogh, Project: cloudera-framework, Lines of code: 58, Source file: cloudera-framework-parcel.py

Example 2: connect

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def connect(cm_api, cm_username, cm_password, use_proxy=False):
    '''
    Wait for ten minutes for CM to come up
    '''

    for _ in xrange(120):
        try:
            logging.info("Checking CM availability....")
            # change name of proxy if necessary
            proxy = urllib2.ProxyHandler({'http': 'proxy'})

            api = ApiResource(cm_api, username=cm_username, password=cm_password, version=14)

            if use_proxy:
                # pylint: disable=W0212
                api._client._opener.add_handler(proxy)

            cloudera_manager = api.get_cloudera_manager()
            api.get_user(cm_username)

            return api, cloudera_manager
        except Exception:
            logging.warning("CM is not up")
            time.sleep(5)
    logging.error("CM did not come UP")
    sys.exit(-1)
Developer ID: pndaproject, Project: platform-salt, Lines of code: 28, Source file: cm_setup.py

Example 3: main

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def main():
    """
    Kerberizes a cluster.

    @rtype:   number
    @returns: A number representing the status of success.
    """
    settings = retrieve_args()

    api = ApiResource(settings.host, settings.port, settings.username,
                      settings.password, settings.use_tls, 8)

    cloudera_manager = api.get_cloudera_manager()
    cluster = api.get_cluster(settings.cluster)
    mgmt_service = cloudera_manager.get_service()

    if verify_cloudera_manager_has_kerberos_principal(cloudera_manager):
        wait_for_command('Stopping the cluster', cluster.stop())
        wait_for_command('Stopping MGMT services', mgmt_service.stop())
        configure_services(cluster)
        wait_for_generate_credentials(cloudera_manager)
        wait_for_command('Deploying client configs.', cluster.deploy_client_config())
        wait_for_command('Deploying cluster client configs', cluster.deploy_cluster_client_config())
        wait_for_command('Starting MGMT services', mgmt_service.start())
        wait_for_command('Starting the cluster', cluster.start())
    else:
        print "Cluster does not have Kerberos admin credentials.  Exiting!"

    return 0
Developer ID: bdclark, Project: director-scripts, Lines of code: 31, Source file: kerberize-cluster.py

Example 4: main

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def main():
  parser = cm_args_parser()
  args = parser.parse_args()
  print "connecting to host:" + args.cm_host + "..."
  api = ApiResource(args.cm_host, username=args.cm_user, password=args.cm_password)
  print "host connected, getting cloudera manager "
  MANAGER = api.get_cloudera_manager()
  print "have cloudera manager object" 
  deploy_management(MANAGER, MGMT_SERVICENAME, MGMT_SERVICE_CONFIG, MGMT_ROLE_CONFIG, AMON_ROLENAME, AMON_ROLE_CONFIG, APUB_ROLENAME, APUB_ROLE_CONFIG, ESERV_ROLENAME, ESERV_ROLE_CONFIG, HMON_ROLENAME, HMON_ROLE_CONFIG, SMON_ROLENAME, SMON_ROLE_CONFIG, RMAN_ROLENAME, RMAN_ROLE_CONFIG)
  print "Deployed CM management service " + MGMT_SERVICENAME + " to run on " + CM_HOST
Developer ID: dougc333, Project: cdhautomation, Lines of code: 12, Source file: addSM.py

Example 5: connect_cm

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def connect_cm(cm_host, cm_username, cm_password):
    """
    Connects to Cloudera Manager API Resource instance to retrieve Endpoint details
    :param cm_host: Cloudera Manager host
    :param cm_username: Username for authentication
    :param cm_password: Password for authentication
    :return: Tuple of (ApiResource, ClouderaManager handle)
    """
    api = ApiResource(cm_host, version=6, username=cm_username, password=cm_password)
    cm_manager = api.get_cloudera_manager()
    return api, cm_manager
Developer ID: pndaproject, Project: platform-data-mgmnt, Lines of code: 13, Source file: endpoint.py

Example 6: main

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def main():
    # connect cm api
    api = ApiResource(CM_HOST, 7180, username=CM_USERNAME, password=CM_PASSWORD)
    manager = api.get_cloudera_manager()
    # no need to update cm config
    #manager.update_config(cm_host)
    print("[INFO] Connected to CM host on " + CM_HOST)

    # create cluster object
    try:
        cluster = api.get_cluster(name=CLUSTER_NAME)
    except:
        cluster = init_cluster(api, CLUSTER_NAME, CLUSTER_VERSION, CLUSTER_NODE_COUNT)
    print("[INFO] Initialized cluster " + CLUSTER_NAME + " which uses CDH version " + CLUSTER_VERSION)

    #
    mgmt_servicename = "MGMT"
    amon_role_name = "ACTIVITYMONITOR"
    apub_role_name = "ALERTPUBLISHER"
    eserv_role_name = "EVENTSERVER"
    hmon_role_name = "HOSTMONITOR"
    smon_role_name = "SERVICEMONITOR"
    nav_role_name = "NAVIGATOR"
    navms_role_name = "NAVIGATORMETADATASERVER"
    rman_role_name = "REPORTMANAGER"
    deploy_management(manager, mgmt_servicename, amon_role_name, apub_role_name, eserv_role_name, hmon_role_name, smon_role_name, nav_role_name, navms_role_name, rman_role_name)
    print("[INFO] Deployed CM management service " + mgmt_servicename + " to run on " + CM_HOST)

    #
    assign_roles(api, cluster)
    print("[INFO] all roles have assigned.")

    #
    # Custom role config groups cannot be automatically configured: Gateway Group 1 (error 400)
    try:
        cluster.auto_configure()
    except:
        pass
    update_custom_config(api, cluster)
    print("[INFO] all servies and roles have configured.")
    #
    cmd = cluster.first_run()
    while cmd.success is None:
        cmd = cmd.fetch()
    if not cmd.success:
        print("[ERROR] The first run command failed: " + cmd.resultMessage)
    else:
        print("[INFO] First run successfully executed. Your cluster has been set up!")
Developer ID: FayeHuang, Project: docker-CDH, Lines of code: 50, Source file: deployCDH.py

Example 7: main

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def main():

    AMON_ROLE_CONFIG = {
        'firehose_heapsize': '1173741824',
    }

    API = ApiResource("ec2-52-24-151-222.us-west-2.compute.amazonaws.com", version=5, username="admin", password="admin")
    MANAGER = API.get_cloudera_manager()
    mgmt = MANAGER.get_service()
    #cf = mgmt.get_config()

    for group in mgmt.get_all_role_config_groups():
        if group.roleType == "ACTIVITYMONITOR":
            group.update_config(AMON_ROLE_CONFIG)
Developer ID: kostin88, Project: cm_api, Lines of code: 18, Source file: pk_test.py

Example 8: main

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def main():
    resource = ApiResource("localhost", 7180, "cloudera", "cloudera", version=19)
    cluster = resource.get_cluster("Cloudera Quickstart")

    cm_manager = resource.get_cloudera_manager()
    cm_manager.update_config({'REMOTE_PARCEL_REPO_URLS': PARCEL_REPO})
    cm_manager.update_all_hosts_config(JDK_CONFIG)
    time.sleep(5)

    for parcel in PARCELS:
        ParcelInstaller(parcel['name'], parcel['version']).install(cluster)

    print "Restarting cluster"
    cluster.stop().wait()
    cluster.start().wait()
    print "Done restarting cluster"
Developer ID: ottogroup, Project: schedoscope, Lines of code: 18, Source file: parcel-installer.py

Example 9: main

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def main():
    """
    Enables HDFS HA on a cluster.

    @rtype:   number
    @returns: A number representing the status of success.
    """
    settings = retrieve_args()

    api = ApiResource(settings.host, settings.port, settings.username, settings.password,
                      version=6)

    if not validate_cluster(api, settings.cluster):
        write_to_stdout("Cluster does not satisfy preconditions for enabling HDFS HA. Exiting!")
        return 1

    if settings.wait_for_good_health:
        write_to_stdout("Waiting for GOOD health... ")
        if not wait_for_good_health(api, settings.cluster):
            write_to_stdout("Cluster health is not GOOD.  Exiting!\n")
            return 1
    else:
        write_to_stdout("Checking cluster health... ")
        if not check_health(api, settings.cluster):
            write_to_stdout("Cluster health is not GOOD.  Exiting!\n")
            return 1

    write_to_stdout("Cluster health is GOOD!\n")

    cluster = api.get_cluster(settings.cluster)

    invoke_hdfs_enable_nn_ha(cluster, settings.nameservice)
    update_hive_for_ha_hdfs(cluster)

    # Restarting the MGMT services to make sure the HDFS file browser functions
    # as expected.
    cloudera_manager = api.get_cloudera_manager()
    mgmt_service = cloudera_manager.get_service()
    wait_for_command('Restarting MGMT services', mgmt_service.restart())

    return 0
Developer ID: bdclark, Project: director-scripts, Lines of code: 42, Source file: enable-hdfs-ha.py

Example 10: main

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def main():
   API = ApiResource(CM_HOST, version=5, username=ADMIN_USER, password=ADMIN_PASS)
   MANAGER = API.get_cloudera_manager()
   MANAGER.update_config(CM_CONFIG)
   print "Connected to CM host on " + CM_HOST + " and updated CM configuration"

   CLUSTER = init_cluster(API, CLUSTER_NAME, CDH_VERSION, CLUSTER_HOSTS, CM_HOST)
   print "Initialized cluster " + CLUSTER_NAME + " which uses CDH version " + CDH_VERSION

   deploy_management(MANAGER, MGMT_SERVICENAME, MGMT_SERVICE_CONFIG, MGMT_ROLE_CONFIG, AMON_ROLENAME, AMON_ROLE_CONFIG, APUB_ROLENAME, APUB_ROLE_CONFIG, ESERV_ROLENAME, ESERV_ROLE_CONFIG, HMON_ROLENAME, HMON_ROLE_CONFIG, SMON_ROLENAME, SMON_ROLE_CONFIG, NAV_ROLENAME, NAV_ROLE_CONFIG, NAVMS_ROLENAME, NAVMS_ROLE_CONFIG, RMAN_ROLENAME, RMAN_ROLE_CONFIG)
   print "Deployed CM management service " + MGMT_SERVICENAME + " to run on " + CM_HOST
   
   deploy_parcels(CLUSTER, PARCELS)
   print "Downloaded and distributed parcels: "
   PRETTY_PRINT.pprint(PARCELS)

   zookeeper_service = deploy_zookeeper(CLUSTER, ZOOKEEPER_SERVICE_NAME, ZOOKEEPER_HOSTS, ZOOKEEPER_SERVICE_CONFIG, ZOOKEEPER_ROLE_CONFIG)
   print "Deployed ZooKeeper " + ZOOKEEPER_SERVICE_NAME + " to run on: "
   PRETTY_PRINT.pprint(ZOOKEEPER_HOSTS)
   
   hdfs_service = deploy_hdfs(CLUSTER, HDFS_SERVICE_NAME, HDFS_SERVICE_CONFIG, HDFS_NAMENODE_SERVICE_NAME, HDFS_NAMENODE_HOST, HDFS_NAMENODE_CONFIG, HDFS_SECONDARY_NAMENODE_HOST, HDFS_SECONDARY_NAMENODE_CONFIG, HDFS_DATANODE_HOSTS, HDFS_DATANODE_CONFIG, HDFS_GATEWAY_HOSTS, HDFS_GATEWAY_CONFIG)
   print "Deployed HDFS service " + HDFS_SERVICE_NAME + " using NameNode on " + HDFS_NAMENODE_HOST + ", SecondaryNameNode on " + HDFS_SECONDARY_NAMENODE_HOST + ", and DataNodes running on: "
   PRETTY_PRINT.pprint(HDFS_DATANODE_HOSTS)
   init_hdfs(hdfs_service, HDFS_SERVICE_NAME, CMD_TIMEOUT)
   print "Initialized HDFS service"

   # mapred and yarn are mutually exclusive; only deploy one of them
   #mapred_service = deploy_mapreduce(CLUSTER, MAPRED_SERVICE_NAME, MAPRED_SERVICE_CONFIG, MAPRED_JT_HOST, MAPRED_JT_CONFIG, MAPRED_TT_HOSTS, MAPRED_TT_CONFIG, MAPRED_GW_HOSTS, MAPRED_GW_CONFIG)
   print "Deployed MapReduce service " + MAPRED_SERVICE_NAME + " using JobTracker on " + MAPRED_JT_HOST + " and TaskTrackers running on "
   PRETTY_PRINT.pprint(MAPRED_TT_HOSTS)
   
   yarn_service = deploy_yarn(CLUSTER, YARN_SERVICE_NAME, YARN_SERVICE_CONFIG, YARN_RM_HOST, YARN_RM_CONFIG, YARN_JHS_HOST, YARN_JHS_CONFIG, YARN_NM_HOSTS, YARN_NM_CONFIG, YARN_GW_HOSTS, YARN_GW_CONFIG)
   print "Deployed YARN service " + YARN_SERVICE_NAME + " using ResourceManager on " + YARN_RM_HOST + ", JobHistoryServer on " + YARN_JHS_HOST + ", and NodeManagers on "
   PRETTY_PRINT.pprint(YARN_NM_HOSTS)
   
   spark_service = deploy_spark(CLUSTER, SPARK_SERVICE_NAME, SPARK_SERVICE_CONFIG, SPARK_MASTER_HOST, SPARK_MASTER_CONFIG, SPARK_WORKER_HOSTS, SPARK_WORKER_CONFIG, SPARK_GW_HOSTS, SPARK_GW_CONFIG)
   print "Deployed SPARK service " + SPARK_SERVICE_NAME + " using SparkMaster on " + SPARK_MASTER_HOST + " and SparkWorkers on "
   PRETTY_PRINT.pprint(SPARK_WORKER_HOSTS)
   
   deploy_hbase(CLUSTER, HBASE_SERVICE_NAME, HBASE_SERVICE_CONFIG, HBASE_HM_HOST, HBASE_HM_CONFIG, HBASE_RS_HOSTS, HBASE_RS_CONFIG, HBASE_THRIFTSERVER_SERVICE_NAME, HBASE_THRIFTSERVER_HOST, HBASE_THRIFTSERVER_CONFIG, HBASE_GW_HOSTS, HBASE_GW_CONFIG)
   print "Deployed HBase service " + HBASE_SERVICE_NAME + " using HMaster on " + HBASE_HM_HOST + " and RegionServers on "
   PRETTY_PRINT.pprint(HBASE_RS_HOSTS)
   
   hive_service = deploy_hive(CLUSTER, HIVE_SERVICE_NAME, HIVE_SERVICE_CONFIG, HIVE_HMS_HOST, HIVE_HMS_CONFIG, HIVE_HS2_HOST, HIVE_HS2_CONFIG, HIVE_WHC_HOST, HIVE_WHC_CONFIG, HIVE_GW_HOSTS, HIVE_GW_CONFIG)
   print "Depoyed Hive service " + HIVE_SERVICE_NAME + " using HiveMetastoreServer on " + HIVE_HMS_HOST + " and HiveServer2 on " + HIVE_HS2_HOST
   init_hive(hive_service)
   print "Initialized Hive service"
   
   impala_service = deploy_impala(CLUSTER, IMPALA_SERVICE_NAME, IMPALA_SERVICE_CONFIG, IMPALA_SS_HOST, IMPALA_SS_CONFIG, IMPALA_CS_HOST, IMPALA_CS_CONFIG, IMPALA_ID_HOSTS, IMPALA_ID_CONFIG)
   print "Deployed Impala service " + IMPALA_SERVICE_NAME + " using StateStore on " + IMPALA_SS_HOST + ", CatalogServer on " + IMPALA_CS_HOST + ", and ImpalaDaemons on "
   PRETTY_PRINT.pprint(IMPALA_ID_HOSTS)
   
   # Need to start the cluster now as subsequent services need the cluster to be running
   #TODO can we just start ZK, and maybe HDFS, instead of everything? It's just needed for the search service
   print "About to restart cluster"
   CLUSTER.stop().wait()
   CLUSTER.start().wait()
   print "Done restarting cluster"

   search_service = deploy_search(CLUSTER, SEARCH_SERVICE_NAME, SEARCH_SERVICE_CONFIG, SEARCH_SOLR_HOST, SEARCH_SOLR_CONFIG, SEARCH_GW_HOSTS, SEARCH_GW_CONFIG)
   print "Deployed Search service " + SEARCH_SERVICE_NAME + " using SOLRHost " + SEARCH_SOLR_HOST
   
   flume_service = deploy_flume(CLUSTER, FLUME_SERVICE_NAME, FLUME_SERVICE_CONFIG, FLUME_AGENT_HOSTS, FLUME_AGENT_CONFIG)
   print "Deployed Flume service " + FLUME_SERVICE_NAME + " using FlumeAgents on "
   PRETTY_PRINT.pprint(FLUME_AGENT_HOSTS)
   
   oozie_service = deploy_oozie(CLUSTER, OOZIE_SERVICE_NAME, OOZIE_SERVICE_CONFIG, OOZIE_SERVER_HOST, OOZIE_SERVER_CONFIG)
   print "Deployed Oozie service " + OOZIE_SERVICE_NAME + " using OozieServer on " + OOZIE_SERVER_HOST
   
   sqoop_service = deploy_sqoop(CLUSTER, SQOOP_SERVICE_NAME, SQOOP_SERVICE_CONFIG, SQOOP_SERVER_HOST, SQOOP_SERVER_CONFIG)
   print "Deployed Sqoop service " + SQOOP_SERVICE_NAME + " using SqoopServer on " + SQOOP_SERVER_HOST
   
   hue_service = deploy_hue(CLUSTER, HUE_SERVICE_NAME, HUE_SERVICE_CONFIG, HUE_SERVER_HOST, HUE_SERVER_CONFIG, HUE_KTR_HOST, HUE_KTR_CONFIG)
   print "Deployed HUE service " + HUE_SERVICE_NAME + " using HueServer on " + HUE_SERVER_HOST
   
   #deploy_accumulo(CLUSTER, ACCUMULO_SERVICE_NAME, ACCUMULO_SERVICE_CONFIG, ACCUMULO_MASTER_HOSTS, ACCUMULO_MASTER_CONFIG, ACCUMULO_TRACER_HOSTS, ACCUMULO_TRACER_CONFIG, ACCUMULO_TSERVER_HOSTS, ACCUMULO_TSERVER_CONFIG, ACCUMULO_LOGGER_HOSTS, ACCUMULO_LOGGER_CONFIG, ACCUMULO_MONITOR_HOST, ACCUMULO_MONITOR_CONFIG, ACCUMULO_GC_HOST, ACCUMULO_GC_CONFIG, ACCUMULO_GATEWAY_HOSTS, ACCUMULO_GATEWAY_CONFIG)
   
   print "About to restart cluster."
   CLUSTER.stop().wait()
   CLUSTER.start().wait()
   print "Done restarting cluster."
   
   post_startup(CLUSTER, hdfs_service, oozie_service)

   print "Finished deploying Cloudera cluster. Go to http://" + CM_HOST + ":7180 to administer the cluster."
   print "If the Oozie service (and therefore the HUE service as well, which depends on it) did not start properly, go to the Oozie service, stop it, click on the Actions button and choose 'Create Database', then start it."
   print "If there are any other services not running, restart them now."
Developer ID: MrTomerLevi, Project: cm_api, Lines of code: 89, Source file: deploycloudera.py

Example 11: file

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
# FIXME: could be removed in future version?
creds = {}  # DB credentials keyed by service; initialized here so this excerpt runs standalone
f = open('/etc/cloudera-scm-server/db.mgmt.properties')
for line in f:
  if not line.startswith("#"):
    (key, value) = line.split("=")
    s = key.split('.')
    service = s[3].strip()
    setting = s[5].strip()
    value = value.strip()
    if service not in creds:
      creds[service] = {}
    creds[service][setting] = value


api = ApiResource(sys.argv[1], username="admin", password="admin", use_tls=False, version=4)
cm = api.get_cloudera_manager()

roles = [ApiRole(api, t.lower(), t, ApiHostRef(api, sys.argv[1])) for t in ROLE_TYPES]
try:
   service = cm.get_service()
except ApiException:
   mgmt = ApiServiceSetupInfo("management", "MGMT", roles=roles)
   service = cm.create_mgmt_service(mgmt)

rcg = service.get_all_role_config_groups()
for rc in rcg:
  if rc.roleType in ROLE_TYPES:
    config = {}
    # Reduce amount of some logs to 1 day
    if rc.roleType == "ACTIVITYMONITOR":
        config["firehose_activity_purge_duration_hours"] = "24"
Developer ID: qubell-bazaar, Project: cookbook-hadoop, Lines of code: 33, Source file: cms.py

Example 12: ApiResource

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
#!/usr/bin/env python
#Author: Pratap Raj
#Purpose: Start Cloudera Management services

import sys
import socket
from cm_api.api_client import ApiResource
from cm_api.endpoints.cms import ClouderaManager

#########
# Do not edit any system variables here. They are all passed from the startstopcluster.sh script, so make changes there.
cmhost=str(sys.argv[1])
cmport=str(sys.argv[2])
cmusername=str(sys.argv[3])
cmpassword=str(sys.argv[4])
tlspref=str(sys.argv[5])
#########

api = ApiResource(cmhost, server_port=cmport, username=cmusername, password=cmpassword, use_tls=tlspref)

mgmt=api.get_cloudera_manager().get_service()
cmstartstatus=mgmt.start().wait()
print cmstartstatus.success
Developer ID: prataprajr, Project: cmapi_clusterstartstop, Lines of code: 25, Source file: startcmservices.py

Example 13: ApiResource

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
#!/usr/bin/env python
import socket
import time
from cm_api.api_client import ApiResource
#initialize

hosts = [ ]
cm_host = "cloudera-pe-cm01"
api = ApiResource(cm_host, username="admin", password="admin")

# Distribute the CDH parcel

parcel_repo = 'http://archive.cloudera.com/cdh5/parcels/5.2.0'
#parcel_repo = 'http://archive.cloudera.com/cdh5/parcels/5.1.3/'
cm_config = api.get_cloudera_manager().get_config(view='full')
repo_config = cm_config['REMOTE_PARCEL_REPO_URLS']
value = repo_config.value or repo_config.default
value += ',' + parcel_repo
api.get_cloudera_manager().update_config({'REMOTE_PARCEL_REPO_URLS': value})
time.sleep(10)

# create cluster, add the hosts
cluster = api.create_cluster("cloudera-pe-test", "CDH5")
#api.create_host("master", "ip-10-238-154-140", "10.238.154.140")
#api.create_host("w01", "ip-10-143-183-98", "10.143.183.98")
#api.create_host("w02", "ip-10-140-38-88", "10.140.38.88")
#api.create_host("w03", "ip-10-140-28-243", "10.140.28.243")
#hosts.append("master")
#hosts.append("w01")
#hosts.append("w02")
#hosts.append("w03")
Developer ID: implicateorder, Project: cm_api_automation, Lines of code: 33, Source file: create_cluster.py

Example 14: install_java_8

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def install_java_8(region, stack_name):
    # following general protocol for upgrading to JDK 1.8 here:
    # http://www.cloudera.com/content/cloudera/en/documentation/core/v5-3-x/topics/cdh_cm_upgrading_to_jdk8.html
    ec2_conn = create_ec2_connection(region)
    manager_instance = get_manager_instance(ec2_conn, stack_name)
    cluster_instances = (
        get_worker_instances(ec2_conn, stack_name) +
        [manager_instance, get_master_instance(ec2_conn, stack_name)])
    cluster_hosts = [i.ip_address for i in cluster_instances]

    with cm_tunnel_ctx(manager_instance) as local_port:
        # Connect to CM API
        cm_api = ApiResource('localhost', username='admin', password='admin',
                             server_port=local_port, version=9)
        cloudera_manager = cm_api.get_cloudera_manager()

        # Stop Cloudera Management Service
        print "Stopping Cloudera Management Service"
        mgmt_service = cloudera_manager.get_service()
        mgmt_service.stop().wait()

        # Stop cluster
        print "Stopping the cluster"
        clusters = cm_api.get_all_clusters()
        cluster = clusters.objects[0]
        cluster.stop().wait()

    # Stop all Cloudera Manager Agents
    @parallel
    def stop_cm_agents():
        sudo('service cloudera-scm-agent stop')
    execute(stop_cm_agents, hosts=cluster_hosts)

    # Stop the Cloudera Manager Server
    def stop_cm_server():
        sudo('service cloudera-scm-server stop')
    execute(stop_cm_server, hosts=[manager_instance.ip_address])

    # Cleanup other Java versions and install JDK 1.8
    @parallel
    def swap_jdks():
        sudo('rpm -qa | grep jdk | xargs rpm -e')
        sudo('rm -rf /usr/java/jdk1.6*')
        sudo('rm -rf /usr/java/jdk1.7*')
        run('wget -O jdk-8-linux-x64.rpm --no-cookies --no-check-certificate '
            '--header "Cookie: oraclelicense=accept-securebackup-cookie" '
            'http://download.oracle.com/otn-pub/java/jdk/8u51-b16/'
            'jdk-8u51-linux-x64.rpm')
        sudo('yum install -y jdk-8-linux-x64.rpm')
        append('/home/ec2-user/.bash_profile',
               'export JAVA_HOME=`find /usr/java -name "jdk1.8*"`')
    execute(swap_jdks, hosts=cluster_hosts)

    # Start the Cloudera Manager Server
    def start_cm_server():
        sudo('service cloudera-scm-server start')
    execute(start_cm_server, hosts=[manager_instance.ip_address])

    # Start all Cloudera Manager Agents
    @parallel
    def start_cm_agents():
        sudo('service cloudera-scm-agent start')
    execute(start_cm_agents, hosts=cluster_hosts)

    with cm_tunnel_ctx(manager_instance) as local_port:
        # Connect to CM API
        cm_api = ApiResource('localhost', username='admin', password='admin',
                             server_port=local_port, version=9)
        cloudera_manager = cm_api.get_cloudera_manager()

        # Start the cluster and the mgmt service
        print "Starting the cluster"
        cluster.start().wait()
        print "Starting the Cloudera Management Service"
        cloudera_manager = cm_api.get_cloudera_manager()
        mgmt_service = cloudera_manager.get_service()
        mgmt_service.start().wait()
Developer ID: heuermh, Project: eggo, Lines of code: 79, Source file: director.py

Example 15: create_cluster

# Required module: from cm_api.api_client import ApiResource [as alias]
# Or: from cm_api.api_client.ApiResource import get_cloudera_manager [as alias]
def create_cluster(config_dict):
    config.read(['./conf/hadrian.ini','./conf/cluster_specs.ini', './conf/cloudera-manager/cm.ini'])
    
    
    cm_cluster_name = config_grabber("Globals")['cm.cluster.name']
    cm_username = config_grabber("Globals")['cm.username']
    cm_password = config_grabber("Globals")['cm.password']
    cm_port = config_grabber("Globals")['cm.port']
    version = config_grabber('Globals')['cdh.cluster.version']
    cm_server = config_grabber(cm_cluster_name + '-en')['cm.server']
    
    #Grab all configuration files in the directory with the CM Cluster Name.
    
    for i in os.listdir('./conf/' + cm_cluster_name):
        config.read('./conf/' + cm_cluster_name + '/' + i)
    
    all_nodes = list()

    while (get_cm_status(cm_server + ':' + cm_port) != 200):
        print 'Waiting for CM Server to start... '
        time.sleep(15)
    
    api = ApiResource(cm_server, cm_port, cm_username, cm_password)
    # create cluster
    cluster = api.create_cluster(cm_cluster_name, version.upper())
    
    #Config CM
    print 'Applying any configuration changes to Cloudera Manager'
    cmanager = api.get_cloudera_manager()
    cmanager.update_config(config_grabber('cloudera-manager-updates'))
        
    planned_nodes = config_grabber(cm_cluster_name + '-en')['full.list'].split(',')
    for k, v in config_grabber(cm_cluster_name + '-dn').iteritems():
        for j in v.split(','):
            planned_nodes.append(j)
    
    # TODO make this smarter.  show which agents haven't checked in.  Add the option to continue without them.
    if len(api.get_all_hosts()) != len(planned_nodes):
        print 'Waiting for all agents to check into the CM Server before continuing.'
        
        while len(planned_nodes) > len(api.get_all_hosts()):
            print 'Waiting for the final set of CM Agent nodes to check in.' 
            time.sleep(5)
        
    print 'Updating Rack configuration for data nodes.'
    all_hosts = list()
    for host in api.get_all_hosts():
        all_hosts.append(host.hostId)
        for k,v in config_grabber(cm_cluster_name + '-dn').iteritems():
            if host.hostname in v:
                print 'Setting host: ' + host.hostname + ' to rack /default/' + k
                host.set_rack_id('/default/' + k)
    
    print 'Adding all hosts to cluster.'
    cluster.add_hosts(all_hosts)

    # download CDH Parcels
    # TODO add some logic here to make the parcel list something that's read from the hadrian.ini
    # This will allow support for other CDH packages, Search, etc.
    if config_grabber('Globals')['cdh.distribution.method'] == 'parcels':
        distribute_parcel(cluster, 'CDH', config_grabber("Globals")['cdh.parcel.version'])
    
    if config_dict.get('hdfs_ha') == True:
        create_zookeeper_service(config_dict, cluster)
    create_hdfs_service(config_dict, cluster)    

    cmd = cluster.deploy_client_config()
    if not cmd.wait(CMD_TIMEOUT).success:
        print 'Failed to deploy client configurations'
    else:
        print 'Client configuration deployment complete.'

    create_mapred_service(config_dict, cluster, cm_server)
    if config_dict.get('hbase') == True:
        if config_dict.get('hdfs_ha') == False:
            create_zookeeper_service(config_dict, cluster)
        create_hbase_service(config_dict, cluster)
    if config_dict.get('hive') == True:
        create_hive_service(config_dict, cluster)
    print 'Starting final client configuration deployment for all services.'
    cmd = cluster.deploy_client_config()
    if not cmd.wait(CMD_TIMEOUT).success:
        print 'Failed to deploy client configuration.'
    else:
        print 'Client configuration deployment complete.  The cluster is all yours.  Happy Hadooping.'
Developer ID: andmarios, Project: hadrian, Lines of code: 87, Source file: CreateCdhCluster.py


Note: The cm_api.api_client.ApiResource.get_cloudera_manager method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright in the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.