

Python utils.get_instances Function Code Examples

This article collects typical usage examples of the Python function sahara.plugins.utils.get_instances. If you are unsure what get_instances does, how to call it, or what real-world usage looks like, the curated examples below should help.


The following presents 15 code examples of the get_instances function, sorted by popularity by default.
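
Before the examples, a note on the call shape they all share: get_instances(cluster) returns every instance in the cluster, while get_instances(cluster, node_process) returns only the instances whose node groups run the given process (an empty list if none do, as Examples 2 and 3 assert). The real implementation lives in sahara/plugins/utils.py; the following is only a minimal illustrative sketch of that behavior, inferred from the cluster.node_groups, node_processes, and instances attributes the examples use, not the project's source:

def get_instances(cluster, node_process=None):
    # Illustrative sketch only -- approximates the behavior the
    # examples below rely on, not sahara's actual implementation.
    # Flatten the instances of every matching node group into one list;
    # when node_process is given, keep only the node groups running it.
    instances = []
    for node_group in cluster.node_groups:
        if node_process is None or node_process in node_group.node_processes:
            instances.extend(node_group.instances)
    return instances

Read this way, Example 13 below uses len(utils.get_instances(cluster, "datanode")) simply to count DataNodes across all node groups before deciding whether the cluster can shrink.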

Example 1: test_get_instances

    def test_get_instances(self):
        res = pu.get_instances(self.cluster)
        self.assertEqual([
            FakeInstance("1"), FakeInstance("2"), FakeInstance("3")], res)

        res = pu.get_instances(self.cluster, "node_process1")
        self.assertEqual([FakeInstance("1")], res)
Developer: openstack, Project: sahara, Lines: 7, Source: test_utils.py

Example 2: test_get_instances

    def test_get_instances(self):
        self.assertEqual(5, len(u.get_instances(self.c1)))
        self.assertEqual([], u.get_instances(self.c1, "wrong-process"))
        self.assertEqual(self.ng1.instances, u.get_instances(self.c1, "nn"))
        instances = list(self.ng2.instances)
        instances += self.ng3.instances
        self.assertEqual(instances, u.get_instances(self.c1, "dn"))
Developer: egafford, Project: sahara, Lines: 7, Source: test_utils.py

Example 3: test_get_instances

    def test_get_instances(self):
        self.assertEqual(len(u.get_instances(self.c1)), 5)
        self.assertEqual(u.get_instances(self.c1, 'wrong-process'), [])
        self.assertEqual(u.get_instances(self.c1, 'nn'),
                         self.ng1.instances)
        instances = list(self.ng2.instances)
        instances += self.ng3.instances
        self.assertEqual(u.get_instances(self.c1, 'dn'), instances)
Developer: a9261, Project: sahara, Lines: 8, Source: test_utils.py

Example 4: restart_nns_and_rms

def restart_nns_and_rms(cluster):
    nns = plugin_utils.get_instances(cluster, p_common.NAMENODE)
    for nn in nns:
        restart_namenode(cluster, nn)

    rms = plugin_utils.get_instances(cluster, p_common.RESOURCEMANAGER)
    for rm in rms:
        restart_resourcemanager(cluster, rm)
Developer: Imperat, Project: sahara, Lines: 8, Source: deploy.py

Example 5: deploy_kerberos

    def deploy_kerberos(self, cluster):
        all_instances = plugin_utils.get_instances(cluster)
        namenodes = plugin_utils.get_instances(cluster, 'namenode')
        server = None
        if len(namenodes) > 0:
            server = namenodes[0]
        elif len(all_instances) > 0:
            server = all_instances[0]
        if server:
            krb.deploy_infrastructure(cluster, server)
Developer: Imperat, Project: sahara, Lines: 10, Source: plugin.py

Example 6: _extract_configs_to_extra

    def _extract_configs_to_extra(self, cluster):
        st_master = utils.get_instance(cluster, "nimbus")
        zk_servers = utils.get_instances(cluster, "zookeeper")

        extra = dict()

        config_instances = ''
        if st_master is not None:
            # zknames must be initialized before the generate_storm_config
            # call below, or it would be undefined when zk_servers is None.
            zknames = []
            if zk_servers is not None:
                for zk in zk_servers:
                    zknames.append(zk.hostname())

            config_instances = c_helper.generate_storm_config(
                st_master.hostname(),
                zknames)

        config = self._convert_dict_to_yaml(config_instances)
        supervisor_conf = c_helper.generate_slave_supervisor_conf()
        nimbus_ui_conf = c_helper.generate_master_supervisor_conf()
        zk_conf = c_helper.generate_zookeeper_conf()

        for ng in cluster.node_groups:
            extra[ng.id] = {
                'st_instances': config,
                'slave_sv_conf': supervisor_conf,
                'master_sv_conf': nimbus_ui_conf,
                'zk_conf': zk_conf
            }

        return extra
Developer: frgaudet, Project: sahara, Lines: 31, Source: plugin.py

Example 7: start_cluster

    def start_cluster(self, cluster):
        self._set_cluster_info(cluster)
        deploy.start_cluster(cluster)
        cluster_instances = plugin_utils.get_instances(cluster)
        swift_helper.install_ssl_certs(cluster_instances)
        deploy.add_hadoop_swift_jar(cluster_instances)
        deploy.prepare_hive(cluster)
Developer: Imperat, Project: sahara, Lines: 7, Source: plugin.py

Example 8: _clear_exclude_files

def _clear_exclude_files(cluster):
    for instance in u.get_instances(cluster):
        with instance.remote() as r:
            r.execute_command(
                'sudo su - -c "echo > %s/dn-exclude" hadoop' % HADOOP_CONF_DIR)
            r.execute_command(
                'sudo su - -c "echo > %s/nm-exclude" hadoop' % HADOOP_CONF_DIR)
Developer: a9261, Project: sahara, Lines: 7, Source: scaling.py

Example 9: get_plain_instances

    def get_plain_instances(self):
        fs = self.get_fs_instances()
        zk = self.get_zk_instances()
        cldb = self.get_cldb_instances()
        zk_fs_cldb = zk + fs + cldb
        instances = u.get_instances(self.get_cluster())
        return [i for i in instances if i not in zk_fs_cldb]
Developer: a9261, Project: sahara, Lines: 7, Source: base_context.py

Example 10: start_cluster

    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        sm_instance = utils.get_instance(cluster, "master")
        dn_instances = utils.get_instances(cluster, "datanode")

        # Start the name node
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        # start the data nodes
        self._start_slave_datanode_processes(dn_instances)

        LOG.info(_LI("Hadoop services in cluster %s have been started"),
                 cluster.name)

        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                              "/user/$USER/")

        # start spark nodes
        if sm_instance:
            with remote.get_remote(sm_instance) as r:
                run.start_spark_master(r, self._spark_home(cluster))
                LOG.info(_LI("Spark service at '%s' has been started"),
                         sm_instance.hostname())

        LOG.info(_LI('Cluster %s has been started successfully'),
                 cluster.name)
        self._set_cluster_info(cluster)
Developer: degorenko, Project: sahara, Lines: 31, Source: plugin.py

Example 11: start_cluster

    def start_cluster(self, cluster):
        nn = vu.get_namenode(cluster)
        run.format_namenode(nn)
        run.start_hadoop_process(nn, 'namenode')

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_hadoop_process(snn, 'secondarynamenode')

        rm = vu.get_resourcemanager(cluster)
        if rm:
            run.start_yarn_process(rm, 'resourcemanager')

        run.start_dn_nm_processes(utils.get_instances(cluster))

        run.await_datanodes(cluster)

        hs = vu.get_historyserver(cluster)
        if hs:
            run.start_historyserver(hs)

        oo = vu.get_oozie(cluster)
        if oo:
            run.start_oozie_process(self.pctx, oo)

        hiveserver = vu.get_hiveserver(cluster)
        if hiveserver:
            run.start_hiveserver_process(self.pctx, hiveserver)

        self._set_cluster_info(cluster)
Developer: AlexanderYAPPO, Project: sahara, Lines: 29, Source: versionhandler.py

Example 12: _setup_instances

    def _setup_instances(self, cluster, instances=None):
        extra = self._extract_configs_to_extra(cluster)

        if instances is None:
            instances = utils.get_instances(cluster)

        self._push_configs_to_nodes(cluster, extra, instances)
Developer: crobby, Project: sahara, Lines: 7, Source: plugin.py

Example 13: _validate_existing_ng_scaling

    def _validate_existing_ng_scaling(self, cluster, existing):
        scalable_processes = self._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                if ng.count > existing[ng.id] and ("datanode" in
                                                   ng.node_processes):
                    dn_to_delete += ng.count - existing[ng.id]
                if not set(ng.node_processes).issubset(scalable_processes):
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name, _("Spark plugin cannot scale nodegroup"
                                   " with processes: %s") %
                        ' '.join(ng.node_processes))

        dn_amount = len(utils.get_instances(cluster, "datanode"))
        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)

        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
            raise ex.ClusterCannotBeScaled(
                cluster.name, _("Spark plugin cannot shrink cluster because "
                                "there would be not enough nodes for HDFS "
                                "replicas (replication factor is %s)") %
                rep_factor)
Developer: crobby, Project: sahara, Lines: 25, Source: plugin.py

Example 14: start_cluster

    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        dn_instances = utils.get_instances(cluster, "datanode")
        zep_instance = utils.get_instance(cluster, "zeppelin")

        # Start the name node
        self._start_namenode(nn_instance)

        # start the data nodes
        self._start_datanode_processes(dn_instances)

        LOG.info(_LI("Hadoop services have been started"))

        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                              "/user/$USER/")

        # start spark nodes
        self.start_spark(cluster)

        # start zeppelin, if necessary
        if zep_instance:
            self._start_zeppelin(zep_instance)

        LOG.info(_LI('Cluster has been started successfully'))
        self._set_cluster_info(cluster)
Developer: crobby, Project: sahara, Lines: 27, Source: plugin.py

Example 15: setup_agents

def setup_agents(cluster, instances=None):
    LOG.debug("Set up Ambari agents")
    manager_address = plugin_utils.get_instance(
        cluster, p_common.AMBARI_SERVER).fqdn()
    if not instances:
        instances = plugin_utils.get_instances(cluster)
    _setup_agents(instances, manager_address)
Developer: Imperat, Project: sahara, Lines: 7, Source: deploy.py


Note: The sahara.plugins.utils.get_instances examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open source code and documentation platforms. The snippets were selected from open source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.