

Python i18n._LI Function Code Examples

This article collects typical usage examples of the Python function sahara.i18n._LI. If you have been wondering what exactly _LI does, how it is used, and what real _LI calls look like, the curated examples below should help.


Fifteen code examples of the _LI function are shown below, ordered by popularity.
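
For background: in sahara, as in other OpenStack projects of that era, _LI is a translation marker for INFO-level log messages, produced by oslo.i18n's TranslatorFactory. Marking a string with _LI lets the translation tooling extract it while the usual LOG.info call still works unchanged. The following sketch shows how sahara/i18n.py typically defines these markers; it follows the standard oslo.i18n pattern, and the exact module contents may differ between sahara releases.

# A minimal sketch of sahara/i18n.py, assuming the standard oslo.i18n
# pattern; the real module may differ between releases.
import oslo_i18n

_translators = oslo_i18n.TranslatorFactory(domain='sahara')

# Primary translation function for user-facing messages.
_ = _translators.primary

# Per-log-level translation markers: info, warning, error, critical.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical

Because _LI(...) returns a translatable message object, most of the examples below pass substitution values to LOG.info as separate arguments (lazy %-interpolation) rather than formatting the string up front.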

Example 1: start_cluster

    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        sm_instance = utils.get_instance(cluster, "master")
        dn_instances = utils.get_instances(cluster, "datanode")

        # Start the name node
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        # start the data nodes
        self._start_slave_datanode_processes(dn_instances)

        LOG.info(_LI("Hadoop services in cluster %s have been started"), cluster.name)

        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                              "/user/$USER/")

        # start spark nodes
        if sm_instance:
            with remote.get_remote(sm_instance) as r:
                run.start_spark_master(r, self._spark_home(cluster))
                LOG.info(_LI("Spark service at '%s' has been started"), sm_instance.hostname())

        LOG.info(_LI("Cluster %s has been started successfully"), cluster.name)
        self._set_cluster_info(cluster)
Developer: hao707822882, Project: sahara, Lines: 27, Source: plugin.py

Example 2: wait_for_host_registrations

    def wait_for_host_registrations(self, num_hosts, ambari_info):
        LOG.info(
            _LI('Waiting for all Ambari agents to register with server ...'))

        url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
        result = None
        json_result = None

        # TODO(jspeidel): timeout
        while result is None or len(json_result['items']) < num_hosts:
            context.sleep(5)
            try:
                result = self._get(url, ambari_info)
                json_result = json.loads(result.text)

                LOG.info(_LI('Registered Hosts: %(current_number)s of '
                             '%(final_number)s'),
                         {'current_number': len(json_result['items']),
                          'final_number': num_hosts})
                for hosts in json_result['items']:
                    LOG.debug('Registered Host: {0}'.format(
                        hosts['Hosts']['host_name']))
            except Exception:
                # TODO(jspeidel): max wait time
                LOG.info(_LI('Waiting to connect to ambari server ...'))
Developer: degorenko, Project: sahara, Lines: 25, Source: versionhandler.py

Example 3: start_cluster

    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        dn_instances = utils.get_instances(cluster, "datanode")
        zep_instance = utils.get_instance(cluster, "zeppelin")

        # Start the name node
        self._start_namenode(nn_instance)

        # start the data nodes
        self._start_datanode_processes(dn_instances)

        LOG.info(_LI("Hadoop services have been started"))

        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                              "/user/$USER/")

        # start spark nodes
        self.start_spark(cluster)

        # start zeppelin, if necessary
        if zep_instance:
            self._start_zeppelin(zep_instance)

        LOG.info(_LI('Cluster has been started successfully'))
        self._set_cluster_info(cluster)
Developer: crobby, Project: sahara, Lines: 27, Source: plugin.py

Example 4: start_services

    def start_services(self, cluster_name, cluster_spec, ambari_info):
        start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
                     'state=INSTALLED'.format(
                         ambari_info.get_address(), cluster_name))
        body = ('{"RequestInfo" : { "context" : "Start all services" },'
                '"Body" : {"ServiceInfo": {"state" : "STARTED"}}}')

        self._fire_service_start_notifications(
            cluster_name, cluster_spec, ambari_info)
        result = self._put(start_url, ambari_info, data=body)
        if result.status_code == 202:
            json_result = json.loads(result.text)
            request_id = json_result['Requests']['id']
            success = self._wait_for_async_request(
                self._get_async_request_uri(ambari_info, cluster_name,
                                            request_id), ambari_info)
            if success:
                LOG.info(
                    _LI("Successfully started Hadoop cluster."))
                LOG.info(_LI('Ambari server address: {server_address}')
                         .format(server_address=ambari_info.get_address()))

            else:
                LOG.error(_LE('Failed to start Hadoop cluster.'))
                raise ex.HadoopProvisionError(
                    _('Start of Hadoop services failed.'))

        elif result.status_code != 200:
            LOG.error(
                _LE('Start command failed. Status: {status}, '
                    'response: {response}').format(status=result.status_code,
                                                   response=result.text))
            raise ex.HadoopProvisionError(
                _('Start of Hadoop services failed.'))
Developer: AlexanderYAPPO, Project: sahara, Lines: 34, Source: versionhandler.py

Example 5: main

def main():
    # TODO(tmckay): Work on restricting the options
    # pulled in by imports which show up in the help.
    # If we find a nice way to do this the calls to
    # unregister_extra_cli_opt() can be removed
    CONF(project="sahara")

    # For some reason, this is necessary to clear cached values
    # and re-read configs.  For instance, if this is not done
    # here the 'plugins' value will not reflect the value from
    # the config file on the command line
    CONF.reload_config_files()
    log.setup(CONF, "sahara")

    # If we have to enforce extra option checks, like one option
    # requires another, do it here
    extra_option_checks()

    # Since this may be scripted, record the command in the log
    # so a user can know exactly what was done
    LOG.info(_LI("Command: {command}").format(command=" ".join(sys.argv)))

    api.set_logger(LOG)
    api.set_conf(CONF)

    CONF.command.func()

    LOG.info(_LI("Finished {command}").format(command=CONF.command.name))
Developer: thefuyang, Project: sahara, Lines: 28, Source: cli.py

Example 6: _await_networks

    def _await_networks(self, cluster, instances):
        if not instances:
            return

        ips_assigned = set()
        while len(ips_assigned) != len(instances):
            if not g.check_cluster_exists(cluster):
                return
            for instance in instances:
                if instance.id not in ips_assigned:
                    if networks.init_instances_ips(instance):
                        ips_assigned.add(instance.id)

            context.sleep(1)

        LOG.info(
            _LI("Cluster '%s': all instances have IPs assigned"), cluster.id)

        cluster = conductor.cluster_get(context.ctx(), cluster)
        instances = g.get_instances(cluster, ips_assigned)

        with context.ThreadGroup() as tg:
            for instance in instances:
                tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                         self._wait_until_accessible, instance)

        LOG.info(_LI("Cluster '%s': all instances are accessible"), cluster.id)
Developer: degorenko, Project: sahara, Lines: 27, Source: engine.py

Example 7: _install_services

    def _install_services(self, cluster_name, ambari_info):
        LOG.info(_LI('Installing required Hadoop services ...'))

        ambari_address = ambari_info.get_address()
        install_url = ('http://{0}/api/v1/clusters/{'
                       '1}/services?ServiceInfo/state=INIT'.format(
                           ambari_address, cluster_name))
        body = ('{"RequestInfo" : { "context" : "Install all services" },'
                '"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')

        result = self._put(install_url, ambari_info, data=body)

        if result.status_code == 202:
            json_result = json.loads(result.text)
            request_id = json_result['Requests']['id']
            success = self._wait_for_async_request(self._get_async_request_uri(
                ambari_info, cluster_name, request_id),
                ambari_info)
            if success:
                LOG.info(_LI("Install of Hadoop stack successful."))
                self._finalize_ambari_state(ambari_info)
            else:
                LOG.critical(_LC('Install command failed.'))
                raise ex.HadoopProvisionError(
                    _('Installation of Hadoop stack failed.'))
        elif result.status_code != 200:
            LOG.error(
                _LE('Install command failed. {0}').format(result.text))
            raise ex.HadoopProvisionError(
                _('Installation of Hadoop stack failed.'))
Developer: degorenko, Project: sahara, Lines: 30, Source: versionhandler.py

Example 8: _await_networks

    def _await_networks(self, cluster, instances):
        if not instances:
            return

        cpo.add_provisioning_step(cluster.id, _("Assign IPs"), len(instances))

        ips_assigned = set()
        self._ips_assign(ips_assigned, cluster, instances)

        LOG.info(
            _LI("Cluster {cluster_id}: all instances have IPs assigned")
            .format(cluster_id=cluster.id))

        cluster = conductor.cluster_get(context.ctx(), cluster)
        instances = g.get_instances(cluster, ips_assigned)

        cpo.add_provisioning_step(
            cluster.id, _("Wait for instance accessibility"), len(instances))

        with context.ThreadGroup() as tg:
            for instance in instances:
                tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                         self._wait_until_accessible, instance)

        LOG.info(_LI("Cluster {cluster_id}: all instances are accessible")
                 .format(cluster_id=cluster.id))
Developer: YongchaoTIAN, Project: sahara, Lines: 26, Source: engine.py

Example 9: stop_services

def stop_services(cluster, instances):
    LOG.info(_LI("Stopping warden and zookeeper"))
    for instance in instances:
        with instance.remote() as r:
            r.execute_command(STOP_WARDEN_CMD, run_as_root=True)
            if check_if_is_zookeeper_node(instance):
                r.execute_command(STOP_ZOOKEEPER_CMD, run_as_root=True)
    LOG.info(_LI("Warden and zookeeper stopped"))
Developer: a9261, Project: sahara, Lines: 8, Source: scaling.py

Example 10: remove_services

def remove_services(cluster, instances):
    LOG.info(_LI("Starting removal of all MapR services"))
    for instance in instances:
        with instance.remote() as r:
            r.execute_command(REMOVE_MAPR_PACKAGES_CMD, run_as_root=True)
            r.execute_command(REMOVE_MAPR_HOME_CMD, run_as_root=True)
            r.execute_command(REMOVE_MAPR_CORES_CMD, run_as_root=True)
    LOG.info(_LI("All MapR services removed"))
Developer: a9261, Project: sahara, Lines: 8, Source: scaling.py

Example 11: format_cluster_deleted_message

def format_cluster_deleted_message(cluster):
    msg = _LI("Cluster %(name)s (id=%(id)s) was deleted. "
              "Canceling current operation.")

    if cluster:
        return (msg, {'name': cluster.name,
                      'id': cluster.id})
    return (msg, {'name': _LI("Unknown"),
                  'id': _LI("Unknown")})
Developer: turu, Project: sahara, Lines: 9, Source: general.py
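
Example 11 differs from the others: it returns a (message, params) tuple instead of logging directly, leaving the lazy %-interpolation to the caller. A hypothetical caller, not part of the original source, might consume it like this:

# Hypothetical usage of format_cluster_deleted_message; LOG is assumed to
# be a module-level logger as in the other examples.
msg, params = format_cluster_deleted_message(cluster)
LOG.info(msg, params)  # logger fills in %(name)s and %(id)s lazily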

Example 12: move_node

def move_node(cluster, instances):
    LOG.info(_LI("Moving nodes to /decommissioned"))
    for instance in instances:
        with instance.remote() as r:
            command = GET_SERVER_ID_CMD % instance.management_ip
            ec, out = r.execute_command(command, run_as_root=True)
            command = MOVE_NODE_CMD % out.strip()
            r.execute_command(command, run_as_root=True)
    LOG.info(_LI("Nodes moved to /decommissioned"))
Developer: a9261, Project: sahara, Lines: 9, Source: scaling.py

Example 13: scale_cluster

def scale_cluster(cluster, instances, disk_setup_script_path, waiting_script,
                  context, configure_sh_string, is_node_awareness):
    LOG.info(_LI('START: Cluster scaling. Cluster = %s'), cluster.name)
    for inst in instances:
        start_helper.install_role_on_instance(inst, context)
    config.configure_instances(cluster, instances)
    start_services(cluster, instances, disk_setup_script_path,
                   waiting_script, configure_sh_string)
    LOG.info(_LI('END: Cluster scaling. Cluster = %s'), cluster.name)
Developer: a9261, Project: sahara, Lines: 9, Source: scaling.py

Example 14: _install_components

    def _install_components(self, ambari_info, auth, cluster_name, servers):
        # query for the host components on the given hosts that are in the
        # INIT state
        # TODO(jspeidel): provide request context
        body = '{"HostRoles": {"state" : "INSTALLED"}}'
        install_uri = ('http://{0}/api/v1/clusters/{'
                       '1}/host_components?HostRoles/state=INIT&'
                       'HostRoles/host_name.in({2})'.format(
                           ambari_info.get_address(), cluster_name,
                           self._get_host_list(servers)))
        self._exec_ambari_command(ambari_info, body, install_uri)
        LOG.info(_LI('Started Hadoop components while scaling up'))
        LOG.info(_LI('Ambari server ip {ip}')
                 .format(ip=ambari_info.get_address()))
Developer: AlexanderYAPPO, Project: sahara, Lines: 14, Source: versionhandler.py

Example 15: start_cluster

    def start_cluster(self, cluster):
        nn_instance = vu.get_namenode(cluster)
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_processes(remote.get_remote(snn), "secondarynamenode")

        jt_instance = vu.get_jobtracker(cluster)
        if jt_instance:
            run.start_processes(remote.get_remote(jt_instance), "jobtracker")

        self._start_tt_dn_processes(utils.get_instances(cluster))

        self._await_datanodes(cluster)

        LOG.info(_LI("Hadoop services in cluster %s have been started"),
                 cluster.name)

        oozie = vu.get_oozie(cluster)
        if oozie:
            with remote.get_remote(oozie) as r:
                if c_helper.is_mysql_enable(cluster):
                    run.mysql_start(r, oozie)
                    run.oozie_create_db(r)
                run.oozie_share_lib(r, nn_instance.hostname())
                run.start_oozie(r)
                LOG.info(_LI("Oozie service at '%s' has been started"),
                         nn_instance.hostname())

        hive_server = vu.get_hiveserver(cluster)
        if hive_server:
            with remote.get_remote(hive_server) as r:
                run.hive_create_warehouse_dir(r)
                run.hive_copy_shared_conf(
                    r, edp.get_hive_shared_conf_path('hadoop'))

                if c_helper.is_mysql_enable(cluster):
                    if not oozie or hive_server.hostname() != oozie.hostname():
                        run.mysql_start(r, hive_server)
                    run.hive_create_db(r)
                    run.hive_metastore_start(r)
                    LOG.info(_LI("Hive Metastore server at %s has been "
                                 "started"),
                             hive_server.hostname())

        LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
        self._set_cluster_info(cluster)
Developer: stannie42, Project: sahara, Lines: 49, Source: versionhandler.py
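
A closing note on style: the examples above mix two formatting idioms. Examples 1, 2, 6, 13, and 15 pass substitution arguments to LOG.info separately, so interpolation happens only if the record is actually emitted, while Examples 4, 5, 8, and 14 call .format() on the marked string eagerly. A minimal side-by-side sketch, with an illustrative cluster name:

# Both idioms as they appear in the examples above; 'demo-cluster' is an
# illustrative placeholder.
cluster_name = 'demo-cluster'

# Lazy %-interpolation: substitution is deferred to the logging machinery
# and skipped entirely if the record is filtered out.
LOG.info(_LI("Cluster %s has been started successfully"), cluster_name)

# Eager .format(): the final string is built before LOG.info is called.
LOG.info(_LI("Cluster {name} has been started successfully")
         .format(name=cluster_name))

OpenStack logging guidelines generally favor the lazy form for this reason.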


Note: the sahara.i18n._LI examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use of the code should follow the corresponding project's license. Please do not reproduce this article without permission.