

Python utils.get_hiveserver Function Code Examples

This article collects typical usage examples of the Python function sahara.plugins.vanilla.utils.get_hiveserver. If you have been wondering what get_hiveserver does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.


The 14 code examples of get_hiveserver shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
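
For reference, the helper under discussion is a thin lookup over the cluster's node groups. Below is a minimal sketch of what get_hiveserver typically looks like in sahara/plugins/vanilla/utils.py, assuming sahara's generic u.get_instance process lookup; treat it as a sketch, not the exact upstream source:

from sahara.plugins import utils as u

def get_hiveserver(cluster):
    # Return the single instance that runs the 'hiveserver' process,
    # or None if no node group in the cluster includes that process
    # (which is exactly what Example 1 below asserts).
    return u.get_instance(cluster, 'hiveserver')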

Example 1: test_get_hiveserver

    def test_get_hiveserver(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager, self.ng_hiveserver])
        self.assertEqual('hs1', u.get_hiveserver(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager])
        self.assertIsNone(u.get_hiveserver(cl))
Developer: uladz | Project: sahara | Lines: 8 | Source: test_utils.py

Example 2: start_cluster

    def start_cluster(self, cluster):
        nn = vu.get_namenode(cluster)
        run.format_namenode(nn)
        run.start_hadoop_process(nn, 'namenode')

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_hadoop_process(snn, 'secondarynamenode')

        rm = vu.get_resourcemanager(cluster)
        if rm:
            run.start_yarn_process(rm, 'resourcemanager')

        run.start_dn_nm_processes(utils.get_instances(cluster))

        run.await_datanodes(cluster)

        hs = vu.get_historyserver(cluster)
        if hs:
            run.start_historyserver(hs)

        oo = vu.get_oozie(cluster)
        if oo:
            run.start_oozie_process(self.pctx, oo)

        hiveserver = vu.get_hiveserver(cluster)
        if hiveserver:
            run.start_hiveserver_process(self.pctx, hiveserver)

        self._set_cluster_info(cluster)
Developer: AlexanderYAPPO | Project: sahara | Lines: 29 | Source: versionhandler.py

Example 3: _extract_configs_to_extra

    def _extract_configs_to_extra(self, cluster):
        oozie = vu.get_oozie(cluster)
        hive = vu.get_hiveserver(cluster)

        extra = dict()

        if hive:
            extra['hive_mysql_passwd'] = six.text_type(uuid.uuid4())

        for ng in cluster.node_groups:
            extra[ng.id] = {
                'xml': c_helper.generate_xml_configs(
                    cluster, ng, extra['hive_mysql_passwd'] if hive else None),
                'setup_script': c_helper.generate_setup_script(
                    ng.storage_paths(),
                    c_helper.extract_environment_confs(ng.configuration()),
                    append_oozie=(
                        oozie and oozie.node_group.id == ng.id)
                )
            }

        if c_helper.is_data_locality_enabled(cluster):
            topology_data = th.generate_topology_map(
                cluster, CONF.enable_hypervisor_awareness)
            extra['topology_data'] = "\n".join(
                [k + " " + v for k, v in topology_data.items()]) + "\n"

        return extra
Developer: stannie42 | Project: sahara | Lines: 28 | Source: versionhandler.py

Example 4: _setup_instances

    def _setup_instances(self, cluster, instances):
        if (CONF.use_identity_api_v3 and CONF.use_domain_for_proxy_users and
                vu.get_hiveserver(cluster) and
                c_helper.is_swift_enable(cluster)):
            cluster = proxy.create_proxy_user_for_cluster(cluster)
            instances = utils.get_instances(cluster)

        extra = self._extract_configs_to_extra(cluster)
        cluster = conductor.cluster_get(context.ctx(), cluster)
        self._push_configs_to_nodes(cluster, extra, instances)
Developer: al-indigo | Project: sahara | Lines: 10 | Source: versionhandler.py

Example 5: configure_cluster

def configure_cluster(pctx, cluster):
    LOG.debug("Configuring cluster")
    if (CONF.use_identity_api_v3 and CONF.use_domain_for_proxy_users and
            vu.get_hiveserver(cluster) and
            c_helper.is_swift_enabled(pctx, cluster)):
        cluster = proxy.create_proxy_user_for_cluster(cluster)

    instances = utils.get_instances(cluster)
    configure_instances(pctx, instances)
    configure_topology_data(pctx, cluster)
Developer: egafford | Project: sahara | Lines: 10 | Source: config.py
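
Examples 4 and 5 repeat the same gate before creating a proxy user for Hive's Swift access (Example 8 below uses a shorter variant without the domain check). A minimal sketch that factors the condition into a standalone predicate, assuming the same CONF, vu, and c_helper objects as above; the helper name _hive_needs_proxy_user is hypothetical:

def _hive_needs_proxy_user(pctx, cluster):
    # Hypothetical helper: a proxy user is only needed when Keystone v3
    # domains are in use, the cluster actually runs a hiveserver, and
    # Swift integration is enabled for the cluster.
    return bool(CONF.use_identity_api_v3 and
                CONF.use_domain_for_proxy_users and
                vu.get_hiveserver(cluster) and
                c_helper.is_swift_enabled(pctx, cluster))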

Example 6: generate_xml_configs

def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    ng_configs = node_group.configuration()

    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)

    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # invoking applied configs to appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)

        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(all_cfg, core_all),
        'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all),
        'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT)
    }

    if hive_hostname:
        cfg = all_cfg
        cfg_filter = HIVE_DEFAULT
        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if CONF.use_identity_api_v3 and proxy_configs:
            cfg, cfg_filter = _inject_swift_trust_info(cfg,
                                                       cfg_filter,
                                                       proxy_configs)
        xml_configs.update({'hive-site':
                            x.create_hadoop_xml(cfg, cfg_filter)})
        LOG.debug('Generated hive-site.xml for hive {host}'.format(
            host=hive_hostname))

    if oozie_hostname:
        xml_configs.update({'oozie-site':
                            x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug('Generated oozie-site.xml for oozie {host}'.format(
            host=oozie_hostname))

    return xml_configs
Developer: AlexanderYAPPO | Project: sahara | Lines: 55 | Source: config_helper.py
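
The dict returned above maps a file name ('core-site', 'hive-site', ...) to a rendered XML string. A minimal standalone sketch of the x.create_hadoop_xml call, assuming sahara's sahara.utils.xmlutils module and a toy config filter in the same list-of-dicts shape as CORE_DEFAULT; the signature is inferred from the usage above:

from sahara.utils import xmlutils as x

# Toy inputs: configs is a flat name -> value mapping; the filter lists
# which property names are allowed into the generated file.
configs = {'fs.default.name': 'hdfs://master:8020',
           'dfs.replication': '3'}
cfg_filter = [{'name': 'fs.default.name'}, {'name': 'dfs.replication'}]

core_site = x.create_hadoop_xml(configs, cfg_filter)
print(core_site)  # '<configuration><property>...</property></configuration>'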

Example 7: generate_sahara_configs

def generate_sahara_configs(cluster, node_group=None):
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    jt_hostname = vu.get_instance_hostname(vu.get_jobtracker(cluster))
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    storage_path = node_group.storage_paths() if node_group else None

    # inserting common configs depends on provisioned VMs and HDFS placement
    # TODO(aignatov): should be moved to cluster context

    cfg = {
        'fs.default.name': 'hdfs://%s:8020' % nn_hostname,
        'dfs.name.dir': extract_hadoop_path(storage_path,
                                            '/lib/hadoop/hdfs/namenode'),
        'dfs.data.dir': extract_hadoop_path(storage_path,
                                            '/lib/hadoop/hdfs/datanode'),
        'dfs.hosts': '/etc/hadoop/dn.incl',
        'dfs.hosts.exclude': '/etc/hadoop/dn.excl',
    }

    if jt_hostname:
        mr_cfg = {
            'mapred.job.tracker': '%s:8021' % jt_hostname,
            'mapred.system.dir': extract_hadoop_path(storage_path,
                                                     '/mapred/mapredsystem'),
            'mapred.local.dir': extract_hadoop_path(storage_path,
                                                    '/lib/hadoop/mapred'),
            'mapred.hosts': '/etc/hadoop/tt.incl',
            'mapred.hosts.exclude': '/etc/hadoop/tt.excl',
        }
        cfg.update(mr_cfg)

    if oozie_hostname:
        o_cfg = {
            'hadoop.proxyuser.hadoop.hosts': "localhost," + oozie_hostname,
            'hadoop.proxyuser.hadoop.groups': 'hadoop',
        }
        cfg.update(o_cfg)
        LOG.debug('Applied Oozie configs for core-site.xml')
        cfg.update(o_h.get_oozie_required_xml_configs())
        LOG.debug('Applied Oozie configs for oozie-site.xml')

    if hive_hostname:
        h_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }
        cfg.update(h_cfg)
        LOG.debug('Applied Hive config for hive metastore server')

    return cfg
Developer: a9261 | Project: sahara | Lines: 53 | Source: config_helper.py

Example 8: configure_cluster

def configure_cluster(pctx, cluster):
    LOG.debug("Configuring cluster \"%s\"", cluster.name)
    if (CONF.use_identity_api_v3 and vu.get_hiveserver(cluster) and
            c_helper.is_swift_enabled(pctx, cluster)):
        cluster = proxy.create_proxy_user_for_cluster(cluster)

    instances = []
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            instances.append(instance)

    configure_instances(pctx, instances)
    configure_topology_data(pctx, cluster)
Developer: degorenko | Project: sahara | Lines: 13 | Source: config.py

Example 9: start_cluster

    def start_cluster(self, cluster):
        nn_instance = vu.get_namenode(cluster)
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_processes(remote.get_remote(snn), "secondarynamenode")

        jt_instance = vu.get_jobtracker(cluster)
        if jt_instance:
            run.start_processes(remote.get_remote(jt_instance), "jobtracker")

        self._start_tt_dn_processes(utils.get_instances(cluster))

        self._await_datanodes(cluster)

        LOG.info(_LI("Hadoop services in cluster %s have been started"),
                 cluster.name)

        oozie = vu.get_oozie(cluster)
        if oozie:
            with remote.get_remote(oozie) as r:
                if c_helper.is_mysql_enable(cluster):
                    run.mysql_start(r, oozie)
                    run.oozie_create_db(r)
                run.oozie_share_lib(r, nn_instance.hostname())
                run.start_oozie(r)
                LOG.info(_LI("Oozie service at '%s' has been started"),
                         nn_instance.hostname())

        hive_server = vu.get_hiveserver(cluster)
        if hive_server:
            with remote.get_remote(hive_server) as r:
                run.hive_create_warehouse_dir(r)
                run.hive_copy_shared_conf(
                    r, edp.get_hive_shared_conf_path('hadoop'))

                if c_helper.is_mysql_enable(cluster):
                    if not oozie or hive_server.hostname() != oozie.hostname():
                        run.mysql_start(r, hive_server)
                    run.hive_create_db(r)
                    run.hive_metastore_start(r)
                    LOG.info(_LI("Hive Metastore server at %s has been "
                                 "started"),
                             hive_server.hostname())

        LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
        self._set_cluster_info(cluster)
Developer: stannie42 | Project: sahara | Lines: 49 | Source: versionhandler.py

Example 10: generate_xml_configs

def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    ng_configs = node_group.configuration()

    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)

    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # invoking applied configs to appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)

        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        "core-site": x.create_hadoop_xml(all_cfg, core_all),
        "mapred-site": x.create_hadoop_xml(all_cfg, mapred_all),
        "hdfs-site": x.create_hadoop_xml(all_cfg, HDFS_DEFAULT),
    }

    if hive_hostname:
        xml_configs.update({"hive-site": x.create_hadoop_xml(all_cfg, HIVE_DEFAULT)})
        LOG.debug("Generated hive-site.xml for hive % s", hive_hostname)

    if oozie_hostname:
        xml_configs.update({"oozie-site": x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug("Generated oozie-site.xml for oozie % s", oozie_hostname)

    return xml_configs
Developer: JohannaMW | Project: sahara | Lines: 44 | Source: config_helper.py

Example 11: _get_hadoop_configs

def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        "Hadoop": {"fs.defaultFS": "hdfs://%s:9000" % nn_hostname},
        "HDFS": {
            "dfs.namenode.name.dir": ",".join(dirs["hadoop_name_dirs"]),
            "dfs.datanode.data.dir": ",".join(dirs["hadoop_data_dirs"]),
            "dfs.hosts": "%s/dn-include" % HADOOP_CONF_DIR,
            "dfs.hosts.exclude": "%s/dn-exclude" % HADOOP_CONF_DIR,
        },
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs["YARN"] = {
            "yarn.nodemanager.aux-services": "mapreduce_shuffle",
            "yarn.resourcemanager.hostname": "%s" % res_hostname,
            "yarn.resourcemanager.nodes.include-path": "%s/nm-include" % (HADOOP_CONF_DIR),
            "yarn.resourcemanager.nodes.exclude-path": "%s/nm-exclude" % (HADOOP_CONF_DIR),
        }
        confs["MapReduce"] = {"mapreduce.framework.name": "yarn"}
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs["MapReduce"]["mapreduce.jobhistory.address"] = "%s:10020" % hs_hostname

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {"hadoop.proxyuser.hadoop.hosts": "*", "hadoop.proxyuser.hadoop.groups": "hadoop"}
        confs["Hadoop"].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())

        confs["JobFlow"] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config["name"]] = config["value"]

        confs["Hadoop"].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs["Hadoop"].update(th.TOPOLOGY_CONFIG)
        confs["Hadoop"].update({"topology.script.file.name": HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            "hive.warehouse.subdir.inherit.perms": True,
            "javax.jdo.option.ConnectionURL": "jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true",
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update(
                {
                    "javax.jdo.option.ConnectionURL": "jdbc:mysql://%s/metastore" % hive_hostname,
                    "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
                    "javax.jdo.option.ConnectionUserName": "hive",
                    "javax.jdo.option.ConnectionPassword": "pass",
                    "datanucleus.autoCreateSchema": "false",
                    "datanucleus.fixedDatastore": "true",
                    "hive.metastore.uris": "thrift://%s:9083" % hive_hostname,
                }
            )

        proxy_configs = cluster.cluster_configs.get("proxy_configs")
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            key = key_manager.API().get(context.current(), proxy_configs["proxy_password"])
            password = key.get_encoded()
            hive_cfg.update(
                {
                    swift.HADOOP_SWIFT_USERNAME: proxy_configs["proxy_username"],
                    swift.HADOOP_SWIFT_PASSWORD: password,
                    swift.HADOOP_SWIFT_TRUST_ID: proxy_configs["proxy_trust_id"],
                    swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name,
                }
            )

        confs["Hive"] = hive_cfg

    return confs
Developer: uladz | Project: sahara | Lines: 85 | Source: config.py

Example 12: start_hiveserver

    def start_hiveserver(self, cluster):
        hs = vu.get_hiveserver(cluster)
        if hs:
            self._start_hiveserver(cluster, hs)
Developer: metasensus | Project: sahara | Lines: 4 | Source: versionhandler.py

Example 13: _get_hadoop_configs

def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())

        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                'com.mysql.jdbc.Driver',
                'javax.jdo.option.ConnectionUserName': 'hive',
                'javax.jdo.option.ConnectionPassword': 'pass',
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })

        confs['Hive'] = hive_cfg

    return confs
Developer: egafford | Project: sahara | Lines: 93 | Source: config.py
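
Note the layering in the hive_cfg built above: the embedded Derby ConnectionURL acts as the default, and dict.update simply overwrites it when MySQL is enabled. A self-contained illustration of that override pattern, with hypothetical values:

# Hypothetical illustration of the override pattern used above.
hive_cfg = {
    'javax.jdo.option.ConnectionURL':
        'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true',
}
mysql_enabled = True  # stands in for c_helper.is_mysql_enabled(pctx, cluster)
if mysql_enabled:
    hive_cfg.update({
        'javax.jdo.option.ConnectionURL':
            'jdbc:mysql://hive-host/metastore',  # hive-host is a placeholder
    })
print(hive_cfg['javax.jdo.option.ConnectionURL'])  # the MySQL URL wins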

Example 14: start_hiveserver

    def start_hiveserver(self, cluster):
        hiveserver = vu.get_hiveserver(cluster)
        if hiveserver:
            run.start_hiveserver_process(self.pctx, hiveserver)
Developer: AlexanderYAPPO | Project: sahara | Lines: 4 | Source: versionhandler.py


Note: the sahara.plugins.vanilla.utils.get_hiveserver examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult each project's license before using or redistributing the code, and do not republish without permission.