

Python Bigtop.trigger_puppet method: code examples

This article collects typical usage examples of the Python method charms.layer.apache_bigtop_base.Bigtop.trigger_puppet. If you are unsure what Bigtop.trigger_puppet does or how to use it, the curated examples below should help. You can also explore further usage examples of charms.layer.apache_bigtop_base.Bigtop, the class this method belongs to.


Fifteen code examples of the Bigtop.trigger_puppet method are shown below, ordered by popularity by default.
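All of the examples share the same basic pattern: instantiate Bigtop, render a site.yaml describing the desired roles (and, optionally, hosts and overrides), then call trigger_puppet() so Puppet applies the configuration. The minimal sketch below illustrates that pattern; the role name 'my-service-role' and the status messages are illustrative placeholders, not taken from any specific example.

    # Minimal sketch of the render_site_yaml() + trigger_puppet() pattern.
    # 'my-service-role' is a hypothetical role name used only for illustration.
    from charms.layer.apache_bigtop_base import Bigtop
    from charmhelpers.core import hookenv

    def install_my_service():
        hookenv.status_set('maintenance', 'installing my service')
        bigtop = Bigtop()
        # Declare which Bigtop roles this unit should run; hosts= and
        # overrides= are optional and feed into the generated site.yaml.
        bigtop.render_site_yaml(roles=['my-service-role'])
        # Run 'puppet apply' against the rendered site.yaml.
        bigtop.trigger_puppet()
        hookenv.status_set('active', 'ready')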

Example 1: configure_kafka

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
    def configure_kafka(self, zk_units, network_interface=None):
        # Get ip:port data from our connected zookeepers
        zks = []
        for unit in zk_units:
            ip = utils.resolve_private_address(unit['host'])
            zks.append("%s:%s" % (ip, unit['port']))
        zks.sort()
        zk_connect = ",".join(zks)
        service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
        kafka_port = self.dist_config.port('kafka')

        roles = ['kafka-server']
        override = {
            'kafka::server::broker_id': unit_num,
            'kafka::server::port': kafka_port,
            'kafka::server::zookeeper_connection_string': zk_connect,
        }
        if network_interface:
            ip = Bigtop().get_ip_for_interface(network_interface)
            override['kafka::server::bind_addr'] = ip

        bigtop = Bigtop()
        bigtop.render_site_yaml(roles=roles, overrides=override)
        bigtop.trigger_puppet()
        self.set_advertise()
        self.restart()
Developer: Guavus | Project: bigtop | Lines: 28 | Source: bigtop_kafka.py

Example 2: install_pig

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
    def install_pig(self):
        '''
        Trigger the Bigtop puppet recipe that handles the Pig service.
        '''
        # Dirs are handled by the bigtop deb. No need to call out to
        # dist_config to do that work.
        roles = ['pig-client']

        bigtop = Bigtop()
        bigtop.render_site_yaml(roles=roles)
        bigtop.trigger_puppet()

        # Set app version for juju status output; pig --version looks like:
        #   Apache Pig version 0.15.0 (r: unknown)
        #   compiled Feb 06 2016, 23:00:40
        try:
            pig_out = check_output(['pig', '-x', 'local', '--version']).decode()
        except CalledProcessError as e:
            pig_out = e.output
        lines = pig_out.splitlines()
        parts = lines[0].split() if lines else []
        if len(parts) < 4:
            hookenv.log('Error getting Pig version: {}'.format(pig_out),
                        hookenv.ERROR)
            pig_ver = ''
        else:
            pig_ver = parts[3]
        hookenv.application_version_set(pig_ver)
Developer: Guavus | Project: bigtop | Lines: 30 | Source: bigtop_pig.py

Example 3: install_resourcemanager

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
def install_resourcemanager(namenode):
    """Install if the namenode has sent its FQDN.

    We only need the namenode FQDN to perform the RM install, so poll for
    namenodes() data whenever we have a namenode relation. This allows us to
    install asap, even if 'namenode.ready' is not set yet.
    """
    if namenode.namenodes():
        hookenv.status_set('maintenance', 'installing resourcemanager')
        nn_host = namenode.namenodes()[0]
        rm_host = get_fqdn()
        bigtop = Bigtop()
        hosts = {'namenode': nn_host, 'resourcemanager': rm_host}
        bigtop.render_site_yaml(hosts=hosts, roles='resourcemanager')
        bigtop.trigger_puppet()

        # /etc/hosts entries from the KV are not currently used for bigtop,
        # but a hosts_map attribute is required by some interfaces (eg: mapred-slave)
        # to signify RM's readiness. Set our RM info in the KV to fulfill this
        # requirement.
        utils.initialize_kv_host()

        # Add our ubuntu user to the hadoop and mapred groups.
        get_layer_opts().add_users()

        set_state('apache-bigtop-resourcemanager.installed')
        hookenv.status_set('maintenance', 'resourcemanager installed')
    else:
        hookenv.status_set('waiting', 'waiting for namenode fqdn')
Developer: juju-solutions | Project: layer-apache-bigtop-resourcemanager | Lines: 31 | Source: resourcemanager.py

Example 4: install_namenode

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
def install_namenode():
    hookenv.status_set('maintenance', 'installing namenode')
    bigtop = Bigtop()
    nn_host = get_fqdn()
    hosts = {'namenode': nn_host}
    bigtop.render_site_yaml(hosts=hosts, roles='namenode')
    bigtop.trigger_puppet()

    # /etc/hosts entries from the KV are not currently used for bigtop,
    # but a hosts_map attribute is required by some interfaces (eg: dfs-slave)
    # to signify NN's readiness. Set our NN info in the KV to fulfill this
    # requirement.
    utils.initialize_kv_host()

    # make our namenode listen on all interfaces
    hdfs_site = Path('/etc/hadoop/conf/hdfs-site.xml')
    with utils.xmlpropmap_edit_in_place(hdfs_site) as props:
        props['dfs.namenode.rpc-bind-host'] = '0.0.0.0'
        props['dfs.namenode.servicerpc-bind-host'] = '0.0.0.0'
        props['dfs.namenode.http-bind-host'] = '0.0.0.0'
        props['dfs.namenode.https-bind-host'] = '0.0.0.0'

    # We need to create the 'mapred' user/group since we are not installing
    # hadoop-mapreduce. This is needed so the namenode can access yarn
    # job history files in hdfs. Also add our ubuntu user to the hadoop
    # and mapred groups.
    get_layer_opts().add_users()

    set_state('apache-bigtop-namenode.installed')
    hookenv.status_set('maintenance', 'namenode installed')
Developer: juju-solutions | Project: layer-apache-bigtop-namenode | Lines: 32 | Source: namenode.py

Example 5: trigger_bigtop

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
    def trigger_bigtop(self):
        '''
        Trigger the Bigtop puppet recipe that handles the Zeppelin service.
        '''
        bigtop = Bigtop()
        overrides = unitdata.kv().getrange('zeppelin.bigtop.overrides.',
                                           strip=True)

        # The zep deb depends on spark-core which unfortunately brings in
        # most of hadoop. Include appropriate roles here to ensure these
        # packages are configured in the same way as our other Bigtop
        # software deployed with puppet.
        bigtop.render_site_yaml(
            roles=[
                'spark-client',
                'spark-yarn-slave',
                'zeppelin-server',
            ],
            overrides=overrides,
        )

        # NB: during an upgrade, we configure the site.yaml, but do not
        # trigger puppet. The user must do that with the 'reinstall' action.
        if unitdata.kv().get('zeppelin.version.repo', False):
            hookenv.log("An upgrade is available and the site.yaml has been "
                        "configured. Run the 'reinstall' action to continue.",
                        level=hookenv.INFO)
        else:
            ####################################################################
            # BUG: BIGTOP-2742
            # Default zeppelin init script looks for the literal '$(hostname)'
            # string. Symlink it so it exists before the apt install from puppet
            # tries to start the service.
            import subprocess
            host = subprocess.check_output(['hostname']).decode('utf8').strip()
            zepp_pid = '/var/run/zeppelin/zeppelin-zeppelin-{}.pid'.format(host)
            utils.run_as('root', 'mkdir', '-p', '/var/run/zeppelin')
            utils.run_as('root', 'ln', '-sf',
                         zepp_pid,
                         '/var/run/zeppelin/zeppelin-zeppelin-$(hostname).pid')
            ####################################################################

            bigtop.trigger_puppet()
            self.wait_for_api(30)

            ####################################################################
            # BUG: BIGTOP-2742
            # Puppet apply will call systemctl daemon-reload, which removes the
            # symlink we just created. Now that the bits are on disk, update the
            # init script $(hostname) that caused this mess to begin with.
            zepp_init_script = '/etc/init.d/zeppelin'
            utils.re_edit_in_place(zepp_init_script, {
                r'^# pidfile.*': '# pidfile: {}'.format(zepp_pid),
            })
            utils.run_as('root', 'systemctl', 'daemon-reload')
            self.restart()
            self.wait_for_api(30)
Developer: apache | Project: bigtop | Lines: 59 | Source: bigtop_zeppelin.py

Example 6: configure

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
    def configure(self, available_hosts):
        """
        This is the core logic of setting up spark.

        Two flags are needed:

          * Namenode exists aka HDFS is there
          * Resource manager exists aka YARN is ready

        Both flags are inferred from the available hosts.

        :param dict available_hosts: Hosts that Spark should know about.
        """

        if not unitdata.kv().get('spark.bootstrapped', False):
            self.setup()
            unitdata.kv().set('spark.bootstrapped', True)

        self.install_benchmark()

        hosts = {
            'spark': available_hosts['spark-master'],
        }

        dc = self.dist_config
        events_log_dir = 'file://{}'.format(dc.path('spark_events'))
        if 'namenode' in available_hosts:
            hosts['namenode'] = available_hosts['namenode']
            events_log_dir = self.setup_hdfs_logs()

        if 'resourcemanager' in available_hosts:
            hosts['resourcemanager'] = available_hosts['resourcemanager']

        roles = self.get_roles()

        override = {
            'spark::common::master_url': self.get_master_url(available_hosts['spark-master']),
            'spark::common::event_log_dir': events_log_dir,
            'spark::common::history_log_dir': events_log_dir,
        }

        bigtop = Bigtop()
        bigtop.render_site_yaml(hosts, roles, override)
        bigtop.trigger_puppet()
        # There is a race condition here.
        # The worker role will not start the first time we trigger puppet apply.
        # The exception in /var/logs/spark:
        # Exception in thread "main" org.apache.spark.SparkException: Invalid master URL: spark://:7077
        # The master url is not set at the time the worker starts for the first time.
        # TODO(kjackal): ...do the needed... (investigate, debug, submit patch)
        bigtop.trigger_puppet()
        if 'namenode' not in available_hosts:
            # Make sure users other than spark can access the events logs dir and run jobs
            utils.run_as('root', 'chmod', '777', dc.path('spark_events'))
Developer: juju-solutions | Project: layer-apache-bigtop-spark | Lines: 56 | Source: bigtop_spark.py

Example 7: install_oozie

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
    def install_oozie(self):
        roles = ['hadoop-client']

        bigtop = Bigtop()
        bigtop.render_site_yaml(roles=roles)
        bigtop.trigger_puppet()

        roles = ['oozie-client', 'oozie-server']

        bigtop.render_site_yaml(roles=roles)
        bigtop.trigger_puppet()
Developer: juju-solutions | Project: layer-apache-bigtop-oozie | Lines: 13 | Source: bigtop_oozie.py

Example 8: trigger_bigtop

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
    def trigger_bigtop(self):
        bigtop = Bigtop()
        overrides = unitdata.kv().getrange('zeppelin.bigtop.overrides.',
                                           strip=True)
        bigtop.render_site_yaml(
            roles=[
                'zeppelin-server',
            ],
            overrides=overrides,
        )
        bigtop.trigger_puppet()
        self.wait_for_api(30)
Developer: Guavus | Project: bigtop | Lines: 14 | Source: bigtop_zeppelin.py

Example 9: install_hadoop_client_yarn

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
def install_hadoop_client_yarn(principal, namenode, resourcemanager):
    if namenode.namenodes() and resourcemanager.resourcemanagers():
        hookenv.status_set('maintenance', 'installing plugin (yarn)')
        nn_host = namenode.namenodes()[0]
        rm_host = resourcemanager.resourcemanagers()[0]
        bigtop = Bigtop()
        hosts = {'namenode': nn_host, 'resourcemanager': rm_host}
        bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
        bigtop.trigger_puppet()
        set_state('apache-bigtop-plugin.yarn.installed')
        hookenv.status_set('maintenance', 'plugin (yarn) installed')
    else:
        hookenv.status_set('waiting', 'waiting for master fqdns')
Developer: Guavus | Project: bigtop | Lines: 15 | Source: apache_bigtop_plugin.py

Example 10: install

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
    def install(self, hbase=None, zk_units=None):
        '''
        Trigger the Bigtop puppet recipe that handles the Hive service.
        '''
        # Dirs are handled by the bigtop deb. No need to call out to
        # dist_config to do that. We do want 'ubuntu' in the hive group though.
        self.dist_config.add_users()

        # Prep config
        roles = ['hive-client', 'hive-metastore', 'hive-server2']
        metastore = "thrift://{}:9083".format(hookenv.unit_private_ip())

        if hbase:
            roles.append('hive-hbase')
            hb_connect = "{}:{}".format(hbase['host'], hbase['master_port'])
            zk_hbase_connect = hbase['zk_connect']
        else:
            hb_connect = ""
            zk_hbase_connect = ""

        if zk_units:
            hive_support_concurrency = True
            zk_hive_connect = self.get_zk_connect(zk_units)
        else:
            hive_support_concurrency = False
            zk_hive_connect = ""

        override = {
            'hadoop_hive::common_config::hbase_master': hb_connect,
            'hadoop_hive::common_config::hbase_zookeeper_quorum':
                zk_hbase_connect,
            'hadoop_hive::common_config::hive_zookeeper_quorum':
                zk_hive_connect,
            'hadoop_hive::common_config::hive_support_concurrency':
                hive_support_concurrency,
            'hadoop_hive::common_config::metastore_uris': metastore,
            'hadoop_hive::common_config::server2_thrift_port':
                self.dist_config.port('hive-thrift'),
            'hadoop_hive::common_config::server2_thrift_http_port':
                self.dist_config.port('hive-thrift-web'),
        }

        bigtop = Bigtop()
        bigtop.render_site_yaml(roles=roles, overrides=override)
        bigtop.trigger_puppet()

        # Bigtop doesn't create a hive-env.sh, but we need it for heap config
        hive_env = self.dist_config.path('hive_conf') / 'hive-env.sh'
        if not hive_env.exists():
            (self.dist_config.path('hive_conf') / 'hive-env.sh.template').copy(
                hive_env)
Developer: apache | Project: bigtop | Lines: 53 | Source: bigtop_hive.py

Example 11: install_mahout

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
def install_mahout():
    hookenv.status_set('maintenance', 'installing mahout')
    bigtop = Bigtop()
    bigtop.render_site_yaml(
        roles=[
            'mahout-client',
        ],
    )
    bigtop.trigger_puppet()
    with utils.environment_edit_in_place('/etc/environment') as env:
        env['MAHOUT_HOME'] = '/usr/lib/mahout'

    hookenv.status_set('active', 'ready')
    set_state('mahout.installed')
Developer: evans-ye | Project: bigtop | Lines: 16 | Source: mahout.py

Example 12: install

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
    def install(self, nodes=None):
        '''
        Write out the config, then run puppet.

        After this runs, we should have a configured and running service.

        '''
        bigtop = Bigtop()
        log("Rendering site yaml ''with overrides: {}".format(self._override))
        bigtop.render_site_yaml(self._hosts, self._roles, self._override)
        bigtop.trigger_puppet()
        if self.is_zk_leader():
            zkpeer = RelationBase.from_state('zkpeer.joined')
            zkpeer.set_zk_leader()
Developer: xlsong19 | Project: bigtop | Lines: 16 | Source: zookeeper.py

Example 13: install_mahout

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
def install_mahout():
    hookenv.status_set('maintenance', 'installing mahout')
    bigtop = Bigtop()
    bigtop.render_site_yaml(
        roles=[
            'mahout-client',
        ],
    )
    bigtop.trigger_puppet()
    with utils.environment_edit_in_place('/etc/environment') as env:
        env['MAHOUT_HOME'] = '/usr/lib/mahout'

    set_state('mahout.installed')
    hookenv.status_set('active', 'ready')
    # set app version string for juju status output
    mahout_version = get_package_version('mahout') or 'unknown'
    hookenv.application_version_set(mahout_version)
Developer: Guavus | Project: bigtop | Lines: 19 | Source: mahout.py

Example 14: install_hadoop_client_hdfs

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
def install_hadoop_client_hdfs(principal, namenode):
    """Install if the namenode has sent its FQDN.

    We only need the namenode FQDN to perform the plugin install, so poll for
    namenodes() data whenever we have a namenode relation. This allows us to
    install asap, even if 'namenode.ready' is not set yet.
    """
    if namenode.namenodes():
        hookenv.status_set('maintenance', 'installing plugin (hdfs)')
        nn_host = namenode.namenodes()[0]
        bigtop = Bigtop()
        hosts = {'namenode': nn_host}
        bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
        bigtop.trigger_puppet()
        set_state('apache-bigtop-plugin.hdfs.installed')
        hookenv.status_set('maintenance', 'plugin (hdfs) installed')
    else:
        hookenv.status_set('waiting', 'waiting for namenode fqdn')
Developer: Altiscale | Project: bigtop | Lines: 20 | Source: apache_bigtop_plugin.py

Example 15: configure

# Required import: from charms.layer.apache_bigtop_base import Bigtop [as alias]
# Alternatively: from charms.layer.apache_bigtop_base.Bigtop import trigger_puppet [as alias]
    def configure(self, hosts, zk_units):
        zks = []
        for unit in zk_units:
            ip = utils.resolve_private_address(unit["host"])
            zks.append(ip)
        zks.sort()
        zk_connect = ",".join(zks)

        roles = ["hbase-server", "hbase-master", "hbase-client"]

        override = {
            "hadoop_hbase::common_config::zookeeper_quorum": zk_connect,
            "hadoop_hbase::deploy::auxiliary": False,
        }

        bigtop = Bigtop()
        bigtop.render_site_yaml(hosts, roles, override)
        bigtop.trigger_puppet()
Developer: apache | Project: bigtop | Lines: 20 | Source: bigtop_hbase.py


Note: The charms.layer.apache_bigtop_base.Bigtop.trigger_puppet examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their original authors; copyright of the source code remains with those authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.