本文整理汇总了Python中charms.layer.apache_bigtop_base.Bigtop类的典型用法代码示例。如果您正苦于以下问题:Python Bigtop类的具体用法?Python Bigtop怎么用?Python Bigtop使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Bigtop类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: install_pig
def install_pig(self):
    '''
    Trigger the Bigtop puppet recipe that handles the Pig service.

    Renders the site yaml for the 'pig-client' role, runs puppet, then
    parses `pig --version` output to publish an application version for
    juju status.
    '''
    # Dirs are handled by the bigtop deb. No need to call out to
    # dist_config to do that work.
    roles = ['pig-client']
    bigtop = Bigtop()
    bigtop.render_site_yaml(roles=roles)
    bigtop.trigger_puppet()

    # Set app version for juju status output; pig --version looks like:
    #   Apache Pig version 0.15.0 (r: unknown)
    #   compiled Feb 06 2016, 23:00:40
    try:
        pig_out = check_output(['pig', '-x', 'local', '--version']).decode()
    except CalledProcessError as e:
        # BUGFIX: check_output returns bytes, so e.output is bytes (or
        # None). Decode it so the log message below and the split/index
        # parsing operate on str, matching the success path.
        pig_out = e.output.decode() if e.output else ''
    lines = pig_out.splitlines()
    parts = lines[0].split() if lines else []
    if len(parts) < 4:
        hookenv.log('Error getting Pig version: {}'.format(pig_out),
                    hookenv.ERROR)
        pig_ver = ''
    else:
        pig_ver = parts[3]
    hookenv.application_version_set(pig_ver)
示例2: send_dn_all_info
def send_dn_all_info(datanode):
    """Send datanodes all dfs-slave relation data.

    At this point, the namenode is ready to serve datanodes. Send all
    dfs-slave relation data so that our 'namenode.ready' state becomes set.
    """
    opts = get_layer_opts()
    datanode.send_spec(Bigtop().spec())
    datanode.send_namenodes([get_fqdn()])
    datanode.send_ports(opts.port('namenode'), opts.port('nn_webapp_http'))

    # hosts_map, ssh_key, and clustername are required by the dfs-slave
    # interface to signify NN's readiness. Send them, even though they are not
    # utilized by bigtop.
    # NB: update KV hosts with all datanodes prior to sending the hosts_map
    # because dfs-slave gates readiness on a DN's presence in the hosts_map.
    utils.update_kv_hosts(datanode.hosts_map())
    datanode.send_hosts_map(utils.get_kv_hosts())
    datanode.send_ssh_key('invalid')
    datanode.send_clustername(hookenv.service_name())

    # update status with slave count and report ready for hdfs
    count = len(datanode.nodes())
    plural = 's' if count > 1 else ''
    hookenv.status_set('active', 'ready ({count} datanode{s})'.format(
        count=count,
        s=plural,
    ))
    set_state('apache-bigtop-namenode.ready')
示例3: configure_kafka
def configure_kafka(self, zk_units, network_interface=None):
    '''
    Render and apply the Bigtop puppet config for a Kafka broker.

    Builds the zookeeper connection string from the connected zk units,
    derives the broker id from our unit number, optionally binds to a
    specific interface, then restarts the service.
    '''
    # Get ip:port data from our connected zookeepers.
    endpoints = sorted(
        "%s:%s" % (utils.resolve_private_address(unit['host']), unit['port'])
        for unit in zk_units)
    zk_connect = ",".join(endpoints)

    # Our unit number doubles as the (unique) broker id.
    service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
    override = {
        'kafka::server::broker_id': unit_num,
        'kafka::server::port': self.dist_config.port('kafka'),
        'kafka::server::zookeeper_connection_string': zk_connect,
    }
    if network_interface:
        override['kafka::server::bind_addr'] = Bigtop().get_ip_for_interface(
            network_interface)

    bigtop = Bigtop()
    bigtop.render_site_yaml(roles=['kafka-server'], overrides=override)
    bigtop.trigger_puppet()
    self.set_advertise()
    self.restart()
示例4: send_nm_all_info
def send_nm_all_info(nodemanager):
    """Send nodemanagers all mapred-slave relation data.

    At this point, the resourcemanager is ready to serve nodemanagers. Send all
    mapred-slave relation data so that our 'resourcemanager.ready' state becomes set.
    """
    opts = get_layer_opts()
    nodemanager.send_resourcemanagers([get_fqdn()])
    nodemanager.send_spec(Bigtop().spec())
    nodemanager.send_ports(opts.port('resourcemanager'),
                           opts.port('jh_webapp_http'),
                           opts.port('jobhistory'))

    # hosts_map and ssh_key are required by the mapred-slave interface to signify
    # RM's readiness. Send them, even though they are not utilized by bigtop.
    # NB: update KV hosts with all nodemanagers prior to sending the hosts_map
    # because mapred-slave gates readiness on a NM's presence in the hosts_map.
    utils.update_kv_hosts(nodemanager.hosts_map())
    nodemanager.send_hosts_map(utils.get_kv_hosts())
    nodemanager.send_ssh_key('invalid')

    # update status with slave count and report ready for hdfs
    count = len(nodemanager.nodes())
    plural = 's' if count > 1 else ''
    hookenv.status_set('active', 'ready ({count} nodemanager{s})'.format(
        count=count,
        s=plural,
    ))
    set_state('apache-bigtop-resourcemanager.ready')
示例5: install_nodemanager
def install_nodemanager(namenode, resourcemanager):
    """Install if we have FQDNs.

    We only need the master FQDNs to perform the nodemanager install, so poll
    for master host data from the appropriate relation. This allows us to
    install asap, even if '<master>.ready' is not set.
    """
    namenodes = namenode.namenodes()
    resourcemanagers = resourcemanager.resourcemanagers()
    masters = namenodes + resourcemanagers

    # Only (re)install when both masters are known and the set changed.
    if not (namenodes and resourcemanagers and
            data_changed('nm.masters', masters)):
        return

    if is_state('apache-bigtop-nodemanager.installed'):
        verb = 'configuring'
    else:
        verb = 'installing'
    hookenv.status_set('maintenance', '%s nodemanager' % verb)

    bigtop = Bigtop()
    bigtop.render_site_yaml(
        hosts={
            'namenode': namenodes[0],
            'resourcemanager': resourcemanagers[0],
        },
        roles=[
            'nodemanager',
            'mapred-app',
        ],
    )
    # Queue (rather than trigger) puppet; a later handler applies it.
    bigtop.queue_puppet()
    set_state('apache-bigtop-nodemanager.pending')
示例6: install_resourcemanager
def install_resourcemanager(namenode):
    """Install if the namenode has sent its FQDN.

    We only need the namenode FQDN to perform the RM install, so poll for
    namenodes() data whenever we have a namenode relation. This allows us to
    install asap, even if 'namenode.ready' is not set yet.
    """
    namenodes = namenode.namenodes()
    if not namenodes:
        hookenv.status_set('waiting', 'waiting for namenode fqdn')
        return

    hookenv.status_set('maintenance', 'installing resourcemanager')
    bigtop = Bigtop()
    bigtop.render_site_yaml(
        hosts={'namenode': namenodes[0], 'resourcemanager': get_fqdn()},
        roles='resourcemanager',
    )
    bigtop.trigger_puppet()

    # /etc/hosts entries from the KV are not currently used for bigtop,
    # but a hosts_map attribute is required by some interfaces (eg: mapred-slave)
    # to signify RM's readiness. Set our RM info in the KV to fulfill this
    # requirement.
    utils.initialize_kv_host()

    # Add our ubuntu user to the hadoop and mapred groups.
    get_layer_opts().add_users()

    set_state('apache-bigtop-resourcemanager.installed')
    hookenv.status_set('maintenance', 'resourcemanager installed')
示例7: install_namenode
def install_namenode():
    """Install and configure the HDFS namenode via the Bigtop puppet recipe."""
    hookenv.status_set('maintenance', 'installing namenode')
    bigtop = Bigtop()
    bigtop.render_site_yaml(hosts={'namenode': get_fqdn()}, roles='namenode')
    bigtop.trigger_puppet()

    # /etc/hosts entries from the KV are not currently used for bigtop,
    # but a hosts_map attribute is required by some interfaces (eg: dfs-slave)
    # to signify NN's readiness. Set our NN info in the KV to fulfill this
    # requirement.
    utils.initialize_kv_host()

    # make our namenode listen on all interfaces
    hdfs_site = Path('/etc/hadoop/conf/hdfs-site.xml')
    bind_props = (
        'dfs.namenode.rpc-bind-host',
        'dfs.namenode.servicerpc-bind-host',
        'dfs.namenode.http-bind-host',
        'dfs.namenode.https-bind-host',
    )
    with utils.xmlpropmap_edit_in_place(hdfs_site) as props:
        for key in bind_props:
            props[key] = '0.0.0.0'

    # We need to create the 'mapred' user/group since we are not installing
    # hadoop-mapreduce. This is needed so the namenode can access yarn
    # job history files in hdfs. Also add our ubuntu user to the hadoop
    # and mapred groups.
    get_layer_opts().add_users()

    set_state('apache-bigtop-namenode.installed')
    hookenv.status_set('maintenance', 'namenode installed')
示例8: send_client_all_info
def send_client_all_info(client):
    """Send clients (plugin, RM, non-DNs) all dfs relation data.

    At this point, the resourcemanager is ready to serve clients. Send all
    mapred relation data so that our 'resourcemanager.ready' state becomes set.
    """
    opts = get_layer_opts()
    client.send_resourcemanagers([get_fqdn()])
    client.send_spec(Bigtop().spec())
    client.send_ports(opts.port('resourcemanager'),
                      opts.port('jh_webapp_http'),
                      opts.port('jobhistory'))

    # resourcemanager.ready implies we have at least 1 nodemanager, which means
    # yarn is ready for use. Inform clients of that with send_ready().
    client.send_ready(is_state('apache-bigtop-resourcemanager.ready'))

    # hosts_map is required by the mapred interface to signify
    # RM's readiness. Send it, even though it is not utilized by bigtop.
    client.send_hosts_map(utils.get_kv_hosts())
示例9: start_datanode
def start_datanode(namenode):
    """Restart the datanode service and, once HDFS is usable, mark it started."""
    hookenv.status_set('maintenance', 'starting datanode')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    if not host.service_restart('hadoop-hdfs-datanode'):
        hookenv.log('DataNode failed to start')
        hookenv.status_set('blocked', 'datanode failed to start')
        remove_state('apache-bigtop-datanode.started')
        for port in get_layer_opts().exposed_ports('datanode'):
            hookenv.close_port(port)
        return

    # Create a /user/ubuntu dir in HDFS (this is safe to run multiple times).
    bigtop = Bigtop()
    if not bigtop.check_hdfs_setup():
        try:
            utils.wait_for_hdfs(30)
            bigtop.setup_hdfs()
        except utils.TimeoutError:
            # HDFS is not yet available or is still in safe mode, so we can't
            # do the initial setup (create dirs); skip setting the .started
            # state below so that we try again on the next hook.
            hookenv.status_set('waiting', 'waiting on hdfs')
            return

    # HDFS is ready. Open ports and set .started, status, and app version
    for port in get_layer_opts().exposed_ports('datanode'):
        hookenv.open_port(port)
    set_state('apache-bigtop-datanode.started')
    hookenv.status_set('maintenance', 'datanode started')
    hookenv.application_version_set(get_hadoop_version())
示例10: send_client_all_info
def send_client_all_info(client):
    """Send clients (plugin, RM, non-DNs) all dfs relation data.

    At this point, the namenode is ready to serve clients. Send all
    dfs relation data so that our 'namenode.ready' state becomes set.
    """
    opts = get_layer_opts()
    client.send_spec(Bigtop().spec())
    client.send_namenodes([get_fqdn()])
    client.send_ports(opts.port('namenode'), opts.port('nn_webapp_http'))

    # namenode.ready implies we have at least 1 datanode, which means hdfs
    # is ready for use. Inform clients of that with send_ready().
    client.send_ready(is_state('apache-bigtop-namenode.ready'))

    # hosts_map and clustername are required by the dfs interface to signify
    # NN's readiness. Send it, even though they are not utilized by bigtop.
    client.send_hosts_map(utils.get_kv_hosts())
    client.send_clustername(hookenv.service_name())
示例11: trigger_bigtop
def trigger_bigtop(self):
    '''
    Trigger the Bigtop puppet recipe that handles the Zeppelin service.
    '''
    bigtop = Bigtop()
    # User-supplied puppet overrides stored in the unit KV under the
    # 'zeppelin.bigtop.overrides.' prefix.
    overrides = unitdata.kv().getrange('zeppelin.bigtop.overrides.',
                                       strip=True)

    # The zep deb depends on spark-core which unfortunately brings in
    # most of hadoop. Include appropriate roles here to ensure these
    # packages are configured in the same way as our other Bigtop
    # software deployed with puppet.
    bigtop.render_site_yaml(
        roles=[
            'spark-client',
            'spark-yarn-slave',
            'zeppelin-server',
        ],
        overrides=overrides,
    )

    # NB: during an upgrade, we configure the site.yaml, but do not
    # trigger puppet. The user must do that with the 'reinstall' action.
    if unitdata.kv().get('zeppelin.version.repo', False):
        hookenv.log("An upgrade is available and the site.yaml has been "
                    "configured. Run the 'reinstall' action to continue.",
                    level=hookenv.INFO)
    else:
        ####################################################################
        # BUG: BIGTOP-2742
        # Default zeppelin init script looks for the literal '$(hostname)'
        # string. Symlink it so it exists before the apt install from puppet
        # tries to start the service.
        import subprocess
        host = subprocess.check_output(['hostname']).decode('utf8').strip()
        zepp_pid = '/var/run/zeppelin/zeppelin-zeppelin-{}.pid'.format(host)
        utils.run_as('root', 'mkdir', '-p', '/var/run/zeppelin')
        utils.run_as('root', 'ln', '-sf',
                     zepp_pid,
                     '/var/run/zeppelin/zeppelin-zeppelin-$(hostname).pid')
        ####################################################################

        bigtop.trigger_puppet()
        self.wait_for_api(30)

        ####################################################################
        # BUG: BIGTOP-2742
        # Puppet apply will call systemctl daemon-reload, which removes the
        # symlink we just created. Now that the bits are on disk, update the
        # init script $(hostname) that caused this mess to begin with.
        zepp_init_script = '/etc/init.d/zeppelin'
        utils.re_edit_in_place(zepp_init_script, {
            r'^# pidfile.*': '# pidfile: {}'.format(zepp_pid),
        })
        utils.run_as('root', 'systemctl', 'daemon-reload')
        self.restart()
        self.wait_for_api(30)
示例12: test_handle_queued_puppet
def test_handle_queued_puppet(self, mock_ver, mock_hookenv, mock_trigger):
    '''
    Verify that we attempt to call puppet when it has been queued, and
    then clear the queued state.
    '''
    # Arrange: simulate a previously-queued puppet run and stub the
    # Bigtop version lookup (mocks are injected by patch decorators).
    set_state('apache-bigtop-base.puppet_queued')
    mock_ver.return_value = '1.2.0'
    # Act: the handler should run puppet exactly because the state is set.
    Bigtop._handle_queued_puppet()
    # Assert: puppet was triggered and the queued flag was cleared.
    self.assertTrue(mock_trigger.called)
    self.assertFalse(is_state('apache-bigtop-base.puppet_queued'))
示例13: trigger_bigtop
def trigger_bigtop(self):
    '''
    Run the Bigtop puppet recipe for the Zeppelin server role, applying
    any user overrides stored in the unit KV, then wait for the API.
    '''
    overrides = unitdata.kv().getrange('zeppelin.bigtop.overrides.',
                                       strip=True)
    bigtop = Bigtop()
    bigtop.render_site_yaml(roles=['zeppelin-server'], overrides=overrides)
    bigtop.trigger_puppet()
    self.wait_for_api(30)
示例14: install_hadoop_client_yarn
def install_hadoop_client_yarn(principal, namenode, resourcemanager):
    """Install the hadoop-client plugin once both master FQDNs are known."""
    namenodes = namenode.namenodes()
    resourcemanagers = resourcemanager.resourcemanagers()
    if not (namenodes and resourcemanagers):
        hookenv.status_set('waiting', 'waiting for master fqdns')
        return

    hookenv.status_set('maintenance', 'installing plugin (yarn)')
    bigtop = Bigtop()
    bigtop.render_site_yaml(
        hosts={'namenode': namenodes[0],
               'resourcemanager': resourcemanagers[0]},
        roles='hadoop-client',
    )
    bigtop.trigger_puppet()
    set_state('apache-bigtop-plugin.yarn.installed')
    hookenv.status_set('maintenance', 'plugin (yarn) installed')
示例15: install
def install(self, hbase=None, zk_units=None):
    '''
    Trigger the Bigtop puppet recipe that handles the Hive service.
    '''
    # Dirs are handled by the bigtop deb. No need to call out to
    # dist_config to do that. We do want 'ubuntu' in the hive group though.
    self.dist_config.add_users()

    # Prep config
    roles = ['hive-client', 'hive-metastore', 'hive-server2']
    metastore = "thrift://{}:9083".format(hookenv.unit_private_ip())

    # HBase integration is optional; leave the connect strings empty when
    # no hbase relation data was provided.
    hb_connect = ""
    zk_hbase_connect = ""
    if hbase:
        roles.append('hive-hbase')
        hb_connect = "{}:{}".format(hbase['host'], hbase['master_port'])
        zk_hbase_connect = hbase['zk_connect']

    # Concurrency support requires a zookeeper quorum.
    hive_support_concurrency = bool(zk_units)
    zk_hive_connect = self.get_zk_connect(zk_units) if zk_units else ""

    override = {
        'hadoop_hive::common_config::hbase_master': hb_connect,
        'hadoop_hive::common_config::hbase_zookeeper_quorum':
            zk_hbase_connect,
        'hadoop_hive::common_config::hive_zookeeper_quorum':
            zk_hive_connect,
        'hadoop_hive::common_config::hive_support_concurrency':
            hive_support_concurrency,
        'hadoop_hive::common_config::metastore_uris': metastore,
        'hadoop_hive::common_config::server2_thrift_port':
            self.dist_config.port('hive-thrift'),
        'hadoop_hive::common_config::server2_thrift_http_port':
            self.dist_config.port('hive-thrift-web'),
    }

    bigtop = Bigtop()
    bigtop.render_site_yaml(roles=roles, overrides=override)
    bigtop.trigger_puppet()

    # Bigtop doesn't create a hive-env.sh, but we need it for heap config
    conf_dir = self.dist_config.path('hive_conf')
    hive_env = conf_dir / 'hive-env.sh'
    if not hive_env.exists():
        (conf_dir / 'hive-env.sh.template').copy(hive_env)