This page collects typical usage examples of the Python function teuthology.misc.split_role. If you are unsure what split_role does or how to call it, the curated examples below may help.
The 15 code examples of split_role shown below are ordered by popularity by default.
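Before the examples, here is a minimal sketch of what split_role returns, inferred from how the examples on this page use it; the exact default-cluster fallback is an assumption rather than something stated here.

from teuthology import misc

# Role with an explicit cluster prefix, '<cluster>.<type>.<id>':
cluster, type_, id_ = misc.split_role('backup.client.0')
assert (cluster, type_, id_) == ('backup', 'client', '0')

# Role without a cluster prefix; assumed to fall back to the default
# cluster name 'ceph':
cluster, type_, id_ = misc.split_role('osd.1')
assert (cluster, type_, id_) == ('ceph', 'osd', '1')

# The id is returned as a string, so callers rebuild daemon names such
# as 'client.0' by re-joining the pieces:
client_with_id = type_ + '.' + id_

Every example below follows the same pattern: take a role string from the job configuration, split it into cluster, daemon type, and id, and use those pieces to address a specific daemon or cluster.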
Example 1: create_users
def create_users(ctx, config):
    """
    Create a main and an alternate s3 user.
    """
    assert isinstance(config, dict)
    log.info('Creating rgw users...')
    testdir = teuthology.get_testdir(ctx)
    users = {'s3 main': 'foo', 's3 alt': 'bar'}
    for client in config['clients']:
        s3tests_conf = config['s3tests_conf'][client]
        s3tests_conf.setdefault('fixtures', {})
        s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
        for section, user in users.iteritems():
            _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
            log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            client_with_id = daemon_type + '.' + client_id
            ctx.cluster.only(client).run(
                args=[
                    'adjust-ulimits',
                    'ceph-coverage',
                    '{tdir}/archive/coverage'.format(tdir=testdir),
                    'radosgw-admin',
                    '-n', client_with_id,
                    'user', 'create',
                    '--uid', s3tests_conf[section]['user_id'],
                    '--display-name', s3tests_conf[section]['display_name'],
                    '--access-key', s3tests_conf[section]['access_key'],
                    '--secret', s3tests_conf[section]['secret_key'],
                    '--email', s3tests_conf[section]['email'],
                    '--cluster', cluster_name,
                ],
            )
    try:
        yield
    finally:
        for client in config['clients']:
            for user in users.itervalues():
                uid = '{user}.{client}'.format(user=user, client=client)
                cluster_name, daemon_type, client_id = teuthology.split_role(client)
                client_with_id = daemon_type + '.' + client_id
                ctx.cluster.only(client).run(
                    args=[
                        'adjust-ulimits',
                        'ceph-coverage',
                        '{tdir}/archive/coverage'.format(tdir=testdir),
                        'radosgw-admin',
                        '-n', client_with_id,
                        'user', 'rm',
                        '--uid', uid,
                        '--purge-data',
                        '--cluster', cluster_name,
                    ],
                )
Example 2: create_users
def create_users(ctx, config):
    """
    Create rgw users to interact with the swift interface.
    """
    assert isinstance(config, dict)
    log.info('Creating rgw users...')
    testdir = teuthology.get_testdir(ctx)
    users = {'': 'foo', '2': 'bar'}
    for client in config['clients']:
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        testswift_conf = config['testswift_conf'][client]
        for suffix, user in users.iteritems():
            _config_user(testswift_conf, '{user}.{client}'.format(user=user, client=client), user, suffix)
            ctx.cluster.only(client).run(
                args=[
                    'adjust-ulimits',
                    'ceph-coverage',
                    '{tdir}/archive/coverage'.format(tdir=testdir),
                    'radosgw-admin',
                    '-n', client,
                    '--cluster', cluster_name,
                    'user', 'create',
                    '--subuser', '{account}:{user}'.format(account=testswift_conf['func_test']['account{s}'.format(s=suffix)], user=user),
                    '--display-name', testswift_conf['func_test']['display_name{s}'.format(s=suffix)],
                    '--secret', testswift_conf['func_test']['password{s}'.format(s=suffix)],
                    '--email', testswift_conf['func_test']['email{s}'.format(s=suffix)],
                    '--key-type', 'swift',
                    '--access', 'full',
                ],
            )
    try:
        yield
    finally:
        for client in config['clients']:
            for user in users.itervalues():
                uid = '{user}.{client}'.format(user=user, client=client)
                cluster_name, daemon_type, client_id = teuthology.split_role(client)
                ctx.cluster.only(client).run(
                    args=[
                        'adjust-ulimits',
                        'ceph-coverage',
                        '{tdir}/archive/coverage'.format(tdir=testdir),
                        'radosgw-admin',
                        '-n', client,
                        '--cluster', cluster_name,
                        'user', 'rm',
                        '--uid', uid,
                        '--purge-data',
                    ],
                )
Example 3: wait_for_failure
def wait_for_failure(ctx, config):
    """
    Wait for a failure of a ceph daemon

    For example::

      tasks:
      - ceph.wait_for_failure: [mds.*]

      tasks:
      - ceph.wait_for_failure: [osd.0, osd.2]

      tasks:
      - ceph.wait_for_failure:
          daemons: [osd.0, osd.2]
    """
    if config is None:
        config = {}
    elif isinstance(config, list):
        config = {"daemons": config}

    daemons = ctx.daemons.resolve_role_list(config.get("daemons", None), CEPH_ROLE_TYPES, True)
    for role in daemons:
        cluster, type_, id_ = teuthology.split_role(role)
        try:
            ctx.daemons.get_daemon(type_, id_, cluster).wait()
        except:
            log.info("Saw expected daemon failure. Continuing.")
            pass
        else:
            raise RuntimeError("daemon %s did not fail" % role)

    yield
Example 4: task
def task(ctx, config):
    """
    Test monitor recovery from OSD
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for configuration'

    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'))

    mons = ctx.cluster.only(teuthology.is_type('mon'))
    # note down the first cluster_name and mon_id
    # we will recover it later on
    cluster_name, _, mon_id = teuthology.split_role(first_mon)
    _nuke_mons(manager, mons, mon_id)
    default_keyring = '/etc/ceph/{cluster}.keyring'.format(
        cluster=cluster_name)
    keyring_path = config.get('keyring_path', default_keyring)
    _rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path)
    _revive_mons(manager, mons, mon_id, keyring_path)
    _revive_mgrs(ctx, manager)
    _revive_osds(ctx, manager)
Example 5: _revive_mons
def _revive_mons(manager, mons, recovered, keyring_path):
    # revive monitors
    # the initial monmap is in the ceph.conf, so we are good.
    n_mons = 0
    is_mon = teuthology.is_type('mon')
    for remote, roles in mons.remotes.iteritems():
        for role in roles:
            if not is_mon(role):
                continue
            cluster, _, m = teuthology.split_role(role)
            if recovered != m:
                log.info('running mkfs on {cluster}:mon.{mon}'.format(
                    cluster=cluster,
                    mon=m))
                remote.run(
                    args=[
                        'sudo',
                        'ceph-mon',
                        '--cluster', cluster,
                        '--mkfs',
                        '-i', m,
                        '--keyring', keyring_path])
            log.info('reviving mon.{0}'.format(m))
            manager.revive_mon(m)
            n_mons += 1
    manager.wait_for_mon_quorum_size(n_mons, timeout=30)
Example 6: _delete_dir
def _delete_dir(ctx, role, created_mountpoint):
    """
    Delete file used by this role, and delete the directory that this
    role appeared in.

    :param ctx: Context
    :param role: "role.#" where # is used for the role id.
    """
    cluster, _, id_ = misc.split_role(role)
    remote = get_remote_for_role(ctx, role)
    mnt = _client_mountpoint(ctx, cluster, id_)

    client = os.path.join(mnt, 'client.{id}'.format(id=id_))

    # Remove the directory inside the mount where the workunit ran
    remote.run(
        args=[
            'sudo',
            'rm',
            '-rf',
            '--',
            client,
        ],
    )
    log.info("Deleted dir {dir}".format(dir=client))

    # If the mount was an artificially created dir, delete that too
    if created_mountpoint:
        remote.run(
            args=[
                'rmdir',
                '--',
                mnt,
            ],
        )
        log.info("Deleted artificial mount point {dir}".format(dir=client))
Example 7: stop
def stop(ctx, config):
    """
    Stop ceph daemons

    For example::

      tasks:
      - ceph.stop: [mds.*]

      tasks:
      - ceph.stop: [osd.0, osd.2]

      tasks:
      - ceph.stop:
          daemons: [osd.0, osd.2]
    """
    if config is None:
        config = {}
    elif isinstance(config, list):
        config = {"daemons": config}

    daemons = ctx.daemons.resolve_role_list(config.get("daemons", None), CEPH_ROLE_TYPES, True)
    for role in daemons:
        cluster, type_, id_ = teuthology.split_role(role)
        ctx.daemons.get_daemon(type_, id_, cluster).stop()

    yield
Example 8: osd_scrub_pgs
def osd_scrub_pgs(ctx, config):
    """
    Scrub pgs when we exit.

    First make sure all pgs are active and clean.
    Next scrub all osds.
    Then periodically check until all pgs have scrub time stamps that
    indicate the last scrub completed. Time out if no progress is made
    here after two minutes.
    """
    retries = 12
    delays = 10
    cluster_name = config["cluster"]
    manager = ctx.managers[cluster_name]
    all_clean = False
    for _ in range(0, retries):
        stats = manager.get_pg_stats()
        states = [stat["state"] for stat in stats]
        if len(set(states)) == 1 and states[0] == "active+clean":
            all_clean = True
            break
        log.info("Waiting for all osds to be active and clean.")
        time.sleep(delays)
    if not all_clean:
        log.info("Scrubbing terminated -- not all pgs were active and clean.")
        return
    check_time_now = time.localtime()
    time.sleep(1)
    all_roles = teuthology.all_roles(ctx.cluster)
    for role in teuthology.cluster_roles_of_type(all_roles, "osd", cluster_name):
        log.info("Scrubbing {osd}".format(osd=role))
        _, _, id_ = teuthology.split_role(role)
        manager.raw_cluster_cmd("osd", "deep-scrub", id_)
    prev_good = 0
    gap_cnt = 0
    loop = True
    while loop:
        stats = manager.get_pg_stats()
        timez = [stat["last_scrub_stamp"] for stat in stats]
        loop = False
        thiscnt = 0
        for tmval in timez:
            pgtm = time.strptime(tmval[0 : tmval.find(".")], "%Y-%m-%d %H:%M:%S")
            if pgtm > check_time_now:
                thiscnt += 1
            else:
                loop = True
        if thiscnt > prev_good:
            prev_good = thiscnt
            gap_cnt = 0
        else:
            gap_cnt += 1
            if gap_cnt > retries:
                log.info("Exiting scrub checking -- not all pgs scrubbed.")
                return
        if loop:
            log.info("Still waiting for all pgs to be scrubbed.")
            time.sleep(delays)
Example 9: start_apache
def start_apache(ctx, config, on_client = None, except_client = None):
    """
    Start apache on remote sites.
    """
    log.info('Starting apache...')
    testdir = teuthology.get_testdir(ctx)
    apaches = {}
    clients_to_run = [on_client]
    if on_client is None:
        clients_to_run = config.keys()
    for client in clients_to_run:
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id
        if client == except_client:
            continue
        (remote,) = ctx.cluster.only(client).remotes.keys()
        system_type = teuthology.get_system_type(remote)
        if system_type == 'deb':
            apache_name = 'apache2'
        else:
            try:
                remote.run(
                    args=[
                        'stat',
                        '/usr/sbin/httpd.worker',
                    ],
                )
                apache_name = '/usr/sbin/httpd.worker'
            except CommandFailedError:
                apache_name = '/usr/sbin/httpd'

        proc = remote.run(
            args=[
                'adjust-ulimits',
                'daemon-helper',
                'kill',
                apache_name,
                '-X',
                '-f',
                '{tdir}/apache/apache.{client_with_cluster}.conf'.format(tdir=testdir,
                                                                         client_with_cluster=client_with_cluster),
            ],
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )
        apaches[client_with_cluster] = proc

    try:
        yield
    finally:
        log.info('Stopping apache...')
        for client, proc in apaches.iteritems():
            proc.stdin.close()

        run.wait(apaches.itervalues())
Example 10: _revive_osds
def _revive_osds(ctx, manager):
    is_osd = teuthology.is_type('osd')
    osds = ctx.cluster.only(is_osd)
    for _, roles in osds.remotes.iteritems():
        for role in roles:
            if not is_osd(role):
                continue
            _, _, osd_id = teuthology.split_role(role)
            log.info('reviving osd.{0}'.format(osd_id))
            manager.revive_osd(osd_id)
Example 11: _revive_mgrs
def _revive_mgrs(ctx, manager):
    is_mgr = teuthology.is_type('mgr')
    mgrs = ctx.cluster.only(is_mgr)
    for _, roles in mgrs.remotes.iteritems():
        for role in roles:
            if not is_mgr(role):
                continue
            _, _, mgr_id = teuthology.split_role(role)
            log.info('reviving mgr.{0}'.format(mgr_id))
            manager.revive_mgr(mgr_id)
Example 12: create_apache_dirs
def create_apache_dirs(ctx, config, on_client = None, except_client = None):
    """
    Remotely create apache directories. Delete when finished.
    """
    log.info('Creating apache directories...')
    log.debug('client is %r', on_client)
    testdir = teuthology.get_testdir(ctx)
    clients_to_create_as = [on_client]
    if on_client is None:
        clients_to_create_as = config.keys()
    for client in clients_to_create_as:
        if client == except_client:
            continue
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id
        ctx.cluster.only(client).run(
            args=[
                'mkdir',
                '-p',
                '{tdir}/apache/htdocs.{client_with_cluster}'.format(tdir=testdir,
                                                                    client_with_cluster=client_with_cluster),
                '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock'.format(
                    tdir=testdir,
                    client_with_cluster=client_with_cluster),
                run.Raw('&&'),
                'mkdir',
                '{tdir}/archive/apache.{client_with_cluster}'.format(tdir=testdir,
                                                                     client_with_cluster=client_with_cluster),
            ],
        )
    try:
        yield
    finally:
        log.info('Cleaning up apache directories...')
        for client in clients_to_create_as:
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-rf',
                    '{tdir}/apache/tmp.{client_with_cluster}'.format(tdir=testdir,
                                                                     client_with_cluster=client_with_cluster),
                    run.Raw('&&'),
                    'rmdir',
                    '{tdir}/apache/htdocs.{client_with_cluster}'.format(tdir=testdir,
                                                                        client_with_cluster=client_with_cluster),
                ],
            )

        for client in clients_to_create_as:
            ctx.cluster.only(client).run(
                args=[
                    'rmdir',
                    '{tdir}/apache'.format(tdir=testdir),
                ],
                check_status=False,  # only need to remove once per host
            )
Example 13: setup
def setup(self):
    super(RBDMirror, self).setup()
    try:
        self.client = self.config['client']
    except KeyError:
        raise ConfigError('rbd-mirror requires a client to connect with')

    self.cluster_name, type_, self.client_id = misc.split_role(self.client)

    if type_ != 'client':
        msg = 'client role ({0}) must be a client'.format(self.client)
        raise ConfigError(msg)

    self.remote = get_remote_for_role(self.ctx, self.client)
Example 14: restart
def restart(ctx, config):
    """
    restart ceph daemons

    For example::

      tasks:
      - ceph.restart: [all]

    For example::

      tasks:
      - ceph.restart: [osd.0, mon.1, mds.*]

    or::

      tasks:
      - ceph.restart:
          daemons: [osd.0, mon.1]
          wait-for-healthy: false
          wait-for-osds-up: true

    :param ctx: Context
    :param config: Configuration
    """
    if config is None:
        config = {}
    elif isinstance(config, list):
        config = {"daemons": config}

    daemons = ctx.daemons.resolve_role_list(config.get("daemons", None), CEPH_ROLE_TYPES, True)
    clusters = set()
    for role in daemons:
        cluster, type_, id_ = teuthology.split_role(role)
        ctx.daemons.get_daemon(type_, id_, cluster).restart()
        clusters.add(cluster)

    if config.get("wait-for-healthy", True):
        for cluster in clusters:
            healthy(ctx=ctx, config=dict(cluster=cluster))
    if config.get("wait-for-osds-up", False):
        for cluster in clusters:
            wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster))
    manager = ctx.managers["ceph"]
    for dmon in daemons:
        if "." in dmon:
            dm_parts = dmon.split(".")
            if dm_parts[1].isdigit():
                if dm_parts[0] == "osd":
                    manager.mark_down_osd(int(dm_parts[1]))

    yield
Example 15: extract_zone_cluster_name
def extract_zone_cluster_name(zone_config):
    """ return the cluster (must be common to all zone endpoints) """
    cluster_name = None
    endpoints = zone_config.get('endpoints')
    if not endpoints:
        raise ConfigError('zone %s missing \'endpoints\' list' % \
                          zone_config['name'])
    for role in endpoints:
        name, _, _ = misc.split_role(role)
        if not cluster_name:
            cluster_name = name
        elif cluster_name != name:
            raise ConfigError('all zone %s endpoints must be in the same cluster' % \
                              zone_config['name'])
    return cluster_name