This article collects typical usage examples of the Python function teuthology.misc.all_roles_of_type. If you are wondering what all_roles_of_type does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following sections present 15 code examples of all_roles_of_type, sorted by popularity by default.
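Before diving into the examples, it helps to picture what the helper yields: given a role type such as 'client', it walks the cluster's role map and yields the id part of every matching role, so 'client.0' and 'client.1' produce '0' and '1'. The sketch below is a minimal, self-contained stand-in written for illustration only; FakeCluster and the sample role lists are invented here and are not part of teuthology.

# Minimal illustrative sketch (not teuthology itself): FakeCluster stands in
# for ctx.cluster, which exposes a mapping of remotes to their role lists.
class FakeCluster(object):
    def __init__(self, remotes):
        self.remotes = remotes  # {remote_or_hostname: ['mon.a', 'osd.0', ...]}

def all_roles_of_type(cluster, type_):
    """Yield the id part of every role of the given type, e.g. '0' for 'client.0'."""
    prefix = '{type}.'.format(type=type_)
    for _, roles_for_host in cluster.remotes.items():
        for role in roles_for_host:
            if role.startswith(prefix):
                yield role[len(prefix):]

cluster = FakeCluster({'host-a': ['mon.a', 'osd.0', 'client.0'],
                       'host-b': ['osd.1', 'client.1']})
print(list(all_roles_of_type(cluster, 'client')))      # ['0', '1']
# The examples below usually rebuild full role names from these ids:
print(['client.{id}'.format(id=id_)
       for id_ in all_roles_of_type(cluster, 'client')])  # ['client.0', 'client.1']

In the real examples, the same ids are then used to look up remotes with ctx.cluster.only('client.{id}'...), as Example 1 shows.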
Example 1: _spawn_on_all_clients
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests() for each role.

    See run_tests() for parameter documentation.
    """
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()

    created_mountpoint = {}
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
                        timeout=timeout)

    # cleanup the generated client directories
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client])
Example 2: _spawn_on_all_clients
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests for each role.

    :param ctx: Context
    :param refspec: branch, sha1, or version tag used to identify this
                    build
    :param tests: specific tests specified.
    :param env: environment set in yaml file. Could be None.
    :param subdir: subdirectory set in yaml file. Could be None
    """
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)

    # cleanup the generated client directories
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client))
Example 3: __init__
def __init__(self, ctx):
    self._ctx = ctx

    self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    if len(self.mds_ids) == 0:
        raise RuntimeError("This task requires at least one MDS")

    first_mon = misc.get_first_mon(ctx, None)
    (self.mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    self.mon_manager = ceph_manager.CephManager(self.mon_remote, ctx=ctx, logger=log.getChild('ceph_manager'))
    self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id)) for mds_id in self.mds_ids])

    client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_list[0]
    self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]
Example 4: __init__
def __init__(self, ctx, fscid=None, name=None, create=False,
             ec_profile=None):
    super(Filesystem, self).__init__(ctx)

    self.name = name
    self.ec_profile = ec_profile
    self.id = None
    self.metadata_pool_name = None
    self.metadata_overlay = False
    self.data_pool_name = None
    self.data_pools = None

    client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
    self.client_id = client_list[0]
    self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]

    if name is not None:
        if fscid is not None:
            raise RuntimeError("cannot specify fscid when creating fs")
        if create and not self.legacy_configured():
            self.create()
    else:
        if fscid is not None:
            self.id = fscid
        self.getinfo(refresh = True)

    # Stash a reference to the first created filesystem on ctx, so
    # that if someone drops to the interactive shell they can easily
    # poke our methods.
    if not hasattr(self._ctx, "filesystem"):
        self._ctx.filesystem = self
Example 5: normalize_config
def normalize_config(ctx, config):
    """
    Returns a config whose keys are all real roles.
    Generic roles (client, mon, osd, etc.) are replaced with
    the actual roles (client.0, client.1, etc.). If the config
    specifies a different version for a specific role, this is
    unchanged.

    For example, with 3 OSDs this::

        osd:
          tag: v3.0
          kdb: true
        osd.1:
          branch: new_btrfs
          kdb: false
        osd.3:
          deb: /path/to/linux-whatever.deb

    is transformed into::

        osd.0:
          tag: v3.0
          kdb: true
        osd.1:
          branch: new_btrfs
          kdb: false
        osd.2:
          tag: v3.0
          kdb: true
        osd.3:
          deb: /path/to/linux-whatever.deb

    If config is None or just specifies a version to use,
    it is applied to all nodes.
    """
    if config is None or \
            len(filter(lambda x: x in ['tag', 'branch', 'sha1', 'kdb',
                                       'deb'],
                       config.keys())) == len(config.keys()):
        new_config = {}
        if config is None:
            config = {'branch': 'master'}
        for _, roles_for_host in ctx.cluster.remotes.iteritems():
            new_config[roles_for_host[0]] = config
        return new_config

    new_config = {}
    for role, role_config in config.iteritems():
        if role_config is None:
            role_config = {'branch': 'master'}
        if '.' in role:
            new_config[role] = role_config
        else:
            for id_ in teuthology.all_roles_of_type(ctx.cluster, role):
                name = '{type}.{id}'.format(type=role, id=id_)
                # specific overrides generic
                if name not in config:
                    new_config[name] = role_config
    return new_config
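The expansion rule described in the docstring above (generic roles fan out over the cluster's ids, while explicit roles win) can be seen in isolation with a small self-contained sketch; the role ids and config values here are made-up examples, not taken from a real cluster.

# Sketch of the "generic role -> real roles" expansion that normalize_config performs.
cluster_role_ids = {'osd': ['0', '1', '2']}          # ids present in the cluster
config = {
    'osd':   {'tag': 'v3.0', 'kdb': True},           # generic entry
    'osd.1': {'branch': 'new_btrfs', 'kdb': False},  # specific override
    'osd.3': {'deb': '/path/to/linux-whatever.deb'},
}

new_config = {}
for role, role_config in config.items():
    if '.' in role:                       # already a real role, keep as-is
        new_config[role] = role_config
    else:                                 # expand the generic role over every id
        for id_ in cluster_role_ids[role]:
            name = '{type}.{id}'.format(type=role, id=id_)
            if name not in config:        # specific entries override generic ones
                new_config[name] = role_config

print(sorted(new_config))   # ['osd.0', 'osd.1', 'osd.2', 'osd.3']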
Example 6: task
def task(ctx, config):
    """
    Mount/unmount a ``kernel`` client.

    The config is optional and defaults to mounting on all clients. If
    a config is given, it is expected to be a list of clients to do
    this operation on. This lets you e.g. set up one client with
    ``ceph-fuse`` and another with ``kclient``.

    Example that mounts all clients::

        tasks:
        - ceph:
        - kclient:
        - interactive:

    Example that uses both ``kclient`` and ``ceph-fuse``::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - kclient: [client.1]
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Mounting kernel clients...')
    assert config is None or isinstance(config, list), \
        "task kclient got invalid config"

    if config is None:
        config = ['client.{id}'.format(id=id_)
                  for id_ in misc.all_roles_of_type(ctx.cluster, 'client')]
    clients = list(misc.get_clients(ctx=ctx, roles=config))

    test_dir = misc.get_testdir(ctx)

    # Assemble mon addresses
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [roles for (remote_, roles) in remotes_and_roles]
    ips = [remote_.ssh.get_transport().getpeername()[0]
           for (remote_, _) in remotes_and_roles]
    mons = misc.get_mons(roles, ips).values()

    mounts = {}
    for id_, remote in clients:
        kernel_mount = KernelMount(mons, test_dir, id_, remote)
        mounts[id_] = kernel_mount
        kernel_mount.mount()

    ctx.mounts = mounts
    try:
        yield mounts
    finally:
        log.info('Unmounting kernel clients...')
        for mount in mounts.values():
            mount.umount()
Example 7: _spawn_on_all_clients
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)

    # cleanup the generated client directories
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), subdir)
Example 8: create
def create(self):
    pg_warn_min_per_osd = int(self.get_config("mon_pg_warn_min_per_osd"))
    osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, "osd")))
    pgs_per_fs_pool = pg_warn_min_per_osd * osd_count

    self.admin_remote.run(args=["sudo", "ceph", "osd", "pool", "create", "metadata", pgs_per_fs_pool.__str__()])
    self.admin_remote.run(args=["sudo", "ceph", "osd", "pool", "create", "data", pgs_per_fs_pool.__str__()])
    self.admin_remote.run(args=["sudo", "ceph", "fs", "new", "default", "metadata", "data"])
Example 9: create
def create(self):
    pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
    osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
    pgs_per_fs_pool = pg_warn_min_per_osd * osd_count

    self.mon_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', 'metadata', pgs_per_fs_pool.__str__()])
    self.mon_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', 'data', pgs_per_fs_pool.__str__()])
    self.mon_remote.run(args=['sudo', 'ceph', 'fs', 'new', 'default', 'metadata', 'data'])
Example 10: end
def end(self):
    overrides = self.ctx.config.get('overrides', {})
    misc.deep_merge(self.config, overrides.get('check-counter', {}))

    cluster_name = self.config.get('cluster_name', None)
    dry_run = self.config.get('dry_run', False)
    targets = self.config.get('counters', {})

    if cluster_name is None:
        cluster_name = self.ctx.managers.keys()[0]

    for daemon_type, counters in targets.items():
        # List of 'a', 'b', 'c'...
        daemon_ids = list(misc.all_roles_of_type(self.ctx.cluster, daemon_type))
        daemons = dict([(daemon_id,
                         self.ctx.daemons.get_daemon(daemon_type, daemon_id))
                        for daemon_id in daemon_ids])

        seen = set()

        for daemon_id, daemon in daemons.items():
            if not daemon.running():
                log.info("Ignoring daemon {0}, it isn't running".format(daemon_id))
                continue
            else:
                log.debug("Getting stats from {0}".format(daemon_id))

            manager = self.ctx.managers[cluster_name]
            proc = manager.admin_socket(daemon_type, daemon_id, ["perf", "dump"])
            response_data = proc.stdout.getvalue().strip()
            if response_data:
                perf_dump = json.loads(response_data)
            else:
                log.warning("No admin socket response from {0}, skipping".format(daemon_id))
                continue

            for counter in counters:
                subsys, counter_id = counter.split(".")
                if subsys not in perf_dump or counter_id not in perf_dump[subsys]:
                    log.warning("Counter '{0}' not found on daemon {1}.{2}".format(
                        counter, daemon_type, daemon_id))
                    continue
                value = perf_dump[subsys][counter_id]

                log.info("Daemon {0}.{1} {2}={3}".format(
                    daemon_type, daemon_id, counter, value
                ))

                if value > 0:
                    seen.add(counter)

        if not dry_run:
            unseen = set(counters) - set(seen)
            if unseen:
                raise RuntimeError("The following counters failed to be set "
                                   "on {0} daemons: {1}".format(
                                       daemon_type, unseen
                                   ))
Example 11: get_config
def get_config(self, key, service_type=None):
    """
    Get config from mon by default, or a specific service if caller asks for it
    """
    if service_type is None:
        service_type = 'mon'

    service_id = sorted(misc.all_roles_of_type(self._ctx.cluster, service_type))[0]
    return self.json_asok(['config', 'get', key], service_type, service_id)[key]
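A small aside on this example: because the service id is taken as sorted(misc.all_roles_of_type(...))[0], the query deterministically lands on the first id in sort order, regardless of the order in which the generator happens to yield them. A trivial stand-alone sketch of that selection, using invented ids:

# Hypothetical ids, in the order all_roles_of_type might yield them for 'osd'.
osd_ids = ['2', '0', '1']        # generator order follows host order, not numeric order
service_id = sorted(osd_ids)[0]  # deterministic pick: first id in (string) sort order
print(service_id)                # '0'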
Example 12: task
def task(ctx, config):
    """
    Go through filesystem creation with a synthetic failure in an MDS
    in its 'up:creating' state, to exercise the retry behaviour.
    """
    # Grab handles to the teuthology objects of interest
    mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
    if len(mdslist) != 1:
        # Require exactly one MDS, the code path for creation failure when
        # a standby is available is different
        raise RuntimeError("This task requires exactly one MDS")
    mds_id = mdslist[0]

    (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'),
    )

    # Stop MDS
    self.fs.set_max_mds(0)
    self.fs.mds_stop(mds_id)
    self.fs.mds_fail(mds_id)

    # Reset the filesystem so that next start will go into CREATING
    manager.raw_cluster_cmd('fs', 'rm', "default", "--yes-i-really-mean-it")
    manager.raw_cluster_cmd('fs', 'new', "default", "metadata", "data")

    # Start the MDS with mds_kill_create_at set, it will crash during creation
    mds.restart_with_args(["--mds_kill_create_at=1"])
    try:
        mds.wait_for_exit()
    except CommandFailedError as e:
        if e.exitstatus == 1:
            log.info("MDS creation killed as expected")
        else:
            log.error("Unexpected status code %s" % e.exitstatus)
            raise

    # Since I have intentionally caused a crash, I will clean up the resulting core
    # file to avoid task.internal.coredump seeing it as a failure.
    log.info("Removing core file from synthetic MDS failure")
    mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))])

    # It should have left the MDS map state still in CREATING
    status = self.fs.status().get_mds(mds_id)
    assert status['state'] == 'up:creating'

    # Start the MDS again without the kill flag set, it should proceed with creation successfully
    mds.restart()

    # Wait for state ACTIVE
    self.fs.wait_for_state("up:active", timeout=120, mds_id=mds_id)

    # The system should be back up in a happy healthy state, go ahead and run any further tasks
    # inside this context.
    yield
Example 13: task
def task(ctx, config):
    """
    Create a mount dir 'client' that is just the local disk:

    Example that "mounts" all clients::

        tasks:
        - localdir:
        - interactive:

    Example for a specific client::

        tasks:
        - localdir: [client.2]
        - interactive:

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Creating local mnt dirs...')

    testdir = teuthology.get_testdir(ctx)

    if config is None:
        config = list('client.{id}'.format(id=id_)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster,
                                                              'client'))

    clients = list(teuthology.get_clients(ctx=ctx, roles=config))
    for id_, remote in clients:
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        log.info('Creating dir {remote} {mnt}...'.format(
            remote=remote, mnt=mnt))
        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
            ],
        )

    try:
        yield
    finally:
        log.info('Removing local mnt dirs...')
        for id_, remote in clients:
            mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
            remote.run(
                args=[
                    'rm',
                    '-rf',
                    '--',
                    mnt,
                ],
            )
Example 14: task
def task(ctx, config):
    """
    Enable most ceph console logging

    Example that enables logging on all clients::

        tasks:
        - ceph:
        - kclient:
        - kcon_most
        - interactive:

    Example that enables logging only on the client using kclient::

        tasks:
        - ceph:
        - kclient: [client.0]
        - kcon_most [client.0]
        - interactive:
    """
    log.info('Enable additional kernel logging...')
    assert config is None or isinstance(config, list), \
        "task kcon_most got invalid config"

    if config is None:
        config = ['client.{id}'.format(id=id_)
                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    clients = list(teuthology.get_clients(ctx=ctx, roles=config))

    testdir = teuthology.get_testdir(ctx)

    for id_, remote in clients:
        # TODO: Don't have to run this more than once per node (remote)
        log.info('Enable logging on client.{id} at {remote} ...'.format(
            id=id_, remote=remote))
        remote.run(
            args=[
                'sudo',
                'kcon_most',
                'on'
            ],
        )

    try:
        yield
    finally:
        log.info('Disable extra kernel logging on clients...')
        for id_, remote in clients:
            log.debug('Disable extra kernel logging on client.{id}...'.format(id=id_))
            remote.run(
                args=[
                    'sudo',
                    'kcon_most',
                    'off'
                ],
            )
Example 15: task
def task(ctx, config):
    """
    Spin up apache configured to run a rados gateway.
    Only one should be run per machine, since it uses a hard-coded port for now.

    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    To run radosgw through valgrind::

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]
    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    for _, roles_for_host in ctx.cluster.remotes.iteritems():
        running_rgw = False
        for role in roles_for_host:
            if role in config.iterkeys():
                assert not running_rgw, "Only one client per host can run rgw."
                running_rgw = True

    with contextutil.nested(
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: ship_config(ctx=ctx, config=config),
        lambda: start_rgw(ctx=ctx, config=config),
        lambda: start_apache(ctx=ctx, config=config),
        ):
        yield