This article collects typical usage examples of the Python function teuthology.misc.roles_of_type. If you are unsure how roles_of_type is used in practice, or what real call sites look like, the hand-picked code examples below should help.
The following sections show 15 code examples of the roles_of_type function, sorted by popularity by default.
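As a quick orientation before the examples: in teuthology, every remote host carries a list of role strings such as 'osd.0' or 'client.1', and roles_of_type(roles_for_host, type_) iterates over the ids of the roles of one type on that host. The sketch below is a minimal, hypothetical reimplementation inferred from the call sites in the examples that follow; it is not the library's actual source and is only meant to show the expected inputs and outputs.

# Illustrative stand-in for teuthology.misc.roles_of_type, inferred from how the
# function is called in the examples below; the real implementation may differ.
def roles_of_type(roles_for_host, type_):
    """Yield the id part of every role of the given type, e.g. 'osd.3' -> '3'."""
    prefix = type_ + '.'
    for role in roles_for_host:
        if role.startswith(prefix):
            yield role[len(prefix):]

# Typical call site, mirroring the pattern used throughout the examples:
roles_for_host = ['mon.a', 'osd.0', 'osd.1', 'client.0']   # roles assigned to one remote
for id_ in roles_of_type(roles_for_host, 'osd'):
    print('ceph-osd id: %s' % id_)                          # prints 0, then 1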
Example 1: create_ceph_conf
def create_ceph_conf(ctx, config):
    devs_to_clean = {}
    remote_to_roles_to_devs = {}
    remote_to_roles_to_journals = {}
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    for remote, roles_for_host in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        roles_to_devs = {}
        roles_to_journals = {}
        if config.get('fs'):
            log.info('fs option selected, checking for scratch devs')
            log.info('found devs: %s' % (str(devs),))
            devs_id_map = teuthology.get_wwn_id_map(remote, devs)
            iddevs = devs_id_map.values()
            roles_to_devs = assign_devs(
                teuthology.roles_of_type(roles_for_host, 'osd'), iddevs
            )
            if len(roles_to_devs) < len(iddevs):
                iddevs = iddevs[len(roles_to_devs):]
            devs_to_clean[remote] = []

        if config.get('block_journal'):
            log.info('block journal enabled')
            roles_to_journals = assign_devs(
                teuthology.roles_of_type(roles_for_host, 'osd'), iddevs
            )
            log.info('journal map: %s', roles_to_journals)

        if config.get('tmpfs_journal'):
            log.info('tmpfs journal enabled')
            roles_to_journals = {}
            remote.run(args=['sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt'])
            for osd in teuthology.roles_of_type(roles_for_host, 'osd'):
                tmpfs = '/mnt/osd.%s' % osd
                roles_to_journals[osd] = tmpfs
                remote.run(args=['truncate', '-s', '1500M', tmpfs])
            log.info('journal map: %s', roles_to_journals)

        log.info('dev map: %s' % (str(roles_to_devs),))
        remote_to_roles_to_devs[remote] = roles_to_devs
        remote_to_roles_to_journals[remote] = roles_to_journals

    log.info('Generating config...')
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [role_list for (remote, role_list) in remotes_and_roles]
    ips = [host for (host, port) in
           (remote.ssh.get_transport().getpeername()
            for (remote, role_list) in remotes_and_roles)]
    conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips)
    ctx.ceph = argparse.Namespace()
    ctx.ceph.conf = conf
    log.info(ctx)
    yield
Example 2: execute
def execute(ctx, config):
    procs = []
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    for remote, roles_for_host in osds.remotes.iteritems():
        roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote]
        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
            if roles_to_devs.get(id_):
                dev = roles_to_devs[id_]
                log.info("running blktrace on %s: %s" % (remote.name, dev))
                proc = remote.run(
                    args=[
                        'cd',
                        log_dir,
                        run.Raw(';'),
                        '/tmp/cephtest/daemon-helper',
                        daemon_signal,
                        'sudo',
                        blktrace,
                        '-o',
                        dev.rsplit("/", 1)[1],
                        '-d',
                        dev,
                    ],
                    wait=False,
                    stdin=run.PIPE,
                )
                procs.append(proc)
    try:
        yield
    finally:
        osds = ctx.cluster.only(teuthology.is_type('osd'))
        log.info('stopping blktrace processes')
        for proc in procs:
            proc.stdin.close()
Example 3: osd_admin_socket
def osd_admin_socket(self, osdnum, command, check_status=True):
    """
    Remotely start up ceph specifying the admin socket
    """
    testdir = teuthology.get_testdir(self.ctx)
    remote = None
    for _remote, roles_for_host in self.ctx.cluster.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
            if int(id_) == int(osdnum):
                remote = _remote
    assert remote is not None
    args = [
        'sudo',
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'ceph',
        '--admin-daemon',
        '/var/run/ceph/ceph-osd.{id}.asok'.format(id=osdnum),
    ]
    args.extend(command)
    return remote.run(
        args=args,
        stdout=StringIO(),
        wait=True,
        check_status=check_status
    )
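For context, the command argument is the tail of a `ceph --admin-daemon` invocation, passed as a list of strings, and the call returns the completed remote process. A hypothetical call site might look like the snippet below; the manager object and the specific admin-socket command are assumptions for illustration only.

# Hypothetical usage; 'manager' is assumed to be an object exposing osd_admin_socket(),
# and 'dump_ops_in_flight' is just an example admin-socket command.
proc = manager.osd_admin_socket(0, ['dump_ops_in_flight'], check_status=False)
print(proc.stdout.getvalue())   # the StringIO captured by remote.run()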
Example 4: create_keyring
def create_keyring(ctx):
    log.info('Setting up client nodes...')
    clients = ctx.cluster.only(teuthology.is_type('client'))
    testdir = teuthology.get_testdir(ctx)
    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
    for remote, roles_for_host in clients.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
            client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
            remote.run(
                args=[
                    'sudo',
                    'adjust-ulimits',
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-authtool',
                    '--create-keyring',
                    '--gen-key',
                    # TODO this --name= is not really obeyed, all unknown "types" are munged to "client"
                    '--name=client.{id}'.format(id=id_),
                    client_keyring,
                    run.Raw('&&'),
                    'sudo',
                    'chmod',
                    '0644',
                    client_keyring,
                ],
            )
Example 5: get_dev_for_osd
def get_dev_for_osd(ctx, config):
    """Get a list of all osd device names."""
    osd_devs = []
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        host = remote.name.split('@')[-1]
        shortname = host.split('.')[0]
        devs = teuthology.get_scratch_devices(remote)
        num_osd_per_host = list(
            teuthology.roles_of_type(
                roles_for_host, 'osd'))
        num_osds = len(num_osd_per_host)
        if config.get('separate_journal_disk') is not None:
            num_devs_reqd = 2 * num_osds
            assert num_devs_reqd <= len(
                devs), 'fewer data and journal disks than required ' + shortname
            for dindex in range(0, num_devs_reqd, 2):
                jd_index = dindex + 1
                dev_short = devs[dindex].split('/')[-1]
                jdev_short = devs[jd_index].split('/')[-1]
                osd_devs.append((shortname, dev_short, jdev_short))
        else:
            assert num_osds <= len(devs), 'fewer disks than osds ' + shortname
            for dev in devs[:num_osds]:
                dev_short = dev.split('/')[-1]
                osd_devs.append((shortname, dev_short))
    return osd_devs
Example 6: get_nodes_using_roles
def get_nodes_using_roles(ctx, config, role):
    newl = []
    for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, role):
            rem = _remote
            if role == 'mon':
                req1 = str(rem).split('@')[-1]
            else:
                req = str(rem).split('.')[0]
                req1 = str(req).split('@')[1]
            newl.append(req1)
    return newl
Example 7: get_nodes_using_roles
def get_nodes_using_roles(ctx, config, role):
    """Extract the names of nodes that match a given role from a cluster"""
    newl = []
    for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, role):
            rem = _remote
            if role == 'mon':
                req1 = str(rem).split('@')[-1]
            else:
                req = str(rem).split('.')[0]
                req1 = str(req).split('@')[1]
            newl.append(req1)
    return newl
Example 8: get_dev_for_osd
def get_dev_for_osd(ctx, config):
    osd_devs = []
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        host = remote.name.split('@')[-1]
        shortname = host.split('.')[0]
        devs = teuthology.get_scratch_devices(remote)
        num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, 'osd'))
        num_osds = len(num_osd_per_host)
        assert num_osds <= len(devs), 'fewer disks than osds on ' + shortname
        for dev in devs[:num_osds]:
            dev_short = dev.split('/')[-1]
            osd_devs.append('{host}:{dev}'.format(host=shortname, dev=dev_short))
    return osd_devs
Example 9: get_dev_for_osd
def get_dev_for_osd(ctx, config):
    """Get a list of all osd device names."""
    osd_devs = []
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        host = remote.name.split("@")[-1]
        shortname = host.split(".")[0]
        devs = teuthology.get_scratch_devices(remote)
        num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, "osd"))
        num_osds = len(num_osd_per_host)
        assert num_osds <= len(devs), "fewer disks than osds on " + shortname
        for dev in devs[:num_osds]:
            dev_short = dev.split("/")[-1]
            osd_devs.append("{host}:{dev}".format(host=shortname, dev=dev_short))
    return osd_devs
Example 10: osd_admin_socket
def osd_admin_socket(self, osdnum, command, check_status=True):
    testdir = teuthology.get_testdir(self.ctx)
    remote = None
    for _remote, roles_for_host in self.ctx.cluster.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, "osd"):
            if int(id_) == int(osdnum):
                remote = _remote
    assert remote is not None
    args = [
        "sudo",
        "adjust-ulimits",
        "ceph-coverage",
        "{tdir}/archive/coverage".format(tdir=testdir),
        "ceph",
        "--admin-daemon",
        "/var/run/ceph/ceph-osd.{id}.asok".format(id=osdnum),
    ]
    args.extend(command)
    return remote.run(args=args, stdout=StringIO(), wait=True, check_status=check_status)
Example 11: make_deamons_list
def make_deamons_list(ctx, config):
    for type_ in ['mon', 'mds', 'osd', 'client', 'samba']:
        daemons = ctx.cluster.only(teuthology.is_type(type_))
        if daemons is None:
            continue
        for remote, roles_for_host in daemons.remotes.iteritems():
            for id_ in teuthology.roles_of_type(roles_for_host, type_):
                name = '%s.%s' % (type_, id_)
                ctx.daemons.add_daemon(remote, type_, id_,
                                       args='no-op',
                                       logger=log.getChild(name),
                                       stdin=run.PIPE,
                                       wait=False,
                                       )
    log.info('ctx daemon lists')
    log.info(ctx.daemons.resolve_role_list(roles=None,
                                           types=['mon', 'mds', 'osd', 'client', 'samba']))
    yield
Example 12: osd_admin_socket
def osd_admin_socket(self, osdnum, command):
    remote = None
    for _remote, roles_for_host in self.ctx.cluster.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
            if int(id_) == osdnum:
                remote = _remote
    assert remote is not None
    args = [
        'LD_LIBRARY_PRELOAD=/tmp/cephtest/binary/usr/local/lib',
        '/tmp/cephtest/enable-coredump',
        '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
        '/tmp/cephtest/archive/coverage',
        '/tmp/cephtest/binary/usr/local/bin/ceph',
        '-k', '/tmp/cephtest/ceph.keyring',
        '-c', '/tmp/cephtest/ceph.conf',
        '--admin-daemon',
        "/tmp/cephtest/asok.osd.%s" % (str(osdnum),)]
    args.extend(command)
    return remote.run(
        args=args,
        stdout=StringIO(),
        wait=True,
    )
Example 13: build_ceph_cluster
# ... the beginning of this function is omitted here ...
    if estatus_mon_d != 0:
        raise RuntimeError("ceph-deploy: Failed to delete monitor")

    node_dev_list = get_dev_for_osd(ctx, config)
    for d in node_dev_list:
        osd_create_cmds = "./ceph-deploy osd create --zap-disk" + " " + d
        estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
        if estatus_osd == 0:
            log.info("successfully created osd")
            no_of_osds += 1
        else:
            zap_disk = "./ceph-deploy disk zap" + " " + d
            execute_ceph_deploy(ctx, config, zap_disk)
            estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
            if estatus_osd == 0:
                log.info("successfully created osd")
                no_of_osds += 1
            else:
                raise RuntimeError("ceph-deploy: Failed to create osds")

    if config.get("wait-for-healthy", True) and no_of_osds >= 2:
        is_healthy(ctx=ctx, config=None)

        log.info("Setting up client nodes...")
        conf_path = "/etc/ceph/ceph.conf"
        admin_keyring_path = "/etc/ceph/ceph.client.admin.keyring"
        first_mon = teuthology.get_first_mon(ctx, config)
        (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
        conf_data = teuthology.get_file(remote=mon0_remote, path=conf_path, sudo=True)
        admin_keyring = teuthology.get_file(remote=mon0_remote, path=admin_keyring_path, sudo=True)

        clients = ctx.cluster.only(teuthology.is_type("client"))
        for remot, roles_for_host in clients.remotes.iteritems():
            for id_ in teuthology.roles_of_type(roles_for_host, "client"):
                client_keyring = "/etc/ceph/ceph.client.{id}.keyring".format(id=id_)
                mon0_remote.run(
                    args=[
                        "cd",
                        "{tdir}".format(tdir=testdir),
                        run.Raw("&&"),
                        "sudo",
                        "bash",
                        "-c",
                        run.Raw('"'),
                        "ceph",
                        "auth",
                        "get-or-create",
                        "client.{id}".format(id=id_),
                        "mds",
                        "allow",
                        "mon",
                        "allow *",
                        "osd",
                        "allow *",
                        run.Raw(">"),
                        client_keyring,
                        run.Raw('"'),
                    ]
                )
                key_data = teuthology.get_file(remote=mon0_remote, path=client_keyring, sudo=True)
                teuthology.sudo_write_file(remote=remot, path=client_keyring, data=key_data, perms="0644")
                teuthology.sudo_write_file(remote=remot, path=admin_keyring_path, data=admin_keyring, perms="0644")
                teuthology.sudo_write_file(remote=remot, path=conf_path, data=conf_data, perms="0644")
    else:
        raise RuntimeError("The cluster is NOT operational due to insufficient OSDs")
Example 14: run_daemon
def run_daemon(ctx, config, type_):
    log.info('Starting %s daemons...' % type_)
    daemons = ctx.cluster.only(teuthology.is_type(type_))
    coverage_dir = '/tmp/cephtest/archive/coverage'

    daemon_signal = 'kill'
    if config.get('coverage') or config.get('valgrind') is not None:
        daemon_signal = 'term'

    num_active = 0
    for remote, roles_for_host in daemons.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, type_):
            name = '%s.%s' % (type_, id_)

            if not id_.endswith('-s'):
                num_active += 1

            run_cmd = [
                '/tmp/cephtest/enable-coredump',
                '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
                coverage_dir,
                '/tmp/cephtest/daemon-helper',
                daemon_signal,
            ]
            run_cmd_tail = [
                '/tmp/cephtest/binary/usr/local/bin/ceph-%s' % type_,
                '-f',
                '-i', id_,
                '-c', '/tmp/cephtest/ceph.conf']

            if config.get('valgrind') is not None:
                valgrind_args = None
                if type_ in config['valgrind']:
                    valgrind_args = config['valgrind'][type_]
                if name in config['valgrind']:
                    valgrind_args = config['valgrind'][name]
                run_cmd.extend(teuthology.get_valgrind_args(name, valgrind_args))

            if type_ in config.get('cpu_profile', []):
                profile_path = '/tmp/cephtest/archive/log/%s.%s.prof' % (type_, id_)
                run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])

            run_cmd.extend(run_cmd_tail)

            ctx.daemons.add_daemon(remote, type_, id_,
                                   args=run_cmd,
                                   logger=log.getChild(name),
                                   stdin=run.PIPE,
                                   wait=False,
                                   )

    if type_ == 'mds':
        firstmon = teuthology.get_first_mon(ctx, config)
        (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
        mon0_remote.run(args=[
            '/tmp/cephtest/enable-coredump',
            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
            coverage_dir,
            '/tmp/cephtest/binary/usr/local/bin/ceph',
            '-c', '/tmp/cephtest/ceph.conf',
            'mds', 'set_max_mds', str(num_active)])

    try:
        yield
    finally:
        log.info('Shutting down %s daemons...' % type_)
        exc_info = (None, None, None)
        for daemon in ctx.daemons.iter_daemons_of_role(type_):
            try:
                daemon.stop()
            except (run.CommandFailedError,
                    run.CommandCrashedError,
                    run.ConnectionLostError):
                exc_info = sys.exc_info()
                log.exception('Saw exception from %s.%s', daemon.role, daemon.id_)
        if exc_info != (None, None, None):
            raise exc_info[0], exc_info[1], exc_info[2]
Example 15: cluster
def cluster(ctx, config):
    log.info('Creating ceph cluster...')
    run.wait(
        ctx.cluster.run(
            args=[
                'install', '-d', '-m0755', '--',
                '/tmp/cephtest/data',
            ],
            wait=False,
        )
    )

    devs_to_clean = {}
    remote_to_roles_to_devs = {}
    remote_to_roles_to_journals = {}
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    for remote, roles_for_host in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        roles_to_devs = {}
        roles_to_journals = {}
        if config.get('fs'):
            log.info('fs option selected, checking for scratch devs')
            log.info('found devs: %s' % (str(devs),))
            roles_to_devs = assign_devs(
                teuthology.roles_of_type(roles_for_host, 'osd'), devs
            )
            if len(roles_to_devs) < len(devs):
                devs = devs[len(roles_to_devs):]
            log.info('dev map: %s' % (str(roles_to_devs),))
            devs_to_clean[remote] = []

        if config.get('block_journal'):
            log.info('block journal enabled')
            roles_to_journals = assign_devs(
                teuthology.roles_of_type(roles_for_host, 'osd'), devs
            )
            log.info('journal map: %s', roles_to_journals)

        if config.get('tmpfs_journal'):
            log.info('tmpfs journal enabled')
            roles_to_journals = {}
            remote.run(args=['sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt'])
            for osd in teuthology.roles_of_type(roles_for_host, 'osd'):
                tmpfs = '/mnt/osd.%s' % osd
                roles_to_journals[osd] = tmpfs
                remote.run(args=['truncate', '-s', '1500M', tmpfs])
            log.info('journal map: %s', roles_to_journals)

        remote_to_roles_to_devs[remote] = roles_to_devs
        remote_to_roles_to_journals[remote] = roles_to_journals

    log.info('Generating config...')
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [roles for (remote, roles) in remotes_and_roles]
    ips = [host for (host, port) in
           (remote.ssh.get_transport().getpeername()
            for (remote, roles) in remotes_and_roles)]
    conf = teuthology.skeleton_config(roles=roles, ips=ips)
    for remote, roles_to_journals in remote_to_roles_to_journals.iteritems():
        for role, journal in roles_to_journals.iteritems():
            key = "osd." + str(role)
            if key not in conf:
                conf[key] = {}
            conf[key]['osd journal'] = journal
    for section, keys in config['conf'].iteritems():
        for key, value in keys.iteritems():
            log.info("[%s] %s = %s" % (section, key, value))
            if section not in conf:
                conf[section] = {}
            conf[section][key] = value

    if config.get('tmpfs_journal'):
        conf['journal dio'] = False

    ctx.ceph = argparse.Namespace()
    ctx.ceph.conf = conf

    log.info('Writing configs...')
    conf_fp = StringIO()
    conf.write(conf_fp)
    conf_fp.seek(0)
    writes = ctx.cluster.run(
        args=[
            'python',
            '-c',
            'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
            '/tmp/cephtest/ceph.conf',
        ],
        stdin=run.PIPE,
        wait=False,
    )
    teuthology.feed_many_stdins_and_close(conf_fp, writes)
    run.wait(writes)

    coverage_dir = '/tmp/cephtest/archive/coverage'

    firstmon = teuthology.get_first_mon(ctx, config)
    log.info('Setting up %s...' % firstmon)
    ctx.cluster.only(firstmon).run(
        # ... the rest of this function is omitted here ...