This article collects typical usage examples of the Python function teuthology.misc.get_first_mon. If you have been wondering what get_first_mon does, how to call it, or what real uses of it look like, the hand-picked code examples here should help.
The section below presents 15 code examples of the get_first_mon function, ordered by popularity by default.
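Before the individual examples, here is a minimal sketch of the idiom that almost every snippet below repeats: ask get_first_mon for the role name of the first monitor, resolve that role to a remote host, and then run commands on it (or hand it to ceph_manager.CephManager). The snippets come from teuthology-based Ceph test tasks; in the sketch, the ctx and config arguments are assumed to be supplied by teuthology, and everything except get_first_mon and ctx.cluster.only is illustrative.

import teuthology.misc as teuthology

def example_task(ctx, config):
    # get_first_mon returns the role name of the first monitor, e.g. 'mon.a'
    # (Example 8 strips the 'mon.' prefix and Example 3 decodes the role with
    # teuthology.split_role).
    first_mon = teuthology.get_first_mon(ctx, config)

    # Restrict the cluster to the node carrying that role and unpack the
    # single matching remote.
    (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()

    # Typical follow-up: run a ceph command on the monitor host.
    mon_remote.run(args=['sudo', 'ceph', 'health'])

Several of the examples unpack the remote with remotes.iterkeys() rather than remotes.keys(); the two are interchangeable here, the former simply being the Python 2 spelling.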
Example 1: task
def task(ctx, config):
    """
    Stress test the monitors by thrashing them while another task/workunit
    is running.

    Please refer to the MonitorThrasher class for further information on the
    available options.
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'mon_thrash task only accepts a dict for configuration'
    assert len(_get_mons(ctx)) > 2, \
        'mon_thrash task requires at least 3 monitors'
    log.info('Beginning mon_thrash...')
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
        )
    thrash_proc = MonitorThrasher(ctx,
                                  manager, config,
                                  logger=log.getChild('mon_thrasher'))
    try:
        log.debug('Yielding')
        yield
    finally:
        log.info('joining mon_thrasher')
        thrash_proc.do_join()
        mons = _get_mons(ctx)
        manager.wait_for_mon_quorum_size(len(mons))
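Like most of the task functions collected here, this one is a generator: everything before the yield is setup, the nested task or workunit runs while the generator is suspended, and the finally block is the teardown that joins the thrasher and waits for quorum. The snippet below is not teuthology code, just a self-contained illustration of that setup/yield/teardown idiom in plain Python (all names are made up):

import contextlib

@contextlib.contextmanager
def thrash_while_running():
    thrasher = {'running': True}       # stand-in for MonitorThrasher
    try:
        yield thrasher                 # nested work happens while suspended here
    finally:
        thrasher['running'] = False    # stand-in for do_join() and the quorum wait

with thrash_while_running() as t:
    assert t['running']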
Example 2: download_ceph_deploy
def download_ceph_deploy(ctx, config):
    log.info('Downloading ceph-deploy...')
    testdir = teuthology.get_testdir(ctx)
    ceph_admin = teuthology.get_first_mon(ctx, config)

    ctx.cluster.only(ceph_admin).run(
        args=[
            'git', 'clone',
            # 'http://github.com/ceph/ceph-deploy.git',
            'git://ceph.com/ceph-deploy.git',
            '{tdir}/ceph-deploy'.format(tdir=testdir),
            ],
        )
    ctx.cluster.only(ceph_admin).run(
        args=[
            'cd',
            '{tdir}/ceph-deploy'.format(tdir=testdir),
            run.Raw('&&'),
            './bootstrap',
            ],
        )

    try:
        yield
    finally:
        log.info('Removing ceph-deploy ...')
        ctx.cluster.only(ceph_admin).run(
            args=[
                'rm',
                '-rf',
                '{tdir}/ceph-deploy'.format(tdir=testdir),
                ],
            )
Example 3: task
def task(ctx, config):
    """
    Test monitor recovery from OSD
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for configuration'

    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'))

    mons = ctx.cluster.only(teuthology.is_type('mon'))
    # note down the first cluster_name and mon_id
    # we will recover it later on
    cluster_name, _, mon_id = teuthology.split_role(first_mon)
    _nuke_mons(manager, mons, mon_id)
    default_keyring = '/etc/ceph/{cluster}.keyring'.format(
        cluster=cluster_name)
    keyring_path = config.get('keyring_path', default_keyring)
    _rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path)
    _revive_mons(manager, mons, mon_id, keyring_path)
    _revive_mgrs(ctx, manager)
    _revive_osds(ctx, manager)
Example 4: download_ceph_deploy
def download_ceph_deploy(ctx, config):
    """
    Downloads ceph-deploy from the ceph.com git mirror and (by default)
    switches to the master branch. If `ceph-deploy-branch` is specified, it
    will use that instead.
    """
    log.info("Downloading ceph-deploy...")
    testdir = teuthology.get_testdir(ctx)
    ceph_admin = teuthology.get_first_mon(ctx, config)
    default_cd_branch = {"ceph-deploy-branch": "master"}
    ceph_deploy_branch = config.get("ceph-deploy", default_cd_branch).get("ceph-deploy-branch")

    ctx.cluster.only(ceph_admin).run(
        args=[
            "git",
            "clone",
            "-b",
            ceph_deploy_branch,
            teuth_config.ceph_git_base_url + "ceph-deploy.git",
            "{tdir}/ceph-deploy".format(tdir=testdir),
            ]
        )
    ctx.cluster.only(ceph_admin).run(
        args=["cd", "{tdir}/ceph-deploy".format(tdir=testdir), run.Raw("&&"), "./bootstrap"]
        )

    try:
        yield
    finally:
        log.info("Removing ceph-deploy ...")
        ctx.cluster.only(ceph_admin).run(args=["rm", "-rf", "{tdir}/ceph-deploy".format(tdir=testdir)])
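The branch selection in Example 4 is just a nested-dict lookup. The short self-contained check below (the config values are made up for illustration) shows how it resolves, including the easy-to-miss case where a "ceph-deploy" section exists but omits "ceph-deploy-branch":

default_cd_branch = {"ceph-deploy-branch": "master"}

config = {}                                                # nothing specified: default branch
assert config.get("ceph-deploy", default_cd_branch).get("ceph-deploy-branch") == "master"

config = {"ceph-deploy": {"ceph-deploy-branch": "jewel"}}  # hypothetical branch name
assert config.get("ceph-deploy", default_cd_branch).get("ceph-deploy-branch") == "jewel"

config = {"ceph-deploy": {}}                               # section present but branch missing
assert config.get("ceph-deploy", default_cd_branch).get("ceph-deploy-branch") is None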
Example 5: is_healthy
def is_healthy(ctx, config):
    """Wait until a Ceph cluster is healthy."""
    testdir = teuthology.get_testdir(ctx)
    ceph_admin = teuthology.get_first_mon(ctx, config)
    (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
    max_tries = 90  # 90 tries * 10 secs --> 15 minutes
    tries = 0
    while True:
        tries += 1
        if tries >= max_tries:
            msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
            raise RuntimeError(msg)

        r = remote.run(
            args=[
                'cd',
                '{tdir}'.format(tdir=testdir),
                run.Raw('&&'),
                'sudo', 'ceph',
                'health',
                ],
            stdout=StringIO(),
            logger=log.getChild('health'),
            )
        out = r.stdout.getvalue()
        log.info('Ceph health: %s', out.rstrip('\n'))
        if out.split(None, 1)[0] == 'HEALTH_OK':
            break
        time.sleep(10)
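Example 5 decides health by looking only at the first whitespace-separated token of the 'ceph health' output. A tiny self-contained illustration (the output strings below are samples, not captured from a real cluster):

out = 'HEALTH_OK\n'                        # sample output: loop exits
assert out.split(None, 1)[0] == 'HEALTH_OK'

out = 'HEALTH_WARN 1 pgs degraded\n'       # sample output: loop keeps polling
assert out.split(None, 1)[0] != 'HEALTH_OK'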
Example 6: healthy
def healthy(ctx, config):
    """
    Wait for all OSDs to be up, and for the ceph health monitor to return HEALTH_OK.

    :param ctx: Context
    :param config: Configuration
    """
    config = config if isinstance(config, dict) else dict()
    cluster_name = config.get('cluster', 'ceph')
    log.info('Waiting until ceph cluster %s is healthy...', cluster_name)
    firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
    teuthology.wait_until_osds_up(
        ctx,
        cluster=ctx.cluster,
        remote=mon0_remote,
        ceph_cluster=cluster_name,
    )
    teuthology.wait_until_healthy(
        ctx,
        remote=mon0_remote,
        ceph_cluster=cluster_name,
    )

    if ctx.cluster.only(teuthology.is_type('mds', cluster_name)).remotes:
        # Some MDSs exist, wait for them to be healthy
        ceph_fs = Filesystem(ctx)  # TODO: make Filesystem cluster-aware
        ceph_fs.wait_for_daemons(timeout=300)
Example 7: wait_for_mon_quorum
def wait_for_mon_quorum(ctx, config):
    """
    Check remote ceph status until all monitors listed in config are in quorum.

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, list)
    firstmon = teuthology.get_first_mon(ctx, config)
    (remote,) = ctx.cluster.only(firstmon).remotes.keys()
    while True:
        r = remote.run(
            args=[
                'ceph',
                'quorum_status',
                ],
            stdout=StringIO(),
            logger=log.getChild('quorum_status'),
            )
        j = json.loads(r.stdout.getvalue())
        q = j.get('quorum_names', [])
        log.debug('Quorum: %s', q)
        if sorted(q) == sorted(config):
            break
        time.sleep(1)
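Unlike most of the examples, this one requires config to be a list: the monitor names that must appear in quorum_names before the loop exits. A self-contained sketch of the comparison it performs (the names and JSON below are stand-ins, not real 'ceph quorum_status' output):

import json

config = ['a', 'b', 'c']                               # hypothetical monitor names
r_stdout = '{"quorum_names": ["b", "c", "a"]}'         # stand-in command output
q = json.loads(r_stdout).get('quorum_names', [])
assert sorted(q) == sorted(config)                     # quorum complete, loop would exit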
Example 8: __init__
def __init__(self, ctx, manager, config, logger):
    self.ctx = ctx
    self.manager = manager

    self.stopping = False
    self.logger = logger
    self.config = config

    if self.config is None:
        self.config = dict()

    self.check_interval = float(self.config.get('interval', 30.0))

    first_mon = teuthology.get_first_mon(ctx, config)
    remote = ctx.cluster.only(first_mon).remotes.keys()[0]
    proc = remote.run(
        args=[
            'sudo',
            'ceph-mon',
            '-i', first_mon[4:],
            '--show-config-value', 'mon_clock_drift_allowed'
            ], stdout=StringIO(), wait=True
        )
    self.max_skew = self.config.get('max-skew', float(proc.stdout.getvalue()))

    self.expect_skew = self.config.get('expect-skew', False)
    self.never_fail = self.config.get('never-fail', False)
    self.at_least_once = self.config.get('at-least-once', True)
    self.at_least_once_timeout = self.config.get('at-least-once-timeout', 600.0)
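The slice first_mon[4:] above relies on get_first_mon returning a role string of the form 'mon.<id>' in these snippets, so dropping the first four characters leaves the daemon id that 'ceph-mon -i' expects; Example 3 extracts the same information (plus the cluster name) with teuthology.split_role. A two-line check with a made-up role name:

first_mon = 'mon.a'          # illustrative; the real value comes from get_first_mon
assert first_mon[4:] == 'a'  # daemon id handed to 'ceph-mon -i'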
Example 9: task
def task(ctx, config):
    """
    Use the ClockSkewCheck class to check for clock skews on the monitors.
    This task will spawn a thread running ClockSkewCheck's do_check().

    All the configuration will be directly handled by ClockSkewCheck,
    so please refer to the class documentation for further information.
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'mon_clock_skew_check task only accepts a dict for configuration'
    log.info('Beginning mon_clock_skew_check...')
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
        )

    skew_check = ClockSkewCheck(ctx,
                                manager, config,
                                logger=log.getChild('mon_clock_skew_check'))
    skew_check_thread = gevent.spawn(skew_check.do_check)
    try:
        yield
    finally:
        log.info('joining mon_clock_skew_check')
        skew_check.finish()
        skew_check_thread.get()
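Example 9 runs the check in a greenlet via gevent.spawn and calls .get() on it during teardown; .get() blocks until the greenlet finishes and re-raises any exception it raised, which is how a failed clock-skew check propagates out of the task. A minimal self-contained gevent sketch of that pattern (not teuthology code):

import gevent

def do_check():
    return 'no skew detected'                # a real check could raise on failure

check_thread = gevent.spawn(do_check)        # like gevent.spawn(skew_check.do_check)
# ... the rest of the task would run here ...
assert check_thread.get() == 'no skew detected'   # blocks, and re-raises exceptions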
Example 10: execute_playbook
def execute_playbook(self):
    """
    Execute ansible-playbook

    :param _logfile: Use this file-like object instead of a LoggerFile for
                     testing
    """
    args = [
        'ANSIBLE_STDOUT_CALLBACK=debug',
        'ansible-playbook', '-vv',
        '-i', 'inven.yml', 'site.yml'
    ]
    log.debug("Running %s", args)

    # If there is an installer.0 node, use that for the installer.
    # Otherwise, use the first mon node as installer node.
    ansible_loc = self.ctx.cluster.only('installer.0')
    (ceph_first_mon,) = self.ctx.cluster.only(
        misc.get_first_mon(self.ctx,
                           self.config)).remotes.iterkeys()
    if ansible_loc.remotes:
        (ceph_installer,) = ansible_loc.remotes.iterkeys()
    else:
        ceph_installer = ceph_first_mon
    self.ceph_first_mon = ceph_first_mon
    self.ceph_installer = ceph_installer
    self.args = args

    if self.config.get('rhbuild'):
        self.run_rh_playbook()
    else:
        self.run_playbook()
Example 11: cephfs_setup
def cephfs_setup(ctx, config):
    testdir = teuthology.get_testdir(ctx)
    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    first_mon = teuthology.get_first_mon(ctx, config)
    (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    mdss = ctx.cluster.only(teuthology.is_type('mds'))
    # If there are any MDSs, then create a filesystem for them to use.
    # Do this last because it requires the mon cluster to be up and running.
    if mdss.remotes:
        log.info('Setting up CephFS filesystem...')

        ceph_fs = Filesystem(ctx)
        if not ceph_fs.legacy_configured():
            ceph_fs.create()

        is_active_mds = lambda role: role.startswith('mds.') and not role.endswith('-s') and role.find('-s-') == -1
        all_roles = [item for remote_roles in mdss.remotes.values() for item in remote_roles]
        num_active = len([r for r in all_roles if is_active_mds(r)])
        mon_remote.run(args=[
            'adjust-ulimits',
            'ceph-coverage',
            coverage_dir,
            'ceph',
            'mds', 'set_max_mds', str(num_active)])

    yield
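The is_active_mds filter in Example 11 counts an MDS role as active unless its name ends in '-s' or contains '-s-' (presumably the standby naming convention used in these jobs). A self-contained check with illustrative role names:

is_active_mds = lambda role: role.startswith('mds.') and not role.endswith('-s') and role.find('-s-') == -1

assert is_active_mds('mds.a')
assert not is_active_mds('mds.a-s')      # treated as a standby
assert not is_active_mds('client.0')     # not an MDS role at all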
Example 12: crush_setup
def crush_setup(ctx, config):
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()

    profile = config.get('crush_tunables', 'default')
    log.info('Setting crush tunables to %s', profile)
    mon_remote.run(
        args=['sudo', 'ceph', 'osd', 'crush', 'tunables', profile])
    yield
Example 13: crush_setup
def crush_setup(ctx, config):
    cluster_name = config["cluster"]
    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()

    profile = config.get("crush_tunables", "default")
    log.info("Setting crush tunables to %s", profile)
    mon_remote.run(args=["sudo", "ceph", "--cluster", cluster_name, "osd", "crush", "tunables", profile])
    yield
Example 14: wait_for_osds_up
def wait_for_osds_up(ctx, config):
    log.info('Waiting until ceph osds are all up...')
    firstmon = teuthology.get_first_mon(ctx, config)
    (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
    teuthology.wait_until_osds_up(
        ctx,
        cluster=ctx.cluster,
        remote=mon0_remote
    )
Example 15: task
def task(ctx, config):
    """
    Test [deep] repair in several situations:
      Repair [Truncate, Data EIO, MData EIO] on [Primary|Replica]

    The config should be as follows:

      Must include the log-whitelist below
      Must enable filestore_debug_inject_read_err config

    example:

    tasks:
    - chef:
    - install:
    - ceph:
        log-whitelist: ['candidate had a read error', 'deep-scrub 0 missing, 1 inconsistent objects', 'deep-scrub 0 missing, 4 inconsistent objects', 'deep-scrub 1 errors', 'deep-scrub 4 errors', '!= known omap_digest', 'repair 0 missing, 1 inconsistent objects', 'repair 0 missing, 4 inconsistent objects', 'repair 1 errors, 1 fixed', 'repair 4 errors, 4 fixed', 'scrub 0 missing, 1 inconsistent', 'scrub 1 errors', 'size 1 != known size']
        conf:
          osd:
            filestore debug inject read err: true
    - repair_test:
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'repair_test task only accepts a dict for config'

    if not hasattr(ctx, 'manager'):
        first_mon = teuthology.get_first_mon(ctx, config)
        (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
        ctx.manager = ceph_manager.CephManager(
            mon,
            ctx=ctx,
            logger=log.getChild('ceph_manager')
            )

    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
    log.info('num_osds is %s' % num_osds)

    while len(ctx.manager.get_osd_status()['up']) < num_osds:
        time.sleep(10)

    tests = [
        gen_repair_test_1(mdataerr(ctx), choose_primary(ctx), "scrub"),
        gen_repair_test_1(mdataerr(ctx), choose_replica(ctx), "scrub"),
        gen_repair_test_1(dataerr(ctx), choose_primary(ctx), "deep-scrub"),
        gen_repair_test_1(dataerr(ctx), choose_replica(ctx), "deep-scrub"),
        gen_repair_test_1(trunc(ctx), choose_primary(ctx), "scrub"),
        gen_repair_test_1(trunc(ctx), choose_replica(ctx), "scrub"),
        gen_repair_test_2(choose_primary(ctx)),
        gen_repair_test_2(choose_replica(ctx))
    ]

    for test in tests:
        run_test(ctx, config, test)