This article collects and summarizes typical usage examples of the pull_directory function from Python's teuthology.misc module. If you have been wondering what exactly pull_directory does, how to call it, or what real-world uses look like, the curated code examples here may be just the help you need.
Shown below are 15 code examples of the pull_directory function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
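Before the full examples, here is a minimal sketch of the calling pattern nearly all of the snippets below share: copy a directory tree from each remote host into a per-host local directory. The signature, as used throughout these examples, is pull_directory(remote, remotedir, localdir); the ctx object and the pull_remote_logs helper name here are illustrative assumptions, not part of teuthology's public API.

import os
from teuthology import misc

def pull_remote_logs(ctx, remote_dir, local_root):
    # Hypothetical helper: `ctx` stands in for a teuthology run context.
    if not os.path.exists(local_root):
        os.mkdir(local_root)
    for remote in ctx.cluster.remotes.keys():
        # One subdirectory per host, named after the host's short name.
        local_path = os.path.join(local_root, remote.shortname)
        misc.pull_directory(remote, remote_dir, local_path)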
Example 1: archive
def archive(ctx, config):
"""
Handle the creation and deletion of the archive directory.
"""
log.info("Creating archive directory...")
archive_dir = misc.get_archive_dir(ctx)
run.wait(ctx.cluster.run(args=["install", "-d", "-m0755", "--", archive_dir], wait=False))
try:
yield
except Exception:
# we need to know this below
set_status(ctx.summary, "fail")
raise
finally:
passed = get_status(ctx.summary) == "pass"
if ctx.archive is not None and not (ctx.config.get("archive-on-error") and passed):
log.info("Transferring archived files...")
logdir = os.path.join(ctx.archive, "remote")
if not os.path.exists(logdir):
os.mkdir(logdir)
for rem in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, rem.shortname)
misc.pull_directory(rem, archive_dir, path)
# Check for coredumps and pull binaries
fetch_binaries_for_coredumps(path, rem)
log.info("Removing archive directory...")
run.wait(ctx.cluster.run(args=["rm", "-rf", "--", archive_dir], wait=False))
Example 2: archive
def archive(ctx, config):
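# Create /tmp/cephtest/archive on every remote, run the nested tasks,
# then pull each remote's archive into <ctx.archive>/remote/<host>
# and remove the remote copy.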
log.info('Creating archive directory...')
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
'/tmp/cephtest/archive',
],
wait=False,
)
)
try:
yield
finally:
if ctx.archive is not None:
log.info('Transferring archived files...')
logdir = os.path.join(ctx.archive, 'remote')
os.mkdir(logdir)
for remote in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, remote.shortname)
teuthology.pull_directory(remote, '/tmp/cephtest/archive', path)
log.info('Removing archive directory...')
run.wait(
ctx.cluster.run(
args=[
'rm',
'-rf',
'--',
'/tmp/cephtest/archive',
],
wait=False,
),
)
Example 3: archive
def archive(ctx, config):
"""
Handle the creation and deletion of the archive directory.
"""
log.info('Creating archive directory...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--', archive_dir,
],
wait=False,
)
)
try:
yield
except Exception:
# we need to know this below
set_status(ctx.summary, 'fail')
raise
finally:
passed = get_status(ctx.summary) == 'pass'
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and passed):
log.info('Transferring archived files...')
logdir = os.path.join(ctx.archive, 'remote')
if (not os.path.exists(logdir)):
os.mkdir(logdir)
for rem in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, rem.shortname)
misc.pull_directory(rem, archive_dir, path)
# Check for coredumps and pull binaries
fetch_binaries_for_coredumps(path, rem)
log.info('Removing archive directory...')
run.wait(
ctx.cluster.run(
args=[
'rm',
'-rf',
'--',
archive_dir,
],
wait=False,
),
)
Example 4: archive
def archive(ctx, config):
"""
Handle the creation and deletion of the archive directory.
"""
log.info('Creating archive directory...')
archive_dir = teuthology.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--', archive_dir,
],
wait=False,
)
)
try:
yield
except Exception:
# we need to know this below
ctx.summary['success'] = False
raise
finally:
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
log.info('Transferring archived files...')
logdir = os.path.join(ctx.archive, 'remote')
if (not os.path.exists(logdir)):
os.mkdir(logdir)
for remote in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, remote.shortname)
teuthology.pull_directory(remote, archive_dir, path)
log.info('Removing archive directory...')
run.wait(
ctx.cluster.run(
args=[
'rm',
'-rf',
'--',
archive_dir,
],
wait=False,
),
)
Example 5: collect_logs
def collect_logs(self):
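# Pull /var/log/ceph from every host carrying one of this task's roles
# into <ctx.archive>/remote/<host>/log, skipping the transfer when
# archive-on-error is set and the run passed.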
ctx = self.ctx
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
os.makedirs(path)
def wanted(role):
# Only attempt to collect logs from hosts which are part of the
# cluster
return any(map(
lambda role_stub: role.startswith(role_stub),
self.groups_to_roles.values(),
))
for remote in ctx.cluster.only(wanted).remotes.keys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
misc.pull_directory(remote, '/var/log/ceph',
os.path.join(sub, 'log'))
Example 6: archive
def archive(ctx, config):
log.info("Creating archive directory...")
archive_dir = teuthology.get_archive_dir(ctx)
run.wait(ctx.cluster.run(args=["install", "-d", "-m0755", "--", archive_dir], wait=False))
try:
yield
except Exception:
# we need to know this below
ctx.summary["success"] = False
raise
finally:
if ctx.archive is not None and not (ctx.config.get("archive-on-error") and ctx.summary["success"]):
log.info("Transferring archived files...")
logdir = os.path.join(ctx.archive, "remote")
if not os.path.exists(logdir):
os.mkdir(logdir)
for remote in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, remote.shortname)
teuthology.pull_directory(remote, archive_dir, path)
log.info("Removing archive directory...")
run.wait(ctx.cluster.run(args=["rm", "-rf", "--", archive_dir], wait=False))
Example 7: ceph_log
def ceph_log(ctx, config):
"""
Create /var/log/ceph log directory that is open to everyone.
Add valgrind and profiling-logger directories.
:param ctx: Context
:param config: Configuration
"""
log.info('Making ceph log dir writeable by non-root...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'chmod',
'777',
'/var/log/ceph',
],
wait=False,
)
)
log.info('Disabling ceph logrotate...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm', '-f', '--',
'/etc/logrotate.d/ceph',
],
wait=False,
)
)
log.info('Creating extra log directories...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'install', '-d', '-m0777', '--',
'/var/log/ceph/valgrind',
'/var/log/ceph/profiling-logger',
],
wait=False,
)
)
try:
yield
finally:
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
# and logs
log.info('Compressing logs...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'find',
'/var/log/ceph',
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
os.makedirs(path)
for remote in ctx.cluster.remotes.iterkeys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, '/var/log/ceph',
os.path.join(sub, 'log'))
Example 8: build_ceph_cluster
#......... (part of the code omitted here) .........
'sudo','bash','-c',
run.Raw('"'),'ceph',
'auth',
'get-or-create',
'client.{id}'.format(id=id_),
'mds', 'allow',
'mon', 'allow *',
'osd', 'allow *',
run.Raw('>'),
client_keyring,
run.Raw('"'),
],
)
key_data = teuthology.get_file(
remote=mon0_remote,
path=client_keyring,
sudo=True,
)
teuthology.sudo_write_file(
remote=remot,
path=client_keyring,
data=key_data,
perms='0644'
)
teuthology.sudo_write_file(
remote=remot,
path=admin_keyring_path,
data=admin_keyring,
perms='0644'
)
teuthology.sudo_write_file(
remote=remot,
path=conf_path,
data=conf_data,
perms='0644'
)
else:
raise RuntimeError("The cluster is NOT operational due to insufficient OSDs")
try:
yield
finally:
log.info('Stopping ceph...')
ctx.cluster.run(args=[
'sudo', 'stop', 'ceph-all',
run.Raw('||'),
'sudo', 'service', 'ceph', 'stop'
])
if ctx.archive is not None:
# archive mon data, too
log.info('Archiving mon data...')
path = os.path.join(ctx.archive, 'data')
os.makedirs(path)
mons = ctx.cluster.only(teuthology.is_type('mon'))
for remote, roles in mons.remotes.iteritems():
for role in roles:
if role.startswith('mon.'):
teuthology.pull_directory_tarball(
remote,
'/var/lib/ceph/mon',
path + '/' + role + '.tgz')
log.info('Compressing logs...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'find',
'/var/log/ceph',
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
os.makedirs(path)
for remote in ctx.cluster.remotes.iterkeys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, '/var/log/ceph',
os.path.join(sub, 'log'))
log.info('Purging package...')
execute_ceph_deploy(ctx, config, purge_nodes)
log.info('Purging data...')
execute_ceph_deploy(ctx, config, purgedata_nodes)
Example 9: build_ceph_cluster
#......... (part of the code omitted here) .........
)
teuthology.sudo_write_file(
remote=remot,
path=conf_path,
data=conf_data,
perms='0644'
)
if mds_nodes:
log.info('Configuring CephFS...')
ceph_fs = Filesystem(ctx, create=True)
elif not config.get('only_mon'):
raise RuntimeError(
"The cluster is NOT operational due to insufficient OSDs")
yield
except Exception:
log.info(
"Error encountered, logging exception before tearing down ceph-deploy")
log.info(traceback.format_exc())
raise
finally:
if config.get('keep_running'):
return
log.info('Stopping ceph...')
ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
'sudo', 'systemctl', 'stop', 'ceph.target'])
# Are you really not running anymore?
# try first with the init tooling
# ignoring the status so this becomes informational only
ctx.cluster.run(
args=[
'sudo', 'status', 'ceph-all', run.Raw('||'),
'sudo', 'service', 'ceph', 'status', run.Raw('||'),
'sudo', 'systemctl', 'status', 'ceph.target'],
check_status=False)
# and now just check for the processes themselves, as if upstart/sysvinit
# is lying to us. Ignore errors if the grep fails
ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'),
'grep', '-v', 'grep', run.Raw('|'),
'grep', 'ceph'], check_status=False)
if ctx.archive is not None:
# archive mon data, too
log.info('Archiving mon data...')
path = os.path.join(ctx.archive, 'data')
os.makedirs(path)
mons = ctx.cluster.only(teuthology.is_type('mon'))
for remote, roles in mons.remotes.iteritems():
for role in roles:
if role.startswith('mon.'):
teuthology.pull_directory_tarball(
remote,
'/var/lib/ceph/mon',
path + '/' + role + '.tgz')
log.info('Compressing logs...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'find',
'/var/log/ceph',
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
os.makedirs(path)
for remote in ctx.cluster.remotes.iterkeys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, '/var/log/ceph',
os.path.join(sub, 'log'))
# Prevent these from being undefined if the try block fails
all_nodes = get_all_nodes(ctx, config)
purge_nodes = './ceph-deploy purge' + " " + all_nodes
purgedata_nodes = './ceph-deploy purgedata' + " " + all_nodes
log.info('Purging package...')
execute_ceph_deploy(purge_nodes)
log.info('Purging data...')
execute_ceph_deploy(purgedata_nodes)
Example 10: ceph_log
#......... (part of the code omitted here) .........
args=['sudo', 'logrotate', '/etc/logrotate.d/ceph-test.conf'
],
wait=False,
)
)
def begin(self):
self.thread = gevent.spawn(self.invoke_logrotate)
def end(self):
self.stop_event.set()
self.thread.get()
def write_rotate_conf(ctx, daemons):
testdir = teuthology.get_testdir(ctx)
rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
with file(rotate_conf_path, 'rb') as f:
conf = ""
for daemon, size in daemons.iteritems():
log.info('writing logrotate stanza for {daemon}'.format(daemon=daemon))
conf += f.read().format(daemon_type=daemon, max_size=size)
f.seek(0, 0)
for remote in ctx.cluster.remotes.iterkeys():
teuthology.write_file(remote=remote,
path='{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir),
data=StringIO(conf)
)
remote.run(
args=[
'sudo',
'mv',
'{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir),
'/etc/logrotate.d/ceph-test.conf',
run.Raw('&&'),
'sudo',
'chmod',
'0644',
'/etc/logrotate.d/ceph-test.conf',
run.Raw('&&'),
'sudo',
'chown',
'root.root',
'/etc/logrotate.d/ceph-test.conf'
]
)
remote.chcon('/etc/logrotate.d/ceph-test.conf',
'system_u:object_r:etc_t:s0')
if ctx.config.get('log-rotate'):
daemons = ctx.config.get('log-rotate')
log.info('Setting up log rotation with ' + str(daemons))
write_rotate_conf(ctx, daemons)
logrotater = Rotater()
logrotater.begin()
try:
yield
finally:
if ctx.config.get('log-rotate'):
log.info('Shutting down logrotate')
logrotater.end()
ctx.cluster.run(
args=['sudo', 'rm', '/etc/logrotate.d/ceph-test.conf'
]
)
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
# and logs
log.info('Compressing logs...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'find',
'/var/log/ceph',
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
os.makedirs(path)
for remote in ctx.cluster.remotes.iterkeys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, '/var/log/ceph',
os.path.join(sub, 'log'))
Example 11: cluster
#......... (part of the code omitted here) .........
config['log_whitelist']) is not None:
log.warning('Found errors (ERR|WRN|SEC) in cluster log')
ctx.summary['success'] = False
# use the most severe problem as the failure reason
if 'failure_reason' not in ctx.summary:
for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']:
match = first_in_ceph_log(pattern, config['log_whitelist'])
if match is not None:
ctx.summary['failure_reason'] = \
'"{match}" in cluster log'.format(
match=match.rstrip('\n'),
)
break
for remote, dirs in devs_to_clean.iteritems():
for dir_ in dirs:
log.info('Unmounting %s on %s' % (dir_, remote))
remote.run(
args=[
'sync',
run.Raw('&&'),
'sudo',
'umount',
'-f',
dir_
]
)
if config.get('tmpfs_journal'):
log.info('tmpfs journal enabled - unmounting tmpfs at /mnt')
for remote, roles_for_host in osds.remotes.iteritems():
remote.run(
args=[ 'sudo', 'umount', '-f', '/mnt' ],
check_status=False,
)
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
# archive mon data, too
log.info('Archiving mon data...')
path = os.path.join(ctx.archive, 'data')
os.makedirs(path)
for remote, roles in mons.remotes.iteritems():
for role in roles:
if role.startswith('mon.'):
teuthology.pull_directory_tarball(
remote,
'/var/lib/ceph/mon',
path + '/' + role + '.tgz')
# and logs
log.info('Compressing logs...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'find',
'/var/log/ceph',
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
os.makedirs(path)
for remote in ctx.cluster.remotes.iterkeys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, '/var/log/ceph',
os.path.join(sub, 'log'))
log.info('Cleaning ceph cluster...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-rf',
'--',
conf_path,
keyring_path,
'{tdir}/data'.format(tdir=testdir),
'{tdir}/monmap'.format(tdir=testdir),
],
wait=False,
),
)
Example 12: task
def task(ctx, config):
"""
Test monitor recovery from OSD
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'))
mons = ctx.cluster.only(teuthology.is_type('mon'))
assert mons
# note down the first cluster_name and mon_id
# we will recover it later on
cluster_name = None
mon_id = None
for remote, roles in mons.remotes.iteritems():
is_mon = teuthology.is_type('mon')
for role in roles:
if not is_mon(role):
continue
cluster, _, m = teuthology.split_role(role)
if cluster_name is None:
cluster_name = cluster
mon_id = m
assert cluster_name == cluster
log.info('killing {cluster}:mon.{mon}'.format(
cluster=cluster,
mon=m))
manager.kill_mon(m)
mon_data = os.path.join('/var/lib/ceph/mon/',
'{0}-{1}'.format(cluster_name, m))
if m == mon_id:
# so we only need to recreate the store.db for the
# first mon; that is easier than running mkfs on it and
# then replacing its store.db with the recovered one
store_dir = os.path.join(mon_data, 'store.db')
remote.run(args=['sudo', 'rm', '-r', store_dir])
else:
remote.run(args=['sudo', 'rm', '-r', mon_data])
local_mstore = tempfile.mkdtemp()
# collect the maps from all OSDs
osds = ctx.cluster.only(teuthology.is_type('osd'))
assert osds
for osd, roles in osds.remotes.iteritems():
is_osd = teuthology.is_type('osd')
for role in roles:
if not is_osd(role):
continue
cluster, _, osd_id = teuthology.split_role(role)
assert cluster_name == cluster
log.info('collecting maps from {cluster}:osd.{osd}'.format(
cluster=cluster,
osd=osd_id))
# push leveldb to OSD
osd_mstore = os.path.join(teuthology.get_testdir(ctx), 'mon-store')
osd.run(args=['sudo', 'mkdir', '-m', 'o+x', '-p', osd_mstore])
push_directory(local_mstore, osd, osd_mstore)
log.info('rm -rf {0}'.format(local_mstore))
shutil.rmtree(local_mstore)
# update leveldb with OSD data
options = '--op update-mon-db --mon-store-path {0}'
log.info('cot {0}'.format(osd_mstore))
manager.objectstore_tool(pool=None,
options=options.format(osd_mstore),
args='',
osd=osd_id,
do_revive=False)
# pull the updated mon db
log.info('pull dir {0} -> {1}'.format(osd_mstore, local_mstore))
local_mstore = tempfile.mkdtemp()
teuthology.pull_directory(osd, osd_mstore, local_mstore)
log.info('rm -rf osd:{0}'.format(osd_mstore))
osd.run(args=['sudo', 'rm', '-fr', osd_mstore])
# recover the first_mon with re-built mon db
# push the recovered leveldb from the client to the mon's data dir
mon_store_dir = os.path.join('/var/lib/ceph/mon',
'{0}-{1}'.format(cluster_name, mon_id))
push_directory(local_mstore, mon, mon_store_dir)
mon.run(args=['sudo', 'chown', '-R', 'ceph:ceph', mon_store_dir])
shutil.rmtree(local_mstore)
default_keyring = '/etc/ceph/{cluster}.keyring'.format(
cluster=cluster_name)
keyring_path = config.get('keyring_path', default_keyring)
# fill up the caps in the keyring file
mon.run(args=['sudo',
'ceph-authtool', keyring_path,
'-n', 'mon.',
'--cap', 'mon', 'allow *'])
#......... (part of the code omitted here) .........
Example 13: ceph_log
#......... (part of the code omitted here) .........
)
)
class Rotater(object):
stop_event = gevent.event.Event()
def invoke_logrotate(self):
# 1) install ceph-test.conf in /etc/logrotate.d
# 2) continuously loop over logrotate invocation with ceph-test.conf
while not self.stop_event.is_set():
self.stop_event.wait(timeout=30)
run.wait(ctx.cluster.run(args=["sudo", "logrotate", "/etc/logrotate.d/ceph-test.conf"], wait=False))
def begin(self):
self.thread = gevent.spawn(self.invoke_logrotate)
def end(self):
self.stop_event.set()
self.thread.get()
def write_rotate_conf(ctx, daemons):
testdir = teuthology.get_testdir(ctx)
rotate_conf_path = os.path.join(os.path.dirname(__file__), "logrotate.conf")
with file(rotate_conf_path, "rb") as f:
conf = ""
for daemon, size in daemons.iteritems():
log.info("writing logrotate stanza for {daemon}".format(daemon=daemon))
conf += f.read().format(daemon_type=daemon, max_size=size)
f.seek(0, 0)
for remote in ctx.cluster.remotes.iterkeys():
teuthology.write_file(
remote=remote, path="{tdir}/logrotate.ceph-test.conf".format(tdir=testdir), data=StringIO(conf)
)
remote.run(
args=[
"sudo",
"mv",
"{tdir}/logrotate.ceph-test.conf".format(tdir=testdir),
"/etc/logrotate.d/ceph-test.conf",
run.Raw("&&"),
"sudo",
"chmod",
"0644",
"/etc/logrotate.d/ceph-test.conf",
run.Raw("&&"),
"sudo",
"chown",
"root.root",
"/etc/logrotate.d/ceph-test.conf",
]
)
remote.chcon("/etc/logrotate.d/ceph-test.conf", "system_u:object_r:etc_t:s0")
if ctx.config.get("log-rotate"):
daemons = ctx.config.get("log-rotate")
log.info("Setting up log rotation with " + str(daemons))
write_rotate_conf(ctx, daemons)
logrotater = Rotater()
logrotater.begin()
try:
yield
finally:
if ctx.config.get("log-rotate"):
log.info("Shutting down logrotate")
logrotater.end()
ctx.cluster.run(args=["sudo", "rm", "/etc/logrotate.d/ceph-test.conf"])
if ctx.archive is not None and not (ctx.config.get("archive-on-error") and ctx.summary["success"]):
# and logs
log.info("Compressing logs...")
run.wait(
ctx.cluster.run(
args=[
"sudo",
"find",
"/var/log/ceph",
"-name",
"*.log",
"-print0",
run.Raw("|"),
"sudo",
"xargs",
"-0",
"--no-run-if-empty",
"--",
"gzip",
"--",
],
wait=False,
)
)
log.info("Archiving logs...")
path = os.path.join(ctx.archive, "remote")
os.makedirs(path)
for remote in ctx.cluster.remotes.iterkeys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, "/var/log/ceph", os.path.join(sub, "log"))
Example 14: _rebuild_db
def _rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path):
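# Gather cluster maps from every OSD with ceph-objectstore-tool
# (--op update-mon-db), push the rebuilt store to the surviving mon,
# fix up the keyring caps, and rebuild the mon store with
# ceph-monstore-tool.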
local_mstore = tempfile.mkdtemp()
# collect the maps from all OSDs
is_osd = teuthology.is_type('osd')
osds = ctx.cluster.only(is_osd)
assert osds
for osd, roles in osds.remotes.iteritems():
for role in roles:
if not is_osd(role):
continue
cluster, _, osd_id = teuthology.split_role(role)
assert cluster_name == cluster
log.info('collecting maps from {cluster}:osd.{osd}'.format(
cluster=cluster,
osd=osd_id))
# push leveldb to OSD
osd_mstore = os.path.join(teuthology.get_testdir(ctx), 'mon-store')
osd.run(args=['sudo', 'mkdir', '-m', 'o+x', '-p', osd_mstore])
_push_directory(local_mstore, osd, osd_mstore)
log.info('rm -rf {0}'.format(local_mstore))
shutil.rmtree(local_mstore)
# update leveldb with OSD data
options = '--no-mon-config --op update-mon-db --mon-store-path {0}'
log.info('cot {0}'.format(osd_mstore))
manager.objectstore_tool(pool=None,
options=options.format(osd_mstore),
args='',
osd=osd_id,
do_revive=False)
# pull the updated mon db
log.info('pull dir {0} -> {1}'.format(osd_mstore, local_mstore))
local_mstore = tempfile.mkdtemp()
teuthology.pull_directory(osd, osd_mstore, local_mstore)
log.info('rm -rf osd:{0}'.format(osd_mstore))
osd.run(args=['sudo', 'rm', '-fr', osd_mstore])
# recover the first_mon with re-built mon db
# push the recovered leveldb from the client to the mon's data dir
mon_store_dir = os.path.join('/var/lib/ceph/mon',
'{0}-{1}'.format(cluster_name, mon_id))
_push_directory(local_mstore, mon, mon_store_dir)
mon.run(args=['sudo', 'chown', '-R', 'ceph:ceph', mon_store_dir])
shutil.rmtree(local_mstore)
# fill up the caps in the keyring file
mon.run(args=['sudo',
'ceph-authtool', keyring_path,
'-n', 'mon.',
'--cap', 'mon', 'allow *'])
mon.run(args=['sudo',
'ceph-authtool', keyring_path,
'-n', 'client.admin',
'--cap', 'mon', 'allow *',
'--cap', 'osd', 'allow *',
'--cap', 'mds', 'allow *',
'--cap', 'mgr', 'allow *'])
mon.run(args=['sudo', '-u', 'ceph',
'CEPH_ARGS=--no-mon-config',
'ceph-monstore-tool', mon_store_dir,
'rebuild', '--', '--keyring',
keyring_path])
Example 15: build_ceph_cluster
#......... (part of the code omitted here) .........
return
log.info("Stopping ceph...")
ctx.cluster.run(
args=[
"sudo",
"stop",
"ceph-all",
run.Raw("||"),
"sudo",
"service",
"ceph",
"stop",
run.Raw("||"),
"sudo",
"systemctl",
"stop",
"ceph.target",
]
)
# Are you really not running anymore?
# try first with the init tooling
# ignoring the status so this becomes informational only
ctx.cluster.run(
args=[
"sudo",
"status",
"ceph-all",
run.Raw("||"),
"sudo",
"service",
"ceph",
"status",
run.Raw("||"),
"sudo",
"systemctl",
"status",
"ceph.target",
],
check_status=False,
)
# and now just check for the processes themselves, as if upstart/sysvinit
# is lying to us. Ignore errors if the grep fails
ctx.cluster.run(
args=["sudo", "ps", "aux", run.Raw("|"), "grep", "-v", "grep", run.Raw("|"), "grep", "ceph"],
check_status=False,
)
if ctx.archive is not None:
# archive mon data, too
log.info("Archiving mon data...")
path = os.path.join(ctx.archive, "data")
os.makedirs(path)
mons = ctx.cluster.only(teuthology.is_type("mon"))
for remote, roles in mons.remotes.iteritems():
for role in roles:
if role.startswith("mon."):
teuthology.pull_directory_tarball(remote, "/var/lib/ceph/mon", path + "/" + role + ".tgz")
log.info("Compressing logs...")
run.wait(
ctx.cluster.run(
args=[
"sudo",
"find",
"/var/log/ceph",
"-name",
"*.log",
"-print0",
run.Raw("|"),
"sudo",
"xargs",
"-0",
"--no-run-if-empty",
"--",
"gzip",
"--",
],
wait=False,
)
)
log.info("Archiving logs...")
path = os.path.join(ctx.archive, "remote")
os.makedirs(path)
for remote in ctx.cluster.remotes.iterkeys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, "/var/log/ceph", os.path.join(sub, "log"))
# Prevent these from being undefined if the try block fails
all_nodes = get_all_nodes(ctx, config)
purge_nodes = "./ceph-deploy purge" + " " + all_nodes
purgedata_nodes = "./ceph-deploy purgedata" + " " + all_nodes
log.info("Purging package...")
execute_ceph_deploy(purge_nodes)
log.info("Purging data...")
execute_ceph_deploy(purgedata_nodes)