This article collects typical usage examples of the wait function from Python's teuthology.orchestra.run module. If you have been wondering what run.wait does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
15 code examples of the wait function are shown below, sorted by popularity by default.
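All of the examples follow the same basic pattern: commands are started with wait=False, and the returned process handles are then passed to run.wait, optionally together with a timeout in seconds. Here is a minimal sketch of that pattern (it assumes a teuthology ctx object is in scope; the uptime command is purely illustrative):

from teuthology.orchestra import run

# Start a command on every remote without blocking ...
procs = ctx.cluster.run(
    args=['uptime'],  # illustrative command only
    wait=False,
)
# ... then block until every spawned process has exited.
# An optional timeout (in seconds) can be given as the second argument.
run.wait(procs, 300)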
Example 1: umount_wait
def umount_wait(self, force=False, require_clean=False):
"""
:param force: Complete cleanly even if the MDS is offline
"""
if force:
assert not require_clean # mutually exclusive
# When we expect to be forcing, kill the ceph-fuse process directly.
# This should avoid hitting the more aggressive fallback killing
# in umount() which can affect other mounts too.
self.fuse_daemon.stdin.close()
# However, we will still hit the aggressive wait if there is an ongoing
# mount -o remount (especially if the remount is stuck because MDSs
# are unavailable)
self.umount()
try:
if self.fuse_daemon:
# Permit a timeout, so that we do not block forever
run.wait([self.fuse_daemon], 900)
except MaxWhileTries:
log.error("process failed to terminate after unmount. This probably "
          "indicates a bug within ceph-fuse.")
raise
except CommandFailedError:
if require_clean:
raise
self.cleanup()
Example 2: umount
def umount(self, force=False):
log.debug('Unmounting client client.{id}...'.format(id=self.client_id))
cmd=['sudo', 'umount', self.mountpoint]
if force:
cmd.append('-f')
try:
self.client_remote.run(args=cmd)
except Exception as e:
self.client_remote.run(args=[
'sudo',
run.Raw('PATH=/usr/sbin:$PATH'),
'lsof',
run.Raw(';'),
'ps', 'auxf',
])
raise e
rproc = self.client_remote.run(
args=[
'rmdir',
'--',
self.mountpoint,
],
wait=False
)
run.wait([rproc], UMOUNT_TIMEOUT)
self.mounted = False
Example 3: sudo
def sudo(ctx, config):
"""
Enable use of sudo
"""
log.info('Configuring sudo...')
sudoers_file = '/etc/sudoers'
backup_ext = '.orig.teuthology'
tty_expr = r's/^\([^#]*\) \(requiretty\)/\1 !\2/g'
pw_expr = r's/^\([^#]*\) !\(visiblepw\)/\1 \2/g'
run.wait(
ctx.cluster.run(
args="sudo sed -i{ext} -e '{tty}' -e '{pw}' {path}".format(
ext=backup_ext, tty=tty_expr, pw=pw_expr,
path=sudoers_file
),
wait=False,
)
)
try:
yield
finally:
log.info('Restoring {0}...'.format(sudoers_file))
ctx.cluster.run(
args="sudo mv -f {path}{ext} {path}".format(
path=sudoers_file, ext=backup_ext
)
)
Example 4: base
def base(ctx, config):
"""
Create the test directory that we will be using on the remote system
"""
log.info('Creating test directory...')
testdir = misc.get_testdir(ctx)
run.wait(
ctx.cluster.run(
args=['mkdir', '-p', '-m0755', '--', testdir],
wait=False,
)
)
try:
yield
finally:
log.info('Tidying up after the test...')
# if this fails, one of the earlier cleanups is flawed; don't
# just cram an rm -rf here
run.wait(
ctx.cluster.run(
args=['find', testdir, '-ls',
run.Raw(';'),
'rmdir', '--', testdir],
wait=False,
),
)
Example 5: _exec_host
def _exec_host(barrier, barrier_queue, remote, sudo, testdir, ls):
log.info('Running commands on host %s', remote.name)
args = [
'TESTDIR={tdir}'.format(tdir=testdir),
'bash',
'-s'
]
if sudo:
args.insert(0, 'sudo')
r = remote.run( args=args, stdin=tor.PIPE, wait=False)
r.stdin.writelines(['set -e\n'])
r.stdin.flush()
for l in ls:
l = l.replace('$TESTDIR', testdir)  # rebind: str.replace returns a new string
if l == "barrier":
_do_barrier(barrier, barrier_queue, remote)
continue
r.stdin.writelines([l, '\n'])
r.stdin.flush()
r.stdin.writelines(['\n'])
r.stdin.flush()
r.stdin.close()
tor.wait([r])
Example 6: wait_for_exit
def wait_for_exit(self):
"""
clear remote run command value after waiting for exit.
"""
if self.proc:
try:
run.wait([self.proc])
finally:
self.proc = None
Example 7: start_apache
def start_apache(ctx, config, on_client = None, except_client = None):
"""
Start apache on remote sites.
"""
log.info('Starting apache...')
testdir = teuthology.get_testdir(ctx)
apaches = {}
clients_to_run = [on_client]
if on_client is None:
clients_to_run = config.keys()
for client in clients_to_run:
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id
if client == except_client:
continue
(remote,) = ctx.cluster.only(client).remotes.keys()
system_type = teuthology.get_system_type(remote)
if system_type == 'deb':
apache_name = 'apache2'
else:
try:
remote.run(
args=[
'stat',
'/usr/sbin/httpd.worker',
],
)
apache_name = '/usr/sbin/httpd.worker'
except CommandFailedError:
apache_name = '/usr/sbin/httpd'
proc = remote.run(
args=[
'adjust-ulimits',
'daemon-helper',
'kill',
apache_name,
'-X',
'-f',
'{tdir}/apache/apache.{client_with_cluster}.conf'.format(tdir=testdir,
client_with_cluster=client_with_cluster),
],
logger=log.getChild(client),
stdin=run.PIPE,
wait=False,
)
apaches[client_with_cluster] = proc
try:
yield
finally:
log.info('Stopping apache...')
for client, proc in apaches.iteritems():
proc.stdin.close()
run.wait(apaches.itervalues())
Example 8: thread
def thread():
"""Thread spawned by gevent"""
clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
log.info('clients are %s' % clients)
manager = ctx.managers['ceph']
if config.get('ec_pool', False):
profile = config.get('erasure_code_profile', {})
profile_name = profile.get('name', 'teuthologyprofile')
manager.create_erasure_code_profile(profile_name, profile)
else:
profile_name = None
for i in range(int(config.get('runs', '1'))):
log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
tests = {}
existing_pools = config.get('pools', [])
created_pools = []
for role in config.get('clients', clients):
assert isinstance(role, basestring)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
pool = config.get('pool', None)
if not pool and existing_pools:
pool = existing_pools.pop()
else:
pool = manager.create_pool_with_unique_name(
erasure_code_profile_name=profile_name,
erasure_code_use_overwrites=
config.get('erasure_code_use_overwrites', False)
)
created_pools.append(pool)
if config.get('fast_read', False):
manager.raw_cluster_cmd(
'osd', 'pool', 'set', pool, 'fast_read', 'true')
min_size = config.get('min_size', None)
if min_size is not None:
manager.raw_cluster_cmd(
'osd', 'pool', 'set', pool, 'min_size', str(min_size))
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
proc = remote.run(
args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
["--pool", pool],
logger=log.getChild("rados.{id}".format(id=id_)),
stdin=run.PIPE,
wait=False
)
tests[id_] = proc
run.wait(tests.itervalues())
for pool in created_pools:
manager.wait_snap_trimming_complete(pool)
manager.remove_pool(pool)
Example 9: coredump
def coredump(ctx, config):
"""
Stash a coredump of this system if an error occurs.
"""
log.info('Enabling coredump saving...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
'{adir}/coredump'.format(adir=archive_dir),
run.Raw('&&'),
'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir),
],
wait=False,
)
)
try:
yield
finally:
run.wait(
ctx.cluster.run(
args=[
'sudo', 'sysctl', '-w', 'kernel.core_pattern=core',
run.Raw('&&'),
# don't litter the archive dir if there were no cores dumped
'rmdir',
'--ignore-fail-on-non-empty',
'--',
'{adir}/coredump'.format(adir=archive_dir),
],
wait=False,
)
)
# set status to 'fail' if the dir is still there, i.e. coredumps were
# seen
for rem in ctx.cluster.remotes.iterkeys():
r = rem.run(
args=[
'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then',
'echo', 'OK', run.Raw(';'),
'fi',
],
stdout=StringIO(),
)
if r.stdout.getvalue() != 'OK\n':
log.warning('Found coredumps on %s, flagging run as failed', rem)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
'Found coredumps on {rem}'.format(rem=rem)
Example 10: invoke_logrotate
def invoke_logrotate(self):
# 1) install ceph-test.conf in /etc/logrotate.d
# 2) continuously loop over logrotate invocation with ceph-test.conf
while not self.stop_event.is_set():
self.stop_event.wait(timeout=30)
run.wait(
ctx.cluster.run(
args=['sudo', 'logrotate', '/etc/logrotate.d/ceph-test.conf'
],
wait=False,
)
)
Example 11: start_apache
def start_apache(ctx, config):
"""
Start apache on remote sites.
"""
log.info('Starting apache...')
testdir = teuthology.get_testdir(ctx)
apaches = {}
for client in config.iterkeys():
(remote,) = ctx.cluster.only(client).remotes.keys()
system_type = teuthology.get_system_type(remote)
if system_type == 'deb':
apache_name = 'apache2'
else:
try:
remote.run(
args=[
'stat',
'/usr/sbin/httpd.worker',
],
)
apache_name = '/usr/sbin/httpd.worker'
except CommandFailedError:
apache_name = '/usr/sbin/httpd'
proc = remote.run(
args=[
'adjust-ulimits',
'daemon-helper',
'kill',
apache_name,
'-X',
'-f',
'{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
client=client),
],
logger=log.getChild(client),
stdin=run.PIPE,
wait=False,
)
apaches[client] = proc
try:
yield
finally:
log.info('Stopping apache...')
for client, proc in apaches.iteritems():
proc.stdin.close()
run.wait(apaches.itervalues())
Example 12: wait
def wait(self, timeout=300):
"""
Wait for daemon to exit
Wait for daemon to stop (but don't trigger the stop). Pass up
any exception. Mark the daemon as not running.
"""
self.log.debug('waiting for process to exit')
try:
run.wait([self.proc], timeout=timeout)
self.log.info('Stopped')
except:
self.log.info('Failed')
raise
finally:
self.proc = None
Example 13: _exec_role
def _exec_role(remote, role, sudo, ls):
log.info('Running commands on role %s host %s', role, remote.name)
cid=role.split('.')[1]
args = ['bash', '-s']
if sudo:
args.insert(0, 'sudo')
r = remote.run( args=args, stdin=tor.PIPE, wait=False)
r.stdin.writelines(['set -e\n'])
r.stdin.flush()
r.stdin.writelines(['cd /tmp/cephtest/mnt.{cid}\n'.format(cid=cid)])
r.stdin.flush()
for l in ls:
r.stdin.writelines([l, '\n'])
r.stdin.flush()
r.stdin.writelines(['\n'])
r.stdin.flush()
r.stdin.close()
tor.wait([r])
Example 14: umount
def umount(self, force=False):
log.debug('Unmounting client client.{id}...'.format(id=self.client_id))
cmd=['sudo', 'umount', self.mountpoint]
if force:
cmd.append('-f')
self.client_remote.run(args=cmd)
rproc = self.client_remote.run(
args=[
'rmdir',
'--',
self.mountpoint,
],
wait=False
)
run.wait([rproc], UMOUNT_TIMEOUT)
self.mounted = False
Example 15: task
def task(ctx, config):
"""
Run chef-solo on all nodes.
Optional parameters:
tasks:
-chef
script_url: # override default location for solo-from-scratch for Chef
chef_repo: # override default Chef repo used by solo-from-scratch
chef_branch: # to choose a different upstream branch for ceph-qa-chef
"""
log.info("Running chef-solo...")
if config is None:
config = {}
assert isinstance(config, dict), "chef - need config"
chef_script = config.get(
"script_url", "http://git.ceph.com/?p=ceph-qa-chef.git;a=blob_plain;f=solo/solo-from-scratch;hb=HEAD"
)
chef_repo = config.get("chef_repo", "")
chef_branch = config.get("chef_branch", "")
run.wait(
ctx.cluster.run(
args=[
"wget",
# '-q',
"-O-",
# 'https://raw.github.com/ceph/ceph-qa-chef/master/solo/solo-from-scratch',
chef_script,
run.Raw("|"),
run.Raw("CHEF_REPO={repo}".format(repo=chef_repo)),
run.Raw("CHEF_BRANCH={branch}".format(branch=chef_branch)),
"sh",
"-x",
],
wait=False,
)
)
log.info("Reconnecting after ceph-qa-chef run")
misc.reconnect(ctx, 10) # Reconnect for ulimit and other ceph-qa-chef changes