This article collects typical usage examples of the Python function teuthology.parallel.parallel. If you are wondering what the parallel function does and how to use it, the curated examples below should help.
The following presents 15 code examples of the parallel function, sorted by popularity.
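All of the examples share the same pattern: parallel() is used as a context manager, each unit of work is queued with p.spawn(fn, *args), and leaving the with block waits for every spawned call to finish. As Example 13 below shows, iterating over p yields the return values of the spawned calls. Here is a minimal sketch of that pattern; the worker function check_host and the host names are hypothetical, chosen only for illustration:

from teuthology.parallel import parallel

def check_host(host):
    # Hypothetical worker; in the real examples this is a teuthology
    # helper such as _purge_data or _run_tests.
    return '{host}: ok'.format(host=host)

results = []
with parallel() as p:
    for host in ['node0', 'node1', 'node2']:
        p.spawn(check_host, host)   # queue one call per host
    for result in p:                # collect return values as the calls finish
        results.append(result)
# Exiting the with block waits for all spawned calls to complete.
print(results)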
Example 1: remove_sources
def remove_sources(ctx, config):
    """
    Removes repo source files from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    remove_sources_pkgs = {
        'deb': _remove_sources_list_deb,
        'rpm': _remove_sources_list_rpm,
    }
    with parallel() as p:
        project = config.get('project', 'ceph')
        log.info("Removing {proj} sources lists".format(
            proj=project))
        for remote in ctx.cluster.remotes.iterkeys():
            remove_fn = remove_sources_pkgs[remote.os.package_type]
            p.spawn(remove_fn, remote, project)

    with parallel() as p:
        project = 'calamari'
        log.info("Removing {proj} sources lists".format(
            proj=project))
        for remote in ctx.cluster.remotes.iterkeys():
            remove_fn = remove_sources_pkgs[remote.os.package_type]
            p.spawn(remove_fn, remote, project)
Example 2: task
def task(ctx, config):
    """
    Run an autotest test on the ceph cluster.

    Only autotest client tests are supported.

    The config is a mapping from role name to list of tests to run on
    that client.

    For example::

        tasks:
        - ceph:
        - ceph-fuse: [client.0, client.1]
        - autotest:
            client.0: [dbench]
            client.1: [bonnie]

    You can also specify a list of tests to run on all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - autotest:
            all: [dbench]
    """
    assert isinstance(config, dict)
    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    log.info('Setting up autotest...')
    testdir = teuthology.get_testdir(ctx)
    with parallel() as p:
        for role in config.iterkeys():
            (remote,) = ctx.cluster.only(role).remotes.keys()
            p.spawn(_download, testdir, remote)

    log.info('Making a separate scratch dir for every client...')
    for role in config.iterkeys():
        assert isinstance(role, basestring)
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
        mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
        scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
        remote.run(
            args=[
                'sudo',
                'install',
                '-d',
                '-m', '0755',
                '--owner={user}'.format(user='ubuntu'),  # TODO
                '--',
                scratch,
            ],
        )

    with parallel() as p:
        for role, tests in config.iteritems():
            (remote,) = ctx.cluster.only(role).remotes.keys()
            p.spawn(_run_tests, testdir, remote, role, tests)
Example 3: task
def task(ctx, config):
    """
    Execute commands on multiple roles in parallel

        tasks:
        - ceph:
        - ceph-fuse: [client.0, client.1]
        - pexec:
            client.0:
              - while true; do echo foo >> bar; done
            client.1:
              - sleep 1
              - tail -f bar
        - interactive:
    """
    log.info('Executing custom commands...')
    assert isinstance(config, dict), "task pexec got invalid config"

    sudo = False
    if 'sudo' in config:
        sudo = config['sudo']
        del config['sudo']

    if 'all' in config and len(config) == 1:
        a = config['all']
        roles = teuthology.all_roles(ctx.cluster)
        config = dict((id_, a) for id_ in roles)

    with parallel() as p:
        for role, ls in config.iteritems():
            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
            p.spawn(_exec_role, remote, role, sudo, ls)
Example 4: purge_data
def purge_data(ctx):
    """
    Purge /var/lib/ceph
    """
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(_purge_data, remote)
Example 5: _spawn_on_all_clients
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests() for each role.

    See run_tests() for parameter documentation.
    """
    is_client = misc.is_type('client')
    client_remotes = {}
    created_mountpoint = {}
    for remote, roles_for_host in ctx.cluster.remotes.items():
        for role in roles_for_host:
            if is_client(role):
                client_remotes[role] = remote
                created_mountpoint[role] = _make_scratch_dir(ctx, role, subdir)

    for unit in tests:
        with parallel() as p:
            for role, remote in client_remotes.items():
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
                        timeout=timeout)

    # cleanup the generated client directories
    for role, _ in client_remotes.items():
        _delete_dir(ctx, role, created_mountpoint[role])
Example 6: _spawn_on_all_clients
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests() for each role.

    See run_tests() for parameter documentation.
    """
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    created_mountpoint = {}
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
                        timeout=timeout)

    # cleanup the generated client directories
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client])
Example 7: vm_setup
def vm_setup(ctx, config):
    """
    Look for virtual machines and handle their initialization
    """
    all_tasks = [x.keys()[0] for x in ctx.config['tasks']]
    need_chef = False
    if 'chef' in all_tasks or 'kernel' in all_tasks:
        need_chef = True
    with parallel() as p:
        editinfo = os.path.join(os.path.dirname(__file__), 'edit_sudoers.sh')
        for rem in ctx.cluster.remotes.iterkeys():
            mname = rem.shortname
            if misc.is_vm(mname):
                r = rem.run(args=['test', '-e', '/ceph-qa-ready'],
                            stdout=StringIO(),
                            check_status=False)
                if r.returncode != 0:
                    p1 = subprocess.Popen(['cat', editinfo], stdout=subprocess.PIPE)
                    p2 = subprocess.Popen(
                        [
                            'ssh',
                            '-o', 'StrictHostKeyChecking=no',
                            '-t', '-t',
                            str(rem),
                            'sudo',
                            'sh'
                        ],
                        stdin=p1.stdout, stdout=subprocess.PIPE
                    )
                    _, err = p2.communicate()
                    if err:
                        log.info("Edit of /etc/sudoers failed: %s", err)
                    if need_chef:
                        p.spawn(_download_and_run_chef, rem)
Example 8: task
def task(ctx, config):
    """
    Run fsx on an rbd image.

    Currently this requires running as client.admin
    to create a pool.

    Specify which clients to run on as a list::

      tasks:
        ceph:
        rbd_fsx:
          clients: [client.0, client.1]

    You can optionally change some properties of fsx:

      tasks:
        ceph:
        rbd_fsx:
          clients: <list of clients>
          seed: <random seed number, or 0 to use the time>
          ops: <number of operations to do>
          size: <maximum image size in bytes>
          valgrind: [--tool=<valgrind tool>]
    """
    log.info('starting rbd_fsx...')
    with parallel() as p:
        for role in config['clients']:
            p.spawn(_run_one_client, ctx, config, role)
    yield
Example 9: remove_sources
def remove_sources(ctx, config):
    remove_sources_pkgs = {"deb": _remove_sources_list_deb, "rpm": _remove_sources_list_rpm}
    log.info("Removing {proj} sources lists".format(proj=config.get("project", "ceph")))
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            system_type = teuthology.get_system_type(remote)
            p.spawn(remove_sources_pkgs[system_type], remote, config.get("project", "ceph"))
Example 10: task
def task(ctx, config):
    """
    Run a group of tasks in parallel.

    example:
    - parallel:
       - tasktest:
       - tasktest:

    You can also reference the job from elsewhere:

    foo:
      tasktest:
    tasks:
    - parallel:
      - foo
      - tasktest:

    That is, if the entry is not a dict, we will look it up in the top-level
    config.

    Sequential task and Parallel tasks can be nested.
    """
    log.info('starting parallel...')
    with parallel.parallel() as p:
        for entry in config:
            if not isinstance(entry, dict):
                entry = ctx.config.get(entry, {})
            ((taskname, confg),) = entry.iteritems()
            p.spawn(_run_spawned, ctx, confg, taskname)
Example 11: _spawn_on_all_clients
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
    """
    Make a scratch directory for each client in the cluster, and then for each
    test spawn _run_tests for each role.

    :param ctx: Context
    :param refspec: branch, sha1, or version tag used to identify this
                    build
    :param tests: specific tests specified.
    :param env: environment set in yaml file. Could be None.
    :param subdir: subdirectory set in yaml file. Could be None
    """
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)

    # cleanup the generated client directories
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client))
Example 12: run_xfstests
def run_xfstests(ctx, config):
    """
    Run xfstests over specified devices.

    Warning: both the test and scratch devices specified will be
    overwritten. Normally xfstests modifies (but does not destroy)
    the test device, but for now the run script used here re-makes
    both filesystems.

    Note: Only one instance of xfstests can run on a single host at
    a time, although this is not enforced.

    This task in its current form needs some improvement. For
    example, it assumes all roles provided in the config are
    clients, and that the config provided is a list of key/value
    pairs. For now please use the xfstests() interface, below.

    For example::

        tasks:
        - ceph:
        - rbd.run_xfstests:
            client.0:
              count: 2
              test_dev: 'test_dev'
              scratch_dev: 'scratch_dev'
              fs_type: 'xfs'
              tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48'
    """
    with parallel() as p:
        for role, properties in config.items():
            p.spawn(run_xfstests_one_client, ctx, role, properties)
    yield
Example 13: nuke
def nuke(ctx, log, should_unlock, sync_clocks=True, reboot_all=True,
         noipmi=False):
    from teuthology.parallel import parallel
    from teuthology.lock import list_locks
    total_unnuked = {}
    targets = dict(ctx.config['targets'])
    if ctx.name:
        log.info('Checking targets against current locks')
        locks = list_locks(ctx)
        # Remove targets whose description doesn't match archive name.
        for lock in locks:
            for target in targets:
                if target == lock['name']:
                    if ctx.name not in lock['description']:
                        del ctx.config['targets'][lock['name']]
                        log.info('Not nuking %s because description doesn\'t match', lock['name'])
    with parallel() as p:
        for target, hostkey in ctx.config['targets'].iteritems():
            p.spawn(
                nuke_one,
                ctx,
                {target: hostkey},
                log,
                should_unlock,
                sync_clocks,
                reboot_all,
                ctx.config.get('check-locks', True),
                noipmi,
            )
        for unnuked in p:
            if unnuked:
                total_unnuked.update(unnuked)
    if total_unnuked:
        log.error('Could not nuke the following targets:\n' + '\n '.join(['targets:', ] + yaml.safe_dump(total_unnuked, default_flow_style=False).splitlines()))
Example 14: rh_install
def rh_install(ctx, config):
    """
    Installs rh ceph on all hosts in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    version = config["rhbuild"]
    rh_versions = ["1.3.0", "1.3.1"]
    if version in rh_versions:
        log.info("%s is a supported version", version)
    else:
        raise RuntimeError("Unsupported RH Ceph version %s" % version)

    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            if remote.os.name == "rhel":
                log.info("Installing on RHEL node: %s", remote.shortname)
                p.spawn(rh_install_pkgs, ctx, remote, version)
            else:
                log.info("Node %s is not RHEL", remote.shortname)
                raise RuntimeError("Test requires RHEL nodes")
    try:
        yield
    finally:
        if config.get("skip_uninstall"):
            log.info("Skipping uninstall of Ceph")
        else:
            rh_uninstall(ctx=ctx, config=config)
Example 15: vm_setup
def vm_setup(ctx, config):
    """
    Look for virtual machines and handle their initialization
    """
    all_tasks = [x.keys()[0] for x in ctx.config["tasks"]]
    need_ansible = False
    if "kernel" in all_tasks and "ansible.cephlab" not in all_tasks:
        need_ansible = True
    ansible_hosts = set()
    with parallel():
        editinfo = os.path.join(os.path.dirname(__file__), "edit_sudoers.sh")
        for rem in ctx.cluster.remotes.iterkeys():
            if misc.is_vm(rem.shortname):
                ansible_hosts.add(rem.shortname)
                r = rem.run(args=["test", "-e", "/ceph-qa-ready"], stdout=StringIO(), check_status=False)
                if r.returncode != 0:
                    p1 = subprocess.Popen(["cat", editinfo], stdout=subprocess.PIPE)
                    p2 = subprocess.Popen(
                        ["ssh", "-o", "StrictHostKeyChecking=no", "-t", "-t", str(rem), "sudo", "sh"],
                        stdin=p1.stdout,
                        stdout=subprocess.PIPE,
                    )
                    _, err = p2.communicate()
                    if err:
                        log.error("Edit of /etc/sudoers failed: %s", err)
    if need_ansible and ansible_hosts:
        log.info("Running ansible on %s", list(ansible_hosts))
        ansible_config = dict(hosts=list(ansible_hosts))
        with ansible.CephLab(ctx, config=ansible_config):
            pass