本文整理汇总了Python中settings.getnodes函数的典型用法代码示例。如果您正苦于以下问题:Python getnodes函数的具体用法?Python getnodes怎么用?Python getnodes使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了getnodes函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
def run(self):
super(KvmRbdFio, self).run()
# We'll always drop caches for rados bench
self.dropcaches()
monitoring.start(self.run_dir)
time.sleep(5)
names = ""
for i in xrange(self.concurrent_procs):
names += "--name=/srv/rbdfio-`hostname -s`-%d/cbt-kvmrbdfio " % i
out_file = '%s/output' % self.run_dir
pre_cmd = 'sudo fio --rw=read -ioengine=sync --numjobs=1 --bs=4M --runtime=1 --size %dM %s > /dev/null' % (self.vol_size * 9/10, names)
fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' % (self.mode, self.ioengine, self.time, self.op_size, self.iodepth, self.vol_size * 9/10, names, out_file)
print 'Attempting to populating fio files...'
common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
print 'Running rbd fio %s test.' % self.mode
common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
# ps = []
# for i in xrange(self.concurrent_procs):
# out_file = '%s/output.%s' % (self.run_dir, i)
# p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
# ps.append(p)
# for p in ps:
# p.wait()
monitoring.stop(self.run_dir)
common.sync_files('%s/*' % self.run_dir, self.out_dir)
示例2: initialize
def initialize(self):
common.cleanup_tests()
if not self.use_existing:
common.setup_cluster()
common.setup_ceph()
# Create the run directory
common.make_remote_dir(self.run_dir)
# Setup the pools
monitoring.start("%s/pool_monitoring" % self.run_dir)
for i in xrange(self.concurrent_procs):
for node in settings.getnodes('clients').split(','):
node = node.rpartition("@")[2]
common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool create rados-bench-%s-%s %d %d' % (node, i, self.pgs_per_pool, self.pgs_per_pool)).communicate()
common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool set rados-bench-%s-%s size 1' % (node, i)).communicate()
# check the health for each pool.
print 'Checking Healh after pool creation.'
common.check_health()
monitoring.stop()
print 'Running scrub monitoring.'
monitoring.start("%s/scrub_monitoring" % self.run_dir)
common.check_scrub()
monitoring.stop()
print 'Pausing for 60s for idle monitoring.'
monitoring.start("%s/idle_monitoring" % self.run_dir)
time.sleep(60)
monitoring.stop()
common.sync_files('%s/*' % self.run_dir, self.out_dir)
return True
示例3: rmpool
def rmpool(self, name, profile_name):
    """Delete pool `name`.

    If the pool's profile defines a cache tier, the cache is flushed,
    detached from the base pool, and deleted (recursively) first.
    """
    pool_profiles = self.config.get("pool_profiles", {"default": {}})
    profile = pool_profiles.get(profile_name, {})
    cache_profile = profile.get("cache_profile", None)
    if cache_profile:
        cache_name = "%s-cache" % name
        # Flush the cache and tear the tiering relationship down before
        # either pool can be removed.
        teardown_cmds = [
            "sudo ceph -c %s osd tier cache-mode %s forward" % (self.tmp_conf, cache_name),
            "sudo rados -c %s -p %s cache-flush-evict-all" % (self.tmp_conf, cache_name),
            "sudo ceph -c %s osd tier remove-overlay %s" % (self.tmp_conf, name),
            "sudo ceph -c %s osd tier remove %s %s" % (self.tmp_conf, name, cache_name),
        ]
        for cmd in teardown_cmds:
            common.pdsh(settings.getnodes("head"), cmd).communicate()
        # The cache pool is itself a pool; remove it the same way.
        self.rmpool(cache_name, cache_profile)
    common.pdsh(
        settings.getnodes("head"),
        "sudo ceph -c %s osd pool delete %s %s --yes-i-really-really-mean-it" % (self.tmp_conf, name, name),
    ).communicate()
示例4: pre
def pre(self):
    """Wait out the configured pre-test delay, set the osd noup flag, and
    advance the recovery-test state machine to 'markdown'."""
    delay = self.config.get("pre_time", 60)
    common.pdsh(settings.getnodes('head'), self.logcmd('Starting Recovery Test Thread, waiting %s seconds.' % delay)).communicate()
    time.sleep(delay)
    note = self.logcmd("Setting the ceph osd noup flag")
    set_noup = '%s -c %s osd set noup;%s' % (self.ceph_cmd, self.cluster.tmp_conf, note)
    common.pdsh(settings.getnodes('head'), set_noup).communicate()
    self.state = 'markdown'
示例5: initialize
def initialize(self):
    """Prepare each client VM for the KVM RBD fio run: create an XFS
    filesystem on each attached data disk, mount it under /srv, and create
    the remote run directory.

    NOTE(review): cluster cleanup, the superclass initialize, and the
    pool/RBD image setup are deliberately skipped here -- the volumes are
    assumed to already be attached to the VMs as /dev/vdb, /dev/vdc, ...
    (confirm against the guest provisioning step).
    """
    common.setup_cluster()
    for i in xrange(self.concurrent_procs):
        # Data disks start at /dev/vdb (vda is the VM root disk).
        letter = string.ascii_lowercase[i+1]
        common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/vd%s' % letter).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/vd%s /srv/rbdfio-`hostname -s`-%d' % (letter, i)).communicate()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
示例6: initialize
def initialize(self):
    """Wipe and recreate the run directory on all clients, then lay down
    the fio data files for every concurrent job."""
    super(RawFio, self).initialize()
    clnts = settings.getnodes('clients')
    common.pdsh(clnts,
                'sudo rm -rf %s' % self.run_dir,
                continue_if_error=False).communicate()
    common.make_remote_dir(self.run_dir)
    logger.info('creating mountpoints...')
    logger.info('Attempting to initialize fio files...')
    # Launch one fio write pass per process, round-robining across the
    # configured block devices, then wait for all of them to finish.
    pending = []
    for idx in range(self.concurrent_procs):
        fiopath = self.block_devices[idx % len(self.block_devices)]
        pre_cmd = 'sudo %s --rw=write -ioengine=%s --bs=%s ' % (self.fio_cmd, self.ioengine, self.op_size)
        pre_cmd = '%s --size %dM --name=%s --output-format=%s> /dev/null' % (
            pre_cmd, self.vol_size, fiopath, self.fio_out_format)
        pending.append(common.pdsh(clnts, pre_cmd,
                                   continue_if_error=False))
    for proc in pending:
        proc.communicate()
    # Create the run directory
    common.pdsh(clnts, 'rm -rf %s' % self.run_dir,
                continue_if_error=False).communicate()
    common.make_remote_dir(self.run_dir)
示例7: mkimages
def mkimages(self):
    """Recreate the benchmark pool and create one RBD image per client host."""
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    self.cluster.rmpool(self.poolname, self.pool_profile)
    self.cluster.mkpool(self.poolname, self.pool_profile)
    for client in settings.getnodes('clients').split(','):
        # Strip any "user@" prefix so the image is named after the bare host.
        hostname = client.rpartition("@")[2]
        rbd_cmd = '/usr/bin/rbd create cbt-librbdfio-%s --size %s --pool %s --order %s' % (hostname, self.vol_size, self.poolname, self.vol_order)
        common.pdsh(settings.getnodes('head'), rbd_cmd).communicate()
    monitoring.stop()
示例8: markdown
def markdown(self):
    """Mark every configured OSD down and then out, logging each step, and
    advance the recovery-test state machine to 'osdout'."""
    for osdnum in self.config.get('osds'):
        down_log = self.logcmd("Marking OSD %s down." % osdnum)
        common.pdsh(settings.getnodes('head'), '%s -c %s osd down %s;%s' % (self.ceph_cmd, self.cluster.tmp_conf, osdnum, down_log)).communicate()
        out_log = self.logcmd("Marking OSD %s out." % osdnum)
        common.pdsh(settings.getnodes('head'), '%s -c %s osd out %s;%s' % (self.ceph_cmd, self.cluster.tmp_conf, osdnum, out_log)).communicate()
    common.pdsh(settings.getnodes('head'), self.logcmd('Waiting for the cluster to break and heal')).communicate()
    self.state = 'osdout'
示例9: mkimages
def mkimages(self):
    """Recreate the pool, then on every client: create and map a kernel RBD
    image, format it as XFS, and mount it under the cluster mount dir."""
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    self.cluster.rmpool(self.poolname, self.pool_profile)
    self.cluster.mkpool(self.poolname, self.pool_profile)
    # Each client names its image after its own short hostname.
    setup_cmds = [
        '/usr/bin/rbd create cbt-kernelrbdfio-`hostname -s` --size %s --pool %s' % (self.vol_size, self.poolname),
        'sudo rbd map cbt-kernelrbdfio-`hostname -s` --pool %s --id admin' % self.poolname,
        'sudo mkfs.xfs /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s`',
        'sudo mkdir -p -m0755 -- %s/cbt-kernelrbdfio-`hostname -s`' % self.cluster.mnt_dir,
        'sudo mount -t xfs -o noatime,inode64 /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s` %s/cbt-kernelrbdfio-`hostname -s`' % self.cluster.mnt_dir,
    ]
    for cmd in setup_cmds:
        common.pdsh(settings.getnodes('clients'), cmd).communicate()
    monitoring.stop()
示例10: stop
def stop(directory=None):
    """Stop the monitoring daemons (collectl, perf, blktrace) on all nodes.

    If `directory` is given, fix ownership of the collected perf data there
    and generate the collectl movies.
    """
    nodes = settings.getnodes('clients', 'osds', 'mons', 'rgws')
    common.pdsh(nodes, 'pkill -SIGINT -f collectl').communicate()
    common.pdsh(nodes, 'sudo pkill -SIGINT -f perf_3.6').communicate()
    # blktrace only runs on the OSD nodes.
    common.pdsh(settings.getnodes('osds'), 'sudo pkill -SIGINT -f blktrace').communicate()
    if directory:
        sc = settings.cluster
        # Bug fix: wait for the chown to complete before post-processing,
        # otherwise make_movies() can race against it.
        common.pdsh(nodes, 'cd %s/perf;sudo chown %s.%s perf.data' % (directory, sc.get('user'), sc.get('user'))).communicate()
        make_movies(directory)
示例11: _run
def _run(self, mode, run_dir, out_dir):
    """Execute one rados bench pass.

    mode    -- rados bench mode string ('write', 'seq', 'rand', ...)
    run_dir -- remote directory for per-process output and objecter logs
    out_dir -- local directory the results are synced back to
    """
    # We'll always drop caches for rados bench
    self.dropcaches()

    # Bug fix: default to an empty flag so concurrent_ops_str is always
    # bound; previously it was undefined (NameError) when concurrent_ops
    # was unset or zero.
    concurrent_ops_str = ''
    if self.concurrent_ops:
        concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops

    # Determine rados version: -b (op size) is only passed for writes on
    # rados >= 9; older versions also take it for reads.
    rados_version_str, err = common.pdsh(settings.getnodes('head'), '/usr/bin/rados -v').communicate()
    m = re.findall(r"version (\d+)", rados_version_str)
    rados_version = int(m[0])
    if mode in ['write'] or rados_version < 9:
        op_size_str = '-b %s' % self.op_size
    else:
        op_size_str = ''

    common.make_remote_dir(run_dir)

    # dump the cluster config
    self.cluster.dump_config(run_dir)

    # Run the backfill testing thread if requested
    if 'recovery_test' in self.cluster.config:
        recovery_callback = self.recovery_callback
        self.cluster.create_recovery_test(run_dir, recovery_callback)

    # Run rados bench
    monitoring.start(run_dir)
    logger.info('Running radosbench %s test.' % mode)
    ps = []
    for i in xrange(self.concurrent_procs):
        out_file = '%s/output.%s' % (run_dir, i)
        objecter_log = '%s/objecter.%s.log' % (run_dir, i)
        # default behavior is to use a single storage pool
        pool_name = self.pool
        run_name = '--run-name %s`hostname -s`-%s'%(self.object_set_id, i)
        if self.pool_per_proc:
            # support previous behavior of 1 storage pool per rados process
            pool_name = 'rados-bench-`hostname -s`-%s'%i
            run_name = ''
        rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s --no-cleanup 2> %s > %s' % \
            (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, run_name, objecter_log, out_file)
        p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
        ps.append(p)
    for p in ps:
        p.wait()
    monitoring.stop(run_dir)

    # If we were doing recovery, wait until it's done.
    if 'recovery_test' in self.cluster.config:
        self.cluster.wait_recovery_done()

    # Finally, get the historic ops
    self.cluster.dump_historic_ops(run_dir)
    common.sync_files('%s/*' % run_dir, out_dir)
示例12: osdin
def osdin(self):
    """Poll cluster health; once the cluster has gone unhealthy and healed
    (or we exhaust maxhealthtries waiting), advance to the 'post' state."""
    ret = self.cluster.check_health(self.health_checklist, "%s/recovery.log" % self.config.get('run_dir'))
    healthy = (ret == 0)
    if healthy and self.inhealthtries < self.maxhealthtries:
        self.inhealthtries += 1
        return  # Cluster hasn't become unhealthy yet.
    msg = 'Cluster never went unhealthy.' if healthy else 'Cluster appears to have healed.'
    common.pdsh(settings.getnodes('head'), self.logcmd(msg)).communicate()
    self.state = "post"
示例13: mkimages
def mkimages(self):
    """Recreate the pool and create volumes_per_client RBD images for each
    client host."""
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    self.cluster.rmpool(self.poolname, self.pool_profile)
    self.cluster.mkpool(self.poolname, self.pool_profile)
    for client in settings.getnodes("clients").split(","):
        # Strip any "user@" prefix so images are named after the bare host.
        hostname = client.rpartition("@")[2]
        for volnum in xrange(0, self.volumes_per_client):
            rbd_cmd = ("/usr/bin/rbd create cbt-librbdfio-%s-%d --size %s --pool %s --order %s"
                       % (hostname, volnum, self.vol_size, self.poolname, self.vol_order))
            common.pdsh(settings.getnodes("head"), rbd_cmd).communicate()
    monitoring.stop()
示例14: pre
def pre(self):
    """Log the start of the recovery test, wait out the configured delay,
    set the osd noup flag, mark the configured OSDs down and out, and
    advance the state machine to 'osdout'.
    """
    pre_time = self.config.get("pre_time", 60)
    common.pdsh(settings.getnodes('head'), self.logcmd('Starting Recovery Test Thread, waiting %s seconds.' % pre_time)).communicate()
    time.sleep(pre_time)
    lcmd = self.logcmd("Setting the ceph osd noup flag")
    # Bug fix: the command was 'ceph -c %s ceph osd set noup' -- the
    # duplicated 'ceph' token made the CLI invocation invalid
    # (compare the equivalent noup command in the other pre() above).
    common.pdsh(settings.getnodes('head'), 'ceph -c %s osd set noup;%s' % (self.cluster.tmp_conf, lcmd)).communicate()
    for osdnum in self.config.get('osds'):
        lcmd = self.logcmd("Marking OSD %s down." % osdnum)
        common.pdsh(settings.getnodes('head'), 'ceph -c %s osd down %s;%s' % (self.cluster.tmp_conf, osdnum, lcmd)).communicate()
        lcmd = self.logcmd("Marking OSD %s out." % osdnum)
        common.pdsh(settings.getnodes('head'), 'ceph -c %s osd out %s;%s' % (self.cluster.tmp_conf, osdnum, lcmd)).communicate()
    common.pdsh(settings.getnodes('head'), self.logcmd('Waiting for the cluster to break and heal')).communicate()
    self.state = 'osdout'
示例15: initialize
def initialize(self):
self.cleanup()
super(RbdFio, self).initialize()
common.setup_cluster()
common.setup_ceph()
common.dump_config(self.run_dir)
# Setup the pools
common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool create rbdfio %d %d' % (self.pgs, self.pgs)).communicate()
common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool set rbdfio size 1').communicate()
print 'Checking Healh after pool creation.'
common.check_health()
common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate()
for i in xrange(self.concurrent_procs):
common.pdsh(settings.getnodes('clients'), 'sudo rbd create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (i, self.vol_size)).communicate()
# common.pdsh(settings.cluster.get('clients'), 'sudo rbd map rbdfio-`hostname -s`-%d --pool rbdfio --id admin' % i).communicate()
common.pdsh(settings.getnodes('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate()
common.pdsh(settings.getnodes('clients'), 'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate()
common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d /srv/rbdfio-`hostname -s`-%d' %(i, i)).communicate()
common.check_scrub()
# Create the run directory
common.make_remote_dir(self.run_dir)