本文整理汇总了Python中monitoring.start函数的典型用法代码示例。如果您正苦于以下问题:Python start函数的具体用法?Python start怎么用?Python start使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了start函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
def run(self):
    """Execute the ceph_test_rados workload with monitoring enabled."""
    super(CephTestRados, self).run()

    # Recreate the pool and flush OS caches before the run starts.
    self.mkpool()
    self.dropcaches()
    self.cluster.dump_config(self.run_dir)
    monitoring.start(self.run_dir)
    time.sleep(5)

    # Kick off the background backfill/recovery thread when configured.
    if 'recovery_test' in self.cluster.config:
        callback = self.recovery_callback
        self.cluster.create_recovery_test(self.run_dir, callback)

    logger.info('Running ceph_test_rados.')
    procs = []
    for _ in xrange(1):
        procs.append(common.pdsh(settings.getnodes('clients'), self.mkcmd()))
    for proc in procs:
        proc.wait()

    # Block until any in-flight recovery has finished.
    if 'recovery_test' in self.cluster.config:
        self.cluster.wait_recovery_done()

    monitoring.stop(self.run_dir)
    # Capture historic ops and collect every run artifact.
    self.cluster.dump_historic_ops(self.run_dir)
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
示例2: _run
def _run(self, mode, run_dir, out_dir):
# We'll always drop caches for rados bench
self.dropcaches()
if self.concurrent_ops:
concurrent_ops_str = "--concurrent-ios %s" % self.concurrent_ops
op_size_str = "-b %s" % self.op_size
common.make_remote_dir(run_dir)
monitoring.start(run_dir)
# Run rados bench
print "Running radosbench read test."
ps = []
for i in xrange(self.concurrent_procs):
out_file = "%s/output.%s" % (run_dir, i)
objecter_log = "%s/objecter.%s.log" % (run_dir, i)
p = common.pdsh(
settings.cluster.get("clients"),
"/usr/bin/rados -p rados-bench-%s %s bench %s %s %s --no-cleanup 2> %s > %s"
% (i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file),
)
ps.append(p)
for p in ps:
p.wait()
monitoring.stop(run_dir)
common.sync_files("%s/*" % run_dir, out_dir)
示例3: _run
def _run(self, mode, run_dir, out_dir):
# We'll always drop caches for rados bench
self.dropcaches()
if self.concurrent_ops:
concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
op_size_str = '-b %s' % self.op_size
common.make_remote_dir(run_dir)
# dump the cluster config
common.dump_config(run_dir)
monitoring.start(run_dir)
# Run rados bench
print 'Running radosbench read test.'
ps = []
for i in xrange(self.concurrent_procs):
out_file = '%s/output.%s' % (run_dir, i)
objecter_log = '%s/objecter.%s.log' % (run_dir, i)
p = common.pdsh(settings.getnodes('clients'), '/usr/bin/rados -p rados-bench-`hostname -s`-%s %s bench %s %s %s --no-cleanup 2> %s > %s' % (i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file))
ps.append(p)
for p in ps:
p.wait()
monitoring.stop(run_dir)
# Get the historic ops
common.dump_historic_ops(run_dir)
common.sync_files('%s/*' % run_dir, out_dir)
示例4: initialize
def initialize(self):
    """Prepare a cosbench run: preflight checks, scrub/idle monitoring,
    run-directory setup and workload XML generation."""
    super(Cosbench, self).initialize()

    logger.debug('Running cosbench and radosgw check.')
    self.prerun_check()

    # Monitor the cluster while a scrub completes.
    logger.debug('Running scrub monitoring.')
    monitoring.start("%s/scrub_monitoring" % self.run_dir)
    self.cluster.check_scrub()
    monitoring.stop()

    # Capture a 60 second idle baseline.
    logger.debug('Pausing for 60s for idle monitoring.')
    monitoring.start("%s/idle_monitoring" % self.run_dir)
    time.sleep(60)
    monitoring.stop()

    common.sync_files('%s' % self.run_dir, self.out_dir)

    # Create the run directory
    common.make_remote_dir(self.run_dir)

    # Fall back to the default template when none is configured,
    # then render the workload XML from it.
    cfg = self.config
    self.config["template"] = self.config["template"] or "default"
    self.config["workload"] = self.choose_template("default", cfg)
    self.prepare_xml(self.config["workload"])
    return True
示例5: run
def run(self):
super(KvmRbdFio, self).run()
# We'll always drop caches for rados bench
self.dropcaches()
monitoring.start(self.run_dir)
time.sleep(5)
names = ""
for i in xrange(self.concurrent_procs):
names += "--name=/srv/rbdfio-`hostname -s`-%d/cbt-kvmrbdfio " % i
out_file = '%s/output' % self.run_dir
pre_cmd = 'sudo fio --rw=read -ioengine=sync --numjobs=1 --bs=4M --runtime=1 --size %dM %s > /dev/null' % (self.vol_size * 9/10, names)
fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' % (self.mode, self.ioengine, self.time, self.op_size, self.iodepth, self.vol_size * 9/10, names, out_file)
print 'Attempting to populating fio files...'
common.pdsh(settings.cluster.get('clients'), pre_cmd).communicate()
print 'Running rbd fio %s test.' % self.mode
common.pdsh(settings.cluster.get('clients'), fio_cmd).communicate()
# ps = []
# for i in xrange(self.concurrent_procs):
# out_file = '%s/output.%s' % (self.run_dir, i)
# p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
# ps.append(p)
# for p in ps:
# p.wait()
monitoring.stop(self.run_dir)
common.sync_files('%s/*' % self.run_dir, self.out_dir)
示例6: run
def run(self):
super(LibrbdFio, self).run()
# We'll always drop caches for rados bench
self.dropcaches()
# dump the cluster config
self.cluster.dump_config(self.run_dir)
monitoring.start(self.run_dir)
time.sleep(5)
# Run the backfill testing thread if requested
if "recovery_test" in self.cluster.config:
recovery_callback = self.recovery_callback
self.cluster.create_recovery_test(self.run_dir, recovery_callback)
print "Running rbd fio %s test." % self.mode
ps = []
for i in xrange(self.volumes_per_client):
fio_cmd = self.mkfiocmd(i)
p = common.pdsh(settings.getnodes("clients"), fio_cmd)
ps.append(p)
for p in ps:
p.wait()
# If we were doing recovery, wait until it's done.
if "recovery_test" in self.cluster.config:
self.cluster.wait_recovery_done()
monitoring.stop(self.run_dir)
# Finally, get the historic ops
self.cluster.dump_historic_ops(self.run_dir)
common.sync_files("%s/*" % self.run_dir, self.out_dir)
示例7: initialize
def initialize(self):
super(LibrbdFio, self).initialize()
print 'Running scrub monitoring.'
monitoring.start("%s/scrub_monitoring" % self.run_dir)
self.cluster.check_scrub()
monitoring.stop()
print 'Pausing for 60s for idle monitoring.'
monitoring.start("%s/idle_monitoring" % self.run_dir)
time.sleep(60)
monitoring.stop()
common.sync_files('%s/*' % self.run_dir, self.out_dir)
self.mkimages()
# Create the run directory
common.make_remote_dir(self.run_dir)
# populate the fio files
print 'Attempting to populating fio files...'
pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s` --invalidate=0 --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.cmd_path, self.poolname, self.numjobs, self.vol_size, self.names)
common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
return True
示例8: initialize
def initialize(self):
    """Prepare the librbd fio run: reset the run directory, record an
    idle baseline, create images and pre-populate the fio files."""
    super(LibrbdFio, self).initialize()

    # Clean and Create the run directory
    common.clean_remote_dir(self.run_dir)
    common.make_remote_dir(self.run_dir)

    # Capture a 60 second idle baseline.
    logger.info('Pausing for 60s for idle monitoring.')
    monitoring.start("%s/idle_monitoring" % self.run_dir)
    time.sleep(60)
    monitoring.stop()

    common.sync_files('%s/*' % self.run_dir, self.out_dir)
    self.mkimages()

    # populate the fio files
    logger.info('Attempting to populating fio files...')
    procs = []
    # NOTE(review): `== False` (not `not`) kept deliberately — it skips
    # population for any non-False value, including None.
    if (self.use_existing_volumes == False):
        for volnum in xrange(self.volumes_per_client):
            rbd_name = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum)
            pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0 --rw=write --numjobs=%s --bs=4M --size %dM %s --output-format=%s > /dev/null' % (self.cmd_path, self.pool_name, rbd_name, self.numjobs, self.vol_size, self.names, self.fio_out_format)
            procs.append(common.pdsh(settings.getnodes('clients'), pre_cmd))
    for proc in procs:
        proc.wait()
    return True
示例9: initialize
def initialize(self):
    """Prepare the rbd fio run: scrub/idle monitoring, image creation,
    run directory setup and fio file pre-population."""
    super(RbdFio, self).initialize()

    # Watch the cluster while a scrub completes.
    logger.info('Running scrub monitoring.')
    monitoring.start("%s/scrub_monitoring" % self.run_dir)
    self.cluster.check_scrub()
    monitoring.stop()

    # Capture a 60 second idle baseline.
    logger.info('Pausing for 60s for idle monitoring.')
    monitoring.start("%s/idle_monitoring" % self.run_dir)
    time.sleep(60)
    monitoring.stop()

    common.sync_files('%s/*' % self.run_dir, self.out_dir)
    self.mkimages()

    # Create the run directory
    common.make_remote_dir(self.run_dir)

    # populate the fio files (write 90% of the volume size)
    logger.info('Attempting to populating fio files...')
    pre_cmd = 'sudo %s --ioengine=%s --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.cmd_path, self.ioengine, self.numjobs, self.vol_size*0.9, self.names)
    common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
    return True
示例10: run
def run(self):
    """Run the getput benchmark against every configured gateway."""
    # First create a credential file for each gateway
    self.mkcredfiles()
    # We'll always drop caches for rados bench
    self.dropcaches()
    # dump the cluster config
    self.cluster.dump_config(self.run_dir)

    # Run the backfill testing thread if requested
    if 'recovery_test' in self.cluster.config:
        self.cluster.create_recovery_test(self.run_dir, self.recovery_callback)

    # Run getput, one process per gateway auth URL.
    monitoring.start(self.run_dir)
    logger.info('Running getput %s test.' % self.test)
    procs = []
    for idx, _url in enumerate(self.auth_urls):
        cmd = self.mkgetputcmd("%s/gw%02d.cred" % (self.run_dir, idx), idx)
        procs.append(common.pdsh(settings.getnodes('clients'), cmd))
    for proc in procs:
        proc.wait()
    monitoring.stop(self.run_dir)

    # If we were doing recovery, wait until it's done.
    if 'recovery_test' in self.cluster.config:
        self.cluster.wait_recovery_done()

    # Finally, get the historic ops
    self.cluster.dump_historic_ops(self.run_dir)
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
示例11: run
def run(self):
super(RbdFio, self).run()
# Set client readahead
self.set_client_param('read_ahead_kb', self.client_ra)
# We'll always drop caches for rados bench
self.dropcaches()
common.make_remote_dir(self.run_dir)
monitoring.start(self.run_dir)
# Run rados bench
print 'Running rbd fio %s test.' % self.mode
names = ""
for i in xrange(self.concurrent_procs):
names += "--name=%s/mnt/rbdfio-`hostname -s`-%d/cbt-rbdfio " % (self.tmp_dir, i)
out_file = '%s/output' % self.run_dir
fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' % (self.mode, self.ioengine, self.time, self.op_size, self.iodepth, self.vol_size * 9/10, names, out_file)
common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
# ps = []
# for i in xrange(self.concurrent_procs):
# out_file = '%s/output.%s' % (self.run_dir, i)
# p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
# ps.append(p)
# for p in ps:
# p.wait()
monitoring.stop(self.run_dir)
common.sync_files('%s/*' % self.run_dir, self.out_dir)
示例12: initialize
def initialize(self):
super(RbdFio, self).initialize()
self.cleanup()
if not self.use_existing:
self.cluster.initialize()
self.cluster.dump_config(self.run_dir)
# Setup the pools
monitoring.start("%s/pool_monitoring" % self.run_dir)
common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool create rbdfio %d %d' % (self.tmp_conf, self.pgs, self.pgs)).communicate()
common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool set rbdfio size 1' % self.tmp_conf).communicate()
print 'Checking Healh after pool creation.'
self.cluster.check_health()
monitoring.stop()
# Mount the filesystem
common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate()
for i in xrange(self.concurrent_procs):
common.pdsh(settings.getnodes('clients'), 'sudo rbd -c %s create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (self.tmp_conf, i, self.vol_size)).communicate()
common.pdsh(settings.getnodes('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate()
common.pdsh(settings.getnodes('clients'), 'sudo mkdir -p -m0755 -- %s/mnt/rbdfio-`hostname -s`-%d' % (self.tmp_dir, i)).communicate()
common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d %s/mnt/rbdfio-`hostname -s`-%d' % (i, self.tmp_dir, i)).communicate()
print 'Running scrub monitoring'
monitoring.start("%s/scrub_monitoring" % self.run_dir)
self.cluster.check_scrub()
monitoring.stop()
# Create the run directory
common.make_remote_dir(self.run_dir)
示例13: initialize
def initialize(self):
super(LibrbdFio, self).initialize()
print "Running scrub monitoring."
monitoring.start("%s/scrub_monitoring" % self.run_dir)
self.cluster.check_scrub()
monitoring.stop()
print "Pausing for 60s for idle monitoring."
monitoring.start("%s/idle_monitoring" % self.run_dir)
time.sleep(60)
monitoring.stop()
common.sync_files("%s/*" % self.run_dir, self.out_dir)
self.mkimages()
# Create the run directory
common.make_remote_dir(self.run_dir)
# populate the fio files
ps = []
print "Attempting to populating fio files..."
for i in xrange(self.volumes_per_client):
pre_cmd = (
"sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s`-%d --invalidate=0 --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null"
% (self.cmd_path, self.poolname, i, self.numjobs, self.vol_size, self.names)
)
p = common.pdsh(settings.getnodes("clients"), pre_cmd)
ps.append(p)
for p in ps:
p.wait()
return True
示例14: mkpools
def mkpools(self):
    """Recreate one rados-bench pool per (client node, process) pair,
    with pool monitoring active for the duration."""
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    for proc_num in xrange(self.concurrent_procs):
        for client in settings.getnodes('clients').split(','):
            # Strip any user@ prefix to get the bare hostname.
            host = client.rpartition("@")[2]
            pool = 'rados-bench-%s-%s' % (host, proc_num)
            self.cluster.rmpool(pool, self.pool_profile)
            self.cluster.mkpool(pool, self.pool_profile)
    monitoring.stop()
示例15: mkimages
def mkimages(self):
    """Recreate the benchmark pool and one rbd image per client node,
    with pool monitoring active for the duration."""
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    self.cluster.rmpool(self.poolname, self.pool_profile)
    self.cluster.mkpool(self.poolname, self.pool_profile)
    for client in settings.getnodes('clients').split(','):
        # Strip any user@ prefix to get the bare hostname.
        host = client.rpartition("@")[2]
        cmd = '/usr/bin/rbd create cbt-librbdfio-%s --size %s --pool %s --order %s' % (host, self.vol_size, self.poolname, self.vol_order)
        common.pdsh(settings.getnodes('head'), cmd).communicate()
    monitoring.stop()