

Python Tools.run_cmd Method Code Examples

This article collects typical usage examples of the Python method Tools.Tools.run_cmd. If you have been wondering what Tools.run_cmd does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of its containing class, Tools.Tools.


The following presents 4 code examples of the Tools.run_cmd method, sorted by popularity by default. You can upvote any example you like or find useful; your ratings help the system recommend better Python code examples.
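The Tools class itself is not reproduced on this page. For orientation, here is a minimal, hypothetical sketch of what a run_cmd-style wrapper in the same spirit could look like, assuming it shells out via subprocess and returns captured stdout as a stripped string (the Shaker example below compares the return value against ""). This is an illustration under those assumptions, not browbeat's actual implementation.

# A minimal, hypothetical run_cmd-style wrapper (illustration only):
import logging
import subprocess


class Tools:

    def __init__(self, config=None):
        self.logger = logging.getLogger('browbeat.Tools')
        self.config = config

    def run_cmd(self, cmd):
        # Run a shell command, log failures, and return stripped stdout
        self.logger.debug("Running command: {}".format(cmd))
        process = subprocess.Popen(cmd, shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        if process.returncode != 0:
            self.logger.error("Command failed: {}".format(stderr))
        return stdout.decode().strip()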

Example 1: __init__

# Module to import: from Tools import Tools [as alias]
# Or: from Tools.Tools import run_cmd [as alias]
import logging
import os
import shutil

from Tools import Tools  # browbeat-internal helper (module path assumed)
class Connmon:

    def __init__(self, config):
        self.logger = logging.getLogger('browbeat.Connmon')
        self.config = config
        self.tools = Tools(self.config)

    # Start connmond
    def start_connmon(self, retry=None):
        self.stop_connmon()
        tool = "connmond"
        connmond = self.tools.find_cmd(tool)
        if not connmond:
            self.logger.error("Unable to find {}".format(tool))
        as_sudo = self.config['connmon']['sudo']
        cmd = ""
        if as_sudo:
            cmd += "sudo "
        cmd += "screen -X -S connmond kill"
        self.tools.run_cmd(cmd)
        self.logger.info("Starting connmond")
        cmd = ""
        cmd += "{} --config /etc/connmon.cfg > /tmp/connmond 2>&1 &".format(
            connmond)
        self.tools.run_cmd(cmd)
        # The method must be called; the bound method object itself is never False
        if self.check_connmon_results() is False:
            if retry is None:
                self.start_connmon(retry=True)
            else:
                return False
        else:
            return True

    def check_connmon_results(self, result_file='/tmp/connmon_results.csv'):
        return os.path.isfile(result_file)

    # Stop connmond
    def stop_connmon(self):
        self.logger.info("Stopping connmond")
        return self.tools.run_cmd("pkill -9 connmond")

    # Create Connmon graphs
    def connmon_graphs(self, result_dir, test_name):
        cmd = "python graphing/connmonplot.py {}/connmon/{}.csv".format(result_dir,
                                                                        test_name)
        return self.tools.run_cmd(cmd)

    # Move connmon results
    def move_connmon_results(self, result_dir, test_name):
        path = "%s/connmon" % result_dir
        if not os.path.exists(path):
            os.mkdir(path)
        return shutil.move("/tmp/connmon_results.csv",
                           "{}/connmon/{}.csv".format(result_dir, test_name))
Developer: ekuric, Project: browbeat, Lines: 57, Source: Connmon.py
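For context, a minimal driver for the Connmon class above might look like the following sketch. The 'connmon' → 'sudo' config key is the one the class actually reads; the result directory and test name are hypothetical illustration values.

# Hypothetical usage sketch for Connmon (paths and names are illustrative):
config = {'connmon': {'sudo': True}}
connmon = Connmon(config)
if connmon.start_connmon():
    # ... run a workload here while connmond samples connection counts ...
    connmon.stop_connmon()
    connmon.move_connmon_results('/tmp/results', 'keystone-test')
    connmon.connmon_graphs('/tmp/results', 'keystone-test')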

Example 2: Rally

# Module to import: from Tools import Tools [as alias]
# Or: from Tools.Tools import run_cmd [as alias]
import logging
import time

# browbeat-internal helpers (module paths assumed)
from Connmon import Connmon
from Elastic import Elastic
from Grafana import Grafana
from Tools import Tools
from WorkloadBase import WorkloadBase
class Rally(WorkloadBase):

    def __init__(self, config, hosts=None):
        self.logger = logging.getLogger('browbeat.Rally')
        self.config = config
        self.tools = Tools(self.config)
        self.connmon = Connmon(self.config)
        self.grafana = Grafana(self.config)
        self.elastic = Elastic(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0

    def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmark):
        self.logger.debug("--------------------------------")
        self.logger.debug("task_file: {}".format(task_file))
        self.logger.debug("scenario_args: {}".format(scenario_args))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_before'])
        task_args = str(scenario_args).replace("'", "\"")
        plugins = []
        if "plugins" in self.config['rally']:
            if len(self.config['rally']['plugins']) > 0:
                for plugin in self.config['rally']['plugins']:
                    for name in plugin:
                        plugins.append(plugin[name])
        plugin_string = ""
        if len(plugins) > 0:
            plugin_string = "--plugin-paths {}".format(",".join(plugins))
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(
            plugin_string, task_file,task_args, test_name)
        from_time = time.time()
        self.tools.run_cmd(cmd)
        to_time = time.time()
        if 'sleep_after' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_after'])
        to_ts = int(time.time() * 1000)
        self.grafana.create_grafana_urls({'from_ts': from_ts, 'to_ts': to_ts})
        self.grafana.print_dashboard_url(test_name)
        self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir, test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
        return (from_time, to_time)

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def update_scenarios(self):
        self.scenario_count += 1

    def get_task_id(self, test_name):
        cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(
            test_name)
        return self.tools.run_cmd(cmd)

    def _get_details(self):
        self.logger.info(
            "Current number of Rally scenarios executed: {}".format(
                self.scenario_count))
        self.logger.info("Current number of Rally tests executed: {}".format(self.test_count))
        self.logger.info("Current number of Rally tests passed: {}".format(self.pass_count))
        self.logger.info("Current number of Rally test failures: {}".format(self.error_count))

    def gen_scenario_html(self, task_ids, test_name):
        all_task_ids = ' '.join(task_ids)
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task report --task {} --out {}.html".format(
            all_task_ids, test_name)
        return self.tools.run_cmd(cmd)

    def gen_scenario_json(self, task_id):
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task results {}".format(task_id)
        return self.tools.run_cmd(cmd)

    def gen_scenario_json_file(self, task_id, test_name):
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task results {} > {}.json".format(task_id, test_name)
        return self.tools.run_cmd(cmd)

    def rally_metadata(self, result, meta):
        result['rally_metadata'] = meta
        return result

    def json_result(self, task_id):
        rally_data = {}
        rally_errors = []
        rally_sla = []
#......... remainder of the code omitted .........
Developer: danielmellado, Project: browbeat, Lines: 103, Source: Rally.py
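As a usage note, a call into run_scenario from example 2 could look like the sketch below. All file paths, names, and scenario arguments are hypothetical, and it assumes the config dict also carries whatever keys Connmon, Grafana, and Elastic expect in their constructors.

# Hypothetical invocation of Rally.run_scenario (all values illustrative):
config = {'rally': {'venv': '/home/stack/rally-venv/bin/activate',
                    'sleep_before': 0,
                    'sleep_after': 0}}
rally = Rally(config)  # assumes Connmon/Grafana/Elastic config sections exist too
from_time, to_time = rally.run_scenario(
    task_file='rally/authenticate/keystone.yml',
    scenario_args={'concurrency': 8, 'times': 100},
    result_dir='results/run-1',
    test_name='authenticate-keystone-8-100',
    benchmark='authenticate')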

Example 3: Rally

# Module to import: from Tools import Tools [as alias]
# Or: from Tools.Tools import run_cmd [as alias]
import datetime
import logging
import time
from collections import OrderedDict

# browbeat-internal helpers (module paths assumed)
from Connmon import Connmon
from Grafana import Grafana
from Tools import Tools
from WorkloadBase import WorkloadBase
class Rally(WorkloadBase):

    def __init__(self, config, hosts=None):
        self.logger = logging.getLogger('browbeat.Rally')
        self.config = config
        self.tools = Tools(self.config)
        self.connmon = Connmon(self.config)
        self.grafana = Grafana(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0

    def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmark):
        self.logger.debug("--------------------------------")
        self.logger.debug("task_file: {}".format(task_file))
        self.logger.debug("scenario_args: {}".format(scenario_args))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_before'])
        task_args = str(scenario_args).replace("'", "\"")
        plugins = []
        if "plugins" in self.config['rally']:
            if len(self.config['rally']['plugins']) > 0:
                for plugin in self.config['rally']['plugins']:
                    for name in plugin:
                        plugins.append(plugin[name])
        plugin_string = ""
        if len(plugins) > 0:
            plugin_string = "--plugin-paths {}".format(",".join(plugins))
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(
            plugin_string, task_file,task_args, test_name)
        from_time = time.time()
        self.tools.run_cmd(cmd)
        to_time = time.time()
        if 'sleep_after' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_after'])
        to_ts = int(time.time() * 1000)
        # Bug fix: the original returned here, leaving the Grafana calls below unreachable
        self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
        self.grafana.log_snapshot_playbook_cmd(
            from_ts, to_ts, result_dir, test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
        return (from_time, to_time)

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def update_scenarios(self):
        self.scenario_count += 1

    def get_task_id(self, test_name):
        cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(
            test_name)
        return self.tools.run_cmd(cmd)

    def _get_details(self):
        self.logger.info(
            "Current number of Rally scenarios executed: {}".format(
                self.scenario_count))
        self.logger.info("Current number of Rally tests executed: {}".format(self.test_count))
        self.logger.info("Current number of Rally tests passed: {}".format(self.pass_count))
        self.logger.info("Current number of Rally test failures: {}".format(self.error_count))

    def gen_scenario_html(self, task_ids, test_name):
        all_task_ids = ' '.join(task_ids)
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task report --task {} --out {}.html".format(
            all_task_ids, test_name)
        return self.tools.run_cmd(cmd)

    def gen_scenario_json(self, task_id, test_name):
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task results {} > {}.json".format(task_id, test_name)
        return self.tools.run_cmd(cmd)

    def start_workloads(self):
        """Iterates through all rally scenarios in browbeat yaml config file"""
        results = OrderedDict()
        self.logger.info("Starting Rally workloads")
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        benchmarks = self.config.get('rally')['benchmarks']
        if len(benchmarks) > 0:
            for benchmark in benchmarks:
                if benchmark['enabled']:
                    self.logger.info("Benchmark: {}".format(benchmark['name']))
                    scenarios = benchmark['scenarios']
                    def_concurrencies = benchmark['concurrency']
                    def_times = benchmark['times']
#......... remainder of the code omitted .........
Developer: bengland2, Project: browbeat, Lines: 103, Source: Rally.py

Example 4: Shaker

# Module to import: from Tools import Tools [as alias]
# Or: from Tools.Tools import run_cmd [as alias]
import json
import logging
import os
import sys

import yaml

# browbeat-internal helpers (module paths assumed)
from Grafana import Grafana
from Tools import Tools
from WorkloadBase import WorkloadBase
class Shaker(WorkloadBase):

    def __init__(self, config):
        self.logger = logging.getLogger('browbeat.Shaker')
        self.config = config
        self.tools = Tools(self.config)
        self.grafana = Grafana(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0

    def shaker_checks(self):
        cmd = "source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
        if self.tools.run_cmd(cmd) == "":
            self.logger.error("Shaker Image is not built, try again")
            exit(1)
        else:
            self.logger.info("Shaker image is built, continuing")

    def get_stats(self):
        self.logger.info("Current number of Shaker tests executed: {}".format(self.test_count))
        self.logger.info("Current number of Shaker tests passed: {}".format(self.pass_count))
        self.logger.info("Current number of Shaker tests failed: {}".format(self.error_count))

    def final_stats(self, total):
        self.logger.info("Total Shaker scenarios enabled by user: {}".format(total))
        self.logger.info("Total number of Shaker tests executed: {}".format(self.test_count))
        self.logger.info("Total number of Shaker tests passed: {}".format(self.pass_count))
        self.logger.info("Total number of Shaker tests failed: {}".format(self.error_count))

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def update_scenarios(self):
        self.scenario_count += 1

    def set_scenario(self, scenario):
        fname = scenario['file']
        with open(fname, 'r') as stream:
            # safe_load avoids executing arbitrary YAML tags in the scenario file
            data = yaml.safe_load(stream)
        default_placement = "double_room"
        default_density = 1
        default_compute = 1
        default_progression = "linear"
        default_time = 60
        if "placement" in scenario:
            data['deployment']['accommodation'][1] = scenario['placement']
        else:
            data['deployment']['accommodation'][1] = default_placement
        if "density" in scenario:
            data['deployment']['accommodation'][
                2]['density'] = scenario['density']
        else:
            data['deployment']['accommodation'][2]['density'] = default_density
        if "compute" in scenario:
            data['deployment']['accommodation'][3][
                'compute_nodes'] = scenario['compute']
        else:
            data['deployment']['accommodation'][3][
                'compute_nodes'] = default_compute
        if "progression" in scenario:
            data['execution']['progression'] = scenario['progression']
        else:
            data['execution']['progression'] = default_progression
        data['execution']['tests'] = [d for d in data['execution']
                                      ['tests'] if d.get('class') == "iperf_graph"]
        if "time" in scenario:
            data['execution']['tests'][0]['time'] = scenario['time']
        else:
            data['execution']['tests'][0]['time'] = default_time
        with open(fname, 'w') as yaml_file:
            yaml_file.write(yaml.dump(data, default_flow_style=False))

    def get_uuidlist(self, data):
        uuidlist = []
        for key in data['records']:  # iterate keys directly (iterkeys is Python 2 only)
            uuidlist.append(key)
        return uuidlist

    def result_check(self, result_dir, test_name, scenario, to_time, from_time):
        outputfile = os.path.join(result_dir, test_name + ".json")
        error = False
        with open(outputfile) as data_file:
            data = json.load(data_file)
        uuidlist = self.get_uuidlist(data)
        workload = self.__class__.__name__
        new_test_name = test_name.split('-')
        new_test_name = new_test_name[3:]
        new_test_name = '-'.join(new_test_name)
        for uuid in uuidlist:
            if data['records'][uuid]['status'] != "ok":
                error = True
#......... remainder of the code omitted .........
Developer: bengland2, Project: browbeat, Lines: 103, Source: Shaker.py
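To make the set_scenario flow from example 4 concrete, the sketch below shows the shape of the scenario dict it consumes. The file path and override values are hypothetical; the keys ('placement', 'density', 'compute', 'progression', 'time') are the ones the method checks above.

# Hypothetical scenario overrides for Shaker.set_scenario (values illustrative):
config = {}  # assumed minimal; Tools/Grafana may expect more keys in practice
shaker = Shaker(config)
scenario = {'file': 'shaker/dense_l2.yaml',
            'placement': 'single_room',
            'density': 2,
            'compute': 2,
            'progression': 'quadratic',
            'time': 120}
shaker.set_scenario(scenario)  # rewrites the YAML file in place with these overrides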


Note: The Tools.Tools.run_cmd examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.