

Python Tools.create_results_dir Method Code Examples

This article collects typical usage examples of the Python method Tools.Tools.create_results_dir. If you are wondering what Tools.create_results_dir does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples of the containing class, Tools.Tools.


Below are 4 code examples of the Tools.create_results_dir method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
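All four examples call create_results_dir with a results root directory, a timestamp prefix, a benchmark/workload name, and a scenario identifier, and later check the return value for truthiness. For orientation, here is a minimal sketch of what such a helper might look like, inferred purely from those call sites; it is an assumption for illustration, not browbeat's actual implementation:

import logging
import os


class Tools(object):

    def __init__(self):
        self.logger = logging.getLogger('browbeat.Tools')

    def create_results_dir(self, results_dir, timestamp, service, scenario):
        # Hypothetical sketch: nest results as <root>/<timestamp>/<service>/<scenario>
        the_directory = "{}/{}/{}/{}".format(
            results_dir, timestamp, service, scenario)
        try:
            os.makedirs(the_directory)
        except OSError:
            # Returning a falsy value matches the "if not result_dir:" checks below
            self.logger.error(
                "Could not create directory: {}".format(the_directory))
            return False
        return the_directory

Under this sketch, a call such as tools.create_results_dir("results", "20160101-000000", "rally", "authenticate") would return results/20160101-000000/rally/authenticate (illustrative values).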

Example 1: Rally

# Required import: from Tools import Tools [as alias]
# Alternatively: from Tools.Tools import create_results_dir [as alias]

#......... part of the code is omitted here .........
        self.logger.info("Starting Rally workloads")
        es_ts = datetime.datetime.now()
        dir_ts = es_ts.strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(dir_ts))
        benchmarks = self.config.get('rally')['benchmarks']
        if len(benchmarks) > 0:
            for benchmark in benchmarks:
                if benchmark['enabled']:
                    self.logger.info("Benchmark: {}".format(benchmark['name']))
                    scenarios = benchmark['scenarios']
                    def_concurrencies = benchmark['concurrency']
                    def_times = benchmark['times']
                    self.logger.debug(
                        "Default Concurrencies: {}".format(def_concurrencies))
                    self.logger.debug("Default Times: {}".format(def_times))
                    for scenario in scenarios:
                        if scenario['enabled']:
                            self.update_scenarios()
                            self.update_total_scenarios()
                            scenario_name = scenario['name']
                            scenario_file = scenario['file']
                            self.logger.info(
                                "Running Scenario: {}".format(scenario_name))
                            self.logger.debug(
                                "Scenario File: {}".format(scenario_file))

                            del scenario['enabled']
                            del scenario['file']
                            del scenario['name']
                            if len(scenario) > 0:
                                self.logger.debug(
                                    "Overriding Scenario Args: {}".format(scenario))

                            result_dir = self.tools.create_results_dir(
                                self.config['browbeat'][
                                    'results'], dir_ts, benchmark['name'],
                                scenario_name)
                            self.logger.debug("Created result directory: {}".format(result_dir))
                            workload = self.__class__.__name__
                            self.workload_logger(result_dir, workload)

                            # Override concurrency/times
                            if 'concurrency' in scenario:
                                concurrencies = scenario['concurrency']
                                del scenario['concurrency']
                            else:
                                concurrencies = def_concurrencies
                            if 'times' not in scenario:
                                scenario['times'] = def_times

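                            # Run each configured concurrency level, repeating each test 'rerun' times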
                            for concurrency in concurrencies:
                                scenario['concurrency'] = concurrency
                                for run in range(self.config['browbeat']['rerun']):
                                    if run not in results:
                                        results[run] = []
                                    self.update_tests()
                                    self.update_total_tests()
                                    test_name = "{}-browbeat-{}-{}-iteration-{}".format(
                                        dir_ts, scenario_name, concurrency, run)

                                    if not result_dir:
                                        self.logger.error(
                                            "Failed to create result directory")
                                        exit(1)

                                    # Start connmon before rally
Developer: danielmellado, Project: browbeat, Lines: 70, Source: Rally.py

Example 2: Shaker

# Required import: from Tools import Tools [as alias]
# Alternatively: from Tools.Tools import create_results_dir [as alias]

#......... part of the code is omitted here .........
                scenario['name'],
                new_test_name,
                workload,
                "fail")
        else:
            self.logger.info("Completed Test: {}".format(scenario['name']))
            self.logger.info(
                "Saved report to: {}".format(
                    os.path.join(result_dir, test_name + ".html")))
            self.logger.info("saved log to: {}.log".format(os.path.join(result_dir, test_name)))
            self.update_pass_tests()
            self.update_total_pass_tests()
            self.get_time_dict(
                to_time,
                from_time,
                scenario['name'],
                new_test_name,
                workload,
                "pass")

    def run_scenario(self, scenario, result_dir, test_name):
        filename = scenario['file']
        server_endpoint = self.config['shaker']['server']
        port_no = self.config['shaker']['port']
        flavor = self.config['shaker']['flavor']
        venv = self.config['shaker']['venv']
        shaker_region = self.config['shaker']['shaker_region']
        timeout = self.config['shaker']['join_timeout']
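        # Build a single shell command: activate the shaker venv and source overcloud
        # credentials, then run shaker, writing the HTML report, JSON output, and
        # debug log under result_dir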
        cmd_1 = (
            "source {}/bin/activate; source /home/stack/overcloudrc").format(venv)
        cmd_2 = (
            "shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}"
            " --os-region-name {7} --agent-join-timeout {6}"
            " --report {4}/{5}.html --output {4}/{5}.json"
            " --debug > {4}/{5}.log 2>&1").format(
            server_endpoint,
            port_no,
            flavor,
            filename,
            result_dir,
            test_name,
            timeout,
            shaker_region)
        cmd = ("{}; {}").format(cmd_1, cmd_2)
        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['shaker']:
            time.sleep(self.config['shaker']['sleep_before'])
        from_time = time.time()
        self.tools.run_cmd(cmd)
        to_time = time.time()
        self.update_tests()
        self.update_total_tests()
        self.result_check(result_dir, test_name, scenario, to_time, from_time)
        if 'sleep_after' in self.config['shaker']:
            time.sleep(self.config['shaker']['sleep_after'])
        to_ts = int(time.time() * 1000)
        # Snapshotting
        self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
        self.grafana.log_snapshot_playbook_cmd(
            from_ts, to_ts, result_dir, test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)

    def run_shaker(self):
        self.logger.info("Starting Shaker workloads")
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        scenarios = self.config.get('shaker')['scenarios']
        self.shaker_checks()
        scen_length = len(scenarios)
        if scen_length > 0:
            for scenario in scenarios:
                if scenario['enabled']:
                    self.update_scenarios()
                    self.update_total_scenarios()
                    self.logger.info("Scenario: {}".format(scenario['name']))
                    self.set_scenario(scenario)
                    self.logger.debug("Set Scenario File: {}".format(
                        scenario['file']))
                    result_dir = self.tools.create_results_dir(
                        self.config['browbeat']['results'], time_stamp, "shaker",
                        scenario['name'])
                    workload = self.__class__.__name__
                    self.workload_logger(result_dir, workload)
                    time_stamp1 = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
                    test_name = "{}-browbeat-{}-{}".format(time_stamp1,
                                                           "shaker", scenario['name'])
                    self.run_scenario(scenario, result_dir, test_name)
                    self.get_stats()
                else:
                    self.logger.info(
                        "Skipping {} as scenario enabled: false".format(
                            scenario['name']))
            self.final_stats(self.scenario_count)
        else:
            self.logger.error(
                "Configuration file contains no shaker scenarios")
Developer: bengland2, Project: browbeat, Lines: 104, Source: Shaker.py

Example 3: Rally

# Required import: from Tools import Tools [as alias]
# Alternatively: from Tools.Tools import create_results_dir [as alias]

#......... part of the code is omitted here .........
        results = OrderedDict()
        self.logger.info("Starting Rally workloads")
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        benchmarks = self.config.get('rally')['benchmarks']
        if len(benchmarks) > 0:
            for benchmark in benchmarks:
                if benchmark['enabled']:
                    self.logger.info("Benchmark: {}".format(benchmark['name']))
                    scenarios = benchmark['scenarios']
                    def_concurrencies = benchmark['concurrency']
                    def_times = benchmark['times']
                    self.logger.debug(
                        "Default Concurrencies: {}".format(def_concurrencies))
                    self.logger.debug("Default Times: {}".format(def_times))
                    for scenario in scenarios:
                        if scenario['enabled']:
                            self.update_scenarios()
                            self.update_total_scenarios()
                            scenario_name = scenario['name']
                            scenario_file = scenario['file']
                            self.logger.info(
                                "Running Scenario: {}".format(scenario_name))
                            self.logger.debug(
                                "Scenario File: {}".format(scenario_file))

                            del scenario['enabled']
                            del scenario['file']
                            del scenario['name']
                            if len(scenario) > 0:
                                self.logger.debug(
                                    "Overriding Scenario Args: {}".format(scenario))

                            result_dir = self.tools.create_results_dir(
                                self.config['browbeat'][
                                    'results'], time_stamp, benchmark['name'],
                                scenario_name)
                            self.logger.debug("Created result directory: {}".format(result_dir))
                            workload = self.__class__.__name__
                            self.workload_logger(result_dir, workload)

                            # Override concurrency/times
                            if 'concurrency' in scenario:
                                concurrencies = scenario['concurrency']
                                del scenario['concurrency']
                            else:
                                concurrencies = def_concurrencies
                            if 'times' not in scenario:
                                scenario['times'] = def_times

                            for concurrency in concurrencies:
                                scenario['concurrency'] = concurrency
                                for run in range(self.config['browbeat']['rerun']):
                                    if run not in results:
                                        results[run] = []
                                    self.update_tests()
                                    self.update_total_tests()
                                    test_name = "{}-browbeat-{}-{}-iteration-{}".format(
                                        time_stamp, scenario_name, concurrency, run)

                                    if not result_dir:
                                        self.logger.error(
                                            "Failed to create result directory")
                                        exit(1)

                                    # Start connmon before rally
Developer: bengland2, Project: browbeat, Lines: 70, Source: Rally.py

Example 4: PerfKit

# Required import: from Tools import Tools [as alias]
# Alternatively: from Tools.Tools import create_results_dir [as alias]

#......... part of the code is omitted here .........
            shutil.rmtree("/tmp/perfkitbenchmarker/run_browbeat")

        if self.config['connmon']['enabled']:
            self.connmon.start_connmon()

        # Run PerfKit
        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_before'])
        self.logger.info("Running Perfkit Command: {}".format(cmd))
        stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w')
        stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w')
        from_time = time.time()
        process = subprocess.Popen(cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
        process.communicate()
        to_time = time.time()
        if 'sleep_after' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_after'])
        to_ts = int(time.time() * 1000)

        # Stop connmon at end of perfkit task
        if self.config['connmon']['enabled']:
            self.connmon.stop_connmon()
            try:
                self.connmon.move_connmon_results(result_dir, test_name)
                self.connmon.connmon_graphs(result_dir, test_name)
            except:
                self.logger.error("Connmon Result data missing, Connmon never started")
        workload = self.__class__.__name__
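        # Strip the two timestamp fields (date and time) from the test name,
        # leaving '{benchmark}-{run}' for reporting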
        new_test_name = test_name.split('-')
        new_test_name = new_test_name[2:]
        new_test_name = '-'.join(new_test_name)
        # Determine success
        try:
            with open("{}/pkb.stderr.log".format(result_dir), 'r') as stderr:
                if any('SUCCEEDED' in line for line in stderr):
                    self.logger.info("Benchmark completed.")
                    self.update_pass_tests()
                    self.update_total_pass_tests()
                    self.get_time_dict(
                        to_time,
                        from_time,
                        benchmark_config['benchmarks'],
                        new_test_name,
                        workload,
                        "pass")

                else:
                    self.logger.error("Benchmark failed.")
                    self.update_fail_tests()
                    self.update_total_fail_tests()
                    self.get_time_dict(
                        to_time,
                        from_time,
                        benchmark_config['benchmarks'],
                        new_test_name,
                        workload,
                        "fail")
        except IOError:
            self.logger.error(
                "File missing: {}/pkb.stderr.log".format(result_dir))

        # Copy all results
        for perfkit_file in glob.glob("/tmp/perfkitbenchmarker/run_browbeat/*"):
            shutil.move(perfkit_file, result_dir)
        if os.path.exists("/tmp/perfkitbenchmarker/run_browbeat"):
            shutil.rmtree("/tmp/perfkitbenchmarker/run_browbeat")

        # Grafana integration
        self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
        self.grafana.log_snapshot_playbook_cmd(
            from_ts, to_ts, result_dir, test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)

    def start_workloads(self):
        self.logger.info("Starting PerfKitBenchmarker Workloads.")
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        benchmarks = self.config.get('perfkit')['benchmarks']
        if len(benchmarks) > 0:
            for benchmark in benchmarks:
                if benchmark['enabled']:
                    self.logger.info("Benchmark: {}".format(benchmark['name']))
                    self.update_scenarios()
                    self.update_total_scenarios()
                    for run in range(self.config['browbeat']['rerun']):
                        self.update_tests()
                        self.update_total_tests()
                        result_dir = self.tools.create_results_dir(
                            self.config['browbeat']['results'], time_stamp, benchmark['name'], run)
                        test_name = "{}-{}-{}".format(time_stamp, benchmark['name'], run)
                        workload = self.__class__.__name__
                        self.workload_logger(result_dir, workload)
                        self.run_benchmark(benchmark, result_dir, test_name)
                        self._log_details()
                else:
                    self.logger.info(
                        "Skipping {} benchmark, enabled: false".format(benchmark['name']))
        else:
            self.logger.error("Config file contains no perfkit benchmarks.")
Developer: bengland2, Project: browbeat, Lines: 104, Source: PerfKit.py


Note: The Tools.Tools.create_results_dir examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by the community, and copyright in the source code remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.