本文整理汇总了Python中webkitpy.common.host.Host._initialize_scm方法的典型用法代码示例。如果您正苦于以下问题:Python Host._initialize_scm方法的具体用法?Python Host._initialize_scm怎么用?Python Host._initialize_scm使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类webkitpy.common.host.Host
的用法示例。
在下文中一共展示了Host._initialize_scm方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: PerfTestsRunner
# 需要导入模块: from webkitpy.common.host import Host [as 别名]
# 或者: from webkitpy.common.host.Host import _initialize_scm [as 别名]
class PerfTestsRunner(object):
_default_branch = "webkit-trunk"
_EXIT_CODE_BAD_BUILD = -1
_EXIT_CODE_BAD_JSON = -2
_EXIT_CODE_FAILED_UPLOADING = -3
def __init__(self, args=None, port=None):
    """Parse the command line and set up host/port state for a perf-test run.

    If *port* is given, its host is reused; otherwise a fresh Host and a
    platform-appropriate port are created from the parsed options.
    """
    parsed_options, parsed_args = PerfTestsRunner._parse_args(args)
    self._options = parsed_options
    self._args = parsed_args
    if port:
        # Caller supplied a port (e.g. for testing); reuse its host.
        self._port = port
        self._host = port.host
    else:
        self._host = Host()
        self._port = self._host.port_factory.get(parsed_options.platform, parsed_options)
    self._host._initialize_scm()
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._results = {}
    self._timestamp = time.time()
@staticmethod
def _parse_args(args=None):
perf_option_list = [
optparse.make_option(
"--debug",
action="store_const",
const="Debug",
dest="configuration",
help="Set the configuration to Debug",
),
optparse.make_option(
"--release",
action="store_const",
const="Release",
dest="configuration",
help="Set the configuration to Release",
),
optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
optparse.make_option(
"--chromium",
action="store_const",
const="chromium",
dest="platform",
help="Alias for --platform=chromium",
),
optparse.make_option(
"--builder-name",
help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
),
optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
optparse.make_option(
"--build",
dest="build",
action="store_true",
default=True,
help="Check to ensure the DumpRenderTree build is up-to-date (default).",
),
optparse.make_option(
"--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)",
),
optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
optparse.make_option(
"--pause-before-testing",
dest="pause_before_testing",
action="store_true",
default=False,
help="Pause before running the tests to let user attach a performance monitor.",
),
optparse.make_option("--output-json-path", help="Filename of the JSON file that summaries the results"),
optparse.make_option(
"--source-json-path",
help="Path to a JSON file to be merged into the JSON file when --output-json-path is present",
),
optparse.make_option(
"--test-results-server",
help="Upload the generated JSON file to the specified server when --output-json-path is present",
),
optparse.make_option(
"--webkit-test-runner",
"-2",
action="store_true",
help="Use WebKitTestRunner rather than DumpRenderTree.",
),
]
return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
def _collect_tests(self):
"""Return the list of tests found."""
def _is_test_file(filesystem, dirname, filename):
return filesystem.splitext(filename)[1] in [".html", ".svg"]
filesystem = self._host.filesystem
paths = []
for arg in self._args:
paths.append(arg)
relpath = filesystem.relpath(arg, self._base_path)
if relpath:
#.........这里部分代码省略.........
示例2: PerfTestsRunner
# 需要导入模块: from webkitpy.common.host import Host [as 别名]
# 或者: from webkitpy.common.host.Host import _initialize_scm [as 别名]
class PerfTestsRunner(object):
_perf_tests_base_dir = 'PerformanceTests'
_test_directories_for_chromium_style_tests = ['inspector']
_default_branch = 'webkit-trunk'
_EXIT_CODE_BAD_BUILD = -1
_EXIT_CODE_BAD_JSON = -2
_EXIT_CODE_FAILED_UPLOADING = -3
def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
    """Parse options and set up host, port, and printer for a perf-test run.

    If *port* is given, its host is reused; otherwise a fresh Host and a
    platform-appropriate port are created from the parsed options.
    """
    self._buildbot_output = buildbot_output
    parsed_options, parsed_args = PerfTestsRunner._parse_args(args)
    self._options = parsed_options
    self._args = parsed_args
    if port:
        # Caller supplied a port (e.g. for testing); reuse its host.
        self._port = port
        self._host = port.host
    else:
        self._host = Host()
        self._port = self._host.port_factory.get(parsed_options.platform, parsed_options)
    self._host._initialize_scm()
    self._printer = printing.Printer(self._port, parsed_options, regular_output, buildbot_output, configure_logging=False)
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._results = {}
    self._timestamp = time.time()
@staticmethod
def _parse_args(args=None):
    """Parse perf-test options together with the shared printing options.

    Returns the (options, remaining_args) pair produced by optparse.
    """
    print_options = printing.print_options()
    perf_option_list = [
        optparse.make_option(
            "--debug",
            action="store_const",
            const="Debug",
            dest="configuration",
            help="Set the configuration to Debug",
        ),
        optparse.make_option(
            "--release",
            action="store_const",
            const="Release",
            dest="configuration",
            help="Set the configuration to Release",
        ),
        optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
        optparse.make_option(
            "--builder-name",
            help="The name of the builder shown on the waterfall running this script e.g. google-mac-2.",
        ),
        optparse.make_option("--build-number", help="The build number of the builder running this script."),
        optparse.make_option(
            "--build",
            dest="build",
            action="store_true",
            default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date (default).",
        ),
        optparse.make_option(
            "--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)",
        ),
        optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
        optparse.make_option("--output-json-path", help="Filename of the JSON file that summaries the results"),
        optparse.make_option(
            "--source-json-path",
            help="Path to a JSON file to be merged into the JSON file when --output-json-path is present",
        ),
        optparse.make_option(
            "--test-results-server",
            help="Upload the generated JSON file to the specified server when --output-json-path is present",
        ),
    ]
    return optparse.OptionParser(option_list=perf_option_list + print_options).parse_args(args)
def _collect_tests(self):
    """Return the list of tests found."""
    def _looks_like_test(filesystem, dirname, filename):
        # Perf tests in this runner are plain .html files.
        return filename.endswith('.html')
    skipped_directories = set(['.svn', 'resources'])
    found = find_files.find(self._host.filesystem, self._base_path, self._args, skipped_directories, _looks_like_test)
    # Drop anything the port marks as skipped for perf testing.
    kept = []
    for test in found:
        if not self._port.skips_perf_test(self._port.relative_perf_test_filename(test)):
            kept.append(test)
    return kept
def run(self):
    """Check the build, run all collected perf tests, and optionally emit/upload JSON.

    Returns 0 after help printing, a negative _EXIT_CODE_* constant on
    build/JSON/upload failure, or the unexpected-result count from
    _run_tests_set otherwise.
    """
    if self._options.help_printing:
        self._printer.help_printing()
        self._printer.cleanup()
        return 0
    if not self._port.check_build(needs_http=False):
        _log.error("Build not up to date for %s" % self._port._path_to_driver())
        return self._EXIT_CODE_BAD_BUILD
    # We wrap any parts of the run that are slow or likely to raise exceptions
    # in a try/finally to ensure that we clean up the logging configuration.
    unexpected = -1
    try:
        tests = self._collect_tests()
        unexpected = self._run_tests_set(sorted(list(tests)), self._port)
    finally:
        self._printer.cleanup()
    options = self._options
    if self._options.output_json_path:
        # FIXME: Add --branch or auto-detect the branch we're in
        test_results_server = options.test_results_server
        # Branch is only attached when uploading to a results server.
        branch = self._default_branch if test_results_server else None
        build_number = int(options.build_number) if options.build_number else None
        if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
            return self._EXIT_CODE_BAD_JSON
        if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
            return self._EXIT_CODE_FAILED_UPLOADING
    return unexpected
def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
#.........这里部分代码省略.........
示例3: PerfTestsRunner
# 需要导入模块: from webkitpy.common.host import Host [as 别名]
# 或者: from webkitpy.common.host.Host import _initialize_scm [as 别名]
class PerfTestsRunner(object):
_perf_tests_base_dir = 'PerformanceTests'
_test_directories_for_chromium_style_tests = ['inspector']
def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
    """Parse options, create a Host/port, and set up the printer and base path."""
    self._buildbot_output = buildbot_output
    parsed_options, parsed_args = self._parse_args(args)
    self._options = parsed_options
    self._args = parsed_args
    self._host = Host()
    self._host._initialize_scm()
    self._port = self._host.port_factory.get(parsed_options.platform, parsed_options)
    self._printer = printing.Printer(self._port, parsed_options, regular_output, buildbot_output, configure_logging=False)
    self._webkit_base_dir_len = len(self._port.webkit_base())
    # Perf tests live under <webkit_base>/PerformanceTests.
    self._base_path = self._host.filesystem.join(self._port.webkit_base(), self._perf_tests_base_dir)
def _parse_args(self, args=None):
    """Parse perf-test options together with the shared printing options.

    Returns the (options, remaining_args) pair produced by optparse.
    """
    print_options = printing.print_options()
    perf_option_list = [
        optparse.make_option(
            "--debug",
            action="store_const",
            const="Debug",
            dest="configuration",
            help="Set the configuration to Debug",
        ),
        optparse.make_option(
            "--release",
            action="store_const",
            const="Release",
            dest="configuration",
            help="Set the configuration to Release",
        ),
        optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
        optparse.make_option(
            "--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)",
        ),
        optparse.make_option("--time-out-ms", default=30000, help="Set the timeout for each test"),
    ]
    return optparse.OptionParser(option_list=perf_option_list + print_options).parse_args(args)
def _collect_tests(self):
    """Return the list of tests found."""
    def _looks_like_test(filesystem, dirname, filename):
        # Perf tests in this runner are plain .html files.
        return filename.endswith('.html')
    found = find_files.find(self._host.filesystem, self._base_path, paths=self._args, file_filter=_looks_like_test)
    return found
def run(self):
    """Check the build, then collect and run the perf tests.

    Returns 0 after help printing, -1 if the build is out of date, or the
    unexpected-result count produced by _run_tests_set otherwise.
    """
    if self._options.help_printing:
        self._printer.help_printing()
        self._printer.cleanup()
        return 0
    if not self._port.check_build(needs_http=False):
        _log.error("Build not up to date for %s" % self._port._path_to_driver())
        return -1
    # We wrap any parts of the run that are slow or likely to raise exceptions
    # in a try/finally to ensure that we clean up the logging configuration.
    unexpected = -1
    try:
        tests = self._collect_tests()
        unexpected = self._run_tests_set(tests, self._port)
    finally:
        self._printer.cleanup()
    return unexpected
def _print_status(self, tests, expected, unexpected):
if len(tests) == expected + unexpected:
status = "Ran %d tests" % len(tests)
else:
status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
if unexpected:
status += " (%d didn't run)" % unexpected
self._printer.write(status)
def _run_tests_set(self, tests, port):
result_count = len(tests)
expected = 0
unexpected = 0
driver_need_restart = False
driver = None
for test in tests:
if driver_need_restart:
_log.debug("%s killing driver" % test)
driver.stop()
driver = None
if not driver:
driver = port.create_driver(worker_number=1)
relative_test_path = self._host.filesystem.relpath(test, self._base_path)
self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))
is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
test_failed, driver_need_restart = self._run_single_test(test, driver, is_chromium_style)
if test_failed:
unexpected = unexpected + 1
else:
expected = expected + 1
self._printer.write('')
if driver:
driver.stop()
#.........这里部分代码省略.........