本文整理汇总了Python中webkitpy.common.host.Host类的典型用法代码示例。如果您正苦于以下问题:Python Host类的具体用法?Python Host怎么用?Python Host使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Host类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _prepare_config
def _prepare_config(self, options, args, tool):
    """Build the rebaseline-server configuration from command-line args.

    args[0] is the results directory containing full_results.json.
    Returns a dict holding the TestConfig plus JSON-serializable data
    (results and platform list) for the web front end.
    """
    results_directory = args[0]
    host = Host()
    host.initialize_scm()

    # print('...') parses identically under Python 2 (parenthesized
    # expression) and Python 3 (function call); the original bare
    # `print 'x'` statement was Python-2-only syntax.
    print('Parsing full_results.json...')
    results_json_path = host.filesystem.join(results_directory, 'full_results.json')
    results_json = json_results_generator.load_json(host.filesystem, results_json_path)

    port = tool.port_factory.get()
    layout_tests_directory = port.layout_tests_dir()
    platforms = host.filesystem.listdir(
        host.filesystem.join(layout_tests_directory, 'platform'))
    self._test_config = TestConfig(port, layout_tests_directory, results_directory,
                                   platforms, host)

    print('Gathering current baselines...')
    self._gather_baselines(results_json)

    return {
        'test_config': self._test_config,
        'results_json': results_json,
        'platforms_json': {
            'platforms': platforms,
            'defaultPlatform': port.name(),
        },
    }
示例2: __init__
def __init__(self, path):
    """Initialize the tool with the path it was launched from."""
    # Host is mixed in next to MultiCommandTool, so each base is
    # initialized explicitly rather than via cooperative super().
    MultiCommandTool.__init__(self)
    Host.__init__(self)

    self._path = path
    self._deprecated_port = None
    self.wakeup_event = threading.Event()
    self.status_server = StatusServer()
示例3: test_import_dir_with_no_tests
def test_import_dir_with_no_tests(self):
    """Importing a directory with no tests should not blow up when hg is absent."""
    # FIXME: Use MockHosts instead.
    host = Host()
    host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))

    importer = TestImporter(host, None, optparse.Values({"overwrite": False}))
    importer.source_directory = importer.path_from_webkit_root("Tools", "Scripts", "webkitpy", "w3c")
    importer.destination_directory = tempfile.mkdtemp(prefix='csswg')

    capture = OutputCapture()
    capture.capture_output()
    try:
        importer.do_import()
    finally:
        # Always restore stdout/stderr and clean up the temp directory,
        # even if the import raised.
        capture.restore_output()
        shutil.rmtree(importer.destination_directory, ignore_errors=True)
示例4: test_import_dir_with_no_tests_and_no_hg
def test_import_dir_with_no_tests_and_no_hg(self):
    """Importing should also survive hg being missing entirely (OSError)."""
    # FIXME: Use MockHosts instead.
    host = Host()
    host.executive = MockExecutive2(exception=OSError())

    importer = TestImporter(host, None, optparse.Values({"overwrite": False}))
    importer.source_directory = importer.path_from_webkit_root("Tools", "Scripts", "webkitpy", "w3c")
    importer.destination_directory = tempfile.mkdtemp(prefix='csswg')

    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        importer.do_import()
    finally:
        # Restore output streams and remove the scratch directory
        # regardless of whether do_import() succeeded.
        output_capture.restore_output()
        shutil.rmtree(importer.destination_directory, ignore_errors=True)
示例5: __init__
def __init__(self):
    """Set up filesystem/SCM access and load the prefixed-property list."""
    host = Host()
    host.initialize_scm()
    self._host = host
    self._filesystem = host.filesystem
    self._webkit_root = host.scm().checkout_root

    # These settings might vary between WebKit and Blink.
    self._css_property_file = self.path_from_webkit_root('Source', 'core', 'css', 'CSSPropertyNames.in')
    self._css_property_split_string = 'alias_for='

    self.prefixed_properties = self.read_webkit_prefixed_css_property_list()
示例6: get_test_baselines
def get_test_baselines(test_file, test_config):
    """Map platform name -> {extension: was_used_for_test} for one test's baselines."""
    # FIXME: This seems like a hack. This only seems used to access the Port.expected_baselines logic.
    class AllPlatformsPort(Port):
        def __init__(self, host):
            super(AllPlatformsPort, self).__init__(host, 'mac')
            # Reverse map: baseline directory -> platform name.
            self._platforms_by_directory = {}
            for platform in test_config.platforms:
                self._platforms_by_directory[self._webkit_baseline_path(platform)] = platform

        def baseline_search_path(self):
            return self._platforms_by_directory.keys()

        def platform_from_directory(self, directory):
            return self._platforms_by_directory[directory]

    test_path = test_config.filesystem.join(test_config.layout_tests_directory, test_file)

    # FIXME: This should get the Host from the test_config to be mockable!
    host = Host()
    host.initialize_scm()
    host.filesystem = test_config.filesystem
    all_platforms_port = AllPlatformsPort(host)

    all_test_baselines = {}
    for extension in ('.txt', '.checksum', '.png'):
        used_baselines = test_config.test_port.expected_baselines(test_file, extension)
        found_baselines = all_platforms_port.expected_baselines(test_file, extension,
                                                                all_baselines=True)
        for platform_directory, expected_filename in found_baselines:
            if not platform_directory:
                continue
            # Baselines directly under LayoutTests belong to the 'base' bucket.
            if platform_directory == test_config.layout_tests_directory:
                platform = 'base'
            else:
                platform = all_platforms_port.platform_from_directory(platform_directory)
            per_platform = all_test_baselines.setdefault(platform, {})
            per_platform[extension] = (platform_directory, expected_filename) in used_baselines
    return all_test_baselines
示例7: __init__
def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
    """Set up host, port, printer and bookkeeping for a perf-test run."""
    self._buildbot_output = buildbot_output
    self._options, self._args = PerfTestsRunner._parse_args(args)
    if port:
        # An injected port (used by tests) brings its own host along.
        self._port = port
        self._host = self._port.host
    else:
        self._host = Host()
        self._port = self._host.port_factory.get(self._options.platform, self._options)
    # NOTE(review): other variants of this constructor call the public
    # initialize_scm(); confirm which spelling this Host revision exposes.
    self._host._initialize_scm()
    self._printer = printing.Printer(self._port, self._options, regular_output,
                                     buildbot_output, configure_logging=False)
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._results = {}
    self._timestamp = time.time()
示例8: __init__
def __init__(self, args=None, port=None):
    """Configure the runner, preferring an injected port (used by tests)."""
    self._options, self._args = PerfTestsRunner._parse_args(args)
    if port:
        self._port = port
        self._host = self._port.host
    else:
        self._host = Host()
        self._port = self._host.port_factory.get(self._options.platform, self._options)

    # The GTK+ and EFL ports only supports WebKit2, so they always use WKTR.
    if self._port.name().startswith(("gtk", "efl")):
        self._options.webkit_test_runner = True

    self._host.initialize_scm()
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._timestamp = time.time()
    self._utc_timestamp = datetime.datetime.utcnow()
示例9: PerfTestsRunner
class PerfTestsRunner(object):
_default_branch = 'webkit-trunk'
EXIT_CODE_BAD_BUILD = -1
EXIT_CODE_BAD_SOURCE_JSON = -2
EXIT_CODE_BAD_MERGE = -3
EXIT_CODE_FAILED_UPLOADING = -4
EXIT_CODE_BAD_PREPARATION = -5
_DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'
def __init__(self, args=None, port=None):
    """Build the host/port pair and record run metadata."""
    options, remaining = PerfTestsRunner._parse_args(args)
    self._options = options
    self._args = remaining
    if port:
        # Tests inject a port that already carries a host.
        self._port = port
        self._host = port.host
    else:
        self._host = Host()
        self._port = self._host.port_factory.get(options.platform, options)
    # The GTK+ and EFL ports only supports WebKit2, so they always use WKTR.
    port_name = self._port.name()
    if port_name.startswith("gtk") or port_name.startswith("efl"):
        self._options.webkit_test_runner = True
    self._host.initialize_scm()
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._timestamp = time.time()
    self._utc_timestamp = datetime.datetime.utcnow()
@staticmethod
def _parse_args(args=None):
    """Parse perf-test command-line options.

    Returns the (options, args) pair produced by optparse.
    """
    def _expand_path(option, opt_str, value, parser):
        # Expand ~ and $VARS so later filesystem calls get a usable path.
        path = os.path.expandvars(os.path.expanduser(value))
        setattr(parser.values, option.dest, path)
    perf_option_list = [
        optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
            help='Set the configuration to Debug'),
        optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
            help='Set the configuration to Release'),
        optparse.make_option("--platform",
            help="Specify port/platform being tested (i.e. chromium-mac)"),
        optparse.make_option("--builder-name",
            help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
        optparse.make_option("--build-number",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--build", dest="build", action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
        optparse.make_option("--no-build", dest="build", action="store_false",
            help="Don't check to see if the DumpRenderTree build is up-to-date."),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        # 600 * 1000 ms = 10 minutes per test.
        optparse.make_option("--time-out-ms", default=600 * 1000,
            help="Set the timeout for each test"),
        optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
            help="Do no generate results JSON and results page."),
        optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
            help="Path to generate a JSON file at; may contain previous results if it already exists."),
        optparse.make_option("--reset-results", action="store_true",
            help="Clears the content in the generated JSON file before adding the results."),
        optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
            help="Only used on bots. Path to a slave configuration file."),
        optparse.make_option("--description",
            help="Add a description to the output JSON file if one is generated"),
        optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
            help="Don't launch a browser with results after the tests are done"),
        optparse.make_option("--test-results-server",
            help="Upload the generated JSON file to the specified server when --output-json-path is present."),
        optparse.make_option("--dump-render-tree", "-1", action="store_false", default=True, dest="webkit_test_runner",
            help="Use DumpRenderTree rather than WebKitTestRunner."),
        optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
            help="Run all tests, including the ones in the Skipped list."),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
        optparse.make_option("--additional-drt-flag", action="append",
            default=[], help="Additional command line flag to pass to DumpRenderTree "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--driver-name", type="string",
            help="Alternative DumpRenderTree binary to use"),
        optparse.make_option("--repeat", default=1, type="int",
            help="Specify number of times to run test set (default: 1)."),
        # -1 means "let the port pick its own default count".
        optparse.make_option("--test-runner-count", default=-1, type="int",
            help="Specify number of times to invoke test runner for each performance test."),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
    ]
    return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
def _collect_tests(self):
test_extensions = ['.html', '.svg']
def _is_test_file(filesystem, dirname, filename):
return filesystem.splitext(filename)[1] in test_extensions
filesystem = self._host.filesystem
paths = []
#.........这里部分代码省略.........
示例10: PerfTestsRunner
class PerfTestsRunner(object):
_default_branch = "webkit-trunk"
EXIT_CODE_BAD_BUILD = -1
EXIT_CODE_BAD_SOURCE_JSON = -2
EXIT_CODE_BAD_MERGE = -3
EXIT_CODE_FAILED_UPLOADING = -4
EXIT_CODE_BAD_PREPARATION = -5
_DEFAULT_JSON_FILENAME = "PerformanceTestsResults.json"
def __init__(self, args=None, port=None):
    """Configure the runner; an injected port (for tests) supplies its host."""
    parsed_options, leftover_args = PerfTestsRunner._parse_args(args)
    self._options = parsed_options
    self._args = leftover_args
    if port:
        self._port = port
        self._host = port.host
    else:
        self._host = Host()
        self._port = self._host.port_factory.get(parsed_options.platform, parsed_options)
    self._host.initialize_scm()
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    # Record both a POSIX timestamp and a UTC datetime for the results JSON.
    self._timestamp = time.time()
    self._utc_timestamp = datetime.datetime.utcnow()
@staticmethod
def _parse_args(args=None):
def _expand_path(option, opt_str, value, parser):
path = os.path.expandvars(os.path.expanduser(value))
setattr(parser.values, option.dest, path)
perf_option_list = [
optparse.make_option(
"--debug",
action="store_const",
const="Debug",
dest="configuration",
help="Set the configuration to Debug",
),
optparse.make_option(
"--release",
action="store_const",
const="Release",
dest="configuration",
help="Set the configuration to Release",
),
optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
optparse.make_option(
"--builder-name",
help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
),
optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
optparse.make_option(
"--build",
dest="build",
action="store_true",
default=True,
help="Check to ensure the DumpRenderTree build is up-to-date (default).",
),
optparse.make_option(
"--no-build",
dest="build",
action="store_false",
help="Don't check to see if the DumpRenderTree build is up-to-date.",
),
optparse.make_option(
"--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)",
),
optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
optparse.make_option(
"--no-results",
action="store_false",
dest="generate_results",
default=True,
help="Do no generate results JSON and results page.",
),
optparse.make_option(
"--output-json-path",
action="callback",
callback=_expand_path,
type="str",
help="Path to generate a JSON file at; may contain previous results if it already exists.",
),
optparse.make_option(
"--reset-results",
action="store_true",
help="Clears the content in the generated JSON file before adding the results.",
),
optparse.make_option(
"--slave-config-json-path",
action="callback",
callback=_expand_path,
type="str",
help="Only used on bots. Path to a slave configuration file.",
),
optparse.make_option("--description", help="Add a description to the output JSON file if one is generated"),
optparse.make_option(
"--no-show-results",
action="store_false",
default=True,
#.........这里部分代码省略.........
示例11: PerfTestsRunner
class PerfTestsRunner(object):
_default_branch = "webkit-trunk"
_EXIT_CODE_BAD_BUILD = -1
_EXIT_CODE_BAD_JSON = -2
_EXIT_CODE_FAILED_UPLOADING = -3
def __init__(self, args=None, port=None):
    """Build host/port state and bookkeeping for a perf-test run."""
    self._options, self._args = PerfTestsRunner._parse_args(args)
    if port:
        # Tests inject a port that already carries a host.
        self._port = port
        self._host = port.host
    else:
        self._host = Host()
        self._port = self._host.port_factory.get(self._options.platform, self._options)
    # NOTE(review): sibling versions of this class call the public
    # initialize_scm(); verify which name this Host revision provides.
    self._host._initialize_scm()
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._results = {}
    self._timestamp = time.time()
@staticmethod
def _parse_args(args=None):
    """Parse perf-test command-line options; returns optparse's (options, args)."""
    perf_option_list = [
        optparse.make_option(
            "--debug",
            action="store_const",
            const="Debug",
            dest="configuration",
            help="Set the configuration to Debug",
        ),
        optparse.make_option(
            "--release",
            action="store_const",
            const="Release",
            dest="configuration",
            help="Set the configuration to Release",
        ),
        optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
        # Convenience alias that just presets the platform value.
        optparse.make_option(
            "--chromium",
            action="store_const",
            const="chromium",
            dest="platform",
            help="Alias for --platform=chromium",
        ),
        optparse.make_option(
            "--builder-name",
            help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
        ),
        optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
        optparse.make_option(
            "--build",
            dest="build",
            action="store_true",
            default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date (default).",
        ),
        optparse.make_option(
            "--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)",
        ),
        # 600 * 1000 ms = 10 minutes per test.
        optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
        optparse.make_option(
            "--pause-before-testing",
            dest="pause_before_testing",
            action="store_true",
            default=False,
            help="Pause before running the tests to let user attach a performance monitor.",
        ),
        optparse.make_option("--output-json-path", help="Filename of the JSON file that summaries the results"),
        optparse.make_option(
            "--source-json-path",
            help="Path to a JSON file to be merged into the JSON file when --output-json-path is present",
        ),
        optparse.make_option(
            "--test-results-server",
            help="Upload the generated JSON file to the specified server when --output-json-path is present",
        ),
        optparse.make_option(
            "--webkit-test-runner",
            "-2",
            action="store_true",
            help="Use WebKitTestRunner rather than DumpRenderTree.",
        ),
    ]
    return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
def _collect_tests(self):
"""Return the list of tests found."""
def _is_test_file(filesystem, dirname, filename):
return filesystem.splitext(filename)[1] in [".html", ".svg"]
filesystem = self._host.filesystem
paths = []
for arg in self._args:
paths.append(arg)
relpath = filesystem.relpath(arg, self._base_path)
if relpath:
#.........这里部分代码省略.........
示例12: __init__
def __init__(self):
    """Set up host/SCM access and load the prefixed CSS property list."""
    host = Host()
    host.initialize_scm()
    self._host = host
    self._filesystem = host.filesystem
    self._webkit_root = host.scm().checkout_root
    self.prefixed_properties = self.read_webkit_prefixed_css_property_list()
示例13: PerfTestsRunner
class PerfTestsRunner(object):
_perf_tests_base_dir = 'PerformanceTests'
_test_directories_for_chromium_style_tests = ['inspector']
_default_branch = 'webkit-trunk'
_EXIT_CODE_BAD_BUILD = -1
_EXIT_CODE_BAD_JSON = -2
_EXIT_CODE_FAILED_UPLOADING = -3
def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
    """Wire up output streams, host/port and the result bookkeeping."""
    self._buildbot_output = buildbot_output
    self._options, self._args = PerfTestsRunner._parse_args(args)

    if port:
        # A caller-supplied port (tests) already has a host attached.
        self._port = port
        self._host = port.host
    else:
        self._host = Host()
        self._port = self._host.port_factory.get(self._options.platform, self._options)

    # NOTE(review): other revisions of this class use the public
    # initialize_scm(); confirm which name this Host version exposes.
    self._host._initialize_scm()

    self._printer = printing.Printer(self._port, self._options, regular_output,
                                     buildbot_output, configure_logging=False)
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._results = {}
    self._timestamp = time.time()
@staticmethod
def _parse_args(args=None):
    """Parse perf-test options, including the shared printing options.

    Returns optparse's (options, args) pair.
    """
    print_options = printing.print_options()
    perf_option_list = [
        optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
            help='Set the configuration to Debug'),
        optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
            help='Set the configuration to Release'),
        optparse.make_option("--platform",
            help="Specify port/platform being tested (i.e. chromium-mac)"),
        optparse.make_option("--builder-name",
            help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
        optparse.make_option("--build-number",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--build", dest="build", action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        # 600 * 1000 ms = 10 minutes per test.
        optparse.make_option("--time-out-ms", default=600 * 1000,
            help="Set the timeout for each test"),
        optparse.make_option("--output-json-path",
            help="Filename of the JSON file that summaries the results"),
        optparse.make_option("--source-json-path",
            help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
        optparse.make_option("--test-results-server",
            help="Upload the generated JSON file to the specified server when --output-json-path is present"),
    ]
    option_list = (perf_option_list + print_options)
    return optparse.OptionParser(option_list=option_list).parse_args(args)
def _collect_tests(self):
    """Return the list of tests found."""
    def _looks_like_test(filesystem, dirname, filename):
        return filename.endswith('.html')

    skipped_directories = set(['.svn', 'resources'])
    candidates = find_files.find(self._host.filesystem, self._base_path, self._args,
                                 skipped_directories, _looks_like_test)
    # Drop anything the port explicitly skips for perf testing.
    tests = []
    for candidate in candidates:
        relative_name = self._port.relative_perf_test_filename(candidate)
        if not self._port.skips_perf_test(relative_name):
            tests.append(candidate)
    return tests
def run(self):
    """Run the collected perf tests and optionally publish results.

    Returns 0 after help printing, a negative _EXIT_CODE_* constant on an
    infrastructure failure, or the count of unexpected test results.
    """
    if self._options.help_printing:
        self._printer.help_printing()
        self._printer.cleanup()
        return 0
    if not self._port.check_build(needs_http=False):
        _log.error("Build not up to date for %s" % self._port._path_to_driver())
        return self._EXIT_CODE_BAD_BUILD
    # We wrap any parts of the run that are slow or likely to raise exceptions
    # in a try/finally to ensure that we clean up the logging configuration.
    unexpected = -1
    try:
        tests = self._collect_tests()
        unexpected = self._run_tests_set(sorted(list(tests)), self._port)
    finally:
        self._printer.cleanup()
    options = self._options
    if self._options.output_json_path:
        # FIXME: Add --branch or auto-detect the branch we're in
        test_results_server = options.test_results_server
        branch = self._default_branch if test_results_server else None
        build_number = int(options.build_number) if options.build_number else None
        # Only report a JSON failure when the test run itself was clean.
        if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                                   branch, options.platform, options.builder_name, build_number) and not unexpected:
            return self._EXIT_CODE_BAD_JSON
        if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
            return self._EXIT_CODE_FAILED_UPLOADING
    return unexpected
def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
#.........这里部分代码省略.........
示例14: __init__
def __init__(self, path):
    """Initialize the tool, recording the path it was launched from."""
    # Host is mixed in next to MultiCommandTool; each base is initialized
    # explicitly rather than via cooperative super().
    MultiCommandTool.__init__(self)
    Host.__init__(self)
    self._path = path
示例15: fake_dir_path
def fake_dir_path(self, dirname):
    """Return an absolute path to LayoutTests/css/<dirname> (need not exist)."""
    fs = Host().filesystem
    webkit_base = WebKitFinder(fs).webkit_base()
    return fs.abspath(fs.join(webkit_base, "LayoutTests", "css", dirname))