本文整理汇总了Python中webkitpy.common.host.Host.initialize_scm方法的典型用法代码示例。如果您正苦于以下问题:Python Host.initialize_scm方法的具体用法?Python Host.initialize_scm怎么用?Python Host.initialize_scm使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类webkitpy.common.host.Host
的用法示例。
在下文中一共展示了Host.initialize_scm方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _prepare_config
# 需要导入模块: from webkitpy.common.host import Host [as 别名]
# 或者: from webkitpy.common.host.Host import initialize_scm [as 别名]
def _prepare_config(self, options, args, tool):
results_directory = args[0]
host = Host()
host.initialize_scm()
print 'Parsing full_results.json...'
results_json_path = host.filesystem.join(results_directory, 'full_results.json')
results_json = json_results_generator.load_json(host.filesystem, results_json_path)
port = tool.port_factory.get()
layout_tests_directory = port.layout_tests_dir()
platforms = host.filesystem.listdir(host.filesystem.join(layout_tests_directory, 'platform'))
self._test_config = TestConfig(port, layout_tests_directory, results_directory, platforms, host)
print 'Gathering current baselines...'
self._gather_baselines(results_json)
return {
'test_config': self._test_config,
"results_json": results_json,
"platforms_json": {
'platforms': platforms,
'defaultPlatform': port.name(),
},
}
示例2: get_test_baselines
# 需要导入模块: from webkitpy.common.host import Host [as 别名]
# 或者: from webkitpy.common.host.Host import initialize_scm [as 别名]
def get_test_baselines(test_file, test_config):
    """Return {platform: {extension: was_used_for_test}} describing every
    baseline of |test_file| across the platforms in |test_config|."""
    # FIXME: This seems like a hack. It only exists to reach the
    # Port.expected_baselines logic.
    class AllPlatformsPort(Port):
        def __init__(self, host):
            super(AllPlatformsPort, self).__init__(host, 'mac')
            self._platforms_by_directory = dict(
                (self._webkit_baseline_path(platform), platform)
                for platform in test_config.platforms)

        def baseline_search_path(self):
            return self._platforms_by_directory.keys()

        def platform_from_directory(self, directory):
            return self._platforms_by_directory[directory]

    test_path = test_config.filesystem.join(test_config.layout_tests_directory, test_file)

    # FIXME: This should get the Host from the test_config to be mockable!
    host = Host()
    host.initialize_scm()
    host.filesystem = test_config.filesystem
    all_platforms_port = AllPlatformsPort(host)

    all_test_baselines = {}
    for extension in ('.txt', '.checksum', '.png'):
        used_baselines = test_config.test_port.expected_baselines(test_file, extension)
        candidates = all_platforms_port.expected_baselines(test_file, extension, all_baselines=True)
        for platform_directory, expected_filename in candidates:
            if not platform_directory:
                continue
            if platform_directory == test_config.layout_tests_directory:
                platform = 'base'
            else:
                platform = all_platforms_port.platform_from_directory(platform_directory)
            was_used = (platform_directory, expected_filename) in used_baselines
            all_test_baselines.setdefault(platform, {})[extension] = was_used
    return all_test_baselines
示例3: PerfTestsRunner
# 需要导入模块: from webkitpy.common.host import Host [as 别名]
# 或者: from webkitpy.common.host.Host import initialize_scm [as 别名]
# Runs the WebKit performance tests via a Port and reports results.
class PerfTestsRunner(object):
# Presumably the branch label recorded with uploaded results — confirm against JSON generation code.
_default_branch = 'webkit-trunk'
# Distinct process exit codes, one per failure mode of the runner.
EXIT_CODE_BAD_BUILD = -1
EXIT_CODE_BAD_SOURCE_JSON = -2
EXIT_CODE_BAD_MERGE = -3
EXIT_CODE_FAILED_UPLOADING = -4
EXIT_CODE_BAD_PREPARATION = -5
# Output filename used when --output-json-path is not supplied (see _parse_args).
_DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'
def __init__(self, args=None, port=None):
    """Parse |args| and bind the host/port pair used for the run.

    An explicit |port| (e.g. from unit tests) supplies its own host;
    otherwise a real Host is created and asked for the matching port.
    """
    self._options, self._args = PerfTestsRunner._parse_args(args)
    if not port:
        self._host = Host()
        self._port = self._host.port_factory.get(self._options.platform, self._options)
    else:
        self._port = port
        self._host = self._port.host

    # The GTK+ and EFL ports only support WebKit2, so they always use WKTR.
    if self._port.name().startswith(("gtk", "efl")):
        self._options.webkit_test_runner = True

    self._host.initialize_scm()
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._timestamp = time.time()
    self._utc_timestamp = datetime.datetime.utcnow()
@staticmethod
def _parse_args(args=None):
def _expand_path(option, opt_str, value, parser):
path = os.path.expandvars(os.path.expanduser(value))
setattr(parser.values, option.dest, path)
perf_option_list = [
optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
help='Set the configuration to Debug'),
optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
help='Set the configuration to Release'),
optparse.make_option("--platform",
help="Specify port/platform being tested (i.e. chromium-mac)"),
optparse.make_option("--builder-name",
help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
optparse.make_option("--build-number",
help=("The build number of the builder running this script.")),
optparse.make_option("--build", dest="build", action="store_true", default=True,
help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
optparse.make_option("--no-build", dest="build", action="store_false",
help="Don't check to see if the DumpRenderTree build is up-to-date."),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--time-out-ms", default=600 * 1000,
help="Set the timeout for each test"),
optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
help="Do no generate results JSON and results page."),
optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
help="Path to generate a JSON file at; may contain previous results if it already exists."),
optparse.make_option("--reset-results", action="store_true",
help="Clears the content in the generated JSON file before adding the results."),
optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
help="Only used on bots. Path to a slave configuration file."),
optparse.make_option("--description",
help="Add a description to the output JSON file if one is generated"),
optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
help="Don't launch a browser with results after the tests are done"),
optparse.make_option("--test-results-server",
help="Upload the generated JSON file to the specified server when --output-json-path is present."),
optparse.make_option("--dump-render-tree", "-1", action="store_false", default=True, dest="webkit_test_runner",
help="Use DumpRenderTree rather than WebKitTestRunner."),
optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
help="Run all tests, including the ones in the Skipped list."),
optparse.make_option("--profile", action="store_true",
help="Output per-test profile information."),
optparse.make_option("--profiler", action="store",
help="Output per-test profile information, using the specified profiler."),
optparse.make_option("--additional-drt-flag", action="append",
default=[], help="Additional command line flag to pass to DumpRenderTree "
"Specify multiple times to add multiple flags."),
optparse.make_option("--driver-name", type="string",
help="Alternative DumpRenderTree binary to use"),
optparse.make_option("--repeat", default=1, type="int",
help="Specify number of times to run test set (default: 1)."),
optparse.make_option("--test-runner-count", default=-1, type="int",
help="Specify number of times to invoke test runner for each performance test."),
optparse.make_option("--wrapper",
help="wrapper command to insert before invocations of "
"DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
"running. (Example: --wrapper='valgrind --smc-check=all')"),
]
return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
def _collect_tests(self):
test_extensions = ['.html', '.svg']
def _is_test_file(filesystem, dirname, filename):
return filesystem.splitext(filename)[1] in test_extensions
filesystem = self._host.filesystem
paths = []
#.........这里部分代码省略.........
示例4: PerfTestsRunner
# 需要导入模块: from webkitpy.common.host import Host [as 别名]
# 或者: from webkitpy.common.host.Host import initialize_scm [as 别名]
# Runs the performance tests via a Port and reports results.
class PerfTestsRunner(object):
# Presumably the branch label recorded with uploaded results — confirm against JSON generation code.
_default_branch = 'webkit-trunk'
# Distinct process exit codes, one per failure mode of the runner.
EXIT_CODE_BAD_BUILD = -1
EXIT_CODE_BAD_SOURCE_JSON = -2
EXIT_CODE_BAD_MERGE = -3
EXIT_CODE_FAILED_UPLOADING = -4
EXIT_CODE_BAD_PREPARATION = -5
# Output filename used when --output-json-path is not supplied (see _parse_args).
_DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'
def __init__(self, args=None, port=None):
    """Set up options plus the host/port pair used for the run.

    A caller-supplied |port| (e.g. from unit tests) brings its own host;
    otherwise a real Host is created and queried for the right port.
    """
    self._options, self._args = PerfTestsRunner._parse_args(args)
    if not port:
        self._host = Host()
        self._port = self._host.port_factory.get(self._options.platform, self._options)
    else:
        self._port = port
        self._host = self._port.host
    self._host.initialize_scm()
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._timestamp = time.time()
    self._utc_timestamp = datetime.datetime.utcnow()
@staticmethod
def _parse_args(args=None):
    """Parse the perf-test command line; returns (options, remaining_args)."""
    def _store_expanded_path(option, opt_str, value, parser):
        # Expand "~" and environment variables so bot-supplied paths resolve.
        setattr(parser.values, option.dest, os.path.expandvars(os.path.expanduser(value)))

    parser = optparse.OptionParser()
    parser.add_option('--debug', action='store_const', const='Debug', dest='configuration',
                      help='Set the configuration to Debug')
    parser.add_option('--release', action='store_const', const='Release', dest='configuration',
                      help='Set the configuration to Release')
    parser.add_option('-t', '--target', dest='configuration',
                      help='Specify the target build subdirectory under src/out/')
    parser.add_option('--platform',
                      help='Specify port/platform being tested (e.g. mac)')
    parser.add_option('--chromium', action='store_const', const='chromium', dest='platform',
                      help='Alias for --platform=chromium')
    parser.add_option('--android', action='store_const', const='android', dest='platform',
                      help='Alias for --platform=android')
    parser.add_option('--builder-name',
                      help='The name of the builder shown on the waterfall running this script e.g. google-mac-2.')
    parser.add_option('--build-number',
                      help='The build number of the builder running this script.')
    parser.add_option('--build', dest='build', action='store_true', default=True,
                      help='Check to ensure the DumpRenderTree build is up to date (default).')
    parser.add_option('--no-build', dest='build', action='store_false',
                      help="Don't check to see if the DumpRenderTree build is up to date.")
    parser.add_option('--build-directory',
                      help='Path to the directory under which build files are kept (should not include configuration)')
    parser.add_option('--time-out-ms', default=600 * 1000,
                      help='Set the timeout for each test')
    parser.add_option('--no-results', action='store_false', dest='generate_results', default=True,
                      help='Do no generate results JSON and results page.')
    parser.add_option('--output-json-path', action='callback', callback=_store_expanded_path, type='str',
                      help='Path to generate a JSON file at; may contain previous results if it already exists.')
    parser.add_option('--reset-results', action='store_true',
                      help='Clears the content in the generated JSON file before adding the results.')
    parser.add_option('--slave-config-json-path', action='callback', callback=_store_expanded_path, type='str',
                      help='Only used on bots. Path to a slave configuration file.')
    parser.add_option('--description',
                      help='Add a description to the output JSON file if one is generated')
    parser.add_option('--no-show-results', action='store_false', default=True, dest='show_results',
                      help="Don't launch a browser with results after the tests are done")
    parser.add_option('--test-results-server',
                      help='Upload the generated JSON file to the specified server when --output-json-path is present.')
    parser.add_option('--force', dest='use_skipped_list', action='store_false', default=True,
                      help='Run all tests, including the ones in the Skipped list.')
    parser.add_option('--profile', action='store_true',
                      help='Output per-test profile information.')
    parser.add_option('--profiler', action='store',
                      help='Output per-test profile information, using the specified profiler.')
    parser.add_option('--additional-driver-flag', action='append', default=[],
                      help='Additional command line flag to pass to DumpRenderTree '
                           'Specify multiple times to add multiple flags.')
    parser.add_option('--driver-name', type='string',
                      help='Alternative DumpRenderTree binary to use')
    parser.add_option('--content-shell', action='store_true',
                      help='Use Content Shell instead of DumpRenderTree')
    parser.add_option('--repeat', default=1, type='int',
                      help='Specify number of times to run test set (default: 1).')
    parser.add_option('--test-runner-count', default=DEFAULT_TEST_RUNNER_COUNT, type='int',
                      help='Specify number of times to invoke test runner for each performance test.')
    return parser.parse_args(args)
def _collect_tests(self):
test_extensions = ['.html', '.svg']
def _is_test_file(filesystem, dirname, filename):
return filesystem.splitext(filename)[1] in test_extensions
filesystem = self._host.filesystem
paths = []
for arg in self._args:
if filesystem.exists(filesystem.join(self._base_path, arg)):
paths.append(arg)
#.........这里部分代码省略.........
示例5: W3CTestConverter
# 需要导入模块: from webkitpy.common.host import Host [as 别名]
# 或者: from webkitpy.common.host.Host import initialize_scm [as 别名]
# Converts imported W3C tests (paths and prefixed CSS properties) for the WebKit/Blink tree.
class W3CTestConverter(object):
def __init__(self):
    """Bind the converter to the real host's filesystem and SCM checkout,
    and precompute the prefixed-property list from CSSPropertyNames.in."""
    host = Host()
    host.initialize_scm()
    self._host = host
    self._filesystem = host.filesystem
    self._webkit_root = host.scm().checkout_root

    # These settings might vary between WebKit and Blink.
    self._css_property_file = self.path_from_webkit_root('Source', 'core', 'css', 'CSSPropertyNames.in')
    self._css_property_split_string = 'alias_for='
    self.prefixed_properties = self.read_webkit_prefixed_css_property_list()
def path_from_webkit_root(self, *comps):
return self._filesystem.abspath(self._filesystem.join(self._webkit_root, *comps))
def read_webkit_prefixed_css_property_list(self):
prefixed_properties = []
contents = self._filesystem.read_text_file(self._css_property_file)
for line in contents.splitlines():
# Find lines starting with the -webkit- prefix.
match = re.match('-webkit-[\w|-]*', line)
if match:
# Ignore lines where both the prefixed and non-prefixed property
# are supported - denoted by -webkit-some-property = some-property.
fields = line.split(self._css_property_split_string)
if len(fields) == 2 and fields[1].strip() in fields[0].strip():
continue
prefixed_properties.append(match.group(0))
return prefixed_properties
def convert_for_webkit(self, new_path, filename):
    """Convert |filename|'s contents so it works at |new_path| in WebKit.

    Returns (modified_properties, modified_text) when the file changed,
    None otherwise.
    """
    raw_contents = self._filesystem.read_binary_file(filename)
    if filename.endswith('.css'):
        return self.convert_css(raw_contents, filename)
    return self.convert_html(new_path, raw_contents, filename)
def convert_css(self, contents, filename):
    """Convert CSS |contents| by delegating to
    add_webkit_prefix_to_unprefixed_properties (defined elsewhere in this class);
    presumably it -webkit- prefixes properties from self.prefixed_properties — confirm there."""
    return self.add_webkit_prefix_to_unprefixed_properties(contents, filename)
def convert_html(self, new_path, contents, filename):
    """Convert HTML |contents|: rewrite testharness paths and prefix CSS properties.

    Returns the (properties, content) pair when anything changed, else None.
    """
    doc = BeautifulSoup(contents)
    paths_changed = self.convert_testharness_paths(doc, new_path, filename)
    properties_and_content = self.convert_prefixed_properties(doc, filename)
    if paths_changed or properties_and_content[0]:
        return properties_and_content
    return None
def convert_testharness_paths(self, doc, new_path, filename):
    """Rewrite testharness.js/.css references in |doc| to point at the copy in |new_path|.

    Returns True when the document was modified; False when nothing matched
    or a matching tag was malformed (conversion abandoned).
    """
    # W3C tests reference the harness files via an absolute /resources/ path,
    # as scripts (.js) or links (.css).
    pattern = re.compile('/resources/testharness')
    testharness_tags = doc.findAll(src=pattern) + doc.findAll(href=pattern)
    if not testharness_tags:
        return False

    resources_path = self.path_from_webkit_root('LayoutTests', 'resources')
    resources_relpath = self._filesystem.relpath(resources_path, new_path)
    for tag in testharness_tags:
        # FIXME: We need to handle img, audio, video tags also.
        attr = 'src' if tag.name == 'script' else 'href'
        if attr not in tag.attrMap:
            # FIXME: Figure out what to do w/ invalid tags. For now, we return False
            # and leave the document unmodified, which means that it'll probably fail to run.
            _log.error("Missing an attr in %s" % filename)
            return False
        replacement = Tag(doc, tag.name, tag.attrs)
        replacement[attr] = re.sub(pattern, resources_relpath + '/testharness', tag[attr])
        self.replace_tag(tag, replacement)
    return True
def convert_prefixed_properties(self, doc, filename):
""" Searches a BeautifulSoup |doc| for any CSS properties requiring the -webkit- prefix and converts them.
Returns the list of converted properties and the modified document as a string """
converted_properties = []
# Look for inline and document styles.
inline_styles = doc.findAll(style=re.compile('.*'))
style_tags = doc.findAll('style')
all_styles = inline_styles + style_tags
#.........这里部分代码省略.........
示例6: PerfTestsRunner
# 需要导入模块: from webkitpy.common.host import Host [as 别名]
# 或者: from webkitpy.common.host.Host import initialize_scm [as 别名]
# Runs the performance tests via a Port and reports results.
class PerfTestsRunner(object):
# Presumably the branch label recorded with uploaded results — confirm against JSON generation code.
_default_branch = "webkit-trunk"
# Distinct process exit codes, one per failure mode of the runner.
EXIT_CODE_BAD_BUILD = -1
EXIT_CODE_BAD_SOURCE_JSON = -2
EXIT_CODE_BAD_MERGE = -3
EXIT_CODE_FAILED_UPLOADING = -4
EXIT_CODE_BAD_PREPARATION = -5
# Output filename used when --output-json-path is not supplied (see _parse_args).
_DEFAULT_JSON_FILENAME = "PerformanceTestsResults.json"
def __init__(self, args=None, port=None):
    """Parse |args| and bind the host/port pair for this run.

    When |port| is given (e.g. by unit tests) its host is reused;
    otherwise a real Host is built and asked for the platform's port.
    """
    self._options, self._args = PerfTestsRunner._parse_args(args)
    if not port:
        self._host = Host()
        self._port = self._host.port_factory.get(self._options.platform, self._options)
    else:
        self._port = port
        self._host = self._port.host
    self._host.initialize_scm()
    self._webkit_base_dir_len = len(self._port.webkit_base())
    self._base_path = self._port.perf_tests_dir()
    self._timestamp = time.time()
    self._utc_timestamp = datetime.datetime.utcnow()
@staticmethod
def _parse_args(args=None):
def _expand_path(option, opt_str, value, parser):
path = os.path.expandvars(os.path.expanduser(value))
setattr(parser.values, option.dest, path)
perf_option_list = [
optparse.make_option(
"--debug",
action="store_const",
const="Debug",
dest="configuration",
help="Set the configuration to Debug",
),
optparse.make_option(
"--release",
action="store_const",
const="Release",
dest="configuration",
help="Set the configuration to Release",
),
optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
optparse.make_option(
"--builder-name",
help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
),
optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
optparse.make_option(
"--build",
dest="build",
action="store_true",
default=True,
help="Check to ensure the DumpRenderTree build is up-to-date (default).",
),
optparse.make_option(
"--no-build",
dest="build",
action="store_false",
help="Don't check to see if the DumpRenderTree build is up-to-date.",
),
optparse.make_option(
"--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)",
),
optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
optparse.make_option(
"--no-results",
action="store_false",
dest="generate_results",
default=True,
help="Do no generate results JSON and results page.",
),
optparse.make_option(
"--output-json-path",
action="callback",
callback=_expand_path,
type="str",
help="Path to generate a JSON file at; may contain previous results if it already exists.",
),
optparse.make_option(
"--reset-results",
action="store_true",
help="Clears the content in the generated JSON file before adding the results.",
),
optparse.make_option(
"--slave-config-json-path",
action="callback",
callback=_expand_path,
type="str",
help="Only used on bots. Path to a slave configuration file.",
),
optparse.make_option("--description", help="Add a description to the output JSON file if one is generated"),
optparse.make_option(
"--no-show-results",
action="store_false",
default=True,
#.........这里部分代码省略.........