This article collects typical usage examples of the Python class treeherder.log_parser.artifactbuildercollection.ArtifactBuilderCollection. If you have been wondering what ArtifactBuilderCollection is for, how to use it, or what real usages of it look like, the curated examples below should help.
Below are 15 code examples of the ArtifactBuilderCollection class, sorted by popularity by default.
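All of the examples share one basic pattern: construct an ArtifactBuilderCollection for a log URL (optionally passing one or more builders), call parse(), then read the results from its artifacts dictionary. The following is a minimal sketch of that pattern; the log URL is a placeholder for illustration and is not taken from any example below.

from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection

# Placeholder log URL, used for illustration only.
log_url = "http://example.com/sample_log.txt.gz"

# With no explicit builders, the collection falls back to its default builder set.
collection = ArtifactBuilderCollection(log_url)
collection.parse()

# Each builder contributes one named artifact, e.g. "text_log_summary" or "Job Info".
for name, artifact in collection.artifacts.items():
    print(name, artifact)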
Example 1: do_test
def do_test(log, check_errors=True):
    """
    Test a single log.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """
    url = "file://{0}".format(
        SampleData().get_log_path("{0}.txt.gz".format(log)))

    builder = BuildbotLogViewArtifactBuilder(url, check_errors=check_errors)
    lpc = ArtifactBuilderCollection(url, builders=builder)
    lpc.parse()
    act = lpc.artifacts[builder.name]

    # we can't compare the "logurl" field, because it's a fully qualified url,
    # so it will be different depending on the config it's run in.
    assert "logurl" in act
    del act["logurl"]

    exp = test_utils.load_exp("{0}.logview.json".format(log))

    # :: Uncomment to create the ``exp`` files, if you're making a lot of them
    # with open(SampleData().get_log_path("{0}.logview.json".format(log)), "w") as f:
    #     f.write(json.dumps(act, indent=4))
    #     f.close()

    # log urls won't match in tests, since they're machine specific,
    # but leave it in the exp file as an example of what the real structure
    # should look like.
    del exp["logurl"]

    assert act == exp  # , diff(exp, act)
Source: project treeherder-service (developer: GoogleInternetAuthorityG2SUNGHAN), file: test_log_view_artifact_builder.py
Example 2: handle
def handle(self, *args, **options):
    if len(args) != 1:
        raise CommandError("Need to specify (only) log URL")

    if options['profile']:
        num_runs = options['profile']
    else:
        num_runs = 1

    times = []
    for i in range(num_runs):
        start = time.time()
        artifact_bc = ArtifactBuilderCollection(args[0],
                                                check_errors=True)
        artifact_bc.parse()
        times.append(time.time() - start)

    if not options['profile']:
        for name, artifact in artifact_bc.artifacts.items():
            print("%s, %s" % (name, json.dumps(artifact)))

    if options['profile']:
        print("Timings: %s" % times)
        print("Average: %s" % (sum(times) / len(times)))
        print("Total: %s" % sum(times))
Example 3: test_all_builders_complete
def test_all_builders_complete():
    """Test that parsing creates the correct structure when parser.complete is True."""
    log = "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50"
    url = "file://{0}".format(
        SampleData().get_log_path("{0}.txt.gz".format(log)))

    lpc = ArtifactBuilderCollection(
        url,
    )
    for builder in lpc.builders:
        builder.parser.complete = True

    lpc.parse()
    exp = {
        "text_log_summary": {
            "step_data": {
                "steps": [],
                "errors_truncated": False
            },
        },
        "Job Info": {
            "job_details": []
        }
    }

    act = lpc.artifacts

    # we can't compare the "logurl" field, because it's a fully qualified url,
    # so it will be different depending on the config it's run in.
    assert "logurl" in act["text_log_summary"]
    assert "logurl" in act["Job Info"]
    del act["Job Info"]["logurl"]
    del act["text_log_summary"]["logurl"]

    assert exp == lpc.artifacts, diff(exp, lpc.artifacts)
Example 4: test_all_builders_complete
def test_all_builders_complete():
    """Test that parsing creates the correct structure when parser.complete is True."""
    url = add_log_response(
        "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50.txt.gz"
    )
    lpc = ArtifactBuilderCollection(url)
    for builder in lpc.builders:
        builder.parser.complete = True

    lpc.parse()
    exp = {
        "text_log_summary": {
            "step_data": {
                "steps": [],
                "errors_truncated": False
            },
            "logurl": url,
        },
        "Job Info": {
            "job_details": [],
            "logurl": url,
        }
    }

    assert exp == lpc.artifacts
Example 5: extract_text_log_artifacts
def extract_text_log_artifacts(project, log_url, job_guid):
    """Generate a summary artifact for the raw text log."""
    # parse a log given its url
    artifact_bc = ArtifactBuilderCollection(log_url)
    artifact_bc.parse()

    artifact_list = []
    for name, artifact in artifact_bc.artifacts.items():
        if name == 'Job Info':
            for detail in artifact['job_details']:
                if ('title' in detail and detail['title'] == 'artifact uploaded'
                        and detail['value'].endswith('_errorsummary.log')):
                    # using .send_task to avoid an import loop.
                    celery_app.send_task('store-error-summary',
                                         [project, detail['url'], job_guid],
                                         routing_key='store_error_summary')

        artifact_list.append({
            "job_guid": job_guid,
            "name": name,
            "type": 'json',
            "blob": json.dumps(artifact)
        })

    artifact_list.extend(get_error_summary_artifacts(artifact_list))

    return artifact_list
Example 6: do_test
def do_test(log):
    """
    Test a single log with the ``JobArtifactBuilder``.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """
    url = add_log_response("{}.txt.gz".format(log))
    builder = BuildbotJobArtifactBuilder(url)
    lpc = ArtifactBuilderCollection(url, builders=builder)
    lpc.parse()
    act = lpc.artifacts[builder.name]
    exp = test_utils.load_exp("{0}.jobartifact.json".format(log))

    # :: Uncomment to create the ``exp`` files, if you're making a lot of them
    # import json
    # from tests.sampledata import SampleData
    # with open(SampleData().get_log_path("{0}.jobartifact.json".format(log)), "w") as f:
    #     f.write(json.dumps(act, indent=4))

    # assert act == exp, diff(exp, act)
    # if you want to gather results for a new test, use this
    assert len(act) == len(exp)
    for index, artifact in act.items():
        assert artifact == exp[index]
Example 7: do_test
def do_test(log):
    """
    Test a single log with the ``JobArtifactBuilder``.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """
    url = "file://{0}".format(
        SampleData().get_log_path("{0}.txt.gz".format(log)))
    exp = test_utils.load_exp("{0}.jobartifact.json".format(log))

    builder = BuildbotJobArtifactBuilder(url)
    lpc = ArtifactBuilderCollection(url, builders=builder)
    lpc.parse()
    act = lpc.artifacts[builder.name]

    # we can't compare the "logurl" field, because it's a fully qualified url,
    # so it will be different depending on the config it's run in.
    assert "logurl" in act
    del act["logurl"]

    # leaving the logurl in the exp files so they are a good example of the
    # expected structure.
    del exp["logurl"]

    # assert act == exp, diff(exp, act)
    # if you want to gather results for a new test, use this
    assert len(act) == len(exp)
    for index, artifact in act.items():
        assert artifact == exp[index]
Example 8: extract_text_log_artifacts
def extract_text_log_artifacts(project, log_url, job_guid):
    """Generate a summary artifact for the raw text log."""
    # parse a log given its url
    artifact_bc = ArtifactBuilderCollection(log_url)
    artifact_bc.parse()

    artifact_list = []
    for name, artifact in artifact_bc.artifacts.items():
        artifact_list.append({
            "job_guid": job_guid,
            "name": name,
            "type": "json",
            "blob": json.dumps(artifact)
        })

    artifact_list.extend(get_error_summary_artifacts(artifact_list))

    return artifact_list
Example 9: test_performance_log_parsing
def test_performance_log_parsing():
    """
    Validate that we can parse a generic performance artifact
    """
    sd = SampleData()

    for logfile in ['mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz',
                    'try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz']:
        file_path = sd.get_log_path(logfile)
        file_url = 'file://{}'.format(file_path)

        builder = BuildbotPerformanceDataArtifactBuilder(url=file_url)
        lpc = ArtifactBuilderCollection(file_url, builders=[builder])
        lpc.parse()
        act = lpc.artifacts[builder.name]
        validate(act['performance_data'], PERFHERDER_SCHEMA)
Example 10: test_log_download_size_limit
def test_log_download_size_limit():
    """Test that logs whose Content-Length exceeds the size limit are not parsed."""
    url = 'http://foo.tld/fake_large_log.tar.gz'
    responses.add(
        responses.GET,
        url,
        body='',
        adding_headers={
            'Content-Encoding': 'gzip',
            'Content-Length': str(MAX_DOWNLOAD_SIZE_IN_BYTES + 1),
        }
    )
    lpc = ArtifactBuilderCollection(url)

    with pytest.raises(LogSizeException):
        lpc.parse()
Example 11: extract_text_log_artifacts
def extract_text_log_artifacts(job_log):
    """Generate a set of artifacts by parsing the raw text log."""
    # parse a log given its url
    artifact_bc = ArtifactBuilderCollection(job_log.url)
    artifact_bc.parse()

    artifact_list = []
    for name, artifact in artifact_bc.artifacts.items():
        artifact_list.append({
            "job_guid": job_log.job.guid,
            "name": name,
            "type": 'json',
            "blob": json.dumps(artifact)
        })

    return artifact_list
Example 12: test_talos_log_parsing
def test_talos_log_parsing():
    """
    Make sure all performance data log examples validate with the
    talos json schema.
    """
    sd = SampleData()
    files = sd.get_talos_logs()

    for file_url in files:
        builder = BuildbotTalosDataArtifactBuilder(url=file_url)
        lpc = ArtifactBuilderCollection(file_url, builders=[builder])
        lpc.parse()
        act = lpc.artifacts[builder.name]

        # Validate that the data returned conforms to the required
        # talos json schema
        for talos_datum in act['talos_data']:
            validate(talos_datum, TALOS_SCHEMA)
Example 13: test_performance_log_parsing
def test_performance_log_parsing():
    """
    Validate that we can parse a generic performance artifact
    """
    # the first two logs have only one performance artifact; the third has two
    for (logfile, num_perf_artifacts) in [
            ('mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz', 1),
            ('try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz', 1),
            ('mozilla-inbound-linux64-bm72-build1-build225.txt.gz', 2)]:
        url = add_log_response(logfile)

        builder = BuildbotPerformanceDataArtifactBuilder(url=url)
        lpc = ArtifactBuilderCollection(url, builders=[builder])
        lpc.parse()
        act = lpc.artifacts[builder.name]
        assert len(act['performance_data']) == num_perf_artifacts
        for perfherder_artifact in act['performance_data']:
            validate(perfherder_artifact, PERFHERDER_SCHEMA)
Example 14: test_performance_log_parsing
def test_performance_log_parsing():
    """
    Make sure all performance data log examples validate with the
    datazilla json schema.
    """
    sd = SampleData()
    files = sd.get_performance_logs()
    tda = TalosDataAdapter()

    for file_url in files:
        builder = BuildbotPerformanceDataArtifactBuilder(url=file_url)
        lpc = ArtifactBuilderCollection(file_url, builders=[builder])
        lpc.parse()
        act = lpc.artifacts[builder.name]

        # Validate that the data returned conforms to the required
        # datazilla json schema
        validate(act['talos_data'][0], tda.datazilla_schema)
Example 15: handle
def handle(self, *args, **options):
    if options['profile']:
        num_runs = options['profile']
    else:
        num_runs = 1

    times = []
    for _ in range(num_runs):
        start = time.time()
        artifact_bc = ArtifactBuilderCollection(options['log_url'])
        artifact_bc.parse()
        times.append(time.time() - start)

    if not options['profile']:
        for name, artifact in artifact_bc.artifacts.items():
            print("%s, %s" % (name, json.dumps(artifact, indent=2)))

    if options['profile']:
        print("Timings: %s" % times)
        print("Average: %s" % (sum(times) / len(times)))
        print("Total: %s" % sum(times))