本文整理汇总了Python中treeherder.etl.job_loader.JobLoader.process_job方法的典型用法代码示例。如果您正苦于以下问题:Python JobLoader.process_job方法的具体用法?Python JobLoader.process_job怎么用?Python JobLoader.process_job使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类treeherder.etl.job_loader.JobLoader的用法示例。
在下文中一共展示了JobLoader.process_job方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_ingest_pulse_jobs
# 需要导入模块: from treeherder.etl.job_loader import JobLoader [as 别名]
# 或者: from treeherder.etl.job_loader.JobLoader import process_job [as 别名]
def test_ingest_pulse_jobs(pulse_jobs, test_repository, push_stored,
                           failure_classifications, mock_log_parser):
    """
    Ingest a batch of jobs through the JSON-Schema-validated JobLoader
    used by the Pulse consumer and verify what was stored.
    """
    loader = JobLoader()
    target_revision = push_stored[0]["revision"]

    # Point every pulse job at the stored push, then ingest it.
    for pulse_job in pulse_jobs:
        pulse_job["origin"]["revision"] = target_revision
        loader.process_job(pulse_job)

    stored_jobs = Job.objects.all()
    assert len(stored_jobs) == 5
    # Every stored job must carry taskcluster metadata.
    assert [stored.taskcluster_metadata for stored in stored_jobs]

    expected_task_ids = {
        'IYyscnNMTLuxzna7PNqUJQ',
        'XJCbbRQ6Sp-UL1lL-tw5ng',
        'ZsSzJQu3Q7q2MfehIBAzKQ',
        'bIzVZt9jQQKgvQYD3a2HQw',
    }
    actual_task_ids = set(
        TaskclusterMetadata.objects.values_list('task_id', flat=True))
    assert actual_task_ids == expected_task_ids

    # The first job should have exactly these two unparsed logs attached.
    job_logs = JobLog.objects.filter(job_id=1)
    assert job_logs.count() == 2
    logs_expected = [
        {"name": "builds-4h",
         "url": "http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/tinderbox-builds/mozilla-inbound-linux64/mozilla-inbound_linux64_spidermonkey-warnaserr-bm57-build1-build352.txt.gz",
         "parse_status": 0},
        {"name": "errorsummary_json",
         "url": "http://mozilla-releng-blobs.s3.amazonaws.com/blobs/Mozilla-Inbound-Non-PGO/sha512/05c7f57df6583c6351c6b49e439e2678e0f43c2e5b66695ea7d096a7519e1805f441448b5ffd4cc3b80b8b2c74b244288fda644f55ed0e226ef4e25ba02ca466",
         "parse_status": 0},
    ]
    actual_logs = [{"name": log.name, "url": log.url, "parse_status": log.status}
                   for log in job_logs.all()]
    assert actual_logs == logs_expected

    assert JobDetail.objects.count() == 2
示例2: test_skip_unscheduled
# 需要导入模块: from treeherder.etl.job_loader import JobLoader [as 别名]
# 或者: from treeherder.etl.job_loader.JobLoader import process_job [as 别名]
def test_skip_unscheduled(first_job, failure_classifications,
                          mock_log_parser):
    """A job in the "unscheduled" state must not create any Job row."""
    first_job["state"] = "unscheduled"
    JobLoader().process_job(first_job)
    assert Job.objects.count() == 0
示例3: test_ingest_pulse_jobs_with_missing_push
# 需要导入模块: from treeherder.etl.job_loader import JobLoader [as 别名]
# 或者: from treeherder.etl.job_loader.JobLoader import process_job [as 别名]
def test_ingest_pulse_jobs_with_missing_push(pulse_jobs):
    """
    Ingesting jobs whose push is unknown must raise MissingPushException
    and store nothing.
    """
    loader = JobLoader()
    orphan_job = pulse_jobs[0]
    orphan_job["origin"]["revision"] = "1234567890123456789012345678901234567890"

    with pytest.raises(MissingPushException):
        for pulse_job in pulse_jobs:
            loader.process_job(pulse_job)

    # One not-yet-ingestable job fails the whole batch; the pulse task
    # retries the batch after the timeout, so nothing is stored now.
    assert Job.objects.count() == 0
示例4: test_ingest_pulse_jobs_bad_project
# 需要导入模块: from treeherder.etl.job_loader import JobLoader [as 别名]
# 或者: from treeherder.etl.job_loader.JobLoader import process_job [as 别名]
def test_ingest_pulse_jobs_bad_project(pulse_jobs, test_repository, push_stored,
                                       failure_classifications, mock_log_parser):
    """
    A pulse job pointing at an unknown repository is skipped while the
    rest of the batch is still ingested.
    """
    loader = JobLoader()
    bad_job = pulse_jobs[0]
    bad_job["origin"]["revision"] = push_stored[0]["revision"]
    bad_job["origin"]["project"] = "ferd"

    for pulse_job in pulse_jobs:
        loader.process_job(pulse_job)

    # Five pulse jobs in the fixture, minus the one with the bad project.
    assert Job.objects.count() == 4
示例5: test_ingest_pending_pulse_job
# 需要导入模块: from treeherder.etl.job_loader import JobLoader [as 别名]
# 或者: from treeherder.etl.job_loader.JobLoader import process_job [as 别名]
def test_ingest_pending_pulse_job(pulse_jobs, push_stored,
                                  failure_classifications, mock_log_parser):
    """
    Ingesting a pending job (1) works and (2) stores its taskcluster
    metadata.
    """
    loader = JobLoader()
    pending_job = pulse_jobs[0]
    pending_job["origin"]["revision"] = push_stored[0]["revision"]
    pending_job["state"] = "pending"
    loader.process_job(pending_job)

    stored_jobs = Job.objects.all()
    assert len(stored_jobs) == 1
    ingested = stored_jobs[0]
    assert ingested.taskcluster_metadata
    assert ingested.taskcluster_metadata.task_id == 'IYyscnNMTLuxzna7PNqUJQ'

    # NOTE(review): the original comment claimed no logs or details are
    # processed for pending jobs, yet both counts are asserted to be 2 —
    # presumably the log/detail records are stored (not parsed) even
    # while pending; confirm against JobLoader.process_job.
    assert JobLog.objects.count() == 2
    assert JobDetail.objects.count() == 2