本文整理汇总了Python中treeherder.etl.push.store_push_data函数的典型用法代码示例。如果您正苦于以下问题:Python store_push_data函数的具体用法?Python store_push_data怎么用?Python store_push_data使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了store_push_data函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_ingest_running_to_retry_to_success_sample_job
def test_ingest_running_to_retry_to_success_sample_job(test_repository,
                                                       failure_classifications,
                                                       sample_data,
                                                       sample_push,
                                                       mock_log_parser,
                                                       ingestion_cycles):
    """A retried job followed by a success must yield two Job rows,
    regardless of how the ingestion batches are split."""
    store_push_data(test_repository, sample_push)

    base_datum = copy.deepcopy(sample_data.job_data[0])
    base_datum['revision'] = sample_push[0]['revision']
    root_guid = base_datum['job']['job_guid']
    # A retried job gets a guid suffixed with the tail of its end timestamp.
    retry_guid = root_guid + "_" + str(base_datum['job']['end_timestamp'])[-5:]

    transitions = [
        ('running', 'unknown', root_guid),
        ('completed', 'retry', retry_guid),
        ('completed', 'success', root_guid),
    ]
    job_data = []
    for state, result, guid in transitions:
        datum = copy.deepcopy(base_datum)
        datum['job']['state'] = state
        datum['job']['result'] = result
        datum['job']['job_guid'] = guid
        job_data.append(datum)

    # Ingest in whatever batch boundaries this parametrization dictates.
    for start, end in ingestion_cycles:
        store_job_data(test_repository, job_data[start:end])

    assert Job.objects.count() == 2
    assert Job.objects.get(id=1).result == 'retry'
    assert Job.objects.get(id=2).result == 'success'
    assert JobLog.objects.count() == 2
示例2: test_create_error_summary
def test_create_error_summary(failure_classifications,
                              jobs_with_local_log, sample_push,
                              test_repository):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
    bug search terms/suggestions matches the number of error lines.
    """
    store_push_data(test_repository, sample_push)

    # Mark every job failed and attach it to the first stored push.
    for job_blob in jobs_with_local_log:
        job_blob['job']['result'] = "testfailed"
        job_blob['revision'] = sample_push[0]['revision']
    store_job_data(test_repository, jobs_with_local_log)

    bug_suggestions = get_error_summary(Job.objects.get(id=1))

    # One suggestions entry per error line; errors with no bug
    # suggestions still contribute an entry with an empty bugs list.
    assert TextLogError.objects.count() == len(bug_suggestions)

    # TODO: also verify the values of each suggestions entry, not just
    # its key set.
    expected_keys = {"search", "search_terms", "bugs"}
    for suggestion in bug_suggestions:
        assert set(suggestion.keys()) == expected_keys
示例3: process
def process(self, message_body, exchange):
    """Transform an incoming push message and store it for its repository.

    Messages for repositories we do not track are recorded in New Relic
    and skipped.
    """
    transformer = self.get_transformer_class(exchange)(message_body)
    try:
        newrelic.agent.add_custom_parameter("url", transformer.repo_url)
        newrelic.agent.add_custom_parameter("branch", transformer.branch)
        candidates = Repository.objects
        if transformer.branch:
            # branch is stored as a comma-separated list; match whole entries.
            candidates = candidates.filter(
                branch__regex="(^|,)%s($|,)" % transformer.branch)
        else:
            candidates = candidates.filter(branch=None)
        repo = candidates.get(url=transformer.repo_url,
                              active_status="active")
        newrelic.agent.add_custom_parameter("repository", repo.name)
    except ObjectDoesNotExist:
        unknown_repo = transformer.get_info()
        unknown_repo.update({
            "url": transformer.repo_url,
            "branch": transformer.branch,
        })
        newrelic.agent.record_custom_event("skip_unknown_repository",
                                           unknown_repo)
        logger.warning("Skipping unsupported repo: %s %s",
                       transformer.repo_url,
                       transformer.branch)
        return

    transformed_data = transformer.transform(repo.name)
    logger.info("Storing push for %s %s %s",
                repo.name,
                transformer.repo_url,
                transformer.branch)
    store_push_data(repo, [transformed_data])
示例4: push_with_three_jobs
def push_with_three_jobs(sample_data, sample_push, test_repository):
    """
    Stores a number of jobs in the same push.
    """
    push = sample_push[0]
    # Only store data for the first push....
    store_push_data(test_repository, [push])

    pending_jobs = []
    for blob in copy.deepcopy(sample_data.job_data[0:3]):
        # Modify job structure to sync with the push sample data.
        blob.pop('sources', None)
        # Skip log references since they do not work correctly in
        # pending state.
        blob['job'].pop('log_references', None)
        blob['revision'] = push['revision']
        blob['job']['state'] = 'pending'
        pending_jobs.append(blob)

    # Store and process the jobs so they are present in the tables.
    store_job_data(test_repository, pending_jobs)
    return Push.objects.get(repository=test_repository,
                            revision=push['revision'])
示例5: process
def process(self, message_body, exchange):
    """Transform an incoming push message and store it for its repository.

    Messages for repositories we do not track are recorded in New Relic
    and skipped.
    """
    transformer = self.get_transformer_class(exchange)(message_body)
    try:
        newrelic.agent.add_custom_parameter("url", transformer.repo_url)
        newrelic.agent.add_custom_parameter("branch", transformer.branch)
        repo = Repository.objects.get(url=transformer.repo_url,
                                      branch=transformer.branch,
                                      active_status="active")
        newrelic.agent.add_custom_parameter("repository", repo.name)
    except ObjectDoesNotExist:
        repo_info = transformer.get_info()
        repo_info.update({
            "url": transformer.repo_url,
            "branch": transformer.branch,
        })
        newrelic.agent.record_custom_event("skip_unknown_repository",
                                           repo_info)
        # logger.warn() is a deprecated alias; use warning() with lazy
        # %-style arguments, matching the other process() implementation.
        logger.warning("Skipping unsupported repo: %s %s",
                       transformer.repo_url,
                       transformer.branch)
        return
    transformed_data = transformer.transform(repo.name)
    # Lazy %-args avoid formatting the message when the level is disabled.
    logger.info("Storing push for %s %s %s",
                repo.name,
                transformer.repo_url,
                transformer.branch)
    store_push_data(repo, [transformed_data])
示例6: eleven_job_blobs
def eleven_job_blobs(sample_data, sample_push, test_repository, mock_log_parser):
    """Return eleven job blobs distributed round-robin over the sample pushes."""
    store_push_data(test_repository, sample_push)
    push_count = len(sample_push)
    blobs = []
    for index, blob in enumerate(sample_data.job_data[0:11]):
        # Modify job structure to sync with the push sample data.
        if 'sources' in blob:
            del blob['sources']
        # Cycle through the available pushes.
        blob['revision'] = sample_push[index % push_count]['revision']
        blobs.append(blob)
    return blobs
示例7: test_push_list_without_jobs
def test_push_list_without_jobs(client,
                                test_repository,
                                sample_push):
    """
    test retrieving a push list without jobs
    """
    store_push_data(test_repository, sample_push)

    resp = client.get(
        reverse("push-list", kwargs={"project": test_repository.name})
    )
    assert resp.status_code == 200

    # The .json() method of the Django test client doesn't handle unicode
    # properly on Python 2, so we have to deserialize ourselves.
    # TODO: Clean up once on Python 3.
    data = json.loads(resp.content)
    results = data['results']
    assert len(results) == 10
    # No jobs were stored, so no push should carry platform data.
    assert all('platforms' not in result for result in results)
    assert data['meta'] == {
        u'count': len(results),
        u'filter_params': {},
        u'repository': test_repository.name
    }
示例8: test_push_list_single_long_revision_stored_long
def test_push_list_single_long_revision_stored_long(webapp, sample_push, test_repository):
    """
    test retrieving a push list with store long revision, filtered by a single long revision
    """
    long_revision = "21fb3eed1b5f3456789012345678901234567890"

    # store a push with long revision
    stored_push = copy.deepcopy(sample_push[0])
    stored_push["revisions"][0]["revision"] = long_revision
    store_push_data(test_repository, [stored_push])

    resp = webapp.get(
        reverse("push-list", kwargs={"project": test_repository.name}),
        {"revision": long_revision}
    )
    assert resp.status_int == 200

    results = resp.json['results']
    assert len(results) == 1
    assert {ph["revision"] for ph in results} == {sample_push[0]['revision']}
    assert resp.json['meta'] == {
        'count': 1,
        'revision': long_revision,
        'filter_params': {
            'revisions_long_revision': long_revision
        },
        'repository': test_repository.name,
    }
示例9: test_ingest_buildbot_tier1_job
def test_ingest_buildbot_tier1_job(test_repository, sample_data, sample_push,
                                   failure_classifications, mock_log_parser):
    """Tier is set to 1 if no lower_tier_signatures is used (ie: TaskCluster)"""
    store_push_data(test_repository, sample_push)
    store_job_data(test_repository, sample_data.job_data[:1])
    ingested = Job.objects.all().first()
    assert ingested.tier == 1
示例10: test_ingest_job_default_tier
def test_ingest_job_default_tier(test_repository, sample_data, sample_push,
                                 failure_classifications, mock_log_parser):
    """Tier is set to 1 by default"""
    store_push_data(test_repository, sample_push)
    store_job_data(test_repository, sample_data.job_data[:1])
    ingested = Job.objects.all().first()
    assert ingested.tier == 1
示例11: test_bad_date_value_ingestion
def test_bad_date_value_ingestion(test_repository, failure_classifications,
                                  sample_push, mock_log_parser):
    """
    Test ingesting a job blob with bad date value
    """
    store_push_data(test_repository, sample_push[:1])
    # Ingestion should tolerate a non-numeric start_timestamp.
    bad_blob = job_data(start_timestamp="foo",
                        revision=sample_push[0]['revision'])
    store_job_data(test_repository, [bad_blob])
示例12: test_push_list_empty_push_still_show
def test_push_list_empty_push_still_show(webapp, sample_push, test_repository):
    """
    test retrieving a push list, when the push has no jobs.
    should show.
    """
    store_push_data(test_repository, sample_push)
    response = webapp.get(
        reverse("push-list", kwargs={"project": test_repository.name}),
    )
    assert response.status_int == 200
    # All ten pushes appear even though none of them has jobs.
    assert len(response.json['results']) == 10
示例13: test_push_list_empty_push_still_show
def test_push_list_empty_push_still_show(client, sample_push, test_repository):
    """
    test retrieving a push list, when the push has no jobs.
    should show.
    """
    store_push_data(test_repository, sample_push)
    response = client.get(
        reverse("push-list", kwargs={"project": test_repository.name}),
    )
    assert response.status_code == 200
    # The .json() method of the Django test client doesn't handle unicode
    # properly on Python 2, so we have to deserialize ourselves.
    # TODO: Clean up once on Python 3.
    payload = json.loads(response.content)
    # All ten pushes appear even though none of them has jobs.
    assert len(payload['results']) == 10
示例14: test_parse_log
def test_parse_log(test_repository, failure_classifications, jobs_with_local_log, sample_push):
    """
    check that 2 job_artifacts get inserted when running a parse_log task for
    a successful job and that JobDetail objects get created
    """
    store_push_data(test_repository, sample_push)
    jobs = jobs_with_local_log
    for job in jobs:
        # make this a successful job, to check it's still parsed for errors
        job['job']['result'] = "success"
        job['revision'] = sample_push[0]['revision']
    store_job_data(test_repository, jobs)
    # this log generates 4 job detail objects at present
    # BUG FIX: this was `print(JobDetail.objects.count() == 4)`, which only
    # printed a boolean and could never fail the test; it must assert.
    assert JobDetail.objects.count() == 4
示例15: test_ingest_running_to_retry_sample_job
def test_ingest_running_to_retry_sample_job(test_repository,
                                            failure_classifications,
                                            sample_data,
                                            sample_push,
                                            mock_log_parser,
                                            same_ingestion_cycle):
    """Process a single job structure in the job_data.txt file"""
    store_push_data(test_repository, sample_push)

    job_data = copy.deepcopy(sample_data.job_data[:1])
    first = job_data[0]
    first['revision'] = sample_push[0]['revision']
    first['job']['state'] = 'running'
    first['job']['result'] = 'unknown'

    def _as_retry(job):
        # Mutate the job in place into its completed/retry form; the
        # guid gains the end-timestamp suffix used on a retry.
        job['state'] = 'completed'
        job['result'] = 'retry'
        job['job_guid'] = job['job_guid'] + "_" + str(job['end_timestamp'])[-5:]
        return job

    if same_ingestion_cycle:
        # The completed version of the job arrives in the same batch
        # (on the same push) as the running one.
        retried = copy.deepcopy(first)
        retried['job'] = _as_retry(retried['job'])
        job_data.append(retried)
        store_job_data(test_repository, job_data)
    else:
        # Store the running state first, then simulate the completed
        # version arriving and ingest a second time.
        store_job_data(test_repository, job_data)
        _as_retry(first['job'])
        store_job_data(test_repository, job_data)

    assert Job.objects.count() == 1
    ingested = Job.objects.get(id=1)
    assert ingested.result == 'retry'
    # guid should be the retry one
    assert ingested.guid == job_data[-1]['job']['job_guid']