本文整理汇总了Python中treeherder.etl.common.fetch_json函数的典型用法代码示例。如果您正苦于以下问题:Python fetch_json函数的具体用法?Python fetch_json怎么用?Python fetch_json使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了fetch_json函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: extract
def extract(self, url, revision):
    """Fetch the json-pushes pushlog at ``url``.

    Returns the decoded JSON payload. A 404 from json-pushes is treated
    as "this push does not exist in this repo" and yields a dummy
    ``onhold`` push instead of raising; any other HTTP error is logged
    and re-raised.
    """
    logger.info("extracting missing resultsets: {0}".format(url))
    try:
        return fetch_json(url)
    except requests.exceptions.HTTPError as e:
        status_code = e.response.status_code
        if status_code == 404:
            # we will sometimes get here because builds4hr/pending/running have a
            # job with a resultset that json-pushes doesn't know about. So far
            # I have only found this to be the case when it uses a revision from
            # the wrong repo. For example: mozilla-central, but l10n. The l10n
            # is a separate repo, but buildbot shows it as the same. So we
            # create this dummy resultset with ``active_status`` of ``onhold``.
            #
            # The effect of this is that we won't keep trying to re-fetch
            # the bogus pushlog, but the jobs are (correctly) not shown in the
            # UI, since they're bad data.
            #
            # NOTE: logger.warn is a deprecated alias; use logger.warning.
            logger.warning(("no pushlog in json-pushes. generating a dummy"
                            " onhold placeholder: {0}").format(url))
            # we want to make a "dummy" resultset that is "onhold",
            # because json-pushes doesn't know about it.
            # This is, in effect, what TBPL does.
            # These won't show in the UI, because they only fetch "active"
            # resultsets
            return get_not_found_onhold_push(url, revision)
        logger.warning("HTTPError %s fetching: %s", status_code, url)
        raise
示例2: fetch_resultset
def fetch_resultset(self, url, repository, sha=None):
    """Build a resultset dict from the commits listed at ``url``.

    The last (newest) cleaned commit supplies the resultset's revision,
    timestamp and author; every commit becomes an entry in ``revisions``.
    On any failure the error is logged and recorded in New Relic and
    None is returned implicitly (best-effort ingestion).
    """
    params = {"sha": sha} if sha else {}
    params.update(self.CREDENTIALS)
    logger.info("Fetching resultset details: {}".format(url))
    try:
        cleaned = self.get_cleaned_commits(fetch_json(url, params))
        newest = cleaned[-1]
        revisions = [
            {
                "comment": c["commit"]["message"],
                "repository": repository,
                "author": "{} <{}>".format(
                    c["commit"]["author"]["name"],
                    c["commit"]["author"]["email"]),
                "revision": c["sha"],
            }
            for c in cleaned
        ]
        return {
            "revision": newest["sha"],
            "push_timestamp": to_timestamp(
                newest["commit"]["author"]["date"]),
            "author": newest["commit"]["author"]["email"],
            "revisions": revisions,
        }
    except Exception as ex:
        logger.exception("Error fetching commits", exc_info=ex)
        newrelic.agent.record_exception(ex, params={
            "url": url, "repository": repository, "sha": sha
        })
示例3: _query_latest_gecko_decision_task_id
def _query_latest_gecko_decision_task_id(self, repo_name):
    """Return the taskId of the latest decision task indexed for ``repo_name``."""
    index_url = self.tc_index_url % repo_name
    logger.info('Fetching {}'.format(index_url))
    # The Taskcluster index answers with the most recent matching task.
    task_id = fetch_json(index_url)['taskId']
    logger.info('For {} we found the task id: {}'.format(repo_name, task_id))
    return task_id
示例4: fetch_push
def fetch_push(self, url, repository, sha=None):
    """Build a push dict from the single json-pushes entry at ``url``.

    Returns a dict with the tip revision, a legacy ``revision_hash``,
    the pushing user, the push timestamp and the (truncated) list of
    commits.
    """
    newrelic.agent.add_custom_parameter("sha", sha)
    logger.info("fetching for {} {}".format(repository, url))
    # there will only ever be one, with this url.
    # list() is needed because dict views are not indexable on Python 3
    # (.values()[0] is Python-2-only).
    push = list(fetch_json(url)["pushes"].values())[0]
    commits = []
    # TODO: Remove this when bug 1257602 is addressed
    rev_hash_components = []
    # we only want to ingest the last 200 commits for each push,
    # to protect against the 5000+ commit merges on release day uplift.
    for commit in push['changesets'][-200:]:
        commits.append({
            "revision": commit["node"],
            "author": commit["author"],
            "comment": commit["desc"],
        })
        rev_hash_components.append(commit['node'])
        rev_hash_components.append(commit['branch'])
    return {
        "revision": commits[-1]["revision"],
        'revision_hash': generate_revision_hash(rev_hash_components),
        "author": push["user"],
        "push_timestamp": push["date"],
        "revisions": commits,
    }
示例5: fetch_push
def fetch_push(self, url, repository):
    """Build a push dict from the cleaned commits fetched from ``url``.

    The newest cleaned commit provides the push's revision, timestamp
    and author; every commit becomes an entry in ``revisions``.
    """
    params = {}
    params.update(self.CREDENTIALS)
    logger.info("Fetching push details: %s", url)
    cleaned = self.get_cleaned_commits(fetch_json(url, params))
    tip = cleaned[-1]
    return {
        "revision": tip["sha"],
        "push_timestamp": to_timestamp(
            tip["commit"]["author"]["date"]),
        "author": tip["commit"]["author"]["email"],
        "revisions": [
            {
                "comment": c["commit"]["message"],
                "author": u"{} <{}>".format(
                    c["commit"]["author"]["name"],
                    c["commit"]["author"]["email"]),
                "revision": c["sha"],
            }
            for c in cleaned
        ],
    }
示例6: get_bugs_for_search_term
def get_bugs_for_search_term(search, base_uri):
    """
    Fetch the base_uri endpoint filtering on search and status.
    Status must be either 'open' or 'closed'
    """
    # imported lazily here, matching the original's local import
    from treeherder.etl.common import fetch_json
    return fetch_json(base_uri, params={'search': search})
示例7: run
def run(self, revision_filter=None, project_filter=None, job_group_filter=None):
    """ Returns True if new completed jobs were loaded, False otherwise. """
    # Fetch the buildapi builds-4hr payload (presumably the builds
    # completed in the last four hours — TODO confirm against buildapi docs).
    builds_4hr = common.fetch_json(settings.BUILDAPI_BUILDS4H_URL)
    # transform() applies the optional filters and returns the treeherder
    # collections plus the set of job ids it saw.
    job_collections, job_ids_seen = self.transform(builds_4hr,
                                                   revision_filter=revision_filter,
                                                   project_filter=project_filter,
                                                   job_group_filter=job_group_filter)
    if job_collections:
        th_publisher.post_treeherder_collections(job_collections,
                                                 chunk_size=settings.BUILDAPI_BUILDS4H_CHUNK_SIZE)
    # Remember which job ids were ingested so the next run can skip them.
    cache.set(CACHE_KEYS['complete'], job_ids_seen)
    return bool(job_collections)
示例8: fetch_intermittent_bugs
def fetch_intermittent_bugs(offset, limit):
    """Fetch one page of intermittent-failure bugs from the Bugzilla REST API.

    ``offset``/``limit`` page through the result set. Returns the list of
    bug dicts, or an empty list when the response has no ``bugs`` key.
    """
    url = settings.BZ_API_URL + '/rest/bug'
    params = {
        'keywords': 'intermittent-failure',
        # only bugs changed within the last year
        'chfieldfrom': '-1y',
        # BUGFIX: removed the stray space the original had in
        # "last_change_time, whiteboard" — Bugzilla field names in
        # include_fields are comma-separated and should not carry whitespace.
        'include_fields': ('id,summary,status,resolution,op_sys,cf_crash_signature,'
                           'keywords,last_change_time,whiteboard'),
        'offset': offset,
        'limit': limit,
    }
    response = fetch_json(url, params=params)
    return response.get('bugs', [])
示例9: run
def run(self, revision_filter=None, project_filter=None, job_group_filter=None):
    """ Returns True if new completed jobs were loaded, False otherwise. """
    # Fetch the builds-4hr payload from buildapi.
    builds_4hr = common.fetch_json(BUILDS4H_URL)
    # transform() applies the optional filters and returns the job
    # collections plus the set of job ids it saw.
    job_collections, job_ids_seen = self.transform(builds_4hr,
                                                   revision_filter=revision_filter,
                                                   project_filter=project_filter,
                                                   job_group_filter=job_group_filter)
    if job_collections:
        store_jobs(job_collections, chunk_size=500)
    # Cache the seen ids with a TTL matching the builds-4hr window so
    # the next run can skip already-ingested jobs.
    cache.set(CACHE_KEYS['complete'], job_ids_seen, FOUR_HOURS_IN_SECONDS)
    return bool(job_collections)
示例10: run
def run(self, revision_filter=None, project_filter=None, job_group_filter=None):
    """ Returns True if new running jobs were loaded, False otherwise. """
    # Fetch the currently-running builds from buildapi.
    builds_running = common.fetch_json(settings.BUILDAPI_RUNNING_URL)
    # transform() tags the jobs with the 'running' state and applies the
    # optional filters, returning collections plus the set of ids seen.
    job_collections, job_ids_seen = self.transform(builds_running,
                                                   'running',
                                                   revision_filter=revision_filter,
                                                   project_filter=project_filter,
                                                   job_group_filter=job_group_filter)
    if job_collections:
        store_jobs(job_collections,
                   chunk_size=settings.BUILDAPI_RUNNING_CHUNK_SIZE)
    # Remember which running-job ids were ingested for the next cycle.
    cache.set(CACHE_KEYS['running'], job_ids_seen)
    return bool(job_collections)
示例11: _taskcluster_runnable_jobs_gz
def _taskcluster_runnable_jobs_gz(tc_graph_url):
    """Fetch a gzipped taskgraph, returning [] on validation or HTTP errors."""
    try:
        # `force_gzip_encoding` works around Taskcluster not setting `Content-Encoding: gzip`:
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1423215
        return fetch_json(tc_graph_url, force_gzip_decompression=True)
    except ValidationError:
        logger.warning('Failed to validate %s', tc_graph_url)
    except requests.exceptions.HTTPError as e:
        logger.info('HTTPError %s when getting taskgraph at %s',
                    e.response.status_code, tc_graph_url)
    return []
示例12: _taskcluster_runnable_jobs
def _taskcluster_runnable_jobs(project, decision_task_id):
    """Return runnable-job dicts built from a decision task's taskgraph.

    Falls back to looking up the latest decision task for ``project`` when
    ``decision_task_id`` is falsy; returns [] when no decision task exists
    or the taskgraph cannot be fetched/validated.
    """
    ret = []
    tc_graph = {}
    if not decision_task_id:
        decision_task_id = query_latest_gecko_decision_task_id(project)
    # Some trees (e.g. comm-central) don't have a decision task, which means there are no taskcluster runnable jobs
    if not decision_task_id:
        return ret
    tc_graph_url = settings.TASKCLUSTER_TASKGRAPH_URL.format(task_id=decision_task_id)
    validate = URLValidator()
    try:
        validate(tc_graph_url)
        tc_graph = fetch_json(tc_graph_url)
    except ValidationError:
        logger.warning('Failed to validate {}'.format(tc_graph_url))
        return []
    except requests.exceptions.HTTPError as e:
        logger.info('HTTPError {} when getting taskgraph at {}'.format(
            e.response.status_code, tc_graph_url))
        return []
    # BUGFIX: .iteritems() is Python-2-only; .items() behaves identically
    # here and works on both Python 2 and 3 (sibling code already uses it).
    for label, node in tc_graph.items():
        if not ('extra' in node['task'] and 'treeherder' in node['task']['extra']):
            # some tasks don't have the treeherder information we need
            # to be able to display them (and are not intended to be
            # displayed). skip.
            continue
        treeherder_options = node['task']['extra']['treeherder']
        task_metadata = node['task']['metadata']
        platform_option = ' '.join(treeherder_options.get('collection', {}).keys())
        ret.append({
            'build_platform': treeherder_options.get('machine', {}).get('platform', ''),
            'build_system_type': 'taskcluster',
            'job_group_name': treeherder_options.get('groupName', ''),
            'job_group_symbol': treeherder_options.get('groupSymbol', ''),
            'job_type_description': task_metadata['description'],
            'job_type_name': task_metadata['name'],
            'job_type_symbol': treeherder_options['symbol'],
            'platform': treeherder_options.get('machine', {}).get('platform', ''),
            'platform_option': platform_option,
            'ref_data_name': label,
            'state': 'runnable',
            'result': 'runnable',
            'job_coalesced_to_guid': None
        })
    return ret
示例13: query_latest_gecko_decision_task_id
def query_latest_gecko_decision_task_id(project):
    """Return the latest decision task id for ``project``, or None on a 404.

    Any non-404 HTTP error is re-raised.
    """
    url = TASKCLUSTER_INDEX_URL % project
    logger.info('Fetching %s', url)
    try:
        task_id = fetch_json(url)['taskId']
    except requests.exceptions.HTTPError as e:
        # a 404 simply means there is no decision task on this push
        if e.response.status_code != 404:
            raise
        logger.info('For %s we did not find a task id', project)
        return None
    logger.info('For %s we found the task id: %s', project, task_id)
    return task_id
示例14: _taskcluster_runnable_jobs
def _taskcluster_runnable_jobs(project, decision_task_id):
    """Return runnable-job dicts from the runnable-jobs.json artifact.

    Falls back to the latest decision task when ``decision_task_id`` is
    falsy, and to the compressed (.gz) artifact when the uncompressed one
    404s. Returns [] when no decision task exists or the URL is invalid.
    """
    if not decision_task_id:
        decision_task_id = query_latest_gecko_decision_task_id(project)
    # Some trees (e.g. comm-central) don't have a decision task, which means there are no taskcluster runnable jobs
    if not decision_task_id:
        return []
    tc_graph = {}
    tc_graph_url = RUNNABLE_JOBS_URL.format(task_id=decision_task_id)
    try:
        URLValidator()(tc_graph_url)
        tc_graph = fetch_json(tc_graph_url)
    except ValidationError:
        logger.warning('Failed to validate %s', tc_graph_url)
        return []
    except requests.exceptions.HTTPError as e:
        logger.info('HTTPError %s when getting uncompressed taskgraph at %s',
                    e.response.status_code, tc_graph_url)
        # TODO: Remove this fallback once all .gz artifacts have expired
        logger.info('Attempting to fall back to the compressed taskgraph...')
        newrelic.agent.record_custom_event(
            "runnable_jobs_fallback",
            {
                "message": "runnable-jobs.json artifact not found, falling back to gz version",
                "project": project,
                "url": tc_graph_url
            }
        )
        tc_graph = _taskcluster_runnable_jobs_gz(tc_graph_url + ".gz")
    return [
        {
            'build_platform': node.get('platform', ''),
            'build_system_type': 'taskcluster',
            'job_group_name': node.get('groupName', ''),
            'job_group_symbol': node.get('groupSymbol', ''),
            'job_type_name': label,
            'job_type_symbol': node['symbol'],
            'platform': node.get('platform'),
            'platform_option': ' '.join(node.get('collection', {}).keys()),
            'ref_data_name': label,
            'state': 'runnable',
            'result': 'runnable',
        }
        for label, node in iteritems(tc_graph)
    ]
示例15: _taskcluster_runnable_jobs
def _taskcluster_runnable_jobs(project):
    """Return runnable-job dicts for ``project``'s latest decision task.

    Tries the artifact for runs 0-4 in order, skipping runs whose fetch
    fails with an HTTP error. Returns [] when there is no decision task,
    a candidate URL fails validation, or every run's fetch fails.
    """
    decision_task_id = query_latest_gecko_decision_task_id(project)
    # Some trees (e.g. comm-central) don't have a decision task, which means there are no taskcluster runnable jobs
    if not decision_task_id:
        return []
    # Hoisted out of the loop: constructing the validator is loop-invariant.
    validate = URLValidator()
    for run_number in range(5):
        tc_graph_url = RUNNABLE_JOBS_URL.format(task_id=decision_task_id, run_number=run_number)
        try:
            validate(tc_graph_url)
        except ValidationError:
            logger.warning('Failed to validate %s', tc_graph_url)
            return []
        try:
            tc_graph = fetch_json(tc_graph_url)
        except requests.exceptions.HTTPError as e:
            logger.info('HTTPError %s when getting taskgraph at %s',
                        e.response.status_code, tc_graph_url)
            # this run's artifact is unavailable; try the next run
            continue
        return [
            {
                'build_platform': node.get('platform', ''),
                'build_system_type': 'taskcluster',
                'job_group_name': node.get('groupName', ''),
                'job_group_symbol': node.get('groupSymbol', ''),
                'job_type_name': label,
                'job_type_symbol': node['symbol'],
                'platform': node.get('platform'),
                'platform_option': ' '.join(node.get('collection', {}).keys()),
                'ref_data_name': label,
                'state': 'runnable',
                'result': 'runnable',
            }
            for label, node in tc_graph.items()
        ]
    # every candidate run failed to fetch — no runnable jobs available
    return []