本文整理汇总了Python中changes.models.JobPlan.get_build_step_for_job方法的典型用法代码示例。如果您正苦于以下问题:Python JobPlan.get_build_step_for_job方法的具体用法?Python JobPlan.get_build_step_for_job怎么用?Python JobPlan.get_build_step_for_job使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类changes.models.JobPlan
的用法示例。
在下文中一共展示了JobPlan.get_build_step_for_job方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: post
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def post(self, build_id):
    """Cancel all unfinished jobs of a build and mark the build aborted.

    Responds 404 when the build does not exist, 204 when it is already
    finished or nothing could be cancelled, otherwise the updated build.
    """
    build = Build.query.options(
        joinedload('project', innerjoin=True),
        joinedload('author'),
        joinedload('source').joinedload('revision'),
    ).get(build_id)
    if build is None:
        return '', 404
    if build.status == Status.finished:
        return '', 204

    # Cancel every job that is still active/pending.
    cancelled = []
    for job in build.jobs:
        if job.status == Status.finished:
            continue
        # TODO(dcramer): we make an assumption that there is a single step
        _, implementation = JobPlan.get_build_step_for_job(job_id=job.id)
        if not implementation:
            continue
        implementation.cancel(job=job)
        cancelled.append(job)

    if not cancelled:
        return '', 204

    build.status = Status.finished
    build.result = Result.aborted
    db.session.add(build)
    return self.respond(build)
示例2: sync_artifact
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def sync_artifact(artifact_id=None, **kwargs):
    """
    Downloads an artifact from jenkins.

    Looks up the Artifact row, skips artifacts belonging to aborted steps,
    and delegates to the job's build-step implementation: process the file
    if it is already stored, otherwise fetch it from the build system.
    """
    artifact = Artifact.query.get(artifact_id)
    if artifact is None:
        return
    step = artifact.step
    if step.result == Result.aborted:
        return
    _, implementation = JobPlan.get_build_step_for_job(job_id=step.job_id)
    # get_build_step_for_job can return (None, None) when the job has no
    # build plan; bail out instead of raising AttributeError below.
    if implementation is None:
        current_app.logger.warning(
            'No build step implementation for job %s (artifact %s)',
            step.job_id, artifact_id)
        return
    # TODO(dcramer): we eventually want to abstract the entirety of Jenkins
    # artifact syncing so that we pull files and then process them
    if artifact.file:
        try:
            implementation.get_artifact_manager(step).process(artifact)
        except UnrecoverableException:
            current_app.logger.exception(
                'Unrecoverable exception processing artifact %s: %s',
                artifact.step_id, artifact)
    else:
        try:
            implementation.fetch_artifact(artifact=artifact)
        except UnrecoverableException:
            current_app.logger.exception(
                'Unrecoverable exception fetching artifact %s: %s',
                artifact.step_id, artifact)
示例3: create_job
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def create_job(job_id):
    """
    Kick off a newly created job by executing its build-step implementation,
    then enqueue the sync_job task to poll its progress.
    """
    job = Job.query.get(job_id)
    if not job:
        return
    # we might already be marked as finished for various reasons
    # (such as aborting the task)
    if job.status == Status.finished:
        return
    jobplan, implementation = JobPlan.get_build_step_for_job(job_id=job.id)
    if implementation is None:
        # TODO(dcramer): record a FailureReason?
        job.status = Status.finished
        job.result = Result.failed
        # Persist the terminal state immediately (matching the other
        # create_job variant in this file) so the failure is visible even
        # if no later commit happens in this task.
        db.session.add(job)
        db.session.flush()
        current_app.logger.exception('No build plan set %s', job_id)
        return
    try:
        implementation.execute(job=job)
    except UnrecoverableException:
        job.status = Status.finished
        job.result = Result.aborted
        db.session.add(job)
        db.session.flush()
        current_app.logger.exception('Unrecoverable exception creating %s', job_id)
        return
    sync_job.delay(
        job_id=job.id.hex,
        task_id=job.id.hex,
        parent_task_id=job.build_id.hex,
    )
示例4: get
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def get(self, step_id):
    """Serialize a JobStep plus the snapshot/resource/lxc details an agent needs."""
    jobstep = JobStep.query.options(
        joinedload('project', innerjoin=True),
    ).get(step_id)
    if jobstep is None:
        return '', 404

    jobplan = JobPlan.query.filter(
        JobPlan.job_id == jobstep.job_id,
    ).first()

    # determine if there's an expected snapshot outcome
    expected_image = SnapshotImage.query.filter(
        SnapshotImage.job_id == jobstep.job_id,
    ).first()

    # we only send a current snapshot if we're not expecting to build
    # a new image
    current_image = None
    if not expected_image:
        if jobplan:
            current_image = jobplan.snapshot_image
        if current_image is None and current_app.config['DEFAULT_SNAPSHOT']:
            current_image = {
                'id': current_app.config['DEFAULT_SNAPSHOT'],
            }

    context = self.serialize(jobstep)
    context['commands'] = self.serialize(list(jobstep.commands))
    context['snapshot'] = self.serialize(current_image)
    context['expectedSnapshot'] = self.serialize(expected_image)
    context['project'] = self.serialize(jobstep.project)
    context['job'] = self.serialize(jobstep.job)

    _, buildstep = JobPlan.get_build_step_for_job(jobstep.job_id)

    resource_limits = buildstep.get_resource_limits() if buildstep else {}
    if resource_limits:
        context['resourceLimits'] = resource_limits

    lxc_config = buildstep.get_lxc_config(jobstep) if buildstep else None
    if lxc_config:
        context["adapter"] = "lxc"
        context['lxcConfig'] = {
            'preLaunch': lxc_config.prelaunch,
            'postLaunch': lxc_config.postlaunch,
            's3Bucket': lxc_config.s3_bucket,
            'compression': lxc_config.compression,
            'release': lxc_config.release,
        }

    debugConfig = buildstep.debug_config if buildstep else {}
    if 'debugForceInfraFailure' in jobstep.data:
        debugConfig['forceInfraFailure'] = jobstep.data['debugForceInfraFailure']
    if debugConfig:
        context['debugConfig'] = self.serialize(debugConfig)

    return self.respond(context, serialize=False)
示例5: process
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def process(self, fp):
    """Parse a phase-config artifact and expand it into additional jobs.

    Records a failure reason if the JSON is malformed or expansion fails.
    """
    step = self.step
    try:
        phase_config = json.load(fp)
    except ValueError:
        uri = build_uri('/find_build/{0}/'.format(step.job.build_id.hex))
        self.logger.warning('Failed to parse json; (step=%s, build=%s)', step.id.hex, uri, exc_info=True)
        self._add_failure_reason()
        return

    _, implementation = JobPlan.get_build_step_for_job(job_id=step.job_id)
    try:
        implementation.expand_jobs(step, phase_config)
    except Exception:
        uri = build_uri('/find_build/{0}/'.format(step.job.build_id.hex))
        self.logger.warning('expand_jobs failed (step=%s, build=%s)', step.id.hex, uri, exc_info=True)
        self._add_failure_reason()
示例6: process
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def process(self, fp):
    """
    Parse a jobs-expansion artifact and expand it into additional jobs.

    Any failure (malformed JSON or a failed expansion) records a
    'malformed_artifact' FailureReason for the step.
    """
    step = self.step

    def record_malformed():
        # Persist why this step failed so the UI can surface it.
        try_create(FailureReason, {
            'step_id': step.id,
            'job_id': step.job_id,
            'build_id': step.job.build_id,
            'project_id': step.project_id,
            'reason': 'malformed_artifact'
        })
        db.session.commit()

    try:
        phase_config = json.load(fp)
    except ValueError:
        uri = build_uri('/find_build/{0}/'.format(step.job.build_id.hex))
        self.logger.warning('Failed to parse json; (step=%s, build=%s)', step.id.hex, uri, exc_info=True)
        record_malformed()
        return

    _, implementation = JobPlan.get_build_step_for_job(job_id=step.job_id)
    try:
        implementation.expand_jobs(step, phase_config)
    except Exception:
        # The previous single try-block logged "Failed to parse json" even
        # when the JSON parsed fine and expand_jobs raised; log accurately.
        uri = build_uri('/find_build/{0}/'.format(step.job.build_id.hex))
        self.logger.warning('expand_jobs failed (step=%s, build=%s)', step.id.hex, uri, exc_info=True)
        record_malformed()
示例7: create_job
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def create_job(job_id):
    """
    Kicks off a newly created job within a build;
    enqueued for each job within a new build.
    """
    job = Job.query.get(job_id)
    if not job:
        return

    def finish(result):
        # Persist a terminal state for the job immediately.
        job.status = Status.finished
        job.result = result
        db.session.add(job)
        db.session.flush()

    if job.project.status == ProjectStatus.inactive:
        current_app.logger.warn('Project is not active: %s', job.project.slug)
        finish(Result.aborted)
        return

    # we might already be marked as finished for various reasons
    # (such as aborting the task)
    if job.status == Status.finished:
        return

    _, implementation = JobPlan.get_build_step_for_job(job_id=job.id)
    if implementation is None:
        # TODO(dcramer): record a FailureReason?
        finish(Result.aborted)
        current_app.logger.exception('No build plan set %s', job_id)
        return

    try:
        implementation.execute(job=job)
    except UnrecoverableException:
        finish(Result.infra_failed)
        current_app.logger.exception('Unrecoverable exception creating %s', job_id)
        return

    sync_job.delay(
        job_id=job.id.hex,
        task_id=job.id.hex,
        parent_task_id=job.build_id.hex,
    )
示例8: get
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def get(self, step_id):
    """Serialize a JobStep along with its snapshot and debug configuration."""
    jobstep = JobStep.query.options(
        joinedload('project', innerjoin=True),
    ).get(step_id)
    if jobstep is None:
        return '', 404

    jobplan = JobPlan.query.filter(
        JobPlan.job_id == jobstep.job_id,
    ).first()

    # determine if there's an expected snapshot outcome
    expected_image = SnapshotImage.query.filter(
        SnapshotImage.job_id == jobstep.job_id,
    ).first()

    # we only send a current snapshot if we're not expecting to build
    # a new image
    current_image = None
    if not expected_image:
        if jobplan:
            current_image = jobplan.snapshot_image
        if current_image is None and current_app.config['DEFAULT_SNAPSHOT']:
            current_image = {
                'id': current_app.config['DEFAULT_SNAPSHOT'],
            }

    context = self.serialize(jobstep)
    context['commands'] = self.serialize(list(jobstep.commands))
    context['snapshot'] = self.serialize(current_image)
    context['expectedSnapshot'] = self.serialize(expected_image)
    context['project'] = self.serialize(jobstep.project)
    context['job'] = self.serialize(jobstep.job)

    _, buildstep = JobPlan.get_build_step_for_job(jobstep.job_id)
    debugConfig = buildstep.debug_config if buildstep else {}
    if 'debugForceInfraFailure' in jobstep.data:
        debugConfig['forceInfraFailure'] = jobstep.data['debugForceInfraFailure']
    if debugConfig:
        context['debugConfig'] = self.serialize(debugConfig)

    return self.respond(context, serialize=False)
示例9: expand_command
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def expand_command(self, command, expander, data):
    """Expand a collection command into a new phase of generated jobsteps."""
    jobstep = command.jobstep
    phase_name = data.get('phase')
    if not phase_name:
        # Default to a positional label based on the number of existing phases.
        phase_count = db.session.query(
            func.count(),
        ).filter(
            JobPhase.job_id == jobstep.job_id,
        ).scalar()
        phase_name = 'Phase #{}'.format(phase_count)

    new_jobphase = JobPhase(
        job_id=jobstep.job_id,
        project_id=jobstep.project_id,
        label=phase_name,
        status=Status.queued,
    )
    db.session.add(new_jobphase)

    _, buildstep = JobPlan.get_build_step_for_job(jobstep.job_id)

    expanded = expander.expand(
        max_executors=jobstep.data['max_executors'],
        test_stats_from=buildstep.get_test_stats_from(),
    )
    results = [
        buildstep.create_expanded_jobstep(jobstep, new_jobphase, future_jobstep)
        for future_jobstep in expanded
    ]

    # If there are no tests to run, the phase is done.
    if not results:
        new_jobphase.status = Status.finished
        new_jobphase.result = Result.passed
        db.session.add(new_jobphase)

    db.session.flush()

    for new_jobstep in results:
        sync_job_step.delay_if_needed(
            step_id=new_jobstep.id.hex,
            task_id=new_jobstep.id.hex,
            parent_task_id=new_jobphase.job.id.hex,
        )
    return results
示例10: post
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def post(self):
    """Allocate the next pending jobstep to the requesting agent."""
    try:
        with redis.lock('jobstep:allocate', nowait=True):
            to_allocate = self.find_next_jobstep()
            # Should 204, but flask/werkzeug throws StopIteration (bug!) for tests
            if to_allocate is None:
                return self.respond([])
            to_allocate.status = Status.allocated
            db.session.add(to_allocate)
            db.session.flush()
    except redis.UnableToGetLock:
        return error('Another allocation is in progress', http_code=503)

    try:
        jobplan, buildstep = JobPlan.get_build_step_for_job(to_allocate.job_id)
        assert jobplan and buildstep
        context = self.serialize(to_allocate)
        context['project'] = self.serialize(to_allocate.project)
        context['resources'] = {
            'cpus': to_allocate.data.get('cpus', 4),
            'mem': to_allocate.data.get('mem', 8 * 1024),
        }
        context['cmd'] = buildstep.get_allocation_command(to_allocate)
        return self.respond([context])
    except Exception:
        # Move the step to a terminal aborted state so it is not stuck
        # in "allocated" forever after a failed handoff.
        to_allocate.status = Status.finished
        to_allocate.result = Result.aborted
        db.session.add(to_allocate)
        db.session.flush()
        logging.exception(
            'Exception occurred while allocating job step for project %s',
            to_allocate.project.slug)
        return error('Internal error while attempting allocation',
                     http_code=503)
示例11: process
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def process(self, fp):
    """Parse a phase-config artifact and expand it into additional jobs.

    Parse errors and known-malformed artifacts record a failure reason;
    unexpected expansion errors mark the step as an infrastructure failure.
    """
    step = self.step

    def build_link():
        # Link used in log messages to locate the build in the UI.
        return build_uri('/find_build/{0}/'.format(step.job.build_id.hex))

    try:
        phase_config = json.load(fp)
    except ValueError:
        self.logger.warning('Failed to parse json; (step=%s, build=%s)', step.id.hex, build_link(), exc_info=True)
        self._add_failure_reason()
        return

    _, implementation = JobPlan.get_build_step_for_job(job_id=step.job_id)
    try:
        implementation.expand_jobs(step, phase_config)
    except ArtifactParseError:
        self.logger.warning('malformed %s artifact (step=%s, build=%s)', self.FILENAMES[0],
                            step.id.hex, build_link(), exc_info=True)
        self._add_failure_reason()
    except Exception:
        self.logger.warning('expand_jobs failed (step=%s, build=%s)', step.id.hex, build_link(), exc_info=True)
        step.result = Result.infra_failed
        db.session.add(step)
        db.session.commit()
示例12: expand_command
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def expand_command(self, command, expander, data):
    """Expand a collection command into a new phase of jobsteps."""
    jobstep = command.jobstep
    phase_name = data.get('phase')
    if not phase_name:
        phase_count = db.session.query(
            func.count(),
        ).filter(
            JobPhase.job_id == jobstep.job_id,
        ).scalar()
        phase_name = 'Phase #{}'.format(phase_count)

    # Flag the originating jobstep so we know it has already been expanded.
    jobstep.data['expanded'] = True
    db.session.add(jobstep)

    new_jobphase = JobPhase(
        job_id=jobstep.job_id,
        project_id=jobstep.project_id,
        label=phase_name,
        status=Status.queued,
    )
    db.session.add(new_jobphase)

    _, buildstep = JobPlan.get_build_step_for_job(jobstep.job_id)

    results = [
        buildstep.expand_jobstep(jobstep, new_jobphase, future_jobstep)
        for future_jobstep in expander.expand(max_executors=jobstep.data['max_executors'])
    ]

    db.session.flush()

    for new_jobstep in results:
        sync_job_step.delay_if_needed(
            step_id=new_jobstep.id.hex,
            task_id=new_jobstep.id.hex,
            parent_task_id=new_jobphase.job.id.hex,
        )
    return results
示例13: _sync_artifacts_for_jobstep
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def _sync_artifacts_for_jobstep(step):
    """Queue sync_artifact tasks for a finished jobstep's artifacts."""
    # only generate the sync_artifact tasks for this step once
    already_queued = Task.query.filter(
        Task.parent_id == step.id,
        Task.task_name == 'sync_artifact',
    ).first()
    if already_queued:
        return

    _, buildstep = JobPlan.get_build_step_for_job(job_id=step.job_id)
    prefer_artifactstore = buildstep.prefer_artifactstore()
    artifacts = Artifact.query.filter(Artifact.step_id == step.id).all()
    to_sync = _get_artifacts_to_sync(artifacts, prefer_artifactstore)

    # buildstep may want to check for e.g. required artifacts
    buildstep.verify_final_artifacts(step, to_sync)

    for artifact in to_sync:
        sync_artifact.delay_if_needed(
            artifact_id=artifact.id.hex,
            task_id=artifact.id.hex,
            parent_task_id=step.id.hex,
        )
示例14: sync_job_step
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def sync_job_step(step_id):
"""
Polls a jenkins build for updates. May have sync_artifact children.
"""
step = JobStep.query.get(step_id)
if not step:
return
jobplan, implementation = JobPlan.get_build_step_for_job(job_id=step.job_id)
# only synchronize if upstream hasn't suggested we're finished
if step.status != Status.finished:
implementation.update_step(step=step)
db.session.flush()
_sync_from_artifact_store(step)
if step.status == Status.finished:
_sync_artifacts_for_jobstep(step)
is_finished = (step.status == Status.finished and
# make sure all child tasks (like sync_artifact) have also finished
sync_job_step.verify_all_children() == Status.finished)
if not is_finished:
default_timeout = current_app.config['DEFAULT_JOB_TIMEOUT_MIN']
if has_timed_out(step, jobplan, default_timeout=default_timeout):
old_status = step.status
step.data['timed_out'] = True
implementation.cancel_step(step=step)
# Not all implementations can actually cancel, but it's dead to us as of now
# so we mark it as finished.
step.status = Status.finished
step.date_finished = datetime.utcnow()
# Implementations default to marking canceled steps as aborted,
# but we're not canceling on good terms (it should be done by now)
# so we consider it a failure here.
#
# We check whether the step was marked as in_progress to make a best
# guess as to whether this is an infrastructure failure, or the
# repository under test is just taking too long. This won't be 100%
# reliable, but is probably good enough.
if old_status == Status.in_progress:
step.result = Result.failed
else:
step.result = Result.infra_failed
db.session.add(step)
job = step.job
try_create(FailureReason, {
'step_id': step.id,
'job_id': job.id,
'build_id': job.build_id,
'project_id': job.project_id,
'reason': 'timeout'
})
db.session.flush()
statsreporter.stats().incr('job_step_timed_out')
# If we timeout something that isn't in progress, that's our fault, and we should know.
if old_status != Status.in_progress:
current_app.logger.warning(
"Timed out jobstep that wasn't in progress: %s (was %s)", step.id, old_status)
raise sync_job_step.NotFinished
# Ignore any 'failures' if the build did not finish properly.
# NOTE(josiah): we might want to include "unknown" and "skipped" here as
# well, or have some named condition like "not meaningful_result(step.result)".
if step.result in (Result.aborted, Result.infra_failed):
_report_jobstep_result(step)
return
# Check for FailureReason objects generated by child jobs
failure_result = _result_from_failure_reasons(step)
if failure_result and failure_result != step.result:
step.result = failure_result
db.session.add(step)
db.session.commit()
if failure_result == Result.infra_failed:
_report_jobstep_result(step)
return
try:
record_coverage_stats(step)
except Exception:
current_app.logger.exception('Failing recording coverage stats for step %s', step.id)
# We need the start time of this step's phase to determine if we're part of
# the last phase. So, if date_started is empty, wait for sync_phase to catch
# up and try again.
if _expects_tests(jobplan) and not step.phase.date_started:
current_app.logger.warning(
"Phase[%s].date_started is missing. Retrying Step", step.phase.id)
# Reset result to unknown to reduce window where test might be incorrectly green.
# Set status to in_progress so that the next sync_job_step will fetch status from Jenkins again.
#.........这里部分代码省略.........
示例15: post
# 需要导入模块: from changes.models import JobPlan [as 别名]
# 或者: from changes.models.JobPlan import get_build_step_for_job [as 别名]
def post(self):
    """Allocate as many pending jobsteps as fit within the offered resources."""
    args = json.loads(request.data)
    try:
        resources = args['resources']
    except KeyError:
        return error('Missing resources attribute')

    total_cpus = int(resources['cpus'])
    total_mem = int(resources['mem'])  # MB

    with statsreporter.stats().timer('jobstep_allocate'):
        try:
            with redis.lock('jobstep:allocate', nowait=True):
                available_allocations = self.find_next_jobsteps(limit=10)
                to_allocate = []
                for jobstep in available_allocations:
                    req_cpus = jobstep.data.get('cpus', 4)
                    req_mem = jobstep.data.get('mem', 8 * 1024)
                    if total_cpus < req_cpus or total_mem < req_mem:
                        logging.info('Not allocating %s due to lack of offered resources', jobstep.id.hex)
                        continue
                    # Deduct this step's requirements from the remaining offer.
                    total_cpus -= req_cpus
                    total_mem -= req_mem
                    jobstep.status = Status.allocated
                    db.session.add(jobstep)
                    to_allocate.append(jobstep)
                if not to_allocate:
                    # Should 204, but flask/werkzeug throws StopIteration (bug!) for tests
                    return self.respond([])
                db.session.flush()
        except UnableToGetLock:
            return error('Another allocation is in progress', http_code=503)

    context = []
    for jobstep, jobstep_data in zip(to_allocate, self.serialize(to_allocate)):
        try:
            jobplan, buildstep = JobPlan.get_build_step_for_job(jobstep.job_id)
            assert jobplan and buildstep
            jobstep_data['project'] = self.serialize(jobstep.project)
            jobstep_data['resources'] = {
                'cpus': jobstep.data.get('cpus', 4),
                'mem': jobstep.data.get('mem', 8 * 1024),
            }
            jobstep_data['cmd'] = buildstep.get_allocation_command(jobstep)
        except Exception:
            # Failed handoff: mark the step as an infra failure rather than
            # leaving it stuck in "allocated".
            jobstep.status = Status.finished
            jobstep.result = Result.infra_failed
            db.session.add(jobstep)
            db.session.flush()
            logging.exception(
                'Exception occurred while allocating job step %s for project %s',
                jobstep.id.hex, jobstep.project.slug)
        else:
            context.append(jobstep_data)
    return self.respond(context)