本文整理汇总了Python中changes.models.JobPlan类的典型用法代码示例。如果您正苦于以下问题:Python JobPlan类的具体用法?Python JobPlan怎么用?Python JobPlan使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了JobPlan类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: post
def post(self, build_id):
    """Cancel a build: abort every unfinished job, then mark the build aborted.

    Responds 404 when the build does not exist, and 204 when it is already
    finished or when no job could actually be cancelled.
    """
    build = Build.query.options(
        joinedload('project', innerjoin=True),
        joinedload('author'),
        joinedload('source').joinedload('revision'),
    ).get(build_id)
    if build is None:
        return '', 404
    if build.status == Status.finished:
        return '', 204

    # find any active/pending jobs
    active_jobs = (j for j in build.jobs if j.status != Status.finished)

    cancelled = []
    for job in active_jobs:
        # TODO(dcramer): we make an assumption that there is a single step
        _, implementation = JobPlan.get_build_step_for_job(job_id=job.id)
        if not implementation:
            continue
        implementation.cancel(job=job)
        cancelled.append(job)

    if not cancelled:
        return '', 204

    build.status = Status.finished
    build.result = Result.aborted
    db.session.add(build)
    return self.respond(build)
示例2: sync_artifact
def sync_artifact(artifact_id=None, **kwargs):
    """
    Downloads an artifact from jenkins.

    Processes the artifact through the build step's artifact manager when a
    stored file is already present, otherwise fetches it from the build
    system. Unrecoverable errors are logged and swallowed so the task does
    not retry forever.
    """
    artifact = Artifact.query.get(artifact_id)
    if artifact is None:
        return
    step = artifact.step
    if step.result == Result.aborted:
        return
    _, implementation = JobPlan.get_build_step_for_job(job_id=step.job_id)
    # BUG FIX: get_build_step_for_job can return (None, None) when the job
    # has no plan (other callers in this file guard for this); previously
    # this crashed with AttributeError on the calls below.
    if implementation is None:
        current_app.logger.warning(
            'No build step implementation for job %s (artifact %s)',
            step.job_id, artifact_id)
        return
    # TODO(dcramer): we eventually want to abstract the entirety of Jenkins
    # artifact syncing so that we pull files and then process them
    if artifact.file:
        try:
            implementation.get_artifact_manager(step).process(artifact)
        except UnrecoverableException:
            current_app.logger.exception(
                'Unrecoverable exception processing artifact %s: %s',
                artifact.step_id, artifact)
    else:
        try:
            implementation.fetch_artifact(artifact=artifact)
        except UnrecoverableException:
            current_app.logger.exception(
                'Unrecoverable exception fetching artifact %s: %s',
                artifact.step_id, artifact)
示例3: create_job
def create_job(job_id):
    """Kick off a newly created job: resolve its build step and execute it.

    Enqueues a follow-up ``sync_job`` task on success. Marks the job
    finished/failed when no build plan is configured and finished/aborted
    when the build step raises an unrecoverable error.
    """
    job = Job.query.get(job_id)
    if not job:
        return
    # we might already be marked as finished for various reasons
    # (such as aborting the task)
    if job.status == Status.finished:
        return
    jobplan, implementation = JobPlan.get_build_step_for_job(job_id=job.id)
    if implementation is None:
        # TODO(dcramer): record a FailureReason?
        job.status = Status.finished
        job.result = Result.failed
        # BUG FIX: logger.exception() outside an except block logs a bogus
        # "NoneType: None" traceback; use error() since nothing was raised.
        current_app.logger.error('No build plan set %s', job_id)
        return
    try:
        implementation.execute(job=job)
    except UnrecoverableException:
        job.status = Status.finished
        job.result = Result.aborted
        current_app.logger.exception('Unrecoverable exception creating %s', job_id)
        return
    sync_job.delay(
        job_id=job.id.hex,
        task_id=job.id.hex,
        parent_task_id=job.build_id.hex,
    )
示例4: get
def get(self, step_id):
    """Return the full execution context for a jobstep.

    Responds 404 when the step is unknown; otherwise serializes the step
    together with its commands, snapshot information, resource limits,
    optional LXC adapter configuration, and debug flags.
    """
    jobstep = JobStep.query.options(
        joinedload('project', innerjoin=True),
    ).get(step_id)
    if jobstep is None:
        return '', 404
    # the jobplan carries the snapshot association for this job
    jobplan = JobPlan.query.filter(
        JobPlan.job_id == jobstep.job_id,
    ).first()
    # determine if there's an expected snapshot outcome
    expected_image = SnapshotImage.query.filter(
        SnapshotImage.job_id == jobstep.job_id,
    ).first()
    current_image = None
    # we only send a current snapshot if we're not expecting to build
    # a new image
    if not expected_image:
        current_image = None
        if jobplan:
            current_image = jobplan.snapshot_image
        # fall back to the globally configured default snapshot, if any
        if current_image is None and current_app.config['DEFAULT_SNAPSHOT']:
            current_image = {
                'id': current_app.config['DEFAULT_SNAPSHOT'],
            }
    context = self.serialize(jobstep)
    context['commands'] = self.serialize(list(jobstep.commands))
    context['snapshot'] = self.serialize(current_image)
    context['expectedSnapshot'] = self.serialize(expected_image)
    context['project'] = self.serialize(jobstep.project)
    context['job'] = self.serialize(jobstep.job)
    # buildstep may be None if the job has no plan; guard each use below
    _, buildstep = JobPlan.get_build_step_for_job(jobstep.job_id)
    resource_limits = buildstep.get_resource_limits() if buildstep else {}
    if resource_limits:
        context['resourceLimits'] = resource_limits
    lxc_config = buildstep.get_lxc_config(jobstep) if buildstep else None
    if lxc_config:
        # switch the client onto the LXC adapter and pass its settings through
        context["adapter"] = "lxc"
        lxc_config = {
            'preLaunch': lxc_config.prelaunch,
            'postLaunch': lxc_config.postlaunch,
            's3Bucket': lxc_config.s3_bucket,
            'compression': lxc_config.compression,
            'release': lxc_config.release,
        }
        context['lxcConfig'] = lxc_config
    # NOTE(review): the assignment below mutates buildstep.debug_config in
    # place when 'debugForceInfraFailure' is set — presumably acceptable
    # only if debug_config is per-job; confirm it is not shared state.
    debugConfig = buildstep.debug_config if buildstep else {}
    if 'debugForceInfraFailure' in jobstep.data:
        debugConfig['forceInfraFailure'] = jobstep.data['debugForceInfraFailure']
    if debugConfig:
        context['debugConfig'] = self.serialize(debugConfig)
    return self.respond(context, serialize=False)
示例5: execute_build
def execute_build(build, snapshot_id, no_snapshot):
    """Create a Job and JobPlan for each of the project's build plans, then
    enqueue job-creation tasks and a build sync task.

    :param build: the Build to execute.
    :param snapshot_id: explicit snapshot to use for all jobplans; must be
        None when ``no_snapshot`` is set.
    :param no_snapshot: when True, build without any snapshot.
    :return: the same ``build`` that was passed in.
    """
    if no_snapshot:
        assert snapshot_id is None, 'Cannot specify snapshot with no_snapshot option'
    # TODO(dcramer): most of this should be abstracted into sync_build as if it
    # were a "im on step 0, create step 1"
    project = build.project
    # We choose a snapshot before creating jobplans. This is so that different
    # jobplans won't end up using different snapshots in a build.
    if snapshot_id is None and not no_snapshot:
        snapshot = Snapshot.get_current(project.id)
        if snapshot:
            snapshot_id = snapshot.id
    jobs = []
    for plan in get_build_plans(project):
        job = Job(
            build=build,
            build_id=build.id,
            project=project,
            project_id=project.id,
            source=build.source,
            source_id=build.source_id,
            status=build.status,
            label=plan.label,
        )
        db.session.add(job)
        jobplan = JobPlan.build_jobplan(plan, job, snapshot_id=snapshot_id)
        db.session.add(jobplan)
        jobs.append(job)
    # commit before enqueueing so workers can see the rows
    db.session.commit()
    for job in jobs:
        create_job.delay(
            job_id=job.id.hex,
            task_id=job.id.hex,
            parent_task_id=job.build_id.hex,
        )
    db.session.commit()
    sync_build.delay(
        build_id=build.id.hex,
        task_id=build.id.hex,
    )
    return build
示例6: process
def process(self, fp):
    """Parse a phase-config JSON artifact and expand jobs from it.

    Records a failure reason (and logs with traceback) when either the
    JSON is unparseable or the expansion itself raises.
    """
    try:
        phase_config = json.load(fp)
    except ValueError:
        self.logger.warning(
            'Failed to parse json; (step=%s, build=%s)',
            self.step.id.hex,
            build_uri('/find_build/{0}/'.format(self.step.job.build_id.hex)),
            exc_info=True)
        self._add_failure_reason()
        return

    _, implementation = JobPlan.get_build_step_for_job(job_id=self.step.job_id)
    try:
        implementation.expand_jobs(self.step, phase_config)
    except Exception:
        self.logger.warning(
            'expand_jobs failed (step=%s, build=%s)',
            self.step.id.hex,
            build_uri('/find_build/{0}/'.format(self.step.job.build_id.hex)),
            exc_info=True)
        self._add_failure_reason()
示例7: process
def process(self, fp):
    """Expand additional jobs from a phase-config JSON artifact.

    On any failure — unparseable JSON or a failing expansion — logs with a
    traceback and records a ``malformed_artifact`` FailureReason for the
    step so the UI can surface why expansion did not happen.
    """
    def record_failure(message):
        # Log with traceback and persist a FailureReason for this step.
        uri = build_uri('/find_build/{0}/'.format(self.step.job.build_id.hex))
        self.logger.warning(message, self.step.id.hex, uri, exc_info=True)
        try_create(FailureReason, {
            'step_id': self.step.id,
            'job_id': self.step.job_id,
            'build_id': self.step.job.build_id,
            'project_id': self.step.project_id,
            'reason': 'malformed_artifact'
        })
        db.session.commit()

    try:
        phase_config = json.load(fp)
    except ValueError:
        record_failure('Failed to parse json; (step=%s, build=%s)')
        return
    try:
        _, implementation = JobPlan.get_build_step_for_job(job_id=self.step.job_id)
        implementation.expand_jobs(self.step, phase_config)
    except Exception:
        # BUG FIX: previously one broad except logged every failure as
        # "Failed to parse json", even when the JSON parsed fine and the
        # expansion itself raised; log those accurately instead.
        record_failure('expand_jobs failed (step=%s, build=%s)')
示例8: create_job
def create_job(job_id):
    """
    Kicks off a newly created job within a build;
    enqueued for each job within a new build.

    Aborts immediately when the project is inactive or when no build plan
    is configured; marks the job infra_failed when the build step raises
    an unrecoverable error. On success enqueues a ``sync_job`` task.
    """
    job = Job.query.get(job_id)
    if not job:
        return
    if job.project.status == ProjectStatus.inactive:
        # BUG FIX: logger.warn is a deprecated alias; use warning().
        current_app.logger.warning('Project is not active: %s', job.project.slug)
        job.status = Status.finished
        job.result = Result.aborted
        db.session.add(job)
        db.session.flush()
        return
    # we might already be marked as finished for various reasons
    # (such as aborting the task)
    if job.status == Status.finished:
        return
    _, implementation = JobPlan.get_build_step_for_job(job_id=job.id)
    if implementation is None:
        # TODO(dcramer): record a FailureReason?
        job.status = Status.finished
        job.result = Result.aborted
        db.session.add(job)
        db.session.flush()
        # BUG FIX: logger.exception() outside an except block logs a bogus
        # "NoneType: None" traceback; use error() since nothing was raised.
        current_app.logger.error('No build plan set %s', job_id)
        return
    try:
        implementation.execute(job=job)
    except UnrecoverableException:
        job.status = Status.finished
        job.result = Result.infra_failed
        db.session.add(job)
        db.session.flush()
        current_app.logger.exception('Unrecoverable exception creating %s', job_id)
        return
    sync_job.delay(
        job_id=job.id.hex,
        task_id=job.id.hex,
        parent_task_id=job.build_id.hex,
    )
示例9: execute_build
def execute_build(build):
    """Create a Job (and its JobPlan) for each of the project's build plans,
    enqueue a creation task per job, then enqueue a build sync task.

    Returns the build that was passed in.
    """
    # TODO(dcramer): most of this should be abstracted into sync_build as if it
    # were a "im on step 0, create step 1"
    project = build.project

    created_jobs = []
    for plan in get_build_plans(project):
        new_job = Job(
            build=build,
            build_id=build.id,
            project=project,
            project_id=project.id,
            source=build.source,
            source_id=build.source_id,
            status=build.status,
            label=plan.label,
        )
        db.session.add(new_job)
        db.session.add(JobPlan.build_jobplan(plan, new_job))
        created_jobs.append(new_job)

    # persist the rows before any worker can pick the tasks up
    db.session.commit()

    for new_job in created_jobs:
        create_job.delay(
            job_id=new_job.id.hex,
            task_id=new_job.id.hex,
            parent_task_id=new_job.build_id.hex,
        )

    db.session.commit()

    sync_build.delay(
        build_id=build.id.hex,
        task_id=build.id.hex,
    )

    return build
示例10: expand_command
def expand_command(self, command, expander, data):
    """Expand a collection command into a new JobPhase of jobsteps.

    Uses the command's expander to produce future jobsteps, creating one
    expanded jobstep per result via the job's buildstep. Returns the list
    of newly created jobsteps (possibly empty).
    """
    jobstep = command.jobstep
    phase_name = data.get('phase')
    if not phase_name:
        # no explicit phase name given; number it after the existing phases
        phase_count = db.session.query(
            func.count(),
        ).filter(
            JobPhase.job_id == jobstep.job_id,
        ).scalar()
        phase_name = 'Phase #{}'.format(phase_count)
    new_jobphase = JobPhase(
        job_id=jobstep.job_id,
        project_id=jobstep.project_id,
        label=phase_name,
        status=Status.queued,
    )
    db.session.add(new_jobphase)
    _, buildstep = JobPlan.get_build_step_for_job(jobstep.job_id)
    # NOTE(review): assumes jobstep.data contains 'max_executors' — a
    # KeyError here would propagate; confirm callers always set it.
    results = []
    for future_jobstep in expander.expand(max_executors=jobstep.data['max_executors'],
                                          test_stats_from=buildstep.get_test_stats_from()):
        new_jobstep = buildstep.create_expanded_jobstep(jobstep, new_jobphase, future_jobstep)
        results.append(new_jobstep)
    # If there are no tests to run, the phase is done.
    if len(results) == 0:
        new_jobphase.status = Status.finished
        new_jobphase.result = Result.passed
        db.session.add(new_jobphase)
    # flush so the new steps have ids before tasks are enqueued
    db.session.flush()
    for new_jobstep in results:
        sync_job_step.delay_if_needed(
            step_id=new_jobstep.id.hex,
            task_id=new_jobstep.id.hex,
            parent_task_id=new_jobphase.job.id.hex,
        )
    return results
示例11: get
def get(self, step_id):
    """Return the execution context for a jobstep.

    Responds 404 when the step is unknown; otherwise serializes the step
    together with its commands, snapshot information, and debug flags.
    """
    jobstep = JobStep.query.options(
        joinedload('project', innerjoin=True),
    ).get(step_id)
    if jobstep is None:
        return '', 404
    # the jobplan carries the snapshot association for this job
    jobplan = JobPlan.query.filter(
        JobPlan.job_id == jobstep.job_id,
    ).first()
    # determine if there's an expected snapshot outcome
    expected_image = SnapshotImage.query.filter(
        SnapshotImage.job_id == jobstep.job_id,
    ).first()
    current_image = None
    # we only send a current snapshot if we're not expecting to build
    # a new image
    if not expected_image:
        current_image = None
        if jobplan:
            current_image = jobplan.snapshot_image
        # fall back to the globally configured default snapshot, if any
        if current_image is None and current_app.config['DEFAULT_SNAPSHOT']:
            current_image = {
                'id': current_app.config['DEFAULT_SNAPSHOT'],
            }
    context = self.serialize(jobstep)
    context['commands'] = self.serialize(list(jobstep.commands))
    context['snapshot'] = self.serialize(current_image)
    context['expectedSnapshot'] = self.serialize(expected_image)
    context['project'] = self.serialize(jobstep.project)
    context['job'] = self.serialize(jobstep.job)
    _, buildstep = JobPlan.get_build_step_for_job(jobstep.job_id)
    # BUG FIX: copy debug_config before mutating it below; assigning
    # 'forceInfraFailure' directly onto buildstep.debug_config leaked a
    # per-step debug flag into the shared buildstep configuration.
    debugConfig = dict(buildstep.debug_config) if buildstep else {}
    if 'debugForceInfraFailure' in jobstep.data:
        debugConfig['forceInfraFailure'] = jobstep.data['debugForceInfraFailure']
    if debugConfig:
        context['debugConfig'] = self.serialize(debugConfig)
    return self.respond(context, serialize=False)
示例12: post
def post(self):
    """Allocate the next pending jobstep to the requesting worker.

    Holds a short redis lock while picking and marking the step as
    allocated. Responds 503 when another allocation holds the lock or
    when building the allocation context fails (in which case the step
    is aborted rather than left stuck in the allocated state).
    """
    try:
        # nowait: fail fast rather than queueing up allocators
        with redis.lock('jobstep:allocate', nowait=True):
            to_allocate = self.find_next_jobstep()
            # Should 204, but flask/werkzeug throws StopIteration (bug!) for tests
            if to_allocate is None:
                return self.respond([])
            to_allocate.status = Status.allocated
            db.session.add(to_allocate)
            db.session.flush()
    except redis.UnableToGetLock:
        return error('Another allocation is in progress', http_code=503)
    try:
        jobplan, buildstep = JobPlan.get_build_step_for_job(to_allocate.job_id)
        assert jobplan and buildstep
        context = self.serialize(to_allocate)
        context['project'] = self.serialize(to_allocate.project)
        # resource defaults: 4 cpus / 8 GiB unless the step overrides them
        context['resources'] = {
            'cpus': to_allocate.data.get('cpus', 4),
            'mem': to_allocate.data.get('mem', 8 * 1024),
        }
        context['cmd'] = buildstep.get_allocation_command(to_allocate)
        return self.respond([context])
    except Exception:
        # roll the step into a terminal state so it isn't stuck "allocated"
        to_allocate.status = Status.finished
        to_allocate.result = Result.aborted
        db.session.add(to_allocate)
        db.session.flush()
        logging.exception(
            'Exception occurred while allocating job step for project %s',
            to_allocate.project.slug)
        return error('Internal error while attempting allocation',
                     http_code=503)
示例13: process
def process(self, fp):
    """Parse a phase-config JSON artifact and expand jobs from it.

    A JSON parse error or a malformed artifact records a failure reason;
    any other expansion error marks the step infra_failed.
    """
    try:
        phase_config = json.load(fp)
    except ValueError:
        uri = build_uri('/find_build/{0}/'.format(self.step.job.build_id.hex))
        self.logger.warning('Failed to parse json; (step=%s, build=%s)', self.step.id.hex, uri, exc_info=True)
        self._add_failure_reason()
        return

    _, implementation = JobPlan.get_build_step_for_job(job_id=self.step.job_id)
    try:
        implementation.expand_jobs(self.step, phase_config)
    except ArtifactParseError:
        uri = build_uri('/find_build/{0}/'.format(self.step.job.build_id.hex))
        self.logger.warning('malformed %s artifact (step=%s, build=%s)', self.FILENAMES[0],
                            self.step.id.hex, uri, exc_info=True)
        self._add_failure_reason()
    except Exception:
        uri = build_uri('/find_build/{0}/'.format(self.step.job.build_id.hex))
        self.logger.warning('expand_jobs failed (step=%s, build=%s)', self.step.id.hex, uri, exc_info=True)
        self.step.result = Result.infra_failed
        db.session.add(self.step)
        db.session.commit()
示例14: expand_command
def expand_command(self, command, expander, data):
    """Expand a collection command into a new JobPhase of jobsteps.

    Marks the originating jobstep as expanded, then uses the command's
    expander to create one jobstep per future jobstep via the job's
    buildstep. Returns the list of newly created jobsteps.
    """
    jobstep = command.jobstep
    phase_name = data.get('phase')
    if not phase_name:
        # no explicit phase name given; number it after the existing phases
        phase_count = db.session.query(
            func.count(),
        ).filter(
            JobPhase.job_id == jobstep.job_id,
        ).scalar()
        phase_name = 'Phase #{}'.format(phase_count)
    # remember that this step produced expanded children
    jobstep.data['expanded'] = True
    db.session.add(jobstep)
    new_jobphase = JobPhase(
        job_id=jobstep.job_id,
        project_id=jobstep.project_id,
        label=phase_name,
        status=Status.queued,
    )
    db.session.add(new_jobphase)
    _, buildstep = JobPlan.get_build_step_for_job(jobstep.job_id)
    # NOTE(review): assumes jobstep.data contains 'max_executors' — a
    # KeyError here would propagate; confirm callers always set it.
    results = []
    for future_jobstep in expander.expand(max_executors=jobstep.data['max_executors']):
        new_jobstep = buildstep.expand_jobstep(jobstep, new_jobphase, future_jobstep)
        results.append(new_jobstep)
    # flush so the new steps have ids before tasks are enqueued
    db.session.flush()
    for new_jobstep in results:
        sync_job_step.delay_if_needed(
            step_id=new_jobstep.id.hex,
            task_id=new_jobstep.id.hex,
            parent_task_id=new_jobphase.job.id.hex,
        )
    return results
示例15: _sync_artifacts_for_jobstep
def _sync_artifacts_for_jobstep(step):
    """Queue ``sync_artifact`` tasks for a jobstep's artifacts, exactly once."""
    # only generate the sync_artifact tasks for this step once
    already_queued = Task.query.filter(
        Task.parent_id == step.id,
        Task.task_name == 'sync_artifact',
    ).first()
    if already_queued:
        return

    _, buildstep = JobPlan.get_build_step_for_job(job_id=step.job_id)
    artifacts = Artifact.query.filter(Artifact.step_id == step.id).all()
    to_sync = _get_artifacts_to_sync(artifacts, buildstep.prefer_artifactstore())

    # buildstep may want to check for e.g. required artifacts
    buildstep.verify_final_artifacts(step, to_sync)

    for artifact in to_sync:
        sync_artifact.delay_if_needed(
            artifact_id=artifact.id.hex,
            task_id=artifact.id.hex,
            parent_task_id=step.id.hex,
        )