This article collects typical usage examples of the set_status function from Python's teuthology.job_status module. If you are wondering what set_status does, how to call it, or where it is used in practice, the hand-picked code examples here may help.
Below are 10 code examples of the set_status function, sorted by popularity by default.
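Before the examples, here is a minimal sketch of the call pattern they all share; it is illustrative only and not taken from the examples below. set_status records a job's outcome on a summary dict (ctx.summary in the examples), and its companion get_status reads that status back.

from teuthology.job_status import get_status, set_status

# A plain dict standing in for ctx.summary from the examples below.
summary = {}
set_status(summary, 'fail')            # record the job outcome
assert get_status(summary) == 'fail'   # read the recorded status back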
Example 1: archive
def archive(ctx, config):
"""
Handle the creation and deletion of the archive directory.
"""
log.info("Creating archive directory...")
archive_dir = misc.get_archive_dir(ctx)
run.wait(ctx.cluster.run(args=["install", "-d", "-m0755", "--", archive_dir], wait=False))
try:
yield
except Exception:
# we need to know this below
set_status(ctx.summary, "fail")
raise
finally:
passed = get_status(ctx.summary) == "pass"
if ctx.archive is not None and not (ctx.config.get("archive-on-error") and passed):
log.info("Transferring archived files...")
logdir = os.path.join(ctx.archive, "remote")
if not os.path.exists(logdir):
os.mkdir(logdir)
for rem in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, rem.shortname)
misc.pull_directory(rem, archive_dir, path)
# Check for coredumps and pull binaries
fetch_binaries_for_coredumps(path, rem)
log.info("Removing archive directory...")
run.wait(ctx.cluster.run(args=["rm", "-rf", "--", archive_dir], wait=False))
Example 2: check_packages
def check_packages(ctx, config):
"""
Checks gitbuilder to determine if there are missing packages for this job.
If there are missing packages, fail the job.
"""
for task in ctx.config["tasks"]:
if task.keys()[0] == "buildpackages":
log.info("Checking packages skipped because " "the task buildpackages was found.")
return
log.info("Checking packages...")
os_type = ctx.config.get("os_type")
sha1 = ctx.config.get("sha1")
# We can only do this check if there are a defined sha1 and os_type
# in the job config.
if os_type and sha1:
package = GitbuilderProject("ceph", ctx.config)
template = "Checking packages for os_type,'{os}' flavor '{flav}' and" " ceph hash '{ver}'"
log.info(template.format(os=package.os_type, flav=package.flavor, ver=package.sha1))
if package.version:
log.info("Found packages for ceph version {ver}".format(ver=package.version))
else:
msg = "Packages for distro '{d}' and ceph hash '{ver}' not found"
msg = msg.format(d=package.distro, ver=package.sha1)
log.error(msg)
# set the failure message and update paddles with the status
ctx.summary["failure_reason"] = msg
set_status(ctx.summary, "dead")
report.try_push_job_info(ctx.config, dict(status="dead"))
raise VersionNotFoundError(package.base_url)
else:
log.info("Checking packages skipped, missing os_type '{os}' or ceph hash '{ver}'".format(os=os_type, ver=sha1))
Example 3: coredump
def coredump(ctx, config):
"""
Stash a coredump of this system if an error occurs.
"""
log.info('Enabling coredump saving...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
'{adir}/coredump'.format(adir=archive_dir),
run.Raw('&&'),
'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir),
],
wait=False,
)
)
try:
yield
finally:
run.wait(
ctx.cluster.run(
args=[
'sudo', 'sysctl', '-w', 'kernel.core_pattern=core',
run.Raw('&&'),
# don't litter the archive dir if there were no cores dumped
'rmdir',
'--ignore-fail-on-non-empty',
'--',
'{adir}/coredump'.format(adir=archive_dir),
],
wait=False,
)
)
# set status = 'fail' if the dir is still there = coredumps were
# seen
for rem in ctx.cluster.remotes.iterkeys():
r = rem.run(
args=[
'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then',
'echo', 'OK', run.Raw(';'),
'fi',
],
stdout=StringIO(),
)
if r.stdout.getvalue() != 'OK\n':
log.warning('Found coredumps on %s, flagging run as failed', rem)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
'Found coredumps on {rem}'.format(rem=rem)
Example 4: archive
def archive(ctx, config):
"""
Handle the creation and deletion of the archive directory.
"""
log.info('Creating archive directory...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--', archive_dir,
],
wait=False,
)
)
try:
yield
except Exception:
# we need to know this below
set_status(ctx.summary, 'fail')
raise
finally:
passed = get_status(ctx.summary) == 'pass'
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and passed):
log.info('Transferring archived files...')
logdir = os.path.join(ctx.archive, 'remote')
if (not os.path.exists(logdir)):
os.mkdir(logdir)
for rem in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, rem.shortname)
misc.pull_directory(rem, archive_dir, path)
# Check for coredumps and pull binaries
fetch_binaries_for_coredumps(path, rem)
log.info('Removing archive directory...')
run.wait(
ctx.cluster.run(
args=[
'rm',
'-rf',
'--',
archive_dir,
],
wait=False,
),
)
Example 5: check_packages
def check_packages(ctx, config):
"""
Checks gitbuilder to determine if there are missing packages for this job.
If there are missing packages, fail the job.
"""
log.info("Checking packages...")
sha1 = ctx.config.get("sha1")
os_type = ctx.config.get("os_type")
flavor = get_install_task_flavor(ctx.config)
# We can only do this check if there are a defined sha1 and os_type
# in the job config.
if os_type and sha1:
template = "Checking packages for os_type,'{os}' flavor '{flav}' and" \
" ceph hash '{ver}'"
log.info(
template.format(
os=os_type,
flav=flavor,
ver=sha1,
)
)
if not has_packages_for_distro(sha1, os_type, flavor):
msg = "Packages for os_type '{os}' and ceph hash '{ver}' not found"
msg = msg.format(
os=os_type,
ver=sha1,
)
log.error(msg)
# set the failure message and update paddles with the status
ctx.summary["failure_reason"] = msg
set_status(ctx.summary, "dead")
report.try_push_job_info(ctx.config, dict(status='dead'))
raise RuntimeError(msg)
else:
log.info(
"Checking packages skipped, missing os_type '{os}' or ceph hash '{ver}'".format(
os=os_type,
ver=sha1,
)
)
Example 6: syslog
#......... Part of this example's code is omitted here .........
ctx.cluster.run(
args=["sudo", "rm", "-f", "--", CONF, run.Raw("&&"), "sudo", "service", "rsyslog", "restart"],
wait=False,
)
)
# race condition: nothing actually says rsyslog had time to
# flush the file fully. oh well.
log.info("Checking logs for errors...")
for rem in ctx.cluster.remotes.iterkeys():
log.debug("Checking %s", rem.name)
r = rem.run(
args=[
"egrep",
"--binary-files=text",
"\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b",
run.Raw("{adir}/syslog/*.log".format(adir=archive_dir)),
run.Raw("|"),
"grep",
"-v",
"task .* blocked for more than .* seconds",
run.Raw("|"),
"grep",
"-v",
"lockdep is turned off",
run.Raw("|"),
"grep",
"-v",
"trying to register non-static key",
run.Raw("|"),
"grep",
"-v",
"DEBUG: fsize", # xfs_fsr
run.Raw("|"),
"grep",
"-v",
"CRON", # ignore cron noise
run.Raw("|"),
"grep",
"-v",
"BUG: bad unlock balance detected", # #6097
run.Raw("|"),
"grep",
"-v",
"inconsistent lock state", # FIXME see #2523
run.Raw("|"),
"grep",
"-v",
"*** DEADLOCK ***", # part of lockdep output
run.Raw("|"),
"grep",
"-v",
"INFO: possible irq lock inversion dependency detected", # FIXME see #2590 and #147
run.Raw("|"),
"grep",
"-v",
"INFO: NMI handler (perf_event_nmi_handler) took too long to run",
run.Raw("|"),
"grep",
"-v",
"INFO: recovery required on readonly",
run.Raw("|"),
"grep",
"-v",
"ceph-create-keys: INFO",
run.Raw("|"),
"head",
"-n",
"1",
],
stdout=StringIO(),
)
stdout = r.stdout.getvalue()
if stdout != "":
log.error("Error in syslog on %s: %s", rem.name, stdout)
set_status(ctx.summary, "fail")
if "failure_reason" not in ctx.summary:
ctx.summary["failure_reason"] = "'{error}' in syslog".format(error=stdout)
log.info("Compressing syslogs...")
run.wait(
ctx.cluster.run(
args=[
"find",
"{adir}/syslog".format(adir=archive_dir),
"-name",
"*.log",
"-print0",
run.Raw("|"),
"sudo",
"xargs",
"-0",
"--no-run-if-empty",
"--",
"gzip",
"--",
],
wait=False,
)
)
Example 7: coredump
def coredump(ctx, config):
"""
Stash a coredump of this system if an error occurs.
"""
log.info("Enabling coredump saving...")
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
"install",
"-d",
"-m0755",
"--",
"{adir}/coredump".format(adir=archive_dir),
run.Raw("&&"),
"sudo",
"sysctl",
"-w",
"kernel.core_pattern={adir}/coredump/%t.%p.core".format(adir=archive_dir),
],
wait=False,
)
)
try:
yield
finally:
run.wait(
ctx.cluster.run(
args=[
"sudo",
"sysctl",
"-w",
"kernel.core_pattern=core",
run.Raw("&&"),
# don't litter the archive dir if there were no cores dumped
"rmdir",
"--ignore-fail-on-non-empty",
"--",
"{adir}/coredump".format(adir=archive_dir),
],
wait=False,
)
)
# set status = 'fail' if the dir is still there = coredumps were
# seen
for rem in ctx.cluster.remotes.iterkeys():
r = rem.run(
args=[
"if",
"test",
"!",
"-e",
"{adir}/coredump".format(adir=archive_dir),
run.Raw(";"),
"then",
"echo",
"OK",
run.Raw(";"),
"fi",
],
stdout=StringIO(),
)
if r.stdout.getvalue() != "OK\n":
log.warning("Found coredumps on %s, flagging run as failed", rem)
set_status(ctx.summary, "fail")
if "failure_reason" not in ctx.summary:
ctx.summary["failure_reason"] = "Found coredumps on {rem}".format(rem=rem)
Example 8: syslog
#......... Part of this example's code is omitted here .........
args=[
'sudo',
'service',
# a mere reload (SIGHUP) doesn't seem to make
# rsyslog open the files
'rsyslog',
'restart',
],
wait=False,
),
)
yield
finally:
log.info('Shutting down syslog monitoring...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-f',
'--',
CONF,
run.Raw('&&'),
'sudo',
'service',
'rsyslog',
'restart',
],
wait=False,
),
)
# race condition: nothing actually says rsyslog had time to
# flush the file fully. oh well.
log.info('Checking logs for errors...')
for rem in ctx.cluster.remotes.iterkeys():
log.debug('Checking %s', rem.name)
r = rem.run(
args=[
'egrep', '--binary-files=text',
'\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b',
run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),
run.Raw('|'),
'grep', '-v', 'task .* blocked for more than .* seconds',
run.Raw('|'),
'grep', '-v', 'lockdep is turned off',
run.Raw('|'),
'grep', '-v', 'trying to register non-static key',
run.Raw('|'),
'grep', '-v', 'DEBUG: fsize', # xfs_fsr
run.Raw('|'),
'grep', '-v', 'CRON', # ignore cron noise
run.Raw('|'),
'grep', '-v', 'BUG: bad unlock balance detected', # #6097
run.Raw('|'),
'grep', '-v', 'inconsistent lock state', # FIXME see #2523
run.Raw('|'),
'grep', '-v', '*** DEADLOCK ***', # part of lockdep output
run.Raw('|'),
'grep', '-v', 'INFO: possible irq lock inversion dependency detected', # FIXME see #2590 and #147
run.Raw('|'),
'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run',
run.Raw('|'),
'grep', '-v', 'INFO: recovery required on readonly',
run.Raw('|'),
'head', '-n', '1',
],
stdout=StringIO(),
)
stdout = r.stdout.getvalue()
if stdout != '':
log.error('Error in syslog on %s: %s', rem.name, stdout)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
"'{error}' in syslog".format(error=stdout)
log.info('Compressing syslogs...')
run.wait(
ctx.cluster.run(
args=[
'find',
'{adir}/syslog'.format(adir=archive_dir),
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
Example 9: _set_status
def _set_status(self, status):
set_status(self.ctx.summary, status)
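Example 9 above is only a thin wrapper around set_status. The following minimal sketch shows how such a helper might be exercised; the Task class and the SimpleNamespace ctx are illustrative stand-ins, not teuthology code.

from types import SimpleNamespace

from teuthology.job_status import get_status, set_status

class Task:
    # Hypothetical holder for a ctx object, for illustration only.
    def __init__(self, ctx):
        self.ctx = ctx

    def _set_status(self, status):
        set_status(self.ctx.summary, status)

ctx = SimpleNamespace(summary={})
Task(ctx)._set_status('dead')
assert get_status(ctx.summary) == 'dead'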
Example 10: lock_machines
def lock_machines(ctx, config):
"""
Lock machines. Called when the teuthology run finds and locks
new machines. This is not called if the one has teuthology-locked
machines and placed those keys in the Targets section of a yaml file.
"""
# It's OK for os_type and os_version to be None here. If we're trying
# to lock a bare metal machine, we'll take whatever is available. If
# we want a vps, defaults will be provided by misc.get_distro and
# misc.get_distro_version in provision.create_if_vm
os_type = ctx.config.get("os_type")
os_version = ctx.config.get("os_version")
arch = ctx.config.get('arch')
log.info('Locking machines...')
assert isinstance(config[0], int), 'config[0] must be an integer'
machine_type = config[1]
total_requested = config[0]
# We want to make sure there are always this many machines available
reserved = teuth_config.reserve_machines
assert isinstance(reserved, int), 'reserve_machines must be integer'
assert (reserved >= 0), 'reserve_machines should >= 0'
# change the status during the locking process
report.try_push_job_info(ctx.config, dict(status='waiting'))
all_locked = dict()
requested = total_requested
while True:
# get a candidate list of machines
machines = lock.list_locks(machine_type=machine_type, up=True,
locked=False, count=requested + reserved)
if machines is None:
if ctx.block:
log.error('Error listing machines, trying again')
time.sleep(20)
continue
else:
raise RuntimeError('Error listing machines')
# make sure there are machines for non-automated jobs to run
if len(machines) < reserved + requested and ctx.owner.startswith('scheduled'):
if ctx.block:
log.info(
'waiting for more %s machines to be free (need %s + %s, have %s)...',
machine_type,
reserved,
requested,
len(machines),
)
time.sleep(10)
continue
else:
assert 0, ('not enough machines free; need %s + %s, have %s' %
(reserved, requested, len(machines)))
try:
newly_locked = lock.lock_many(ctx, requested, machine_type,
ctx.owner, ctx.archive, os_type,
os_version, arch)
except Exception:
# Lock failures should map to the 'dead' status instead of 'fail'
set_status(ctx.summary, 'dead')
raise
all_locked.update(newly_locked)
log.info(
'{newly_locked} {mtype} machines locked this try, '
'{total_locked}/{total_requested} locked so far'.format(
newly_locked=len(newly_locked),
mtype=machine_type,
total_locked=len(all_locked),
total_requested=total_requested,
)
)
if len(all_locked) == total_requested:
vmlist = []
for lmach in all_locked:
if misc.is_vm(lmach):
vmlist.append(lmach)
if vmlist:
log.info('Waiting for virtual machines to come up')
keys_dict = dict()
loopcount = 0
while len(keys_dict) != len(vmlist):
loopcount += 1
time.sleep(10)
keys_dict = misc.ssh_keyscan(vmlist)
log.info('virtual machine is still unavailable')
if loopcount == 40:
loopcount = 0
log.info('virtual machine(s) still not up, ' +
'recreating unresponsive ones.')
for guest in vmlist:
if guest not in keys_dict.keys():
log.info('recreating: ' + guest)
full_name = misc.canonicalize_hostname(guest)
provision.destroy_if_vm(ctx, full_name)
provision.create_if_vm(ctx, full_name)
if lock.do_update_keys(keys_dict):
log.info("Error in virtual machine keys")
newscandict = {}
#......... Part of this example's code is omitted here .........