本文整理汇总了Python中cms.db.FileCacher.FileCacher.get_file方法的典型用法代码示例。如果您正苦于以下问题:Python FileCacher.get_file方法的具体用法?Python FileCacher.get_file怎么用?Python FileCacher.get_file使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cms.db.FileCacher.FileCacher的用法示例。
在下文中一共展示了FileCacher.get_file方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: file_length
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
def file_length(digest, file_cacher=None, file_lengther=None):
    """Compute the length of the file identified by digest.

    digest (string): the digest of the file.
    file_cacher (FileCacher): the cacher to use, or None to build a
        fresh FileCacher.
    file_lengther (class): a file-like class whose instances count the
        bytes written to them and report the total via tell(); None
        means the default FileLengther.

    return (int): the length of the file.
    """
    if file_cacher is None:
        file_cacher = FileCacher()
    if file_lengther is None:
        file_lengther = FileLengther
    # Stream the content through a counting sink instead of
    # materializing it: get_file() writes the bytes into file_obj.
    lengther = file_lengther()
    file_cacher.get_file(digest, file_obj=lengther)
    return lengther.tell()
示例2: __init__
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
class SpoolExporter:
"""This service creates a tree structure "similar" to the one used
in Italian IOI repository for storing the results of a contest.
"""
def __init__(self, contest_id, spool_dir):
    """Set up the exporter for one contest.

    contest_id (int): id of the contest to export.
    spool_dir (string): path of the spool tree to create.
    """
    self.contest_id = contest_id
    self.spool_dir = spool_dir
    # Submission sources are written below <spool_dir>/upload.
    self.upload_dir = os.path.join(spool_dir, "upload")
    # Filled in by do_export() once a database session is open.
    self.contest = None
    self.file_cacher = FileCacher()
def run(self):
    """Interface to make the class do its job.

    return (bool): True if the export succeeded.
    """
    return self.do_export()
def do_export(self):
    """Run the actual export code.

    return (bool): True if the export succeeded, False if the spool
        directory already existed.
    """
    logger.operation = "exporting contest %s" % self.contest_id
    logger.info("Starting export.")
    logger.info("Creating dir structure.")
    try:
        os.mkdir(self.spool_dir)
    except OSError:
        # Refuse to touch a pre-existing directory.
        logger.error("The specified directory already exists, "
                     "I won't overwrite it.")
        return False
    os.mkdir(self.upload_dir)
    with SessionGen(commit=False) as session:
        self.contest = Contest.get_from_id(self.contest_id, session)
        # Creating users' directory (hidden users are excluded from
        # the export).
        for user in self.contest.users:
            if not user.hidden:
                os.mkdir(os.path.join(self.upload_dir, user.username))
        self.export_submissions()
        self.export_ranking()
    logger.info("Export finished.")
    logger.operation = ""
    return True
def export_submissions(self):
"""Export submissions' source files.
"""
logger.info("Exporting submissions.")
queue_file = codecs.open(os.path.join(self.spool_dir, "queue"), "w",
encoding="utf-8")
# FIXME - The enumeration of submission should be time-increasing
for submission in self.contest.get_submissions():
if submission.user.hidden:
continue
logger.info("Exporting submission %s." % submission.id)
username = submission.user.username
task = submission.task.name
timestamp = submission.timestamp
# Get source files to the spool directory.
file_digest = submission.files["%s.%s" % (task, "%l")].digest
upload_filename = os.path.join(
self.upload_dir, username, "%s.%d.%s" %
(task, timestamp, submission.language))
self.file_cacher.get_file(file_digest, path=upload_filename)
upload_filename = os.path.join(
self.upload_dir, username, "%s.%s" %
(task, submission.language))
self.file_cacher.get_file(file_digest, path=upload_filename)
print >> queue_file, "./upload/%s/%s.%d.%s" % \
(username, task, timestamp, submission.language)
# Write results file for the submission.
if submission.evaluated():
res_file = codecs.open(os.path.join(
self.spool_dir,
"%d.%s.%s.%s.res" % (timestamp, username,
task, submission.language)),
"w", encoding="utf-8")
res2_file = codecs.open(os.path.join(
self.spool_dir,
"%s.%s.%s.res" % (username, task,
submission.language)),
"w", encoding="utf-8")
total = 0.0
for num, evaluation in enumerate(submission.evaluations):
outcome = float(evaluation.outcome)
total += outcome
line = "Executing on file n. %2d %s (%.4f)" % \
(num, evaluation.text, outcome)
print >> res_file, line
print >> res2_file, line
line = "Score: %.6f" % total
#.........这里部分代码省略.........
示例3: Worker
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
class Worker(Service):
    """This service implements the possibility to compile and evaluate
    submissions in a sandbox. The instructions to follow for the
    operations are in the TaskType classes, while the sandbox is in
    the Sandbox module.
    """

    # Job type identifiers used to select the action to perform.
    JOB_TYPE_COMPILATION = "compile"
    JOB_TYPE_EVALUATION = "evaluate"

    def __init__(self, shard):
        """Initialize the worker service.

        shard (int): the shard number of this worker.
        """
        logger.initialize(ServiceCoord("Worker", shard))
        Service.__init__(self, shard, custom_logger=logger)
        self.file_cacher = FileCacher(self)
        # TaskType of the job currently being executed, if any.
        self.task_type = None
        # Non-blocking lock ensuring a single job runs at a time.
        self.work_lock = threading.Lock()
        self.session = None

    @rpc_method
    def ignore_job(self):
        """RPC that informs the worker that its result for the current
        action will be discarded. The worker will try to return as
        soon as possible even if this means that the results are
        inconsistent.
        """
        # We inform the task_type to quit as soon as possible.
        logger.info("Trying to interrupt job as requested.")
        try:
            self.task_type.ignore_job = True
        except AttributeError:
            pass  # Job concluded right under our nose, that's ok too.

    # FIXME - rpc_threaded is disabled because it makes the call fail:
    # we should investigate on this
    @rpc_method
    @rpc_threaded
    def precache_files(self, contest_id):
        """RPC to ask the worker to precache the files in the contest.

        contest_id (int): the id of the contest.
        """
        # Lock is not needed if the admins correctly placed cache and
        # temp directories in the same filesystem. This is what
        # usually happens since they are children of the same,
        # cms-created, directory.
        logger.info("Precaching files for contest %d." % contest_id)
        with SessionGen(commit=False) as session:
            contest = Contest.get_from_id(contest_id, session)
            for digest in contest.enumerate_files(skip_submissions=True,
                                                  skip_user_tests=True):
                self.file_cacher.get_file(digest)
        logger.info("Precaching finished.")

    @rpc_method
    @rpc_threaded
    def execute_job(self, job_dict):
        """RPC that executes the given job (compile or evaluate).

        job_dict (dict): a serialized Job.

        return (dict): the job, with results, serialized back to dict.

        raise JobException: on failure, or when another job already
            holds the work lock.
        """
        job = Job.import_from_dict_with_type(job_dict)
        # Refuse the request instead of queueing it: one job at a time.
        if self.work_lock.acquire(False):
            try:
                logger.operation = "job '%s'" % (job.info)
                logger.info("Request received")
                job.shard = self.shard
                self.task_type = get_task_type(job, self.file_cacher)
                self.task_type.execute_job()
                logger.info("Request finished.")
                return job.export_to_dict()
            except:
                err_msg = "Worker failed on operation `%s'" % logger.operation
                logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
                raise JobException(err_msg)
            finally:
                # Always reset per-job state and release the lock.
                self.task_type = None
                self.session = None
                logger.operation = ""
                self.work_lock.release()
        else:
            err_msg = "Request '%s' received, " \
                      "but declined because of acquired lock" % \
                      (job.info)
            logger.warning(err_msg)
            raise JobException(err_msg)
示例4: Worker
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
#.........这里部分代码省略.........
logger.info("Trying to interrupt job as requested.")
try:
self.task_type.ignore_job = True
except AttributeError:
pass # Job concluded right under our nose, that's ok too.
@rpc_method
@rpc_threaded
def compile(self, submission_id):
    """RPC to ask the worker to compile the submission.

    submission_id (int): the id of the submission to compile.

    return: whatever the compilation action returns.
    """
    return self.action(submission_id, Worker.JOB_TYPE_COMPILATION)
@rpc_method
@rpc_threaded
def evaluate(self, submission_id):
    """RPC to ask the worker to evaluate the submission.

    submission_id (int): the id of the submission to evaluate.

    return: whatever the evaluation action returns.
    """
    return self.action(submission_id, Worker.JOB_TYPE_EVALUATION)
# FIXME - rpc_threaded is disabled because it makes the call fail:
# we should investigate on this
@rpc_method
@rpc_threaded
def precache_files(self, contest_id):
    """RPC to ask the worker to precache the files in the contest.

    contest_id (int): the id of the contest.
    """
    # Lock is not needed if the admins correctly placed cache and
    # temp directories in the same filesystem. This is what
    # usually happens since they are children of the same,
    # cms-created, directory.
    logger.info("Precaching files for contest %d." % contest_id)
    with SessionGen(commit=False) as session:
        contest = Contest.get_from_id(contest_id, session)
        for digest in contest.enumerate_files(skip_submissions=True):
            self.file_cacher.get_file(digest)
    logger.info("Precaching finished.")
def action(self, submission_id, job_type):
    """The actual work - that can be compilation or evaluation
    (the code is pretty much the same, the differences are in
    what we ask the TaskType to do).

    submission_id (string): the submission to act on.
    job_type (string): a constant JOB_TYPE_*.

    return: the result of the TaskType action, or False when the
        worker is busy.

    raise JobException: on failure.
    """
    # Refuse the request instead of queueing it: one job at a time.
    if self.work_lock.acquire(False):
        try:
            logger.operation = "%s of submission %s" % (job_type,
                                                        submission_id)
            logger.info("Request received: %s of submission %s." %
                        (job_type, submission_id))
            with SessionGen(commit=False) as self.session:
                # Retrieve submission and task_type.
                unused_submission, self.task_type = \
                    self.get_submission_data(submission_id)
                # Store in the task type the shard number.
                self.task_type.worker_shard = self.shard
                # Do the actual work.
                if job_type == Worker.JOB_TYPE_COMPILATION:
                    task_type_action = self.task_type.compile
                elif job_type == Worker.JOB_TYPE_EVALUATION:
                    task_type_action = self.task_type.evaluate
                else:
                    raise KeyError("Unexpected job type %s." % job_type)
                logger.info("Request finished.")
                return task_type_action()
        except:
            err_msg = "Worker failed on operation `%s'" % logger.operation
            logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
            raise JobException(err_msg)
        finally:
            # Always reset per-job state and release the lock.
            self.task_type = None
            self.session = None
            logger.operation = ""
            self.work_lock.release()
    else:
        logger.warning("Request of %s of submission %s received, "
                       "but declined because of acquired lock" %
                       (job_type, submission_id))
        return False
示例5: __init__
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
class ContestExporter:
"""This service exports every data about the contest that CMS
knows. The process of exporting and importing again should be
idempotent.
"""
def __init__(self, contest_id, export_target,
             skip_submissions, skip_user_tests, light):
    """Create an exporter for the given contest.

    contest_id (int): id of the contest to export.
    export_target (string): target archive/directory; when empty a
        default name is derived from the contest's name.
    skip_submissions (bool): whether to leave submissions out.
    skip_user_tests (bool): whether to leave user tests out.
    light (bool): whether to produce a light export.
    """
    self.contest_id = contest_id
    self.skip_submissions = skip_submissions
    self.skip_user_tests = skip_user_tests
    self.light = light
    if export_target != "":
        self.export_target = export_target
    else:
        # No target given: build "dump_<name>.tar.gz" from the
        # contest's name, which requires a database session.
        with SessionGen(commit=False) as session:
            contest = Contest.get_from_id(self.contest_id, session)
            self.export_target = "dump_%s.tar.gz" % contest.name
    self.file_cacher = FileCacher()
def run(self):
    """Interface to make the class do its job.

    return (bool): True if the export succeeded.
    """
    return self.do_export()
def do_export(self):
    """Run the actual export code.

    return (bool): True if the export succeeded, False on any
        non-exceptional failure (target exists, file check failed).
    """
    logger.operation = "exporting contest %d" % self.contest_id
    logger.info("Starting export.")
    export_dir = self.export_target
    archive_info = get_archive_info(self.export_target)
    if archive_info["write_mode"] != "":
        # We are able to write to this archive.
        if os.path.exists(self.export_target):
            logger.error("The specified file already exists, "
                         "I won't overwrite it.")
            return False
        # Build the tree in a temporary directory; it is archived
        # and removed at the end.
        export_dir = os.path.join(tempfile.mkdtemp(),
                                  archive_info["basename"])
    logger.info("Creating dir structure.")
    try:
        os.mkdir(export_dir)
    except OSError:
        logger.error("The specified directory already exists, "
                     "I won't overwrite it.")
        return False
    files_dir = os.path.join(export_dir, "files")
    descr_dir = os.path.join(export_dir, "descriptions")
    os.mkdir(files_dir)
    os.mkdir(descr_dir)
    with SessionGen(commit=False) as session:
        contest = Contest.get_from_id(self.contest_id, session)
        # Export files.
        logger.info("Exporting files.")
        files = contest.enumerate_files(self.skip_submissions,
                                        self.skip_user_tests,
                                        light=self.light)
        for _file in files:
            if not self.safe_get_file(_file,
                                      os.path.join(files_dir, _file),
                                      os.path.join(descr_dir, _file)):
                return False
        # Export the contest in JSON format.
        logger.info("Exporting the contest in JSON format.")
        with open(os.path.join(export_dir, "contest.json"), 'w') as fout:
            json.dump(contest.export_to_dict(
                self.skip_submissions,
                self.skip_user_tests),
                fout, indent=4)
    # If the admin requested export to file, we do that.
    if archive_info["write_mode"] != "":
        archive = tarfile.open(self.export_target,
                               archive_info["write_mode"])
        archive.add(export_dir, arcname=archive_info["basename"])
        archive.close()
        shutil.rmtree(export_dir)
    logger.info("Export finished.")
    logger.operation = ""
    return True
def safe_get_file(self, digest, path, descr_path=None):
"""Get file from FileCacher ensuring that the digest is
correct.
digest (string): the digest of the file to retrieve.
#.........这里部分代码省略.........
示例6: __init__
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
class SpoolExporter:
"""This service creates a tree structure "similar" to the one used
in Italian IOI repository for storing the results of a contest.
"""
def __init__(self, contest_id, spool_dir):
    """Set up the exporter for one contest.

    contest_id (int): id of the contest to export.
    spool_dir (string): path of the spool tree to create.
    """
    self.contest_id = contest_id
    self.spool_dir = spool_dir
    # Submission sources are written below <spool_dir>/upload.
    self.upload_dir = os.path.join(spool_dir, "upload")
    # Both are filled in by do_export() once a session is open.
    self.contest = None
    self.submissions = None
    self.file_cacher = FileCacher()
def run(self):
    """Interface to make the class do its job.

    return (bool): True if the export succeeded.
    """
    return self.do_export()
def do_export(self):
    """Run the actual export code.

    return (bool): True if the export succeeded, False otherwise.
    """
    logger.operation = "exporting contest %s" % self.contest_id
    logger.info("Starting export.")
    logger.info("Creating dir structure.")
    try:
        os.mkdir(self.spool_dir)
    except OSError:
        # Refuse to touch a pre-existing directory.
        logger.critical("The specified directory already exists, " "I won't overwrite it.")
        return False
    os.mkdir(self.upload_dir)
    with SessionGen(commit=False) as session:
        self.contest = Contest.get_from_id(self.contest_id, session)
        # Submissions of hidden users are excluded; the rest are
        # exported in chronological order.
        self.submissions = sorted(
            (submission for submission in self.contest.get_submissions() if not submission.user.hidden),
            key=lambda submission: submission.timestamp,
        )
        # Creating users' directory.
        for user in self.contest.users:
            if not user.hidden:
                os.mkdir(os.path.join(self.upload_dir, user.username))
        try:
            self.export_submissions()
            self.export_ranking()
        except Exception as error:
            # Best-effort export: log and report failure instead of
            # propagating.
            logger.critical("Generic error. %r" % error)
            return False
    logger.info("Export finished.")
    logger.operation = ""
    return True
def export_submissions(self):
"""Export submissions' source files.
"""
logger.info("Exporting submissions.")
queue_file = codecs.open(os.path.join(self.spool_dir, "queue"), "w", encoding="utf-8")
for submission in self.submissions:
logger.info("Exporting submission %s." % submission.id)
username = submission.user.username
task = submission.task.name
timestamp = submission.timestamp
# Get source files to the spool directory.
file_digest = submission.files["%s.%s" % (task, "%l")].digest
upload_filename = os.path.join(
self.upload_dir, username, "%s.%d.%s" % (task, timestamp, submission.language)
)
self.file_cacher.get_file(file_digest, path=upload_filename)
upload_filename = os.path.join(self.upload_dir, username, "%s.%s" % (task, submission.language))
self.file_cacher.get_file(file_digest, path=upload_filename)
print >> queue_file, "./upload/%s/%s.%d.%s" % (username, task, timestamp, submission.language)
# Write results file for the submission.
if submission.evaluated():
res_file = codecs.open(
os.path.join(self.spool_dir, "%d.%s.%s.%s.res" % (timestamp, username, task, submission.language)),
"w",
encoding="utf-8",
)
res2_file = codecs.open(
os.path.join(self.spool_dir, "%s.%s.%s.res" % (username, task, submission.language)),
"w",
encoding="utf-8",
)
total = 0.0
for num, evaluation in enumerate(submission.evaluations):
outcome = float(evaluation.outcome)
total += outcome
line = "Executing on file n. %2d %s (%.4f)" % (num, evaluation.text, outcome)
print >> res_file, line
print >> res2_file, line
#.........这里部分代码省略.........
示例7: __init__
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
class ContestExporter:
"""This service exports every data about the contest that CMS
knows. The process of exporting and importing again should be
idempotent.
"""
def __init__(self, contest_id, dump, export_target, skip_submissions,
             light):
    """Create an exporter for the given contest.

    contest_id (int): id of the contest to export.
    dump (bool): whether to also dump the whole database.
    export_target (string): target archive/directory; when empty a
        default name is derived from the contest's name.
    skip_submissions (bool): whether to leave submissions out.
    light (bool): whether to produce a light export.
    """
    self.contest_id = contest_id
    self.dump = dump
    self.skip_submissions = skip_submissions
    self.light = light
    if export_target != "":
        self.export_target = export_target
    else:
        # No target given: build "dump_<name>.tar.gz" from the
        # contest's name, which requires a database session.
        with SessionGen(commit=False) as session:
            contest = Contest.get_from_id(self.contest_id, session)
            self.export_target = "dump_%s.tar.gz" % contest.name
    self.file_cacher = FileCacher()
def run(self):
    """Interface to make the class do its job.

    return (bool): True if the export succeeded.
    """
    return self.do_export()
def do_export(self):
    """Run the actual export code.

    return (bool): True if the export succeeded, False on any
        non-exceptional failure.
    """
    logger.operation = "exporting contest %d" % self.contest_id
    logger.info("Starting export.")
    export_dir = self.export_target
    archive_info = get_archive_info(self.export_target)
    if archive_info["write_mode"] != "":
        # We are able to write to this archive.
        if os.path.exists(self.export_target):
            logger.error("The specified file already exists, "
                         "I won't overwrite it.")
            return False
        # Build the tree in a temporary directory; it is archived
        # and removed at the end.
        export_dir = os.path.join(tempfile.mkdtemp(),
                                  archive_info["basename"])
    logger.info("Creating dir structure.")
    try:
        os.mkdir(export_dir)
    except OSError:
        logger.error("The specified directory already exists, "
                     "I won't overwrite it.")
        return False
    files_dir = os.path.join(export_dir, "files")
    descr_dir = os.path.join(export_dir, "descriptions")
    os.mkdir(files_dir)
    os.mkdir(descr_dir)
    with SessionGen(commit=False) as session:
        contest = Contest.get_from_id(self.contest_id, session)
        # Export files.
        logger.info("Exporting files.")
        files = contest.enumerate_files(self.skip_submissions,
                                        light=self.light)
        for _file in files:
            if not self.safe_get_file(_file,
                                      os.path.join(files_dir, _file),
                                      os.path.join(descr_dir, _file)):
                return False
        # Export the contest in JSON format.
        logger.info("Exporting the contest in JSON format.")
        with open(os.path.join(export_dir, "contest.json"), 'w') as fout:
            json.dump(contest.export_to_dict(self.skip_submissions),
                      fout, indent=4)
    # Optionally dump the whole database alongside the export.
    if self.dump:
        if not self.dump_database(export_dir):
            return False
    # If the admin requested export to file, we do that.
    if archive_info["write_mode"] != "":
        archive = tarfile.open(self.export_target,
                               archive_info["write_mode"])
        archive.add(export_dir, arcname=archive_info["basename"])
        archive.close()
        shutil.rmtree(export_dir)
    logger.info("Export finished.")
    logger.operation = ""
    return True
def dump_database(self, export_dir):
"""Dump the whole database. This is never used; however, this
part is retained for historical reasons.
#.........这里部分代码省略.........
示例8: TestFileCacher
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
class TestFileCacher(TestService):
"""Service that performs automatically some tests for the
FileCacher service.
"""
def __init__(self, shard):
    """Initialize the test service.

    shard (int): the shard number of this service.
    """
    logger.initialize(ServiceCoord("TestFileCacher", shard))
    TestService.__init__(self, shard, custom_logger=logger)
    # Assume we store the cache in "./cache/fs-cache-TestFileCacher-0/"
    self.cache_base_path = os.path.join(config.cache_dir,
                                        "fs-cache-TestFileCacher-0")
    # State shared between the numbered test methods; set by earlier
    # tests and consumed by later ones.
    self.cache_path = None
    self.content = None
    self.fake_content = None
    self.digest = None
    self.file_obj = None
    self.file_cacher = FileCacher(self)
    #self.file_cacher = FileCacher(self, path="fs-storage")
def prepare(self):
    """Initialization for the test code - make sure that the cache
    is empty before testing.
    """
    # The tests rely on a clean cache; this only reminds the operator,
    # it does not delete anything itself.
    logger.info("Please delete directory %s before." %
                self.cache_base_path)
### TEST 000 ###
def test_000(self):
    """Send a ~100B random binary file to the storage through
    FileCacher as a file-like object. FC should cache the content
    locally.
    """
    self.size = 100
    self.content = "".join(chr(random.randint(0, 255))
                           for unused_i in xrange(self.size))
    logger.info(" I am sending the ~100B binary file to FileCacher")
    try:
        data = self.file_cacher.put_file(file_obj=StringIO(self.content),
                                         description="Test #000")
    except Exception as error:
        # NOTE(review): execution continues after this call and would
        # hit an unbound `data` -- presumably test_end() aborts the
        # test on failure; confirm against TestService.
        self.test_end(False, "Error received: %r." % error)
    # Verify that put_file left a byte-identical copy in the local
    # object cache.
    if not os.path.exists(
            os.path.join(self.cache_base_path, "objects", data)):
        self.test_end(False, "File not stored in local cache.")
    elif open(os.path.join(self.cache_base_path, "objects", data),
              "rb").read() != self.content:
        self.test_end(False, "Local cache's content differ "
                      "from original file.")
    else:
        # Remember path and digest for the follow-up tests.
        self.cache_path = os.path.join(self.cache_base_path, "objects",
                                       data)
        self.digest = data
        self.test_end(True, "Data sent and cached without error.")
### TEST 001 ###
def test_001(self):
    """Retrieve the file.
    """
    logger.info(" I am retrieving the ~100B binary file from FileCacher")
    # Overwrite the cached copy: if FileCacher serves the fake content
    # we know it hit the cache instead of the backend store.
    self.fake_content = "Fake content.\n"
    with open(self.cache_path, "wb") as cached_file:
        cached_file.write(self.fake_content)
    try:
        data = self.file_cacher.get_file(digest=self.digest,
                                         temp_file_obj=True)
    except Exception as error:
        # NOTE(review): execution continues after this call and would
        # hit an unbound `data` -- presumably test_end() aborts the
        # test on failure; confirm against TestService.
        self.test_end(False, "Error received: %r." % error)
    received = data.read()
    data.close()
    if received != self.fake_content:
        if received == self.content:
            self.test_end(False,
                          "Did not use the cache even if it could.")
        else:
            self.test_end(False, "Content differ.")
    else:
        self.test_end(True, "Data object received correctly.")
### TEST 002 ###
def test_002(self):
"""Check the size of the file.
"""
logger.info(" I am checking the size of the ~100B binary file")
try:
size = self.file_cacher.get_size(self.digest)
except Exception as error:
self.test_end(False, "Error received: %r." % error)
#.........这里部分代码省略.........
示例9: __init__
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
class ContestExporter:
"""This service exports every data about the contest that CMS
knows. The process of exporting and importing again should be
idempotent.
"""
def __init__(self, contest_id, export_target,
             dump_files, dump_model, light,
             skip_submissions, skip_user_tests):
    """Create an exporter for the given contest.

    contest_id (int): id of the contest to export.
    export_target (string): target archive/directory; when empty a
        default name is derived from the contest's name.
    dump_files (bool): whether to dump the stored files.
    dump_model (bool): whether to dump the contest model as JSON.
    light (bool): whether to produce a light export.
    skip_submissions (bool): whether to leave submissions out.
    skip_user_tests (bool): whether to leave user tests out.
    """
    self.contest_id = contest_id
    self.dump_files = dump_files
    self.dump_model = dump_model
    self.light = light
    self.skip_submissions = skip_submissions
    self.skip_user_tests = skip_user_tests
    if export_target != "":
        self.export_target = export_target
    else:
        # No target given: derive a default from the contest's name
        # (needs a database session) and warn the admin about it.
        with SessionGen(commit=False) as session:
            contest = Contest.get_from_id(self.contest_id, session)
            self.export_target = "dump_%s.tar.gz" % contest.name
            logger.warning("export_target not given, using \"%s\""
                           % self.export_target)
    self.file_cacher = FileCacher()
def do_export(self):
"""Run the actual export code."""
logger.operation = "exporting contest %d" % self.contest_id
logger.info("Starting export.")
export_dir = self.export_target
archive_info = get_archive_info(self.export_target)
if archive_info["write_mode"] != "":
# We are able to write to this archive.
if os.path.exists(self.export_target):
logger.critical("The specified file already exists, "
"I won't overwrite it.")
return False
export_dir = os.path.join(tempfile.mkdtemp(),
archive_info["basename"])
logger.info("Creating dir structure.")
try:
os.mkdir(export_dir)
except OSError:
logger.critical("The specified directory already exists, "
"I won't overwrite it.")
return False
files_dir = os.path.join(export_dir, "files")
descr_dir = os.path.join(export_dir, "descriptions")
os.mkdir(files_dir)
os.mkdir(descr_dir)
with SessionGen(commit=False) as session:
contest = Contest.get_from_id(self.contest_id, session)
# Export files.
if self.dump_files:
logger.info("Exporting files.")
files = contest.enumerate_files(self.skip_submissions,
self.skip_user_tests,
self.light)
for file_ in files:
if not self.safe_get_file(file_,
os.path.join(files_dir, file_),
os.path.join(descr_dir, file_)):
return False
# Export the contest in JSON format.
if self.dump_model:
logger.info("Exporting the contest to a JSON file.")
# We use strings because they'll be the keys of a JSON
# object; the contest will have ID 0.
self.ids = {contest.sa_identity_key: "0"}
self.queue = [contest]
data = dict()
while len(self.queue) > 0:
obj = self.queue.pop(0)
data[self.ids[obj.sa_identity_key]] = self.export_object(obj)
# Specify the "root" of the data graph
data["_objects"] = ["0"]
with io.open(os.path.join(export_dir,
"contest.json"), "wb") as fout:
json.dump(data, fout, encoding="utf-8",
indent=4, sort_keys=True)
# If the admin requested export to file, we do that.
if archive_info["write_mode"] != "":
#.........这里部分代码省略.........
示例10: Worker
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import get_file [as 别名]
class Worker(Service):
"""This service implement the possibility to compile and evaluate
submissions in a sandbox. The instructions to follow for the
operations are in the TaskType classes, while the sandbox is in
the Sandbox module.
"""
JOB_TYPE_COMPILATION = "compile"
JOB_TYPE_EVALUATION = "evaluate"
def __init__(self, shard):
    """Initialize the worker service.

    shard (int): the shard number of this worker.
    """
    logger.initialize(ServiceCoord("Worker", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.file_cacher = FileCacher(self)
    # Non-blocking lock ensuring a single job group runs at a time.
    self.work_lock = threading.Lock()
    # NOTE(review): this boolean shares its name with the ignore_job()
    # RPC method defined below and shadows it on the instance --
    # confirm the RPC machinery dispatches via the class dict.
    self.ignore_job = False
@rpc_method
def ignore_job(self):
    """RPC that informs the worker that its result for the current
    action will be discarded. The worker will try to return as
    soon as possible even if this means that the results are
    inconsistent.
    """
    # We remember to quit as soon as possible.
    # NOTE(review): this assignment replaces the method with a boolean
    # on the instance (same name as the flag set in __init__) --
    # confirm the RPC layer resolves ignore_job via the class.
    logger.info("Trying to interrupt job as requested.")
    self.ignore_job = True
# FIXME - rpc_threaded is disabled because it makes the call fail:
# we should investigate on this
@rpc_method
@rpc_threaded
def precache_files(self, contest_id):
    """RPC to ask the worker to precache the files in the contest.

    contest_id (int): the id of the contest.
    """
    # Lock is not needed if the admins correctly placed cache and
    # temp directories in the same filesystem. This is what
    # usually happens since they are children of the same,
    # cms-created, directory.
    logger.info("Precaching files for contest %d." % contest_id)
    with SessionGen(commit=False) as session:
        contest = Contest.get_from_id(contest_id, session)
        for digest in contest.enumerate_files(skip_submissions=True,
                                              skip_user_tests=True):
            self.file_cacher.get_file(digest)
    logger.info("Precaching finished.")
@rpc_method
@rpc_threaded
def execute_job_group(self, job_group_dict):
job_group = JobGroup.import_from_dict(job_group_dict)
if self.work_lock.acquire(False):
try:
self.ignore_job = False
for k, job in job_group.jobs.iteritems():
logger.operation = "job '%s'" % (job.info)
logger.info("Request received")
job.shard = self.shard
# FIXME This is actually kind of a workaround...
# The only TaskType that needs it is OutputOnly.
job._key = k
# FIXME We're creating a new TaskType for each Job
# even if, at the moment, a JobGroup always uses
# the same TaskType and the same parameters. Yet,
# this could change in the future, so the best
# solution is to keep a cache of TaskTypes objects
# (like ScoringService does with ScoreTypes, except
# that we cannot index by Dataset ID here...).
task_type = get_task_type(job.task_type,
job.task_type_parameters)
task_type.execute_job(job, self.file_cacher)
logger.info("Request finished.")
if not job.success or self.ignore_job:
job_group.success = False
break
else:
job_group.success = True
return job_group.export_to_dict()
except:
err_msg = "Worker failed on operation `%s'" % logger.operation
logger.error("%s\n%s" % (err_msg, traceback.format_exc()))
raise JobException(err_msg)
finally:
#.........这里部分代码省略.........