本文整理汇总了Python中cms.db.Task类的典型用法代码示例。如果您正苦于以下问题:Python Task类的具体用法?Python Task怎么用?Python Task使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Task类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: post
def post(self):
    """Create a new task (with a default Batch dataset) from the form.

    On any validation error a notification is queued and the client is
    sent back to the add-task page; on success the proxy service is
    reinitialized so the new task appears on RWS.
    """
    fallback_page = "/tasks/add"

    # Collect and validate the task attributes from the request.
    try:
        attrs = {}
        self.get_string(attrs, "name", empty=None)
        self.get_string(attrs, "category")
        assert attrs.get("name") is not None, "No task name specified."
        attrs["title"] = attrs["name"]
        # Default submission format is a single "<name>.%l" element.
        attrs["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % attrs["name"])]
        task = Task(**attrs)
        self.sql_session.add(task)
    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    # Build the task's first dataset with hard-coded Batch/Sum defaults.
    try:
        dataset = Dataset(
            description="Default",
            autojudge=True,
            task_type="Batch",
            task_type_parameters='["alone", ["", ""], "diff"]',
            score_type="Sum",
            score_type_parameters='100',
            task=task)
        self.sql_session.add(dataset)
        # Make the dataset active. Life works better that way.
        task.active_dataset = dataset
    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    if self.try_commit():
        # Create the task on RWS.
        self.application.service.proxy_service.reinitialize()
        self.redirect("/task/%s" % task.id)
    else:
        self.redirect(fallback_page)
示例2: dataset_updated
def dataset_updated(self, task_id):
    """React to a change of a task's active dataset.

    Usually called by AdminWebServer when the contest administrator
    changed the active dataset of a task. All scores for the task must
    then be refreshed from the submission results on the new active
    dataset; results not available yet keep their old scores (they are
    not deleted!) until ScoringService notifies us new ones exist.

    task_id (int): the ID of the task whose dataset has changed.
    """
    with SessionGen() as session:
        task = Task.get_from_id(task_id, session)
        active = task.active_dataset
        logger.info("Dataset update for task %d (dataset now is %d)." % (
            task.id, active.id))
        # max_score and/or extra_headers might have changed.
        self.reinitialize()
        # Re-send the score of every visible, already-scored submission.
        for submission in task.submissions:
            if not submission.user.hidden and \
                    submission.get_result().scored():
                self.send_score(submission)
示例3: extract_complexity
def extract_complexity(task_id, file_lengther=None):
"""Extract the complexity of all submissions of the task. The
results are stored in a file task_<id>.info
task_id (int): the id of the task we are interested in.
file_lengther (type): a File-like object that tell the dimension
of the input (see example above for how to write one).
return (int): 0 if operation was successful.
"""
with SessionGen() as session:
task = Task.get_from_id(task_id, session)
if task is None:
return -1
# Extracting the length of the testcase.
file_cacher = FileCacher()
testcases_lengths = [file_length(testcase.input,
file_cacher, file_lengther)
for testcase in task.testcases]
file_cacher.purge_cache()
# Compute the complexity of the solutions.
with open("task_%s.info" % task_id, "wt") as info:
for submission in task.contest.get_submissions():
if submission.task_id == task_id and \
submission.evaluated():
print submission.user.username
result = extract_complexity_submission(testcases_lengths,
submission)
if result[1] is None:
continue
info.write("Submission: %s" % submission.id)
info.write(" - user: %15s" % submission.user.username)
info.write(" - task: %s" % task.name)
if result[0] is not None:
info.write(" - score: %6.2lf" % result[0])
info.write(" - complexity: %20s" %
complexity_to_string(result[1]))
if result[2] is not None:
info.write(" - confidence %5.1lf" % result[2])
info.write("\n")
return 0
示例4: dataset_updated
def dataset_updated(self, task_id):
    """Update RWS with new data about a task.

    Should be called after the live dataset of a task is changed.

    task_id (int): id of the task whose dataset has changed.
    """
    with SessionGen(commit=False) as session:
        task = Task.get_from_id(task_id, session)
        active = task.active_dataset
        logger.info("Dataset update for task %d (dataset now is %d)." % (
            task.id, active.id))
        # Push the score of every already-scored submission to RWS.
        for submission in task.submissions:
            if submission.get_result().scored():
                self.rankings_send_score(submission)
示例5: get_task
#.........这里部分代码省略.........
args["token_mode"] = "finite"
# Set the old default values.
args["token_gen_initial"] = 0
args["token_gen_number"] = 0
args["token_gen_interval"] = timedelta()
# Copy the parameters to their new names.
load(conf, args, "token_total", "token_max_number")
load(conf, args, "token_min_interval", conv=make_timedelta)
load(conf, args, "token_initial", "token_gen_initial")
load(conf, args, "token_gen_number")
load(conf, args, "token_gen_time", "token_gen_interval",
conv=make_timedelta)
load(conf, args, "token_max", "token_gen_max")
# Remove some corner cases.
if args["token_gen_initial"] is None:
args["token_gen_initial"] = 0
if args["token_gen_interval"].total_seconds() == 0:
args["token_gen_interval"] = timedelta(minutes=1)
load(conf, args, "max_submission_number")
load(conf, args, "max_user_test_number")
load(conf, args, "min_submission_interval", conv=make_timedelta)
load(conf, args, "min_user_test_interval", conv=make_timedelta)
# Attachments
args["attachments"] = []
if os.path.exists(os.path.join(task_path, "att")):
for filename in os.listdir(os.path.join(task_path, "att")):
digest = self.file_cacher.put_file_from_path(
os.path.join(task_path, "att", filename),
"Attachment %s for task %s" % (filename, name))
args["attachments"] += [Attachment(filename, digest)]
task = Task(**args)
args = {}
args["task"] = task
args["description"] = conf.get("version", "Default")
args["autojudge"] = False
load(conf, args, ["time_limit", "timeout"], conv=float)
load(conf, args, ["memory_limit", "memlimit"])
# Builds the parameters that depend on the task type
args["managers"] = []
infile_param = conf.get("infile", "input.txt")
outfile_param = conf.get("outfile", "output.txt")
# If there is sol/grader.%l for some language %l, then,
# presuming that the task type is Batch, we retrieve graders
# in the form sol/grader.%l
graders = False
for lang in LANGUAGES:
if os.path.exists(os.path.join(
task_path, "sol", "grader.%s" % lang)):
graders = True
break
if graders:
# Read grader for each language
for lang in LANGUAGES:
grader_filename = os.path.join(
task_path, "sol", "grader.%s" % lang)
if os.path.exists(grader_filename):
digest = self.file_cacher.put_file_from_path(
grader_filename,
"Grader for task %s and language %s" % (name, lang))
示例6: get_task
def get_task(self, get_statement=True):
"""See docstring in class Loader.
"""
logger.info("Checking dos2unix presence")
i = os.system('dos2unix -V 2>/dev/null')
self.dos2unix_found = (i == 0)
if not self.dos2unix_found:
logger.error("dos2unix not found - tests will not be converted!")
name = os.path.basename(self.path)
logger.info("Loading parameters for task %s.", name)
args = {}
# Here we update the time of the last import.
touch(os.path.join(self.path, ".itime"))
# If this file is not deleted, then the import failed.
touch(os.path.join(self.path, ".import_error"))
# Get alphabetical task index for use in title.
tree = ET.parse(os.path.join(self.path, "problem.xml"))
root = tree.getroot()
args["name"] = name
args["title"] = root.find('names').find("name").attrib['value']
if get_statement:
args["statements"] = []
args["primary_statements"] = []
for language, language_code in LANGUAGE_MAP.iteritems():
path = os.path.join(self.path, 'statements',
'.pdf', language, 'problem.pdf')
if os.path.exists(path):
lang = LANGUAGE_MAP[language]
digest = self.file_cacher.put_file_from_path(
path,
"Statement for task %s (lang: %s)" % (name,
language))
args["statements"].append(Statement(lang, digest))
args["primary_statements"].append(lang)
args["primary_statements"] = json.dumps(args["primary_statements"])
args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]
# These options cannot be configured in the Polygon format.
# Uncomment the following to set specific values for them.
# args['max_submission_number'] = 100
# args['max_user_test_number'] = 100
# args['min_submission_interval'] = make_timedelta(60)
# args['min_user_test_interval'] = make_timedelta(60)
# args['max_user_test_number'] = 10
# args['min_user_test_interval'] = make_timedelta(60)
# args['token_mode'] = 'infinite'
# args['token_max_number'] = 100
# args['token_min_interval'] = make_timedelta(60)
# args['token_gen_initial'] = 1
# args['token_gen_number'] = 1
# args['token_gen_interval'] = make_timedelta(1800)
# args['token_gen_max'] = 2
task_cms_conf_path = os.path.join(self.path, 'files')
task_cms_conf = None
if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
sys.path.append(task_cms_conf_path)
logger.info("Found additional CMS options for task %s.", name)
task_cms_conf = __import__('cms_conf')
# TODO: probably should find more clever way to get rid of caching
task_cms_conf = reload(task_cms_conf)
sys.path.pop()
if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
args.update(task_cms_conf.general)
task = Task(**args)
judging = root.find('judging')
testset = None
for testset in judging:
testset_name = testset.attrib["name"]
args = {}
args["task"] = task
args["description"] = testset_name
args["autojudge"] = False
tl = float(testset.find('time-limit').text)
ml = float(testset.find('memory-limit').text)
args["time_limit"] = tl * 0.001
args["memory_limit"] = int(ml / (1024 * 1024))
args["managers"] = []
infile_param = judging.attrib['input-file']
outfile_param = judging.attrib['output-file']
checker_src = os.path.join(self.path, "files", "check.cpp")
#.........这里部分代码省略.........
示例7: get_task
#.........这里部分代码省略.........
args["submission_format"].append("%s.out" % codename)
elif data["task_type"] == 'Notice':
args["submission_format"] = list()
else:
args["submission_format"] = ["%s.%%l" % name]
# These options cannot be configured in the TPS format.
# Uncomment the following to set specific values for them.
# args['max_user_test_number'] = 10
# args['min_user_test_interval'] = make_timedelta(60)
# args['token_mode'] = 'infinite'
# args['token_max_number'] = 100
# args['token_min_interval'] = make_timedelta(60)
# args['token_gen_initial'] = 1
# args['token_gen_number'] = 1
# args['token_gen_interval'] = make_timedelta(1800)
# args['token_gen_max'] = 2
if "score_precision" in data:
args['score_precision'] = int(data["score_precision"])
else:
args['score_precision'] = 2
args['max_submission_number'] = 50
args['max_user_test_number'] = 50
if data["task_type"] == 'OutputOnly':
args['max_submission_number'] = 100
args['max_user_test_number'] = 100
args['min_submission_interval'] = make_timedelta(60)
args['min_user_test_interval'] = make_timedelta(60)
task = Task(**args)
args = dict()
args["task"] = task
args["description"] = "Default"
args["autojudge"] = True
if data['task_type'] != 'OutputOnly' \
and data['task_type'] != 'Notice':
args["time_limit"] = float(data['time_limit'])
args["memory_limit"] = int(data['memory_limit'])
args["managers"] = {}
# Checker
checker_dir = os.path.join(self.path, "checker")
checker_src = os.path.join(checker_dir, "checker.cpp")
if os.path.exists(checker_src):
logger.info("Checker found, compiling")
checker_exe = os.path.join(checker_dir, "checker")
subprocess.call([
"g++", "-x", "c++", "-std=gnu++14", "-O2", "-static",
"-o", checker_exe, checker_src
])
digest = self.file_cacher.put_file_from_path(
checker_exe,
"Manager for task %s" % name)
args["managers"]['checker'] = Manager("checker", digest)
evaluation_param = "comparator"
else:
logger.info("Checker not found, using diff if necessary")
示例8: get_task
def get_task(self, get_statement=True):
"""See docstring in class Loader.
"""
logger.info("Checking dos2unix presence")
i = os.system('dos2unix -V 2>/dev/null')
self.dos2unix_found = (i == 0)
if not self.dos2unix_found:
logger.error("dos2unix not found - tests will not be converted!")
name = os.path.basename(self.path)
logger.info("Loading parameters for task %s.", name)
args = {}
# Here we update the time of the last import.
touch(os.path.join(self.path, ".itime"))
# If this file is not deleted, then the import failed.
touch(os.path.join(self.path, ".import_error"))
# Get alphabetical task index for use in title.
tree = ET.parse(os.path.join(self.path, "problem.xml"))
root = tree.getroot()
args["name"] = name
args["title"] = str(root.find('names').find("name").attrib['value'])
if get_statement:
args["statements"] = {}
args["primary_statements"] = []
for language, lang in iteritems(LANGUAGE_MAP):
path = os.path.join(self.path, 'statements',
'.pdf', language, 'problem.pdf')
if os.path.exists(path):
digest = self.file_cacher.put_file_from_path(
path,
"Statement for task %s (lang: %s)" % (name,
language))
args["statements"][lang] = Statement(lang, digest)
args["primary_statements"].append(lang)
args["submission_format"] = ["%s.%%l" % name]
# These options cannot be configured in the Polygon format.
# Uncomment the following to set specific values for them.
# args['max_submission_number'] = 100
# args['max_user_test_number'] = 100
# args['min_submission_interval'] = make_timedelta(60)
# args['min_user_test_interval'] = make_timedelta(60)
# args['max_user_test_number'] = 10
# args['min_user_test_interval'] = make_timedelta(60)
# args['token_mode'] = 'infinite'
# args['token_max_number'] = 100
# args['token_min_interval'] = make_timedelta(60)
# args['token_gen_initial'] = 1
# args['token_gen_number'] = 1
# args['token_gen_interval'] = make_timedelta(1800)
# args['token_gen_max'] = 2
task_cms_conf_path = os.path.join(self.path, 'files', 'cms_conf.py')
task_cms_conf = None
if os.path.exists(task_cms_conf_path):
logger.info("Found additional CMS options for task %s.", name)
with io.open(task_cms_conf_path, 'rb') as f:
task_cms_conf = imp.load_module('cms_conf', f,
task_cms_conf_path,
('.py', 'r', imp.PY_SOURCE))
if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
args.update(task_cms_conf.general)
task = Task(**args)
judging = root.find('judging')
testset = None
for testset in judging:
testset_name = testset.attrib["name"]
args = {}
args["task"] = task
args["description"] = str(testset_name)
args["autojudge"] = False
tl = float(testset.find('time-limit').text)
ml = int(testset.find('memory-limit').text)
args["time_limit"] = tl * 0.001
args["memory_limit"] = ml // (1024 * 1024)
args["managers"] = {}
infile_param = judging.attrib['input-file']
outfile_param = judging.attrib['output-file']
# Checker can be in any of these two locations.
checker_src = os.path.join(self.path, "files", "check.cpp")
if not os.path.exists(checker_src):
checker_src = os.path.join(self.path, "check.cpp")
#.........这里部分代码省略.........
示例9: get_task
def get_task(self, get_statement=True):
"""See docstring in class TaskLoader."""
name = os.path.split(self.path)[1]
if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
(not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
logger.critical("File missing: \"task.yaml\"")
return None
# We first look for the yaml file inside the task folder,
# and eventually fallback to a yaml file in its parent folder.
try:
conf = yaml.safe_load(
io.open(os.path.join(self.path, "task.yaml"),
"rt", encoding="utf-8"))
except IOError as err:
try:
deprecated_path = os.path.join(self.path, "..", name + ".yaml")
conf = yaml.safe_load(io.open(deprecated_path, "rt",
encoding="utf-8"))
logger.warning("You're using a deprecated location for the "
"task.yaml file. You're advised to move %s to "
"%s.", deprecated_path,
os.path.join(self.path, "task.yaml"))
except IOError:
# Since both task.yaml and the (deprecated) "../taskname.yaml"
# are missing, we will only warn the user that task.yaml is
# missing (to avoid encouraging the use of the deprecated one)
raise err
# Here we update the time of the last import
touch(os.path.join(self.path, ".itime"))
# If this file is not deleted, then the import failed
touch(os.path.join(self.path, ".import_error"))
args = {}
load(conf, args, ["name", "nome_breve"])
load(conf, args, ["title", "nome"])
if name != args["name"]:
logger.info("The task name (%s) and the directory name (%s) are "
"different. The former will be used.", args["name"],
name)
if args["name"] == args["title"]:
logger.warning("Short name equals long name (title). "
"Please check.")
name = args["name"]
logger.info("Loading parameters for task %s.", name)
if get_statement:
primary_language = load(conf, None, "primary_language")
if primary_language is None:
primary_language = 'it'
paths = [os.path.join(self.path, "statement", "statement.pdf"),
os.path.join(self.path, "testo", "testo.pdf")]
for path in paths:
if os.path.exists(path):
digest = self.file_cacher.put_file_from_path(
path,
"Statement for task %s (lang: %s)" %
(name, primary_language))
break
else:
logger.critical("Couldn't find any task statement, aborting.")
sys.exit(1)
args["statements"] = {
primary_language: Statement(primary_language, digest)
}
args["primary_statements"] = [primary_language]
args["submission_format"] = ["%s.%%l" % name]
if conf.get("score_mode", None) == SCORE_MODE_MAX:
args["score_mode"] = SCORE_MODE_MAX
elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST
# Use the new token settings format if detected.
if "token_mode" in conf:
load(conf, args, "token_mode")
load(conf, args, "token_max_number")
load(conf, args, "token_min_interval", conv=make_timedelta)
load(conf, args, "token_gen_initial")
load(conf, args, "token_gen_number")
load(conf, args, "token_gen_interval", conv=make_timedelta)
load(conf, args, "token_gen_max")
# Otherwise fall back on the old one.
else:
logger.warning(
"task.yaml uses a deprecated format for token settings which "
"will soon stop being supported, you're advised to update it.")
# Determine the mode.
if conf.get("token_initial", None) is None:
args["token_mode"] = TOKEN_MODE_DISABLED
#.........这里部分代码省略.........
示例10: get_task
def get_task(self, name):
"""See docstring in class Loader.
"""
try:
num = self.tasks_order[name]
# Here we expose an undocumented behavior, so that cmsMake can
# import a task even without the whole contest; this is not to
# be relied upon in general
except AttributeError:
num = 1
task_path = os.path.join(self.path, "problems", name)
logger.info("Loading parameters for task %s." % name)
args = {}
# Here we update the time of the last import
touch(os.path.join(task_path, ".itime"))
# If this file is not deleted, then the import failed
touch(os.path.join(task_path, ".import_error"))
args["num"] = num
# get alphabetical task index for use in title
index = None
contest_tree = ET.parse(os.path.join(self.path, "contest.xml"))
contest_root = contest_tree.getroot()
for problem in contest_root.find('problems'):
if os.path.basename(problem.attrib['url']) == name:
index = problem.attrib['index']
tree = ET.parse(os.path.join(task_path, "problem.xml"))
root = tree.getroot()
args["name"] = name
if index is not None:
args["title"] = index.upper() + '. '
else:
args["title"] = ''
args["title"] += root.find('names') \
.find("name[@language='%s']" % self.primary_language) \
.attrib['value']
args["statements"] = []
args["primary_statements"] = []
for language in self.languages:
path = os.path.join(task_path, 'statements',
'.pdf', language, 'problem.pdf')
if os.path.exists(path):
lang = LANGUAGE_MAP[language]
digest = self.file_cacher.put_file_from_path(
path,
"Statement for task %s (lang: %s)" % (name,
language))
args["statements"].append(Statement(lang, digest))
args["primary_statements"].append(lang)
args["primary_statements"] = '["%s"]' % \
'","'.join(args["primary_statements"])
args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]
# args['max_submission_number'] = 100
# args['max_user_test_number'] = 100
# args['min_submission_interval'] = make_timedelta(60)
# args['min_user_test_interval'] = make_timedelta(60)
# args['max_user_test_number'] = 10
# args['min_user_test_interval'] = make_timedelta(60)
# args['token_mode'] = 'infinite'
# args['token_max_number'] = 100
# args['token_min_interval'] = make_timedelta(60)
# args['token_gen_initial'] = 1
# args['token_gen_number'] = 1
# args['token_gen_interval'] = make_timedelta(1800)
# args['token_gen_max'] = 2
task_cms_conf_path = os.path.join(task_path, 'files')
task_cms_conf = None
if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
sys.path.append(task_cms_conf_path)
logger.info("Found additional CMS options for task %s." % name)
task_cms_conf = __import__('cms_conf')
# TODO: probably should find more clever way to get rid of caching
task_cms_conf = reload(task_cms_conf)
sys.path.pop()
if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
args.update(task_cms_conf.general)
task = Task(**args)
judging = root.find('judging')
testset = None
for testset in judging:
testset_name = testset.attrib["name"]
args = {}
#.........这里部分代码省略.........
示例11: post
def post(self):
    """Create a new task from the submitted form fields.

    Validation failures queue a notification and bounce the client back
    to the add-task page; on success the proxy service is reinitialized
    so the new task appears on RWS.
    """
    fallback_page = "/tasks/add"

    # Gather and validate the task-level attributes.
    try:
        attrs = {}
        self.get_string(attrs, "name", empty=None)
        self.get_string(attrs, "title")
        assert attrs.get("name") is not None, "No task name specified."
        self.get_string(attrs, "primary_statements")
        self.get_submission_format(attrs)
        # Token settings.
        self.get_string(attrs, "token_mode")
        self.get_int(attrs, "token_max_number")
        self.get_timedelta_sec(attrs, "token_min_interval")
        self.get_int(attrs, "token_gen_initial")
        self.get_int(attrs, "token_gen_number")
        self.get_timedelta_min(attrs, "token_gen_interval")
        self.get_int(attrs, "token_gen_max")
        # Submission limits.
        self.get_int(attrs, "max_submission_number")
        self.get_int(attrs, "max_user_test_number")
        self.get_timedelta_sec(attrs, "min_submission_interval")
        self.get_timedelta_sec(attrs, "min_user_test_interval")
        # Scoring.
        self.get_int(attrs, "score_precision")
        self.get_string(attrs, "score_mode")
        task = Task(**attrs)
        self.sql_session.add(task)
    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    # Gather the dataset-level attributes and build the first dataset.
    try:
        attrs = {}
        self.get_time_limit(attrs, "time_limit")
        self.get_memory_limit(attrs, "memory_limit")
        self.get_task_type(attrs, "task_type", "TaskTypeOptions_")
        self.get_score_type(attrs, "score_type", "score_type_parameters")
        attrs["description"] = "Default"
        attrs["autojudge"] = True
        attrs["task"] = task
        dataset = Dataset(**attrs)
        self.sql_session.add(dataset)
        # Make the dataset active. Life works better that way.
        task.active_dataset = dataset
    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    if self.try_commit():
        # Create the task on RWS.
        self.application.service.proxy_service.reinitialize()
        self.redirect("/task/%s" % task.id)
    else:
        self.redirect(fallback_page)
示例12: get_task
def get_task(self, name):
"""See docstring in class Loader.
"""
try:
num = self.tasks_order[name]
# Here we expose an undocumented behavior, so that cmsMake can
# import a task even without the whole contest; this is not to
# be relied upon in general
except AttributeError:
num = 1
logger.info("Load task %s" % name)
task_path = os.path.join(self.path, name)
conf = {}
try:
conf = yaml.safe_load(
io.open(os.path.join(task_path, "task.yaml"),
"rt", encoding="utf-8"))
except IOError:
if os.path.exists(os.path.join(task_path, name + ".yaml")):
conf = yaml.safe_load(
io.open(os.path.join(task_path, name + ".yaml"),
"rt", encoding="utf-8"))
args = {}
args["num"] = num
args["name"] = name
args["title"] = name.title()
primary_language = conf.get("task", {}).get("primary_language", "en")
for path in os.listdir(os.path.join(task_path, "statement")):
digest = self.file_cacher.put_file_from_path(
os.path.join(task_path, "statement", path),
"Statement for task %s (lang: %s)" % (name,
primary_language))
break
else:
logger.critical("Couldn't find any task statement, aborting...")
sys.exit(1)
args["statements"] = [Statement(primary_language, digest)]
args["primary_statements"] = '["%s"]' % (primary_language)
args["submission_format"] = [
SubmissionFormatElement("%s.%%l" % name)]
args["token_mode"] = "disabled"
args.update(self.token_mode)
# Load attachments
args["attachments"] = []
if os.path.exists(os.path.join(task_path, "attachments")):
for filename in os.listdir(os.path.join(task_path, "attachments")):
digest = self.file_cacher.put_file_from_path(
os.path.join(task_path, "attachments", filename),
"Attachment %s for task %s" % (filename, name))
args["attachments"] += [Attachment(filename, digest)]
args.update(conf.get("task", {}))
task = Task(**args)
args = {}
args["task"] = task
args["description"] = "Default"
args["autojudge"] = False
args["time_limit"] = 2.0
args["memory_limit"] = 256
args["task_type"] = "Batch"
args["score_type"] = "Sum"
input_file = ""
output_file = ""
args["managers"] = []
# Overwrite parameters
for key, param in conf.iteritems():
if key == "input":
input_file = param
elif key == "output":
output_file = param
elif key == "time_limit":
args[key] = float(param)
elif key in self.timedelta_params:
args[key] = timedelta(seconds=param)
elif key != "subtasks_parameters" and key != "subtasks" and key != "task":
args[key] = param
# Intelligent tests format detector
# Load all tests recursively
def load_tests(tests_path, name):
if os.path.isfile(os.path.join(tests_path, name)):
return [name]
elif os.path.isdir(os.path.join(tests_path, name)):
l = []
for path in os.listdir(os.path.join(tests_path, name)):
l += load_tests(tests_path, os.path.join(name, path))
return l
else:
return []
full_names = load_tests(os.path.join(task_path, "tests"), "")
tests_dict = dict((os.path.split(test)[-1], test)
for test in full_names)
tests = []
#.........这里部分代码省略.........