This page collects typical usage examples of the Python method cms.db.Task.active_dataset. If you are wondering what Task.active_dataset does, how to call it, or where it is used, the curated examples below should help. You can also explore the other members of its class, cms.db.Task.
Below are 9 code examples of Task.active_dataset, ranked by popularity.
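
Before diving into the examples, here is the pattern they all share: a Task owns one or more Dataset rows (testcases, limits, task type, score type), and assigning one of them to Task.active_dataset marks it as the dataset used for official grading. The following minimal sketch mirrors the Task and Dataset parameters of Example 1 below; the SessionGen session handling is the standard cms.db idiom, but treat the whole sketch as illustrative rather than as part of any particular loader.

from cms.db import SessionGen, Task, Dataset

with SessionGen() as session:
    # Create the task itself.
    task = Task(name="example", title="Example Task")
    session.add(task)

    # Every task needs at least one dataset carrying its grading data
    # (parameters copied from Example 1 below).
    dataset = Dataset(
        task=task,
        description="Default",
        autojudge=True,
        task_type="Batch",
        task_type_parameters='["alone", ["", ""], "diff"]',
        score_type="Sum",
        score_type_parameters='100')
    session.add(dataset)

    # Mark this dataset as the one official grading will use.
    task.active_dataset = dataset
    session.commit()

Until active_dataset is assigned, the task has no official dataset, which is why every importer below sets it right after building the Dataset.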

Example 1: post
# Required import: from cms.db import Task [as alias]
# Or alternatively: from cms.db.Task import active_dataset [as alias]
def post(self):
    fallback_page = "/tasks/add"

    try:
        attrs = dict()

        self.get_string(attrs, "name", empty=None)
        self.get_string(attrs, "category")

        assert attrs.get("name") is not None, "No task name specified."

        attrs["title"] = attrs["name"]

        # Set default submission format as ["taskname.%l"]
        attrs["submission_format"] = \
            [SubmissionFormatElement("%s.%%l" % attrs["name"])]

        # Create the task.
        task = Task(**attrs)
        self.sql_session.add(task)

    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    try:
        attrs = dict()

        # Create its first dataset.
        attrs["description"] = "Default"
        attrs["autojudge"] = True
        attrs["task_type"] = "Batch"
        attrs["task_type_parameters"] = '["alone", ["", ""], "diff"]'
        attrs["score_type"] = "Sum"
        attrs["score_type_parameters"] = '100'
        attrs["task"] = task
        dataset = Dataset(**attrs)
        self.sql_session.add(dataset)

        # Make the dataset active. Life works better that way.
        task.active_dataset = dataset

    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    if self.try_commit():
        # Create the task on RWS.
        self.application.service.proxy_service.reinitialize()
        self.redirect("/task/%s" % task.id)
    else:
        self.redirect(fallback_page)

Example 2: get_task
# Required import: from cms.db import Task [as alias]
# Or alternatively: from cms.db.Task import active_dataset [as alias]
# ......... (part of the code omitted here) .........
                input_value = 0.0
                n_input = testcases
                if n_input != 0:
                    input_value = total_value / n_input
                args["score_type_parameters"] = "%s" % input_value
            else:
                subtasks.append([points, testcases])
                assert(100 == sum([int(st[0]) for st in subtasks]))
                n_input = sum([int(st[1]) for st in subtasks])
                args["score_type"] = "GroupMin"
                args["score_type_parameters"] = "%s" % subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

    # If gen/GEN doesn't exist, just fallback to Sum
    except IOError:
        args["score_type"] = "Sum"
        total_value = float(conf.get("total_value", 100.0))
        input_value = 0.0
        n_input = int(conf['n_input'])
        if n_input != 0:
            input_value = total_value / n_input
        args["score_type_parameters"] = "%s" % input_value

    # If output_only is set, then the task type is OutputOnly
    if conf.get('output_only', False):
        args["task_type"] = "OutputOnly"
        args["time_limit"] = None
        args["memory_limit"] = None
        args["task_type_parameters"] = '["%s"]' % evaluation_param
        task.submission_format = [
            SubmissionFormatElement("output_%03d.txt" % i)
            for i in xrange(n_input)]

    # If there is check/manager (or equivalent), then the task
    # type is Communication
    else:
        paths = [os.path.join(task_path, "check", "manager"),
                 os.path.join(task_path, "cor", "manager")]
        for path in paths:
            if os.path.exists(path):
                args["task_type"] = "Communication"
                args["task_type_parameters"] = '[]'
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Manager for task %s" % name)
                args["managers"] += [
                    Manager("manager", digest)]
                for lang in LANGUAGES:
                    stub_name = os.path.join(
                        task_path, "sol", "stub.%s" % lang)
                    if os.path.exists(stub_name):
                        digest = self.file_cacher.put_file_from_path(
                            stub_name,
                            "Stub for task %s and language %s" % (name,
                                                                  lang))
                        args["managers"] += [
                            Manager("stub.%s" % lang, digest)]
                    else:
                        logger.warning("Stub for language %s not "
                                       "found." % lang)
                break

        # Otherwise, the task type is Batch
        else:
            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                '["%s", ["%s", "%s"], "%s"]' % \
                (compilation_param, infile_param, outfile_param,
                 evaluation_param)

    args["testcases"] = []
    for i in xrange(n_input):
        input_digest = self.file_cacher.put_file_from_path(
            os.path.join(task_path, "input", "input%d.txt" % i),
            "Input %d for task %s" % (i, name))
        output_digest = self.file_cacher.put_file_from_path(
            os.path.join(task_path, "output", "output%d.txt" % i),
            "Output %d for task %s" % (i, name))
        args["testcases"] += [
            Testcase("%03d" % i, False, input_digest, output_digest)]
        if args["task_type"] == "OutputOnly":
            task.attachments += [
                Attachment("input_%03d.txt" % i, input_digest)]

    public_testcases = load(conf, None, ["public_testcases", "risultati"],
                            conv=lambda x: "" if x is None else x)
    if public_testcases != "":
        for x in public_testcases.split(","):
            args["testcases"][int(x.strip())].public = True

    dataset = Dataset(**args)
    task.active_dataset = dataset

    # Import was successful
    os.remove(os.path.join(task_path, ".import_error"))

    logger.info("Task parameters loaded.")

    return task

Example 3: get_task
# Required import: from cms.db import Task [as alias]
# Or alternatively: from cms.db.Task import active_dataset [as alias]
# ......... (part of the code omitted here) .........
        task_cms_conf = __import__('cms_conf')
        # TODO: probably should find more clever way to get rid of caching
        task_cms_conf = reload(task_cms_conf)
        sys.path.pop()

    if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
        args.update(task_cms_conf.general)

    task = Task(**args)

    judging = root.find('judging')
    testset = None
    for testset in judging:
        testset_name = testset.attrib["name"]

        args = {}
        args["task"] = task
        args["description"] = testset_name
        args["autojudge"] = False

        tl = float(testset.find('time-limit').text)
        ml = float(testset.find('memory-limit').text)
        args["time_limit"] = tl * 0.001
        args["memory_limit"] = int(ml / (1024 * 1024))

        args["managers"] = []
        infile_param = judging.attrib['input-file']
        outfile_param = judging.attrib['output-file']

        checker_src = os.path.join(self.path, "files", "check.cpp")
        if os.path.exists(checker_src):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(self.path, "files", "checker")
            testlib_path = "/usr/local/include/cms/testlib.h"
            if not config.installed:
                testlib_path = os.path.join(os.path.dirname(__file__),
                                            "polygon", "testlib.h")
            os.system("cat %s | \
                      sed 's$testlib.h$%s$' | \
                      g++ -x c++ -O2 -static -o %s -" %
                      (checker_src, testlib_path, checker_exe))
            digest = self.file_cacher.put_file_from_path(
                checker_exe,
                "Manager for task %s" % name)
            args["managers"] += [
                Manager("checker", digest)]
            evaluation_param = "comparator"
        else:
            logger.info("Checker not found, using diff")
            evaluation_param = "diff"

        args["task_type"] = "Batch"
        args["task_type_parameters"] = \
            '["%s", ["%s", "%s"], "%s"]' % \
            ("alone", infile_param, outfile_param, evaluation_param)

        args["score_type"] = "Sum"
        total_value = 100.0
        input_value = 0.0

        testcases = int(testset.find('test-count').text)

        n_input = testcases
        if n_input != 0:
            input_value = total_value / n_input
        args["score_type_parameters"] = str(input_value)

        args["testcases"] = []

        for i in xrange(testcases):
            infile = os.path.join(self.path, testset_name,
                                  "%02d" % (i + 1))
            outfile = os.path.join(self.path, testset_name,
                                   "%02d.a" % (i + 1))
            if self.dos2unix_found:
                os.system('dos2unix -q %s' % (infile, ))
                os.system('dos2unix -q %s' % (outfile, ))
            input_digest = self.file_cacher.put_file_from_path(
                infile,
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                outfile,
                "Output %d for task %s" % (i, name))
            testcase = Testcase("%03d" % (i, ), False,
                                input_digest, output_digest)
            testcase.public = True
            args["testcases"] += [testcase]

        if task_cms_conf is not None and \
                hasattr(task_cms_conf, "datasets") and \
                testset_name in task_cms_conf.datasets:
            args.update(task_cms_conf.datasets[testset_name])

        dataset = Dataset(**args)
        if testset_name == "tests":
            task.active_dataset = dataset

    os.remove(os.path.join(self.path, ".import_error"))

    logger.info("Task parameters loaded.")

    return task

Example 4: get_task
# Required import: from cms.db import Task [as alias]
# Or alternatively: from cms.db.Task import active_dataset [as alias]
# ......... (part of the code omitted here) .........
args["managers"][pas_manager] = Manager(pas_manager, digest)
if not os.path.exists(graders_dir):
logger.warning('Grader folder was not found')
graders_list = []
else:
graders_list = \
[filename
for filename in os.listdir(graders_dir)
if filename != 'manager.cpp']
for grader_name in graders_list:
grader_src = os.path.join(graders_dir, grader_name)
digest = self.file_cacher.put_file_from_path(
grader_src,
"Manager for task %s" % name)
if data['task_type'] == 'Communication' \
and os.path.splitext(grader_name)[0] == 'grader':
grader_name = 'stub' + os.path.splitext(grader_name)[1]
args["managers"][grader_name] = Manager(grader_name, digest)
# Manager
manager_src = os.path.join(graders_dir, 'manager.cpp')
if os.path.exists(manager_src):
logger.info("Manager found, compiling")
manager_exe = os.path.join(graders_dir, "manager")
subprocess.call([
"g++", "-x", "c++", "-O2", "-static",
"-o", manager_exe, manager_src
])
digest = self.file_cacher.put_file_from_path(
manager_exe,
"Manager for task %s" % name)
args["managers"]["manager"] = Manager("manager", digest)
# Testcases
args["testcases"] = {}
for codename in testcase_codenames:
infile = os.path.join(testcases_dir, "%s.in" % codename)
outfile = os.path.join(testcases_dir, "%s.out" % codename)
if not os.path.exists(outfile):
logger.critical(
'Could not find the output file for testcase %s', codename)
logger.critical('Aborting...')
return
input_digest = self.file_cacher.put_file_from_path(
infile,
"Input %s for task %s" % (codename, name))
output_digest = self.file_cacher.put_file_from_path(
outfile,
"Output %s for task %s" % (codename, name))
testcase = Testcase(codename, True,
input_digest, output_digest)
args["testcases"][codename] = testcase
# Score Type
subtasks_dir = os.path.join(self.path, 'subtasks')
if not os.path.exists(subtasks_dir):
logger.warning('Subtask folder was not found')
subtasks = []
else:
subtasks = sorted(os.listdir(subtasks_dir))
if len(subtasks) == 0:
number_tests = max(len(testcase_codenames), 1)
args["score_type"] = "Sum"
args["score_type_parameters"] = 100 / number_tests
else:
args["score_type"] = "GroupMin"
parsed_data = []
subtask_no = -1
add_optional_name = False
for subtask in subtasks:
subtask_no += 1
with open(os.path.join(subtasks_dir, subtask), 'rt',
encoding='utf-8') as subtask_json:
subtask_data = json.load(subtask_json)
score = int(subtask_data["score"])
testcases = "|".join(
re.escape(testcase)
for testcase in subtask_data["testcases"]
)
optional_name = "Subtask %d" % subtask_no
if subtask_no == 0 and score == 0:
add_optional_name = True
optional_name = "Samples"
if add_optional_name:
parsed_data.append([score, testcases, optional_name])
else:
parsed_data.append([score, testcases])
args["score_type_parameters"] = parsed_data
dataset = Dataset(**args)
task.active_dataset = dataset
logger.info("Task parameters loaded.")
return task

Example 5: get_task
# Required import: from cms.db import Task [as alias]
# Or alternatively: from cms.db.Task import active_dataset [as alias]
# ......... (part of the code omitted here) .........
    task = Task(**args)

    judging = root.find('judging')
    testset = None
    for testset in judging:
        testset_name = testset.attrib["name"]

        args = {}
        args["task"] = task
        args["description"] = str(testset_name)
        args["autojudge"] = False

        tl = float(testset.find('time-limit').text)
        ml = int(testset.find('memory-limit').text)
        args["time_limit"] = tl * 0.001
        args["memory_limit"] = ml // (1024 * 1024)

        args["managers"] = {}
        infile_param = judging.attrib['input-file']
        outfile_param = judging.attrib['output-file']

        # Checker can be in any of these two locations.
        checker_src = os.path.join(self.path, "files", "check.cpp")
        if not os.path.exists(checker_src):
            checker_src = os.path.join(self.path, "check.cpp")

        if os.path.exists(checker_src):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(
                os.path.dirname(checker_src), "checker")
            testlib_path = "/usr/local/include/cms"
            testlib_include = os.path.join(testlib_path, "testlib.h")
            if not config.installed:
                testlib_path = os.path.join(os.path.dirname(__file__),
                                            "polygon")
            code = subprocess.call(["g++", "-x", "c++", "-O2", "-static",
                                    "-DCMS", "-I", testlib_path,
                                    "-include", testlib_include,
                                    "-o", checker_exe, checker_src])
            if code != 0:
                logger.critical("Could not compile checker")
                return None
            digest = self.file_cacher.put_file_from_path(
                checker_exe,
                "Manager for task %s" % name)
            args["managers"]["checker"] = Manager("checker", digest)
            evaluation_param = "comparator"
        else:
            logger.info("Checker not found, using diff")
            evaluation_param = "diff"

        args["task_type"] = "Batch"
        args["task_type_parameters"] = \
            ["alone", [infile_param, outfile_param], evaluation_param]

        args["score_type"] = "Sum"
        total_value = 100.0
        input_value = 0.0

        testcases = int(testset.find('test-count').text)

        n_input = testcases
        if n_input != 0:
            input_value = total_value / n_input
        args["score_type_parameters"] = input_value

        args["testcases"] = {}

        for i in range(testcases):
            infile = os.path.join(self.path, testset_name,
                                  "%02d" % (i + 1))
            outfile = os.path.join(self.path, testset_name,
                                   "%02d.a" % (i + 1))
            if self.dos2unix_found:
                os.system('dos2unix -q %s' % (infile, ))
                os.system('dos2unix -q %s' % (outfile, ))
            input_digest = self.file_cacher.put_file_from_path(
                infile,
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                outfile,
                "Output %d for task %s" % (i, name))
            testcase = Testcase("%03d" % (i, ), False,
                                input_digest, output_digest)
            testcase.public = True
            args["testcases"][testcase.codename] = testcase

        if task_cms_conf is not None and \
                hasattr(task_cms_conf, "datasets") and \
                testset_name in task_cms_conf.datasets:
            args.update(task_cms_conf.datasets[testset_name])

        dataset = Dataset(**args)
        if testset_name == "tests":
            task.active_dataset = dataset

    os.remove(os.path.join(self.path, ".import_error"))

    logger.info("Task parameters loaded.")

    return task

Example 6: get_task
# Required import: from cms.db import Task [as alias]
# Or alternatively: from cms.db.Task import active_dataset [as alias]
# ......... (part of the code omitted here) .........
        load(conf, args, "score_type")
        load(conf, args, "score_type_parameters")
    elif "score_type" in conf or "score_type_parameters" in conf:
        logger.warning("To override score type data, task.yaml must "
                       "specify both 'score_type' and "
                       "'score_type_parameters'.")

    # If output_only is set, then the task type is OutputOnly
    if conf.get('output_only', False):
        args["task_type"] = "OutputOnly"
        args["time_limit"] = None
        args["memory_limit"] = None
        args["task_type_parameters"] = [evaluation_param]
        task.submission_format = \
            ["output_%03d.txt" % i for i in range(n_input)]

    # If there is check/manager (or equivalent), then the task
    # type is Communication
    else:
        paths = [os.path.join(self.path, "check", "manager"),
                 os.path.join(self.path, "cor", "manager")]
        for path in paths:
            if os.path.exists(path):
                num_processes = load(conf, None, "num_processes")
                if num_processes is None:
                    num_processes = 1
                logger.info("Task type Communication")
                args["task_type"] = "Communication"
                args["task_type_parameters"] = [num_processes]
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Manager for task %s" % task.name)
                args["managers"] += [
                    Manager("manager", digest)]
                for lang in LANGUAGES:
                    stub_name = os.path.join(
                        self.path, "sol", "stub%s" % lang.source_extension)
                    if os.path.exists(stub_name):
                        digest = self.file_cacher.put_file_from_path(
                            stub_name,
                            "Stub for task %s and language %s" % (
                                task.name, lang.name))
                        args["managers"] += [
                            Manager(
                                "stub%s" % lang.source_extension, digest)]
                    else:
                        logger.warning("Stub for language %s not "
                                       "found.", lang.name)
                for other_filename in os.listdir(os.path.join(self.path,
                                                              "sol")):
                    if any(other_filename.endswith(header)
                           for header in HEADER_EXTS):
                        digest = self.file_cacher.put_file_from_path(
                            os.path.join(self.path, "sol", other_filename),
                            "Stub %s for task %s" % (other_filename,
                                                     task.name))
                        args["managers"] += [
                            Manager(other_filename, digest)]
                break

        # Otherwise, the task type is Batch
        else:
            args["task_type"] = "Batch"
            args["task_type_parameters"] = \
                [compilation_param, [infile_param, outfile_param],
                 evaluation_param]

    args["testcases"] = []
    for i in range(n_input):
        input_digest = self.file_cacher.put_file_from_path(
            os.path.join(self.path, "input", "input%d.txt" % i),
            "Input %d for task %s" % (i, task.name))
        output_digest = self.file_cacher.put_file_from_path(
            os.path.join(self.path, "output", "output%d.txt" % i),
            "Output %d for task %s" % (i, task.name))
        args["testcases"] += [
            Testcase("%03d" % i, False, input_digest, output_digest)]
        if args["task_type"] == "OutputOnly":
            task.attachments.set(
                Attachment("input_%03d.txt" % i, input_digest))

    public_testcases = load(conf, None, ["public_testcases", "risultati"],
                            conv=lambda x: "" if x is None else x)
    if public_testcases == "all":
        for t in args["testcases"]:
            t.public = True
    elif len(public_testcases) > 0:
        for x in public_testcases.split(","):
            args["testcases"][int(x.strip())].public = True

    args["testcases"] = dict((tc.codename, tc) for tc in args["testcases"])
    args["managers"] = dict((mg.filename, mg) for mg in args["managers"])

    dataset = Dataset(**args)
    task.active_dataset = dataset

    # Import was successful
    os.remove(os.path.join(self.path, ".import_error"))

    logger.info("Task parameters loaded.")

    return task

Example 7: get_task
# Required import: from cms.db import Task [as alias]
# Or alternatively: from cms.db.Task import active_dataset [as alias]
# ......... (part of the code omitted here) .........
    task_cms_conf = None
    if os.path.exists(os.path.join(task_cms_conf_path, 'cms_conf.py')):
        sys.path.append(task_cms_conf_path)
        logger.info("Found additional CMS options for task %s." % name)
        task_cms_conf = __import__('cms_conf')
        # TODO: probably should find more clever way to get rid of caching
        task_cms_conf = reload(task_cms_conf)
        sys.path.pop()

    if task_cms_conf is not None and hasattr(task_cms_conf, "general"):
        args.update(task_cms_conf.general)

    task = Task(**args)

    judging = root.find('judging')
    testset = None
    for testset in judging:
        testset_name = testset.attrib["name"]

        args = {}
        args["task"] = task
        args["description"] = testset_name
        args["autojudge"] = False

        tl = float(testset.find('time-limit').text)
        ml = float(testset.find('memory-limit').text)
        args["time_limit"] = tl * 0.001
        args["memory_limit"] = int(ml / (1024 * 1024))

        args["managers"] = []
        infile_param = judging.attrib['input-file']
        outfile_param = judging.attrib['output-file']

        checker_src = os.path.join(task_path, "files", "check.cpp")
        if os.path.exists(checker_src):
            logger.info("Checker found, compiling")
            checker_exe = os.path.join(task_path, "files", "checker")
            os.system("cat %s | \
                      sed 's$testlib.h$/usr/local/include/cms/testlib.h$' | \
                      g++ -x c++ -O2 -static -o %s -" %
                      (checker_src, checker_exe))
            digest = self.file_cacher.put_file_from_path(
                checker_exe,
                "Manager for task %s" % name)
            args["managers"] += [
                Manager("checker", digest)]
            evaluation_param = "comparator"
        else:
            logger.info("Checker not found, using diff")
            evaluation_param = "diff"

        args["task_type"] = "Batch"
        args["task_type_parameters"] = \
            '["%s", ["%s", "%s"], "%s"]' % \
            ("alone", infile_param, outfile_param, evaluation_param)

        args["score_type"] = "Sum"
        total_value = 100.0
        input_value = 0.0

        testcases = int(testset.find('test-count').text)

        n_input = testcases
        if n_input != 0:
            input_value = total_value / n_input
        args["score_type_parameters"] = str(input_value)

        args["testcases"] = []

        for i in xrange(testcases):
            infile = os.path.join(task_path, testset_name,
                                  "%02d" % (i + 1))
            outfile = os.path.join(task_path, testset_name,
                                   "%02d.a" % (i + 1))
            if self.dos2unix_found:
                os.system('dos2unix -q %s' % (infile, ))
                os.system('dos2unix -q %s' % (outfile, ))
            input_digest = self.file_cacher.put_file_from_path(
                infile,
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                outfile,
                "Output %d for task %s" % (i, name))
            testcase = Testcase("%03d" % (i, ), False,
                                input_digest, output_digest)
            testcase.public = True
            args["testcases"] += [testcase]

        if task_cms_conf is not None and \
                hasattr(task_cms_conf, "datasets") and \
                testset_name in task_cms_conf.datasets:
            args.update(task_cms_conf.datasets[testset_name])

        dataset = Dataset(**args)
        if testset_name == "tests":
            task.active_dataset = dataset

    os.remove(os.path.join(task_path, ".import_error"))

    logger.info("Task parameters loaded.")

    return task

Example 8: post
# Required import: from cms.db import Task [as alias]
# Or alternatively: from cms.db.Task import active_dataset [as alias]
def post(self):
    fallback_page = "/tasks/add"

    try:
        attrs = dict()

        self.get_string(attrs, "name", empty=None)
        self.get_string(attrs, "title")

        assert attrs.get("name") is not None, "No task name specified."

        self.get_string(attrs, "primary_statements")

        self.get_submission_format(attrs)

        self.get_string(attrs, "token_mode")
        self.get_int(attrs, "token_max_number")
        self.get_timedelta_sec(attrs, "token_min_interval")
        self.get_int(attrs, "token_gen_initial")
        self.get_int(attrs, "token_gen_number")
        self.get_timedelta_min(attrs, "token_gen_interval")
        self.get_int(attrs, "token_gen_max")

        self.get_int(attrs, "max_submission_number")
        self.get_int(attrs, "max_user_test_number")
        self.get_timedelta_sec(attrs, "min_submission_interval")
        self.get_timedelta_sec(attrs, "min_user_test_interval")

        self.get_int(attrs, "score_precision")
        self.get_string(attrs, "score_mode")

        # Create the task.
        task = Task(**attrs)
        self.sql_session.add(task)

    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    try:
        attrs = dict()

        self.get_time_limit(attrs, "time_limit")
        self.get_memory_limit(attrs, "memory_limit")
        self.get_task_type(attrs, "task_type", "TaskTypeOptions_")
        self.get_score_type(attrs, "score_type", "score_type_parameters")

        # Create its first dataset.
        attrs["description"] = "Default"
        attrs["autojudge"] = True
        attrs["task"] = task
        dataset = Dataset(**attrs)
        self.sql_session.add(dataset)

        # Make the dataset active. Life works better that way.
        task.active_dataset = dataset

    except Exception as error:
        self.application.service.add_notification(
            make_datetime(), "Invalid field(s)", repr(error))
        self.redirect(fallback_page)
        return

    if self.try_commit():
        # Create the task on RWS.
        self.application.service.proxy_service.reinitialize()
        self.redirect("/task/%s" % task.id)
    else:
        self.redirect(fallback_page)

Example 9: get_task
# Required import: from cms.db import Task [as alias]
# Or alternatively: from cms.db.Task import active_dataset [as alias]
# ......... (part of the code omitted here) .........
    # Autodetect samples
    samples = []
    for test in tests:
        if test["input"].find("dummy") != -1 or \
                test["input"].find("sample") != -1:
            samples.append(test["idx"])
    for i in samples:
        tests[i]["public"] = True

    samples_group = {
        "score": 0,
        "type": "sum",
        "public": rebuild_list(samples),
        "private": [],
        "hidden": []}
    tests_group = {
        "score": 100,
        "type": "sum",
        "public": [],
        "private": [],
        "hidden": []}
    for i in xrange(len(tests)):
        if i not in samples:
            tests_group["private"].append(i)
    tests_group["public"] = rebuild_list(tests_group["public"])
    tests_group["private"] = rebuild_list(tests_group["private"])

    if len(samples) == 0:
        args["score_type_parameters"] = json.dumps([tests_group])
    else:
        args["score_type_parameters"] = json.dumps([samples_group,
                                                    tests_group])

    # Load testcases
    args["testcases"] = []
    for test in tests:
        i = test["idx"]
        input_digest = self.file_cacher.put_file_from_path(
            os.path.join(task_path, "tests", test["input"]),
            "Input %d for task %s" % (i, name))
        output_digest = self.file_cacher.put_file_from_path(
            os.path.join(task_path, "tests", test["output"]),
            "Output %d for task %s" % (i, name))
        args["testcases"] += [
            Testcase("%03d" % i, test["public"],
                     input_digest, output_digest)]

    # Load graders (and stubs if any)
    if os.path.isdir(os.path.join(task_path, "graders")):
        for filename in os.listdir(os.path.join(task_path, "graders")):
            digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "graders", filename),
                "Grader %s for task %s" % (filename, name))
            args["managers"] += [
                Manager(filename, digest)]
        compilation_param = "grader"
    else:
        compilation_param = "alone"

    # Load checker
    paths = [os.path.join(task_path, "checker"),
             os.path.join(task_path, "check"),
             os.path.join(task_path, "check.exe")]
    for path in paths:
        if os.path.isfile(path):
            digest = self.file_cacher.put_file_from_path(
                path,
                "Checker for task %s" % name)
            args["managers"] += [
                Manager("checker", digest)]
            evaluation_param = "comparator"
            break
    else:
        evaluation_param = "diff"

    # If the task type is Communication, try to load manager
    path = os.path.join(task_path, "manager")
    if os.path.isfile(path):
        args["task_type"] = "Communication"
        args["task_type_parameters"] = '[]'
        digest = self.file_cacher.put_file_from_path(
            path,
            "Manager for task %s" % name)
        args["managers"] += [
            Manager("manager", digest)]

    # Set task type parameters
    if args["task_type"] == "OutputOnly":
        args["time_limit"] = None
        args["memory_limit"] = None
        args["task_type_parameters"] = '["%s"]' % evaluation_param
        task.submission_format = [
            SubmissionFormatElement("%03d.out" % (i + 1))
            for i in xrange(len(tests))]
    elif args["task_type"] == "Batch":
        args["task_type_parameters"] = \
            '["%s", ["%s", "%s"], "%s"]' % \
            (compilation_param, input_file, output_file,
             evaluation_param)

    logger.info("Task type is %s" % args["task_type"])

    dataset = Dataset(**args)
    task.active_dataset = dataset

    logger.info("Task parameters loaded.")

    return task
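
All nine examples assign active_dataset while a task is first being imported or created. The attribute can just as well be reassigned later to switch which dataset a task is officially graded on, much as the admin interface does when another dataset is made live. A hedged sketch of that operation follows; the task name "example" and the dataset description "Experimental" are hypothetical placeholders, not part of any example above.

from cms.db import SessionGen, Task, Dataset

with SessionGen() as session:
    # Look up an existing task and one of its non-active datasets
    # (both names are hypothetical).
    task = session.query(Task).filter(Task.name == "example").one()
    new_dataset = session.query(Dataset) \
        .filter(Dataset.task == task) \
        .filter(Dataset.description == "Experimental") \
        .one()

    # Reassigning the attribute is the whole switch: from now on
    # submissions are scored against the newly activated dataset.
    task.active_dataset = new_dataset
    session.commit()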