

Python FileCacher.put_file_from_path Method Code Examples

This article collects typical usage examples of the Python method cms.db.filecacher.FileCacher.put_file_from_path, gathered from open-source projects. If you are unsure how to use FileCacher.put_file_from_path, the curated examples below should help. You can also explore the broader usage of the containing class, cms.db.filecacher.FileCacher.


The following shows 9 code examples of the FileCacher.put_file_from_path method, ordered by popularity.
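Before turning to the examples, here is a minimal usage sketch of the method itself. This is an illustration, not code from any of the projects below: it assumes a configured CMS installation, and the file path and description strings are placeholders. put_file_from_path stores a local file in the cache/backend and returns a content digest, which can later be passed to get_file to retrieve the stored bytes (as example 9 below does).

# A minimal sketch, assuming a configured CMS installation.
# The path and description below are illustrative placeholders.
from cms.db.filecacher import FileCacher

file_cacher = FileCacher()
# Store a local file and get back its content digest.
digest = file_cacher.put_file_from_path(
    "/tmp/statement.pdf",
    "Example statement description")
# The digest identifies the stored content; retrieve it later like so.
with file_cacher.get_file(digest) as fobj:
    data = fobj.read()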

Example 1: add_statement

# Required import: from cms.db.filecacher import FileCacher [as alias]
# Or: from cms.db.filecacher.FileCacher import put_file_from_path [as alias]
def add_statement(task_name, language_code, statement_file, overwrite):
    logger.info("Adding the statement(language: %s) of task %s "
                "in the database.", language_code, task_name)

    if not os.path.exists(statement_file):
        logger.error("Statement file (path: %s) does not exist.",
                     statement_file)
        return False
    if not statement_file.endswith(".pdf"):
        logger.error("Statement file should be a pdf file.")
        return False

    with SessionGen() as session:
        task = session.query(Task)\
            .filter(Task.name == task_name).first()
        if not task:
            logger.error("No task named %s", task_name)
            return False
        try:
            file_cacher = FileCacher()
            digest = file_cacher.put_file_from_path(
                statement_file,
                "Statement for task %s (lang: %s)" %
                (task_name, language_code))
        except Exception:
            logger.error("Task statement storage failed.", exc_info=True)
            return False
        arr = session.query(Statement)\
            .filter(Statement.language == language_code)\
            .filter(Statement.task == task)\
            .all()
        if arr:  # Statement already exists
            if overwrite:
                logger.info("Overwriting already existing statement.")
                session.delete(arr[0])
                session.commit()
            else:
                logger.error("A statement with given language already exists. "
                             "Not overwriting.")
                return False
        statement = Statement(language_code, digest, task=task)
        session.add(statement)
        session.commit()

    logger.info("Statement added.")
    return True
Developer: Nyrio, Project: cms, Lines: 47, Source: AddStatement.py

Example 2: test_testcases

# Required import: from cms.db.filecacher import FileCacher [as alias]
# Or: from cms.db.filecacher.FileCacher import put_file_from_path [as alias]
def test_testcases(base_dir, soluzione, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend to avoid filling
    # the database with junk
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    # Load the task
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without previous get_contest() calls
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, soluzione),
        "Solution %s for task %s" % (soluzione, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [(t, EvaluationJob(
        task_type=dataset.task_type,
        task_type_parameters=json.loads(dataset.task_type_parameters),
        managers=dict(dataset.managers),
        executables=executables,
        input=dataset.testcases[t].input, output=dataset.testcases[t].output,
        time_limit=dataset.time_limit,
        memory_limit=dataset.memory_limit)) for t in dataset.testcases]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print jobinfo[0],
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus["exit_status"]
        info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                   (job.plus["execution_time"],
                    job.plus["execution_wall_clock_time"],
                    mem_human(job.plus["execution_memory"])))
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print
            print "Want to stop and consider everything to timeout? [y/N]",
            if assume is not None:
                print assume
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Result pretty printing
    print
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print "%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen))

    return zip(points, comments, info)
Developer: laskarcyber, Project: cms, Lines: 95, Source: Test.py

Example 3: add_submission

# Required import: from cms.db.filecacher import FileCacher [as alias]
# Or: from cms.db.filecacher.FileCacher import put_file_from_path [as alias]
def add_submission(contest_id, username, task_name, timestamp, files):
    file_cacher = FileCacher()
    with SessionGen() as session:

        participation = session.query(Participation)\
            .join(Participation.user)\
            .filter(Participation.contest_id == contest_id)\
            .filter(User.username == username)\
            .first()
        if participation is None:
            logging.critical("User `%s' does not exists or "
                             "does not participate in the contest.", username)
            return False
        task = session.query(Task)\
            .filter(Task.contest_id == contest_id)\
            .filter(Task.name == task_name)\
            .first()
        if task is None:
            logging.critical("Unable to find task `%s'.", task_name)
            return False

        elements = set(task.submission_format)

        for file_ in files:
            if file_ not in elements:
                logging.critical("File `%s' is not in the submission format "
                                 "for the task.", file_)
                return False

        if any(element not in files for element in elements):
            logger.warning("Not all files from the submission format were "
                           "provided.")

        # files is now a subset of elements.
        # We ensure we can infer a language if the task requires it.
        language = None
        need_lang = any(element.find(".%l") != -1 for element in elements)
        if need_lang:
            try:
                language = language_from_submitted_files(files)
            except ValueError as e:
                logger.critical(e)
                return False
            if language is None:
                # This might happen in case not all files were provided.
                logger.critical("Unable to infer language from submission.")
                return False
        language_name = None if language is None else language.name

        # Store all files from the arguments, and obtain their digests.
        file_digests = {}
        try:
            for file_ in files:
                digest = file_cacher.put_file_from_path(
                    files[file_],
                    "Submission file %s sent by %s at %d."
                    % (file_, username, timestamp))
                file_digests[file_] = digest
        except Exception as e:
            logger.critical("Error while storing submission's file: %s.", e)
            return False

        # Create objects in the DB.
        submission = Submission(make_datetime(timestamp), language_name,
                                participation=participation, task=task)
        for filename, digest in iteritems(file_digests):
            session.add(File(filename, digest, submission=submission))
        session.add(submission)
        session.commit()
        maybe_send_notification(submission.id)

    return True
Developer: Nyrio, Project: cms, Lines: 74, Source: AddSubmission.py

Example 4: add_submission

# Required import: from cms.db.filecacher import FileCacher [as alias]
# Or: from cms.db.filecacher.FileCacher import put_file_from_path [as alias]
def add_submission(contest_id, username, task_name, timestamp, files):
    file_cacher = FileCacher()
    with SessionGen() as session:

        participation = session.query(Participation)\
            .join(Participation.user)\
            .filter(Participation.contest_id == contest_id)\
            .filter(User.username == username)\
            .first()
        if participation is None:
            logging.critical("User `%s' does not exists or "
                             "does not participate in the contest.", username)
            return False
        task = session.query(Task)\
            .filter(Task.contest_id == contest_id)\
            .filter(Task.name == task_name)\
            .first()
        if task is None:
            logging.critical("Unable to find task `%s'.", task_name)
            return False

        elements = [format.filename for format in task.submission_format]

        for file_ in files:
            if file_ not in elements:
                logging.critical("File `%s' is not in the submission format "
                                 "for the task.", file_)
                return False

        if any(element not in files for element in elements):
            logger.warning("Not all files from the submission format were "
                           "provided.")

        # files is now a subset of elements. We compute the language
        # for each file and check that they do not mix.
        language = None
        for file_ in files:
            this_language = filename_to_language(files[file_])
            if this_language is None and "%l" in file_:
                logger.critical("Cannot recognize language for file `%s'.",
                                file_)
                return False

            if language is None:
                language = this_language
            elif this_language is not None and language != this_language:
                logger.critical("Mixed-language submission detected.")
                return False

        # Store all files from the arguments, and obtain their digests.
        file_digests = {}
        try:
            for file_ in files:
                digest = file_cacher.put_file_from_path(
                    files[file_],
                    "Submission file %s sent by %s at %d."
                    % (file_, username, timestamp))
                file_digests[file_] = digest
        except Exception:
            logger.critical("Error while storing submission's file.",
                            exc_info=True)
            return False

        # Create objects in the DB.
        submission = Submission(make_datetime(timestamp), language,
                                participation=participation, task=task)
        for filename, digest in file_digests.items():
            session.add(File(filename, digest, submission=submission))
        session.add(submission)
        session.commit()

    return True
Developer: Corea, Project: cms, Lines: 74, Source: AddSubmission.py

Example 5: test_testcases

# Required import: from cms.db.filecacher import FileCacher [as alias]
# Or: from cms.db.filecacher.FileCacher import put_file_from_path [as alias]
def test_testcases(base_dir, solution, language, assume=None):
    global task, file_cacher

    # Use a FileCacher with a NullBackend to avoid filling
    # the database with junk
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    cmscontrib.loaders.italy_yaml.logger = NullLogger()
    # Load the task
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded
    if task is None:
        loader = cmscontrib.loaders.italy_yaml.YamlLoader(base_dir,
                                                          file_cacher)
        task = loader.get_task(get_statement=False)

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, solution),
        "Solution %s for task %s" % (solution, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = [(t, EvaluationJob(
        language=language,
        task_type=dataset.task_type,
        task_type_parameters=json.loads(dataset.task_type_parameters),
        managers=dict(dataset.managers),
        executables=executables,
        input=dataset.testcases[t].input, output=dataset.testcases[t].output,
        time_limit=dataset.time_limit,
        memory_limit=dataset.memory_limit)) for t in dataset.testcases]
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0])
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            move_cursor(directions.UP, erase=True)
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus.get("exit_status")
        info.append((job.plus.get("execution_time"),
                     job.plus.get("execution_memory")))
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                print(assume)
                tmp = assume
            else:
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False
            print()
        move_cursor(directions.UP, erase=True)

    # Subtasks scoring
    try:
        subtasks = json.loads(dataset.score_type_parameters)
        subtasks[0]
    except Exception:
        subtasks = [[100, len(info)]]

    if dataset.score_type == 'GroupMin':
        scoreFun = min
    else:
        if dataset.score_type != 'Sum':
            logger.warning("Score type %s not yet supported! Using Sum"
                           % dataset.score_type)

        def scoreFun(x):
#......... (rest of the code omitted) .........
Developer: olimpiadi-informatica, Project: cms, Lines: 103, Source: Test.py

Example 6: ContestImporter

# Required import: from cms.db.filecacher import FileCacher [as alias]
# Or: from cms.db.filecacher.FileCacher import put_file_from_path [as alias]

#......... (part of the code omitted) .........

        cls = getattr(class_hook, data["_class"])

        args = dict()

        for prp in cls._col_props:
            if prp.key not in data:
                # We will let the __init__ of the class check if any
                # argument is missing, so it's safe to just skip here.
                continue

            col = prp.columns[0]
            col_type = type(col.type)

            val = data[prp.key]
            if col_type in [Boolean, Integer, Float, Unicode, RepeatedUnicode]:
                args[prp.key] = val
            elif col_type is String:
                args[prp.key] = val.encode('latin1') if val is not None else None
            elif col_type is DateTime:
                args[prp.key] = make_datetime(val) if val is not None else None
            elif col_type is Interval:
                args[prp.key] = timedelta(seconds=val) if val is not None else None
            else:
                raise RuntimeError("Unknown SQLAlchemy column type: %s" % col_type)

        return cls(**args)

    def add_relationships(self, data, obj):

        """Add the relationships to the given object, using the given data.

        Do what we didn't in import_object: importing relationships.
        We already know the class of the object, so we simply iterate over
        its relationship properties trying to load them from the data (if
        present), checking whether they are IDs or collections of IDs,
        dereferencing them (i.e. getting the corresponding object) and
        reflecting all on the given object.

        Note that both this method and import_object don't check if the
        given data has more items than the ones we understand and use.

        """

        cls = type(obj)

        for prp in cls._rel_props:
            if prp.key not in data:
                # Relationships are always optional
                continue

            val = data[prp.key]
            if val is None:
                setattr(obj, prp.key, None)
            elif type(val) == unicode:
                setattr(obj, prp.key, self.objs[val])
            elif type(val) == list:
                setattr(obj, prp.key, list(self.objs[i] for i in val))
            elif type(val) == dict:
                setattr(obj, prp.key, dict((k, self.objs[v]) for k, v in val.iteritems()))
            else:
                raise RuntimeError("Unknown RelationshipProperty value: %s" % type(val))

    def safe_put_file(self, path, descr_path):

        """Put a file to FileCacher signaling every error (including
        digest mismatch).

        path (string): the path from which to load the file.
        descr_path (string): same for description.

        return (bool): True if all ok, False if something wrong.

        """

        # TODO - Probably this method could be merged in FileCacher

        # First read the description.
        try:
            with io.open(descr_path, 'rt', encoding='utf-8') as fin:
                description = fin.read()
        except IOError:
            description = ''

        # Put the file.
        try:
            digest = self.file_cacher.put_file_from_path(path, description)
        except Exception as error:
            logger.critical("File %s could not be put to file server (%r), "
                            "aborting." % (path, error))
            return False

        # Then check the digest.
        calc_digest = sha1sum(path)
        if digest != calc_digest:
            logger.critical("File %s has hash %s, but the server returned %s, "
                            "aborting." % (path, calc_digest, digest))
            return False

        return True
Developer: ronalchn, Project: cms, Lines: 104, Source: ContestImporter.py

Example 7: main

# Required import: from cms.db.filecacher import FileCacher [as alias]
# Or: from cms.db.filecacher.FileCacher import put_file_from_path [as alias]
def main():
    if len(sys.argv) != 2:
        print "%s [file delle domande]" % sys.argv[0]
        sys.exit(0)

    lines = file(sys.argv[1]).readlines()
    test = Test()
    test.name = os.path.basename(sys.argv[1]).replace(".txt", "")
    test.description = lines[0].strip()
    test.max_score = 0
    dirname = os.path.dirname(sys.argv[1])
    question = TestQuestion()
    question.text = "<p>\n"
    file_cacher = FileCacher()
    answers = []

    status = "score"
    for l in lines[1:]:
        l = escape(l)
        if l[:3] == '===':
            question.text += "</p>"
            question.answers = json.dumps(answers)
            test.questions.append(question)
            status = "score"
            question = TestQuestion()
            question.text = "<p>\n"
            continue

        if l[:3] == '---':
            status = "choice"
            question.type = "choice"
            answers = []
            continue

        if l[:3] == '+++':
            status = "answer"
            answers = []
            continue

        if status == "score":
            try:
                score, wrong_score = map(int, l.split(","))
                test.max_score += score
            except ValueError:
                continue
            question.score = score
            question.wrong_score = wrong_score
            status = "text"
            continue

        if status == "text":
            if l == "\n":
                question.text += "</p><p>\n"
            elif l[:2] == "[[" and l[-3:] == "]]\n":
                name = l[2:-3]
                digest = file_cacher.put_file_from_path(
                    os.path.join(dirname, "data", name),
                    "Image %s for test %s" % (name, test.name))
                question.text += "<center>"
                question.text += "<img src='/files/%s/%s'/>" % (digest, name)
                question.text += "</center>\n"
                f = QuestionFile(filename=name, digest=digest)
                question.files.append(f)
            elif l[:-1] == "```":
                question.text += "<pre>"
            elif l[:-1] == "'''":
                question.text += "</pre>"
            else:
                question.text += l

        if status == "choice":
            answers.append([l[1:].strip(), l[0] == '*'])

        if status == "answer":
            pos = l.index(":")
            name = l[:pos]
            value = json.loads("[" + l[pos + 1:] + "]")
            if isinstance(value[0], basestring):
                question.type = "string"
            elif not question.type:
                question.type = "number"
            answers.append([name, value])

    if status == "answer":
        question.text += "</p>"
        question.answers = json.dumps(answers)
        test.questions.append(question)

    with SessionGen() as session:
        test.access_level = 7
        session.add(test)
        session.commit()
Developer: NextLight, Project: oii-web, Lines: 94, Source: TestImporter.py

Example 8: DumpImporter

# Required import: from cms.db.filecacher import FileCacher [as alias]
# Or: from cms.db.filecacher.FileCacher import put_file_from_path [as alias]

#......... (part of the code omitted) .........
        relationships in a later moment, using the add_relationships
        method.

        Note that both this method and add_relationships don't check if
        the given data has more items than the ones we understand and
        use.

        """

        cls = getattr(class_hook, data["_class"])

        args = dict()

        for prp in cls._col_props:
            if prp.key not in data:
                # We will let the __init__ of the class check if any
                # argument is missing, so it's safe to just skip here.
                continue

            col = prp.columns[0]

            val = data[prp.key]
            args[prp.key] = decode_value(col.type, val)

        return cls(**args)

    def add_relationships(self, data, obj):

        """Add the relationships to the given object, using the given data.

        Do what we didn't in import_object: importing relationships.
        We already know the class of the object, so we simply iterate over
        its relationship properties trying to load them from the data (if
        present), checking whether they are IDs or collections of IDs,
        dereferencing them (i.e. getting the corresponding object) and
        reflecting all on the given object.

        Note that both this method and import_object don't check if the
        given data has more items than the ones we understand and use.

        """

        cls = type(obj)

        for prp in cls._rel_props:
            if prp.key not in data:
                # Relationships are always optional
                continue

            val = data[prp.key]
            if val is None:
                setattr(obj, prp.key, None)
            elif isinstance(val, str):
                setattr(obj, prp.key, self.objs[val])
            elif isinstance(val, list):
                setattr(obj, prp.key, list(self.objs[i] for i in val))
            elif isinstance(val, dict):
                setattr(obj, prp.key,
                        dict((k, self.objs[v]) for k, v in iteritems(val)))
            else:
                raise RuntimeError(
                    "Unknown RelationshipProperty value: %s" % type(val))

    def safe_put_file(self, path, descr_path):

        """Put a file to FileCacher signaling every error (including
        digest mismatch).

        path (string): the path from which to load the file.
        descr_path (string): same for description.

        return (bool): True if all ok, False if something wrong.

        """

        # TODO - Probably this method could be merged in FileCacher

        # First read the description.
        try:
            with io.open(descr_path, 'rt', encoding='utf-8') as fin:
                description = fin.read()
        except IOError:
            description = ''

        # Put the file.
        try:
            digest = self.file_cacher.put_file_from_path(path, description)
        except Exception as error:
            logger.critical("File %s could not be put to file server (%r), "
                            "aborting.", path, error)
            return False

        # Then check the digest.
        calc_digest = path_digest(path)
        if digest != calc_digest:
            logger.critical("File %s has hash %s, but the server returned %s, "
                            "aborting.", path, calc_digest, digest)
            return False

        return True
Developer: Nyrio, Project: cms, Lines: 104, Source: DumpImporter.py

Example 9: test_testcases

# Required import: from cms.db.filecacher import FileCacher [as alias]
# Or: from cms.db.filecacher.FileCacher import put_file_from_path [as alias]
def test_testcases(base_dir, soluzione, language, assume=None):
    global task, file_cacher

    # Use a disabled FileCacher with an FSBackend to avoid filling
    # the database with junk and to save space.
    if file_cacher is None:
        file_cacher = FileCacher(path=os.path.join(config.cache_dir,
                                                   'cmsMake'),
                                 enabled=False)

    # Load the task
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest first, but YamlLoader
        # accepts get_task() even without previous get_contest() calls
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    if dataset.task_type != "OutputOnly":
        digest = file_cacher.put_file_from_path(
            os.path.join(base_dir, soluzione),
            "Solution %s for task %s" % (soluzione, task.name))
        executables = {task.name: Executable(filename=task.name,
                                             digest=digest)}
        jobs = [(t, EvaluationJob(
            language=language,
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            executables=executables,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        tasktype = get_task_type(dataset=dataset)
    else:
        print("Generating outputs...", end='')
        files = {}
        for t in sorted(dataset.testcases.keys()):
            with file_cacher.get_file(dataset.testcases[t].input) as fin:
                with TemporaryFile() as fout:
                    print(str(t), end='')
                    call(soluzione, stdin=fin, stdout=fout, cwd=base_dir)
                    fout.seek(0)
                    digest = file_cacher.put_file_from_fobj(fout)
                    outname = "output_%s.txt" % t
                    files[outname] = File(filename=outname, digest=digest)
        jobs = [(t, EvaluationJob(
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            files=files,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        for k, job in jobs:
            job._key = k
        tasktype = get_task_type(dataset=dataset)
        print()

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        if dataset.task_type != "OutputOnly":
            status = job.plus["exit_status"]
            info.append("Time: %5.3f   Wall: %5.3f   Memory: %s" %
                       (job.plus["execution_time"],
                        job.plus["execution_wall_clock_time"],
                        mem_human(job.plus["execution_memory"])))
        else:
            status = "ok"
            info.append("N/A")
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

#......... (rest of the code omitted) .........
Developer: gabrfarina, Project: oii-web, Lines: 103, Source: Test.py


Note: The cms.db.filecacher.FileCacher.put_file_from_path examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.