

Python Assessment.create Method Code Examples

This article collects typical usage examples of the Python method openassessment.assessment.models.Assessment.create. If you have been wondering what exactly Assessment.create does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the class it belongs to, openassessment.assessment.models.Assessment.


The following presents 13 code examples of the Assessment.create method, sorted by popularity by default.
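
Before the individual examples, a minimal sketch of the pattern that recurs in nearly all of them may help: get or create a Rubric from a serialized dict, call Assessment.create, then attach one AssessmentPart per criterion. The placeholder values below (rubric_dict, options_selected, criterion_feedback, the scorer ID and submission UUID) and the import path assumed for rubric_from_dict are illustrative only; consult the examples and the edx-ora2 source for the exact schema in your version.

# Minimal sketch of the recurring pattern (placeholder values; assumptions noted inline)
from openassessment.assessment.models import Assessment, AssessmentPart
# rubric_from_dict is used throughout the examples below; this import path is assumed
# and may differ between edx-ora2 versions.
from openassessment.assessment.serializers import rubric_from_dict

rubric_dict = {"criteria": []}                      # placeholder: a serialized rubric (see the RUBRIC fixture in Example 2)
options_selected = {"clarity": "good"}              # placeholder: criterion name -> selected option name
criterion_feedback = {"clarity": "Nice work."}      # placeholder: optional per-criterion free-form feedback

rubric = rubric_from_dict(rubric_dict)              # get or create the Rubric model
assessment = Assessment.create(
    rubric,
    "scorer_id",                                    # placeholder: user ID of the scorer
    "submission UUID",                              # placeholder: the submission being assessed
    "PE",                                           # score type; the examples use "PE" for peer and "ST" for staff
    scored_at=None,                                 # optional override of the assessment timestamp
    feedback="Overall feedback on the submission",  # optional overall free-form feedback
)
# Attach one AssessmentPart per criterion; this raises InvalidRubricSelection
# if the selected options do not match the rubric.
AssessmentPart.create_from_option_names(
    assessment, options_selected, feedback=criterion_feedback
)

The peer, staff, and self API functions in Examples 3, 4, and 7 wrap exactly this sequence, adding workflow bookkeeping around it.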

Example 1: test_create_with_feedback_only_criterion

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
    def test_create_with_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts
        # We can't select an option for the last criterion, but we do
        # provide written feedback.
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {
            u"feedback": u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐."
        }
        AssessmentPart.create_from_option_names(
            assessment, selected, feedback=feedback
        )

        # Check the score (the feedback-only criterion should count for 0 points)
        self.assertEqual(assessment.points_earned, 3)
        self.assertEqual(assessment.points_possible, 4)

        # Check the feedback text
        feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
        self.assertEqual(feedback_only.feedback, u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐.")
Developer: edx, Project: edx-ora2, Lines of code: 27, Source file: test_assessment_models.py

Example 2: test_full_assessment_dict_criteria_no_options

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
    def test_full_assessment_dict_criteria_no_options(self):
        # Create a rubric with a criterion that has no options (just feedback)
        rubric_dict = copy.deepcopy(RUBRIC)
        rubric_dict['criteria'].append({
            'order_num': 2,
            'name': 'feedback only',
            'prompt': 'feedback only',
            'options': []
        })
        rubric = rubric_from_dict(rubric_dict)

        # Create an assessment for the rubric
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {
            u"feedback only": u"enjoy the feedback!"
        }
        AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

        # Serialize the assessment
        serialized = full_assessment_dict(assessment)

        # Verify that the assessment dict correctly serialized the criterion with options.
        self.assertEqual(serialized['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(serialized['parts'][0]['option']['name'], u"𝓰𝓸𝓸𝓭")
        self.assertEqual(serialized['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
        self.assertEqual(serialized['parts'][1]['option']['name'], u"єχ¢єℓℓєηт")

        # Verify that the assessment dict correctly serialized the criterion with no options.
        self.assertIs(serialized['parts'][2]['option'], None)
        self.assertEqual(serialized['parts'][2]['criterion']['name'], u"feedback only")
Developer: Akif-Vohra, Project: edx-ora2, Lines of code: 36, Source file: test_serializers.py

Example 3: _complete_assessment

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
def _complete_assessment(
    rubric_dict,
    scorer_id,
    peer_submission_uuid,
    options_selected,
    criterion_feedback,
    scorer_workflow,
    overall_feedback,
    num_required_grades,
    scored_at,
):
    """
    Internal function for atomic assessment creation. Creates a peer assessment
    and closes the associated peer workflow item in a single transaction.

    Args:
        rubric_dict (dict): The rubric model associated with this assessment
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        peer_submission_uuid (str): The submission uuid for the submission being
            assessed.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the
            scorer. Updates the workflow item associated with this assessment.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the peer assessment
    assessment = Assessment.create(
        rubric, scorer_id, peer_submission_uuid, PEER_TYPE, scored_at=scored_at, feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
    return assessment
Developer: robertgerinlajoie, Project: edx-ora2, Lines of code: 60, Source file: peer.py

Example 4: _complete_assessment

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
def _complete_assessment(
        submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at,
        scorer_workflow
):
    """
    Internal function for atomic assessment creation. Creates a staff assessment
    in a single transaction.

    Args:
        submission_uuid (str): The submission uuid for the submission being
            assessed.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): The rubric model associated with this assessment
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the staff assessment
    assessment = Assessment.create(
        rubric,
        scorer_id,
        submission_uuid,
        STAFF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    if scorer_workflow is not None:
        scorer_workflow.close_active_assessment(assessment, scorer_id)
    return assessment
Developer: openfun, Project: edx-ora2, Lines of code: 59, Source file: staff.py

Example 5: test_create_from_option_points_all_feedback_only_criteria

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
    def test_create_from_option_points_all_feedback_only_criteria(self):
        rubric = self._rubric_with_all_feedback_only_criteria()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Since there are no criteria with options, and we're not
        # providing written feedback, pass in an empty selection.
        selected = {}
        AssessmentPart.create_from_option_points(assessment, selected)

        # Score should be zero, since none of the criteria have options
        self.assertEqual(assessment.points_earned, 0)
        self.assertEqual(assessment.points_possible, 0)
Developer: edx, Project: edx-ora2, Lines of code: 14, Source file: test_assessment_models.py

Example 6: test_no_feedback_provided_for_feedback_only_criterion

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
    def test_no_feedback_provided_for_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts
        # Do NOT provide feedback for the feedback-only criterion
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {}

        # Expect an error when we try to create the assessment parts
        with self.assertRaises(InvalidRubricSelection):
            AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
Developer: edx, Project: edx-ora2, Lines of code: 17, Source file: test_assessment_models.py

Example 7: _complete_assessment

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
def _complete_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at
):
    """
    Internal function for creating an assessment and its parts atomically.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.
        scored_at (datetime): The timestamp of the assessment.

    Returns:
        Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the self assessment
    assessment = Assessment.create(
        rubric,
        user_id,
        submission_uuid,
        SELF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
    return assessment
Developer: Stanford-Online, Project: edx-ora2, Lines of code: 49, Source file: self.py

Example 8: test_create_with_all_feedback_only_criteria

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
    def test_create_with_all_feedback_only_criteria(self):
        rubric = self._rubric_with_all_feedback_only_criteria()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts, each of which are feedback-only (no points)
        selected = {}
        feedback = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        AssessmentPart.create_from_option_names(
            assessment, selected, feedback=feedback
        )

        # Check the score (should be 0, since we haven't selected any points)
        self.assertEqual(assessment.points_earned, 0)
        self.assertEqual(assessment.points_possible, 0)
Developer: edx, Project: edx-ora2, Lines of code: 19, Source file: test_assessment_models.py

Example 9: test_create_from_option_points_feedback_only_criterion

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
    def test_create_from_option_points_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts by providing scores for options
        # but NO feedback.  This simulates how an example-based AI
        # assessment is created.
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": 2,
            u"ﻭɼค๓๓คɼ": 1,
        }
        AssessmentPart.create_from_option_points(assessment, selected)

        # Check the score (the feedback-only criterion should count for 0 points)
        self.assertEqual(assessment.points_earned, 3)
        self.assertEqual(assessment.points_possible, 4)

        # Check the feedback text (should default to an empty string)
        feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
        self.assertEqual(feedback_only.feedback, u"")
Developer: edx, Project: edx-ora2, Lines of code: 22, Source file: test_assessment_models.py

Example 10: test_default_feedback_for_feedback_only_criterion

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
    def test_default_feedback_for_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts, but do NOT provide any feedback
        # This simulates how non-peer assessments are created
        # Note that this is different from providing an empty feedback dict;
        # here, we're not providing the `feedback` kwarg at all.
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        AssessmentPart.create_from_option_names(assessment, selected)

        # Check the score (the feedback-only criterion should count for 0 points)
        self.assertEqual(assessment.points_earned, 3)
        self.assertEqual(assessment.points_possible, 4)

        # Check the feedback text, which should default to an empty string
        feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
        self.assertEqual(feedback_only.feedback, u"")
Developer: edx, Project: edx-ora2, Lines of code: 23, Source file: test_assessment_models.py

Example 11: create_assessment

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
def create_assessment(
    scorer_submission_uuid,
    scorer_id,
    options_selected,
    criterion_feedback,
    overall_feedback,
    rubric_dict,
    num_required_grades,
    scored_at=None
):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
    """
    try:
        # Retrieve workflow information
        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}."
            ).format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)
        peer_submission_uuid = peer_workflow_item.author.submission_uuid

        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the peer assessment
        assessment = Assessment.create(
            rubric,
            scorer_id,
            peer_submission_uuid,
            PEER_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # Create assessment parts for each criterion in the rubric
        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

        # Close the active assessment
        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
        _log_assessment(assessment, scorer_workflow)
        return full_assessment_dict(assessment)
    except PeerWorkflow.DoesNotExist:
        message = (
            u"There is no Peer Workflow associated with the given "
            u"submission UUID {}."
        ).format(scorer_submission_uuid)
        logger.exception(message)
        raise PeerAssessmentWorkflowError(message)
    except InvalidRubric:
        msg = u"Rubric definition was not valid"
        logger.exception(msg)
        raise PeerAssessmentRequestError(msg)
    except InvalidRubricSelection:
#......... part of the code omitted here .........
Developer: caesar2164, Project: edx-ora2, Lines of code: 103, Source file: peer.py

Example 12: main

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
def main():
    from django.contrib.auth.models import User
    from openassessment.assessment.models import Assessment, AssessmentPart, StaffWorkflow
    from openassessment.workflow.models import AssessmentWorkflow, AssessmentWorkflowStep
    from student.models import anonymous_id_for_user, user_by_anonymous_id
    from submissions.models import Score, ScoreSummary, ScoreAnnotation, Submission

    old_scores = Score.objects.filter(submission__isnull=True, reset=False).order_by('id')
    updated_count = 0
    for score in old_scores:
        try:
            with transaction.atomic():
                # ScoreSummary is updated on Score saves but for this script we don't want that.
                # Correct way is to disconnect post_save signal, but since the receiver function
                # is defined in the class, we can't reference it. Workaround here is to just
                # prefetch the score summary and resave it to maintain its original field values.
                score_summary = ScoreSummary.objects.get(student_item=score.student_item)

                # Update old override with submission from the score preceding it.
                # If none exists, look for it in the submissions table.
                preceding_score = Score.objects.filter(
                    student_item=score.student_item,
                    created_at__lte=score.created_at,
                    submission__isnull=False,
                ).order_by('-created_at')[:1]
                if preceding_score.count():
                    submission = preceding_score.get().submission
                else:
                    submission_qset = Submission.objects.filter(student_item=score.student_item)
                    if submission_qset.count() > 1:
                        raise Exception("MULTIPLE SUBMISSIONS FOR STUDENT_ITEM {}".format(score.student_item))
                    else:
                        submission = submission_qset[:1].get()
                score.submission = submission
                score.save()

                # Offset override reset by 1 second for convenience when sorting db
                override_date = score.created_at - datetime.timedelta(seconds=1)
                # Create reset score
                Score.objects.create(
                    student_item=score.student_item,
                    submission=None,
                    points_earned=0,
                    points_possible=0,
                    created_at=override_date,
                    reset=True,
                )

                # Restore original score summary values
                score_summary.save()

                # Fetch staff id from score course for ScoreAnnotation
                course_id = CourseKey.from_string(score.student_item.course_id)
                staff = User.objects.filter(
                    courseaccessrole__role__in=['instructor', 'staff'],
                    courseaccessrole__course_id=course_id,
                )[:1].get()
                staff_id = anonymous_id_for_user(staff, course_id, save=False)

                # Create ScoreAnnotation
                score_annotation = ScoreAnnotation(
                    score=score,
                    annotation_type="staff_defined",
                    creator=staff_id,
                    reason="A staff member has defined the score for this submission",
                )
                score_annotation.save()

                # ORA2 Table Updates...
                # Fetch rubric from an existing assessment
                assessment = Assessment.objects.filter(submission_uuid=submission.uuid)[:1].get()
                rubric = assessment.rubric

                staff_assessment = Assessment.create(
                    rubric=rubric,
                    scorer_id=staff_id,
                    submission_uuid=submission.uuid,
                    score_type="ST",
                    scored_at=override_date,
                )

                # Fake criterion selections
                rubric_index = rubric.index
                assessment_parts = []

                criteria_without_options = rubric_index.find_criteria_without_options()
                criteria_with_options = set(rubric_index._criteria_index.values()) - criteria_without_options
                ordered_criteria = sorted(criteria_with_options, key=lambda criterion: criterion.order_num)
                criteria_options = [c.options.all() for c in ordered_criteria]
                # Just take the first combination of options which add up to the override point score
                for selection in itertools.product(*criteria_options):
                    total = sum(option.points for option in selection)
                    if total == score.points_earned:
                        for option in selection:
                            assessment_parts.append({
                                'criterion': option.criterion,
                                'option': option
                            })
                        break

#......... part of the code omitted here .........
Developer: caesar2164, Project: edx-platform, Lines of code: 103, Source file: migrate_score_override.py

Example 13: create_assessment

# Required module: from openassessment.assessment.models import Assessment [as alias]
# Or: from openassessment.assessment.models.Assessment import create [as alias]
def create_assessment(
    submission_uuid,
    user_id,
    options_selected,
    criterion_feedback,
    overall_feedback,
    rubric_dict,
    scored_at=None
):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being assessed.
        user_id (str): The ID of the user creating the assessment.  This must match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.

    Keyword Arguments:
        scored_at (datetime): The timestamp of the assessment; defaults to the current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        msg = (
            u"Cannot submit a self-assessment for the submission {uuid} "
            "because another self-assessment already exists for that submission."
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            msg = (
                u"Cannot submit a self-assessment for the submission {uuid} "
                u"because it was created by another student "
                u"(submission student ID {student_id} does not match your "
                u"student id {other_id})"
            ).format(
                uuid=submission_uuid,
                student_id=submission['student_item']['student_id'],
                other_id=user_id
            )
            raise SelfAssessmentRequestError(msg)
    except SubmissionNotFoundError:
        msg = (
            "Could not submit a self-assessment because no submission "
            "exists with UUID {uuid}"
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    try:
        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the self assessment
        assessment = Assessment.create(
            rubric,
            user_id,
            submission_uuid,
            SELF_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
        _log_assessment(assessment, submission)
    except InvalidRubric as ex:
        msg = "Invalid rubric definition: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except InvalidRubricSelection as ex:
        msg = "Selected options do not match the rubric: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)

    # Return the serialized assessment
    return full_assessment_dict(assessment)
Developer: ovnicraft, Project: edx-ora2, Lines of code: 92, Source file: self.py


Note: The openassessment.assessment.models.Assessment.create method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For redistribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.