

Python models.Assessment Class Code Examples

This article collects typical usage examples of the Python class openassessment.assessment.models.Assessment. If you are wondering what the Assessment class does and how to use it, the hand-picked code examples below may help.


The sections below present 15 code examples for the Assessment class, sorted by popularity by default.

Example 1: get_assessment_scores_by_criteria

def get_assessment_scores_by_criteria(submission_uuid):
    """Get the median score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        (dict): A dictionary of rubric criterion names, with a median score of
            the self assessments.

    Raises:
        SelfAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.
    """
    try:
        assessments = list(
            Assessment.objects.filter(
                score_type=SELF_TYPE, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = _(u"Error getting self assessment scores for {}").format(submission_uuid)
        logger.exception(error_message)
        raise SelfAssessmentInternalError(error_message)
Developer: YoshidaKS, Project: edx-ora2, Lines: 28, Source: self.py

Example 2: get_assessment_scores_by_criteria

def get_assessment_scores_by_criteria(submission_uuid):
    """Get the staff score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the staff assessments.

    Raises:
        StaffAssessmentInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.
    """
    try:
        # This will always create a list of length 1
        assessments = list(
            Assessment.objects.filter(
                score_type=STAFF_TYPE, submission_uuid=submission_uuid
            )[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        # Since this is only being sent one score, the median score will be the
        # same as the only score.
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = u"Error getting staff assessment scores for {}".format(submission_uuid)
        logger.exception(error_message)
        raise StaffAssessmentInternalError(error_message)
Developer: openfun, Project: edx-ora2, Lines: 30, Source: staff.py

Example 3: get_assessment_median_scores

def get_assessment_median_scores(submission_uuid):
    """Get the median score for each rubric criterion

    For a given assessment, collect the median score for each criterion on the
    rubric. This set can be used to determine the overall score, as well as each
    part of the individual rubric scores.

    If there is a true median score, it is returned. If there are two median
    values, the average of those two values is returned, rounded up to the
    greatest integer value.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        dict: A dictionary of rubric criterion names,
        with a median score of the peer assessments.

    Raises:
        PeerAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.
    """
    try:
        workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
        items = workflow.graded_by.filter(scored=True)
        assessments = [item.assessment for item in items]
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = (u"Error getting assessment median scores for submission {uuid}").format(uuid=submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Developer: robertgerinlajoie, Project: edx-ora2, Lines: 34, Source: peer.py
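
For orientation, here is a minimal sketch of how the two Assessment class methods used above fit together. The criterion names and scores are hypothetical placeholders, and the dict shapes are inferred from how the results are consumed elsewhere in these examples (Example 12 iterates the keys of scores_by_criterion as table columns); this is not the edx-ora2 implementation.

# Hypothetical data -- a sketch of the assumed shapes, not edx-ora2 code.
# Assessment.scores_by_criterion(assessments) is assumed to group part scores
# by criterion name, one entry per assessment:
scores = {
    "ideas": [3, 4, 5],
    "clarity": [2, 2, 4],
}

# Assessment.get_median_score_dict(scores) is then assumed to reduce each list
# to its median (see the rounding sketch after Example 9):
medians = {"ideas": 4, "clarity": 2}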

Example 4: get_assessment_scores_by_criteria

def get_assessment_scores_by_criteria(submission_uuid):
    """Get the score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the example based assessments.

    Raises:
        AIGradingInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.
    """
    try:
        assessments = list(
            Assessment.objects.filter(
                score_type=AI_ASSESSMENT_TYPE, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = u"Error getting example-based assessment scores for {}".format(submission_uuid)
        logger.exception(error_message)
        raise AIGradingInternalError(error_message)
Developer: EDUlib, Project: edx-ora2, Lines: 27, Source: ai.py

Example 5: test_create_with_feedback_only_criterion

    def test_create_with_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts
        # We can't select an option for the last criterion, but we do
        # provide written feedback.
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {
            u"feedback": u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐."
        }
        AssessmentPart.create_from_option_names(
            assessment, selected, feedback=feedback
        )

        # Check the score (the feedback-only criterion should count for 0 points)
        self.assertEqual(assessment.points_earned, 3)
        self.assertEqual(assessment.points_possible, 4)

        # Check the feedback text
        feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
        self.assertEqual(feedback_only.feedback, u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐.")
Developer: edx, Project: edx-ora2, Lines: 25, Source: test_assessment_models.py

Example 6: test_full_assessment_dict_criteria_no_options

    def test_full_assessment_dict_criteria_no_options(self):
        # Create a rubric with a criterion that has no options (just feedback)
        rubric_dict = copy.deepcopy(RUBRIC)
        rubric_dict['criteria'].append({
            'order_num': 2,
            'name': 'feedback only',
            'prompt': 'feedback only',
            'options': []
        })
        rubric = rubric_from_dict(rubric_dict)

        # Create an assessment for the rubric
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {
            u"feedback only": u"enjoy the feedback!"
        }
        AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

        # Serialize the assessment
        serialized = full_assessment_dict(assessment)

        # Verify that the assessment dict correctly serialized the criteria with options.
        self.assertEqual(serialized['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(serialized['parts'][0]['option']['name'], u"𝓰𝓸𝓸𝓭")
        self.assertEqual(serialized['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
        self.assertEqual(serialized['parts'][1]['option']['name'], u"єχ¢єℓℓєηт")

        # Verify that the assessment dict correctly serialized the criterion with no options.
        self.assertIs(serialized['parts'][2]['option'], None)
        self.assertEqual(serialized['parts'][2]['criterion']['name'], u"feedback only")
Developer: Akif-Vohra, Project: edx-ora2, Lines: 34, Source: test_serializers.py
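
The assertions above pin down only part of the serialized structure. Under that limited view, the 'parts' entry of the dict returned by full_assessment_dict looks roughly like the sketch below; the criterion and option values are the ones from the test, and the real output very likely carries additional fields (points, feedback, and so on) that the test does not check.

# Rough shape implied by the assertions above; the real dict contains more fields.
serialized_parts = [
    {"criterion": {"name": u"vøȼȺƀᵾłȺɍɏ"}, "option": {"name": u"𝓰𝓸𝓸𝓭"}},
    {"criterion": {"name": u"ﻭɼค๓๓คɼ"}, "option": {"name": u"єχ¢єℓℓєηт"}},
    {"criterion": {"name": u"feedback only"}, "option": None},  # criterion with no options
]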

Example 7: _complete_assessment

def _complete_assessment(
    rubric_dict,
    scorer_id,
    peer_submission_uuid,
    options_selected,
    criterion_feedback,
    scorer_workflow,
    overall_feedback,
    num_required_grades,
    scored_at,
):
    """
    Internal function for atomic assessment creation. Creates a peer assessment
    and closes the associated peer workflow item in a single transaction.

    Args:
        rubric_dict (dict): The rubric model associated with this assessment
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        peer_submission_uuid (str): The submission uuid for the submission being
            assessed.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the
            scorer. Updates the workflow item associated with this assessment.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the peer assessment
    assessment = Assessment.create(
        rubric, scorer_id, peer_submission_uuid, PEER_TYPE, scored_at=scored_at, feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
    return assessment
Developer: robertgerinlajoie, Project: edx-ora2, Lines: 58, Source: peer.py
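
The docstring above describes the shapes of options_selected and criterion_feedback, but the example never shows concrete values. The sketch below illustrates plausible arguments with hypothetical criterion and option names; it is not taken from edx-ora2. The "atomic" wording also suggests the public peer API wraps this helper in a database transaction, though that caller is not shown here.

# Hypothetical argument values (criterion and option names are placeholders):
options_selected = {
    "ideas": "good",          # criterion name -> selected option name
    "clarity": "excellent",
}
criterion_feedback = {
    "ideas": "Strong thesis, but the second point needs evidence.",
    # "clarity" omitted -- per-criterion feedback is optional
}
overall_feedback = u"Solid submission overall."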

Example 8: _complete_assessment

def _complete_assessment(
        submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at,
        scorer_workflow
):
    """
    Internal function for atomic assessment creation. Creates a staff assessment
    in a single transaction.

    Args:
        submission_uuid (str): The submission uuid for the submission being
            assessed.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): The rubric model associated with this assessment
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the staff assessment
    assessment = Assessment.create(
        rubric,
        scorer_id,
        submission_uuid,
        STAFF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    if scorer_workflow is not None:
        scorer_workflow.close_active_assessment(assessment, scorer_id)
    return assessment
Developer: openfun, Project: edx-ora2, Lines: 57, Source: staff.py

Example 9: test_choose_score

    def test_choose_score(self):
        self.assertEqual(0, Assessment.get_median_score([]))
        self.assertEqual(5, Assessment.get_median_score([5]))
        # average of 5 and 6, rounded up.
        self.assertEqual(6, Assessment.get_median_score([5, 6]))
        self.assertEqual(14, Assessment.get_median_score([5, 6, 12, 16, 22, 53]))
        self.assertEqual(14, Assessment.get_median_score([6, 5, 12, 53, 16, 22]))
        self.assertEqual(16, Assessment.get_median_score([5, 6, 12, 16, 22, 53, 102]))
        self.assertEqual(16, Assessment.get_median_score([16, 6, 12, 102, 22, 53, 5]))
Developer: mulby, Project: edx-ora2, Lines: 9, Source: test_peer.py
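
The expected values above follow the rule stated in the Example 3 docstring: an odd number of scores yields the true median, an even number yields the average of the two middle values rounded up, and an empty list yields 0. The function below is a self-contained re-implementation of that rule for illustration only (it is not the edx-ora2 source) and reproduces every expectation in the test.

import math

def median_score(scores):
    """Sketch of the rule exercised by the test above -- not the edx-ora2 source."""
    ordered = sorted(scores)
    n = len(ordered)
    if n == 0:
        return 0
    if n % 2:
        return ordered[n // 2]          # odd count: true median
    # even count: average the two middle values, rounded up
    return int(math.ceil((ordered[n // 2 - 1] + ordered[n // 2]) / 2.0))

assert median_score([]) == 0
assert median_score([5]) == 5
assert median_score([5, 6]) == 6                        # ceil(5.5)
assert median_score([5, 6, 12, 16, 22, 53]) == 14       # avg(12, 16)
assert median_score([5, 6, 12, 16, 22, 53, 102]) == 16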

Example 10: test_create_from_option_points_all_feedback_only_criteria

    def test_create_from_option_points_all_feedback_only_criteria(self):
        rubric = self._rubric_with_all_feedback_only_criteria()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Since there are no criteria with options, and we're not
        # providing written feedback, pass in an empty selection.
        selected = {}
        AssessmentPart.create_from_option_points(assessment, selected)

        # Score should be zero, since none of the criteria have options
        self.assertEqual(assessment.points_earned, 0)
        self.assertEqual(assessment.points_possible, 0)
Developer: edx, Project: edx-ora2, Lines: 12, Source: test_assessment_models.py

Example 11: test_no_feedback_provided_for_feedback_only_criterion

    def test_no_feedback_provided_for_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts
        # Do NOT provide feedback for the feedback-only criterion
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {}

        # Expect an error when we try to create the assessment parts
        with self.assertRaises(InvalidRubricSelection):
            AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
Developer: edx, Project: edx-ora2, Lines: 15, Source: test_assessment_models.py

Example 12: print_summary

def print_summary(course_id, oa_item, anonymous_student_id):
    # Print submission
    submission = get_submission(course_id, oa_item.location, anonymous_student_id)
    print "Submission status:"
    print_submission(submission, oa_item)

    # Print scored assessment(s)
    scored_items = PeerWorkflowItem.objects.filter(author=submission.id, submission_uuid=submission.submission_uuid, assessment__isnull=False, scored=True).order_by('assessment')
    print "Scored assessment(s):"
    if scored_items:
        scored_assessments = [scored_item.assessment for scored_item in scored_items]
        scored_scores = scores_by_criterion(scored_assessments)
        median_score_dict = Assessment.get_median_score_dict(scored_scores)
        print_peerworkflowitem(scored_items, scored_scores)
    else:
        scored_scores = {}
        print "... No record was found."

    # Print not-scored assessment(s)
    not_scored_items = PeerWorkflowItem.objects.filter(author=submission.id, submission_uuid=submission.submission_uuid, assessment__isnull=False, scored=False).order_by('assessment')
    print "Not-scored assessment(s):"
    if not_scored_items:
        not_scored_assessments = [not_scored_item.assessment for not_scored_item in not_scored_items]
        not_scored_scores = scores_by_criterion(not_scored_assessments)
        print_peerworkflowitem(not_scored_items, not_scored_scores)
    else:
        print "... No record was found."

    # Print latest score
    latest_score = get_latest_score(submission)
    print "Latest score:"
    if latest_score is not None:
        try:
            median_scores = peer_api.get_assessment_median_scores(submission.submission_uuid)
        except:
            median_scores = {}
        latest_score_output = PrettyTable(['Score ID'] + scored_scores.keys() + ['Points earned', 'Points possible', 'Created at'])
        latest_score_output.align = 'l'
        row = []
        row.append(latest_score.id)
        row.extend([median_scores[k] for k in scored_scores.keys()])
        row.append(latest_score.points_earned)
        row.append(latest_score.points_possible)
        row.append(latest_score.created_at)
        latest_score_output.add_row(row)
        print latest_score_output
    else:
        print "... No record was found."
Developer: nttks, Project: edx-ora2, Lines: 48, Source: rescore_oa.py

Example 13: _complete_assessment

def _complete_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at
):
    """
    Internal function for creating an assessment and its parts atomically.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.
        scored_at (datetime): The timestamp of the assessment.

    Returns:
        Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the self assessment
    assessment = Assessment.create(
        rubric,
        user_id,
        submission_uuid,
        SELF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
    return assessment
Developer: Stanford-Online, Project: edx-ora2, Lines: 47, Source: self.py

Example 14: test_create_with_all_feedback_only_criteria

    def test_create_with_all_feedback_only_criteria(self):
        rubric = self._rubric_with_all_feedback_only_criteria()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts, each of which are feedback-only (no points)
        selected = {}
        feedback = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        AssessmentPart.create_from_option_names(
            assessment, selected, feedback=feedback
        )

        # Check the score (should be 0, since we haven't selected any points)
        self.assertEqual(assessment.points_earned, 0)
        self.assertEqual(assessment.points_possible, 0)
Developer: edx, Project: edx-ora2, Lines: 17, Source: test_assessment_models.py

Example 15: test_create_from_option_points_feedback_only_criterion

    def test_create_from_option_points_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts by providing scores for options
        # but NO feedback.  This simulates how an example-based AI
        # assessment is created.
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": 2,
            u"ﻭɼค๓๓คɼ": 1,
        }
        AssessmentPart.create_from_option_points(assessment, selected)

        # Check the score (the feedback-only criterion should count for 0 points)
        self.assertEqual(assessment.points_earned, 3)
        self.assertEqual(assessment.points_possible, 4)

        # Check the feedback text (should default to an empty string)
        feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
        self.assertEqual(feedback_only.feedback, u"")
Developer: edx, Project: edx-ora2, Lines: 20, Source: test_assessment_models.py
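
Taken together, these tests show the two AssessmentPart factory methods side by side: create_from_option_names keys the selection by option name and accepts per-criterion feedback (Examples 5, 7, 8, 11, 14), while create_from_option_points keys it by point value and takes no feedback, which this test's comment says simulates an example-based (AI) assessment. A minimal illustration of the two selection formats, using a hypothetical criterion:

# Hypothetical criterion "ideas" selected in the two formats used above:
by_name   = {u"ideas": u"good"}   # for AssessmentPart.create_from_option_names(...)
by_points = {u"ideas": 2}         # for AssessmentPart.create_from_option_points(...)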


Note: The openassessment.assessment.models.Assessment class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code, and do not reproduce this article without permission.