This article collects typical usage examples of the Python method openassessment.assessment.models.Assessment.get_median_score_dict. If you are unsure what Assessment.get_median_score_dict does or how to use it, the curated examples below may help; you can also look further into the containing class, openassessment.assessment.models.Assessment.
Five code examples of Assessment.get_median_score_dict are shown below, ordered by popularity by default.
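Before the examples, it helps to see the shapes involved. In every example below, the call site first builds a per-criterion mapping with Assessment.scores_by_criterion and then reduces it with Assessment.get_median_score_dict. A rough sketch of those shapes, assuming the intermediate value maps each criterion name to a list of scores (the criterion names and numbers here are invented for illustration):

# Hypothetical input shape: the result of Assessment.scores_by_criterion(assessments).
scores = {"ideas": [2, 4, 5], "clarity": [3, 4]}

# Hypothetical output shape: one median per criterion, matching the docstrings
# below ("a dictionary of rubric criterion names, with a median score").
median_scores = {"ideas": 4, "clarity": 4}   # "clarity": ceil((3 + 4) / 2) == 4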
Example 1: get_assessment_median_scores
# Required import: from openassessment.assessment.models import Assessment
# Or: from openassessment.assessment.models.Assessment import get_median_score_dict
def get_assessment_median_scores(submission_uuid):
    """Get the median score for each rubric criterion.

    For a given assessment, collect the median score for each criterion on
    the rubric. This set can be used to determine the overall score, as well
    as each part of the individual rubric scores.

    If there is a true median score, it is returned. If there are two median
    values, the average of those two values is returned, rounded up to the
    greatest integer value.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        dict: A dictionary of rubric criterion names, with a median score
            of the peer assessments.

    Raises:
        PeerAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.

    """
    try:
        workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
        items = workflow.graded_by.filter(scored=True)
        assessments = [item.assessment for item in items]
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = (
            u"Error getting assessment median scores for submission {uuid}"
        ).format(uuid=submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
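The docstring above pins down the median rule: a true middle value is returned as-is, and with an even number of scores the two middle values are averaged and rounded up. A minimal standalone sketch of that rule (an illustration, not the library's actual implementation):

import math

def median_score(scores):
    # Odd count: the middle value. Even count: the average of the two
    # middle values, rounded up to the next integer.
    if not scores:
        return 0
    ordered = sorted(scores)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        return ordered[mid]
    return int(math.ceil((ordered[mid - 1] + ordered[mid]) / 2.0))

assert median_score([2, 4, 5]) == 4   # true median
assert median_score([2, 3]) == 3      # ceil(2.5), rounded up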
Example 2: get_assessment_scores_by_criteria
# Required import: from openassessment.assessment.models import Assessment
# Or: from openassessment.assessment.models.Assessment import get_median_score_dict
def get_assessment_scores_by_criteria(submission_uuid):
    """Get the median score for each rubric criterion.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        (dict): A dictionary of rubric criterion names, with a median score
            of the self assessments.

    Raises:
        SelfAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.

    """
    try:
        assessments = list(
            Assessment.objects.filter(
                score_type=SELF_TYPE, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = _(
            u"Error getting self assessment scores for {}"
        ).format(submission_uuid)
        logger.exception(error_message)
        raise SelfAssessmentInternalError(error_message)
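Note that Example 2 (and Example 3 below) deliberately slices the queryset to the single most recent assessment, so the "median" for each criterion is simply that assessment's score. The same latest-first selection in plain Python, sketched with an invented stand-in record type:

from collections import namedtuple
from datetime import datetime

# Invented stand-in for an Assessment row, for illustration only.
FakeAssessment = namedtuple('FakeAssessment', ['scored_at', 'points'])

rows = [
    FakeAssessment(datetime(2015, 1, 1), 5),
    FakeAssessment(datetime(2015, 3, 1), 7),
]
# Equivalent of .order_by('-scored_at')[:1]: newest first, keep at most one.
latest = sorted(rows, key=lambda r: r.scored_at, reverse=True)[:1]
assert latest[0].points == 7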
Example 3: get_assessment_scores_by_criteria
# Required import: from openassessment.assessment.models import Assessment
# Or: from openassessment.assessment.models.Assessment import get_median_score_dict
def get_assessment_scores_by_criteria(submission_uuid):
    """Get the score for each rubric criterion.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the example based assessments.

    Raises:
        AIGradingInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.

    """
    try:
        assessments = list(
            Assessment.objects.filter(
                score_type=AI_ASSESSMENT_TYPE, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = u"Error getting example-based assessment scores for {}".format(submission_uuid)
        logger.exception(error_message)
        raise AIGradingInternalError(error_message)
Example 4: get_assessment_scores_by_criteria
# Required import: from openassessment.assessment.models import Assessment
# Or: from openassessment.assessment.models.Assessment import get_median_score_dict
def get_assessment_scores_by_criteria(submission_uuid):
    """Get the staff score for each rubric criterion.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the staff assessments.

    Raises:
        StaffAssessmentInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.

    """
    try:
        # This will always create a list of length 1.
        assessments = list(
            Assessment.objects.filter(
                score_type=STAFF_TYPE, submission_uuid=submission_uuid
            )[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        # Since this is only being sent one score, the median score will be
        # the same as the only score.
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = u"Error getting staff assessment scores for {}".format(submission_uuid)
        logger.exception(error_message)
        raise StaffAssessmentInternalError(error_message)
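Examples 2 through 4 are structurally identical: filter by a score_type constant, keep at most one assessment, and reduce with get_median_score_dict; they differ only in the type constant and the error class raised. A hedged sketch of how that shared shape could be factored out (the helper name is hypothetical, not part of the library):

from django.db import DatabaseError

from openassessment.assessment.models import Assessment

def scores_for_latest_assessment(submission_uuid, score_type, error_cls):
    # Median scores per criterion for the most recent assessment of the
    # given type, mirroring the pattern in Examples 2-4 above.
    try:
        assessments = list(
            Assessment.objects.filter(
                score_type=score_type, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        raise error_cls(
            u"Error getting {} assessment scores for {}".format(score_type, submission_uuid)
        )

Each public function would then be a thin wrapper, e.g. scores_for_latest_assessment(submission_uuid, SELF_TYPE, SelfAssessmentInternalError).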
Example 5: print_summary
# Required import: from openassessment.assessment.models import Assessment
# Or: from openassessment.assessment.models.Assessment import get_median_score_dict
def print_summary(course_id, oa_item, anonymous_student_id):
    # Print submission
    submission = get_submission(course_id, oa_item.location, anonymous_student_id)
    print "Submission status:"
    print_submission(submission, oa_item)

    # Print scored assessment(s)
    scored_items = PeerWorkflowItem.objects.filter(
        author=submission.id,
        submission_uuid=submission.submission_uuid,
        assessment__isnull=False,
        scored=True
    ).order_by('assessment')
    print "Scored assessment(s):"
    if scored_items:
        scored_assessments = [scored_item.assessment for scored_item in scored_items]
        scored_scores = scores_by_criterion(scored_assessments)
        # Median per criterion of the scored assessments (the method this page documents).
        median_score_dict = Assessment.get_median_score_dict(scored_scores)
        print_peerworkflowitem(scored_items, scored_scores)
    else:
        scored_scores = {}
        print "... No record was found."

    # Print not-scored assessment(s)
    not_scored_items = PeerWorkflowItem.objects.filter(
        author=submission.id,
        submission_uuid=submission.submission_uuid,
        assessment__isnull=False,
        scored=False
    ).order_by('assessment')
    print "Not-scored assessment(s):"
    if not_scored_items:
        not_scored_assessments = [not_scored_item.assessment for not_scored_item in not_scored_items]
        not_scored_scores = scores_by_criterion(not_scored_assessments)
        print_peerworkflowitem(not_scored_items, not_scored_scores)
    else:
        print "... No record was found."

    # Print latest score
    latest_score = get_latest_score(submission)
    print "Latest score:"
    if latest_score is not None:
        try:
            median_scores = peer_api.get_assessment_median_scores(submission.submission_uuid)
        except Exception:
            median_scores = {}
        latest_score_output = PrettyTable(
            ['Score ID'] + scored_scores.keys() + ['Points earned', 'Points possible', 'Created at']
        )
        latest_score_output.align = 'l'
        row = [latest_score.id]
        row.extend([median_scores[k] for k in scored_scores.keys()])
        row.append(latest_score.points_earned)
        row.append(latest_score.points_possible)
        row.append(latest_score.created_at)
        latest_score_output.add_row(row)
        print latest_score_output
    else:
        print "... No record was found."