This article collects typical usage examples of the Python function submissions.api.get_submission. If you are wondering what get_submission does, how to call it, or what real-world usage looks like, the hand-picked examples below may help.
The following 15 code examples of get_submission are drawn from open-source projects and are sorted by popularity by default.
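Before the project examples, here is a minimal sketch of the basic call. This fragment is illustrative rather than taken from any project below; the student item field names follow the conventions visible in the tests, and the answer value is a placeholder.

from submissions import api

# A student item identifies who answered what, and where (illustrative values).
student_item = {
    "student_id": "student_1",
    "course_id": "course_1",
    "item_id": "item_1",
    "item_type": "openassessment",
}

submission = api.create_submission(student_item, "my answer")
try:
    fetched = api.get_submission(submission["uuid"])
except api.SubmissionNotFoundError:
    fetched = None  # No submission matches the given UUID.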
Example 1: test_get_old_submission
def test_get_old_submission(self):
    # hack in an old-style submission, this can't be created with the ORM (EDUCATOR-1090)
    with transaction.atomic():
        student_item = StudentItem.objects.create()
        connection.cursor().execute(
            """
            INSERT INTO submissions_submission
            (id, uuid, attempt_number, submitted_at, created_at, raw_answer, student_item_id, status)
            VALUES ({}, {}, {}, {}, {}, {}, {}, {});
            """.format(
                1,
                "'deadbeef-1234-5678-9100-1234deadbeef'",
                1,
                "'2017-07-13 17:56:02.656129'",
                "'2017-07-13 17:56:02.656129'",
                "'{\"parts\":[{\"text\":\"raw answer text\"}]}'",
                int(student_item.id),
                "'A'"
            ),
            []
        )

    with mock.patch.object(
        Submission.objects, 'raw',
        wraps=Submission.objects.raw
    ) as mock_raw:
        _ = api.get_submission('deadbeef-1234-5678-9100-1234deadbeef')
        self.assertEqual(1, mock_raw.call_count)

        # On subsequent accesses we still get the submission, but raw() isn't needed
        mock_raw.reset_mock()
        _ = api.get_submission('deadbeef-1234-5678-9100-1234deadbeef')
        self.assertEqual(0, mock_raw.call_count)
Example 2: test_load_non_json_answer
def test_load_non_json_answer(self):
    # This should never happen, if folks are using the public API.
    # Create a submission with a raw answer that is NOT valid JSON
    submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    sub_model = Submission.objects.get(uuid=submission['uuid'])
    sub_model.raw_answer = ''
    sub_model.save()

    with self.assertRaises(api.SubmissionInternalError):
        api.get_submission(sub_model.uuid)

    with self.assertRaises(api.SubmissionInternalError):
        api.get_submission_and_student(sub_model.uuid)
Example 3: test_caching
def test_caching(self):
    sub = api.create_submission(STUDENT_ITEM, "Hello World!")

    # The first request to get the submission hits the database...
    with self.assertNumQueries(1):
        db_sub = api.get_submission(sub["uuid"])

    # The next one hits the cache only...
    with self.assertNumQueries(0):
        cached_sub = api.get_submission(sub["uuid"])

    # The data that gets passed back matches the original in both cases
    self.assertEqual(sub, db_sub)
    self.assertEqual(sub, cached_sub)
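The test above pins down a cache-aside pattern: the first read costs one database query, the second is served entirely from cache. A minimal sketch of that pattern follows, assuming the Submission model and SubmissionSerializer used elsewhere in the library; the cache key format is invented here, not the library's real one.

from django.core.cache import cache

from submissions.models import Submission
from submissions.serializers import SubmissionSerializer

def get_submission_cached(uuid):
    cache_key = "submissions.submission.{}".format(uuid)  # hypothetical key format
    cached = cache.get(cache_key)
    if cached is not None:
        return cached  # Second and later reads stop here: zero queries.
    submission = Submission.objects.get(uuid=uuid)  # The single query the test counts.
    data = SubmissionSerializer(submission).data
    cache.set(cache_key, data)
    return data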
Example 4: test_load_non_json_answer
def test_load_non_json_answer(self):
    submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    sub_model = Submission.objects.get(uuid=submission['uuid'])

    # This should never happen, if folks are using the public API.
    # Create a submission with a raw answer that is NOT valid JSON
    query = "UPDATE submissions_submission SET raw_answer = '}' WHERE id = %s"
    connection.cursor().execute(query, [str(sub_model.id)])
    transaction.commit_unless_managed()

    with self.assertRaises(api.SubmissionInternalError):
        api.get_submission(sub_model.uuid)

    with self.assertRaises(api.SubmissionInternalError):
        api.get_submission_and_student(sub_model.uuid)
Example 5: self_path_and_context
def self_path_and_context(self):
    """
    Determine the template path and context to use when rendering the self-assessment step.

    Returns:
        tuple of `(path, context)`, where `path` (str) is the path to the template,
        and `context` (dict) is the template context.

    Raises:
        SubmissionError: Error occurred while retrieving the current submission.
        SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
    """
    context = {}
    path = 'openassessmentblock/self/oa_self_unavailable.html'
    problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")

    # We display the due date whether the problem is open or closed.
    # If no date is set, it defaults to the distant future, in which
    # case we don't display the date.
    if due_date < DISTANT_FUTURE:
        context['self_due'] = due_date

    # If we haven't submitted yet, `workflow` will be an empty dict,
    # and `workflow_status` will be None.
    workflow = self.get_workflow_info()
    workflow_status = workflow.get('status')
    self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)

    if self_complete:
        path = 'openassessmentblock/self/oa_self_complete.html'
    elif workflow_status == 'self' or problem_closed:
        assessment = self_api.get_assessment(workflow.get("submission_uuid"))

        if assessment is not None:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif problem_closed:
            if reason == 'start':
                context["self_start"] = start_date
                path = 'openassessmentblock/self/oa_self_unavailable.html'
            elif reason == 'due':
                path = 'openassessmentblock/self/oa_self_closed.html'
        else:
            submission = submission_api.get_submission(self.submission_uuid)
            context["rubric_criteria"] = self.rubric_criteria_with_labels
            context["estimated_time"] = "20 minutes"  # TODO: Need to configure this.
            context["self_submission"] = submission

            # Determine if file upload is supported for this XBlock.
            context["allow_file_upload"] = self.allow_file_upload
            context['self_file_url'] = self.get_download_url_from_submission(submission)
            path = 'openassessmentblock/self/oa_self_assessment.html'
    else:
        # No submission yet or in peer assessment
        path = 'openassessmentblock/self/oa_self_unavailable.html'

    return path, context
Example 6: test_get_submission
def test_get_submission(self):
    # Test base case that we can create a submission and get it back
    sub_dict1 = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    sub_dict2 = api.get_submission(sub_dict1["uuid"])
    self.assertEqual(sub_dict1, sub_dict2)

    # Test invalid inputs
    with self.assertRaises(api.SubmissionRequestError):
        api.get_submission(20)
    with self.assertRaises(api.SubmissionRequestError):
        api.get_submission({})

    # Test not found
    with self.assertRaises(api.SubmissionNotFoundError):
        api.get_submission("notarealuuid")
    with self.assertRaises(api.SubmissionNotFoundError):
        api.get_submission("0" * 50)  # This is bigger than our field size
Example 7: get_submission_to_assess
def get_submission_to_assess(course_id, item_id, scorer_id):
    """
    Get a submission for staff evaluation.

    Retrieves a submission for assessment for the given staff member.

    Args:
        course_id (str): The course that we would like to fetch submissions from.
        item_id (str): The student_item (problem) that we would like to retrieve submissions for.
        scorer_id (str): The user id of the staff member scoring this submission.

    Returns:
        dict: A student submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        StaffAssessmentInternalError: Raised when there is an internal error
            retrieving staff workflow information.

    Examples:
        >>> get_submission_to_assess("a_course_id", "an_item_id", "a_scorer_id")
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': { ... }
        }
    """
    student_submission_uuid = StaffWorkflow.get_submission_for_review(course_id, item_id, scorer_id)
    if student_submission_uuid:
        try:
            submission_data = submissions_api.get_submission(student_submission_uuid)
            return submission_data
        except submissions_api.SubmissionNotFoundError:
            error_message = (
                u"Could not find a submission with the uuid {}"
            ).format(student_submission_uuid)
            logger.exception(error_message)
            raise StaffAssessmentInternalError(error_message)
    else:
        logger.info(
            u"No submission found for staff to assess ({}, {})".format(
                course_id,
                item_id,
            )
        )
        return None
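A hypothetical caller for the staff API above, showing the None case the function documents; the course, item, and scorer ids are placeholders.

submission = get_submission_to_assess("a_course_id", "an_item_id", "a_scorer_id")
if submission is None:
    # Nothing is waiting for this staff member; show an empty grading queue.
    pass
else:
    answer = submission["answer"]  # Render this for the staff member to grade.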
Example 8: test_get_submission
def test_get_submission(self):
    # Test base case that we can create a submission and get it back
    sub_dict1 = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    sub_dict2 = api.get_submission(sub_dict1["uuid"])
    self.assertEqual(sub_dict1, sub_dict2)

    # Test invalid inputs
    with self.assertRaises(api.SubmissionRequestError):
        api.get_submission(20)
    with self.assertRaises(api.SubmissionRequestError):
        api.get_submission({})

    # Test not found
    with self.assertRaises(api.SubmissionNotFoundError):
        api.get_submission("deadbeef-1234-5678-9100-1234deadbeef")
Example 9: render_grade_complete
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    feedback = peer_api.get_assessment_feedback(self.submission_uuid)
    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(workflow['submission_uuid'])
    peer_assessments = peer_api.get_assessments(student_submission['uuid'])
    self_assessment = self_api.get_assessment(student_submission['uuid'])
    has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

    # We retrieve the score from the workflow, which in turn retrieves
    # the score for our current submission UUID.
    # We look up the score by submission UUID instead of student item
    # to ensure that the score always matches the rubric.
    score = workflow['score']

    context = {
        'score': score,
        'feedback_text': feedback_text,
        'student_submission': student_submission,
        'peer_assessments': peer_assessments,
        'self_assessment': self_assessment,
        'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
        'has_submitted_feedback': has_submitted_feedback,
    }

    # Update the scores we will display to the user
    # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
    max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
    median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
    if median_scores is not None and max_scores is not None:
        for criterion in context["rubric_criteria"]:
            criterion["median_score"] = median_scores[criterion["name"]]
            criterion["total_value"] = max_scores[criterion["name"]]

    return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example 10: render_self_assessment
def render_self_assessment(self, data, suffix=''):
    context = {}
    assessment_module = self.get_assessment_module('self-assessment')
    path = 'openassessmentblock/self/oa_self_unavailable.html'
    problem_closed, reason, date = self.is_closed(step="self-assessment")

    if problem_closed:
        # `reason` says *why* the problem is closed ('start' or 'due');
        # `date` is the corresponding datetime to display. The original
        # snippet compared `date` to these strings, which can never match.
        if reason == 'start':
            context["self_start"] = self.format_datetime_string(date)
        elif reason == 'due':
            context["self_due"] = self.format_datetime_string(date)

    workflow = self.get_workflow_info()
    if not workflow:
        return self.render_assessment(path, context)

    try:
        submission = submission_api.get_submission(self.submission_uuid)
        assessment = self_api.get_assessment(workflow["submission_uuid"])
    except (submission_api.SubmissionError, self_api.SelfAssessmentRequestError):
        logger.exception(
            u"Could not retrieve self assessment for submission {}".format(workflow["submission_uuid"])
        )
        return self.render_error(_(u"An unexpected error occurred."))

    if workflow["status"] == "self":
        path = 'openassessmentblock/self/oa_self_assessment.html'
        context = {
            "rubric_criteria": self.rubric_criteria,
            "estimated_time": "20 minutes",  # TODO: Need to configure this.
            "self_submission": submission,
        }
    elif assessment is not None:
        path = 'openassessmentblock/self/oa_self_complete.html'
    elif reason == "due" and problem_closed:
        path = 'openassessmentblock/self/oa_self_closed.html'

    return self.render_assessment(path, context)
Example 11: get_user_submission
def get_user_submission(submission_uuid):
    """Return the most recent submission by user in workflow.

    Return the most recent submission. If no submission is available,
    return None. All submissions are preserved, but only the most recent
    will be returned in this function, since the active workflow will only
    be concerned with the most recent submission.

    Args:
        submission_uuid (str): The uuid for the submission to retrieve.

    Returns:
        (dict): A dictionary representation of a submission to render to
            the front end.
    """
    try:
        return api.get_submission(submission_uuid)
    except api.SubmissionRequestError:
        # This error is actually ok.
        return None
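A hypothetical caller of the helper above, showing the None fallback the docstring describes; the workflow dict is assumed to come from the workflow API seen in the other examples.

submission = get_user_submission(workflow["submission_uuid"])
if submission is None:
    # The stored UUID no longer resolves to a submission; treat the
    # learner as not having submitted rather than raising an error.
    pass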
Example 12: render_grade_complete
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    feedback = peer_api.get_assessment_feedback(self.submission_uuid)
    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(workflow['submission_uuid'])
    peer_assessments = peer_api.get_assessments(student_submission['uuid'])
    self_assessment = self_api.get_assessment(student_submission['uuid'])
    has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

    context = {
        'score': workflow['score'],
        'feedback_text': feedback_text,
        'student_submission': student_submission,
        'peer_assessments': peer_assessments,
        'self_assessment': self_assessment,
        'rubric_criteria': copy.deepcopy(self.rubric_criteria),
        'has_submitted_feedback': has_submitted_feedback,
    }

    # Update the scores we will display to the user
    # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
    max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
    median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
    if median_scores is not None and max_scores is not None:
        for criterion in context["rubric_criteria"]:
            criterion["median_score"] = median_scores[criterion["name"]]
            criterion["total_value"] = max_scores[criterion["name"]]

    return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example 13: render_grade_complete
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    # Import is placed here to avoid model import at project startup.
    from openassessment.assessment.api import peer as peer_api
    from openassessment.assessment.api import self as self_api
    from openassessment.assessment.api import staff as staff_api
    from submissions import api as sub_api

    # Peer specific stuff...
    assessment_steps = self.assessment_steps
    submission_uuid = workflow['submission_uuid']

    staff_assessment = None
    self_assessment = None
    feedback = None
    peer_assessments = []
    has_submitted_feedback = False

    if "peer-assessment" in assessment_steps:
        peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
        feedback = peer_api.get_assessment_feedback(submission_uuid)
        peer_assessments = [
            self._assessment_grade_context(peer_assessment)
            for peer_assessment in peer_api.get_assessments(submission_uuid)
        ]
        has_submitted_feedback = feedback is not None

    if "self-assessment" in assessment_steps:
        self_assessment = self._assessment_grade_context(
            self_api.get_assessment(submission_uuid)
        )

    raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
    if raw_staff_assessment:
        staff_assessment = self._assessment_grade_context(raw_staff_assessment)

    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(submission_uuid)

    # We retrieve the score from the workflow, which in turn retrieves
    # the score for our current submission UUID.
    # We look up the score by submission UUID instead of student item
    # to ensure that the score always matches the rubric.
    # It's possible for the score to be `None` even if the workflow status is "done"
    # when all the criteria in the rubric are feedback-only (no options).
    score = workflow['score']

    context = {
        'score': score,
        'feedback_text': feedback_text,
        'has_submitted_feedback': has_submitted_feedback,
        'student_submission': create_submission_dict(student_submission, self.prompts),
        'peer_assessments': peer_assessments,
        'grade_details': self.grade_details(
            submission_uuid,
            peer_assessments=peer_assessments,
            self_assessment=self_assessment,
            staff_assessment=staff_assessment,
        ),
        'file_upload_type': self.file_upload_type,
        'allow_latex': self.allow_latex,
        'file_urls': self.get_download_urls_from_submission(student_submission),
        'xblock_id': self.get_xblock_id(),
    }

    return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example 14: get_submission_to_assess
def get_submission_to_assess(submission_uuid, graded_by):
    """Get a submission to peer evaluate.

    Retrieves a submission for assessment for the given student. This will
    not return a submission submitted by the requesting scorer. Submissions are
    returned based on how many assessments are still required, and if there are
    peers actively assessing a particular submission. If there are no
    submissions requiring assessment, a submission may be returned that will be
    'over graded', and the assessment will not be counted towards the overall
    grade.

    Args:
        submission_uuid (str): The submission UUID from the student
            requesting a submission for assessment. This is used to explicitly
            avoid giving the student their own submission, and determines the
            associated Peer Workflow.
        graded_by (int): The number of assessments a submission
            requires before it has completed the peer assessment process.

    Returns:
        dict: A peer submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        PeerAssessmentRequestError: Raised when the request parameters are
            invalid for the request.
        PeerAssessmentInternalError: Raised when there is an internal error
            retrieving peer workflow information.
        PeerAssessmentWorkflowError: Raised when an error occurs because this
            function, or the student item, is not in the proper workflow state
            to retrieve a peer submission.

    Examples:
        >>> get_submission_to_assess("abc123", 3)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }
    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if not workflow:
        raise PeerAssessmentWorkflowError(
            u"A Peer Assessment Workflow does not exist for the student "
            u"with submission UUID {}".format(submission_uuid)
        )

    peer_submission_uuid = workflow.find_active_assessments()

    # If there is an active assessment for this user, get that submission,
    # otherwise, get the first assessment for review, otherwise,
    # get the first submission available for over grading ("over-grading").
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_review(graded_by)
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_over_grading()

    if peer_submission_uuid:
        try:
            submission_data = sub_api.get_submission(peer_submission_uuid)
            PeerWorkflow.create_item(workflow, peer_submission_uuid)
            _log_workflow(peer_submission_uuid, workflow)
            return submission_data
        except sub_api.SubmissionNotFoundError:
            error_message = (
                u"Could not find a submission with the uuid {} for student {} "
                u"in the peer workflow."
            ).format(peer_submission_uuid, workflow.student_id)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
    else:
        logger.info(
            u"No submission found for {} to assess ({}, {})".format(
                workflow.student_id,
                workflow.course_id,
                workflow.item_id,
            )
        )
        return None
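A hypothetical caller for the peer API above, handling the workflow error it documents; my_submission_uuid is a placeholder, and graded_by=3 is an arbitrary requirement.

try:
    peer_submission = get_submission_to_assess(my_submission_uuid, graded_by=3)
except PeerAssessmentWorkflowError:
    # The scorer has no peer workflow yet (e.g. they have not submitted).
    peer_submission = None

if peer_submission is None:
    # No peers to assess right now; the learner should check back later.
    pass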
Example 15: test_get_submission_deep_error
# The patch decorator is implied by the `mock_get` argument but was missing
# above; patching the model lookup is an assumption about the exact target.
@mock.patch.object(Submission.objects, 'get')
def test_get_submission_deep_error(self, mock_get):
    # Test deep explosions are wrapped
    mock_get.side_effect = DatabaseError("Kaboom!")
    with self.assertRaises(api.SubmissionInternalError):
        api.get_submission("000000000000000")