本文整理汇总了Python中lms.djangoapps.instructor_task.tests.test_base.InstructorTaskModuleTestCase类的典型用法代码示例。如果您正苦于以下问题:Python InstructorTaskModuleTestCase类的具体用法?Python InstructorTaskModuleTestCase怎么用?Python InstructorTaskModuleTestCase使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了InstructorTaskModuleTestCase类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _assert_task_failure
def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
    """Confirm that expected values are stored in InstructorTask on task failure."""
    task = InstructorTask.objects.get(id=entry_id)
    # The task record itself should reflect the failure and who requested it.
    self.assertEqual(task.task_state, FAILURE)
    self.assertEqual(task.requester.username, 'instructor')
    self.assertEqual(task.task_type, task_type)
    # The stored input should name the problem but no particular student.
    parsed_input = json.loads(task.task_input)
    self.assertNotIn('student', parsed_input)
    expected_url = InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string()
    self.assertEqual(parsed_input['problem_url'], expected_url)
    # The stored output should carry the exception details.
    parsed_output = json.loads(task.task_output)
    self.assertEqual(parsed_output['exception'], 'ZeroDivisionError')
    self.assertEqual(parsed_output['message'], expected_message)
    # The status API should report the same failure message.
    reported = InstructorTaskModuleTestCase.get_task_status(task.task_id)
    self.assertEqual(reported['message'], expected_message)
示例2: test_rescoring_bad_unicode_input
def test_rescoring_bad_unicode_input(self):
    """Generate a real failure in rescoring a problem, with an answer including unicode"""
    # Historically, student answers that raised StudentInputError were being
    # persisted (without counting as an attempt); that can no longer happen,
    # so instead we force the error via a mock on rescoring.
    problem_url_name = 'H1P1'
    # Any problem with an answer works here; an option problem is the simplest.
    self.define_option_problem(problem_url_name)
    self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
    # Simulate an input error (as for a numerical response) embedding a unicode char.
    expected_message = u"Could not interpret '2/3\u03a9' as a number"
    with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
        mock_rescore.side_effect = StudentInputError(expected_message)
        submitted = self.submit_rescore_all_student_answers('instructor', problem_url_name)
    # Reload the task record and verify what was stored.
    task = InstructorTask.objects.get(id=submitted.id)
    self.assertEqual(task.task_state, 'SUCCESS')
    self.assertEqual(task.requester.username, 'instructor')
    self.assertEqual(task.task_type, 'rescore_problem')
    parsed_input = json.loads(task.task_input)
    self.assertNotIn('student', parsed_input)
    self.assertEqual(
        parsed_input['problem_url'],
        InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string(),
    )
    # The single attempt should be recorded as attempted but not succeeded.
    output = json.loads(task.task_output)
    self.assertEqual(output['attempted'], 1)
    self.assertEqual(output['succeeded'], 0)
    self.assertEqual(output['total'], 1)
示例3: submit_rescore_all_student_answers
def submit_rescore_all_student_answers(self, instructor, problem_url_name, only_if_higher=False):
    """Submits the particular problem for rescoring"""
    request = self.create_task_request(instructor)
    location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
    return submit_rescore_problem_for_all_students(request, location, only_if_higher)
示例4: _test_submit_with_long_url
def _test_submit_with_long_url(self, task_function, student=None):
    """A problem url name at the storage length limit should be rejected on submit."""
    problem_url_name = 'x' * 255
    self.define_option_problem(problem_url_name)
    location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
    # Pass the student along only when one was supplied.
    extra_args = () if student is None else (student,)
    with self.assertRaises(ValueError):
        task_function(self.create_task_request(self.instructor), location, *extra_args)
示例5: test_delete_failure
def test_delete_failure(self):
    """Simulate a failure in deleting state of a problem"""
    problem_url_name = 'H1P1'
    location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
    self.define_option_problem(problem_url_name)
    self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
    expected_message = "bad things happened"
    # Force StudentModule.delete to raise so the task records a failure.
    with patch('courseware.models.StudentModule.delete') as mock_delete:
        mock_delete.side_effect = ZeroDivisionError(expected_message)
        task = self.delete_problem_state('instructor', location)
    self._assert_task_failure(task.id, 'delete_problem_state', problem_url_name, expected_message)
示例6: test_submit_nonexistent_modules
def test_submit_nonexistent_modules(self):
    """Submitting any task against a non-existent module should raise ItemNotFoundError."""
    problem_url = InstructorTaskModuleTestCase.problem_location("NonexistentProblem")
    request = None
    # Each submit API should reject the unknown location the same way.
    submitters = [
        lambda: submit_rescore_problem_for_student(request, problem_url, self.student),
        lambda: submit_rescore_problem_for_all_students(request, problem_url),
        lambda: submit_reset_problem_attempts_for_all_students(request, problem_url),
        lambda: submit_delete_problem_state_for_all_students(request, problem_url),
    ]
    for submit in submitters:
        with self.assertRaises(ItemNotFoundError):
            submit()
示例7: render_problem
def render_problem(self, username, problem_url_name):
    """
    Use ajax interface to request html for a problem.
    """
    # Log in as the requested user so the ajax call targets their state.
    self.login_username(username)
    usage_string = InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string()
    # Build the xblock handler url and issue the ajax POST.
    modx_url = reverse('xblock_handler', kwargs={
        'course_id': self.course.id.to_deprecated_string(),
        'usage_id': quote_slashes(usage_string),
        'handler': 'xmodule_handler',
        'suffix': 'problem_get',
    })
    return self.client.post(modx_url, {})
示例8: test_rescoring_randomized_problem
def test_rescoring_randomized_problem(self):
    """Run rescore scenario on custom problem that uses randomize"""
    # First define the custom response problem:
    problem_url_name = 'H1P1'
    self.define_randomized_custom_response_problem(problem_url_name)
    location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
    descriptor = self.module_store.get_item(location)
    # run with more than one user
    for user in self.users:
        # first render the problem, so that a seed will be created for this user
        self.render_problem(user.username, problem_url_name)
        # submit a bogus answer, in order to get the problem to tell us its real answer
        dummy_answer = "1000"
        self.submit_student_answer(user.username, problem_url_name, [dummy_answer, dummy_answer])
        # we should have gotten the problem wrong, since we're way out of range:
        self.check_state(user, descriptor, 0, 1, expected_attempts=1)
        # dig the correct answer out of the problem's message
        module = self.get_student_module(user.username, descriptor)
        state = json.loads(module.state)
        correct_map = state['correct_map']
        log.info("Correct Map: %s", correct_map)
        # only one response, so pull it out.
        # FIX: wrap in list() — on Python 3, dict.values() returns a view
        # that does not support indexing (this is also valid on Python 2).
        answer = list(correct_map.values())[0]['msg']
        self.submit_student_answer(user.username, problem_url_name, [answer, answer])
        # we should now get the problem right, with a second attempt:
        self.check_state(user, descriptor, 1, 1, expected_attempts=2)
    # redefine the problem (as stored in Mongo) so that the definition of correct changes
    self.define_randomized_custom_response_problem(problem_url_name, redefine=True)
    # confirm that simply rendering the problem again does not result in a change
    # in the grade (or the attempts):
    self.render_problem('u1', problem_url_name)
    self.check_state(self.user1, descriptor, 1, 1, expected_attempts=2)
    # rescore the problem for only one student -- only that student's grade should change
    # (and none of the attempts):
    self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
    for user in self.users:
        expected_score = 0 if user.username == 'u1' else 1
        self.check_state(user, descriptor, expected_score, 1, expected_attempts=2)
    # rescore the problem for all students
    self.submit_rescore_all_student_answers('instructor', problem_url_name)
    # all grades should change to being wrong (with no change in attempts)
    for user in self.users:
        self.check_state(user, descriptor, 0, 1, expected_attempts=2)
示例9: test_delete_problem_state
def test_delete_problem_state(self):
    """Run delete-state scenario on option problem"""
    problem_url_name = 'H1P1'
    self.define_option_problem(problem_url_name)
    location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
    descriptor = self.module_store.get_item(location)
    # Store an answer for every user so each has StudentModule state.
    for username in self.userlist:
        self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
    # Confirm that state exists for each of them.
    for username in self.userlist:
        self.assertIsNotNone(self.get_student_module(username, descriptor))
    # Run the delete task, then confirm every user's state is gone.
    self.delete_problem_state('instructor', location)
    for username in self.userlist:
        with self.assertRaises(StudentModule.DoesNotExist):
            self.get_student_module(username, descriptor)
示例10: test_rescoring_code_problem
def test_rescoring_code_problem(self):
    """Run rescore scenario on problem with code submission"""
    problem_url_name = 'H1P2'
    self.define_code_response_problem(problem_url_name)
    # Fully create the CodeResponse problem, but only pretend to queue it.
    with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
        mock_send_to_queue.return_value = (0, "Successfully queued")
        self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])
    submitted = self.submit_rescore_all_student_answers('instructor', problem_url_name)
    # Rescoring a code problem is unsupported, so the task should fail.
    task = InstructorTask.objects.get(id=submitted.id)
    self.assertEqual(task.task_state, FAILURE)
    output = json.loads(task.task_output)
    self.assertEqual(output['exception'], 'NotImplementedError')
    self.assertEqual(output['message'], "Problem's definition does not support rescoring.")
    # The status API should surface the same message.
    reported = InstructorTaskModuleTestCase.get_task_status(task.task_id)
    self.assertEqual(reported['message'], "Problem's definition does not support rescoring.")
示例11: test_reset_attempts_on_problem
def test_reset_attempts_on_problem(self):
    """Run reset-attempts scenario on option problem"""
    # get descriptor:
    problem_url_name = 'H1P1'
    self.define_option_problem(problem_url_name)
    location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
    descriptor = self.module_store.get_item(location)
    num_attempts = 3
    # first store answers for each of the separate users:
    for _ in range(num_attempts):
        for username in self.userlist:
            self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
    # FIX: use assertEqual — assertEquals is a deprecated alias (removed in
    # newer Python), and the rest of this suite already uses assertEqual.
    for username in self.userlist:
        self.assertEqual(self.get_num_attempts(username, descriptor), num_attempts)
    # the reset task should zero every user's attempt count:
    self.reset_problem_attempts('instructor', location)
    for username in self.userlist:
        self.assertEqual(self.get_num_attempts(username, descriptor), 0)
示例12: test_submit_task
def test_submit_task(self, task_function, expected_task_type, params=None):
    """
    Tests submitting a task, and then tests a second identical submission,
    which should be rejected because the first is still in progress.
    """
    if params is None:
        params = {}
    # Replace the 'student' placeholder with the actual test student fixture.
    if params.get('student'):
        params['student'] = self.student
    problem_url_name = 'H1P1'
    self.define_option_problem(problem_url_name)
    location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
    instructor_task = task_function(self.create_task_request(self.instructor), location, **params)
    # FIX: use assertEqual — assertEquals is a deprecated alias (removed in
    # newer Python), and the rest of this suite already uses assertEqual.
    self.assertEqual(instructor_task.task_type, expected_task_type)
    # test resubmitting, by updating the existing record to look in-progress:
    instructor_task = InstructorTask.objects.get(id=instructor_task.id)
    instructor_task.task_state = PROGRESS
    instructor_task.save()
    with self.assertRaises(AlreadyRunningError):
        task_function(self.create_task_request(self.instructor), location, **params)
示例13: verify_rescore_results
def verify_rescore_results(self, problem_edit, new_expected_scores, new_expected_max, rescore_if_higher):
    """
    Common helper to verify the results of rescoring for a single
    student and all students are as expected.
    """
    problem_url_name = 'H1P1'
    self.define_option_problem(problem_url_name)
    location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
    descriptor = self.module_store.get_item(location)
    # Store a distinct answer combination for each of the four users.
    answers_by_user = {
        'u1': [OPTION_1, OPTION_1],
        'u2': [OPTION_1, OPTION_2],
        'u3': [OPTION_2, OPTION_1],
        'u4': [OPTION_2, OPTION_2],
    }
    for username, answers in sorted(answers_by_user.items()):
        self.submit_student_answer(username, problem_url_name, answers)
    # Verify the original grade of every user.
    expected_original_scores = (2, 1, 1, 0)
    expected_original_max = 2
    for score, user in zip(expected_original_scores, self.users):
        self.check_state(user, descriptor, score, expected_original_max)
    # Edit the problem definition so the correct answer changes.
    self.redefine_option_problem(problem_url_name, **problem_edit)
    # Re-rendering alone must not change anyone's grade.
    self.render_problem('u1', problem_url_name)
    self.check_state(self.user1, descriptor, expected_original_scores[0], expected_original_max)
    # Rescore a single student: only that student's grade may change.
    self.submit_rescore_one_student_answer('instructor', problem_url_name, self.user1, rescore_if_higher)
    self.check_state(self.user1, descriptor, new_expected_scores[0], new_expected_max)
    for idx in range(1, len(self.users)):  # everyone other than user1
        self.check_state(self.users[idx], descriptor, expected_original_scores[idx], expected_original_max)
    # Rescore everyone: all grades move to the new expected values.
    self.submit_rescore_all_student_answers('instructor', problem_url_name, rescore_if_higher)
    for idx, user in enumerate(self.users):
        self.check_state(user, descriptor, new_expected_scores[idx], new_expected_max)
示例14: define_randomized_custom_response_problem
def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
    """
    Defines a custom response problem that uses a random value to determine correctness.

    Generated answer is also returned as the `msg`, so that the value can be used as a
    correct answer by a test.

    If the `redefine` flag is set, then change the definition of correctness (from equals
    to not-equals).
    """
    comparison_op = '!=' if redefine else '=='
    script = textwrap.dedent("""
            def check_func(expect, answer_given):
                expected = str(random.randint(0, 100))
                return {'ok': answer_given %s expected, 'msg': expected}
        """ % comparison_op)
    factory = CustomResponseXMLFactory()
    problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
    if not redefine:
        # Use "per-student" rerandomization so that check-problem can be called more than once.
        # Using "always" means we cannot check a problem twice, but we want to call once to get the
        # correct answer, and call a second time with that answer to confirm it's graded as correct.
        # Per-student rerandomization will at least generate different seeds for different users, so
        # we get a little more test coverage.
        ItemFactory.create(parent_location=self.problem_section.location,
                           category="problem",
                           display_name=str(problem_url_name),
                           data=problem_xml,
                           metadata={"rerandomize": "per_student"})
    else:
        # Update the already-stored problem with the flipped definition and republish.
        descriptor = self.module_store.get_item(
            InstructorTaskModuleTestCase.problem_location(problem_url_name)
        )
        descriptor.data = problem_xml
        with self.module_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, descriptor.location.course_key):
            self.module_store.update_item(descriptor, self.user.id)
            self.module_store.publish(descriptor.location, self.user.id)
示例15: test_rescoring_if_higher_scores_equal
def test_rescoring_if_higher_scores_equal(self):
    """
    Specifically tests rescore when the previous and new raw scores are equal. In this case, the scores should
    be updated.
    """
    # This edit raises the problem's max score from 2 to 4.
    problem_edit = dict(num_inputs=2)
    unchanged_max = 2
    new_max = 4
    problem_url_name = 'H1P1'
    self.define_option_problem(problem_url_name)
    location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
    descriptor = self.module_store.get_item(location)
    # Store answers: user 1 fully correct, user 2 fully incorrect.
    self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
    self.submit_student_answer('u2', problem_url_name, [OPTION_2, OPTION_2])
    self.check_state(self.user1, descriptor, 2, 2)  # user 1 has a 2/2
    self.check_state(self.user2, descriptor, 0, 2)  # user 2 has a 0/2
    # Update the problem definition so the answer changes.
    self.redefine_option_problem(problem_url_name, **problem_edit)
    # Re-rendering alone must not change either grade.
    self.render_problem('u1', problem_url_name)
    self.check_state(self.user1, descriptor, 2, 2)
    self.check_state(self.user2, descriptor, 0, 2)
    # Rescore everyone in only-if-higher mode.
    self.submit_rescore_all_student_answers('instructor', problem_url_name, True)
    # User 1's score would go down, so it remains 2/2. User 2's 0/2 is equivalent
    # to the new 0/4, so equal raw scores DO get updated: user 2 becomes 0/4.
    self.check_state(self.user1, descriptor, 2, unchanged_max)
    self.check_state(self.user2, descriptor, 0, new_max)