This article collects typical usage examples of the Python class courseware.model_data.ScoresClient. If you have been wondering what exactly the ScoresClient class does, how to use it, or what working examples look like, the curated class code examples below may help.
The following sections present 13 code examples of the ScoresClient class, sorted by popularity by default.
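Before the individual examples, here is a minimal sketch of the fetch-then-read pattern most of them share. The course_key, user_id, and locations names below are hypothetical placeholders; only the ScoresClient calls themselves (the constructor, fetch_scores, and get) are taken from the examples that follow.

from courseware.model_data import ScoresClient

def earned_and_possible(course_key, user_id, locations):
    # Minimal sketch: prefetch scores for the given block locations in one
    # bulk query, then read them back individually.
    client = ScoresClient(course_key, user_id)
    client.fetch_scores(locations)
    earned, possible = 0.0, 0.0
    for location in locations:
        score = client.get(location)          # None if no score is stored
        if score and score.total:
            earned += score.correct or 0
            possible += score.total
    return earned, possible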
Example 1: _calculate_score_for_modules
def _calculate_score_for_modules(user_id, course, modules):
"""
Calculates the cumulative score (percent) of the given modules
"""
# removing branch and version from exam modules locator
# otherwise student module would not return scores since module usage keys would not match
modules = [m for m in modules]
locations = [
BlockUsageLocator(
course_key=course.id,
block_type=module.location.block_type,
block_id=module.location.block_id
)
if isinstance(module.location, BlockUsageLocator) and module.location.version
else module.location
for module in modules
]
scores_client = ScoresClient(course.id, user_id)
scores_client.fetch_scores(locations)
# Iterate over all of the exam modules to get score percentage of user for each of them
module_percentages = []
ignore_categories = ['course', 'chapter', 'sequential', 'vertical', 'randomize', 'library_content']
for index, module in enumerate(modules):
if module.category not in ignore_categories and (module.graded or module.has_score):
module_score = scores_client.get(locations[index])
if module_score:
correct = module_score.correct or 0
total = module_score.total or 1
module_percentages.append(correct / total)
return sum(module_percentages) / float(len(module_percentages)) if module_percentages else 0
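A quick orientation before the next example: a hypothetical call site for the helper above might look like the following. The user_id, course, and exam_modules names are placeholders supplied by whatever exam machinery invokes the helper; only _calculate_score_for_modules itself comes from the example.

# Hypothetical invocation; the names below are placeholders, not from the example.
percent = _calculate_score_for_modules(user_id, course, exam_modules)
print("cumulative exam score: {:.1f}%".format(percent * 100))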
Example 2: _calculate_entrance_exam_score
def _calculate_entrance_exam_score(user, course_descriptor, exam_modules):
"""
Calculates the score (percent) of the entrance exam using the provided modules
"""
student_module_dict = {}
scores_client = ScoresClient(course_descriptor.id, user.id)
locations = [exam_module.location for exam_module in exam_modules]
scores_client.fetch_scores(locations)
# Iterate over all of the exam modules to get score of user for each of them
for exam_module in exam_modules:
exam_module_score = scores_client.get(exam_module.location)
if exam_module_score:
student_module_dict[unicode(exam_module.location)] = {
"grade": exam_module_score.correct,
"max_grade": exam_module_score.total,
}
exam_percentage = 0
module_percentages = []
ignore_categories = ["course", "chapter", "sequential", "vertical"]
for module in exam_modules:
if module.graded and module.category not in ignore_categories:
module_percentage = 0
module_location = unicode(module.location)
if module_location in student_module_dict and student_module_dict[module_location]["max_grade"]:
student_module = student_module_dict[module_location]
module_percentage = student_module["grade"] / student_module["max_grade"]
module_percentages.append(module_percentage)
if module_percentages:
exam_percentage = sum(module_percentages) / float(len(module_percentages))
return exam_percentage
Example 3: _csm_scores
def _csm_scores(self):
"""
Lazily queries and returns all the scores stored in the user
state (in CSM) for the course, while caching the result.
"""
scorable_locations = [block_key for block_key in self.course_structure if possibly_scored(block_key)]
return ScoresClient.create_for_locations(self.course.id, self.student.id, scorable_locations)
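Several examples in this listing (3, 4, 6 and 9) use ScoresClient.create_for_locations rather than constructing a client and calling fetch_scores themselves. Judging purely from how it is used here, it appears to bundle those two steps; a rough sketch of that assumption (not the actual implementation) would be:

# Assumed behaviour of ScoresClient.create_for_locations, inferred from how the
# examples use it: build a client for the course/user and prefetch the locations.
def create_for_locations(course_id, user_id, scorable_locations):
    client = ScoresClient(course_id, user_id)
    client.fetch_scores(scorable_locations)
    return client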
Example 4: _grade
def _grade(student, course, keep_raw_scores, course_structure=None):
"""
Unwrapped version of "grade"
This grades a student as quickly as possible. It returns the
output from the course grader, augmented with the final letter
grade. The keys in the output are:
- course: a CourseDescriptor
- keep_raw_scores : if True, then value for key 'raw_scores' contains scores
for every graded module
More information on the format is in the docstring for CourseGrader.
"""
if course_structure is None:
course_structure = get_course_blocks(student, course.location)
grading_context_result = grading_context(course_structure)
scorable_locations = [block.location for block in grading_context_result['all_graded_blocks']]
with outer_atomic():
scores_client = ScoresClient.create_for_locations(course.id, student.id, scorable_locations)
# Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
# scores that were registered with the submissions API, which for the moment
# means only openassessment (edx-ora2)
# We need to import this here to avoid a circular dependency of the form:
# XBlock --> submissions --> Django Rest Framework error strings -->
# Django translation --> ... --> courseware --> submissions
from submissions import api as sub_api # installed from the edx-submissions repository
with outer_atomic():
submissions_scores = sub_api.get_scores(
course.id.to_deprecated_string(),
anonymous_id_for_user(student, course.id)
)
totaled_scores, raw_scores = _calculate_totaled_scores(
student, grading_context_result, submissions_scores, scores_client, keep_raw_scores
)
with outer_atomic():
# Grading policy might be overridden by a CCX, need to reset it
course.set_grading_policy(course.grading_policy)
grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)
# We round the grade here, to make sure that the grade is a whole percentage and
# doesn't get displayed differently than it gets graded
grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100
letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
grade_summary['grade'] = letter_grade
grade_summary['totaled_scores'] = totaled_scores # make this available, eg for instructor download & debugging
if keep_raw_scores:
# way to get all RAW scores out to instructor
# so grader can be double-checked
grade_summary['raw_scores'] = raw_scores
return grade_summary
Example 5: _calculate_entrance_exam_score
def _calculate_entrance_exam_score(user, course_descriptor, exam_modules):
"""
Calculates the score (percent) of the entrance exam using the provided modules
"""
student_module_dict = {}
scores_client = ScoresClient(course_descriptor.id, user.id)
# removing branch and version from exam modules locator
# otherwise student module would not return scores since module usage keys would not match
locations = [
BlockUsageLocator(
course_key=course_descriptor.id,
block_type=exam_module.location.block_type,
block_id=exam_module.location.block_id
)
if isinstance(exam_module.location, BlockUsageLocator) and exam_module.location.version
else exam_module.location
for exam_module in exam_modules
]
scores_client.fetch_scores(locations)
# Iterate over all of the exam modules to get score of user for each of them
for index, exam_module in enumerate(exam_modules):
exam_module_score = scores_client.get(locations[index])
if exam_module_score:
student_module_dict[unicode(locations[index])] = {
'grade': exam_module_score.correct,
'max_grade': exam_module_score.total
}
exam_percentage = 0
module_percentages = []
ignore_categories = ['course', 'chapter', 'sequential', 'vertical']
for index, module in enumerate(exam_modules):
if module.graded and module.category not in ignore_categories:
module_percentage = 0
module_location = unicode(locations[index])
if module_location in student_module_dict and student_module_dict[module_location]['max_grade']:
student_module = student_module_dict[module_location]
module_percentage = student_module['grade'] / student_module['max_grade']
module_percentages.append(module_percentage)
if module_percentages:
exam_percentage = sum(module_percentages) / float(len(module_percentages))
return exam_percentage
Example 6: _prefetch_scores
def _prefetch_scores(self, course_structure, course):
"""
Returns the prefetched scores for the given student and course.
"""
if not self._scores_client:
scorable_locations = [block_key for block_key in course_structure if possibly_scored(block_key)]
self._scores_client = ScoresClient.create_for_locations(
course.id, self.student.id, scorable_locations
)
self._submissions_scores = submissions_api.get_scores(
unicode(course.id), anonymous_id_for_user(self.student, course.id)
)
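For context, here is a hedged sketch of how the data prefetched above might be read back later. The helper name and block_key are hypothetical, and the assumption that submissions scores are keyed by the stringified usage key comes from how the other examples in this listing index submissions_scores.

def _score_fraction_for(self, block_key):
    # Hypothetical helper, not part of the original example.
    csm_score = self._scores_client.get(block_key)   # None if nothing is stored
    if csm_score is not None and csm_score.total:
        return (csm_score.correct or 0) / float(csm_score.total)
    # Fall back to the submissions API scores (assumed to be keyed by the
    # stringified usage key and to hold (earned, possible) tuples).
    earned_possible = self._submissions_scores.get(unicode(block_key))
    if earned_possible:
        earned, possible = earned_possible
        return earned / float(possible) if possible else 0.0
    return None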
Example 7: _progress_summary
def _progress_summary(student, request, course, field_data_cache=None, scores_client=None):
"""
Unwrapped version of "progress_summary".
This pulls a summary of all problems in the course.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
Arguments:
student: A User object for the student to grade
course: A Descriptor containing the course to grade
If the student does not have access to load the course module, this function
will return None.
"""
with manual_transaction():
if field_data_cache is None:
field_data_cache = field_data_cache_for_grading(course, student)
if scores_client is None:
scores_client = ScoresClient.from_field_data_cache(field_data_cache)
course_module = get_module_for_descriptor(
student, request, course, field_data_cache, course.id, course=course
)
if not course_module:
return None
course_module = getattr(course_module, '_x_module', course_module)
submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id))
max_scores_cache = MaxScoresCache.create_for_course(course)
# For the moment, we have to get scorable_locations from field_data_cache
# and not from scores_client, because scores_client is ignorant of things
# in the submissions API. As a further refactoring step, submissions should
# be hidden behind the ScoresClient.
max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)
chapters = []
locations_to_children = defaultdict(list)
locations_to_weighted_scores = {}
# Don't include chapters that aren't displayable (e.g. due to error)
for chapter_module in course_module.get_display_items():
# Skip if the chapter is hidden
if chapter_module.hide_from_toc:
continue
sections = []
for section_module in chapter_module.get_display_items():
# Skip if the section is hidden
with manual_transaction():
if section_module.hide_from_toc:
continue
graded = section_module.graded
scores = []
module_creator = section_module.xmodule_runtime.get_module
for module_descriptor in yield_dynamic_descriptor_descendants(
section_module, student.id, module_creator
):
locations_to_children[module_descriptor.parent].append(module_descriptor.location)
(correct, total) = get_score(
student,
module_descriptor,
module_creator,
scores_client,
submissions_scores,
max_scores_cache,
)
if correct is None and total is None:
continue
weighted_location_score = Score(
correct,
total,
graded,
module_descriptor.display_name_with_default,
module_descriptor.location
)
scores.append(weighted_location_score)
locations_to_weighted_scores[module_descriptor.location] = weighted_location_score
scores.reverse()
section_total, _ = graders.aggregate_scores(
scores, section_module.display_name_with_default)
module_format = section_module.format if section_module.format is not None else ''
sections.append({
'display_name': section_module.display_name_with_default,
'url_name': section_module.url_name,
'scores': scores,
'section_total': section_total,
# ... (the rest of this example is omitted here) ...
Example 8: _grade
def _grade(student, request, course, keep_raw_scores, field_data_cache, scores_client):
"""
Unwrapped version of "grade"
This grades a student as quickly as possible. It returns the
output from the course grader, augmented with the final letter
grade. The keys in the output are:
- course: a CourseDescriptor
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
- keep_raw_scores : if True, then value for key 'raw_scores' contains scores
for every graded module
More information on the format is in the docstring for CourseGrader.
"""
if field_data_cache is None:
with manual_transaction():
field_data_cache = field_data_cache_for_grading(course, student)
if scores_client is None:
scores_client = ScoresClient.from_field_data_cache(field_data_cache)
# Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
# scores that were registered with the submissions API, which for the moment
# means only openassessment (edx-ora2)
submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id))
max_scores_cache = MaxScoresCache.create_for_course(course)
# For the moment, we have to get scorable_locations from field_data_cache
# and not from scores_client, because scores_client is ignorant of things
# in the submissions API. As a further refactoring step, submissions should
# be hidden behind the ScoresClient.
max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)
grading_context = course.grading_context
raw_scores = []
totaled_scores = {}
# This next complicated loop is just to collect the totaled_scores, which is
# passed to the grader
for section_format, sections in grading_context['graded_sections'].iteritems():
format_scores = []
for section in sections:
section_descriptor = section['section_descriptor']
section_name = section_descriptor.display_name_with_default
# some problems have state that is updated independently of interaction
# with the LMS, so they need to always be scored. (E.g. foldit,
# combinedopenended)
should_grade_section = any(
descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
)
# If there are no problems that always have to be regraded, check to
# see if any of our locations are in the scores from the submissions
# API. If scores exist, we have to calculate grades for this section.
if not should_grade_section:
should_grade_section = any(
descriptor.location.to_deprecated_string() in submissions_scores
for descriptor in section['xmoduledescriptors']
)
if not should_grade_section:
should_grade_section = any(
descriptor.location in scores_client
for descriptor in section['xmoduledescriptors']
)
# If we haven't seen a single problem in the section, we don't have
# to grade it at all! We can assume 0%
if should_grade_section:
scores = []
def create_module(descriptor):
'''creates an XModule instance given a descriptor'''
# TODO: We need the request to pass into here. If we could forego that, our arguments
# would be simpler
return get_module_for_descriptor(
student, request, descriptor, field_data_cache, course.id, course=course
)
descendants = yield_dynamic_descriptor_descendants(section_descriptor, student.id, create_module)
for module_descriptor in descendants:
(correct, total) = get_score(
student,
module_descriptor,
create_module,
scores_client,
submissions_scores,
max_scores_cache,
)
if correct is None and total is None:
continue
if settings.GENERATE_PROFILE_SCORES: # for debugging!
if total > 1:
# ... (the rest of this example is omitted here) ...
Example 9: _progress_summary
def _progress_summary(student, course, course_structure=None):
"""
Unwrapped version of "progress_summary".
This pulls a summary of all problems in the course.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
- None if the student does not have access to load the course module.
Arguments:
student: A User object for the student to grade
course: A Descriptor containing the course to grade
"""
if course_structure is None:
course_structure = get_course_blocks(student, course.location)
if not len(course_structure):
return None
scorable_locations = [block_key for block_key in course_structure if possibly_scored(block_key)]
with outer_atomic():
scores_client = ScoresClient.create_for_locations(course.id, student.id, scorable_locations)
# We need to import this here to avoid a circular dependency of the form:
# XBlock --> submissions --> Django Rest Framework error strings -->
# Django translation --> ... --> courseware --> submissions
from submissions import api as sub_api # installed from the edx-submissions repository
with outer_atomic():
submissions_scores = sub_api.get_scores(
unicode(course.id), anonymous_id_for_user(student, course.id)
)
# Check for gated content
gated_content = gating_api.get_gated_content(course, student)
chapters = []
locations_to_weighted_scores = {}
for chapter_key in course_structure.get_children(course_structure.root_block_usage_key):
chapter = course_structure[chapter_key]
sections = []
for section_key in course_structure.get_children(chapter_key):
if unicode(section_key) in gated_content:
continue
section = course_structure[section_key]
graded = getattr(section, 'graded', False)
scores = []
for descendant_key in course_structure.post_order_traversal(
filter_func=possibly_scored,
start_node=section_key,
):
descendant = course_structure[descendant_key]
(correct, total) = get_score(
student,
descendant,
scores_client,
submissions_scores,
)
if correct is None and total is None:
continue
weighted_location_score = Score(
correct,
total,
graded,
block_metadata_utils.display_name_with_default_escaped(descendant),
descendant.location
)
scores.append(weighted_location_score)
locations_to_weighted_scores[descendant.location] = weighted_location_score
escaped_section_name = block_metadata_utils.display_name_with_default_escaped(section)
section_total, _ = graders.aggregate_scores(scores, escaped_section_name)
sections.append({
'display_name': escaped_section_name,
'url_name': block_metadata_utils.url_name_for_block(section),
'scores': scores,
'section_total': section_total,
'format': getattr(section, 'format', ''),
'due': getattr(section, 'due', None),
'graded': graded,
})
chapters.append({
'course': course.display_name_with_default_escaped,
'display_name': block_metadata_utils.display_name_with_default_escaped(chapter),
'url_name': block_metadata_utils.url_name_for_block(chapter),
'sections': sections
})
# ... (the rest of this example is omitted here) ...
Example 10: condition_on_problem_list
def condition_on_problem_list(self, problems):
""" Returns the score for a list of problems """
# pylint: disable=no-member
user_id = self.xmodule_runtime.user_id
scores_client = ScoresClient(self.course_id, user_id)
correct_neutral = {'correct': 0.0}
total_neutral = {'total': 0.0}
total = 0
correct = 0
def _get_usage_key(problem):
loc = self.get_location_string(problem)
try:
uk = UsageKey.from_string(loc)
except InvalidKeyError:
uk = _get_draft_usage_key(problem)
return uk
def _get_draft_usage_key(problem):
loc = self.get_location_string(problem, True)
try:
uk = UsageKey.from_string(loc)
uk = uk.map_into_course(self.course_id)
except InvalidKeyError:
uk = None
return uk
def _to_reducible(score):
correct_default = 0.0
total_default = 1.0
if not score.total:
return {'correct': correct_default, 'total': total_default}
else:
return {'correct': score.correct, 'total': score.total}
def _calculate_correct(first_score, second_score):
correct = first_score['correct'] + second_score['correct']
return {'correct': correct}
def _calculate_total(first_score, second_score):
total = first_score['total'] + second_score['total']
return {'total': total}
usages_keys = map(_get_usage_key, problems)
scores_client.fetch_scores(usages_keys)
scores = map(scores_client.get, usages_keys)
scores = filter(None, scores)
problems_to_answer = [score.total for score in scores]
if self.operator in self.SPECIAL_COMPARISON_DISPATCHER.keys():
evaluation = self.SPECIAL_COMPARISON_DISPATCHER[self.operator](
self,
problems_to_answer)
return evaluation
reducible_scores = map(_to_reducible, scores)
correct = reduce(_calculate_correct, reducible_scores,
correct_neutral)
total = reduce(_calculate_total, reducible_scores,
total_neutral)
return self.compare_scores(correct['correct'], total['total'])
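The map/filter/reduce pipeline above simply totals correct and total across the fetched scores, counting a score with a falsy total as 0 correct out of 1 possible. A loop-based sketch of the same accumulation, under the same assumptions about the score objects, may be easier to follow:

def _sum_scores(scores):
    # Equivalent to the _to_reducible / reduce combination above.
    correct_sum, total_sum = 0.0, 0.0
    for score in scores:
        if score.total:
            correct_sum += score.correct
            total_sum += score.total
        else:
            total_sum += 1.0   # a missing total counts as 0 correct out of 1
    return correct_sum, total_sum

# usage: correct_sum, total_sum = _sum_scores(scores)
#        return self.compare_scores(correct_sum, total_sum)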
Example 11: get_grades
def get_grades(course,student):
field_data_cache = field_data_cache_for_grading(course, student)
scores_client = ScoresClient.from_field_data_cache(field_data_cache)
grading_context = course.grading_context
raw_scores = []
keep_raw_scores = None
# Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
# scores that were registered with the submissions API, which for the moment
# means only openassessment (edx-ora2)
submissions_scores = sub_api.get_scores(
course.id.to_deprecated_string(),
anonymous_id_for_user(student, course.id)
)
max_scores_cache = MaxScoresCache.create_for_course(course)
## Fix for grading certificate subsection issue
tem_set = set()
for set_item in field_data_cache.scorable_locations:
set_item = set_item.version_agnostic()
set_item = set_item.replace(branch=None)
tem_set.update([set_item])
field_data_cache.scorable_locations = tem_set
# For the moment, we have to get scorable_locations from field_data_cache
# and not from scores_client, because scores_client is ignorant of things
# in the submissions API. As a further refactoring step, submissions should
# be hidden behind the ScoresClient.
max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)
totaled_scores = {}
graded_total = []
# This next complicated loop is just to collect the totaled_scores, which is
# passed to the grader
for section_format, sections in grading_context['graded_sections'].iteritems():
format_scores = []
for section in sections:
section_descriptor = section['section_descriptor']
section_name = section_descriptor.display_name_with_default
# some problems have state that is updated independently of interaction
# with the LMS, so they need to always be scored. (E.g. foldit,
# combinedopenended)
should_grade_section = any(
descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
)
# If there are no problems that always have to be regraded, check to
# see if any of our locations are in the scores from the submissions
# API. If scores exist, we have to calculate grades for this section.
if not should_grade_section:
should_grade_section = any(
descriptor.location.to_deprecated_string() in submissions_scores
for descriptor in section['xmoduledescriptors']
)
## Fix for grading certificate subsection issue
for descriptor in section['xmoduledescriptors']:
descriptor.location = descriptor.location.version_agnostic()
descriptor.location = descriptor.location.replace(branch=None)
if not should_grade_section:
should_grade_section = any(
descriptor.location in scores_client
for descriptor in section['xmoduledescriptors']
)
# If we haven't seen a single problem in the section, we don't have
# to grade it at all! We can assume 0%
if should_grade_section:
scores = []
try:
create_module = section_descriptor.xmodule_runtime.get_module
except :
def create_module(descriptor):
'''creates an XModule instance given a descriptor'''
#TODO: We need the request to pass into here. If we could forego that, our arguments
# would be simpler
pass
for module_descriptor in yield_dynamic_descriptor_descendants(section_descriptor, student.id, create_module):
(correct, total) = get_score(
student,
module_descriptor,
create_module,
scores_client,
submissions_scores,
max_scores_cache,
)
print "total==============",total
print "correct================",correct
# ... (the rest of this example is omitted here) ...
Example 12: progress_summary
def progress_summary(student, request, course, field_data_cache=None, scores_client=None, grading_type='vertical'):
"""
This pulls a summary of all problems in the course.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
Arguments:
student: A User object for the student to grade
course: A Descriptor containing the course to grade
If the student does not have access to load the course module, this function
will return None.
"""
with manual_transaction():
if field_data_cache is None:
field_data_cache = field_data_cache_for_grading(course, student)
if scores_client is None:
scores_client = ScoresClient.from_field_data_cache(field_data_cache)
course_module = get_module_for_descriptor(
student, request, course, field_data_cache, course.id, course=course
)
if not course_module:
return None
course_module = getattr(course_module, '_x_module', course_module)
submissions_scores = sub_api.get_scores(
course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)
)
max_scores_cache = MaxScoresCache.create_for_course(course)
# For the moment, we have to get scorable_locations from field_data_cache
# and not from scores_client, because scores_client is ignorant of things
# in the submissions API. As a further refactoring step, submissions should
# be hidden behind the ScoresClient.
max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)
blocks_stack = [course_module]
blocks_dict = {}
while blocks_stack:
curr_block = blocks_stack.pop()
with manual_transaction():
# Skip if the block is hidden
if curr_block.hide_from_toc:
continue
key = unicode(curr_block.scope_ids.usage_id)
children = curr_block.get_display_items() if curr_block.category != grading_type else []
block = {
'display_name': curr_block.display_name_with_default,
'block_type': curr_block.category,
'url_name': curr_block.url_name,
'children': [unicode(child.scope_ids.usage_id) for child in children],
}
if curr_block.category == grading_type:
graded = curr_block.graded
scores = []
module_creator = curr_block.xmodule_runtime.get_module
for module_descriptor in yield_dynamic_descriptor_descendants(
curr_block, student.id, module_creator
):
(correct, total) = get_score(
student,
module_descriptor,
module_creator,
scores_client,
submissions_scores,
max_scores_cache,
)
if correct is None and total is None:
continue
scores.append(
Score(
correct,
total,
graded,
module_descriptor.display_name_with_default,
module_descriptor.location
)
)
scores.reverse()
total, _ = aggregate_scores(scores, curr_block.display_name_with_default)
module_format = curr_block.format if curr_block.format is not None else ''
block.update({
'scores': scores,
'total': total,
# ... (the rest of this example is omitted here) ...
Example 13: course_data
def course_data(request, course_id):
"""
Get course's data(title, short description), Total Points/Earned Points
or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
course = get_course_with_access(request.user, 'load', course_key, depth=None, check_if_enrolled=True)
access_response = has_access(request.user, 'load', course, course_key)
context={}
if course.has_started():
staff_access = bool(has_access(request.user, 'staff', course))
student = request.user
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
with outer_atomic():
field_data_cache = grades.field_data_cache_for_grading(course, student)
scores_client = ScoresClient.from_field_data_cache(field_data_cache)
title = course.display_name_with_default
loc = course.location.replace(category='about', name='short_description')
about_module = get_module(
request.user,
request,
loc,
field_data_cache,
log_if_not_found=False,
wrap_xmodule_display=False,
static_asset_path=course.static_asset_path,
course=course
)
short_description = about_module.render(STUDENT_VIEW).content
courseware_summary = grades.progress_summary(
student, request, course, field_data_cache=field_data_cache, scores_client=scores_client
)
grade_summary = grades.grade(
student, request, course, field_data_cache=field_data_cache, scores_client=scores_client
)
total_points = 0
earned_points = 0
for chapter in courseware_summary:
for section in chapter['sections']:
total_points += section['section_total'].possible
earned_points += section['section_total'].earned
percentage_points = float(earned_points)*(100.0/float(total_points))
context = {
"started": course.has_started(),
"course_image": course_image_url(course),
"total": total_points,
"earned": earned_points,
"percentage": percentage_points,
'title': title,
'short_description' : short_description,
'staff_access': staff_access,
'student': student.id,
'passed': is_course_passed(course, grade_summary),
}
else:
context={
"started": course.has_started(),
}
return JsonResponse(context)
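One caveat with Example 13 as shown: when a course has no graded points, total_points is 0 and the percentage computation raises ZeroDivisionError. A defensive variant, an assumption about the intended behaviour rather than a fix present in the original, would be:

# Guarded percentage computation; treats a course with no graded points as 0%.
if total_points:
    percentage_points = float(earned_points) * 100.0 / float(total_points)
else:
    percentage_points = 0.0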