This article collects typical code examples of the Python method lms.djangoapps.instructor_task.models.ReportStore.from_config. If you are wondering what exactly ReportStore.from_config does, how to use it, or what calling it looks like in practice, the curated examples below may help. You can also read more about the class this method belongs to, lms.djangoapps.instructor_task.models.ReportStore.
Eleven code examples of the ReportStore.from_config method are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: verify_rows_in_csv
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def verify_rows_in_csv(self, expected_rows, file_index=0, verify_order=True, ignore_other_columns=False):
    """
    Verify that the last ReportStore CSV contains the expected content.

    Arguments:
        expected_rows (iterable): An iterable of dictionaries,
            where each dict represents a row of data in the last
            ReportStore CSV. Each dict maps keys from the CSV
            header to values in that row's corresponding cell.
        file_index (int): Describes which report store file to
            open. Files are ordered by last modified date, and 0
            corresponds to the most recently modified file.
        verify_order (boolean): When True (default), we verify that
            both the content and order of `expected_rows` match
            the actual csv rows. When False, we only verify that
            the content matches.
        ignore_other_columns (boolean): When True, we verify that `expected_rows`
            contains data which is a subset of the actual csv rows.
    """
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    report_csv_filename = report_store.links_for(self.course.id)[file_index][0]
    report_path = report_store.path_to(self.course.id, report_csv_filename)
    with report_store.storage.open(report_path) as csv_file:
        # Expand the dict reader generator so we don't lose its content
        csv_rows = [row for row in unicodecsv.DictReader(csv_file)]

        if ignore_other_columns:
            csv_rows = [
                {key: row.get(key) for key in expected_rows[index].keys()} for index, row in enumerate(csv_rows)
            ]

        if verify_order:
            self.assertEqual(csv_rows, expected_rows)
        else:
            self.assertItemsEqual(csv_rows, expected_rows)
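As a usage illustration only (not taken from the original test suite), a test that mixes in this helper might call it roughly as sketched below; the report task call, column names and cell values are invented assumptions.
# Hypothetical usage sketch of verify_rows_in_csv; written as a method of a test class
# that provides self.course and this mixin. Column names and values are illustrative.
def test_grade_report_contents(self):
    # ... trigger the grade report task for self.course here ...
    self.verify_rows_in_csv(
        [
            {u'Student ID': u'1', u'Email': u'student1@example.com', u'Grade': u'0.83'},
            {u'Student ID': u'2', u'Email': u'student2@example.com', u'Grade': u'0.0'},
        ],
        verify_order=False,          # accept any row ordering
        ignore_other_columns=True,   # only compare the columns listed above
    )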
Example 2: upload_csv_to_report_store
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Upload data as a CSV using ReportStore.

    Arguments:
        rows: CSV data in the following format (the first row may be a
            header):
            [
                [row1_column1, row1_column2, ...],
                ...
            ]
        csv_name: Name of the resulting CSV
        course_id: ID of the course

    Returns:
        report_name: string - Name of the generated report
    """
    report_store = ReportStore.from_config(config_name)
    report_name = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
    )

    report_store.store_rows(course_id, report_name, rows)
    tracker_emit(csv_name)
    return report_name
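A minimal calling sketch, assuming the helper above and the usual edX dependencies are importable; the course id and row data are made up for illustration.
# Hypothetical caller of upload_csv_to_report_store; the course id and rows are illustrative only.
from datetime import datetime

from pytz import UTC
from opaque_keys.edx.keys import CourseKey

course_id = CourseKey.from_string("course-v1:edX+DemoX+Demo_Course")
rows = [
    ["Username", "Grade"],   # header row
    ["student1", "0.83"],
    ["student2", "0.0"],
]
report_name = upload_csv_to_report_store(rows, "grade_report", course_id, datetime.now(UTC))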
Example 3: push_student_responses_to_s3
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def push_student_responses_to_s3(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a responses CSV file for students that
    have submitted problem responses, and store it using a `ReportStore`. Once
    created, the files can be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `links_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.
    """
    start_time = datetime.now(UTC)

    try:
        course = get_course_by_id(course_id)
    except ValueError as e:
        TASK_LOG.error(e.message)
        return "failed"

    rows = student_response_rows(course)

    # Generate parts of the file name
    timestamp_str = start_time.strftime("%Y-%m-%d-%H%M")
    course_id_prefix = course_filename_prefix_generator(course_id)

    # Perform the actual upload
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    report_store.store_rows(
        course_id,
        u"{course_id_prefix}_responses_report_{timestamp_str}.csv".format(
            course_id_prefix=course_id_prefix,
            timestamp_str=timestamp_str,
        ),
        rows
    )

    return "succeeded"
Example 4: create_report_store
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def create_report_store(self):
    """
    Create and return a DjangoStorageReportStore using the old
    S3ReportStore configuration.
    """
    connection = boto.connect_s3()
    connection.create_bucket(settings.GRADES_DOWNLOAD['BUCKET'])
    return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
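For context, `from_config(config_name)` reads a Django settings dictionary of the same name. The sketch below shows what a local-filesystem GRADES_DOWNLOAD setting might look like; the keys mirror the FINANCIAL_REPORTS override in Example 8, but the exact keys and storage types accepted depend on the edX release, so treat this as an assumption rather than a canonical configuration.
# Hypothetical Django settings sketch for the GRADES_DOWNLOAD report store.
# Keys follow the FINANCIAL_REPORTS override shown in Example 8; values are illustrative.
GRADES_DOWNLOAD = {
    'STORAGE_TYPE': 'localfs',            # or 's3', as in the FINANCIAL_REPORTS override
    'BUCKET': 'edx-grades',               # bucket (or top-level directory) for report files
    'ROOT_PATH': '/tmp/edx-s3/grades',    # path prefix under which reports are stored
}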
Example 5: download_url_for_last_report
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def download_url_for_last_report(self):
    """ Get the URL for the last report, if any """
    # Unfortunately this is a bit inefficient due to the ReportStore API
    if not self.last_export_result or self.last_export_result['error'] is not None:
        return None
    from lms.djangoapps.instructor_task.models import ReportStore
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    course_key = getattr(self.scope_ids.usage_id, 'course_key', None)
    return dict(report_store.links_for(course_key)).get(self.last_export_result['report_filename'])
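This pattern relies on `links_for()` returning `(filename, url)` pairs ordered by last modified date, newest first (see the docstring in Example 1). A small standalone sketch of that lookup, with a made-up course id:
# Sketch of the links_for() lookup pattern used above; the course id is hypothetical.
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.instructor_task.models import ReportStore

course_key = CourseKey.from_string("course-v1:edX+DemoX+Demo_Course")
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
links = report_store.links_for(course_key)        # iterable of (filename, url) pairs, newest first
latest_filename = links[0][0] if links else None  # filename of the most recent report
download_url = dict(links).get(latest_filename)   # or look up a specific report by name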
Example 6: get_csv_row_with_headers
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def get_csv_row_with_headers(self):
    """
    Helper function to return a list of the column names from the CSV file (the first row)
    """
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    report_csv_filename = report_store.links_for(self.course.id)[0][0]
    report_path = report_store.path_to(self.course.id, report_csv_filename)
    with report_store.storage.open(report_path) as csv_file:
        rows = unicodecsv.reader(csv_file, encoding='utf-8')
        return rows.next()
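A hedged usage sketch of how a test might use this helper; the report task call and the column names asserted below are invented.
# Hypothetical assertion against the header row, written as a method of a test class
# that provides self.course and this helper; the expected column names are illustrative.
def test_report_headers(self):
    # ... trigger a report task for self.course here ...
    header_row = self.get_csv_row_with_headers()
    self.assertIn('Student ID', header_row)
    self.assertIn('Grade', header_row)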
Example 7: test_delete_report
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def test_delete_report(self):
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    task_input = {'features': []}
    links = report_store.links_for(self.course.id)
    self.assertEquals(len(links), 0)

    with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
        upload_students_csv(None, None, self.course.id, task_input, 'calculated')

    links = report_store.links_for(self.course.id)
    self.assertEquals(len(links), 1)

    filename = links[0][0]
    report_store.delete_file(self.course.id, filename)

    links = report_store.links_for(self.course.id)
    self.assertEquals(len(links), 0)
Example 8: test_financial_report_overrides
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def test_financial_report_overrides(self):
    """
    Test that the CUSTOM_DOMAIN from FINANCIAL_REPORTS is used to construct the file url,
    instead of the domain defined via the AWS_S3_CUSTOM_DOMAIN setting.
    """
    with override_settings(FINANCIAL_REPORTS={
        'STORAGE_TYPE': 's3',
        'BUCKET': 'edx-financial-reports',
        'CUSTOM_DOMAIN': 'edx-financial-reports.s3.amazonaws.com',
        'ROOT_PATH': 'production',
    }):
        report_store = ReportStore.from_config(config_name="FINANCIAL_REPORTS")
        # Make sure the CUSTOM_DOMAIN from FINANCIAL_REPORTS is used to construct the file url
        self.assertIn("edx-financial-reports.s3.amazonaws.com", report_store.storage.url(""))
Example 9: _upload_exec_summary_to_store
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def _upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
    """
    Upload the Executive Summary HTML file using ReportStore.

    Arguments:
        data_dict: dict containing the executive report data.
        report_name: Name of the resulting HTML file.
        course_id: ID of the course
    """
    report_store = ReportStore.from_config(config_name)

    # Use the data dict and the HTML template to generate the output buffer
    output_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))

    report_store.store(
        course_id,
        u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
            course_prefix=course_filename_prefix_generator(course_id),
            report_name=report_name,
            timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
        ),
        output_buffer,
    )
    tracker_emit(report_name)
Example 10: export_data
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def export_data(course_id, source_block_id_str, block_types, user_ids, match_string):
    """
    Exports student answers to all supported questions to a CSV file.
    """
    start_timestamp = time.time()

    logger.debug("Beginning data export")
    try:
        course_key = CourseKey.from_string(course_id)
        usage_key = UsageKey.from_string(source_block_id_str)
    except InvalidKeyError:
        raise ValueError("Could not find the specified Block ID.")
    src_block = modulestore().get_item(usage_key)
    course_key_str = unicode(course_key)

    type_map = {cls.__name__: cls for cls in [MCQBlock, MRQBlock, RatingBlock, AnswerBlock]}

    if not block_types:
        block_types = tuple(type_map.values())
    else:
        block_types = tuple(type_map[class_name] for class_name in block_types)

    # Build an ordered list of blocks to include in the export
    blocks_to_include = []

    def scan_for_blocks(block):
        """ Recursively scan the course tree for blocks of interest """
        if isinstance(block, block_types):
            blocks_to_include.append(block)
        elif block.has_children:
            for child_id in block.children:
                try:
                    scan_for_blocks(block.runtime.get_block(child_id))
                except ItemNotFoundError:
                    # Blocks may refer to missing children. Don't break in this case.
                    pass

    scan_for_blocks(src_block)

    # Define the header row of our CSV:
    rows = []
    rows.append(
        ["Section", "Subsection", "Unit", "Type", "Question", "Answer", "Username", "User ID", "User E-mail"]
    )

    # Collect results for each block in blocks_to_include
    for block in blocks_to_include:
        if not user_ids:
            results = _extract_data(course_key_str, block, None, match_string)
            rows += results
        else:
            for user_id in user_ids:
                results = _extract_data(course_key_str, block, user_id, match_string)
                rows += results

    # Generate the CSV:
    filename = u"pb-data-export-{}.csv".format(time.strftime("%Y-%m-%d-%H%M%S", time.gmtime(start_timestamp)))
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    report_store.store_rows(course_key, filename, rows)

    generation_time_s = time.time() - start_timestamp
    logger.debug("Done data export - took {} seconds".format(generation_time_s))

    return {
        "error": None,
        "report_filename": filename,
        "start_timestamp": start_timestamp,
        "generation_time_s": generation_time_s,
        "display_data": [] if len(rows) == 1 else rows[1:1001]  # Limit to preview of 1000 items
    }
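As a follow-up illustration (not part of the original task), the returned dictionary pairs naturally with the `links_for()` lookup from Example 5 to turn `report_filename` into a download URL; the course and block ids below are hypothetical.
# Hypothetical consumer of export_data()'s return value; the ids are illustrative only.
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.instructor_task.models import ReportStore

course_id = "course-v1:edX+DemoX+Demo_Course"
block_id = "block-v1:edX+DemoX+Demo_Course+type@problem-builder+block@pb1"

result = export_data(course_id, block_id, block_types=None, user_ids=None, match_string="")
if result["error"] is None:
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    links = dict(report_store.links_for(CourseKey.from_string(course_id)))
    download_url = links.get(result["report_filename"])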
Example 11: upload_user_grades_csv
# Required import: from lms.djangoapps.instructor_task.models import ReportStore [as alias]
# Or: from lms.djangoapps.instructor_task.models.ReportStore import from_config [as alias]
def upload_user_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id` and the given usernames, generate a grades CSV file
    and store it using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `links_for()` on it.

    Unenrolled users and unknown usernames are stored in *_err_*.csv

    This task is very close to upload_grades_csv from instructor_task.tasks_helper.
    The difference is that we filter enrolled students against the requested usernames and
    we push info about this into PLP.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    extended_kwargs_id = _task_input.get("extended_kwargs_id")
    extended_kwargs = InstructorTaskExtendedKwargs.get_kwargs_for_id(extended_kwargs_id)
    usernames = extended_kwargs.get("usernames", None)
    err_rows = [["id", "username", "error_msg"]]
    if usernames is None:
        message = "Error occurred during edx task execution: no usernames in InstructorTaskExtendedKwargs."
        TASK_LOG.error(u'%s, Task type: %s, ' + message, task_info_string, action_name)
        err_rows.append(["-1", "__", message])
        usernames = []

    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    enrolled_students = enrolled_students.filter(username__in=usernames)
    total_enrolled_students = enrolled_students.count()
    requester_id = _task_input.get("requester_id")

    task_progress = TaskProgress(action_name, total_enrolled_students, start_time)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    teams_enabled = course.teams_enabled
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    teams_header = ['Team Name'] if teams_enabled else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]

    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

    # Loop over all our students and build our CSV lists in memory
    rows = []
    current_step = {'step': 'Calculating Grades'}

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students,
    )

    found_students = User.objects.filter(username__in=usernames)
    # Check for invalid usernames
    if len(found_students) != len(usernames):
        found_students_usernames = [x.username for x in found_students]
        for u in usernames:
            if u not in found_students_usernames:
                err_rows.append([-1, u, "invalid_username"])
    # Check for requested students who are not enrolled
    if found_students != enrolled_students:
        diff = found_students.exclude(id__in=enrolled_students)
        for u in diff:
            err_rows.append([u.id, u.username, "enrollment_for_username_not_found"])

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students
    )

    graded_assignments = course.grading.graded_assignments(course_id)
    grade_header = course.grading.grade_header(graded_assignments)

    rows.append(
        ["Student ID", "Email", "Username", "Last Name", "First Name", "Second Name", "Grade", "Grade Percent"] +
        grade_header +
        cohorts_header +
        group_configs_header +
        teams_header +
        ['Enrollment Track', 'Verification Status'] +
        # ......... (the rest of this code has been omitted) .........