本文整理汇总了Python中soil.util.expose_download函数的典型用法代码示例。如果您正苦于以下问题:Python expose_download函数的具体用法?Python expose_download怎么用?Python expose_download使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了expose_download函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: cache_file_to_be_served
def cache_file_to_be_served(tmp, checkpoint, download_id, format=None, filename=None, expiry=10*60*60):
    """Expose an export file (or a "no data" notice) for download via soil.

    ``tmp`` is either a path to a tempfile or a StringIO (their APIs are,
    unfortunately, not similar).  A falsy ``checkpoint`` means the export
    produced no data, in which case a small HTML apology page is exposed
    instead of an attachment.
    """
    if not checkpoint:
        # this just gives you a link saying there wasn't anything there
        expose_download("Sorry, there wasn't any data.", expiry,
                        content_disposition="",
                        mimetype="text/html",
                        download_id=download_id).save(expiry)
        return

    format = Format.from_format(format)
    try:
        # best-effort ascii transliteration; keep the original name on failure
        filename = unidecode(filename)
    except Exception:
        pass
    expose_download(Temp(tmp).payload, expiry,
                    mimetype=format.mimetype,
                    content_disposition='attachment; filename=%s.%s' % (filename, format.extension),
                    extras={'X-CommCareHQ-Export-Token': checkpoint.get_id},
                    download_id=download_id)
示例2: export_ucr_async
def export_ucr_async(report_export, download_id, user):
    """Render a UCR report export, expose it for download, and email the link."""
    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    # force the title to plain ascii; '/' would break the filename
    safe_title = report_export.title.encode('ascii', 'replace').decode('utf-8')
    filename = '{}.xlsx'.format(safe_title.replace('/', '?'))

    file_path = get_download_file_path(use_transfer, filename)
    report_export.create_export(file_path, Format.XLS_2007)
    expose_download(use_transfer, file_path, filename, download_id, 'xlsx')

    download_url = reverse("retrieve_download", args=[download_id],
                           params={"get_file": '1'}, absolute=True)
    send_report_download_email(report_export.title, user.get_email(), download_url)
示例3: build_form_multimedia_zip
def build_form_multimedia_zip(
        domain,
        xmlns,
        startdate,
        enddate,
        app_id,
        export_id,
        zip_name,
        download_id,
        export_is_legacy,
        user_types=None,
        group=None):
    """Collect multimedia attachments of matching forms into a zip and expose it.

    Progress is reported through ``DownloadBase.set_progress`` so the UI can
    poll it under ``download_id``.
    """
    form_ids = get_form_ids_having_multimedia(
        domain,
        app_id,
        xmlns,
        parse(startdate),
        parse(enddate),
        group=group,
        user_types=user_types,
    )
    properties = _get_export_properties(export_id, export_is_legacy)

    if not app_id:
        zip_name = 'Unrelated Form'

    forms_info = []
    for form in FormAccessors(domain).iter_forms(form_ids):
        if not zip_name:
            # no name given: fall back to the first form's transliterated name
            zip_name = unidecode(form.name or 'unknown form')
        forms_info.append(_extract_form_attachment_info(form, properties))

    num_forms = len(forms_info)
    DownloadBase.set_progress(build_form_multimedia_zip, 0, num_forms)

    if forms_info:
        all_case_ids = set.union(*[info['case_ids'] for info in forms_info])
    else:
        all_case_ids = set()
    case_id_to_name = _get_case_names(domain, all_case_ids)

    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    if use_transfer:
        fpath = _get_download_file_path(xmlns, startdate, enddate, export_id, app_id, num_forms)
    else:
        _, fpath = tempfile.mkstemp()

    _write_attachments_to_file(fpath, use_transfer, num_forms, forms_info, case_id_to_name)
    expose_download(use_transfer, fpath, "{}.zip".format(zip_name), download_id, 'zip')
    DownloadBase.set_progress(build_form_multimedia_zip, num_forms, num_forms)
示例4: export_async
def export_async(custom_export, download_id, format=None, filename=None, **kwargs):
    """Run a saved custom export.

    If the stored schema no longer matches (``SchemaMismatchException``),
    queue a schema rebuild and expose a short HTML error page under
    ``download_id`` instead of the export payload.
    """
    try:
        tmp, checkpoint = custom_export.get_export_files(format=format, process=export_async, **kwargs)
    # `except X, e:` was Python-2-only syntax and the bound exception was
    # never used; the bare form below is valid on Python 2.6+ and Python 3.
    except SchemaMismatchException:
        # fire off a delayed force update to prevent this from happening again
        rebuild_schemas.delay(custom_export.index)
        expiry = 10 * 60 * 60
        expose_download(
            "Sorry, the export failed for %s, please try again later" % custom_export._id,
            expiry,
            content_disposition="",
            mimetype="text/html",
            download_id=download_id
        ).save(expiry)
示例5: post
def post(self, request, *args, **kwargs):
    """Validate the bulk-upload form, stash the file in soil, and queue the import."""
    upload = request.FILES.get('bulk_upload_file')
    if not upload:
        messages.error(request, _('no file uploaded'))
        return self.get(request, *args, **kwargs)
    if not args:
        messages.error(request, _('no domain specified'))
        return self.get(request, *args, **kwargs)
    domain = args[0]

    # stash this in soil to make it easier to pass to celery
    file_ref = expose_download(upload.read(), expiry=60 * 60)
    task = import_locations_async.delay(domain, file_ref.download_id)
    file_ref.set_task(task)

    status_url = reverse(LocationImportStatusView.urlname,
                         args=[domain, file_ref.download_id])
    return HttpResponseRedirect(status_url)
示例6: post
def post(self, request, *args, **kwargs):
    """Stash the uploaded locations file in soil and queue the async import."""
    upload = request.FILES.get('locs')
    if not upload:
        return HttpResponse(_('no file uploaded'))
    if not args:
        return HttpResponse(_('no domain specified'))
    domain = args[0]
    update_existing = bool(request.POST.get('update'))

    # stash this in soil to make it easier to pass to celery
    file_ref = expose_download(upload.read(), expiry=60 * 60)
    task = import_locations_async.delay(domain, file_ref.download_id, update_existing)
    file_ref.set_task(task)

    status_url = reverse(LocationImportStatusView.urlname,
                         args=[domain, file_ref.download_id])
    return HttpResponseRedirect(status_url)
示例7: post
def post(self, request):
    """Stash the uploaded fixture file, validate its format, then hand off to celery."""
    replace = 'replace' in request.POST
    file_ref = expose_download(request.file.read(), expiry=60 * 60)

    # catch basic validation in the synchronous UI
    try:
        validate_file_format(file_ref.get_filename())
    except FixtureUploadError as e:
        messages.error(request, unicode(e))
        return HttpResponseRedirect(fixtures_home(self.domain))

    # hand off to async
    task = fixture_upload_async.delay(self.domain, file_ref.download_id, replace)
    file_ref.set_task(task)

    status_url = reverse(FixtureUploadStatusView.urlname,
                         args=[self.domain, file_ref.download_id])
    return HttpResponseRedirect(status_url)
示例8: import_locations_async
def import_locations_async(download_id, domain, file_ref_id, update_existing=False):
    """
    Asynchronously import locations. download_id is for showing
    the results to the user through soil. file_ref_id is also a
    download_id, but should be a pointer to the import file.
    """
    download_ref = DownloadBase.get(file_ref_id)
    with open(download_ref.get_filename(), 'rb') as stream:
        result_lines = import_locations(domain, stream, update_existing)
        results_msg = '\n'.join(result_lines)
    # expose the textual results for 3 hours under the caller-visible id
    ref = expose_download(results_msg, 60 * 60 * 3)
    cache.set(download_id, ref)
示例9: historical_import
def historical_import(request, domain):
    """POST: stash the uploaded history file and process it async; GET: show a form."""
    if request.method == "POST":
        file_ref = expose_download(request.FILES['history'].read(), expiry=60 * 60)
        download_id = uuid.uuid4().hex
        import_stock_reports_async.delay(download_id, domain, file_ref.download_id)
        return _async_in_progress(request, domain, download_id)
    return HttpResponse("""
<form method="post" action="" enctype="multipart/form-data">
<div><input type="file" name="history" /></div>
<div><button type="submit">Import historical stock reports</button></div>
</form>
""")
示例10: process_upload
def process_upload(self):
    """Persist the uploaded zip via soil and queue bulk multimedia processing."""
    # save the file w/ soil
    self.uploaded_file.file.seek(0)
    saved_file = expose_download(self.uploaded_file.file.read(),
                                 expiry=BulkMultimediaStatusCache.cache_expiry)
    processing_id = saved_file.download_id

    status = BulkMultimediaStatusCache(processing_id)
    status.save()

    uploader = self.request.couch_user
    process_bulk_upload_zip.delay(
        processing_id, self.domain, self.app_id,
        username=uploader.username if uploader else None,
        share_media=self.share_media,
        license_name=self.license_used,
        author=self.author,
        attribution_notes=self.attribution_notes,
        replace_existing=self.replace_existing,
    )
    return status.get_response()
示例11: location_import
def location_import(request, domain):
    """POST: stash the uploaded locations file and import async; GET: show a form."""
    if request.method == "POST":
        upload = request.FILES.get('locs')
        if not upload:
            return HttpResponse('no file uploaded')
        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(), expiry=60 * 60)
        download_id = uuid.uuid4().hex
        import_locations_async.delay(download_id, domain, file_ref.download_id)
        return _async_in_progress(request, domain, download_id)
    return HttpResponse("""
<form method="post" action="" enctype="multipart/form-data">
<div><input type="file" name="locs" /></div>
<div><button type="submit">Import locations</button></div>
</form>
""")
示例12: prepare_fixture_download
def prepare_fixture_download(table_ids, domain, task, download_id):
    """Prepare fixture data for Excel download.

    Builds a "types" sheet plus one sheet per data type into an in-memory
    XLS-2007 workbook and exposes it through soil under ``download_id``.
    Returns the soil download reference.
    """
    data_types_book, excel_sheets = _prepare_fixture(table_ids, domain, task=task)

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))

    # renamed from `file`/`format`: avoid shadowing the builtins of the same name
    workbook_file = StringIO()
    export_format = Format.XLS_2007
    export_raw(tuple(header_groups), tuple(value_groups), workbook_file, export_format)
    return expose_download(
        workbook_file.getvalue(),
        60 * 60 * 2,
        mimetype=Format.from_format(export_format).mimetype,
        content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
        download_id=download_id,
    )
示例13: post
def post(self, request, *args, **kwargs):
    """Validate the uploaded .xlsx product file and queue the async import."""
    upload = request.FILES.get('bulk_upload_file')
    if not upload:
        messages.error(request, _('no file uploaded'))
        return self.get(request, *args, **kwargs)
    if not upload.name.endswith('.xlsx'):
        messages.error(request, _('please use xlsx format only'))
        return self.get(request, *args, **kwargs)
    domain = args[0]

    # stash this in soil to make it easier to pass to celery
    file_ref = expose_download(upload.read(), expiry=60 * 60)
    task = import_products_async.delay(domain, file_ref.download_id)
    file_ref.set_task(task)

    status_url = reverse(ProductImportStatusView.urlname,
                         args=[domain, file_ref.download_id])
    return HttpResponseRedirect(status_url)
示例14: excel_config
def excel_config(request, domain):
    """
    Step one of three.
    This is the initial post when the user uploads the excel file
    named_columns:
        Whether or not the first row of the excel sheet contains
        header strings for the columns. This defaults to True and
        should potentially not be an option as it is always used
        due to how important it is to see column headers
        in the rest of the importer.
    """
    if request.method != "POST":
        return HttpResponseRedirect(base.ImportCases.get_url(domain=domain))
    if not request.FILES:
        return render_error(request, domain, "Please choose an Excel file to import.")

    named_columns = request.POST.get("named_columns") == "on"
    uploaded_file_handle = request.FILES["file"]
    extension = os.path.splitext(uploaded_file_handle.name)[1][1:].strip().lower()

    # NOTE: We may not always be able to reference files from subsequent
    # views if your worker changes, so we have to store it elsewhere
    # using the soil framework.
    if extension not in importer_util.ExcelFile.ALLOWED_EXTENSIONS:
        return render_error(
            request,
            domain,
            "The Excel file you chose could not be processed. "
            "Please check that it is saved as a Microsoft "
            "Excel 97/2000 .xls file.",
        )

    # stash content in the default storage for subsequent views
    file_ref = expose_download(uploaded_file_handle.read(), expiry=1 * 60 * 60)
    request.session[EXCEL_SESSION_ID] = file_ref.download_id
    spreadsheet = importer_util.get_spreadsheet(file_ref, named_columns)
    if not spreadsheet:
        return _spreadsheet_expired(request, domain)

    columns = spreadsheet.get_header_columns()
    row_count = spreadsheet.get_num_rows()
    if row_count == 0:
        return render_error(
            request, domain, "Your spreadsheet is empty. " "Please try again with a different spreadsheet."
        )

    case_types_from_apps = []
    # load types from all modules
    for row in ApplicationBase.view(
        "app_manager/types_by_module", reduce=True, group=True, startkey=[domain], endkey=[domain, {}]
    ).all():
        if row["key"][1] not in case_types_from_apps:
            case_types_from_apps.append(row["key"][1])

    case_types_from_cases = []
    # load types from all case records
    for row in CommCareCase.view(
        "hqcase/types_by_domain", reduce=True, group=True, startkey=[domain], endkey=[domain, {}]
    ).all():
        if row["key"][1] and row["key"][1] not in case_types_from_cases:
            case_types_from_cases.append(row["key"][1])

    # for this we just want cases that have data but aren't being used anymore.
    # A list comprehension (rather than filter()) keeps this a real list, so the
    # emptiness check below works on Python 3 too, where filter() returns an
    # iterator without len().
    case_types_from_cases = [ct for ct in case_types_from_cases if ct not in case_types_from_apps]

    if not case_types_from_apps and not case_types_from_cases:
        return render_error(
            request,
            domain,
            "No cases have been submitted to this domain and there are no "
            "applications yet. You cannot import case details from an Excel "
            "file until you have existing cases or applications.",
        )

    return render(
        request,
        "importer/excel_config.html",
        {
            "named_columns": named_columns,
            "columns": columns,
            "case_types_from_cases": case_types_from_cases,
            "case_types_from_apps": case_types_from_apps,
            "domain": domain,
            "report": {"name": "Import: Configuration"},
            "slug": base.ImportCases.slug,
        },
    )
示例15: download_item_lists
#.........这里部分代码省略.........
for data_type in data_types_book:
common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
field_vals = [field.field_name for field in data_type.fields] + empty_padding_list(max_fields - len(data_type.fields))
prop_vals = []
if type_field_properties.has_key(data_type.tag):
props = type_field_properties.get(data_type.tag)
prop_vals.extend([props.get(key, "") for key in field_prop_headers])
row = tuple(common_vals[2 if html_response else 0:] + field_vals + prop_vals)
types_sheet["rows"].append(row)
types_sheet["rows"] = tuple(types_sheet["rows"])
types_sheet["headers"] = tuple(types_sheet["headers"])
excel_sheets["types"] = types_sheet
# Prepare 'items' sheet data for each data-type
for data_type in data_types_book:
item_sheet = {"headers": [], "rows": []}
item_helpers = item_helpers_by_type[data_type.tag]
max_users = item_helpers["max_users"]
max_groups = item_helpers["max_groups"]
max_field_prop_combos = item_helpers["max_field_prop_combos"]
common_headers = ["UID", DELETE_HEADER]
user_headers = ["user %d" % x for x in range(1, max_users + 1)]
group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
field_headers = []
for field in data_type.fields:
if len(field.properties) == 0:
field_headers.append("field: " + field.field_name)
else:
prop_headers = []
for x in range(1, max_field_prop_combos[field.field_name] + 1):
for property in field.properties:
prop_headers.append("%(name)s: %(prop)s %(count)s" % {
"name": field.field_name,
"prop": property,
"count": x
})
prop_headers.append("field: %(name)s %(count)s" % {
"name": field.field_name,
"count": x
})
field_headers.extend(prop_headers)
item_sheet["headers"] = tuple(
common_headers[2 if html_response else 0:] + field_headers + user_headers + group_headers
)
excel_sheets[data_type.tag] = item_sheet
for item_row in data_items_book_by_type[data_type.tag]:
common_vals = [str(_id_from_doc(item_row)), "N"]
user_vals = [user.raw_username for user in item_row.users] + empty_padding_list(max_users - len(item_row.users))
group_vals = [group.name for group in item_row.groups] + empty_padding_list(max_groups - len(item_row.groups))
field_vals = []
for field in data_type.fields:
if len(field.properties) == 0:
if any(item_row.fields.get(field.field_name).field_list):
value = item_row.fields.get(field.field_name).field_list[0].field_value
else:
value = ""
field_vals.append(value)
else:
field_prop_vals = []
cur_combo_count = len(item_row.fields.get(field.field_name).field_list)
cur_prop_count = len(field.properties)
for count, field_prop_combo in enumerate(item_row.fields.get(field.field_name).field_list):
for property in field.properties:
field_prop_vals.append(field_prop_combo.properties.get(property, None) or "")
field_prop_vals.append(field_prop_combo.field_value)
padding_list_len = (max_field_prop_combos[field.field_name] - cur_combo_count) * (cur_prop_count + 1)
field_prop_vals.extend(empty_padding_list(padding_list_len))
# import pdb; pdb.set_trace();
field_vals.extend(field_prop_vals)
row = tuple(
common_vals[2 if html_response else 0:] + field_vals + user_vals + group_vals
)
item_sheet["rows"].append(row)
item_sheet["rows"] = tuple(item_sheet["rows"])
excel_sheets[data_type.tag] = item_sheet
if html_response:
return excel_sheets
header_groups = [("types", excel_sheets["types"]["headers"])]
value_groups = [("types", excel_sheets["types"]["rows"])]
for data_type in data_types_book:
header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))
fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'w') as temp:
export_raw(tuple(header_groups), tuple(value_groups), temp)
format = Format.XLS_2007
fl = open(path, 'r')
fileref = expose_download(
fl.read(),
60 * 10,
mimetype=Format.from_format(format).mimetype,
content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
)
return json_response({"download_id": fileref.download_id})