This article collects typical usage examples of the Python class django.core.files.uploadedfile.UploadedFile. If you are wondering what the UploadedFile class is for, or how to use it in practice, the curated class code examples below should help.
Fifteen code examples of the UploadedFile class are shown below, ordered roughly by popularity.
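Before the project-specific examples, here is a minimal, hedged sketch of the class's basic interface: UploadedFile wraps an ordinary file object and carries upload metadata (name, content_type, size, charset), all of which are optional keyword arguments. The temporary file and its contents are purely illustrative.

import tempfile

from django.core.files.uploadedfile import UploadedFile

# any readable file object can be wrapped
tmp = tempfile.NamedTemporaryFile(suffix=".txt")
tmp.write(b"hello")
tmp.seek(0)

uploaded = UploadedFile(file=tmp, name="hello.txt",
                        content_type="text/plain", size=5, charset="utf-8")
print(uploaded.name, uploaded.size)   # hello.txt 5
print(uploaded.read())                # b'hello'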
Example 1: load
def load(self, file_storage):
    # rebuild an UploadedFile from the storage backend using the saved field data
    field_dict = self.data.copy()
    tmp_name = field_dict.pop('tmp_name')
    file_obj = UploadedFile(file=file_storage.open(tmp_name), **field_dict)
    file_obj.url = file_storage.url(tmp_name)
    return file_obj
Example 2: testFileUpload
def testFileUpload(self):
    from django.http import QueryDict, HttpRequest
    from tardis.tardis_portal.views import upload
    from django.core.files import File
    from django.core.files.uploadedfile import UploadedFile
    from django.utils.datastructures import MultiValueDict
    from os import path

    # create request.FILES object
    django_file = File(self.f1)
    uploaded_file = UploadedFile(file=django_file)
    uploaded_file.name = self.filename
    uploaded_file.size = self.f1_size

    post_data = [("enctype", "multipart/form-data")]
    post = QueryDict("&".join(["%s=%s" % (k, v) for (k, v) in post_data]))
    files = MultiValueDict({"Filedata": [uploaded_file]})

    # assemble a minimal POST request by hand and run the upload view
    request = HttpRequest()
    request.FILES = files
    request.POST = post
    request.method = "POST"
    response = upload(request, self.dataset.id)

    # the uploaded file should now exist on disk and be registered in the database
    test_files_db = models.Dataset_File.objects.filter(dataset__id=self.dataset.id)
    self.assertTrue(path.exists(path.join(self.dataset_path, self.filename)))
    self.assertTrue(self.dataset.id == 1)
    self.assertTrue(test_files_db[0].url == "tardis://testfile.txt")
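For comparison, a hedged sketch of building the same kind of request with Django's standard test helpers instead of a hand-assembled HttpRequest. RequestFactory and SimpleUploadedFile are part of Django; the URL, the file contents and dataset_id are illustrative, and upload is the view imported in the test above.

from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import RequestFactory

factory = RequestFactory()
request = factory.post(
    "/upload/",
    {"Filedata": SimpleUploadedFile("testfile.txt", b"some contents",
                                    content_type="text/plain")},
)
# request.FILES["Filedata"] is now an UploadedFile subclass instance
response = upload(request, dataset_id)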
Example 3: __init__
def __init__(self, path, file_name, content_type, charset):
    # defer opening the file: only record the path and metadata for now
    UploadedFile.__init__(self)
    self.path = path
    self.name = file_name
    self.content_type = content_type
    self.charset = charset
    self.file_handle = None
    self.mode = None
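The constructor above defers opening the file. A hedged, hypothetical sketch of the open/close methods such a path-backed UploadedFile subclass would typically pair with it (the class name is invented and this is not part of the original source):

class PathBackedUploadedFile(UploadedFile):
    # ... __init__ as shown above ...

    def open(self, mode="rb"):
        # lazily open the underlying file the first time it is needed
        self.mode = mode
        self.file_handle = open(self.path, mode)
        return self.file_handle

    def close(self):
        if self.file_handle is not None:
            self.file_handle.close()
            self.file_handle = None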
Example 4: test_unicode_file_names
def test_unicode_file_names(self):
    """
    Regression test for #8156: files with unicode names. I can't quite
    figure out the encoding situation between doctest and this file, but
    the actual repr doesn't matter; it just shouldn't return a unicode
    object.
    """
    uf = UploadedFile(name=u'¿Cómo?', content_type='text')
    self.assertEqual(type(uf.__repr__()), str)
Example 5: stitch_chunks
def stitch_chunks(self):
    # concatenate all stored chunks into a single file on disk ...
    f = open(os.path.join(settings.MEDIA_ROOT, cloud_path(self, self.filename)), "wb")
    for chunk in self.chunks.all().order_by("pk"):
        f.write(chunk.chunk.read())
    f.close()
    # ... then re-open it and save it into the upload FileField
    f = UploadedFile(open(f.name, "rb"))
    self.upload.save(self.filename, f)
    self.state = Upload.STATE_COMPLETE
    self.save()
    f.close()
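The pattern underneath is generic: FieldFile.save() copies whatever File/UploadedFile wrapper it is given into the configured storage backend and updates the model field. A minimal, hedged sketch of that pattern in isolation (the model instance and path are hypothetical):

from django.core.files import File

with open("/tmp/stitched/video.mp4", "rb") as fh:
    # copies the open file into the storage backend and persists the new name
    upload_row.upload.save("video.mp4", File(fh), save=True)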
Example 6: document_validator
def document_validator(filepath, ex_first_row, ex_first_col):
    try:
        with open(os.path.join(settings.MEDIA_ROOT, filepath), 'r') as f:
            file = UploadedFile(f)
            dialect = csv.Sniffer().sniff(file.readline(), delimiters=[';', ',', '\t'])
            mimetype = magic.from_buffer(file.readline(), mime=True)
            file.seek(0)
            reader = csv.reader(file, dialect)
            temp_list = []
            for line in iter(reader):
                if reader.line_num == 1:
                    # save first row
                    temp_list.append(line)
            # save last row
            temp_list.append(line)
            print(ex_first_row)
            if not ex_first_row:
                print("Ciao")
            # check char in first row and first col
            if not ex_first_row and not float(temp_list[0][-1]):
                print('Hola')
                raise ValueError
            if not ex_first_col and not float(temp_list[-1][0]):
                print('Hello')
                raise ValueError
            ncol = (len(temp_list[0]) - 1) if ex_first_col else len(temp_list[0])
            nrow = (reader.line_num - 1) if ex_first_col else reader.line_num
            if nrow <= 2:
                print('Hey')
                raise ValueError
            is_cubic = (ncol == nrow)
            return_value = {'is_valid': True, 'nrow': nrow, 'ncol': ncol,
                            'separator': dialect.delimiter,
                            'mimetype': mimetype, 'is_cubic': is_cubic}
    except csv.Error:
        print("Csv")
        return_value = {'is_valid': False}
        file = None
    # ValueError must be caught before the generic Exception handler,
    # otherwise this branch can never run
    except ValueError:
        return_value = {'is_valid': False}
        print("Value")
        file = file
    except Exception:
        return_value = {'is_valid': False}
        print("Exc")
        file = None
    return return_value, file
Example 7: save
def save(self, *args, **kwargs):
    s = BytesIO()
    data = zipfile.ZipFile(s, 'a')
    projectfiles = fileobject.objects.filter(project=self.project)
    for filedata in projectfiles:
        filed = filedata.filename.read()
        # this is where subfolders are added inside the zip file
        pathAndName = str(self.project.title) + filedata.subfolder + os.path.split(str(filedata.filename))[1]
        data.writestr(pathAndName, filed)
    data.close()
    s.seek(0)
    filedata = UploadedFile(s)
    filedata.name = self.project.title + ".zip"
    self.filename = filedata
    super(zippedObjectProxy, self).save(generate=False, *args, **kwargs)
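For fully in-memory data like the zip buffer above, django.core.files.base.ContentFile is a common, slightly simpler alternative to wrapping the BytesIO in UploadedFile. A hedged sketch of the equivalent assignment inside the same method:

from django.core.files.base import ContentFile

# ContentFile takes the raw bytes and an optional name directly
self.filename = ContentFile(s.getvalue(), name=self.project.title + ".zip")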
Example 8: value_from_datadict
def value_from_datadict(self, data, files, name):
    # we cache the return value of this function, since it is called a
    # bunch of times, and is expensive
    if self._cached_value is self._sentinel:
        upload = super().value_from_datadict(data, files, name)
        if upload != FILE_INPUT_CONTRADICTION:
            self.signed_path = data.get(self.signed_path_field_name(name), None)
            data_uri = data.get(self.data_uri_field_name(name))
            has_file = (upload or data_uri)
            # the path to the cached uploaded file
            path = None
            # if we have a cache key, and no file, fetch the file from the cache
            if self.signed_path and not has_file:
                try:
                    path = self.signer.unsign(self.signed_path)
                except BadSignature:
                    # False means the field value should be cleared, which
                    # is the best thing we can do in this case. It
                    # shouldn't happen anyways.
                    self.signed_path = ""
                    self._cached_value = None
                    return self._cached_value
            elif has_file:
                # we have a file, so write it to disk, just in case form validation fails
                with NamedTemporaryFile(prefix="".join(CHOICES[x % 64] for x in os.urandom(16)),
                                        suffix=".jpg", dir=self.tmp_dir, delete=False) as f:
                    # write the uploaded file to disk, or the data from the data URI
                    try:
                        if upload:
                            f.write(upload.read())
                        else:
                            f.write(b64decode(data_uri[data_uri.find(",") + 1:]))
                    except Error:
                        pass
                    else:
                        path = f.name
                        self.signed_path = self.signer.sign(f.name)
            if path:
                upload = UploadedFile(open(path, "rb"), name=path, size=os.path.getsize(path))
                # tack on a URL attribute so the parent Widget thinks it
                # has an initial value
                upload.url = settings.MEDIA_URL + os.path.relpath(upload.file.name, settings.MEDIA_ROOT)
        self._cached_value = upload
    return self._cached_value
Example 9: upload_save_process
def upload_save_process(request):
    """
    save file into local storage
    """
    f = request.FILES["file"]
    wrapper_f = UploadedFile(f)
    # give the stored file a unique title while keeping its extension
    name, filetype = split_name(wrapper_f.name)
    obj = ProcessedFile()
    obj.title = name + str(uuid.uuid4()) + "." + filetype
    wrapper_f.name = obj.title
    obj.file_obj = f
    obj.file_type = filetype if filetype != " " else "unknown"
    obj.save()
    return wrapper_f
Example 10: smart_load_from_upload
def smart_load_from_upload(classname: str, f: UploadedFile) -> BasePriceList:
    '''
    Attempt to intelligently load the given Django UploadedFile,
    interpreting it as a price list for the given schedule class name.

    If interpreting it under the preferred schedule results in either
    a ValidationError or no valid rows, attempts will be made to
    re-interpret the file under different schedules. The first one found
    that yields a better interpretation of the data will be returned.

    If no better matches are found, the original result or exception
    (from interpreting the data under the preferred price list) will
    be returned.
    '''
    original_error = None
    pricelist: Optional[BasePriceList] = None

    try:
        pricelist = load_from_upload(classname, f)
    except ValidationError as e:
        original_error = e

    if original_error or (pricelist and not pricelist.valid_rows):
        # See if any of our other registered schedules can make better
        # sense of it.
        next_best_pricelist = None
        for fallback, _ in CHOICES:
            if fallback == classname:
                continue
            try:
                f.seek(0)
                next_best_pricelist = load_from_upload(fallback, f)
                if next_best_pricelist.valid_rows:
                    pricelist = next_best_pricelist
                    break
            except ValidationError:
                pass

    if pricelist is None:
        default_error = ValidationError('Unrecognized price list!')
        raise original_error or default_error

    return pricelist
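A hedged sketch of a call site for this helper, for instance inside a form's clean() method. The form fields and the 'pricelist' key are hypothetical; only smart_load_from_upload and ValidationError come from the code above.

def clean(self):
    cleaned_data = super().clean()
    schedule = cleaned_data.get('schedule')
    uploaded = cleaned_data.get('file')
    if schedule and uploaded:
        try:
            cleaned_data['pricelist'] = smart_load_from_upload(schedule, uploaded)
        except ValidationError as e:
            # surface parsing problems on the file field
            self.add_error('file', e)
    return cleaned_data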
Example 11: parse_file
def parse_file(self, file: UploadedFile, _) -> Iterable[Article]:
    data = file.read()
    try:
        for para in self.split_file(data):
            yield self.parse_document(para)
    except ApaError:
        log.error("APA parse attempt failed.")
        if settings.DEBUG:
            log.error("The generated HTML can be found in /tmp/apa_unrtf.html")
        raise
Example 12: fileupload
def fileupload(request):
    if request.method == 'POST':
        cc = request.POST.get('cc')
        myexcel = request.FILES['files[]']
        excel_obj = UploadedFile(myexcel)
        workbook = xlrd.open_workbook(file_contents=excel_obj.read())
        all_worksheets = workbook.sheet_names()
        worksheet_name = all_worksheets[0]
        worksheet = workbook.sheet_by_name(worksheet_name)
        # dump every row of the first sheet
        for rownum in range(worksheet.nrows):
            tmp = []
            for entry in worksheet.row_values(rownum):
                tmp.append(entry)
            print(tmp)
    return JsonResponse({'status': 'fileupload_ok'})
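A hedged sketch of exercising this view with Django's test client. The URL is assumed, and "data.xls" stands in for a real spreadsheet on disk; the test client encodes the open file as a multipart upload, so it arrives in request.FILES['files[]'].

from django.test import Client

client = Client()
with open("data.xls", "rb") as fh:
    response = client.post("/fileupload/", {"cc": "42", "files[]": fh})
assert response.json() == {"status": "fileupload_ok"}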
Example 13: file_complete
def file_complete(self, file_size):
    mp_file_size = sum([part.size for part in self.mp])
    if file_size > self.max_file_size or mp_file_size > self.max_file_size:
        raise FileExceedsSizeLimit
    if file_size != mp_file_size:
        raise Exception("Uploaded file size doesn't match computed file size.")
    self.mp.complete_upload()

    # save local metadata
    self.row.size = file_size
    self.row.save()
    self.row.releaseLock()

    uploaded_file = UploadedFile()
    uploaded_file.download_key = self.row.getKey()
    uploaded_file.name = self.row.filename
    return uploaded_file
Example 14: _decode_files
def _decode_files(self, files):
    """
    Helper method that, when given *files* -- a ``dict`` with the
    structure::

        {
            "<field_name>": {
                "file_storage_key": "<unicode>",
                "name": "<unicode>",
                "content_type": "<unicode>",
                "size": "<int>",
                "charset": "<unicode>",
            },
            ...
        }

    returns a new ``dict`` with the structure::

        {
            "<field_name>": <UploadedFile object>,
            ...
        }
    """
    if files is None:
        return None

    decoded = {}
    for name, data in files.items():
        key = data.pop('file_storage_key')
        uploaded_file = UploadedFile(file=self.file_storage.open(key), **data)
        # In order to ensure that files aren't repeatedly saved to the file
        # storage, the filename of each file in the file storage is added
        # to ``UploadedFile`` objects as a ``_wizard_file_storage_key``
        # attribute when they're decoded. This acts as a marker to indicate
        # that the file already exists in the file storage.
        uploaded_file._wizard_file_storage_key = key
        decoded[name] = uploaded_file
    return decoded
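For context, a hedged sketch of the encoding direction such a decoder usually assumes. This is not the original project's code, only a plausible inverse that follows the dictionary structure and the _wizard_file_storage_key convention documented above; file_storage is any Django storage backend.

def _encode_files(self, files):
    encoded = {}
    for name, uploaded_file in files.items():
        # reuse the storage key if the file was already persisted
        key = getattr(uploaded_file, '_wizard_file_storage_key', None)
        if key is None:
            key = self.file_storage.save(uploaded_file.name, uploaded_file)
        encoded[name] = {
            'file_storage_key': key,
            'name': uploaded_file.name,
            'content_type': uploaded_file.content_type,
            'size': uploaded_file.size,
            'charset': uploaded_file.charset,
        }
    return encoded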
Example 15: save_document
def save_document(request_file, content_subdir, related_obj, ashash=True):
    uploadedfile = UploadedFile(request_file)
    file_content = uploadedfile.read()

    doc_obj = Document()
    doc_obj.filehash = md5(file_content).hexdigest()
    doc_obj.urlencfilename = quote(uploadedfile.name)
    doc_obj.filename = uploadedfile.name
    doc_obj.content_type = uploadedfile.file.content_type
    if ashash:
        doc_obj.filepath = settings.BASE_DIR + content_subdir + doc_obj.filehash
    else:
        doc_obj.filepath = settings.BASE_DIR + content_subdir + doc_obj.filename

    # attach the document to the related object (or to the sole member of a queryset)
    if related_obj.__class__.__name__.lower() == "queryset":
        if len(related_obj) == 1:
            setattr(doc_obj, related_obj[0].__class__.__name__.lower(), related_obj[0])
        else:
            print("ERROR: The queryset object had %s elements to it" % str(len(related_obj)))
    else:
        setattr(doc_obj, related_obj.__class__.__name__.lower(), related_obj)
    doc_obj.save()

    # write the raw bytes to the chosen path
    wfile = open(doc_obj.filepath, "wb")
    wfile.write(file_content)
    wfile.close()