本文整理汇总了Python中io.BufferedWriter.write方法的典型用法代码示例。如果您正苦于以下问题:Python BufferedWriter.write方法的具体用法?Python BufferedWriter.write怎么用?Python BufferedWriter.write使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类io.BufferedWriter
的用法示例。
在下文中一共展示了BufferedWriter.write方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: consecutive_download
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
def consecutive_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Download a node's content in a single (non-ranged) streaming request.

    :param node_id: ID of the node whose content is fetched
    :param file: writable binary file object that receives the content
    Keyword args:
        write_callback: called with each chunk after it is written
        length (int): expected content length, compared against the response
    :raises RequestError: on a non-OK status code or on a read timeout
    """
    r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True)
    if r.status_code not in OK_CODES:
        raise RequestError(r.status_code, r.text)

    write_callback = kwargs.get('write_callback', None)

    # fixed: content-length may be missing (e.g. chunked encoding);
    # int(None) would raise TypeError, so default to 0
    total_ln = int(r.headers.get('content-length', 0))
    length = kwargs.get('length', None)
    if length and total_ln != length:
        logging.info('Length mismatch: argument %d, content %d' % (length, total_ln))

    pgo = progress.Progress()
    curr_ln = 0
    try:
        for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
            if chunk:  # filter out keep-alive new chunks
                file.write(chunk)
                file.flush()
                if write_callback:
                    write_callback(chunk)
                curr_ln += len(chunk)
                pgo.print_progress(total_ln, curr_ln)
    except (ConnectionError, ReadTimeoutError) as e:
        raise RequestError(RequestError.CODE.READ_TIMEOUT, '[acd_cli] Timeout. ' + e.__str__())
    finally:
        # fixed: previously the response was only closed on success
        r.close()
    print()  # break progress line
    return
示例2: PANDAUploadBackend
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
class PANDAUploadBackend(AbstractUploadBackend):
    """
    Customized backend to handle AJAX uploads.
    """
    def update_filename(self, request, filename):
        """
        Verify that the filename is unique; if it isn't, append and iterate
        a counter until it is.
        """
        self._original_filename = filename

        filename = self._original_filename
        root, ext = os.path.splitext(self._original_filename)
        path = os.path.join(settings.MEDIA_ROOT, filename)

        i = 1
        while os.path.exists(path):
            filename = '%s%i%s' % (root, i, ext)
            path = os.path.join(settings.MEDIA_ROOT, filename)
            i += 1

        return filename

    def setup(self, filename):
        """
        Open the destination file for writing.
        """
        self._path = os.path.join(settings.MEDIA_ROOT, filename)

        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            # fixed: was a bare except -- directory usually exists already;
            # any real problem surfaces when FileIO opens the path below
            pass

        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        """
        Write a chunk of data to the destination.
        """
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """
        Close the destination file and create an Upload object in the
        database recording its existence.
        """
        self._dest.close()

        # fixed: removed unused os.path.splitext() result
        path = os.path.join(settings.MEDIA_ROOT, filename)
        size = os.path.getsize(path)

        upload = Upload.objects.create(
            filename=filename,
            original_filename=self._original_filename,
            size=size)

        return {'id': upload.id}
示例3: LocalUploadBackend
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
class LocalUploadBackend(AbstractUploadBackend):
    """Upload backend that stores files under MEDIA_ROOT in a
    date-expanded upload directory."""
    UPLOAD_DIR = ajaxuploader_settings.UPLOAD_DIRECTORY
    # TODO: allow this to be overridden per-widget/view

    def setup(self, filename):
        """Compute the destination path and open it for buffered writing."""
        self._relative_path = os.path.normpath(
            os.path.join(
                force_unicode(
                    datetime.datetime.now().strftime(  # allow %Y, %s, etc
                        smart_str(self.UPLOAD_DIR))),
                filename))
        self._path = os.path.join(settings.MEDIA_ROOT, self._relative_path)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            # fixed: was a bare except -- directory usually exists already;
            # any real problem surfaces when FileIO opens the path below
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        """Append one chunk of the upload to the destination file."""
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """Close the destination and report its MEDIA-relative path."""
        self._dest.close()
        return {"path": self._relative_path}

    def update_filename(self, request, filename):
        """Sanitize the client-supplied filename before it is used."""
        return ajaxuploader_settings.SANITIZE_FILENAME(filename)
示例4: chunked_download
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
def chunked_download(self, node_id: str, file: io.BufferedWriter, **kwargs):
    """Download a node's content in CHUNK_SIZE'd ranged requests.

    :param node_id: ID of the node whose content is fetched
    :param file: writable binary file object that receives the content
    :param kwargs:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks (list[function]): each called with every chunk written
    :raises RequestError: when a chunk fails more than CHUNK_MAX_RETRY times
    """
    ok_codes = [http.PARTIAL_CONTENT]
    write_callbacks = kwargs.get("write_callbacks", [])
    chunk_start = kwargs.get("offset", 0)
    # default length is effectively unbounded (100 TiB)
    length = kwargs.get("length", 100 * 1024 ** 4)

    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + CHUNK_SIZE - 1
        if chunk_end >= length:
            chunk_end = length - 1

        if retries >= CHUNK_MAX_RETRY:
            raise RequestError(
                RequestError.CODE.FAILED_SUBREQUEST, "[acd_api] Downloading chunk failed multiple times."
            )
        r = self.BOReq.get(
            self.content_url + "nodes/" + node_id + "/content",
            stream=True,
            acc_codes=ok_codes,
            headers={"Range": "bytes=%d-%d" % (chunk_start, chunk_end)},
        )

        logger.debug("Range %d-%d" % (chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            logger.debug("Invalid byte range requested %d-%d" % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            r.close()
            retries += 1
            # fixed: use the module logger consistently (was logging.debug)
            logger.debug("Chunk [%d-%d], retry %d." % (chunk_start, chunk_end, retries))
            continue

        curr_ln = 0
        try:
            for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
                    file.flush()
                    for wcb in write_callbacks:
                        wcb(chunk)
                    curr_ln += len(chunk)
        finally:
            r.close()
        chunk_start += CHUNK_SIZE
        retries = 0
    return
示例5: LocalUploadBackend
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
class LocalUploadBackend(AbstractUploadBackend):
    """Upload backend that writes files to MEDIA_ROOT/UPLOAD_DIR."""
    UPLOAD_DIR = getattr(settings, "UPLOAD_DIR", "uploads")

    def setup(self, filename, *args, **kwargs):
        """Open the destination file under MEDIA_ROOT/UPLOAD_DIR for writing."""
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            # fixed: was a bare except -- directory usually exists already;
            # any real problem surfaces when FileIO opens the path below
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk, *args, **kwargs):
        """Write one chunk of the upload to the destination file."""
        self._dest.write(chunk)

    def upload_complete(self, request, filename, *args, **kwargs):
        """Close the destination and return its MEDIA_URL path."""
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename, *args, **kwargs):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        filename = os.path.basename(filename)
        self._dir = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR)
        # fixed: probe with os.path.isfile instead of open() -- the previous
        # loop opened candidate files and never closed them (handle leak)
        if not os.path.isfile(os.path.join(self._dir, filename)):
            return filename
        filename_no_extension, extension = os.path.splitext(filename)
        filename_suffix = 1
        while os.path.isfile(os.path.join(
                self._dir, filename_no_extension + str(filename_suffix) + extension)):
            filename_suffix += 1
        return filename_no_extension + str(filename_suffix) + extension

    @property
    def path(self):
        """
        Return a path of file uploaded
        """
        return self._path
示例6: chunked_download
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
def chunked_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Download a node's content in CHUNK_SIZE'd ranged requests.

    Keyword args:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callback: called with each chunk after it is written
    :raises RequestError: when a chunk fails more than CHUNK_MAX_RETRY times
        or the connection times out
    """
    ok_codes = [http.PARTIAL_CONTENT]
    write_callback = kwargs.get('write_callback', None)
    # default length is effectively unbounded (100 TiB)
    length = kwargs.get('length', 100 * 1024 ** 4)

    pgo = progress.Progress()
    chunk_start = kwargs.get('offset', 0)
    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + CHUNK_SIZE - 1
        if chunk_end >= length:
            chunk_end = length - 1

        if retries >= CHUNK_MAX_RETRY:
            raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                               '[acd_cli] Downloading chunk failed multiple times.')
        r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True,
                               acc_codes=ok_codes,
                               headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

        logger.debug('Range %d-%d' % (chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            r.close()
            retries += 1
            # fixed: arguments were ordered (retries, start, end), which
            # mislabeled the byte range in the log message; also use the
            # module logger instead of the root logger
            logger.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
            continue

        try:
            curr_ln = 0
            for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
                    file.flush()
                    if write_callback:
                        write_callback(chunk)
                    curr_ln += len(chunk)
                    pgo.print_progress(length, curr_ln + chunk_start)
            chunk_start += CHUNK_SIZE
            retries = 0
            r.close()
        except (ConnectionError, ReadTimeoutError) as e:
            file.close()
            raise RequestError(RequestError.CODE.READ_TIMEOUT, '[acd_cli] Timeout. ' + e.__str__())

    print()  # break progress line
    return
示例7: chunked_download
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
def chunked_download(self, node_id: str, file: io.BufferedWriter, **kwargs):
    """Download a node's content in configurable-size ranged requests.

    :param node_id: ID of the node whose content is fetched
    :param file: writable binary file object that receives the content
    :param kwargs:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks (list[function])
    :raises RequestError: when a chunk fails more than the configured
        number of retries
    """
    ok_codes = [http.PARTIAL_CONTENT]
    write_callbacks = kwargs.get('write_callbacks', [])
    chunk_start = kwargs.get('offset', 0)
    # default length is effectively unbounded (100 TiB)
    length = kwargs.get('length', 100 * 1024 ** 4)

    dl_chunk_sz = self._conf.getint('transfer', 'dl_chunk_size')

    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + dl_chunk_sz - 1
        if chunk_end >= length:
            chunk_end = length - 1

        if retries >= self._conf.getint('transfer', 'chunk_retries'):
            raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                               '[acd_api] Downloading chunk failed multiple times.')
        r = self.BOReq.get(self.content_url + 'nodes/' + node_id + '/content', stream=True,
                           acc_codes=ok_codes,
                           headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

        logger.debug('Node "%s", range %d-%d' % (node_id, chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            r.close()
            logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            r.close()
            retries += 1
            # fixed: use the module logger consistently (was logging.debug)
            logger.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
            continue

        curr_ln = 0
        try:
            for chunk in r.iter_content(chunk_size=self._conf.getint('transfer', 'fs_chunk_size')):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
                    file.flush()
                    for wcb in write_callbacks:
                        wcb(chunk)
                    curr_ln += len(chunk)
        finally:
            r.close()
        # advance by what was actually written (file.tell()), not by
        # dl_chunk_sz -- presumably to resume correctly after a short read
        chunk_start = file.tell()
        retries = 0

    return
示例8: LocalUploadBackend
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
class LocalUploadBackend(AbstractUploadBackend):
UPLOAD_DIR = "uploads"
def setup(self, filename):
self._path = os.path.join(
settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
try:
os.makedirs(os.path.realpath(os.path.dirname(self._path)))
except:
pass
self._dest = BufferedWriter(FileIO(self._path, "w"))
def upload_chunk(self, chunk):
self._dest.write(chunk)
def upload_complete(self, request, filename):
path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
self._dest.close()
return {"path": path}
def update_filename(self, request, filename):
"""
Returns a new name for the file being uploaded.
Ensure file with name doesn't exist, and if it does,
create a unique filename to avoid overwriting
"""
self._dir = os.path.join(
settings.MEDIA_ROOT, self.UPLOAD_DIR)
unique_filename = False
filename_suffix = 0
print "orig filename: " + os.path.join(self._dir, filename)
# Check if file at filename exists
if os.path.isfile(os.path.join(self._dir, filename)):
while not unique_filename:
try:
if filename_suffix == 0:
open(os.path.join(self._dir, filename))
else:
filename_no_extension, extension = os.path.splitext(filename)
print "filename all ready exists. Trying " + filename_no_extension + str(filename_suffix) + extension
open(os.path.join(self._dir, filename_no_extension + str(filename_suffix) + extension))
filename_suffix += 1
except IOError:
unique_filename = True
if filename_suffix == 0:
print "using filename: " + os.path.join(self._dir, filename)
return filename
else:
print "using filename: " + filename_no_extension + str(filename_suffix) + extension
return filename_no_extension + str(filename_suffix) + extension
示例9: chunked_download
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
def chunked_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Download a node's content in CHUNK_SIZE'd ranged requests.

    Keyword args:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks: (list[function])
    :raises RequestError: when a chunk fails more than CHUNK_MAX_RETRY times
    """
    ok_codes = [http.PARTIAL_CONTENT]
    write_callbacks = kwargs.get('write_callbacks', [])

    chunk_start = kwargs.get('offset', 0)
    # default length is effectively unbounded (100 TiB)
    length = kwargs.get('length', 100 * 1024 ** 4)

    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + CHUNK_SIZE - 1
        if chunk_end >= length:
            chunk_end = length - 1

        if retries >= CHUNK_MAX_RETRY:
            raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                               '[acd_cli] Downloading chunk failed multiple times.')
        r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True,
                               acc_codes=ok_codes,
                               headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

        logger.debug('Range %d-%d' % (chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            r.close()
            retries += 1
            # fixed: use the module logger consistently (was logging.debug)
            logger.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
            continue

        curr_ln = 0
        try:
            # connection exceptions occur here
            for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
                    file.flush()
                    for wcb in write_callbacks:
                        wcb(chunk)
                    curr_ln += len(chunk)
        finally:
            # fixed: previously r.close() was only reached on success, so a
            # connection exception mid-chunk leaked the response connection
            r.close()
        chunk_start += CHUNK_SIZE
        retries = 0
    return
示例10: LocalUploadBackend
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
class LocalUploadBackend(AbstractUploadBackend):
    """Upload backend that writes files to MEDIA_ROOT/uploads."""
    UPLOAD_DIR = "uploads"

    def setup(self, filename):
        """Open the destination file under MEDIA_ROOT/UPLOAD_DIR for writing."""
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            # fixed: was a bare except -- directory usually exists already;
            # any real problem surfaces when FileIO opens the path below
            pass
        self._dest = BufferedWriter(FileIO(self._path, "wb"))

    def upload_chunk(self, chunk):
        """Write one chunk of the upload to the destination file."""
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """Close the destination and return its MEDIA_URL path."""
        # fixed: the writer was never closed, leaking the file handle and
        # risking buffered data never being flushed to disk
        self._dest.close()
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        return {"path": path}
示例11: LocalUploadBackend
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
class LocalUploadBackend(AbstractUploadBackend):
    """Upload backend that stores files via Django's default storage
    under a temporary directory and renders a thumbnail on completion."""

    UPLOAD_DIR = 'tmp'

    def update_filename(self, request, filename):
        """Slugify the base name, keeping the original extension."""
        base, extension = os.path.splitext(filename)
        return slughifi(base) + extension

    def setup(self, filename):
        """Reserve a storage path for the upload and open it for writing."""
        self._path = os.path.join(self.UPLOAD_DIR, filename)
        # default_storage.save() creates the (empty) file and returns the
        # name it actually used
        self.path = default_storage.save(self._path, ContentFile(''))
        self._abs_path = default_storage.path(self.path)
        self._dest = BufferedWriter(FileIO(self._abs_path, "w"))

    def upload_chunk(self, chunk):
        """Append one chunk of the upload to the destination file."""
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """Close the file and return its path plus a rendered thumbnail."""
        self._dest.close()
        context = {'thumbnail_path': self._path, 'file_name': filename, }
        thumbnail = render_to_string('ajaxupload/includes/thumbnail.html', context)
        return {"path": self._path, 'thumbnail': thumbnail}
示例12: LocalUploadBackend
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
class LocalUploadBackend(AbstractUploadBackend):
    """Upload backend that writes files to MEDIA_ROOT/uploads and can
    produce display-sized copies of uploaded images."""
    UPLOAD_DIR = "uploads"

    def setup(self, filename, *args, **kwargs):
        """Open the destination file under MEDIA_ROOT/UPLOAD_DIR for writing."""
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            # fixed: was a bare except -- directory usually exists already;
            # any real problem surfaces when FileIO opens the path below
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk, *args, **kwargs):
        """Write one chunk of the upload to the destination file."""
        self._dest.write(chunk)

    def upload_complete(self, request, filename, *args, **kwargs):
        """Close the destination and return its MEDIA_URL path."""
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename, *args, **kwargs):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        self._dir = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR)
        # fixed: probe with os.path.isfile instead of open() -- the previous
        # loop opened candidate files and never closed them (handle leak)
        if not os.path.isfile(os.path.join(self._dir, filename)):
            return filename
        filename_no_extension, extension = os.path.splitext(filename)
        filename_suffix = 1
        while os.path.isfile(os.path.join(
                self._dir, filename_no_extension + str(filename_suffix) + extension)):
            filename_suffix += 1
        return filename_no_extension + str(filename_suffix) + extension

    def resize_for_display(self, filename, width, height):
        """Resize an uploaded image to fit within width x height while
        preserving aspect ratio; images already small enough are untouched.

        :returns: dict with the MEDIA_URL path of the displayed image and
            the original image size
        """
        upload_dir_path = os.path.join(settings.MEDIA_ROOT, self.UPLOAD_DIR) + "/"
        original_path = upload_dir_path + filename
        filename_no_extension, extension = os.path.splitext(filename)
        need_ratio = float(width) / float(height)
        im = Image.open(original_path)
        real_width, real_height = [float(x) for x in im.size]
        real_ratio = real_width / real_height
        if real_width > width or real_height > height:
            # image is proportionally wider than the target box ->
            # constrain by width; otherwise constrain by height
            if real_ratio > need_ratio:
                displayed_width = width
                displayed_height = int(width / real_ratio)
            else:
                displayed_height = height
                displayed_width = int(height * real_ratio)
            resized_im = im.resize((displayed_width, displayed_height))
            displayed_filename = '%s_displayed%s' % (filename_no_extension, extension)
            resized_im.save(upload_dir_path + displayed_filename)
            displayed_path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + displayed_filename
        else:
            displayed_path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        return {'displayed_path': displayed_path, 'true_size': im.size}
示例13: write
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
def write(data, wfile: io.BufferedWriter) -> None:
    """Serialize *data* as a single JSON line and flush it to *wfile*."""
    payload = json.dumps(data).encode()
    wfile.write(payload + b"\n")
    wfile.flush()
示例14: write
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
def write(self, strng):
    """Encode *strng* as UTF-8 and delegate the bytes to BufferedWriter.write."""
    encoded = strng.encode('utf-8')
    BufferedWriter.write(self, encoded)
示例15: convert_with_google_drive
# 需要导入模块: from io import BufferedWriter [as 别名]
# 或者: from io.BufferedWriter import write [as 别名]
def convert_with_google_drive(note):
    """ Upload a local note and download HTML
    using Google Drive
    :note: a File model instance # FIXME
    """
    # TODO: set the permission of the file to permissive so we can use the
    # gdrive_url to serve files directly to users

    # Get file_type and encoding of uploaded file
    # i.e: file_type = 'text/plain', encoding = None
    (file_type, encoding) = mimetypes.guess_type(note.note_file.path)

    # Only pass a mimetype to MediaFileUpload when one could be guessed
    if file_type != None:
        media = MediaFileUpload(note.note_file.path, mimetype=file_type,
                    chunksize=1024*1024, resumable=True)
    else:
        media = MediaFileUpload(note.note_file.path,
                    chunksize=1024*1024, resumable=True)

    # Load stored Google Drive credentials and refresh them if expired
    auth = DriveAuth.objects.filter(email=GOOGLE_USER).all()[0]
    creds = auth.transform_to_cred()

    creds, auth = check_and_refresh(creds, auth)

    service, http = build_api_service(creds)

    # get the file extension
    filename, extension = os.path.splitext(note.note_file.path)

    # Round-trip through Google Drive: upload, then export/download
    file_dict = upload_to_gdrive(service, media, filename, extension)
    content_dict = download_from_gdrive(file_dict, http, extension)

    # Get a new copy of the file from the database with the new metadata from filemeta
    new_note = Note.objects.get(id=note.id)

    if extension.lower() == '.pdf':
        new_note.file_type = 'pdf'
    elif extension.lower() in ['.ppt', '.pptx']:
        # PowerPoint uploads: store the PDF that Drive produced
        new_note.file_type = 'ppt'
        now = datetime.datetime.utcnow()
        # create a folder path to store the ppt > pdf file with year and month folders
        # NOTE(review): nonce_path is computed but never used below -- confirm intent
        nonce_path = '/ppt_pdf/%s/%s/' % (now.year, now.month)
        _path = filename + '.pdf'
        try:
            # If those folders don't exist, create them
            os.makedirs(os.path.realpath(os.path.dirname(_path)))
        except:
            # NOTE(review): bare except silently swallows every error here,
            # not just "directory already exists" -- confirm acceptable
            print "we failed to create those directories"
        # Write the exported PDF bytes next to the original file
        _writer = BufferedWriter(FileIO(_path, "w"))
        _writer.write(content_dict['pdf'])
        _writer.close()
        new_note.pdf_file = _path
    else:
        # PPT files do not have this export ability
        new_note.gdrive_url = file_dict[u'exportLinks']['application/vnd.oasis.opendocument.text']
        new_note.html = content_dict['html']
        new_note.text = content_dict['text']
        # before we save new html, sanitize a tags in note.html
        #new_note.sanitize_html(save=False)
        #FIXME: ^^^ disabled until we can get html out of an Etree html element

    # Finally, save whatever data we got back from google
    new_note.save()