本文整理汇总了Python中io.BufferedWriter类的典型用法代码示例。如果您正苦于以下问题:Python BufferedWriter类的具体用法?Python BufferedWriter怎么用?Python BufferedWriter使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了BufferedWriter类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: handle
def handle(self):
    """Handle a single HTTP request"""
    # Read one byte more than the 64 KiB limit so an over-long request
    # line can be detected by the length check below.
    self.raw_requestline = self.rfile.readline(65537)
    if len(self.raw_requestline) > 65536:
        # Request line too long: clear request state and reply
        # 414 Request-URI Too Long, then give up on this request.
        self.requestline = ''
        self.request_version = ''
        self.command = ''
        self.send_error(414)
        return
    if not self.parse_request():  # An error code has been sent, just exit
        return
    # Avoid passing the raw file object wfile, which can do partial
    # writes (Issue 24291)
    stdout = BufferedWriter(self.wfile)
    try:
        handler = ServerHandler(
            self.rfile, stdout, self.get_stderr(), self.get_environ()
        )
        handler.request_handler = self  # backpointer for logging
        handler.run(self.server.get_app())
    finally:
        # Flush the buffered response and detach, so the BufferedWriter
        # does not close the underlying wfile when garbage-collected.
        stdout.detach()
示例2: AbstractUploadBackend
class AbstractUploadBackend(object):
    """Base class for upload backends: names, writes and size-checks an
    uploaded file chunk by chunk."""

    BUFFER_SIZE = 10485760  # 10MB

    def __init__(self, **kwargs):
        self._timedir = get_date_directory()
        # Allow arbitrary configuration (e.g. upload_dir, upload_size)
        # to be injected as attributes.
        self.__dict__.update(kwargs)

    def update_filename(self, request, filename):
        """Returns a new name for the file being uploaded."""
        self.oldname = filename
        ext = os.path.splitext(filename)[1]
        # md5 only derives a collision-resistant storage name; it is not
        # used for any security purpose here.
        return md5(filename.encode('utf8')).hexdigest() + ext

    def upload_chunk(self, chunk):
        """Called when a string was read from the client, responsible for
        writing that string to the destination file."""
        self._dest.write(chunk)

    def max_size(self):
        """
        Checking file max size.

        Returns True (after closing and deleting the partial file) when
        the bytes written so far exceed self.upload_size; otherwise
        returns None.
        """
        if int(self._dest.tell()) > self.upload_size:
            self._dest.close()
            os.remove(self._path)
            return True

    def upload(self, uploaded, filename, raw_data):
        """Stream the upload into the destination; returns False on
        oversize or on any error, True on success."""
        try:
            if raw_data:
                # File was uploaded via ajax, and is streaming in.
                chunk = uploaded.read(self.BUFFER_SIZE)
                while len(chunk) > 0:
                    self.upload_chunk(chunk)
                    if self.max_size():
                        return False
                    chunk = uploaded.read(self.BUFFER_SIZE)
            else:
                # File was uploaded via a POST, and is here.
                for chunk in uploaded.chunks():
                    self.upload_chunk(chunk)
                    if self.max_size():
                        return False
            return True
        except Exception:
            # Fix: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed. Still a
            # deliberate best-effort: any upload error yields False.
            return False

    def setup(self, filename):
        """Open the destination file under MEDIA_ROOT for writing."""
        self._path = os.path.join(settings.MEDIA_ROOT, self.upload_dir, self._timedir, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            # Fix: narrowed from a bare `except:` -- only filesystem
            # failures (typically "directory exists") are expected here;
            # a real problem will resurface when FileIO opens the path.
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_complete(self, request, filename):
        """Close the destination and return its relative path plus the
        original client-supplied name."""
        path = self.upload_dir + "/" + self._timedir + "/" + filename
        self._dest.close()
        return {"path": path, 'oldname': self.oldname}
示例3: PANDAUploadBackend
class PANDAUploadBackend(AbstractUploadBackend):
    """
    Customized backend to handle AJAX uploads.
    """
    def update_filename(self, request, filename):
        """
        Verify that the filename is unique, if it isn't append and iterate
        a counter until it is.
        """
        self._original_filename = filename
        filename = self._original_filename
        root, ext = os.path.splitext(self._original_filename)
        path = os.path.join(settings.MEDIA_ROOT, filename)
        i = 1
        # Append 1, 2, 3, ... before the extension until no file with
        # that name exists. NOTE(review): check-then-use is racy if two
        # uploads share a name concurrently -- acceptable for this use.
        while os.path.exists(path):
            filename = '%s%i%s' % (root, i, ext)
            path = os.path.join(settings.MEDIA_ROOT, filename)
            i += 1
        return filename

    def setup(self, filename):
        """
        Open the destination file for writing.
        """
        self._path = os.path.join(settings.MEDIA_ROOT, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            # Fix: narrowed from a bare `except:`. The directory usually
            # exists already; any genuine I/O failure will surface when
            # FileIO opens the destination below.
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        """
        Write a chunk of data to the destination.
        """
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """
        Close the destination file and create an Upload object in the
        database recording its existence.
        """
        self._dest.close()
        root, ext = os.path.splitext(filename)
        path = os.path.join(settings.MEDIA_ROOT, filename)
        size = os.path.getsize(path)
        upload = Upload.objects.create(
            filename=filename,
            original_filename=self._original_filename,
            size=size)
        return {'id': upload.id}
示例4: consecutive_download
def consecutive_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Download a node's content in one streamed request, writing it to
    *file* and reporting progress.

    Keyword args:
        write_callback: called with each chunk after it is written
        length (int): expected content length, for mismatch logging

    :raises RequestError: on a non-OK status code or a read timeout
    """
    r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True)
    if r.status_code not in OK_CODES:
        raise RequestError(r.status_code, r.text)
    write_callback = kwargs.get('write_callback', None)
    total_ln = int(r.headers.get('content-length'))
    length = kwargs.get('length', None)
    if length and total_ln != length:
        logging.info('Length mismatch: argument %d, content %d' % (length, total_ln))
    pgo = progress.Progress()
    curr_ln = 0
    try:
        for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
            if chunk:  # filter out keep-alive new chunks
                file.write(chunk)
                file.flush()
                if write_callback:
                    write_callback(chunk)
                curr_ln += len(chunk)
                pgo.print_progress(total_ln, curr_ln)
    except (ConnectionError, ReadTimeoutError) as e:
        raise RequestError(RequestError.CODE.READ_TIMEOUT, '[acd_cli] Timeout. ' + e.__str__())
    finally:
        # Fix: the original closed the response only on the success path,
        # leaking the connection whenever iter_content raised.
        r.close()
    print()  # break progress line
    return
示例5: handle
def handle(self):
    """
    Handle a single HTTP request. Shamelessly copied from Python
    3.5 wsgiref simple_server. Adjusted the SimpleHandler to set
    multithread=False.
    """
    # Read one byte beyond the 64 KiB limit so an over-long request
    # line can be detected by the check below.
    self.raw_requestline = self.rfile.readline(65537)
    if len(self.raw_requestline) > 65536:
        # Over-long request line: clear request state and answer
        # 414 Request-URI Too Long.
        self.requestline = ''
        self.request_version = ''
        self.command = ''
        self.send_error(414)
        return
    if not self.parse_request():  # an error code has been sent, exit
        return
    # Avoid passing the raw file object wfile, which can do partial
    # writes (Issue 24291)
    stdout = BufferedWriter(self.wfile)
    try:
        handler = MySimpleHandler(
            self.rfile, stdout, self.get_stderr(), self.get_environ(),
            multithread=False, multiprocess=False)
        handler.request_handler = self  # backpointer for logging
        handler.run(self.server.get_app())
    finally:
        # Flush buffered output; detach (rather than close) so the
        # underlying wfile stays usable/owned by the server.
        stdout.detach()
示例6: LocalUploadBackend
class LocalUploadBackend(AbstractUploadBackend):
    """Upload backend that stores files on local disk under MEDIA_ROOT,
    in a (possibly date-formatted) upload directory."""

    UPLOAD_DIR = ajaxuploader_settings.UPLOAD_DIRECTORY
    # TODO: allow this to be overridden per-widget/view

    def setup(self, filename):
        """Compute the destination path and open a buffered writer to it."""
        self._relative_path = os.path.normpath(
            os.path.join(
                force_unicode(
                    datetime.datetime.now().strftime(  # allow %Y, %s, etc
                        smart_str(self.UPLOAD_DIR))),
                filename))
        self._path = os.path.join(settings.MEDIA_ROOT, self._relative_path)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            # Fix: narrowed from a bare `except:` -- only "directory
            # already exists" style failures are expected; anything real
            # will surface when FileIO opens the path below.
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        """Append one chunk of uploaded data to the destination file."""
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """Close the destination and return its MEDIA_ROOT-relative path."""
        self._dest.close()
        return {"path": self._relative_path}

    def update_filename(self, request, filename):
        """Sanitize the client-supplied filename before storage."""
        return ajaxuploader_settings.SANITIZE_FILENAME(filename)
示例7: LocalUploadBackend
class LocalUploadBackend(AbstractUploadBackend):
    """Upload backend writing files beneath MEDIA_ROOT/UPLOAD_DIR."""

    UPLOAD_DIR = getattr(settings, "UPLOAD_DIR", "uploads")

    def setup(self, filename, *args, **kwargs):
        """Open the destination file for buffered writing."""
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            # Fix: narrowed from a bare `except:` -- the directory
            # usually exists already; real failures surface on open.
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk, *args, **kwargs):
        """Write one chunk of uploaded data to the destination."""
        self._dest.write(chunk)

    def upload_complete(self, request, filename, *args, **kwargs):
        """Close the destination and return its public MEDIA_URL path."""
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename, *args, **kwargs):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        filename = os.path.basename(filename)
        self._dir = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR)
        unique_filename = False
        filename_suffix = 0
        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            # Probe candidate names until open() fails with IOError,
            # i.e. until no file with that name exists.
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        candidate = os.path.join(self._dir, filename)
                    else:
                        filename_no_extension, extension = os.path.splitext(filename)
                        candidate = os.path.join(
                            self._dir,
                            filename_no_extension + str(filename_suffix) + extension)
                    # Fix: close the probe handle -- the original leaked
                    # one file descriptor per existing candidate.
                    with open(candidate):
                        pass
                    filename_suffix += 1
                except IOError:
                    unique_filename = True
        if filename_suffix == 0:
            return filename
        else:
            return filename_no_extension + str(filename_suffix) + extension

    @property
    def path(self):
        """
        Return a path of file uploaded
        """
        return self._path
示例8: chunked_download
def chunked_download(self, node_id: str, file: io.BufferedWriter, **kwargs):
    """Download a node's content in CHUNK_SIZE ranged requests, writing
    each chunk into *file*.

    :param kwargs:
    offset (int): byte offset -- start byte for ranged request
    length (int): total file length[!], equal to end + 1
    write_callbacks (list[function])
    """
    ok_codes = [http.PARTIAL_CONTENT]
    write_callbacks = kwargs.get("write_callbacks", [])
    chunk_start = kwargs.get("offset", 0)
    # 100 TiB default acts as an effectively-unbounded length for
    # unknown-length downloads; the 416 branch below terminates them.
    length = kwargs.get("length", 100 * 1024 ** 4)
    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + CHUNK_SIZE - 1
        if chunk_end >= length:
            chunk_end = length - 1
        if retries >= CHUNK_MAX_RETRY:
            raise RequestError(
                RequestError.CODE.FAILED_SUBREQUEST, "[acd_api] Downloading chunk failed multiple times."
            )
        r = self.BOReq.get(
            self.content_url + "nodes/" + node_id + "/content",
            stream=True,
            acc_codes=ok_codes,
            headers={"Range": "bytes=%d-%d" % (chunk_start, chunk_end)},
        )
        logger.debug("Range %d-%d" % (chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            logger.debug("Invalid byte range requested %d-%d" % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            # Unexpected status: retry this same chunk (counted above).
            r.close()
            retries += 1
            logging.debug("Chunk [%d-%d], retry %d." % (chunk_start, chunk_end, retries))
            continue
        curr_ln = 0
        try:
            for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
                    file.flush()
                    for wcb in write_callbacks:
                        wcb(chunk)
                    curr_ln += len(chunk)
        finally:
            # Always release the connection, even if iter_content raised.
            r.close()
        chunk_start += CHUNK_SIZE
        # A fully-received chunk resets the retry counter.
        retries = 0
    return
示例9: chunked_download
def chunked_download(self, node_id: str, file: io.BufferedWriter, **kwargs):
    """Download a node's content in configurable-size ranged requests,
    writing each chunk into *file*.

    :param kwargs:
    offset (int): byte offset -- start byte for ranged request
    length (int): total file length[!], equal to end + 1
    write_callbacks (list[function])
    """
    ok_codes = [http.PARTIAL_CONTENT]
    write_callbacks = kwargs.get('write_callbacks', [])
    chunk_start = kwargs.get('offset', 0)
    # 100 TiB default acts as an effectively-unbounded length for
    # unknown-length downloads; the 416 branch below terminates them.
    length = kwargs.get('length', 100 * 1024 ** 4)
    dl_chunk_sz = self._conf.getint('transfer', 'dl_chunk_size')
    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + dl_chunk_sz - 1
        if chunk_end >= length:
            chunk_end = length - 1
        if retries >= self._conf.getint('transfer', 'chunk_retries'):
            raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                               '[acd_api] Downloading chunk failed multiple times.')
        r = self.BOReq.get(self.content_url + 'nodes/' + node_id + '/content', stream=True,
                           acc_codes=ok_codes,
                           headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})
        logger.debug('Node "%s", range %d-%d' % (node_id, chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            r.close()
            logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            # Unexpected status: retry this same chunk (counted above).
            r.close()
            retries += 1
            logging.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
            continue
        curr_ln = 0
        try:
            for chunk in r.iter_content(chunk_size=self._conf.getint('transfer', 'fs_chunk_size')):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
                    file.flush()
                    for wcb in write_callbacks:
                        wcb(chunk)
                    curr_ln += len(chunk)
        finally:
            # Always release the connection, even if iter_content raised.
            r.close()
        # Resume from the bytes actually written (file position), not a
        # fixed increment -- robust when a chunk was only partly received.
        chunk_start = file.tell()
        retries = 0
    return
示例10: LocalUploadBackend
class LocalUploadBackend(AbstractUploadBackend):
    # NOTE: this example uses Python 2 `print` statements.
    # Directory under MEDIA_ROOT where uploads are stored.
    UPLOAD_DIR = "uploads"

    def setup(self, filename):
        """Open the destination file under MEDIA_ROOT/UPLOAD_DIR for writing."""
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            # NOTE(review): bare except deliberately left as-is here; it
            # silently ignores makedirs failures (usually "exists").
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        """Write one chunk of uploaded data to the destination file."""
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """Close the destination and return its public MEDIA_URL path."""
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        self._dir = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR)
        unique_filename = False
        filename_suffix = 0
        print "orig filename: " + os.path.join(self._dir, filename)
        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            # Probe candidate names until open() raises IOError, i.e.
            # until no file with that candidate name exists.
            # NOTE(review): the open() probes below are never closed --
            # one file descriptor leaks per existing candidate.
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(filename)
                        print "filename all ready exists. Trying " + filename_no_extension + str(filename_suffix) + extension
                        open(os.path.join(self._dir, filename_no_extension + str(filename_suffix) + extension))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True
        if filename_suffix == 0:
            print "using filename: " + os.path.join(self._dir, filename)
            return filename
        else:
            print "using filename: " + filename_no_extension + str(filename_suffix) + extension
            return filename_no_extension + str(filename_suffix) + extension
示例11: chunked_download
def chunked_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Download a node's content in CHUNK_SIZE ranged requests, writing
    each chunk into *file*.

    Keyword args:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks: (list[function])

    :raises RequestError: when one chunk fails CHUNK_MAX_RETRY times
    """
    ok_codes = [http.PARTIAL_CONTENT]
    write_callbacks = kwargs.get('write_callbacks', [])
    chunk_start = kwargs.get('offset', 0)
    # 100 TiB default acts as an effectively-unbounded length for
    # unknown-length downloads; the 416 branch below terminates them.
    length = kwargs.get('length', 100 * 1024 ** 4)
    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + CHUNK_SIZE - 1
        if chunk_end >= length:
            chunk_end = length - 1
        if retries >= CHUNK_MAX_RETRY:
            raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                               '[acd_cli] Downloading chunk failed multiple times.')
        r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True,
                               acc_codes=ok_codes,
                               headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})
        logger.debug('Range %d-%d' % (chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            # Fix: close the response before leaving the loop.
            r.close()
            logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            r.close()
            retries += 1
            logging.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
            continue
        curr_ln = 0
        try:
            # connection exceptions occur here
            for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
                    file.flush()
                    for wcb in write_callbacks:
                        wcb(chunk)
                    curr_ln += len(chunk)
        finally:
            # Fix: close the response even when iter_content raises --
            # the original leaked the connection on any error, unlike
            # its sibling chunked_download implementations.
            r.close()
        chunk_start += CHUNK_SIZE
        retries = 0
    return
示例12: setup
def setup(self, filename):
    """Build the destination path (MEDIA_ROOT/upload_dir/timedir/filename)
    and open a buffered writer for it."""
    self._path = os.path.join(settings.MEDIA_ROOT, self.upload_dir, self._timedir, filename)
    try:
        os.makedirs(os.path.realpath(os.path.dirname(self._path)))
    except OSError:
        # Fix: narrowed from a bare `except:` -- only filesystem
        # failures (typically "directory exists") should be ignored;
        # anything real will surface when FileIO opens the path below.
        pass
    self._dest = BufferedWriter(FileIO(self._path, "w"))
示例13: setup
def setup(self, filename, *args, **kwargs):
self._path = self.get_path(filename, *args, **kwargs)
try:
os.makedirs(os.path.realpath(os.path.dirname(self._path)))
except OSError:
pass
self._dest = BufferedWriter(FileIO(self._path, "w"))
示例14: setup
def setup(self, filename, *args, **kwargs):
    """Build the destination path (MEDIA_ROOT/UPLOAD_DIR/filename) and
    open a buffered writer for it."""
    self._path = os.path.join(
        settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
    try:
        os.makedirs(os.path.realpath(os.path.dirname(self._path)))
    except OSError:
        # Fix: narrowed from a bare `except:` -- only filesystem
        # failures (typically "directory exists") should be ignored.
        pass
    self._dest = BufferedWriter(FileIO(self._path, "w"))
示例15: setup
def setup(self, filename):
ext = os.path.splitext(filename)[1]
self._filename = md5(filename.encode('utf8')).hexdigest() + ext
self._path = os.path.join(self._upload_dir, self._filename)
try:
os.makedirs(os.path.realpath(os.path.dirname(self._path)))
except:
pass
self._destination = BufferedWriter(FileIO(self._path, "w"))