This article compiles typical usage examples of the Python method tempfile.SpooledTemporaryFile.tell. If you have been wondering what SpooledTemporaryFile.tell does, how to use it, or where to find concrete examples, the curated code samples below should help. You can also read more about its containing class, tempfile.SpooledTemporaryFile.
The following shows 9 code examples of SpooledTemporaryFile.tell, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
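Before the project examples, here is a minimal, self-contained illustration (not taken from any of the examples below) of what SpooledTemporaryFile.tell reports: the current position in the spooled file, which immediately after writing equals the amount of data written so far.

from tempfile import SpooledTemporaryFile

with SpooledTemporaryFile(max_size=1024) as f:
    assert f.tell() == 0           # nothing written yet
    f.write(b'hello world\n')
    assert f.tell() == 12          # position == bytes written so far
    f.seek(0)
    print(f.read())                # b'hello world\n'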
Example 1: _do_execute_direct
# Required import: from tempfile import SpooledTemporaryFile [as alias]
# Or alternatively: from tempfile.SpooledTemporaryFile import tell [as alias]
def _do_execute_direct(self, code):
    shell = builtins.__xonsh_shell__
    env = builtins.__xonsh_env__
    out = io.StringIO()
    err = io.StringIO()
    enc = env.get('XONSH_ENCODING')
    out = SpooledTemporaryFile(max_size=MAX_SIZE, mode='w+t',
                               encoding=enc, newline='\n')
    err = SpooledTemporaryFile(max_size=MAX_SIZE, mode='w+t',
                               encoding=enc, newline='\n')
    try:
        with redirect_stdout(out), redirect_stderr(err), \
             swap(builtins, '__xonsh_stdout_uncaptured__', out), \
             swap(builtins, '__xonsh_stderr_uncaptured__', err), \
             env.swap({'XONSH_STORE_STDOUT': False}):
            shell.default(code)
        interrupted = False
    except KeyboardInterrupt:
        interrupted = True
    output, error = '', ''
    if out.tell() > 0:
        out.seek(0)
        output = out.read()
    if err.tell() > 0:
        err.seek(0)
        error = err.read()
    out.close()
    err.close()
    return output, error, interrupted
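The pattern above (also used by Examples 2 and 3) only reads the captured stream back when tell() shows that anything was written at all. Below is a condensed, hedged sketch of that idiom; it assumes plain contextlib.redirect_stdout is enough (the real kernel additionally swaps xonsh-specific builtins), and MAX_SIZE is a placeholder constant, not the kernel's own value.

from contextlib import redirect_stdout
from tempfile import SpooledTemporaryFile

MAX_SIZE = 8192  # placeholder; the kernel defines its own spool threshold

def capture_stdout(func):
    out = SpooledTemporaryFile(max_size=MAX_SIZE, mode='w+t', newline='\n')
    with redirect_stdout(out):
        func()
    output = ''
    if out.tell() > 0:    # tell() > 0 means something was actually printed
        out.seek(0)
        output = out.read()
    out.close()
    return output

print(repr(capture_stdout(lambda: print('hi'))))   # 'hi\n'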
Example 2: do_execute
# Required import: from tempfile import SpooledTemporaryFile [as alias]
# Or alternatively: from tempfile.SpooledTemporaryFile import tell [as alias]
def do_execute(self, code, silent, store_history=True, user_expressions=None,
               allow_stdin=False):
    """Execute user code."""
    if len(code.strip()) == 0:
        return {'status': 'ok', 'execution_count': self.execution_count,
                'payload': [], 'user_expressions': {}}
    env = builtins.__xonsh_env__
    shell = builtins.__xonsh_shell__
    hist = builtins.__xonsh_history__
    enc = env.get('XONSH_ENCODING')
    out = SpooledTemporaryFile(max_size=MAX_SIZE, mode='w+t',
                               encoding=enc, newline='\n')
    err = SpooledTemporaryFile(max_size=MAX_SIZE, mode='w+t',
                               encoding=enc, newline='\n')
    try:
        with redirect_stdout(out), redirect_stderr(err), \
             swap(builtins, '__xonsh_stdout_uncaptured__', out), \
             swap(builtins, '__xonsh_stderr_uncaptured__', err), \
             env.swap({'XONSH_STORE_STDOUT': False}):
            shell.default(code)
        interrupted = False
    except KeyboardInterrupt:
        interrupted = True
    if not silent:  # stdout response
        if out.tell() > 0:
            out.seek(0)
            self._respond_in_chunks('stdout', out.read())
        if err.tell() > 0:
            err.seek(0)
            self._respond_in_chunks('stderr', err.read())
        if hasattr(builtins, '_') and builtins._ is not None:
            # rely on sys.displayhook functionality
            self._respond_in_chunks('stdout', pformat(builtins._))
            builtins._ = None
        if len(hist) > 0 and out.tell() == 0 and err.tell() == 0:
            self._respond_in_chunks('stdout', hist.outs[-1])
    out.close()
    err.close()
    if interrupted:
        return {'status': 'abort', 'execution_count': self.execution_count}
    rtn = 0 if len(hist) == 0 else hist.rtns[-1]
    if 0 < rtn:
        message = {'status': 'error', 'execution_count': self.execution_count,
                   'ename': '', 'evalue': str(rtn), 'traceback': []}
    else:
        message = {'status': 'ok', 'execution_count': self.execution_count,
                   'payload': [], 'user_expressions': {}}
    return message
Example 3: do_execute
# Required import: from tempfile import SpooledTemporaryFile [as alias]
# Or alternatively: from tempfile.SpooledTemporaryFile import tell [as alias]
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
    """Execute user code."""
    if len(code.strip()) == 0:
        return {"status": "ok", "execution_count": self.execution_count, "payload": [], "user_expressions": {}}
    env = builtins.__xonsh_env__
    shell = builtins.__xonsh_shell__
    hist = builtins.__xonsh_history__
    enc = env.get("XONSH_ENCODING")
    out = SpooledTemporaryFile(max_size=MAX_SIZE, mode="w+t", encoding=enc, newline="\n")
    err = SpooledTemporaryFile(max_size=MAX_SIZE, mode="w+t", encoding=enc, newline="\n")
    try:
        with redirect_stdout(out), redirect_stderr(err), swap(builtins, "__xonsh_stdout_uncaptured__", out), swap(
            builtins, "__xonsh_stderr_uncaptured__", err
        ), env.swap({"XONSH_STORE_STDOUT": False}):
            shell.default(code)
        interrupted = False
    except KeyboardInterrupt:
        interrupted = True
    if not silent:  # stdout response
        if out.tell() > 0:
            out.seek(0)
            self._respond_in_chunks("stdout", out.read())
        if err.tell() > 0:
            err.seek(0)
            self._respond_in_chunks("stderr", err.read())
        if hasattr(builtins, "_") and builtins._ is not None:
            # rely on sys.displayhook functionality
            self._respond_in_chunks("stdout", pformat(builtins._))
            builtins._ = None
        if len(hist) > 0 and out.tell() == 0 and err.tell() == 0:
            self._respond_in_chunks("stdout", hist.outs[-1])
    out.close()
    err.close()
    if interrupted:
        return {"status": "abort", "execution_count": self.execution_count}
    rtn = 0 if len(hist) == 0 else hist.rtns[-1]
    if 0 < rtn:
        message = {
            "status": "error",
            "execution_count": self.execution_count,
            "ename": "",
            "evalue": str(rtn),
            "traceback": [],
        }
    else:
        message = {"status": "ok", "execution_count": self.execution_count, "payload": [], "user_expressions": {}}
    return message
Example 4: __init__
# Required import: from tempfile import SpooledTemporaryFile [as alias]
# Or alternatively: from tempfile.SpooledTemporaryFile import tell [as alias]
def __init__(self, data=None, fp=None, length=-1):
    assert bool(data is not None) ^ bool(fp)
    if length == -1:
        if data is not None:
            length = len(data)
        else:
            length = get_size(fp)  # can be -1
    # We allow writer reuse, but if we're working with a stream, we cannot
    # seek. Copy the data to a tempfile.
    if fp and not can_seek(fp):
        newfp = SpooledTemporaryFile(MAX_INMEMORY_SIZE)
        sendfile(newfp, fp)
        length = newfp.tell()
        newfp.seek(0)
        fp = newfp
    self.data = data
    self.fp = fp
    self.fpreads = 0  # keep track of fp usage
    self.length = length
    assert length >= 0
    self.use_tempfile = length > MAX_INMEMORY_SIZE
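The constructor above copies a non-seekable stream into a SpooledTemporaryFile and then uses tell() to learn how many bytes were copied. A minimal sketch of the same idea, assuming shutil.copyfileobj in place of the project's sendfile/can_seek helpers and an assumed MAX_INMEMORY_SIZE:

import shutil
from tempfile import SpooledTemporaryFile

MAX_INMEMORY_SIZE = 1024 * 1024  # assumed spool threshold

def buffer_stream(fp):
    """Copy a (possibly non-seekable) stream into a seekable temp file."""
    newfp = SpooledTemporaryFile(MAX_INMEMORY_SIZE)
    shutil.copyfileobj(fp, newfp)
    length = newfp.tell()   # position after the copy == bytes copied
    newfp.seek(0)           # rewind so callers can read from the start
    return newfp, length

For example, buffer_stream(sys.stdin.buffer) would yield a seekable copy of standard input together with its exact length.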
Example 5: upload_file
# Required import: from tempfile import SpooledTemporaryFile [as alias]
# Or alternatively: from tempfile.SpooledTemporaryFile import tell [as alias]
def upload_file(self, user, stream, expected_size, filename, force_coll_name=''):
    """Upload WARC archive.

    :param User user: user
    :param stream: file object
    :param int expected_size: expected WARC archive size
    :param str filename: WARC archive filename
    :param str force_coll_name: name of collection to upload into

    :returns: upload information
    :rtype: dict
    """
    temp_file = None
    logger.debug('Upload Begin')
    logger.debug('Expected Size: ' + str(expected_size))

    #is_anon = False

    size_rem = user.get_size_remaining()
    logger.debug('User Size Rem: ' + str(size_rem))

    if size_rem < expected_size:
        return {'error': 'out_of_space'}

    if force_coll_name and not user.has_collection(force_coll_name):
        #if is_anon:
        #    user.create_collection(force_coll, 'Temporary Collection')
        #else:
        #status = 'Collection {0} not found'.format(force_coll_name)
        return {'error': 'no_such_collection'}

    temp_file = SpooledTemporaryFile(max_size=BLOCK_SIZE)

    stream = CacheingLimitReader(stream, expected_size, temp_file)

    if filename.endswith('.har'):
        stream, expected_size = self.har2warc(filename, stream)
        temp_file.close()
        temp_file = stream

    infos = self.parse_uploaded(stream, expected_size)

    total_size = temp_file.tell()
    if total_size != expected_size:
        return {'error': 'incomplete_upload', 'expected': expected_size, 'actual': total_size}

    upload_id, upload_key = self._init_upload_status(user, total_size, 1, filename=filename)

    return self.handle_upload(temp_file, upload_id, upload_key, infos, filename,
                              user, force_coll_name, total_size)
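Here tell() doubles as a cheap integrity check: CacheingLimitReader mirrors everything it reads into the spooled temp file, so after parsing, temp_file.tell() is the number of bytes actually received and can be compared with the client's expected_size. A simplified sketch of that check, with a hypothetical TeeReader standing in for CacheingLimitReader and an assumed BLOCK_SIZE:

from tempfile import SpooledTemporaryFile

BLOCK_SIZE = 16384  # assumed spool/read size

class TeeReader:
    """Hypothetical stand-in: mirrors everything read into `out`."""
    def __init__(self, stream, out):
        self.stream, self.out = stream, out

    def read(self, size=-1):
        chunk = self.stream.read(size)
        self.out.write(chunk)
        return chunk

def receive_upload(stream, expected_size):
    temp_file = SpooledTemporaryFile(max_size=BLOCK_SIZE)
    reader = TeeReader(stream, temp_file)
    while reader.read(BLOCK_SIZE):   # drain the stream through the tee
        pass
    total_size = temp_file.tell()    # bytes actually written to the spool
    if total_size != expected_size:
        return {'error': 'incomplete_upload',
                'expected': expected_size, 'actual': total_size}
    temp_file.seek(0)
    return {'size': total_size, 'file': temp_file}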
Example 6: GCloudFile
# Required import: from tempfile import SpooledTemporaryFile [as alias]
# Or alternatively: from tempfile.SpooledTemporaryFile import tell [as alias]
class GCloudFile(File):
    """
    Django file object that wraps a SpooledTemporaryFile and remembers changes on
    write to reupload the file to GCS on close()
    """

    def __init__(self, blob, maxsize=1000):
        """
        :type blob: google.cloud.storage.blob.Blob
        """
        self._dirty = False
        self._tmpfile = SpooledTemporaryFile(
            max_size=maxsize,
            prefix="django_gcloud_storage_"
        )

        self._blob = blob
        super(GCloudFile, self).__init__(self._tmpfile)

    def _update_blob(self):
        # Specify explicit size to avoid problems with not yet spooled temporary files
        # Django's File.size property already knows how to handle cases like this
        if DJANGO_17 and self._tmpfile.name is None:  # Django bug #22307
            size = self._tmpfile.tell()
        else:
            size = self.size

        self._blob.upload_from_file(self._tmpfile, size=size, rewind=True)

    def write(self, content):
        self._dirty = True
        super(GCloudFile, self).write(content)

    def close(self):
        if self._dirty:
            self._update_blob()
            self._dirty = False

        super(GCloudFile, self).close()
Example 7: fetch_media
# Required import: from tempfile import SpooledTemporaryFile [as alias]
# Or alternatively: from tempfile.SpooledTemporaryFile import tell [as alias]
def fetch_media(self, url, partial_fetch=False):
    """Retrieves a given media object from a remote (HTTP) location
    and returns the content-type and a file-like object containing
    the media content.

    The file-like object is a temporary file that - depending on the
    size - lives in memory or on disk. Once the file is closed, the
    contents are removed from storage.

    :param url: the URL of the media asset.
    :type url: str.
    :param partial_fetch: determines if the complete file should
        be fetched, or if only the first 2 MB should be retrieved.
        This feature is used to prevent complete retrieval of large
        a/v material.
    :type partial_fetch: bool.
    :returns: a tuple with the ``content-type``, ``content-length``
        and a file-like object containing the media content. The
        value of ``content-length`` will be ``None`` in case
        a partial fetch is requested and ``content-length`` is not
        returned by the remote server.
    """
    http_resp = self.http_session.get(url, stream=True, timeout=(60, 120))
    http_resp.raise_for_status()

    if not os.path.exists(TEMP_DIR_PATH):
        log.debug('Creating temp directory %s' % TEMP_DIR_PATH)
        os.makedirs(TEMP_DIR_PATH)

    # Create a temporary file to store the media item, write the file
    # to disk if it is larger than 1 MB.
    media_file = SpooledTemporaryFile(max_size=1024*1024, prefix='ocd_m_',
                                      suffix='.tmp',
                                      dir=TEMP_DIR_PATH)

    # When a partial fetch is requested, request up to two MB
    partial_target_size = 1024*1024*2
    content_length = http_resp.headers.get('content-length')
    if content_length and int(content_length) < partial_target_size:
        partial_target_size = int(content_length)

    retrieved_bytes = 0
    for chunk in http_resp.iter_content(chunk_size=512*1024):
        if chunk:  # filter out keep-alive chunks
            media_file.write(chunk)
            retrieved_bytes += len(chunk)

        if partial_fetch and retrieved_bytes >= partial_target_size:
            break

    media_file.flush()
    log.debug('Fetched media item %s [%s/%s]' % (url, retrieved_bytes,
                                                 content_length))

    # If the server doesn't provide a content-length and this isn't
    # a partial fetch, determine the size by looking at the retrieved
    # content
    if not content_length and not partial_fetch:
        media_file.seek(0, 2)
        content_length = media_file.tell()

    return (
        http_resp.headers.get('content-type'),
        content_length,
        media_file
    )
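The last step above is a common SpooledTemporaryFile idiom: when the server did not send a Content-Length, seek to the end of the spooled file and let tell() report how many bytes were actually stored. A minimal stand-alone version of that measurement (the helper name is hypothetical):

import os
from tempfile import SpooledTemporaryFile

def stored_size(spooled_file):
    spooled_file.seek(0, os.SEEK_END)   # jump to the end of what was written
    size = spooled_file.tell()
    spooled_file.seek(0)                # rewind for downstream consumers
    return size

f = SpooledTemporaryFile(max_size=1024)
f.write(b'x' * 2048)                    # exceeds max_size: spills to disk
assert stored_size(f) == 2048           # tell() works either way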
Example 8: Buffer
# Required import: from tempfile import SpooledTemporaryFile [as alias]
# Or alternatively: from tempfile.SpooledTemporaryFile import tell [as alias]
class Buffer(FileWrapper):
    """Class implementing buffering of input and output streams.

    This class uses a separate buffer file to hold the contents of the
    underlying file while they are being manipulated. As data is read
    it is duplicated into the buffer, and data is written from the buffer
    back to the file on close.
    """

    def __init__(self, fileobj, mode=None, max_size_in_memory=1024*8):
        """Buffered file wrapper constructor."""
        self._buffer = SpooledTemporaryFile(max_size=max_size_in_memory)
        self._in_eof = False
        self._in_pos = 0
        super(Buffer, self).__init__(fileobj, mode)

    def _buffer_chunks(self):
        chunk = self._buffer.read(16*1024)
        if chunk == "":
            yield chunk
        else:
            while chunk != "":
                yield chunk
                chunk = self._buffer.read(16*1024)

    def _write_out_buffer(self):
        if self._check_mode("r"):
            self._read_rest()
            if "a" in self.mode:
                self._buffer.seek(self._in_pos)
                self._fileobj.seek(self._in_pos)
            else:
                self._fileobj.seek(0)
                self._buffer.seek(0)
        else:
            self._buffer.seek(0)
        for chunk in self._buffer_chunks():
            self._fileobj.write(chunk)

    def flush(self):
        # flush the buffer; we only write to the underlying file on close
        self._buffer.flush()

    def close(self):
        if self.closed:
            return
        if self._check_mode("w"):
            self._write_out_buffer()
        super(Buffer, self).close()
        self._buffer.close()

    def _read(self, sizehint=-1):
        # First return any data available from the buffer.
        # Since we don't flush the buffer after every write, certain OSes
        # (guess which!) will happily read junk data from the end of it.
        # Instead, we explicitly read only up to self._in_pos.
        if not self._in_eof:
            buffered_size = self._in_pos - self._buffer.tell()
            if sizehint >= 0:
                buffered_size = min(sizehint, buffered_size)
        else:
            buffered_size = sizehint
        data = self._buffer.read(buffered_size)
        if data != "":
            return data
        # Then look for more data in the underlying file
        if self._in_eof:
            return None
        data = self._fileobj.read(sizehint)
        self._in_pos += len(data)
        self._buffer.write(data)
        if sizehint < 0 or len(data) < sizehint:
            self._in_eof = True
            self._buffer.flush()
        return data

    def _write(self, data, flushing=False):
        self._buffer.write(data)
        if self._check_mode("r") and not self._in_eof:
            diff = self._buffer.tell() - self._in_pos
            if diff > 0:
                junk = self._fileobj.read(diff)
                self._in_pos += len(junk)
                if len(junk) < diff:
                    self._in_eof = True
                    self._buffer.flush()

    def _seek(self, offset, whence):
        # Ensure we've read enough to simply do the seek on the buffer
        if self._check_mode("r") and not self._in_eof:
            if whence == 0:
                if offset > self._in_pos:
                    self._read_rest()
            if whence == 1:
                if self._buffer.tell() + offset > self._in_pos:
                    self._read_rest()
            if whence == 2:
                self._read_rest()
        # Then just do it on the buffer...
        self._buffer.seek(offset, whence)

# ... (remainder of this example omitted) ...
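Throughout this class, tell() on the spooled buffer serves as the bookkeeping cursor: _in_pos records how much of the underlying file has been mirrored, and comparing it with the buffer position tells _read how much safe, already-mirrored data is left and tells _write whether a write has run past the mirrored region. A tiny stand-alone illustration of that comparison (simplified, not the class's actual API):

from tempfile import SpooledTemporaryFile

buffer = SpooledTemporaryFile(max_size=8 * 1024)
in_pos = 0                        # bytes mirrored from the underlying file

buffer.write(b'abcde')            # pretend 5 bytes were read and mirrored
in_pos += 5
buffer.seek(3)                    # reader position inside the mirrored data

remaining = in_pos - buffer.tell()    # 2 bytes of safe, mirrored data left
print(remaining)

buffer.seek(0, 2)                 # move to the end, as a write would
buffer.write(b'XY')               # write past the mirrored region
overrun = buffer.tell() - in_pos      # 2: the underlying file must skip 2 bytes
print(overrun)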
Example 9: Buffer
# Required import: from tempfile import SpooledTemporaryFile [as alias]
# Or alternatively: from tempfile.SpooledTemporaryFile import tell [as alias]
class Buffer(FileWrapper):
    """Class implementing buffering of input and output streams.

    This class uses a separate buffer file to hold the contents of the
    underlying file while they are being manipulated. As data is read
    it is duplicated into the buffer, and data is written from the buffer
    back to the file on close.
    """

    def __init__(self, fileobj, mode=None, max_size_in_memory=1024 * 8):
        """Buffered file wrapper constructor."""
        self._buffer = SpooledTemporaryFile(max_size=max_size_in_memory)
        self._in_eof = False
        self._in_pos = 0
        self._was_truncated = False
        super(Buffer, self).__init__(fileobj, mode)

    def _buffer_size(self):
        try:
            return len(self._buffer.file.getvalue())
        except AttributeError:
            return os.fstat(self._buffer.fileno()).st_size

    def _buffer_chunks(self):
        chunk = self._buffer.read(16 * 1024)
        if chunk == "":
            yield chunk
        else:
            while chunk != "":
                yield chunk
                chunk = self._buffer.read(16 * 1024)

    def _write_out_buffer(self):
        if self._check_mode("r"):
            self._read_rest()
            if "a" in self.mode:
                self._buffer.seek(self._in_pos)
                self._fileobj.seek(self._in_pos)
            else:
                self._fileobj.seek(0)
                self._buffer.seek(0)
        else:
            self._buffer.seek(0)
        if self._was_truncated:
            self._fileobj.truncate(0)
            self._was_truncated = False
        for chunk in self._buffer_chunks():
            self._fileobj.write(chunk)

    def flush(self):
        # flush the buffer; we only write to the underlying file on close
        self._buffer.flush()

    def close(self):
        if self.closed:
            return
        if self._check_mode("w"):
            self._write_out_buffer()
        super(Buffer, self).close()
        self._buffer.close()

    def _read(self, sizehint=-1):
        # First return any data available from the buffer.
        # Since we don't flush the buffer after every write, certain OSes
        # (guess which!) will happily read junk data from the end of it.
        # Instead, we explicitly read only up to self._in_pos.
        if not self._in_eof:
            buffered_size = self._in_pos - self._buffer.tell()
            if sizehint >= 0:
                buffered_size = min(sizehint, buffered_size)
        else:
            buffered_size = sizehint
        data = self._buffer.read(buffered_size)
        if data != "":
            return data
        # Then look for more data in the underlying file
        if self._in_eof:
            return None
        data = self._fileobj.read(sizehint)
        self._in_pos += len(data)
        self._buffer.write(data)
        if sizehint < 0 or len(data) < sizehint:
            self._in_eof = True
            self._buffer.flush()
        return data

    def _write(self, data, flushing=False):
        self._buffer.write(data)
        if self._check_mode("r") and not self._in_eof:
            diff = self._buffer.tell() - self._in_pos
            if diff > 0:
                junk = self._fileobj.read(diff)
                self._in_pos += len(junk)
                if len(junk) < diff:
                    self._in_eof = True
                    self._buffer.flush()

    def _seek(self, offset, whence):
        # Ensure we've read enough to simply do the seek on the buffer
        if self._check_mode("r") and not self._in_eof:
# ... (remainder of this example omitted) ...