This article collects and summarizes typical usage examples of the tempfile.TemporaryFile.truncate method in Python. If you have been wondering what TemporaryFile.truncate does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help. You may also want to explore further usage examples of the enclosing class, tempfile.TemporaryFile.
Six code examples of TemporaryFile.truncate are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
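Before the examples, here is a minimal self-contained sketch (not taken from the examples below) of the usual pattern: rewind with seek(0), then call truncate() so the same temporary file can be reused for new contents.

from tempfile import TemporaryFile

with TemporaryFile() as tmp:
    tmp.write(b"first payload")
    # Reuse the same temporary file: rewind, then drop the old contents.
    tmp.seek(0)
    tmp.truncate()          # file is now empty, position is 0
    tmp.write(b"second payload")
    tmp.seek(0)
    print(tmp.read())       # b'second payload'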
Example 1: __init__
# Required import: from tempfile import TemporaryFile [as alias]
# Or: from tempfile.TemporaryFile import truncate [as alias]
from tempfile import TemporaryFile


class TemporaryFileStorage:
    def __init__(self):
        self.temporaryFile = TemporaryFile()

    def __call__(self, data, bytesEncoding='utf8'):
        if type(data) != bytes:
            data = str(data).encode(bytesEncoding)
        # Write the data, replacing whatever was stored before
        self.temporaryFile.seek(0)
        self.temporaryFile.truncate()
        self.temporaryFile.write(data)
        return self

    def buffer(self):
        self.temporaryFile.seek(0)
        return self.temporaryFile
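A quick usage sketch for the class above; the sample values are made up, but the calls only use methods defined in the snippet.

storage = TemporaryFileStorage()
storage("first value")            # non-bytes input is encoded as UTF-8
print(storage.buffer().read())    # b'first value'
storage(b"second value")          # each call rewinds and truncates before writing
print(storage.buffer().read())    # b'second value'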
Example 2: APRFile
# Required import: from tempfile import TemporaryFile [as alias]
# Or: from tempfile.TemporaryFile import truncate [as alias]
from ctypes import POINTER, byref
from shutil import copyfileobj
from tempfile import TemporaryFile

# Note: Pool, apr_file_t, apr_os_file_t, apr_os_file_put, the APR_* flags and
# get_osfhandle come from the project's APR/ctypes bindings and are not shown here.


class APRFile(object):
    """Wrap a Python file-like object as an APR File"""

    def __init__(self, pyfile):
        self.pyfile = pyfile
        self.pool = Pool()
        self._as_parameter_ = POINTER(apr_file_t)()
        self.tempfile = None
        if hasattr(pyfile, "fileno"):
            # Looks like this is a real file. We can just write
            # directly to said file
            osfile = apr_os_file_t(get_osfhandle(pyfile.fileno()))
        else:
            # Looks like this is a StringIO buffer or a fake file.
            # Write to a temporary file and copy the output to the
            # buffer when we are closed or flushed
            self.tempfile = TemporaryFile()
            osfile = apr_os_file_t(get_osfhandle(self.tempfile.fileno()))
        apr_os_file_put(byref(self._as_parameter_), byref(osfile),
                        APR_CREATE | APR_WRITE | APR_BINARY, self.pool)

    def flush(self):
        """Flush output to the underlying Python object"""
        if self.tempfile:
            self.tempfile.seek(0)
            copyfileobj(self.tempfile, self.pyfile)
            self.tempfile.truncate(0)

    def close(self):
        """Close the APR file wrapper, leaving the underlying Python object
        untouched"""
        self.flush()
        if self.tempfile:
            self.tempfile.close()
            self.tempfile = None
        self.pool.destroy()
        self.pool = None

    def __del__(self):
        if self.pool:
            self.close()
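The flush() method above shows a general pattern: buffer writes in a TemporaryFile, copy them to the real destination with copyfileobj, then truncate(0) so the buffer can be reused. Here is a stdlib-only sketch of that pattern, independent of the APR bindings (all names are illustrative):

import io
from shutil import copyfileobj
from tempfile import TemporaryFile

def flush_buffer(tempfile, target):
    """Copy everything buffered in `tempfile` into `target`, then empty it."""
    tempfile.seek(0)
    copyfileobj(tempfile, target)
    tempfile.truncate(0)   # discard the copied data so the buffer starts fresh
    tempfile.seek(0)       # truncate(0) does not move the file position

buf = TemporaryFile()
out = io.BytesIO()
buf.write(b"chunk 1\n")
flush_buffer(buf, out)
buf.write(b"chunk 2\n")
flush_buffer(buf, out)
print(out.getvalue())      # b'chunk 1\nchunk 2\n'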
Example 3: Devlin
# Required import: from tempfile import TemporaryFile [as alias]
# Or: from tempfile.TemporaryFile import truncate [as alias]
import sys
from tempfile import TemporaryFile


class CapturedStdout:
    """Capture sys.stdout output in a temp file to allow testing of function results.
    Thanks to Catherine Devlin (catherinedevlin.blogspot.com) for the idea"""

    def __init__(self):
        """Capture stdout"""
        self.backupStdout = sys.stdout
        self.tmpFile = TemporaryFile()
        sys.stdout = self.tmpFile

    def readlines(self, reset=True):
        """
        @param reset: reset buffer for next usage (default is True)
        @return: array of lines captured, resetting the buffer afterwards"""
        self.tmpFile.seek(0)
        lines = self.tmpFile.readlines()
        if reset:
            self.reset()
        return [line.strip("\n").strip("\x00") for line in lines]

    def reset(self):
        """Reset stdout buffer"""
        self.tmpFile.truncate(0)

    def gotPsyqlException(self, reset=True):
        """Check whether the captured output contains a PysqlException
        @param reset: reset buffer for next usage (default is True)
        @return: True if an exception was captured, else False"""
        lines = self.readlines(reset)
        for line in lines:
            if "Pysql error" in line:
                return True
        return False

    def echoStdout(self):
        """Echo the current buffer on the terminal stdout. Useful for test debugging"""
        self.backupStdout.writelines(["%s\n" % line for line in self.readlines(reset=False)])

    def restoreStdout(self):
        sys.stdout = self.backupStdout
        self.tmpFile.close()
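A usage sketch for CapturedStdout. Note the original is Python 2 code; on Python 3 the temporary file would need text mode (for example TemporaryFile('w+')) because print() writes str, so treat this as an adapted illustration rather than the original author's usage.

capture = CapturedStdout()                          # with TemporaryFile('w+') on Python 3
print("Pysql error: something went wrong")
got_error = capture.gotPsyqlException(reset=True)   # True; also clears the buffer via truncate(0)
capture.restoreStdout()                             # put the real stdout back, close the temp file
print(got_error)                                    # True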
Example 4: data
# Required import: from tempfile import TemporaryFile [as alias]
# Or: from tempfile.TemporaryFile import truncate [as alias]
import json
from tempfile import TemporaryFile

from flask import jsonify, request

# Note: `queries`, `cache` and `experiment_to_dict` are module-level objects of the
# original project and are not shown here. The snippet is Python 2 style: it writes
# the str returned by json.dumps() into a binary-mode temporary file.


def data(dataset=None, format=None):
    args = request.args
    dataset, format = (dataset, format) if dataset and format else (args["dataset"], args["format"])
    if dataset not in queries["dataset"] or format not in queries["format"]:
        queries["dataset"].append(dataset)
        queries["format"].append(format)
        temp = TemporaryFile('r+b')
        d = experiment_to_dict(dataset + '/' + format)
        temp.write(json.dumps(d))
        temp.truncate()
        if dataset not in cache:
            cache[dataset] = {}
        cache[dataset][format] = temp
    else:
        cache[dataset][format].seek(0)
        d = json.loads(cache[dataset][format].read())
    if dataset and format:
        return jsonify(**d)
    else:
        return jsonify()
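The core idea above is to serialize a result with json.dumps, park it in a TemporaryFile, and keep that file around as a per-process cache. A stdlib-only sketch of the same idea (names and cache layout are illustrative, not taken from the Flask project above):

import json
from tempfile import TemporaryFile

cache = {}

def get_payload(key, compute):
    """Return the cached JSON payload for `key`, computing and caching it on a miss."""
    if key not in cache:
        temp = TemporaryFile('w+')   # text mode so the JSON string can be written directly
        json.dump(compute(key), temp)
        temp.truncate()              # truncate at the current position, mirroring the example above
        cache[key] = temp
    temp = cache[key]
    temp.seek(0)
    return json.load(temp)

print(get_payload("demo", lambda k: {"dataset": k, "rows": 3}))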
Example 5: main
# Required import: from tempfile import TemporaryFile [as alias]
# Or: from tempfile.TemporaryFile import truncate [as alias]
# (Python 2 code; the beginning of main() is omitted by the source site, so names such as
#  g1_intx, g2_intx, option, col_width, mean, median, excel_column, gff_files_1, gff_files_2
#  and the two temporary files used below are presumably defined there or imported.)
#......... some of the code is omitted here .........
        # we cannot chain equal signs here, because the two would reference the
        # same list, and that would be bad...
        matching_read_depths, unmatching_read_depths = [], []
        for record1 in g1_intx:
            record2 = g2_intx.next()
            # these records should match in terms of the interval they represent
            if record2.seqname != record1.seqname or \
                    record2.start != record1.start or \
                    record2.end != record1.end:
                raise ValueError("files must be pre-sorted")
            # isolate the read depth info if we need to
            if option.read_depth:
                rd = []
                try:
                    rd.append(int(record1.attributes["read_depth"].strip("\"")))
                except KeyError:
                    pass
                try:
                    rd.append(int(record2.attributes["read_depth"].strip("\"")))
                except KeyError:
                    pass
            # now test if there's concordance
            try:
                if sorted(record2.attributes["alleles"].strip("\"").split("/")) != \
                        sorted(record1.attributes["alleles"].strip("\"").split("/")):
                    unmatching_count += 1
                    if option.enumerate:
                        record1.attributes["concordant"] = "false"
                        record2.attributes["concordant"] = "false"
                        print record1
                        print record2
                    if option.read_depth:
                        unmatching_read_depths.extend(rd)
                else:
                    matching_count += 1
                    if option.enumerate:
                        record1.attributes["concordant"] = "true"
                        record2.attributes["concordant"] = "true"
                        print record1
                        print record2
                    if option.read_depth:
                        matching_read_depths.extend(rd)
            # no alleles? not a SNP
            except KeyError:
                continue
        # now we print the result, being mindful of possible zero division problems, etc.
        if option.enumerate:
            pass
        elif option.read_depth:
            try:
                a = "%.1f" % mean(matching_read_depths)
                b = "%.1f" % median(matching_read_depths)
            except TypeError:
                a = "--"
                b = "--"
            try:
                c = "%.1f" % mean(unmatching_read_depths)
                d = "%.1f" % median(unmatching_read_depths)
            except TypeError:
                c = "--"
                d = "--"
            print ("%s %s : %s %s" % (a, b, c, d)).ljust(col_width),
        else:
            try:
                p = "%.1f%%" % (float(matching_count) / (matching_count + unmatching_count) * 100)
            except ZeroDivisionError:
                p = "--"
            if option.verbose:
                total_count = unmatching_count + matching_count
                print ("%s %s/%s" % (p, matching_count, total_count)).ljust(col_width),
            else:
                print p.ljust(col_width),
        # now we rewind, delete everything, and start again!
        temp_file_1.seek(0)
        temp_file_1.truncate()
        temp_file_2.seek(0)
        temp_file_2.truncate()
    # wrap up the line
    print ""
# print the legend describing what the column and row headings mean
if not option.enumerate:
    print "-" * 8
    file_number = 0
    for i in gff_files_1:
        file_number += 1
        print ("[%s]" % excel_column(file_number)).ljust(8),
        print i
    file_number = 0
    for i in gff_files_2:
        file_number += 1
        print ("[%s]" % file_number).ljust(8),
        print i
Example 6: parseMultipart
# Required import: from tempfile import TemporaryFile [as alias]
# Or: from tempfile.TemporaryFile import truncate [as alias]
import os
from tempfile import TemporaryFile

# Note: parse_headers, parse_header, isBoundaryValid and _is_termline are helper
# functions defined elsewhere in the original project and are not shown here.


def parseMultipart(fp, pdict, memfile_max=1024 * 1000, len_max=0):
    """
    Parse multipart content
    """
    # TODO: Do not store whole part contents in memory
    boundary = ''
    if 'boundary' in pdict:
        boundary = pdict['boundary']
    if not isBoundaryValid(boundary):
        raise ValueError('Invalid boundary in multipart form: {0}'.format(boundary))

    maxlen = 0
    nextpart = b'--' + boundary.encode()
    lastpart = b'--' + boundary.encode() + b'--'
    partdict = {}
    terminator = b''

    while terminator != lastpart:
        nbytes = -1
        data = None
        if terminator:
            # At start of next part. Read headers first.
            headers = parse_headers(fp, memfile_max)
            clength = headers.get('content-length')
            if clength is not None:
                try:
                    nbytes = int(clength)
                except ValueError:
                    pass
            if nbytes > 0:
                if maxlen and nbytes > len_max:
                    raise ValueError('Maximum content length exceeded')
                data = fp.read(nbytes)
            else:
                data = b''
        # Read lines until end of part.
        part_fp = TemporaryFile(mode='w+b')
        while 1:
            line = fp.readline(memfile_max)
            if line == b'':
                terminator = lastpart  # End outer loop
                break
            if _is_termline(line, nextpart):
                terminator = nextpart
                break
            if _is_termline(line, lastpart):
                terminator = lastpart
                break
            part_fp.write(line)
            while not line.endswith(b"\n"):
                line = fp.readline(memfile_max)
                if line == b'':
                    break
                part_fp.write(line)
        # Done with part.
        if data is None:
            continue
        if nbytes < 0:
            last = pre_last = None
            # Strip the final line terminator
            if part_fp.tell() >= 1:
                part_fp.seek(-1, os.SEEK_END)
                last = part_fp.read(1)
            if part_fp.tell() >= 2:
                part_fp.seek(-2, os.SEEK_END)
                pre_last = part_fp.read(1)
            trunc = 0
            if pre_last == b"\r" and last == b"\n":
                trunc = 2
            elif last == b"\n":
                trunc = 1
            if trunc > 0:
                part_fp.seek(-trunc, os.SEEK_END)
                part_fp.truncate()
        line = headers['content-disposition']
        if not line:
            continue
        key, params = parse_header(line)
        if key != 'form-data':
            continue
        if 'name' in params:
            name = params['name']
        else:
            continue
#......... some of the code is omitted here .........
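The most distinctive use of truncate() in this example is trimming the trailing CR/LF that precedes the next multipart boundary: seek one or two bytes back from the end, then truncate at that position. A small self-contained sketch of just that trick:

import os
from tempfile import TemporaryFile

def strip_trailing_newline(part_fp):
    """Remove a trailing CRLF or LF from the end of a seekable binary file."""
    size = part_fp.seek(0, os.SEEK_END)
    if size >= 2:
        part_fp.seek(-2, os.SEEK_END)
        tail = part_fp.read(2)
    elif size == 1:
        part_fp.seek(-1, os.SEEK_END)
        tail = part_fp.read(1)
    else:
        return
    if tail.endswith(b"\r\n"):
        trunc = 2
    elif tail.endswith(b"\n"):
        trunc = 1
    else:
        return
    part_fp.seek(-trunc, os.SEEK_END)
    part_fp.truncate()     # cut the file off right before the line terminator

part = TemporaryFile()
part.write(b"field value\r\n")
strip_trailing_newline(part)
part.seek(0)
print(part.read())         # b'field value'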