本文整理汇总了Python中tempfile.NamedTemporaryFile.tell方法的典型用法代码示例。如果您正苦于以下问题:Python NamedTemporaryFile.tell方法的具体用法?Python NamedTemporaryFile.tell怎么用?Python NamedTemporaryFile.tell使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tempfile.NamedTemporaryFile
的用法示例。
在下文中一共展示了NamedTemporaryFile.tell方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _write_local_data_files
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0].replace(' ', '_'), cursor.description))
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
for row in cursor:
# Convert if needed
row = map(self.convert_types, row)
row_dict = dict(zip(schema, row))
s = json.dumps(row_dict, sort_keys=True)
s = s.encode('utf-8')
tmp_file_handle.write(s)
# Append newline to make dumps BQ compatible
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
return tmp_file_handles
示例2: _write_local_data_files
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def _write_local_data_files(self, cursor):
    """
    Takes a cursor, and writes results to a local file.

    :return: A dictionary where keys are filenames to be used as object
        names in GCS, and values are file handles to local files that
        contain the data for the GCS objects.
    """
    schema = [field[0] for field in cursor.description]
    part_index = 0
    current = NamedTemporaryFile(delete=True)
    handles = {self.filename.format(part_index): current}
    for raw in cursor:
        # Normalise values (datetimes -> utc seconds, decimals -> floats).
        record = dict(zip(schema, map(self.convert_types, raw)))
        # TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
        payload = json.dumps(record)
        if PY3:
            payload = payload.encode('utf-8')
        current.write(payload)
        # Newline-delimited JSON keeps the dump BigQuery compatible.
        current.write(b'\n')
        # Rotate to a new temp file once the size cap is hit.
        if current.tell() >= self.approx_max_file_size_bytes:
            part_index += 1
            current = NamedTemporaryFile(delete=True)
            handles[self.filename.format(part_index)] = current
    return handles
示例3: _write_local_data_files
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def _write_local_data_files(self, cursor):
    """
    Takes a cursor, and writes results to a local file.

    :return: A dictionary where keys are filenames to be used as object
        names in GCS, and values are file handles to local files that
        contain the data for the GCS objects.
    """
    chunk = 0
    out = NamedTemporaryFile(delete=True)
    uploads = {self.filename.format(chunk): out}
    for row in cursor:
        # Rows expose their column names via ._fields (namedtuple-style).
        entry = self.generate_data_dict(row._fields, row)
        encoded = json.dumps(entry)
        if PY3:
            encoded = encoded.encode('utf-8')
        out.write(encoded)
        # Newline-delimited JSON keeps the dump BigQuery compatible.
        out.write(b'\n')
        # Start a new temp file when the current one reaches the size cap.
        if out.tell() >= self.approx_max_file_size_bytes:
            chunk += 1
            out = NamedTemporaryFile(delete=True)
            uploads[self.filename.format(chunk)] = out
    return uploads
示例4: TombOutputThread
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
class TombOutputThread(QtCore.QThread):
    """Tail a temp file capturing tomb's output, re-emitting each line.

    Whoever spawns the tomb subprocess should redirect its output into
    ``self.buffer``; this thread polls that file and emits signals.
    """
    line_received = QtCore.pyqtSignal(QtCore.QString)
    error_received = QtCore.pyqtSignal(QtCore.QString)
    progressed = QtCore.pyqtSignal(int)  # value in percent

    def __init__(self):
        QtCore.QThread.__init__(self)
        self.buffer = NamedTemporaryFile()

    def run(self):
        # Poll forever: emit complete lines as they appear in the buffer.
        while True:
            where = self.buffer.tell()
            line = self.buffer.readline()
            if not line:
                # Nothing new yet: sleep, then rewind to `where` so a
                # partially-written line is re-read in full next time.
                time.sleep(1)
                self.buffer.seek(where)
            else:
                # ansi color escapes messes this up, but it's ok anyway
                self.line_received.emit(line)
                self.parse_line(line)

    def parse_line(self, line):
        # This could be simplified, and s/search/match, if --no-color supported
        # see #59
        # TODO: this should be moved to tomblib.parse
        parsed = parse_line(line)
        if parsed and parsed['type'] == 'error':
            # BUG FIX: `parsed` is a mapping (indexed as parsed['type'] just
            # above), so the message must be read with item access; the old
            # `parsed.content` raised AttributeError on every error line.
            self.error_received.emit(parsed['content'])
示例5: xls_export
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def xls_export(request, username, id_string):
    """Serve a form's submissions as an Excel (xls/xlsx) attachment."""
    owner = get_object_or_404(User, username=username)
    xform = get_object_or_404(XForm, id_string=id_string, user=owner)
    if not has_permission(xform, owner, request):
        return HttpResponseForbidden(_(u"Not shared."))
    query = request.GET.get("query")
    force_xlsx = request.GET.get("xlsx") == "true"
    xls_df_builder = XLSDataFrameBuilder(username, id_string, query)
    excel_defs = {
        "xls": {
            "suffix": ".xls",
            "mime_type": "vnd.ms-excel",
        },
        "xlsx": {
            "suffix": ".xlsx",
            # TODO: check xlsx mime type
            "mime_type": "vnd.openxmlformats",
        },
    }
    ext = "xlsx" if force_xlsx else "xls"
    # Large datasets must fall back to xlsx regardless of the request.
    if xls_df_builder.exceeds_xls_limits:
        ext = "xlsx"
    try:
        temp_file = NamedTemporaryFile(suffix=excel_defs[ext]["suffix"])
        xls_df_builder.export_to(temp_file.name)
        if request.GET.get("raw"):
            id_string = None
        response = response_with_mimetype_and_name(
            excel_defs[ext]["mime_type"], id_string, extension=ext)
        response.write(temp_file.read())
        # Seek to EOF so tell() yields the size for Content-Length.
        temp_file.seek(0, os.SEEK_END)
        response["Content-Length"] = temp_file.tell()
        temp_file.close()
        return response
    except NoRecordsFoundError:
        return HttpResponse(_("No records found to export"))
示例6: make_image_file
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def make_image_file(dimensions=(320, 240), extension=".jpeg", force_size=None, orientation=None):
    """
    Yields a named temporary file created with the specified image type and
    options.

    Note the default dimensions are unequal (not a square) ensuring that
    center-square cropping logic will be exercised during tests.

    The temporary file will be closed and deleted automatically upon exiting
    the `with` block.
    """
    image = Image.new('RGB', dimensions, "green")
    image_file = NamedTemporaryFile(suffix=extension)
    try:
        if orientation and orientation in xrange(1, 9):
            # Valid EXIF orientation codes are 1..8; embed the requested one.
            exif_bytes = piexif.dump({'0th': {piexif.ImageIFD.Orientation: orientation}})
            image.save(image_file, exif=exif_bytes)
        else:
            image.save(image_file)
        if force_size is not None:
            # Pad with NUL bytes until the file reaches exactly force_size.
            image_file.seek(0, os.SEEK_END)
            bytes_to_pad = force_size - image_file.tell()
            # write in hunks of 256 bytes
            full_hunks, leftover = divmod(bytes_to_pad, 256)
            hunk = bytearray(256)
            for _ in xrange(full_hunks):
                image_file.write(hunk)
            # divmod guarantees 0 <= leftover < 256, so this is safe.
            image_file.write(bytearray(leftover))
            image_file.flush()
        image_file.seek(0)
        yield image_file
    finally:
        image_file.close()
示例7: _write_local_data_files
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def _write_local_data_files(self, cursor):
    """
    Takes a cursor, and writes results to a local file.

    Output format is controlled by ``self.export_format``: ``'csv'`` writes
    through a csv writer, anything else writes newline-delimited JSON.

    :param cursor: DB-API cursor whose ``description`` supplies column names.
    :return: A dictionary where keys are filenames to be used as object
        names in GCS, and values are file handles to local files that
        contain the data for the GCS objects.
    """
    schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
    col_type_dict = self._get_col_type_dict()
    file_no = 0
    tmp_file_handle = NamedTemporaryFile(delete=True)
    if self.export_format == 'csv':
        file_mime_type = 'text/csv'
    else:
        file_mime_type = 'application/json'
    files_to_upload = [{
        'file_name': self.filename.format(file_no),
        'file_handle': tmp_file_handle,
        'file_mime_type': file_mime_type
    }]
    if self.export_format == 'csv':
        # csv mode needs a writer bound to the current temp file; it is
        # re-created below every time we roll over to a new file.
        csv_writer = self._configure_csv_file(tmp_file_handle, schema)
    for row in cursor:
        # Convert datetime objects to utc seconds, and decimals to floats.
        # Convert binary type object to string encoded with base64.
        row = self._convert_types(schema, col_type_dict, row)
        if self.export_format == 'csv':
            csv_writer.writerow(row)
        else:
            row_dict = dict(zip(schema, row))
            # TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
            s = json.dumps(row_dict, sort_keys=True)
            if PY3:
                s = s.encode('utf-8')
            tmp_file_handle.write(s)
            # Append newline to make dumps BigQuery compatible.
            tmp_file_handle.write(b'\n')
        # Stop if the file exceeds the file size limit.
        # NOTE(review): tell() on the handle may lag the csv writer's own
        # buffering — size check is approximate by design (see attr name).
        if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
            file_no += 1
            tmp_file_handle = NamedTemporaryFile(delete=True)
            files_to_upload.append({
                'file_name': self.filename.format(file_no),
                'file_handle': tmp_file_handle,
                'file_mime_type': file_mime_type
            })
            if self.export_format == 'csv':
                csv_writer = self._configure_csv_file(tmp_file_handle, schema)
    return files_to_upload
示例8: write
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def write(self, make_backup=True):
    """Write the GUANO file to disk.

    Rewrites the .WAV with a trailing 'guan' metadata sub-chunk, verifies
    the result by re-parsing it, then atomically replaces the original
    (optionally keeping a copy under a GUANO_BACKUP sibling directory).

    NOTE: Python 2 code (``print >> sys.stderr`` below).
    """
    # FIXME: optionally write *other* subchunks for redundant metadata formats

    # prepare our metadata for a byte-wise representation
    md_bytes = bytearray(self._as_string(), 'utf-8')
    if len(md_bytes) % 2:
        md_bytes.append(ord('\n'))  # pad for alignment on even word boundary

    # create tempfile and write our vanilla .WAV ('data' sub-chunk only)
    tempfile = NamedTemporaryFile(mode='w+b', prefix='guano_temp-', suffix='.wav', delete=False)
    # preserve the original file's timestamps/permissions on the replacement
    shutil.copystat(self.filename, tempfile.name)
    with closing(wave.Wave_write(tempfile)) as wavfile:
        wavfile.setparams(self.wav_params)
        wavfile.writeframes(self.wav_data)

    # add the 'guan' sub-chunk after the 'data' sub-chunk
    # NOTE(review): seek(tell()) looks like a no-op — presumably a defensive
    # position sync after the wave writer; confirm before removing.
    tempfile.seek(tempfile.tell())
    tempfile.write(struct.pack('<4sL', 'guan', len(md_bytes)))
    tempfile.write(md_bytes)

    # fix the RIFF file length (RIFF size field lives at offset 0x04 and
    # excludes the 8-byte RIFF header itself)
    total_size = tempfile.tell()
    tempfile.seek(0x04)
    tempfile.write(struct.pack('<L', total_size - 8))
    tempfile.close()

    # verify it parses cleanly before we clobber the original
    GuanoFile(tempfile.name)

    # finally overwrite the original with our new version
    if make_backup:
        backup_dir = os.path.join(os.path.dirname(self.filename), 'GUANO_BACKUP')
        backup_file = os.path.join(backup_dir, os.path.basename(self.filename))
        if not os.path.isdir(backup_dir):
            print >> sys.stderr, 'Creating backup dir: ' + backup_dir
            os.mkdir(backup_dir)
        if os.path.exists(backup_file):
            os.remove(backup_file)
        os.rename(self.filename, backup_file)
    os.rename(tempfile.name, self.filename)
示例9: prepareUploadFile
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def prepareUploadFile(prefix=""):
    """Build a (FileUpload, file handle) pair for upload tests.

    The temp file contains a google-site-verification line referencing
    its own name; the caller owns both returned objects.
    """
    handle = NamedTemporaryFile(mode='w+', prefix=prefix)
    handle.write("google-site-verification: " + handle.name)
    # Measure the payload by seeking to EOF, then rewind for reading.
    handle.seek(0, 2)
    size = handle.tell()
    handle.seek(0)
    environment = {'REQUEST_METHOD': 'PUT'}
    headers = {
        'content-type': 'text/plain',
        'content-length': size,
        'content-disposition': 'attachment; filename=%s' % handle.name,
    }
    field = FieldStorage(fp=handle, environ=environment, headers=headers)
    return FileUpload(field), handle
示例10: make_blocks
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def make_blocks(num_records=2000, codec='null', write_to_disk=False):
    """Serialize generated records with fastavro and read them back as blocks.

    :param num_records: how many records to generate and write.
    :param codec: avro codec name passed through to ``fastavro.writer``.
    :param write_to_disk: if True serialize via a real temp file instead of
        an in-memory buffer.
    :return: ``(blocks, records, byte_count)`` — the parsed blocks, the
        source records, and the serialized payload size in bytes.
    """
    records = make_records(num_records)
    new_file = NamedTemporaryFile() if write_to_disk else MemoryIO()
    fastavro.writer(new_file, schema, records, codec=codec)
    # File position right after writing equals the total serialized size.
    # Renamed from `bytes`, which shadowed the builtin type.
    byte_count = new_file.tell()
    new_file.seek(0)
    block_reader = fastavro.block_reader(new_file, schema)
    blocks = list(block_reader)
    new_file.close()
    return blocks, records, byte_count
示例11: process_askue
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def process_askue():
    """Fetch the newest ASKUE inbox file from FTP, optionally append locally
    exported route records, upload the result, and clean the inbox.

    Best-effort: if appending fails the file is rolled back to its
    as-downloaded state and sent unchanged.
    """
    e = Exporter()
    try:
        logging.debug('Trying to connect to FTP server...')
        with FTP(S.FTP_SERVER, S.FTP_USER, S.FTP_PASSWORD, timeout=5) as fc:
            logging.debug('Looking for files in FTP directory')
            # Find files and retrieve it
            inbox_files = fc.mlsd(S.REMS_PATH)
            # NOTE: comprehension variable `e` shadows the Exporter only
            # inside the comprehension scope (Python 3); outer `e` is intact.
            filenames = [e[0] for e in inbox_files if askue_filename(e[0])]
            if not filenames:
                logging.info('Inbox directory is empty...')
                return
            if len(filenames) > 1:
                logging.debug(
                    'More than 1 file were found: {}'.format(
                        '\n'.join(filenames)))
            # Process only the most recent file (by date encoded in its name).
            rfile = max(filenames, key=date_from_filename)
            logging.info('Retrieving {}...'.format(rfile))
            tf = NamedTemporaryFile()
            fc.retrbinary('RETR {}'.format(j(S.REMS_PATH, rfile)), tf.write)
            # Remember where the downloaded content ends so a failed append
            # can be rolled back below.
            ftp_pos = tf.tell()
            try:
                if S.APPEND_ON:
                    lines = (record_to_csv(rec) for rec in
                             e.get_routes(datetime.now()))
                    append_lines(tf, lines)
                else:
                    logging.debug(
                        'Will not append lines (switched off in settings)')
            except Exception:
                logging.exception(
                    'Error appending lines to file! Sending as is')
                # Roll back to the pristine downloaded content.
                tf.seek(ftp_pos)
                tf.truncate()
            tf.seek(0)
            dest_path = j(S.IOMM_PATH, rfile)
            # Send file back to FTP
            logging.info('Sending file... {}'.format(dest_path))
            fc.storbinary('STOR {}'.format(dest_path), tf)
            logging.info('Cleaning up directory...')
            # Delete every matched inbox file, not just the one processed.
            for fname in filenames:
                filepath = j(S.REMS_PATH, fname)
                fc.delete(filepath)
    finally:
        e.close_connection()
示例12: download_url
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def download_url(self, url, partial_fetch=False):
    """Stream ``url`` into a named temp file, optionally only partially.

    :param url: the resource to fetch.
    :param partial_fetch: stop after roughly 2 MB when True.
    :return: ``(content_type, content_length, file_handle)`` tuple.
    """
    resp = self.http_session.get(url, stream=True, timeout=(60, 120))
    resp.raise_for_status()
    if not os.path.exists(TEMP_DIR_PATH):
        log.debug('Creating temp directory %s' % TEMP_DIR_PATH)
        os.makedirs(TEMP_DIR_PATH)
    # Spool the media item into a temp file on disk.
    media_file = NamedTemporaryFile(dir=TEMP_DIR_PATH)
    # A partial fetch grabs at most 2 MB (or the whole body if smaller).
    target_size = 1024 * 1024 * 2
    content_length = resp.headers.get('content-length')
    if content_length and int(content_length) < target_size:
        target_size = int(content_length)
    retrieved_bytes = 0
    for chunk in resp.iter_content(chunk_size=512 * 1024):
        if not chunk:  # filter out keep-alive chunks
            continue
        media_file.write(chunk)
        retrieved_bytes += len(chunk)
        if partial_fetch and retrieved_bytes >= target_size:
            break
    media_file.flush()
    log.debug('Fetched item %s [%s/%s]' % (url, retrieved_bytes, content_length))
    # Without a content-length header on a full fetch, derive the size
    # from what was actually written to disk.
    if not content_length and not partial_fetch:
        media_file.seek(0, 2)
        content_length = media_file.tell()
        media_file.seek(0, 0)
    return (
        resp.headers.get('content-type'),
        content_length,
        media_file
    )
示例13: csv_export
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def csv_export(request, username, id_string):
    """Serve a form's submissions as a CSV attachment."""
    owner = get_object_or_404(User, username=username)
    xform = get_object_or_404(XForm, id_string=id_string, user=owner)
    if not has_permission(xform, owner, request):
        return HttpResponseForbidden(_(u"Not shared."))
    query = request.GET.get("query")
    builder = CSVDataFrameBuilder(username, id_string, query)
    try:
        temp_file = NamedTemporaryFile(suffix=".csv")
        builder.export_to(temp_file)
        if request.GET.get("raw"):
            id_string = None
        response = response_with_mimetype_and_name(
            "application/csv", id_string, extension="csv")
        # Rewind so the whole exported file is streamed into the response.
        temp_file.seek(0)
        response.write(temp_file.read())
        # Offset after seeking to EOF gives the size for Content-Length.
        temp_file.seek(0, os.SEEK_END)
        response["Content-Length"] = temp_file.tell()
        temp_file.close()
        return response
    except NoRecordsFoundError:
        return HttpResponse(_("No records found to export"))
示例14: xls_export
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def xls_export(request, username, id_string):
    """Serve a form's submissions as an Excel attachment (xls or xlsx)."""
    owner = get_object_or_404(User, username=username)
    xform = get_object_or_404(XForm, id_string=id_string, user=owner)
    if not has_permission(xform, owner, request):
        return HttpResponseForbidden(_(u'Not shared.'))
    query = request.GET.get("query")
    force_xlsx = request.GET.get('xlsx') == 'true'
    builder = XLSDataFrameBuilder(username, id_string, query)
    excel_defs = {
        'xls': {'suffix': '.xls', 'mime_type': 'vnd.ms-excel'},
        # TODO: check xlsx mime type
        'xlsx': {'suffix': '.xlsx', 'mime_type': 'vnd.openxmlformats'},
    }
    ext = 'xlsx' if force_xlsx else 'xls'
    # Datasets beyond xls limits must be exported as xlsx regardless.
    if builder.exceeds_xls_limits:
        ext = 'xlsx'
    try:
        temp_file = NamedTemporaryFile(suffix=excel_defs[ext]['suffix'])
        builder.export_to(temp_file.name)
        if request.GET.get('raw'):
            id_string = None
        response = response_with_mimetype_and_name(
            excel_defs[ext]['mime_type'], id_string, extension=ext)
        response.write(temp_file.read())
        # Seek to EOF so tell() yields the size for Content-Length.
        temp_file.seek(0, os.SEEK_END)
        response['Content-Length'] = temp_file.tell()
        temp_file.close()
        return response
    except NoRecordsFoundError:
        return HttpResponse(_("No records found to export"))
示例15: test_binary_guesser
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import tell [as 别名]
def test_binary_guesser():
"""Test bio_utils' binary_guesser with binary and text data"""
# Store data
binary_data = b'\x8e\xd2\x837U\xbc\\!H\xc8\xb1O\xac\x9e\xbf\xd4b\x82\xc9' \
b'\xd7\xaa\xb9\x16Uo5m\r\x00\x1e\xdd\x978\x00Rj\xe2Ng\xc3' \
b'=\xe6N}\x92\xf0(+\xa3\x99\\w\xe0\xa6\xb4\xa4\xc2\x90\x81' \
b'\[email protected]\x10\x0f_\xdf\xdeo\r\xdc\xcd<\x7fq\x87\xb4\n\xcd' \
b'\xd2\r=\xfb\x84\xfb\xa5\xc0\x9e\xb4wl6j\xa9\xae\xe5\xc1' \
b'\xfb^\\L\xc8\x0b\xd1fU\xd1\xdd]\x06\x19\xf7\xc6\x90?x' \
b'\x06\x8ab\x0b\x14\xa4\x00z\x83\xe8\x90\[email protected]\xba~\xbb' \
b'\xcf\x90\xb2\xdb>^A\xd1\xd45\xd7\xbc\x99\xf26\xf4\xa0' \
b'\x8f-\x04)\xf9[\x7f\xca\x81\xcd\x04\xefd\x9ci\xe8lH' \
b'\xce\xb8\xe6R\xe4#\xb5\x16\x97a\xd2\xda2\x1d\x9d\xb1#1 ' \
b'\xe1u\x04g2\xe4\xf0B\xa6\xcd\x00q\x9d=N\x1f\xf1%\xa6' \
b'\x89\xc2\xb4j\xeb\x90\x07>kJ\xefi\xd2tp\xb0\xf1\xb7' \
b'\xbb\xc8\xa8cZ\x0c\x88\xe2\x08\x0b\x05\xddS\x86\xa4s' \
b'\x1ck\x90\xa3\t(\x03n\xe8S\x8a\x03\xe3*\xb4\x02\x06%' \
b'\xfe2.?&\x13\x94\xea7\xd1\xb9\xef\xe1\x94Y\xbd58\xf4Y' \
b'\x13\xe9r\x90\x84\x0e{\xe2\x98\x12\xff\xf4f\x87J\xfc:' \
b'\xd7\xd9\xc6\xbf\xd3IU\xf5\\\xa1\xb0\xad\x04#\x9c\x0c' \
b'\x1d\x90\xbb\x93\xee\xbb\r\xa7\x96\t\x8b\xc1\x91\xecl' \
b'\xe1\[email protected]\xa7\x98\re\x9b\x8fy\xb8U\x18\x04z\xe8\rT?' \
b'\xed\xb0\n\xf7*\xc8\xce\xb5N8\xaeh\x06\x84\'\xdd6SI' \
b'\xd6\xf9\xbdz\xd3\xab\xe3\xd9\xb3*BBd\xc0\x9d\xd6\x8a' \
b'\xb1\xe8\xc4\xb9\xacw|>\x80y\x86\xfcM!\x1b\xc9\xff\x93' \
b'\x8d\xb5\x89IL\x93J\x88\x0b\xe5\'\xbd\x13\xa9\xd5\xa0' \
b'\xe9Rs\xce,\x8e%\xdbQ\x85##I\x93\x04\xec\x98V\x8d\x9b' \
b'\xd9B9?z\'>Aq\x10`&\x0e\xa1\xb2\x94\x0c}"QI\x82\xf5.O' \
b'\x9a:uu|\xdd\x86^\xfd\x0bu\xbf05\xea\\e\xc7\\\xbe\xd9' \
b'\x98\x0fFo9\xb1\n`\xe9\x8ccg\n\x13\xcb\x1b!\xb2\xcdt|' \
b'\xc7!\xfawn3\xf0p\xb1n\xb6^\xe1;S\xa0\xf3y.\x8e\x83{' \
b'\x9f\x03\xa1\xfe\x8b\xae\xd4\xfa\xafh\xefP\x8c\xa0\xc1' \
b'\x8dWW\x85\xa0\xfeT\xa8\xa3\xe1\x85\x11G\x0f5\x83\xec' \
b'\xebvJ\x1a(\xbdk\x8c\xbbf\x81\x1d\xc0\x91[\x1c\x9d\xa4' \
b'\x0c\x81\xfe\x94-\xd9\xa0\xd3\x0c\xe0~\r\x8eZ\xc91>\xac' \
b'\x935\x94H\xfeN\x02\t\xe5\xb15X3\xcb3n\xec\x82\xbcl\x05' \
b'\xa7\x07X\xc6\x1a`\x1b\xd3\x85\x0c<c\x81K$\xb9#\x12h' \
b'\xa9gN\xce\x8f:\x0e\xe1r\xf2K\xc1\x05\xa5J6\x12\xf8\xd7' \
b'\xce\[email protected]\xea\xb3\x0c]\x89\xe3\x9b)\xcd\x11\x06\x9bH4\n' \
b'\xad\xbd\xdb\x80U\r\x9e\xf6h$;Gov\xb3\x03\x88a\x81.MA' \
b'\x99\xc2\xc2Q\x1c=3c#)\xfb\xc1\x10f<xI\xef\xb2\xdcP' \
b'\xd9P\x1d\xc68\xec#-\xbd\xf2\x8c\x16a\xaa\x1a\xb6qb\x15' \
b'\xa8\xcct\xb8e\xc9\xbb\xd6S\x01 U\xcfw\xbd\xc0\xab\xb3l' \
b'\x1d\xd2\xa6k\x04\x06G_\x0e\x9bjam\xb4\xc4-\xcf\xad\x07c' \
b'\xf9"N\x8c\xe3r.\x0cq\xe2\x8c\x99\xd5\xa9\xfc\xbevRW7' \
b'\x17y\xfd\xbf\x9bq\t\x92\x1d\xc9\x19E\xd5\xedJ\xea9\xa4' \
b'\xd26~\xcc\x12\x9b\x12\xc4\x96(\xbe\xd7\x05-\xc9\x9f\x02' \
b'\xe2\x08f\xaf\'J\x0c\xb1\xcd\xa6\x80k)s\xa8\xbe\x15\x9d' \
b'\r}P2\xa1u\r~T\xedq\xa1X3o\x0b\xcb\x9dN\x8dAME\xe9\xcb\n' \
b'\xc6 ,\n\xa3\xba\x9a\x15\xc5-\xbaW\x89y?\xe3\x16 T!\xf0' \
b'\xf5\xfd\xa3Ks3\xb7\xe9F#\xdd\xebQ\xa9+#\xf9WG\x05\x93' \
b'\x93\x9a\x127\xf7d\xf2\x1cx\x9a2\x0fB\xber*\xc4\x90\xf8' \
b'\x07\xd7#\xf4\xff\xc0\xdcF\xd7<d\xb0\xdb\xcf\xa1\x1e' \
b'\xd2\x98\xde\xd1=u\xa6\xc4\x81\xf0\x04#x\xb6\xde\x0e\xbe' \
b'\xc6\x1b:\x10\x8f\xdf\xa3\x99E\xa2\xc2W\xde\xa7\x03\xe6x' \
b'\xc3\x07\x9d\xf1\x01$\x1d\xa1L\xad\xe8bnI\x14\xe7\xc1,'
text_data = 'BGwrYz3oUOoys8NJQN0Ju43r28l/bdXne8YbOZWiPMMoZFyxp9Qmc4NK6k' \
'Bs/DA2ZougW3RVZGAs\n3RRPLU78oRpTH3jzSViqj0jEtpMIwpOofhDjyP' \
'8bM7/bHWIa9XruomgdnOxkttqMc/Mxj6ZcODlv\nGADtY86z+/VdfO9lDj' \
'nwYmkkvjPN3qxpy6LIx9ZPMKpwCzTheidJR95u6gG+1ofA5HYaLIReujUn' \
'\ntvtZKu49pmiEuz5tT0VWRPHR/7q2Eg5u7SZAhlWtOW+G/P7QkLFButy8' \
'sArJwCBtEl6DH7B+L570\nZxfBaF1yaFU7VmZNL3e6MIq2Lgkk6TU3Ezvy' \
'LMB1ZLt8Zpst4tL814fMmJ6QazUaafG73YQkmoVg\nGdbemZBu3CLxJ3iX' \
'i9NPZxDionF9yNAt7gdiGqrVC3lRJIgSF1wn5/jqsdv8OhBI98DWOOYGmv' \
'EJ\nM+DztfOx4KQpA4TSunCRK/2H6POolGN1gOXbteUZY4cA2FreVW15QG' \
'/an30epRiKH/cgeNdEuIIe\niFsWt62tFTxXaQZZbc/p/hwUJ7iSMeYpq7' \
'WgYmJQmkdHggKFFZniuI5VyE1YHqVu1bZEhLaI3XSJ\npGF9dvGRCamzGO' \
'xLnz7TsjbVM45maSPXGJVw5OgZrZhqPdZNKgplblL8xvg//lRF582cYQFy' \
'yM8X\nOGqN83/QKo02FwEdqGg6DD5zzbLys4K/HjYguARUHLMBziFCvq2x' \
'9z31pSJUUCaBVit0Z4S4cCiK\narptw/91PnBJCdchBk0T62Kt4E41ClWV' \
'OUWZcLKWVhW689HLrvO4YCBi+qZDtTJFK1cmahAh9xZj\n1KmfvZzM6QFB' \
'RTtH2qzvEsgiA6lu9u1HS8ohHFxEYDJ32XKoNSQtarfOpjw/sA3kUaBi5a' \
'1Josah\nXDyGoXSXdtVq2wdZLLf7uuwbTUZae6j+bl5R7dYTkKzhsaVmpU' \
'zkrCHjl7XB+9YfpNwiCYPIfZSQ\nNluAEf2OeGozMipZ47fh9PMvWHri3g' \
'8pA/7B9Nn8K3mSmEDLBBZgkcKynR6rtSgzj2hIX0qS0/iX\nihk5ZjvZiu' \
'tqPiix6j+SSl59jk2WERh1IVHHWtBJUknbTlV3reTL+aWZHfkUioA0RSRi' \
'cwBTY6ou\nnypnq8l4mPTWUCZReDz7N5OEGWquroD8Fv4+IB5EviVI6Xrj' \
'Yil8m0rIjtbmwgFK0kSvkTEUI0DD\nCH3TY/+tXgLWA6scXG46T9+deuM0' \
'F7H/+4iRfnLV1LMV8J+roIFcg3VPX1yBW4wryXNdERVNhbTk\nI/9c17pC' \
'8fWqhv8kLBvcZcbzn6XDkKWXcQ6VOwiopYw/b6HaPDR7zSeBhNoPPJEw5q' \
'q6ZSs2eA==\n'
binary_handle = NamedTemporaryFile(mode='wb+')
binary_handle.write(binary_data)
binary_handle.seek(0)
binary_guesser(binary_handle)
assert binary_handle.tell() == 0
text_handle = NamedTemporaryFile(mode='wt+')
text_handle.write(text_data)
text_handle.seek(256)
try:
binary_guesser(text_handle, num_bytes=128)
except FormatError as error:
assert error.message == '{0} is probably not a binary ' \
'file'.format(text_handle.name)
#.........这里部分代码省略.........