This article collects typical usage examples of Python's typing.IO class. If you are wondering what typing.IO is for, how to use it, or what real code that uses it looks like, the curated class code examples here may help.
Below are 15 code examples of the IO class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
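Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how typing.IO and its specializations are typically used to annotate file-like parameters. IO[str]/TextIO accept any text-mode stream, and IO[bytes]/BinaryIO any binary stream:

import io
from typing import IO, TextIO, BinaryIO

def count_lines(stream: TextIO) -> int:
    # Works with open(...), io.StringIO, sys.stdin, or any other text stream.
    return sum(1 for _ in stream)

def copy_stream(src: IO[bytes], dst: BinaryIO, chunk_size: int = 8192) -> None:
    # IO[bytes] and BinaryIO are interchangeable annotations for binary streams.
    while True:
        chunk = src.read(chunk_size)
        if not chunk:
            break
        dst.write(chunk)

print(count_lines(io.StringIO("a\nb\nc\n")))  # prints 3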
Example 1: http_get
import requests
from typing import IO

def http_get(url: str, temp_file: IO) -> None:
    req = requests.get(url, stream=True)
    content_length = req.headers.get('Content-Length')
    total = int(content_length) if content_length is not None else None  # unused here; see Example 7 for the progress-bar variant
    for chunk in req.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            temp_file.write(chunk)
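A usage sketch (the temporary-file handling and the URL are assumptions, not part of the original snippet): the IO-typed parameter is usually satisfied by a file object opened in binary mode, such as a tempfile:

import tempfile

with tempfile.NamedTemporaryFile(delete=False) as temp_file:
    http_get('https://example.com/archive.tar.gz', temp_file)  # hypothetical URL
    print('downloaded to', temp_file.name)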
Example 2: scrape_variables
def scrape_variables(host: Text, logs_file: IO) -> None:
    br = mechanize.Browser()
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    br.set_handle_equiv(True)
    # br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)

    login_url = urlparse.urljoin(host, '/login')
    logging.info('Starting login into %s', login_url)
    response = br.open(login_url)
    br.form = next(iter(br.forms()))
    br.form['username'] = 'monitor'
    with open('../data/secret_key.txt') as f:
        br.form['password'] = f.read()
    br.method = 'POST'
    br.submit()
    br.method = 'GET'
    logging.info('Successfully logged into %s', login_url)

    variables_url = urlparse.urljoin(host, '/monitor/variables')
    while True:
        try:
            response = br.open(variables_url)
        except urllib2.URLError as e:
            logging.error('Could not open "%s": %s', variables_url, e)
            time.sleep(59 + random.random())
            continue
        raw_vars = response.read()
        logs_file.write(raw_vars)
        logs_file.write('\n')
        # variables = json.loads(raw_vars)
        time.sleep(59 + random.random())
Example 3: run
import csv
import sys
import typing as t

def run(f: t.IO, out: t.IO = sys.stdout) -> None:
    # ColorfulWriter and RESET are defined elsewhere in the original module.
    r = csv.DictReader(f)
    rows = list(r)
    w = ColorfulWriter(out, fieldnames=list(rows[0].keys()))
    w.writeheader()
    w.writerows(rows)
    out.write(RESET)
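A plausible call site (assumed, not from the original project) passes an open CSV file and lets out default to sys.stdout:

with open('data.csv', newline='') as f:  # hypothetical input file
    run(f)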
Example 4: _get_single_df
def _get_single_df(
    stream: IO, filetype: Optional[TypeEnum], **kwargs
) -> Union[pd.DataFrame, Iterable[pd.DataFrame]]:
    """
    Read a stream and retrieve the data frame or data frame generator (chunks).
    It uses `stream.name`, which is the path to a local file (often temporary),
    to avoid closing it. It will be closed at the end of the method.
    """
    if filetype is None:
        filetype = TypeEnum(detect_type(stream.name))

    # Check encoding
    encoding = kwargs.get('encoding')
    if not validate_encoding(stream.name, encoding):
        encoding = detect_encoding(stream.name)
        kwargs['encoding'] = encoding

    # Check separator for CSV files if it's not set
    if filetype is TypeEnum.CSV and 'sep' not in kwargs:
        if not validate_sep(stream.name, encoding=encoding):
            kwargs['sep'] = detect_sep(stream.name, encoding)

    pd_read = getattr(pd, f'read_{filetype}')
    try:
        df = pd_read(stream.name, **kwargs)
    finally:
        stream.close()

    # In case of sheets, the df can be a dictionary
    if kwargs.get('sheet_name', NOTSET) is None:
        for sheet_name, _df in df.items():
            _df['__sheet__'] = sheet_name
        df = pd.concat(df.values(), sort=False)

    return df
Example 5: _from_io
def _from_io(self, source: IO):
    """
    Loads an existing JVM ClassFile from any file-like object.
    """
    read = source.read

    if unpack('>I', source.read(4))[0] != ClassFile.MAGIC:
        raise ValueError('invalid magic number')

    # The version is swapped on disk to (minor, major), so swap it back.
    self.version = unpack('>HH', source.read(4))[::-1]

    self._constants.unpack(source)

    # ClassFile access_flags, see section #4.1 of the JVM specs.
    self.access_flags.unpack(read(2))

    # The CONSTANT_Class indexes for "this" class and its superclass.
    # Interfaces are a simple list of CONSTANT_Class indexes.
    self._this, self._super, interfaces_count = unpack('>HHH', read(6))
    self._interfaces = unpack(
        f'>{interfaces_count}H',
        read(2 * interfaces_count)
    )

    self.fields.unpack(source)
    self.methods.unpack(source)
    self.attributes.unpack(source)
Example 6: decode
import binascii
from typing import IO

def decode(input: IO, output: IO) -> None:
    """Decode a file; input and output are binary files."""
    while True:
        line = input.readline()
        if not line:
            break
        s = binascii.a2b_base64(line)
        output.write(s)
Example 7: http_get
def http_get(url: str, temp_file: IO) -> None:
    req = requests.get(url, stream=True)
    content_length = req.headers.get('Content-Length')
    total = int(content_length) if content_length is not None else None
    progress = Tqdm.tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
Example 8: _download_to_stream
def _download_to_stream(self, blobname: str, stream: IO) -> bool:
    try:
        resource = self._azure_client.get_object(blobname)
    except ObjectDoesNotExistError:
        return False
    else:
        for chunk in resource.as_stream():
            stream.write(chunk)
        return True
Example 9: encode
import binascii
from typing import IO

MAXBINSIZE = 57  # as in the stdlib base64 module: MAXLINESIZE (76) // 4 * 3

def encode(input: IO, output: IO) -> None:
    """Encode a file; input and output are binary files."""
    while True:
        s = input.read(MAXBINSIZE)
        if not s:
            break
        while len(s) < MAXBINSIZE:
            ns = input.read(MAXBINSIZE-len(s))
            if not ns:
                break
            s += ns
        line = binascii.b2a_base64(s)
        output.write(line)
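Examples 6 and 9 mirror the decode/encode pair from the standard-library base64 module, so both expect streams opened in binary mode. A small round-trip sketch (file names are placeholders):

with open('report.pdf', 'rb') as src, open('report.b64', 'wb') as dst:
    encode(src, dst)  # write base64 output in 76-character lines

with open('report.b64', 'rb') as src, open('report_copy.pdf', 'wb') as dst:
    decode(src, dst)  # recover the original bytes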
Example 10: pack
def pack(self, out: IO):
    """
    Write the FieldTable to the file-like object `out`.

    .. note::

        Advanced usage only. You will typically never need to call this
        method as it will be called for you when saving a ClassFile.

    :param out: Any file-like object providing `write()`
    """
    out.write(pack('>H', len(self)))
    for field in self._table:
        field.pack(out)
Example 11: unpack
def unpack(self, source: IO):
    """
    Read the Field from the file-like object `source`.

    .. note::

        Advanced usage only. You will typically never need to call this
        method as it will be called for you when loading a ClassFile.

    :param source: Any file-like object providing `read()`
    """
    self.access_flags.unpack(source.read(2))
    self._name_index, self._descriptor_index = unpack('>HH', source.read(4))
    self.attributes.unpack(source)
Example 12: embed_file
def embed_file(self,
               input_file: IO,
               output_file_path: str,
               output_format: str = "all",
               batch_size: int = DEFAULT_BATCH_SIZE) -> None:
    """
    Computes ELMo embeddings from an input_file where each line contains a sentence tokenized by whitespace.
    The ELMo embeddings are written out in HDF5 format, where each sentence is saved in a dataset.

    Parameters
    ----------
    input_file : ``IO``, required
        A file with one tokenized sentence per line.
    output_file_path : ``str``, required
        A path to the output hdf5 file.
    output_format : ``str``, optional, (default = "all")
        The embeddings to output. Must be one of "all", "top", or "average".
    batch_size : ``int``, optional, (default = 64)
        The number of sentences to process in ELMo at one time.
    """
    assert output_format in ["all", "top", "average"]

    # Tokenizes the sentences.
    sentences = [line.strip() for line in input_file if line.strip()]
    split_sentences = [sentence.split() for sentence in sentences]
    # Uses the sentence as the key.
    embedded_sentences = zip(sentences, self.embed_sentences(split_sentences, batch_size))

    logger.info("Processing sentences.")
    with h5py.File(output_file_path, 'w') as fout:
        for key, embeddings in Tqdm.tqdm(embedded_sentences):
            if key in fout.keys():
                logger.warning(f"Key already exists in {output_file_path}, skipping: {key}")
            else:
                if output_format == "all":
                    output = embeddings
                elif output_format == "top":
                    output = embeddings[2]
                elif output_format == "average":
                    output = numpy.average(embeddings, axis=0)

                fout.create_dataset(
                    key,
                    output.shape, dtype='float32',
                    data=output
                )
    input_file.close()
Example 13: unpack
def unpack(self, source: IO):
    """
    Read the ConstantPool from the file-like object `source`.

    .. note::

        Advanced usage only. You will typically never need to call this
        method as it will be called for you when loading a ClassFile.

    :param source: Any file-like object providing `read()`
    """
    count = unpack('>H', source.read(2))[0]
    for _ in repeat(None, count):
        name_index, length = unpack('>HI', source.read(6))
        info_blob = source.read(length)
        self._table.append((name_index, info_blob))
Example 14: html_table_to_csv
import csv
import sys
import bs4
from typing import IO

def html_table_to_csv(input_f: IO, output_f: IO, table_num: int) -> None:
    # RX_TH_OR_TD is defined elsewhere in the original module (it matches <th>/<td> tags).
    doc = bs4.BeautifulSoup(input_f.read(), 'html5lib')
    tables = doc.find_all('table')
    try:
        table = tables[table_num]
        trows = table.find_all('tr')
        csv_writer = csv.writer(output_f)
        for trow in trows:
            cells = trow.find_all(RX_TH_OR_TD)
            csv_writer.writerow([cell.text.strip() for cell in cells])
    except IndexError:
        sys.stderr.write('ERROR: no table at index {}\n'.format(table_num))
        sys.exit(1)
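A possible call site (file names and the table index are illustrative): the HTML input is read as text, while the CSV output should be opened with newline='' as the csv module recommends:

with open('page.html', encoding='utf-8') as input_f, \
        open('tables.csv', 'w', newline='') as output_f:
    html_table_to_csv(input_f, output_f, table_num=0)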
Example 15: _print_truncate
import itertools
from typing import IO, Iterable

def _print_truncate(
    lines: Iterable,
    max_lines: int,
    outfile: IO,
) -> None:
    for i, line in enumerate(itertools.islice(lines, max_lines)):
        if i + 1 == max_lines:
            outfile.write('... (diff goes on) ...\n')
        else:
            outfile.write(line)
            if not line.endswith('\n'):
                outfile.write('<EOF>\n')
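For completeness, a call such as the following (assumed, not from the original project) caps diff output at 20 lines and marks a missing trailing newline:

import sys

diff_lines = ['- old value\n', '+ new value\n', 'context line without newline']
_print_truncate(diff_lines, max_lines=20, outfile=sys.stdout)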