This article collects typical usage examples of the Python method tensorflow.python.lib.io.file_io.FileIO. If you are wondering what file_io.FileIO does, how to use it, or want concrete examples, the curated code samples below should help. You can also explore the containing module, tensorflow.python.lib.io, for related usage examples.
The 15 code examples of file_io.FileIO below are ordered by popularity.
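Before the examples, here is a minimal sketch of file_io.FileIO itself: it mirrors the built-in open(), but also understands Google Cloud Storage (and other TensorFlow filesystem) paths. The gs:// path in the comment is a hypothetical placeholder.

from tensorflow.python.lib.io import file_io

with file_io.FileIO('/tmp/example.txt', mode='w') as f:  # or 'gs://my-bucket/example.txt'
  f.write('hello\n')
with file_io.FileIO('/tmp/example.txt', mode='r') as f:
  print(f.read())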
Example 1: make_request_json
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def make_request_json(self, uri, output_json):
  """Produces a JSON request suitable to send to CloudML Prediction API.

  Args:
    uri: The input image URI.
    output_json: Path of the output JSON file where the request will be
      written.
  """
  def _open_file_read_binary(uri):
    try:
      return file_io.FileIO(uri, mode='rb')
    except errors.InvalidArgumentError:
      return file_io.FileIO(uri, mode='r')

  with open(output_json, 'w') as outf:
    with _open_file_read_binary(uri) as f:
      image_bytes = f.read()
      image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
      image = image.resize((299, 299), Image.BILINEAR)
      resized_image = io.BytesIO()
      image.save(resized_image, format='JPEG')
      encoded_image = base64.b64encode(resized_image.getvalue())
      row = json.dumps({'key': uri, 'image_bytes': {'b64': encoded_image}})
      outf.write(row)
      outf.write('\n')
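For reference, a standalone Python 3 sketch of the same request format, using a local file so it runs without GCS access (all paths are hypothetical; note the extra .decode() that Python 3 needs before json.dumps):

import base64
import io
import json
from PIL import Image

uri = '/tmp/cat.jpg'  # hypothetical input image
with open(uri, 'rb') as f:
  image = Image.open(io.BytesIO(f.read())).convert('RGB')
image = image.resize((299, 299), Image.BILINEAR)
buf = io.BytesIO()
image.save(buf, format='JPEG')
b64 = base64.b64encode(buf.getvalue()).decode('utf-8')
with open('/tmp/request.json', 'w') as outf:
  outf.write(json.dumps({'key': uri, 'image_bytes': {'b64': b64}}) + '\n')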
Example 2: process
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def process(self, element):
  from tensorflow.python.lib.io import file_io as tf_file_io

  uri, label_id = element
  try:
    with tf_file_io.FileIO(uri, 'r') as f:
      img = Image.open(f).convert('RGB')
  # A variety of different calling libraries throw different exceptions here.
  # They all correspond to an unreadable file so we treat them equivalently.
  # pylint: disable=broad-except
  except Exception as e:
    logging.exception('Error processing image %s: %s', uri, str(e))
    error_count.inc()
    return

  # Convert to desired format and output.
  output = cStringIO.StringIO()
  img.save(output, 'jpeg')
  image_bytes = output.getvalue()
  yield uri, label_id, image_bytes
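The error_count.inc() call suggests this is the process method of an Apache Beam DoFn with a counter metric; the conversion itself can be exercised standalone, as in this Python 3 sketch (io.BytesIO replaces cStringIO; the path is hypothetical):

import io
from PIL import Image
from tensorflow.python.lib.io import file_io as tf_file_io

uri = '/tmp/photo.png'  # hypothetical image
with tf_file_io.FileIO(uri, 'rb') as f:
  img = Image.open(f).convert('RGB')
buf = io.BytesIO()
img.save(buf, 'jpeg')
image_bytes = buf.getvalue()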
Example 3: load_images
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def load_images(image_files, resize=True):
  """Load images from files and optionally resize them."""
  images = []
  for image_file in image_files:
    with file_io.FileIO(image_file, 'r') as ff:
      images.append(ff.read())
  if not resize:
    return images

  # To resize, run a tf session so we can reuse 'decode_and_resize()'
  # which is used in the prediction graph. This makes sure we don't lose
  # any quality in prediction, while decreasing the size of the images
  # submitted to the model over the network.
  image_str_tensor = tf.placeholder(tf.string, shape=[None])
  image = tf.map_fn(resize_image, image_str_tensor, back_prop=False)
  feed_dict = collections.defaultdict(list)
  feed_dict[image_str_tensor.name] = images
  with tf.Session() as sess:
    images_resized = sess.run(image, feed_dict=feed_dict)
  return images_resized
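A hypothetical call, assuming the resize_image helper from the source project is in scope and the paths exist; with resize=False no TensorFlow session is needed:

raw_images = load_images(['/tmp/a.jpg', '/tmp/b.jpg'], resize=False)
resized = load_images(['/tmp/a.jpg', '/tmp/b.jpg'])  # runs a TF 1.x session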
Example 4: read_vocab_file
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def read_vocab_file(file_path):
  """Reads a vocab file into memory.

  Args:
    file_path: Each line of the vocab is in the form "token,example_count".

  Returns:
    Two lists, one for the vocab, and one for just the example counts.
  """
  with file_io.FileIO(file_path, 'r') as f:
    vocab_pd = pd.read_csv(
        f,
        header=None,
        names=['vocab', 'count'],
        dtype=str,  # Prevent pd from converting numerical categories.
        na_filter=False)  # Prevent pd from converting 'NA' to a NaN.

  vocab = vocab_pd['vocab'].tolist()
  ex_count = vocab_pd['count'].astype(int).tolist()
  return vocab, ex_count
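A runnable sketch that writes a two-token vocab with file_io and reads it back (the path is hypothetical):

from tensorflow.python.lib.io import file_io

path = '/tmp/vocab.csv'
file_io.write_string_to_file(path, 'the,100\ncat,7\n')
vocab, ex_count = read_vocab_file(path)  # (['the', 'cat'], [100, 7])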
Example 5: _download_images
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def _download_images(data, img_cols):
  """Download images given image columns."""
  images = collections.defaultdict(list)
  for d in data:
    for img_col in img_cols:
      if d.get(img_col, None):
        if isinstance(d[img_col], Image.Image):
          # If it is already an Image, just copy it and continue.
          images[img_col].append(d[img_col])
        else:
          # Otherwise it is an image URL. Load the image.
          with file_io.FileIO(d[img_col], 'rb') as fi:
            im = Image.open(fi)
            images[img_col].append(im)
      else:
        images[img_col].append('')
  return images
Example 6: read_metadata
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def read_metadata(path):
  """Load metadata in JSON format from a path into a new DatasetMetadata."""
  schema_file = os.path.join(path, 'schema.pbtxt')
  legacy_schema_file = os.path.join(path, 'v1-json', 'schema.json')
  if file_io.file_exists(schema_file):
    text_proto = file_io.FileIO(schema_file, 'r').read()
    schema_proto = text_format.Parse(text_proto, schema_pb2.Schema(),
                                     allow_unknown_extension=True)
  elif file_io.file_exists(legacy_schema_file):
    schema_json = file_io.FileIO(legacy_schema_file, 'r').read()
    schema_proto = _parse_schema_json(schema_json)
  else:
    raise IOError(
        'Schema file {} does not exist, and neither does the legacy format '
        'file {}'.format(schema_file, legacy_schema_file))
  return dataset_metadata.DatasetMetadata(schema_proto)
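A minimal sketch of producing an input this function accepts, assuming tensorflow_metadata is installed; an empty Schema proto serializes to an empty (but parseable) schema.pbtxt:

import os
from tensorflow.python.lib.io import file_io
from tensorflow_metadata.proto.v0 import schema_pb2

path = '/tmp/metadata_dir'  # hypothetical metadata directory
file_io.recursive_create_dir(path)
file_io.write_string_to_file(os.path.join(path, 'schema.pbtxt'),
                             str(schema_pb2.Schema()))
metadata = read_metadata(path)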
Example 7: _write_vocabulary
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def _write_vocabulary(vocab_counter, vocab_size, destination):
  """Write the top vocab_size number of words to a file.

  Returns:
    A word-to-index mapping python dictionary for the vocabulary.
  """
  # Remove words that occur fewer than 5 times.
  vocab_counter = collections.Counter(
      {k: v for k, v in vocab_counter.iteritems() if v > 4})
  # Keep the top words.
  vocab_list = vocab_counter.most_common(
      min(len(vocab_counter), vocab_size - 1))
  # Add the __UNK__ token to the start of the top words.
  vocab_list.insert(0, (__UNK__, 0))
  # Write the top words to destination, one entry per line.
  with file_io.FileIO(destination, 'w+') as f:
    for word in vocab_list:
      f.write(u'{} {}\n'.format(word[0], word[1]))
  # Return a dictionary that maps each word to its index.
  return dict([(word, i)
               for (i, (word, word_count)) in enumerate(vocab_list)])
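Hypothetical usage (Python 2, to match iteritems() above; __UNK__ is assumed to be a module-level constant in the source project):

import collections

counter = collections.Counter({'the': 42, 'cat': 9, 'rare': 2})
word_index = _write_vocabulary(counter, vocab_size=10,
                               destination='/tmp/vocab.txt')
# 'rare' is dropped (count < 5); word_index maps __UNK__ to 0, 'the' to 1, ...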
Example 8: _check_params
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def _check_params(gcs_working_dir, version):
  """Check if the data already exists by looking for the file 'params.json'."""
  data_dir = '{}/v{}/data'.format(gcs_working_dir, version)
  # Strip the 'gs://' prefix and split into bucket name and blob prefix.
  bucket_name, prefix = data_dir[5:].split('/', 1)

  storage_client = storage.Client()
  bucket = storage_client.get_bucket(bucket_name)
  blobs = bucket.list_blobs(prefix=prefix)
  for blob in blobs:
    if blob.name.rsplit('/', 1)[-1] == PARAMS_FILE_NAME:
      with file_io.FileIO('{}/{}'.format(data_dir, PARAMS_FILE_NAME),
                          'r') as f:
        return json.load(f)
Example 9: write_cam
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def write_cam(file, cam):
  # file_io.FileIO handles both local and GCS paths (a plain open() would not).
  f = file_io.FileIO(file, "w")

  f.write('extrinsic\n')
  for i in range(0, 4):
    for j in range(0, 4):
      f.write(str(cam[0][i][j]) + ' ')
    f.write('\n')
  f.write('\n')

  f.write('intrinsic\n')
  for i in range(0, 3):
    for j in range(0, 3):
      f.write(str(cam[1][i][j]) + ' ')
    f.write('\n')

  f.write('\n' + str(cam[1][3][0]) + ' ' + str(cam[1][3][1]) + ' ' +
          str(cam[1][3][2]) + ' ' + str(cam[1][3][3]) + '\n')
  f.close()
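Hypothetical usage: cam[0] is a 4x4 extrinsic matrix and cam[1] packs a 3x3 intrinsic matrix plus a final row of four scalars (in multi-view-stereo pipelines these are typically depth-range parameters, but that meaning is an assumption here):

import numpy as np

cam = np.zeros((2, 4, 4))
cam[0] = np.eye(4)              # extrinsic
cam[1][:3, :3] = np.eye(3)      # intrinsic
cam[1][3] = [425.0, 2.5, 192.0, 863.0]  # assumed depth parameters
write_cam('/tmp/00000000_cam.txt', cam)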
Example 10: load_tfrecord
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def load_tfrecord(fname):
  """Load a tfrecord dataset.

  Args:
    fname (str): filename of the .yml metadata file to be loaded; the
      matching .tfr file is expected to sit next to it.
  """
  # Read the dtype and shape of each feature from the metadata.
  with FileIO(fname, 'r') as f:
    format_dict = yaml.safe_load(f)['format']
  dtypes = {k: format_dict[k]['dtype'] for k in format_dict.keys()}
  shapes = {k: format_dict[k]['shape'] for k in format_dict.keys()}
  feature_dict = {k: tf.FixedLenFeature([], tf.string) for k in dtypes}

  def parser(example):
    return tf.parse_single_example(example, feature_dict)

  def converter(tensors):
    tensors = {k: tf.parse_tensor(v, dtypes[k])
               for k, v in tensors.items()}
    for k, v in tensors.items():
      v.set_shape(shapes[k])
    return tensors

  tfr = '.'.join(fname.split('.')[:-1] + ['tfr'])
  dataset = tf.data.TFRecordDataset(tfr).map(parser).map(converter)
  return dataset
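The metadata layout this loader expects (inferred from the keys read above; the actual files are produced elsewhere in the source project) looks roughly like this when expressed as the parsed YAML dictionary:

metadata = {
    'format': {
        'image': {'dtype': 'float32', 'shape': [28, 28]},
        'label': {'dtype': 'int64', 'shape': []},
    }
}
# load_tfrecord('/tmp/data.yml') would read this from data.yml and parse
# the sibling /tmp/data.tfr file.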
Example 11: load_class_labels
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def load_class_labels(label_filename):
  """Load class labels.

  Assumes the data directory is left unchanged from the original zip.

  Args:
    label_filename (str): path to the tab-separated label file; the first
      line is a header and the label is the last column.

  Returns:
    numpy.ndarray: an array of class labels
  """
  class_labels = []
  header = True
  with file_io.FileIO(label_filename, mode='r') as f:
    for line in f.readlines():
      if header:
        header = False
        continue
      label = line.rstrip().split('\t')[-1]
      class_labels.append(label)
  return numpy.array(class_labels)
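A runnable sketch with a tiny tab-separated label file (the header row and last-column convention are inferred from the parser above; the path is hypothetical):

from tensorflow.python.lib.io import file_io

path = '/tmp/labels.tsv'
file_io.write_string_to_file(path, 'id\tlabel\n1\tcat\n2\tdog\n')
print(load_class_labels(path))  # -> array(['cat', 'dog'], ...)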
Example 12: _load_class_labels
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def _load_class_labels(label_filename):
  """Load class labels.

  Assumes the data directory is left unchanged from the original zip.

  Args:
    label_filename (str): path to the tab-separated label file; the first
      line is a header and the first/last columns are the id and label.

  Returns:
    List[(int, str)]: a list of class ids and labels
  """
  class_labels = []
  header = True
  with file_io.FileIO(label_filename, mode='r') as f:
    for line in f.readlines():
      if header:
        # Reserve id 0 for the 'none' class.
        class_labels.append((0, 'none'))
        header = False
        continue
      line = line.rstrip().split('\t')
      label = line[-1]
      label_id = int(line[0])
      class_labels.append((label_id, label))
  return class_labels
Example 13: parse_schema_file
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def parse_schema_file(schema_path):  # type: (str) -> Schema
  """Read a schema file and return the proto object."""
  assert file_io.file_exists(schema_path), "File not found: {}".format(schema_path)
  schema = Schema()
  with file_io.FileIO(schema_path, "rb") as f:
    schema.ParseFromString(f.read())
  return schema
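A round-trip sketch, assuming Schema comes from tensorflow_metadata (the snippet above imports it elsewhere):

from tensorflow.python.lib.io import file_io
from tensorflow_metadata.proto.v0.schema_pb2 import Schema

schema_path = '/tmp/schema.pb'  # hypothetical path
with file_io.FileIO(schema_path, 'wb') as f:
  f.write(Schema().SerializeToString())
schema = parse_schema_file(schema_path)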
Example 14: settings
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def settings(cls, settings_dir, settings_filename=None):
  # type: (str, str) -> List[Dict[str, Any]]
  """
  Read a Featran settings file and return a list of settings.

  :param settings_dir: Path to the directory containing the settings file
  :param settings_filename: Filename of the Featran settings JSON file
  :return: A list of Featran settings
  """
  f = cls.__get_featran_settings_file(settings_dir, settings_filename)
  with file_io.FileIO(f, "r") as fio:
    settings = json.load(fio)
  return settings
Example 15: _save_np
# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import FileIO [as alias]
def _save_np(absolute_fn, array):
  if absolute_fn.startswith('gs://'):
    # np.save writes binary data, so open the GCS file in binary mode.
    with file_io.FileIO(absolute_fn, 'wb') as f:
      np.save(f, array)
  else:
    np.save(absolute_fn, array)
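Hypothetical usage: the same call covers local paths and, via FileIO, GCS destinations:

import numpy as np

_save_np('/tmp/embeddings.npy', np.arange(6).reshape(2, 3))
# _save_np('gs://my-bucket/embeddings.npy', arr) would route through FileIO.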