本文整理汇总了Python中tensorflow.python.platform.gfile.Open方法的典型用法代码示例。如果您正苦于以下问题:Python gfile.Open方法的具体用法?Python gfile.Open怎么用?Python gfile.Open使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块tensorflow.python.platform.gfile
的用法示例。
在下文中一共展示了gfile.Open方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: generate_tfprof_profile
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def generate_tfprof_profile(profiler, tfprof_file):
    """Generate a tfprof profile: dump the ProfileProto and print top ops.

    Args:
        profiler: A tf.profiler.Profiler. `profiler.add_step` must have
            already been called.
        tfprof_file: The filename to write the ProfileProto to.
    """
    serialized = profiler.serialize_to_string()
    log_fn('Dumping ProfileProto to %s' % tfprof_file)
    with gfile.Open(tfprof_file, 'wb') as out_file:
        out_file.write(serialized)

    # Also print the execution times of the top operations. The same
    # information lives in the dumped ProfileProto, but printing it here
    # means tfprof doesn't have to be run if the top ops are all the user
    # wants.
    opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
    opts['max_depth'] = _NUM_OPS_TO_PRINT
    opts['order_by'] = 'accelerator_micros'
    profiler.profile_operations(opts)
示例2: load_tensor_from_event_file
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def load_tensor_from_event_file(event_file_path):
    """Load a tensor from an event file.

    Assumes the event file contains an `Event` protobuf carrying a `Tensor`
    value.

    Args:
        event_file_path: (`str`) path to the event file.

    Returns:
        The tensor value loaded from the event file, as a `numpy.ndarray`.
        For uninitialized Tensors, returns `None`. For Tensors of data types
        that cannot be converted to `numpy.ndarray` (e.g., `tf.resource`),
        returns `None`.
    """
    parsed_event = event_pb2.Event()
    with gfile.Open(event_file_path, "rb") as event_file:
        parsed_event.ParseFromString(event_file.read())
    return load_tensor_from_event(parsed_event)
示例3: RetrieveAsset
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def RetrieveAsset(logdir, plugin_name, asset_name):
    """Retrieve a particular plugin asset from a logdir.

    Args:
        logdir: A directory that was created by a TensorFlow
            summary.FileWriter.
        plugin_name: The plugin we want an asset from.
        asset_name: The name of the requested asset.

    Returns:
        string contents of the plugin asset.

    Raises:
        KeyError: if the asset does not exist or cannot be read.
    """
    asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)
    try:
        with gfile.Open(asset_path, "r") as asset_file:
            contents = asset_file.read()
    except errors_impl.NotFoundError:
        raise KeyError("Asset path %s not found" % asset_path)
    except errors_impl.OpError as e:
        raise KeyError("Couldn't read asset path: %s, OpError %s" % (asset_path, e))
    return contents
示例4: write_op_log
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
    """Log provided 'op_log', and add additional model information below.

    The API also assigns ops in tf.trainable_variables() an op type called
    '_trainable_variables'.
    The API also logs 'flops' statistics for ops with op.RegisterStatistics()
    defined. flops calculation depends on Tensor shapes defined in 'graph',
    which might not be complete; 'run_meta', if provided, completes the shape
    information with best effort.

    Args:
        graph: tf.Graph.
        log_dir: directory to write the log file.
        op_log: (Optional) OpLog proto to be written. If not provided, a new
            one is created.
        run_meta: (Optional) RunMetadata proto that helps flops computation
            using run time shape information.
        add_trace: Whether to add op trace information. Used to support
            "code" view.
    """
    op_log = _merge_default_with_oplog(graph, op_log, run_meta, add_trace)
    # SerializeToString() returns bytes, so open in binary mode ('wb', not
    # 'w') — text mode rejects a bytes payload under Python 3.
    with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'wb') as log:
        log.write(op_log.SerializeToString())
示例5: load_csv_with_header
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def load_csv_with_header(filename,
                         target_dtype,
                         features_dtype,
                         target_column=-1):
    """Load dataset from CSV file with a header row.

    The header's first two fields give the sample count and feature count.
    """
    with gfile.Open(filename) as csv_file:
        rows = csv.reader(csv_file)
        header = next(rows)
        n_samples, n_features = int(header[0]), int(header[1])
        data = np.zeros((n_samples, n_features), dtype=features_dtype)
        target = np.zeros((n_samples,), dtype=target_dtype)
        for i, row in enumerate(rows):
            # The target column is removed from the row; the remainder is the
            # feature vector.
            target[i] = np.asarray(row.pop(target_column), dtype=target_dtype)
            data[i] = np.asarray(row, dtype=features_dtype)
    return Dataset(data=data, target=target)
示例6: testWriteScreenOutputToFileWorks
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def testWriteScreenOutputToFileWorks(self):
    """Check that '>file' redirection in a command writes screen output to the file."""
    output_path = tempfile.mktemp()
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2>%s\n" % output_path),
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertEqual(1, len(ui.unwrapped_outputs))
    # NOTE(review): the file is opened in text mode ("r") yet compared against
    # a bytes literal — presumably gfile returns bytes here under the tested
    # Python version; confirm this still holds on Python 3.
    with gfile.Open(output_path, "r") as f:
        self.assertEqual(b"bar\nbar\n", f.read())
    # Clean up output file.
    gfile.Remove(output_path)
示例7: write_op_log
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def write_op_log(graph, log_dir, op_log=None, run_meta=None):
    """Log provided 'op_log', and add additional model information below.

    The API also assigns ops in tf.trainable_variables() an op type called
    '_trainable_variables'.
    The API also logs 'flops' statistics for ops with op.RegisterStatistics()
    defined. flops calculation depends on Tensor shapes defined in 'graph',
    which might not be complete; 'run_meta', if provided, completes the shape
    information with best effort.

    Args:
        graph: tf.Graph.
        log_dir: directory to write the log file.
        op_log: (Optional) OpLog proto to be written. If not provided, a new
            one is created.
        run_meta: (Optional) RunMetadata proto that helps flops computation
            using run time shape information.
    """
    op_log = _merge_default_with_oplog(graph, op_log, run_meta)
    # SerializeToString() returns bytes, so open in binary mode ('wb', not
    # 'w') — text mode rejects a bytes payload under Python 3.
    with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'wb') as log:
        log.write(op_log.SerializeToString())
示例8: read_data_files
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def read_data_files(self, subset='train'):
    """Reads from data file and returns images and labels in a numpy array.

    Args:
        subset: Either 'train' (the five CIFAR data_batch files) or
            'validation' (the test_batch file).

    Returns:
        A tuple (all_images, all_labels) of numpy arrays; images are float32.

    Raises:
        ValueError: If `subset` is neither 'train' nor 'validation'.
    """
    assert self.data_dir, ('Cannot call `read_data_files` when using synthetic '
                           'data')
    if subset == 'train':
        filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                     for i in xrange(1, 6)]
    elif subset == 'validation':
        filenames = [os.path.join(self.data_dir, 'test_batch')]
    else:
        raise ValueError('Invalid data subset "%s"' % subset)

    inputs = []
    for filename in filenames:
        # Pickle streams are binary: open with 'rb', not 'r'. Text mode
        # corrupts the stream on Windows and fails outright on Python 3.
        with gfile.Open(filename, 'rb') as f:
            inputs.append(cPickle.load(f))
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [each_input['data'] for each_input in inputs]).astype(np.float32)
    all_labels = np.concatenate(
        [each_input['labels'] for each_input in inputs])
    return all_images, all_labels
示例9: read_data_files
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def read_data_files(self, subset='train'):
    """Reads from data file and returns images and labels in a numpy
    array.

    Args:
        subset: Either 'train' (the five CIFAR data_batch files) or
            'validation' (the test_batch file).

    Returns:
        A tuple (all_images, all_labels) of numpy arrays; images are float32.

    Raises:
        ValueError: If `subset` is neither 'train' nor 'validation'.
    """
    assert self.data_dir, (
        'Cannot call `read_data_files` when using synthetic '
        'data')
    if subset == 'train':
        filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                     for i in xrange(1, 6)]
    elif subset == 'validation':
        filenames = [os.path.join(self.data_dir, 'test_batch')]
    else:
        raise ValueError('Invalid data subset "%s"' % subset)

    inputs = []
    for filename in filenames:
        # Pickle streams are binary: open with 'rb', not 'r'. Text mode
        # corrupts the stream on Windows and fails outright on Python 3.
        with gfile.Open(filename, 'rb') as f:
            inputs.append(cPickle.load(f))
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [each_input['data'] for each_input in inputs]).astype(np.float32)
    all_labels = np.concatenate(
        [each_input['labels'] for each_input in inputs])
    return all_images, all_labels
示例10: read_data_files
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def read_data_files(self, subset='train'):
    """Reads from data file and return images and labels in a numpy array.

    Args:
        subset: Either 'train' (the five CIFAR data_batch files) or
            'validation' (the test_batch file).

    Returns:
        A tuple (all_images, all_labels) of numpy arrays; images are float32.

    Raises:
        ValueError: If `subset` is neither 'train' nor 'validation'.
    """
    if subset == 'train':
        filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                     for i in xrange(1, 6)]
    elif subset == 'validation':
        filenames = [os.path.join(self.data_dir, 'test_batch')]
    else:
        raise ValueError('Invalid data subset "%s"' % subset)

    inputs = []
    for filename in filenames:
        # Pickle streams are binary: open with 'rb', not 'r'. Text mode
        # corrupts the stream on Windows and fails outright on Python 3.
        with gfile.Open(filename, 'rb') as f:
            inputs.append(cPickle.load(f))
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [each_input['data'] for each_input in inputs]).astype(np.float32)
    all_labels = np.concatenate(
        [each_input['labels'] for each_input in inputs])
    return all_images, all_labels
示例11: read_data_files
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def read_data_files(self, subset='train'):
    """Reads from data file and return images and labels in a numpy array.

    Args:
        subset: Either 'train' (the five CIFAR data_batch files) or
            'validation' (the test_batch file).

    Returns:
        A tuple (all_images, all_labels) of numpy arrays; images are float32.

    Raises:
        ValueError: If `subset` is neither 'train' nor 'validation'.
    """
    if subset == 'train':
        filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                     for i in xrange(1, 6)]
    elif subset == 'validation':
        filenames = [os.path.join(self.data_dir, 'test_batch')]
    else:
        raise ValueError('Invalid data subset "%s"' % subset)

    inputs = []
    for filename in filenames:
        # Pickle streams are binary: open with 'rb', not 'r'. Text mode
        # corrupts the stream on Windows and fails outright on Python 3.
        with gfile.Open(filename, 'rb') as f:
            inputs.append(cPickle.load(f))
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [each_input['data'] for each_input in inputs]).astype(np.float32)
    all_labels = np.concatenate(
        [each_input['labels'] for each_input in inputs])
    return all_images, all_labels
示例12: write_op_log
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
    """Log provided 'op_log', and add additional model information below.

    The API also assigns ops in tf.trainable_variables() an op type called
    '_trainable_variables'.
    The API also logs 'flops' statistics for ops with op.RegisterStatistics()
    defined. flops calculation depends on Tensor shapes defined in 'graph',
    which might not be complete. 'run_meta', if provided, completes the shape
    information with best effort.

    Args:
        graph: tf.Graph.
        log_dir: directory to write the log file.
        op_log: (Optional) OpLogProto proto to be written. If not provided, a
            new one is created.
        run_meta: (Optional) RunMetadata proto that helps flops computation
            using run time shape information.
        add_trace: Whether to add python code trace information.
            Used to support "code" view.
    """
    op_log = _merge_default_with_oplog(graph, op_log, run_meta, add_trace)
    # SerializeToString() returns bytes, so open in binary mode ('wb', not
    # 'w') — text mode rejects a bytes payload under Python 3.
    with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'wb') as log:
        log.write(op_log.SerializeToString())
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:26,代码来源:tfprof_logger.py
示例13: read_data_files
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def read_data_files(self, subset='train'):
    """Read CIFAR pickle batches and return (images, labels) numpy arrays."""
    assert self.data_dir, ('Cannot call `read_data_files` when using synthetic '
                           'data')
    if subset == 'train':
        batch_names = ['data_batch_%d' % i for i in xrange(1, 6)]
    elif subset == 'validation':
        batch_names = ['test_batch']
    else:
        raise ValueError('Invalid data subset "%s"' % subset)
    filenames = [os.path.join(self.data_dir, name) for name in batch_names]

    batches = []
    # python2 does not have the encoding parameter
    load_kwargs = {} if six.PY2 else {'encoding': 'bytes'}
    for filename in filenames:
        with gfile.Open(filename, 'rb') as batch_file:
            batches.append(cPickle.load(batch_file, **load_kwargs))

    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [batch[b'data'] for batch in batches]).astype(np.float32)
    all_labels = np.concatenate([batch[b'labels'] for batch in batches])
    return all_images, all_labels
示例14: _load_graph_def_from_event_file
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def _load_graph_def_from_event_file(event_file_path):
    """Parse the event file at `event_file_path` and return its GraphDef."""
    parsed_event = event_pb2.Event()
    with gfile.Open(event_file_path, "rb") as event_file:
        parsed_event.ParseFromString(event_file.read())
    return graph_pb2.GraphDef.FromString(parsed_event.graph_def)
示例15: _load_log_message_from_event_file
# 需要导入模块: from tensorflow.python.platform import gfile [as 别名]
# 或者: from tensorflow.python.platform.gfile import Open [as 别名]
def _load_log_message_from_event_file(event_file_path):
    """Parse the event file at `event_file_path` and return its log message text."""
    parsed_event = event_pb2.Event()
    with gfile.Open(event_file_path, "rb") as event_file:
        parsed_event.ParseFromString(event_file.read())
    return parsed_event.log_message.message