This article collects typical usage examples of the Python method tensorflow.python.client.session.Session. If you are unsure what session.Session does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also read further about the enclosing module, tensorflow.python.client.session.
The following shows 15 code examples of the session.Session method, sorted by popularity by default.
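Before the examples, here is a minimal sketch of how session.Session is typically used in TensorFlow 1.x graph mode; the tensor names ("a", "b", "total") are illustrative only and not taken from the examples below.

# Minimal sketch (assumes TensorFlow 1.x graph mode; names are illustrative).
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops

a = constant_op.constant(2.0, name="a")
b = constant_op.constant(3.0, name="b")
total = math_ops.add(a, b, name="total")

# Session.run evaluates the requested tensors against the default graph.
with session.Session() as sess:
  print(sess.run(total))  # 5.0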
Example 1: replace_variable_values_with_moving_averages
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def replace_variable_values_with_moving_averages(graph,
                                                 current_checkpoint_file,
                                                 new_checkpoint_file):
  """Replaces variable values in the checkpoint with their moving averages.

  If the current checkpoint has shadow variables maintaining moving averages of
  the variables defined in the graph, this function generates a new checkpoint
  where the variables contain the values of their moving averages.

  Args:
    graph: a tf.Graph object.
    current_checkpoint_file: a checkpoint containing both original variables
      and their moving averages.
    new_checkpoint_file: file path to write a new checkpoint.
  """
  with graph.as_default():
    variable_averages = tf.train.ExponentialMovingAverage(0.0)
    ema_variables_to_restore = variable_averages.variables_to_restore()
    with tf.Session() as sess:
      read_saver = tf.train.Saver(ema_variables_to_restore)
      read_saver.restore(sess, current_checkpoint_file)
      write_saver = tf.train.Saver()
      write_saver.save(sess, new_checkpoint_file)
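A hedged usage sketch for the helper above: the model-building function and both checkpoint paths are placeholders, and the graph is assumed to already define ExponentialMovingAverage shadow variables.

# Hypothetical usage; build_detection_model() and the paths are placeholders.
detection_graph = tf.Graph()
with detection_graph.as_default():
  build_detection_model()  # hypothetical: creates variables and their EMAs

replace_variable_values_with_moving_averages(
    detection_graph,
    current_checkpoint_file='/tmp/train/model.ckpt-10000',
    new_checkpoint_file='/tmp/train/model-ema.ckpt')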
Example 2: testDebugWhileLoopWatchingWholeGraphWorks
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def testDebugWhileLoopWatchingWholeGraphWorks(self):
  with session.Session() as sess:
    loop_body = lambda i: math_ops.add(i, 2)
    loop_cond = lambda i: math_ops.less(i, 16)

    i = constant_op.constant(10, name="i")
    loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(run_options,
                            sess.graph,
                            debug_urls=self._debug_urls())
    run_metadata = config_pb2.RunMetadata()
    self.assertEqual(
        16, sess.run(loop, options=run_options, run_metadata=run_metadata))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    self.assertEqual(
        [[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
    self.assertEqual(
        [[12], [14], [16]],
        dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
Example 3: testDebugQueueOpsDoesNotoErrorOut
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def testDebugQueueOpsDoesNotoErrorOut(self):
  with session.Session() as sess:
    q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
    q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")

    run_metadata = config_pb2.RunMetadata()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_urls=self._debug_urls())

    sess.run(q_init, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    self.assertTrue(dump.loaded_partition_graphs())

    fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
    self.assertIsInstance(fifo_queue_tensor,
                          debug_data.InconvertibleTensorProto)
    self.assertTrue(fifo_queue_tensor.initialized)
    self.assertAllClose(
        [101.0, 202.0, 303.0],
        dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
Example 4: _safe_close
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def _safe_close(self, sess):
  """Closes a session without raising an exception.

  Just like sess.close() but ignores exceptions.

  Args:
    sess: A `Session`.
  """
  # pylint: disable=broad-except
  try:
    sess.close()
  except Exception:
    # Intentionally not logging to avoid user complaints that
    # they get cryptic errors. We really do not care that Close
    # fails.
    pass
  # pylint: enable=broad-except
Example 5: _try_run_local_init_op
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def _try_run_local_init_op(self, sess):
  """Tries to run _local_init_op, if not None, and is ready for local init.

  Args:
    sess: A `Session`.

  Returns:
    A tuple (is_successful, msg), where is_successful is True if
    _local_init_op is None, or we ran _local_init_op, and False otherwise;
    and msg is a `String` with the reason why the model was not ready to run
    local init.
  """
  if self._local_init_op is not None:
    is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)
    if is_ready_for_local_init:
      sess.run(self._local_init_op)
      return True, None
    else:
      return False, msg
  return True, None
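For context, a short sketch of how a caller might consume the returned tuple; the caller and the error message wording here are illustrative, not quoted from TensorFlow.

# Hypothetical caller in the same class: run local init if the model is ready,
# otherwise raise with the reason reported by the readiness check.
is_successful, msg = self._try_run_local_init_op(sess)
if not is_successful:
  raise RuntimeError(
      "Init operations did not make model ready for local_init: %s" % msg)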
Example 6: run_one_epoch
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def run_one_epoch(self):
  """Creates a new `Graph` and `Session` and runs a single epoch.

  Naturally this makes sense only for DataFrames that fit in memory.

  Returns:
    A dictionary mapping column names to numpy arrays that contain a single
    epoch of the `DataFrame`.
  """
  # batches is a list of dicts of numpy arrays
  batches = [b for b in self.run(num_epochs=1)]

  # first invert that to make a dict of lists of numpy arrays
  pivoted_batches = {}
  for k in batches[0].keys():
    pivoted_batches[k] = []
  for b in batches:
    for k, v in b.items():
      pivoted_batches[k].append(v)

  # then concat the arrays in each column
  result = {k: np.concatenate(column_batches)
            for k, column_batches in pivoted_batches.items()}
  return result
Example 7: _export_graph
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def _export_graph(graph, saver, checkpoint_path, export_dir,
                  default_graph_signature, named_graph_signatures,
                  exports_to_keep):
  """Exports graph via session_bundle, by creating a Session."""
  with graph.as_default():
    with tf_session.Session('') as session:
      variables.local_variables_initializer()
      lookup_ops.tables_initializer()
      saver.restore(session, checkpoint_path)

      export = exporter.Exporter(saver)
      export.init(
          init_op=control_flow_ops.group(
              variables.local_variables_initializer(),
              lookup_ops.tables_initializer()),
          default_graph_signature=default_graph_signature,
          named_graph_signatures=named_graph_signatures,
          assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))
      return export.export(export_dir, contrib_variables.get_global_step(),
                           session, exports_to_keep=exports_to_keep)
Example 8: setUp
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def setUp(self):
  self._tmp_dir = tempfile.mktemp()

  self.v = variables.Variable(10.0, name="v")
  self.delta = constant_op.constant(1.0, name="delta")
  self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")

  self.ph = array_ops.placeholder(dtypes.float32, name="ph")
  self.xph = array_ops.transpose(self.ph, name="xph")
  self.m = constant_op.constant(
      [[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
  self.y = math_ops.matmul(self.m, self.xph, name="y")

  self.sess = session.Session()

  # Initialize variable.
  self.sess.run(self.v.initializer)
Example 9: setUp
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def setUp(self):
  self.a = variables.Variable(10.0, name="a")
  self.b = variables.Variable(20.0, name="b")

  self.c = math_ops.add(self.a, self.b, name="c")  # Should be 30.0.
  self.d = math_ops.subtract(self.a, self.c, name="d")  # Should be -20.0.
  self.e = math_ops.multiply(self.c, self.d, name="e")  # Should be -600.0.

  self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
  self.f = math_ops.multiply(self.e, self.ph, name="f")

  self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
      self.e, name="opt")

  self.sess = session.Session()

  self.sess.run(self.a.initializer)
  self.sess.run(self.b.initializer)
Example 10: setUp
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def setUp(self):
  self.a = variables.Variable(2.0, name="a")
  self.b = variables.Variable(3.0, name="b")

  self.c = math_ops.multiply(self.a, self.b, name="c")  # Should be 6.0.
  self.d = math_ops.multiply(self.a, self.a, name="d")  # Should be 4.0.
  self.e = math_ops.multiply(self.d, self.c, name="e")  # Should be 24.0.

  self.f_y = constant_op.constant(0.30, name="f_y")
  self.f = math_ops.div(self.b, self.f_y, name="f")  # Should be 10.0.

  # The three nodes x, y and z form a graph with "cross-links" in it. I.e., x
  # and y are both direct inputs to z, but x is also a direct input to y.
  self.x = variables.Variable(2.0, name="x")  # Should be 2.0.
  self.y = math_ops.negative(self.x, name="y")  # Should be -2.0.
  self.z = math_ops.multiply(self.x, self.y, name="z")  # Should be -4.0.

  self.sess = session.Session()
  self.sess.run(variables.global_variables_initializer())
Example 11: testClearDevices
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def testClearDevices(self):
  export_dir = os.path.join(test.get_temp_dir(), "test_clear_devices")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Specify a device and save a variable.
  ops.reset_default_graph()
  with session.Session(
      target="",
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    with sess.graph.device("/cpu:0"):
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(
          sess, [tag_constants.TRAINING], clear_devices=True)

  # Save the SavedModel to disk.
  builder.save()

  # Restore the graph with a single predefined tag whose variables were saved
  # without any device information.
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, [tag_constants.TRAINING], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
Example 12: testTFRecordDataset
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def testTFRecordDataset(self):
  dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
                                                     'tfrecord_dataset'))
  height = 300
  width = 280

  with self.test_session():
    provider = dataset_data_provider.DatasetDataProvider(
        _create_tfrecord_dataset(dataset_dir))
    image, label = provider.get(['image', 'label'])
    image = _resize_image(image, height, width)

    with session.Session('') as sess:
      with queues.QueueRunners(sess):
        image, label = sess.run([image, label])
    self.assertListEqual([height, width, 3], list(image.shape))
    self.assertListEqual([1], list(label.shape))
Example 13: testTFRecordSeparateGetDataset
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def testTFRecordSeparateGetDataset(self):
  dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
                                                     'tfrecord_separate_get'))
  height = 300
  width = 280

  with self.test_session():
    provider = dataset_data_provider.DatasetDataProvider(
        _create_tfrecord_dataset(dataset_dir))
    [image] = provider.get(['image'])
    [label] = provider.get(['label'])
    image = _resize_image(image, height, width)

    with session.Session('') as sess:
      with queues.QueueRunners(sess):
        image, label = sess.run([image, label])
    self.assertListEqual([height, width, 3], list(image.shape))
    self.assertListEqual([1], list(label.shape))
Example 14: testIndexedSlicesGradIsClippedCorrectly
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def testIndexedSlicesGradIsClippedCorrectly(self):
  sparse_grad_indices = np.array([0, 1, 4])
  sparse_grad_dense_shape = [self._grad_vec.size]

  values = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
  indices = constant_op.constant(sparse_grad_indices, dtype=dtypes.int32)
  dense_shape = constant_op.constant(
      sparse_grad_dense_shape, dtype=dtypes.int32)
  gradient = ops.IndexedSlices(values, indices, dense_shape)
  variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)

  gradients_to_variables = (gradient, variable)
  gradients_to_variables = learning.clip_gradient_norms(
      [gradients_to_variables], self._max_norm)[0]

  # Ensure the built IndexedSlice has the right form.
  self.assertEqual(gradients_to_variables[1], variable)
  self.assertEqual(gradients_to_variables[0].indices, indices)
  self.assertEqual(gradients_to_variables[0].dense_shape, dense_shape)

  with session.Session() as sess:
    actual_gradient = sess.run(gradients_to_variables[0].values)
    np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
Example 15: write_graph_and_checkpoint
# Required import: from tensorflow.python.client import session [as alias]
# Or: from tensorflow.python.client.session import Session [as alias]
def write_graph_and_checkpoint(inference_graph_def,
                               model_path,
                               input_saver_def,
                               trained_checkpoint_prefix):
  """Writes the graph and the checkpoint into disk."""
  for node in inference_graph_def.node:
    node.device = ''
  with tf.Graph().as_default():
    tf.import_graph_def(inference_graph_def, name='')
    with session.Session() as sess:
      saver = saver_lib.Saver(saver_def=input_saver_def,
                              save_relative_paths=True)
      saver.restore(sess, trained_checkpoint_prefix)
      saver.save(sess, model_path)
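A hedged sketch of invoking the helper above when exporting an inference graph; the graph-building call, the saver, and all file paths are assumptions for illustration.

# Hypothetical usage: build the inference graph, then persist its GraphDef
# together with a checkpoint restored from a training run.
with tf.Graph().as_default() as inference_graph:
  build_inference_graph()  # hypothetical: defines the ops and variables
  saver = tf.train.Saver()

write_graph_and_checkpoint(
    inference_graph_def=inference_graph.as_graph_def(),
    model_path='/tmp/export/model.ckpt',
    input_saver_def=saver.as_saver_def(),
    trained_checkpoint_prefix='/tmp/train/model.ckpt-200000')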