This article collects typical usage examples of the Python function tensorflow.python.ops.list_ops.tensor_list_pop_back. If you have been wondering what exactly tensor_list_pop_back does, how to call it, or where to find examples of it in use, the curated code examples here should help.
A total of 15 code examples of tensor_list_pop_back are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
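Before the collected examples, here is a minimal standalone sketch of the basic push/pop round trip (an illustrative sketch only, assuming eager execution is enabled; note that list_ops lives under tensorflow.python and is not a stable public API):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import list_ops

# Build an empty TensorList of scalar float32 elements.
l = list_ops.empty_tensor_list(element_dtype=dtypes.float32, element_shape=[])
# Append one element, then pop it back off.
l = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
# tensor_list_pop_back returns both the shortened list and the popped
# element, so e now holds 3.0 and l is empty again; popping once more
# would raise InvalidArgumentError (see Example 14).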
Example 1: testTensorListFromTensor
def testTensorListFromTensor(self):
  t = constant_op.constant([1.0, 2.0])
  l = list_ops.tensor_list_from_tensor(t, element_shape=[])
  l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(e), 2.0)
  l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(e), 1.0)
  self.assertAllEqual(self.evaluate(list_ops.tensor_list_length(l)), 0)
Example 2: testUnknownShape
def testUnknownShape(self):
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32, element_shape=None)
  l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
  l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0, 2.0]))
  l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(e), [1.0, 2.0])
  l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(e), 1.0)
Example 3: testListFromTensor
def testListFromTensor(self):
  with self.cached_session(), self.test_scope():
    t = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(t, element_shape=[])
    e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
    self.assertAllEqual(e, 1.0)
    l, e0 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(e0, 2.0)
    l, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(e1, 1.0)
    self.assertAllEqual(list_ops.tensor_list_length(l), 0)
Example 4: testAccumulatorElementShape
def testAccumulatorElementShape(self, shape):

  def MatchShape(actual_tensor_shape):
    # Compare the shapes, treating None dimensions as equal. We do not
    # directly check actual_tensor_shape and tf.TensorShape(shape) for
    # equality because tf.Dimension.__eq__ returns None if either dimension
    # is None.
    if shape is None:
      self.assertIsNone(actual_tensor_shape.dims)
    else:
      self.assertListEqual(actual_tensor_shape.as_list(), shape)

  def GetAccumulatorForInputAtIndex(while_op, idx):
    body_graph = while_v2._get_graph(while_op, "body")
    y_input_t = body_graph.inputs[idx]
    push_back_node = [c for c in y_input_t.consumers()
                      if c.type == "TensorListPushBack"][0]
    output_idx = body_graph.outputs.index(push_back_node.outputs[0])
    return while_op.outputs[output_idx]

  x = array_ops.placeholder(dtype=dtypes.float32, shape=shape)
  y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)

  # Forward pass.
  ret = while_loop_v2(lambda v, u: v < 8.,
                      lambda v, u: (math_ops.pow(v, u), u),
                      [x, y],
                      return_same_structure=True)
  while_op = ret[0].op.inputs[0].op
  # Gradient pass.
  grad = gradients_impl.gradients(ret[0], x)
  # Note: There is an Identity between grad[0] and the While op.
  grad_while_op = grad[0].op.inputs[0].op

  # Get the TensorList output of the While op containing the accumulated
  # values of y.
  x_input_index = [i for i, inp in enumerate(while_op.inputs) if x == inp][0]
  output = GetAccumulatorForInputAtIndex(while_op, x_input_index)
  _, val = list_ops.tensor_list_pop_back(output,
                                         element_dtype=dtypes.float32)
  MatchShape(val.shape)

  # Take the second derivative to generate intermediate grad_while_op outputs.
  gradients_impl.gradients(grad, x)

  # Get the TensorList output of the gradient While op containing the
  # accumulated values of grad_x (note that grad_x is needed by the second
  # derivative).
  # grad_while_op.inputs:
  grad_output_index = grad_while_op.outputs.index(grad[0].op.inputs[0])
  grad_output = GetAccumulatorForInputAtIndex(grad_while_op,
                                              grad_output_index)
  _, val = list_ops.tensor_list_pop_back(grad_output,
                                         element_dtype=dtypes.float32)
  MatchShape(val.shape)
Example 5: testPushPop
def testPushPop(self):
  with self.cached_session() as sess, self.test_scope():
    num = array_ops.placeholder(dtypes.int32)
    l = list_ops.tensor_list_reserve(
        element_shape=(7, 15), num_elements=num, element_dtype=dtypes.float32)
    l = list_ops.tensor_list_push_back(
        l, constant_op.constant(1.0, shape=(7, 15)))
    l = list_ops.tensor_list_push_back(
        l, constant_op.constant(2.0, shape=(7, 15)))
    l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    _, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(sess.run(e2, {num: 10}), 2.0 * np.ones((7, 15)))
    self.assertAllEqual(sess.run(e1, {num: 10}), 1.0 * np.ones((7, 15)))
Example 6: testPushPop
def testPushPop(self):
  with self.cached_session() as sess, self.test_scope():
    l = list_ops.empty_tensor_list(
        element_shape=(7, 15),
        element_dtype=dtypes.float32,
        max_num_elements=10)
    l = list_ops.tensor_list_push_back(
        l, constant_op.constant(1.0, shape=(7, 15)))
    l = list_ops.tensor_list_push_back(
        l, constant_op.constant(2.0, shape=(7, 15)))
    l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    _, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(sess.run(e2), 2.0 * np.ones((7, 15)))
    self.assertAllEqual(sess.run(e1), 1.0 * np.ones((7, 15)))
Example 7: testAccumulatorElementShape
def testAccumulatorElementShape(self, shape):

  def MatchShape(actual_tensor_shape):
    # Compare the shapes, treating None dimensions as equal. We do not
    # directly check actual_tensor_shape and tf.TensorShape(shape) for
    # equality because tf.Dimension.__eq__ returns None if either dimension
    # is None.
    if shape is None:
      self.assertIsNone(actual_tensor_shape.dims)
    else:
      self.assertListEqual(actual_tensor_shape.as_list(), shape)

  def GetAccumulatorForInputAtIndex(while_op, idx):
    body_graph = while_v2._get_body_graph(while_op)
    y_input_t = body_graph.inputs[idx]
    push_back_node = [c for c in y_input_t.consumers()
                      if c.type == "TensorListPushBack"][0]
    output_idx = body_graph.outputs.index(push_back_node.outputs[0])
    return while_op.outputs[output_idx]

  x = constant_op.constant(2.)
  y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)

  # Forward pass.
  ret = while_loop_v2(
      lambda v, u: v < 8.,
      lambda v, u: (v * v, u), [x, y],
      return_same_structure=False)
  while_op = ret[0].op.inputs[0].op
  # Get the TensorList output of the While op containing the accumulated
  # values of y.
  # while_op.inputs: [counter_arg, x_arg, y_arg, *accumulators]
  output = GetAccumulatorForInputAtIndex(while_op, 2)
  _, val = list_ops.tensor_list_pop_back(output,
                                         element_dtype=dtypes.float32)
  MatchShape(val.shape)

  # Gradient pass.
  grad = gradients_impl.gradients(ret[1], y)
  grad_while_op = grad[0].op.inputs[0].op
  # Get the TensorList output of the gradient While op containing the
  # accumulated values of grad_y.
  # grad_while_op.inputs:
  # [counter_arg, total_iters_arg, grad_x_arg, grad_y_arg, *other_args]
  grad_output = GetAccumulatorForInputAtIndex(grad_while_op, 3)
  _, val = list_ops.tensor_list_pop_back(grad_output,
                                         element_dtype=dtypes.float32)
  MatchShape(val.shape)
Example 8: testSerialize
def testSerialize(self):
  # pylint: disable=g-import-not-at-top
  try:
    import portpicker
  except ImportError:
    return
  with context.graph_mode():
    worker_port = portpicker.pick_unused_port()
    ps_port = portpicker.pick_unused_port()
    cluster_dict = {
        "worker": ["localhost:%s" % worker_port],
        "ps": ["localhost:%s" % ps_port]
    }
    cs = server_lib.ClusterSpec(cluster_dict)
    worker = server_lib.Server(
        cs, job_name="worker", protocol="grpc", task_index=0, start=True)
    unused_ps = server_lib.Server(
        cs, job_name="ps", protocol="grpc", task_index=0, start=True)
    with ops.Graph().as_default(), session.Session(target=worker.target):
      with ops.device("/job:worker"):
        t = constant_op.constant([[1.0], [2.0]])
        l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
      with ops.device("/job:ps"):
        l_ps = array_ops.identity(l)
        l_ps, e = list_ops.tensor_list_pop_back(
            l_ps, element_dtype=dtypes.float32)
      with ops.device("/job:worker"):
        worker_e = array_ops.identity(e)
      self.assertAllEqual(worker_e.eval(), [2.0])
Example 9: testPushPopSeparateLists
def testPushPopSeparateLists(self):
  with self.cached_session() as sess, self.test_scope():
    l = list_ops.empty_tensor_list(
        element_shape=[],
        element_dtype=dtypes.float32,
        max_num_elements=20)
    l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
    l2 = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
    l3 = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
    _, e11 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    l2, e21 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
    l2, e22 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
    l3, e31 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
    l3, e32 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
    result = sess.run([e11, [e21, e22], [e31, e32]])
    self.assertEqual(result, [1.0, [2.0, 1.0], [3.0, 1.0]])
Example 10: testCPUGPUCopy
def testCPUGPUCopy(self):
  if not context.num_gpus():
    return
  t = constant_op.constant([1.0, 2.0])
  l = list_ops.tensor_list_from_tensor(t, element_shape=[])
  with context.device("gpu:0"):
    l_gpu = array_ops.identity(l)
    self.assertAllEqual(
        self.evaluate(
            list_ops.tensor_list_pop_back(
                l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
  l_cpu = array_ops.identity(l_gpu)
  self.assertAllEqual(
      self.evaluate(
          list_ops.tensor_list_pop_back(
              l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
Example 11: _testPushPop
def _testPushPop(self, max_num_elements):
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32,
      element_shape=[],
      max_num_elements=max_num_elements)
  l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
  l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(e), 1.0)
Example 12: testPushPopSeparateLists
def testPushPopSeparateLists(self):
  with self.cached_session() as sess, self.test_scope():
    num = array_ops.placeholder(dtypes.int32)
    l = list_ops.tensor_list_reserve(
        element_shape=scalar_shape(),
        num_elements=num,
        element_dtype=dtypes.float32)
    l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
    l2 = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
    l3 = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
    _, e11 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    l2, e21 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
    l2, e22 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
    l3, e31 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
    l3, e32 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
    result = sess.run([e11, [e21, e22], [e31, e32]], {num: 20})
    self.assertEqual(result, [1.0, [2.0, 1.0], [3.0, 1.0]])
Example 13: testEmptyTensorListMax
def testEmptyTensorListMax(self):
  with self.cached_session() as sess, self.test_scope():
    l = list_ops.empty_tensor_list(
        element_shape=(10, 15), element_dtype=dtypes.float32,
        max_num_elements=2)
    l = list_ops.tensor_list_push_back(
        l, array_ops.fill(value=3.0, dims=(10, 15)))
    _, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(sess.run(e), 3.0 * np.ones((10, 15)))
Example 14: testPopFromEmptyTensorListFails
def testPopFromEmptyTensorListFails(self, max_num_elements):
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32,
      element_shape=[],
      max_num_elements=max_num_elements)
  with self.assertRaisesRegexp(errors.InvalidArgumentError,
                               "Trying to pop from an empty list"):
    l = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.evaluate(l)
Example 15: testDoNotConstantFoldVariants
def testDoNotConstantFoldVariants(self):
  with self.cached_session() as sess, self.test_scope():
    val = array_ops.placeholder(dtype=dtypes.float32)
    l = list_ops.empty_tensor_list(
        element_shape=(7, 15),
        element_dtype=dtypes.float32,
        max_num_elements=10)
    # Note: Pushing a Placeholder will force the constant folding code
    # to build a Const node with a DT_VARIANT output. This tests that XLA
    # passes a cf_consider_fn which prevents folding such nodes.
    l = list_ops.tensor_list_push_back(
        l, array_ops.fill(value=val, dims=(7, 15)))
    l = list_ops.tensor_list_push_back(
        l, constant_op.constant(2.0, shape=(7, 15)))
    l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    _, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(sess.run(e2, {val: 1.0}), 2.0 * np.ones((7, 15)))
    self.assertAllEqual(sess.run(e1, {val: 1.0}), 1.0 * np.ones((7, 15)))