This article collects typical usage examples of the Python function tensorflow.python.ops.list_ops.empty_tensor_list. If you are unsure what empty_tensor_list does or how to use it, the hand-picked code samples below may help.
The following presents 15 code examples of empty_tensor_list, ordered by popularity by default.
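Before the examples, a minimal hedged sketch (not taken from any of them, and assuming the usual tensorflow.python internal imports) of the core workflow: empty_tensor_list creates a variant-typed TensorList that the other list_ops read from and write to.

# A minimal sketch of the basic empty_tensor_list workflow used
# throughout the examples below.
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import list_ops

l = list_ops.empty_tensor_list(element_shape=[], element_dtype=dtypes.float32)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)  # [1.0, 2.0]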
Example 1: testStackEmptyList
def testStackEmptyList(self, max_num_elements):
  # Should be able to stack empty lists with fully defined element_shape.
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32,
      element_shape=[1, 2],
      max_num_elements=max_num_elements)
  t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(t).shape, (0, 1, 2))

  # Should not be able to stack empty lists with partially defined
  # element_shape.
  with self.assertRaisesRegexp(errors.InvalidArgumentError,
                               "non-fully-defined"):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=[-1, 2],
        max_num_elements=max_num_elements)
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    self.evaluate(t)

  # Should not be able to stack empty lists with undefined element_shape.
  with self.assertRaisesRegexp(errors.InvalidArgumentError,
                               "non-fully-defined"):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=-1,
        max_num_elements=max_num_elements)
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    self.evaluate(t)
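A note on Example 1: with zero elements there are no stored values to infer the element shape from, which is why stacking an empty list demands a fully defined element_shape. A hedged sketch (same modules as above, not from the original test) showing that a partially defined element_shape is fine once the list is non-empty:

# After one push, the element shape can be read from the stored value,
# so stacking a list declared with element_shape=[None, 2] succeeds.
l = list_ops.empty_tensor_list(
    element_dtype=dtypes.float32, element_shape=[None, 2])
l = list_ops.tensor_list_push_back(l, [[1., 2.]])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)  # shape (1, 1, 2)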
Example 2: testGatherEmptyList
def testGatherEmptyList(self, max_num_elements):
  # Should be able to gather from empty lists with fully defined
  # element_shape.
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32,
      element_shape=[1, 2],
      max_num_elements=max_num_elements)
  t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
  self.assertAllEqual((0, 1, 2), self.evaluate(t).shape)

  # Should not be able to gather from empty lists with partially defined
  # element_shape.
  with self.assertRaisesRegexp(errors.InvalidArgumentError,
                               "non-fully-defined"):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=[None, 2],
        max_num_elements=max_num_elements)
    t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
    self.evaluate(t)

  # Should not be able to gather from empty lists with undefined
  # element_shape.
  with self.assertRaisesRegexp(errors.InvalidArgumentError,
                               "non-fully-defined"):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=None,
        max_num_elements=max_num_elements)
    t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
    self.evaluate(t)
Example 3: testConcatEmptyListWithFullyDefinedElementShape
def testConcatEmptyListWithFullyDefinedElementShape(self):
  # tensor_list_concat joins elements along their leading dimension, so the
  # leading dimension of element_shape may stay undefined even when empty.
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32, element_shape=[5, 2])
  t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(t).shape, (0, 2))
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32, element_shape=[None, 2])
  t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(t).shape, (0, 2))
Example 4: testConcatWithNonFullyDefinedElementShape
def testConcatWithNonFullyDefinedElementShape(self):
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32, element_shape=[None, 2])
  l = list_ops.tensor_list_push_back(l, [[0., 1.]])
  l = list_ops.tensor_list_push_back(l, [[2., 3.], [4., 5.]])
  t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.]])
Example 5: testUnknownShape
def testUnknownShape(self):
  # element_shape=-1 declares the element shape unknown, so elements of
  # different shapes may be pushed onto the same list.
  l = list_ops.empty_tensor_list(element_dtype=dtypes.float32,
                                 element_shape=-1)
  l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
  l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0, 2.0]))
  _, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
  self.assertAllEqual(e, [1.0, 2.0])
Example 6: testStack
def testStack(self):
  l = list_ops.empty_tensor_list(element_dtype=dtypes.float32,
                                 element_shape=scalar_shape())
  l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
  l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
  t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
  self.assertAllEqual(t, [1.0, 2.0])
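The scalar_shape() used here (and the ScalarShape() spelling in Example 8) is a helper local to the TensorFlow test files, not part of list_ops. Its presumed definition, sketched from the test suite, encodes a fully defined rank-0 element shape:

def scalar_shape():
  # An empty int32 vector denotes a fully defined scalar (rank-0) shape.
  return ops.convert_to_tensor([], dtype=dtypes.int32)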
Example 7: _WhileGrad
def _WhileGrad(op, *grads):  # pylint: disable=invalid-name
  """The gradient of a While op produced by while_loop."""
  body_graph = _get_body_graph(op)

  # Replace None gradients with zeros. This is needed because `grads` could
  # have None incoming gradients for the TensorLists. If we pass None's
  # through, the custom gradient of TensorListPopBack will create an
  # EmptyTensorList inside the FuncGraph which is undesirable.
  # TODO(b/80444525): There might be an issue with treating no gradient as
  # zero gradient in certain cases. Consider replacing None gradients with
  # Zeros for accumulators only.
  grads = [
      g if g is not None else array_ops.zeros_like(output)
      for g, output in zip(grads, op.outputs)
  ]

  body_grad_graph, args = _create_grad_func(
      body_graph, grads,
      util.unique_grad_fn_name(body_graph.name), op)

  intermediate_tensors = _get_intermediates(body_grad_graph)

  for intermediate_tensor in intermediate_tensors:
    tensor_list = list_ops.empty_tensor_list(
        element_dtype=intermediate_tensor.dtype,
        element_shape=_get_tensor_convertible_shape(intermediate_tensor.shape))
    with body_grad_graph.as_default():
      tensor_list_ph = body_grad_graph.capture(tensor_list, whitelisted=True)
      # Push the intermediate tensor to the tensor list.
      appended_tensor_list = list_ops.tensor_list_push_back(
          tensor_list_ph, intermediate_tensor)
      # Add this modified tensor list to the list of outputs.
      body_grad_graph.outputs.append(appended_tensor_list)

  def grad_cond(counter, max_iters, *unused_args):
    return counter < max_iters

  loop_vars = args + body_grad_graph.external_captures
  grad_cond_name = util.unique_grad_fn_name(op.get_attr("cond").name)
  cond_grad_graph = func_graph_module.func_graph_from_py_func(
      grad_cond_name, grad_cond, loop_vars, {},
      func_graph=util.WhileCondFuncGraph(grad_cond_name))

  assert len(loop_vars) == len(body_grad_graph.inputs)
  assert len(loop_vars) == len(body_grad_graph.outputs)
  assert len(loop_vars) == len(cond_grad_graph.inputs)

  outputs = gen_functional_ops._while(
      loop_vars,
      util.create_new_tf_function(cond_grad_graph),
      util.create_new_tf_function(body_grad_graph),
      output_shapes=[t.shape for t in body_grad_graph.outputs],
      name="%s_grad" % op.name)

  _copy_handle_data(body_grad_graph.outputs, outputs)
  _maybe_set_lowering_attr(outputs[0].op)

  # outputs[0] is the loop counter.
  # outputs[1] is the total number of loop iterations.
  return outputs[2:2 + len(op.inputs)]
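In _WhileGrad, empty_tensor_list builds one accumulator per intermediate tensor: the backward pass reruns the loop, and each iteration pushes one value onto the corresponding list. A hedged sketch of that accumulator pattern outside the gradient machinery (assuming control_flow_ops, constant_op, dtypes, and list_ops from tensorflow.python, as in the examples above):

# A TensorList threaded through a while loop collects one value per iteration.
i = constant_op.constant(0)
acc = list_ops.empty_tensor_list(element_dtype=dtypes.int32, element_shape=[])

def cond(i, acc):
  return i < 5

def body(i, acc):
  # Append the current value and advance the counter.
  return i + 1, list_ops.tensor_list_push_back(acc, i)

_, acc = control_flow_ops.while_loop(cond, body, [i, acc])
stacked = list_ops.tensor_list_stack(acc, element_dtype=dtypes.int32)
# stacked evaluates to [0, 1, 2, 3, 4]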
Example 8: testDuplicateAccumulator
def testDuplicateAccumulator(self):
  x = constant_op.constant(2.)

  tensor_list = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32, element_shape=ScalarShape())

  def Cond(x, tl):
    del tl  # Unused for Cond.
    return x < 5.

  def Body(x, tl):
    # There is an accumulator in the loop already so we should not add
    # another.
    tl = list_ops.tensor_list_push_back(tl, x)
    return x**2., tl

  ret = while_loop_v2(Cond, Body, [x, tensor_list])

  for op in ops.get_default_graph().get_operations():
    if op.type == "While":
      while_op = op

  body_graph = while_v2._get_body_graph(while_op)
  # body_graph.inputs: [counter_arg, x_arg, tl_arg, *accumulators]
  x_input_t = body_graph.inputs[1]
  accumulator_count = len(
      [c for c in x_input_t.consumers() if c.type == "TensorListPushBack"])
  self.assertEqual(accumulator_count, 1)

  grad = gradients_impl.gradients(ret[0], x)
  with self.cached_session() as sess:
    self.assertEqual(sess.run(ret[0]), 16.)
    self.assertSequenceEqual(sess.run(grad), [32.])
Example 9: testPruning
def testPruning(self):
  x = constant_op.constant(1)

  tensor_list = list_ops.empty_tensor_list(
      element_dtype=x.dtype, element_shape=x.shape)

  def Cond(x, tl):
    del tl  # Unused for Cond.
    return x < 5

  def Body(x, tl):
    return x + 1, list_ops.tensor_list_push_back(tl, x)

  outputs = while_loop_v1(Cond, Body, [x, tensor_list])

  train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
  train_op.append(outputs[0])

  def GetOptimizedGraph():
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    return tf_optimizer.OptimizeGraph(rewriter_config, mg)

  g = GetOptimizedGraph()
  self.assertEqual(len([n for n in g.node if n.op == "Enter"]), 1)

  stack = list_ops.tensor_list_stack(outputs[1], element_dtype=x.dtype)
  train_op.append(stack)
  g = GetOptimizedGraph()
  self.assertEqual(len([n for n in g.node if n.op == "Enter"]), 2)
Example 10: testDefaultGradYs
def testDefaultGradYs(self):
  with ops.Graph().as_default():
    tl = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=ops.convert_to_tensor([], dtype=dtypes.int32))
    a = constant(1.0)
    tl = list_ops.tensor_list_push_back(tl, a)

    grad_tl = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=ops.convert_to_tensor([], dtype=dtypes.int32))
    grad_tl = list_ops.tensor_list_push_back(tl, constant(5.0))

    grad = gradients.gradients(tl, a, grad_ys=grad_tl)[0]
    with self.cached_session() as sess:
      self.assertEquals(self.evaluate(grad), 5.)
Example 11: body
def body(i, m, t1):
  # If t1 is still empty, initialize it. Note that element_shape and
  # element_dtype are passed positionally: empty_tensor_list(m.shape, m.dtype).
  t1 = control_flow_ops.cond(
      math_ops.equal(list_ops.tensor_list_length(t1), 0),
      lambda: list_ops.empty_tensor_list(m.shape, m.dtype), lambda: t1)
  t1 = list_ops.tensor_list_push_back(t1, m * i)
  i += 1.0
  return i, m, t1
Example 12: testPushInFullListFails
def testPushInFullListFails(self):
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32, element_shape=[], max_num_elements=1)
  l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
  with self.assertRaisesRegexp(errors.InvalidArgumentError,
                               "Tried to push item into a full list"):
    l = list_ops.tensor_list_push_back(l, 2.)
    self.evaluate(l)
Example 13: _testPushPop
def _testPushPop(self, max_num_elements):
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32,
      element_shape=[],
      max_num_elements=max_num_elements)
  l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
  l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
  self.assertAllEqual(self.evaluate(e), 1.0)
Example 14: testSetOnEmptyListWithMaxNumElementsFails
def testSetOnEmptyListWithMaxNumElementsFails(self):
  l = list_ops.empty_tensor_list(
      element_dtype=dtypes.float32, element_shape=[], max_num_elements=3)
  with self.assertRaisesRegexp(
      errors.InvalidArgumentError,
      "Trying to modify element 0 in a list with 0 elements."):
    l = list_ops.tensor_list_set_item(l, 0, 1.)
    self.evaluate(l)
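Example 14 shows that max_num_elements only caps the list's size; it does not pre-allocate writable slots. To write by index into a fresh list, the same module's tensor_list_reserve is the usual alternative; a hedged sketch, assuming the list_ops API used throughout:

# tensor_list_reserve creates a list with num_elements writable slots,
# so setting index 0 succeeds where the empty_tensor_list version fails.
l = list_ops.tensor_list_reserve(
    element_shape=[], num_elements=3, element_dtype=dtypes.float32)
l = list_ops.tensor_list_set_item(l, 0, 1.)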
Example 15: testConcat
def testConcat(self):
  c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
  l0 = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
  l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=scalar_shape())
  l_batch_0 = array_ops.stack([l0, l1])
  l_batch_1 = array_ops.stack([l1, l0])

  l_concat_01 = list_ops.tensor_list_concat_lists(
      l_batch_0, l_batch_1, element_dtype=dtypes.float32)
  l_concat_10 = list_ops.tensor_list_concat_lists(
      l_batch_1, l_batch_0, element_dtype=dtypes.float32)
  l_concat_00 = list_ops.tensor_list_concat_lists(
      l_batch_0, l_batch_0, element_dtype=dtypes.float32)
  l_concat_11 = list_ops.tensor_list_concat_lists(
      l_batch_1, l_batch_1, element_dtype=dtypes.float32)

  expected_00 = [[1.0, 2.0, 1.0, 2.0], [-1.0, -1.0]]
  expected_01 = [[1.0, 2.0, -1.0], [-1.0, 1.0, 2.0]]
  expected_10 = [[-1.0, 1.0, 2.0], [1.0, 2.0, -1.0]]
  expected_11 = [[-1.0, -1.0], [1.0, 2.0, 1.0, 2.0]]

  for i, (concat, expected) in enumerate(zip(
      [l_concat_00, l_concat_01, l_concat_10, l_concat_11],
      [expected_00, expected_01, expected_10, expected_11])):
    splitted = array_ops.unstack(concat)
    splitted_stacked_ret = self.evaluate(
        (list_ops.tensor_list_stack(splitted[0], dtypes.float32),
         list_ops.tensor_list_stack(splitted[1], dtypes.float32)))
    print("Test concat %d: %s, %s, %s, %s"
          % (i, expected[0], splitted_stacked_ret[0],
             expected[1], splitted_stacked_ret[1]))
    self.assertAllClose(expected[0], splitted_stacked_ret[0])
    self.assertAllClose(expected[1], splitted_stacked_ret[1])

  # Concatenating mismatched shapes fails.
  with self.assertRaises((errors.InvalidArgumentError, ValueError)):
    self.evaluate(
        list_ops.tensor_list_concat_lists(
            l_batch_0,
            list_ops.empty_tensor_list(scalar_shape(), dtypes.float32),
            element_dtype=dtypes.float32))

  with self.assertRaisesRegexp(errors.InvalidArgumentError,
                               "element shapes are not identical at index 0"):
    l_batch_of_vec_tls = array_ops.stack(
        [list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])] * 2)
    self.evaluate(
        list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_vec_tls,
                                          element_dtype=dtypes.float32))

  with self.assertRaisesRegexp(errors.InvalidArgumentError,
                               r"input_b\[0\].dtype != element_dtype."):
    l_batch_of_int_tls = array_ops.stack(
        [list_ops.tensor_list_from_tensor([1], element_shape=scalar_shape())]
        * 2)
    self.evaluate(
        list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_int_tls,
                                          element_dtype=dtypes.float32))