This article collects typical usage examples of the Python function tensorflow.python.framework.tensor_shape.unknown_shape. If you have been wondering what unknown_shape is for, how it is called, or what it looks like in real code, the curated examples here should help.
The following presents 15 code examples of the unknown_shape function, sorted by popularity by default.
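Before the examples, a minimal sketch of the basic API may be useful. It is not taken from the examples below, and it assumes a TensorFlow 1.x-era tensor_shape module in which unknown_shape accepts an optional ndims argument, as the examples use it:

from tensorflow.python.framework import tensor_shape

# A shape of unknown rank versus a shape of known rank with unknown dimensions.
s_unknown = tensor_shape.unknown_shape()         # rank unknown
s_rank2 = tensor_shape.unknown_shape(ndims=2)    # rank 2, both dims unknown

print(str(s_unknown))     # "<unknown>"
print(s_rank2.as_list())  # [None, None]

# Partial shapes can be combined; unknown dimensions pick up known values.
merged = s_rank2.merge_with(tensor_shape.TensorShape([3, None]))
print(merged.as_list())   # [3, None]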
Example 1: testAsList
def testAsList(self):
  # as_list() is rejected for a shape of unknown rank; with a known rank,
  # unknown dimensions come back as None.
  with self.assertRaisesRegexp(ValueError,
                               "not defined on an unknown TensorShape"):
    tensor_shape.unknown_shape().as_list()
  self.assertAllEqual([None, None], tensor_shape.unknown_shape(2).as_list())
  self.assertAllEqual([2, None, 4],
                      tensor_shape.TensorShape((2, None, 4)).as_list())
Example 2: testPartialShapes
def testPartialShapes(self):
  np.random.seed(1618)

  # Input shape is unknown.
  reduction_axes = [1, 2]
  c_unknown = tf.placeholder(tf.float32)
  s_unknown = tf.reduce_sum(c_unknown, reduction_axes)
  self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())

  np_input = np.random.randn(3, 3, 3)
  self._compareAll(np_input, reduction_axes, {c_unknown: np_input})

  # Input shape only has known rank.
  c_known_rank = tf.placeholder(tf.float32)
  c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
  s_known_rank = tf.reduce_sum(c_known_rank, reduction_axes, keep_dims=True)
  self.assertEqual(3, s_known_rank.get_shape().ndims)

  np_input = np.random.randn(3, 3, 3)
  self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})

  # Reduction indices are unknown.
  unknown_indices = tf.placeholder(tf.int32)
  c_unknown_indices = tf.constant([[10.0], [20.0]])
  s_unknown_indices = tf.reduce_sum(c_unknown_indices, unknown_indices,
                                    keep_dims=False)
  self.assertEqual(tensor_shape.unknown_shape(),
                   s_unknown_indices.get_shape())
  s_unknown_indices_keep = tf.reduce_sum(c_unknown_indices, unknown_indices,
                                         keep_dims=True)
  self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
Example 3: _SliceShape
def _SliceShape(op):
  """Shape function for array_ops.slice."""
  input_shape = op.inputs[0].get_shape()
  begin_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  sizes_shape = op.inputs[2].get_shape().with_rank_at_most(1)
  rank_vector_shape = begin_shape.merge_with(sizes_shape)
  ndims = rank_vector_shape.num_elements()
  if ndims is not None:
    input_shape.assert_has_rank(ndims)
  begin_value = tensor_util.ConstantValue(op.inputs[1])
  sizes_value = tensor_util.ConstantValue(op.inputs[2])
  if sizes_value is not None:
    returned_dims = []
    for i, slice_size in enumerate(sizes_value.ravel()):
      if slice_size != -1:
        returned_dims.append(slice_size)
      elif begin_value is not None:
        returned_dims.append(input_shape[i] - begin_value[i])
      else:
        returned_dims.append(None)
    return [tensor_shape.TensorShape(returned_dims)]
  else:
    if input_shape.ndims is not None:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    elif ndims is not None:
      return [tensor_shape.unknown_shape(ndims=ndims)]
    else:
      return [tensor_shape.unknown_shape()]
Example 4: testAssignNoShapeNoValidateShape
def testAssignNoShapeNoValidateShape(self):
  with self.test_session():
    value = self._NewShapelessTensor()
    # With set_shape=False the variable carries no static shape information.
    var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
    self.assertEqual(tensor_shape.unknown_shape(),
                     tf.assign(var, value, validate_shape=False).get_shape())
Example 5: _ReductionShape
def _ReductionShape(op):
  """Common shape function for reduction ops."""
  input_shape = op.inputs[0].get_shape()
  reduction_indices = tensor_util.constant_value(op.inputs[1])
  keep_dims = op.get_attr("keep_dims")
  if reduction_indices is None or input_shape.ndims is None:
    if keep_dims:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    else:
      return [tensor_shape.unknown_shape()]

  # Turn reduction_indices from scalar to vector if necessary
  reduction_indices = np.ravel(reduction_indices)

  for reduction_index in reduction_indices:
    if reduction_index < 0 or reduction_index >= input_shape.ndims:
      raise ValueError("Invalid reduction dimension %d for input with %d "
                       "dimensions" % (reduction_index, input_shape.ndims))

  returned_dims = []
  if keep_dims:
    for i, dim in enumerate(input_shape.dims):
      if i in reduction_indices:
        returned_dims.append(1)
      else:
        returned_dims.append(dim)
  else:
    for i, dim in enumerate(input_shape.dims):
      if i not in reduction_indices:
        returned_dims.append(dim)
  return [tensor_shape.TensorShape(returned_dims)]
Example 6: testAssignNoShape
def testAssignNoShape(self):
  with self.cached_session():
    value = self._NewShapelessTensor()
    var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
    self.assertEqual(tensor_shape.unknown_shape(),
                     state_ops.assign(var, value).get_shape())
Example 7: testEquality
def testEquality(self):
  s1 = tensor_shape.TensorShape([tensor_shape.Dimension(3),
                                 tensor_shape.Dimension(4),
                                 tensor_shape.Dimension(7)])
  s2 = tensor_shape.TensorShape([tensor_shape.Dimension(3),
                                 tensor_shape.Dimension(4),
                                 tensor_shape.Dimension(7)])
  s3 = tensor_shape.TensorShape([tensor_shape.Dimension(3),
                                 tensor_shape.Dimension(4), None])
  self.assertTrue(s1 == s2)
  self.assertFalse(s1 != s2)
  self.assertFalse(s1 == "a string")
  self.assertTrue(s1 != "a string")
  self.assertNotEqual(s1, "347", "Should not equal an ambiguous string.")
  self.assertEqual(s1, ["3", "4", "7"])

  # Test with an unknown shape in s3.
  self.assertTrue(s1 != s3)
  self.assertFalse(s3 == "a string")
  self.assertTrue(s3 != "a string")

  # eq and neq are not symmetric for unknown shapes.
  unk0 = tensor_shape.unknown_shape()
  self.assertFalse(unk0 == s1)
  self.assertFalse(s1 == unk0)
  with self.assertRaises(ValueError):
    unk0 != s1  # pylint: disable=pointless-statement
  with self.assertRaises(ValueError):
    s1 != unk0  # pylint: disable=pointless-statement
  unk1 = tensor_shape.unknown_shape()
  self.assertTrue(unk0 == unk1)
  self.assertTrue(unk1 == unk0)
  with self.assertRaises(ValueError):
    unk0 != unk1  # pylint: disable=pointless-statement
  with self.assertRaises(ValueError):
    unk1 != unk0  # pylint: disable=pointless-statement
Example 8: testAsProto
def testAsProto(self):
  # unknown_rank is set in the proto only when the rank itself is unknown.
  self.assertTrue(tensor_shape.unknown_shape().as_proto().unknown_rank)
  self.assertFalse(
      tensor_shape.unknown_shape(rank=3).as_proto().unknown_rank)
  self.assertFalse(
      tensor_shape.TensorShape([1, 2, 3]).as_proto().unknown_rank)
  self.assertFalse(
      tensor_shape.TensorShape([1, None, 3]).as_proto().unknown_rank)
Example 9: _sparse_shape
def _sparse_shape(op):
  """Shape function for `SparseTensor` result."""
  num_rows = (op.inputs[0].get_shape()[0]
              if op.type in ("DenseToSparseOperation", "DenseToDenseOperation")
              else None)
  return [
      tensor_shape.TensorShape([num_rows, 2]),
      tensor_shape.unknown_shape(1),
      tensor_shape.unknown_shape(1),
  ]
Example 10: test_build_raw_serving_input_receiver_fn_without_shape
def test_build_raw_serving_input_receiver_fn_without_shape(self):
  """Test case for issue #21178."""
  f = {"feature_1": array_ops.placeholder(dtypes.float32),
       "feature_2": array_ops.placeholder(dtypes.int32)}
  serving_input_receiver_fn = export.build_raw_serving_input_receiver_fn(f)
  v = serving_input_receiver_fn()
  self.assertTrue(isinstance(v, export.ServingInputReceiver))
  self.assertEqual(
      tensor_shape.unknown_shape(),
      v.receiver_tensors["feature_1"].shape)
  self.assertEqual(
      tensor_shape.unknown_shape(),
      v.receiver_tensors["feature_2"].shape)
Example 11: testStr
def testStr(self):
  self.assertEqual("<unknown>", str(tensor_shape.unknown_shape()))
  self.assertEqual("(?,)", str(tensor_shape.unknown_shape(ndims=1)))
  self.assertEqual("(?, ?)", str(tensor_shape.unknown_shape(ndims=2)))
  self.assertEqual("(?, ?, ?)", str(tensor_shape.unknown_shape(ndims=3)))

  self.assertEqual("()", str(tensor_shape.scalar()))
  self.assertEqual("(7,)", str(tensor_shape.vector(7)))
  self.assertEqual("(3, 8)", str(tensor_shape.matrix(3, 8)))
  self.assertEqual("(4, 5, 2)", str(tensor_shape.TensorShape([4, 5, 2])))

  self.assertEqual("(32, ?, 1, 9)",
                   str(tensor_shape.TensorShape([32, None, 1, 9])))
Example 12: test_to_feature_columns_and_input_fn
def test_to_feature_columns_and_input_fn(self):
  df = setup_test_df_3layer()
  feature_columns, input_fn = (
      estimator_utils.to_feature_columns_and_input_fn(
          df,
          base_input_keys_with_defaults={"a": 1,
                                         "b": 2,
                                         "c": 3,
                                         "d": 4},
          label_keys=["g"],
          feature_keys=["a", "b", "f"]))

  expected_feature_column_a = feature_column.DataFrameColumn(
      "a",
      learn.PredefinedSeries(
          "a",
          parsing_ops.FixedLenFeature(tensor_shape.unknown_shape(),
                                      dtypes.int32, 1)))
  expected_feature_column_b = feature_column.DataFrameColumn(
      "b",
      learn.PredefinedSeries("b", parsing_ops.VarLenFeature(dtypes.int32)))
  expected_feature_column_f = feature_column.DataFrameColumn(
      "f",
      learn.TransformedSeries([
          learn.PredefinedSeries("c",
                                 parsing_ops.FixedLenFeature(
                                     tensor_shape.unknown_shape(),
                                     dtypes.int32, 3)),
          learn.PredefinedSeries("d", parsing_ops.VarLenFeature(dtypes.int32))
      ], mocks.Mock2x2Transform("iue", "eui", "snt"), "out2"))

  expected_feature_columns = [
      expected_feature_column_a, expected_feature_column_b,
      expected_feature_column_f
  ]
  self.assertEqual(sorted(expected_feature_columns), sorted(feature_columns))

  base_features, labels = input_fn()
  expected_base_features = {
      "a": mocks.MockTensor("Tensor a", dtypes.int32),
      "b": mocks.MockSparseTensor("SparseTensor b", dtypes.int32),
      "c": mocks.MockTensor("Tensor c", dtypes.int32),
      "d": mocks.MockSparseTensor("SparseTensor d", dtypes.int32)
  }
  self.assertEqual(expected_base_features, base_features)

  expected_labels = mocks.MockTensor("Out iue", dtypes.int32)
  self.assertEqual(expected_labels, labels)

  self.assertEqual(3, len(feature_columns))
Example 13: _SqueezeShape
def _SqueezeShape(op):
  """Determine shape for squeeze op's output tensor.

  Args:
    op: Operation for which to determine shape.

  Returns:
    Shape of op's output tensor.

  Raises:
    ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),
      where rank is the number of dimensions in the input tensor. Or, if
      squeeze_dims includes a dimension for which input shape has a value
      not equal to 1.
  """
  input_shape = op.inputs[0].get_shape()
  if input_shape.dims is None:
    return [tensor_shape.unknown_shape()]

  squeeze_dims = op.get_attr("squeeze_dims") or []
  wrapped_squeeze_dims = []
  input_ndims = input_shape.ndims
  for i, squeeze_dim in enumerate(squeeze_dims):
    if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:
      raise ValueError(
          "squeeze_dims[%d]=%d not in [%d, %d)." % (
              i, squeeze_dim, -input_ndims, input_ndims))
    if squeeze_dim < 0:
      squeeze_dim += input_ndims
    wrapped_squeeze_dims.append(squeeze_dim)

  result_shape = []
  for i, dim in enumerate([d.value for d in input_shape.dims]):
    is_explicit_match = i in wrapped_squeeze_dims
    if dim is None:
      if is_explicit_match:
        # Assume that the squeezed dimension will be 1 at runtime.
        continue
      if not wrapped_squeeze_dims:
        # If squeezing all 1 dimensions and we see a None, give up.
        return [tensor_shape.unknown_shape()]
    elif dim == 1:
      if is_explicit_match or not wrapped_squeeze_dims:
        continue
    elif is_explicit_match:
      raise ValueError(
          "Can not squeeze dim[%d], expected a dimension of 1, got %d." % (
              i, dim))
    result_shape.append(dim)

  return [tensor_shape.TensorShape(result_shape)]
Example 14: _dense_to_dense_shape
def _dense_to_dense_shape(op):
  """Shapes for `SparseTensor` result given 2 dense inputs.

  Args:
    op: Operation with 2 dense `Tensor` inputs.

  Returns:
    Tuple of three shapes corresponding to the indices, values, and shape
    `Tensor` components of the result `SparseTensor`.

  Raises:
    ValueError: if either input `Tensor` has rank < 2, or ranks do not match,
      or first n-1 dims of input shapes are not compatible.
  """
  # The following should stay in sync with `ComputeDenseToDense` shape
  # assertions in kernels/set_kernels.cc.
  input0_shape = op.inputs[0].get_shape()
  input0_rank = input0_shape.ndims
  if (input0_rank is not None) and (input0_rank < 2):
    raise ValueError("Input 0, expected rank >= 2, got shape %s." %
                     input0_shape)
  # Dimension n contains the set values to be compared, so ranks and the first
  # n-1 dimensions of inputs and output must match.
  input1_shape = op.inputs[1].get_shape()
  input1_rank = input1_shape.ndims
  if (input0_rank is not None) and (input1_rank is not None) and (
      input0_rank != input1_rank):
    raise ValueError(
        "Ranks do not match: input 0 with shape %s, input 1 with shape %s." %
        (input0_shape, input1_shape))
  output_rank = input1_rank if input0_rank is None else input0_rank
  output_dim0 = input1_shape[1] if input0_shape[0] is None else input0_shape[0]
  input0_dims = input0_shape.dims
  if input0_dims is None:
    group0_shape = tensor_shape.unknown_shape()
  else:
    group0_shape = tensor_shape.TensorShape(input0_dims[:-1])
  input1_dims = input1_shape.dims
  if input1_dims is None:
    group1_shape = tensor_shape.unknown_shape()
  else:
    group1_shape = tensor_shape.TensorShape(input1_dims[:-1])
  group0_shape.assert_is_compatible_with(group1_shape)
  indices_shape = tensor_shape.TensorShape((output_dim0, output_rank))
  values_shape = tensor_shape.unknown_shape(1)
  shape_shape = tensor_shape.TensorShape((output_rank,))
  return (indices_shape, values_shape, shape_shape)
Example 15: constant_value_as_shape
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
  """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.
  """
  shape = tensor.get_shape().with_rank(1)
  if tensor.get_shape() == [0]:
    return tensor_shape.scalar()
  elif tensor.op.type == "Shape":
    return tensor.op.inputs[0].get_shape()
  elif tensor.op.type == "Pack":
    ret = tensor_shape.scalar()  # Empty list.
    for pack_input in tensor.op.inputs:
      # `pack_input` must be a scalar. Attempt to evaluate it, and append it
      # to `ret`.
      pack_input_val = constant_value(pack_input)
      if pack_input_val is None or pack_input_val < 0:
        new_dim = tensor_shape.Dimension(None)
      else:
        new_dim = tensor_shape.Dimension(pack_input_val)
      ret = ret.concatenate([new_dim])
    return ret
  elif tensor.op.type == "Concat":
    # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[1:]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "ConcatV2":
    # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[:-1]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  else:
    ret = tensor_shape.unknown_shape(shape[0].value)
    value = constant_value(tensor)
    if value is not None:
      ret = ret.merge_with(tensor_shape.TensorShape(
          [d if d != -1 else None for d in value]))
    return ret
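The docstring above names shape functions such as the one for tf.reshape() as the typical caller. As a rough illustration of the partial-shape behavior, here is a hedged sketch (assuming a TensorFlow 1.x graph; the tensor names are invented for this example) that builds a shape vector with tf.stack, which creates the "Pack" op handled above, and recovers a partially known TensorShape:

import tensorflow as tf
from tensorflow.python.framework import tensor_util

# A scalar whose value is only known at runtime.
unknown_dim = tf.placeholder(tf.int32, shape=[])
# Stacking scalars produces a "Pack" op in the graph.
shape_vec = tf.stack([2, unknown_dim, 4])

partial = tensor_util.constant_value_as_shape(shape_vec)
print(partial.as_list())  # [2, None, 4] -- the unknown entry becomes None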