This article collects typical usage examples of the Python function tensorflow.python.framework.tensor_util.constant_value_as_shape. If you have been asking yourself what exactly constant_value_as_shape does, how to use it, or where to find examples, then the curated code examples below should help.
The following shows 15 code examples of constant_value_as_shape, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
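Before diving in, here is a minimal sketch (assuming TF 1.x graph mode, matching the examples below) of what constant_value_as_shape does: it tries to statically infer a TensorShape from a shape-valued Tensor, marking any dimension it cannot resolve as None.

import tensorflow as tf
from tensorflow.python.framework import tensor_util

# A fully constant shape tensor resolves to a fully defined TensorShape.
shape_t = tf.constant([2, 3, 5], dtype=tf.int32)
print(tensor_util.constant_value_as_shape(shape_t))   # (2, 3, 5)

# A partially known shape tensor: the placeholder contributes one unknown dim.
partial = tf.concat([[2, 3], tf.placeholder(tf.int32, shape=(1,))], axis=0)
print(tensor_util.constant_value_as_shape(partial))   # (2, 3, ?)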
Example 1: testConstant

def testConstant(self):
  np_val = np.random.rand(3).astype(np.int32)
  tf_val = tf.constant(np_val)
  self.assertEqual(tf.TensorShape(np_val),
                   tensor_util.constant_value_as_shape(tf_val))

  tf_val = tf.constant([], dtype=tf.int32)
  self.assertEqual(tf.TensorShape([]),
                   tensor_util.constant_value_as_shape(tf_val))
Example 2: testConcat

def testConcat(self):
  # Note: this snippet predates TF 1.0, so tf.concat takes (axis, values);
  # Example 4 below shows the same test with the modern (values, axis) order.
  tf_val = tf.concat(0, [[16, 37], tf.placeholder(tf.int32, shape=(2,))])
  c_val = tensor_util.constant_value_as_shape(tf_val)
  self.assertEqual([16, 37, None, None], c_val.as_list())

  tf_val = tf.concat(0,
                     [[16, 37], tf.placeholder(tf.int32, shape=(1,)), [48]])
  c_val = tensor_util.constant_value_as_shape(tf_val)
  self.assertEqual([16, 37, None, 48], c_val.as_list())
Example 3: __init__

def __init__(self, event_shape_out, event_shape_in,
             validate_args=False, name=None):
  """Creates a `Reshape` bijector.

  Args:
    event_shape_out: An `int`-like vector-shaped `Tensor` representing the
      fully specified (no -1's) event shape of the transformed output.
    event_shape_in: An `int`-like vector-shaped `Tensor` representing the
      fully specified (no -1's) event shape of the input.
    validate_args: Python `bool` indicating whether arguments should be
      checked for correctness.
    name: Python `str`, name given to ops managed by this object.

  Raises:
    TypeError: if either `event_shape_in` or `event_shape_out` has a
      non-vector shape (`rank > 1`) or a non-integer `dtype`.
    ValueError: if either `event_shape_in` or `event_shape_out` contains
      non-positive entries, if their sizes do not match
      (`prod(event_shape_in) != prod(event_shape_out)`), or if their
      dimensionalities cannot be statically inferred.
  """
  with ops.name_scope(name, "reshape",
                      values=[event_shape_out, event_shape_in]):
    event_shape_out = ops.convert_to_tensor(event_shape_out,
                                            name="event_shape_out",
                                            preferred_dtype=dtypes.int32)
    event_shape_in = ops.convert_to_tensor(event_shape_in,
                                           name="event_shape_in",
                                           preferred_dtype=dtypes.int32)

    # Check that the input shapes are positive integers.
    assertions = []
    assertions += self._maybe_check_valid_shape(
        event_shape_out, "event_shape_out", validate_args=validate_args)
    assertions += self._maybe_check_valid_shape(
        event_shape_in, "event_shape_in", validate_args=validate_args)

    # Check that prod(event_shape_in) == prod(event_shape_out).
    assertions += self._maybe_check_matching_sizes(
        event_shape_in, event_shape_out, validate_args=validate_args)
    self._assertions = assertions

    self._event_shape_in = event_shape_in
    self._event_shape_out = event_shape_out
    self._event_shape_in_static = tensor_util.constant_value_as_shape(
        event_shape_in)
    self._event_shape_out_static = tensor_util.constant_value_as_shape(
        event_shape_out)

    super(Reshape, self).__init__(is_constant_jacobian=True,
                                  validate_args=validate_args,
                                  name=name or "reshape")
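A hedged usage sketch, assuming the TF 1.x tf.contrib.distributions.bijectors.Reshape class this constructor comes from (in current releases the bijector lives in TensorFlow Probability as tfp.bijectors.Reshape). With constant shape arguments, the constructor's constant_value_as_shape calls resolve both event shapes statically:

import tensorflow as tf
from tensorflow.contrib.distributions import bijectors  # TF 1.x contrib API

# Reshape a length-6 event into a 2x3 event; both shapes are fully specified,
# so the static event shapes are known at graph-construction time.
reshape = bijectors.Reshape(event_shape_out=[2, 3], event_shape_in=[6])
x = tf.zeros([4, 6])       # a batch of 4 events, each of shape [6]
y = reshape.forward(x)
print(y.shape)             # (4, 2, 3), known statically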
Example 4: testConcat

def testConcat(self):
  tf_val = array_ops.concat(
      [[16, 37], array_ops.placeholder(
          dtypes.int32, shape=(2,))], 0)
  c_val = tensor_util.constant_value_as_shape(tf_val)
  self.assertEqual([16, 37, None, None], c_val.as_list())

  tf_val = array_ops.concat(
      [[16, 37], array_ops.placeholder(
          dtypes.int32, shape=(1,)), [48]], 0)
  c_val = tensor_util.constant_value_as_shape(tf_val)
  self.assertEqual([16, 37, None, 48], c_val.as_list())
Example 5: _replace_event_shape_in_tensorshape

def _replace_event_shape_in_tensorshape(
    self, tensorshape_in, event_shape_in, event_shape_out):
  """Replaces the event shape dims of a `TensorShape`.

  Args:
    tensorshape_in: a `TensorShape` instance in which to attempt replacing
      the event shape.
    event_shape_in: `Tensor` containing the event shape expected to be
      present in (the rightmost dims of) `tensorshape_in`. Must be compatible
      with the rightmost dims of `tensorshape_in`.
    event_shape_out: `Tensor` containing the shape values with which to
      replace `event_shape_in` in `tensorshape_in`.

  Returns:
    tensorshape_out_: A `TensorShape` with the event shape replaced, if doing
      so is possible given the statically known shape data in
      `tensorshape_in` and `event_shape_in`. Else, `tf.TensorShape(None)`.

  Raises:
    ValueError: if we can statically determine both the event shape portion
      of `tensorshape_in` and `event_shape_in`, and they are not compatible.
      "Compatible" here means that they are identical on any dims that are
      not -1 in `event_shape_in`.
  """
  # Default to returning an unknown shape.
  tensorshape_out_ = tf.TensorShape(None)

  event_ndims_in_ = event_shape_in.shape.num_elements()
  if (event_ndims_in_ is not None and
      self._is_event_shape_fully_defined(tensorshape_in, event_ndims_in_)):
    ndims_ = tensorshape_in.ndims
    sample_and_batch_shape = tensorshape_in[:(ndims_ - event_ndims_in_)]
    event_shape_ = np.int32(tensorshape_in[ndims_ - event_ndims_in_:])

    # If both `event_shape_in` and the event shape dims of `tensorshape_in`
    # are statically known, we can statically validate the event shape.
    #
    # If `event_shape_in` is not statically known, we can only add runtime
    # validations to the graph (if enabled).
    event_shape_in_ = tensor_util.constant_value(event_shape_in)
    if event_shape_in_ is not None:
      # Check that `event_shape_` and `event_shape_in` are compatible in
      # the sense that they have equal entries in any position that isn't a
      # `-1` in `event_shape_in`. Note that our validations at construction
      # time ensure there is at most one such entry in `event_shape_in`.
      event_shape_specified_ = event_shape_[event_shape_in_ >= 0]
      event_shape_in_specified_ = event_shape_in_[event_shape_in_ >= 0]
      if not all(event_shape_specified_ == event_shape_in_specified_):
        raise ValueError(
            'Input `event_shape` does not match `event_shape_in` '
            '({} vs {}).'.format(event_shape_, event_shape_in_))
    else:
      with tf.control_dependencies(self._maybe_validate_event_shape(
          event_shape_, event_shape_in)):
        event_shape_out = tf.identity(event_shape_out)

    tensorshape_out_ = sample_and_batch_shape.concatenate(
        tensor_util.constant_value_as_shape(event_shape_out))

  return tensorshape_out_
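The static path above boils down to slicing off the trailing event dims of a known TensorShape and concatenating the new event shape in their place. A standalone sketch of that idea (not the class method itself, and with made-up sizes):

import tensorflow as tf

shape_in = tf.TensorShape([10, 4, 6])  # sample/batch dims plus event shape [6]
event_ndims_in = 1                     # the event shape occupies one dim
new_event = tf.TensorShape([2, 3])

sample_and_batch = shape_in[:shape_in.ndims - event_ndims_in]
shape_out = sample_and_batch.concatenate(new_event)
print(shape_out)  # (10, 4, 2, 3)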
Example 6: shape

def shape(self):
  """Get the `TensorShape` representing the shape of the dense tensor.

  Returns:
    A `TensorShape` object.
  """
  return tensor_util.constant_value_as_shape(self._dense_shape)
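A small sketch of why this works, assuming a SparseTensor built with a Python-list dense_shape (which becomes a constant tensor, so constant_value_as_shape can recover the full static shape):

import tensorflow as tf
from tensorflow.python.framework import tensor_util

sp = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[3, 4])
print(tensor_util.constant_value_as_shape(sp.dense_shape))  # (3, 4)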
Example 7: _merge_batch_beams

def _merge_batch_beams(self, t, s=None):
  """Merges the tensor from a batch of beams into a batch by beams.

  More exactly, `t` is a tensor of dimension [batch_size, beam_width, s],
  which we reshape into [batch_size * beam_width, s].

  Args:
    t: `Tensor` of dimension [batch_size, beam_width, s].
    s: (Possibly statically known) depth shape.

  Returns:
    A reshaped version of t with dimension [batch_size * beam_width, s].
  """
  if isinstance(s, ops.Tensor):
    s = tensor_util.constant_value_as_shape(s)
  else:
    s = tensor_shape.TensorShape(s)
  t_shape = array_ops.shape(t)
  static_batch_size = tensor_util.constant_value(self._batch_size)
  batch_size_beam_width = (
      None if static_batch_size is None
      else static_batch_size * self._beam_width)
  reshaped_t = array_ops.reshape(
      t, array_ops.concat(
          ([self._batch_size * self._beam_width], t_shape[2:]), 0))
  reshaped_t.set_shape(
      (tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
  return reshaped_t
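A self-contained sketch of the same merge pattern (the names and sizes here are illustrative, not the decoder's): the dynamic reshape flattens the first two dims, and set_shape re-attaches what is statically known.

import tensorflow as tf

batch_size, beam_width = 4, 3
t = tf.placeholder(tf.float32, shape=[batch_size, beam_width, None])

t_shape = tf.shape(t)
merged = tf.reshape(
    t, tf.concat([[batch_size * beam_width], t_shape[2:]], axis=0))
merged.set_shape(
    tf.TensorShape([batch_size * beam_width]).concatenate(t.shape[2:]))
print(merged.shape)  # (12, ?)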
Example 8: _replace_event_shape_in_shape_tensor

def _replace_event_shape_in_shape_tensor(
    self, shape_in, event_shape_in, event_shape_out):
  """Replaces the rightmost dims in a `Tensor` representing a shape.

  Args:
    shape_in: a rank-1 `Tensor` of integers.
    event_shape_in: the event shape expected to be present in (the rightmost
      dims of) `shape_in`.
    event_shape_out: the event shape with which to replace `event_shape_in`
      in `shape_in`.

  Returns:
    shape_out: A rank-1 integer `Tensor` with the same contents as `shape_in`
      except for the event dims, which are replaced with `event_shape_out`.
  """
  # If possible, extract a statically known `TensorShape` and transform that.
  tensorshape = tensor_util.constant_value_as_shape(shape_in)
  if tensorshape is not None and tensorshape.is_fully_defined():
    shape_out_ = self._replace_event_shape_in_tensorshape(
        tensorshape, event_shape_in, event_shape_out)
    if shape_out_.is_fully_defined():
      shape_out = tf.convert_to_tensor(
          shape_out_.as_list(), preferred_dtype=tf.int32)
      return shape_out

  # Otherwise, fall back to fully dynamic reshaping.
  rank = _ndims_from_shape(shape_in)
  event_ndims = _ndims_from_shape(event_shape_in)

  event_shape = shape_in[rank - event_ndims:]
  with tf.control_dependencies(self._maybe_validate_event_shape(
      event_shape, event_shape_in)):
    sample_and_batch_shape = shape_in[:(rank - event_ndims)]
    shape_out = tf.concat([sample_and_batch_shape, event_shape_out], axis=0)
  return shape_out
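A sketch of the fully dynamic fallback path, with illustrative stand-ins for the helper calls (event_ndims is a Python int here for simplicity):

import tensorflow as tf

shape_in = tf.placeholder(tf.int32, shape=[None])   # e.g. fed [10, 4, 6]
event_shape_out = tf.constant([2, 3], dtype=tf.int32)
event_ndims = 1                                     # rank of the event shape

rank = tf.size(shape_in)
sample_and_batch_shape = shape_in[:rank - event_ndims]
shape_out = tf.concat([sample_and_batch_shape, event_shape_out], axis=0)
# Feeding shape_in=[10, 4, 6] yields shape_out == [10, 4, 2, 3].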
Example 9: get_shape

def get_shape(self):
  """Get the `TensorShape` that represents the shape of the dense tensor.

  Returns:
    A `TensorShape` object.
  """
  return tensor_util.constant_value_as_shape(self._shape)
Example 10: sample

def sample(self, sample_shape=(), seed=None, name="sample",
           **condition_kwargs):
  """Generate samples of the specified shape.

  Note that a call to `sample()` without arguments will generate a single
  sample.

  Args:
    sample_shape: 0-D or 1-D `int32` `Tensor`. Shape of the generated
      samples.
    seed: Python integer seed for the RNG.
    name: name to give to the op.
    **condition_kwargs: Named arguments forwarded to the subclass
      implementation.

  Returns:
    samples: a `Tensor` with prepended dimensions `sample_shape`.
  """
  with self._name_scope(name, values=[sample_shape]):
    sample_shape = ops.convert_to_tensor(
        sample_shape, dtype=dtypes.int32, name="sample_shape")
    if sample_shape.get_shape().ndims == 0:
      return self.sample_n(sample_shape, seed, **condition_kwargs)
    sample_shape, total = self._expand_sample_shape(sample_shape)
    samples = self.sample_n(total, seed, **condition_kwargs)
    output_shape = array_ops.concat(0, [sample_shape, array_ops.slice(
        array_ops.shape(samples), [1], [-1])])
    output = array_ops.reshape(samples, output_shape)
    output.set_shape(tensor_util.constant_value_as_shape(
        sample_shape).concatenate(samples.get_shape()[1:]))
    return output
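The closing set_shape call is the key trick: the reshape itself only sees a dynamic output_shape, but constant_value_as_shape recovers whatever is statically known about sample_shape. A minimal sketch with made-up sizes:

import tensorflow as tf
from tensorflow.python.framework import tensor_util

sample_shape = tf.constant([5, 2], dtype=tf.int32)
samples = tf.zeros([10, 7])     # total = 5 * 2 = 10 draws, each of shape [7]

output_shape = tf.concat([sample_shape, tf.shape(samples)[1:]], axis=0)
output = tf.reshape(samples, output_shape)
output.set_shape(tensor_util.constant_value_as_shape(
    sample_shape).concatenate(samples.shape[1:]))
print(output.shape)  # (5, 2, 7)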
Example 11: sample

def sample(self, sample_shape=(), seed=None, name="sample"):
  """Generate samples of the specified shape.

  Note that a call to `sample()` without arguments will generate a single
  sample.

  Args:
    sample_shape: Rank-1 `int32` `Tensor`. Shape of the generated samples.
    seed: Python integer seed for the RNG.
    name: name to give to the op.

  Returns:
    samples: a `Tensor` with prepended dimensions `sample_shape`.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[sample_shape]):
      sample_shape = ops.convert_to_tensor(sample_shape,
                                           dtype=dtypes.int32,
                                           name="sample_shape")
      total = math_ops.reduce_prod(sample_shape)
      samples = self.sample_n(total, seed)
      output_shape = array_ops.concat(0, [sample_shape, array_ops.slice(
          array_ops.shape(samples), [1], [-1])])
      output = array_ops.reshape(samples, output_shape, name=name)
      output.set_shape(tensor_util.constant_value_as_shape(
          sample_shape).concatenate(samples.get_shape()[1:]))
      return output
Example 12: calculate_reshape

def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with tf.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = tf.reduce_prod(original_shape)
    implicit_dim = tf.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // tf.maximum(1, -tf.reduce_prod(new_shape)))
    new_ndims = tf.shape(new_shape)
    expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
        implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        tf.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        tf.assert_rank(new_shape, 1, message="New shape must be a vector."),
        tf.assert_less_equal(
            tf.count_nonzero(implicit_dim, dtype=tf.int32),
            1,
            message="At most one dimension can be unknown."),
        tf.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        tf.assert_equal(
            tf.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
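A hedged usage sketch of the helper above (assuming it is in scope, with numpy imported as np): a fully constant new_shape takes the static branch and returns eagerly, while a new_shape containing -1 falls through to the graph ops plus the optional validations, because constant_value_as_shape maps negative entries to unknown dims.

import tensorflow as tf

# Static branch: new_shape is constant, so no graph ops are needed.
dims, static_shape, checks = calculate_reshape(
    original_shape=tf.constant([6], dtype=tf.int32),
    new_shape=tf.constant([2, 3], dtype=tf.int32))
# dims == np.int32([2, 3]); static_shape == TensorShape([2, 3]); checks == [].

# Dynamic branch: the -1 must be resolved at graph-run time.
dyn_dims, dyn_static, dyn_checks = calculate_reshape(
    original_shape=tf.constant([6], dtype=tf.int32),
    new_shape=tf.constant([2, -1], dtype=tf.int32))
# dyn_static == TensorShape([2, None]); dyn_dims evaluates to [2, 3].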
Example 13: is_compatible_with

def is_compatible_with(self, value):
  try:
    value = sparse_tensor_lib.SparseTensor.from_value(value)
  except TypeError:
    return False
  return (isinstance(value, (sparse_tensor_lib.SparseTensor,
                             sparse_tensor_lib.SparseTensorValue)) and
          self._dtype.is_compatible_with(value.dtype) and
          self._dense_shape.is_compatible_with(
              tensor_util.constant_value_as_shape(value.dense_shape)))
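A sketch of just the shape test on the last line, assuming the spec's stored dense shape is a partially known TensorShape such as (None, 4):

import tensorflow as tf
from tensorflow.python.framework import tensor_util

spec_shape = tf.TensorShape([None, 4])
value = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[3, 4])
print(spec_shape.is_compatible_with(
    tensor_util.constant_value_as_shape(value.dense_shape)))  # True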
Example 14: _event_shape

def _event_shape(self):
  # If there's a chance that the event_shape has been overridden, we return
  # what we statically know about the `event_shape_override`. This works
  # because `_is_maybe_event_override` means `static_override` is either
  # `None` or a non-empty list, i.e., either we don't statically know the
  # `event_shape` or we do.
  #
  # Since the `bijector` may change the `event_shape`, we then forward what
  # we know to the bijector. This allows the `bijector` to have the final
  # say on the `event_shape`.
  static_override = tensor_util.constant_value_as_shape(
      self._override_event_shape)
  return self.bijector.forward_event_shape(
      static_override
      if self._is_maybe_event_override
      else self.distribution.event_shape)
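A sketch of the override distinction (the tensors here are illustrative): a constant override yields a statically known shape, while a data-dependent override yields TensorShape(None), so the decision defers to the base distribution or bijector.

import tensorflow as tf
from tensorflow.python.framework import tensor_util

override = tf.constant([2, 2], dtype=tf.int32)
print(tensor_util.constant_value_as_shape(override))  # (2, 2)

unknown = tf.placeholder(tf.int32, shape=[None])      # value fed at run time
print(tensor_util.constant_value_as_shape(unknown))   # <unknown>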
Example 15: _batch_shape

def _batch_shape(self):
  # If there's a chance that the batch_shape has been overridden, we return
  # what we statically know about the `batch_shape_override`. This works
  # because `_is_maybe_batch_override` means `static_override` is either
  # `None` or a non-empty list, i.e., either we don't statically know the
  # `batch_shape` or we do.
  #
  # Notice that this implementation parallels `_event_shape`, except that
  # the `bijector` doesn't get to alter the `batch_shape`. Recall that
  # `batch_shape` is a property of the distribution alone, while
  # `event_shape` is shared between the `distribution` instance and the
  # `bijector`.
  static_override = tensor_util.constant_value_as_shape(
      self._override_batch_shape)
  return (static_override
          if self._is_maybe_batch_override
          else self.distribution.batch_shape)