This article collects typical usage examples of the Python method tensorflow.python.framework.tensor_util.constant_value. If you are wondering what tensor_util.constant_value does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage of the containing module, tensorflow.python.framework.tensor_util.
The following lists 15 code examples of tensor_util.constant_value, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: smart_cond
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def smart_cond(pred, fn1, fn2, name=None):
  """Return either fn1() or fn2() based on the boolean predicate/value `pred`.

  If `pred` is a Python bool or has a constant value, `static_cond` is used;
  otherwise `tf.cond` is used.

  Args:
    pred: A scalar determining whether to return the result of `fn1` or `fn2`.
    fn1: The callable to be performed if pred is true.
    fn2: The callable to be performed if pred is false.
    name: Optional name prefix when using tf.cond.

  Returns:
    Tensors returned by the call to either `fn1` or `fn2`.
  """
  pred_value = constant_value(pred)
  if pred_value is not None:
    # Use static_cond if pred has a constant value.
    return static_cond(pred_value, fn1, fn2)
  else:
    # Use dynamic cond otherwise.
    return control_flow_ops.cond(pred, fn1, fn2, name)
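A minimal usage sketch, not from the original project; it assumes a TensorFlow 1.x graph-mode environment in which `smart_cond` and its helpers (`constant_value`, `static_cond`, `control_flow_ops`) are importable as defined above:

import tensorflow as tf

static_pred = tf.constant(True)                      # constant_value() folds this to True
out_static = smart_cond(static_pred,
                        lambda: tf.constant(1.0),    # chosen at graph-build time,
                        lambda: tf.constant(0.0))    # no tf.cond op is created

dynamic_pred = tf.placeholder(tf.bool, shape=[])     # value unknown until runtime
out_dynamic = smart_cond(dynamic_pred,
                         lambda: tf.constant(1.0),
                         lambda: tf.constant(0.0))   # falls back to tf.cond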
Example 2: _merge_batch_beams
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def _merge_batch_beams(self, t, s=None):
  """Merges the tensor from a batch of beams into a batch by beams.

  More exactly, `t` is a tensor of dimension [batch_size, beam_width, s]; we
  reshape it into [batch_size*beam_width, s].

  Args:
    t: Tensor of dimension [batch_size, beam_width, s].
    s: (Possibly known) depth shape.

  Returns:
    A reshaped version of t with dimension [batch_size * beam_width, s].
  """
  if isinstance(s, ops.Tensor):
    s = tensor_shape.as_shape(tensor_util.constant_value(s))
  else:
    s = tensor_shape.TensorShape(s)
  t_shape = tf.shape(t)
  static_batch_size = tensor_util.constant_value(self._batch_size)
  batch_size_beam_width = (
      None if static_batch_size is None
      else static_batch_size * self._beam_width)
  reshaped_t = tf.reshape(
      t, tf.concat(
          ([self._batch_size * self._beam_width], t_shape[2:]), 0))
  reshaped_t.set_shape(
      (tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
  return reshaped_t
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 27, Source: beam_search_decoder_from_tensorflow.py
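For intuition, a standalone sketch of the merge step with hypothetical shapes (plain TensorFlow 1.x, without the decoder class or its static-shape bookkeeping):

import tensorflow as tf

batch_size, beam_width, depth = 4, 3, 8
t = tf.random_normal([batch_size, beam_width, depth])
# Collapse the beam axis into the batch axis: [4, 3, 8] -> [12, 8].
merged = tf.reshape(t, [batch_size * beam_width, depth])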
Example 3: _event_dims_tensor
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def _event_dims_tensor(self, sample):
  """Return a 1D `int32` tensor: `range(rank(sample))[-event_ndims:]`."""
  if self.event_ndims is None:
    raise ValueError("Jacobian cannot be computed with unknown event_ndims")
  static_event_ndims = tensor_util.constant_value(self.event_ndims)
  static_rank = sample.get_shape().ndims
  if static_event_ndims is not None and static_rank is not None:
    return ops.convert_to_tensor(
        static_rank + np.arange(-static_event_ndims, 0).astype(np.int32))
  if static_event_ndims is not None:
    event_range = np.arange(-static_event_ndims, 0).astype(np.int32)
  else:
    event_range = math_ops.range(-self.event_ndims, 0, dtype=dtypes.int32)
  if static_rank is not None:
    return event_range + static_rank
  else:
    return event_range + array_ops.rank(sample)
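To see what `range(rank(sample))[-event_ndims:]` produces, a small NumPy illustration with made-up values (not from the original source):

import numpy as np

rank, event_ndims = 4, 2
np.arange(rank)[-event_ndims:]      # -> array([2, 3]): the trailing event dimensions
rank + np.arange(-event_ndims, 0)   # -> array([2, 3]): the equivalent form used in
                                    #    the static branch above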
Example 4: constant_value
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def constant_value(pred):
  """Return the bool value for `pred`, or None if `pred` has a dynamic value.

  Arguments:
    pred: A scalar, either a Python bool or a TensorFlow boolean variable
      or tensor.

  Returns:
    True or False if `pred` has a constant boolean value, None otherwise.

  Raises:
    TypeError: if `pred` is not a Variable, Tensor or bool.
  """
  if isinstance(pred, bool):
    pred_value = pred
  elif isinstance(pred, variables.Variable):
    pred_value = None
  elif isinstance(pred, ops.Tensor):
    pred_value = tensor_util.constant_value(pred)
  else:
    raise TypeError('`pred` must be a Tensor, a Variable, or a Python bool.')
  return pred_value
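A usage sketch showing the possible outcomes (assuming a TensorFlow 1.x environment; the inputs are illustrative):

import tensorflow as tf

constant_value(True)                      # -> True   (plain Python bool)
constant_value(tf.constant(False))        # -> False  (folded from the graph)
constant_value(tf.placeholder(tf.bool))   # -> None   (value only known at runtime)
constant_value(tf.Variable(True))         # -> None   (Variables are never folded)
constant_value(1)                         # raises TypeError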
Example 5: _length_penalty
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def _length_penalty(sequence_lengths, penalty_factor):
  """Calculates the length penalty. See https://arxiv.org/abs/1609.08144.

  Args:
    sequence_lengths: The sequence length of all hypotheses, a tensor
      of shape [beam_size, vocab_size].
    penalty_factor: A scalar that weights the length penalty.

  Returns:
    The length penalty factor, a tensor of shape [beam_size].
  """
  penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
  penalty_factor.set_shape(())  # penalty should be a scalar.
  static_penalty = tensor_util.constant_value(penalty_factor)
  if static_penalty is not None and static_penalty == 0:
    return 1.0
  return math_ops.div((5. + math_ops.to_float(sequence_lengths))
                      **penalty_factor, (5. + 1.)**penalty_factor)
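The returned value implements the GNMT length penalty ((5 + length) / (5 + 1)) ** alpha; a plain NumPy rendering of the same formula, for illustration only:

import numpy as np

def length_penalty_np(sequence_lengths, penalty_factor):
  # ((5 + length) ** alpha) / ((5 + 1) ** alpha), as in arXiv:1609.08144.
  return ((5. + sequence_lengths) ** penalty_factor) / (5. + 1.) ** penalty_factor

length_penalty_np(np.array([5., 20.]), 0.0)   # -> [1., 1.]  (penalty disabled)
length_penalty_np(np.array([5., 20.]), 0.6)   # longer hypotheses get a larger divisor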
Example 6: prefer_static_broadcast_shape
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def prefer_static_broadcast_shape(
    shape1, shape2, name="prefer_static_broadcast_shape"):
  """Convenience function which statically broadcasts shape when possible.

  Args:
    shape1: `1-D` integer `Tensor`. Already converted to tensor!
    shape2: `1-D` integer `Tensor`. Already converted to tensor!
    name: A string name to prepend to created ops.

  Returns:
    The broadcast shape, either as `TensorShape` (if broadcast can be done
    statically), or as a `Tensor`.
  """
  with ops.name_scope(name, values=[shape1, shape2]):
    if (tensor_util.constant_value(shape1) is not None and
        tensor_util.constant_value(shape2) is not None):
      return array_ops.broadcast_static_shape(
          tensor_shape.TensorShape(tensor_util.constant_value(shape1)),
          tensor_shape.TensorShape(tensor_util.constant_value(shape2)))
    return array_ops.broadcast_dynamic_shape(shape1, shape2)
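A usage sketch contrasting the static and dynamic return types (assuming the helper above and a TensorFlow 1.x environment; the shapes are made up):

import tensorflow as tf

s1 = tf.constant([2, 1, 3])
s2 = tf.constant([5, 3])
prefer_static_broadcast_shape(s1, s2)         # -> TensorShape([2, 5, 3]), computed
                                              #    statically from the constants

s_dyn = tf.placeholder(tf.int32, shape=[3])   # contents unknown at graph time
prefer_static_broadcast_shape(s_dyn, s2)      # -> an int32 Tensor, resolved at runtime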
Example 7: _assert_non_negative_int32_scalar
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def _assert_non_negative_int32_scalar(self, x):
  """Helper which ensures that input is a non-negative, int32, scalar."""
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.base_dtype != dtypes.int32.base_dtype:
    raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
  x_value_static = tensor_util.constant_value(x)
  if x.get_shape().ndims is not None and x_value_static is not None:
    if x.get_shape().ndims != 0:
      raise ValueError("%s.ndims=%d is not 0 (scalar)" %
                       (x.name, x.get_shape().ndims))
    if x_value_static < 0:
      raise ValueError("%s.value=%d cannot be negative" %
                       (x.name, x_value_static))
    return x
  if self.validate_args:
    x = control_flow_ops.with_dependencies([
        check_ops.assert_rank(x, 0),
        check_ops.assert_non_negative(x)], x)
  return x
Example 8: __init__
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def __init__(self,
             event_ndims=0,
             validate_args=False,
             name="softmax_centered"):
  self._graph_parents = []
  self._name = name
  with self._name_scope("init", values=[event_ndims]):
    event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
    event_ndims = tensor_util.constant_value(event_ndims)
    if event_ndims is None or event_ndims not in [0, 1]:
      raise ValueError("`event_ndims` must be a TF constant which is 0 or 1")
  self._static_event_ndims = event_ndims
  super(SoftmaxCentered, self).__init__(
      event_ndims=event_ndims,
      validate_args=validate_args,
      name=name)
Example 9: __init__
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def __init__(self,
             event_ndims=0,
             validate_args=False,
             name="softmax_centered"):
  self._graph_parents = []
  self._name = name
  with self._name_scope("init", values=[event_ndims]):
    event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
    event_ndims = tensor_util.constant_value(event_ndims)
    if event_ndims is None or event_ndims not in [0, 1]:
      raise ValueError("`event_ndims` must be a TF constant which is 0 or 1")
  self._static_event_ndims = event_ndims
  super(SoftmaxCentered, self).__init__(
      batch_ndims=0,  # We'll regard all non-event dims as sample dims.
      event_ndims=event_ndims,
      validate_args=validate_args,
      name=name)
Example 10: _FillShape
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def _FillShape(op):
  """Shape function for the Fill op.

  This op takes a vector of dimensions and a scalar, and produces a
  tensor with the given dimensions.

  Args:
    op: A Fill Operation.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the shapes or arguments are known to be invalid.
  """
  op.inputs[0].get_shape().assert_has_rank(1)
  op.inputs[1].get_shape().assert_has_rank(0)
  fill_dims = tensor_util.constant_value(op.inputs[0])
  if fill_dims is not None and any(d < 0 for d in fill_dims):
    raise ValueError("Fill dimensions must be >= 0")
  return [tensor_util.constant_value_as_shape(op.inputs[0])]
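A small sketch of the two static-folding helpers this shape function relies on (assuming TensorFlow 1.x; the values are illustrative):

import tensorflow as tf
from tensorflow.python.framework import tensor_util

dims = tf.constant([2, 3])
tensor_util.constant_value(dims)            # -> array([2, 3]) as a NumPy array
tensor_util.constant_value_as_shape(dims)   # -> TensorShape([2, 3])

filled = tf.fill(dims, 7)
filled.get_shape()                          # -> (2, 3), inferred from the constant dims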
Example 11: constant_value
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def constant_value(value_or_tensor_or_var, dtype=None):
  """Returns value if value_or_tensor_or_var has a constant value.

  Args:
    value_or_tensor_or_var: A value, a `Tensor` or a `Variable`.
    dtype: Optional `tf.dtype`; if set, the input is checked against it.

  Returns:
    The constant value, or None if it is not constant.

  Raises:
    ValueError: if value_or_tensor_or_var is None or the tensor/variable has
      the wrong dtype.
  """
  if value_or_tensor_or_var is None:
    raise ValueError('value_or_tensor_or_var cannot be None')
  value = value_or_tensor_or_var
  if isinstance(value_or_tensor_or_var, (ops.Tensor, variables.Variable)):
    if dtype and value_or_tensor_or_var.dtype != dtype:
      raise ValueError('It has the wrong type %s instead of %s' % (
          value_or_tensor_or_var.dtype, dtype))
    if isinstance(value_or_tensor_or_var, variables.Variable):
      value = None
    else:
      value = tensor_util.constant_value(value_or_tensor_or_var)
  return value
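A usage sketch (assuming a TensorFlow 1.x environment in which the `ops`, `variables`, and `dtypes` modules used above are available):

import tensorflow as tf
from tensorflow.python.framework import dtypes

constant_value(2.0)                                # -> 2.0 (plain Python value)
constant_value(tf.constant(2.0))                   # -> 2.0 (folded from the graph)
constant_value(tf.constant(2.0), dtypes.float32)   # -> 2.0 (dtype check passes)
constant_value(tf.constant(2.0), dtypes.int32)     # raises ValueError (wrong dtype)
constant_value(tf.Variable(2.0))                   # -> None (Variables are dynamic)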
Example 12: _split_batch_beams
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def _split_batch_beams(self, t, s=None):
  """Splits the tensor from a batch by beams into a batch of beams.

  More exactly, `t` is a tensor of dimension [batch_size*beam_width, s]; we
  reshape it into [batch_size, beam_width, s].

  Args:
    t: Tensor of dimension [batch_size*beam_width, s].
    s: (Possibly known) depth shape.

  Returns:
    A reshaped version of t with dimension [batch_size, beam_width, s].

  Raises:
    ValueError: If, after reshaping, the new tensor is not shaped
      `[batch_size, beam_width, s]` (assuming batch_size and beam_width
      are known statically).
  """
  if isinstance(s, ops.Tensor):
    s = tensor_shape.TensorShape(tensor_util.constant_value(s))
  else:
    s = tensor_shape.TensorShape(s)
  t_shape = tf.shape(t)
  reshaped_t = tf.reshape(
      t, tf.concat(
          ([self._batch_size, self._beam_width], t_shape[1:]), 0))
  static_batch_size = tensor_util.constant_value(self._batch_size)
  expected_reshaped_shape = tensor_shape.TensorShape(
      [static_batch_size, self._beam_width]).concatenate(s)
  if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
    raise ValueError("Unexpected behavior when reshaping between beam width "
                     "and batch size. The reshaped tensor has shape: %s. "
                     "We expected it to have shape "
                     "(batch_size, beam_width, depth) == %s. Perhaps you "
                     "forgot to create a zero_state with "
                     "batch_size=encoder_batch_size * beam_width?"
                     % (reshaped_t.shape, expected_reshaped_shape))
  reshaped_t.set_shape(expected_reshaped_shape)
  return reshaped_t
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 37, Source: beam_search_decoder_from_tensorflow.py
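This is the inverse of the merge step in Example 2; a standalone sketch without the class or the shape-compatibility check (hypothetical shapes, TensorFlow 1.x):

import tensorflow as tf

batch_size, beam_width, depth = 4, 3, 8
merged = tf.random_normal([batch_size * beam_width, depth])
# Split the batch*beam axis back out: [12, 8] -> [4, 3, 8].
split = tf.reshape(merged, [batch_size, beam_width, depth])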
Example 13: _tensor_gather_helper
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def _tensor_gather_helper(gather_indices, gather_from, batch_size,
                          range_size, gather_shape):
  """Helper for gathering the right indices from the tensor.

  This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
  gathering from that according to the gather_indices, which are offset by
  the right amounts in order to preserve the batch order.

  Args:
    gather_indices: The tensor indices that we use to gather.
    gather_from: The tensor that we are gathering from.
    batch_size: The input batch size.
    range_size: The number of values in each range. Likely equal to beam_width.
    gather_shape: What we should reshape gather_from to in order to preserve
      the correct values. An example is when gather_from is the attention from
      an AttentionWrapperState with shape
      [batch_size, beam_width, attention_size]. There, we want to preserve the
      attention_size elements, so gather_shape is [batch_size * beam_width, -1].
      Then, upon reshape, we still have the attention_size as desired.

  Returns:
    output: Gathered tensor of shape
      tf.shape(gather_from)[:1+len(gather_shape)].
  """
  range_ = tf.expand_dims(tf.range(batch_size) * range_size, 1)
  gather_indices = tf.reshape(gather_indices + range_, [-1])
  output = tf.gather(
      tf.reshape(gather_from, gather_shape), gather_indices)
  final_shape = tf.shape(gather_from)[:1 + len(gather_shape)]
  static_batch_size = tensor_util.constant_value(batch_size)
  final_static_shape = (tensor_shape.TensorShape([static_batch_size])
                        .concatenate(
                            gather_from.shape[1:1 + len(gather_shape)]))
  output = tf.reshape(output, final_shape)
  output.set_shape(final_static_shape)
  return output
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 34, Source: beam_search_decoder_from_tensorflow.py
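The offsetting trick is easiest to see in NumPy; a toy illustration with made-up numbers (not from the original project):

import numpy as np

batch_size, beam_width = 2, 3
gather_from = np.arange(batch_size * beam_width).reshape(batch_size, beam_width)
gather_indices = np.array([[2, 0], [1, 1]])              # chosen beams per batch row

# Offset each row so the indices address the flattened [batch * beam] layout.
range_ = (np.arange(batch_size) * beam_width)[:, None]   # [[0], [3]]
flat = (gather_indices + range_).reshape(-1)             # [2, 0, 4, 4]
output = gather_from.reshape(-1)[flat].reshape(batch_size, -1)
# output == [[2, 0], [4, 4]]: each row gathers only from its own batch entry.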
Example 14: dequeue_many
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def dequeue_many(self, n, name=None):
  """Dequeues and concatenates `n` elements from this queue.

  This operation concatenates queue-element component tensors along
  the 0th dimension to make a single component tensor. All of the
  components in the dequeued tuple will have size `n` in the 0th dimension.

  If the queue is closed and there are fewer than `n` elements left, then an
  `OutOfRange` exception is raised.

  At runtime, this operation may raise an error if the queue is closed
  (`tf.QueueBase.close`) before or during its execution. If the queue is
  closed, the queue contains fewer than `n` elements, and there are no
  pending enqueue operations that can fulfill this request,
  `tf.errors.OutOfRangeError` will be raised. If the session is closed
  (`tf.Session.close`), `tf.errors.CancelledError` will be raised.

  Args:
    n: A scalar `Tensor` containing the number of elements to dequeue.
    name: A name for the operation (optional).

  Returns:
    The tuple of concatenated tensors that was dequeued.
  """
  if name is None:
    name = "%s_DequeueMany" % self._name
  ret = gen_data_flow_ops._queue_dequeue_many_v2(
      self._queue_ref, n=n, component_types=self._dtypes, name=name)
  # NOTE(mrry): Not using a shape function because we need access to
  # the Queue object.
  op = ret[0].op
  batch_dim = tensor_shape.Dimension(tensor_util.constant_value(op.inputs[1]))
  for output, shape in zip(op.values(), self._shapes):
    output.set_shape(tensor_shape.TensorShape([batch_dim]).concatenate(shape))
  return self._dequeue_return_value(ret)
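A usage sketch showing how a constant `n` yields a static leading dimension (assuming TensorFlow 1.x queue APIs; capacity and shapes are made up):

import tensorflow as tf

q = tf.FIFOQueue(capacity=100, dtypes=[tf.float32], shapes=[[3]])
batch = q.dequeue_many(5)
batch.get_shape()   # -> (5, 3): constant_value(n) folds 5 into the static shape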
Example 15: _IndexedSlicesToTensor
# Required import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import constant_value [as alias]
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
  """Converts an IndexedSlices object `value` to a Tensor.

  NOTE(mrry): This function is potentially expensive.

  Args:
    value: An ops.IndexedSlices object.
    dtype: The dtype of the Tensor to be returned.
    name: Optional name to use for the returned Tensor.
    as_ref: True if a ref is requested.

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
  _ = as_ref
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
        (dtype.name, value.dtype.name))
  if value.dense_shape is None:
    raise ValueError(
        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
        % str(value))
  # TODO(mrry): Consider adding static shape information to
  # IndexedSlices, to avoid using numpy here.
  dense_shape_value = tensor_util.constant_value(value.dense_shape)
  if dense_shape_value is not None:
    num_elements = np.prod(dense_shape_value)
    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
          "This may consume a large amount of memory." % num_elements)
  else:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
  return math_ops.unsorted_segment_sum(
      value.values, value.indices, value.dense_shape[0], name=name)
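The densification in the final line can be reproduced in isolation; a toy sketch with made-up slices (TensorFlow 1.x):

import tensorflow as tf

# Rows 0 and 2 of a [4, 3] dense tensor are present in the slices.
values = tf.constant([[1., 1., 1.], [2., 2., 2.]])
indices = tf.constant([0, 2])
dense_shape = tf.constant([4, 3])

dense = tf.unsorted_segment_sum(values, indices, dense_shape[0])
# dense -> [[1, 1, 1], [0, 0, 0], [2, 2, 2], [0, 0, 0]]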