This page collects typical usage examples of the Python method tensorflow.python.framework.dtypes.int32. If you are unsure what dtypes.int32 does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore other usages of the containing module, tensorflow.python.framework.dtypes.
The following 15 code examples of dtypes.int32 are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
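Before the examples, here is a minimal sketch of the pattern most of them share, assuming only a working TensorFlow installation: import dtypes and pass dtypes.int32 wherever a dtype is expected (dtypes.int32 is the same object that is re-exported publicly as tf.int32).

import tensorflow as tf
from tensorflow.python.framework import dtypes

x = tf.constant([1.0, 2.0, 3.0])
y = tf.cast(x, dtypes.int32)      # cast a float tensor to int32
print(dtypes.int32 is tf.int32)   # True: the public alias is the same DType object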
Example 1: __init__
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
             sample_ids_shape=None, sample_ids_dtype=None):
  """Initializer.

  Args:
    initialize_fn: callable that returns `(finished, next_inputs)` for the
      first iteration.
    sample_fn: callable that takes `(time, outputs, state)` and emits tensor
      `sample_ids`.
    next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
      and emits `(finished, next_inputs, next_state)`.
    sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
      `int32`, the shape of each value in the `sample_ids` batch. Defaults to
      a scalar.
    sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
  """
  self._initialize_fn = initialize_fn
  self._sample_fn = sample_fn
  self._next_inputs_fn = next_inputs_fn
  self._batch_size = None
  self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
  self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
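The class that owns this __init__ is not shown in the snippet, so the sketch below only illustrates, under that assumption, what the three callables passed to it might look like; note that sample_fn returns int32 ids, matching the default sample_ids_dtype of dtypes.int32.

import tensorflow as tf
from tensorflow.python.framework import dtypes

batch_size = 4

def initialize_fn():
  finished = tf.fill([batch_size], False)
  next_inputs = tf.zeros([batch_size, 8])
  return finished, next_inputs

def sample_fn(time, outputs, state):
  # int32 ids, matching the default sample_ids_dtype above
  return tf.argmax(outputs, axis=-1, output_type=dtypes.int32)

def next_inputs_fn(time, outputs, state, sample_ids):
  finished = tf.fill([batch_size], time >= 9)
  return finished, tf.one_hot(sample_ids, depth=8), state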
Example 2: finalize
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def finalize(self, outputs, final_state, sequence_lengths):
  """Finalize and return the predicted_ids.

  Args:
    outputs: An instance of BeamSearchDecoderOutput.
    final_state: An instance of BeamSearchDecoderState. Passed through to the
      output.
    sequence_lengths: An `int32` tensor shaped `[batch_size, beam_width]`.
      The sequence lengths determined for each beam during decode.

  Returns:
    outputs: An instance of FinalBeamSearchDecoderOutput where the
      predicted_ids are the result of calling _gather_tree.
    final_state: The same input instance of BeamSearchDecoderState.
  """
  predicted_ids = gather_tree(
      outputs.predicted_ids, outputs.parent_ids,
      sequence_length=sequence_lengths)
  outputs = FinalBeamSearchDecoderOutput(
      beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
  return outputs, final_state
Developer: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 21, Source file: beam_search_decoder_from_tensorflow.py
Example 3: _build_for_quantization
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def _build_for_quantization(self):
  """All Keras build() logic for quantization for fused layers."""
  if not self.is_quantized:
    return

  self._weight_quantizer_vars = self.weight_quantizer.build(
      self.weights[0].shape, 'weight', self)

  self.optimizer_step = self.add_weight(
      'optimizer_step',
      initializer=initializers.Constant(-1),
      dtype=dtypes.int32,
      trainable=False)

  # TODO(alanchiao): re-explore if we can handle this with
  # QuantizeAwareActivation.
  self._activation_min_var = self.add_variable(  # pylint: disable=protected-access
      'activation_min',
      initializer=initializers.Constant(-6.0),
      trainable=False)
  self._activation_max_var = self.add_variable(  # pylint: disable=protected-access
      'activation_max',
      initializer=initializers.Constant(6.0),
      trainable=False)
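A minimal, self-contained sketch of the same pattern using only public Keras APIs (the layer name and behavior are illustrative, not part of the original code): a layer that tracks a non-trainable int32 step counter via add_weight, as the fused layer above does for optimizer_step.

import tensorflow as tf
from tensorflow.python.framework import dtypes

class StepCounterLayer(tf.keras.layers.Layer):
  def build(self, input_shape):
    self.optimizer_step = self.add_weight(
        'optimizer_step',
        initializer=tf.keras.initializers.Constant(-1),
        dtype=dtypes.int32,
        trainable=False)
    super().build(input_shape)

  def call(self, inputs):
    return inputs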
Example 4: set_size
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def set_size(a, validate_indices=True):
  """Compute number of unique elements along last dimension of `a`.

  Args:
    a: `SparseTensor`, with indices sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse indices
      in `a`.

  Returns:
    `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with
    rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the
    number of unique elements in the corresponding `[0...n-1]` dimension of `a`.

  Raises:
    TypeError: If `a` is an invalid type.
  """
  a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
  if not isinstance(a, sparse_tensor.SparseTensor):
    raise TypeError("Expected `SparseTensor`, got %s." % a)
  if a.values.dtype.base_dtype not in _VALID_DTYPES:
    raise TypeError("Invalid dtype %s." % a.values.dtype)
  # pylint: disable=protected-access
  return gen_set_ops.set_size(
      a.indices, a.values, a.dense_shape, validate_indices)
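For reference, a small usage sketch through the public wrapper (tf.sets.size in TF 2.x; tf.sets.set_size in older releases), which calls into the same op and returns an int32 Tensor of per-row unique-element counts:

import tensorflow as tf

dense = tf.constant([[1, 2, 2, 0],
                     [3, 0, 0, 0]])
a = tf.sparse.from_dense(dense)   # zeros are dropped; indices come out row-major sorted
sizes = tf.sets.size(a)           # int32 tensor [2, 1]: row 0 has {1, 2}, row 1 has {3}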
Example 5: shape
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def shape(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  This operation returns a 1-D integer tensor representing the shape of `input`.

  For example:

  ```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  shape(t) ==> [2, 2, 3]
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.
  """
  return shape_internal(input, name, optimize=True, out_type=out_type)
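Through the public wrapper tf.shape, the out_type parameter works as documented above; a short usage sketch:

import tensorflow as tf

t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
s32 = tf.shape(t)                       # int32 tensor, value [2, 2, 3]
s64 = tf.shape(t, out_type=tf.int64)    # same values, but int64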
Example 6: shape_internal
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the shape as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.
  """
  with ops.name_scope(name, "Shape", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_math_ops.cast(input.dense_shape, out_type)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.is_fully_defined():
        return constant(input_shape.as_list(), out_type, name=name)
      return gen_array_ops.shape(input, name=name, out_type=out_type)
Example 7: size
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def size(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the size of a tensor.

  This operation returns an integer representing the number of elements in
  `input`.

  For example:

  ```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  size(t) ==> 12
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.
  """
  return size_internal(input, name, optimize=True, out_type=out_type)
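And the corresponding public wrapper tf.size, again defaulting to an int32 result:

import tensorflow as tf

t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
n = tf.size(t)                       # int32 scalar, value 12
n64 = tf.size(t, out_type=tf.int64)  # int64 scalar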
Example 8: size_internal
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin,protected-access
  """Returns the size of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the size as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.
  """
  with ops.name_scope(name, "Size", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_math_ops._prod(
          gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.is_fully_defined():
        return constant(input_shape.num_elements(), out_type, name=name)
      return gen_array_ops.size(input, name=name, out_type=out_type)
Example 9: _DynamicStitchGrads
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch."""

  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))

  inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
  if isinstance(grad, ops.IndexedSlices):
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad
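This gradient function casts the index inputs to dtypes.int32 before gathering. For context, a small sketch of the forward op, tf.dynamic_stitch, whose index lists are int32:

import tensorflow as tf

indices = [tf.constant([0, 2]), tf.constant([1, 3])]
data = [tf.constant([10.0, 30.0]), tf.constant([20.0, 40.0])]
merged = tf.dynamic_stitch(indices, data)   # [10.0, 20.0, 30.0, 40.0]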
Example 10: bias_add_v1
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value`.

  This is a deprecated version of bias_add and will soon be removed.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
  case where both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add_v1(value, bias, name=name)
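Since this v1 variant is deprecated, here is a usage sketch with the non-deprecated public wrapper tf.nn.bias_add, which broadcasts the 1-D bias over the last dimension in the same way:

import tensorflow as tf

value = tf.constant([[1.0, 2.0, 3.0],
                     [4.0, 5.0, 6.0]])
bias = tf.constant([0.1, 0.2, 0.3])
out = tf.nn.bias_add(value, bias)   # [[1.1, 2.2, 3.3], [4.1, 5.2, 6.3]]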
Example 11: crelu
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def crelu(features, name=None):
  """Computes Concatenated ReLU.

  Concatenates a ReLU which selects only the positive part of the activation
  with a ReLU which selects only the *negative* part of the activation.
  Note that as a result this non-linearity doubles the depth of the activations.
  Source: [Understanding and Improving Convolutional Neural Networks via
  Concatenated Rectified Linear Units. W. Shang, et al.](https://arxiv.org/abs/1603.05201)

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
      `int16`, or `int8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "CRelu", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    c = array_ops.concat([features, -features], -1, name=name)
    return gen_nn_ops.relu(c)
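A short usage sketch via the public tf.nn.crelu, showing how the non-linearity doubles the channel depth:

import tensorflow as tf

features = tf.constant([[-1.0,  2.0],
                        [ 3.0, -4.0]])
out = tf.nn.crelu(features)
# shape [2, 4]: relu(features) concatenated with relu(-features)
# [[0., 2., 1., 0.],
#  [3., 0., 0., 4.]]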
Example 12: _neg
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)
# pylint: enable=g-docstring-has-escape
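The public equivalent is tf.negative (tf.math.negative), to which this internal helper delegates; a one-line usage sketch:

import tensorflow as tf

y = tf.negative(tf.constant([1, -2, 3]))   # int32 tensor [-1, 2, -3]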
Example 13: square
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def square(x, name=None):
  r"""Computes square of x element-wise.

  I.e., \\(y = x * x = x^2\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Square", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      x_square = gen_math_ops.square(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_square, dense_shape=x.dense_shape)
    else:
      return gen_math_ops.square(x, name=name)
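A usage sketch of the sparse branch above, assuming a TensorFlow version in which the public tf.square accepts a SparseTensor (the snippet implements exactly that dispatch): squaring touches only the stored values and preserves indices and dense_shape.

import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 1]], values=[-3.0], dense_shape=[1, 4])
sp_sq = tf.square(sp)   # SparseTensor with values [9.0], same indices and dense_shape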
Example 14: sigmoid
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`,
    otherwise the return type is `quint8`.

  @compatibility(numpy)
  Equivalent to scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(x, name=name)
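A quick numerical check of the formula in the docstring, y = 1 / (1 + exp(-x)), via the public tf.sigmoid:

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
y = tf.sigmoid(x)   # approximately [0.2689, 0.5, 0.7311], matching 1 / (1 + exp(-x))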
Example 15: tanh
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import int32 [as alias]
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
      `complex64`, `int64`, or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor or SparseTensor respectively with the same type as `x` if
    `x.dtype != qint32` otherwise the return type is `quint8`.
  """
  with ops.name_scope(name, "Tanh", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      x_tanh = gen_math_ops._tanh(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_tanh, dense_shape=x.dense_shape)
    else:
      return gen_math_ops._tanh(x, name=name)