This article collects typical usage examples of the Python method tensorflow.python.framework.constant_op.constant. If you are wondering what constant_op.constant does, how to call it, or what real-world usage looks like, the hand-picked examples below should help. You can also explore the containing module, tensorflow.python.framework.constant_op, for more details.
Below are 15 code examples of constant_op.constant, sorted by popularity by default.
Example 1: log_likelihood
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def log_likelihood(mu, var, x, muq, varq, a, mask_flat, config):
    if config.out_distr == 'bernoulli':
        log_lik = log_bernoulli(x, mu, eps=1e-6)  # (bs*L, d1*d2)
    elif config.out_distr == 'gaussian':
        log_lik = log_gaussian(x, mu, var)

    log_lik = tf.reduce_sum(log_lik, 1)  # (bs*L, )
    log_lik = tf.multiply(mask_flat, log_lik)

    # TODO: dropout scales the output as input/keep_prob. Issue?
    if config.ll_keep_prob < 1.0:
        log_lik = tf.layers.dropout(log_lik, config.ll_keep_prob)

    # We compute the log-likelihood *per frame*
    num_el = tf.reduce_sum(mask_flat)
    log_px_given_a = tf.truediv(tf.reduce_sum(log_lik), num_el)  # ()

    if config.use_vae:
        log_qa_given_x = tf.reduce_sum(log_gaussian(a, muq, varq), 1)  # (bs*L, )
        log_qa_given_x = tf.multiply(mask_flat, log_qa_given_x)
        log_qa_given_x = tf.truediv(tf.reduce_sum(log_qa_given_x), num_el)  # ()
    else:
        log_qa_given_x = tf.constant(0.0, dtype=tf.float32, shape=())

    LL = log_px_given_a - log_qa_given_x
    return LL, log_px_given_a, log_qa_given_x
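The masking-and-averaging pattern above is easy to sanity-check outside TensorFlow. A minimal numpy sketch (hypothetical values, not from the original repository) of the per-frame average that `tf.truediv(tf.reduce_sum(log_lik), num_el)` computes:

import numpy as np

# Per-element log-likelihoods for 4 flattened frames, 3 features each.
log_lik = np.array([[-0.5, -1.0, -0.2],
                    [-0.3, -0.7, -0.1],
                    [-9.9, -9.9, -9.9],   # padding frame
                    [-0.4, -0.6, -0.5]])
mask_flat = np.array([1.0, 1.0, 0.0, 1.0])  # 0 marks padding

per_frame = log_lik.sum(axis=1) * mask_flat        # zero out padded frames
log_px_given_a = per_frame.sum() / mask_flat.sum() # average over valid frames
print(log_px_given_a)  # -> -1.4333..., the mean over the three valid frames

Padded frames contribute nothing to either the numerator or the frame count, so the result is the mean over valid frames only.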
Example 2: __call__
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
        dtype = self.dtype
    if len(shape) == 1:
        return constant_op.constant(0., dtype=dtype, shape=shape)
    elif len(shape) == 2 and shape[0] == shape[1]:
        return constant_op.constant(np.identity(shape[0], dtype))
    elif len(shape) == 4 and shape[2] == shape[3]:
        array = np.zeros(shape, dtype=float)
        # Integer division keeps the indices ints under Python 3.
        cx, cy = shape[0] // 2, shape[1] // 2
        for i in range(shape[2]):
            array[cx, cy, i, i] = 1
        return constant_op.constant(array, dtype=dtype)
    else:
        # The original snippet dropped this `return`, so the fallback
        # branch silently returned None.
        return constant_op.constant(0., dtype=dtype, shape=shape)
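A quick way to see what the square 2-D branch returns, as a hedged sketch assuming TensorFlow 1.x (where `constant_op.constant` is the internal counterpart of `tf.constant`):

import numpy as np
import tensorflow as tf  # assumes TF 1.x
from tensorflow.python.framework import constant_op

# The len(shape) == 2 branch builds an identity matrix as a constant.
ident = constant_op.constant(np.identity(3, np.float32))
with tf.Session() as sess:
    print(sess.run(ident))
    # [[1. 0. 0.]
    #  [0. 1. 0.]
    #  [0. 0. 1.]]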
Example 3: dense_to_sparse
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
    tensor: An `int` `Tensor` to be converted to a `Sparse`.
    eos_token: An integer. It is part of the target label that signifies the
      end of a sentence.
    outputs_collections: Collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    A `SparseTensor` holding the non-`eos_token` entries of `tensor`.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
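A hedged usage sketch with the public TF 1.x API, showing what the function computes for a batch of padded label sequences with eos_token=0:

import tensorflow as tf  # assumes TF 1.x

labels = tf.constant([[1, 2, 0],
                      [3, 0, 0]])
# Same recipe as above: keep indices/values where the token != eos_token.
indices = tf.where(tf.not_equal(labels, 0))
values = tf.gather_nd(labels, indices)
sparse = tf.SparseTensor(indices, values, tf.shape(labels, out_type=tf.int64))

with tf.Session() as sess:
    print(sess.run(sparse))
    # SparseTensorValue(indices=[[0, 0], [0, 1], [1, 0]], values=[1, 2, 3],
    #                   dense_shape=[2, 3])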
Example 4: testDebugWhileLoopWatchingWholeGraphWorks
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def testDebugWhileLoopWatchingWholeGraphWorks(self):
  with session.Session() as sess:
    loop_body = lambda i: math_ops.add(i, 2)
    loop_cond = lambda i: math_ops.less(i, 16)

    i = constant_op.constant(10, name="i")
    loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(run_options,
                            sess.graph,
                            debug_urls=self._debug_urls())
    run_metadata = config_pb2.RunMetadata()
    self.assertEqual(
        16, sess.run(loop, options=run_options, run_metadata=run_metadata))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    self.assertEqual(
        [[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
    self.assertEqual(
        [[12], [14], [16]],
        dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
Example 5: shape_internal
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the shape as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.
  """
  with ops.name_scope(name, "Shape", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_math_ops.cast(input.dense_shape, out_type)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.is_fully_defined():
        return constant(input_shape.as_list(), out_type, name=name)
      return gen_array_ops.shape(input, name=name, out_type=out_type)
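The `optimize` fast path is worth noting: when the static shape is fully defined, `tf.shape` comes back as a single folded constant rather than a run-time `Shape` op. A minimal sketch, assuming TensorFlow 1.x:

import tensorflow as tf  # assumes TF 1.x

x = tf.zeros([2, 3])                        # static shape fully known
p = tf.placeholder(tf.float32, [None, 3])   # leading dimension unknown

print(tf.shape(x).op.type)  # "Const" -- folded via constant(...)
print(tf.shape(p).op.type)  # "Shape" -- computed at run time

rank_internal in Example 6 below applies the same trick to the rank, folding it to a constant whenever `ndims` is statically known.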
Example 6: rank_internal
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def rank_internal(input, name=None, optimize=True):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the rank as a constant when possible.

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, "Rank", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_array_ops.size(input.dense_shape, name=name)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.ndims is not None:
        return constant(input_shape.ndims, dtypes.int32, name=name)
      return gen_array_ops.rank(input, name=name)
Example 7: _kl_normal_normal
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def _kl_normal_normal(n_a, n_b, name=None):
  """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.

  Args:
    n_a: instance of a Normal distribution object.
    n_b: instance of a Normal distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_normal_normal".

  Returns:
    Batchwise KL(n_a || n_b)
  """
  with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
    one = constant_op.constant(1, dtype=n_a.dtype)
    two = constant_op.constant(2, dtype=n_a.dtype)
    half = constant_op.constant(0.5, dtype=n_a.dtype)
    s_a_squared = math_ops.square(n_a.scale)
    s_b_squared = math_ops.square(n_b.scale)
    ratio = s_a_squared / s_b_squared
    return (math_ops.square(n_a.loc - n_b.loc) / (two * s_b_squared) +
            half * (ratio - one - math_ops.log(ratio)))
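The returned expression is the closed form of KL(N(mu_a, s_a^2) || N(mu_b, s_b^2)). A small numpy sanity check of the same formula, with hypothetical numbers and numerical integration as ground truth:

import numpy as np

mu_a, s_a = 0.0, 1.0
mu_b, s_b = 1.0, 2.0

# Closed form, mirroring the TF expression above.
ratio = s_a**2 / s_b**2
kl_closed = (mu_a - mu_b)**2 / (2 * s_b**2) + 0.5 * (ratio - 1 - np.log(ratio))

# Ground truth: numerically integrate p(x) * log(p(x) / q(x)).
x = np.linspace(-10, 10, 200001)
p = np.exp(-(x - mu_a)**2 / (2 * s_a**2)) / (s_a * np.sqrt(2 * np.pi))
q = np.exp(-(x - mu_b)**2 / (2 * s_b**2)) / (s_b * np.sqrt(2 * np.pi))
kl_numeric = np.trapz(p * np.log(p / q), x)

print(kl_closed, kl_numeric)  # both ~0.4431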
Example 8: _reduce_join_reduction_dims
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def _reduce_join_reduction_dims(x, axis, reduction_indices):
  """Returns range(rank(x) - 1, 0, -1) if reduction_indices is None."""
  # TODO(aselle): Remove this after deprecation
  if reduction_indices is not None:
    if axis is not None:
      raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
    axis = reduction_indices

  if axis is not None:
    return axis
  else:
    # Fast path: avoid creating Rank and Range ops if ndims is known.
    if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
      return constant_op.constant(
          np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)
    # Otherwise, we rely on Range and Rank to do the right thing at run-time.
    return math_ops.range(array_ops.rank(x) - 1, -1, -1)
Example 9: multiply_gradients
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def multiply_gradients(grads_and_vars, gradient_multipliers):
  """Multiply specified gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
    gradient_multipliers: A map from either `Variables` or `Variable` op names
      to the coefficient by which the associated gradient should be scaled.

  Returns:
    The updated list of gradient to variable pairs.

  Raises:
    ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
      is empty or None or if `gradient_multipliers` is not a dictionary.
  """
  if not isinstance(grads_and_vars, list):
    raise ValueError('`grads_and_vars` must be a list.')
  if not gradient_multipliers:
    raise ValueError('`gradient_multipliers` is empty.')
  if not isinstance(gradient_multipliers, dict):
    raise ValueError('`gradient_multipliers` must be a dict.')

  multiplied_grads_and_vars = []
  for grad, var in grads_and_vars:
    if var in gradient_multipliers or var.op.name in gradient_multipliers:
      key = var if var in gradient_multipliers else var.op.name
      if grad is None:
        raise ValueError('Requested multiple of `None` gradient.')
      multiplier = gradient_multipliers[key]
      if not isinstance(multiplier, ops.Tensor):
        multiplier = constant_op.constant(multiplier, dtype=grad.dtype)
      if isinstance(grad, ops.IndexedSlices):
        tmp = grad.values * multiplier
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad *= multiplier
    multiplied_grads_and_vars.append((grad, var))
  return multiplied_grads_and_vars
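A hedged usage sketch, assuming TensorFlow 1.x and the definition above (variable names are hypothetical), that scales one variable's gradient by 0.1 before applying updates:

import tensorflow as tf  # assumes TF 1.x

w = tf.Variable(2.0, name='w')
b = tf.Variable(0.5, name='b')
loss = tf.square(w * 3.0 + b - 1.0)

opt = tf.train.GradientDescentOptimizer(0.01)
grads_and_vars = opt.compute_gradients(loss, [w, b])

# Scale only w's gradient; keys may be Variables or variable op names.
scaled = multiply_gradients(grads_and_vars, {w: 0.1})
train_op = opt.apply_gradients(scaled)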
Example 10: apply_regularization
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def apply_regularization(regularizer, weights_list=None):
  """Returns the summed penalty by applying `regularizer` to the `weights_list`.

  Adding a regularization penalty over the layer weights and embedding weights
  can help prevent overfitting the training data. Regularization over layer
  biases is less common/useful, but assuming proper data preprocessing/mean
  subtraction, it usually shouldn't hurt much either.

  Args:
    regularizer: A function that takes a single `Tensor` argument and returns
      a scalar `Tensor` output.
    weights_list: List of weights `Tensors` or `Variables` to apply
      `regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
      `None`.

  Returns:
    A scalar representing the overall regularization penalty.

  Raises:
    ValueError: If `regularizer` does not return a scalar output, or if we find
      no weights.
  """
  if not weights_list:
    weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
  if not weights_list:
    raise ValueError('No weights to regularize.')
  with ops.name_scope('get_regularization_penalty',
                      values=weights_list) as scope:
    penalties = [regularizer(w) for w in weights_list]
    penalties = [
        p if p is not None else constant_op.constant(0.0) for p in penalties
    ]
    for p in penalties:
      if p.get_shape().ndims != 0:
        raise ValueError('regularizer must return a scalar Tensor instead of a '
                         'Tensor with rank %d.' % p.get_shape().ndims)

    summed_penalty = math_ops.add_n(penalties, name=scope)
    ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, summed_penalty)
    return summed_penalty
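A hedged usage sketch (TensorFlow 1.x assumed) pairing the function above with an L2 regularizer over explicitly listed weights:

import tensorflow as tf  # assumes TF 1.x

w1 = tf.Variable(tf.ones([3, 3]), name='w1')
w2 = tf.Variable(tf.ones([3]), name='w2')

l2 = lambda w: 0.01 * tf.nn.l2_loss(w)  # returns a scalar per weight tensor
penalty = apply_regularization(l2, weights_list=[w1, w2])
# `penalty` is a scalar, also added to GraphKeys.REGULARIZATION_LOSSES.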
Example 11: _create_zero_outputs
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def _create_zero_outputs(size, dtype, batch_size):
  """Create a zero outputs Tensor structure."""
  def _t(s):
    return (s if isinstance(s, ops.Tensor) else constant_op.constant(
        tensor_shape.TensorShape(s).as_list(),
        dtype=dtypes.int32,
        name="zero_suffix_shape"))

  def _create(s, d):
    return array_ops.zeros(
        array_ops.concat(
            ([batch_size], _t(s)), axis=0), dtype=d)

  return nest.map_structure(_create, size, dtype)
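A small sketch of what the helper yields for a flat (non-nested) output size, assuming TensorFlow 1.x and that the definition above with its imports is in scope:

import tensorflow as tf  # assumes TF 1.x

# size=5, dtype=tf.float32, batch_size=2 -> a [2, 5] tensor of zeros.
zeros = _create_zero_outputs(5, tf.float32, tf.constant(2))
with tf.Session() as sess:
    print(sess.run(zeros).shape)  # (2, 5)

With a nested `size`/`dtype` structure, nest.map_structure builds one such zero tensor per leaf.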
Example 12: _session_run_for_graph_structure_lookup
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def _session_run_for_graph_structure_lookup(self):
  with session.Session() as sess:
    u_name = "testDumpGraphStructureLookup/u"
    v_name = "testDumpGraphStructureLookup/v"
    w_name = "testDumpGraphStructureLookup/w"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)

    u.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

  return u_name, v_name, w_name, dump
Example 13: testOutputSlotWithoutOutgoingEdgeCanBeWatched
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
  """Test watching output slots not attached to any outgoing edges."""
  with session.Session() as sess:
    u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
    u = constant_op.constant(u_init_val, shape=[2, 2], name="u")

    # Create a control edge from a node with an output: From u to z.
    # Node u will get executed only because of the control edge. The output
    # tensor u:0 is not attached to any outgoing edge in the graph. This test
    # checks that the debugger can watch such a tensor.
    with ops.control_dependencies([u]):
      z = control_flow_ops.no_op(name="z")

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(z, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Assert that the DebugIdentity watch on u works properly.
    self.assertEqual(1, len(dump.dumped_tensor_data))
    datum = dump.dumped_tensor_data[0]
    self.assertEqual("u", datum.node_name)
    self.assertEqual(0, datum.output_slot)
    self.assertEqual("DebugIdentity", datum.debug_op)
    self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
Example 14: zeros
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to zero.

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to zero.

  For example:

  ```python
  tf.zeros([3, 4], tf.int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "zeros", [shape]) as name:
    if dtype == dtypes.bool:
      zero = False
    elif dtype == dtypes.string:
      zero = ""
    else:
      zero = 0
    try:
      shape = tensor_shape.as_shape(shape)
      output = constant(zero, shape=shape, dtype=dtype, name=name)
    except (TypeError, ValueError):
      shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
      output = fill(shape, constant(zero, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output
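The try/except above separates statically known shapes (folded into a single constant) from dynamic ones (a `Fill` op at run time). A sketch, assuming TensorFlow 1.x:

import tensorflow as tf  # assumes TF 1.x

static = tf.zeros([2, 3])              # shape known -> one Const op
print(static.op.type)                  # "Const"

n = tf.placeholder(tf.int32, [])
dynamic = tf.zeros(tf.stack([n, 3]))   # shape known only at run time
print(dynamic.op.type)                 # "Fill"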
Example 15: zeros_like
# Required import: from tensorflow.python.framework import constant_op [as alias]
# Or: from tensorflow.python.framework.constant_op import constant [as alias]
def zeros_like(tensor, dtype=None, name=None, optimize=True):
  """Creates a tensor with all elements set to zero.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to zero. Optionally,
  you can use `dtype` to specify a new type for the returned tensor.

  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
    name: A name for the operation (optional).
    optimize: if true, attempt to statically determine the shape of 'tensor'
      and encode it as a constant.

  Returns:
    A `Tensor` with all elements set to zero.
  """
  with ops.name_scope(name, "zeros_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    if tensor.shape.is_fully_defined():
      # We can produce a zeros tensor independent of the value of 'tensor',
      # since the shape is known statically.
      return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
    if dtype is not None and dtype != tensor.dtype:
      return zeros(shape_internal(tensor, optimize=optimize), dtype=dtype,
                   name=name)
    else:
      return gen_array_ops._zeros_like(tensor, name=name)
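A quick check of the two branches above (TensorFlow 1.x assumed): with a statically known shape the result is built via `zeros`, and passing a different `dtype` takes the `shape_internal` path:

import tensorflow as tf  # assumes TF 1.x

t = tf.constant([[1, 2, 3], [4, 5, 6]])
z_same = tf.zeros_like(t)                     # int32, shape [2, 3]
z_float = tf.zeros_like(t, dtype=tf.float32)  # dtype override

with tf.Session() as sess:
    print(sess.run(z_same))   # [[0 0 0] [0 0 0]]
    print(sess.run(z_float))  # [[0. 0. 0.] [0. 0. 0.]]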