This page collects typical usage examples of the Python method tensorflow.python.ops.nn_ops.relu. If you are unsure what nn_ops.relu does or how to call it, the curated code examples below may help. You can also explore the other methods of the module tensorflow.python.ops.nn_ops.
The following presents 15 code examples of nn_ops.relu, ordered by popularity.
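Before the project-specific examples, here is a minimal sketch of the bare op itself (assuming TensorFlow 1.x graph mode; this sketch is not one of the collected examples):

import tensorflow as tf
from tensorflow.python.ops import nn_ops

x = tf.constant([-2.0, -0.5, 0.0, 1.5])
y = nn_ops.relu(x)  # element-wise max(x, 0)
with tf.Session() as sess:
  print(sess.run(y))  # [0.  0.  0.  1.5]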
Example 1: vgg_arg_scope
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc
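A usage sketch, assuming the tf.contrib `layers` and `arg_scope` imports the snippet relies on: every conv2d / fully_connected call inside the returned scope inherits the ReLU activation, L2 regularizer, and zero bias initializer declared above.

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with arg_scope(vgg_arg_scope(weight_decay=0.0005)):
  # conv2d now defaults to relu activation, L2 weight decay, and SAME padding
  net = layers.conv2d(images, 64, [3, 3], scope='conv1_1')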
Example 2: relu_layer
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def relu_layer(x, weights, biases, name=None):
  """Computes Relu(x * weight + biases).

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
    return nn_ops.relu(xw_plus_b, name=name)
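A hand-checkable usage sketch (values chosen so the arithmetic is easy to verify):

import tensorflow as tf

x = tf.constant([[1.0, -1.0]])   # batch=1, in_units=2
w = tf.constant([[2.0], [3.0]])  # in_units=2, out_units=1
b = tf.constant([-1.0])
out = relu_layer(x, w, b)        # relu(1*2 + (-1)*3 + (-1)) = relu(-2) = 0
with tf.Session() as sess:
  print(sess.run(out))  # [[0.]]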
Example 3: hinge_loss
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
    values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
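After the labels are mapped from {0, 1} to {-1, +1}, the per-example loss is max(0, 1 - y * logit), which the relu implements. A worked sketch:

import tensorflow as tf

logits = tf.constant([0.8, -0.4])
labels = tf.constant([1.0, 0.0])
loss = hinge_loss(logits, labels)
# labels map to {+1, -1}; loss_i = max(0, 1 - y_i * logit_i)
# = [max(0, 1 - 0.8), max(0, 1 - 0.4)] = [0.2, 0.6]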
Example 4: test_unary_ops
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def test_unary_ops(self):
  ops = [
      ('relu', nn_ops.relu, nn.relu),
      ('relu6', nn_ops.relu6, nn.relu6),
      ('crelu', nn_ops.crelu, nn.crelu),
      ('elu', nn_ops.elu, nn.elu),
      ('softplus', nn_ops.softplus, nn.softplus),
      ('l2_loss', nn_ops.l2_loss, nn.l2_loss),
      ('softmax', nn_ops.softmax, nn.softmax),
      ('log_softmax', nn_ops.log_softmax, nn.log_softmax),
  ]
  for op_name, tf_op, lt_op in ops:
    golden_tensor = tf_op(self.original_lt.tensor)
    golden_lt = core.LabeledTensor(golden_tensor, self.axes)
    actual_lt = lt_op(self.original_lt)
    self.assertIn(op_name, actual_lt.name)
    self.assertLabeledTensorsEqual(golden_lt, actual_lt)
Example 5: _fully_connected_basic_use
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def _fully_connected_basic_use(self, x, num_output_units, expected_shape):
  output = _layers.legacy_fully_connected(
      x, num_output_units, activation_fn=nn_ops.relu)

  with session.Session() as sess:
    with self.assertRaises(errors_impl.FailedPreconditionError):
      sess.run(output)

    variables_lib.global_variables_initializer().run()
    out_value, shape_value = sess.run([output, array_ops.shape(output)])

  self.assertAllClose(shape_value, expected_shape)
  self.assertEqual(output.get_shape().as_list(), expected_shape)
  self.assertTrue(np.all(out_value >= 0), 'Relu should have all values >= 0.')

  self.assertEqual(2,
                   len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)))
  self.assertEqual(
      0, len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)))
Example 6: vgg_arg_scope
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc
Example 7: hinge_loss
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def hinge_loss(logits, labels=None, scope=None, target=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.
    target: Deprecated alias for `labels`.

  Returns:
    A `Tensor` of same shape as logits and target representing the loss values
    across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  labels = _labels(labels, target)
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
Example 8: network_arg_scope
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def network_arg_scope(is_training=True,
                      weight_decay=cfg.train.weight_decay,
                      batch_norm_decay=0.997,
                      batch_norm_epsilon=1e-5,
                      batch_norm_scale=True):
  batch_norm_params = {
      'is_training': is_training, 'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon, 'scale': batch_norm_scale,
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # 'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
      'trainable': cfg.train.bn_training,
  }
  with slim.arg_scope(
      [slim.conv2d, slim.separable_convolution2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=tf.nn.relu6,
      # activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params,
      padding='SAME'):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc
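A usage sketch, assuming the project's `cfg` config object is importable and `slim = tf.contrib.slim`, as the snippet implies:

import tensorflow as tf
slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(network_arg_scope(is_training=True)):
  # conv layers now default to relu6 + batch norm with the params above
  net = slim.conv2d(images, 32, [3, 3], scope='conv1')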
Example 9: __init__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def __init__(self, num_units, forget_bias=1.0, reuse_norm=False,
             input_size=None, activation=nn_ops.relu,
             layer_norm=True, norm_gain=1.0, norm_shift=0.0,
             loop_steps=1, decay_rate=0.9, learning_rate=0.5,
             dropout_keep_prob=1.0, dropout_prob_seed=None):
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)

  self._num_units = num_units
  self._activation = activation
  self._forget_bias = forget_bias
  self._reuse_norm = reuse_norm
  self._keep_prob = dropout_keep_prob
  self._seed = dropout_prob_seed
  self._layer_norm = layer_norm
  self._S = loop_steps
  self._eta = learning_rate
  self._lambda = decay_rate
  self._g = norm_gain
  self._b = norm_shift
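The excerpt omits the enclosing class name. Assuming this __init__ belongs to an RNNCell subclass (called FastWeightsRNNCell below purely for illustration), construction would look like:

# `FastWeightsRNNCell` is a hypothetical name; the excerpt does not show
# the real class this __init__ belongs to.
cell = FastWeightsRNNCell(num_units=64, loop_steps=1,
                          decay_rate=0.9, learning_rate=0.5,
                          activation=nn_ops.relu)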
Example 10: inception_v2_arg_scope
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
Author: MingtaoGuo, Project: Chinese-Character-and-Calligraphic-Image-Processing, Lines: 41, Source: inception_v2.py
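Typical use, assuming the companion `inception_v2` network builder defined alongside this scope in inception_v2.py:

import tensorflow as tf
from tensorflow.contrib.framework import arg_scope

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with arg_scope(inception_v2_arg_scope(weight_decay=0.00004)):
  logits, end_points = inception_v2(images, num_classes=1000)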
Example 11: alexnet_v2_arg_scope
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def alexnet_v2_arg_scope(weight_decay=0.0005):
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      biases_initializer=init_ops.constant_initializer(0.1),
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
Author: MingtaoGuo, Project: Chinese-Character-and-Calligraphic-Image-Processing, Lines: 11, Source: alexnet_v2.py
Example 12: hinge_loss
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", (logits, labels)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
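A worked sketch of the weighted variant (note the argument order here is labels first, then logits):

import tensorflow as tf

logits = tf.constant([0.8, -0.4, 2.0])
labels = tf.constant([1.0, 0.0, 1.0])
loss = hinge_loss(labels, logits, weights=[1.0, 1.0, 0.0])
# per-example losses: [max(0, 1-0.8), max(0, 1+(-1)*(-0.4)), max(0, 1-2.0)]
# = [0.2, 0.6, 0.0]; SUM_BY_NONZERO_WEIGHTS averages over the 2 nonzero
# weights: (0.2 + 0.6) / 2 = 0.4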
Example 13: zero_fraction
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def zero_fraction(value, name=None):
  """Returns the fraction of zeros in `value`.

  If `value` is empty, the result is `nan`.

  This is useful in summaries to measure and report sparsity.  For example,

  ```python
  z = tf.nn.relu(...)
  summ = tf.summary.scalar('sparsity', tf.nn.zero_fraction(z))
  ```

  Args:
    value: A tensor of numeric type.
    name: A name for the operation (optional).

  Returns:
    The fraction of zeros in `value`, with type `float32`.
  """
  with ops.name_scope(name, "zero_fraction", [value]):
    value = ops.convert_to_tensor(value, name="value")
    zero = constant_op.constant(0, dtype=value.dtype, name="zero")
    return math_ops.reduce_mean(
        math_ops.cast(math_ops.equal(value, zero), dtypes.float32))
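A numeric sketch of how this pairs with relu to measure sparsity:

import tensorflow as tf
from tensorflow.python.ops import nn_ops

z = nn_ops.relu(tf.constant([-1.0, 2.0, -3.0, 4.0]))
frac = zero_fraction(z)  # relu zeroes the two negatives -> 2/4 = 0.5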
Example 14: __init__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def __init__(self, num_units, num_in_proj=None,
             initializer=None, forget_bias=1.0,
             y_activation=nn_ops.relu, reuse=None):
  """Initialize the parameters for an +RNN cell.

  Args:
    num_units: int, The number of units in the +RNN cell
    num_in_proj: (optional) int, The input dimensionality for the RNN.
      If creating the first layer of an +RNN, this should be set to
      `num_units`. Otherwise, this should be set to `None` (default).
      If `None`, dimensionality of `inputs` should be equal to `num_units`,
      otherwise ValueError is thrown.
    initializer: (optional) The initializer to use for the weight matrices.
    forget_bias: (optional) float, default 1.0, The initial bias of the
      forget gates, used to reduce the scale of forgetting at the beginning
      of the training.
    y_activation: (optional) Activation function of the states passed
      through depth. Default is `tf.nn.relu`.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
  """
  super(IntersectionRNNCell, self).__init__(_reuse=reuse)

  self._num_units = num_units
  self._initializer = initializer
  self._forget_bias = forget_bias
  self._num_input_proj = num_in_proj
  self._y_activation = y_activation
  self._reuse = reuse
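A usage sketch, assuming TF 1.x where IntersectionRNNCell lives in tf.contrib.rnn; per the docstring, the input feature dimension must equal num_units unless num_in_proj is set:

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 10, 64])  # batch, time, features
cell = IntersectionRNNCell(num_units=64)  # feature dim matches num_units
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)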
Example 15: overfeat_arg_scope
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import relu [as alias]
def overfeat_arg_scope(weight_decay=0.0005):
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc