This article collects typical usage examples of the ops.op_scope method from the Python module tensorflow.python.framework. If you are wondering what ops.op_scope does, how it is used, or what real code that calls it looks like, the curated examples below should help; you can also consult the tensorflow.python.framework.ops module for the surrounding API.
The 9 code examples below illustrate ops.op_scope and are ordered roughly by popularity.
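As a quick orientation (a minimal sketch, not taken from the examples below): ops.op_scope(values, name, default_name) opens a graph name scope that falls back to default_name when name is None and associates values with the graph. In later TensorFlow releases it was superseded by ops.name_scope(name, default_name, values), which takes the same information in a different argument order. The sketch assumes an older TensorFlow 0.x/1.x graph-mode environment in which ops.op_scope is still available; scoped_add is a hypothetical helper, not part of TensorFlow.

import tensorflow as tf
from tensorflow.python.framework import ops

def scoped_add(a, b, name=None):
    # Ops created inside the scope are grouped under `name` if it is given,
    # otherwise under the default name "scoped_add".
    with ops.op_scope([a, b], name, "scoped_add") as scope:
        a = ops.convert_to_tensor(a, name="a")
        b = ops.convert_to_tensor(b, name="b")
        return tf.add(a, b, name=scope)

def scoped_add_v2(a, b, name=None):
    # Equivalent with the newer API; note the different argument order.
    with ops.name_scope(name, "scoped_add", [a, b]) as scope:
        a = ops.convert_to_tensor(a, name="a")
        b = ops.convert_to_tensor(b, name="b")
        return tf.add(a, b, name=scope)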
Example 1: binary_cross_entropy
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import op_scope [as alias]
# The snippet below also assumes: import tensorflow as tf
def binary_cross_entropy(preds, targets, name=None):
"""Computes binary cross entropy given `preds`.
    For brevity, let `x = preds`, `z = targets`. The logistic loss is
    loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
Args:
preds: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `preds`.
"""
eps = 1e-12
with ops.op_scope([preds, targets], name, "bce_loss") as name:
preds = ops.convert_to_tensor(preds, name="preds")
targets = ops.convert_to_tensor(targets, name="targets")
return tf.reduce_mean(-(targets * tf.log(preds + eps) +
(1. - targets) * tf.log(1. - preds + eps)))
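A minimal usage sketch for the function above (not from the original source); it assumes a TensorFlow 0.x/1.x graph-mode environment where ops.op_scope and tf.log are still available, and the toy tensors are hypothetical:

import tensorflow as tf

preds = tf.constant([0.9, 0.2, 0.8], dtype=tf.float32)    # predicted probabilities
targets = tf.constant([1.0, 0.0, 1.0], dtype=tf.float32)  # 0/1 labels
loss = binary_cross_entropy(preds, targets, name="example_bce")

with tf.Session() as sess:
    print(sess.run(loss))  # mean binary cross entropy over the three elements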
Example 2: binary_cross_entropy_with_logits
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import op_scope [as alias]
# The snippet below also assumes: import tensorflow as tf
def binary_cross_entropy_with_logits(logits, targets, name=None):
"""Computes binary cross entropy given `logits`.
For brevity, let `x = logits`, `z = targets`. The logistic loss is
loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))
Args:
logits: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `logits`.
"""
eps = 1e-12
with ops.op_scope([logits, targets], name, "bce_loss") as name:
logits = ops.convert_to_tensor(logits, name="logits")
targets = ops.convert_to_tensor(targets, name="targets")
return tf.reduce_mean(-(logits * tf.log(targets + eps) +
(1. - logits) * tf.log(1. - targets + eps)))
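Note that, despite its name, the function above applies the cross-entropy formula to the raw inputs without a sigmoid. When you do have unbounded logits, the built-in numerically stable op is usually preferable; a small sketch assuming TensorFlow 1.x (the toy tensors are hypothetical):

import tensorflow as tf

logits = tf.constant([2.0, -1.0, 0.5])
labels = tf.constant([1.0, 0.0, 1.0])
# Applies the sigmoid internally and avoids log(0) issues.
stable_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))

with tf.Session() as sess:
    print(sess.run(stable_loss))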
Example 3: binary_cross_entropy_with_logits
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import op_scope [as alias]
# The snippet below also assumes: import tensorflow as tf
def binary_cross_entropy_with_logits(logits, targets, name=None):
"""Computes binary cross entropy given `logits`.
For brevity, let `x = logits`, `z = targets`. The logistic loss is
loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))
Args:
logits: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `logits`.
"""
eps = 1e-12
with ops.op_scope([logits, targets], name, "bce_loss") as name:
logits = ops.convert_to_tensor(logits, name="logits")
targets = ops.convert_to_tensor(targets, name="targets")
loss_batch = -(
logits * tf.log(targets + eps) + (1. - logits) * tf.log(1. - targets + eps)
)
loss_mean = tf.reduce_mean(loss_batch)
return loss_batch, loss_mean
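Unlike Example 2, this variant returns both the per-element loss tensor and its mean, which is convenient for per-sample inspection or weighting. A hypothetical usage sketch under the same TensorFlow 1.x assumption:

import tensorflow as tf

logits = tf.constant([0.9, 0.2, 0.8])
targets = tf.constant([1.0, 0.0, 1.0])
loss_batch, loss_mean = binary_cross_entropy_with_logits(logits, targets)

with tf.Session() as sess:
    per_element, mean = sess.run([loss_batch, loss_mean])
    # per_element has the same shape as the inputs; mean is a scalar.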
Example 4: adjust_gamma
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import op_scope [as alias]
# The snippet below also assumes: from tensorflow.python.framework import constant_op, dtypes
def adjust_gamma(image, gamma=1, gain=1):
"""Performs Gamma Correction on the input image.
Also known as Power Law Transform. This function transforms the
input image pixelwise according to the equation Out = In**gamma
after scaling each pixel to the range 0 to 1.
Args:
image : A Tensor.
gamma : A scalar. Non negative real number.
gain : A scalar. The constant multiplier.
Returns:
A Tensor. Gamma corrected output image.
Notes:
For gamma greater than 1, the histogram will shift towards left and
the output image will be darker than the input image.
For gamma less than 1, the histogram will shift towards right and
the output image will be brighter than the input image.
References:
[1] http://en.wikipedia.org/wiki/Gamma_correction
"""
with ops.op_scope([image, gamma, gain], None, 'adjust_gamma') as name:
# Convert pixel value to DT_FLOAT for computing adjusted image
img = ops.convert_to_tensor(image, name='img', dtype=dtypes.float32)
# Keep image dtype for computing the scale of corresponding dtype
image = ops.convert_to_tensor(image, name='image')
if gamma < 0:
raise ValueError("Gamma should be a non-negative real number")
# scale = max(dtype) - min(dtype)
scale = constant_op.constant(image.dtype.limits[1] - image.dtype.limits[0], dtype=dtypes.float32)
# According to the definition of gamma correction
adjusted_img = (img / scale) ** gamma * scale * gain
return adjusted_img
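A hypothetical usage sketch for adjust_gamma (assuming the imports above and a TensorFlow 1.x session). Because the scale is taken from the input dtype's limits, an 8-bit image is normalized by 255 before the power-law transform is applied:

import numpy as np
import tensorflow as tf

raw = np.array([[0, 64], [128, 255]], dtype=np.uint8)  # toy 2x2 grayscale image
darker = adjust_gamma(raw, gamma=2.0)    # gamma > 1 darkens the image
brighter = adjust_gamma(raw, gamma=0.5)  # gamma < 1 brightens the image

with tf.Session() as sess:
    print(sess.run([darker, brighter]))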
Example 5: binary_cross_entropy
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import op_scope [as alias]
# The snippet below also assumes: import tensorflow as tf
def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'):
"""Computes binary cross entropy given `output`.
    For brevity, let `x = output`, `z = target`. The binary cross entropy loss is
    loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
Parameters
----------
output : tensor of type `float32` or `float64`.
target : tensor of the same type and shape as `output`.
    epsilon : float
        A small value added inside the logarithms to avoid taking log(0).
name : string
An optional name to attach to this layer.
References
-----------
- `DRAW <https://github.com/ericjang/draw/blob/master/draw.py#L73>`_
"""
# from tensorflow.python.framework import ops
# with ops.op_scope([output, target], name, "bce_loss") as name:
# output = ops.convert_to_tensor(output, name="preds")
# target = ops.convert_to_tensor(targets, name="target")
with tf.name_scope(name):
return tf.reduce_mean(tf.reduce_sum(-(target * tf.log(output + epsilon) +
(1. - target) * tf.log(1. - output + epsilon)), axis=1))
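Unlike Example 1, this version sums the element-wise loss over axis 1 (per sample) before averaging over the batch, so it expects 2-D [batch, features] inputs. A hypothetical sketch, again assuming TensorFlow 1.x:

import tensorflow as tf

output = tf.constant([[0.9, 0.1], [0.2, 0.8]])  # predicted probabilities, shape [2, 2]
target = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # 0/1 labels, same shape
loss = binary_cross_entropy(output, target)     # scalar: batch mean of per-sample sums

with tf.Session() as sess:
    print(sess.run(loss))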
Example 6: do_center_crop
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import op_scope [as alias]
# The snippet below also assumes: from tensorflow.python.ops import array_ops, control_flow_ops, logging_ops, math_ops
def do_center_crop(value, size, name=None):
"""Randomly crops a tensor to a given size.
Slices a shape `size` portion out of `value` at a uniformly chosen offset.
Requires `value.shape >= size`.
If a dimension should not be cropped, pass the full size of that dimension.
For example, RGB images can be cropped with
`size = [crop_height, crop_width, 3]`.
Args:
value: Input tensor to crop.
size: 1-D tensor with size the rank of `value`.
seed: Python integer. Used to create a random seed. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for this operation (optional).
Returns:
A cropped tensor of the same rank as `value` and shape `size`.
"""
# TODO(shlens): Implement edge case to guarantee output size dimensions.
# If size > value.shape, zero pad the result so that it always has shape
# exactly size.
from tensorflow.python.framework import dtypes
with ops.op_scope([value, size], name, "center_crop") as name:
value = ops.convert_to_tensor(value, name="value")
size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
shape = array_ops.shape(value)
check = logging_ops.Assert(
math_ops.reduce_all(shape >= size),
["Need value.shape >= size, got ", shape, size])
shape = control_flow_ops.with_dependencies([check], shape)
        # Start the slice at the center offset so that the crop is centered.
        offset = shape // 2 - size // 2
        return array_ops.slice(value, offset, size, name=name)
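A hypothetical usage sketch (assuming the imports above and a TensorFlow 1.x session) that crops the central 2x2x3 region out of a 4x4 RGB image:

import numpy as np
import tensorflow as tf

image = tf.constant(np.arange(4 * 4 * 3, dtype=np.float32).reshape(4, 4, 3))
crop = do_center_crop(image, size=[2, 2, 3])

with tf.Session() as sess:
    print(sess.run(crop).shape)  # (2, 2, 3)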
Example 7: assign_moving_average
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import op_scope [as alias]
# The snippet below also assumes: from tensorflow.python.ops import math_ops, state_ops
def assign_moving_average(variable, value, decay, name=None):
"""Compute the moving average of a variable.
The moving average of 'variable' updated with 'value' is:
variable * decay + value * (1 - decay)
The returned Operation sets 'variable' to the newly computed moving average.
The new value of 'variable' can be set with the 'AssignSub' op as:
variable -= (1 - decay) * (variable - value)
Args:
variable: A Variable.
value: A tensor with the same shape as 'variable'
decay: A float Tensor or float value. The moving average decay.
name: Optional name of the returned operation.
Returns:
An Operation that updates 'variable' with the newly computed
moving average.
"""
with ops.op_scope([variable, value, decay], name, "AssignMovingAvg") as scope:
with ops.colocate_with(variable):
decay = ops.convert_to_tensor(1.0 - decay, name="decay")
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
return state_ops.assign_sub(variable,
(variable - value) * decay,
name=scope)
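A hypothetical usage sketch (assuming the imports above and a TensorFlow 1.x session). With decay 0.9, one update moves the average from 0.0 toward 10.0 by a factor of (1 - decay): 0.0 * 0.9 + 10.0 * 0.1 = 1.0:

import tensorflow as tf

ema_var = tf.Variable(0.0)
update = assign_moving_average(ema_var, value=10.0, decay=0.9)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(update))  # 1.0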
Example 8: sequence_loss_by_example
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import op_scope [as alias]
# The snippet below also assumes: from tensorflow.python.ops import array_ops, math_ops, nn_ops
def sequence_loss_by_example(logits, targets, weights,
average_across_timesteps=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: The log-perplexity for each sequence.
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with ops.op_scope(logits + targets + weights, name,
"sequence_loss_by_example"):
log_perp_list = []
for logit, target, weight in zip(logits, targets, weights):
if softmax_loss_function is None:
target = array_ops.reshape(target, [-1])
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logit, target)
else:
crossent = softmax_loss_function(logit, target)
log_perp_list.append(crossent * weight)
log_perps = math_ops.add_n(log_perp_list)
if average_across_timesteps:
total_size = math_ops.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
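A hypothetical usage sketch (assuming the imports above and a TensorFlow 1.x session). Newer 1.x releases reject the positional call to sparse_softmax_cross_entropy_with_logits in the default branch, so the sketch passes an explicit softmax_loss_function:

import tensorflow as tf

num_steps, batch_size, num_symbols = 3, 2, 5
logits = [tf.random_normal([batch_size, num_symbols]) for _ in range(num_steps)]
targets = [tf.constant([1, 3], dtype=tf.int32) for _ in range(num_steps)]
weights = [tf.ones([batch_size]) for _ in range(num_steps)]

def loss_fn(logit, target):
    return tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=logit)

log_perps = sequence_loss_by_example(logits, targets, weights,
                                     softmax_loss_function=loss_fn)

with tf.Session() as sess:
    print(sess.run(log_perps).shape)  # (2,): one log-perplexity per sequence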
Example 9: sequence_loss
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import op_scope [as alias]
# The snippet below also assumes: from tensorflow.python.framework import dtypes
# and: from tensorflow.python.ops import array_ops, math_ops, plus sequence_loss_by_example from Example 8
def sequence_loss(logits, targets, weights,
average_across_timesteps=True, average_across_batch=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits, batch-collapsed.
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
average_across_batch: If set, divide the returned cost by the batch size.
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, defaults to "sequence_loss".
Returns:
A scalar float Tensor: The average log-perplexity per symbol (weighted).
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
with ops.op_scope(logits + targets + weights, name, "sequence_loss"):
cost = math_ops.reduce_sum(sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=average_across_timesteps,
softmax_loss_function=softmax_loss_function))
if average_across_batch:
batch_size = array_ops.shape(targets[0])[0]
return cost / math_ops.cast(batch_size, dtypes.float32)
else:
return cost
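Continuing in the same hypothetical TensorFlow 1.x setting (and reusing sequence_loss_by_example from Example 8), sequence_loss collapses the per-sequence losses into a single scalar averaged over the batch:

import tensorflow as tf

num_steps, batch_size, num_symbols = 3, 2, 5
logits = [tf.random_normal([batch_size, num_symbols]) for _ in range(num_steps)]
targets = [tf.constant([1, 3], dtype=tf.int32) for _ in range(num_steps)]
weights = [tf.ones([batch_size]) for _ in range(num_steps)]
loss_fn = lambda logit, target: tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=target, logits=logit)

total = sequence_loss(logits, targets, weights, softmax_loss_function=loss_fn)

with tf.Session() as sess:
    print(sess.run(total))  # scalar: average log-perplexity per symbol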