This article collects typical usage examples of the Python method tensorflow.python.ops.nn.sigmoid. If you are unsure what nn.sigmoid does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the containing module, tensorflow.python.ops.nn.
The following presents 9 code examples of nn.sigmoid, sorted by popularity by default.
Example 1: _kl_bernoulli_bernoulli
# Required module import: from tensorflow.python.ops import nn [as alias]
# or: from tensorflow.python.ops.nn import sigmoid [as alias]
# Also used here: from tensorflow.python.framework import ops
#                 from tensorflow.python.ops import math_ops
def _kl_bernoulli_bernoulli(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.

  Args:
    a: instance of a Bernoulli distribution object.
    b: instance of a Bernoulli distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_bernoulli_bernoulli".

  Returns:
    Batchwise KL(a || b).
  """
  with ops.name_scope(name, "kl_bernoulli_bernoulli",
                      values=[a.logits, b.logits]):
    delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits)
    delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits)
    return (math_ops.sigmoid(a.logits) * delta_probs0
            + math_ops.sigmoid(-a.logits) * delta_probs1)
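For reference, the expression above is the textbook Bernoulli KL divergence p*log(p/q) + (1 - p)*log((1 - p)/(1 - q)) rewritten in terms of logits via log sigmoid(x) = -softplus(-x). Below is a minimal NumPy sanity check of that identity; it is an illustrative sketch, not part of the original example, and the logit values are arbitrary.

import numpy as np

def bernoulli_kl(p, q):
    # KL(Bernoulli(p) || Bernoulli(q)) in its textbook form.
    return p * np.log(p / q) + (1 - p) * np.log((1 - p) / (1 - q))

softplus = lambda x: np.log1p(np.exp(x))
a_logits, b_logits = 0.3, -1.2                      # arbitrary example logits
p = 1.0 / (1.0 + np.exp(-a_logits))                 # sigmoid(a_logits)
q = 1.0 / (1.0 + np.exp(-b_logits))                 # sigmoid(b_logits)

delta_probs0 = softplus(-b_logits) - softplus(-a_logits)
delta_probs1 = softplus(b_logits) - softplus(a_logits)
kl_from_logits = p * delta_probs0 + (1.0 - p) * delta_probs1

print(np.isclose(bernoulli_kl(p, q), kl_from_logits))   # True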
Example 2: __init__
# Required module import: from tensorflow.python.ops import nn [as alias]
# or: from tensorflow.python.ops.nn import sigmoid [as alias]
# Also used here: from tensorflow.python.framework import dtypes, ops
def __init__(self,
             p=None,
             dtype=dtypes.int32,
             validate_args=False,
             allow_nan_stats=True,
             name="BernoulliWithSigmoidP"):
  parameters = locals()
  parameters.pop("self")
  with ops.name_scope(name) as ns:
    super(BernoulliWithSigmoidP, self).__init__(
        p=nn.sigmoid(p, name="sigmoid_p"),
        dtype=dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=ns)
  self._parameters = parameters
Example 3: _kl_bernoulli_bernoulli
# Required module import: from tensorflow.python.ops import nn [as alias]
# or: from tensorflow.python.ops.nn import sigmoid [as alias]
# Also used here: from tensorflow.python.framework import ops
#                 from tensorflow.python.ops import math_ops
def _kl_bernoulli_bernoulli(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.

  Args:
    a: instance of a Bernoulli distribution object.
    b: instance of a Bernoulli distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_bernoulli_bernoulli".

  Returns:
    Batchwise KL(a || b).
  """
  with ops.name_scope(name, "kl_bernoulli_bernoulli", [a.logits, b.logits]):
    return (math_ops.sigmoid(a.logits) * (-nn.softplus(-a.logits) +
                                          nn.softplus(-b.logits)) +
            math_ops.sigmoid(-a.logits) * (-nn.softplus(a.logits) +
                                           nn.softplus(b.logits)))
Example 4: __init__
# Required module import: from tensorflow.python.ops import nn [as alias]
# or: from tensorflow.python.ops.nn import sigmoid [as alias]
# Also used here: from tensorflow.python.framework import dtypes, ops
def __init__(self,
             p=None,
             dtype=dtypes.int32,
             validate_args=False,
             allow_nan_stats=True,
             name="BernoulliWithSigmoidP"):
  parameters = locals()
  parameters.pop("self")
  with ops.name_scope(name) as ns:
    super(BernoulliWithSigmoidP, self).__init__(
        p=nn.sigmoid(p),
        dtype=dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=ns)
  self._parameters = parameters
Example 5: hard_sigmoid
# Required module import: from tensorflow.python.ops import nn [as alias]
# or: from tensorflow.python.ops.nn import sigmoid [as alias]
# Also used here: from tensorflow.python.ops import clip_ops
# (_to_tensor is a private helper defined elsewhere in backend.py)
def hard_sigmoid(x):
  """Segment-wise linear approximation of sigmoid.

  Faster than sigmoid.
  Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
  In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  x = (0.2 * x) + 0.5
  zero = _to_tensor(0., x.dtype.base_dtype)
  one = _to_tensor(1., x.dtype.base_dtype)
  x = clip_ops.clip_by_value(x, zero, one)
  return x
Author: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines of code: 20 | Source file: backend.py
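To see what the approximation trades away, here is a quick comparison against the exact sigmoid in plain NumPy. This is an illustrative sketch; hard_sigmoid_np is a hypothetical stand-in for the backend function above, not part of the original source.

import numpy as np

def hard_sigmoid_np(x):
    # Same piecewise-linear rule as above: clip 0.2 * x + 0.5 into [0, 1].
    return np.clip(0.2 * x + 0.5, 0.0, 1.0)

xs = np.array([-4.0, -1.0, 0.0, 1.0, 4.0])
print(hard_sigmoid_np(xs))           # [0.    0.3   0.5   0.7   1.   ]
print(1.0 / (1.0 + np.exp(-xs)))     # [0.018 0.269 0.5   0.731 0.982]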
Example 6: focal_loss
# Required module import: from tensorflow.python.ops import nn [as alias]
# or: from tensorflow.python.ops.nn import sigmoid [as alias]
# This example uses the public API directly: import tensorflow as tf
def focal_loss(labels, logits, gamma=2.0, alpha=0.75, normalize=True):
  # Binarize the labels: any positive value counts as the positive class.
  labels = tf.where(labels > 0, tf.ones_like(labels), tf.zeros_like(labels))
  labels = tf.cast(labels, tf.float32)
  probs = tf.sigmoid(logits)
  CE = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
  # Class-balancing weight alpha_t and modulating factor (1 - p_t)^gamma.
  alpha_t = tf.ones_like(logits) * alpha
  alpha_t = tf.where(labels > 0, alpha_t, 1.0 - alpha_t)
  probs_t = tf.where(labels > 0, probs, 1.0 - probs)
  focal_matrix = alpha_t * tf.pow((1.0 - probs_t), gamma)
  fl = focal_matrix * CE
  fl = tf.reduce_sum(fl)
  if normalize:
    # n_pos = tf.reduce_sum(labels)
    # fl = fl / tf.cast(n_pos, tf.float32)
    total_weights = tf.stop_gradient(tf.reduce_sum(focal_matrix))
    fl = fl / total_weights
  return fl
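A minimal usage sketch for the function above (assumes a TensorFlow build with eager execution enabled; the label and logit values are made up for illustration):

import tensorflow as tf

labels = tf.constant([1.0, 0.0, 1.0, 0.0])    # 1 = positive, 0 = background
logits = tf.constant([2.0, -1.0, 0.5, 3.0])   # last entry is a confident false positive
loss = focal_loss(labels, logits, gamma=2.0, alpha=0.75)
print(float(loss))                            # normalized focal loss, a scalar

Because of the (1 - p_t)^gamma modulating factor, well-classified entries contribute far less to the total than hard ones such as the last logit.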
Example 7: _entropy
# Required module import: from tensorflow.python.ops import nn [as alias]
# or: from tensorflow.python.ops.nn import sigmoid [as alias]
# Also used here: from tensorflow.python.ops import math_ops
def _entropy(self):
  return (-self.logits * (math_ops.sigmoid(self.logits) - 1) +
          nn.softplus(-self.logits))
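This is the standard Bernoulli entropy -p*log(p) - (1 - p)*log(1 - p) expressed through logits. A quick NumPy check of the identity (illustrative sketch; the logit value is arbitrary):

import numpy as np

logits = 0.8
p = 1.0 / (1.0 + np.exp(-logits))                    # sigmoid(logits)
softplus = lambda x: np.log1p(np.exp(x))

entropy_probs = -p * np.log(p) - (1.0 - p) * np.log(1.0 - p)
entropy_logits = -logits * (p - 1.0) + softplus(-logits)
print(np.isclose(entropy_probs, entropy_logits))     # True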
Example 8: __init__
# Required module import: from tensorflow.python.ops import nn [as alias]
# or: from tensorflow.python.ops.nn import sigmoid [as alias]
# Also used here: from tensorflow.python.framework import dtypes, ops
def __init__(self,
             logits=None,
             dtype=dtypes.int32,
             validate_args=False,
             allow_nan_stats=True,
             name="BernoulliWithSigmoidProbs"):
  parameters = locals()
  with ops.name_scope(name):
    super(BernoulliWithSigmoidProbs, self).__init__(
        probs=nn.sigmoid(logits, name="sigmoid_probs"),
        dtype=dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=name)
  self._parameters = parameters
Example 9: sigmoid
# Required module import: from tensorflow.python.ops import nn [as alias]
# or: from tensorflow.python.ops.nn import sigmoid [as alias]
def sigmoid(x):
  """Element-wise sigmoid.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  return nn.sigmoid(x)
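A minimal usage sketch for this backend wrapper (assumes a TensorFlow 1.x-style graph session, matching the internal APIs used throughout these examples):

import tensorflow as tf
from tensorflow.python.ops import nn

x = tf.constant([-2.0, 0.0, 2.0])
y = nn.sigmoid(x)
with tf.Session() as sess:
    print(sess.run(y))        # approximately [0.119, 0.5, 0.881]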