This article collects typical usage examples of the ones_like function from tensorflow.python.ops.array_ops in Python. If you have been wondering what exactly ones_like does, how to use it, or what it looks like in real code, the curated examples below may help.
The following presents 15 code examples of the ones_like function, sorted by popularity by default.
Example 1: sample_n
def sample_n(self, n, seed=None, name="sample_n"):
  """Sample `n` observations from the Beta Distributions.

  Args:
    n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
      observations to sample.
    seed: Python integer, the random seed.
    name: The name to give this op.

  Returns:
    samples: `[n, ...]`, a `Tensor` of `n` samples for each
      of the distributions determined by broadcasting the hyperparameters.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self.a, self.b, n]):
      a = array_ops.ones_like(self._a_b_sum, dtype=self.dtype) * self.a
      b = array_ops.ones_like(self._a_b_sum, dtype=self.dtype) * self.b
      n = ops.convert_to_tensor(n, name="n")
      gamma1_sample = random_ops.random_gamma(
          [n,], a, dtype=self.dtype, seed=seed)
      gamma2_sample = random_ops.random_gamma(
          [n,], b, dtype=self.dtype, seed=seed)
      beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
      n_val = tensor_util.constant_value(n)
      final_shape = tensor_shape.vector(n_val).concatenate(
          self._a_b_sum.get_shape())
      beta_sample.set_shape(final_shape)
      return beta_sample
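The broadcast-then-Gamma-ratio trick above is easy to check outside TensorFlow: if G1 ~ Gamma(a) and G2 ~ Gamma(b), then G1 / (G1 + G2) ~ Beta(a, b). Below is a minimal NumPy sketch of the same idea (the names a, b, and n are illustrative, not taken from the snippet's class). Note that this particular snippet passes the same seed to both Gamma draws; the variant in Example 9 derives a fresh seed for the second draw instead.

import numpy as np

# Hypothetical hyperparameters; broadcasting a scalar `a` against a vector
# `b` mirrors the `ones_like(self._a_b_sum) * self.a` trick above.
a = np.float64(2.0)
b = np.array([1.0, 3.0, 5.0])
a = np.ones_like(b) * a                           # broadcast a to b's shape

n = 100000
rng = np.random.default_rng(0)
gamma1 = rng.gamma(shape=a, size=(n,) + a.shape)  # [n, 3]
gamma2 = rng.gamma(shape=b, size=(n,) + b.shape)  # [n, 3]
beta_sample = gamma1 / (gamma1 + gamma2)

# Beta(a, b) has mean a / (a + b); the empirical means should be close.
print(beta_sample.mean(axis=0))                   # ~ [0.667, 0.4, 0.286]
print(a / (a + b))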
Example 2: _BiasAddGradGrad
def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd)
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None
  shape = array_ops.shape(op.inputs[0])
  rank = array_ops.rank(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)

  if data_format == b"NCHW":
    expanded_shape = array_ops.concat([
        array_ops.ones_like(shape[:-3]), bias_shape,
        array_ops.ones_like(shape[-2:])
    ], 0)
    tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
  else:
    expanded_shape = array_ops.concat(
        [array_ops.ones_like(shape[:-1]), bias_shape], 0)
    tile_mults = array_ops.concat([shape[:-1], [1]], 0)

  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)
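What this gradient does is broadcast the incoming per-channel bias gradient back to the full input shape: ones_like pads the bias shape with 1s on every non-channel axis, then tile replicates it along those axes. A minimal NumPy sketch of the NHWC branch, with made-up shapes for illustration:

import numpy as np

# Hypothetical NHWC input shape and its bias gradient (one value per channel).
shape = np.array([2, 4, 4, 3])                     # [N, H, W, C]
received_grad = np.array([0.1, 0.2, 0.3])          # [C]
bias_shape = np.array(received_grad.shape)         # [3]

# NHWC branch: pad with ones on every axis except the last, then tile.
expanded_shape = np.concatenate([np.ones_like(shape[:-1]), bias_shape])  # [1, 1, 1, 3]
tile_mults = np.concatenate([shape[:-1], [1]])                           # [2, 4, 4, 1]

expanded_grad = received_grad.reshape(expanded_shape)
full_grad = np.tile(expanded_grad, tile_mults)
print(full_grad.shape)   # (2, 4, 4, 3) -- the bias grad broadcast to the input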
Example 3: _sample_n
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  k = self.event_shape_tensor()[0]
  # broadcast the total_count and logits to same shape
  n_draws = array_ops.ones_like(
      self.logits[..., 0], dtype=n_draws.dtype) * n_draws
  logits = array_ops.ones_like(
      n_draws[..., array_ops.newaxis], dtype=self.logits.dtype) * self.logits
  # flatten the total_count and logits
  flat_logits = array_ops.reshape(logits, [-1, k])    # [B1B2...Bm, k]
  flat_ndraws = n * array_ops.reshape(n_draws, [-1])  # [B1B2...Bm]

  # computes each total_count and logits situation by map_fn
  def _sample_single(args):
    logits, n_draw = args[0], args[1]  # [K], []
    x = random_ops.multinomial(logits[array_ops.newaxis, ...], n_draw,
                               seed)  # [1, n*n_draw]
    x = array_ops.reshape(x, shape=[n, -1])  # [n, n_draw]
    x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k), axis=-2)  # [n, k]
    return x

  x = functional_ops.map_fn(
      _sample_single, [flat_logits, flat_ndraws],
      dtype=self.dtype)  # [B1B2...Bm, n, k]
  # reshape the results to proper shape
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  x = array_ops.reshape(x, final_shape)  # [n, B1, B2,..., Bm, k]
  return x
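The heart of this snippet is the pair of mutual ones_like multiplications, which broadcast a possibly-scalar total_count against the batch of logits (and vice versa) before flattening, so that each batch member's multinomial can be drawn independently. A rough NumPy analogue of the flatten-and-sample structure, with a plain loop standing in for map_fn (names and shapes are illustrative):

import numpy as np

rng = np.random.default_rng(0)
logits = np.log(np.array([[0.2, 0.8], [0.5, 0.5], [0.9, 0.1]]))  # [B, k]
total_count = 5                                                   # scalar
n = 4                                                             # samples

# Broadcast total_count to one value per batch member, as ones_like does.
n_draws = np.ones_like(logits[..., 0], dtype=np.int64) * total_count  # [B]

probs = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)
# One multinomial count matrix of shape [n, k] per batch member, then stack.
x = np.stack([rng.multinomial(d, p, size=n)
              for p, d in zip(probs, n_draws)])  # [B, n, k]
x = np.transpose(x, (1, 0, 2))                   # [n, B, k]
print(x.shape)   # (4, 3, 2); each x[i, j] sums to total_count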
Example 4: grad
def grad(dmm, dr):
  return [
      math_ops.matmul(dmm, b, transpose_b=True) +
      math_ops.matmul(array_ops.ones_like(b * dr), b, transpose_b=True),
      math_ops.matmul(a, dmm, transpose_b=True) +
      math_ops.matmul(a, array_ops.ones_like(a) * dr, transpose_b=True)
  ]
Example 5: _log_prob
def _log_prob(self, x):
  x = self._assert_valid_sample(x)
  # broadcast logits or x if need be.
  logits = self.logits
  if (not x.get_shape().is_fully_defined() or
      not logits.get_shape().is_fully_defined() or
      x.get_shape() != logits.get_shape()):
    logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
    x = array_ops.ones_like(logits, dtype=x.dtype) * x
  logits_shape = array_ops.shape(math_ops.reduce_sum(logits, axis=[-1]))
  logits_2d = array_ops.reshape(logits, [-1, self.event_size])
  x_2d = array_ops.reshape(x, [-1, self.event_size])
  # compute the normalization constant
  k = math_ops.cast(self.event_size, x.dtype)
  log_norm_const = (math_ops.lgamma(k)
                    + (k - 1.)
                    * math_ops.log(self.temperature))
  # compute the unnormalized density
  log_softmax = nn_ops.log_softmax(logits_2d - x_2d * self._temperature_2d)
  log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keepdims=False)
  # combine unnormalized density with normalization constant
  log_prob = log_norm_const + log_unnorm_prob
  # Reshapes log_prob to be consistent with shape of user-supplied logits
  ret = array_ops.reshape(log_prob, logits_shape)
  return ret
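The logits = ones_like(x) * logits; x = ones_like(logits) * x pair is a common pre-broadcasting idiom: multiplying each tensor by a ones tensor shaped like the other forces both onto their common broadcast shape before reshaping to 2-D. A tiny NumPy illustration:

import numpy as np

x = np.zeros([5, 1, 3])       # e.g. a batch of samples
logits = np.zeros([4, 3])     # e.g. per-distribution parameters

# Mutual ones_like multiplication pushes both to the broadcast shape.
logits_b = np.ones_like(x) * logits
x_b = np.ones_like(logits_b) * x
print(logits_b.shape, x_b.shape)   # (5, 4, 3) (5, 4, 3)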
Example 6: full_exp_with_logits
def full_exp_with_logits(name, labels=None, logits=None):
  """Computes exponential loss given `logits`.

  Args:
    name: A name for the operation (optional).
    labels: A `Tensor` of the same type and shape as `logits`.
    logits: A `Tensor` of type `float32` or `float64`.

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    exponential losses.

  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    try:
      labels.get_shape().merge_with(logits.get_shape())
    except ValueError:
      raise ValueError("logits and labels must have the same shape (%s vs %s)"
                       % (logits.get_shape(), labels.get_shape()))

    # Default threshold of 0 to switch between classes
    zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
    ones = array_ops.ones_like(logits, dtype=logits.dtype)
    neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype)

    # Convert labels to 1 and -1
    cond_labels = (labels > zeros)
    labels_converted = array_ops.where(cond_labels, ones, neg_ones)

    return math_ops.exp(-1.0 * logits * labels_converted)
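Numerically, the function maps {0, 1} labels to {-1, +1} using the ones/neg_ones tensors and returns exp(-y * logit). A quick NumPy check of what it computes:

import numpy as np

logits = np.array([2.0, -1.0, 0.5])
labels = np.array([1.0, 0.0, 1.0])

# where(labels > 0, +1, -1), built above from ones_like / zeros_like.
labels_converted = np.where(labels > 0, 1.0, -1.0)
loss = np.exp(-logits * labels_converted)
print(loss)   # [exp(-2), exp(-1), exp(-0.5)] ~ [0.135, 0.368, 0.607]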
Example 7: log_prob
def log_prob(self, event, name="log_prob"):
  """Log of the probability mass function.

  Args:
    event: `int32` or `int64` binary Tensor.
    name: A name for this operation (optional).

  Returns:
    The log-probabilities of the events.
  """
  # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
  # inconsistent behavior for logits = inf/-inf.
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self.logits, event]):
      event = ops.convert_to_tensor(event, name="event")
      event = math_ops.cast(event, self.logits.dtype)
      logits = self.logits
      # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
      # so we do this here.
      # TODO(b/30637701): Check dynamic shape, and don't broadcast if the
      # dynamic shapes are the same.
      if (not event.get_shape().is_fully_defined() or
          not logits.get_shape().is_fully_defined() or
          event.get_shape() != logits.get_shape()):
        logits = array_ops.ones_like(event) * logits
        event = array_ops.ones_like(logits) * event
      return -nn.sigmoid_cross_entropy_with_logits(logits, event)
Example 8: _sample_n
def _sample_n(self, n, seed=None):
  a = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.a
  b = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.b
  gamma1_sample = random_ops.random_gamma([n], a, dtype=self.dtype, seed=seed)
  gamma2_sample = random_ops.random_gamma([n], b, dtype=self.dtype, seed=seed)
  beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
  return beta_sample
Example 9: _sample_n
def _sample_n(self, n, seed=None):
  a = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.a
  b = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.b
  gamma1_sample = random_ops.random_gamma(
      [n,], a, dtype=self.dtype, seed=seed)
  gamma2_sample = random_ops.random_gamma(
      [n,], b, dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "beta"))
  beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
  return beta_sample
Example 10: grad_fn
def grad_fn(inputs, trainable_variables, unused_outputs,
            unused_grad_outputs):
  grad_inputs = [
      array_ops.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)
  ]
  grad_vars = [
      array_ops.ones_like(t) * (i + len(inputs) + 1.)
      for i, t in enumerate(trainable_variables)
  ]
  return grad_inputs, grad_vars
Example 11: exp_with_logits
def exp_with_logits(name, eps, labels=None, logits=None):
  """Computes exponential loss given `logits`.

  The loss returned is exp(-targets * modified_predictions), where
  modified_predictions are 1 if sigmoid is >= 0.5 + eps (i.e. we predict the
  positive class), -1 if sigmoid < 0.5 - eps (i.e. we predict the negative
  class), and a*x + b in the interval (0.5 - eps, 0.5 + eps), where a = 1/eps
  and b = -1/(2*eps).

  Args:
    name: A name for the operation (optional).
    eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b.
    labels: A `Tensor` of the same type and shape as `logits`.
    logits: A `Tensor` of type `float32` or `float64`.

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    exponential losses.

  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    try:
      labels.get_shape().merge_with(logits.get_shape())
    except ValueError:
      raise ValueError("logits and labels must have the same shape (%s vs %s)"
                       % (logits.get_shape(), labels.get_shape()))

    # Default threshold to switch between classes
    zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
    ones = array_ops.ones_like(logits, dtype=logits.dtype)
    neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype)

    # Convert labels to 1 and -1
    cond_labels = (labels > zeros)
    labels_converted = array_ops.where(cond_labels, ones, neg_ones)

    # Convert predictions to 1 and -1; the loss we build is
    # min(1, max(-1, a*x + b)) where a = 1/eps and b = -1/(2*eps).
    a = 1.0 / eps
    b = -1.0 / 2 / eps
    probs = math_ops.sigmoid(logits)
    y = a * probs + b
    # Build max(-1, ax+b)
    cond = (y < -1)
    max_res = array_ops.where(cond, neg_ones, y)
    # Build min part
    cond = (max_res > 1)
    min_res = array_ops.where(cond, ones, max_res)
    preds_converted = min_res
    return math_ops.exp(-preds_converted * labels_converted)
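Compared with Example 6, this variant replaces the raw logit with a clipped linear function of the sigmoid probability, min(1, max(-1, p/eps - 1/(2*eps))), which saturates at +/-1 outside the band (0.5 - eps, 0.5 + eps) and passes through 0 at p = 0.5. A NumPy sketch of just that transformation:

import numpy as np

eps = 0.1
logits = np.array([-3.0, -0.2, 0.0, 0.2, 3.0])
probs = 1.0 / (1.0 + np.exp(-logits))     # sigmoid

a = 1.0 / eps
b = -1.0 / (2 * eps)
y = a * probs + b
preds_converted = np.clip(y, -1.0, 1.0)   # min(1, max(-1, a*p + b))
print(preds_converted)   # saturates at -1 / +1 away from p = 0.5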
Example 12: _moment
def _moment(self, n):
  """Compute the n'th (uncentered) moment."""
  expanded_concentration1 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration1
  expanded_concentration0 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration0
  beta_arg0 = 1 + n / expanded_concentration1
  beta_arg = array_ops.stack([beta_arg0, expanded_concentration0], -1)
  log_moment = math_ops.log(expanded_concentration0) + special_math_ops.lbeta(
      beta_arg)
  return math_ops.exp(log_moment)
Example 13: _sample_n
def _sample_n(self, n, seed=None):
  expanded_concentration1 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration1
  expanded_concentration0 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration0
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  uniform_sample = random_ops.random_uniform(
      shape=shape, minval=0.0, maxval=1.0, dtype=self.dtype, seed=seed)
  kumaraswamy_sample = (1 - uniform_sample**(1. / expanded_concentration0))**(
      1. / expanded_concentration1)
  return kumaraswamy_sample
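This is inverse-CDF sampling: the Kumaraswamy CDF is F(x) = 1 - (1 - x**a)**b with a = concentration1 and b = concentration0, so X = (1 - U**(1/b))**(1/a) for U ~ Uniform(0, 1). The NumPy sketch below (scalar concentrations, illustrative values) draws samples this way and checks the empirical mean against Example 12's moment formula E[X**n] = b * B(1 + n/a, b) at n = 1, with the Beta function expanded via math.gamma:

import numpy as np
from math import gamma

concentration1 = 2.0   # a
concentration0 = 3.0   # b

rng = np.random.default_rng(0)
u = rng.uniform(size=200000)
samples = (1 - u ** (1. / concentration0)) ** (1. / concentration1)

# E[X] = b * B(1 + 1/a, b), with B(x, y) = gamma(x)*gamma(y)/gamma(x + y).
x, y = 1 + 1 / concentration1, concentration0
mean = concentration0 * gamma(x) * gamma(y) / gamma(x + y)
print(samples.mean(), mean)   # both ~ 0.457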
Example 14: _log_prob
def _log_prob(self, counts):
  if self.validate_args:
    counts = distribution_util.embed_check_nonnegative_discrete(
        counts, check_integer=True)
  counts *= array_ops.ones_like(self.probs)
  probs = self.probs * array_ops.ones_like(counts)

  safe_domain = array_ops.where(
      math_ops.equal(counts, 0.),
      array_ops.zeros_like(probs),
      probs)
  return counts * math_ops.log1p(-safe_domain) + math_ops.log(probs)
Example 15: _log_prob
def _log_prob(self, x):
  if self.validate_args:
    x = distribution_util.embed_check_nonnegative_integer_form(x)
  else:
    # For consistency with cdf, we take the floor.
    x = math_ops.floor(x)
  x *= array_ops.ones_like(self.probs)
  probs = self.probs * array_ops.ones_like(x)

  safe_domain = array_ops.where(
      math_ops.equal(x, 0.),
      array_ops.zeros_like(probs),
      probs)
  return x * math_ops.log1p(-safe_domain) + math_ops.log(probs)
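Both of the last two examples compute the Geometric log-pmf log P(X = k) = k*log(1 - p) + log(p), counting k failures before the first success. The safe_domain trick replaces p with 0 wherever k = 0, so the first term is exactly 0 there instead of 0 * log(0) = NaN when p = 1. A NumPy check:

import numpy as np

probs = np.array([0.3, 0.3, 1.0])
k = np.array([0., 2., 0.])

# Guard: at k = 0 the first term must be exactly 0, even when p = 1.
safe_domain = np.where(k == 0., np.zeros_like(probs), probs)
log_pmf = k * np.log1p(-safe_domain) + np.log(probs)
print(np.exp(log_pmf))   # [0.3, 0.147, 1.0] == p * (1 - p)**k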