This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.reduce_mean. If you have been wondering what reduce_mean does, how to call it, or what real-world usage looks like, the curated code examples below may help.
The following presents 15 code examples of the reduce_mean function, sorted by popularity by default.
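Before the examples, a minimal sketch of the basic semantics of reduce_mean (a standalone snippet assuming a TF 1.x graph session, which is the style used throughout the examples below): it averages tensor elements, either over all elements or along the axes you pass, optionally keeping the reduced dimensions.

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([[1., 2.], [3., 4.]])
with tf.Session() as sess:
  print(sess.run(math_ops.reduce_mean(x)))                          # 2.5, mean of all elements
  print(sess.run(math_ops.reduce_mean(x, axis=0)))                  # [2. 3.], column means
  print(sess.run(math_ops.reduce_mean(x, axis=1, keepdims=True)))   # [[1.5] [3.5]], row means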
Example 1: testSampleConsistentStats
def testSampleConsistentStats(self):
  loc = np.float32([[-1., 1], [1, -1]])
  scale = np.float32([1., 0.5])
  n_samp = 1e4
  with self.test_session() as sess:
    ind = independent_lib.Independent(
        distribution=mvn_diag_lib.MultivariateNormalDiag(
            loc=loc,
            scale_identity_multiplier=scale),
        reduce_batch_ndims=1)

    x = ind.sample(int(n_samp), seed=42)
    sample_mean = math_ops.reduce_mean(x, axis=0)
    sample_var = math_ops.reduce_mean(
        math_ops.squared_difference(x, sample_mean), axis=0)
    sample_std = math_ops.sqrt(sample_var)
    sample_entropy = -math_ops.reduce_mean(ind.log_prob(x), axis=0)

    [
        sample_mean_, sample_var_, sample_std_, sample_entropy_,
        actual_mean_, actual_var_, actual_std_, actual_entropy_,
        actual_mode_,
    ] = sess.run([
        sample_mean, sample_var, sample_std, sample_entropy,
        ind.mean(), ind.variance(), ind.stddev(), ind.entropy(), ind.mode(),
    ])

    self.assertAllClose(sample_mean_, actual_mean_, rtol=0.02, atol=0.)
    self.assertAllClose(sample_var_, actual_var_, rtol=0.04, atol=0.)
    self.assertAllClose(sample_std_, actual_std_, rtol=0.02, atol=0.)
    self.assertAllClose(sample_entropy_, actual_entropy_, rtol=0.01, atol=0.)
    self.assertAllClose(loc, actual_mode_, rtol=1e-6, atol=0.)
Example 2: npairs_loss
def npairs_loss(labels, embeddings_anchor, embeddings_positive,
                reg_lambda=0.002, print_losses=False):
  """Computes the npairs loss.

  Npairs loss expects paired data where a pair is composed of samples from the
  same class, and each pair in the minibatch has a different label. The loss
  has two components. The first component is the L2 regularizer on the
  embedding vectors. The second component is the cross entropy loss, which
  takes each row of the pair-wise similarity matrix as logits and the
  remapped one-hot labels as labels.

  See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf

  Args:
    labels: 1-D tf.int32 `Tensor` of shape [batch_size/2].
    embeddings_anchor: 2-D Tensor of shape [batch_size/2, embedding_dim] for
      the embedding vectors for the anchor images. Embeddings should not be
      l2 normalized.
    embeddings_positive: 2-D Tensor of shape [batch_size/2, embedding_dim] for
      the embedding vectors for the positive images. Embeddings should not be
      l2 normalized.
    reg_lambda: Float. L2 regularization term on the embedding vectors.
    print_losses: Boolean. Option to print the xent and l2loss.

  Returns:
    npairs_loss: tf.float32 scalar.
  """
  # pylint: enable=line-too-long
  # Add the regularizer on the embedding.
  reg_anchor = math_ops.reduce_mean(
      math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
  reg_positive = math_ops.reduce_mean(
      math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
  l2loss = math_ops.multiply(
      0.25 * reg_lambda, reg_anchor + reg_positive, name='l2loss')

  # Get per pair similarities.
  similarity_matrix = math_ops.matmul(
      embeddings_anchor, embeddings_positive, transpose_a=False,
      transpose_b=True)

  # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
  lshape = array_ops.shape(labels)
  assert lshape.shape == 1
  labels = array_ops.reshape(labels, [lshape[0], 1])

  labels_remapped = math_ops.to_float(
      math_ops.equal(labels, array_ops.transpose(labels)))
  labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)

  # Add the softmax loss.
  xent_loss = nn.softmax_cross_entropy_with_logits(
      logits=similarity_matrix, labels=labels_remapped)
  xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')

  if print_losses:
    xent_loss = logging_ops.Print(
        xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])

  return l2loss + xent_loss
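To see what the "remapped one-hot labels" look like, here is a small numpy sketch (hypothetical label values) of the labels_remapped computation above: rows of the label-equality matrix are normalized so each row sums to one. With all-distinct labels, which is the setting npairs expects, this reduces to the identity matrix.

import numpy as np

labels = np.array([0, 1, 0, 2])                        # hypothetical batch_size/2 = 4
eq = (labels[:, None] == labels[None, :]).astype(np.float32)
labels_remapped = eq / eq.sum(axis=1, keepdims=True)   # soft targets for the softmax loss
print(labels_remapped)
# [[0.5 0.  0.5 0. ]
#  [0.  1.  0.  0. ]
#  [0.5 0.  0.5 0. ]
#  [0.  0.  0.  1. ]]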
Example 3: training_loss
def training_loss(self, logits, target, features, name="training_loss"):
  """Returns training loss tensor for this head.

  Training loss is different from the loss reported in TensorBoard because we
  should respect the example weights when computing the gradient:

    L = sum_{i} w_{i} * l_{i} / B

  where B is the number of examples in the batch, and l_{i} and w_{i} are the
  individual loss and example weight for example i.

  Args:
    logits: logits, a float tensor.
    target: either a tensor for labels or in multihead case, a dict of string
      to target tensor.
    features: features dict.
    name: Op name.

  Returns:
    Loss tensor.
  """
  target = target[self.name] if isinstance(target, dict) else target
  loss_unweighted = self._loss_fn(logits, target)
  weight_tensor = self.get_weight_tensor(features)
  if weight_tensor is None:
    return math_ops.reduce_mean(loss_unweighted, name=name)
  loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
  return math_ops.reduce_mean(loss_weighted, name=name)
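To make the formula concrete, a tiny numerical sketch with hypothetical values: the training loss divides by the batch size B, not by the sum of the weights.

import numpy as np

l = np.array([1.0, 2.0, 3.0])    # hypothetical per-example losses l_i
w = np.array([1.0, 0.0, 2.0])    # hypothetical example weights w_i
print(np.mean(l))                # 2.0    -- unweighted mean
print(np.mean(w * l))            # 2.3333 -- training loss: sum_i w_i * l_i / B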
Example 4: _sliced_wasserstein
def _sliced_wasserstein(a, b, random_sampling_count, random_projection_dim):
  """Compute the approximate sliced Wasserstein distance.

  Args:
    a: (matrix) Distribution "a" of samples (row, col).
    b: (matrix) Distribution "b" of samples (row, col).
    random_sampling_count: (int) Number of random projections to average.
    random_projection_dim: (int) Dimension of the random projection space.

  Returns:
    Float containing the approximate distance between "a" and "b".
  """
  s = array_ops.shape(a)
  means = []
  for _ in range(random_sampling_count):
    # Random projection matrix.
    proj = random_ops.random_normal(
        [array_ops.shape(a)[1], random_projection_dim])
    proj *= math_ops.rsqrt(
        math_ops.reduce_sum(math_ops.square(proj), 0, keepdims=True))
    # Project both distributions and sort them.
    proj_a = math_ops.matmul(a, proj)
    proj_b = math_ops.matmul(b, proj)
    proj_a = _sort_rows(proj_a, s[0])
    proj_b = _sort_rows(proj_b, s[0])
    # Pairwise Wasserstein distance.
    wdist = math_ops.reduce_mean(math_ops.abs(proj_a - proj_b))
    means.append(wdist)
  return math_ops.reduce_mean(means)
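Note that the final reduce_mean is applied to a plain Python list of scalar tensors; the list is first converted (packed) into a rank-1 tensor and then averaged. A minimal sketch of that behavior, using the public tf.reduce_mean alias:

import tensorflow as tf

means = [tf.constant(1.0), tf.constant(2.0), tf.constant(4.0)]
avg = tf.reduce_mean(means)      # the list is packed into a rank-1 tensor first
with tf.Session() as sess:
  print(sess.run(avg))           # 2.3333333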
Example 5: test_docstring_example
def test_docstring_example(self):
  # Produce the first 1000 members of the Halton sequence in 3 dimensions.
  num_results = 1000
  dim = 3
  with self.test_session():
    sample = halton.sample(dim, num_results=num_results, randomized=False)

    # Evaluate the integral of x_1 * x_2^2 * x_3^3 over the three dimensional
    # hypercube.
    powers = math_ops.range(1.0, limit=dim + 1)
    integral = math_ops.reduce_mean(
        math_ops.reduce_prod(sample ** powers, axis=-1))
    true_value = 1.0 / math_ops.reduce_prod(powers + 1.0)

    # Produces a relative absolute error of 1.7%.
    self.assertAllClose(integral.eval(), true_value.eval(), rtol=0.02)

    # Now skip the first 1000 samples and recompute the integral with the next
    # thousand samples. The sequence_indices argument can be used to do this.
    sequence_indices = math_ops.range(start=1000, limit=1000 + num_results,
                                      dtype=dtypes.int32)
    sample_leaped = halton.sample(dim, sequence_indices=sequence_indices,
                                  randomized=False)
    integral_leaped = math_ops.reduce_mean(
        math_ops.reduce_prod(sample_leaped ** powers, axis=-1))
    self.assertAllClose(integral_leaped.eval(), true_value.eval(), rtol=0.05)
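For reference, the exact value of the integral of x_1 * x_2^2 * x_3^3 over the unit cube is (1/2) * (1/3) * (1/4) = 1/24 ≈ 0.04167, which is what 1.0 / math_ops.reduce_prod(powers + 1.0) evaluates to. A plain pseudo-random Monte Carlo check of the same integral (a rough numpy sketch; its error shrinks more slowly than the quasi-random Halton estimate in the test):

import numpy as np

np.random.seed(0)
x = np.random.rand(100000, 3)                       # uniform samples over the unit cube
estimate = np.mean(np.prod(x ** np.arange(1, 4), axis=-1))
print(estimate, 1.0 / 24)                           # both approximately 0.0417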
Example 6: loss_wrapper
def loss_wrapper(labels, logits, weight_tensor):
  if weight_tensor is None:
    weight_tensor = array_ops.ones(
        shape=[array_ops.shape(labels)[0], 1], dtype=dtypes.float32)
  weighted_loss, _ = loss_fn(labels, weight_tensor, logits)
  average_loss = math_ops.reduce_mean(weighted_loss)
  return average_loss, average_loss / math_ops.reduce_mean(weight_tensor)
Example 7: _statistics
def _statistics(x, axes):
  """Calculate the mean and mean square of `x`.

  Modified from the implementation of `tf.nn.moments`.

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and
      variance.

  Returns:
    Two `Tensor` objects: `mean` and `mean_squared` (the mean of the squares).
  """
  # The dynamic range of fp16 is too limited to support the collection of
  # sufficient statistics. As a workaround we simply perform the operations
  # on 32-bit floats before converting the results back to fp16.
  y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x

  # Compute true mean while keeping the dims for proper broadcasting.
  shift = array_ops.stop_gradient(math_ops.reduce_mean(y, axes, keepdims=True))
  shifted_mean = math_ops.reduce_mean(y - shift, axes, keepdims=True)
  mean = shifted_mean + shift
  mean_squared = math_ops.reduce_mean(math_ops.square(y), axes, keepdims=True)

  mean = array_ops.squeeze(mean, axes)
  mean_squared = array_ops.squeeze(mean_squared, axes)
  if x.dtype == dtypes.float16:
    return (math_ops.cast(mean, dtypes.float16),
            math_ops.cast(mean_squared, dtypes.float16))
  else:
    return (mean, mean_squared)
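The two returned statistics are enough to recover the variance via Var(x) = E[x^2] - E[x]^2, which is how a helper like this is typically consumed. A numpy sketch with hypothetical data:

import numpy as np

x = np.random.randn(4, 5).astype(np.float32)
mean = x.mean(axis=0)
mean_sq = (x ** 2).mean(axis=0)
variance = mean_sq - mean ** 2
print(np.allclose(variance, x.var(axis=0), atol=1e-5))   # True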
Example 8: testNegativeBinomialSample
def testNegativeBinomialSample(self):
  with self.cached_session() as sess:
    probs = [.3, .9]
    total_count = [4., 11.]
    n = int(100e3)
    negbinom = negative_binomial.NegativeBinomial(
        total_count=total_count, probs=probs)

    samples = negbinom.sample(n, seed=12345)
    self.assertEqual([n, 2], samples.get_shape())

    sample_mean = math_ops.reduce_mean(samples, axis=0)
    sample_var = math_ops.reduce_mean(
        (samples - sample_mean[array_ops.newaxis, ...])**2., axis=0)
    sample_min = math_ops.reduce_min(samples)
    [sample_mean_, sample_var_, sample_min_] = sess.run([
        sample_mean, sample_var, sample_min])
    self.assertAllEqual(np.ones(sample_min_.shape, dtype=np.bool),
                        sample_min_ >= 0.0)
    for i in range(2):
      self.assertAllClose(sample_mean_[i],
                          stats.nbinom.mean(total_count[i], 1 - probs[i]),
                          atol=0.,
                          rtol=.02)
      self.assertAllClose(sample_var_[i],
                          stats.nbinom.var(total_count[i], 1 - probs[i]),
                          atol=0.,
                          rtol=.02)
Example 9: center_bias
def center_bias(self, center_bias_var, gradients, hessians):
  # For in memory, we already have a full batch of gradients and hessians,
  # so just take a mean and proceed with centering.
  mean_gradients = array_ops.expand_dims(
      math_ops.reduce_mean(gradients, 0), 0)
  mean_hessians = array_ops.expand_dims(math_ops.reduce_mean(hessians, 0), 0)
  return self._center_bias_fn(center_bias_var, mean_gradients, mean_hessians)
Example 10: _potential_scale_reduction_single_state
def _potential_scale_reduction_single_state(state, independent_chain_ndims):
  """potential_scale_reduction for one single state `Tensor`."""
  # We assume exactly one leading dimension indexes e.g. correlated samples
  # from each Markov chain.
  state = ops.convert_to_tensor(state, name="state")
  sample_ndims = 1

  sample_axis = math_ops.range(0, sample_ndims)
  chain_axis = math_ops.range(sample_ndims,
                              sample_ndims + independent_chain_ndims)
  sample_and_chain_axis = math_ops.range(
      0, sample_ndims + independent_chain_ndims)

  n = _axis_size(state, sample_axis)
  m = _axis_size(state, chain_axis)

  # In the language of [2],
  # B / n is the between chain variance, the variance of the chain means.
  # W is the within sequence variance, the mean of the chain variances.
  b_div_n = _reduce_variance(
      math_ops.reduce_mean(state, sample_axis, keepdims=True),
      sample_and_chain_axis,
      biased=False)
  w = math_ops.reduce_mean(
      _reduce_variance(state, sample_axis, keepdims=True, biased=True),
      sample_and_chain_axis)

  # sigma^2_+ is an estimate of the true variance, which would be unbiased if
  # each chain was drawn from the target. c.f. "law of total variance."
  sigma_2_plus = w + b_div_n

  return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
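In other words, with n samples per chain and m chains, the returned quantity is the Gelman-Rubin potential scale reduction statistic, which the final line implements (written here in LaTeX for readability; W, B, n and m are as in the comments above):

\hat{R} = \frac{m + 1}{m} \cdot \frac{\hat{\sigma}^2_+}{W} - \frac{n - 1}{m n},
\qquad \hat{\sigma}^2_+ = W + \frac{B}{n}

Values of \hat{R} close to 1 indicate that the between-chain variance is small relative to the within-chain variance, i.e. the chains appear to be sampling from similar distributions.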
Example 11: testCovarianceFromSampling
def testCovarianceFromSampling(self):
  alpha = np.array([[1., 2, 3],
                    [2.5, 4, 0.01]], dtype=np.float32)
  with self.test_session() as sess:
    dist = dirichlet_lib.Dirichlet(alpha)  # batch_shape=[2], event_shape=[3]
    x = dist.sample(int(250e3), seed=1)
    sample_mean = math_ops.reduce_mean(x, 0)
    x_centered = x - sample_mean[None, ...]
    sample_cov = math_ops.reduce_mean(math_ops.matmul(
        x_centered[..., None], x_centered[..., None, :]), 0)
    sample_var = array_ops.matrix_diag_part(sample_cov)
    sample_stddev = math_ops.sqrt(sample_var)
    [
        sample_mean_,
        sample_cov_,
        sample_var_,
        sample_stddev_,
        analytic_mean,
        analytic_cov,
        analytic_var,
        analytic_stddev,
    ] = sess.run([
        sample_mean,
        sample_cov,
        sample_var,
        sample_stddev,
        dist.mean(),
        dist.covariance(),
        dist.variance(),
        dist.stddev(),
    ])
    self.assertAllClose(sample_mean_, analytic_mean, atol=0., rtol=0.04)
    self.assertAllClose(sample_cov_, analytic_cov, atol=0., rtol=0.06)
    self.assertAllClose(sample_var_, analytic_var, atol=0., rtol=0.03)
    self.assertAllClose(sample_stddev_, analytic_stddev, atol=0., rtol=0.02)
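The matmul of the centered samples against themselves is a batched outer product, so the sample covariance above is simply the average outer product of centered samples. A numpy sketch of the same estimator (hypothetical data; bias=True matches the division by the sample count):

import numpy as np

x = np.random.randn(10000, 3)                     # hypothetical samples, event_dim = 3
x_c = x - x.mean(axis=0)
sample_cov = (x_c[:, :, None] * x_c[:, None, :]).mean(axis=0)
print(np.allclose(sample_cov, np.cov(x, rowvar=False, bias=True)))   # True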
Example 12: _reduce_variance
def _reduce_variance(x, axis=None, biased=True, keepdims=False):
  with ops.name_scope("reduce_variance"):
    x = ops.convert_to_tensor(x, name="x")
    mean = math_ops.reduce_mean(x, axis=axis, keepdims=True)
    biased_var = math_ops.reduce_mean(
        math_ops.squared_difference(x, mean), axis=axis, keepdims=keepdims)
    if biased:
      return biased_var
    n = _axis_size(x, axis)
    return (n / (n - 1.)) * biased_var
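The biased=False branch applies the usual Bessel correction n / (n - 1). A quick numpy check of both branches against np.var (hypothetical data):

import numpy as np

x = np.array([1.0, 2.0, 4.0, 7.0])
biased = np.mean((x - x.mean()) ** 2)            # what biased=True computes
unbiased = biased * len(x) / (len(x) - 1.)       # the n / (n - 1.) correction
print(np.allclose(biased, x.var(ddof=0)),        # True
      np.allclose(unbiased, x.var(ddof=1)))      # True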
Example 13: mean_only_frechet_classifier_distance_from_activations
def mean_only_frechet_classifier_distance_from_activations(
    real_activations, generated_activations):
  """Classifier distance for evaluating a generative model from activations.

  Given two Gaussian distributions with means m and m_w and covariance matrices
  C and C_w, this function calculates

    |m - m_w|^2

  which captures how different the distributions of real images and generated
  images (or more accurately, their visual features) are. Note that unlike the
  Inception score, this is a true distance and utilizes information about real
  world images.

  Note that when computed using sample means and sample covariance matrices,
  Frechet distance is biased. It is more biased for small sample sizes (e.g.
  even if the two distributions are the same, for a small sample size the
  expected Frechet distance is large). It is therefore important to use the
  same sample size when comparing two generative models by Frechet classifier
  distance.

  In this variant, we only compute the difference between the means of the
  fitted Gaussians. The computation leads to O(n) vs. O(n^2) memory usage, yet
  still retains much of the same information as FID.

  Args:
    real_activations: 2D array of activations of real images of size
      [num_images, num_dims] to use to compute Frechet Inception distance.
    generated_activations: 2D array of activations of generated images of size
      [num_images, num_dims] to use to compute Frechet Inception distance.

  Returns:
    The mean-only Frechet Inception distance. A floating-point scalar of the
    same type as the output of the activations.
  """
  real_activations.shape.assert_has_rank(2)
  generated_activations.shape.assert_has_rank(2)

  activations_dtype = real_activations.dtype
  if activations_dtype != dtypes.float64:
    real_activations = math_ops.cast(real_activations, dtypes.float64)
    generated_activations = math_ops.cast(generated_activations, dtypes.float64)

  # Compute means of activations.
  m = math_ops.reduce_mean(real_activations, 0)
  m_w = math_ops.reduce_mean(generated_activations, 0)

  # Next the distance between means.
  mean = math_ops.reduce_sum(
      math_ops.squared_difference(m, m_w))  # Equivalent to L2 but more stable.
  mofid = mean
  if activations_dtype != dtypes.float64:
    mofid = math_ops.cast(mofid, activations_dtype)

  return mofid
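Since only the means of the two activation sets are involved, the whole computation fits in a few lines of numpy. A sketch with hypothetical activation matrices:

import numpy as np

real = np.random.randn(64, 2048)                 # hypothetical real activations
fake = np.random.randn(64, 2048) + 0.1           # hypothetical generated activations
m, m_w = real.mean(axis=0), fake.mean(axis=0)
print(np.sum((m - m_w) ** 2))                    # |m - m_w|^2, the mean-only distance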
Example 14: _loss
def _loss(loss_unweighted, weight, name):
  """Returns loss."""
  if weight is None:
    loss = math_ops.reduce_mean(loss_unweighted, name=name)
    return loss, loss
  loss_weighted = _weighted_loss(loss_unweighted, weight)
  weighted_average_loss = math_ops.div(
      math_ops.reduce_sum(loss_weighted),
      math_ops.to_float(math_ops.reduce_sum(weight)),
      name="weighted_average_loss")
  loss = math_ops.reduce_mean(loss_weighted, name=name)
  return loss, weighted_average_loss
Example 15: classifier_score
def classifier_score(images, classifier_fn, num_batches=1):
  """Classifier score for evaluating a conditional generative model.

  This is based on the Inception Score, but for an arbitrary classifier.

  This technique is described in detail in https://arxiv.org/abs/1606.03498. In
  summary, this function calculates

    exp( E[ KL(p(y|x) || p(y)) ] )

  which captures how different the network's classification prediction is from
  the prior distribution over classes.

  Args:
    images: Images to calculate the classifier score for.
    classifier_fn: A function that takes images and produces logits based on a
      classifier.
    num_batches: Number of batches to split `images` into in order to
      efficiently run them through the classifier network.

  Returns:
    The classifier score. A floating-point scalar.
  """
  generated_images_list = array_ops.split(
      images, num_or_size_splits=num_batches)

  # Compute the classifier splits using the memory-efficient `map_fn`.
  logits = functional_ops.map_fn(
      fn=classifier_fn,
      elems=array_ops.stack(generated_images_list),
      parallel_iterations=1,
      back_prop=False,
      swap_memory=True,
      name='RunClassifier')
  logits = array_ops.concat(array_ops.unstack(logits), 0)
  logits.shape.assert_has_rank(2)

  # Use maximum precision for best results.
  logits_dtype = logits.dtype
  if logits_dtype != dtypes.float64:
    logits = math_ops.cast(logits, dtypes.float64)

  p = nn_ops.softmax(logits)
  q = math_ops.reduce_mean(p, axis=0)
  kl = _kl_divergence(p, logits, q)
  kl.shape.assert_has_rank(1)
  log_score = math_ops.reduce_mean(kl)
  final_score = math_ops.exp(log_score)

  # Cast back to the original dtype of the logits.
  if logits_dtype != dtypes.float64:
    final_score = math_ops.cast(final_score, logits_dtype)

  return final_score
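The score itself is straightforward to reproduce outside the graph. A numpy sketch of exp(E[KL(p(y|x) || p(y))]) with hypothetical per-image class probabilities (the TF code above computes the same quantity from logits for numerical stability):

import numpy as np

p = np.array([[0.9, 0.05, 0.05],                 # hypothetical p(y|x) for three images
              [0.1, 0.8, 0.1],
              [0.2, 0.2, 0.6]])
q = p.mean(axis=0)                               # marginal p(y), like reduce_mean(p, axis=0)
kl = np.sum(p * (np.log(p) - np.log(q)), axis=1) # per-image KL(p(y|x) || p(y))
print(np.exp(kl.mean()))                         # the classifier (Inception) score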