This article compiles typical usage examples of the Python method tensorflow.compat.v1.reduce_mean. If you have been wondering exactly what v1.reduce_mean does and how to use it, the curated code samples below should help. You can also explore further usage examples from the module it belongs to, tensorflow.compat.v1.
The following presents 15 code examples of the v1.reduce_mean method, sorted by popularity by default.
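Before the examples, here is a minimal, self-contained sketch of tf.compat.v1.reduce_mean itself (the tensor values are illustrative; axis and keepdims are the standard arguments of this API):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant([[1., 2.], [3., 4.]])
full_mean = tf.reduce_mean(x)                         # mean of all entries: 2.5
row_means = tf.reduce_mean(x, axis=1)                 # per-row means: [1.5, 3.5]
kept_dims = tf.reduce_mean(x, axis=1, keepdims=True)  # shape [2, 1] instead of [2]

with tf.Session() as sess:
  print(sess.run([full_mean, row_means, kept_dims]))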
Example 1: loss_function
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def loss_function(self, inputs, build_network_result):
  logits = build_network_result.logits

  # Unpack model output back to locations and confidence scores of predictions
  # Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]
  # Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]
  pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)

  # Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]
  # Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]
  # Shape of num_gt: [batch_size]
  _, gt_loc, gt_label, num_gt = inputs
  gt_label = tf.cast(gt_label, tf.int32)

  box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)
  class_loss = self._classification_loss(pred_label, gt_label, num_gt)

  tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))
  tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))
  return class_loss + box_loss
Example 2: loss_function
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def loss_function(self, inputs, build_network_result):
  """Returns the op to measure the loss of the model."""
  logits = build_network_result.logits
  _, labels = inputs
  # TODO(laigd): consider putting the aux logit in the Inception model,
  # which could call super.loss_function twice, once with the normal logits
  # and once with the aux logits.
  aux_logits = build_network_result.extra_info
  with tf.name_scope('xentropy'):
    mlperf.logger.log(key=mlperf.tags.MODEL_HP_LOSS_FN, value=mlperf.tags.CCE)
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(
        logits=logits, labels=labels)
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  if aux_logits is not None:
    with tf.name_scope('aux_xentropy'):
      aux_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
          logits=aux_logits, labels=labels)
      aux_loss = 0.4 * tf.reduce_mean(aux_cross_entropy, name='aux_loss')
      loss = tf.add_n([loss, aux_loss])
  return loss
Example 3: layer_norm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def layer_norm(x, reduction_indices, epsilon=1e-9, gain=None, bias=None,
               per_element=True, scope=None):
  """Layer-normalizes x along reduction_indices, with learned gain and bias."""
  reduction_indices = ensure_list(reduction_indices)
  mean = tf.reduce_mean(x, reduction_indices, keep_dims=True)
  variance = tf.reduce_mean(tf.squared_difference(x, mean),
                            reduction_indices, keep_dims=True)
  normalized = (x - mean) / tf.sqrt(variance + epsilon)
  dtype = x.dtype
  shape = x.get_shape().as_list()
  for i in six.moves.range(len(shape)):
    if i not in reduction_indices or not per_element:
      shape[i] = 1
  with tf.variable_scope(scope or 'layer_norm'):
    if gain is None:
      gain = tf.get_variable('gain', shape=shape, dtype=dtype,
                             initializer=tf.ones_initializer())
    if bias is None:
      bias = tf.get_variable('bias', shape=shape, dtype=dtype,
                             initializer=tf.zeros_initializer())
  return gain * normalized + bias
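As a usage sketch for the function above (assuming the ensure_list helper from the same codebase is available; the placeholder input is hypothetical):

x = tf.placeholder(tf.float32, shape=[None, 128])           # hypothetical [batch, features] input
y = layer_norm(x, reduction_indices=[1], scope='ln_demo')   # normalize over the feature axis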
Example 4: two_class_log_likelihood
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def two_class_log_likelihood(predictions, labels, weights_fn=None):
  """Log-likelihood for two class classification with 0/1 labels.

  Args:
    predictions: A float valued tensor of shape [`batch_size`]. Each
      component should be between 0 and 1.
    labels: An int valued tensor of shape [`batch_size`]. Each component
      should either be 0 or 1.
    weights_fn: unused.

  Returns:
    A pair, with the average log likelihood in the first component.
  """
  del weights_fn
  float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
  batch_probs = tf.stack([1. - float_predictions, float_predictions], axis=-1)
  int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32)
  onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64)
  chosen_probs = tf.einsum(
      "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs")
  avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs))
  return avg_log_likelihood, tf.constant(1.0)
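A quick worked usage of the function above (numbers are illustrative): with predictions [0.9, 0.2] and labels [1, 0], the chosen probabilities are 0.9 and 1 - 0.2 = 0.8, so:

preds = tf.constant([0.9, 0.2])
labels = tf.constant([1, 0])
avg_ll, weight = two_class_log_likelihood(preds, labels)
# avg_ll evaluates to (log(0.9) + log(0.8)) / 2 ≈ -0.1643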
Example 5: testAccuracyTopKMetric
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def testAccuracyTopKMetric(self):
  predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
  targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
  expected = np.mean((predictions == targets).astype(float))
  with self.test_session() as session:
    predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
    scores1, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=1)
    scores2, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=7)
    a1 = tf.reduce_mean(scores1)
    a2 = tf.reduce_mean(scores2)
    session.run(tf.global_variables_initializer())
    actual1, actual2 = session.run([a1, a2])
  self.assertAlmostEqual(actual1, expected)
  self.assertAlmostEqual(actual2, 1.0)
Example 6: testTwoClassLogLikelihoodVersusOldImplementation
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def testTwoClassLogLikelihoodVersusOldImplementation(self):
  def alt_two_class_log_likelihood_impl(predictions, labels):
    float_labels = tf.cast(labels, dtype=tf.float64)
    float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
    # likelihood should be just p for class 1, and 1 - p for class 0.
    # signs is 1 for class 1, and -1 for class 0.
    signs = 2 * float_labels - tf.ones_like(float_labels)
    # constant_term is 1 for class 0, and 0 for class 1.
    constant_term = tf.ones_like(float_labels) - float_labels
    likelihoods = constant_term + signs * float_predictions
    log_likelihoods = tf.log(likelihoods)
    avg_log_likelihood = tf.reduce_mean(log_likelihoods)
    return avg_log_likelihood

  predictions = np.random.rand(1, 10, 1)
  targets = np.random.randint(2, size=10)
  with self.test_session() as session:
    new_log_likelihood, _ = metrics.two_class_log_likelihood(
        predictions, targets)
    alt_log_likelihood = alt_two_class_log_likelihood_impl(
        predictions, targets)
    new_impl, alt_impl = session.run([new_log_likelihood, alt_log_likelihood])
  self.assertAlmostEqual(new_impl, alt_impl)
Example 7: average_sharded_losses
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def average_sharded_losses(sharded_losses):
  """Average losses across datashards.

  Args:
    sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
      can be a single Tensor or a 2-tuple (numerator and denominator).

  Returns:
    losses: dict<str loss_name, Tensor avg_loss>
  """
  losses = {}
  for loss_name in sorted(sharded_losses[0]):
    all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
    if isinstance(all_shards[0], tuple):
      sharded_num, sharded_den = zip(*all_shards)
      mean_loss = (
          tf.add_n(sharded_num) / tf.maximum(
              tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
    else:
      mean_loss = tf.reduce_mean(all_shards)
    losses[loss_name] = mean_loss
  return losses
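A small worked usage of the function above (values are illustrative): a plain loss is averaged across shards, while a (numerator, denominator) loss divides the summed numerators by the summed denominators:

shard_a = {'training': tf.constant(1.0),
           'extra': (tf.constant(2.0), tf.constant(4.0))}
shard_b = {'training': tf.constant(3.0),
           'extra': (tf.constant(6.0), tf.constant(4.0))}
losses = average_sharded_losses([shard_a, shard_b])
# losses['training'] -> mean(1.0, 3.0) = 2.0
# losses['extra']    -> (2.0 + 6.0) / (4.0 + 4.0) = 1.0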
Example 8: summarize_features
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def summarize_features(features, num_shards=1):
  """Generate summaries for features."""
  if not common_layers.should_generate_summaries():
    return

  with tf.name_scope("input_stats"):
    for (k, v) in sorted(six.iteritems(features)):
      if (isinstance(v, tf.Tensor) and (v.get_shape().ndims > 1) and
          (v.dtype != tf.string)):
        tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
        tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
        nonpadding = tf.to_float(tf.not_equal(v, 0))
        nonpadding_tokens = tf.reduce_sum(nonpadding)
        tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
        tf.summary.scalar("%s_nonpadding_fraction" % k,
                          tf.reduce_mean(nonpadding))
Example 9: testRougeLMetricE2E
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def testRougeLMetricE2E(self):
  vocab_size = 4
  batch_size = 12
  seq_length = 12
  predictions = tf.one_hot(
      np.random.randint(vocab_size, size=(batch_size, seq_length, 1, 1)),
      depth=4,
      dtype=tf.float32)
  targets = np.random.randint(4, size=(12, 12, 1, 1))
  with self.test_session() as session:
    scores, _ = rouge.rouge_l_fscore(
        predictions,
        tf.constant(targets, dtype=tf.int32))
    a = tf.reduce_mean(scores)
    session.run(tf.global_variables_initializer())
    session.run(a)
Example 10: vq_nearest_neighbor
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss
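The dist tensor above uses the standard expansion of the squared Euclidean distance between every row of x and every mean, which avoids materializing pairwise differences:

$\|x_i - m_j\|^2 = \|x_i\|^2 + \|m_j\|^2 - 2\,x_i \cdot m_j$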
Example 11: loss
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def loss(self, logits, features):
  """Loss function for the Neural Shuffle-Exchange network.

  We use a custom loss function because the default one doesn't take
  padding into account when calculating the loss. We assume the output
  string is the same length as the input. If you need another type of
  output, feel free to modify this.

  Args:
    logits: Logits from the model.
    features: Features, not in one-hot format.

  Returns:
    tf.Tensor: Loss value.
  """
  onehot_labels = tf.one_hot(features["targets"],
                             self._problem_hparams.vocab_size["targets"])
  cost_vector = tf.nn.softmax_cross_entropy_with_logits_v2(
      logits=logits, labels=onehot_labels)
  return tf.reduce_mean(cost_vector)
Example 12: bottleneck
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def bottleneck(self, x):
  hparams = self.hparams
  z_size = hparams.bottleneck_bits
  x_shape = common_layers.shape_list(x)
  with tf.variable_scope("vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    if hparams.mode != tf.estimator.ModeKeys.TRAIN:
      return mu, 0.0  # No sampling or kl loss on eval.
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    epsilon = tf.random_normal(x_shape[:-1] + [z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
  return z, kl_loss * hparams.kl_beta
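Reading log_sigma as $\log\sigma^2$, the kl term above is the per-dimension closed form of the Gaussian KL divergence (averaged rather than summed over dimensions here), since tf.expm1(log_sigma) equals $\sigma^2 - 1$:

$D_{\mathrm{KL}}\big(\mathcal{N}(\mu, \sigma^2)\,\|\,\mathcal{N}(0, 1)\big) = \tfrac{1}{2}\big(\sigma^2 + \mu^2 - 1 - \log\sigma^2\big)$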
Example 13: encode
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def encode(self, features, input_key):
  hparams = self._hparams
  inputs = common_layers.flatten4d3d(features[input_key])
  (encoder_input, encoder_self_attention_bias, _) = (
      transformer.transformer_prepare_encoder(inputs, problem.SpaceID.EN_TOK,
                                              hparams))
  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  encoder_output = transformer.transformer_encoder(
      encoder_input,
      encoder_self_attention_bias,
      hparams,
      nonpadding=transformer.features_to_nonpadding(features, input_key))
  encoder_output = tf.reduce_mean(encoder_output, axis=1)
  return encoder_output
Example 14: lossfn
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def lossfn(real_input, fake_input, compress, hparams, lsgan, name):
  """Loss function."""
  eps = 1e-12
  with tf.variable_scope(name):
    d1 = discriminator(real_input, compress, hparams, "discriminator")
    d2 = discriminator(fake_input, compress, hparams, "discriminator",
                       reuse=True)
    if lsgan:
      dloss = tf.reduce_mean(
          tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
      gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))
      loss = (dloss + gloss) / 2
    else:  # cross_entropy
      dloss = -tf.reduce_mean(
          tf.log(d1 + eps)) - tf.reduce_mean(tf.log1p(eps - d2))
      gloss = -tf.reduce_mean(tf.log(d2 + eps))
      loss = (dloss + gloss) / 2
    return loss
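In equation form, the lsgan branch implements least-squares GAN losses with a one-sided smoothed real-label target of 0.9, and the else branch the standard cross-entropy GAN losses (tf.log1p(eps - d2) is a numerically stabilized $\log(1 - d_2)$):

LSGAN: $L_D = \mathbb{E}\big[(d_1 - 0.9)^2\big] + \mathbb{E}\big[d_2^2\big], \qquad L_G = \mathbb{E}\big[(d_2 - 0.9)^2\big]$
Cross-entropy: $L_D = -\mathbb{E}\big[\log d_1\big] - \mathbb{E}\big[\log(1 - d_2)\big], \qquad L_G = -\mathbb{E}\big[\log d_2\big]$

Either way, the function returns $(L_D + L_G)/2$.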
Example 15: _create_greedy_infer_model
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_mean [as alias]
def _create_greedy_infer_model(self):
  """Creates model for greedy inference testing.

  Returns:
    model: A t2t model.
    features: A map of string to tensor.
  """
  model, features = get_model(transformer.transformer_tiny())

  out_logits, _ = model(features)
  out_logits = tf.squeeze(out_logits, axis=[2, 3])
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
      labels=tf.reshape(features["targets"], [-1]))
  loss = tf.reduce_mean(loss)
  apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)

  with self.test_session():
    tf.global_variables_initializer().run()
    for _ in range(10):
      apply_grad.run()

  model.set_mode(tf.estimator.ModeKeys.PREDICT)
  return model, features