This article collects typical usage examples of the tensorflow.reduce_mean method in Python. If you have been wondering how exactly to use tensorflow.reduce_mean, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the tensorflow module it belongs to.
The following presents 15 code examples of tensorflow.reduce_mean, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
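Before the examples, here is a minimal standalone sketch (TensorFlow 1.x graph mode, matching the examples below; the tensors are illustrative) of what tf.reduce_mean computes with and without an axis argument:

import tensorflow as tf

x = tf.constant([[1., 2.],
                 [3., 4.]])
mean_all = tf.reduce_mean(x)                           # scalar mean of all elements: 2.5
mean_cols = tf.reduce_mean(x, axis=0)                  # per-column means: [2., 3.]
mean_rows = tf.reduce_mean(x, axis=1, keep_dims=True)  # per-row means, rank kept: [[1.5], [3.5]]

with tf.Session() as sess:
    print(sess.run([mean_all, mean_cols, mean_rows]))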
Example 1: test_adam
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def test_adam(self):
    with self.test_session() as sess:
        w = tf.get_variable(
            "w",
            shape=[3],
            initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
        x = tf.constant([0.4, 0.2, -0.5])
        loss = tf.reduce_mean(tf.square(x - w))
        tvars = tf.trainable_variables()
        grads = tf.gradients(loss, tvars)
        global_step = tf.train.get_or_create_global_step()
        optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
        train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)
        for _ in range(100):
            sess.run(train_op)
        w_np = sess.run(w)
        self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
Example 2: _build_input
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def _build_input(self):
    self.tails = tf.placeholder(tf.int32, [None])
    self.heads = tf.placeholder(tf.int32, [None])
    self.targets = tf.one_hot(indices=self.heads, depth=self.num_entity)

    if not self.query_is_language:
        self.queries = tf.placeholder(tf.int32, [None, self.num_step])
        self.query_embedding_params = tf.Variable(self._random_uniform_unit(
                                                      self.num_query + 1,  # <END> token
                                                      self.query_embed_size),
                                                  dtype=tf.float32)
        rnn_inputs = tf.nn.embedding_lookup(self.query_embedding_params,
                                            self.queries)
    else:
        self.queries = tf.placeholder(tf.int32, [None, self.num_step, self.num_word])
        self.vocab_embedding_params = tf.Variable(self._random_uniform_unit(
                                                      self.num_vocab + 1,  # <END> token
                                                      self.vocab_embed_size),
                                                  dtype=tf.float32)
        embedded_query = tf.nn.embedding_lookup(self.vocab_embedding_params,
                                                self.queries)
        rnn_inputs = tf.reduce_mean(embedded_query, axis=2)
    return rnn_inputs
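The else branch above collapses a [batch, num_step, num_word, embed] embedding lookup over the word axis, so each step is represented by the mean of its word embeddings (a bag-of-words encoding). A minimal shape check, with illustrative sizes:

import tensorflow as tf

embedded = tf.random_uniform([2, 3, 5, 8])    # [batch, num_step, num_word, embed]
step_repr = tf.reduce_mean(embedded, axis=2)  # [batch, num_step, embed]

with tf.Session() as sess:
    print(sess.run(tf.shape(step_repr)))      # [2 3 8]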
Example 3: minibatch_stddev_layer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def minibatch_stddev_layer(x, group_size=4):
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])    # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                            # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])  # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32)                             # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keep_dims=True)         # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)               # [MCHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                  # [MCHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1, 2, 3], keep_dims=True)  # [M111]  Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype)                                # [M111]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])            # [N1HW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                       # [NCHW]  Append as new fmap.
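A quick way to sanity-check the layer (shapes are illustrative; assumes minibatch_stddev_layer from above is in scope): it appends exactly one statistics feature map along the channel axis, so an [8, 16, 4, 4] input comes back as [8, 17, 4, 4]:

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(8, 16, 4, 4).astype(np.float32))  # [NCHW]
y = minibatch_stddev_layer(x, group_size=4)

with tf.Session() as sess:
    print(sess.run(tf.shape(y)))  # [ 8 17  4  4]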
Example 4: fprop
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def fprop(self, x, y, **kwargs):
    kwargs.update(self.kwargs)
    if self.attack is not None:
        x = x, self.attack(x)
    else:
        x = x,
    # Catching RuntimeError: Variable -= value not supported by tf.eager.
    try:
        y -= self.smoothing * (y - 1. / tf.cast(y.shape[-1], y.dtype))
    except RuntimeError:
        y.assign_sub(self.smoothing * (y - 1. / tf.cast(y.shape[-1],
                                                        y.dtype)))
    logits = [self.model.get_logits(x, **kwargs) for x in x]
    loss = sum(
        tf.reduce_mean(softmax_cross_entropy_with_logits(labels=y,
                                                         logits=logit))
        for logit in logits)
    return loss
Example 5: createLinearModel
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def createLinearModel(dimension):
    np.random.seed(1024)
    # Define x and y
    x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')
    # Writing the model in matrix form speeds up the computation considerably
    y = tf.placeholder(tf.float64, shape=[None, 1], name='y')
    # Define the parameter estimates and the prediction
    betaPred = tf.Variable(np.random.random([dimension, 1]))
    yPred = tf.matmul(x, betaPred, name='y_pred')
    # Define the loss function
    loss = tf.reduce_mean(tf.square(yPred - y))
    model = {
        'loss_function': loss,
        'independent_variable': x,
        'dependent_variable': y,
        'prediction': yPred,
        'model_params': betaPred
    }
    return model
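A hedged sketch of how the returned dict might be driven; the optimizer, learning rate, and synthetic data below are illustrative additions, not part of the original snippet:

import numpy as np
import tensorflow as tf

model = createLinearModel(dimension=2)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(model['loss_function'])

X = np.random.random([100, 2])
Y = X.dot(np.array([[2.], [-1.]]))  # true coefficients: 2 and -1

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train_op, feed_dict={model['independent_variable']: X,
                                      model['dependent_variable']: Y})
    print(sess.run(model['model_params']))  # should approach [[2.], [-1.]]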
Example 6: testTrainEvalWithReuse
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.test_session():
        train_inputs = tf.random_uniform(
            (train_batch_size, train_height, train_width, 3))
        logits, _ = overfeat.overfeat(train_inputs)
        self.assertListEqual(logits.get_shape().as_list(),
                             [train_batch_size, num_classes])
        tf.get_variable_scope().reuse_variables()
        eval_inputs = tf.random_uniform(
            (eval_batch_size, eval_height, eval_width, 3))
        logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
                                      spatial_squeeze=False)
        self.assertListEqual(logits.get_shape().as_list(),
                             [eval_batch_size, 2, 2, num_classes])
        logits = tf.reduce_mean(logits, [1, 2])
        predictions = tf.argmax(logits, 1)
        self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example 7: testTrainEvalWithReuse
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.test_session():
        train_inputs = tf.random_uniform(
            (train_batch_size, train_height, train_width, 3))
        logits, _ = alexnet.alexnet_v2(train_inputs)
        self.assertListEqual(logits.get_shape().as_list(),
                             [train_batch_size, num_classes])
        tf.get_variable_scope().reuse_variables()
        eval_inputs = tf.random_uniform(
            (eval_batch_size, eval_height, eval_width, 3))
        logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
                                       spatial_squeeze=False)
        self.assertListEqual(logits.get_shape().as_list(),
                             [eval_batch_size, 4, 7, num_classes])
        logits = tf.reduce_mean(logits, [1, 2])
        predictions = tf.argmax(logits, 1)
        self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example 8: testTrainEvalWithReuse
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
        train_inputs = tf.random_uniform(
            (train_batch_size, train_height, train_width, 3))
        logits, _ = vgg.vgg_a(train_inputs)
        self.assertListEqual(logits.get_shape().as_list(),
                             [train_batch_size, num_classes])
        tf.get_variable_scope().reuse_variables()
        eval_inputs = tf.random_uniform(
            (eval_batch_size, eval_height, eval_width, 3))
        logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
                              spatial_squeeze=False)
        self.assertListEqual(logits.get_shape().as_list(),
                             [eval_batch_size, 2, 2, num_classes])
        logits = tf.reduce_mean(logits, [1, 2])
        predictions = tf.argmax(logits, 1)
        self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example 9: loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def loss(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".

    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

    Returns:
        Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the
    # weight decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
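For context, a hedged sketch of how a weight-decay term typically enters the 'losses' collection that loss() later sums with tf.add_n; the variable name and decay factor here are illustrative, not taken from the original code:

import tensorflow as tf

weights = tf.get_variable('weights', shape=[192, 10])
# L2 penalty on the weights, registered alongside the cross entropy term.
weight_decay = tf.multiply(tf.nn.l2_loss(weights), 0.004, name='weight_loss')
tf.add_to_collection('losses', weight_decay)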
Example 10: test_position_sensitive_with_single_bin
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def test_position_sensitive_with_single_bin(self):
    num_spatial_bins = [1, 1]
    image_shape = [2, 3, 3, 4]
    crop_size = [2, 2]

    image = tf.random_uniform(image_shape)
    boxes = tf.random_uniform((6, 4))
    box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

    # When a single bin is used, position-sensitive crop and pool should be
    # the same as non-position sensitive crop and pool.
    crop = tf.image.crop_and_resize(image, boxes, box_ind, crop_size)
    crop_and_pool = tf.reduce_mean(crop, [1, 2], keep_dims=True)

    ps_crop_and_pool = ops.position_sensitive_crop_regions(
        image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=True)

    with self.test_session() as sess:
        expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
        self.assertAllClose(output, expected_output)
Example 11: _BuildLoss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def _BuildLoss(self):
    # 1. reconstr_loss doesn't seem to do better than l2 loss.
    # 2. Only works when using reduce_mean. reduce_sum doesn't work.
    # 3. It seems kl loss doesn't play an important role.
    self.loss = 0
    with tf.variable_scope('loss'):
        if self.params['l2_loss']:
            l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1]))
            tf.summary.scalar('l2_loss', l2_loss)
            self.loss += l2_loss
        if self.params['reconstr_loss']:
            reconstr_loss = (-tf.reduce_mean(
                self.diffs[1] * (1e-10 + self.diff_output) +
                (1 - self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output)))
            reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss')
            tf.summary.scalar('reconstr_loss', reconstr_loss)
            self.loss += reconstr_loss
        if self.params['kl_loss']:
            kl_loss = (0.5 * tf.reduce_mean(
                tf.square(self.z_mean) + tf.square(self.z_stddev) -
                2 * self.z_stddev_log - 1))
            tf.summary.scalar('kl_loss', kl_loss)
            self.loss += kl_loss

        tf.summary.scalar('loss', self.loss)
Example 12: _define_experience
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def _define_experience(self, observ, action, reward):
    """Implement the branch of experience() entered during training."""
    update_filters = tf.summary.merge([
        self._observ_filter.update(observ),
        self._reward_filter.update(reward)])
    with tf.control_dependencies([update_filters]):
        if self._config.train_on_agent_action:
            # NOTE: Doesn't seem to change much.
            action = self._last_action
        batch = observ, action, self._last_mean, self._last_logstd, reward
        append = self._episodes.append(batch, tf.range(len(self._batch_env)))
    with tf.control_dependencies([append]):
        norm_observ = self._observ_filter.transform(observ)
        norm_reward = tf.reduce_mean(self._reward_filter.transform(reward))
        # pylint: disable=g-long-lambda
        summary = tf.cond(self._should_log, lambda: tf.summary.merge([
            update_filters,
            self._observ_filter.summary(),
            self._reward_filter.summary(),
            tf.summary.scalar('memory_size', self._memory_index),
            tf.summary.histogram('normalized_observ', norm_observ),
            tf.summary.histogram('action', self._last_action),
            tf.summary.scalar('normalized_reward', norm_reward)]), str)
        return summary
Example 13: _update_value
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def _update_value(self, observ, reward, length):
    """Perform multiple update steps of the value baseline.

    A single summary must be chosen for the whole iteration, so we return the
    one produced after half of the update steps.

    Args:
        observ: Sequences of observations.
        reward: Sequences of rewards.
        length: Batch of sequence lengths.

    Returns:
        Summary tensor.
    """
    with tf.name_scope('update_value'):
        loss, summary = tf.scan(
            lambda _1, _2: self._update_value_step(observ, reward, length),
            tf.range(self._config.update_epochs_value),
            [0., ''], parallel_iterations=1)
        print_loss = tf.Print(0, [tf.reduce_mean(loss)], 'value loss: ')
        with tf.control_dependencies([loss, print_loss]):
            return summary[self._config.update_epochs_value // 2]
Example 14: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units,
        l2_reg_lambda, batch_size, trainableEmbeddings):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.constant(0.0, shape=[vocab_size, embedding_size]),
            trainable=trainableEmbeddings, name="W")
        self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
    print(self.embedded_words1)

    # Encode both sides with a shared stacked RNN and compare the outputs.
    with tf.name_scope("output"):
        self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance,
                               tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                      tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")
    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
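The contrastive_loss method referenced above is defined elsewhere in the class; a common formulation (an assumption about that implementation, not part of this snippet) is:

def contrastive_loss(self, y, d, batch_size):
    # y is 1 for similar pairs and 0 for dissimilar ones; d is the
    # normalized distance computed above, so (1 - d) plays the margin role.
    tmp = y * tf.square(d)
    tmp2 = (1 - y) * tf.square(tf.maximum(1 - d, 0.))
    return tf.reduce_sum(tmp + tmp2) / batch_size / 2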
Example 15: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_mean [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units,
        l2_reg_lambda, batch_size):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
            trainable=True, name="W")
        self.embedded_chars1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        # self.embedded_chars_expanded1 = tf.expand_dims(self.embedded_chars1, -1)
        self.embedded_chars2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        # self.embedded_chars_expanded2 = tf.expand_dims(self.embedded_chars2, -1)

    # Encode both sides with a shared bidirectional RNN and compare the outputs.
    with tf.name_scope("output"):
        self.out1 = self.BiRNN(self.embedded_chars1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.BiRNN(self.embedded_chars2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance,
                               tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                      tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")
    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")