This article collects typical usage examples of the tensorflow.reduce_mean function in Python. If you have been wondering what reduce_mean does, how to call it, or what real code that uses it looks like, the curated examples here should help.
Fifteen code examples of reduce_mean are shown below, ordered by popularity by default.
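Before the examples, a minimal sketch of the function's semantics (assuming TensorFlow 1.x, which the examples below target): tf.reduce_mean averages a tensor over all of its elements, or only along the axes passed via the axis argument.

import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
mean_all  = tf.reduce_mean(x)          # scalar: 2.5, mean over every element
mean_cols = tf.reduce_mean(x, axis=0)  # [2.0, 3.0], mean down each column
mean_rows = tf.reduce_mean(x, axis=1)  # [1.5, 3.5], mean across each row

with tf.Session() as sess:
    print(sess.run([mean_all, mean_cols, mean_rows]))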
Example 1: __init__
def __init__(self, config):
    self.config = config
    self.input = tf.placeholder('int32', [self.config.batch_size, config.max_seq_len], name='input')
    self.labels = tf.placeholder('int64', [self.config.batch_size], name='labels')
    self.labels_one_hot = tf.one_hot(indices=self.labels,
                                     depth=config.output_dim,
                                     on_value=1.0,
                                     off_value=0.0,
                                     axis=-1)
    self.gru = GRUCell(config.hidden_state_dim)
    embeddings_we = tf.get_variable('word_embeddings', initializer=tf.random_uniform([config.vocab_size, config.embedding_dim], -1.0, 1.0))
    self.emb = embed_input = tf.nn.embedding_lookup(embeddings_we, self.input)
    inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(1, config.max_seq_len, embed_input)]
    outputs, last_slu_state = tf.nn.rnn(
        cell=self.gru,
        inputs=inputs,
        dtype=tf.float32,)
    w_project = tf.get_variable('project2labels', initializer=tf.random_uniform([config.hidden_state_dim, config.output_dim], -1.0, 1.0))
    self.logits = logits_bo = tf.matmul(last_slu_state, w_project)
    tf.histogram_summary('logits', logits_bo)
    self.probabilities = tf.nn.softmax(logits_bo)
    self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits_bo, self.labels_one_hot))
    self.predict = tf.nn.softmax(logits_bo)

    # TensorBoard
    self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.predict, 1), self.labels), 'float32'), name='accuracy')
    tf.scalar_summary('CCE loss', self.loss)
    tf.scalar_summary('Accuracy', self.accuracy)
    self.tb_info = tf.merge_all_summaries()
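The two reduce_mean calls above follow a common pattern: one averages a per-example cross-entropy into a scalar loss, the other averages a per-example correctness indicator into an accuracy. A standalone sketch of the accuracy part with hypothetical logits and labels (TF 1.x assumed):

logits = tf.constant([[2.0, 0.1], [0.3, 1.5]])    # two examples, two classes
labels = tf.constant([0, 0], dtype=tf.int64)      # true class indices
correct = tf.equal(tf.argmax(logits, 1), labels)  # [True, False]
accuracy = tf.reduce_mean(tf.cast(correct, 'float32'))  # evaluates to 0.5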
Example 2: fprop_noscope
def fprop_noscope(self, x):
    mean = tf.reduce_mean(x, (1, 2), keep_dims=True)
    x = x - mean
    std = tf.sqrt(1e-7 +
                  tf.reduce_mean(tf.square(x), (1, 2), keep_dims=True))
    x = x / std
    return x * self.gamma + self.beta
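This normalizes each example over its spatial axes (1 and 2), keeping separate statistics per channel, and then applies a learned scale and shift. As a cross-check only, a sketch of the same computation written with tf.nn.moments (the gamma and beta arguments stand in for self.gamma and self.beta; TF 1.x assumed):

def spatial_normalize(x, gamma, beta, eps=1e-7):
    # per-example, per-channel statistics, reduced over height and width
    mean, var = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
    return (x - mean) / tf.sqrt(var + eps) * gamma + beta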
Example 3: get_rebar_gradient
def get_rebar_gradient(self):
    """Get the rebar gradient."""
    hardELBO, nvil_gradient, logQHard = self._create_hard_elbo()
    if self.hparams.quadratic:
        gumbel_cv, _ = self._create_gumbel_control_variate_quadratic(logQHard)
    else:
        gumbel_cv, _ = self._create_gumbel_control_variate(logQHard)

    f_grads = self.optimizer_class.compute_gradients(tf.reduce_mean(-nvil_gradient))

    eta = {}
    h_grads, eta_statistics = self.multiply_by_eta_per_layer(
        self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)),
        eta)

    model_grads = U.add_grads_and_vars(f_grads, h_grads)
    total_grads = model_grads

    # Construct the variance objective
    variance_objective = tf.reduce_mean(tf.square(U.vectorize(model_grads, set_none_to_zero=True)))

    debug = {'ELBO': hardELBO,
             'etas': eta_statistics,
             'variance_objective': variance_objective,
             }
    return total_grads, debug, variance_objective
Example 4: standard_reg
def standard_reg():
    reg = tf.constant(0.0, dtype=tf.float32)
    reg = reg + standard_w_weight_reg * tf.reduce_mean(tf.square(net_params['sDW1']))
    #reg = reg + standard_w_weight_reg * tf.reduce_mean(tf.square(net_params['sDW2']))
    reg = reg + regressor_w_weight_reg * tf.reduce_mean(tf.square(net_params['sRW']))
    return reg
Example 5: func_for_scan
def func_for_scan(prev_output, current_element):
    # Sample decoder weights __, [1], [1]
    W, log_pW, log_qW = decoder.sample_weights()
    # Sample z  [P,B,Z], [P,B], [P,B]
    z, log_pz, log_qz = self.sample_z(x, encoder, decoder, W)
    # z: [PB,Z]
    z = tf.reshape(z, [self.n_z_particles*self.batch_size, self.z_size])
    # Decode [PB,X]
    y = decoder.feedforward(W, z)
    # y: [P,B,X]
    y = tf.reshape(y, [self.n_z_particles, self.batch_size, self.x_size])
    # Likelihood p(x|z)  [P,B]
    log_px = log_bern(x, y)
    # Store for later
    # log_pW_list.append(tf.reduce_mean(log_pW))
    # log_qW_list.append(tf.reduce_mean(log_qW))
    # log_pz_list.append(tf.reduce_mean(log_pz))
    # log_qz_list.append(tf.reduce_mean(log_qz))
    # log_px_list.append(tf.reduce_mean(log_px))
    to_output = []
    to_output.append(tf.reduce_mean(log_px))
    to_output.append(tf.reduce_mean(log_pz))
    to_output.append(tf.reduce_mean(log_qz))
    to_output.append(tf.reduce_mean(log_pW))
    to_output.append(tf.reduce_mean(log_qW))
    return tf.stack(to_output)
Example 6: __init__
def __init__(self, nA,
             learning_rate, decay, grad_clip, entropy_beta,
             state_shape=[84,84,4],
             master=None, device_name='/gpu:0', scope_name='master'):
    with tf.device(device_name):
        self.state = tf.placeholder(tf.float32, [None]+state_shape)
        block, self.scope = ActorCritic._build_shared_block(self.state, scope_name)
        self.policy, self.log_softmax_policy = ActorCritic._build_policy(block, nA, scope_name)
        self.value = ActorCritic._build_value(block, scope_name)
        self.train_vars = sorted(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope.name), key=lambda v: v.name)

        if master is not None:
            self.sync_op = self._sync_op(master)
            self.action = tf.placeholder(tf.int32, [None,])
            self.target_value = tf.placeholder(tf.float32, [None,])

            advantage = self.target_value - self.value
            entropy = tf.reduce_sum(-1. * self.policy * self.log_softmax_policy, axis=1)
            log_p_s_a = tf.reduce_sum(self.log_softmax_policy * tf.one_hot(self.action, nA), axis=1)

            self.policy_loss = tf.reduce_mean(tf.stop_gradient(advantage) * log_p_s_a)
            self.entropy_loss = tf.reduce_mean(entropy)
            self.value_loss = tf.reduce_mean(advantage**2)
            loss = -self.policy_loss - entropy_beta * self.entropy_loss + self.value_loss

            self.gradients = tf.gradients(loss, self.train_vars)
            clipped_gs = [tf.clip_by_average_norm(g, grad_clip) for g in self.gradients]
            self.train_op = master.optimizer.apply_gradients(zip(clipped_gs, master.train_vars))
        else:
            #self.optimizer = tf.train.AdamOptimizer(learning_rate,beta1=BETA)
            self.optimizer = tf.train.RMSPropOptimizer(learning_rate, decay=decay, use_locking=True)
Example 7: eval_summary
def eval_summary(self, ground_truth, prediction):
    """
    Compute evaluation metrics (for EVAL mode).

    Args:
        ground_truth: Ground truth, shape: (?, #priors, 4 + #classes).
        prediction: Dictionary of predicted tensors, shape: {'locs' : (?, #priors, 4), \
                                                             'confs' : (?, #priors, #classes), \
                                                             'logits': (?, #priors, #classes)}.
    Returns:
        Loss stub, shape: (1,).
    """
    localization_loss = self._localization_loss(ground_truth[:, :, :4],
                                                prediction['locs'])  # shape: (batch_size, num_priors)
    classification_loss = self._classification_loss(ground_truth[:, :, 4:],
                                                    prediction['logits'])  # shape: (batch_size, num_priors)
    positives = tf.reduce_max(ground_truth[:, :, 5:], axis=-1)  # shape: (batch_size, num_priors)
    num_positives = tf.reduce_sum(positives)  # shape: (1,)
    loc_loss = tf.reduce_sum(localization_loss * positives, axis=-1)  # shape: (batch_size,)
    classification_loss = tf.reduce_sum(classification_loss, axis=-1)  # shape: (batch_size,)

    evaluation_tensors = {
        'total_classification_loss': tf.reduce_mean(classification_loss),
        'total_localization_loss': tf.reduce_mean(loc_loss),
    }
    self.__add_evaluation(evaluation_tensors)

    total_loss = tf.reduce_mean(classification_loss + self.loc_weight * loc_loss) / tf.maximum(1.0, num_positives)
    return total_loss
Example 8: get_train
def get_train(train_ph_dict, var_dict, var_ph_dict):
    mid0 = tf.one_hot(train_ph_dict['choice_0'], 9, axis=-1, dtype=tf.float32)
    mid0 = mid0 * get_q(train_ph_dict['state_0'], var_dict)
    mid0 = tf.reduce_sum(mid0, reduction_indices=[1])

    mid1 = get_q(train_ph_dict['state_1'], var_ph_dict)
    mid1 = tf.reduce_max(mid1, reduction_indices=[1])
    mid1 = mid1 * train_ph_dict['cont']
    mid1 = mid1 * tf.constant(TRAIN_BETA)

    l2r = tf.constant(0.0)
    cell_count = tf.constant(0.0)
    for v in var_dict.values():
        l2r = l2r + get_l2(v)
        cell_count = cell_count + tf.to_float(tf.size(v))
    l2r = l2r / cell_count
    l2r = l2r / tf.constant(ELEMENT_L2_FACTOR*ELEMENT_L2_FACTOR)
    l2r = l2r * tf.constant(L2_WEIGHT)

    mid = mid0 - mid1 - train_ph_dict['reward_1']
    # mid = mid * mid
    mid = tf.abs(mid)
    mid = tf.reduce_mean(mid)
    score_diff = mid
    mid = mid + l2r
    mid = mid + (tf.abs(tf.reduce_mean(var_dict['b5'])) * tf.constant(L2_WEIGHT))
    loss = mid
    mid = tf.train.GradientDescentOptimizer(0.5).minimize(mid, var_list=var_dict.values())
    train = mid

    return train, loss, score_diff
Example 9: _potential_scale_reduction_single_state
def _potential_scale_reduction_single_state(state, independent_chain_ndims):
    """potential_scale_reduction for one single state `Tensor`."""
    with tf.name_scope(
            'potential_scale_reduction_single_state',
            values=[state, independent_chain_ndims]):
        # We assume exactly one leading dimension indexes e.g. correlated samples
        # from each Markov chain.
        state = tf.convert_to_tensor(state, name='state')
        sample_ndims = 1
        sample_axis = tf.range(0, sample_ndims)
        chain_axis = tf.range(sample_ndims,
                              sample_ndims + independent_chain_ndims)
        sample_and_chain_axis = tf.range(
            0, sample_ndims + independent_chain_ndims)

        n = _axis_size(state, sample_axis)
        m = _axis_size(state, chain_axis)

        # In the language of Brooks and Gelman (1998),
        # B / n is the between chain variance, the variance of the chain means.
        # W is the within sequence variance, the mean of the chain variances.
        b_div_n = _reduce_variance(
            tf.reduce_mean(state, sample_axis, keepdims=True),
            sample_and_chain_axis,
            biased=False)
        w = tf.reduce_mean(
            _reduce_variance(state, sample_axis, keepdims=True, biased=True),
            sample_and_chain_axis)

        # sigma^2_+ is an estimate of the true variance, which would be unbiased if
        # each chain was drawn from the target. c.f. "law of total variance."
        sigma_2_plus = w + b_div_n
        return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
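The value returned on the last line is the potential scale reduction of Brooks and Gelman (1998) that the in-code comments refer to. With m chains, n samples per chain, B/n the between-chain variance and W the within-chain variance, the return expression is

\hat{R} = \frac{m+1}{m}\,\frac{\hat{\sigma}^2_+}{W} - \frac{n-1}{mn}, \qquad \hat{\sigma}^2_+ = W + \frac{B}{n}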
Example 10: init_opt
def init_opt(self):
    is_recurrent = int(self.policy.recurrent)
    obs_var = self.env.observation_space.new_tensor_variable(
        'obs',
        extra_dims=1 + is_recurrent,
    )
    action_var = self.env.action_space.new_tensor_variable(
        'action',
        extra_dims=1 + is_recurrent,
    )
    advantage_var = tensor_utils.new_tensor(
        'advantage',
        ndim=1 + is_recurrent,
        dtype=tf.float32,
    )
    dist = self.policy.distribution

    old_dist_info_vars = {
        k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name='old_%s' % k)
        for k, shape in dist.dist_info_specs
    }
    old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]

    state_info_vars = {
        k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name=k)
        for k, shape in self.policy.state_info_specs
    }
    state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]

    if is_recurrent:
        valid_var = tf.placeholder(tf.float32, shape=[None, None], name="valid")
    else:
        valid_var = None

    dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
    kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
    lr = dist.likelihood_ratio_sym(action_var, old_dist_info_vars, dist_info_vars)
    if is_recurrent:
        mean_kl = tf.reduce_sum(kl * valid_var) / tf.reduce_sum(valid_var)
        surr_loss = - tf.reduce_sum(lr * advantage_var * valid_var) / tf.reduce_sum(valid_var)
    else:
        mean_kl = tf.reduce_mean(kl)
        surr_loss = - tf.reduce_mean(lr * advantage_var)

    input_list = [
        obs_var,
        action_var,
        advantage_var,
    ] + state_info_vars_list + old_dist_info_vars_list
    if is_recurrent:
        input_list.append(valid_var)

    self.optimizer.update_opt(
        loss=surr_loss,
        target=self.policy,
        leq_constraint=(mean_kl, self.step_size),
        inputs=input_list,
        constraint_name="mean_kl"
    )
    return dict()
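In the non-recurrent branch, the two reduce_mean calls build the constrained surrogate objective used by TRPO-style policy optimization, which the optimizer then receives through loss and leq_constraint:

\min_\theta\; -\,\mathbb{E}\!\left[\tfrac{\pi_\theta(a\mid s)}{\pi_{\mathrm{old}}(a\mid s)}\,A(s,a)\right] \quad \text{s.t.} \quad \mathbb{E}\!\left[\mathrm{KL}\!\left(\pi_{\mathrm{old}}\,\Vert\,\pi_\theta\right)\right] \le \text{step\_size}

The recurrent branch computes the same two quantities as averages weighted by the valid-timestep mask.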
Example 11: build_graph
def build_graph(self, image_pos):
    image_pos = image_pos / 128.0 - 1

    z = tf.random_normal([self.batch, self.zdim], name='z_train')
    z = tf.placeholder_with_default(z, [None, self.zdim], name='z')

    with argscope([Conv2D, Conv2DTranspose, FullyConnected],
                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
        with tf.variable_scope('gen'):
            image_gen = self.generator(z)
        tf.summary.image('generated-samples', image_gen, max_outputs=30)

        alpha = tf.random_uniform(shape=[self.batch, 1, 1, 1],
                                  minval=0., maxval=1., name='alpha')
        interp = image_pos + alpha * (image_gen - image_pos)

        with tf.variable_scope('discrim'):
            vecpos = self.discriminator(image_pos)
            vecneg = self.discriminator(image_gen)
            vec_interp = self.discriminator(interp)

    # the Wasserstein-GAN losses
    self.d_loss = tf.reduce_mean(vecneg - vecpos, name='d_loss')
    self.g_loss = tf.negative(tf.reduce_mean(vecneg), name='g_loss')

    # the gradient penalty loss
    gradients = tf.gradients(vec_interp, [interp])[0]
    gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
    gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
    gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')

    add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)
    self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)
    self.collect_variables()
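The gradient_penalty above is the WGAN-GP term: with interp a random interpolation between a real and a generated image, the code computes

\text{gradient\_penalty} = \mathbb{E}\big[(\lVert \nabla_{\text{interp}}\, D(\text{interp}) \rVert_2 - 1)^2\big]

where the gradient norm is taken over all pixel and channel axes, and the result is added to the discriminator loss with weight 10.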
Example 12: testSampleConsistentStats
def testSampleConsistentStats(self):
    loc = np.float32([[-1., 1], [1, -1]])
    scale = np.float32([1., 0.5])
    n_samp = 1e4
    with self.test_session() as sess:
        ind = tfd.Independent(
            distribution=tfd.MultivariateNormalDiag(
                loc=loc, scale_identity_multiplier=scale),
            reinterpreted_batch_ndims=1)

        x = ind.sample(int(n_samp), seed=42)
        sample_mean = tf.reduce_mean(x, axis=0)
        sample_var = tf.reduce_mean(tf.squared_difference(x, sample_mean), axis=0)
        sample_std = tf.sqrt(sample_var)
        sample_entropy = -tf.reduce_mean(ind.log_prob(x), axis=0)

        [
            sample_mean_, sample_var_, sample_std_, sample_entropy_,
            actual_mean_, actual_var_, actual_std_, actual_entropy_,
            actual_mode_,
        ] = sess.run([
            sample_mean, sample_var, sample_std, sample_entropy,
            ind.mean(), ind.variance(), ind.stddev(), ind.entropy(), ind.mode(),
        ])

        self.assertAllClose(sample_mean_, actual_mean_, rtol=0.02, atol=0.)
        self.assertAllClose(sample_var_, actual_var_, rtol=0.04, atol=0.)
        self.assertAllClose(sample_std_, actual_std_, rtol=0.02, atol=0.)
        self.assertAllClose(sample_entropy_, actual_entropy_, rtol=0.01, atol=0.)
        self.assertAllClose(loc, actual_mode_, rtol=1e-6, atol=0.)
Example 13: batchnormalize
def batchnormalize(X, eps=1e-8, g=None, b=None):
    if X.get_shape().ndims == 4:
        mean = tf.reduce_mean(X, [0,1,2])
        std = tf.reduce_mean(tf.square(X-mean), [0,1,2])
        X = (X-mean) / tf.sqrt(std+eps)

        if g is not None and b is not None:
            g = tf.reshape(g, [1,1,1,-1])
            b = tf.reshape(b, [1,1,1,-1])
            X = X*g + b

    elif X.get_shape().ndims == 2:
        mean = tf.reduce_mean(X, 0)
        std = tf.reduce_mean(tf.square(X-mean), 0)
        X = (X-mean) / tf.sqrt(std+eps)

        if g is not None and b is not None:
            g = tf.reshape(g, [1,-1])
            b = tf.reshape(b, [1,-1])
            X = X*g + b

    else:
        raise NotImplementedError

    return X
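A minimal usage sketch under assumed shapes (the 32x32x64 feature-map batch is hypothetical; TF 1.x assumed), with g and b as per-channel scale and shift variables:

images = tf.placeholder(tf.float32, [None, 32, 32, 64])
g = tf.Variable(tf.ones([64]))
b = tf.Variable(tf.zeros([64]))
normalized = batchnormalize(images, g=g, b=b)  # same shape as images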
Example 14: create_graph
def create_graph(self):
    with self.__graph.as_default():
        self.__featurePlaceHolder = tf.placeholder(dtype=tf.int32, shape=[None, self.__window_size * 2])
        self.__labelPlaceHolder = tf.placeholder(dtype=tf.int32, shape=[None, 1])

        onehot_lookup_tables = tf.Variable(
            initial_value=tf.truncated_normal(shape=[self.__vocabulary_size, self.__embedding_size])
        )

        embedding = tf.nn.embedding_lookup(params=onehot_lookup_tables, ids=self.__featurePlaceHolder)
        projection_out = tf.reduce_mean(embedding, axis=1)

        softmax_weight = tf.Variable(initial_value=tf.truncated_normal(
            shape=[self.__vocabulary_size, self.__embedding_size]
        ))
        softmax_biases = tf.Variable(initial_value=tf.zeros([self.__vocabulary_size]))

        sampled_loss_per_batch = tf.nn.sampled_softmax_loss(
            weights=softmax_weight,
            biases=softmax_biases,
            inputs=projection_out,
            labels=self.__labelPlaceHolder,
            num_sampled=self.__num_sampled,
            num_classes=self.__vocabulary_size
        )
        self.__loss = tf.reduce_mean(sampled_loss_per_batch)
        self.__optimizer = tf.train.AdagradOptimizer(1.0).minimize(self.__loss)

        norm = tf.sqrt(tf.reduce_sum(tf.square(onehot_lookup_tables), 1, keep_dims=True))
        self.__normalized_embedding = onehot_lookup_tables / norm
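The reduce_mean over axis=1 above is the CBOW projection step: it averages the embeddings of the 2 * window_size context words into a single vector per example. A shape-only sketch with made-up numbers:

ctx = tf.constant([[[1., 1.], [3., 3.]]])  # [batch=1, context=2, embedding_dim=2]
proj = tf.reduce_mean(ctx, axis=1)         # [[2., 2.]], one projection vector per example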
Example 15: _summarize_input
def _summarize_input(self, groundtruth_boxes_list, match_list):
    """Creates tensorflow summaries for the input boxes and anchors.

    This function creates four summaries corresponding to the average
    number (over images in a batch) of (1) groundtruth boxes, (2) anchors
    marked as positive, (3) anchors marked as negative, and (4) anchors marked
    as ignored.

    Args:
        groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
            containing corners of the groundtruth boxes.
        match_list: a list of matcher.Match objects encoding the match between
            anchors and groundtruth boxes for each image of the batch,
            with rows of the Match objects corresponding to groundtruth boxes
            and columns corresponding to anchors.
    """
    num_boxes_per_image = tf.stack(
        [tf.shape(x)[0] for x in groundtruth_boxes_list])
    pos_anchors_per_image = tf.stack(
        [match.num_matched_columns() for match in match_list])
    neg_anchors_per_image = tf.stack(
        [match.num_unmatched_columns() for match in match_list])
    ignored_anchors_per_image = tf.stack(
        [match.num_ignored_columns() for match in match_list])
    tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
                      tf.reduce_mean(tf.to_float(num_boxes_per_image)))
    tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))