This article collects typical usage examples of the Python method tensorflow.add_to_collection. If you have been wondering what tensorflow.add_to_collection does, how to use it, or what real code that calls it looks like, the curated examples below may help. You can also explore further usage examples from the tensorflow module this method belongs to.
The following presents 15 code examples of tensorflow.add_to_collection, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: _variable_with_weight_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
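In the original CIFAR-10 tutorial this helper is called once per layer; _variable_on_cpu and FLAGS are defined in the surrounding tutorial code and are not shown here. A minimal usage sketch, assuming a TF 1.x graph and a hypothetical conv-kernel shape:

kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                     stddev=5e-2, wd=0.004)
# Each call with wd set adds one L2 term to the 'losses' collection,
# which can later be summed into the total loss:
weight_losses = tf.get_collection('losses')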
Example 2: loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
      of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
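This pairs with Example 1: the weight-decay terms registered there and the cross-entropy term registered here land in the same 'losses' collection. A hedged end-to-end sketch, where inference(), images and labels stand in for the tutorial's model and input pipeline:

logits = inference(images)
total_loss = loss(logits, labels)
# tf.add_n inside loss() has already folded the weight decay terms in:
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(total_loss)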
Example 3: _variable_with_weight_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Example 4: l2_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def l2_loss(tensor, weight=1.0, scope=None):
  """Define an L2 loss, useful for regularization, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for name_scope.

  Returns:
    the L2 loss op.
  """
  with tf.name_scope(scope, 'L2Loss', [tensor]):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
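LOSSES_COLLECTION is a module-level constant in the original TF-Slim-style code; the value used below is an assumption for the sketch. A minimal TF 1.x usage:

LOSSES_COLLECTION = '_losses'  # assumed module-level constant, as in the original code

weights = tf.get_variable('w', [256, 128], dtype=tf.float32)
l2_loss(weights, weight=1e-4)
# Sum everything accumulated under the custom collection key:
regularization = tf.add_n(tf.get_collection(LOSSES_COLLECTION))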
Example 5: add_variable
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def add_variable(var, restore=True):
  """Adds a variable to the MODEL_VARIABLES collection.

  Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.

  Args:
    var: a variable.
    restore: whether the variable should be added to the
      VARIABLES_TO_RESTORE collection.
  """
  collections = [MODEL_VARIABLES]
  if restore:
    collections.append(VARIABLES_TO_RESTORE)
  for collection in collections:
    if var not in tf.get_collection(collection):
      tf.add_to_collection(collection, var)
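The membership check makes the call idempotent: adding the same variable twice would otherwise duplicate it in the collection. MODEL_VARIABLES and VARIABLES_TO_RESTORE are module-level collection keys in the original code; the values below are assumptions for the sketch:

MODEL_VARIABLES = '_model_variables_'
VARIABLES_TO_RESTORE = '_variables_to_restore_'

beta = tf.get_variable('beta', [64], initializer=tf.zeros_initializer())
add_variable(beta, restore=True)
# Restore exactly the registered variables from a checkpoint:
saver = tf.train.Saver(tf.get_collection(VARIABLES_TO_RESTORE))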
Example 6: dense
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        assert len(tf.get_variable_scope().name.split('/')) == 2
        w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
        b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
        weight_decay_fc = 3e-4

        if weight_loss_dict is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
            weight_loss_dict[w] = weight_decay_fc
            weight_loss_dict[b] = 0.0
            # The collection key is derived from the outermost variable scope,
            # e.g. a 'pi/fc1' scope yields the 'pi_losses' collection.
            tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)

        return tf.nn.bias_add(tf.matmul(x, w), b)
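A hypothetical usage sketch; the assert requires the layer to live exactly one scope below a top-level scope, so each top-level scope accumulates its own losses collection:

x = tf.placeholder(tf.float32, [None, 32])
with tf.variable_scope('pi'):
    h = dense(x, 64, 'fc1', weight_init=tf.random_normal_initializer(stddev=0.01),
              weight_loss_dict={})
# The decay term was added to the scope-specific 'pi_losses' collection:
pi_losses = tf.get_collection('pi_losses')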
Example 7: _variable_with_weight_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  # Note: `if wd:` also skips wd == 0, unlike `if wd is not None:` in Examples 1 and 3.
  if wd:
    # tf.mul was renamed to tf.multiply in TensorFlow 1.0:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Example 8: loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
      of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Example 9: _alphabet
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def _alphabet(mode, config):
    """ Creates alphabet for alphabetized dihedral prediction. """
    # prepare initializer
    if config['alphabet'] is not None:
        alphabet_initializer = tf.constant_initializer(config['alphabet'])  # user-defined alphabet
    else:
        alphabet_initializer = dict_to_init(config['alphabet_init'], config['alphabet_seed'])  # random initialization

    # alphabet variable, possibly trainable
    alphabet = tf.get_variable(name='alphabet',
                               shape=[config['alphabet_size'], NUM_DIHEDRALS],
                               initializer=alphabet_initializer,
                               trainable=config['alphabet_trainable'])  # [OUTPUT_SIZE, NUM_DIHEDRALS]

    # add to WEIGHTS collection if trainable
    if mode == 'training' and config['alphabet_trainable']:
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, alphabet)

    return alphabet
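dict_to_init and NUM_DIHEDRALS come from the surrounding model code. Registering the variable under tf.GraphKeys.WEIGHTS lets stock utilities pick it up later; a hedged sketch of one such consumer in TF 1.x:

# All variables registered under the standard WEIGHTS key:
weights = tf.get_collection(tf.GraphKeys.WEIGHTS)
reg_loss = tf.contrib.layers.apply_regularization(
    tf.contrib.layers.l2_regularizer(1e-4), weights)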
Example 10: _drmsds
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def _drmsds(config, coordinates, targets, weights):
    """ Computes reduced weighted dRMSD loss (as specified by weights)
        between predicted tertiary structures and targets. """
    # lose end residues if desired
    if config['num_edge_residues'] > 0:
        coordinates = coordinates[:-(config['num_edge_residues'] * NUM_DIHEDRALS)]

    # if only c_alpha atoms are requested then subsample
    if config['atoms'] == 'c_alpha':  # starts at 1 because c_alpha atoms are the second atoms
        coordinates = coordinates[1::NUM_DIHEDRALS]  # [NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE, NUM_DIMENSIONS]
        targets = targets[1::NUM_DIHEDRALS]          # [NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE, NUM_DIMENSIONS]

    # compute per structure dRMSDs
    drmsds = drmsd(coordinates, targets, weights, name='drmsds')  # [BATCH_SIZE]

    # add to relevant collections for summaries, etc.
    if config['log_model_summaries']:
        tf.add_to_collection(config['name'] + '_drmsdss', drmsds)

    return drmsds
Example 11: variable_with_weight_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def variable_with_weight_decay(name, shape, stddev, wd, mean=0.0, values=None):
    """Get a TF variable with optional l2-loss attached."""
    if values is None:
        initializer = tf.truncated_normal_initializer(mean=mean, stddev=stddev, dtype=tf.float32)
    else:
        initializer = tf.constant_initializer(values)
    var = tf.get_variable(
        name,
        shape,
        initializer=initializer,
        dtype=tf.float32)
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        # Registered twice: 'losses' feeds the total loss, while
        # 'weight_losses' lets the decay terms be monitored separately.
        tf.add_to_collection('losses', weight_decay)
        tf.add_to_collection('weight_losses', weight_decay)
    return var
Example 12: __variable_with_weight_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def __variable_with_weight_decay(kernel_shape, initializer, wd):
    """
    Create a variable with L2 regularization (weight decay).
    :param kernel_shape: the size of the convolving weight kernel.
    :param initializer: the initialization scheme; He et al. normal or Xavier normal are recommended.
    :param wd: (weight decay) L2 regularization parameter.
    :return: the initialized kernel weights. The L2 loss is added to the loss collection.
    """
    w = tf.get_variable('weights', kernel_shape, tf.float32, initializer=initializer)
    collection_name = tf.GraphKeys.REGULARIZATION_LOSSES
    # Skip the decay term on scope reuse so it is registered only once per variable.
    if wd and (not tf.get_variable_scope().reuse):
        weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name='w_loss')
        tf.add_to_collection(collection_name, weight_decay)
    return w
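Because this helper registers the decay term under the standard tf.GraphKeys.REGULARIZATION_LOSSES key, it can be collected with the stock API. A minimal sketch, where data_loss stands in for the task loss:

with tf.variable_scope('conv1'):
    w = __variable_with_weight_decay([3, 3, 3, 16],
                                     tf.contrib.layers.xavier_initializer(),
                                     wd=5e-4)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = data_loss + tf.add_n(reg_losses)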
Example 13: MHGD_embedding
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def MHGD_embedding(student_feature, teacher_feature):
    # Attention_head, Estimator, kld_loss, num_head and D are defined
    # elsewhere in the original repository.
    with tf.variable_scope('MHGD'):
        with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected], trainable=True,
                                            weights_regularizer=None,
                                            variables_collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'MHA']):
            with tf.contrib.framework.arg_scope([tf.contrib.layers.batch_norm], activation_fn=None, trainable=True,
                                                param_regularizers=None,
                                                variables_collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'MHA']):
                V_T = teacher_feature
                V_S = student_feature
                B, D2 = student_feature.get_shape().as_list()

                G_T = Attention_head(V_T, V_T, D2, num_head, 'Attention', is_training=True)
                V_T_ = Estimator(V_T, G_T, D, num_head, 'Estimator')
                # Estimator reconstruction loss, stashed in the 'MHA_loss' collection.
                tf.add_to_collection('MHA_loss', tf.reduce_mean(1 - tf.reduce_sum(V_T_ * V_T, -1)))

                G_T = Attention_head(V_T, V_T, D2, num_head, 'Attention', reuse=True)
                G_S = Attention_head(V_S, V_S, D2, num_head, 'Attention', reuse=True)

                mean = tf.reduce_mean(G_T, -1, keepdims=True)
                G_T = tf.tanh(G_T - mean)
                G_S = tf.tanh(G_S - mean)

                GNN_losses = kld_loss(G_S, G_T)
    return GNN_losses
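The 'MHA_loss' term is placed in a custom collection rather than returned, so the caller must fetch it separately. A hedged sketch of how a caller might fold both losses into training; student_feat, teacher_feat and task_loss are placeholders for the caller's own tensors:

gnn_loss = MHGD_embedding(student_feat, teacher_feat)
mha_loss = tf.add_n(tf.get_collection('MHA_loss'))
total_loss = task_loss + gnn_loss + mha_loss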
Example 14: get_ema_hooks
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def get_ema_hooks(self, train_op, var_list, params_moving_average_decay, scope, mode,
                  **kargs):
    self.ema = model_io_utils.track_params_averages(
        params_moving_average_decay,
        scope,
        **kargs)
    if mode == tf.estimator.ModeKeys.TRAIN:
        with tf.control_dependencies([train_op]):
            if not var_list:
                tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
            else:
                tvars = var_list
            params_averages_op = self.ema.apply(tvars)
            return params_averages_op, None
            # tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, tf.group(params_averages_op))
    elif mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):
        hooks = model_io_utils.RestoreParametersAverageValues(self.ema)
        return None, hooks
    else:
        return None, None
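The commented-out line hints at the common alternative of registering the EMA update in tf.GraphKeys.UPDATE_OPS so it runs alongside every training step. A hedged sketch of that pattern:

tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, params_averages_op)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.group(train_op)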
Example 15: get_weight_variable
# Required import: import tensorflow [as alias]
# Or: from tensorflow import add_to_collection [as alias]
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    # If a regularizer function is given, add its result to the 'losses' collection.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights
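A minimal usage sketch, assuming TF 1.x; tf.contrib.layers.l2_regularizer is the usual companion here, and cross_entropy_mean stands in for the data loss:

regularizer = tf.contrib.layers.l2_regularizer(0.0001)
with tf.variable_scope('layer1'):
    weights = get_weight_variable([784, 500], regularizer)
# Data loss plus every regularization term accumulated in 'losses':
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))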