This article collects typical usage examples of the tensorflow.scatter_sub method in Python. If you are wondering what tensorflow.scatter_sub does, or how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow module it belongs to.
A total of 15 code examples of tensorflow.scatter_sub are shown below, sorted by popularity by default.
Example 1: center_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper
    "A Discriminative Feature Learning Approach for Deep Face Recognition"
    (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    # `logger` is assumed to be a module-level logging.Logger defined elsewhere in the original source
    logger.info(msg="center_loss called")
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features],
                              dtype=tf.float32,
                              initializer=tf.constant_initializer(0),
                              trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    # move each class center a fraction (1 - alfa) towards the current batch features
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    # the control dependency ensures the centers are updated whenever the loss is evaluated
    with tf.control_dependencies([centers]):
        loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
Example 2: center_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def center_loss(features, labels, num_classes, alpha=0.99, weight=1.0):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
    (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    num_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, num_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    labels = tf.reshape(labels, [-1])
    centers_batch = tf.gather(centers, labels)
    diffs = (1 - alpha) * (centers_batch - features)
    centers = tf.scatter_sub(centers, labels, diffs)
    center_loss_mean = tf.reduce_mean(tf.square(features - centers_batch))
    tf.add_to_collection('losses', weight * center_loss_mean)
    return center_loss_mean, centers
Example 3: center_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def center_loss(embedding, label, num_classes, alpha=0.1, scope="center_loss"):
    r"""Center-Loss as described in the paper
    `A Discriminative Feature Learning Approach for Deep Face Recognition`
    <http://ydwen.github.io/papers/WenECCV16.pdf> by Wen et al.

    Args:
        embedding (tf.Tensor): features produced by the network
        label (tf.Tensor): ground-truth label for each feature
        num_classes (int): number of different classes
        alpha (float): learning rate for updating the centers

    Returns:
        tf.Tensor: center loss
    """
    nrof_features = embedding.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alpha) * (centers_batch - embedding)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(embedding - centers_batch), name=scope)
    return loss
Example 4: center_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
    (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    with tf.control_dependencies([centers]):
        loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
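
A minimal usage sketch for the function above (TF 1.x graph mode). Everything except the call to center_loss is an assumption added for illustration: the toy dense layers, the class count, the 0.01 loss weight and the Adam optimizer are placeholders, not part of the original example.

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 784])
labels = tf.placeholder(tf.int64, [None])

# toy embedding network so the graph has trainable weights (placeholder architecture)
features = tf.layers.dense(images, 128, activation=tf.nn.relu, name='embedding')

# the control dependency inside center_loss ties the centers update to the loss evaluation
center_term, _ = center_loss(features, labels, alfa=0.95, nrof_classes=10)

logits = tf.layers.dense(features, 10, name='logits')
softmax_term = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))

total_loss = softmax_term + 0.01 * center_term  # 0.01 is an arbitrary weight on the center term
train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)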
Example 5: center_loss_v2
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def center_loss_v2(config, features, labels, centers=None, **kargs):
    alpha = config.alpha
    num_classes = config.num_classes
    with tf.variable_scope(config.scope + "_center_loss"):
        print("==center loss==")
        len_features = features.get_shape()[1]
        if centers is None:
            centers = tf.get_variable('centers',
                                      [num_classes, len_features],
                                      dtype=tf.float32,
                                      initializer=tf.contrib.layers.xavier_initializer(),
                                      trainable=False)
            print("==add center parameters==")
        centers_batch = tf.gather(centers, labels)
        loss = tf.nn.l2_loss(features - centers_batch)
        diff = centers_batch - features
        # average the update over repeated labels in the batch
        unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])
        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = alpha * diff
        centers_update_op = tf.scatter_sub(centers, labels, diff)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, centers_update_op)
        return loss, centers
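
A minimal sketch of how the op collected in tf.GraphKeys.UPDATE_OPS above is actually executed (TF 1.x). The Cfg class, the toy dense layer and the optimizer are hypothetical stand-ins added for illustration.

import tensorflow as tf

class Cfg:  # hypothetical stand-in for the `config` object expected above
    alpha, num_classes, scope = 0.5, 10, 'demo'

x = tf.placeholder(tf.float32, [None, 32])
labels = tf.placeholder(tf.int32, [None])
feats = tf.layers.dense(x, 16, name='feat')  # toy network with trainable weights

loss, centers = center_loss_v2(Cfg(), feats, labels)

# the centers update op was only added to a collection; group it with the train step
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)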
Example 6: minimize
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def minimize(self, loss, variables=None):
    """"""
    # `self.dense_update`, `self.sparse_update`, `self.clip_by_global_norm` and
    # `self.global_step` are defined elsewhere in the optimizer class;
    # `six` needs to be imported at module level.
    variables = variables or tf.trainable_variables()
    gradients = tf.gradients(loss, variables,
                             colocate_gradients_with_ops=True,
                             gate_gradients=True,
                             aggregation_method=2)
    gradients = {variable: gradient for variable, gradient in zip(variables, gradients) if gradient is not None}

    variable_steps = {}
    variable_indices = {}
    updates = [tf.assign_add(self.global_step, 1)]
    for variable, gradient in six.iteritems(gradients):
        if isinstance(gradient, tf.Tensor):
            # dense gradient: update the whole variable
            step, update = self.dense_update(gradient, variable)
            variable_steps[variable] = step
            updates.extend(update)
        else:
            # sparse gradient (tf.IndexedSlices): update only the touched rows
            step, indices, update = self.sparse_update(gradient, variable)
            variable_steps[variable] = step
            variable_indices[variable] = indices
            updates.extend(update)

    variable_steps = self.clip_by_global_norm(variable_steps)

    for variable, step in six.iteritems(variable_steps):
        if variable in variable_indices:
            indices = variable_indices[variable]
            updates.append(tf.scatter_sub(variable, indices, step))
        else:
            updates.append(tf.assign_sub(variable, step))
    return tf.tuple(updates)[0]

#=============================================================
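
A short sketch (TF 1.x) of why the sparse branch above ends in tf.scatter_sub: gradients of tf.gather / embedding lookups arrive as tf.IndexedSlices rather than dense tensors, so only the rows that were actually used need an update. All names and the 0.1 step size below are hypothetical.

import tensorflow as tf

emb = tf.get_variable('emb', [1000, 64])      # embedding table
ids = tf.placeholder(tf.int32, [None])        # looked-up row indices
loss = tf.reduce_sum(tf.gather(emb, ids) ** 2)

grad = tf.gradients(loss, [emb])[0]           # a tf.IndexedSlices, not a dense Tensor
step = 0.1 * grad.values                      # scaled update for the touched rows only
sparse_update = tf.scatter_sub(emb, grad.indices, step)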
Example 7: center_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
    (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    # unlike Example 4, there is no control dependency here: the caller must run
    # the returned update op (`centers`) for the centers to actually move
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
Example 8: testSubRandom
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def testSubRandom(self):
    # Random shapes of rank 4, random indices
    # (`np`, `_AsLong` and `self._TestCase` come from the surrounding test module)
    for _ in range(5):
        shape = np.random.randint(1, 20, size=4)
        indices = np.random.randint(shape[0], size=2 * shape[0])
        self._TestCase(_AsLong(list(shape)), list(indices),
                       tf.scatter_sub)
Example 9: testVariableRankSub
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def testVariableRankSub(self):
    self._VariableRankTests(tf.scatter_sub)
Example 10: testRepeatIndicesSub
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def testRepeatIndicesSub(self):
    self._VariableRankTests(tf.scatter_sub, True)
Example 11: center_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def center_loss(self, features, label, alpha, num_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
    (http://ydwen.github.io/papers/WenECCV16.pdf)
    copied from facenet: https://github.com/davidsandberg/facenet
    """
    num_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, num_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alpha) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    with tf.control_dependencies([centers]):
        loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
Example 12: get_center_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def get_center_loss(features, labels, alpha, num_classes):
    """
    Arguments:
        features: Tensor, shape [batch_size, feature_length].
        labels: Tensor, shape [batch_size]. Integer labels, not one-hot.
        alpha: learning rate for updating the centers.
        num_classes: number of classes.
    Returns:
        loss: Tensor
        centers: Tensor
        centers_update_op: op that must be run to update the centers
    """
    len_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    labels = tf.reshape(labels, [-1])
    centers_batch = tf.gather(centers, labels)
    loss = tf.nn.l2_loss(features - centers_batch)

    diff = centers_batch - features
    # average the update over repeated labels in the batch
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])
    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff

    centers_update_op = tf.scatter_sub(centers, labels, diff)
    # the returned op must be run by the caller so the class centers actually get updated
    return loss, centers, centers_update_op
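
A minimal sketch (TF 1.x; the placeholders and the explicit control dependency are assumptions for illustration) of wiring in the returned centers_update_op, since it does not run by itself:

import tensorflow as tf

feats = tf.placeholder(tf.float32, [None, 64])
lbls = tf.placeholder(tf.int64, [None])

c_loss, centers, centers_update_op = get_center_loss(feats, lbls, alpha=0.5, num_classes=10)

# option 1: tie the update to the loss, so evaluating the loss also moves the centers
with tf.control_dependencies([centers_update_op]):
    c_loss = tf.identity(c_loss)

# option 2 (alternative): fetch the op explicitly, e.g.
#     sess.run([train_op, centers_update_op], feed_dict=...)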
Example 13: update_intra_anchor
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def update_intra_anchor(intra_anchors, intra_anchors_n, features_cam, labels_cam):
    # update the intra-anchor for each camera
    # (`FLAGS` is defined at module level in the original source)
    for i in range(FLAGS.num_cams):
        # compute the difference between the old anchors and the newly given data
        diff = tf.gather(intra_anchors_n[i], labels_cam[i]) - features_cam[i]
        # update the intra-anchors under each camera
        intra_anchors[i] = tf.scatter_sub(intra_anchors[i], labels_cam[i], FLAGS.eta * diff)
    return intra_anchors
Example 14: center_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def center_loss(features, labels, num_classes, alpha=0.5, updates_collections=tf.GraphKeys.UPDATE_OPS, scope=None):
    # modified from https://github.com/EncodeTS/TensorFlow_Center_Loss/blob/master/center_loss.py
    assert features.shape.ndims == 2, 'The rank of `features` should be 2!'
    assert 0 <= alpha <= 1, '`alpha` should be in [0, 1]!'

    with tf.variable_scope(scope, 'center_loss', [features, labels]):
        centers = tf.get_variable('centers', shape=[num_classes, features.get_shape()[-1]], dtype=tf.float32,
                                  initializer=tf.constant_initializer(0), trainable=False)

        centers_batch = tf.gather(centers, labels)
        diff = centers_batch - features

        # average the update over repeated labels in the batch
        _, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])
        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = alpha * diff

        update_centers = tf.scatter_sub(centers, labels, diff)
        center_loss = 0.5 * tf.reduce_mean(tf.reduce_sum((centers_batch - features) ** 2, axis=-1))

        if updates_collections is None:
            # run the centers update whenever the loss is evaluated
            with tf.control_dependencies([update_centers]):
                center_loss = tf.identity(center_loss)
        else:
            # otherwise only collect the op; the caller is responsible for running it
            tf.add_to_collections(updates_collections, update_centers)

    return center_loss, centers
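
A brief, hypothetical illustration of the two update modes exposed by updates_collections above (the placeholders are assumptions; each call creates its own uniquely named variable scope):

import tensorflow as tf

feats = tf.placeholder(tf.float32, [None, 128])
lbls = tf.placeholder(tf.int64, [None])

# (a) default: the update op is only added to tf.GraphKeys.UPDATE_OPS,
#     so the caller has to run the collected ops together with the train step
loss_a, _ = center_loss(feats, lbls, num_classes=10)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

# (b) updates_collections=None: the centers move whenever loss_b is evaluated
loss_b, _ = center_loss(feats, lbls, num_classes=10, updates_collections=None)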
Example 15: get_git_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import scatter_sub [as alias]
def get_git_loss(features, labels, num_classes):
    # `CENTER_LOSS_ALPHA` and `FLAGS` are module-level constants in the original source
    len_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    labels = tf.reshape(labels, [-1])
    centers_batch = tf.gather(centers, labels)

    loss = tf.reduce_mean(tf.square(features - centers_batch))

    # Pairwise differences between each feature and every center in the batch
    diffs = (features[:, tf.newaxis] - centers_batch[tf.newaxis, :])
    diffs_shape = tf.shape(diffs)

    # Mask diagonal (where i == j)
    mask = 1 - tf.eye(diffs_shape[0], diffs_shape[1], dtype=diffs.dtype)
    diffs = diffs * mask[:, :, tf.newaxis]

    # combination of two losses
    loss2 = tf.reduce_mean(tf.divide(1, 1 + tf.square(diffs)))

    diff = centers_batch - features
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])
    diff = tf.divide(diff, tf.cast((1 + appear_times), tf.float32))
    diff = CENTER_LOSS_ALPHA * diff

    centers_update_op = tf.scatter_sub(centers, labels, diff)  # diff is used to get the updated centers

    # combo_loss = value_factor * loss + new_factor * loss2
    combo_loss = FLAGS.lambda_c * loss + FLAGS.lambda_g * loss2
    return combo_loss, centers_update_op