This article collects and summarizes typical usage examples of the Python method tensorflow.unique_with_counts. If you are wondering what exactly tensorflow.unique_with_counts does and how to use it, the curated code examples below may help. You can also explore the usage of other methods in the tensorflow module.
The following shows 15 code examples of tensorflow.unique_with_counts, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
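For reference, tf.unique_with_counts takes a 1-D tensor and returns three tensors: the unique values y, the index idx of each input element within y, and the per-value count. A minimal demonstration (output shown assumes eager execution in TF 2.x):

import tensorflow as tf

x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
y, idx, count = tf.unique_with_counts(x)
# y     -> [1, 2, 4, 7, 8]               unique values, in order of first appearance
# idx   -> [0, 0, 1, 2, 2, 2, 3, 4, 4]   position of each element of x within y
# count -> [2, 1, 3, 1, 2]               number of occurrences of each unique value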
Example 1: knn

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def knn(X_test, X_ref, Y_ref, K=5):
    # Negative L1 distance, so that tf.nn.top_k selects the K *nearest* neighbors.
    distance = tf.negative(tf.reduce_sum(tf.abs(tf.subtract(X_ref, X_test[0])), axis=1))
    values, indices = tf.nn.top_k(distance, k=K, sorted=False)
    # Decode the (one-hot) labels of the K nearest reference samples.
    nearest_neighbors = [tf.argmax(Y_ref[indices[k]], 0) for k in range(K)]
    # Majority vote: the label with the highest count wins.
    y, idx, count = tf.unique_with_counts(nearest_neighbors)
    preds = tf.slice(y, begin=[tf.argmax(count, 0)], size=tf.constant([1], dtype=tf.int64))[0]
    return preds
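A minimal usage sketch for the function above, assuming TensorFlow 1.x graph mode and one-hot reference labels (the shapes and data here are purely illustrative):

import numpy as np
import tensorflow as tf

X_ref = tf.constant(np.random.rand(100, 4), dtype=tf.float32)                    # 100 reference samples
Y_ref = tf.constant(np.eye(3)[np.random.randint(0, 3, 100)], dtype=tf.float32)   # one-hot labels
X_test = tf.constant(np.random.rand(1, 4), dtype=tf.float32)                     # a single query sample

pred = knn(X_test, X_ref, Y_ref, K=5)
with tf.Session() as sess:
    print(sess.run(pred))  # predicted class id of the query sample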
Example 2: sparse_tensor_left_align

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def sparse_tensor_left_align(sparse_tensor):
  """Re-arranges a `tf.SparseTensor` and returns a left-aligned version of it.

  This mapper can be useful when returning a sparse tensor that may not be
  left-aligned from a preprocessing_fn.

  Args:
    sparse_tensor: A `tf.SparseTensor`.

  Returns:
    A left-aligned version of sparse_tensor as a `tf.SparseTensor`.
  """
  reordered_tensor = tf.sparse.reorder(sparse_tensor)
  transposed_indices = tf.transpose(reordered_tensor.indices)
  row_indices = transposed_indices[0]
  # Number of values in each row, derived from the (sorted) row indices.
  row_counts = tf.unique_with_counts(row_indices, out_idx=tf.int64).count
  # Fresh, gap-free column indices 0..count-1 for every row.
  column_indices = tf.ragged.range(row_counts).flat_values
  return tf.SparseTensor(
      indices=tf.transpose(tf.stack([row_indices, column_indices])),
      values=reordered_tensor.values,
      dense_shape=reordered_tensor.dense_shape)
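A quick illustration of the effect (eager mode): a row whose entries start at column 2 is shifted to start at column 0, while values and dense_shape are unchanged.

st = tf.SparseTensor(indices=[[0, 2], [0, 3], [1, 1]],
                     values=[10, 20, 30],
                     dense_shape=[2, 4])
aligned = sparse_tensor_left_align(st)
# aligned.indices -> [[0, 0], [0, 1], [1, 0]]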
Example 3: repeat_with_index

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def repeat_with_index(x: tf.Tensor, index: tf.Tensor, axis: int = 1):
    """
    Given a tensor x of shape (N, M, K), repeat the slices along the middle
    axis (axis=1) according to the index tensor `index` of shape (G,).
    For example, if axis=1 and index = Tensor([0, 0, 0, 1, 2, 2]),
    then M = 3 (3 unique index values) and the resulting tensor has shape
    (N, 6, K): the first slice along M is repeated 3 times, the second once,
    and the third twice.

    Args:
        x: (3d Tensor) tensor to be augmented
        index: (1d Tensor) repetition index tensor
        axis: (int) axis along which to repeat

    Returns:
        (3d Tensor) tensor after repetition
    """
    index = tf.reshape(index, (-1,))
    # Count how many times each unique index value occurs; these counts
    # become the per-slice repetition factors.
    _, _, n = tf.unique_with_counts(index)
    return _repeat(x, n, axis)
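The `_repeat` helper is not part of this snippet. A plausible reconstruction, assuming it repeats each slice along `axis` by its per-slice count, is a thin wrapper over tf.repeat (available in TF >= 1.15):

def _repeat(x: tf.Tensor, n: tf.Tensor, axis: int = 1):
    # Hypothetical stand-in for the module's own helper: repeat the i-th
    # slice of `x` along `axis` exactly n[i] times.
    return tf.repeat(x, repeats=n, axis=axis)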
Example 4: get_distances

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def get_distances(features, labels, num_classes):
    len_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    labels = tf.reshape(labels, [-1])
    centers_batch = tf.gather(centers, labels)
    diff = centers_batch - features
    # Normalize each difference by how often its class appears in the batch,
    # so frequently occurring classes do not dominate the center update.
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])
    diff = tf.divide(diff, tf.cast((1 + appear_times), tf.float32))
    return diff
Example 5: weighted_loss_ratio

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def weighted_loss_ratio(config, losses, labels, ratio_weight):
    # Count how often each label occurs in the batch. EPSILON is assumed to
    # be a small module-level constant guarding against division by zero.
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    weighted_loss = losses * ratio_weight
    weighted_loss = weighted_loss / tf.cast((EPSILON + appear_times), tf.float32)
    return weighted_loss, None
Example 6: center_loss_v2

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def center_loss_v2(config, features, labels, centers=None, **kwargs):
    alpha = config.alpha
    num_classes = config.num_classes
    with tf.variable_scope(config.scope + "_center_loss"):
        print("==center loss==")
        len_features = features.get_shape()[1]
        # `is None` rather than `not centers`: truth-testing a Tensor raises.
        if centers is None:
            centers = tf.get_variable('centers',
                                      [num_classes, len_features],
                                      dtype=tf.float32,
                                      initializer=tf.contrib.layers.xavier_initializer(),
                                      trainable=False)
            print("==add center parameters==")
        centers_batch = tf.gather(centers, labels)
        loss = tf.nn.l2_loss(features - centers_batch)
        diff = centers_batch - features
        # Average the update per class by its frequency in the batch.
        unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])
        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = alpha * diff
        centers_update_op = tf.scatter_sub(centers, labels, diff)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, centers_update_op)
        return loss, centers
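Because centers_update_op is registered in tf.GraphKeys.UPDATE_OPS, a typical TF1 training setup runs it alongside the optimizer step. A sketch (total_loss is a hypothetical stand-in for the model's training loss):

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # total_loss is assumed to be defined elsewhere in the training graph.
    train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)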
Example 7: get_top_elements

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def get_top_elements(list_of_elements, max_user_contribution):
  """Gets the top max_user_contribution words from the input list.

  Note that the returned set of top words will not necessarily be sorted.

  Args:
    list_of_elements: A tensor containing a list of elements.
    max_user_contribution: The maximum number of elements to keep.

  Returns:
    A tensor of a list of strings.
    If the total number of unique words is less than or equal to
    max_user_contribution, returns the set of unique words.
  """
  words, _, counts = tf.unique_with_counts(list_of_elements)
  if tf.size(words) > max_user_contribution:
    # This logic is influenced by the focus on global heavy hitters and
    # thus implements clipping by chopping the tail of the distribution
    # of the words as present on a single client. Another option could
    # be to pick max_words_per_user random words out of the unique
    # words present locally.
    top_indices = tf.argsort(
        counts, axis=-1, direction='DESCENDING')[:max_user_contribution]
    top_words = tf.gather(words, top_indices)
    return top_words
  return words
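A small check of the behavior, assuming eager execution (where the Python `if` on a scalar boolean tensor works directly):

elements = tf.constant(['a', 'b', 'a', 'c', 'a', 'b'])
top = get_top_elements(elements, max_user_contribution=2)
print(top.numpy())  # [b'a' b'b']: the two most frequent elements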
Example 8: testInt32

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def testInt32(self):
  x = np.random.randint(2, high=10, size=7000)
  with self.test_session() as sess:
    y, idx, count = tf.unique_with_counts(x)
    tf_y, tf_idx, tf_count = sess.run([y, idx, count])
    self.assertEqual(len(x), len(tf_idx))
    self.assertEqual(len(tf_y), len(np.unique(x)))
    for i in range(len(x)):
      self.assertEqual(x[i], tf_y[tf_idx[i]])
    for value, count in zip(tf_y, tf_count):
      self.assertEqual(count, np.sum(x == value))
Example 9: testString

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def testString(self):
  indx = np.random.randint(65, high=122, size=7000)
  x = [chr(i) for i in indx]
  with self.test_session() as sess:
    y, idx, count = tf.unique_with_counts(x)
    tf_y, tf_idx, tf_count = sess.run([y, idx, count])
    self.assertEqual(len(x), len(tf_idx))
    self.assertEqual(len(tf_y), len(np.unique(x)))
    for i in range(len(x)):
      self.assertEqual(x[i], tf_y[tf_idx[i]].decode('ascii'))
    for value, count in zip(tf_y, tf_count):
      v = [1 if x[i] == value.decode('ascii') else 0 for i in range(7000)]
      self.assertEqual(count, sum(v))
Example 10: reduce_batch_weighted_counts

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def reduce_batch_weighted_counts(x, weights=None):
  """Performs batch-wise reduction to produce (possibly weighted) counts.

  Args:
    x: Input `Tensor`.
    weights: (Optional) Weights input `Tensor`.

  Returns:
    A named tuple of:
      - the unique values in x,
      - the sum of the weights for each unique value in x if weights are
        provided, else None.
  """
  if isinstance(x, tf.SparseTensor):
    x = x.values
  if weights is None:
    # TODO(b/112916494): Always do batch wise reduction once possible.
    return ReducedBatchWeightedCounts(tf.reshape(x, [-1]), None, None, None)
  # TODO(b/134075780): Revisit expected weights shape when input is sparse.
  x, weights = assert_same_shape(x, weights)
  weights = tf.reshape(weights, [-1])
  x = tf.reshape(x, [-1])
  unique_x_values, unique_idx, _ = tf.unique_with_counts(x, out_idx=tf.int64)
  # Sum the weight of every occurrence of each unique value.
  summed_weights_per_x = tf.math.unsorted_segment_sum(
      weights, unique_idx, tf.size(input=unique_x_values))
  return ReducedBatchWeightedCounts(unique_x_values, summed_weights_per_x, None,
                                    None)
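An illustrative call in eager mode (assumes `ReducedBatchWeightedCounts` and `assert_same_shape` from the same module are in scope); the named tuple is unpacked positionally:

x = tf.constant([1, 2, 1, 1])
w = tf.constant([0.5, 1.0, 2.0, 0.25])
unique_x, summed_weights, _, _ = reduce_batch_weighted_counts(x, w)
# unique_x       -> [1, 2]
# summed_weights -> [2.75, 1.0]   (0.5 + 2.0 + 0.25 for value 1)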
Example 11: reduce_batch_count_or_sum_per_key

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def reduce_batch_count_or_sum_per_key(x, key, reduce_instance_dims):
  """Computes per-key sums or counts in the given tensor.

  Args:
    x: A `Tensor` or `SparseTensor`. If x is None, return count per key.
    key: A `Tensor` or `SparseTensor` (cannot be None). Must meet one of the
      following conditions:
      1. Both x and key are dense,
      2. Both x and key are sparse and `key` must exactly match `x` in
         everything except values,
      3. The axis=1 index of each x matches its index of dense key.
    reduce_instance_dims: A bool, if True - collapses the batch and instance
      dimensions to arrive at a single scalar output. Otherwise, only
      collapses the batch dimension and outputs a `Tensor` of the same shape
      as the input. Not supported for `SparseTensor`s.

  Returns:
    A 2-tuple containing the `Tensor`s (key_vocab, count-or-sum).
  """
  if isinstance(x, tf.SparseTensor) and not reduce_instance_dims:
    raise NotImplementedError(
        'Sum per key only supports reduced dims for SparseTensors')

  key = _to_string(key)
  if x is not None:
    x, key = _validate_and_get_dense_value_key_inputs(x, key)
    unique = tf.unique(key, out_idx=tf.int64)
    if reduce_instance_dims and x.get_shape().ndims > 1:
      sums = tf.math.reduce_sum(x, axis=list(range(1, x.get_shape().ndims)))
    else:
      sums = x
    sums = tf.math.unsorted_segment_sum(sums, unique.idx, tf.size(unique.y))
  else:
    if isinstance(key, tf.SparseTensor):
      key = key.values
    key.set_shape([None])
    unique = tf.unique_with_counts(key, out_idx=tf.int64)
    sums = unique.count

  return unique.y, sums
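A sketch of the per-key sum path for dense inputs (eager mode; relies on the module's `_to_string` and `_validate_and_get_dense_value_key_inputs` helpers being in scope):

x = tf.constant([1.0, 2.0, 3.0, 4.0])
key = tf.constant(['a', 'b', 'a', 'a'])
key_vocab, sums = reduce_batch_count_or_sum_per_key(x, key, reduce_instance_dims=True)
# key_vocab -> [b'a', b'b'], sums -> [8.0, 2.0]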
Example 12: get_center_loss

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def get_center_loss(features, labels, alpha, num_classes):
    """
    Arguments:
        features: Tensor, shape [batch_size, feature_length].
        labels: Tensor, shape [batch_size]. Integer class ids, not one-hot.
        alpha: center update learning rate.
        num_classes: number of classes.
    Returns:
        loss: Tensor, the center loss.
        centers: Tensor, the class-centers variable.
        centers_update_op: op that moves the centers toward the batch features.
    """
    len_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    labels = tf.reshape(labels, [-1])
    centers_batch = tf.gather(centers, labels)
    loss = tf.nn.l2_loss(features - centers_batch)
    diff = centers_batch - features
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])
    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff
    # The update op must be run every step; it is what keeps the class
    # centers tracking the feature distribution.
    centers_update_op = tf.scatter_sub(centers, labels, diff)
    return loss, centers, centers_update_op
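For reference, the code above matches the center update rule from the center-loss paper (Wen et al., ECCV 2016): for each class $j$,

$$\Delta c_j = \frac{\sum_{i=1}^{m} \delta(y_i = j)\,(c_j - x_i)}{1 + \sum_{i=1}^{m} \delta(y_i = j)}, \qquad c_j \leftarrow c_j - \alpha\,\Delta c_j,$$

where $m$ is the batch size and $\delta(\cdot)$ is the indicator function; the $1 +$ in the denominator is the same guard seen in `1 + appear_times`.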
Example 13: build

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def build(self, predictions, targets, inputs=None):
    """ Prints the number of each kind of prediction """
    self.built = True
    pshape = predictions.get_shape()
    self.inner_metric.build(predictions, targets, inputs)
    with tf.name_scope(self.name):
        if len(pshape) == 1 or (len(pshape) == 2 and int(pshape[1]) == 1):
            self.name = self.name or "binary_prediction_counts"
            y, idx, count = tf.unique_with_counts(tf.argmax(predictions))
            # tf.Print expects a tensor as its first argument, matching the
            # categorical branch below.
            self.tensor = tf.Print(self.inner_metric.tensor, [y, count], name=self.inner_metric.name)
        else:
            self.name = self.name or "categorical_prediction_counts"
            y, idx, count = tf.unique_with_counts(tf.argmax(predictions, axis=1))
            self.tensor = tf.Print(self.inner_metric.tensor, [y, count], name=self.inner_metric.name)
Example 14: call

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def call(self, inputs, mask=None):
    features, feature_graph_index = inputs
    feature_graph_index = tf.reshape(feature_graph_index, (-1,))
    # Number of nodes belonging to each graph in the batch.
    _, _, count = tf.unique_with_counts(feature_graph_index)
    m = kb.dot(features, self.m_weight)
    if self.use_bias:
        m += self.m_bias

    self.h = tf.zeros(tf.stack(
        [tf.shape(input=features)[0], tf.shape(input=count)[0], self.n_hidden]))
    self.c = tf.zeros(tf.stack(
        [tf.shape(input=features)[0], tf.shape(input=count)[0], self.n_hidden]))
    q_star = tf.zeros(tf.stack(
        [tf.shape(input=features)[0], tf.shape(input=count)[0], 2 * self.n_hidden]))

    for i in range(self.T):
        self.h, c = self._lstm(q_star, self.c)
        # Attention logits for every node against its graph's query.
        e_i_t = tf.reduce_sum(
            input_tensor=m * repeat_with_index(self.h, feature_graph_index), axis=-1)
        exp = tf.exp(e_i_t)
        # Softmax normalizer, summed per graph segment.
        seg_sum = tf.transpose(
            a=tf.math.segment_sum(
                tf.transpose(a=exp, perm=[1, 0]),
                feature_graph_index),
            perm=[1, 0])
        seg_sum = tf.expand_dims(seg_sum, axis=-1)
        interm = repeat_with_index(seg_sum, feature_graph_index)
        a_i_t = exp / interm[..., 0]
        # Attention-weighted readout per graph.
        r_t = tf.transpose(a=tf.math.segment_sum(
            tf.transpose(a=tf.multiply(m, a_i_t[:, :, None]), perm=[1, 0, 2]),
            feature_graph_index), perm=[1, 0, 2])
        q_star = kb.concatenate([self.h, r_t], axis=-1)
    return q_star
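The `exp` / `seg_sum` steps above implement a softmax restricted to each graph's own nodes rather than over the whole batch:

$$a_{i,t} = \frac{\exp(e_{i,t})}{\sum_{j \in \mathcal{G}(i)} \exp(e_{j,t})},$$

where $\mathcal{G}(i)$ is the set of nodes belonging to the same graph as node $i$; tf.math.segment_sum computes the per-graph denominators and repeat_with_index broadcasts them back to every node.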
Example 15: center_loss

# Required import: import tensorflow [as alias]
# or: from tensorflow import unique_with_counts [as alias]
def center_loss(features, labels, num_classes, alpha=0.5, updates_collections=tf.GraphKeys.UPDATE_OPS, scope=None):
    # modified from https://github.com/EncodeTS/TensorFlow_Center_Loss/blob/master/center_loss.py
    assert features.shape.ndims == 2, 'The rank of `features` should be 2!'
    assert 0 <= alpha <= 1, '`alpha` should be in [0, 1]!'

    with tf.variable_scope(scope, 'center_loss', [features, labels]):
        centers = tf.get_variable('centers', shape=[num_classes, features.get_shape()[-1]], dtype=tf.float32,
                                  initializer=tf.constant_initializer(0), trainable=False)

        centers_batch = tf.gather(centers, labels)
        diff = centers_batch - features
        _, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])
        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = alpha * diff
        update_centers = tf.scatter_sub(centers, labels, diff)

        center_loss = 0.5 * tf.reduce_mean(tf.reduce_sum((centers_batch - features)**2, axis=-1))

        if updates_collections is None:
            with tf.control_dependencies([update_centers]):
                center_loss = tf.identity(center_loss)
        else:
            tf.add_to_collections(updates_collections, update_centers)

        return center_loss, centers
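A minimal TF1 usage sketch (hypothetical shapes; with updates_collections=None the center update is coupled to the loss via a control dependency and runs whenever the loss is evaluated):

features = tf.placeholder(tf.float32, [None, 128])
labels = tf.placeholder(tf.int64, [None])
loss, centers = center_loss(features, labels, num_classes=10, alpha=0.5,
                            updates_collections=None)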