This page collects typical usage examples of the Python method tensorflow.python.ops.nn_impl.l2_normalize. If you are wondering what nn_impl.l2_normalize does or how to use it, the curated code examples below may help. You can also read further about the module the method belongs to, tensorflow.python.ops.nn_impl.
Below are 13 code examples of nn_impl.l2_normalize, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
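Before the examples, here is a minimal sketch (ours, not taken from the page) of what l2_normalize computes. The NumPy helper l2_normalize_np is a hypothetical stand-in that mirrors the op's x / sqrt(max(sum(x**2), epsilon)) formula.

import numpy as np

def l2_normalize_np(x, axis=1, epsilon=1e-12):
    # Divide each slice along `axis` by its L2 norm, mirroring
    # tensorflow.python.ops.nn_impl.l2_normalize.
    squared_norm = np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon)
    return x / np.sqrt(squared_norm)

x = np.array([[3.0, 4.0], [1.0, 0.0]])
print(l2_normalize_np(x))  # each row now has unit L2 norm: [[0.6, 0.8], [1.0, 0.0]]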
Example 1: _compute_cosine_distance
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
    """Computes cosine distance between each input and each cluster center.

    Args:
      inputs: list of input Tensors.
      clusters: cluster Tensor.
      inputs_normalized: if True, assumes that the inputs and clusters are
        already L2-normalized and computes the dot product, which is equivalent
        to the cosine distance. Otherwise, L2-normalizes the inputs first.

    Returns:
      list of Tensors, where each element corresponds to an element in inputs.
      The value is the distance of each row to all the cluster centers.
    """
    output = []
    if not inputs_normalized:
        with ops.colocate_with(clusters):
            clusters = nn_impl.l2_normalize(clusters, dim=1)
    for inp in inputs:
        with ops.colocate_with(inp):
            if not inputs_normalized:
                inp = nn_impl.l2_normalize(inp, dim=1)
            output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
    return output
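A minimal NumPy illustration (ours, not part of Example 1) of the shortcut the function relies on: once both the inputs and the cluster centers are row-normalized, 1 - inp @ clusters.T is exactly the pairwise cosine-distance matrix.

import numpy as np

inp = np.array([[1.0, 0.0], [1.0, 1.0]])
clusters = np.array([[0.0, 1.0], [1.0, 0.0]])

inp /= np.linalg.norm(inp, axis=1, keepdims=True)
clusters /= np.linalg.norm(clusters, axis=1, keepdims=True)

distances = 1.0 - inp @ clusters.T  # shape (num_inputs, num_clusters)
print(distances)  # entry (i, j) is the cosine distance from input i to center j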
Example 2: _infer_graph
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _infer_graph(self, inputs, clusters):
    """Maps each input to its closest cluster and the corresponding score.

    Args:
      inputs: list of input Tensors.
      clusters: Tensor of cluster centers.

    Returns:
      List of tuples, where each value in a tuple corresponds to a value in
      inputs. The tuple has the following three elements:
        all_scores: distance of each input to each cluster center.
        score: distance of each input to the closest cluster center.
        cluster_idx: index of the cluster center closest to the corresponding
          input.
    """
    assert isinstance(inputs, list)
    # Pairwise distances are used only by transform(). In all other cases, this
    # sub-graph is not evaluated.
    scores = self._distance_graph(inputs, clusters, self._distance_metric)
    output = []
    if (self._distance_metric == COSINE_DISTANCE and
            not self._clusters_l2_normalized()):
        # For unit-normalized vectors x and y, the squared Euclidean distance
        # equals 2 * the cosine distance. We use this fact and reuse the
        # nearest_neighbors op.
        # TODO(ands): Support COSINE distance in nearest_neighbors and remove
        # this.
        with ops.colocate_with(clusters):
            clusters = nn_impl.l2_normalize(clusters, dim=1)
    for inp, score in zip(inputs, scores):
        with ops.colocate_with(inp):
            (indices,
             distances) = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)
            if self._distance_metric == COSINE_DISTANCE:
                distances *= 0.5
            output.append(
                (score, array_ops.squeeze(distances), array_ops.squeeze(indices)))
    return zip(*output)
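A quick numeric check (ours, not part of Example 2) of the fact the comment relies on: for unit-norm vectors, the squared Euclidean distance is twice the cosine distance, which is why the code halves the distances returned by nearest_neighbors.

import numpy as np

x = np.array([3.0, 4.0]); x /= np.linalg.norm(x)
y = np.array([1.0, 1.0]); y /= np.linalg.norm(y)

cosine_distance = 1.0 - np.dot(x, y)
squared_euclidean = np.sum((x - y) ** 2)
# For unit vectors: ||x - y||^2 = 2 - 2*cos(x, y) = 2 * (1 - cos(x, y)).
assert np.isclose(squared_euclidean, 2.0 * cosine_distance)
print(0.5 * squared_euclidean, cosine_distance)  # both print the same value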
Example 3: _l2_normalize_data
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _l2_normalize_data(cls, inputs):
    """L2-normalizes the input data."""
    output = []
    for inp in inputs:
        with ops.colocate_with(inp):
            output.append(nn_impl.l2_normalize(inp, dim=1))
    return output
Example 4: _full_batch_training_op
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
    """Creates an op for training in the full-batch case.

    Args:
      inputs: list of input Tensors.
      cluster_idx_list: A vector (or list of vectors). Each element in the
        vector corresponds to an input row in 'inp' and specifies the cluster
        id corresponding to the input.
      cluster_centers: Tensor Ref of cluster centers.

    Returns:
      An op for doing a single update of full-batch k-means.
    """
    cluster_sums = []
    cluster_counts = []
    epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
    for inp, cluster_idx in zip(inputs, cluster_idx_list):
        with ops.colocate_with(inp):
            cluster_sums.append(
                math_ops.unsorted_segment_sum(inp, cluster_idx,
                                              self._num_clusters))
            cluster_counts.append(
                math_ops.unsorted_segment_sum(
                    array_ops.reshape(
                        array_ops.ones(
                            array_ops.reshape(array_ops.shape(inp)[0], [-1])),
                        [-1, 1]), cluster_idx, self._num_clusters))
    with ops.colocate_with(cluster_centers):
        new_clusters_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(
            math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
        if self._clusters_l2_normalized():
            new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers,
                                                        dim=1)
    return state_ops.assign(cluster_centers, new_clusters_centers)
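As a rough NumPy analogue (ours, not part of Example 4) of how the new centers are formed above: unsorted_segment_sum accumulates per-cluster point sums and counts, and dividing the two gives the updated means.

import numpy as np

points = np.array([[0.0, 0.0], [2.0, 2.0], [4.0, 0.0]])
cluster_idx = np.array([0, 0, 1])
num_clusters = 2

sums = np.zeros((num_clusters, points.shape[1]))
counts = np.zeros((num_clusters, 1))
for p, c in zip(points, cluster_idx):
    sums[c] += p      # analogue of unsorted_segment_sum over the points
    counts[c] += 1.0  # analogue of unsorted_segment_sum over a vector of ones

new_centers = sums / (counts + 1e-6)
print(new_centers)  # approximately [[1., 1.], [4., 0.]]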
Example 5: _init_clusters
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _init_clusters(self):
    """Initialization of clusters.

    Returns:
      Tuple with the following elements:
        cluster_centers: a Tensor for storing cluster centers.
        cluster_counts: a Tensor for storing counts of points assigned to each
          cluster. This is used by mini-batch training.
    """
    init = self._initial_clusters
    if init == RANDOM_INIT:
        clusters_init = self._init_clusters_random()
    elif init == KMEANS_PLUS_PLUS_INIT:
        # Points from only the first shard are used for initializing centers.
        # TODO(ands): Use all points.
        clusters_init = gen_clustering_ops.kmeans_plus_plus_initialization(
            self._inputs[0], self._num_clusters, self._random_seed,
            self._kmeans_plus_plus_num_retries)
    elif callable(init):
        clusters_init = init(self._inputs, self._num_clusters)
    elif not isinstance(init, str):
        clusters_init = init
    else:
        assert False, 'Unsupported init passed to Kmeans %s' % str(init)
    if self._distance_metric == COSINE_DISTANCE and clusters_init is not None:
        clusters_init = nn_impl.l2_normalize(clusters_init, dim=1)
    clusters_init = clusters_init if clusters_init is not None else []
    cluster_centers = variables.Variable(
        clusters_init, name='clusters', validate_shape=False)
    cluster_counts = (variables.Variable(
        array_ops.ones([self._num_clusters], dtype=dtypes.int64))
                      if self._use_mini_batch else None)
    return cluster_centers, cluster_counts
Example 6: training_graph
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def training_graph(self):
    """Generates a training graph for the k-means algorithm.

    Returns:
      A tuple consisting of:
        all_scores: A matrix (or list of matrices) of dimensions (num_input,
          num_clusters) where the value is the distance between an input vector
          and a cluster center.
        cluster_idx: A vector (or list of vectors). Each element in the vector
          corresponds to an input row in 'inp' and specifies the cluster id
          corresponding to the input.
        scores: Similar to cluster_idx but specifies the distance to the
          assigned cluster instead.
        training_op: an op that runs an iteration of training.
    """
    # Implementation of kmeans.
    inputs = self._inputs
    cluster_centers_var, total_counts = self._init_clusters()
    cluster_centers = cluster_centers_var
    if self._distance_metric == COSINE_DISTANCE:
        inputs = self._l2_normalize_data(inputs)
        if not self._clusters_l2_normalized():
            cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
    all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
    if self._use_mini_batch:
        training_op = self._mini_batch_training_op(inputs, cluster_idx,
                                                   cluster_centers,
                                                   cluster_centers_var,
                                                   total_counts)
    else:
        assert cluster_centers == cluster_centers_var
        training_op = self._full_batch_training_op(inputs, cluster_idx,
                                                   cluster_centers_var)
    return all_scores, cluster_idx, scores, training_op
Example 7: _sample_n
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    w = control_flow_ops.cond(gen_math_ops.equal(self.__m, 3),
                              lambda: self.__sample_w3(n, seed),
                              lambda: self.__sample_w_rej(n, seed))
    v = nn_impl.l2_normalize(
        array_ops.transpose(
            array_ops.transpose(
                random_ops.random_normal(shape, dtype=self.dtype,
                                         seed=seed))[1:]),
        axis=-1)
    x = array_ops.concat((w, math_ops.sqrt(1 - w ** 2) * v), axis=-1)
    z = self.__householder_rotation(x)
    return z
Example 8: __householder_rotation
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def __householder_rotation(self, x):
    u = nn_impl.l2_normalize(self.__e1 - self._loc, axis=-1)
    z = x - 2 * math_ops.reduce_sum(x * u, axis=-1, keepdims=True) * u
    return z
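A small NumPy check (ours; e1 and mu are illustrative stand-ins for self.__e1 and self._loc) that the Householder reflection used above maps the sampling axis e1 onto the mean direction, so samples generated around e1 end up centered on the distribution's mean.

import numpy as np

mu = np.array([0.0, 0.6, 0.8])   # unit-norm mean direction (stand-in for self._loc)
e1 = np.array([1.0, 0.0, 0.0])   # "north pole" axis used during sampling

u = e1 - mu
u /= np.linalg.norm(u)           # analogue of l2_normalize(self.__e1 - self._loc)

def householder(x):
    # Reflect x across the hyperplane orthogonal to u.
    return x - 2.0 * np.dot(x, u) * u

assert np.allclose(householder(e1), mu)  # e1 is rotated exactly onto mu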
Example 9: _sample_n
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _sample_n(self, n, seed=None):
    return nn_impl.l2_normalize(
        random_ops.random_normal(
            shape=array_ops.concat(([n], [self._dim + 1]), 0),
            dtype=self.dtype, seed=seed),
        axis=-1)
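A short check (ours, not part of Example 9) of the idea behind this sampler: normalizing i.i.d. Gaussian vectors places them uniformly on the unit sphere, so every returned sample has unit norm and the empirical mean is close to the origin.

import numpy as np

n, dim = 1000, 3
samples = np.random.randn(n, dim)
samples /= np.linalg.norm(samples, axis=1, keepdims=True)

assert np.allclose(np.linalg.norm(samples, axis=1), 1.0)   # all points lie on the sphere
assert np.all(np.abs(samples.mean(axis=0)) < 0.1)          # consistent with a uniform distribution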
Example 10: _compute_weights
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _compute_weights(self):
    """Generates the layer's weights by combining the direction of the weight
    vector with its norm."""
    with variable_scope.variable_scope("compute_weights"):
        self.layer.kernel = (
            nn_impl.l2_normalize(self.layer.v, axis=self.norm_axes)
            * self.layer.g)
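A brief NumPy sketch (ours, with axis 0 chosen arbitrarily for norm_axes) of the weight-normalization identity applied above: the reparameterized kernel g * v / ||v|| has norm |g| along the normalized axes, so the direction and the scale of each weight vector are learned separately.

import numpy as np

v = np.random.randn(3, 4)            # direction parameters (one column per output unit)
g = np.array([0.5, 1.0, 2.0, 4.0])   # per-output-unit scale

norm = np.linalg.norm(v, axis=0, keepdims=True)
kernel = (v / norm) * g              # analogue of l2_normalize(v, axis=norm_axes) * g

# Each kernel column has norm exactly equal to the corresponding g.
assert np.allclose(np.linalg.norm(kernel, axis=0), g)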
Example 11: _test_l2_normalization
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _test_l2_normalization(data, axis, fused_activation_function=None):
    """One iteration of L2_NORMALIZATION."""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        out = nn_impl.l2_normalize(in_data, axis)
        out = with_fused_activation_function(out, fused_activation_function)
        compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
Example 12: _initialize_clusters
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def _initialize_clusters(self,
                         cluster_centers,
                         cluster_centers_initialized,
                         cluster_centers_updated):
    """Returns an op to initialize the cluster centers."""
    init = self._initial_clusters
    if init == RANDOM_INIT:
        clusters_init = self._init_clusters_random()
    elif init == KMEANS_PLUS_PLUS_INIT:
        # Points from only the first shard are used for initializing centers.
        # TODO(ands): Use all points.
        inp = self._inputs[0]
        if self._distance_metric == COSINE_DISTANCE:
            inp = nn_impl.l2_normalize(inp, dim=1)
        clusters_init = gen_clustering_ops.kmeans_plus_plus_initialization(
            inp, self._num_clusters, self._random_seed,
            self._kmeans_plus_plus_num_retries)
    elif callable(init):
        clusters_init = init(self._inputs, self._num_clusters)
    elif not isinstance(init, str):
        clusters_init = init
    else:
        assert False, 'Unsupported init passed to Kmeans %s' % str(init)
    if self._distance_metric == COSINE_DISTANCE and clusters_init is not None:
        clusters_init = nn_impl.l2_normalize(clusters_init, dim=1)
    with ops.colocate_with(cluster_centers_initialized):
        initialized = control_flow_ops.with_dependencies(
            [clusters_init],
            array_ops.identity(cluster_centers_initialized))
    with ops.colocate_with(cluster_centers):
        assign_centers = state_ops.assign(cluster_centers, clusters_init,
                                          validate_shape=False)
        if cluster_centers_updated != cluster_centers:
            assign_centers = control_flow_ops.group(
                assign_centers,
                state_ops.assign(cluster_centers_updated, clusters_init,
                                 validate_shape=False))
        assign_centers = control_flow_ops.with_dependencies(
            [assign_centers],
            state_ops.assign(cluster_centers_initialized, True))
        return control_flow_ops.cond(initialized,
                                     control_flow_ops.no_op,
                                     lambda: assign_centers).op
Example 13: training_graph
# Required import: from tensorflow.python.ops import nn_impl [as alias]
# Alternatively: from tensorflow.python.ops.nn_impl import l2_normalize [as alias]
def training_graph(self):
    """Generates a training graph for the k-means algorithm.

    Returns:
      A tuple consisting of:
        all_scores: A matrix (or list of matrices) of dimensions (num_input,
          num_clusters) where the value is the distance between an input vector
          and a cluster center.
        cluster_idx: A vector (or list of vectors). Each element in the vector
          corresponds to an input row in 'inp' and specifies the cluster id
          corresponding to the input.
        scores: Similar to cluster_idx but specifies the distance to the
          assigned cluster instead.
        cluster_centers_initialized: scalar indicating whether the clusters
          have been initialized.
        init_op: an op to initialize the clusters.
        training_op: an op that runs an iteration of training.
    """
    # Implementation of kmeans.
    inputs = self._inputs
    (cluster_centers_var,
     cluster_centers_initialized,
     total_counts,
     cluster_centers_updated,
     update_in_steps) = self._create_variables()
    init_op = self._initialize_clusters(cluster_centers_var,
                                        cluster_centers_initialized,
                                        cluster_centers_updated)
    cluster_centers = cluster_centers_var
    if self._distance_metric == COSINE_DISTANCE:
        inputs = self._l2_normalize_data(inputs)
        if not self._clusters_l2_normalized():
            cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
    all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
    if self._use_mini_batch:
        sync_updates_op = self._mini_batch_sync_updates_op(
            update_in_steps, cluster_centers_var, cluster_centers_updated,
            total_counts)
        assert sync_updates_op is not None
        with ops.control_dependencies([sync_updates_op]):
            training_op = self._mini_batch_training_op(
                inputs, cluster_idx, cluster_centers_updated, total_counts)
    else:
        assert cluster_centers == cluster_centers_var
        training_op = self._full_batch_training_op(inputs, cluster_idx,
                                                   cluster_centers_var)
    return (all_scores, cluster_idx, scores,
            cluster_centers_initialized, init_op, training_op)