This article collects typical usage examples of the Python method tensorflow.compat.v1.norm. If you have been wondering what v1.norm does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.compat.v1.
Below are 15 code examples of the v1.norm method, sorted by popularity.
Example 1: group_norm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
  """Group normalization as in https://arxiv.org/abs/1803.08494."""
  x_shape = shape_list(x)
  if filters is None:
    filters = x_shape[-1]
  assert len(x_shape) == 4
  assert filters % num_groups == 0
  # Prepare variables.
  scale = tf.get_variable(
      "group_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "group_norm_bias", [filters], initializer=tf.zeros_initializer())
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  # Reshape and compute group norm.
  x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
  # Calculate mean and variance over height, width, and within-group
  # channels (but not over groups).
  mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return tf.reshape(norm_x, x_shape) * scale + bias
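A minimal usage sketch for the function above. It assumes the shape_list and cast_like helpers from group_norm's original module (e.g. tensor2tensor's common_layers) are in scope:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Hypothetical input: a [batch, height, width, channels] activation map.
x = tf.random.normal([2, 8, 8, 16])
y = group_norm(x, num_groups=4)  # 16 channels split into 4 groups of 4
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(tf.norm(y)))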
Example 2: get_in_out_from_ray
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def get_in_out_from_ray(points_from_ray, sample_factor=10, std=0.01):
  """Gets sample points from points-from-ray data.

  Args:
    points_from_ray: [npts, 6], where the first 3 dims are xyz and the last 3
      are the ray direction.
    sample_factor: int, number of samples to pick per surface point.
    std: float, standard deviation of the samples to generate.

  Returns:
    near_surface_samples: [npts*sample_factor, 4], where the last dimension is
      the distance to the surface point.
  """
  surface_point_samples = points_from_ray[:, :3]
  surface_point_normals = points_from_ray[:, 3:]
  # Make sure normals are normalized to unit length.
  n = surface_point_normals
  surface_point_normals = n / (np.linalg.norm(n, axis=1, keepdims=True) + 1e-8)
  npoints = points_from_ray.shape[0]
  offsets = np.random.randn(npoints, sample_factor, 1) * std
  near_surface_samples = (surface_point_samples[:, np.newaxis, :] +
                          surface_point_normals[:, np.newaxis, :] * offsets)
  near_surface_samples = np.concatenate([near_surface_samples, offsets],
                                        axis=-1)
  near_surface_samples = near_surface_samples.reshape([-1, 4])
  return near_surface_samples
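A quick shape check for the function above, with made-up data (only the shapes matter here):

import numpy as np

# Hypothetical input: 100 surface points, each an xyz position plus a
# (not yet normalized) ray direction.
points = np.random.randn(100, 3)
normals = np.random.randn(100, 3)
points_from_ray = np.concatenate([points, normals], axis=1)  # [100, 6]

samples = get_in_out_from_ray(points_from_ray, sample_factor=10, std=0.01)
print(samples.shape)  # (1000, 4): xyz plus signed offset along the normal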
Example 3: plot_distances
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def plot_distances(pregrasp, goal, postgrasp):
  """Plots evaluation metrics for grasp2vec."""
  correct_distances = tf.norm(pregrasp - (goal + postgrasp), axis=1)
  # Reversing the batch pairs each element with a mismatched partner.
  incorrect_distances = tf.norm(pregrasp - pregrasp[::-1], axis=1)
  goal_distances = tf.norm(goal - goal[::-1], axis=1)
  tf.summary.histogram('correct_distances', correct_distances)
  tf.summary.histogram('goal_distances', goal_distances)
  tf.summary.histogram('incorrect_distances', incorrect_distances)
  tf.summary.histogram('pregrasp_sizes', tf.norm(pregrasp, axis=1))
  tf.summary.histogram('postgrasp_sizes', tf.norm(postgrasp, axis=1))
  tf.summary.histogram('goal_sizes', tf.norm(goal, axis=1))
  # Cosine similarity metric between adjacent minibatch elements.
  goal_normalized = goal / (1e-7 + tf.norm(goal, axis=1, keep_dims=True))
  similarity = tf.reduce_sum(
      goal_normalized[:-1] * goal_normalized[1:], axis=1)
  tf.summary.histogram('goal_cosine_similarity', similarity)
Example 4: project_weights_to_r
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def project_weights_to_r(self, force=False):
  """Normalizes the weights to the R-ball.

  Args:
    force: True to normalize regardless of the previous weight values.
      False to only normalize when the weights fall outside the R-ball.

  Raises:
    Exception: If not called from inside this optimizer's context.
  """
  if not self._is_init:
    raise Exception('This method must be called from within the optimizer\'s '
                    'context.')
  radius = self.loss.radius()
  for layer in self.layers:
    weight_norm = tf.norm(layer.kernel, axis=0)
    if force:
      layer.kernel = layer.kernel / (weight_norm / radius)
    else:
      layer.kernel = tf.cond(
          tf.reduce_sum(tf.cast(weight_norm > radius, dtype=self.dtype)) > 0,
          lambda k=layer.kernel, w=weight_norm, r=radius: k / (w / r),  # pylint: disable=cell-var-from-loop
          lambda k=layer.kernel: k  # pylint: disable=cell-var-from-loop
      )
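The projection itself is just a per-column rescale. A standalone NumPy sketch of the force=True branch above (the names here are illustrative, not from the source):

import numpy as np

radius = 1.0
kernel = np.random.randn(8, 4) * 3.0        # hypothetical layer kernel
col_norms = np.linalg.norm(kernel, axis=0)  # one norm per output unit
projected = kernel / (col_norms / radius)   # mirrors k / (w / r) above
print(np.linalg.norm(projected, axis=0))    # ~[1. 1. 1. 1.]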
Example 5: proto_maml_fc_bias
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def proto_maml_fc_bias(self, prototypes, zero_pad_to_max_way=False):
  """Computes the Prototypical MAML fc layer's bias.

  Args:
    prototypes: Tensor of shape [num_classes, embedding_size].
    zero_pad_to_max_way: Whether to zero-pad to the maximum num_way.

  Returns:
    fc_bias: Tensor of shape [num_classes], or of shape [self.logit_dim]
      when zero_pad_to_max_way is True.
  """
  fc_bias = -tf.square(tf.norm(prototypes, axis=1))
  if zero_pad_to_max_way:
    paddings = [[0, self.logit_dim - tf.shape(fc_bias)[0]]]
    fc_bias = tf.pad(fc_bias, paddings, 'CONSTANT', constant_values=0)
  return fc_bias
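For context (a general Proto-MAML property, not something specific to this codebase): the bias is -||c_k||^2 because the prototypical-network logit -||x - c_k||^2 expands to 2*x.c_k - ||c_k||^2 - ||x||^2, and the ||x||^2 term is shared across classes, so it can be dropped. A NumPy check of that identity:

import numpy as np

x = np.random.randn(4)     # hypothetical query embedding
c = np.random.randn(3, 4)  # hypothetical prototypes for 3 classes
logits = 2 * c @ x - np.sum(c ** 2, axis=1)  # weights 2c, bias -||c||^2
dists = -np.sum((x - c) ** 2, axis=1)
print(np.allclose(logits - dists, np.sum(x ** 2)))  # True: constant offset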
Example 6: create_grads
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def create_grads(optimizer, loss, scopes, num_expected_missing_gradients=0):
  """Computes gradients and adds summaries for their norms."""
  logging.info('Creating gradient updates for scopes %r', scopes)
  grouped_vars, _ = group_vars_by_scope(scopes, log=True)
  ordered_vars = _order_grouped_vars(grouped_vars)
  grads_and_vars = optimizer.compute_gradients(
      loss, ordered_vars,
      aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
  grads, _ = zip(*grads_and_vars)
  num_missing_grads = sum(grad is None for grad in grads)
  # Check that the gradient flow is not broken inadvertently. All
  # trainable variables should have gradients.
  if num_missing_grads > 0:
    for grad, var in grads_and_vars:
      if grad is None:
        logging.info('NO GRADIENT for var %s', var.name)
      else:
        logging.info('Gradients found for %s', var.name)
  assert num_missing_grads <= num_expected_missing_gradients, (
      '%s variables have no gradients. Expected at most %s.' %
      (num_missing_grads, num_expected_missing_gradients))
  summaries = []
  for grad, var in grads_and_vars:
    summaries.append(
        tf.summary.scalar(escape_summary_name(var.name + '_grad_norm'),
                          tf.norm(grad)))
  return grads_and_vars, summaries
Example 7: mlp
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def mlp(feature, hparams, name="mlp"):
  """Multi-layer perceptron with dropout and relu activation."""
  with tf.variable_scope(name, "mlp", values=[feature]):
    num_mlp_layers = hparams.num_mlp_layers
    mlp_size = hparams.mlp_size
    for _ in range(num_mlp_layers):
      feature = common_layers.dense(feature, mlp_size, activation=None)
      utils.collect_named_outputs("norms", "mlp_feature",
                                  tf.norm(feature, axis=-1))
      feature = common_layers.layer_norm(feature)
      feature = tf.nn.relu(feature)
      feature = tf.nn.dropout(feature, keep_prob=1. - hparams.dropout)
    return feature
Example 8: layer_norm_vars
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def layer_norm_vars(filters):
  """Creates the variables for layer norm."""
  scale = tf.get_variable(
      "layer_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "layer_norm_bias", [filters], initializer=tf.zeros_initializer())
  return scale, bias
Example 9: l2_norm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalization with the l2 norm."""
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(name, default_name="l2_norm", values=[x],
                         reuse=reuse):
    scale = tf.get_variable(
        "l2_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "l2_norm_bias", [filters], initializer=tf.zeros_initializer())
    epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
    mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
    l2norm = tf.reduce_sum(
        tf.squared_difference(x, mean), axis=[-1], keepdims=True)
    norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
    return norm_x * scale + bias
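As with group_norm in Example 1, a minimal usage sketch (assuming shape_list and cast_like are in scope):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.random.normal([2, 10])
# Centers the last dimension and rescales it to (roughly) unit l2 length,
# before the learned scale and bias are applied.
y = l2_norm(x)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(tf.norm(y, axis=-1)))  # ~[1. 1.]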
Example 10: apply_spectral_norm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.

  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, it is
  reshaped so that the number of channels (the last dimension) is preserved.

  Args:
    x: Tensor with the last dimension equal to the number of filters.

  Returns:
    x: Tensor with the same shape as x, normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]
  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))
  # v = Wu / ||Wu||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)
  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))
  # u_new = vW / ||vW||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))
  # s = v * W * u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))
  # Set u equal to u_new for the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op
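A usage sketch (again assuming shape_list is in scope, as in the function above):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Hypothetical conv kernel; the last dimension is the number of filters.
w = tf.get_variable("w", [3, 3, 16, 32])
w_sn, update_u = apply_spectral_norm(w)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Run update_u after every training step so the power iteration on "u"
  # tracks the leading singular vector of the reshaped kernel.
  sess.run(update_u)
  print(sess.run(tf.shape(w_sn)))  # same shape as w: [ 3  3 16 32]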
Example 11: unit_targeting
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def unit_targeting(w, k):
  """Unit-level magnitude pruning."""
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])
  norm = tf.norm(w, axis=0)
  thres = contrib.framework().sort(norm, axis=0)[k]
  mask = to_float(thres >= norm)[None, :]
  mask = tf.tile(mask, [size, 1])
  return tf.reshape(mask, w_shape)
Example 12: _init_norm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def _init_norm(self, weights):
  """Sets the norm of the weight vector."""
  with tf.variable_scope("init_norm"):
    flat = tf.reshape(weights, [-1, self.layer_depth])
    return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,))
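For intuition, a NumPy equivalent of what this computes, under the assumption that layer_depth is the number of output channels of the wrapped layer (one l2 norm per output channel of the flattened kernel):

import numpy as np

layer_depth = 4
weights = np.random.randn(3, 3, 8, layer_depth)  # hypothetical conv kernel
flat = weights.reshape(-1, layer_depth)
g = np.linalg.norm(flat, axis=0)
print(g.shape)  # (4,): one norm per output channel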
Example 13: random_stochastic_matrix
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def random_stochastic_matrix(dim, num_cols=None, dtype=tf.float32):
  """Generates a random left-stochastic matrix."""
  mat_shape = (dim, dim) if num_cols is None else (dim, num_cols)
  mat = tf.random.uniform(shape=mat_shape, dtype=dtype)
  mat /= tf.norm(mat, ord=1, axis=0, keepdims=True)
  return mat
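Because the entries are non-negative draws from tf.random.uniform, dividing by the column-wise L1 norm makes every column sum to 1, which is what "left stochastic" means. A quick check:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

mat = random_stochastic_matrix(4)
with tf.Session() as sess:
  print(sess.run(tf.reduce_sum(mat, axis=0)))  # ~[1. 1. 1. 1.]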
Example 14: tf_logs
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def tf_logs(tmpdir_factory):
  import numpy as np
  try:
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
  except ImportError:
    import tensorflow as tf
  # Fit y = a*x + b to noisy linear data, logging scalars along the way.
  x = np.random.rand(5)
  y = 3 * x + 1 + 0.05 * np.random.rand(5)
  a = tf.Variable(0.1)
  b = tf.Variable(0.)
  err = a * x + b - y
  loss = tf.norm(err)
  tf.summary.scalar("loss", loss)
  tf.summary.scalar("a", a)
  tf.summary.scalar("b", b)
  merged = tf.summary.merge_all()
  optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
  with tf.Session() as sess:
    log_dir = tmpdir_factory.mktemp("logs", numbered=False)
    log_dir = str(log_dir)
    train_writer = tf.summary.FileWriter(log_dir, sess.graph)
    tf.global_variables_initializer().run()
    for i in range(1000):
      _, merged_ = sess.run([optimizer, merged])
      train_writer.add_summary(merged_, i)
  return log_dir
Example 15: compute_prototypes
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import norm [as alias]
def compute_prototypes(embeddings, labels):
  """Computes class prototypes over features.

  Flattens and reshapes the features if they are not already flattened.

  Args:
    embeddings: Tensor of examples of shape [num_examples, embedding_size] or
      [num_examples, spatial_dim, spatial_dim, n_features].
    labels: Tensor of one-hot encoded labels of shape [num_examples,
      num_classes].

  Returns:
    prototypes: Tensor of class prototypes of shape [num_classes,
      embedding_size].
  """
  if len(embeddings.shape) > 2:
    feature_shape = embeddings.shape.as_list()[1:]
    n_images = tf.shape(embeddings)[0]
    n_classes = tf.shape(labels)[-1]
    vectorized_embedding = tf.reshape(embeddings, [n_images, -1])
    vectorized_prototypes = _compute_prototypes(vectorized_embedding, labels)
    prototypes = tf.reshape(vectorized_prototypes,
                            [n_classes] + feature_shape)
  else:
    prototypes = _compute_prototypes(embeddings, labels)
  return prototypes
# TODO(tylerzhu): Accumulate batch norm statistics (moving {var, mean})
# during training and use them during testing. However, we need to be careful
# about leaking information across episodes.
# Note: we should use an ema object to accumulate the statistics for
# compatibility with TF Eager.
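A usage sketch for compute_prototypes. Note the private _compute_prototypes helper it delegates to is not shown in this snippet; it is assumed to average the embeddings of each one-hot class:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Hypothetical episode: 6 examples, 8-d embeddings, 3 classes.
embeddings = tf.random.normal([6, 8])
labels = tf.one_hot([0, 0, 1, 1, 2, 2], depth=3)
prototypes = compute_prototypes(embeddings, labels)  # per-class means
with tf.Session() as sess:
  print(sess.run(tf.shape(prototypes)))  # [3 8]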