This article collects and summarizes typical usage examples of the Python method tensorflow.svd. If you have been wondering what exactly tensorflow.svd does, how to use it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the tensorflow module that this method belongs to.
Below are 15 code examples of tensorflow.svd, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: get_value_updater
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def get_value_updater(self, data, new_mean, gamma_weighted, gamma_sum):
    tf_new_differences = tf.subtract(data, tf.expand_dims(new_mean, 0))
    tf_sq_dist_matrix = tf.matmul(tf.expand_dims(tf_new_differences, 2), tf.expand_dims(tf_new_differences, 1))
    tf_new_covariance = tf.reduce_sum(tf_sq_dist_matrix * tf.expand_dims(tf.expand_dims(gamma_weighted, 1), 2), 0)

    if self.has_prior:
        tf_new_covariance = self.get_prior_adjustment(tf_new_covariance, gamma_sum)

    tf_s, tf_u, _ = tf.svd(tf_new_covariance)
    tf_required_eigvals = tf_s[:self.rank]
    tf_required_eigvecs = tf_u[:, :self.rank]

    tf_new_baseline = (tf.trace(tf_new_covariance) - tf.reduce_sum(tf_required_eigvals)) / self.tf_rest
    tf_new_eigvals = tf_required_eigvals - tf_new_baseline
    tf_new_eigvecs = tf.transpose(tf_required_eigvecs)

    return tf.group(
        self.tf_baseline.assign(tf_new_baseline),
        self.tf_eigvals.assign(tf_new_eigvals),
        self.tf_eigvecs.assign(tf_new_eigvecs)
    )
Example 2: _symmetric_matrix_square_root
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def _symmetric_matrix_square_root(mat, eps=1e-10):
    """Compute square root of a symmetric matrix.

    Note that this is different from an elementwise square root. We want to
    compute M' where M' = sqrt(mat) such that M' * M' = mat.

    Also note that this method **only** works for symmetric matrices.

    Args:
      mat: Matrix to take the square root of.
      eps: Small epsilon such that any element less than eps will not be square
        rooted to guard against numerical instability.

    Returns:
      Matrix square root of mat.
    """
    # Unlike numpy, tensorflow's return order is (s, u, v)
    s, u, v = tf.svd(mat)
    # sqrt is unstable around 0; keep the (near-zero) value itself in that case
    si = tf.where(tf.less(s, eps), s, tf.sqrt(s))
    # Note that the v returned by TensorFlow is v = V
    # (when referencing the equation A = U S V^T).
    # This is unlike numpy, which returns v = V^T.
    return tf.matmul(
        tf.matmul(u, tf.diag(si)), v, transpose_b=True)
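As a quick sanity check, multiplying the returned root by itself should recover the input. A minimal sketch, assuming TF1 graph mode, NumPy, and the helper above in scope:

import numpy as np
import tensorflow as tf

m = np.random.randn(4, 4).astype(np.float32)
mat = tf.constant(m.dot(m.T))  # symmetric positive semi-definite test matrix
root = _symmetric_matrix_square_root(mat)
with tf.Session() as sess:
    root_sq, mat_val = sess.run([tf.matmul(root, root), mat])
print(np.allclose(root_sq, mat_val, atol=1e-4))  # True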
Example 3: SVD
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def SVD(X, n, name=None):
    with tf.variable_scope(name):
        sz = X.get_shape().as_list()
        if len(sz) > 2:
            x = tf.reshape(X, [-1, sz[1]*sz[2], sz[3]])
            n = min(n, sz[1]*sz[2], sz[3])
        else:
            x = tf.expand_dims(X, 1)
            n = 1
        with tf.device('CPU'):
            g = tf.get_default_graph()
            # Assumes a custom gradient has been registered elsewhere
            # in the project under the name "Svd_"
            with g.gradient_override_map({"Svd": "Svd_"}):
                s, u, v = tf.svd(x, full_matrices=False)
        # removenan is a helper defined elsewhere in the project
        s = removenan(s)
        v = removenan(v)
        u = removenan(u)
        s = tf.nn.l2_normalize(tf.slice(s, [0, 0], [-1, n]), 1)
        U = tf.nn.l2_normalize(tf.slice(u, [0, 0, 0], [-1, -1, n]), 1)
        V = tf.nn.l2_normalize(tf.slice(v, [0, 0, 0], [-1, -1, n]), 1)
        return s, U, V
Example 4: guarded_matrix_solve_ls
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def guarded_matrix_solve_ls(A, b, W, condition_number_cap=1e5):
    # Solve the weighted least-squares problem ||sqrt(W)(Ax - b)||^2
    # A - BxNxD
    # b - BxNx1
    # W - BxN
    # SQRT_EPS and LS_L2_REGULARIZER are module-level constants defined elsewhere
    sqrt_W = tf.sqrt(tf.maximum(W, SQRT_EPS))  # BxN
    A *= tf.expand_dims(sqrt_W, axis=2)  # BxNxD
    b *= tf.expand_dims(sqrt_W, axis=2)  # BxNx1
    # Compute singular values, trivializing the problem when the condition number is too large
    AtA = tf.matmul(a=A, b=A, transpose_a=True)
    s, _, _ = [tf.stop_gradient(u) for u in tf.svd(AtA)]  # s will be BxD
    mask = tf.less(s[:, 0] / s[:, -1], condition_number_cap)  # B
    A *= tf.to_float(tf.expand_dims(tf.expand_dims(mask, axis=1), axis=2))  # zero out badly conditioned data
    x = tf.matrix_solve_ls(A, b, l2_regularizer=LS_L2_REGULARIZER, fast=True)  # BxDx1
    return tf.squeeze(x, axis=2)  # BxD
Example 5: lipschitz
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def lipschitz(self):
    """Return the Lipschitz constant as a Tensor.

    This assumes that only contractive nonlinearities are used! Examples
    are ReLUs and sigmoids.

    Returns
    -------
    lipschitz : Tensor
        The Lipschitz constant of the neural network.
    """
    lipschitz = tf.constant(1, config.dtype)
    for W, b in self._parameter_iter():
        # lipschitz *= tf.reduce_max(tf.svd(W, compute_uv=False))
        lipschitz *= tf.reduce_max(self._svd(W))
    return lipschitz
Example 6: _svd
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def _svd(A, name=None):
    """TensorFlow svd with gradient.

    Parameters
    ----------
    A : Tensor
        The matrix for which to compute singular values.
    name : string, optional
        Name for the operation.

    Returns
    -------
    s : Tensor
        The singular values of A.
    """
    S0, U0, V0 = map(tf.stop_gradient,
                     tf.svd(A, full_matrices=True, name=name))
    # A = U * S * V.T
    # S = inv(U) * A * inv(V.T) = U.T * A * V (orthogonal matrices)
    S = tf.matmul(U0, tf.matmul(A, V0), transpose_a=True)
    return tf.matrix_diag_part(S)
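Because U0 and V0 are treated as constants, gradients of the recovered singular values flow through the A inside the matmul. A minimal sketch (TF1 graph mode; W is a hypothetical weight variable):

W = tf.Variable(tf.random_normal([3, 3]))
spectral_norm = tf.reduce_max(_svd(W))    # largest singular value of W
grad = tf.gradients(spectral_norm, W)[0]  # gradient flows via A in S = U0^T A V0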
Example 7: estimate_hom
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def estimate_hom(src, dst):
    rx = src[:, :, 0:1]
    ry = src[:, :, 1:2]
    x = dst[:, :, 0:1]
    y = dst[:, :, 1:2]
    num_batch = tf.shape(src)[0]
    num_pts = tf.shape(src)[1]
    _0 = tf.zeros([num_batch, num_pts, 3])
    _1 = tf.ones([num_batch, num_pts, 1])
    # Build the DLT system: the reshape interleaves the even and odd rows
    A_even_rows = tf.concat([-rx, -ry, -_1, _0, rx*x, ry*x, x], axis=-1)
    A_odd_rows = tf.concat([_0, -rx, -ry, -_1, rx*y, ry*y, y], axis=-1)
    A = tf.concat([A_even_rows, A_odd_rows], axis=-1)
    A = tf.reshape(A, [num_batch, 2*num_pts, 9])
    # The homography is the right singular vector of the smallest
    # singular value, i.e. the last column of V
    _, _, V = tf.svd(A, full_matrices=True)
    return tf.reshape(V[:, :, -1], [num_batch, 3, 3])
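A quick sanity check, as a sketch (TF1 graph mode assumed): with src == dst and four points in general position, the null space of the DLT system contains only the identity homography, so the estimate is proportional to the identity matrix up to sign and scale.

src = tf.constant([[[0., 0.], [1., 0.], [0., 1.], [1., 1.]]])  # 1x4x2 point set
H = estimate_hom(src, src)  # 1x3x3
with tf.Session() as sess:
    print(sess.run(H))  # approximately +/- identity / sqrt(3)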
Example 8: spectral_norm_variable_initializer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def spectral_norm_variable_initializer(shape, dtype=tf.float32, partition_info=None):
    """ This function provides a customized initializer for tf.get_variable().

    :param shape:
    :param dtype:
    :param partition_info: required by tf.layers but ignored by many tf.initializers; ignored here as well.
    :return:
    """
    variable = tf.random_normal(shape=shape, stddev=1.0, dtype=dtype)
    if len(shape) > 2:
        var_reshaped = tf.reshape(variable, shape=[-1, shape[-1]])
        sigma = tf.svd(var_reshaped, full_matrices=False, compute_uv=False)[0]
    else:
        sigma = tf.svd(variable, full_matrices=False, compute_uv=False)[0]
    # FLAGS.EPSI is a small constant defined elsewhere in the project
    return variable / (sigma + FLAGS.EPSI)
Example 9: set_similarity
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def set_similarity(self, valid_examples=None, pca=True):
    if valid_examples is None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
    self.normalized_embeddings = self.g_embeddings / self.norm
    # PCA: project the embeddings onto the top-20 principal directions
    if self.num_vocabulary >= 20 and pca:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example 10: set_similarity
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def set_similarity(self, valid_examples=None, pca=True):
    one_hot = tf.constant(np.eye(self.num_vocabulary))
    if valid_examples is None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(one_hot), 1, keep_dims=True))
    self.normalized_embeddings = one_hot / self.norm
    # PCA: project the one-hot embeddings onto the top-20 principal directions
    if self.num_vocabulary >= 20 and pca:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example 11: random_orthonormal_initializer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def random_orthonormal_initializer(shape, dtype=tf.float32,
                                   partition_info=None):  # pylint: disable=unused-argument
    """Variable initializer that produces a random orthonormal matrix."""
    if len(shape) != 2 or shape[0] != shape[1]:
        raise ValueError("Expecting square shape, got %s" % shape)
    _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)
    return u
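Usage follows the standard TF1 initializer pattern; a minimal sketch (the variable name "proj" is illustrative):

W = tf.get_variable("proj", shape=[64, 64],
                    initializer=random_orthonormal_initializer)
# W^T W is the identity at initialization, since u has orthonormal columns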
Example 12: test_Svd
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def test_Svd(self):
    t = tf.svd(self.random(4, 5, 3, 2).astype("float32"))
    self.check(t, ndigits=4, abs=True)
Example 13: pca
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def pca(mat):
    mat = tf.constant(mat, dtype=tf.float32)
    mean = tf.reduce_mean(mat, 0)
    less = mat - mean  # center the data
    s, u, v = tf.svd(less, full_matrices=True, compute_uv=True)
    s2 = s ** 2
    variance_ratio = s2 / tf.reduce_sum(s2)
    with tf.Session() as session:
        run = session.run([variance_ratio])
    return run
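For example (a sketch, assuming NumPy is available and the function above is in scope), the explained-variance ratios returned for random data sum to one:

import numpy as np

data = np.random.randn(100, 5)
ratios = pca(data)[0]        # session.run returns a one-element list
print(ratios, ratios.sum())  # the ratios sum to 1.0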
Example 14: posdef_eig_svd
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def posdef_eig_svd(mat):
    """Computes the singular values and left singular vectors of a matrix."""
    evals, evecs, _ = tf.svd(mat)
    return evals, evecs
Example 15: posdef_eig_self_adjoint
# Required module: import tensorflow [as alias]
# Or: from tensorflow import svd [as alias]
def posdef_eig_self_adjoint(mat):
    """Computes eigendecomposition using self_adjoint_eig."""
    evals, evecs = tf.self_adjoint_eig(mat)
    evals = tf.abs(evals)  # Should be equivalent to svd approach.
    return evals, evecs
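For a symmetric positive-definite input the two routines above should agree up to the ordering of the eigenvalues, since tf.svd returns values in descending order and tf.self_adjoint_eig in ascending order. A minimal comparison sketch (TF1 session and NumPy assumed, with both functions in scope):

import numpy as np
import tensorflow as tf

m = np.random.randn(4, 4).astype(np.float32)
spd = tf.constant(m.dot(m.T) + 4 * np.eye(4, dtype=np.float32))  # symmetric positive definite
with tf.Session() as sess:
    svd_evals, _ = sess.run(posdef_eig_svd(spd))          # descending order
    sa_evals, _ = sess.run(posdef_eig_self_adjoint(spd))  # ascending order
print(np.allclose(svd_evals, sa_evals[::-1], atol=1e-4))  # True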