This article collects typical usage examples of the Python function tensorflow.python.ops.linalg_ops.svd. If you have been wondering what exactly the svd function does, how to call it, or want to see it in real code, the curated examples here may help.
Below are 15 code examples of the svd function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
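Before the examples, a minimal sketch of the call itself, assuming TensorFlow 1.x graph mode; it uses the public tf.linalg.svd, which wraps the same op as linalg_ops.svd. Two conventions several examples below depend on: TensorFlow returns the tuple in the order (s, u, v), and its v is V itself, not V^T as in numpy.linalg.svd.

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x (graph/session style)

a = tf.constant(np.random.randn(4, 3), dtype=tf.float32)
# Full call: note the (s, u, v) return order, and that v is V, not V^T.
# With full_matrices=False, u is 4x3, s has 3 entries, v is 3x3.
s, u, v = tf.linalg.svd(a, full_matrices=False)
# Singular values only; skipping u and v is cheaper.
s_only = tf.linalg.svd(a, compute_uv=False)
# Reconstruct a from the factors: a ~= u @ diag(s) @ v^T.
a_rec = tf.matmul(u * s, v, adjoint_b=True)

with tf.Session() as sess:
  print(sess.run(tf.norm(a - a_rec)))  # ~0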
Example 1: testSpectralNormalize
def testSpectralNormalize(self):
  weights = variable_scope.get_variable(
      'w', dtype=dtypes.float32, shape=[2, 3, 50, 100])
  weights = math_ops.multiply(weights, 10.0)
  normalized_weights = spectral_normalization.spectral_normalize(
      weights, power_iteration_rounds=1)
  unnormalized_sigma = linalg_ops.svd(
      array_ops.reshape(weights, [-1, weights.shape[-1]]),
      compute_uv=False)[..., 0]
  normalized_sigma = linalg_ops.svd(
      array_ops.reshape(normalized_weights, [-1, weights.shape[-1]]),
      compute_uv=False)[..., 0]

  with self.cached_session() as sess:
    sess.run(variables.global_variables_initializer())
    s0 = sess.run(unnormalized_sigma)

    for i in range(50):
      sigma = sess.run(normalized_sigma)
      if i < 1:
        s1 = sigma
      if i < 5:
        s5 = sigma
      if i < 10:
        s10 = sigma
      s50 = sigma

    self.assertAlmostEqual(1., s50, 0)
    self.assertGreater(abs(s10 - 1.), abs(s50 - 1.))
    self.assertGreater(abs(s5 - 1.), abs(s10 - 1.))
    self.assertGreater(abs(s1 - 1.), abs(s5 - 1.))
    self.assertGreater(abs(s0 - 1.), abs(s1 - 1.))
Example 2: benchmarkSVDOp
def benchmarkSVDOp(self):
  for shape_ in self.shapes:
    with ops.Graph().as_default(), \
        session.Session(config=benchmark.benchmark_config()) as sess, \
        ops.device("/cpu:0"):
      matrix_value = np.random.uniform(
          low=-1.0, high=1.0, size=shape_).astype(np.float32)
      matrix = variables.Variable(matrix_value)
      u, s, v = linalg_ops.svd(matrix)
      variables.global_variables_initializer().run()
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(u, s, v),
          min_iters=25,
          name="SVD_cpu_{shape}".format(shape=shape_))

    if test.is_gpu_available(True):
      with ops.Graph().as_default(), \
          session.Session(config=benchmark.benchmark_config()) as sess, \
          ops.device("/device:GPU:0"):
        matrix_value = np.random.uniform(
            low=-1.0, high=1.0, size=shape_).astype(np.float32)
        matrix = variables.Variable(matrix_value)
        u, s, v = linalg_ops.svd(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(u, s, v),
            min_iters=25,
            name="SVD_gpu_{shape}".format(shape=shape_))
Example 3: testConcurrentExecutesWithoutError
def testConcurrentExecutesWithoutError(self):
  with self.test_session(use_gpu=True) as sess:
    all_ops = []
    for compute_uv_ in True, False:
      for full_matrices_ in True, False:
        matrix1 = random_ops.random_normal([5, 5], seed=42)
        matrix2 = random_ops.random_normal([5, 5], seed=42)
        if compute_uv_:
          s1, u1, v1 = linalg_ops.svd(
              matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
          s2, u2, v2 = linalg_ops.svd(
              matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
          all_ops += [s1, u1, v1, s2, u2, v2]
        else:
          s1 = linalg_ops.svd(
              matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
          s2 = linalg_ops.svd(
              matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
          all_ops += [s1, s2]
    val = sess.run(all_ops)
    for i in range(2):
      s = 6 * i
      self.assertAllEqual(val[s], val[s + 3])  # s1 == s2
      self.assertAllEqual(val[s + 1], val[s + 4])  # u1 == u2
      self.assertAllEqual(val[s + 2], val[s + 5])  # v1 == v2
    for i in range(2):
      s = 12 + 2 * i
      self.assertAllEqual(val[s], val[s + 1])  # s1 == s2
Example 4: Test
def Test(self):
  is_complex = dtype_ in (np.complex64, np.complex128)
  is_single = dtype_ in (np.float32, np.complex64)
  tol = 3e-4 if is_single else 1e-12
  if test.is_gpu_available():
    # The gpu version returns results that are much less accurate.
    tol *= 100
  np.random.seed(42)
  x_np = np.random.uniform(
      low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
  if is_complex:
    x_np += 1j * np.random.uniform(
        low=-1.0, high=1.0,
        size=np.prod(shape_)).reshape(shape_).astype(dtype_)

  with self.test_session(use_gpu=True) as sess:
    if use_static_shape_:
      x_tf = constant_op.constant(x_np)
    else:
      x_tf = array_ops.placeholder(dtype_)

    if compute_uv_:
      s_tf, u_tf, v_tf = linalg_ops.svd(
          x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
      if use_static_shape_:
        s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf])
      else:
        s_tf_val, u_tf_val, v_tf_val = sess.run(
            [s_tf, u_tf, v_tf], feed_dict={x_tf: x_np})
    else:
      s_tf = linalg_ops.svd(
          x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
      if use_static_shape_:
        s_tf_val = sess.run(s_tf)
      else:
        s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})

    if compute_uv_:
      u_np, s_np, v_np = np.linalg.svd(
          x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
    else:
      s_np = np.linalg.svd(
          x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
    # We explicitly avoid the situation where numpy eliminates a first
    # dimension that is equal to one.
    s_np = np.reshape(s_np, s_tf_val.shape)

    CompareSingularValues(self, s_np, s_tf_val, tol)
    if compute_uv_:
      CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]), tol)
      CompareSingularVectors(self,
                             np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,
                             min(shape_[-2:]), tol)
      CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,
                         full_matrices_, tol)
      CheckUnitary(self, u_tf_val, tol)
      CheckUnitary(self, v_tf_val, tol)
Example 5: Test
def Test(self):
  np.random.seed(1)
  x_np = np.random.uniform(
      low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
  if is_complex:
    x_np += 1j * np.random.uniform(
        low=-1.0, high=1.0,
        size=np.prod(shape_)).reshape(shape_).astype(dtype_)

  for compute_uv in False, True:
    for full_matrices in False, True:
      with self.test_session(use_gpu=use_gpu_) as sess:
        if use_static_shape_:
          x_tf = constant_op.constant(x_np)
        else:
          x_tf = array_ops.placeholder(dtype_)
        if compute_uv:
          s_tf, u_tf, v_tf = linalg_ops.svd(x_tf,
                                            compute_uv=compute_uv,
                                            full_matrices=full_matrices)
          if use_static_shape_:
            s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf])
          else:
            s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf],
                                                    feed_dict={x_tf: x_np})
        else:
          s_tf = linalg_ops.svd(x_tf,
                                compute_uv=compute_uv,
                                full_matrices=full_matrices)
          if use_static_shape_:
            s_tf_val = sess.run(s_tf)
          else:
            s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})
        if compute_uv:
          u_np, s_np, v_np = np.linalg.svd(x_np,
                                           compute_uv=compute_uv,
                                           full_matrices=full_matrices)
        else:
          s_np = np.linalg.svd(x_np,
                               compute_uv=compute_uv,
                               full_matrices=full_matrices)
        # We explicitly avoid the situation where numpy eliminates a first
        # dimension that is equal to one.
        s_np = np.reshape(s_np, s_tf_val.shape)
        CompareSingularValues(self, s_np, s_tf_val)
        if compute_uv:
          CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]))
          CompareSingularVectors(self,
                                 np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,
                                 min(shape_[-2:]))
          CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,
                             full_matrices)
          CheckUnitary(self, u_tf_val)
          CheckUnitary(self, v_tf_val)
Example 6: testWrongDimensions
def testWrongDimensions(self):
  # The input to svd should be a tensor of at least rank 2.
  scalar = constant_op.constant(1.)
  with self.assertRaisesRegexp(ValueError,
                               "Shape must be at least rank 2 but is rank 0"):
    linalg_ops.svd(scalar)
  vector = constant_op.constant([1., 2.])
  with self.assertRaisesRegexp(ValueError,
                               "Shape must be at least rank 2 but is rank 1"):
    linalg_ops.svd(vector)
Example 7: _compute_power_svd
def _compute_power_svd(self, var, mat_g, mat_g_size, alpha, mat_h_slot_name):
  """Computes mat_h = mat_g^alpha using svd. mat_g is a symmetric PSD matrix.

  Args:
    var: the variable we are updating.
    mat_g: the symmetric PSD matrix whose power is to be computed
    mat_g_size: size of mat_g
    alpha: a real number
    mat_h_slot_name: name of slot to store the power, if needed.

  Returns:
    mat_h = mat_g^alpha

  Stores mat_h in the appropriate slot, if it exists.
  Note that mat_g is PSD. So we could use linalg_ops.self_adjoint_eig.
  """
  if mat_g_size == 1:
    mat_h = math_ops.pow(mat_g + self._epsilon, alpha)
  else:
    damping = self._epsilon * linalg_ops.eye(math_ops.to_int32(mat_g_size))
    diag_d, mat_u, mat_v = linalg_ops.svd(mat_g + damping, full_matrices=True)
    mat_h = math_ops.matmul(
        mat_v * math_ops.pow(math_ops.maximum(diag_d, self._epsilon), alpha),
        array_ops.transpose(mat_u))
  if mat_h_slot_name is not None:
    return state_ops.assign(self.get_slot(var, mat_h_slot_name), mat_h)
  return mat_h
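The docstring notes that mat_g is symmetric PSD, so its SVD coincides with its eigendecomposition and G^alpha = V diag(d^alpha) U^T. A minimal numpy sketch of the same computation (matrix_power_psd is a hypothetical standalone helper for illustration, not part of the optimizer above):

import numpy as np

def matrix_power_psd(mat_g, alpha, epsilon=1e-6):
  """Numpy sketch: mat_g^alpha for a symmetric PSD mat_g, via SVD."""
  damping = epsilon * np.eye(mat_g.shape[0])
  # For a symmetric PSD matrix the SVD is the eigendecomposition, so
  # G^alpha = V * diag(d^alpha) * U^T, mirroring the TF code above.
  u, d, vt = np.linalg.svd(mat_g + damping)  # numpy order: (U, d, V^T)
  return (vt.T * np.maximum(d, epsilon) ** alpha) @ u.T

g = np.cov(np.random.randn(5, 100))  # a symmetric PSD matrix
h = matrix_power_psd(g, -0.25)       # e.g. the inverse 4th root
# h^4 @ g should be close to the identity:
print(np.linalg.norm(np.linalg.matrix_power(h, 4) @ g - np.eye(5)))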
Example 8: testComputeSpectralNorm
def testComputeSpectralNorm(self):
  weights = variable_scope.get_variable(
      'w', dtype=dtypes.float32, shape=[2, 3, 50, 100])
  weights = math_ops.multiply(weights, 10.0)
  s = linalg_ops.svd(
      array_ops.reshape(weights, [-1, weights.shape[-1]]), compute_uv=False)
  true_sn = s[..., 0]
  estimated_sn = spectral_normalization.compute_spectral_norm(weights)

  with self.cached_session() as sess:
    sess.run(variables.global_variables_initializer())
    np_true_sn = sess.run(true_sn)

    for i in range(50):
      est = sess.run(estimated_sn)
      if i < 1:
        np_est_1 = est
      if i < 4:
        np_est_5 = est
      if i < 9:
        np_est_10 = est
      np_est_50 = est

    # Check that the estimate improves with more iterations.
    self.assertAlmostEqual(np_true_sn, np_est_50, 0)
    self.assertGreater(
        abs(np_true_sn - np_est_10), abs(np_true_sn - np_est_50))
    self.assertGreater(
        abs(np_true_sn - np_est_5), abs(np_true_sn - np_est_10))
    self.assertGreater(abs(np_true_sn - np_est_1), abs(np_true_sn - np_est_5))
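For context, compute_spectral_norm estimates the largest singular value by power iteration, which is what the test compares against the SVD ground truth. A rough numpy sketch of that kind of estimator (estimate_spectral_norm is a hypothetical illustration, not the library function):

import numpy as np

def estimate_spectral_norm(w, rounds=50):
  """Power-iteration estimate of the top singular value of w."""
  u = np.random.randn(w.shape[0])
  for _ in range(rounds):
    v = w.T @ u
    v /= np.linalg.norm(v)
    u = w @ v
    u /= np.linalg.norm(u)
  return u @ w @ v  # Rayleigh-quotient estimate of sigma_max

w = np.random.randn(300, 100)
# The estimate should approach the exact largest singular value:
print(estimate_spectral_norm(w), np.linalg.svd(w, compute_uv=False)[0])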
Example 9: _symmetric_matrix_square_root
def _symmetric_matrix_square_root(mat, eps=1e-10):
  """Compute square root of a symmetric matrix.

  Note that this is different from an elementwise square root. We want to
  compute M' where M' = sqrt(mat) such that M' * M' = mat.

  Also note that this method **only** works for symmetric matrices.

  Args:
    mat: Matrix to take the square root of.
    eps: Small epsilon such that any element less than eps will not be square
      rooted to guard against numerical instability.

  Returns:
    Matrix square root of mat.
  """
  # Unlike numpy, tensorflow's return order is (s, u, v)
  s, u, v = linalg_ops.svd(mat)
  # sqrt is unstable around 0, just use 0 in such case
  si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
  # Note that the v returned by Tensorflow is v = V
  # (when referencing the equation A = U S V^T)
  # This is unlike Numpy which returns v = V^T
  return math_ops.matmul(
      math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)
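The same construction is easy to sanity-check in numpy. Note that numpy's svd already returns V^T, which is why the TensorFlow version above needs transpose_b=True and this sketch does not:

import numpy as np

# For a symmetric PSD mat, U equals V, so U * diag(sqrt(s)) * V^T is a
# matrix M' with M' @ M' = mat.
mat = np.cov(np.random.randn(4, 50))  # symmetric PSD test input
u, s, vt = np.linalg.svd(mat)         # numpy order: (U, s, V^T)
root = (u * np.sqrt(s)) @ vt
print(np.linalg.norm(root @ root - mat))  # should be ~0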
Example 10: _testSvdCorrectness
def _testSvdCorrectness(self, dtype, shape):
  np.random.seed(1)
  x_np = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
  m, n = shape[-2], shape[-1]
  _, s_np, _ = np.linalg.svd(x_np)

  with self.cached_session() as sess:
    x_tf = array_ops.placeholder(dtype)
    with self.test_scope():
      s, u, v = linalg_ops.svd(x_tf, full_matrices=True)
    s_val, u_val, v_val = sess.run([s, u, v], feed_dict={x_tf: x_np})
    u_diff = np.matmul(u_val, np.swapaxes(u_val, -1, -2)) - np.eye(m)
    v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
    # Check u_val and v_val are orthogonal matrices.
    self.assertLess(np.linalg.norm(u_diff), 1e-2)
    self.assertLess(np.linalg.norm(v_diff), 1e-2)
    # Check that the singular values are correct, i.e., close to the ones
    # from numpy.linalg.svd.
    self.assertLess(np.linalg.norm(s_val - s_np), 1e-2)
    # The tolerance is set based on our tests on numpy's svd. As our tests
    # have batch dimensions and all our operations are on float32, we set the
    # tolerance a bit larger. Numpy's svd calls LAPACK's svd, which operates
    # on double precision.
    self.assertLess(
        np.linalg.norm(self._compute_usvt(s_val, u_val, v_val) - x_np), 2e-2)

    # Check behavior with compute_uv=False. We expect to still see 3 outputs,
    # with a sentinel scalar 0 in the last two outputs.
    with self.test_scope():
      no_uv_s, no_uv_u, no_uv_v = gen_linalg_ops.svd(
          x_tf, full_matrices=True, compute_uv=False)
    no_uv_s_val, no_uv_u_val, no_uv_v_val = sess.run(
        [no_uv_s, no_uv_u, no_uv_v], feed_dict={x_tf: x_np})
    self.assertAllClose(no_uv_s_val, s_val, atol=1e-4, rtol=1e-4)
    self.assertEqual(no_uv_u_val, 0.0)
    self.assertEqual(no_uv_v_val, 0.0)
Example 11: __call__
def __call__(self, shape, dtype=None, partition_info=None):
  if dtype is None:
    dtype = self.dtype
  # Check the shape
  if len(shape) < 2:
    raise ValueError("The tensor to initialize must be "
                     "at least two-dimensional")
  # Flatten the input shape with the last dimension remaining
  # its original shape so it works for conv2d
  num_rows = 1
  for dim in shape[:-1]:
    num_rows *= dim
  num_cols = shape[-1]
  flat_shape = (num_rows, num_cols)
  # Generate a random matrix
  a = random_ops.random_uniform(flat_shape, dtype=dtype, seed=self.seed)
  # Compute the svd
  _, u, v = linalg_ops.svd(a, full_matrices=False)
  # Pick the appropriate singular value decomposition
  if num_rows > num_cols:
    q = u
  else:
    # Tensorflow departs from numpy conventions
    # such that we need to transpose axes here
    q = array_ops.transpose(v)
  return self.gain * array_ops.reshape(q, shape)
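A numpy sketch of the same recipe may make the flattening step concrete (the shape here is illustrative). As the comments above hint, numpy's svd already returns V^T, so the numpy version needs no transpose in the else branch:

import numpy as np

shape = (3, 3, 16, 32)               # e.g. a conv2d kernel shape
num_rows = int(np.prod(shape[:-1]))  # 3 * 3 * 16 = 144
num_cols = shape[-1]                 # 32
a = np.random.uniform(-1.0, 1.0, size=(num_rows, num_cols))
u, _, vt = np.linalg.svd(a, full_matrices=False)
q = u if num_rows > num_cols else vt  # numpy's vt is already V^T
print(np.linalg.norm(q.T @ q - np.eye(num_cols)))  # ~0: orthonormal columns
kernel = q.reshape(shape)            # the final orthogonal kernel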
Example 12: testTwoInputsSameOp
def testTwoInputsSameOp(self):
  g = ops.Graph()
  with g.as_default():
    m = array_ops.placeholder(dtypes.float32)
    s, u, v = linalg_ops.svd(m)
    ss = math_ops.reduce_sum(s)
    uu = math_ops.reduce_sum(u)
    vv = math_ops.reduce_sum(v)
    result = ss + uu + vv
  f = graph_to_function_def.graph_to_function_def(
      g,
      g.get_operations()[1:],  # skip the placeholder
      [s, u, v],
      [result])
  self.assertEqual(len(f.signature.input_arg), 3)
Example 13: _assert_non_singular
def _assert_non_singular(self):
  """Private default implementation of _assert_non_singular."""
  logging.warn(
      "Using (possibly slow) default implementation of assert_non_singular."
      " Requires conversion to a dense matrix and O(N^3) operations.")
  if self._can_use_cholesky():
    return self.assert_positive_definite()
  else:
    singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
    # TODO(langmore) Add .eig and .cond as methods.
    cond = (math_ops.reduce_max(singular_values, axis=-1) /
            math_ops.reduce_min(singular_values, axis=-1))
    return check_ops.assert_less(
        cond,
        self._max_condition_number_to_be_non_singular(),
        message="Singular matrix up to precision epsilon.")
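The quantity being bounded here is the 2-norm condition number, which needs only the singular values, not U or V. In numpy terms (a small illustration, not the operator code):

import numpy as np

mat = np.random.randn(6, 6)
s = np.linalg.svd(mat, compute_uv=False)
cond = s.max() / s.min()          # ratio of extreme singular values
print(cond, np.linalg.cond(mat))  # np.linalg.cond agrees (2-norm default)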
Example 14: _NormalizingSvd
def _NormalizingSvd(tf_a):
  tf_s, tf_u, tf_v = linalg_ops.svd(tf_a, compute_uv=True, full_matrices=True)
  # Singular vectors are only unique up to an arbitrary phase. We normalize
  # the vectors such that the first component of u (if m >= n) or v (if n > m)
  # has phase 0.
  m = tf_a.shape[-2]
  n = tf_a.shape[-1]
  if m >= n:
    top_rows = tf_u[..., 0:1, :]
  else:
    top_rows = tf_v[..., 0:1, :]
  if tf_u.dtype.is_complex:
    angle = -math_ops.angle(top_rows)
    phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
  else:
    phase = math_ops.sign(top_rows)
  tf_u *= phase[..., :m]
  tf_v *= phase[..., :n]
  return tf_s, tf_u, tf_v
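A small numpy illustration of the ambiguity this helper removes: flipping the sign (or phase, in the complex case) of a column of U together with the matching row of V^T leaves U S V^T unchanged, so two correct SVD implementations can legitimately disagree until the phase is pinned down:

import numpy as np

a = np.random.randn(4, 3)
u, s, vt = np.linalg.svd(a, full_matrices=True)  # u: 4x4, s: 3, vt: 3x3
flip = np.sign(u[0, :3])  # sign of u's first row (assumed nonzero here)
u2 = u.copy()
u2[:, :3] *= flip         # flip columns of U...
v2t = vt * flip[:, None]  # ...and the matching rows of V^T
# The reconstruction is unchanged by the paired flips:
print(np.linalg.norm((u2[:, :3] * s) @ v2t - a))  # still ~0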
Example 15: _matrix_square_root
def _matrix_square_root(mat, eps=1e-10):
  """Compute symmetric square root of matrix.

  Equivalent to matrix square root when matrix is invertible; note that this
  is different from an elementwise square root. We want to compute M' where
  M' = sqrt(mat) such that M' * M' = mat.

  Args:
    mat: Matrix to take the square root of.
    eps: Small epsilon such that any element less than eps will not be square
      rooted to guard against numerical instability.

  Returns:
    Matrix square root of mat.
  """
  s, u, v = linalg_ops.svd(mat)
  # sqrt is unstable around 0, just use 0 in such case
  si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
  return math_ops.matmul(
      math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)