This article collects typical usage examples of the tensorflow.svd function in Python. If you are wondering exactly how to call svd, how it behaves, or what real uses of it look like, the curated code samples below may help.
Fifteen code examples of the svd function are shown below, sorted by popularity by default.
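Before the examples, a minimal sketch of the basic call may help orient readers. It assumes TensorFlow 1.x (where the op is exposed as tf.svd) and a session-based workflow; the matrix value is illustrative. Note that tf.svd returns v itself, not its conjugate transpose the way np.linalg.svd does, which is why several tests below compare against np.conj(np.swapaxes(v_np, -2, -1)).
import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x, where the op is tf.svd

a_np = np.random.randn(4, 3).astype(np.float32)
a = tf.constant(a_np)
# s: singular values in descending order; u, v: left/right singular vectors.
s, u, v = tf.svd(a, full_matrices=False, compute_uv=True)
with tf.Session() as sess:
  s_val, u_val, v_val = sess.run([s, u, v])
  # Reconstruction transposes v, since tf.svd returns v rather than v^H.
  print(np.allclose(a_np, u_val @ np.diag(s_val) @ v_val.T, atol=1e-5))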
Example 1: Test
def Test(self):
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if is_complex:
x_np += 1j * np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
for compute_uv in False, True:
for full_matrices in False, True:
with self.test_session() as sess:
if use_static_shape_:
x_tf = tf.constant(x_np)
else:
x_tf = tf.placeholder(dtype_)
if compute_uv:
s_tf, u_tf, v_tf = tf.svd(x_tf,
compute_uv=compute_uv,
full_matrices=full_matrices)
if use_static_shape_:
s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf])
else:
s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf],
feed_dict={x_tf: x_np})
else:
s_tf = tf.svd(x_tf,
compute_uv=compute_uv,
full_matrices=full_matrices)
if use_static_shape_:
s_tf_val = sess.run(s_tf)
else:
s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})
if compute_uv:
u_np, s_np, v_np = np.linalg.svd(x_np,
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
s_np = np.linalg.svd(x_np,
compute_uv=compute_uv,
full_matrices=full_matrices)
# We explicitly avoid the situation where numpy eliminates a first
# dimension that is equal to one
s_np = np.reshape(s_np, s_tf_val.shape)
CompareSingularValues(self, s_np, s_tf_val)
if compute_uv:
CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]))
CompareSingularVectors(self,
np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,
min(shape_[-2:]))
CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,
full_matrices)
CheckUnitary(self, u_tf_val)
CheckUnitary(self, v_tf_val)
Example 2: Test
def Test(self):
np.random.seed(1)
if dtype_ in (np.float32, np.float64):
  x = np.random.uniform(
      low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
elif dtype_ == np.complex64:
  # Parenthesized so the imaginary part is actually added to x; as a
  # separate statement the `+ 1j * ...` expression would be discarded.
  x = (np.random.uniform(
      low=-1.0, high=1.0,
      size=np.prod(shape_)).reshape(shape_).astype(np.float32) +
       1j * np.random.uniform(
           low=-1.0, high=1.0,
           size=np.prod(shape_)).reshape(shape_).astype(np.float32))
else:
  x = (np.random.uniform(
      low=-1.0, high=1.0,
      size=np.prod(shape_)).reshape(shape_).astype(np.float64) +
       1j * np.random.uniform(
           low=-1.0, high=1.0,
           size=np.prod(shape_)).reshape(shape_).astype(np.float64))
for compute_uv in False, True:
for full_matrices in False, True:
with self.test_session():
if x.ndim == 2:
if compute_uv:
tf_s, tf_u, tf_v = tf.svd(tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
tf_s = tf.svd(tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
if compute_uv:
tf_s, tf_u, tf_v = tf.batch_svd(
tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
tf_s = tf.batch_svd(
tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
if compute_uv:
np_u, np_s, np_v = np.linalg.svd(x,
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
np_s = np.linalg.svd(x,
compute_uv=compute_uv,
full_matrices=full_matrices)
CompareSingularValues(self, np_s, tf_s.eval())
if compute_uv:
CompareSingularVectors(self, np_u, tf_u.eval(), min(shape_[-2:]))
CompareSingularVectors(self, np.conj(np.swapaxes(np_v, -2, -1)),
tf_v.eval(), min(shape_[-2:]))
CheckApproximation(self, x, tf_u, tf_s, tf_v, full_matrices)
CheckUnitary(self, tf_u)
CheckUnitary(self, tf_v)
Example 3: testBatchAndSvd
def testBatchAndSvd(self):
with self.cached_session():
mat = [[1., 2.], [2., 3.]]
batched_mat = tf.expand_dims(mat, [0])
result = tf.matmul(mat, mat).eval()
result_batched = tf.batch_matmul(batched_mat, batched_mat).eval()
self.assertAllEqual(result_batched, np.expand_dims(result, 0))
self.assertAllEqual(
tf.svd(mat, False, True).eval(),
tf.svd(mat, compute_uv=False, full_matrices=True).eval())
Example 4: testWrongDimensions
def testWrongDimensions(self):
# The input to svd should be a tensor of at least rank 2.
scalar = tf.constant(1.)
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 0"):
tf.svd(scalar)
vector = tf.constant([1., 2.])
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 1"):
tf.svd(vector)
Example 5: __tensor_norm__
def __tensor_norm__(self, tensor, order):
  if order in ['Si']:  # Schatten infinity norm: the largest singular value
    s, u, v = tf.svd(tensor, full_matrices=False)
    return tf.norm(s, ord=np.inf)
  elif order[0] == 'S':  # Schatten-p norm: the p-norm of the singular values
    s, u, v = tf.svd(tensor, full_matrices=False)
    sub_order = int(order[1:])
    return tf.norm(s, ord=sub_order)
  else:  # plain entrywise norm of the given order
    sub_order = int(order)
    return tf.norm(tensor, ord=sub_order)
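For reference, the Schatten-p norm of a matrix is the p-norm of its singular-value vector, so 'S1' above is the nuclear (trace) norm and 'Si' the spectral norm. A minimal standalone check, assuming the usual np/tf imports (the tensor value is illustrative):
t = tf.constant([[3., 0.], [0., 4.]])  # singular values: 4 and 3
s = tf.svd(t, compute_uv=False)
nuclear = tf.norm(s, ord=1)        # Schatten-1 ('S1'): 4 + 3 = 7
spectral = tf.norm(s, ord=np.inf)  # Schatten-inf ('Si'): max = 4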
Example 6: Test
def Test(self):
np.random.seed(1)
x = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if dtype_ == np.float32:
atol = 1e-4
else:
atol = 1e-14
for compute_uv in False, True:
for full_matrices in False, True:
with self.test_session():
if x.ndim == 2:
if compute_uv:
tf_s, tf_u, tf_v = tf.svd(tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
tf_s = tf.svd(tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
if compute_uv:
tf_s, tf_u, tf_v = tf.batch_svd(
tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
tf_s = tf.batch_svd(
tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
if compute_uv:
np_u, np_s, np_v = np.linalg.svd(x,
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
np_s = np.linalg.svd(x,
compute_uv=compute_uv,
full_matrices=full_matrices)
self.assertAllClose(np_s, tf_s.eval(), atol=atol)
if compute_uv:
CompareSingularVectors(self, np_u, tf_u.eval(), min(shape_[-2:]),
atol)
CompareSingularVectors(self, np.swapaxes(np_v, -2, -1), tf_v.eval(),
min(shape_[-2:]), atol)
CheckApproximation(self, x, tf_u, tf_s, tf_v, full_matrices, atol)
CheckUnitary(self, tf_u)
CheckUnitary(self, tf_v)
Example 7: random_orthonormal_initializer
def random_orthonormal_initializer(shape, dtype=tf.float32,
partition_info=None): # pylint: disable=unused-argument
"""Variable initializer that produces a random orthonormal matrix."""
if len(shape) != 2 or shape[0] != shape[1]:
raise ValueError("Expecting square shape, got %s" % shape)
_, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)
return u
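A hypothetical way to wire this in (the variable name and shape are illustrative): pass the function to tf.get_variable, whose initializer callable receives the same (shape, dtype, partition_info) signature used above. Because the left singular vectors of a square Gaussian matrix form an orthonormal basis, the result satisfies w^T w ≈ I.
# Hypothetical usage; "proj" and the 64x64 shape are illustrative.
w = tf.get_variable("proj", shape=[64, 64],
                    initializer=random_orthonormal_initializer)
orthogonality_check = tf.matmul(w, w, transpose_a=True)  # ≈ identity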
Example 8: pseudo_inverse
def pseudo_inverse(mat, eps=1e-10):
  """Computes the pseudo-inverse of mat, treating singular values below eps as 0."""
  s, u, v = tf.svd(mat)
  # Singular values below eps are zeroed rather than inverted; the original
  # also reassigned eps inside the body, which made the parameter dead code.
  si = tf.where(tf.less(s, eps), tf.zeros_like(s), 1. / s)
  return u @ tf.diag(si) @ tf.transpose(v)
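A quick sanity check (hypothetical usage, not from the original source): the construction above returns u diag(si) v^T, which matches the true pseudo-inverse when mat is symmetric (u and v then coincide), so we test a rank-deficient symmetric matrix against the Moore-Penrose property A A+ A = A.
# Hypothetical check of the Moore-Penrose property A @ pinv(A) @ A ≈ A.
a = tf.constant([[2., 0.], [0., 0.]])  # symmetric and rank-deficient on purpose
a_pinv = pseudo_inverse(a)
with tf.Session() as sess:
  print(sess.run(a @ a_pinv @ a))  # ≈ [[2., 0.], [0., 0.]]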
Example 9: symsqrt
def symsqrt(mat, eps=1e-7):
  """Symmetric square root of a symmetric PSD matrix."""
  s, u, v = tf.svd(mat)
  # sqrt is numerically unstable around 0, so clamp small singular values to 0.
  print("Warning: clamping singular values below eps to 0")
  si = tf.where(tf.less(s, eps), tf.zeros_like(s), tf.sqrt(s))
  return u @ tf.diag(si) @ tf.transpose(v)
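And a matching check for symsqrt (again a hypothetical sketch with an illustrative matrix): the symmetric square root of a PSD matrix should multiply back to the original.
b = tf.constant([[4., 0.], [0., 9.]])
b_sqrt = symsqrt(b)
with tf.Session() as sess:
  print(sess.run(b_sqrt @ b_sqrt))  # ≈ [[4., 0.], [0., 9.]]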
Example 10: _iso_from_svd_decomp
def _iso_from_svd_decomp(env, decomp_device=None):
with tf.device(decomp_device):
env_r = tf.reshape(env, (env.shape[0], -1))
s, u, v = tf.svd(env_r)
vh = tf.linalg.adjoint(v)
vh = tf.reshape(vh, (vh.shape[0], env.shape[1], env.shape[2]))
return u, s, vh
Example 11: __init__
def __init__(self, target, name, do_inverses=False):
self.name = name
self.target = target
self.do_inverses = do_inverses
self.tf_svd = SvdTuple(tf.svd(target))
self.update_counter = 0
self.init = SvdTuple(
ones(target.shape[0], name=name+"_s_init"),
Identity(target.shape[0], name=name+"_u_init"),
Identity(target.shape[0], name=name+"_v_init"),
Identity(target.shape[0], name=name+"_inv_init"),
)
assert self.tf_svd.s.shape == self.init.s.shape
assert self.tf_svd.u.shape == self.init.u.shape
assert self.tf_svd.v.shape == self.init.v.shape
# assert self.tf_svd.inv.shape == self.init.inv.shape
self.cached = SvdTuple(
tf.Variable(self.init.s, name=name+"_s"),
tf.Variable(self.init.u, name=name+"_u"),
tf.Variable(self.init.v, name=name+"_v"),
tf.Variable(self.init.inv, name=name+"_inv"),
)
self.s = self.cached.s
self.u = self.cached.u
self.v = self.cached.v
self.inv = self.cached.inv
self.holder = SvdTuple(
tf.placeholder(default_dtype, shape=self.cached.s.shape, name=name+"_s_holder"),
tf.placeholder(default_dtype, shape=self.cached.u.shape, name=name+"_u_holder"),
tf.placeholder(default_dtype, shape=self.cached.v.shape, name=name+"_v_holder"),
tf.placeholder(default_dtype, shape=self.cached.inv.shape, name=name+"_inv_holder")
)
self.update_tf_op = tf.group(
self.cached.s.assign(self.tf_svd.s),
self.cached.u.assign(self.tf_svd.u),
self.cached.v.assign(self.tf_svd.v),
self.cached.inv.assign(self.tf_svd.inv)
)
self.update_external_op = tf.group(
self.cached.s.assign(self.holder.s),
self.cached.u.assign(self.holder.u),
self.cached.v.assign(self.holder.v),
)
self.update_externalinv_op = tf.group(
self.cached.inv.assign(self.holder.inv),
)
self.init_ops = (self.s.initializer, self.u.initializer, self.v.initializer,
self.inv.initializer)
Example 12: posterior_mean_and_sample
def posterior_mean_and_sample(self, candidates):
"""Draw samples for test predictions.
Given a Tensor of 'candidates' inputs, returns samples from the posterior
and the posterior mean prediction for those inputs.
Args:
candidates: A (num-examples x num-dims) Tensor containing the inputs for
which to return predictions.
Returns:
y_mean: The posterior mean prediction given these inputs
y_sample: A sample from the posterior of the outputs given these inputs
"""
# Cross-covariance for test predictions
w = tf.identity(self.weights_train)
inds = tf.squeeze(
tf.reshape(
tf.tile(
tf.reshape(tf.range(self.n_out), (self.n_out, 1)),
(1, tf.shape(candidates)[0])), (-1, 1)))
cross_cov = self.cov(tf.tile(candidates, [self.n_out, 1]), self.x_train)
cross_task_cov = self.task_cov(tf.one_hot(inds, self.n_out), w)
cross_cov *= cross_task_cov
# Test mean prediction
y_mean = tf.matmul(cross_cov, tf.matmul(self.input_inv, self.y_train))
# Test sample predictions
# Note this can be done much more efficiently using Kronecker products
# if all tasks are fully observed (which we won't assume)
test_cov = (
self.cov(tf.tile(candidates, [self.n_out, 1]),
tf.tile(candidates, [self.n_out, 1])) *
self.task_cov(tf.one_hot(inds, self.n_out),
tf.one_hot(inds, self.n_out)) -
tf.matmul(cross_cov,
tf.matmul(self.input_inv,
tf.transpose(cross_cov))))
# Get the matrix square root through an SVD for drawing samples
# This seems more numerically stable than the Cholesky
s, _, v = tf.svd(test_cov, full_matrices=True)
# A square root needs the square roots of the singular values;
# v diag(s) v^T would just reconstruct test_cov itself.
test_sqrt = tf.matmul(v, tf.matmul(tf.diag(tf.sqrt(s)), tf.transpose(v)))
y_sample = (
tf.matmul(
test_sqrt,
tf.random_normal([tf.shape(test_sqrt)[0], 1], dtype=tf.float64)) +
y_mean)
y_sample = (
tf.transpose(tf.reshape(y_sample,
(self.n_out, -1))) * self.input_std +
self.input_mean)
return y_mean, y_sample
Example 13: s_norm
def s_norm(tensor, order):
  s, u, v = tf.svd(tensor, full_matrices=False)
  result = None
  if type(order) in [int, float]:
    result = tf.norm(s, ord=order)
  elif type(order) in [list, tuple]:
    result = [tf.norm(s, ord=order_item) for order_item in order]
  else:
    raise ValueError('Unrecognized order of s_norm: %s' % str(order))
  return s, result
Example 14: set_similarity
def set_similarity(self, valid_examples=None, pca=True):
if valid_examples is None:
if pca:
valid_examples = np.array(range(20))
else:
valid_examples = np.array(range(self.num_vocabulary))
self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
self.normalized_embeddings = self.g_embeddings / self.norm
# PCA
if self.num_vocabulary >= 20 and pca:
emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
s, u, v = tf.svd(emb)
u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
self.valid_embeddings = tf.nn.embedding_lookup(
self.normalized_embeddings, self.valid_dataset)
self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example 15: _uinv_decomp
def _uinv_decomp(X_sq, cutoff=0.0, decomp_mode="eigh", decomp_device=None):
with tf.device(decomp_device):
if decomp_mode == "svd":
# X_sq is hermitian and positive, so its eigenvalues equal its singular values
e, v, _ = tf.svd(X_sq)
elif decomp_mode == "eigh":
e, v = tf.linalg.eigh(X_sq)
e = tf.cast(
e, e.dtype.real_dtype) # The values here should be real anyway
else:
raise ValueError("Invalid decomp_mode: {}".format(decomp_mode))
# NOTE: Negative values are always due to precision problems.
# NOTE: Inaccuracies here mean the final tensor is not exactly isometric!
e_pinvsqrt = tf.where(e <= cutoff, tf.zeros_like(e), 1 / tf.sqrt(e))
e_pinvsqrt_mat = tf.diag(tf.cast(e_pinvsqrt, v.dtype))
X_uinv = tf.matmul(v @ e_pinvsqrt_mat, v, adjoint_b=True)
return X_uinv, e
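Since _uinv_decomp returns the pseudo-inverted square root X_sq^{-1/2}, a hedged sanity check (matrix value illustrative, session-based TF 1.x usage assumed) is that X_uinv @ X_sq @ X_uinv recovers the identity on the non-null subspace:
x_sq = tf.constant([[4., 0.], [0., 9.]])
x_uinv, e = _uinv_decomp(x_sq, decomp_mode="eigh")
with tf.Session() as sess:
  print(sess.run(x_uinv @ x_sq @ x_uinv))  # ≈ identity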