This article collects typical usage examples of the tensorflow.trace method in Python. If you are wondering exactly how tensorflow.trace is used, what it does, or want to see it in real code, the curated examples below may help. You can also explore further usage examples of the containing module, tensorflow.
The following presents 15 code examples of tensorflow.trace, sorted by popularity by default.
Example 1: get_value_updater
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def get_value_updater(self, data, new_mean, gamma_weighted, gamma_sum):
    tf_new_differences = tf.subtract(data, tf.expand_dims(new_mean, 0))
    tf_sq_dist_matrix = tf.matmul(tf.expand_dims(tf_new_differences, 2), tf.expand_dims(tf_new_differences, 1))
    tf_new_covariance = tf.reduce_sum(tf_sq_dist_matrix * tf.expand_dims(tf.expand_dims(gamma_weighted, 1), 2), 0)
    if self.has_prior:
        tf_new_covariance = self.get_prior_adjustment(tf_new_covariance, gamma_sum)
    tf_s, tf_u, _ = tf.svd(tf_new_covariance)
    tf_required_eigvals = tf_s[:self.rank]
    tf_required_eigvecs = tf_u[:, :self.rank]
    tf_new_baseline = (tf.trace(tf_new_covariance) - tf.reduce_sum(tf_required_eigvals)) / self.tf_rest
    tf_new_eigvals = tf_required_eigvals - tf_new_baseline
    tf_new_eigvecs = tf.transpose(tf_required_eigvecs)
    return tf.group(
        self.tf_baseline.assign(tf_new_baseline),
        self.tf_eigvals.assign(tf_new_eigvals),
        self.tf_eigvecs.assign(tf_new_eigvecs)
    )
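In this example, tf.trace supplies the total variance of the updated covariance matrix: subtracting the retained top-rank eigenvalues and dividing by the number of remaining dimensions (presumably what self.tf_rest holds) yields an isotropic "baseline" variance, as in probabilistic PCA. A minimal standalone sketch of that decomposition, assuming TF 1.x and hypothetical names:
# Sketch only (TF 1.x, hypothetical names): split a covariance matrix into a
# rank-k part plus an isotropic remainder, mirroring the update above.
def lowrank_plus_isotropic(cov, rank):
    d = tf.cast(tf.shape(cov)[0], cov.dtype)
    s, u, _ = tf.svd(cov)  # for a symmetric PSD matrix, singular values equal eigenvalues
    kept = s[:rank]
    baseline = (tf.trace(cov) - tf.reduce_sum(kept)) / (d - rank)  # residual variance per dimension
    return kept - baseline, u[:, :rank], baseline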
Example 2: _mmd2
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    m = tf.cast(K_XX.get_shape()[0], tf.float32)
    n = tf.cast(K_YY.get_shape()[0], tf.float32)
    if biased:
        mmd2 = (tf.reduce_sum(K_XX) / (m * m)
                + tf.reduce_sum(K_YY) / (n * n)
                - 2 * tf.reduce_sum(K_XY) / (m * n))
    else:
        if const_diagonal is not False:
            trace_X = m * const_diagonal
            trace_Y = n * const_diagonal
        else:
            trace_X = tf.trace(K_XX)
            trace_Y = tf.trace(K_YY)
        mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
                + (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
                - 2 * tf.reduce_sum(K_XY) / (m * n))
    return mmd2
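The unbiased branch uses tf.trace to drop the self-similarity (diagonal) terms from K_XX and K_YY. A hedged usage sketch, assuming TF 1.x and an RBF kernel (the helper name rbf_kernel and the tensors X and Y are illustrative, not part of the original code):
# Usage sketch (TF 1.x, illustrative): build kernel matrices and evaluate MMD^2.
def rbf_kernel(a, b, gamma=1.0):
    sq_dist = (tf.reduce_sum(tf.square(a), 1, keep_dims=True)
               - 2.0 * tf.matmul(a, b, transpose_b=True)
               + tf.transpose(tf.reduce_sum(tf.square(b), 1, keep_dims=True)))
    return tf.exp(-gamma * sq_dist)

# X: m x d samples from P, Y: n x d samples from Q (both with static batch sizes).
mmd2_estimate = _mmd2(rbf_kernel(X, X), rbf_kernel(X, Y), rbf_kernel(Y, Y),
                      const_diagonal=1.0)  # RBF kernels have k(x, x) = 1 on the diagonal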
Example 3: general_orthog_correction
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def general_orthog_correction(mean, std, k, scale_svs=None):
    std = tf.clip_by_value(std, 1e-2, np.inf)
    if len(std.get_shape()) > 1:
        # largest singular value of the covariance matrix for each row
        iso_std = tf.expand_dims(tf.reduce_max(std, axis=1), axis=1)
    else:
        iso_std = std
    r = mean / iso_std
    A = .5 * tf.matmul(tf.transpose(r), r)
    tr = tf.trace(A)
    svs = tf.sqrt(util.differentiable_sq_singular_vals(A))
    if scale_svs is not None:
        svs *= scale_svs
        tr *= scale_svs
    lb = lpbessel_svs(svs, k)
    return tr - lb
Example 4: _mmd2
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    m = tf.cast(tf.shape(K_XX)[0], tf.float32)
    n = tf.cast(tf.shape(K_YY)[0], tf.float32)
    if biased:
        mmd2 = (tf.reduce_sum(K_XX, keep_dims=True) / (m * m)
                + tf.reduce_sum(K_YY, keep_dims=True) / (n * n)
                - 2 * tf.reduce_sum(K_XY, keep_dims=True) / (m * n))
    else:
        if const_diagonal is not False:
            trace_X = m * const_diagonal
            trace_Y = n * const_diagonal
        else:
            trace_X = tf.trace(K_XX)
            trace_Y = tf.trace(K_YY)
        mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
                + (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
                - 2 * tf.reduce_sum(K_XY) / (m * n))
    return mmd2
Example 5: test_trace
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def test_trace(self):
    t = tf.trace(self.random(3, 3))
    self.check(t)
Example 6: compare
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def compare(self, x):
    np_ans = np.trace(x, axis1=-2, axis2=-1)
    with self.test_session(use_gpu=True):
        tf_ans = tf.trace(x).eval()
    self.assertAllClose(tf_ans, np_ans)
Example 7: trace
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def trace(self, a):
    return tf.trace(a)
Example 8: block_trace
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def block_trace(self, X, m, n):
    blocks = []
    for i in range(n):
        blocks.append([])
        for j in range(n):
            block = self.trace(X[..., i*m:(i+1)*m, j*m:(j+1)*m])
            blocks[-1].append(block)
    return self.pack([
        self.pack([
            b for b in block
        ])
        for block in blocks
    ])
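block_trace views X as an n x n grid of m x m blocks and collects the trace of each block; self.trace and self.pack are thin backend wrappers (pack is the pre-1.0 name for tf.stack). A standalone sketch of the same operation for a plain 2D matrix, assuming TF 1.x:
# Sketch (TF 1.x): trace of every m x m block of an (n*m) x (n*m) matrix X.
def block_trace_2d(X, m, n):
    rows = []
    for i in range(n):
        row = [tf.trace(X[i * m:(i + 1) * m, j * m:(j + 1) * m]) for j in range(n)]
        rows.append(tf.stack(row))
    return tf.stack(rows)  # shape (n, n)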
Example 9: pose_smoothness
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def pose_smoothness(poses, global_only=False):
    """
    Poses is F x 24 x 3 x 3.
    Computes \sum ||p_i - p_{i+1}|| on the poses in rotation-matrix space.
    It computes the angle between the two rotations via
    (tr(R) - 1) / 2 = cos(theta).
    Penalizing acos((tr(R) - 1) / 2) directly produces NaNs, so instead
    minimize (1 - tr(R_1*R_2')) / 2 = -cos(theta) of R_1*R_2',
    which has its minimum at -1.
    """
    # These are F-1 x 24 x 3 x 3
    curr_pose = poses[:-1]
    next_pose = poses[1:]
    RRt = tf.matmul(curr_pose, next_pose, transpose_b=True)
    # For min (1 - tr(RR_T)) / 2
    costheta = (tf.trace(RRt) - 1) / 2.
    target = tf.ones_like(costheta)
    if global_only:
        print('Pose smoothness increased on global!')
        weights_global = 10 * tf.expand_dims(tf.ones_like(costheta[:, 0]), 1)
        weights_joints = tf.ones_like(costheta[:, 1:])
        weights = tf.concat([weights_global, weights_joints], 1)
    else:
        weights = tf.ones_like(costheta)
    return tf.losses.mean_squared_error(target, costheta, weights=weights)
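The loss relies on the identity tr(R) = 1 + 2*cos(theta) for a 3x3 rotation matrix, so (tf.trace(RRt) - 1) / 2 is the cosine of the angle between consecutive poses and is pushed toward 1 without the NaN-prone acos. A small NumPy check of the identity (illustrative only):
# Numeric check (illustrative): (tr(R) - 1) / 2 equals cos(theta) for a rotation about z.
import numpy as np

theta = 0.3
Rz = np.array([[np.cos(theta), -np.sin(theta), 0.0],
               [np.sin(theta),  np.cos(theta), 0.0],
               [0.0,            0.0,           1.0]])
assert np.isclose((np.trace(Rz) - 1.0) / 2.0, np.cos(theta))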
Example 10: init_pose
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def init_pose(pred_Rs, init_pose, weights=None):
    """
    Should stay close to the initial weights.
    pred_Rs is N x 24 x 3 x 3.
    init_pose is 72-D (axis-angle) and is converted to rotation matrices via batch_rodrigues.
    """
    init_Rs = batch_rodrigues(tf.reshape(init_pose, [-1, 3]))
    init_Rs = tf.reshape(init_Rs, [-1, 24, 3, 3])
    RRt = tf.matmul(init_Rs, pred_Rs, transpose_b=True)
    costheta = (tf.trace(RRt) - 1) / 2.
    target = tf.ones_like(costheta)
    if weights is None:
        weights = tf.ones_like(costheta)
    return tf.losses.mean_squared_error(target, costheta, weights=weights)
Example 11: _logp
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def _logp(self, result, mean, std, **kwargs):
    n, k = self.shape
    base_logp = tf.reduce_sum(util.dists.gaussian_log_density(result, mean=mean, stddev=std))
    cxu = tf.matmul(tf.transpose(result/std), mean/std)
    svs = tf.sqrt(util.differentiable_sq_singular_vals(cxu))
    lb = lpbessel_svs(svs, k)
    lp = base_logp + lb - tf.trace(cxu)
    return lp
Example 12: HSIC
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def HSIC(self, c_v, c_w):
    N = tf.shape(c_v)[0]
    H = tf.ones((N, N)) * tf.cast((1/N), tf.float32) * (-1) + tf.eye(N)
    K_1 = tf.matmul(c_v, tf.transpose(c_v))
    K_2 = tf.matmul(c_w, tf.transpose(c_w))
    rst = tf.matmul(K_1, H)
    rst = tf.matmul(rst, K_2)
    rst = tf.matmul(rst, H)
    rst = tf.trace(rst)
    return rst
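Here tf.trace collapses the doubly centered product of the two Gram matrices into a scalar, i.e. HSIC = tr(K_1 H K_2 H), where H = I - (1/N) * 11^T is the centering matrix constructed by the expression for H above. A minimal NumPy cross-check of the same quantity, for illustration:
# NumPy cross-check (illustrative): HSIC as trace(K1 @ H @ K2 @ H) with linear kernels.
import numpy as np

def hsic_np(c_v, c_w):
    n = c_v.shape[0]
    H = np.eye(n) - np.ones((n, n)) / n  # centering matrix
    K_1, K_2 = c_v @ c_v.T, c_w @ c_w.T  # same linear kernels as the TF version
    return np.trace(K_1 @ H @ K_2 @ H)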
Example 13: compute_eigenvals
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def compute_eigenvals(A):
    A_11 = A[:, :, 0, 0]  # (N, P)
    A_12 = A[:, :, 0, 1]
    A_13 = A[:, :, 0, 2]
    A_22 = A[:, :, 1, 1]
    A_23 = A[:, :, 1, 2]
    A_33 = A[:, :, 2, 2]
    I = tf.eye(3)
    p1 = tf.square(A_12) + tf.square(A_13) + tf.square(A_23)  # (N, P)
    q = tf.trace(A) / 3  # (N, P)
    p2 = tf.square(A_11 - q) + tf.square(A_22 - q) + tf.square(A_33 - q) + 2 * p1  # (N, P)
    p = tf.sqrt(p2 / 6) + 1e-8  # (N, P)
    N = tf.shape(A)[0]
    q_4d = tf.reshape(q, (N, -1, 1, 1))  # (N, P, 1, 1)
    p_4d = tf.reshape(p, (N, -1, 1, 1))
    B = (1 / p_4d) * (A - q_4d * I)  # (N, P, 3, 3)
    r = tf.clip_by_value(compute_determinant(B) / 2, -1, 1)  # (N, P)
    phi = tf.acos(r) / 3  # (N, P)
    eig1 = q + 2 * p * tf.cos(phi)  # (N, P)
    eig3 = q + 2 * p * tf.cos(phi + (2 * math.pi / 3))
    eig2 = 3 * q - eig1 - eig3
    return tf.abs(tf.stack([eig1, eig2, eig3], axis=2))  # (N, P, 3)

# P shape is (N, P, 3), N shape is (N, P, K, 3)
# return shape is (N, P)
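compute_eigenvals is the standard closed-form eigenvalue solution for symmetric 3x3 matrices: tf.trace(A) / 3 gives the mean eigenvalue q, and the deviatoric part is then solved trigonometrically. For positive semi-definite inputs (e.g. local covariance matrices), a quick sanity check is that the returned eigenvalues sum back to the trace; a sketch assuming TF 1.x:
# Sanity-check sketch (TF 1.x): for PSD A, the three eigenvalues must sum to tf.trace(A).
eigs = compute_eigenvals(A)  # (N, P, 3)
max_abs_error = tf.reduce_max(tf.abs(tf.reduce_sum(eigs, axis=-1) - tf.trace(A)))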
Example 14: _compute_pi_tracenorm
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def _compute_pi_tracenorm(left_cov, right_cov):
    left_norm = tf.trace(left_cov) * right_cov.shape.as_list()[0]
    right_norm = tf.trace(right_cov) * left_cov.shape.as_list()[0]
    return tf.sqrt(left_norm / right_norm)
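This appears to be the pi correction used in K-FAC's factored damping: pi = sqrt((tr(A) / dim A) / (tr(B) / dim B)), with the dimensions folded into the numerator and denominator above. A tiny illustrative check, assuming TF 1.x:
# Illustrative check (TF 1.x): pi is the square root of the ratio of mean eigenvalues.
A = tf.diag([2.0, 2.0, 2.0])  # mean eigenvalue 2.0
B = tf.diag([0.5, 0.5])       # mean eigenvalue 0.5
pi = _compute_pi_tracenorm(A, B)  # evaluates to sqrt(2.0 / 0.5) = 2.0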
Example 15: _objective
# Required import: import tensorflow [as alias]
# Or: from tensorflow import trace [as alias]
def _objective(xx, obj):
    """Objective function as custom op so that we can overload gradients."""
    with tf.name_scope('objective'):
        chol = tf.cholesky(xx)
        choli = tf.linalg.inv(chol)
        rq = tf.matmul(choli, tf.matmul(obj, choli, transpose_b=True))
        eigval = tf.matrix_diag_part(rq)
        loss = tf.trace(rq)
        grad = functools.partial(_objective_grad, xx, obj)
    return (loss, eigval, chol), grad