This article collects typical usage examples of the Python method tensorflow.matrix_set_diag. If you have been wondering how tensorflow.matrix_set_diag is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also browse further usage examples from the tensorflow module.
The following shows 15 code examples of tensorflow.matrix_set_diag, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
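Before the examples, here is a minimal sketch (mine, not taken from any snippet below) of what the op does: tf.matrix_set_diag(input, diagonal) returns a copy of input, a matrix or batch of matrices, with the main diagonal of each innermost matrix replaced by diagonal; for an input of shape [..., M, N] the diagonal must have shape [..., min(M, N)]. In TensorFlow 2.x the same op is exposed as tf.linalg.set_diag. The sketch assumes TensorFlow 1.x, matching the examples on this page.

import tensorflow as tf

# Replace the diagonal of a 2x2 matrix with zeros.
mat = tf.constant([[1., 2.],
                   [3., 4.]])
zeroed = tf.matrix_set_diag(mat, tf.zeros([2]))

with tf.Session() as sess:
    print(sess.run(zeroed))  # [[0. 2.]
                             #  [3. 0.]]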
Example 1: fit
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def fit(self, x=None, y=None):
    # p(coeffs | x, y) = Normal(coeffs |
    #   mean = (1/noise_variance) (1/noise_variance x^T x + I)^{-1} x^T y,
    #   covariance = (1/noise_variance x^T x + I)^{-1})
    # TODO(trandustin): We newly fit the data at each call. Extend to do
    # Bayesian updating.
    kernel_matrix = tf.matmul(x, x, transpose_a=True) / self.noise_variance
    coeffs_precision = tf.matrix_set_diag(
        kernel_matrix, tf.matrix_diag_part(kernel_matrix) + 1.)
    coeffs_precision_tril = tf.linalg.cholesky(coeffs_precision)
    self.coeffs_precision_tril_op = tf.linalg.LinearOperatorLowerTriangular(
        coeffs_precision_tril)
    self.coeffs_mean = self.coeffs_precision_tril_op.solvevec(
        self.coeffs_precision_tril_op.solvevec(tf.einsum('nm,n->m', x, y)),
        adjoint=True) / self.noise_variance
    # TODO(trandustin): To be fully Keras-compatible, return History object.
    return
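A note on Example 1 (my own gloss, not from the source): the nested solvevec calls are just a forward and a backward substitution against the Cholesky factor, so together they solve the dense posterior-precision system. A hedged NumPy sketch of the equivalent computation, with the hypothetical helper name coeffs_mean_dense:

import numpy as np

def coeffs_mean_dense(x, y, noise_variance):
    # Solves (x^T x / noise_variance + I) m = x^T y / noise_variance.
    precision = x.T @ x / noise_variance + np.eye(x.shape[1])
    return np.linalg.solve(precision, x.T @ y / noise_variance)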
Example 2: testSquareBatch
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def testSquareBatch(self):
    with self.test_session(use_gpu=self._use_gpu):
        v_batch = np.array([[-1.0, -2.0, -3.0],
                            [-4.0, -5.0, -6.0]])
        mat_batch = np.array(
            [[[1.0, 0.0, 3.0],
              [0.0, 2.0, 0.0],
              [1.0, 0.0, 3.0]],
             [[4.0, 0.0, 4.0],
              [0.0, 5.0, 0.0],
              [2.0, 0.0, 6.0]]])
        mat_set_diag_batch = np.array(
            [[[-1.0, 0.0, 3.0],
              [0.0, -2.0, 0.0],
              [1.0, 0.0, -3.0]],
             [[-4.0, 0.0, 4.0],
              [0.0, -5.0, 0.0],
              [2.0, 0.0, -6.0]]])
        output = tf.matrix_set_diag(mat_batch, v_batch)
        self.assertEqual((2, 3, 3), output.get_shape())
        self.assertAllEqual(mat_set_diag_batch, output.eval())
Example 3: testRectangularBatch
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def testRectangularBatch(self):
    with self.test_session(use_gpu=self._use_gpu):
        v_batch = np.array([[-1.0, -2.0],
                            [-4.0, -5.0]])
        mat_batch = np.array(
            [[[1.0, 0.0, 3.0],
              [0.0, 2.0, 0.0]],
             [[4.0, 0.0, 4.0],
              [0.0, 5.0, 0.0]]])
        mat_set_diag_batch = np.array(
            [[[-1.0, 0.0, 3.0],
              [0.0, -2.0, 0.0]],
             [[-4.0, 0.0, 4.0],
              [0.0, -5.0, 0.0]]])
        output = tf.matrix_set_diag(mat_batch, v_batch)
        self.assertEqual((2, 2, 3), output.get_shape())
        self.assertAllEqual(mat_set_diag_batch, output.eval())
Example 4: _pos_to_proximity
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def _pos_to_proximity(self, pos, reuse=True):  # [batch_size, n_max, 3]
    with tf.variable_scope('pos_to_proximity', reuse=reuse):
        pos_1 = tf.expand_dims(pos, axis=2)
        pos_2 = tf.expand_dims(pos, axis=1)
        pos_sub = tf.subtract(pos_1, pos_2)
        proximity = tf.square(pos_sub)
        proximity = tf.reduce_sum(proximity, 3)
        proximity = tf.sqrt(proximity + 1e-5)
        proximity = tf.reshape(proximity, [self.batch_size, self.n_max, self.n_max])
        proximity = tf.multiply(proximity, self.mask)
        proximity = tf.multiply(proximity, tf.transpose(self.mask, perm=[0, 2, 1]))
        proximity = tf.matrix_set_diag(proximity, [[0] * self.n_max] * self.batch_size)
    return proximity
Example 5: _get_normed_sym_tf
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def _get_normed_sym_tf(X_, batch_size):
    """
    Compute the normalized and symmetrized probability matrix from
    relative probabilities X_, where X_ is a Tensorflow Tensor

    Parameters
    ----------
    X_ : 2-d Tensor (N, N)
        asymmetric probabilities. For instance, X_(i, j) = P(i|j)

    Returns
    -------
    P : 2-d Tensor (N, N)
        symmetric probabilities, making the assumption that P(i|j) = P(j|i)
        Diagonals are all 0s."""
    toset = tf.constant(0, shape=[batch_size], dtype=X_.dtype)
    X_ = tf.matrix_set_diag(X_, toset)
    norm_facs = tf.reduce_sum(X_, axis=0, keep_dims=True)
    X_ = X_ / norm_facs
    X_ = 0.5 * (X_ + tf.transpose(X_))
    return X_
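For readers on TensorFlow 2.x: keep_dims has since been renamed keepdims and tf.matrix_set_diag lives at tf.linalg.set_diag. Below is a hedged re-spelling of the same routine against the newer names (the _v2 suffix is mine); it is intended to behave identically but is not part of the source repository.

import tensorflow as tf

def _get_normed_sym_tf_v2(X_, batch_size):
    # Zero the diagonal, column-normalize, then symmetrize.
    X_ = tf.linalg.set_diag(X_, tf.zeros([batch_size], dtype=X_.dtype))
    X_ = X_ / tf.reduce_sum(X_, axis=0, keepdims=True)
    return 0.5 * (X_ + tf.transpose(X_))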
Example 6: _update_memory
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def _update_memory(self, old_memory, w_samples, new_z_mean, new_z_var):
    """Setting new_z_var=0 for sample based update."""
    old_mean, old_cov = old_memory
    wR = self.get_w_to_z_mean(w_samples, old_memory.M_mean)
    wU, wUw = self._read_cov(w_samples, old_memory)
    sigma_z = wUw + new_z_var + self._obs_noise_stddev**2  # [S, B]
    delta = new_z_mean - wR  # [S, B, C]
    c_z = wU / tf.expand_dims(sigma_z, -1)  # [S, B, M]
    posterior_mean = old_mean + tf.einsum('sbm,sbc->bmc', c_z, delta)
    posterior_cov = old_cov - tf.einsum('sbm,sbn->bmn', c_z, wU)
    # Clip diagonal elements for numerical stability.
    posterior_cov = tf.matrix_set_diag(
        posterior_cov,
        tf.clip_by_value(tf.matrix_diag_part(posterior_cov), EPSILON, 1e10))
    new_memory = MemoryState(M_mean=posterior_mean, M_cov=posterior_cov)
    return new_memory
Example 7: CombineArcAndRootPotentials
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def CombineArcAndRootPotentials(arcs, roots):
    """Combines arc and root potentials into a single set of potentials.

    Args:
      arcs: [B,N,N] tensor of batched arc potentials.
      roots: [B,N] matrix of batched root potentials.

    Returns:
      [B,N,N] tensor P of combined potentials where
        P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
    """
    # All arguments must have statically-known rank.
    check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
    check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

    # All arguments must share the same type.
    dtype = arcs.dtype.base_dtype
    check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

    roots_shape = tf.shape(roots)
    arcs_shape = tf.shape(arcs)
    batch_size = roots_shape[0]
    num_tokens = roots_shape[1]
    with tf.control_dependencies([
            tf.assert_equal(batch_size, arcs_shape[0]),
            tf.assert_equal(num_tokens, arcs_shape[1]),
            tf.assert_equal(num_tokens, arcs_shape[2])]):
        return tf.matrix_set_diag(arcs, roots)
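To make the contract of Example 7 concrete, here is a small hedged usage sketch (mine; it calls tf.matrix_set_diag directly and skips the shape and dtype checks of the original helper):

import numpy as np
import tensorflow as tf

arcs = tf.constant(np.arange(8, dtype=np.float32).reshape(2, 2, 2))  # [B=2, N=2, N=2]
roots = tf.constant([[100., 101.],
                     [102., 103.]])                                   # [B=2, N=2]
combined = tf.matrix_set_diag(arcs, roots)
# combined[b, t, t] == roots[b, t]; every off-diagonal entry keeps arcs[b, s, t].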
Example 8: correlation_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def correlation_loss(self, opts, input_):
    """
    Independence test based on Pearson's correlation.
    Keep in mind that this captures only linear dependencies.
    However, for a multivariate Gaussian, independence is equivalent
    to zero correlation.
    """
    batch_size = self.get_batch_size(opts, input_)
    dim = int(input_.get_shape()[1])
    transposed = tf.transpose(input_, perm=[1, 0])
    mean = tf.reshape(tf.reduce_mean(transposed, axis=1), [-1, 1])
    centered_transposed = transposed - mean  # Broadcasting mean
    cov = tf.matmul(centered_transposed, centered_transposed, transpose_b=True)
    cov = cov / (batch_size - 1)
    # cov = tf.Print(cov, [cov], "cov")
    sigmas = tf.sqrt(tf.diag_part(cov) + 1e-5)
    # sigmas = tf.Print(sigmas, [sigmas], "sigmas")
    sigmas = tf.reshape(sigmas, [1, -1])
    sigmas = tf.matmul(sigmas, sigmas, transpose_a=True)
    # sigmas = tf.Print(sigmas, [sigmas], "sigmas")
    # Pearson's correlation
    corr = cov / sigmas
    triangle = tf.matrix_set_diag(tf.matrix_band_part(corr, 0, -1), tf.zeros(dim))
    # triangle = tf.Print(triangle, [triangle], "triangle")
    loss = tf.reduce_sum(tf.square(triangle)) / ((dim * dim - dim) / 2.0)
    # loss = tf.Print(loss, [loss], "Correlation loss")
    return loss
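As a rough cross-check of Example 8 (my own sketch, not part of the class): up to the 1e-5 stabilizer, corr is the ordinary Pearson correlation matrix, so the loss can be reproduced in NumPy as the mean squared off-diagonal correlation.

import numpy as np

x = np.random.randn(256, 8)          # batch of 256 samples, dim = 8
corr = np.corrcoef(x, rowvar=False)  # [8, 8] correlation matrix
upper = np.triu(corr, k=1)           # strictly upper triangle, as in the TF code
dim = x.shape[1]
loss = np.sum(upper ** 2) / ((dim * dim - dim) / 2.0)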
Example 9: _link
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def _link(self, prev_link, prev_precedence_weights, write_weights):
    """Calculates the new link graphs.

    For each write head, the link is a directed graph (represented by a matrix
    with entries in range [0, 1]) whose vertices are the memory locations, and
    an edge indicates temporal ordering of writes.

    Args:
      prev_link: A tensor of shape `[batch_size, num_writes, memory_size,
        memory_size]` representing the previous link graphs for each write
        head.
      prev_precedence_weights: A tensor of shape `[batch_size, num_writes,
        memory_size]` which is the previous "aggregated" write weights for
        each write head.
      write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`
        containing the new locations in memory written to.

    Returns:
      A tensor of shape `[batch_size, num_writes, memory_size, memory_size]`
      containing the new link graphs for each write head.
    """
    with tf.name_scope('link'):
        batch_size = tf.shape(prev_link)[0]
        write_weights_i = tf.expand_dims(write_weights, 3)
        write_weights_j = tf.expand_dims(write_weights, 2)
        prev_precedence_weights_j = tf.expand_dims(prev_precedence_weights, 2)
        prev_link_scale = 1 - write_weights_i - write_weights_j
        new_link = write_weights_i * prev_precedence_weights_j
        link = prev_link_scale * prev_link + new_link
        # Return the link with the diagonal set to zero, to remove self-looping
        # edges.
        return tf.matrix_set_diag(
            link,
            tf.zeros(
                [batch_size, self._num_writes, self._memory_size],
                dtype=link.dtype))
Example 10: build_variational
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def build_variational(hps, kernel, z_pos, x, n_particles):
    bn = zs.BayesianNet()
    z_mean = tf.get_variable(
        'z/mean', [hps.n_z], hps.dtype, tf.zeros_initializer())
    z_cov_raw = tf.get_variable(
        'z/cov_raw', initializer=tf.eye(hps.n_z, dtype=hps.dtype))
    z_cov_tril = tf.matrix_set_diag(
        tf.matrix_band_part(z_cov_raw, -1, 0),
        tf.nn.softplus(tf.matrix_diag_part(z_cov_raw)))
    fz = bn.multivariate_normal_cholesky(
        'fz', z_mean, z_cov_tril, n_samples=n_particles)
    bn.stochastic('fx', gp_conditional(z_pos, fz, x, False, kernel))
    return bn
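The matrix_band_part + matrix_set_diag + softplus combination in Example 10 is a standard way to map an unconstrained square variable to a valid Cholesky factor (lower triangular with a strictly positive diagonal). A minimal standalone sketch of just that transform, written against the TF 1.x API (the variable name raw is mine):

import tensorflow as tf

raw = tf.get_variable('raw', shape=[4, 4])       # unconstrained square matrix
tril = tf.matrix_set_diag(
    tf.matrix_band_part(raw, -1, 0),             # keep the lower triangle
    tf.nn.softplus(tf.matrix_diag_part(raw)))    # force a positive diagonal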
Example 11: get_moments
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def get_moments(x):
    """Gets first and second moments of input."""
    if isinstance(x, ed.RandomVariable):
        mean = x.distribution.mean()
        variance = x.distribution.variance()
        try:
            covariance = x.distribution.covariance()
        except NotImplementedError:
            covariance = tf.zeros(x.shape.concatenate(x.shape[-1]), dtype=x.dtype)
            covariance = tf.matrix_set_diag(covariance, variance)
    else:
        mean = x
        variance = tf.zeros_like(x)
        covariance = tf.zeros(x.shape.concatenate(x.shape[-1]), dtype=x.dtype)
    return mean, variance, covariance
Example 12: quadratic_regression_pd
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def quadratic_regression_pd(SA, costs, diag_cost=False):
    assert not diag_cost
    global global_step
    dsa = SA.shape[-1]
    C = tf.get_variable('cost_mat{}'.format(global_step), shape=[dsa, dsa],
                        dtype=tf.float32,
                        initializer=tf.random_uniform_initializer(minval=-0.1, maxval=0.1))
    L = tf.matrix_band_part(C, -1, 0)
    L = tf.matrix_set_diag(L, tf.maximum(tf.matrix_diag_part(L), 0.0))
    LL = tf.matmul(L, tf.transpose(L))
    c = tf.get_variable('cost_vec{}'.format(global_step), shape=[dsa],
                        dtype=tf.float32, initializer=tf.zeros_initializer())
    b = tf.get_variable('cost_bias{}'.format(global_step), shape=[],
                        dtype=tf.float32, initializer=tf.zeros_initializer())
    s_ = tf.placeholder(tf.float32, [None, dsa])
    c_ = tf.placeholder(tf.float32, [None])
    pred_cost = 0.5 * tf.einsum('na,ab,nb->n', s_, LL, s_) + \
        tf.einsum('na,a->n', s_, c) + b
    mse = tf.reduce_mean(tf.square(pred_cost - c_))
    opt = tf.train.MomentumOptimizer(1e-3, 0.9).minimize(mse)
    N = SA.shape[0]
    SA = SA.reshape([-1, dsa])
    costs = costs.reshape([-1])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for itr in tqdm.trange(1000, desc='Fitting cost'):
            _, m = sess.run([opt, mse], feed_dict={
                s_: SA,
                c_: costs,
            })
            if itr == 0 or itr == 999:
                print('mse itr {}: {}'.format(itr, m))
        cost_mat, cost_vec = sess.run((LL, c))
    global_step += 1
    return cost_mat, cost_vec
Example 13: testSquare
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def testSquare(self):
    with self.test_session(use_gpu=self._use_gpu):
        v = np.array([1.0, 2.0, 3.0])
        mat = np.array([[0.0, 1.0, 0.0],
                        [1.0, 0.0, 1.0],
                        [1.0, 1.0, 1.0]])
        mat_set_diag = np.array([[1.0, 1.0, 0.0],
                                 [1.0, 2.0, 1.0],
                                 [1.0, 1.0, 3.0]])
        output = tf.matrix_set_diag(mat, v)
        self.assertEqual((3, 3), output.get_shape())
        self.assertAllEqual(mat_set_diag, output.eval())
Example 14: testRectangular
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def testRectangular(self):
    with self.test_session(use_gpu=self._use_gpu):
        v = np.array([3.0, 4.0])
        mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
        expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
        output = tf.matrix_set_diag(mat, v)
        self.assertEqual((2, 3), output.get_shape())
        self.assertAllEqual(expected, output.eval())

        v = np.array([3.0, 4.0])
        mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
        expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
        output = tf.matrix_set_diag(mat, v)
        self.assertEqual((3, 2), output.get_shape())
        self.assertAllEqual(expected, output.eval())
Example 15: testInvalidShape
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_set_diag [as alias]
def testInvalidShape(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
        tf.matrix_set_diag(0, [0])
    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
        tf.matrix_set_diag([[0]], 0)