This article collects typical usage examples of Python's tensorflow.matrix_band_part function. If you are wondering what matrix_band_part does, how to call it, or what real-world usage looks like, the curated examples below should help.

The 15 code examples of matrix_band_part shown below are drawn from open-source projects and are sorted by popularity by default.
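
Before the examples, a minimal sketch (TensorFlow 1.x, where matrix_band_part lives in the top-level namespace) of what the two band arguments do; a negative count keeps an entire triangle:

import numpy as np
import tensorflow as tf

x = tf.constant(np.arange(1., 17.).reshape(4, 4), dtype=tf.float32)
lower = tf.matrix_band_part(x, -1, 0)  # lower triangle, including the diagonal
upper = tf.matrix_band_part(x, 0, -1)  # upper triangle, including the diagonal
diag = tf.matrix_band_part(x, 0, 0)    # main diagonal only
band = tf.matrix_band_part(x, 1, 1)    # tridiagonal band
with tf.Session() as sess:
    print(sess.run(lower))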

Example 1: _random_cholesky_array

def _random_cholesky_array(self, shape):
    mat = self._rng.rand(*shape)
    chol = distribution_util.matrix_diag_transform(
        mat, transform=tf.nn.softplus)
    # Zero the upper triangle because we're using this as a true Cholesky
    # factor in our tests.
    return tf.matrix_band_part(chol, -1, 0).eval()
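
Example 1 depends on a test class (self._rng and distribution_util). A self-contained sketch of the same recipe, with illustrative shapes: make the diagonal positive via softplus, then zero the strictly upper triangle so the result is a usable Cholesky factor:

import numpy as np
import tensorflow as tf

mat = tf.constant(np.random.rand(3, 3), dtype=tf.float32)
# Softplus the diagonal so it is strictly positive, mirroring
# distribution_util.matrix_diag_transform above.
mat = tf.matrix_set_diag(mat, tf.nn.softplus(tf.matrix_diag_part(mat)))
chol = tf.matrix_band_part(mat, -1, 0)  # zero the strictly upper triangle
with tf.Session() as sess:
    print(sess.run(chol))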

Example 2: gauss_kl

def gauss_kl(q_mu, q_sqrt, K):
    """
    Compute the KL divergence from
        q(x) = N(q_mu, q_sqrt^2)
    to
        p(x) = N(0, K)
    We assume multiple independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.
    q_mu is a matrix; each column contains a mean.
    q_sqrt is a 3D tensor; each matrix within is a lower-triangular
    square root of the covariance of q.
    K is a positive-definite matrix: the covariance of p.
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    num_latent = tf.cast(tf.shape(q_sqrt)[2], float_type)
    KL += num_latent * 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-determinant term.
    KL += -0.5 * tf.cast(tf.reduce_prod(tf.shape(q_sqrt)[1:]), float_type)  # Constant term.
    Lq = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # Force lower triangle.
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(Lq))))  # Log-determinant of q.
    # tf.pack was renamed tf.stack in TF 1.0.
    L_tiled = tf.tile(tf.expand_dims(L, 0), tf.stack([tf.shape(Lq)[0], 1, 1]))
    LiLq = tf.matrix_triangular_solve(L_tiled, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term.
    return KL
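
A hypothetical usage sketch, assuming float_type = tf.float64 is defined at module scope as in GPflow; N, L, and the covariance below are illustrative. gauss_kl forces the lower triangle of q_sqrt itself, so a full random tensor may be passed:

import numpy as np
import tensorflow as tf

float_type = tf.float64
N, L = 4, 2
q_mu = tf.constant(np.random.randn(N, L))       # one mean per column
q_sqrt = tf.constant(np.random.randn(N, N, L))  # lower triangle enforced inside
A = np.random.randn(N, N)
K = tf.constant(A.dot(A.T) + N * np.eye(N))     # positive-definite prior covariance

kl = gauss_kl(q_mu, q_sqrt, K)
with tf.Session() as sess:
    print(sess.run(kl))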

Example 3: random_tril_matrix

def random_tril_matrix(
        shape, dtype, force_well_conditioned=False, remove_upper=True):
    """[batch] lower triangular matrix.

    Args:
        shape: `TensorShape` or Python `list`. Shape of the returned matrix.
        dtype: `TensorFlow` `dtype` or Python dtype.
        force_well_conditioned: Python `bool`. If `True`, the returned matrix
            has eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues
            are unit normal random variables.
        remove_upper: Python `bool`. If `True`, zero out the strictly upper
            triangle. If `False`, the lower triangle of the returned matrix
            has the desired properties, but the strictly upper triangle is
            not zeroed out.

    Returns:
        `Tensor` with desired shape and dtype.
    """
    with tf.name_scope("random_tril_matrix"):
        # Totally random matrix. Has no nice properties.
        tril = random_normal(shape, dtype=dtype)
        if remove_upper:
            tril = tf.matrix_band_part(tril, -1, 0)
        # Create a diagonal with entries having modulus in [1, 2].
        if force_well_conditioned:
            maxval = tf.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype)
            diag = random_sign_uniform(
                shape[:-1], dtype=dtype, minval=1., maxval=maxval)
            tril = tf.matrix_set_diag(tril, diag)
        return tril
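
A usage sketch, assuming the random_normal and random_sign_uniform helpers from the same test-utility module are in scope:

import tensorflow as tf

tril = random_tril_matrix(
    [2, 4, 4], tf.float64, force_well_conditioned=True)
with tf.Session() as sess:
    print(sess.run(tril))  # two well-conditioned 4x4 lower-triangular matrices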

Example 4: _operator_and_mat_and_feed_dict

def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    shape = list(shape)
    diag_shape = shape[:-1]
    # Upper triangle will be ignored.
    # Use a diagonal that ensures this matrix is well conditioned.
    tril = tf.random_normal(shape=shape, dtype=dtype.real_dtype)
    diag = tf.random_uniform(
        shape=diag_shape, dtype=dtype.real_dtype, minval=2., maxval=3.)
    if dtype.is_complex:
        tril = tf.complex(
            tril, tf.random_normal(shape, dtype=dtype.real_dtype))
        diag = tf.complex(
            diag, tf.random_uniform(
                shape=diag_shape, dtype=dtype.real_dtype, minval=2., maxval=3.))
    tril = tf.matrix_set_diag(tril, diag)
    tril_ph = tf.placeholder(dtype=dtype)
    if use_placeholder:
        # Evaluate the tril here because (i) you cannot feed a tensor, and
        # (ii) tril is random and we want the same value used for both mat
        # and feed_dict.
        tril = tril.eval()
        operator = linalg.LinearOperatorTriL(tril_ph)
        feed_dict = {tril_ph: tril}
    else:
        operator = linalg.LinearOperatorTriL(tril)
        feed_dict = None
    mat = tf.matrix_band_part(tril, -1, 0)
    return operator, mat, feed_dict

Example 5: call

def call(self, x, mask=None):
    x1, x2 = x
    outer = tf.matmul(tf.expand_dims(x1, axis=2), tf.expand_dims(x2, axis=1))
    outer = tf.matrix_band_part(outer, 0, self.ans_limit)
    output1 = tf.reshape(
        tf.cast(tf.argmax(tf.reduce_max(outer, axis=2), axis=1), tf.float32),
        (-1, 1))
    output2 = tf.reshape(
        tf.cast(tf.argmax(tf.reduce_max(outer, axis=1), axis=1), tf.float32),
        (-1, 1))
    return [output1, output2]
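
The band here encodes the answer-span constraint of a QA pointer head: outer[b, i, j] is the joint score of (start=i, end=j), num_lower=0 rules out end < start, and num_upper=self.ans_limit caps the span length. A small standalone sketch with illustrative probabilities:

import tensorflow as tf

p_start = tf.constant([[0.1, 0.6, 0.3]])  # batch of 1, three positions
p_end = tf.constant([[0.2, 0.2, 0.6]])
outer = tf.matmul(tf.expand_dims(p_start, 2), tf.expand_dims(p_end, 1))
outer = tf.matrix_band_part(outer, 0, 1)  # only spans with end - start <= 1

start = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
end = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
with tf.Session() as sess:
    print(sess.run([start, end]))  # start=1, end=2 for these inputs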

Example 6: _forward

def _forward(self, x):
    if self.validate_args:
        is_matrix = tf.assert_rank_at_least(x, 2)
        shape = tf.shape(x)
        is_square = tf.assert_equal(shape[-2], shape[-1])
        x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
    # For safety, explicitly zero out the upper triangular part.
    x = tf.matrix_band_part(x, -1, 0)
    return tf.matmul(x, x, adjoint_b=True)
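
In effect, this bijector maps a lower-triangular factor L to the positive-semidefinite product L L^T. A minimal sketch with an illustrative 2x2 factor:

import tensorflow as tf

x = tf.constant([[2., 0.], [1., 3.]])
L = tf.matrix_band_part(x, -1, 0)      # zero any strictly upper entries
cov = tf.matmul(L, L, adjoint_b=True)
with tf.Session() as sess:
    print(sess.run(cov))               # [[4., 2.], [2., 10.]]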

Example 7: CheckUnitary

def CheckUnitary(self, x):
    # Tests that x[..., :, :]^H * x[..., :, :] is close to the identity.
    xx = tf.matmul(x, x, adjoint_a=True)
    identity = tf.matrix_band_part(tf.ones_like(xx), 0, 0)
    # is_single is bound in the enclosing test-generator scope and selects
    # the tolerance for single- vs. double-precision dtypes.
    if is_single:
        tol = 1e-5
    else:
        tol = 1e-14
    self.assertAllClose(identity.eval(), xx.eval(), atol=tol)

Example 8: _sample_n

def _sample_n(self, n, seed):
    batch_shape = self.batch_shape_tensor()
    event_shape = self.event_shape_tensor()
    batch_ndims = tf.shape(batch_shape)[0]
    ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
    shape = tf.concat([[n], batch_shape, event_shape], 0)
    stream = seed_stream.SeedStream(seed, salt="Wishart")

    # Complexity: O(nbk**2)
    x = tf.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=stream())

    # Complexity: O(nbk)
    # This parametrization is equivalent to Chi2, i.e.,
    # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
    expanded_df = self.df * tf.ones(
        self.scale_operator.batch_shape_tensor(),
        dtype=self.df.dtype.base_dtype)
    g = tf.random_gamma(
        shape=[n],
        alpha=self._multi_gamma_sequence(0.5 * expanded_df, self.dimension),
        beta=0.5,
        dtype=self.dtype,
        seed=stream())

    # Complexity: O(nbk**2)
    x = tf.matrix_band_part(x, -1, 0)  # Tri-lower.

    # Complexity: O(nbk)
    x = tf.matrix_set_diag(x, tf.sqrt(g))

    # Make batch-op ready.
    # Complexity: O(nbk**2)
    perm = tf.concat([tf.range(1, ndims), [0]], 0)
    x = tf.transpose(x, perm)
    shape = tf.concat([batch_shape, [event_shape[0]], [-1]], 0)
    x = tf.reshape(x, shape)

    # Complexity: O(nbM) where M is the complexity of the operator solving a
    # vector system. For LinearOperatorLowerTriangular, each matmul is O(k^3)
    # so this step has complexity O(nbk^3).
    x = self.scale_operator.matmul(x)

    # Undo make batch-op ready.
    # Complexity: O(nbk**2)
    shape = tf.concat([batch_shape, event_shape, [n]], 0)
    x = tf.reshape(x, shape)
    perm = tf.concat([[ndims - 1], tf.range(0, ndims - 1)], 0)
    x = tf.transpose(x, perm)

    if not self.input_output_cholesky:
        # Complexity: O(nbk**3)
        x = tf.matmul(x, x, adjoint_b=True)
    return x
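
The core of this sampler is the Bartlett decomposition: a lower-triangular matrix with standard-normal strict lower triangle and chi-distributed diagonal yields a Wishart draw. A minimal single-sample sketch for Wishart(df, I); k and df are illustrative, and tf.random_gamma is assumed to broadcast the per-row alpha vector into the output shape:

import tensorflow as tf

k, df = 3, 5.
a = tf.matrix_band_part(tf.random_normal([k, k]), -1, 0)  # strict lower + diagonal
# Diagonal entry i is the sqrt of ChiSquared(df - i) == Gamma(alpha=(df - i)/2, beta=1/2).
g = tf.random_gamma([], alpha=0.5 * (df - tf.range(0., k)), beta=0.5)
a = tf.matrix_set_diag(a, tf.sqrt(g))
w = tf.matmul(a, a, adjoint_b=True)  # one Wishart(df, I) sample
with tf.Session() as sess:
    print(sess.run(w))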

Example 9: get_right_context_mask

def get_right_context_mask(time_steps):
    """Generates the mask preventing the decoder from attending to unseen positions."""
    # Generate a mask that limits decoder self-attention up to and including
    # the current position.
    attn_mask = tf.matrix_band_part(tf.ones([time_steps, time_steps]), -1, 0)
    # Expand the mask to 4D so it is compatible with the attention weights.
    attn_mask = tf.expand_dims(tf.expand_dims(attn_mask, 0), 0)
    # Illegal connections will be set to -inf when fed into the softmax;
    # keeping zeros at non-masked positions prevents NaNs.
    attn_mask = -1e9 * (1.0 - attn_mask)
    return attn_mask
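
A usage sketch: the mask broadcasts over the batch and head dimensions of the attention logits, so future positions receive -1e9 before the softmax (shapes are illustrative). Examples 11-13 below build the same lower-triangular mask with slightly different shapes and fill constants:

import tensorflow as tf

logits = tf.random_normal([2, 8, 5, 5])      # [batch, heads, query, key]
masked = logits + get_right_context_mask(5)  # broadcasts from [1, 1, 5, 5]
weights = tf.nn.softmax(masked)              # ~zero weight on future positions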

Example 10: Test

def Test(self):
    # batch_shape_, shape_, and dtype_ are bound in the enclosing
    # test-generator scope.
    shape = batch_shape_ + shape_
    x = tf.constant(np.random.rand(*shape), dtype=dtype_)
    with self.test_session(use_gpu=True):
        for lower in -1, 0, 1, shape_[-2] - 1:
            for upper in -1, 0, 1, shape_[-1] - 1:
                y = tf.matrix_band_part(x, lower, upper)
                error = tf.test.compute_gradient_error(
                    x, x.get_shape().as_list(), y, y.get_shape().as_list())
                self.assertLess(error, 1e-4)

Example 11: mask_leq

def mask_leq(target_length, source_length):
    """A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.

    Args:
        target_length: an integer
        source_length: an integer

    Returns:
        a Tensor with shape [1, target_length, source_length]
    """
    return tf.expand_dims(
        tf.matrix_band_part(tf.ones([target_length, source_length]), -1, 0), 0)

Example 12: attention_bias_lower_triangle

def attention_bias_lower_triangle(length):
    """Create a bias tensor to be added to attention logits.

    Allows a query to attend to all positions up to and including its own.

    Args:
        length: A scalar.

    Returns:
        A float Tensor of shape [1, 1, length, length], with -1e9 in masked
        positions and 0 elsewhere.
    """
    lower_triangle = tf.matrix_band_part(tf.ones([length, length]), -1, 0)
    ret = FLOAT_MIN * (1. - lower_triangle)
    return tf.reshape(ret, [1, 1, length, length])

Example 13: get_decoder_self_attention_bias

def get_decoder_self_attention_bias(length):
    """Calculate bias for decoder that maintains the model's autoregressive property.

    Creates a tensor that masks out locations corresponding to illegal
    connections, so the prediction at position i cannot draw information
    from future positions.

    Args:
        length: int length of sequences in batch.

    Returns:
        float tensor of shape [1, 1, length, length]
    """
    with tf.name_scope("decoder_self_attention_bias"):
        valid_locs = tf.matrix_band_part(tf.ones([length, length]), -1, 0)
        valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
        decoder_bias = _NEG_INF * (1.0 - valid_locs)
    return decoder_bias

Example 14: _assertions

def _assertions(self, x):
    if not self.validate_args:
        return []
    x_shape = tf.shape(x)
    is_matrix = tf.assert_rank_at_least(
        x, 2,
        message="Input must have rank at least 2.")
    is_square = tf.assert_equal(
        x_shape[-2], x_shape[-1],
        message="Input must be a square matrix.")
    diag_part_x = tf.matrix_diag_part(x)
    is_lower_triangular = tf.assert_equal(
        tf.matrix_band_part(x, 0, -1),  # Preserves triu, zeroes rest.
        tf.matrix_diag(diag_part_x),
        message="Input must be lower triangular.")
    is_positive_diag = tf.assert_positive(
        diag_part_x,
        message="Input must have all positive diagonal entries.")
    return [is_matrix, is_square, is_lower_triangular, is_positive_diag]
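
The lower-triangularity assertion works because keeping only the upper band of a lower-triangular matrix (diagonal included) must leave nothing but its diagonal. A standalone sketch:

import tensorflow as tf

x = tf.constant([[1., 0.], [2., 3.]])  # lower triangular
upper = tf.matrix_band_part(x, 0, -1)  # [[1., 0.], [0., 3.]]
check = tf.assert_equal(upper, tf.matrix_diag(tf.matrix_diag_part(x)))
with tf.Session() as sess:
    sess.run(check)  # passes; raises for a non-lower-triangular input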

Example 15: testNonDefaultsYieldCorrectShapesAndValues

def testNonDefaultsYieldCorrectShapesAndValues(self):
    batch_shape = [4, 3]
    x_size = 3
    mvn_size = 5
    x_ = np.random.randn(*np.concatenate([batch_shape, [x_size]]))
    x = tf.constant(x_)
    mvn = tfp.trainable_distributions.multivariate_normal_tril(
        x,
        dims=mvn_size,
        loc_fn=tf.zeros_like,
        scale_fn=lambda x: tfd.fill_triangular(tf.ones_like(x)))
    scale = mvn.scale.to_dense()
    expected_scale = tf.matrix_band_part(
        tf.ones(np.concatenate([batch_shape, [mvn_size, mvn_size]]),
                scale.dtype),
        num_lower=-1,
        num_upper=0)

    self.evaluate(tf.global_variables_initializer())
    [
        batch_shape_,
        event_shape_,
        loc_,
        scale_,
        expected_scale_,
    ] = self.evaluate([
        mvn.batch_shape_tensor(),
        mvn.event_shape_tensor(),
        mvn.loc,
        scale,
        expected_scale,
    ])

    self.assertAllEqual(batch_shape, mvn.batch_shape)
    self.assertAllEqual(batch_shape, batch_shape_)
    self.assertAllEqual([mvn_size], mvn.event_shape)
    self.assertAllEqual([mvn_size], event_shape_)
    self.assertAllEqual(np.zeros_like(loc_), loc_)
    self.assertAllEqual(expected_scale_, scale_)
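
The expected scale above is just an all-ones lower triangle; a quick standalone check of the band_part construction against NumPy's np.tril:

import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    tf_tril = sess.run(tf.matrix_band_part(tf.ones([3, 3]), -1, 0))
np.testing.assert_array_equal(tf_tril, np.tril(np.ones((3, 3))))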