This article collects typical usage examples of the Python method numpy.tril_indices_from. If you are wondering what exactly numpy.tril_indices_from does, how to call it, or what real code using it looks like, the curated examples below should help. You can also read further about the numpy module that the method belongs to.
Below, 14 code examples of numpy.tril_indices_from are shown, ordered by popularity by default.
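Before the project examples, here is a minimal sketch (not taken from any of the projects below) of what the function itself does: numpy.tril_indices_from(arr, k=0) returns a pair of index arrays addressing the lower triangle of the 2-D array arr, with k shifting the diagonal just as in numpy.tril.

import numpy as np

a = np.arange(16).reshape(4, 4)

# Row and column indices of the lower triangle, including the diagonal.
rows, cols = np.tril_indices_from(a)
print(a[rows, cols])                       # [ 0  4  5  8  9 10 12 13 14 15]

# k=-1 excludes the diagonal; this form appears in several examples below.
print(a[np.tril_indices_from(a, k=-1)])    # [ 4  8  9 12 13 14]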
Example 1: test_design_r

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def test_design_r(self):
    design = simple_mv_velocity_design(3)
    batch_design = design.for_batch(2, 1)

    cov = batch_design.R(0)[0]
    self.assertTupleEqual(cov.size(), (3, 3))
    self.assertTrue(cov.requires_grad)

    cholesky_log_diag = design.measure_covariance.param_dict()['cholesky_log_diag']
    cholesky_off_diag = design.measure_covariance.param_dict()['cholesky_off_diag']

    cov = cov.data.numpy()
    self.assertTrue(np.isclose(cov, cov.T).all(), msg="Covariance is not symmetric.")

    chol = cholesky(cov)
    for a, b in zip(torch.exp(cholesky_log_diag).tolist(), np.diag(chol).tolist()):
        self.assertAlmostEqual(a, b, places=4)
    for a, b in zip(cholesky_off_diag.tolist(), chol[np.tril_indices_from(chol, k=-1)].tolist()):
        self.assertAlmostEqual(a, b, places=4)

Example 2: get_matrix

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def get_matrix(self):
    """Return the current internal matrix.

    Returns
    -------
    M : ndarray, shape (n, n)
        Dense matrix containing either the Hessian or its inverse
        (depending on how `approx_type` was defined).
    """
    if self.approx_type == 'hess':
        M = np.copy(self.B)
    else:
        M = np.copy(self.H)
    li = np.tril_indices_from(M, k=-1)
    M[li] = M.T[li]
    return M
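The idiom in this example, M[li] = M.T[li] with li = np.tril_indices_from(M, k=-1), mirrors the upper triangle into the strict lower triangle, so a fully symmetric matrix can be returned even if only one triangle is kept up to date internally. A standalone sketch of the same trick, not part of the original project:

import numpy as np

# Only the upper triangle of M is assumed to be meaningful here.
M = np.triu(np.arange(1.0, 10.0).reshape(3, 3))

# Mirror the upper triangle into the strict lower triangle.
li = np.tril_indices_from(M, k=-1)
M[li] = M.T[li]

assert np.allclose(M, M.T)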

Example 3: cho_invert

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def cho_invert(A):
    """Returns the inverse of a positive definite matrix, using a Cholesky
    decomposition via calls to LAPACK dpotrf and dpotri in the F2PY module.

    :param A: Matrix (symmetric and positive definite, left-hand side).
    :type A: numpy array

    :return: The inverse matrix
    :rtype: numpy array
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Matrix to store the inverse
    I = np.asfortranarray(A)

    fcho_invert(I)

    # Copy lower triangle to upper
    i_lower = np.tril_indices_from(A)
    I.T[i_lower] = I[i_lower]

    return I

Example 4: bkf_invert

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def bkf_invert(A):
    """Returns the inverse of a positive definite matrix, using a Cholesky
    decomposition via calls to LAPACK dpotrf and dpotri in the F2PY module.

    :param A: Matrix (symmetric and positive definite, left-hand side).
    :type A: numpy array

    :return: The inverse matrix
    :rtype: numpy array
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Matrix to store the inverse
    I = np.asfortranarray(A)

    fbkf_invert(I)

    # Copy lower triangle to upper
    i_lower = np.tril_indices_from(A)
    I.T[i_lower] = I[i_lower]

    return I

Example 5: test_tensor_iterator

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def test_tensor_iterator():
    a = np.arange(16).reshape((4, 4))
    test_tensor = Tensor(tensor=a)
    assert np.allclose(test_tensor.data, a)
    assert test_tensor.size == 16
    assert isinstance(test_tensor.basis, Bijection)

    a_triu = a[np.triu_indices_from(a)]
    a_tril = a[np.tril_indices_from(a)]

    counter = 0
    for val, idx in test_tensor.utri_iterator():
        assert val == a[tuple(idx)]
        assert val == a_triu[counter]
        counter += 1
    assert counter == 4 * (4 + 1) / 2

    counter = 0
    for val, idx in test_tensor.ltri_iterator():
        assert val == a[tuple(idx)]
        assert val == a_tril[counter]
        counter += 1
    assert counter == 4 * (4 + 1) / 2

    counter = 0
    for val, idx in test_tensor.all_iterator():
        assert val == a[tuple(idx)]
        counter += 1

    assert np.allclose(test_tensor.vectorize(), a.reshape((-1, 1), order='C'))

    with pytest.raises(TypeError):
        list(test_tensor._iterator('blah'))

Example 6: read_self

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def read_self(self):
    logger.log(5, 'READING: {}'.format(self.filename))
    stuff = re.search(
        r'Atomic numbers\s+I\s+N=\s+(?P<num_atoms>\d+)'
        r'\n\s+(?P<anums>.*?)'
        r'Nuclear charges.*?Current cartesian coordinates.*?\n(?P<coords>.*?)'
        r'Force Field'
        r'.*?Real atomic weights.*?\n(?P<masses>.*?)'
        r'Atom fragment info.*?Cartesian Gradient.*?\n(?P<evals>.*?)'
        r'Cartesian Force Constants.*?\n(?P<hess>.*?)'
        r'Dipole Moment',
        open(self.path, 'r').read(), flags=re.DOTALL)
    anums = [int(x) for x in stuff.group('anums').split()]
    masses = [float(x) for x in stuff.group('masses').split()]
    coords = [float(x) for x in stuff.group('coords').split()]
    coords = [coords[i:i+3] for i in range(0, len(coords), 3)]
    for anum, mass, coord in zip(anums, masses, coords):
        self.atoms.append(
            Atom(
                atomic_num=anum,
                coords=coord,
                exact_mass=mass)
        )
    logger.log(5, '  -- Read {} atoms.'.format(len(self.atoms)))
    self.evals = np.array(
        [float(x) for x in stuff.group('evals').split()], dtype=float)
    logger.log(5, '  -- Read {} eigenvectors.'.format(len(self.evals)))
    self.low_tri = np.array(
        [float(x) for x in stuff.group('hess').split()], dtype=float)
    one_dim = len(anums) * 3
    self._hess = np.empty([one_dim, one_dim], dtype=float)
    self._hess[np.tril_indices_from(self._hess)] = self.low_tri
    self._hess += np.tril(self._hess, -1).T
    # Convert to MacroModel units.
    self._hess *= co.HESSIAN_CONVERSION
    logger.log(5, '  -- Read {} Hessian.'.format(self._hess.shape))

Example 7: _1d_to_2d

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def _1d_to_2d(values, n):
    from numpy import zeros, tril_indices_from, tril

    K = zeros((n, n))
    K[tril_indices_from(K)] = values
    K = K + tril(K, -1).T
    return K
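_1d_to_2d assumes values holds the lower triangle of a symmetric n x n matrix, row by row, i.e. n*(n+1)/2 entries in exactly the order produced by tril_indices_from. A small hypothetical roundtrip check of that convention (the variable names below are not from the original code):

import numpy as np

n = 4
K_true = np.arange(n * n, dtype=float).reshape(n, n)
K_true = (K_true + K_true.T) / 2.0              # make it symmetric

packed = K_true[np.tril_indices_from(K_true)]   # lower triangle, row by row
assert packed.size == n * (n + 1) // 2

K = np.zeros((n, n))
K[np.tril_indices_from(K)] = packed
K = K + np.tril(K, -1).T                        # mirror strict lower triangle up

assert np.allclose(K, K_true)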

Example 8: cho_solve

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def cho_solve(A, y):
    """Solves the equation

        :math:`A x = y`

    for x using a Cholesky decomposition via calls to LAPACK dpotrf and dpotrs
    in the F2PY module. Preserves the input matrix A.

    :param A: Matrix (symmetric and positive definite, left-hand side).
    :type A: numpy array
    :param y: Vector (right-hand side of the equation).
    :type y: numpy array

    :return: The solution vector.
    :rtype: numpy array
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')
    if len(y.shape) != 1 or y.shape[0] != A.shape[1]:
        raise ValueError('expected matrix and vector of same stride size')

    n = A.shape[0]

    # Backup diagonal before Cholesky-decomposition
    A_diag = A[np.diag_indices_from(A)]

    x = np.zeros(n)
    fcho_solve(A, y, x)

    # Reset diagonal after Cholesky-decomposition
    A[np.diag_indices_from(A)] = A_diag

    # Copy lower triangle to upper
    i_lower = np.tril_indices_from(A)
    A.T[i_lower] = A[i_lower]

    return x

Example 9: bkf_solve

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def bkf_solve(A, y):
    """Solves the equation

        :math:`A x = y`

    for x using a Cholesky decomposition via calls to LAPACK dpotrf and dpotrs
    in the F2PY module. Preserves the input matrix A.

    :param A: Matrix (symmetric and positive definite, left-hand side).
    :type A: numpy array
    :param y: Vector (right-hand side of the equation).
    :type y: numpy array

    :return: The solution vector.
    :rtype: numpy array
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')
    if len(y.shape) != 1 or y.shape[0] != A.shape[1]:
        raise ValueError('expected matrix and vector of same stride size')

    n = A.shape[0]

    # Backup diagonal before Cholesky-decomposition
    A_diag = A[np.diag_indices_from(A)]

    x = np.zeros(n)
    fbkf_solve(A, y, x)

    # Reset diagonal after Cholesky-decomposition
    A[np.diag_indices_from(A)] = A_diag

    # Copy lower triangle to upper
    i_lower = np.tril_indices_from(A)
    A.T[i_lower] = A[i_lower]

    return x

Example 10: _test_score

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def _test_score(estimator, p_mat, graph):
    np.random.seed(8888)
    graph = graph.copy()
    p_mat = p_mat.copy()
    estimator.fit(graph)
    estimator.p_mat_ = p_mat  # hack just for testing likelihood

    if is_symmetric(graph):
        inds = np.triu_indices_from(graph, k=1)
    else:
        xu, yu = np.triu_indices_from(graph, k=1)
        xl, yl = np.tril_indices_from(graph, k=-1)
        x = np.concatenate((xl, xu))
        y = np.concatenate((yl, yu))
        inds = (x, y)

    p_rav = p_mat[inds]
    g_rav = graph[inds]

    lik = np.zeros(g_rav.shape)
    c = 1 / p_mat.size
    for i, (g, p) in enumerate(zip(g_rav, p_rav)):
        if p < c:
            p = c
        if p > 1 - c:
            p = 1 - c
        if g == 1:
            lik[i] = p
        else:
            lik[i] = 1 - p

    # lik = np.reshape(lik_rav, p_mat.shape)
    lik[lik < 1e-10] = 1
    lik = np.log(lik)
    assert_allclose(lik, estimator.score_samples(graph))
    assert np.sum(lik) == estimator.score(graph)
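For the directed case this test combines np.triu_indices_from(..., k=1) with np.tril_indices_from(..., k=-1) so that every off-diagonal entry is addressed exactly once while the diagonal is skipped. A minimal sketch of that indexing pattern on its own:

import numpy as np

A = np.arange(16).reshape(4, 4)

xu, yu = np.triu_indices_from(A, k=1)   # strict upper triangle
xl, yl = np.tril_indices_from(A, k=-1)  # strict lower triangle
inds = (np.concatenate((xl, xu)), np.concatenate((yl, yu)))

# Together the two triangles cover all n*(n-1) off-diagonal entries.
assert len(inds[0]) == A.shape[0] * (A.shape[0] - 1)
assert not np.any(inds[0] == inds[1])   # the diagonal is never included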

Example 11: test_tril_indices_from

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def test_tril_indices_from(self):
    self.check(np.tril_indices_from)

Example 12: slotted_autocorrelation

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def slotted_autocorrelation(
    self, data, time, T, K, second_round=False, K1=100
):
    slots, i = np.zeros((K, 1)), 1

    # make time start from 0
    time = time - np.min(time)

    # subtract mean from mag values
    m = np.mean(data)
    data = data - m

    prod = np.zeros((K, 1))
    pairs = np.subtract.outer(time, time)
    pairs[np.tril_indices_from(pairs)] = 10000000

    ks = np.int64(np.floor(np.abs(pairs) / T + 0.5))

    # We calculate the slotted autocorrelation for k=0 separately
    idx = np.where(ks == 0)
    prod[0] = (sum(data ** 2) + sum(data[idx[0]] * data[idx[1]])) / (
        len(idx[0]) + len(data)
    )
    slots[0] = 0

    # We calculate it for the rest of the ks
    if second_round is False:
        for k in np.arange(1, K):
            idx = np.where(ks == k)
            if len(idx[0]) != 0:
                prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
                slots[i] = k
                i = i + 1
            else:
                prod[k] = np.infty
    else:
        for k in np.arange(K1, K):
            idx = np.where(ks == k)
            if len(idx[0]) != 0:
                prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
                slots[i - 1] = k
                i = i + 1
            else:
                prod[k] = np.infty

    np.trim_zeros(prod, trim="b")
    slots = np.trim_zeros(slots, trim="b")

    return prod / prod[0], np.int64(slots).flatten()
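Here np.subtract.outer builds the matrix of pairwise time differences, and np.tril_indices_from is used to overwrite the diagonal and lower triangle with a large sentinel, so each pair is counted only once and the masked entries land far outside the lag slots being evaluated. A small sketch of just that masking step, with an arbitrary sentinel of the same magnitude as above:

import numpy as np

time = np.array([0.0, 1.0, 2.5, 4.0])

pairs = np.subtract.outer(time, time)     # pairs[i, j] = time[i] - time[j]
pairs[np.tril_indices_from(pairs)] = 1e7  # mask j <= i so each pair appears once

# Only the strict upper triangle keeps real time differences.
iu = np.triu_indices_from(pairs, k=1)
assert np.all(pairs[iu] < 0)              # time[i] - time[j] < 0 for i < j
assert np.all(pairs[np.tril_indices_from(pairs)] == 1e7)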

Example 13: score_samples

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def score_samples(self, graph, clip=None):
    """
    Compute the weighted log probabilities for each potential edge.

    Note that this implicitly assumes the input graph is indexed like the
    fit model.

    Parameters
    ----------
    graph : np.ndarray
        Input graph. Must be same shape as model's ``p_mat_`` attribute

    clip : scalar or None, optional (default=None)
        Values for which to clip probability matrix, entries less than c or more
        than 1 - c are set to c or 1 - c, respectively.
        If None, values will not be clipped in the likelihood calculation, which may
        result in poorly behaved likelihoods depending on the model.

    Returns
    -------
    sample_scores : np.ndarray (size of ``graph``)
        log-likelihood per potential edge in the graph
    """
    check_is_fitted(self, "p_mat_")
    # P.ravel() <dot> graph * (1 - P.ravel()) <dot> (1 - graph)
    graph = import_graph(graph)
    if not is_unweighted(graph):
        raise ValueError("Model only implemented for unweighted graphs")
    p_mat = self.p_mat_.copy()

    if np.shape(p_mat) != np.shape(graph):
        raise ValueError("Input graph size must be the same size as P matrix")

    inds = None
    if not self.directed and self.loops:
        inds = np.triu_indices_from(graph)  # ignore lower half of graph, symmetric
    elif not self.directed and not self.loops:
        inds = np.triu_indices_from(graph, k=1)  # ignore the diagonal
    elif self.directed and not self.loops:
        xu, yu = np.triu_indices_from(graph, k=1)
        xl, yl = np.tril_indices_from(graph, k=-1)
        x = np.concatenate((xl, xu))
        y = np.concatenate((yl, yu))
        inds = (x, y)
    if inds is not None:
        p_mat = p_mat[inds]
        graph = graph[inds]

    # clip the probabilities that are degenerate
    if clip is not None:
        p_mat[p_mat < clip] = clip
        p_mat[p_mat > 1 - clip] = 1 - clip

    # TODO: use nonzero inds here will be faster
    successes = np.multiply(p_mat, graph)
    failures = np.multiply((1 - p_mat), (1 - graph))
    likelihood = successes + failures
    return np.log(likelihood)

Example 14: slotted_autocorrelation

# Required import: import numpy [as an alias]
# Or: from numpy import tril_indices_from [as an alias]
def slotted_autocorrelation(self, data, time, T, K,
                            second_round=False, K1=100):
    slots = np.zeros((K, 1))
    i = 1

    # make time start from 0
    time = time - np.min(time)

    # subtract mean from mag values
    m = np.mean(data)
    data = data - m

    prod = np.zeros((K, 1))
    pairs = np.subtract.outer(time, time)
    pairs[np.tril_indices_from(pairs)] = 10000000

    ks = np.int64(np.floor(np.abs(pairs) / T + 0.5))

    # We calculate the slotted autocorrelation for k=0 separately
    idx = np.where(ks == 0)
    prod[0] = ((sum(data ** 2) + sum(data[idx[0]] * data[idx[1]])) /
               (len(idx[0]) + len(data)))
    slots[0] = 0

    # We calculate it for the rest of the ks
    if second_round is False:
        for k in np.arange(1, K):
            idx = np.where(ks == k)
            if len(idx[0]) != 0:
                prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
                slots[i] = k
                i = i + 1
            else:
                prod[k] = np.infty
    else:
        for k in np.arange(K1, K):
            idx = np.where(ks == k)
            if len(idx[0]) != 0:
                prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
                slots[i - 1] = k
                i = i + 1
            else:
                prod[k] = np.infty

    np.trim_zeros(prod, trim='b')
    slots = np.trim_zeros(slots, trim='b')

    return prod / prod[0], np.int64(slots).flatten()