This article collects typical usage examples of the numpy.tril_indices function in Python. If you have been struggling with questions like "what exactly does tril_indices do?", "how is it used?", or "what do real uses look like?", the hand-picked code examples below may help.
Fifteen code examples of the tril_indices function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
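Before the examples, a minimal sketch of the API itself may help: np.tril_indices(n, k=0, m=None) returns a tuple of row and column index arrays addressing the lower triangle of an n-by-m array, offset by k diagonals, and is usable both for reading and for assignment:

import numpy as np

rows, cols = np.tril_indices(3)      # main diagonal and below
a = np.arange(9).reshape(3, 3)
print(a[rows, cols])                 # [0 3 4 6 7 8]

a[np.tril_indices(3, k=-1)] = 0      # zero the strictly lower triangle
print(a)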
Example 1: actor_critic_update
def actor_critic_update(X0, A, R, X1, gamma=0.99, learnrate=0.0001):
    global WP1, WP2, WV_A, WV_b, WV
    global grad_WP1, grad_WP2, grad_N
    global grad_WP1s, grad_WP2s
    global noise_cov
    S0 = XtoS(X0)
    S1 = XtoS(X1)
    # Quadratic state features: the lower triangle of the outer product
    # gives all unique pairwise products of the 7-dim state (28 features)
    SS0 = np.outer(S0, S0)[np.tril_indices(7)]
    SS1 = np.outer(S1, S1)[np.tril_indices(7)]
    deltaV = SS0 - gamma * SS1
    # Accumulate normal equations for a least-squares fit of deltaV . WV ~= R
    WV_A += np.outer(deltaV, deltaV)
    WV_b += R * deltaV
    WV = np.linalg.solve(WV_A, WV_b)
    advantage = R + gamma * np.dot(SS1, WV) - np.dot(SS0, WV)
    g1 = -0.5 * np.outer(A, A) * advantage
    grad_WP1 += g1
    grad_WP1s += g1 * g1
    g2 = np.outer(A, SS0) * advantage
    grad_WP2 += g2
    grad_WP2s += g2 * g2
    grad_N += 1
    # if X0[4] > 4:
    #     print('V(s0)', np.dot(SS0, WV), 'V(s1)', np.dot(SS1, WV), 'R', R)
    #     print('A', A, 'adv', advantage)  # , 'g_wp1\n', grad_WP1, 'g_wp2\n', grad_WP2
    noise_cov = np.linalg.inv(np.linalg.cholesky(
        WP1 + 1e-2 + np.eye(2))).T
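The pattern to note in Example 1 is the quadratic feature construction: packing the lower triangle of np.outer(s, s) yields every unique pairwise product s_i*s_j exactly once. A minimal standalone sketch (XtoS and the weight globals belong to the original project and are not reproduced here):

import numpy as np

s = np.array([1.0, 2.0, 3.0])
feat = np.outer(s, s)[np.tril_indices(len(s))]
# A state of dimension n gives n*(n+1)//2 unique products:
# [s0*s0, s1*s0, s1*s1, s2*s0, s2*s1, s2*s2]
print(feat)  # [1. 2. 4. 3. 6. 9.]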
Example 2: __init__
def __init__(self, value):
    self.value = value
    # Store the Cholesky factor packed as a flat vector of lower-triangular entries
    self.value_ = np.linalg.cholesky(value)[np.tril_indices(value.shape[0])]
    self.shape = value.shape
    self.size = value.size
    self.free = np.resize(True, self.value_.shape)
    self.to_external = lambda val: np.linalg.cholesky(val)[np.tril_indices(self.shape[0])]
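Example 2 stores a covariance matrix through the packed lower triangle of its Cholesky factor. A sketch of the inverse mapping (the unpacking helper below is hypothetical, not part of the original class):

import numpy as np

def unpack_cholesky(packed, n):
    """Rebuild the full matrix from a packed lower-triangular Cholesky factor."""
    L = np.zeros((n, n))
    L[np.tril_indices(n)] = packed
    return L @ L.T

cov = np.array([[4.0, 2.0], [2.0, 3.0]])
packed = np.linalg.cholesky(cov)[np.tril_indices(2)]
np.testing.assert_allclose(unpack_cholesky(packed, 2), cov)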
Example 3: find_stationary_var
def find_stationary_var(amat=None, bmat=None, cmat=None):
    """Find fixed point of H = CC' + AHA' + BHB' given A, B, C.

    Parameters
    ----------
    amat, bmat, cmat : (nstocks, nstocks) arrays
        Parameter matrices

    Returns
    -------
    (nstocks, nstocks) array
        Unconditional variance matrix

    """
    nstocks = amat.shape[0]
    kwargs = {'amat': amat, 'bmat': bmat, 'ccmat': cmat.dot(cmat.T)}
    fun = partial(ParamGeneric.fixed_point, **kwargs)
    try:
        with np.errstate(divide='ignore', invalid='ignore'):
            hvar = np.eye(nstocks)
            # Iterate on the packed lower triangle only, then restore symmetry
            sol = sco.fixed_point(fun, hvar[np.tril_indices(nstocks)])
            hvar[np.tril_indices(nstocks)] = sol
            hvar[np.triu_indices(nstocks, 1)] \
                = hvar.T[np.triu_indices(nstocks, 1)]
            return hvar
    except RuntimeError:
        # warnings.warn('Could not find stationary variance!')
        return None
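The last two assignments in Example 3 are a common idiom: fill the lower triangle from a packed vector, then mirror it into the strict upper triangle. As a standalone sketch:

import numpy as np

def unpack_symmetric(packed, n):
    """Expand n*(n+1)//2 packed values into a full symmetric matrix."""
    h = np.zeros((n, n))
    h[np.tril_indices(n)] = packed
    h[np.triu_indices(n, 1)] = h.T[np.triu_indices(n, 1)]
    return h

print(unpack_symmetric(np.array([1.0, 2.0, 3.0]), 2))
# [[1. 2.]
#  [2. 3.]]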
Example 4: test_frozen
def test_frozen(self):
    # Test that the frozen and non-frozen inverse Wishart give the same
    # answers
    # Construct an arbitrary positive definite scale matrix
    dim = 4
    scale = np.diag(np.arange(dim) + 1)
    scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim - 1) // 2)
    scale = np.dot(scale.T, scale)
    # Construct a collection of positive definite matrices to test the PDF
    X = []
    for i in range(5):
        x = np.diag(np.arange(dim) + (i + 1)**2)
        x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim - 1) // 2)
        x = np.dot(x.T, x)
        X.append(x)
    X = np.array(X).T
    # Construct a 1D and 2D set of parameters
    parameters = [
        (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
        (10, scale, X)
    ]
    for (df, scale, x) in parameters:
        iw = invwishart(df, scale)
        assert_equal(iw.var(), invwishart.var(df, scale))
        assert_equal(iw.mean(), invwishart.mean(df, scale))
        assert_equal(iw.mode(), invwishart.mode(df, scale))
        assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
Example 5: amplitudes_to_cisdvec
def amplitudes_to_cisdvec(c0, c1, c2):
    nocc, nvir = c1.shape
    ooidx = numpy.tril_indices(nocc, -1)
    vvidx = numpy.tril_indices(nvir, -1)
    c2tril = lib.take_2d(c2.reshape(nocc**2, nvir**2),
                         ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])
    return numpy.hstack((c0, c1.ravel(), c2tril.ravel()))
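Example 5 converts pair indices into flat offsets: for a pair (i, j) from tril_indices(nocc, -1), the expression i*nocc + j addresses that pair in the reshaped (nocc**2, nvir**2) array. Assuming lib.take_2d(a, idx, idy) selects the submatrix a[np.ix_(idx, idy)] (as pyscf's helper does), the same slice in plain numpy would be:

import numpy as np

nocc, nvir = 3, 4
c2 = np.random.rand(nocc, nocc, nvir, nvir)
ooidx = np.tril_indices(nocc, -1)
vvidx = np.tril_indices(nvir, -1)
flat = c2.reshape(nocc**2, nvir**2)
c2tril = flat[np.ix_(ooidx[0]*nocc + ooidx[1], vvidx[0]*nvir + vvidx[1])]
# One row per unique occupied pair i > j, one column per unique virtual pair a > b
print(c2tril.shape)  # (3, 6)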
Example 6: test_tril_indices
def test_tril_indices():
    # indices without and with offset
    il1 = tril_indices(4)
    il2 = tril_indices(4, 2)
    a = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12],
                  [13, 14, 15, 16]])
    # indexing:
    assert_array_equal(a[il1],
                       np.array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
    # And for assigning values:
    a[il1] = -1
    assert_array_equal(a,
                       np.array([[-1, 2, 3, 4],
                                 [-1, -1, 7, 8],
                                 [-1, -1, -1, 12],
                                 [-1, -1, -1, -1]]))
    # These cover almost the whole array (two diagonals right of the main one):
    a[il2] = -10
    assert_array_equal(a,
                       np.array([[-10, -10, -10, 4],
                                 [-10, -10, -10, -10],
                                 [-10, -10, -10, -10],
                                 [-10, -10, -10, -10]]))
Example 7: full_to_unique
def full_to_unique(y_full, feedmap, feedmask=None):
    if feedmask is None:
        feedmask = np.ones(feedmap.shape, dtype=bool)
    # Conjugate the lower triangle before deduplicating redundant feed pairs
    tril = np.tril_indices(feedmap.shape[0])
    y_full[tril] = y_full[tril].conj()
    y_unique = y_full[np.where(feedmask)][np.unique(feedmap[np.where(feedmask)],
                                                    return_index=True)[1]]
    return y_unique
Example 8: unique_to_full
def unique_to_full(y_unique, feedmap, feedmask=None):
    y_full = y_unique[feedmap]
    # Restore conjugate symmetry in the lower triangle
    tril = np.tril_indices(feedmap.shape[0])
    y_full[tril] = y_full[tril].conj()
    if feedmask is not None:
        y_full[np.where(np.logical_not(feedmask))] = 0.0
    return y_full
Example 9: test_equivalence
def test_equivalence(self):
    """
    The Equivalence covariance structure can represent an
    exchangeable covariance structure.  Here we check that the
    results are identical using the two approaches.
    """
    np.random.seed(3424)
    endog = np.random.normal(size=20)
    exog = np.random.normal(size=(20, 2))
    exog[:, 0] = 1
    groups = np.kron(np.arange(5), np.ones(4))
    groups[12:] = 3  # Create unequal size groups
    # Set up an Equivalence covariance structure to mimic an
    # Exchangeable covariance structure.
    pairs = {}
    start = [0, 4, 8, 12]
    for k in range(4):
        pairs[k] = {}
        # Diagonal values (variance parameters)
        if k < 3:
            pairs[k][0] = (start[k] + np.r_[0, 1, 2, 3],
                           start[k] + np.r_[0, 1, 2, 3])
        else:
            pairs[k][0] = (start[k] + np.r_[0, 1, 2, 3, 4, 5, 6, 7],
                           start[k] + np.r_[0, 1, 2, 3, 4, 5, 6, 7])
        # Off-diagonal pairs (covariance parameters)
        a, b = np.tril_indices(4 if k < 3 else 8, -1)
        pairs[k][1] = (start[k] + a, start[k] + b)
    ex = sm.cov_struct.Exchangeable()
    model1 = sm.GEE(endog, exog, groups, cov_struct=ex)
    result1 = model1.fit()
    for return_cov in False, True:
        ec = sm.cov_struct.Equivalence(pairs, return_cov=return_cov)
        model2 = sm.GEE(endog, exog, groups, cov_struct=ec)
        result2 = model2.fit()
        # Use a larger atol/rtol for the correlation case since there
        # are some small differences in the results due to degree
        # of freedom differences.
        if return_cov:
            atol, rtol = 1e-6, 1e-6
        else:
            atol, rtol = 1e-3, 1e-3
        assert_allclose(result1.params, result2.params, atol=atol, rtol=rtol)
        assert_allclose(result1.bse, result2.bse, atol=atol, rtol=rtol)
        assert_allclose(result1.scale, result2.scale, atol=atol, rtol=rtol)
Example 10: tril_index_matrix
def tril_index_matrix(self):
    n = self.global_size
    num_tril_entries = self.num_tril_entries
    tril_index_matrix = np.zeros([n, n], dtype=int)
    tril_index_matrix[np.tril_indices(n)] = np.arange(num_tril_entries)
    # Reversing the (rows, cols) tuple addresses the mirrored upper triangle,
    # so each packed index appears in both symmetric positions
    tril_index_matrix[np.tril_indices(n)[::-1]] = np.arange(num_tril_entries)
    return tril_index_matrix
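The index matrix built in Example 10 lets a packed parameter vector be expanded into a full symmetric matrix with a single fancy-indexing step. A small sketch of how such a matrix is typically used:

import numpy as np

n = 3
idx = np.zeros((n, n), dtype=int)
idx[np.tril_indices(n)] = np.arange(n * (n + 1) // 2)
idx[np.tril_indices(n)[::-1]] = np.arange(n * (n + 1) // 2)

packed = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
print(packed[idx])
# [[1. 2. 4.]
#  [2. 3. 5.]
#  [4. 5. 6.]]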
Example 11: scrape_args
def scrape_args(self, records, scale=1, guide_tree=None, niters=10, keep_topology=False):
    # local lists
    distances = []
    variances = []
    headers = []
    for rec in records:
        distances.append(rec.parameters.partitions.distances)
        variances.append(rec.parameters.partitions.variances)
        headers.append(rec.get_names())
    num_matrices = len(records)
    label_set = reduce(lambda x, y: x.union(y), (set(l) for l in headers))
    labels_len = len(label_set)
    # labels string can be built straight away
    labels_string = '{0}\n{1}\n'.format(labels_len, ' '.join(label_set))
    # distvar and genome_map need to be built up
    distvar_list = [str(num_matrices)]
    genome_map_list = ['{0} {1}'.format(num_matrices, labels_len)]
    # build up lists to turn into strings
    for i in range(num_matrices):
        labels = headers[i]
        dim = len(labels)
        dmatrix = np.array(distances[i])
        vmatrix = np.array(variances[i])
        matrix = np.zeros(dmatrix.shape)
        # Distances go in the upper triangle, variances in the lower triangle
        matrix[np.triu_indices(len(dmatrix), 1)] = dmatrix[np.triu_indices(len(dmatrix), 1)]
        matrix[np.tril_indices(len(vmatrix), -1)] = vmatrix[np.tril_indices(len(vmatrix), -1)]
        if scale:
            matrix[np.triu_indices(dim, 1)] *= scale
            matrix[np.tril_indices(dim, -1)] *= scale * scale
        if isinstance(matrix, np.ndarray):
            matrix_string = '\n'.join([' '.join(str(x) for x in row)
                                       for row in matrix]) + '\n'
        else:
            matrix_string = matrix
        distvar_list.append('{0} {0} {1}\n{2}'.format(dim, i + 1,
                                                      matrix_string))
        genome_map_entry = ' '.join((str(labels.index(lab) + 1)
                                     if lab in labels else '-1')
                                    for lab in label_set)
        genome_map_list.append(genome_map_entry)
    distvar_string = '\n'.join(distvar_list)
    genome_map_string = '\n'.join(genome_map_list)
    if guide_tree is None:
        guide_tree = Tree.new_iterative_rtree(labels_len, names=label_set, rooted=True)
    tree_string = guide_tree.scale(scale).newick.replace('\'', '')
    return distvar_string, genome_map_string, labels_string, tree_string, niters, keep_topology
Example 12: test_cl_ldl
def test_cl_ldl(AA):
    """Test the CL implementation of the LDL algorithm.

    This tests a series (cl_size) of matrices against the Python implementation.
    """
    # Convert to single float
    AA = AA.astype(DTYPE)
    # First calculate the Python based values for each matrix in AA
    py_ldl_D = np.empty((AA.shape[0], AA.shape[2]), dtype=AA.dtype)
    py_ldl_L = np.empty(AA.shape, dtype=AA.dtype)
    for i in range(AA.shape[2]):
        py_ldl_D[..., i], py_ldl_L[..., i] = ldl(AA[..., i])
    # Setup CL context
    import pyopencl as cl
    from pycllp.ldl import cl_krnl_ldl
    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)
    # Result arrays: L is stored as a packed lower triangle per matrix
    m, n, cl_size = AA.shape
    L = np.empty(cl_size * m * (m + 1) // 2, dtype=DTYPE)
    D = np.empty(cl_size * m, dtype=DTYPE)
    mf = cl.mem_flags
    A_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=AA)
    # Create and compile kernel
    prg = cl_krnl_ldl(ctx)
    L_g = cl.Buffer(ctx, mf.READ_WRITE, L.nbytes)
    D_g = cl.Buffer(ctx, mf.READ_WRITE, D.nbytes)
    # Test normal LDL (unmodified)
    prg.ldl(queue, (cl_size,), None, np.int32(m), np.int32(n), A_g, L_g, D_g)
    cl.enqueue_copy(queue, L, L_g)
    cl.enqueue_copy(queue, D, D_g)
    # Compare each matrix decomposition with the python equivalent.
    for i in range(cl_size):
        np.testing.assert_allclose(py_ldl_D[..., i], D[i::cl_size], rtol=1e-6, atol=1e-7)
        np.testing.assert_allclose(py_ldl_L[..., i][np.tril_indices(m)], L[i::cl_size], rtol=1e-6, atol=1e-7)
    # Now test the modified algorithm ...
    beta = np.sqrt(np.amax(AA))
    prg.modified_ldl(queue, (cl_size,), None, np.int32(m), np.int32(n), A_g, L_g, D_g,
                     DTYPE(beta), DTYPE(1e-6))
    cl.enqueue_copy(queue, L, L_g)
    cl.enqueue_copy(queue, D, D_g)
    # Compare each matrix decomposition with the python equivalent.
    for i in range(cl_size):
        np.testing.assert_allclose(py_ldl_D[..., i], D[i::cl_size], rtol=1e-6, atol=1e-7)
        np.testing.assert_allclose(py_ldl_L[..., i][np.tril_indices(m)], L[i::cl_size], rtol=1e-6, atol=1e-7)
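For reference, a minimal dense LDL' decomposition of the kind the test compares against might look as follows. This is a sketch, not the pycllp implementation, and it assumes a symmetric positive definite input:

import numpy as np

def ldl(A):
    """Return (d, L) with A = L @ diag(d) @ L.T, L unit lower triangular."""
    A = np.asarray(A, dtype=float)
    m = A.shape[0]
    L = np.eye(m)
    d = np.zeros(m)
    for j in range(m):
        d[j] = A[j, j] - L[j, :j]**2 @ d[:j]
        for i in range(j + 1, m):
            L[i, j] = (A[i, j] - L[i, :j] * L[j, :j] @ d[:j]) / d[j]
    return d, L

A = np.array([[4.0, 2.0], [2.0, 3.0]])
d, L = ldl(A)
np.testing.assert_allclose(L @ np.diag(d) @ L.T, A)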
Example 13: get_grad_tril
def get_grad_tril(mo_coeff_kpts, mo_occ_kpts, fock):
    if is_khf:
        grad_kpts = []
        for k, mo in enumerate(mo_coeff_kpts):
            f_mo = reduce(numpy.dot, (mo.T.conj(), fock[k], mo))
            nmo = f_mo.shape[0]
            grad_kpts.append(f_mo[numpy.tril_indices(nmo, -1)])
        return numpy.hstack(grad_kpts)
    else:
        f_mo = reduce(numpy.dot, (mo_coeff_kpts.T.conj(), fock, mo_coeff_kpts))
        nmo = f_mo.shape[0]
        return f_mo[numpy.tril_indices(nmo, -1)]
Example 14: impute_missing_bins
def impute_missing_bins(hic_matrix, regions=None, per_chromosome=True, stat=np.ma.mean):
    """
    Impute missing contacts in a Hi-C matrix.

    For inter-chromosomal data, uses the mean of all inter-chromosomal contacts;
    for intra-chromosomal data, uses the mean of intra-chromosomal counts at the
    corresponding diagonal.

    :param hic_matrix: A square numpy array
    :param regions: A list of :class:`~GenomicRegion` objects - if omitted, will create a dummy list
    :param per_chromosome: Do imputation on a per-chromosome basis (recommended)
    :param stat: The aggregation statistic to be used for imputation, defaults to the mean.
    """
    if regions is None:
        regions = []
        for i in range(hic_matrix.shape[0]):
            regions.append(GenomicRegion(chromosome='', start=i, end=i))
    chr_bins = dict()
    for i, region in enumerate(regions):
        if region.chromosome not in chr_bins:
            chr_bins[region.chromosome] = [i, i]
        else:
            chr_bins[region.chromosome][1] = i
    n = len(regions)
    if not hasattr(hic_matrix, "mask"):
        hic_matrix = masked_matrix(hic_matrix)
    imputed = hic_matrix.copy()
    if per_chromosome:
        for c_start, c_end in chr_bins.values():
            # Correct intra-chromosomal contacts by the mean contact count at each diagonal
            for i in range(c_end - c_start):
                ind = kth_diag_indices(c_end - c_start, -i)
                diag = imputed[c_start:c_end, c_start:c_end][ind]
                diag[diag.mask] = stat(diag)
                imputed[c_start:c_end, c_start:c_end][ind] = diag
            # Correct inter-chromosomal contacts by the mean of all contact counts
            # between each pair of chromosomes
            for other_start, other_end in chr_bins.values():
                # Only correct upper triangle
                if other_start <= c_start:
                    continue
                inter = imputed[c_start:c_end, other_start:other_end]
                inter[inter.mask] = stat(inter)
                imputed[c_start:c_end, other_start:other_end] = inter
    else:
        for i in range(n):
            diag = imputed[kth_diag_indices(n, -i)]
            diag[diag.mask] = stat(diag)
            imputed[kth_diag_indices(n, -i)] = diag
    # Copy the upper triangle to the lower triangle to restore symmetry
    imputed[np.tril_indices(n)] = imputed.T[np.tril_indices(n)]
    return imputed
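Example 14 relies on a kth_diag_indices helper that is not shown. A plausible sketch of it, inferred from how it is called rather than taken from the original code:

import numpy as np

def kth_diag_indices(n, k):
    """Indices of the k-th diagonal of an n x n array (k < 0 is below the main diagonal)."""
    rows, cols = np.diag_indices(n)
    if k < 0:
        return rows[-k:], cols[:k]
    elif k > 0:
        return rows[:-k], cols[k:]
    return rows, cols

a = np.arange(16).reshape(4, 4)
print(a[kth_diag_indices(4, -1)])  # [ 4  9 14]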
Example 15: structure_function
def structure_function(self, bins):
    """
    Compute the structure function of the light curve at the given time lags.
    """
    # All pairwise time lags and magnitude differences; the strict lower
    # triangle counts each pair once and excludes self-pairs
    dt = np.subtract.outer(self.t, self.t)[np.tril_indices(self.t.shape[0], k=-1)]
    dm = np.subtract.outer(self.y, self.y)[np.tril_indices(self.y.shape[0], k=-1)]
    sqrsum, bins, _ = binned_statistic(dt, dm**2, bins=bins, statistic='sum')
    n, _, _ = binned_statistic(dt, dm**2, bins=bins, statistic='count')
    SF = np.sqrt(sqrsum / n)
    lags = 0.5 * (bins[1:] + bins[:-1])
    return lags, SF
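A quick way to exercise the pairwise-lag construction outside the class (a standalone sketch assuming scipy is available; the attributes self.t and self.y become plain arrays here):

import numpy as np
from scipy.stats import binned_statistic

rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0, 100, 200))
y = np.cumsum(rng.normal(size=200))      # a random-walk "light curve"

dt = np.subtract.outer(t, t)[np.tril_indices(t.size, k=-1)]
dm = np.subtract.outer(y, y)[np.tril_indices(y.size, k=-1)]
sqrsum, edges, _ = binned_statistic(dt, dm**2, bins=10, statistic='sum')
counts, _, _ = binned_statistic(dt, dm**2, bins=10, statistic='count')
lags = 0.5 * (edges[1:] + edges[:-1])
SF = np.sqrt(sqrsum / counts)            # RMS magnitude difference per lag bin
print(np.c_[lags, SF])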