This article collects typical usage examples of the numpy.triu_indices function in Python, excerpted from open-source projects. If you have been wondering what exactly triu_indices does, how to call it, or what it looks like in real code, the examples here should help.
Fifteen code examples of triu_indices are shown below, ordered by popularity.
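As a quick orientation before the excerpts: numpy.triu_indices(n, k=0) returns a pair of index arrays addressing the upper triangle of an (n, n) array, starting k diagonals above the main one. The excerpts keep their projects' own conventions (numpy imported as np or numpy, scipy.stats as stats, plus project-specific helpers), so they are not standalone. Here is a minimal, self-contained sketch of the function itself; the variable names are illustrative only:

import numpy as np

# Row/column indices of the strict upper triangle (k=1 skips the main diagonal)
rows, cols = np.triu_indices(4, k=1)
print(rows)  # [0 0 0 1 1 2]
print(cols)  # [1 2 3 2 3 3]

# The recurring pattern in the examples below: extract the unique
# off-diagonal entries of a symmetric matrix as a flat 1-D array.
a = np.arange(16).reshape(4, 4)
sym = (a + a.T) / 2.0                   # build a symmetric 4x4 matrix
upper = sym[np.triu_indices(4, k=1)]    # the 6 entries above the diagonal
print(upper)                            # [ 2.5  5.   7.5  7.5 10.  12.5]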
Example 1: dynamically_bin
def dynamically_bin(hic1, hic2, chrom, binbounds):
    unbinned1, map1 = hic1.cis_heatmap(chrom, start=binbounds[0, 0], stop=binbounds[-1, 1], datatype='fend',
                                       arraytype='full', returnmapping=True)
    unbinned2, map2 = hic2.cis_heatmap(chrom, start=binbounds[0, 0], stop=binbounds[-1, 1], datatype='fend',
                                       arraytype='full', returnmapping=True)
    map1[:, 2] = (map1[:, 0] + map1[:, 1])
    map2[:, 2] = (map2[:, 0] + map2[:, 1])
    allmap = numpy.vstack((map1, map2))
    allmap = allmap[numpy.argsort(allmap[:, 2]), :]
    indices1 = numpy.searchsorted(allmap[:, 2], map1[:, 2])
    indices1_1 = (indices1.reshape(-1, 1) * allmap.shape[0] + indices1.reshape(1, -1)).ravel()
    indices2 = numpy.searchsorted(allmap[:, 2], map2[:, 2])
    indices2_1 = (indices2.reshape(-1, 1) * allmap.shape[0] + indices2.reshape(1, -1)).ravel()
    unbinned = numpy.zeros((allmap.shape[0], allmap.shape[0], 2), dtype=numpy.float32)
    unbinned[:, :, 0] += numpy.bincount(indices1_1, minlength=allmap.shape[0] ** 2,
                                        weights=unbinned1[:, :, 0].ravel()).reshape(allmap.shape[0], -1)
    unbinned[:, :, 1] += numpy.bincount(indices1_1, minlength=allmap.shape[0] ** 2,
                                        weights=unbinned1[:, :, 1].ravel()).reshape(allmap.shape[0], -1)
    unbinned[:, :, 0] += numpy.bincount(indices2_1, minlength=allmap.shape[0] ** 2,
                                        weights=unbinned2[:, :, 0].ravel()).reshape(allmap.shape[0], -1)
    unbinned[:, :, 1] += numpy.bincount(indices2_1, minlength=allmap.shape[0] ** 2,
                                        weights=unbinned2[:, :, 1].ravel()).reshape(allmap.shape[0], -1)
    # keep only the unique (strictly upper-triangular) bin pairs
    indices = numpy.triu_indices(allmap.shape[0], 1)
    unbinned = unbinned[indices[0], indices[1], :]
    binned, binmap = hic1.cis_heatmap(chrom, binbounds=binbounds, datatype='fend', arraytype='full',
                                      returnmapping=True)
    binned += hic2.cis_heatmap(chrom, binbounds=binbounds, datatype='fend', arraytype='full')
    indices = numpy.triu_indices(binbounds.shape[0], 1)
    upper = binned[indices[0], indices[1], :]
    hifive.hic_binning.dynamically_bin_cis_array(unbinned, allmap, upper, binmap,
                                                 expansion_binsize=0, minobservations=25)
    # write the dynamically binned values back symmetrically
    binned[indices[0], indices[1], :] = upper
    binned[indices[1], indices[0], :] = upper
    return binned
Example 2: mvn_msr
def mvn_msr(self, corrDS=None, abstol=1e-12, reltol=1e-12, intLb=-10, intUb=10):
    systype = self.systype
    beta = self.beta
    nls = len(self.comps)
    if corrDS is None:
        correl = self.syscorrDS[np.triu_indices(nls, 1)]
    else:
        correl = corrDS[np.triu_indices(nls, 1)]
    if corrDS is None:
        corrDS = self.syscorrDS
    i = 1
    n = 10000
    syspf0 = 0.0
    dpf = 1.0
    # while i != 0:
    #     n += 10000
    #     v, res, i = stats.mvn.mvndst(intLb*np.ones(nls), beta, np.zeros(nls, dtype=int), correl,
    #                                  [nls*n, 1e-12, 1e-12])
    while i != 0:
        n += 10000
        res, i = stats.mvn.mvnun(-10 * np.ones(nls), beta, np.zeros(nls), corrDS, [nls * n, abstol, reltol])
        # if abs(res-res1)/(0.5*(res+res1)) > 1e-3:
        #     print('warning: abnormal difference between mvnun and mvndst results')
    if systype.lower() == "series":
        syspf = 1.0 - res
        sysbeta = -stats.norm.ppf(syspf)
        results = ReliabilityResults(sysbeta, syspf)
    elif systype.lower() == "parallel":
        syspf = res
        sysbeta = -stats.norm.ppf(syspf)
        results = ReliabilityResults(sysbeta, syspf)
    else:
        print("mvn_msr only supports series or parallel system")
        sys.exit(0)
    return results
Example 3: normalization
def normalization(self):
    """
    Normalize the equilibrium steady state correlations according to
    Eq 76 in Lorenzo's writeup.
    """
    N = self.latsize
    # First disconnect
    self.disconnect(self.steady_state)
    norm_1 = N + np.sum(self.steady_state[2*N:3*N])
    sxxpsyy = self.steady_state[3*N:].reshape(3, 3, N, N)[0, 0, :, :] + \
        self.steady_state[3*N:].reshape(3, 3, N, N)[1, 1, :, :]
    sxymsyx = self.steady_state[3*N:].reshape(3, 3, N, N)[0, 1, :, :] - \
        self.steady_state[3*N:].reshape(3, 3, N, N)[1, 0, :, :]
    norms = []
    for kvec in self.kvecs:
        argmat = np.zeros((N, N))
        for (m, n) in combinations(np.arange(N), 2):
            argmat[m, n] = kvec.dot(self.atoms[m].coords - self.atoms[n].coords)
        norm_2 = np.sum(
            np.cos(argmat[np.triu_indices(N, k=1)]) *
            sxxpsyy[np.triu_indices(N, k=1)] +
            np.sin(argmat[np.triu_indices(N, k=1)]) *
            sxymsyx[np.triu_indices(N, k=1)])
        norms.append(0.5 * (norm_1 + norm_2))
    # Reconnect before exit
    self.reconnect(self.steady_state)
    return np.array(norms).flatten()
Example 4: get_query_clusters
def get_query_clusters(points, k):
    '''
    points [n, m] - array of n points of dimension m (encoded queries)
    '''
    # normalize input
    points = normalize(points.astype(np.float))
    # get similarity matrix (cosine similarity of the normalized rows)
    dist = points.dot(points.T)
    # initialize variables
    n_pt = len(points)
    cluster_old, cluster_new = np.ones(n_pt), np.zeros(n_pt)
    # special case, no clustering
    if k == 1 or n_pt == 1:
        # NOTE: this early return yields only (labels, score), no centroids
        return np.zeros(n_pt), 1 if n_pt == 1 else np.mean(dist[np.triu_indices(n_pt, k=1)])
    # randomly choose k starting centroids
    centroids = points[np.random.permutation(n_pt)[:k]]
    while not np.array_equal(cluster_old, cluster_new):
        cluster_old = cluster_new
        # get cluster index for each point
        cluster_new = np.argmax(points.dot(centroids.T), axis=1)
        # get new centroids, and within-cluster mean distance/similarity
        centroids, in_dist = [], []
        for c in np.unique(cluster_new):
            pid = cluster_new == c
            # new centroid: the point with minimum total distance (maximum
            # total similarity) to the rest of the points in the cluster
            cid = np.argmax(np.sum(dist[np.ix_(pid, pid)], axis=1))
            centroids.append(points[pid][cid])
            in_dist.append(1 if sum(pid) == 1 else np.mean(dist[np.ix_(pid, pid)][np.triu_indices(sum(pid), k=1)]))
        centroids = np.array(centroids)
        # traditional way to get new centroids; does not work well for cosine distance
        # centroids = normalize([np.mean(points[cluster_new==c], axis=0) for c in np.unique(cluster_new)])
    return cluster_new, np.mean(in_dist), centroids
Example 5: example_one
def example_one():
    """
    Generates a set of sample data for the
    examples page of the hetaira web tool.
    """
    np.random.seed(5)
    ids = ['Pr'] + list(ascii_lowercase) + ['Sp']
    # make some data where all activities are the same
    data = np.ones((26, 26))
    # make some random activities to pull from
    y = np.random.uniform(1000, 2500, (26, 26))
    # this will replace the ones with numbers from the uniform
    # distribution, increasing by one at each column,
    # using the upper triangular matrix
    data[np.triu_indices(26)] = y[np.triu_indices(26)]
    # stack a perfectly promiscuous and a perfectly (almost)
    # specific column on either side of the data
    data = np.hstack((np.full((26, 1), 1e-10), data, np.ones((26, 1))))
    data[0, 0] = 100
    descriptors = None
    example = Promiscuity(ids, np.fliplr(data), descriptors)
    return example.hetaira_results()
Example 6: test_syrk
def test_syrk(k, n, dtype, rng):
    tols = tolerances[dtype]
    A = np.zeros((n, k), dtype=dtype)
    C = np.zeros((n, n), dtype=dtype)
    D = np.zeros((k, k), dtype=dtype)
    A[...] = rng.uniform(-1, 1, size=A.shape)
    C[...] = rng.uniform(-1, 1, size=C.shape)
    D[...] = rng.uniform(-1, 1, size=D.shape)
    clA, clC, clD = map(to_ocl, [A, C, D])
    a = 0.9
    b = 0.5
    try:
        blas.setup()
        # normal syrk: only the upper triangle of C is guaranteed
        up = np.triu_indices(n)
        event = blas.syrk(queue, clA, clC, alpha=a, beta=b)
        assert np.allclose(clC.get()[up], (a*np.dot(A, A.T) + b*C)[up], **tols)
        assert isinstance(event, cl.Event)
        # transposed syrk
        up = np.triu_indices(k)
        blas.syrk(queue, clA, clD, transA=True, alpha=a, beta=b)
        assert np.allclose(clD.get()[up], (a*np.dot(A.T, A) + b*D)[up], **tols)
    finally:
        blas.teardown()
Example 7: loss_function
def loss_function(mapping12):
    """Computes the loss function of a given mapping,
    using a graph kernel on the two sets of distances.
    """
    global tractography1, tractography2
    global dm1_all, dm1_all_small, dm2_all, dm2_all_small
    global kdt1, kdt2
    k = 10
    radius = 150
    loss = 0.0
    for sid in np.arange(len(tractography1)):
        # idx1 = kdt1.query_radius(dm1_all_small[sid], radius)[0]
        idx1 = kdt1.query(dm1_all_small[sid], k)[1][0]
        dm_small1 = dm1_all[idx1][:, idx1]
        e1 = dm_small1[np.triu_indices(dm_small1.shape[0], 1)]
        # idx2 = kdt2.query_radius(dm2_all_small[mapping12[sid]], radius)[0]
        idx2 = kdt2.query(dm2_all_small[mapping12[sid]], k)[1][0]
        dm_small2 = dm2_all[idx2][:, idx2]
        e2 = dm_small2[np.triu_indices(dm_small2.shape[0], 1)]
        # loss = loss + Graph_KN(e1, e2, weight=1., num_bins=128)
        # similarity = similarity + Pyramid_KN(e1, e2, weight=1., num_bins=128)
        loss = loss + Pyramid_KN(e1, e2, weight=1., num_bins=128)
    return loss
Example 8: sim_matrix_within_group_means
def sim_matrix_within_group_means(matrix, n1):
    """
    Computes the mean of the upper triangle (k=1) for the blocks
    (0, n1-1)x(0, n1-1) and (n1, N-1)x(n1, N-1) of an NxN similarity
    matrix, and their difference (for convenience).

    Parameters
    ----------
    matrix : 2D symmetric numpy array
        the first n1 indices along the zeroth axis should correspond
        to the members of the first group; ``matrix[i][j]`` should
        correspond to the similarity between elements i and j
    n1 : int
        the number of elements in the first group

    Returns
    -------
    mean1 : float
        the average similarity between members of the first group
    mean2 : float
        the average similarity between members of the second group
    mean1 - mean2 : float
        just mean1 - mean2 (as a convenience for stat. testing)
    """
    n2 = matrix.shape[0] - n1
    indices1 = np.triu_indices(n1, k=1)
    indices2base = np.triu_indices(n2, k=1)
    indices2I = indices2base[0].copy() + n1
    indices2J = indices2base[1].copy() + n1
    indices2 = (indices2I, indices2J)
    mean1 = np.average(matrix[indices1])
    mean2 = np.average(matrix[indices2])
    return mean1, mean2, mean1 - mean2
Example 9: test_triu_indices
def test_triu_indices(self):
    iu1 = triu_indices(4)
    iu2 = triu_indices(4, 2)
    a = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12],
                  [13, 14, 15, 16]])
    # Both for indexing:
    yield (assert_array_equal, a[iu1],
           array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
    # And for assigning values:
    a[iu1] = -1
    yield (assert_array_equal, a,
           array([[-1, -1, -1, -1],
                  [ 5, -1, -1, -1],
                  [ 9, 10, -1, -1],
                  [13, 14, 15, -1]]))
    # These cover almost the whole array (two diagonals right of the main one):
    a[iu2] = -10
    yield (assert_array_equal, a,
           array([[ -1,  -1, -10, -10],
                  [  5,  -1,  -1, -10],
                  [  9,  10,  -1,  -1],
                  [ 13,  14,  15,  -1]]))
Example 10: calcH2Continuous
def calcH2Continuous(XXT, phe, keepArr, prev, h2coeff):
    t = stats.norm(0, 1).isf(prev)
    phit = stats.norm(0, 1).pdf(t)
    K1 = 1 - prev
    K2 = 1 - K1
    P = np.sum(phe < t) / float(phe.shape[0])
    P2 = 1.0
    P1 = K2*P2*P / (K1*(1-P))
    R = P2 / P1
    XXT = XXT[np.ix_(keepArr, keepArr)]
    phe = phe[keepArr]
    xCoeff = (((R-1)*phit*t + K1 + R*K2)**2 * (K1+R*K2)**2 - ((R-1)*phit)**4) / (K1 + R*K2)**4
    x = (xCoeff * h2coeff) * XXT
    pheMean = 0
    pheVar = 1
    y = np.outer((phe-pheMean) / np.sqrt(pheVar), (phe-pheMean) / np.sqrt(pheVar))
    y -= ((R-1)*phit / (K1+R*K2))**2
    # regress over the unique off-diagonal pairs only
    y = y[np.triu_indices(y.shape[0], 1)]
    x = x[np.triu_indices(x.shape[0], 1)]
    slope, intercept, rValue, pValue, stdErr = stats.linregress(x, y)
    return slope
Example 11: __init__
def __init__(self, endmembers, alphas, energy_interaction, volume_interaction=None, entropy_interaction=None):
    self.n_endmembers = len(endmembers)
    # Create array of van Laar parameters
    self.alphas = np.array(alphas)
    # Create 2D arrays of interaction parameters
    self.We = np.triu(2. / (self.alphas[:, np.newaxis] + self.alphas), 1)
    self.We[np.triu_indices(self.n_endmembers, 1)] *= np.array([i for row in energy_interaction
                                                                for i in row])
    if entropy_interaction is not None:
        self.Ws = np.triu(2. / (self.alphas[:, np.newaxis] + self.alphas), 1)
        self.Ws[np.triu_indices(self.n_endmembers, 1)] *= np.array([i for row in entropy_interaction
                                                                    for i in row])
    else:
        self.Ws = np.zeros((self.n_endmembers, self.n_endmembers))
    if volume_interaction is not None:
        self.Wv = np.triu(2. / (self.alphas[:, np.newaxis] + self.alphas), 1)
        self.Wv[np.triu_indices(self.n_endmembers, 1)] *= np.array([i for row in volume_interaction
                                                                    for i in row])
    else:
        self.Wv = np.zeros((self.n_endmembers, self.n_endmembers))
    # initialize ideal solution model
    IdealSolution.__init__(self, endmembers)
Example 12: scoring2B_behavior
def scoring2B_behavior():
    t_clusters = np.zeros((600, 3))
    t_clusters[0:200, 0] = 1
    t_clusters[200:400, 1] = 1
    t_clusters[400:, 2] = 1
    t_ccm = np.dot(t_clusters, t_clusters.T)
    n_uniq = len(np.triu_indices(t_ccm.shape[0], k=1)[0])
    res = []
    concentrations = [1000, 100, 50, 25, 10, 5, 3, 1]
    for c in concentrations:
        for i in range(50):
            ccm = np.copy(t_ccm)
            # perturb the unique upper-triangular entries with beta noise
            ccm[np.triu_indices(t_ccm.shape[0], k=1)] -= np.random.beta(1, c, n_uniq)
            # ccm[np.tril_indices(t_ccm.shape[0], k=-1)] = ccm[np.triu_indices(t_ccm.shape[0], k=1)]
            ccm[np.tril_indices(t_ccm.shape[0], k=-1)] = 0
            ccm = ccm + ccm.T
            np.fill_diagonal(ccm, 1)
            ccm = np.abs(ccm)
            res.append([c, calculate2(ccm, t_ccm)])
    res = ['\t'.join(map(str, x)) for x in res]
    with open('scoring2B_beta.tsv', 'w') as f:
        f.write('\n'.join(res))
Example 13: calcH2Binary
def calcH2Binary(XXT, phe, probs, thresholds, keepArr, prev, h2coeff):
    K = prev
    P = np.sum(phe > 0) / float(phe.shape[0])
    XXT = XXT[np.ix_(keepArr, keepArr)]
    phe = phe[keepArr]
    if thresholds is None:
        t = stats.norm(0, 1).isf(K)
        phit = stats.norm(0, 1).pdf(t)
        xCoeff = P*(1-P) / (K**2 * (1-K)**2) * phit**2 * h2coeff
        y = np.outer((phe-P) / np.sqrt(P*(1-P)), (phe-P) / np.sqrt(P*(1-P)))
        x = xCoeff * XXT
    else:
        probs = probs[keepArr]
        thresholds = thresholds[keepArr]
        Ki = K*(1-P) / (P*(1-K)) * probs / (1 + K*(1-P) / (P*(1-K))*probs - probs)
        phit = stats.norm(0, 1).pdf(thresholds)
        probsInvOuter = np.outer(probs*(1-probs), probs*(1-probs))
        y = np.outer(phe-probs, phe-probs) / np.sqrt(probsInvOuter)
        sumProbs = np.tile(np.column_stack(probs).T, (1, probs.shape[0])) + np.tile(probs, (probs.shape[0], 1))
        Atag0 = np.outer(phit, phit) * (1 - (sumProbs)*(P-K)/(P*(1-K)) + np.outer(probs, probs)*(((P-K)/(P*(1-K)))**2)) / np.sqrt(probsInvOuter)
        B0 = np.outer(Ki + (1-Ki)*(K*(1-P))/(P*(1-K)), Ki + (1-Ki)*(K*(1-P))/(P*(1-K)))
        x = (Atag0 / B0 * h2coeff) * XXT
    y = y[np.triu_indices(y.shape[0], 1)]
    x = x[np.triu_indices(x.shape[0], 1)]
    slope, intercept, rValue, pValue, stdErr = stats.linregress(x, y)
    return slope
Example 14: calcH2Continuous_twotails
def calcH2Continuous_twotails(XXT, phe, keepArr, prev, h2coeff):
    print('computing h2 for a two-tails ascertained study...')
    XXT = XXT[np.ix_(keepArr, keepArr)]
    phe = phe[keepArr]
    t1 = stats.norm(0, 1).ppf(prev)
    t2 = stats.norm(0, 1).isf(prev)
    phit1 = stats.norm(0, 1).pdf(t1)
    phit2 = stats.norm(0, 1).pdf(t2)
    K1 = prev
    K2 = prev
    xCoeff = ((phit2*t2 - phit1*t1 + K1 + K2)**2 * (K1+K2)**2 - (phit2-phit1)**4) / (K1 + K2)**4
    intersect = ((phit2-phit1) / (K1+K2))**2
    pheMean = 0
    pheVar = 1
    x = (xCoeff * h2coeff) * XXT
    y = np.outer((phe-pheMean)/np.sqrt(pheVar), (phe-pheMean)/np.sqrt(pheVar))
    y -= intersect
    y = y[np.triu_indices(y.shape[0], 1)]
    x = x[np.triu_indices(x.shape[0], 1)]
    slope, intercept, rValue, pValue, stdErr = stats.linregress(x, y)
    return slope
Example 15: loss_function2
def loss_function2(mapping12):
    """Computes the loss function of a given mapping,
    using a graph kernel on the two sets of distances.
    """
    global dis_1, dis_2
    global kdt_1, kdt_2
    global dm1, dm2, dm1_all, dm2_all
    k = 15
    radius = 100
    similarity = 0.0
    # pro_1 is presumably another module-level global in the source project
    for sid in np.arange(len(pro_1)):
        idx1 = kdt_1.query_radius(dm1[sid], radius)[0]
        # idx1 = kdt_1.query(dm1[sid], k)[1][0]
        dm_small1 = dm1_all[idx1][:, idx1]
        e1 = dm_small1[np.triu_indices(dm_small1.shape[0], 1)]
        idx2 = kdt_2.query_radius(dis_2[mapping12[sid]], radius)[0]
        # idx2 = kdt_2.query(dis_2[mapping12[sid]], k)[1][0]
        dm_small2 = dm2_all[idx2][:, idx2]
        e2 = dm_small2[np.triu_indices(dm_small2.shape[0], 1)]
        # loss = loss + Graph_KN(e1, e2, weight=1., num_bins=128)
        similarity = similarity + Pyramid_KN(e1, e2, weight=1., num_bins=128)
    return 1. / similarity