

Python numpy.diag_indices_from Function Code Examples

This article collects typical usage examples of the Python function numpy.diag_indices_from. If you are unsure what diag_indices_from does or how to use it, the curated code examples below should help.


Fifteen code examples of diag_indices_from follow, ordered roughly by popularity.
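
Before the project excerpts, here is a minimal standalone sketch (not taken from any of the projects below) of what the function does: np.diag_indices_from(a) returns the index arrays of the main diagonal of a square array a, so the diagonal can be read or written in place.

import numpy as np

a = np.arange(16).reshape(4, 4)
rows, cols = np.diag_indices_from(a)   # same as np.diag_indices(4) for a 4x4 array
print(a[rows, cols])                   # [ 0  5 10 15] -- the main diagonal
a[np.diag_indices_from(a)] = 0         # zero the diagonal in place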

Example 1: moments

 def moments(self):
     """Calculate covariance and correlation matrices,
     trait, genotipic and ontogenetic means"""
     zs = np.array([ind["z"] for ind in self.pop])
     xs = np.array([ind["x"] for ind in self.pop])
     ys = np.array([ind["y"] for ind in self.pop])
     bs = np.array([ind["b"] for ind in self.pop])
     ymean = ys.mean(axis=0)
     zmean = zs.mean(axis=0)
     xmean = xs.mean(axis=0)
     bmean = bs.mean(axis=0)
     phenotipic = np.cov(zs, rowvar=0, bias=1)
     genetic = np.cov(xs, rowvar=0, bias=1)
     heridability = genetic[np.diag_indices_from(genetic)] / phenotipic[np.diag_indices_from(phenotipic)]
     corr_phenotipic = np.corrcoef(zs, rowvar=0, bias=1)
     corr_genetic = np.corrcoef(xs, rowvar=0, bias=1)
     avgP = avg_ratio(corr_phenotipic, self.modules)
     avgG = avg_ratio(corr_genetic, self.modules)
     return {
         "y.mean": ymean,
         "b.mean": bmean,
         "z.mean": zmean,
         "x.mean": xmean,
         "P": phenotipic,
         "G": genetic,
         "h2": heridability,
         "avgP": avgP,
         "avgG": avgG,
         "corrP": corr_phenotipic,
         "corrG": corr_genetic,
     }
Author: lem-usp, Project: evomod, Lines: 32, Source: pop.py
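
The h2 line above works because indexing a square matrix with its own diagonal indices returns its diagonal, here the per-trait variances. A minimal sketch of just that step, with made-up data (the arrays and sizes are illustrative, not from the evomod project):

import numpy as np

rng = np.random.default_rng(0)
zs = rng.normal(size=(100, 5))                      # hypothetical phenotypic values
xs = zs + rng.normal(scale=0.5, size=(100, 5))      # hypothetical genetic values

P = np.cov(zs, rowvar=False, bias=True)
G = np.cov(xs, rowvar=False, bias=True)
# G[np.diag_indices_from(G)] is the vector of variances on G's diagonal,
# identical to np.diag(G); the element-wise ratio gives per-trait heritabilities.
h2 = G[np.diag_indices_from(G)] / P[np.diag_indices_from(P)]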

Example 2: transform_covars_grad

 def transform_covars_grad(self, internal_grad):
     grad = np.empty((self.num_latent, self.get_covar_size()), dtype=np.float32)
     for j in range(self.num_latent):
         tmp = self._theano_transform_covars_grad(internal_grad[0, j], self.covars_cholesky[j])
         tmp[np.diag_indices_from(tmp)] *= self.covars_cholesky[j][np.diag_indices_from(tmp)]
         grad[j] = tmp[np.tril_indices_from(self.covars_cholesky[j])]
     return grad.flatten()
Author: Karl-Krauth, Project: Sparse-GP, Lines: 7, Source: full_gaussian_mixture.py

Example 3: _get_raw_covars

 def _get_raw_covars(self):
     flattened_covars = np.empty([self.num_latent, self.get_covar_size()], dtype=np.float32)
     for i in xrange(self.num_latent):
         raw_covars = self.covars_cholesky[i].copy()
         raw_covars[np.diag_indices_from(raw_covars)] = np.log(raw_covars[np.diag_indices_from(raw_covars)])
         flattened_covars[i] = raw_covars[np.tril_indices_from(raw_covars)]
     return flattened_covars.flatten()
Author: Karl-Krauth, Project: Sparse-GP, Lines: 7, Source: full_gaussian_mixture.py
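
Examples 2 and 3 (and Example 7 below) share one parameterization: a Cholesky factor is stored as the flat vector of its lower triangle, with the strictly positive diagonal passed through a log so the stored parameters are unconstrained. A self-contained round-trip sketch of that idea, independent of the Sparse-GP code:

import numpy as np

def covar_to_raw(L):
    """Flatten a Cholesky factor, log-transforming its diagonal."""
    raw = L.copy()
    raw[np.diag_indices_from(raw)] = np.log(raw[np.diag_indices_from(raw)])
    return raw[np.tril_indices_from(raw)]

def raw_to_covar(raw, n):
    """Inverse transform: rebuild L, exponentiate its diagonal, form L L^T."""
    L = np.zeros((n, n))
    L[np.tril_indices_from(L)] = raw
    L[np.diag_indices_from(L)] = np.exp(L[np.diag_indices_from(L)])
    return L @ L.T

S = np.array([[2.0, 0.3], [0.3, 1.0]])
L = np.linalg.cholesky(S)
assert np.allclose(raw_to_covar(covar_to_raw(L), 2), S)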

Example 4: newCostFunction

	def newCostFunction(self, xs, ys, test=False):
	    xs = np.array(xs)
	    ys = np.array(ys)
	    s1 = xs.dot(ys.T).T
	    s2 = ys.dot(xs.T).T

	    s1 = np.maximum(0, 1 - np.diag(s1) + s1).T
	    s2 = np.maximum(0, 1 - np.diag(s2) + s2).T

	    s1[np.diag_indices_from(s1)] = 0
	    s2[np.diag_indices_from(s2)] = 0
	    ns1 = s1
	    ns2 = s2
	    cost = np.sum(s1)+np.sum(s2)
	    if abs(cost - 2) < 1e-5:
	    	import pdb
	    	pdb.set_trace()
	    if test:
	    	return cost
	    s1t = s1 > 0
	    s2t = s2 > 0
	    tx1 = (ys[:,:,None].T - ys[:,:,None]).transpose([0,2,1])*s1t[:,:,None]
	    ty1 = (xs[:,:,None].T - xs[:,:,None]).transpose([0,2,1])*s2t[:,:,None]
	    tx2 = (ys * np.ones((len(xs),len(xs),xs[0].size))).transpose(1,0,2) * s2t[:,:,None]
	    ty2 = (xs * np.ones((len(xs),len(xs),xs[0].size))).transpose(1,0,2) * s1t[:,:,None]
	    tx3 = (s2t.T)[:,:,None]*ys
	    ty3 = (s1t.T)[:,:,None]*xs
	    xd = np.sum(tx1 - tx2 + tx3, 1)
	    yd = np.sum(ty1 - ty2 + ty3, 1)
	    #print 'xd norm: %.4f, yd norm: %.4f'%(np.linalg.norm(xd), np.linalg.norm(yd))
	    return cost, list(xd), list(yd)
Author: Peratham, Project: imgcap, Lines: 31, Source: twin.py
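
The diagonal of the pairwise score matrices s1 and s2 holds the scores of correctly matched pairs; after the hinge margin is added, those diagonal entries are zeroed so a pair is never penalized against itself. A reduced sketch of that masking step (random placeholder embeddings, and only one of the two symmetric terms):

import numpy as np

rng = np.random.default_rng(0)
xs = rng.normal(size=(4, 8))            # e.g. image embeddings (illustrative)
ys = rng.normal(size=(4, 8))            # e.g. sentence embeddings (illustrative)

scores = xs @ ys.T                      # scores[i, j] pairs item i with item j
margin = np.maximum(0, 1 - np.diag(scores)[:, None] + scores)
margin[np.diag_indices_from(margin)] = 0   # a correctly matched pair incurs no loss
cost = margin.sum()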

Example 5: Voigt_6x6_to_cubic

def Voigt_6x6_to_cubic(C):
    """
    Convert the Voigt 6x6 representation into the cubic elastic constants
    C11, C12 and C44.
    """

    tol = 1e-6

    C_check = np.zeros_like(C)
    C_check[np.diag_indices_from(C_check)] = C[np.diag_indices_from(C)]
    C_check[0:3,0:3] = C[0:3,0:3]
    if np.any(np.abs(C-C_check) > tol):
        raise ValueError('"C" does not have cubic symmetry.')

    C11s = np.array([C[0,0], C[1,1], C[2,2]])
    C12s = np.array([C[1,2], C[0,2], C[0,1]])
    C44s = np.array([C[3,3], C[4,4], C[5,5]])

    C11 = np.mean(C11s)
    C12 = np.mean(C12s)
    C44 = np.mean(C44s)

    if np.any(np.abs(C11-C11s) > tol) or np.any(np.abs(C12-C12s) > tol) or \
            np.any(np.abs(C44-C44s) > tol):
        raise ValueError('"C" does not have cubic symmetry.')

    return np.array([C11, C12, C44])
Author: libAtoms, Project: matscipy, Lines: 27, Source: elasticity.py
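
Here diag_indices_from copies the diagonal (including the C44 block) into a template that is zero outside the upper-left 3x3 block, so any coupling terms a cubic material should not have trip the symmetry check. A hedged sketch that builds a synthetic cubic matrix and would pass that check (the constants are made up):

import numpy as np

C11, C12, C44 = 250.0, 150.0, 120.0     # illustrative elastic constants
C = np.zeros((6, 6))
C[:3, :3] = C12                         # fill the upper-left block with C12
C[np.diag_indices_from(C)] = [C11] * 3 + [C44] * 3
# Voigt_6x6_to_cubic(C) should return array([250., 150., 120.])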

Example 6: test_cosine_distances

def test_cosine_distances():
    # Check the pairwise Cosine distances computation
    rng = np.random.RandomState(1337)
    x = np.abs(rng.rand(910))
    XA = np.vstack([x, x])
    D = cosine_distances(XA)
    assert_array_almost_equal(D, [[0., 0.], [0., 0.]])
    # check that all elements are in [0, 2]
    assert np.all(D >= 0.)
    assert np.all(D <= 2.)
    # check that diagonal elements are equal to 0
    assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])

    XB = np.vstack([x, -x])
    D2 = cosine_distances(XB)
    # check that all elements are in [0, 2]
    assert np.all(D2 >= 0.)
    assert np.all(D2 <= 2.)
    # check that diagonal elements are equal to 0 and non diagonal to 2
    assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])

    # check large random matrix
    X = np.abs(rng.rand(1000, 5000))
    D = cosine_distances(X)
    # check that diagonal elements are equal to 0
    assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0])
    assert np.all(D >= 0.)
    assert np.all(D <= 2.)
Author: scikit-learn, Project: scikit-learn, Lines: 28, Source: test_pairwise.py

Example 7: set_covars

 def set_covars(self, raw_covars):
     raw_covars = raw_covars.reshape([self.num_latent, self.get_covar_size()])
     for j in xrange(self.num_latent):
         cholesky = np.zeros([self.num_dim, self.num_dim], dtype=np.float32)
         cholesky[np.tril_indices_from(cholesky)] = raw_covars[j]
         cholesky[np.diag_indices_from(cholesky)] = np.exp(cholesky[np.diag_indices_from(cholesky)])
         self.covars_cholesky[j] = cholesky
         self.covars[j] = mdot(self.covars_cholesky[j], self.covars_cholesky[j].T)
Author: Karl-Krauth, Project: Sparse-GP, Lines: 8, Source: full_gaussian_mixture.py

Example 8: _update

 def _update(self):
     self.parameters = self.get_parameters()
     for k in range(self.num_comp):
         for j in range(self.num_process):
             temp = np.zeros((self.num_dim, self.num_dim))
             temp[np.tril_indices_from(temp)] = self.L_flatten[k,j,:].copy()
             temp[np.diag_indices_from(temp)] = np.exp(temp[np.diag_indices_from(temp)])
             # temp[np.diag_indices_from(temp)] = temp[np.diag_indices_from(temp)] ** 2
             self.L[k,j,:,:] = temp
             self.s[k,j] = mdot(self.L[k,j,:,:], self.L[k,j,:,:].T)
Author: jfutoma, Project: savigp, Lines: 10, Source: mog_single_comp.py

Example 9: update_covariance

 def update_covariance(self, j, Sj):
     Sj = Sj.copy()
     mm = min(Sj[np.diag_indices_from(Sj)])
     if mm < 0:
         Sj[np.diag_indices_from(Sj)] = Sj[np.diag_indices_from(Sj)] - 1.1 * mm
     for k in range(self.num_comp):
         self.s[k,j] = Sj.copy()
         self.L[k,j] = jitchol(Sj,10)
         tmp = self.L[k,j].copy()
         tmp[np.diag_indices_from(tmp)] = np.log(tmp[np.diag_indices_from(tmp)])
         self.L_flatten[k,j] = tmp[np.tril_indices_from(tmp)]
     self._update()
Author: jfutoma, Project: savigp, Lines: 12, Source: mog_single_comp.py
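
The first lines guard against a covariance estimate whose smallest diagonal entry is negative by shifting the whole diagonal upward before factorization. A standalone sketch of that shift using plain np.linalg.cholesky (the matrix is invented; jitchol in the original additionally adds jitter, which the shift alone does not replace):

import numpy as np

S = np.array([[1.0, 0.1],
              [0.1, -0.3]])             # illustrative "covariance" with a negative variance
mm = min(S[np.diag_indices_from(S)])
if mm < 0:
    # lift every variance so the smallest one becomes 0.1 * |mm| > 0
    S[np.diag_indices_from(S)] = S[np.diag_indices_from(S)] - 1.1 * mm
L = np.linalg.cholesky(S)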

Example 10: getNormDistFluct

    def getNormDistFluct(self, coords):
        """Normalized distance fluctuation
        """
            
        model = self.getModel()
        LOGGER.info('Number of chains: {0}, chains: {1}.'
                     .format(len(list(set(coords.getChids()))), \
                                 list(set(coords.getChids()))))

        try:
            #coords = coords.select('protein and name CA')
            coords = (coords._getCoords() if hasattr(coords, '_getCoords') else
                coords.getCoords())
        except AttributeError:
            try:
                checkCoords(coords)
            except TypeError:
                raise TypeError('coords must be a Numpy array or an object '
                                                'with `getCoords` method')
        
        if not isinstance(model, NMA):
            LOGGER.info('Calculating new model')
            model = GNM('prot analysis')
            model.buildKirchhoff(coords)
            model.calcModes() 
            
        linalg = importLA()
        n_atoms = model.numAtoms()
        n_modes = model.numModes()
        LOGGER.timeit('_ndf')
    
        from .analysis import calcCrossCorr
        from numpy import linalg as LA
        # <dRi, dRi>, <dRj, dRj> = 1
        crossC = 2-2*calcCrossCorr(model)
        r_ij = np.zeros((n_atoms,n_atoms,3))

        for i in range(n_atoms):
           for j in range(i+1,n_atoms):
               r_ij[i][j] = coords[j,:] - coords[i,:]
               r_ij[j][i] = r_ij[i][j]
               r_ij_n = LA.norm(r_ij, axis=2)

        #with np.errstate(divide='ignore'):
        r_ij_n[np.diag_indices_from(r_ij_n)] = 1e-5  # div by 0
        crossC=abs(crossC)
        normdistfluct = np.divide(np.sqrt(crossC),r_ij_n)
        LOGGER.report('NDF calculated in %.2lfs.', label='_ndf')
        normdistfluct[np.diag_indices_from(normdistfluct)] = 0  # div by 0
        return normdistfluct
Author: sixpi, Project: ProDy, Lines: 50, Source: gnm.py
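
The diagonal handling here is purely defensive: r_ij_n has zeros on its diagonal (an atom's distance to itself), so those entries are set to a small constant before dividing and the corresponding entries of the result are zeroed afterwards. A compact sketch of that pattern outside ProDy, with random stand-in data:

import numpy as np

rng = np.random.default_rng(0)
coords = rng.normal(size=(10, 3))
# pairwise distances; the diagonal is exactly zero
dist = np.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=2)

numer = np.abs(rng.normal(size=(10, 10)))        # stand-in for sqrt(crossC)
dist[np.diag_indices_from(dist)] = 1e-5          # avoid division by zero
ratio = numer / dist
ratio[np.diag_indices_from(ratio)] = 0           # self-terms are meaningless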

Example 11: ExpandNode

def ExpandNode(fringe,node):
    col_sum = np.sum(node.attacked_cells,0)
    dict_sum = {}
    for i in range(8):
        if col_sum[0,i] == 8:
            continue
        dict_sum[i] = col_sum[0,i]
    sorted_sum = sorted(dict_sum.items(),key=operator.\
                        itemgetter(1),reverse=True)
    for i in range(len(sorted_sum)):
        col = sorted_sum[i][0]
        for row in range(8):
            if node.attacked_cells[row,col]:
                continue
            attacked_cells = copy.deepcopy(node.attacked_cells)
            attacked_cells[:,col] = 1
            attacked_cells[row,:] = 1
            k = row-col
            rows, cols = np.diag_indices_from(attacked_cells)
            if k < 0:
                rows,cols = rows[:k],cols[-k:]
            elif k > 0:
                rows,cols = rows[k:],cols[:-k]
            attacked_cells[rows,cols] = 1

            attacked_cells = np.fliplr(attacked_cells)
            ncol = 7-col
            k = row-ncol
            rows, cols = np.diag_indices_from(attacked_cells)
            if k < 0:
                rows,cols = rows[:k],cols[-k:]
            elif k > 0:
                rows,cols = rows[k:],cols[:-k]
            attacked_cells[rows,cols] = 1
            attacked_cells = np.fliplr(attacked_cells)

            valid = True
            for i in range(node.depth+1,8):
                if np.sum(attacked_cells[i,:]) == 8:
                    valid = False
                    break
            if not valid:
                continue
            
            nstate = copy.deepcopy(node.state)
            nstate[row,col] = 1
            new_node = Node(parent=node,depth=node.depth\
                 +1,state=nstate,attacked_cells=attacked_cells)
            fringe.insert(0,new_node)
Author: harishrithish7, Project: aima-python, Lines: 49, Source: expand_node.py
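
The notable detail in this eight-queens expansion is how diag_indices_from is combined with slicing to mark an off-main diagonal: for an offset k = row - col, dropping the first or last |k| entries of the returned rows/cols arrays shifts the indexed diagonal. A standalone sketch of that indexing trick (mark_diagonal is an illustrative helper, not part of the aima-python code):

import numpy as np

def mark_diagonal(board, k, value=1):
    """Mark the k-th diagonal of a square array in place (k > 0: below the main one)."""
    rows, cols = np.diag_indices_from(board)
    if k < 0:
        rows, cols = rows[:k], cols[-k:]
    elif k > 0:
        rows, cols = rows[k:], cols[:-k]
    board[rows, cols] = value
    return board

board = np.zeros((8, 8), dtype=int)
mark_diagonal(board, 2)        # marks cells (2,0), (3,1), ..., (7,5)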

Example 12: test_map_diag_and_offdiag

    def test_map_diag_and_offdiag(self):

        vars = ["x", "y", "z"]
        g = ag.PairGrid(self.df)
        g.map_offdiag(plt.scatter)
        g.map_diag(plt.hist)

        for ax in g.diag_axes:
            nt.assert_equal(len(ax.patches), 10)

        for i, j in zip(*np.triu_indices_from(g.axes, 1)):
            ax = g.axes[i, j]
            x_in = self.df[vars[j]]
            y_in = self.df[vars[i]]
            x_out, y_out = ax.collections[0].get_offsets().T
            npt.assert_array_equal(x_in, x_out)
            npt.assert_array_equal(y_in, y_out)

        for i, j in zip(*np.tril_indices_from(g.axes, -1)):
            ax = g.axes[i, j]
            x_in = self.df[vars[j]]
            y_in = self.df[vars[i]]
            x_out, y_out = ax.collections[0].get_offsets().T
            npt.assert_array_equal(x_in, x_out)
            npt.assert_array_equal(y_in, y_out)

        for i, j in zip(*np.diag_indices_from(g.axes)):
            ax = g.axes[i, j]
            nt.assert_equal(len(ax.collections), 0)
Author: GeorgeMcIntire, Project: seaborn, Lines: 29, Source: test_axisgrid.py

Example 13: _generate_noise

def _generate_noise(covar_matrix, time=1000, use_inverse=False):
    """
    Generate a multivariate normal distribution using correlated innovations.

    Parameters
    ----------
    covar_matrix : array
        Covariance matrix of the random variables

    time : int
        Sample size

    use_inverse : bool, optional
        Negate the off-diagonal elements and invert the covariance matrix
        before use

    Returns
    -------
    noise : array
        Random noise generated according to covar_matrix
    """
    # Pull out the number of nodes from the shape of the covar_matrix
    n_nodes = covar_matrix.shape[0]
    # Make a deep copy for use in the inverse case
    this_covar = covar_matrix
    # Take the negative inverse if needed
    if use_inverse:
        this_covar = copy.deepcopy(covar_matrix)
        this_covar *= -1
        this_covar[np.diag_indices_from(this_covar)] *= -1
        this_covar = np.linalg.inv(this_covar)
    # Return the noise distribution
    return np.random.multivariate_normal(mean=np.zeros(n_nodes),
                                            cov=this_covar,
                                            size=time)
Author: jakobrunge, Project: tigramite, Lines: 35, Source: data_processing.py
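
In the use_inverse branch only the off-diagonal elements end up negated: the whole matrix is multiplied by -1 and the diagonal is then flipped back before inversion. A short sketch of that two-step sign flip on its own (illustrative covariance matrix):

import numpy as np

C = np.array([[1.0, 0.4, 0.2],
              [0.4, 1.0, 0.3],
              [0.2, 0.3, 1.0]])
C_neg = C.copy()
C_neg *= -1                                  # negate everything ...
C_neg[np.diag_indices_from(C_neg)] *= -1     # ... then restore the diagonal
# C_neg now equals C with only its off-diagonal entries negated
precision_like = np.linalg.inv(C_neg)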

Example 14: report_clustering_dot_product

def report_clustering_dot_product(loci, thresholds_pack, method, feature_labels):

    thr_occ, thr_crisp, cluster_thresholds = thresholds_pack

    M = scores.generate_dot_product_score_matrix(feature_labels, method, loci=loci)
    M += np.transpose(M)
    M = -1 * np.log(M)
    M[np.diag_indices_from(M)] = 0
    M[np.where(M==np.inf)] = 100

    reports_dir_base = os.path.join(gv.project_data_path, 'cas4/reports/')

    cluster2summary_file_path = os.path.join(gv.project_data_path, 'cas4/reports/cluster_summary.tab')

    for threshold in cluster_thresholds:

        repors_dir = reports_dir_base + 'dot_%s_%d_%.2f_%.2f'%(method, thr_occ, thr_crisp, threshold)
        # print "Thresholds:", thr_occ, thr_crisp, threshold
        # print repors_dir
        # if os.path.exists(repors_dir):
        #     sh.rmtree(repors_dir)
        # os.mkdir(repors_dir)

        singles, cluster_packs, entropies = dendrogram.classify_by_scores_cas4(M, threshold, loci)

        _local_thresholds_pack = (thr_occ, thr_crisp, threshold)

        generate_cluster_reports_cas4(cluster_packs,
                                      loci,
                                      repors_dir,
                                      feature_labels,
                                      method,
                                      _local_thresholds_pack)

        generate_cas4_gi_summary_file(singles, cluster_packs, loci, repors_dir, cluster2summary_file_path)
Author: kyrgyzbala, Project: NewSystems, Lines: 35, Source: reporting.py
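
After symmetrizing and log-transforming the score matrix, the diagonal (self-scores) is forced to zero and any inf produced by the log of a zero score is capped, so downstream clustering sees a finite distance matrix. A minimal sketch of that cleanup on a random score matrix:

import numpy as np

rng = np.random.default_rng(0)
M = rng.random((5, 5)) * 0.5
M += M.T                              # symmetrize; scores now lie in (0, 1)
D = -1 * np.log(M)                    # similarity -> "distance"
D[np.diag_indices_from(D)] = 0        # self-distance is zero by definition
D[np.where(D == np.inf)] = 100        # cap any log(0) blow-ups at a large finite value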

Example 15: active_passive_collisions

def active_passive_collisions(active_tl, active_br, passive_tl, passive_br):
    '''
    Returns an NxN array, where element at [i, j] says if
    thing i's active hitbox crosses thing j's active hitbox.
    An active hitbox isn't considered if any of its dimensions is not-positive.

    active/passive_tl/br must be arrays of shape (N, 2) - the boxes' corners in
    global coordinates

    See comment for passive_passive_collisions for longer explanation.
    The main difference is that we can't cheat here and do half the checks,
    then transpose, we need to do all checks.
    '''
    passive_tl_3d = passive_tl.reshape(1, -1, 2)
    passive_br_3d = passive_br.reshape(1, -1, 2)

    active_tl_3d = active_tl.reshape(-1, 1, 2)
    active_br_3d = active_br.reshape(-1, 1, 2)

    negcheck = numpy.logical_or(numpy.any(active_tl_3d > passive_br_3d, axis=2),
                                numpy.any(active_br_3d < passive_tl_3d, axis=2))

    legible = numpy.all(active_tl < active_br, axis=1).reshape(-1, 1)

    result = numpy.logical_and(numpy.logical_not(negcheck), legible)

    # Remove self collisions
    result[numpy.diag_indices_from(result)] = False
    return result
Author: moshev, Project: project-viking, Lines: 29, Source: collisions.py
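
The final line is a common idiom for pairwise predicates: after computing an NxN boolean matrix of interactions, the main diagonal (each object tested against itself) is switched off. A tiny sketch with three invented axis-aligned boxes:

import numpy

# three boxes given by (top-left, bottom-right) corners
tl = numpy.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]])
br = numpy.array([[2.0, 2.0], [3.0, 3.0], [6.0, 6.0]])

overlap = numpy.logical_and(
    numpy.all(tl[:, None, :] <= br[None, :, :], axis=2),
    numpy.all(br[:, None, :] >= tl[None, :, :], axis=2))
overlap[numpy.diag_indices_from(overlap)] = False   # drop self-collisions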


Note: The numpy.diag_indices_from examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and reuse should follow each project's license. Please do not republish without permission.