

Python sparse.spdiags Method Code Examples

This article collects typical usage examples of the Python method scipy.sparse.spdiags. If you are unsure what sparse.spdiags does, how to call it, or what it looks like in practice, the curated code examples below should help. You can also browse further usage examples from its containing module, scipy.sparse.


The following presents 15 code examples of sparse.spdiags, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
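Before the project examples, here is a minimal sketch (not taken from any of the projects below) of what spdiags does: row k of the data array is laid along the diagonal at offset diags[k], and values that fall outside the matrix are silently dropped. The 4 x 4 size and the sample values are chosen purely for illustration.

import numpy as np
from scipy import sparse

# two rows of diagonal data, placed on offsets 0 (main diagonal) and 1 (first superdiagonal)
data = np.array([[1, 2, 3, 4],
                 [5, 6, 7, 8]])
offsets = [0, 1]
A = sparse.spdiags(data, offsets, 4, 4)
print(A.toarray())
# [[1 6 0 0]
#  [0 2 7 0]
#  [0 0 3 8]
#  [0 0 0 4]]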

Example 1: get_term_topic

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def get_term_topic(self, X):
        n_features = X.shape[1]
        id2word = self.vocabulary_
        word2topic = {}

        with open('word_topic.txt', 'r', encoding='utf-8') as f:
            for line in f:
                strs = line.strip('\n').split('\t')
                word2topic[strs[0]] = strs[2]

        topic = np.zeros((len(id2word),))

        for i, key in enumerate(id2word):
            if key in word2topic:
                topic[id2word[key]] = float(word2topic[key])
            else:
                print(key)  # vocabulary term missing from word_topic.txt

        topic = preprocessing.MinMaxScaler().fit_transform(topic.reshape(-1, 1)).ravel()  # scaler expects a 2-D array
        # topic = sp.spdiags(topic, diags=0, m=n_features,
        #                    n=n_features, format='csr')
        return topic 
Author: prozhuchen, Project: 2016CCF-sougou, Lines: 24, Source: STFIWF.py

Example 2: setUp

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def setUp(self):
        random.seed(0)  # make tests repeatable
        self.real_matrices = []
        self.real_matrices.append(spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]],
                                          [0, 1], 5, 5))
        self.real_matrices.append(spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]],
                                          [0, 1], 4, 5))
        self.real_matrices.append(spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]],
                                          [0, 2], 5, 5))
        self.real_matrices.append(rand(3,3))
        self.real_matrices.append(rand(5,4))
        self.real_matrices.append(rand(4,5))

        self.real_matrices = [csc_matrix(x).astype('d') for x
                in self.real_matrices]
        self.complex_matrices = [x.astype(np.complex128)
                                 for x in self.real_matrices]

        _DeprecationAccept.setUp(self)

# Skip methods if umfpack not present 
Author: ktraunmueller, Project: Computable, Lines: 23, Source: test_umfpack.py

Example 3: test_twodiags

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def test_twodiags(self):
        A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
        b = array([1, 2, 3, 4, 5])

        # condition number of A
        cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)

        for t in ['f','d','F','D']:
            eps = finfo(t).eps  # floating point epsilon
            b = b.astype(t)

            for format in ['csc','csr']:
                Asp = A.astype(t).asformat(format)

                x = spsolve(Asp,b)

                assert_(norm(b - Asp*x) < 10 * cond_A * eps) 
Author: ktraunmueller, Project: Computable, Lines: 19, Source: test_linsolve.py

Example 4: sakurai

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def sakurai(n):
    """ Example taken from
        T. Sakurai, H. Tadano, Y. Inadomi and U. Nagashima
        A moment-based method for large-scale generalized eigenvalue problems
        Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004) """

    A = sparse.eye(n, n)
    d0 = array(r_[5,6*ones(n-2),5])
    d1 = -4*ones(n)
    d2 = ones(n)
    B = sparse.spdiags([d2,d1,d0,d1,d2],[-2,-1,0,1,2],n,n)

    k = arange(1,n+1)
    w_ex = sort(1./(16.*pow(cos(0.5*k*pi/(n+1)),4)))  # exact eigenvalues

    return A,B, w_ex 
Author: ktraunmueller, Project: Computable, Lines: 18, Source: large_scale.py
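A hedged usage sketch, not part of the original file: assuming the sakurai function above is available, the generalized eigenvalue problem A x = lambda B x it returns can, for a modest n, be solved densely with scipy.linalg.eigh and compared against the analytic eigenvalues in w_ex (n = 50 is an arbitrary illustrative size).

import numpy as np
from scipy.linalg import eigh

n = 50
A, B, w_ex = sakurai(n)
# dense check of the generalized problem A x = lambda B x (B is symmetric positive definite)
w = eigh(A.toarray(), B.toarray(), eigvals_only=True)   # eigenvalues in ascending order
print(np.max(np.abs(w - w_ex) / w_ex))                  # relative error vs. the analytic values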

Example 5: expansion_matrix

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def expansion_matrix(mapping, canonical_order=False):
    """ Returns an n x m matrix E where n is the dimension of 
        the original data and m is the dimension of the reduced data.

        Expands data vector x with E x'
        Reduces workload matrix W with W E
    """
    assert mapping.ndim == 1, "Can only handle 1-dimensional mappings for now, domain should be flattened"

    unique, indices, inverse, counts = mapping_statistics(mapping)

    if canonical_order:
        mapping = canonical_ordering(mapping)

    n = mapping.size
    m = unique.size
    data = np.ones(n)
    cols = np.arange(n)
    rows = inverse

    R = sparse.csr_matrix((data, (rows, cols)), shape=(m, n), dtype=int)
    scale = sparse.spdiags(1.0 / counts, 0, m, m)

    return EkteloMatrix(R.T * scale) 
Author: ektelo, Project: ektelo, Lines: 26, Source: support.py
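A standalone sketch of the same idea, not taken from the ektelo project: it replaces the mapping_statistics and EkteloMatrix helpers with plain numpy/scipy calls and a made-up mapping vector, to show how the reduction matrix R and the column-scaled expansion matrix E = R.T * S behave.

import numpy as np
from scipy import sparse

mapping = np.array([0, 0, 1, 1, 1, 2])            # 6 cells collapsed into 3 groups
unique, inverse, counts = np.unique(mapping, return_inverse=True, return_counts=True)
n, m = mapping.size, unique.size

R = sparse.csr_matrix((np.ones(n), (inverse, np.arange(n))), shape=(m, n), dtype=int)
E = R.T @ sparse.spdiags(1.0 / counts, 0, m, m)   # n x m expansion matrix

x_reduced = np.array([6.0, 9.0, 4.0])             # one value per group
print(E @ x_reduced)                              # each group total spread evenly over its cells -> [3. 3. 3. 3. 3. 4.]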

Example 6: _setup_metric

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def _setup_metric(X, true_labels, inv_psp=None, k=5):
    assert compatible_shapes(X, true_labels), \
        "ground truth and prediction matrices must have same shape."
    num_instances, num_labels = true_labels.shape
    indices = _get_topk(X, num_labels, k)
    ps_indices = None
    if inv_psp is not None:
        ps_indices = _get_topk(
            true_labels.dot(
                sp.spdiags(inv_psp, diags=0,
                           m=num_labels, n=num_labels)),
            num_labels, k)
        inv_psp = np.hstack([inv_psp, np.zeros((1))])

    true_labels = sp.hstack([true_labels,
                             sp.lil_matrix((num_instances, 1),
                                           dtype=np.int32)]).tocsr()
    return indices, true_labels, ps_indices, inv_psp 
Author: kunaldahiya, Project: pyxclib, Lines: 20, Source: xc_metrics.py

Example 7: compute_laplacian_sparse

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def compute_laplacian_sparse(W):

  num_node = W.shape[0]
  D = np.sum(W, axis=0).A1

  # unnormalized graph laplacian
  # L = np.diag(D) - W

  # normalized graph laplacian
  idx = D != 0
  diag_vec = np.zeros_like(D)
  diag_vec[idx] = 1.0

  row = np.nonzero(D)[0]
  col = row
  diag_val = csr_matrix(
      (diag_vec, (row, col)), shape=[num_node, num_node], dtype=np.float32)

  D_inv = np.zeros_like(D)
  D_inv[idx] = 1.0 / D[idx]

  L = diag_val - spdiags(D_inv, 0, num_node, num_node) * W

  return L 
Author: microsoft, Project: graph-partition-neural-network-samples, Lines: 26, Source: spectral_graph_partition.py
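A hedged usage sketch (the 3-node path graph below is made up): the function expects the adjacency matrix W as a scipy sparse matrix and returns the random-walk-normalized Laplacian I - D^-1 W, restricted to nodes with nonzero degree.

import numpy as np
from scipy.sparse import csr_matrix

# made-up 3-node path graph, passed in as a scipy sparse adjacency matrix
W = csr_matrix(np.array([[0, 1, 0],
                         [1, 0, 1],
                         [0, 1, 0]], dtype=np.float32))
L = compute_laplacian_sparse(W)
print(L.toarray())
# expected values (I - D^-1 W):
# [[ 1.  -1.   0. ]
#  [-0.5  1.  -0.5]
#  [ 0.  -1.   1. ]]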

Example 8: setUp

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def setUp(self):
        random.seed(0)  # make tests repeatable
        real_matrices = []
        real_matrices.append(spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]],
                                     [0, 1], 5, 5))
        real_matrices.append(spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]],
                                     [0, 1], 4, 5))
        real_matrices.append(spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]],
                                     [0, 2], 5, 5))
        real_matrices.append(rand(3,3))
        real_matrices.append(rand(5,4))
        real_matrices.append(rand(4,5))

        self.real_matrices = [csc_matrix(x).astype('d')
                              for x in real_matrices]
        self.complex_matrices = [x.astype(np.complex128)
                                 for x in self.real_matrices]

        self.real_int64_matrices = [_to_int64(x)
                                   for x in self.real_matrices]
        self.complex_int64_matrices = [_to_int64(x)
                                      for x in self.complex_matrices]

        _DeprecationAccept.setUp(self) 
Author: scikit-umfpack, Project: scikit-umfpack, Lines: 26, Source: test_umfpack.py

Example 9: _baseline_als

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def _baseline_als(self,y, lam, p, niter=10):
        '''
        see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
        "Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
        http://stackoverflow.com/questions/29156532/python-baseline-correction-library
        "There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
        tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
        (for a signal with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
        '''
        L = len(y)
        D = sparse.csc_matrix(np.diff(np.eye(L), 2))
        w = np.ones(L)
        for i in range(niter):
            W = sparse.spdiags(w, 0, L, L)
            Z = W + lam * D.dot(D.transpose())
            z = sparse.linalg.spsolve(Z, w*y)
            w = p * (y > z) + (1-p) * (y < z)
        return z 
Author: qkitgroup, Project: qkit, Lines: 20, Source: calibration.py
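A hedged usage sketch, assuming the method above has been lifted out of its class as a standalone function baseline_als(y, lam, p, niter=10) (the self argument dropped, the body unchanged); the signal is synthetic and the parameter values are picked from the ranges quoted in the docstring.

import numpy as np

# hypothetical standalone lift of the method above: baseline_als(y, lam, p, niter=10)
x = np.linspace(0, 10, 500)
y = np.exp(-4 * (x - 5) ** 2) + 0.05 * x + 0.01 * np.random.randn(x.size)  # peak + drifting baseline + noise
baseline = baseline_als(y, lam=1e5, p=0.01)   # lam and p within the docstring's suggested ranges
corrected = y - baseline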

Example 10: fit

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def fit(self, X, y=None):
        """Learn the idf vector (global term weights)

        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts
        """
        if not sp.issparse(X):
            X = sp.csc_matrix(X)
        if self.use_idf:
            n_samples, n_features = X.shape
            df = _document_frequency(X)

            # perform idf smoothing if required
            df += int(self.smooth_idf)
            n_samples += int(self.smooth_idf)

            # log+1 instead of log makes sure terms with zero idf don't get
            # suppressed entirely.
            idf = np.log(float(n_samples) / df) + 1.0
            self._idf_diag = sp.spdiags(idf, diags=0, m=n_features,
                                        n=n_features, format='csr')

        return self 
Author: nccgroup, Project: Splunking-Crime, Lines: 27, Source: text.py
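A hedged sketch of how a diagonal like self._idf_diag is typically applied afterwards; the companion transform step is not part of this snippet, and the counts and idf values below are made up. Right-multiplying a count matrix by the idf diagonal scales each column by its idf weight.

import numpy as np
import scipy.sparse as sp

counts = sp.csr_matrix(np.array([[3, 0, 1],
                                 [0, 2, 0]], dtype=float))
idf = np.array([1.0, 1.5, 2.0])        # stand-in for a fitted idf vector
idf_diag = sp.spdiags(idf, diags=0, m=3, n=3, format='csr')
print((counts * idf_diag).toarray())   # column j is scaled by idf[j]
# [[3. 0. 2.]
#  [0. 3. 0.]]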

Example 11: train

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def train(self, contexts, responses):
        """Fit the tf-idf transform and compute idf statistics."""
        with ignore_warnings():
            # Ignore deprecated `non_negative` warning.
            self._vectorizer = HashingVectorizer(non_negative=True)
        self._tfidf_transform = TfidfTransformer()
        count_matrix = self._tfidf_transform.fit_transform(
            self._vectorizer.transform(contexts + responses))
        n_samples, n_features = count_matrix.shape
        df = _document_frequency(count_matrix)
        idf = np.log((n_samples - df + 0.5) / (df + 0.5))
        self._idf_diag = sp.spdiags(
            idf, diags=0, m=n_features, n=n_features
        )
        document_lengths = count_matrix.sum(axis=1)
        self._average_document_length = np.mean(document_lengths)
        print(self._average_document_length) 
Author: PolyAI-LDN, Project: conversational-datasets, Lines: 19, Source: keyword_based.py

Example 12: __init__

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def __init__(self, verts, tris, m=1.0):
        self._verts = verts
        self._tris = tris
        # precompute some stuff needed later on
        e01 = verts[tris[:,1]] - verts[tris[:,0]]
        e12 = verts[tris[:,2]] - verts[tris[:,1]]
        e20 = verts[tris[:,0]] - verts[tris[:,2]]
        self._triangle_area = .5 * veclen(np.cross(e01, e12))
        unit_normal = normalized(np.cross(normalized(e01), normalized(e12)))
        self._un = unit_normal
        self._unit_normal_cross_e01 = np.cross(unit_normal, -e01)
        self._unit_normal_cross_e12 = np.cross(unit_normal, -e12)
        self._unit_normal_cross_e20 = np.cross(unit_normal, -e20)
        # parameters for heat method
        h = np.mean([veclen(e) for e in (e01, e12, e20)])  # mean edge length
        t = m * h ** 2
        # pre-factorize poisson systems
        Lc, vertex_area = compute_mesh_laplacian(verts, tris, area_type='lumped_mass')
        A = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
        #self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
        self._factored_AtLc = factorized((A - t * Lc).tocsc())
        #self._factored_L = splu(Lc.tocsc()).solve
        self._factored_L = factorized(Lc.tocsc()) 
Author: tneumann, Project: cmm, Lines: 25, Source: geodesic.py

Example 13: manifold_harmonics

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def manifold_harmonics(verts, tris, K, scaled=True, return_D=False, return_eigenvalues=False):
    Q, vertex_area = compute_mesh_laplacian(
        verts, tris, 'cotangent', 
        return_vertex_area=True, area_type='lumped_mass'
    )
    if scaled:
        D = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
    else:
        D = sparse.spdiags(np.ones_like(vertex_area), 0, len(verts), len(verts))

    try:
        lambda_dense, Phi_dense = eigsh(-Q, M=D, k=K, sigma=0)
    except RuntimeError as e:
        if 'Factor is exactly singular' in str(e):
            logging.warning("factor is singular, trying some regularization and cholmod")
            chol_solve = factorized(-Q + sparse.eye(Q.shape[0]) * 1.e-9)
            OPinv = sparse.linalg.LinearOperator(Q.shape, matvec=chol_solve)
            lambda_dense, Phi_dense = eigsh(-Q, M=D, k=K, sigma=0, OPinv=OPinv)
        else:
            raise
Author: tneumann, Project: cmm, Lines: 22, Source: cmm.py

Example 14: transform

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def transform(self, X):
        tp = self._tp
        fp = self._fp
        fn = self._fn
        tn = self._tn

        f = self._n_features
        tpr = tp / self._p
        fpr = fp / self._n
        min_bound, max_bound = 0.0005, 1 - 0.0005
        tpr[tpr < min_bound]  = min_bound
        tpr[tpr > max_bound]  = max_bound
        fpr[fpr < min_bound]  = min_bound
        fpr[fpr > max_bound]  = max_bound
        k = np.abs(norm.ppf(tpr) - norm.ppf(fpr))
        X = X * sp.spdiags(k, 0, f, f)
        if self.norm:
            X = normalize(X, self.norm, copy=False)
        return X 
Author: textvec, Project: textvec, Lines: 21, Source: vectorizers.py

Example 15: fit

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import spdiags [as alias]
def fit(self, X, y=None):
        """Learn the idf vector (global term weights)

        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts
        """
        if not sp.issparse(X):
            X = sp.csc_matrix(X)
        if self.use_idf:
            n_samples, n_features = X.shape
            n_samples=float(n_samples)
            df = _document_frequency(X)

            # note: despite the comment inherited from sklearn's TfidfTransformer,
            # the weight computed here is a plain document-frequency ratio, not a log idf
            # idf = np.log(df / n_samples)
            idf = old_div(df, n_samples)  # old_div comes from the `past` Python 2/3 compatibility package
            self._idf_diag = sp.spdiags(idf,
                diags=0, m=n_features, n=n_features)
        return self 
Author: opentargets, Project: data_pipeline, Lines: 24, Source: DataDrivenRelation.py


Note: The scipy.sparse.spdiags examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; the source code remains under the original authors' copyright, and any redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.