

Python sparse.lil_matrix method: code examples

This article collects typical usage examples of the scipy.sparse.lil_matrix method in Python. If you are wondering what sparse.lil_matrix does, how to call it, or simply want to see it used in real code, the curated examples below may help. You can also explore further usage examples from the scipy.sparse module.


The following presents 15 code examples of the sparse.lil_matrix method, sorted by popularity by default.
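Before the examples, here is a minimal, self-contained sketch of the workflow most of the snippets below follow: build the matrix incrementally in LIL format, then convert with tocsr() for fast arithmetic. The shape and values are purely illustrative.

import numpy as np
from scipy import sparse

# LIL (list-of-lists) format supports cheap element-by-element assignment.
m = sparse.lil_matrix((4, 5), dtype=float)
m[0, 1] = 2.0
m[2, 3] = -1.5
m[3, 0:3] = np.array([1.0, 0.5, 3.0])  # row-slice assignment also works

# Convert to CSR before arithmetic or solving; CSR is much faster for that.
c = m.tocsr()
print(c.toarray())

This construct-in-LIL, compute-in-CSR pattern appears repeatedly in the examples that follow.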

Example 1: random_lil

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def random_lil(shape, dtype, nnz):
    rval = sp.lil_matrix(shape, dtype=dtype)
    huge = 2 ** 30
    for k in range(nnz):
        # set non-zeros in random locations (row x, col y)
        idx = numpy.random.random_integers(huge, size=2) % shape
        value = numpy.random.rand()
        # for *int* dtypes the random value in [0, 1) would always truncate
        # to zero, so scale it up first
        if "int" in dtype:
            value = int(value * 100)
        # The call to tuple is needed because scipy 0.13.1 does not support
        # an ndarray of length 2 as an index tuple.
        rval[tuple(idx)] = value
    return rval 
Developer: muhanzhang, Project: D-VAE, Lines: 18, Source: test_basic.py

Example 2: toSparse

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def toSparse(baseX, X, dictionary):
    # convert baseX & X (a list of dictionaries), to a sparse matrix, using dictionary to map to indices
    out = lil_matrix((len(X),len(dictionary)))
    for i, (basex, x) in enumerate(zip(baseX, X)) :
        for key in basex :
            if key not in dictionary :
                continue
            out[i,dictionary[key]] = basex[key]  
        for key in x :
            if key not in dictionary :
                continue
            out[i,dictionary[key]] = x[key]
            
    out = out.tocsr()
    return out

    
# classifiers define :
#  train(X,y)
#  predict(X)
#  params()
#  load(params)
#  X is a sparse matrix, y is a vector of class labels (ints) 
Developer: ConvLab, Project: ConvLab, Lines: 25, Source: Classifier.py
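The comment block at the end of Example 2 describes the interface that classifiers in this module are expected to implement: train(X, y), predict(X), params(), and load(params), where X is a sparse matrix and y a vector of integer class labels. As a purely illustrative sketch of that contract (the class name and internals below are assumptions, not ConvLab's actual implementation), a minimal classifier built on scikit-learn might look like this:

from sklearn.linear_model import LogisticRegression

class SparseClassifier:
    """Hypothetical wrapper satisfying the train/predict/params/load contract."""

    def __init__(self):
        self.model = LogisticRegression(max_iter=1000)

    def train(self, X, y):
        # X: scipy sparse feature matrix (e.g. the output of toSparse), y: int labels
        self.model.fit(X, y)

    def predict(self, X):
        return self.model.predict(X)

    def params(self):
        return {"coef": self.model.coef_,
                "intercept": self.model.intercept_,
                "classes": self.model.classes_}

    def load(self, params):
        self.model.coef_ = params["coef"]
        self.model.intercept_ = params["intercept"]
        self.model.classes_ = params["classes"]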

Example 3: get_to_std

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def get_to_std(self):
        '''
        Retrieve the matrix that transforms a vector from this basis to the
        standard basis of this basis's dimension.

        Returns
        -------
        numpy array or scipy.sparse.lil_matrix
            An array of shape `(dim, size)` where `dim` is the dimension
            of this basis (the length of its vectors) and `size` is the
            size of this basis (its number of vectors).
        '''
        if self.sparse:
            toStd = _sps.lil_matrix((self.dim, self.size), dtype='complex')
        else:
            toStd = _np.zeros((self.dim, self.size), 'complex')

        for i, vel in enumerate(self.vector_elements):
            toStd[:, i] = vel
        return toStd 
Developer: pyGSTio, Project: pyGSTi, Lines: 22, Source: basis.py

Example 4: _lazy_build_vector_elements

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def _lazy_build_vector_elements(self):
        if self.sparse:
            compMxs = []
        else:
            compMxs = _np.zeros((self.size, self.dim), 'complex')

        i, start = 0, 0
        for compbasis in self.component_bases:
            for lbl, vel in zip(compbasis.labels, compbasis.vector_elements):
                assert(_sps.issparse(vel) == self.sparse), "Inconsistent sparsity!"
                if self.sparse:
                    mx = _sps.lil_matrix((self.dim, 1))
                    mx[start:start + compbasis.dim, 0] = vel
                    compMxs.append(mx)
                else:
                    compMxs[i, start:start + compbasis.dim] = vel
                i += 1
            start += compbasis.dim

        assert(i == self.size)
        self._vector_elements = compMxs 
Developer: pyGSTio, Project: pyGSTi, Lines: 23, Source: basis.py

Example 5: get_new_term_doc_mat

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def get_new_term_doc_mat(self, doc_domains, non_text=False):
		'''
		Combines documents together that are in the same domain

		Parameters
		----------
		doc_domains : array-like

		Returns
		-------
		scipy.sparse.csr_matrix

		'''
		assert len(doc_domains) == self.term_doc_matrix.get_num_docs()
		doc_domain_set = set(doc_domains)
		num_terms = self.term_doc_matrix.get_num_metadata() if non_text else self.term_doc_matrix.get_num_terms()
		num_domains = len(doc_domain_set)
		domain_mat = lil_matrix((num_domains, num_terms), dtype=int)
		X = self.term_doc_matrix.get_metadata_doc_mat() if non_text else self.term_doc_matrix.get_term_doc_mat()
		for i, domain in enumerate(doc_domain_set):
			domain_mat[i, :] = X[np.array(doc_domains == domain)].sum(axis=0)
		return domain_mat.tocsr() 
Developer: JasonKessler, Project: scattertext, Lines: 24, Source: CombineDocsIntoDomains.py

Example 6: _flip_random_edges

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def _flip_random_edges(A, percent):
    """
    Flips values of A randomly.
    :param A: binary scipy sparse matrix.
    :param percent: percent of the edges to flip.
    :return: binary scipy sparse matrix.
    """
    if not A.shape[0] == A.shape[1]:
        raise ValueError('A must be a square matrix.')
    dtype = A.dtype
    A = sp.lil_matrix(A).astype(np.bool)
    n_elem = A.shape[0] ** 2
    n_elem_to_flip = round(percent * n_elem)
    unique_idx = np.random.choice(n_elem, replace=False, size=n_elem_to_flip)
    row_idx = unique_idx // A.shape[0]
    col_idx = unique_idx % A.shape[0]
    idxs = np.stack((row_idx, col_idx)).T
    for i in idxs:
        i = tuple(i)
        A[i] = np.logical_not(A[i])
    A = A.tocsr().astype(dtype)
    A.eliminate_zeros()
    return A 
Developer: danielegrattarola, Project: spektral, Lines: 25, Source: mnist.py

Example 7: solve

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def solve(self):

        shape = self.bcs.shape
        if self._L is None:
            self._L = self.lhs.matrix(shape) # expensive operation, so cache it

        L = sparse.lil_matrix(self._L)
        f = self.rhs.reshape(-1, 1)

        nz = list(self.bcs.row_inds())

        L[nz, :] = self.bcs.lhs[nz, :]
        f[nz] = np.array(self.bcs.rhs[nz].toarray()).reshape(-1, 1)

        L = sparse.csr_matrix(L)
        return spsolve(L, f).reshape(shape) 
Developer: maroba, Project: findiff, Lines: 18, Source: pde.py

Example 8: __setitem__

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def __setitem__(self, key, value):

        lng_inds = self.long_indices[key]

        if isinstance(value, tuple): # Neumann BC
            op, value = value
            # Avoid calling matrix for the whole grid! Optimize later!
            mat = sparse.lil_matrix(op.matrix(self.shape))
            self.lhs[lng_inds, :] = mat[lng_inds, :]
        else: # Dirichlet BC
            self.lhs[lng_inds, lng_inds] = 1

        if isinstance(value, np.ndarray):
            value = value.reshape(-1)[lng_inds]
            for i, v in zip(lng_inds, value):
                self.rhs[i] = v
        else:
            self.rhs[lng_inds] = value 
Developer: maroba, Project: findiff, Lines: 20, Source: pde.py

Example 9: test_score_samples

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def test_score_samples():
    # Test score_samples (pseudo-likelihood) method.
    # Assert that pseudo-likelihood is computed without clipping.
    # See Fabian's blog, http://bit.ly/1iYefRk
    rng = np.random.RandomState(42)
    X = np.vstack([np.zeros(1000), np.ones(1000)])
    rbm1 = BernoulliRBM(n_components=10, batch_size=2,
                        n_iter=10, random_state=rng)
    rbm1.fit(X)
    assert (rbm1.score_samples(X) < -300).all()

    # Sparse vs. dense should not affect the output. Also test sparse input
    # validation.
    rbm1.random_state = 42
    d_score = rbm1.score_samples(X)
    rbm1.random_state = 42
    s_score = rbm1.score_samples(lil_matrix(X))
    assert_almost_equal(d_score, s_score)

    # Test numerical stability (#2785): would previously generate infinities
    # and crash with an exception.
    with np.errstate(under='ignore'):
        rbm1.score_samples([np.arange(1000) * 100]) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 25, Source: test_rbm.py

Example 10: test_label_binarize_multilabel

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def test_label_binarize_multilabel():
    y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    expected = pos_label * y_ind
    y_sparse = [sparse_matrix(y_ind)
                for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
                                      dok_matrix, lil_matrix]]

    for y in [y_ind] + y_sparse:
        check_binarized_results(y, classes, pos_label, neg_label,
                                expected)

    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 18, Source: test_label.py

Example 11: _setup_metric

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def _setup_metric(X, true_labels, inv_psp=None, k=5):
    assert compatible_shapes(X, true_labels), \
        "ground truth and prediction matrices must have same shape."
    num_instances, num_labels = true_labels.shape
    indices = _get_topk(X, num_labels, k)
    ps_indices = None
    if inv_psp is not None:
        ps_indices = _get_topk(
            true_labels.dot(
                sp.spdiags(inv_psp, diags=0,
                           m=num_labels, n=num_labels)),
            num_labels, k)
        inv_psp = np.hstack([inv_psp, np.zeros((1))])

    true_labels = sp.hstack([true_labels,
                             sp.lil_matrix((num_instances, 1),
                                           dtype=np.int32)]).tocsr()
    return indices, true_labels, ps_indices, inv_psp 
Developer: kunaldahiya, Project: pyxclib, Lines: 20, Source: xc_metrics.py

Example 12: vectorize

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def vectorize(features, vocab):
    """ Transform a features list into a numeric vector
        with a given vocab

    :type features: list
    :param features: list of feature names extracted for one instance

    :type vocab: dict
    :param vocab: mapping from feature name to vector index
    """
    vec = lil_matrix((1, len(vocab)))

    for feat in features:
        try:
            fidx = vocab[feat]
            vec[0, fidx] += 1.0
        except KeyError:
            pass
    # Normalization
    vec = normalize(vec)
    return vec 
Developer: yizhongw, Project: StageDP, Lines: 23, Source: other.py

Example 13: compute_cooccurrence_constraint

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def compute_cooccurrence_constraint(self, nodes):
        """
        Co-occurrence constraint as described in the paper.

        Parameters
        ----------
        nodes: np.array
            Nodes whose features are considered for change

        Returns
        -------
        np.array [len(nodes), D], dtype bool
            Binary matrix of dimension len(nodes) x D. A 1 in entry n,d indicates that
            we are allowed to add feature d to the features of node n.

        """

        words_graph = self.cooc_matrix.copy()
        D = self.X_obs.shape[1]
        words_graph.setdiag(0)
        words_graph = (words_graph > 0)
        word_degrees = np.sum(words_graph, axis=0).A1

        inv_word_degrees = np.reciprocal(word_degrees.astype(float) + 1e-8)

        sd = np.zeros([self.N])
        for n in range(self.N):
            n_idx = self.X_obs[n, :].nonzero()[1]
            sd[n] = np.sum(inv_word_degrees[n_idx.tolist()])

        scores_matrix = sp.lil_matrix((self.N, D))

        for n in nodes:
            common_words = words_graph.multiply(self.X_obs[n])
            idegs = inv_word_degrees[common_words.nonzero()[1]]
            nnz = common_words.nonzero()[0]
            scores = np.array([idegs[nnz == ix].sum() for ix in range(D)])
            scores_matrix[n] = scores
        self.cooc_constraint = sp.csr_matrix(scores_matrix - 0.5 * sd[:, None] > 0) 
Developer: danielzuegner, Project: nettack, Lines: 41, Source: nettack.py

Example 14: feature_scores

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def feature_scores(self):
        """
        Compute feature scores for all possible feature changes.
        """

        if self.cooc_constraint is None:
            self.compute_cooccurrence_constraint(self.influencer_nodes)
        logits = self.compute_logits()
        best_wrong_class = self.strongest_wrong_class(logits)
        gradient = self.gradient_wrt_x(self.label_u) - self.gradient_wrt_x(best_wrong_class)
        surrogate_loss = logits[self.label_u] - logits[best_wrong_class]

        gradients_flipped = (gradient * -1).tolil()
        gradients_flipped[self.X_obs.nonzero()] *= -1

        X_influencers = sp.lil_matrix(self.X_obs.shape)
        X_influencers[self.influencer_nodes] = self.X_obs[self.influencer_nodes]
        gradients_flipped = gradients_flipped.multiply((self.cooc_constraint + X_influencers) > 0)
        nnz_ixs = np.array(gradients_flipped.nonzero()).T

        sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
        sorted_ixs = nnz_ixs[sorting]
        grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]

        scores = surrogate_loss - grads
        return sorted_ixs[::-1], scores.A1[::-1] 
Developer: danielzuegner, Project: nettack, Lines: 28, Source: nettack.py

Example 15: vxc_lil

# Required import: from scipy import sparse [as an alias]
# Or: from scipy.sparse import lil_matrix [as an alias]
def vxc_lil(self, **kw):
  """
    Computes the exchange-correlation matrix elements
    Args:
      sv : (System Variables), this must have arrays of coordinates and species, etc
    Returns:
      fxc,vxc,exc
  """
  from pyscf.nao.m_xc_scalar_ni import xc_scalar_ni
  from pyscf.nao.m_ao_matelem import ao_matelem_c
  from scipy.sparse import lil_matrix

  #dm, xc_code, deriv, ao_log=None, dtype=float64, **kvargs

  dm = kw['dm'] if 'dm' in kw else self.make_rdm1()
  kernel = kw['kernel'] if 'kernel' in kw else None
  ao_log = kw['ao_log'] if 'ao_log' in kw else self.ao_log
  xc_code = kw['xc_code'] if 'xc_code' in kw else self.xc_code
  kw.pop('xc_code',None)
  dtype = kw['dtype'] if 'dtype' in kw else self.dtype
  
  aome = ao_matelem_c(self.ao_log.rr, self.ao_log.pp, self, dm)
  me = aome.init_one_set(self.ao_log) if ao_log is None else aome.init_one_set(ao_log)
  atom2s = zeros((self.natm+1), dtype=int64)
  for atom,sp in enumerate(self.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp]
  
  lil = [lil_matrix((atom2s[-1],atom2s[-1]), dtype=dtype) for i in range((self.nspin-1)*2+1)]

  for atom1,[sp1,rv1,s1,f1] in enumerate(zip(self.atom2sp,self.atom2coord,atom2s,atom2s[1:])):
    for atom2,[sp2,rv2,s2,f2] in enumerate(zip(self.atom2sp,self.atom2coord,atom2s,atom2s[1:])):
      blk = xc_scalar_ni(me,sp1,rv1,sp2,rv2,xc_code=xc_code,**kw)
      for i,b in enumerate(blk): lil[i][s1:f1,s2:f2] = b[:,:]

  return lil 
Developer: pyscf, Project: pyscf, Lines: 36, Source: m_vxc_lil.py


Note: the scipy.sparse.lil_matrix examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.