

Python sparse.lil_matrix Method Code Examples

This article compiles typical usage examples of the Python method scipy.sparse.lil_matrix. If you are wondering what sparse.lil_matrix does, how to use it, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage examples from its containing module, scipy.sparse.


The sections below present 15 code examples of sparse.lil_matrix, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
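Before the collected examples, here is a minimal sketch of the typical lil_matrix workflow: build the matrix incrementally in LIL format (which supports cheap per-element assignment), then convert to CSR for arithmetic. The shape and values are illustrative, not taken from any of the projects below.

import numpy as np
from scipy.sparse import lil_matrix

# Build incrementally: LIL format supports cheap per-element assignment.
mat = lil_matrix((4, 6), dtype=float)
mat[0, 1] = 1.5
mat[2, 4] = -2.0
mat[3, 0] = 7.0

# Convert to CSR before arithmetic or solving; CSR is much faster for those operations.
csr = mat.tocsr()
print(csr.dot(np.ones(6)))    # -> [ 1.5  0.  -2.   7. ]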

Example 1: random_lil

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def random_lil(shape, dtype, nnz):
    rval = sp.lil_matrix(shape, dtype=dtype)
    huge = 2 ** 30
    for k in range(nnz):
        # set non-zeros in random locations (row x, col y)
        idx = numpy.random.randint(huge, size=2) % shape
        value = numpy.random.rand()
        # if dtype *int*, value will always be zeros!
        if "int" in dtype:
            value = int(value * 100)
        # The call to tuple is needed because scipy 0.13.1 does not support
        # an ndarray of length 2 as the idx tuple.
        rval[tuple(idx)] = value
    return rval 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 18, Source file: test_basic.py
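A quick, hypothetical call to the helper above (the shape, dtype string, and nnz count are made-up values; numpy and scipy.sparse are assumed to be imported as numpy and sp, as the function expects):

import numpy
import scipy.sparse as sp

m = random_lil((10, 10), 'float64', nnz=5)
print(m.nnz, m.dtype)   # at most 5 stored entries (random positions may collide), float64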

Example 2: toSparse

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def toSparse(baseX, X, dictionary):
    # convert baseX and X (parallel lists of feature dictionaries) to a sparse matrix, using dictionary to map feature keys to column indices
    out = lil_matrix((len(X),len(dictionary)))
    for i, (basex, x) in enumerate(zip(baseX, X)) :
        for key in basex :
            if key not in dictionary :
                continue
            out[i,dictionary[key]] = basex[key]  
        for key in x :
            if key not in dictionary :
                continue
            out[i,dictionary[key]] = x[key]
            
    out = out.tocsr()
    return out

    
# classifiers define :
#  train(X,y)
#  predict(X)
#  params()
#  load(params)
#  X is a sparse matrix, y is a vector of class labels (ints) 
Developer ID: ConvLab, Project: ConvLab, Lines of code: 25, Source file: Classifier.py
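The comment block at the end of the previous example documents the classifier interface expected by that project: train(X, y), predict(X), params(), and load(params), where X is a scipy sparse matrix and y holds integer class labels. Below is a minimal, hypothetical sketch of a class satisfying that interface, using scikit-learn's LogisticRegression as an illustrative backend; it is not the ConvLab implementation.

import pickle
from sklearn.linear_model import LogisticRegression

class SparseClassifier(object):
    """X is a scipy sparse matrix (e.g. built by toSparse); y is a vector of int class labels."""

    def __init__(self):
        self.model = LogisticRegression(max_iter=1000)

    def train(self, X, y):
        self.model.fit(X, y)

    def predict(self, X):
        return self.model.predict(X)

    def params(self):
        # Serialise the fitted estimator so load() can restore it.
        return pickle.dumps(self.model)

    def load(self, params):
        self.model = pickle.loads(params)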

Example 3: get_to_std

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def get_to_std(self):
        '''
        Retrieve the matrix that transforms a vector from this basis to the
        standard basis of this basis's dimension.

        Returns
        -------
        numpy array or scipy.sparse.lil_matrix
            An array of shape `(dim, size)` where `dim` is the dimension
            of this basis (the length of its vectors) and `size` is the
            size of this basis (its number of vectors).
        '''
        if self.sparse:
            toStd = _sps.lil_matrix((self.dim, self.size), dtype='complex')
        else:
            toStd = _np.zeros((self.dim, self.size), 'complex')

        for i, vel in enumerate(self.vector_elements):
            toStd[:, i] = vel
        return toStd 
Developer ID: pyGSTio, Project: pyGSTi, Lines of code: 22, Source file: basis.py

Example 4: _lazy_build_vector_elements

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def _lazy_build_vector_elements(self):
        if self.sparse:
            compMxs = []
        else:
            compMxs = _np.zeros((self.size, self.dim), 'complex')

        i, start = 0, 0
        for compbasis in self.component_bases:
            for lbl, vel in zip(compbasis.labels, compbasis.vector_elements):
                assert(_sps.issparse(vel) == self.sparse), "Inconsistent sparsity!"
                if self.sparse:
                    mx = _sps.lil_matrix((self.dim, 1))
                    mx[start:start + compbasis.dim, 0] = vel
                    compMxs.append(mx)
                else:
                    compMxs[i, start:start + compbasis.dim] = vel
                i += 1
            start += compbasis.dim

        assert(i == self.size)
        self._vector_elements = compMxs 
Developer ID: pyGSTio, Project: pyGSTi, Lines of code: 23, Source file: basis.py

Example 5: get_new_term_doc_mat

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def get_new_term_doc_mat(self, doc_domains, non_text=False):
		'''
		Combines documents together that are in the same domain

		Parameters
		----------
		doc_domains : array-like

		Returns
		-------
		scipy.sparse.csr_matrix

		'''
		assert len(doc_domains) == self.term_doc_matrix.get_num_docs()
		doc_domain_set = set(doc_domains)
		num_terms = self.term_doc_matrix.get_num_metadata() if non_text else self.term_doc_matrix.get_num_terms()
		num_domains = len(doc_domain_set)
		domain_mat = lil_matrix((num_domains, num_terms), dtype=int)
		X = self.term_doc_matrix.get_metadata_doc_mat() if non_text else self.term_doc_matrix.get_term_doc_mat()
		for i, domain in enumerate(doc_domain_set):
			domain_mat[i, :] = X[np.array(doc_domains == domain)].sum(axis=0)
		return domain_mat.tocsr() 
Developer ID: JasonKessler, Project: scattertext, Lines of code: 24, Source file: CombineDocsIntoDomains.py

Example 6: _flip_random_edges

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def _flip_random_edges(A, percent):
    """
    Flips values of A randomly.
    :param A: binary scipy sparse matrix.
    :param percent: percent of the edges to flip.
    :return: binary scipy sparse matrix.
    """
    if not A.shape[0] == A.shape[1]:
        raise ValueError('A must be a square matrix.')
    dtype = A.dtype
    A = sp.lil_matrix(A).astype(bool)
    n_elem = A.shape[0] ** 2
    n_elem_to_flip = round(percent * n_elem)
    unique_idx = np.random.choice(n_elem, replace=False, size=n_elem_to_flip)
    row_idx = unique_idx // A.shape[0]
    col_idx = unique_idx % A.shape[0]
    idxs = np.stack((row_idx, col_idx)).T
    for i in idxs:
        i = tuple(i)
        A[i] = np.logical_not(A[i])
    A = A.tocsr().astype(dtype)
    A.eliminate_zeros()
    return A 
Developer ID: danielegrattarola, Project: spektral, Lines of code: 25, Source file: mnist.py
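A hypothetical usage sketch of the helper above (the 5x5 identity adjacency and the 10% flip rate are made-up values; np, sp, and _flip_random_edges are assumed to be in scope as in the example):

import numpy as np
import scipy.sparse as sp

A = sp.eye(5, dtype=np.float32, format='csr')   # toy binary adjacency matrix
A_noisy = _flip_random_edges(A, percent=0.1)    # flip roughly 10% of the entries
print(A_noisy.toarray())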

Example 7: solve

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def solve(self):

        shape = self.bcs.shape
        if self._L is None:
            self._L = self.lhs.matrix(shape) # expensive operation, so cache it

        L = sparse.lil_matrix(self._L)
        f = self.rhs.reshape(-1, 1)

        nz = list(self.bcs.row_inds())

        L[nz, :] = self.bcs.lhs[nz, :]
        f[nz] = np.array(self.bcs.rhs[nz].toarray()).reshape(-1, 1)

        L = sparse.csr_matrix(L)
        return spsolve(L, f).reshape(shape) 
Developer ID: maroba, Project: findiff, Lines of code: 18, Source file: pde.py

Example 8: __setitem__

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def __setitem__(self, key, value):

        lng_inds = self.long_indices[key]

        if isinstance(value, tuple): # Neumann BC
            op, value = value
            # Avoid calling matrix for the whole grid! Optimize later!
            mat = sparse.lil_matrix(op.matrix(self.shape))
            self.lhs[lng_inds, :] = mat[lng_inds, :]
        else: # Dirichlet BC
            self.lhs[lng_inds, lng_inds] = 1

        if isinstance(value, np.ndarray):
            value = value.reshape(-1)[lng_inds]
            for i, v in zip(lng_inds, value):
                self.rhs[i] = v
        else:
            self.rhs[lng_inds] = value 
Developer ID: maroba, Project: findiff, Lines of code: 20, Source file: pde.py

Example 9: test_score_samples

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def test_score_samples():
    # Test score_samples (pseudo-likelihood) method.
    # Assert that pseudo-likelihood is computed without clipping.
    # See Fabian's blog, http://bit.ly/1iYefRk
    rng = np.random.RandomState(42)
    X = np.vstack([np.zeros(1000), np.ones(1000)])
    rbm1 = BernoulliRBM(n_components=10, batch_size=2,
                        n_iter=10, random_state=rng)
    rbm1.fit(X)
    assert (rbm1.score_samples(X) < -300).all()

    # Sparse vs. dense should not affect the output. Also test sparse input
    # validation.
    rbm1.random_state = 42
    d_score = rbm1.score_samples(X)
    rbm1.random_state = 42
    s_score = rbm1.score_samples(lil_matrix(X))
    assert_almost_equal(d_score, s_score)

    # Test numerical stability (#2785): would previously generate infinities
    # and crash with an exception.
    with np.errstate(under='ignore'):
        rbm1.score_samples([np.arange(1000) * 100]) 
Developer ID: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 25, Source file: test_rbm.py

Example 10: test_label_binarize_multilabel

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def test_label_binarize_multilabel():
    y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    expected = pos_label * y_ind
    y_sparse = [sparse_matrix(y_ind)
                for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
                                      dok_matrix, lil_matrix]]

    for y in [y_ind] + y_sparse:
        check_binarized_results(y, classes, pos_label, neg_label,
                                expected)

    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True) 
Developer ID: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 18, Source file: test_label.py

Example 11: _setup_metric

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def _setup_metric(X, true_labels, inv_psp=None, k=5):
    assert compatible_shapes(X, true_labels), \
        "ground truth and prediction matrices must have same shape."
    num_instances, num_labels = true_labels.shape
    indices = _get_topk(X, num_labels, k)
    ps_indices = None
    if inv_psp is not None:
        ps_indices = _get_topk(
            true_labels.dot(
                sp.spdiags(inv_psp, diags=0,
                           m=num_labels, n=num_labels)),
            num_labels, k)
        inv_psp = np.hstack([inv_psp, np.zeros((1))])

    true_labels = sp.hstack([true_labels,
                             sp.lil_matrix((num_instances, 1),
                                           dtype=np.int32)]).tocsr()
    return indices, true_labels, ps_indices, inv_psp 
Developer ID: kunaldahiya, Project: pyxclib, Lines of code: 20, Source file: xc_metrics.py

Example 12: vectorize

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def vectorize(features, vocab):
    """ Transform a features list into a numeric vector
        with a given vocab

    :type features: list
    :param features: list of feature keys to count

    :type vocab: dict
    :param vocab: mapping from feature key to column index
    """
    vec = lil_matrix((1, len(vocab)))

    for feat in features:
        try:
            fidx = vocab[feat]
            vec[0, fidx] += 1.0
        except KeyError:
            pass
    # Normalization
    vec = normalize(vec)
    return vec 
Developer ID: yizhongw, Project: StageDP, Lines of code: 23, Source file: other.py
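A hypothetical call to vectorize (the feature list and vocab are made up; normalize is assumed to come from sklearn.preprocessing in the original module):

vocab = {'w=the': 0, 'w=cat': 1, 'w=sat': 2}
vec = vectorize(['w=the', 'w=cat', 'w=the', 'w=unknown'], vocab)   # unknown keys are skipped
print(vec.toarray())   # 1 x 3 row vector of L2-normalised counts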

Example 13: compute_cooccurrence_constraint

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def compute_cooccurrence_constraint(self, nodes):
        """
        Co-occurrence constraint as described in the paper.

        Parameters
        ----------
        nodes: np.array
            Nodes whose features are considered for change

        Returns
        -------
        np.array [len(nodes), D], dtype bool
            Binary matrix of dimension len(nodes) x D. A 1 in entry n,d indicates that
            we are allowed to add feature d to the features of node n.

        """

        words_graph = self.cooc_matrix.copy()
        D = self.X_obs.shape[1]
        words_graph.setdiag(0)
        words_graph = (words_graph > 0)
        word_degrees = np.sum(words_graph, axis=0).A1

        inv_word_degrees = np.reciprocal(word_degrees.astype(float) + 1e-8)

        sd = np.zeros([self.N])
        for n in range(self.N):
            n_idx = self.X_obs[n, :].nonzero()[1]
            sd[n] = np.sum(inv_word_degrees[n_idx.tolist()])

        scores_matrix = sp.lil_matrix((self.N, D))

        for n in nodes:
            common_words = words_graph.multiply(self.X_obs[n])
            idegs = inv_word_degrees[common_words.nonzero()[1]]
            nnz = common_words.nonzero()[0]
            scores = np.array([idegs[nnz == ix].sum() for ix in range(D)])
            scores_matrix[n] = scores
        self.cooc_constraint = sp.csr_matrix(scores_matrix - 0.5 * sd[:, None] > 0) 
Developer ID: danielzuegner, Project: nettack, Lines of code: 41, Source file: nettack.py

Example 14: feature_scores

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def feature_scores(self):
        """
        Compute feature scores for all possible feature changes.
        """

        if self.cooc_constraint is None:
            self.compute_cooccurrence_constraint(self.influencer_nodes)
        logits = self.compute_logits()
        best_wrong_class = self.strongest_wrong_class(logits)
        gradient = self.gradient_wrt_x(self.label_u) - self.gradient_wrt_x(best_wrong_class)
        surrogate_loss = logits[self.label_u] - logits[best_wrong_class]

        gradients_flipped = (gradient * -1).tolil()
        gradients_flipped[self.X_obs.nonzero()] *= -1

        X_influencers = sp.lil_matrix(self.X_obs.shape)
        X_influencers[self.influencer_nodes] = self.X_obs[self.influencer_nodes]
        gradients_flipped = gradients_flipped.multiply((self.cooc_constraint + X_influencers) > 0)
        nnz_ixs = np.array(gradients_flipped.nonzero()).T

        sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
        sorted_ixs = nnz_ixs[sorting]
        grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]

        scores = surrogate_loss - grads
        return sorted_ixs[::-1], scores.A1[::-1] 
Developer ID: danielzuegner, Project: nettack, Lines of code: 28, Source file: nettack.py

Example 15: vxc_lil

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import lil_matrix [as alias]
def vxc_lil(self, **kw):
  """
    Computes the exchange-correlation matrix elements
    Args:
      sv : (System Variables), this must have arrays of coordinates and species, etc
    Returns:
      fxc,vxc,exc
  """
  from pyscf.nao.m_xc_scalar_ni import xc_scalar_ni
  from pyscf.nao.m_ao_matelem import ao_matelem_c
  from scipy.sparse import lil_matrix

  #dm, xc_code, deriv, ao_log=None, dtype=float64, **kvargs

  dm = kw['dm'] if 'dm' in kw else self.make_rdm1()
  kernel = kw['kernel'] if 'kernel' in kw else None
  ao_log = kw['ao_log'] if 'ao_log' in kw else self.ao_log
  xc_code = kw['xc_code'] if 'xc_code' in kw else self.xc_code
  kw.pop('xc_code',None)
  dtype = kw['dtype'] if 'dtype' in kw else self.dtype
  
  aome = ao_matelem_c(self.ao_log.rr, self.ao_log.pp, self, dm)
  me = aome.init_one_set(self.ao_log) if ao_log is None else aome.init_one_set(ao_log)
  atom2s = zeros((self.natm+1), dtype=int64)
  for atom,sp in enumerate(self.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp]
  
  lil = [lil_matrix((atom2s[-1],atom2s[-1]), dtype=dtype) for i in range((self.nspin-1)*2+1)]

  for atom1,[sp1,rv1,s1,f1] in enumerate(zip(self.atom2sp,self.atom2coord,atom2s,atom2s[1:])):
    for atom2,[sp2,rv2,s2,f2] in enumerate(zip(self.atom2sp,self.atom2coord,atom2s,atom2s[1:])):
      blk = xc_scalar_ni(me,sp1,rv1,sp2,rv2,xc_code=xc_code,**kw)
      for i,b in enumerate(blk): lil[i][s1:f1,s2:f2] = b[:,:]

  return lil 
Developer ID: pyscf, Project: pyscf, Lines of code: 36, Source file: m_vxc_lil.py


Note: The scipy.sparse.lil_matrix method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before redistributing or using the code. Do not reproduce this article without permission.