

Python sparse.save_npz Method Code Examples

This article collects typical code examples of the Python method scipy.sparse.save_npz. If you are wondering what sparse.save_npz does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples from the containing module, scipy.sparse.


The following shows 15 code examples of sparse.save_npz, sorted by popularity by default.
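
Before the project examples, here is a minimal self-contained sketch of the basic save/load round trip (the file name example.npz is arbitrary):

import numpy as np
from scipy import sparse

# Build a small CSR matrix and save it. save_npz appends the '.npz'
# extension automatically when the file name lacks it.
mat = sparse.csr_matrix(np.eye(3))
sparse.save_npz('example.npz', mat)

# Load it back and check that the round trip preserved every entry.
loaded = sparse.load_npz('example.npz')
assert (mat != loaded).nnz == 0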

Example 1: save_adj

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def save_adj(self, root=r'/tmp/', name='mod_adj'):
        """Save attacked adjacency matrix.

        Parameters
        ----------
        root :
            root directory where the variable should be saved
        name : str
            saved file name

        Returns
        -------
        None.

        """
        assert self.modified_adj is not None, \
                'modified_adj is None! Please perturb the graph first.'
        name = name + '.npz'
        modified_adj = self.modified_adj

        if type(modified_adj) is torch.Tensor:
            sparse_adj = utils.to_scipy(modified_adj)
            sp.save_npz(osp.join(root, name), sparse_adj)
        else:
            sp.save_npz(osp.join(root, name), modified_adj) 
Developer ID: DSE-MSU, Project: DeepRobust, Lines: 27, Source: base_attack.py

Example 2: save_state

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def save_state(self, folderpath):
        state = {
            'num_evals': len(self.vecs_lst),
            'vals_lst': self.vals_lst,
        }
        ut.write_jsonfile(state,
                          ut.join_paths([folderpath, 'hash_model_state.json']))
        for i, vecs in enumerate(self.vecs_lst):
            sp.save_npz(ut.join_paths([folderpath, str(i) + '.npz']), vecs)

    # TODO: improve 
Developer ID: negrinho, Project: deep_architect, Lines: 13, Source: hashing.py
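
Example 2 shows only the save side. Below is a minimal hedged sketch of reading the state back, using the standard library instead of the project's ut helpers and assuming the same file layout (the load_state name is an assumption, not part of the project):

import json
import os
from scipy import sparse

def load_state(folderpath):
    # Read the JSON metadata written by save_state.
    with open(os.path.join(folderpath, 'hash_model_state.json')) as f:
        state = json.load(f)
    # Load the per-evaluation sparse matrices saved alongside it.
    vecs_lst = [sparse.load_npz(os.path.join(folderpath, str(i) + '.npz'))
                for i in range(state['num_evals'])]
    return state['vals_lst'], vecs_lst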

Example 3: save_features

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def save_features(self, root=r'/tmp/', name='mod_features'):
        """Save attacked node feature matrix.

        Parameters
        ----------
        root :
            root directory where the variable should be saved
        name : str
            saved file name

        Returns
        -------
        None.

        """

        assert self.modified_features is not None, \
                'modified_features is None! Please perturb the graph first.'
        name = name + '.npz'
        modified_features = self.modified_features

        if type(modified_features) is torch.Tensor:
            sparse_features = utils.to_scipy(modified_features)
            sp.save_npz(osp.join(root, name), sparse_features)
        else:
            sp.save_npz(osp.join(root, name), modified_features) 
Developer ID: DSE-MSU, Project: DeepRobust, Lines: 28, Source: base_attack.py

Example 4: load_term_counts

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def load_term_counts(reddit, path='../dat/reddit/', force_redo=False):
	count_filename = path  + 'term_counts'
	vocab_filename = path + 'vocab'

	if os.path.exists(count_filename + '.npz') and not force_redo:
		return sparse.load_npz(count_filename + '.npz').toarray(), np.load(vocab_filename + '.npy')

	post_docs = reddit['post_text'].values
	counts, vocab, _ = tokenize_documents(post_docs)    
	sparse.save_npz(count_filename, counts)
	np.save(vocab_filename, vocab)
	return counts.toarray(), np.array(vocab) 
Developer ID: blei-lab, Project: causal-text-embeddings, Lines: 14, Source: reddit_output_att.py

Example 5: load_term_counts

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def load_term_counts(df, path='../dat/PeerRead/', force_redo=False, text_col='abstract_text'):
	count_filename = path  + 'term_counts'
	vocab_filename = path + 'vocab'

	if os.path.exists(count_filename + '.npz') and not force_redo:
		return sparse.load_npz(count_filename + '.npz').toarray(), np.load(vocab_filename + '.npy')

	post_docs = df[text_col].values
	counts, vocab, _ = tokenize_documents(post_docs)    
	sparse.save_npz(count_filename, counts)
	np.save(vocab_filename, vocab)
	return counts.toarray(), np.array(vocab) 
Developer ID: blei-lab, Project: causal-text-embeddings, Lines: 14, Source: peerread_output_att.py

Example 6: load_term_counts

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def load_term_counts(path='../dat/', force_redo=False):
    count_filename = path  + 'reddit_term_counts'
    vocab_filename = path + 'vocab'

    if os.path.exists(count_filename + '.npz') and not force_redo:
        return sparse.load_npz(count_filename + '.npz'), np.load(vocab_filename + '.npy')
    
    reddit = load_reddit()
    post_docs = reddit['post_text'].values
    counts, vocab = tokenize_documents(post_docs)
    sparse.save_npz(path + 'reddit_term_counts', counts)
    np.save(path + 'vocab', vocab)
    return counts, vocab 
Developer ID: blei-lab, Project: causal-text-embeddings, Lines: 15, Source: reddit_posts.py

Example 7: load_term_counts

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def load_term_counts(reddit, path='../dat/reddit/', force_redo=False):
	count_filename = path  + 'term_counts'
	vocab_filename = path + 'vocab'

	if os.path.exists(count_filename + '.npz') and not force_redo:
		return sparse.load_npz(count_filename + '.npz'), np.load(vocab_filename + '.npy')

	post_docs = reddit['post_text'].values
	counts, vocab, _ = tokenize_documents(post_docs)    
	sparse.save_npz(count_filename, counts)
	np.save(vocab_filename, vocab)
	return counts, np.array(vocab) 
Developer ID: blei-lab, Project: causal-text-embeddings, Lines: 14, Source: reddit_fit_topics.py

Example 8: load_term_counts

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def load_term_counts(df, path='../dat/PeerRead/', force_redo=False, text_col='abstract_text'):
	count_filename = path  + 'term_counts'
	vocab_filename = path + 'vocab'

	if os.path.exists(count_filename + '.npz') and not force_redo:
		return sparse.load_npz(count_filename + '.npz'), np.load(vocab_filename + '.npy')

	post_docs = df[text_col].values
	counts, vocab, _ = tokenize_documents(post_docs)    
	sparse.save_npz(count_filename, counts)
	np.save(vocab_filename, vocab)
	return counts, np.array(vocab) 
Developer ID: blei-lab, Project: causal-text-embeddings, Lines: 14, Source: peerread_fit_topics.py
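
A note on examples 4 through 8: the save calls pass count_filename without an '.npz' suffix because save_npz appends the extension automatically when it is missing, while load_npz does not, which is why the matching load calls add '.npz' by hand. A minimal illustration (matrix and file name are arbitrary):

import numpy as np
from scipy import sparse

counts = sparse.csr_matrix(np.eye(2))
sparse.save_npz('term_counts', counts)       # writes term_counts.npz
counts = sparse.load_npz('term_counts.npz')  # load_npz needs the full name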

Example 9: create_matrix

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def create_matrix(mf, mfname, ofname_cnt):
    indptr = np.zeros(LIM+1, dtype=np.int32)
    indices = array.array('I')
    ofname = mfname.rsplit('.', 2)[0] + '_{}.csr_matrix'.format(ofname_cnt)  # number each chunk so recursive calls write distinct files
    j = 0
    for j, d in enumerate(mf):
        if j>LIM: break
        terms = d.decode('utf-8').strip().split(',')
        if len(terms)<1: continue
        i, terms = int(terms[0]), terms[1:]
        indices.extend([_get(t) for t in terms])
        indptr[j%LIM+1] = len(indices)
        if j % 10000 == 0:
            print("Done {}".format(j))
 
    # print("Saving: j={} start: {} stop: {}".format(j, start, stop))
    if j>0:
        print("Saving... {}".format(ofname))
        if len(indptr) > j:
            indptr = indptr[:j+2]
        print(len(indices), indptr)
         
        M = sps.csr_matrix(
            (np.ones(len(indices)), indices, indptr),
            shape=(len(indptr)-1, num_apps),
            dtype=bool
        )
        print(M.nnz)
        sps.save_npz(ofname, M)
        create_matrix(mf, mfname, ofname_cnt+1) 
Developer ID: stopipv, Project: isdi, Lines: 32, Source: make-trie.py
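
Example 9 assembles the matrix directly from raw CSR components (data, indices, indptr) before saving. A minimal sketch of that constructor with illustrative values:

import numpy as np
from scipy import sparse

# Row i owns the column indices indices[indptr[i]:indptr[i+1]].
indptr = np.array([0, 2, 3])   # two rows: row 0 has 2 nonzeros, row 1 has 1
indices = np.array([0, 2, 1])  # column index of each nonzero entry
data = np.ones(3, dtype=bool)

M = sparse.csr_matrix((data, indices, indptr), shape=(2, 3))
sparse.save_npz('components.npz', M)  # illustrative file name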

Example 10: join_smart_mat

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def join_smart_mat(fnames):
    """Join the CSR matrices saved in fnames into one matrix and save it."""
    indptr = np.zeros(num_devices+1, dtype=np.int32)
    indices = np.zeros(Msize, dtype=np.int32)
    i_indptr, i_indices = 0, 0
    ofname = 'joined_mat.npz'
    for mf in fnames:
        M = sps.load_npz(mf)
        print("Loaded matrix={}. shape={}. nnz={}".format(mf, M.shape, M.nnz))
        # Shift this block's row pointers by the nonzeros seen so far,
        # then append its column indices to the shared buffer.
        indptr[i_indptr+1:i_indptr+len(M.indptr)] = M.indptr[1:] + indptr[i_indptr]
        i_indptr += len(M.indptr) - 1
        indices[i_indices:i_indices+len(M.indices)] = M.indices
        i_indices += len(M.indices)
        del M
    print("Saving the file...")
    M = sps.csr_matrix(
        (np.ones(i_indices), indices[:i_indices], indptr),
        shape=(len(indptr)-1, num_apps),
        dtype=bool
    )
    print(M.nnz)
    sps.save_npz(ofname, M)
Developer ID: stopipv, Project: isdi, Lines: 28, Source: make-trie.py

Example 11: join_mats

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def join_mats(fnames, s, e):
    ofname="mat_{}_{}".format(s, e)
    print(ofname, fnames)
    M = [sps.load_npz(f) for f in fnames]
    print("Done reading..")
    sps.save_npz(
        ofname,
        sps.vstack(M)
    ) 
Developer ID: stopipv, Project: isdi, Lines: 11, Source: make-trie.py

Example 12: get_kg_feature

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def get_kg_feature(self, kg_feat_file):
        try:
            kg_feat_mat = sp.load_npz(kg_feat_file)
            print('already load item kg feature mat', kg_feat_mat.shape)
        except Exception:
            kg_feat_mat = self._create_kg_feat_mat()
            sp.save_npz(kg_feat_file, kg_feat_mat)
            print('already save item kg feature mat:', kg_feat_file)
        return kg_feat_mat 
Developer ID: xiangwang1223, Project: knowledge_graph_attention_network, Lines: 11, Source: loader_nfm.py

Example 13: get_adj_mat

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def get_adj_mat(self):
        try:
            t1 = time()
            adj_mat = sp.load_npz(self.path + '/s_adj_mat.npz')
            norm_adj_mat = sp.load_npz(self.path + '/s_norm_adj_mat.npz')
            mean_adj_mat = sp.load_npz(self.path + '/s_mean_adj_mat.npz')
            print('already load adj matrix', adj_mat.shape, time() - t1)

        except Exception:
            adj_mat, norm_adj_mat, mean_adj_mat = self.create_adj_mat()
            sp.save_npz(self.path + '/s_adj_mat.npz', adj_mat)
            sp.save_npz(self.path + '/s_norm_adj_mat.npz', norm_adj_mat)
            sp.save_npz(self.path + '/s_mean_adj_mat.npz', mean_adj_mat)
        return adj_mat, norm_adj_mat, mean_adj_mat 
Developer ID: xiangwang1223, Project: neural_graph_collaborative_filtering, Lines: 16, Source: load_data.py
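
Examples 12 and 13 both follow a load-or-compute caching pattern around save_npz. A generic sketch of the same idea (the load_or_build name and the build callable are placeholders, not part of either project):

from scipy import sparse

def load_or_build(path, build):
    # Return the matrix cached at path; rebuild and save it on a miss.
    try:
        return sparse.load_npz(path)
    except OSError:
        mat = build()
        sparse.save_npz(path, mat)
        return mat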

Example 14: save_matrix

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def save_matrix(self, fname):
        sparse.save_npz(fname, self.mat) 
Developer ID: wetneb, Project: opentapioca, Lines: 4, Source: wikidatagraph.py

Example 15: main

# Module to import: from scipy import sparse [as alias]
# Or: from scipy.sparse import save_npz [as alias]
def main():
    args = get_args()
    if args.nfs:
        from nsml import NSML_NFS_OUTPUT
        args.dump_dir = os.path.join(NSML_NFS_OUTPUT, args.dump_dir)
        args.out_dir = os.path.join(NSML_NFS_OUTPUT, args.out_dir)
        args.ranker_path = os.path.join(NSML_NFS_OUTPUT, args.ranker_path)
    args.ranker_path = os.path.join(args.ranker_path, 'docs-tfidf-ngram=2-hash=16777216-tokenizer=simple.npz')
    os.makedirs(args.out_dir)
    assert os.path.isdir(args.dump_dir)
    dump_paths = sorted([os.path.join(args.dump_dir, name) for name in os.listdir(args.dump_dir) if 'hdf5' in name])[
                 args.start:args.end]
    print(dump_paths)
    dump_names = [os.path.splitext(os.path.basename(path))[0] for path in dump_paths]
    dump_ranges = [list(map(int, name.split('-'))) for name in dump_names]
    phrase_dumps = [h5py.File(path, 'r') for path in dump_paths]

    ranker = MyTfidfDocRanker(
        tfidf_path=args.ranker_path,
        strict=False
    )

    print('Ranker shape {} from {}'.format(ranker.doc_mat.shape, args.ranker_path))
    # new_mat = ranker.doc_mat.T.tocsr()
    # sp.save_npz('doc_tfidf.npz', new_mat)
    dump_tfidf(ranker, phrase_dumps, dump_names, args) 
Developer ID: uwnlp, Project: denspi, Lines: 29, Source: dump_tfidf.py


Note: The scipy.sparse.save_npz examples above were compiled by 純淨天空 from GitHub, MSDocs, and other open source code and documentation platforms. The snippets are drawn from open source projects contributed by many developers; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.