

Python networkx.from_scipy_sparse_matrix method code examples

This article collects typical usage examples of the Python method networkx.from_scipy_sparse_matrix. If you are wondering what exactly networkx.from_scipy_sparse_matrix does, how to call it, or where to find real-world uses of it, the curated code examples below should help. You can also explore further usage examples from the networkx package.


The 15 code examples below all demonstrate networkx.from_scipy_sparse_matrix and are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
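
Before the examples, here is a minimal sketch of what the function does: it converts a SciPy sparse adjacency matrix into a NetworkX graph, with one node per row/column and one edge per non-zero entry, stored in the edge attribute 'weight'. Note that newer NetworkX releases (3.x) removed this function in favour of from_scipy_sparse_array; the sketch below assumes a NetworkX 2.x environment.

import networkx as nx
import scipy.sparse as sp

# 3-node adjacency matrix in CSR format
A = sp.csr_matrix([[0, 1, 0],
                   [1, 0, 2],
                   [0, 2, 0]])

# one node per row/column, one weighted edge per non-zero entry
g = nx.from_scipy_sparse_matrix(A)               # undirected nx.Graph by default
print(g.number_of_nodes(), g.number_of_edges())  # 3 2
print(g[1][2]['weight'])                         # 2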

Example 1: text_to_graph

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def text_to_graph(text):
    import networkx as nx
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.neighbors import kneighbors_graph

    # use tfidf to transform texts into feature vectors
    vectorizer = TfidfVectorizer()
    vectors = vectorizer.fit_transform(text)

    # build a fully connected graph: with k = N and include_self=True,
    # every sample is linked to all N samples
    N = vectors.shape[0]
    mat = kneighbors_graph(vectors, N, metric='cosine', mode='distance', include_self=True)
    mat.data = 1 - mat.data  # convert cosine distance to similarity

    g = nx.from_scipy_sparse_matrix(mat, create_using=nx.Graph())

    return g 
Developer ID: thunlp, Project: OpenNE, Lines of code: 19, Source file: 20newsgroup.py
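
For reference, a minimal call of text_to_graph might look like the following; the three document strings are made up purely for illustration:

import networkx as nx

docs = [
    "graphs connect documents by similarity",
    "tf-idf turns documents into sparse vectors",
    "pagerank ranks nodes in a graph",
]
g = text_to_graph(docs)
print(g.number_of_nodes())                  # 3, one node per document
print(nx.get_edge_attributes(g, 'weight'))  # cosine similarities (including self-loops) as edge weights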

Example 2: calculate_max_depth_over_max_width

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_max_depth_over_max_width(comment_tree):
    comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())

    if len(comment_tree_nx) == 0:
        max_depth_over_max_width = 0.0
    else:
        # depth of a node = its shortest-path distance from the root (node 0)
        node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
        depth_to_nodecount = collections.defaultdict(int)

        # width of a depth level = number of nodes at that depth
        for k, v in node_to_depth.items():
            depth_to_nodecount[v] += 1

        max_depth = max(node_to_depth.values())
        max_width = max(depth_to_nodecount.values())

        max_depth_over_max_width = max_depth/max_width

    return max_depth_over_max_width 
Developer ID: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 20, Source file: comment_tree.py

Example 3: calculate_comment_tree_hirsch

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_comment_tree_hirsch(comment_tree):
    comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())

    if len(comment_tree_nx) == 0:
        comment_tree_hirsch = 0.0
    else:
        node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)

        depth_to_nodecount = collections.defaultdict(int)

        for k, v in node_to_depth.items():
            depth_to_nodecount[v] += 1

        # h-index analogue: the largest h such that some depth level contains at least h nodes
        comment_tree_hirsch = max(node_to_depth.values())
        while True:
            if depth_to_nodecount[comment_tree_hirsch] >= comment_tree_hirsch:
                break
            else:
                comment_tree_hirsch -= 1

    return comment_tree_hirsch 
Developer ID: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 23, Source file: comment_tree.py

Example 4: init_setup

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def init_setup():
    data = Dataset(root='/tmp/', name=args.dataset, setting='gcn')

    data.features = normalize_feature(data.features)
    adj, features, labels = data.adj, data.features, data.labels

    StaticGraph.graph = nx.from_scipy_sparse_matrix(adj)
    dict_of_lists = nx.to_dict_of_lists(StaticGraph.graph)

    idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
    device = torch.device('cuda') if args.ctx == 'gpu' else 'cpu'

    # black box setting
    adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True, device=device)
    victim_model = load_victim_model(data, device=device, file_path=args.saved_model)
    setattr(victim_model, 'norm_tool',  GraphNormTool(normalize=True, gm='gcn', device=device))
    output = victim_model.predict(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))

    return features, labels, idx_val, idx_test, victim_model, dict_of_lists, adj 
Developer ID: DSE-MSU, Project: DeepRobust, Lines of code: 26, Source file: test_rl_s2v.py

Example 5: textrank_tfidf

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def textrank_tfidf(sentences, topk=6):
    """
        使用tf-idf作為相似度, networkx.pagerank獲取中心句子作為摘要
    :param sentences: str, docs of text
    :param topk:int
    :return:list
    """
    # 切句子
    sentences = list(cut_sentence(sentences))
    # tf-idf相似度
    matrix_norm = tdidf_sim(sentences)
    # 構建相似度矩陣
    tfidf_sim = nx.from_scipy_sparse_matrix(matrix_norm * matrix_norm.T)
    # nx.pagerank
    sens_scores = nx.pagerank(tfidf_sim)
    # 得分排序
    sen_rank = sorted(sens_scores.items(), key=lambda x: x[1], reverse=True)
    # 保留topk個, 防止越界
    topk = min(len(sentences), topk)
    # 返回原句子和得分
    return [(sr[1], sentences[sr[0]]) for sr in sen_rank][0:topk] 
Developer ID: yongzhuo, Project: nlg-yongzhuo, Lines of code: 23, Source file: textrank_sklearn.py

Example 6: textrank_text_summarizer

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def textrank_text_summarizer(documents, num_sentences=2,
                             feature_type='frequency'):
    
    # norm_sentences and sentences used below are module-level variables defined
    # elsewhere in the original script; the feature_type argument is unused in this snippet
    vec, dt_matrix = build_feature_matrix(norm_sentences,
                                          feature_type='tfidf')
    similarity_matrix = (dt_matrix * dt_matrix.T)
        
    similarity_graph = networkx.from_scipy_sparse_matrix(similarity_matrix)
    scores = networkx.pagerank(similarity_graph)   
    
    ranked_sentences = sorted(((score, index) 
                                for index, score 
                                in scores.items()), 
                              reverse=True)

    top_sentence_indices = [ranked_sentences[index][1] 
                            for index in range(num_sentences)]
    top_sentence_indices.sort()
    
    for index in top_sentence_indices:
        print(sentences[index]) 
Developer ID: dipanjanS, Project: text-analytics-with-python, Lines of code: 23, Source file: document_summarization.py

Example 7: test_differential_operator

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def test_differential_operator(self, n_vertices=98):
        r"""The Laplacian must always be the divergence of the gradient,
        whether the Laplacian is combinatorial or normalized, and whether the
        graph is directed or weighted."""
        def test_incidence_nx(graph):
            r"""Test that the incidence matrix corresponds to NetworkX."""
            incidence_pg = np.sign(graph.D.toarray())
            G = nx.OrderedDiGraph if graph.is_directed() else nx.OrderedGraph
            graph_nx = nx.from_scipy_sparse_matrix(graph.W, create_using=G)
            incidence_nx = nx.incidence_matrix(graph_nx, oriented=True)
            np.testing.assert_equal(incidence_pg, incidence_nx.toarray())
        for graph in [graphs.Graph(np.zeros((n_vertices, n_vertices))),
                      graphs.Graph(np.identity(n_vertices)),
                      graphs.Graph([[0, 0.8], [0.8, 0]]),
                      graphs.Graph([[1.3, 0], [0.4, 0.5]]),
                      graphs.ErdosRenyi(n_vertices, directed=False, seed=42),
                      graphs.ErdosRenyi(n_vertices, directed=True, seed=42)]:
            for lap_type in ['combinatorial', 'normalized']:
                graph.compute_laplacian(lap_type)
                graph.compute_differential_operator()
                L = graph.D.dot(graph.D.T)
                np.testing.assert_allclose(L.toarray(), graph.L.toarray())
                test_incidence_nx(graph) 
Developer ID: epfl-lts2, Project: pygsp, Lines of code: 25, Source file: test_graphs.py

Example 8: draw_adjacency_graph

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def draw_adjacency_graph(adjacency_matrix,
                         node_color=None,
                         size=10,
                         layout='graphviz',
                         prog='neato',
                         node_size=80,
                         colormap='autumn'):
    """draw_adjacency_graph."""
    graph = nx.from_scipy_sparse_matrix(adjacency_matrix)

    plt.figure(figsize=(size, size))
    plt.grid(False)
    plt.axis('off')

    if layout == 'graphviz':
        # note: on NetworkX >= 2.0 this function lives at nx.nx_agraph.graphviz_layout
        pos = nx.graphviz_layout(graph, prog=prog)
    else:
        pos = nx.spring_layout(graph)

    if node_color is None or len(node_color) == 0:
        node_color = 'gray'
    nx.draw_networkx_nodes(graph, pos,
                           node_color=node_color,
                           alpha=0.6,
                           node_size=node_size,
                           cmap=plt.get_cmap(colormap))
    nx.draw_networkx_edges(graph, pos, alpha=0.5)
    plt.show()


# draw a whole set of graphs:: 
Developer ID: fabriziocosta, Project: EDeN, Lines of code: 33, Source file: __init__.py

Example 9: calculate_max_depth

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_max_depth(comment_tree):
    comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())

    if len(comment_tree_nx) == 0:
        max_depth = 0.0
    else:
        node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
        max_depth = max(node_to_depth.values())

    return max_depth 
Developer ID: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 12, Source file: comment_tree.py

Example 10: calculate_avg_depth

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_avg_depth(comment_tree):
    comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())

    if len(comment_tree_nx) == 0:
        avg_depth = 0.0
    else:
        node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
        avg_depth = statistics.mean(node_to_depth.values())

    return avg_depth 
Developer ID: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 12, Source file: comment_tree.py

Example 11: calculate_max_width

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_max_width(comment_tree):
    comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())

    if len(comment_tree_nx) == 0:
        max_width = 1.0
    else:
        node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
        depth_to_nodecount = collections.defaultdict(int)

        for k, v in node_to_depth.items():
            depth_to_nodecount[v] += 1

        max_width = max(depth_to_nodecount.values())

    return max_width 
Developer ID: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 17, Source file: comment_tree.py

Example 12: calculate_avg_width

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def calculate_avg_width(comment_tree):
    comment_tree_nx = nx.from_scipy_sparse_matrix(comment_tree, create_using=nx.Graph())

    if len(comment_tree_nx) == 0:
        avg_width = 1.0
    else:
        node_to_depth = nx.shortest_path_length(comment_tree_nx, 0)
        depth_to_nodecount = collections.defaultdict(int)

        for k, v in node_to_depth.items():
            depth_to_nodecount[v] += 1

        avg_width = statistics.mean(depth_to_nodecount.values())

    return avg_width 
Developer ID: MKLab-ITI, Project: news-popularity-prediction, Lines of code: 17, Source file: comment_tree.py

Example 13: overlay_skeleton_networkx

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def overlay_skeleton_networkx(csr_graph, coordinates, *, axis=None,
                              image=None, cmap=None, **kwargs):
    """Draw the skeleton as a NetworkX graph, optionally overlaid on an image.

    Due to the size of NetworkX drawing elements, this is only recommended
    for very small skeletons.

    Parameters
    ----------
    csr_graph : SciPy Sparse matrix
        The skeleton graph in SciPy CSR format.
    coordinates : array, shape (N_points, 2)
        The coordinates of each point in the skeleton. ``coordinates.shape[0]``
        should be equal to ``csr_graph.shape[0]``.

    Other Parameters
    ----------------
    axis : Matplotlib Axes object, optional
        The Axes on which to plot the data. If None, a new figure and axes will
        be created.
    image : array, shape (M, N[, 3])
        An image on which to overlay the skeleton. ``image.shape`` should be
        greater than ``np.max(coordinates, axis=0)``.
    **kwargs : keyword arguments
        Arguments passed on to `nx.draw_networkx`. Particularly useful ones
        include ``node_size=`` and ``font_size=``.
    """
    if axis is None:
        _, axis = plt.subplots()
    if image is not None:
        cmap = cmap or 'gray'
        axis.imshow(image, cmap=cmap)
    gnx = nx.from_scipy_sparse_matrix(csr_graph)
    # Note: we invert the positions because Matplotlib uses x/y for
    # scatterplot, but the coordinates are row/column NumPy indexing
    positions = dict(zip(range(coordinates.shape[0]), coordinates[:, ::-1]))
    _clean_positions_dict(positions, gnx)  # remove nodes not in Graph
    nx.draw_networkx(gnx, pos=positions, ax=axis, **kwargs)
    return axis 
Developer ID: jni, Project: skan, Lines of code: 41, Source file: draw.py

Example 14: make_blogcatalog

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def make_blogcatalog(edgelist="../data/blogcatalog.mat",
                    dedupe=True):
    """
    Graph with cluster labels from blogcatalog
    
    Dedupe: Whether to deduplicate results (else some nodes have multilabels)
    """
    mat = scipy.io.loadmat(edgelist)
    nodes = mat['network'].tocsr()
    groups = mat['group']
    G = nx.from_scipy_sparse_matrix(nodes)
    labels = (
        pd.DataFrame(groups.todense())
        .idxmax(axis=1)
        .reset_index(drop=False)
    )
    labels.columns = ['node', 'label']
    labels.node = labels.node.astype(int)
    if dedupe:
        labels = labels.loc[~labels.node.duplicated()
                        ].reset_index(drop=True)
        labels.label = labels.label.astype(int) - 1
        return G, labels
    else:
        df = pd.DataFrame(groups.todense())
        labels_list = df.apply(lambda row: list((row.loc[row > 0]).index), axis=1)
        return G, pd.DataFrame({'node': list(G), 'mlabels': pd.Series(labels_list)}) 
Developer ID: VHRanger, Project: nodevectors, Lines of code: 29, Source file: graph_eval.py

Example 15: init_setup

# Required module: import networkx [as alias]
# Or: from networkx import from_scipy_sparse_matrix [as alias]
def init_setup():
    data = Dataset(root='/tmp/', name=args.dataset, setting='nettack')
    injecting_nodes(data)

    adj, features, labels = data.adj, data.features, data.labels

    StaticGraph.graph = nx.from_scipy_sparse_matrix(adj)
    dict_of_lists = nx.to_dict_of_lists(StaticGraph.graph)

    idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
    device = torch.device('cuda') if args.ctx == 'gpu' else 'cpu'

    # gray box setting
    adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True, device=device)
    # Setup victim model
    victim_model = GCN(nfeat=features.shape[1], nclass=labels.max().item()+1,
                    nhid=16, dropout=0.5, weight_decay=5e-4, device=device)

    victim_model = victim_model.to(device)
    victim_model.fit(features, adj, labels, idx_train, idx_val)
    setattr(victim_model, 'norm_tool',  GraphNormTool(normalize=True, gm='gcn', device=device))

    output = victim_model.predict(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))

    return features, labels, idx_train, idx_val, idx_test, victim_model, dict_of_lists, adj 
Developer ID: DSE-MSU, Project: DeepRobust, Lines of code: 32, Source file: test_nipa.py


Note: the networkx.from_scipy_sparse_matrix examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or reuse should follow the license of the corresponding project. Do not reproduce this article without permission.