

Python json_graph.node_link_graph Method Code Examples

This article collects typical usage examples of the Python method networkx.readwrite.json_graph.node_link_graph. If you are wondering how json_graph.node_link_graph is used in practice, what it does, or simply want concrete examples, the curated code samples below may help. You can also explore further usage examples from the networkx.readwrite.json_graph module.


The following presents 10 code examples of the json_graph.node_link_graph method, sorted by popularity by default.
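
Before the examples, here is a minimal, self-contained sketch of the typical round trip: json_graph.node_link_data serializes a graph to a node-link dictionary, and json_graph.node_link_graph reconstructs a graph from that dictionary. The graph contents below are illustrative only and do not come from any of the projects listed here.

import json

import networkx as nx
from networkx.readwrite import json_graph

# Build a small graph with node and edge attributes.
G = nx.Graph()
G.add_node(0, label='a')
G.add_node(1, label='b')
G.add_edge(0, 1, weight=0.5)

# Serialize to a node-link dict, dump to a JSON string, then rebuild the graph.
data = json_graph.node_link_data(G)
s = json.dumps(data)
H = json_graph.node_link_graph(json.loads(s))

assert set(H.nodes()) == {0, 1}
assert H[0][1]['weight'] == 0.5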

Example 1: compute_feature_smoothness

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def compute_feature_smoothness(path, times=0):
    G_org = json_graph.node_link_graph(json.load(open(path+'-G.json')))
    # G_org = remove_unlabeled(G_org)
    if nx.is_directed(G_org):
        G_org = G_org.to_undirected()
    edge_num = G_org.number_of_edges()
    G = pygsp.graphs.Graph(nx.adjacency_matrix(G_org))
    feats = np.load(path+'-feats.npy')
    # smooth
    for i in range(times):
        feats = feature_broadcast(feats, G_org)
    np.save(path+'-feats_'+str(times)+'.npy', feats)

    min_max_scaler = preprocessing.MinMaxScaler()
    feats = min_max_scaler.fit_transform(feats)
    smoothness = np.zeros(feats.shape[1])
    for src, dst in G_org.edges():
        smoothness += (feats[src]-feats[dst])*(feats[src]-feats[dst])
    smoothness = np.linalg.norm(smoothness,ord=1)
    print('The smoothness is: ', 2*smoothness/edge_num/feats.shape[1]) 
Author: yifan-h, Project: CS-GNN, Lines: 22, Source: smoothness.py

Example 2: compute_label_smoothness

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def compute_label_smoothness(path, rate=0.):
    G_org = json_graph.node_link_graph(json.load(open(path+'-G.json')))
    # G_org = remove_unlabeled(G_org)
    if nx.is_directed(G_org):
        G_org = G_org.to_undirected()
    class_map = json.load(open(path+'-class_map.json'))
    for k, v in class_map.items():
        if type(v) != list:
            class_map = convert_list(class_map)
        break
    labels = convert_ndarray(class_map)
    labels = np.squeeze(label_to_vector(labels))

    # smooth
    G_org = label_broadcast(G_org, labels, rate)
    with open(path+'-G_'+str(rate)+'.json', 'w') as f:
        f.write(json.dumps(json_graph.node_link_data(G_org)))

    edge_num = G_org.number_of_edges()
    G = pygsp.graphs.Graph(nx.adjacency_matrix(G_org))
    smoothness = 0
    for src, dst in G_org.edges():
        if labels[src] != labels[dst]:
            smoothness += 1
    print('The smoothness is: ', 2*smoothness/edge_num) 
Author: yifan-h, Project: CS-GNN, Lines: 27, Source: smoothness.py

Example 3: as_tree

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def as_tree(graph, root=OPENSTACK_CLUSTER, reverse=False):
        if nx.__version__ >= '2.0':
            linked_graph = json_graph.node_link_graph(
                graph, attrs={'name': 'graph_index'})
        else:
            linked_graph = json_graph.node_link_graph(graph)
        if 0 == nx.number_of_nodes(linked_graph):
            return {}
        if reverse:
            linked_graph = linked_graph.reverse()
        if nx.__version__ >= '2.0':
            return json_graph.tree_data(
                linked_graph,
                root=root,
                attrs={'id': 'graph_index', 'children': 'children'})
        else:
            return json_graph.tree_data(linked_graph, root=root) 
Author: openstack, Project: vitrage, Lines: 19, Source: topology.py
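
For context, json_graph.tree_data (used in the snippet above) converts a directed tree rooted at a given node into a nested dictionary of the form {'id': ..., 'children': [...]}. Below is a minimal sketch with an illustrative graph, not taken from the vitrage project.

import networkx as nx
from networkx.readwrite import json_graph

# A directed tree: every non-root node has exactly one parent.
T = nx.DiGraph()
T.add_edges_from([('root', 'a'), ('root', 'b'), ('a', 'c')])

tree = json_graph.tree_data(T, root='root')
# tree is roughly:
# {'id': 'root', 'children': [{'id': 'a', 'children': [{'id': 'c'}]},
#                             {'id': 'b'}]}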

Example 4: deserialize

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def deserialize(data: Dict) -> nx.MultiDiGraph:
        """
        Deserialize a networkx.MultiDiGraph from a dictionary.

        Parameters
        ----------
        data: dict
            Dictionary containing nodes and edges

        Returns
        -------
        networkx.MultiDiGraph
            A networkx.MultiDiGraph representation

        """
        g = json_graph.node_link_graph(data)
        return g 
Author: NCATS-Tangerine, Project: kgx, Lines: 19, Source: transformer.py

Example 5: _node_link_data_to_eden

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def _node_link_data_to_eden(serialized_list):
    """Takes a string list in the serialised node_link_data JSON format and yields networkx graphs."""
    for serial_data in serialized_list:
        py_obj = json.loads(serial_data)
        graph = json_graph.node_link_graph(py_obj)
        yield graph 
Author: fabriziocosta, Project: EDeN, Lines: 8, Source: node_link_data.py

Example 6: _load

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def _load(self):
        """Loads input data.

        train/test/valid_graph.json => the graph data used for training,
          test and validation as json format;
        train/test/valid_feats.npy => the feature vectors of nodes as
          numpy.ndarry object, it's shape is [n, v],
          n is the number of nodes, v is the feature's dimension;
        train/test/valid_labels.npy=> the labels of the input nodes, it
          is a numpy ndarry, it's like[[0, 0, 1, ... 0], 
          [0, 1, 1, 0 ...1]], shape of it is n*h, n is the number of nodes,
          h is the label's dimension;
        train/test/valid/_graph_id.npy => the element in it indicates which
          graph the nodes belong to, it is a one dimensional numpy.ndarray
          object and the length of it is equal the number of nodes,
          it's like [1, 1, 2, 1...20]. 
        """
        print('Loading G...')
        if self.mode == 'train':
            with open('{}/ppi/train_graph.json'.format(self._dir)) as jsonfile:
                g_data = json.load(jsonfile)
            self.labels = np.load('{}/ppi/train_labels.npy'.format(self._dir))
            self.features = np.load('{}/ppi/train_feats.npy'.format(self._dir))
            self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))
            self.graph_id = np.load('{}/ppi/train_graph_id.npy'.format(self._dir))
        if self.mode == 'valid':
            with open('{}/ppi/valid_graph.json'.format(self._dir)) as jsonfile:
                g_data = json.load(jsonfile)
            self.labels = np.load('{}/ppi/valid_labels.npy'.format(self._dir))
            self.features = np.load('{}/ppi/valid_feats.npy'.format(self._dir))
            self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))
            self.graph_id = np.load('{}/ppi/valid_graph_id.npy'.format(self._dir))
        if self.mode == 'test':
            with open('{}/ppi/test_graph.json'.format(self._dir)) as jsonfile:
                g_data = json.load(jsonfile)
            self.labels = np.load('{}/ppi/test_labels.npy'.format(self._dir))
            self.features = np.load('{}/ppi/test_feats.npy'.format(self._dir))
            self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))
            self.graph_id = np.load('{}/ppi/test_graph_id.npy'.format(self._dir)) 
Author: dmlc, Project: dgl, Lines: 41, Source: ppi.py

Example 7: loadG

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def loadG(x, d):
    return json_graph.node_link_graph(json.load(open(x+'-G.json')), d) 
Author: yifan-h, Project: CS-GNN, Lines: 4, Source: utils.py

Example 8: process

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def process(self):
        for s, split in enumerate(['train', 'valid', 'test']):
            path = osp.join(self.raw_dir, '{}_graph.json').format(split)
            with open(path, 'r') as f:
                G = nx.DiGraph(json_graph.node_link_graph(json.load(f)))

            x = np.load(osp.join(self.raw_dir, '{}_feats.npy').format(split))
            x = torch.from_numpy(x).to(torch.float)

            y = np.load(osp.join(self.raw_dir, '{}_labels.npy').format(split))
            y = torch.from_numpy(y).to(torch.float)

            data_list = []
            path = osp.join(self.raw_dir, '{}_graph_id.npy').format(split)
            idx = torch.from_numpy(np.load(path)).to(torch.long)
            idx = idx - idx.min()

            for i in range(idx.max().item() + 1):
                mask = idx == i

                G_s = G.subgraph(mask.nonzero().view(-1).tolist())
                edge_index = torch.tensor(list(G_s.edges)).t().contiguous()
                edge_index = edge_index - edge_index.min()
                edge_index, _ = remove_self_loops(edge_index)

                data = Data(edge_index=edge_index, x=x[mask], y=y[mask])

                if self.pre_filter is not None and not self.pre_filter(data):
                    continue

                if self.pre_transform is not None:
                    data = self.pre_transform(data)

                data_list.append(data)
            torch.save(self.collate(data_list), self.processed_paths[s]) 
Author: rusty1s, Project: pytorch_geometric, Lines: 37, Source: ppi.py

Example 9: from_dict

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def from_dict(input_data):

        data = dict(input_data)

        nodes = list(
            map(Cell.from_dict,
                filter(
                    lambda item: not isinstance(item['value'], dict),
                    data['nodes'])))
        cellmap = {n.address(): n for n in nodes}

        def cell_from_dict(d):
            return Cell.from_dict(d, cellmap=cellmap)

        nodes.extend(
            list(
                map(cell_from_dict,
                    filter(
                        lambda item: isinstance(item['value'], dict),
                        data['nodes']))))

        data["nodes"] = [{'id': node} for node in nodes]

        links = []
        idmap = { node.address(): node for node in nodes }
        for el in data['links']:
            source_address = el['source']
            target_address = el['target']
            link = {
                'source': idmap[source_address],
                'target': idmap[target_address],
            }
            links.append(link)

        data['links'] = links

        G = json_graph.node_link_graph(data)
        cellmap = {n.address(): n for n in G.nodes()}

        named_ranges = data["named_ranges"]
        inputs = data["inputs"]
        outputs = data["outputs"]

        spreadsheet = Spreadsheet()
        spreadsheet.build_spreadsheet(
            G, cellmap, named_ranges,
            inputs=inputs, outputs=outputs)
        return spreadsheet 
Author: vallettea, Project: koala, Lines: 50, Source: Spreadsheet.py

Example 10: load_data_ori

# Required import: from networkx.readwrite import json_graph [as alias]
# Or: from networkx.readwrite.json_graph import node_link_graph [as alias]
def load_data_ori(prefix, normalize=True, load_walks=False):
    G_data = json.load(open(prefix + "-G.json"))
    G = json_graph.node_link_graph(G_data)
    if isinstance(G.nodes()[0], int):
        conversion = lambda n : int(n)
    else:
        conversion = lambda n : n

    if os.path.exists(prefix + "-feats.npy"):
        feats = np.load(prefix + "-feats.npy")
    else:
        print("No features present.. Only identity features will be used.")
        feats = None
    id_map = json.load(open(prefix + "-id_map.json"))
    id_map = {conversion(k):int(v) for k,v in id_map.items()}
    walks = []
    class_map = json.load(open(prefix + "-class_map.json"))
    if isinstance(list(class_map.values())[0], list):
        lab_conversion = lambda n : n
    else:
        lab_conversion = lambda n : int(n)

    class_map = {conversion(k):lab_conversion(v) for k,v in class_map.items()}

    ## Remove all nodes that do not have val/test annotations
    ## (necessary because of networkx weirdness with the Reddit data)
    broken_count = 0
    for node in G.nodes():
        if not 'val' in G.node[node] or not 'test' in G.node[node]:
            G.remove_node(node)
            broken_count += 1
    print("Removed {:d} nodes that lacked proper annotations due to networkx versioning issues".format(broken_count))

    ## Make sure the graph has edge train_removed annotations
    ## (some datasets might already have this..)
    print("Loaded data.. now preprocessing..")
    for edge in G.edges():
        if (G.node[edge[0]]['val'] or G.node[edge[1]]['val'] or
            G.node[edge[0]]['test'] or G.node[edge[1]]['test']):
            G[edge[0]][edge[1]]['train_removed'] = True
        else:
            G[edge[0]][edge[1]]['train_removed'] = False

    if normalize and not feats is None:
        from sklearn.preprocessing import StandardScaler
        train_ids = np.array([id_map[n] for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test']])
        train_feats = feats[train_ids]
        scaler = StandardScaler()
        scaler.fit(train_feats)
        feats = scaler.transform(feats)
    
    if load_walks:
        with open(prefix + "-walks.txt") as fp:
            for line in fp:
                walks.append(map(conversion, line.split()))

    return G, feats, id_map, walks, class_map 
Author: safe-graph, Project: DGFraud, Lines: 59, Source: utils.py


Note: The networkx.readwrite.json_graph.node_link_graph examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please follow the corresponding project's license when redistributing or using the code; do not republish without permission.