This article collects typical usage examples of the Python function networkx.readwrite.json_graph.node_link_graph. If you are wondering what exactly node_link_graph does, how to use it, or what real-world calls to it look like, the curated code examples here should help.
Fifteen code examples of node_link_graph are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
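Before diving in, here is a minimal, self-contained round trip between a graph and its node-link JSON representation, using only the public networkx and standard-library APIs. The examples that follow generally assume the same imports (json, networkx as nx, and json_graph) plus project-specific modules of their own. Note that several examples use NetworkX 1.x idioms such as g.node, nodes_iter() and edges_iter(), which no longer exist in NetworkX 2.x.

import json
import networkx as nx
from networkx.readwrite import json_graph

# Build a small graph, serialize it to node-link JSON, then restore it.
G = nx.Graph()
G.add_edge("a", "b", weight=3)

data = json_graph.node_link_data(G)               # graph -> node-link dict
text = json.dumps(data)                           # dict -> JSON string
H = json_graph.node_link_graph(json.loads(text))  # JSON string -> graph

assert set(H.nodes()) == {"a", "b"}
assert H["a"]["b"]["weight"] == 3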
Example 1: iso_json
def iso_json(string1, string2):
    dataG1 = json.loads(string1)
    graph1 = json_graph.node_link_graph(dataG1)
    dataG2 = json.loads(string2)
    graph2 = json_graph.node_link_graph(dataG2)
    # return nx.is_isomorphic(graph1, graph2)
    return nx.faster_could_be_isomorphic(graph1, graph2)
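A hedged usage sketch (the two input strings below are constructed for illustration): serialize two graphs with node_link_data and pass the JSON strings to iso_json. Keep in mind that faster_could_be_isomorphic only compares degree sequences, so it is a cheap necessary condition rather than a proof of isomorphism, which is presumably why the exact nx.is_isomorphic call is left commented out.

g1 = nx.cycle_graph(4)
g2 = nx.cycle_graph(4)
s1 = json.dumps(json_graph.node_link_data(g1))
s2 = json.dumps(json_graph.node_link_data(g2))
print(iso_json(s1, s2))  # True: the degree sequences match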
Example 2: restore_anm_nidb_from_json
def restore_anm_nidb_from_json(data):
    d = ank_json_custom_loads(data)
    anm = autonetkit.anm.AbstractNetworkModel()
    nidb = autonetkit.nidb.NIDB()

    for overlay_id, overlay_data in d.items():
        if overlay_id == "nidb":
            continue  # don't restore nidb graph to anm
        anm._overlays[overlay_id] = json_graph.node_link_graph(overlay_data)

    nidb._graph = json_graph.node_link_graph(d['nidb'])
    rebind_interfaces(anm)
    return anm, nidb
Example 3: graphs_json2networkx
def graphs_json2networkx(input_dict):
    from json import loads
    from networkx.readwrite import json_graph
    gtext = loads(input_dict['graph'])
    g = json_graph.node_link_graph(gtext)
    return {'nxgraph': g}
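Judging from the function body, the caller is expected to supply the serialized graph as a JSON string under the 'graph' key of the input dictionary. A minimal sketch (the input below is illustrative):

g = nx.path_graph(3)
input_dict = {'graph': json.dumps(json_graph.node_link_data(g))}
out = graphs_json2networkx(input_dict)
print(out['nxgraph'].number_of_nodes())  # 3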
Example 4: main
def main(json_file, output_prefix, source, target):
    with open(json_file) as data_file:
        data = json.load(data_file)
    G = json_graph.node_link_graph(data, directed=False)
    print("Finished Reading in Graph: {0}".format(datetime.datetime.now()))

    id_seq = networkx.get_node_attributes(G, "sequence")
    seq_id = {seq: node_id for node_id, seq in id_seq.items()}
    print("Created inverse lookup table: {0}".format(datetime.datetime.now()))

    targets = target.split(',')  # works for a single target or a comma-separated list
    for target in targets:
        paths = networkx.all_shortest_paths(G, seq_id[source], seq_id[target])
        with open("{0}_paths_{1}_{2}.txt".format(output_prefix, source, target), 'w') as o:
            for path in paths:
                o.write(",".join([id_seq[node_id] for node_id in path]))
                o.write("\n")
    print("Output paths: {0}".format(datetime.datetime.now()))
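A sketch of the input this function expects, inferred from its use of the "sequence" node attribute; the toy graph and file name below are assumptions for illustration:

# Build a toy graph whose nodes carry a "sequence" attribute...
g = networkx.Graph()
g.add_node(0, sequence="AAA")
g.add_node(1, sequence="AAC")
g.add_node(2, sequence="ACC")
g.add_edges_from([(0, 1), (1, 2)])

# ...write it in node-link format, then search for paths between sequences.
with open("toy_graph.json", "w") as f:
    json.dump(json_graph.node_link_data(g), f)
main("toy_graph.json", "toy", "AAA", "ACC")  # writes toy_paths_AAA_ACC.txt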
Example 5: restore_anm_nidb_from_json
def restore_anm_nidb_from_json(data):
    # This can be used to extract from the json used to send to webserver
    d = ank_json_custom_loads(data)
    anm = autonetkit.anm.AbstractNetworkModel()
    nidb = autonetkit.nidb.DeviceModel()

    for overlay_id, overlay_data in d.items():
        if overlay_id == "nidb":
            continue  # don't restore nidb graph to anm
        anm._overlays[overlay_id] = json_graph.node_link_graph(overlay_data)

    nidb._graph = json_graph.node_link_graph(d['nidb'])
    rebind_interfaces(anm)
    return anm, nidb
Example 6: graph
def graph(self, node_links_data):
    self.g = json_graph.node_link_graph(node_links_data)
    # collect nodes whose node_type is not allowed, then drop them
    remove = list()
    for _id in self.g.node:
        if self.g.node[_id]["node_type"] not in self.valid_type:
            remove.append(_id)
    self.g.remove_nodes_from(remove)
Example 7: as_tree
def as_tree(graph, root=OPENSTACK_CLUSTER, reverse=False):
    linked_graph = json_graph.node_link_graph(graph)
    if 0 == nx.number_of_nodes(linked_graph):
        return {}
    if reverse:
        linked_graph = linked_graph.reverse()
    return json_graph.tree_data(linked_graph, root=root)
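Note that json_graph.tree_data requires the graph to be a directed tree rooted at the given node, so as_tree only works on node-link data with that shape. A minimal sketch, passing root explicitly instead of relying on the caller's OPENSTACK_CLUSTER default:

t = nx.DiGraph()
t.add_edges_from([("cluster", "host1"), ("cluster", "host2")])
data = json_graph.node_link_data(t)
print(as_tree(data, root="cluster"))
# -> {'id': 'cluster', 'children': [{'id': 'host1'}, {'id': 'host2'}]}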
Example 8: load_json
def load_json(stream):
    """
    Args:
        stream: Open stream containing JSON
        Assumes the JSON is in networkx node-link format
    """
    js = json.load(stream)
    g = json_graph.node_link_graph(js)
    assert all(['coords' in nd for nd in g.node.values()]), \
        "json node-link graph must have nodes with coords for GeoGraph"

    # get coords
    coords = [v['coords'] for v in g.node.values()]

    # set default projection
    input_proj = ""
    if gm.is_in_lon_lat(coords):
        input_proj = gm.PROJ4_LATLONG
    else:
        input_proj = gm.PROJ4_FLAT_EARTH

    coords_dict = {k: v['coords'] for k, v in g.node.items()}
    # now get rid of 'coords' key,val for each node
    for node in g.node.values():
        node.pop('coords', None)

    geo_nodes = GeoGraph(srs=input_proj, coords=coords_dict, data=g)
    return geo_nodes
Example 9: get_selected_reaction
def get_selected_reaction(jsonGraph, nodeDic, reacIDs, org):
    """
    Filters the selected reactions and shows results from the PyNetMet calculation.
    It returns a subgraph of the graph contained in jsonGraph.

    @param jsonGraph: graph in JSON format
    @param nodeDic: dict mapping names to ids
    @param reacIDs: names of reactions contained in the nodeDic
    @param org: organism
    @return: subgraph
    """
    # Translate reac names to IDs
    # Get substrates and products of all reacs
    metabolites = []
    for reac in reacIDs:
        metabolites += org.get_reaction(reac).metabolites
    met_ids = list(map(lambda x: nodeDic[x], metabolites))

    g = json_graph.node_link_graph(jsonGraph)
    g.remove_edges_from(list(filter(lambda x: g.get_edge_data(*x)["object"].name not in reacIDs,
                                    g.edges(met_ids))))

    # Get products/substrates directly connected to filter
    # reacIDs += flatten(g.in_edges(reacIDs)) + flatten(g.out_edges(reacIDs))
    h = g.subgraph(met_ids)
    return h
Example 10: read_from_json_gexf
def read_from_json_gexf(fname=None, label_field_name='APIs', conv_undir=False):
    '''
    Load the graph files (.gexf or .json only supported)
    :param fname: graph file name
    :param label_field_name: field denoting the node label
    :param conv_undir: convert to undirected graph or not
    :return: graph in networkx format
    '''
    if not fname:
        logging.error('no valid path or file name')
        return None
    else:
        try:
            try:
                with open(fname, 'rb') as File:
                    org_dep_g = json_graph.node_link_graph(json.load(File))
            except:
                org_dep_g = nx.read_gexf(path=fname)
            g = nx.DiGraph()
            for n, d in org_dep_g.nodes_iter(data=True):
                g.add_node(n, attr_dict={'label': '-'.join(d[label_field_name].split('\n'))})
            g.add_edges_from(org_dep_g.edges_iter())
        except:
            logging.error("unable to load graph from file: {}".format(fname))
            # return 0
        logging.debug('loaded {}: a graph with {} nodes and {} edges'.format(fname, g.number_of_nodes(), g.number_of_edges()))
        if conv_undir:
            g = nx.Graph(g)
            logging.debug('converted {} to an undirected graph'.format(g))
        return g
Example 11: find_min_spanning_tree
def find_min_spanning_tree(A):
    """
    Input:
        A : Adjacency matrix in scipy.sparse format.
    Output:
        T : Minimum spanning tree.
        run_time : Total runtime to find minimum spanning tree
    """
    # Record start time.
    start = time.time()

    # Check if graph is pre-processed; if yes, then don't process it again.
    if os.path.exists('../Data/dcg_graph.json'):
        with open('../Data/dcg_graph.json') as data:
            d = json.load(data)
        G = json_graph.node_link_graph(d)
    # If graph is not preprocessed, then convert it to a graph and save it to a JSON file.
    else:
        G = from_scipy_sparse_matrix(A)
        data = json_graph.node_link_data(G)
        with open('../Data/dcg_graph.json', 'w') as outfile:
            json.dump(data, outfile)

    # Find MST.
    T = minimum_spanning_tree(G)

    # Record total runtime.
    run_time = time.time() - start
    return T, run_time
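from_scipy_sparse_matrix and minimum_spanning_tree appear unqualified here, so they presumably come from networkx (e.g. from networkx import from_scipy_sparse_matrix, minimum_spanning_tree). A usage sketch, assuming the ../Data/ cache directory exists and noting that a stale dcg_graph.json from an earlier run would be loaded instead of A:

import scipy.sparse as sp

# A 3-node weighted path as a sparse adjacency matrix.
A = sp.csr_matrix([[0, 2, 0],
                   [2, 0, 3],
                   [0, 3, 0]])
T, run_time = find_min_spanning_tree(A)
print(T.edges(), run_time)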
Example 12: main
def main(json_file, output_prefix, metric):
    with open(json_file) as data_file:
        data = json.load(data_file)
    G = json_graph.node_link_graph(data)

    metrics = {}
    # metrics["degree"] = degree(G)
    metrics["closeness"] = closeness_centrality(G).values()
    # TODO: add any other metrics here using a similar format to above line.

    sequences = {}
    cleaved_seq = {key: val for key, val in sequences.items() if val["type"] == "CLEAVED"}

    if metric != "metrics":
        labels_to_plot = [metric]
    else:
        labels_to_plot = metrics.keys()

    n_to_plot = len(labels_to_plot)
    fig, axarr = pconv.create_ax(n_to_plot, 1, shx=False, shy=False)
    nbins = 20

    for ind, key in enumerate(labels_to_plot):
        normed = True
        hist.draw_actual_plot(axarr[0, ind], metrics[key], "", key.capitalize(), normed=normed, nbins=nbins)
        axarr[0, ind].ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))
        # pconv.add_legend(axarr[0,ind], location="middle right")

    pconv.save_fig(fig, output_prefix, "metrics", n_to_plot * 5, 5, tight=True, size=12)
Example 13: transferRedditDataFormat
def transferRedditDataFormat(dataset_dir, output_file):
    G = json_graph.node_link_graph(json.load(open(dataset_dir + "/reddit-G.json")))
    labels = json.load(open(dataset_dir + "/reddit-class_map.json"))

    train_ids = [n for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test']]
    test_ids = [n for n in G.nodes() if G.node[n]['test']]
    val_ids = [n for n in G.nodes() if G.node[n]['val']]
    train_labels = [labels[i] for i in train_ids]
    test_labels = [labels[i] for i in test_ids]
    val_labels = [labels[i] for i in val_ids]

    feats = np.load(dataset_dir + "/reddit-feats.npy")
    # Logistic gets thrown off by big counts, so log-transform num comments and score
    feats[:, 0] = np.log(feats[:, 0] + 1.0)
    feats[:, 1] = np.log(feats[:, 1] - min(np.min(feats[:, 1]), -1))

    feat_id_map = json.load(open(dataset_dir + "/reddit-id_map.json"))
    feat_id_map = {id: val for id, val in feat_id_map.items()}

    # train_feats = feats[[feat_id_map[id] for id in train_ids]]
    # test_feats = feats[[feat_id_map[id] for id in test_ids]]
    # numNode = len(feat_id_map)
    # adj = sp.lil_matrix(np.zeros((numNode, numNode)))
    # for edge in G.edges():
    #     adj[feat_id_map[edge[0]], feat_id_map[edge[1]]] = 1

    train_index = [feat_id_map[id] for id in train_ids]
    val_index = [feat_id_map[id] for id in val_ids]
    test_index = [feat_id_map[id] for id in test_ids]
    np.savez(output_file, feats=feats, y_train=train_labels, y_val=val_labels, y_test=test_labels,
             train_index=train_index, val_index=val_index, test_index=test_index)
Example 14: read_json_graph
def read_json_graph(istream):
    """
    Reads a json graph output by the algorithm and returns it
    """
    data = json.loads(istream.read())
    G = json_graph.node_link_graph(data)
    return G
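For symmetry, here is a hypothetical writer counterpart (write_json_graph is a name chosen for this sketch, not part of the source project), together with a round trip through an in-memory stream:

import io

def write_json_graph(ostream, G):
    """Hypothetical counterpart: write a graph in node-link JSON format."""
    ostream.write(json.dumps(json_graph.node_link_data(G)))

buf = io.StringIO()
write_json_graph(buf, nx.path_graph(3))
buf.seek(0)
G = read_json_graph(buf)
assert G.number_of_edges() == 2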
Example 15: simple_to_nx
def simple_to_nx(j_data):
    port_to_index_mapping = defaultdict(dict)
    for node in j_data['nodes']:
        if "ports" not in node:
            continue
        node_id = node['id']
        # first check for loopback zero
        ports = node['ports']
        _ports = {}  # output format
        try:
            lo_zero = [p for p in ports if p['id'] == "Loopback0"].pop()
        except IndexError:
            # can't pop -> no loopback zero, append
            lo_zero = {'category': 'loopback',
                       'description': "Loopback Zero"}
        else:
            ports.remove(lo_zero)
        finally:
            _ports[0] = lo_zero

        '''Sharad: below change is for 2nd loopback. currently commenting it out.
        change start in below loop to 2 while adding another loopback
        lo_one = {'category': 'loopback',
                  'description': "Loopback One",
                  'id': 'loopback1'}
        _ports[1] = lo_one
        '''
        for index, port in enumerate(ports, start=1):
            _ports[index] = port
            port_to_index_mapping[node_id][port['id']] = index

        del node['ports']
        node['_ports'] = _ports

    nodes_by_id = {n['id']: i for i, n
                   in enumerate(j_data['nodes'])}

    unmapped_links = []
    if "links" in j_data:
        mapped_links = j_data['links']
        for link in mapped_links:
            src = link['src']
            dst = link['dst']
            src_pos = nodes_by_id[src]
            dst_pos = nodes_by_id[dst]
            src_port_id = port_to_index_mapping[src][link['src_port']]
            dst_port_id = port_to_index_mapping[dst][link['dst_port']]

            interfaces = {src: src_port_id,
                          dst: dst_port_id}

            unmapped_links.append({'source': src_pos,
                                   'target': dst_pos,
                                   '_ports': interfaces,
                                   'link_type': link['link_type']
                                   })

    j_data['links'] = unmapped_links
    return json_graph.node_link_graph(j_data)
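The expected input shape can be read off the function body: each node carries a 'ports' list, and each link references node and port ids. A minimal, illustrative input (the router names and port ids below are assumptions):

j_data = {
    'nodes': [
        {'id': 'r1', 'ports': [{'id': 'Loopback0', 'category': 'loopback'},
                               {'id': 'eth0', 'category': 'physical'}]},
        {'id': 'r2', 'ports': [{'id': 'Loopback0', 'category': 'loopback'},
                               {'id': 'eth0', 'category': 'physical'}]},
    ],
    'links': [
        {'src': 'r1', 'dst': 'r2', 'src_port': 'eth0',
         'dst_port': 'eth0', 'link_type': 'physical'},
    ],
}
g = simple_to_nx(j_data)  # Loopback0 becomes port index 0, eth0 index 1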