本文整理汇总了Python中networkx.readwrite.json_graph.node_link_data函数的典型用法代码示例。如果您正苦于以下问题:Python node_link_data函数的具体用法?Python node_link_data怎么用?Python node_link_data使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了node_link_data函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: build_network
def build_network(seed_network, utility_params, network_length):
    """Grow *seed_network* one station at a time until it has
    *network_length* nodes, then persist it to disk.

    Parameters
    ----------
    seed_network : network object exposing ``expansion_utilities()``,
        ``expansion_city_ghs``, ``expansion_cache`` and ``add_SC()``
    utility_params : array of utility weights (``.tolist()`` is used when
        building output file names)
    network_length : int, target number of nodes

    Returns the (mutated) ``seed_network``.  The network is also written
    to ``simulated_networks/<start>_<end>_<params>.pickle`` and ``.json``
    (node-link format), both on early exit and on normal completion.
    """
    now_start = datetime.datetime.now()
    start_node = len(seed_network)
    for i in range(len(seed_network), network_length):
        print("sim network length = " + str(len(seed_network) + 1))
        seed_network.expansion_utilities(utility_params)
        if not seed_network.expansion_city_ghs:
            # No candidate expansion city left: save what we have and stop.
            print("no expansion cities found at node " + str(i))
            _save_network(seed_network, start_node, utility_params)
            return seed_network
        # Candidate utilities live in expansion_cache; pick the node whose
        # cached array has the highest final (utility) value.
        new_nodes = {node: data for node, data in seed_network.expansion_cache.items()
                     if node in seed_network.expansion_city_ghs}
        best_node = max(new_nodes, key=lambda x: new_nodes[x][-1])  # last value in array
        print(best_node)
        seed_network.add_SC(best_node)
        if len(seed_network[best_node].keys()) == 0:
            # typo fixed: message previously read "conenctions"
            print("added node has no connections")
            print(seed_network.expansion_cache[best_node])
    _save_network(seed_network, start_node, utility_params)
    now_finish = datetime.datetime.now()
    elapsedTime = now_finish - now_start
    print(elapsedTime / timedelta(minutes=1))
    return seed_network


def _save_network(seed_network, start_node, utility_params):
    """Persist *seed_network* as a pickle and as node-link JSON.

    The common file-name stem encodes the starting size, current size and
    the utility parameters (previously this code was duplicated inline
    three times in build_network).
    """
    base = ("simulated_networks/" + str(start_node) + "_" + str(len(seed_network))
            + "_" + "_".join([str(param) for param in utility_params.tolist()]))
    with open(base + ".pickle", 'wb') as outfile:
        pickle.dump(seed_network, outfile)
    with open(base + ".json", 'w') as outfile:
        json.dump(json_graph.node_link_data(seed_network), outfile)
示例2: test_job_set_accepted
def test_job_set_accepted(self):
    """POSTing a job and then PUTting status 'Accepted' must be reflected
    when the job is fetched back via GET."""
    # Arrange
    job_id = "1"
    job_name = "new job"
    g = nx.Graph()  # empty graph used as the job payload
    expected_result = {'name': job_name, 'graph': json_graph.node_link_data(g), 'status': "Accepted", 'id': job_id}

    class TestJobManager(object):
        # Minimal in-memory stand-in for the real job manager: it holds a
        # single job and lets the repository mutate only its status.
        def __init__(self):
            self.job = None

        def get_job(self, job_id):
            return self.job

        def insert_job(self, job_name, graph):
            # Newly inserted jobs always start in the "Received" state.
            self.job = {'name': job_name, 'graph': graph, 'status': "Received", "id": job_id}
            return job_id

        def update_job(self, job_id, job):
            # Only the status field is updatable through the API.
            self.job['status'] = job['status']

    # Wire the fake manager into the app under test before creating the client.
    api.config['REPOSITORY'] = JobManagerRepository(TestJobManager())
    self.app = api.test_client()
    # Act: create the job, flip its status to Accepted, then read it back.
    data = {'job_name': job_name, 'graph': json_graph.node_link_data(g)}
    rv = self.app.post('/jobs', data=json.dumps(data))
    rv2 = self.app.put('/jobs/' + job_id + '/status', data=json.dumps({'status': 'Accepted'}))
    rv3 = self.app.get('/jobs/' + job_id)
    # Assert
    final_job = json.loads(rv3.data.decode(rv.charset))
    self.assertEqual(expected_result, final_job)
示例3: send_network
def send_network(self):
    """Serialize the current topology to node-link JSON, tag it with a
    message type and the mininet flag, and push it over the websocket."""
    source = self.mininet_to_nx_graph() if self.mininet else self.net.to_nx_graph()
    payload = json_graph.node_link_data(source)
    payload['message_type'] = 'network'
    payload['mininet'] = bool(self.mininet)
    self.send_to_ws(json.dumps(payload))
示例4: computebwmod
def computebwmod(graph,path,protlist,path_lenght,alone):
import itertools
fixed=[]
combination=[]
betweennees=nx.betweenness_centrality(graph,normalized=True)
print "------Starting Graph------"
print nx.info(graph)
d = json_graph.node_link_data(graph) # node-link format to serialize
# write json
json.dump(d, open('mcn.json','w'))
count={}
for i in graph.nodes():
if i in protlist:
continue
else:
count[i]=0
combination=list(itertools.combinations(path,2))
pa={}
for i in path:
pa[i]=[]
for j in path[i]:
if path.has_key(i):
pa[i].extend(j[1:len(j)-1])
else:
pa[i].extend(j[1:len(j)-1])
for i in path:
pa[i]=list(set(sum(path[i],[])))
for i in pa:
for j in list(set(pa[i])):
if j in protlist:
continue
else:
count[j]=count[j]+1
countsort = sorted(count, key=count.get)
removable=set(countsort).difference(set(protlist))
print len(protlist)
graphred=check(graph,path_lenght,removable,protlist,path)
for i in graphred.nodes():
if i in protlist:
graphred.node[i]["group"]=5
else:
graphred.node[i]["group"]=10
f1=open("bwmodproteins.txt","w")
for i in graphred.nodes():
f1.write(i+"\n")
d = json_graph.node_link_data(graphred) # node-link format to serialize
# write json
json.dump(d, open('filteredgraph.json','w'))
nx.write_gpickle(graphred,"bwmodfiltgraph.gpickle")
示例5: main
def main():
    """Enumerate candidate motif patterns, collapse them on the Spark
    cluster, then iteratively filter out isomorphic duplicates via HDFS
    round-trips, writing the surviving patterns to disk.

    NOTE(review): relies on module-level `sc` (SparkContext), `Motifsets`,
    `enumerate2`, `worker_all_collapse`, `nx`, and hard-coded local/HDFS
    paths -- confirm these exist in the deployment environment.
    """
    Motifset = Motifsets()
    patterns2 = enumerate2()
    # Serialize every enumerated pattern graph, one JSON object per line,
    # then push the file into HDFS for Spark to read.
    output_file1 = "/net/data/graph-models/sim-graphs/approx3-json"
    with open(output_file1, 'w') as fout:
        for item in patterns2:
            string_item = json.dumps(json_graph.node_link_data(item))
            fout.write(string_item + "\n")
    subprocess.check_call("hdfs dfs -put /net/data/graph-models/sim-graphs/approx3-json approx3-json", shell=True)
    approx3Motifs = sc.textFile("hdfs://scrapper/user/xiaofeng/approx3-json", 192)
    # 192 = number of partitions
    collapsed_patterns = approx3Motifs.flatMap(lambda line: worker_all_collapse(Motifset, line))
    subprocess.check_call("hdfs dfs -rm -r patterns_queue", shell=True)
    collapsed_patterns.saveAsTextFile("hdfs://scrapper/user/xiaofeng/patterns_queue")
    # save to HDFS, as a text file, and keep using that RDD
    collapsed_patterns.persist()
    non_iso_set = set()
    # Earlier in-memory variant, kept for reference:
    # while not collapsed_patterns.isEmpty(): #or use count() != 0 as an alternative
    #     povet = collapsed_patterns.take(1)[0]#BROADCAST
    #     povet_broad = sc.broadcast(povet)
    #     print type(povet)
    #     non_iso_set.add(povet)
    #     collapsed_patterns = collapsed_patterns.filter(lambda x: not nx.is_isomorphic(x, povet_broad.value))
    #
    # Write the queue of elements waiting to be processed to HDFS each
    # round: take one pattern, keep it, drop everything isomorphic to it.
    while True:
        collapsed_patterns = sc.textFile("hdfs://scrapper/user/xiaofeng/patterns_queue")
        if collapsed_patterns.count() == 0:
            break
        else:
            povet = collapsed_patterns.take(1)[0]  # BROADCAST
            povet_broad = sc.broadcast(povet)
            non_iso_set.add(povet)
            # NOTE(review): x here comes from textFile() and so is a JSON
            # string, but nx.is_isomorphic expects graphs -- confirm that
            # deserialization happens upstream or this filter misbehaves.
            collapsed_patterns_new = collapsed_patterns.filter(lambda x: not nx.is_isomorphic(x, povet_broad.value))
            subprocess.check_call("hdfs dfs -rm -r patterns_queue", shell=True)
            collapsed_patterns_new.saveAsTextFile("hdfs://scrapper/user/xiaofeng/patterns_queue")
            print collapsed_patterns.count()
    # Dump the surviving non-isomorphic patterns, one JSON object per line.
    output_file2 = "/net/data/graph-models/sim-graphs/approx5-json"
    with open(output_file2, 'w') as fout:
        for item in non_iso_set:
            string_item = json.dumps(json_graph.node_link_data(item))
            fout.write(string_item + '\n')
示例6: store_network_as_jason
def store_network_as_jason(up_path_edges, down_path_edges, upband=2000, downband=2000):
    '''
    Description:
    This function exports the node and link information to the frontend lib,
    to visualize the topology (graph1) and the calculated up/down paths
    between hosts (graph2).  Results are written as node-link JSON files.

    NOTE(review): `edges`, `ipmap` and `host` are module-level globals --
    confirm they are populated before this is called.
    '''
    def _add_endpoints(graph, edge):
        # Add both endpoints of *edge* if missing; addresses known to ipmap
        # are hosts (group 0), everything else is infrastructure (group 1).
        # (This 9-line pattern was previously duplicated three times.)
        if edge['localnode'] not in graph.nodes():
            group = 0 if edge['localaddr'] in ipmap.keys() else 1
            graph.add_node(edge['localnode'], {'localinterface': edge['localifname'], 'IPAdd': edge['localaddr'], 'group': group})
        if edge['remotenode'] not in graph.nodes():
            group = 0 if edge['remoteaddr'] in ipmap.keys() else 1
            graph.add_node(edge['remotenode'], {'remoteinterface': edge['remoteifname'], 'IPAdd': edge['remoteaddr'], 'group': group})

    graph1 = nx.DiGraph()
    graph2 = nx.DiGraph()
    # Full topology with the available bandwidth on every physical link.
    for edge in edges:
        _add_endpoints(graph1, edge)
        graph1.add_edge(edge['localnode'], edge['remotenode'], {'AvailableBandwidth': edge['available_bandwidth'], 'value': edge['available_bandwidth']})
    # Calculated uplink path.
    for edge in up_path_edges:
        _add_endpoints(graph2, edge)
        graph2.add_edge(edge['localnode'], edge['remotenode'], {'UpBandwidth': upband, 'value': upband})
    # Calculated downlink path.
    for edge in down_path_edges:
        _add_endpoints(graph2, edge)
        graph2.add_edge(edge['localnode'], edge['remotenode'], {'DownBandwidth': downband, 'value': downband})
    # Attach the hosts themselves and link them to their access node.
    for node in host.keys():
        graph1.add_node(host[node], {'IPAdd': node, 'group': 0})
        graph2.add_node(host[node], {'IPAdd': node, 'group': 0})
        graph1.add_edge(host[node], ipmap[node])
        graph2.add_edge(host[node], ipmap[node])
    d1 = json_graph.node_link_data(graph1)
    d2 = json_graph.node_link_data(graph2)
    json.dump(d1, open('/Users/eric/Desktop/topo/1.json', 'w'))
    json.dump(d2, open('/Users/eric/Desktop/topo/2.json', 'w'))
示例7: vis_hierarchy
def vis_hierarchy(sieve, column_label, max_edges=200, prefix=''):
    """Visualize a hierarchy of representations."""
    # Wrap long column labels so they render readably in the output graphs.
    import textwrap
    column_label = map(lambda q: '\n'.join(textwrap.wrap(q, width=20)), column_label)

    def f(j):
        # Map a flat index to either an observed-variable node (j < nv)
        # or a latent-factor node encoded as a (layer, index) tuple.
        if j < sieve.nv:
            return j
        else:
            return (1, j - sieve.nv)

    # Construct non-tree graph
    g = nx.DiGraph()
    max_node_weight = np.max(sieve.tcs)
    for i, c in enumerate(column_label):
        if i < sieve.nv:
            # Observed variable: unit weight, labelled with its column name.
            g.add_node(i)
            g.node[i]['weight'] = 1
            g.node[i]['label'] = c
            g.node[i]['name'] = c  # JSON uses this field
        else:
            # Latent factor: weight scaled by its total correlation relative
            # to the largest, clipped so tiny factors stay visible.
            g.add_node(f(i))
            g.node[f(i)]['weight'] = 0.33 * np.clip(sieve.tcs[i - sieve.nv] / max_node_weight, 0.33, 1)
        if i >= sieve.nv:
            # Connect this factor to every earlier node, weighted by MI.
            g.add_weighted_edges_from([(f(j), (1, i - sieve.nv), sieve.mi_j(i - sieve.nv)[j]) for j in range(i)])
    # Display pruned version: drop all but the max_edges heaviest edges.
    h = g.copy()  # trim(g.copy(), max_parents=max_parents, max_children=max_children)
    h.remove_edges_from(sorted(h.edges(data=True), key=lambda q: q[2]['weight'])[:-max_edges])
    edge2pdf(h, prefix + '/graphs/graph_%d' % max_edges, labels='label', directed=True, makepdf=True)
    # Display tree version: each node keeps at most one parent.
    tree = g.copy()
    tree = trim(tree, max_parents=1, max_children=False)
    edge2pdf(tree, prefix + '/graphs/tree', labels='label', directed=True, makepdf=True)
    # Output JSON files for the d3 force-layout page.
    try:
        import os
        print os.path.dirname(os.path.realpath(__file__))
        copyfile(os.path.dirname(os.path.realpath(__file__)) + '/tests/d3_files/force.html', prefix + '/graphs/force.html')
    except:
        print "Couldn't find 'force.html' file for visualizing d3 output"
    import json
    from networkx.readwrite import json_graph
    # Relabel tree nodes with their human-readable labels before export.
    mapping = dict([(n, tree.node[n].get('label', str(n))) for n in tree.nodes()])
    tree = nx.relabel_nodes(tree, mapping)
    json.dump(json_graph.node_link_data(tree), safe_open(prefix + '/graphs/force.json', 'w+'))
    json.dump(json_graph.node_link_data(h), safe_open(prefix + '/graphs/force_nontree.json', 'w+'))
    return g
示例8: NetworkAnalysis
def NetworkAnalysis(jsonGraph):
    """Given a graph in node-link JSON form, return the nodes and links of
    the union of its (up to) 5 largest connected components, with each
    node's degree stored as a 'degree' attribute.

    Fixes vs. original: node_link_data(top5) was serialized twice (now
    once), and a rigid 5-tuple unpack crashed on graphs with fewer than 5
    components (now handled by slicing).  Docstring previously said
    "top 3" while the code used 5.
    """
    G = json_graph.node_link_graph(jsonGraph)
    graphs = sorted(nx.connected_component_subgraphs(G), key=len, reverse=True)
    top5 = nx.compose_all(graphs[0:5])
    deg = top5.degree()
    nx.set_node_attributes(top5, "degree", deg)
    data = json_graph.node_link_data(top5)  # serialize once instead of twice
    return {
        "nodes": data["nodes"],
        "links": data["links"]
    }
示例9: vis_hierarchy
def vis_hierarchy(corexes, column_label=None, max_edges=100, prefix='topics', n_anchors=0):
    """Visualize a hierarchy of representations."""
    if column_label is None:
        column_label = map(str, range(corexes[0].alpha.shape[1]))
    # make l1 label: one human-readable label per first-layer hidden factor
    alpha = corexes[0].alpha
    mis = corexes[0].mis
    l1_labels = []
    # Prefix a word with '~' when its sign within the factor is negative.
    annotate = lambda q, s: q if s > 0 else '~' + q
    for j in range(corexes[0].n_hidden):
        # inds = np.where(alpha[j] * mis[j] > 0)[0]
        inds = np.where(alpha[j] >= 1.)[0]
        # Order contributing words by alpha * MI, strongest first.
        inds = inds[np.argsort(-alpha[j, inds] * mis[j, inds])]
        # Anchored topics get a "red_" prefix for the visualisation.
        group_number = u"red_" + unicode(j) if j < n_anchors else unicode(j)
        # Label = group id plus the top 6 contributing (signed) words.
        label = group_number + u':' + u' '.join([annotate(column_label[ind], corexes[0].sign[j,ind]) for ind in inds[:6]])
        label = textwrap.fill(label, width=25)
        l1_labels.append(label)
    # Construct non-tree graph from inter-layer weights (clipped alpha * MI)
    # and per-factor total correlations as node weights.
    weights = [corex.alpha.clip(0, 1) * corex.mis for corex in corexes[1:]]
    node_weights = [corex.tcs for corex in corexes[1:]]
    g = make_graph(weights, node_weights, l1_labels, max_edges=max_edges)
    # Display pruned version
    h = g.copy()  # trim(g.copy(), max_parents=max_parents, max_children=max_children)
    edge2pdf(h, prefix + '/graphs/graph_prune_' + str(max_edges), labels='label', directed=True, makepdf=True)
    # Display tree version: each node keeps at most one parent.
    tree = g.copy()
    tree = trim(tree, max_parents=1, max_children=False)
    edge2pdf(tree, prefix + '/graphs/tree', labels='label', directed=True, makepdf=True)
    # Output JSON files for the d3 force-layout page.
    try:
        import os
        copyfile(os.path.dirname(os.path.realpath(__file__)) + '/tests/d3_files/force.html', prefix + '/graphs/force.html')
    except:
        print "Couldn't find 'force.html' file for visualizing d3 output"
    import json
    from networkx.readwrite import json_graph
    # Relabel tree nodes with their human-readable labels before export.
    mapping = dict([(n, tree.node[n].get('label', str(n))) for n in tree.nodes()])
    tree = nx.relabel_nodes(tree, mapping)
    json.dump(json_graph.node_link_data(tree), safe_open(prefix + '/graphs/force.json', 'w+'))
    json.dump(json_graph.node_link_data(h), safe_open(prefix + '/graphs/force_nontree.json', 'w+'))
    return g
示例10: plot_graph
def plot_graph(graph, scope=None, parent=None,
excludes=(), d3page='fixedforce.html', minimal=False):
"""Open up a display of the graph in a browser window."""
tmpdir = tempfile.mkdtemp()
fdir = os.path.dirname(os.path.abspath(__file__))
shutil.copy(os.path.join(fdir, 'd3.js'), tmpdir)
shutil.copy(os.path.join(fdir, d3page), tmpdir)
graph = _clean_graph(graph, excludes=excludes,
scope=scope, parent=parent, minimal=minimal)
data = node_link_data(graph)
tmp = data.get('graph', [])
data['graph'] = [dict(tmp)]
startdir = os.getcwd()
os.chdir(tmpdir)
try:
# write out the json as a javascript var
# so we we're not forced to start our own webserver
# to avoid cross-site issues
with open('__graph.js', 'w') as f:
f.write("__mygraph__json = ")
json.dump(data, f)
f.write(";\n")
# open URL in web browser
wb = webbrowser.get()
wb.open('file://'+os.path.join(tmpdir, d3page))
except Exception as err:
print str(err)
finally:
os.chdir(startdir)
print "remember to remove temp directory '%s'" % tmpdir
示例11: _measurement
def _measurement(self):
    """Monitoring loop: print flow and graph state once per second and
    ask the datapaths for fresh measurements (runs forever)."""
    while True:
        print 'ActiveFlows: ', self.active_flows
        print 'FlowRate: ', self.flow_rate
        print 'Graph: ', json.dumps(json_graph.node_link_data(self.graph))
        self._send_measure_request()
        # hub.sleep yields to other green threads for 1 second.
        hub.sleep(1)
示例12: draw_to_browser
def draw_to_browser(agents):
    """Convert each agent's graph to node-link JSON, write the combined
    data set plus an HTML page, and open it in the running browser."""
    graphs_by_code = {}
    codes = []
    for i in range(0, 250):  # param
        for j in range(20):
            agent_graph = create_graph_of_agent(agents[str(i)][str(j)])
            node_link = json_graph.node_link_data(agent_graph)
            node_link["directed"] = 1
            node_link["multigraph"] = 1
            text = "s " + str(i + 1) + " | " + str(j + 1)
            graphs_by_code[text] = node_link
            codes.append({"text": text, "value": text})
    data = {"0": graphs_by_code,
            "codes": codes}
    write_html(data)
    json.dump(data, open('IPD_output/data.json', 'w'))
    print('Wrote node-link JSON data to temp.json')
    # open URL in running web browser
    http_server.load_url('IPD_output/overall.html')
    print('Or copy all files to webserver and load graph.html')
示例13: generate_weak_links_map
def generate_weak_links_map(self):
weak_nodes = self.detect_weak_nodes(-5,25)
active_weak_nodes = [node[0] for node in weak_nodes if max([l[1] for l in node[1]]) > 10]
ap_nodes = [node for node in self.g.nodes() if self.g.in_degree(node) > 0]
edges = self.g.edges(active_weak_nodes)
snr_g = nx.DiGraph()
snr_g.add_nodes_from(active_weak_nodes + ap_nodes)
snr_g.add_edges_from(edges)
for node in active_weak_nodes:
snr_g.node[node]['type'] = 'sta'
for node in ap_nodes:
snr_g.node[node]['type'] = 'ap'
nx.write_gpickle(snr_g,'graph_pickle_connectivity_%d.pkl' % time.time())
#nx.draw(snr_g,with_labels=False)
#pylab.savefig("connectivity-graph-%d.png" % (int(time.time())))
d = json_graph.node_link_data(snr_g) # node-link format to serialize
# write json
json.dump(d, open('force/force.json','w'))
print ap_nodes
示例14: _install_one_route
def _install_one_route(self, chain_graph, res_graph, s, t, backroute=False):
    """Register a route from *s* to *t* and trigger its installation.

    Forward routes get their hop list from the configured chain route
    search over *chain_graph*; a backroute is short-circuited to a single
    direct (s, t) hop.  The route starts in the PENDING state.
    """
    self._debug('Install route between %s - %s (backroute=%s)' %
                (s, t, backroute))
    route_id = self.next_route_id()
    self.routes[route_id] = { 'chain': [],
                              'res': [],
                              'status': RouteChanged.PENDING,
                              'res_graph': res_graph,
                              }
    if backroute:
        #route_search = DefaultRouteAlgorithm()
        #route_search.graph(json_graph.node_link_data(res_graph))
        # send backward traffic directly to the source:
        chain_hops = [(s, t)]
    else:
        route_search = self.chain_route_search
        # The route search consumes the graph in node-link JSON form.
        route_search.graph(json_graph.node_link_data(chain_graph))
        chain_hops = route_search.chain_hops(s, t)
    self.routes[route_id]['chain'] = chain_hops
    # Notify listeners about the new (pending) route, then install.
    self._fire_route_state_change(None, route_id)
    self.install_pending_routes(res_graph)
示例15: simpleDisplay
def simpleDisplay(ipaddress = "localhost",port = "9999"):
    '''
    Restore the graph saved by an earlier processing run and serve an
    HTML visualisation of its user subgraph over a local HTTP server.
    (Docstring translated from the original Chinese.)
    '''
    # Earlier pipeline steps, kept for reference:
    # client,repo,stargazers,user = getRespond()
    # g = addTOGraph(repo,stargazers,user)
    # addEdge(stargazers,client,g)
    # getPopular(g)
    # savaGraph1(g)
    # top10(g)
    g = nx.read_gpickle("data/github.1")
    print nx.info(g)
    print
    # Keep only nodes typed as 'user' and export their induced subgraph.
    mtsw_users = [n for n in g if g.node[n]['type'] == 'user']
    h = g.subgraph(mtsw_users)
    print nx.info(h)
    print
    d = json_graph.node_link_data(h)
    json.dump(d, open('data/githubRec.json', 'w'))
    # Serve the current directory and open the display page in a browser.
    # NOTE(review): os.system blocks on the server process; the browser tab
    # is opened first so the page can load once the server is up -- confirm
    # this ordering is intentional.
    cmdstr = "python3 -m http.server %s" % port
    webbrowser.open_new_tab("http://%s:%s/%s.html"%(ipaddress,port, "display_githubRec"))
    os.system(cmdstr)