本文整理汇总了Python中networkx.read_edgelist函数的典型用法代码示例。如果您正苦于以下问题:Python read_edgelist函数的具体用法?Python read_edgelist怎么用?Python read_edgelist使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了read_edgelist函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: gen_random_graphs
def gen_random_graphs(seed, db):
print "generating random graph with seed " + str(seed)
directory = db.get_rnd_graphs_path()
if not path.exists(directory):
makedirs(directory)
filename = db.get_rnd_graph_full_name(str(seed), str(db.get_final_time()))
if(path.exists(filename)):
print "random graph with seed " + str(seed) + " already exists! Skipping..."
return
pathD = db.get_graphs_path()
filename = pathD + db.get_windowed_graph_name(0)
G=nx.read_edgelist(filename, nodetype = int, data=(('weight',float),))
GR = get_random_graph_from(G, seed)
save_random_graph(GR,1, db)
for i in range(2,db.get_final_time()+1):
filename = pathD + db.get_windowed_graph_name(str(i))
if(not path.exists(filename)):
f = open(filename,'w')
f.close()
G=nx.read_edgelist(filename, nodetype = int, data=(('weight',float),))
GRnew = get_random_graph_from(G, seed)
GR.graph['nmerges'] = i-2
GR = merge_temporal_graphs(GR, GRnew)
GR = compute_edge_features(GR)
save_random_graph(GR,i, db)
print("G_RND[" + str(i) + "] has " + str(GR.number_of_edges()) + " edges")
示例2: gen_random_graphs
def gen_random_graphs(seed):
# create windowed random graphs for each real graph
# obtain aggreggated graph
# calculate features of random graph
print "GENERATING RANDOM GRAPHS"
day = 1
final_day = which_day(_maxtime)+1
filename = str(results_folder) + "Graphs_Data/windowed_graph_" + str(day) + str(".txt")
print filename
G = nx.read_edgelist(filename, nodetype = int, data = (('top',float),))
# print G
GR = get_random_graph_from(G, seed)
for i in range(2,final_day):
day = i
filename = str(results_folder) + "Graphs_Data/windowed_graph_" + str(day) + str(".txt")
G = nx.read_edgelist(filename, nodetype = int, data = (('top',float),))
GRnew = get_random_graph_from(G, seed)
GR.graph['nmerges'] = i - 2
GR = merge_temporal_graphs(GR, GRnew)
GR = compute_edge_features(GR)
save_random_graph(GR,i,seed)
示例3: incorrectness_uncertain_from_file
def incorrectness_uncertain_from_file(before_file, after_file, sample_file, n_samples, bins):
# compute sig_list_b, bucket_list_b ONCE !
start = time.clock()
bG = nx.read_edgelist(before_file, '#', '\t', None, nodetype=int)
# G = nx.read_edgelist(after_file, '#', '\t', None, nodetype=int, data=True)
print "read bG: DONE, elapsed :", time.clock() - start
h2_list = equivalence_class_H2_open(bG, None)
cand_size, bin_size, sig_list_b, bucket_list_b = bucket_H2(h2_list, bins)
# print "len B:", len(sig_list_b), len(bucket_list_b)
# H1 score, H2 score
start = time.clock()
score_H1 = 0.0
score_H2 = 0.0
count = 0
for i in range(n_samples):
file_name = sample_file + str(i)
aG = nx.read_edgelist(file_name, '#', '\t', create_using=nx.MultiGraph(), nodetype=int, data=False) # IMPORTANT: MultiGraph
# H1
sum_re_prob, re_prob_dict = incorrectness_H1(bG, aG, bins)
score_H1 += sum_re_prob
# H2
sum_re_prob, re_prob_dict = incorrectness_H2_open(aG, sig_list_b, bucket_list_b, bins)
score_H2 += sum_re_prob
print "count =", count
count += 1
#
score_H1 = score_H1/n_samples
score_H2 = score_H2/n_samples
print "compute score_H1, score_H2: DONE, elapsed :", time.clock() - start
#
return score_H1, score_H2
示例4: loadNwU
def loadNwU(dsName, path, cd, wccOnly, revEdges, undir):
    """Load the edge-list graph at `path + dsName` and optionally post-process it.

    cd       -- True when the file is comma-delimited (otherwise whitespace).
    wccOnly  -- keep only the largest weakly connected component (and then
                strip self-loops).
    revEdges -- reverse all edge directions in place.
    undir    -- load as an undirected Graph instead of a DiGraph.
    Returns the loaded NetworkX graph.
    """
    print(" Opening " + dsName + " and loading graph... ")
    t1 = time.clock()
    # `with` guarantees the handle is closed even if parsing fails.
    with open(path + dsName, "rb") as fh:
        delim = "," if cd else None  # None = any whitespace (read_edgelist default)
        if undir:
            prodNet = nx.read_edgelist(fh, delimiter=delim)
        else:
            prodNet = nx.read_edgelist(fh, delimiter=delim, create_using=nx.DiGraph())
    if wccOnly:
        prodNet = nx.algorithms.weakly_connected.weakly_connected_component_subgraphs(prodNet)[0]
        prodNet.remove_edges_from(prodNet.selfloop_edges())
    if revEdges:
        # copy=False: reverse the graph in place (NetworkX 1.x behaviour).
        prodNet.reverse(False)
    numNodes = str(len(prodNet))  # len() is the idiomatic node count, not __len__()
    numEdges = str(prodNet.size())
    t2 = time.clock()
    print(" -> graph loaded: " + numNodes + " nodes, " + numEdges + " edges (" + str(t2 - t1) + " sec).")
    return prodNet
示例5: k_obfuscation_measure
def k_obfuscation_measure(before_file, after_file, n_nodes, k_arr, data=True):
print "n_nodes =", n_nodes
# before_file
bG = nx.read_edgelist(before_file, '#', '\t', None, nodetype=int)
print "read bG - DONE"
# if bG.number_of_nodes() < n_nodes:
# bG.add_nodes_from(range(n_nodes)) # only for er_100k
# Case 1 - aG = bG
if after_file == before_file: # after_file is before_file
for e in bG.edges_iter():
bG[e[0]][e[1]]['p'] = 1.0
return compute_eps_multi(bG, bG, k_arr)
# Case 2 - aG is a sample
# after_file
if data == True:
aG = nx.read_edgelist(after_file, '#', '\t', None, nodetype=int, data=True)
else:
aG = nx.read_edgelist(after_file, '#', '\t', None, nodetype=int, data=False)
# if aG.number_of_nodes() < n_nodes:
# aG.add_nodes_from(range(n_nodes)) # only for the cases of KeyError !
for e in aG.edges_iter():
aG[e[0]][e[1]]['p'] = 1.0
print "read aG - DONE"
return compute_eps_multi(bG, aG, k_arr)
示例6: main
def main():
    """
    Pre-processing:
    load each dependency network, keep only its main component, attach
    several centrality measures, and write annotated GraphML files.
    """
    print(nx.__version__)
    # Load the three edge lists; the edge weight is derived from recency.
    nets_dict = {}
    for net_name in ("depends", "suggests", "imports"):
        g = nx.read_edgelist("data/" + net_name + ".csv", delimiter=",",
                             create_using=nx.DiGraph(), nodetype=str,
                             data=(("weight", time_from_today),))
        g.name = net_name
        nets_dict[net_name] = g
    # Restrict each network to its largest (undirected) connected component.
    for k in nets_dict.keys():
        main_component = nx.connected_component_subgraphs(nets_dict[k].to_undirected())[0].nodes()
        nets_dict[k] = nx.subgraph(nets_dict[k], main_component)
    # Attach multiple centrality measures, then normalize edge weights.
    measure_list = [nx.in_degree_centrality, nx.betweenness_centrality, nx.pagerank]
    for g in nets_dict.values():
        multiple_measures(g, measure_list)
        normalize_weights(g)
    # GraphML preserves node attributes, unlike a plain edge list.
    for net_name, g in nets_dict.items():
        nx.write_graphml(g, "data/" + net_name + "_data.graphml")
    print("")
    print("All files written with data")
"""Visualization:
示例7: main
def main():
    """Evaluate Parana 2.0 predictions against ground-truth and Pinney et al. graphs.

    Loads ground-truth *.sim.cut graphs, the corresponding *.out.ppi graphs,
    and the bZIP input graph, then prints precision/recall-style stats and
    curves for each method (Python 2: `print >>sys.stderr` writes to stderr).
    """
    parser = createParser()
    options = parser.parse_args()
    # Ground-truth graphs, keyed by file stem (".sim.cut" suffix stripped).
    gtGraphNames = glob.glob("{0}/*.sim.cut".format(options.gtruth))
    gtGraphs = { fn.split("/")[-1][:-8] : nx.read_edgelist(fn) for fn in gtGraphNames }
    print(gtGraphs)
    print(gtGraphNames)
    # Matching Pinney et al. output graphs (weighted edge lists).
    oGraphNames = [ "{0}/{1}.out.ppi".format(options.other, k) for k in gtGraphs.keys() ]
    oGraphs = { fn.split("/")[-1][:-8] : nx.read_weighted_edgelist(fn) for fn in oGraphNames }
    # The bZIP input network; only the first match is used.
    inputGraphNames = glob.glob("{0}/bZIP*.cut".format(options.other))
    print(inputGraphNames)
    inputGraph = nx.read_edgelist(inputGraphNames[0])
    print(oGraphNames)
    cutoff = 0.99
    # Parana graph with no initial cutoff; a suggested cutoff is derived below.
    paranaGraph = graphWithCutoff(options.parana, 0.0)
    c = findSuggestedCutoff( paranaGraph, inputGraph, cutoff )
    evaluation.printStats( filteredGraph(paranaGraph, inputGraph.nodes(), cutoff=c ), inputGraph )
    print >>sys.stderr, "Parana 2.0 : {0}".format(getCurve(paranaGraph, inputGraph))
    # Compare both methods against every ground-truth graph.
    for gtName, gtGraph in gtGraphs.iteritems():
        print(gtName)
        c = findSuggestedCutoff( paranaGraph, gtGraph, cutoff )
        print("Parana cutoff = {0}".format(c))
        print("==================")
        evaluation.printStats( filteredGraph(oGraphs[gtName], gtGraph.nodes()), gtGraph )
        print >>sys.stderr, "Pinney et. al : {0}".format(getCurve(oGraphs[gtName], gtGraph))
        evaluation.printStats( filteredGraph(paranaGraph, gtGraph.nodes(), cutoff=c ), gtGraph )
        print >>sys.stderr, "Parana 2.0 : {0}".format(getCurve(paranaGraph, gtGraph))
        print("\n")
    sys.exit(0)
示例8: graph_properties
def graph_properties(filename, directed=False):
    """Read an integer-labelled edge list and report basic graph statistics.

    Returns a dict with the edge/node counts, the size and proportion of
    the largest connected component, and the average clustering coefficient.
    """
    graph_type = nx.DiGraph() if directed else nx.Graph()
    G = nx.read_edgelist(filename, nodetype=int, create_using=graph_type)
    largest_component = nx.connected_component_subgraphs(G)[0]
    cc_size = len(largest_component)
    props = {
        'num_edges': G.number_of_edges(),
        'num_nodes': len(G),
        'size_largestcc': cc_size,
        'proportion_in_largestcc': float(cc_size) / len(G),
        'average_clustering': nx.average_clustering(G),
    }
    # Diameter of the largest component is intentionally skipped (expensive).
    return props
示例9: calGraph
def calGraph(infile, mode = 1):
#init Parameter
inputpath = 'edge_list/'
outputpath = 'network_output/'
n = mode
Data_G = inputpath+infile+'_'+str(n)+'.edgelist'
#init Graph
G = nx.read_edgelist(Data_G, create_using=nx.DiGraph())
GU = nx.read_edgelist(Data_G)
#basci info
print nx.info(G),'\n', nx.info(GU)
average_degree = float(sum(nx.degree(G).values()))/len(G.nodes())
print 'average degree :', average_degree
degree_histogram = nx.degree_histogram(G)
print 'degree histogram max :', degree_histogram[1]
desity = nx.density(G)
print 'desity :', desity
#Approximation
#Centrality
degree_centrality = nx.degree_centrality(G)
print 'degree centrality top 10 !', sorted_dict(degree_centrality)[:2]
out_degree_centrality = nx.out_degree_centrality(G)
print 'out degree centrality top 10 !', sorted_dict(out_degree_centrality)[:2]
示例10: load
def load(self, fname):
    """Read an edge list from `fname` into self.G.

    The graph is loaded as a DiGraph when self.directed_graph is set,
    otherwise as an undirected Graph. The file's base name (text before
    the first '.') is remembered in self.fname.
    """
    # NOTE(review): splitting on '.' assumes a single extension and no dots
    # elsewhere in the path -- confirm against callers.
    self.fname = str(fname).split(".")[0]
    if not self.directed_graph:
        self.G = nx.read_edgelist(path=fname)
    else:
        self.G = nx.read_edgelist(path=fname, create_using=nx.DiGraph())
示例11: test_edgelist_integers
def test_edgelist_integers(self):
    """Round-trip an integer-labelled graph through write/read_edgelist."""
    graph = nx.convert_node_labels_to_integers(self.G)
    fd, fname = tempfile.mkstemp()
    nx.write_edgelist(graph, fname)
    read_once = nx.read_edgelist(fname, nodetype=int)
    read_twice = nx.read_edgelist(fname, nodetype=int)
    # Isolated nodes are never written to an edge list, so drop node 5
    # before comparing against what came back from disk.
    graph.remove_node(5)
    assert_equal(sorted(read_once.nodes()), sorted(graph.nodes()))
    assert_equal(sorted(read_once.edges()), sorted(graph.edges()))
    os.close(fd)
    os.unlink(fname)
示例12: test_edgelist_multidigraph
def test_edgelist_multidigraph(self):
    """Write a MultiDiGraph and read it back twice as a MultiDiGraph."""
    source = self.XDG
    fd, fname = tempfile.mkstemp()
    nx.write_edgelist(source, fname)
    read_a = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph())
    read_b = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph())
    # Two separate reads must yield distinct graph objects.
    assert_not_equal(read_a, read_b)
    assert_nodes_equal(list(read_a), list(source))
    assert_edges_equal(list(read_a.edges()), list(source.edges()))
    os.close(fd)
    os.unlink(fname)
示例13: test_edgelist_digraph
def test_edgelist_digraph(self):
    """Write a DiGraph and read it back twice as a DiGraph."""
    source = self.DG
    fd, fname = tempfile.mkstemp()
    nx.write_edgelist(source, fname)
    read_a = nx.read_edgelist(fname, create_using=nx.DiGraph())
    # Isolated nodes are never written to an edge list.
    source.remove_node('g')
    read_b = nx.read_edgelist(fname, create_using=nx.DiGraph())
    # Two separate reads must yield distinct graph objects.
    assert_not_equal(read_a, read_b)
    assert_nodes_equal(list(read_a), list(source))
    assert_edges_equal(list(read_a.edges()), list(source.edges()))
    os.close(fd)
    os.unlink(fname)
示例14: test_edgelist_graph
def test_edgelist_graph(self):
    """Write an undirected graph and read it back twice."""
    source = self.G
    fd, fname = tempfile.mkstemp()
    nx.write_edgelist(source, fname)
    read_a = nx.read_edgelist(fname)
    read_b = nx.read_edgelist(fname)
    # Two separate reads must yield distinct graph objects.
    assert_not_equal(read_a, read_b)
    # Isolated nodes are never written to an edge list.
    source.remove_node('g')
    assert_equal(sorted(read_a.nodes()), sorted(source.nodes()))
    assert_equal(sorted(read_a.edges()), sorted(source.edges()))
    os.close(fd)
    os.unlink(fname)
示例15: calGraph
def calGraph(infile, mode = 1):
    """Return [average_clustering, transitivity] for one edge list.

    Average clustering is computed on the undirected view of the graph;
    transitivity is computed on the directed graph.
    """
    edge_file = 'edge_list/' + infile + '_' + str(mode) + '.edgelist'
    directed_graph = nx.read_edgelist(edge_file, create_using=nx.DiGraph())
    undirected_graph = nx.read_edgelist(edge_file)
    return [nx.average_clustering(undirected_graph), nx.transitivity(directed_graph)]