This article collects typical usage examples of the networkx.betweenness_centrality function in Python. If you have been wondering what exactly betweenness_centrality does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following shows 15 code examples of the betweenness_centrality function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
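Before the examples, here is a minimal, self-contained sketch of the basic call (the karate-club graph is just an illustrative stand-in):

import networkx as nx

G = nx.karate_club_graph()  # any graph works; this one is illustrative

# Betweenness centrality: for each node, the fraction of all-pairs
# shortest paths that pass through it (normalized by default).
bc = nx.betweenness_centrality(G)

# Common variations: pivot-sampled approximation and weighted paths.
bc_approx = nx.betweenness_centrality(G, k=10, seed=42)
bc_weighted = nx.betweenness_centrality(G, weight="weight")

print(sorted(bc, key=bc.get, reverse=True)[:5])  # five most central nodes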
Example 1: compareGraphs
import networkx as nx
import matplotlib.pyplot as plt

def compareGraphs(g1, g2):
    """Compares the quantitative properties of two graphs, e.g. to check a coarse graining."""
    # Nodes and edges
    print('Graph1: #(Nodes, Edges) = (' + str(len(g1.nodes())) + ', ' + str(len(g1.edges())) + ')')
    print('Graph2: #(Nodes, Edges) = (' + str(len(g2.nodes())) + ', ' + str(len(g2.edges())) + ')')
    # Connected components
    # print('\n#CCs for graph 1: ' + str(len(list(nx.connected_components(g1)))))
    # print('#CCs for graph 2: ' + str(len(list(nx.connected_components(g2)))))
    plt.hist([len(i) for i in nx.connected_components(g1)])
    plt.hist([len(i) for i in nx.connected_components(g2)])
    plt.title('Cluster Size')
    plt.xlabel('Cluster Size')
    plt.ylabel('#Clusters')
    plt.show()
    # Degree distribution (histogram over the node degrees themselves)
    plt.hist([d for _, d in g1.degree()])
    plt.hist([d for _, d in g2.degree()])
    plt.title('Degree Distribution')
    plt.xlabel('Degree')
    plt.ylabel('#Nodes')
    plt.show()
    # Betweenness --- by far the most computationally demanding part
    plt.hist(list(nx.betweenness_centrality(g1, normalized=False).values()))
    plt.hist(list(nx.betweenness_centrality(g2, normalized=False).values()))
    plt.title('Distribution of Betweenness')
    plt.xlabel('Betweenness')
    plt.ylabel('#Nodes')
    plt.show()
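A hypothetical driver for this example, with two random graphs standing in for an original network and its coarse-grained version:

import networkx as nx

g_fine = nx.erdos_renyi_graph(200, 0.05, seed=1)    # stand-in for the original graph
g_coarse = nx.erdos_renyi_graph(100, 0.08, seed=2)  # stand-in for its coarse graining
compareGraphs(g_fine, g_coarse)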
Example 2: compute_static_graph_statistics
import networkx as nx

def compute_static_graph_statistics(G, start_time, end_time):
    verts = G.vertices
    n = len(verts)
    m = float(end_time - start_time)
    # One dict per measure: degree, closeness, betweenness
    agg_statistics = [dict.fromkeys(verts, 0), dict.fromkeys(verts, 0), dict.fromkeys(verts, 0)]
    avg_statistics = [dict.fromkeys(verts, 0), dict.fromkeys(verts, 0), dict.fromkeys(verts, 0)]
    aggregated_graph = nx.Graph()
    aggregated_graph.add_nodes_from(verts)
    start_time = max(1, start_time)
    for t in range(start_time, end_time + 1):
        aggregated_graph.add_edges_from(G.snapshots[t].edges())
        dc = G.snapshots[t].degree()
        cc = nx.closeness_centrality(G.snapshots[t])
        bc = nx.betweenness_centrality(G.snapshots[t])
        for v in verts:
            avg_statistics[0][v] += dc[v] / (n - 1.0)
            avg_statistics[1][v] += cc[v]
            avg_statistics[2][v] += bc[v]
    for v in verts:
        avg_statistics[0][v] = avg_statistics[0][v] / m
        avg_statistics[1][v] = avg_statistics[1][v] / m
        avg_statistics[2][v] = avg_statistics[2][v] / m
    dc = nx.degree_centrality(aggregated_graph)
    cc = nx.closeness_centrality(aggregated_graph)
    bc = nx.betweenness_centrality(aggregated_graph)
    for v in verts:
        agg_statistics[0][v] = dc[v]
        agg_statistics[1][v] = cc[v]
        agg_statistics[2][v] = bc[v]
    return (agg_statistics, avg_statistics)
Example 3: show_network_metrics
import networkx as nx
import numpy as np
import pandas as pd

def show_network_metrics(G):
    '''
    Print the local and global metrics of the network
    '''
    print(G)  # nx.info(G) was removed in NetworkX 3.0
    # density
    print("Density of the network")
    print(nx.density(G))
    # average betweenness (compute the centrality dict once and reuse it)
    bc = nx.betweenness_centrality(G)
    print("Average betweenness of the network")
    print(np.sum(list(bc.values())) / len(bc))
    # average clustering coefficient
    print("Average clustering coefficient:")
    print(nx.average_clustering(G))
    # create metrics dataframe
    by_node_metrics = pd.DataFrame({"Betweenness_Centrality": bc,
                                    "Degree_Centrality": nx.degree_centrality(G),
                                    "Clustering_Coefficient": nx.clustering(G),
                                    "Triangles": nx.algorithms.cluster.triangles(G)})
    print(by_node_metrics)
    by_node_metrics.to_excel("metrics.xlsx")
Example 4: betweenness_fracture
import operator
import networkx

def betweenness_fracture(infile, outfile, fraction, recalculate=False):
    """
    Removes the given fraction of nodes from the infile network in decreasing
    order of betweenness centrality (with or without recalculation of
    centrality values after each node removal) and saves the network in outfile.
    """
    g = networkx.read_gml(infile)
    m = networkx.betweenness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    n = len(g.nodes())
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.betweenness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
        if i * 1. / n >= fraction:
            break
    # Tag each node with the id of its connected component
    components = networkx.connected_components(g)
    component_id = 1
    for component in components:
        for node in component:
            g.nodes[node]["component"] = component_id
        component_id += 1
    networkx.write_gml(g, outfile)
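A hypothetical call, removing 20% of the nodes without recalculation (the file names are placeholders):

betweenness_fracture("input.gml", "fractured.gml", 0.2, recalculate=False)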
Example 5: betweenness
import operator
import networkx

def betweenness(infile, recalculate=False):
    """
    Performs robustness analysis based on betweenness centrality
    on the network specified by infile, using the sequential
    (recalculate=True) or simultaneous (recalculate=False) approach.
    Returns a list with the fraction of nodes removed, a list with the
    corresponding sizes of the largest component of the network, and the
    overall vulnerability of the network.
    """
    g = networkx.read_gml(infile)
    m = networkx.betweenness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    largest_component = max(networkx.connected_components(g), key=len)
    n = len(g.nodes())
    x.append(0)
    y.append(len(largest_component) * 1. / n)
    R = 0.0
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.betweenness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
        largest_component = max(networkx.connected_components(g), key=len)
        x.append(i * 1. / n)
        R += len(largest_component) * 1. / n
        y.append(len(largest_component) * 1. / n)
    return x, y, 0.5 - R / n
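A sketch of how the returned robustness curve might be plotted (the GML file name is a placeholder):

import matplotlib.pyplot as plt

x, y, vulnerability = betweenness("network.gml", recalculate=False)
plt.plot(x, y)
plt.xlabel("Fraction of nodes removed")
plt.ylabel("Relative size of largest component")
plt.title("Betweenness-ordered removal (vulnerability = %.3f)" % vulnerability)
plt.show()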
Example 6: betweenness_removal
import operator
import networkx as nx

def betweenness_removal(g, recalculate=False):
    """
    Performs robustness analysis based on betweenness centrality
    on the network g, using the sequential (recalculate=True) or
    simultaneous (recalculate=False) approach. Returns a list with
    the fraction of nodes removed and a list with the corresponding
    fractal dimensions of the network.
    """
    m = nx.betweenness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    # fd is an external fractal-dimension module used by this project
    dimension = fd.fractal_dimension(g, iterations=100, debug=False)
    n = len(g.nodes())
    x.append(0)
    y.append(dimension)
    for i in range(1, n - 1):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = nx.betweenness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
        dimension = fd.fractal_dimension(g, iterations=100, debug=False)
        x.append(i * 1. / n)
        y.append(dimension)
    return x, y
Example 7: recalculated_betweenness
import sys
import networkx as nx

def recalculated_betweenness(ex):
    # Work on a copy of the graph
    ex = ex.copy()
    # Calculate betweenness of the full graph
    between = nx.betweenness_centrality(ex, weight='distance', normalized=False)
    # Make an actual copy to track the recalculated betweenness
    # (plain assignment would only alias the same dict)
    rebetween = dict(between)
    while len(ex.edges()) > 0:
        # Recalculate betweenness
        between = nx.betweenness_centrality(ex, weight='distance', normalized=False)
        # Store recalculated values if they're higher
        for node, value in between.items():
            if value > rebetween[node]:
                rebetween[node] = value
        # Remove all edges from the most central node
        node, value = sorted(between.items(), key=lambda x: x[1], reverse=True)[0]
        if value == 0:
            # All remaining edges are trivial shortest paths
            break
        for tail, head in list(ex.edges(node)):
            ex.remove_edge(tail, head)
        sys.stdout.write('.')
        sys.stdout.flush()
    print()
    return rebetween
Example 8: sna_calculations
import csv
import os
import networkx as nx
import numpy

def sna_calculations(g, play_file):
    """
    :param g: a NetworkX graph object
    :type g: object
    :param play_file: the location of a play in .txt format
    :type play_file: string
    :return: a dictionary containing various network-related figures
    :rtype: dict
    :note: also writes results/<file_name>-snaCalculations.csv and results/allCharacters.csv
    """
    file_name = os.path.splitext(os.path.basename(play_file))[0]
    sna_calculations_list = dict()
    sna_calculations_list['playType'] = file_name[0]
    sna_calculations_list['avDegreeCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.degree_centrality(g).values()), dtype=float))
    sna_calculations_list['avDegreeCentralityStd'] = numpy.std(
        numpy.fromiter(iter(nx.degree_centrality(g).values()), dtype=float))
    sna_calculations_list['avInDegreeCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.in_degree_centrality(g).values()), dtype=float))
    sna_calculations_list['avOutDegreeCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.out_degree_centrality(g).values()), dtype=float))
    try:
        sna_calculations_list['avShortestPathLength'] = nx.average_shortest_path_length(g)
    except nx.NetworkXError:
        sna_calculations_list['avShortestPathLength'] = 'not connected'
    sna_calculations_list['density'] = nx.density(g)
    sna_calculations_list['avEigenvectorCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.eigenvector_centrality(g).values()), dtype=float))
    sna_calculations_list['avBetweennessCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.betweenness_centrality(g).values()), dtype=float))
    sna_calculations_list['DegreeCentrality'] = nx.degree_centrality(g)
    sna_calculations_list['EigenvectorCentrality'] = nx.eigenvector_centrality(g)
    sna_calculations_list['BetweennessCentrality'] = nx.betweenness_centrality(g)
    # snaCalculations.csv file
    sna_calc_file = csv.writer(open('results/' + file_name + '-snaCalculations.csv', 'w', newline=''),
                               quoting=csv.QUOTE_ALL, delimiter=';')
    for key, value in sna_calculations_list.items():
        sna_calc_file.writerow([key, value])
    # allCharacters.csv file
    if not os.path.isfile('results/allCharacters.csv'):
        with open('results/allCharacters.csv', 'w') as f:
            f.write(
                'Name;PlayType;play_file;DegreeCentrality;EigenvectorCentrality;BetweennessCentrality;speech_amount;AverageUtteranceLength\n')
    all_characters = open('results/allCharacters.csv', 'a')
    character_speech_amount = speech_amount(play_file)  # speech_amount: helper defined elsewhere
    for character in sna_calculations_list['DegreeCentrality']:
        all_characters.write(character + ';' + str(sna_calculations_list['playType']) + ';' + file_name + ';' + str(
            sna_calculations_list['DegreeCentrality'][character]) + ';' + str(
            sna_calculations_list['EigenvectorCentrality'][character]) + ';' + str(
            sna_calculations_list['BetweennessCentrality'][character]) + ';' + str(
            character_speech_amount[0][character]) + ';' + str(character_speech_amount[1][character]) + '\n')
    all_characters.close()
    return sna_calculations_list
Example 9: __init__
def __init__(self, view, controller, use_ego_betw=False, **kwargs):
    super(CacheLessForMore, self).__init__(view, controller)
    topology = view.topology()
    if use_ego_betw:
        # Score each node by its betweenness within its own ego network
        self.betw = dict((v, nx.betweenness_centrality(nx.ego_graph(topology, v))[v])
                         for v in topology.nodes())
    else:
        self.betw = nx.betweenness_centrality(topology)
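For reference, the ego-betweenness variant used above can be sketched standalone: each node is scored by its betweenness within its own ego network, which trades the single expensive global computation for many small local ones (the graph below is illustrative):

import networkx as nx

topology = nx.barabasi_albert_graph(50, 2, seed=0)  # illustrative topology
ego_betw = {v: nx.betweenness_centrality(nx.ego_graph(topology, v))[v]
            for v in topology.nodes()}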
Example 10: weighted_betweenness_centrality_distribution
def weighted_betweenness_centrality_distribution(G, return_dictionary=False):
    """Return a distribution of weighted betweenness centralities.
    If return_dictionary is specified, we return a dictionary indexed by
    vertex name, rather than just the values (as returned by default).
    """
    # The old weighted_edges=True keyword is gone; modern NetworkX takes
    # the name of the edge attribute to use as the weight.
    if return_dictionary:
        return nx.betweenness_centrality(G, weight='weight')
    else:
        return nx.betweenness_centrality(G, weight='weight').values()
Example 11: betweenness_centrality_distribution
def betweenness_centrality_distribution(G, return_dictionary=False):
    """Return a distribution of unweighted betweenness centralities,
    as used in Borges, Coppersmith, Meyer, and Priebe 2011.
    If return_dictionary is specified, we return a dictionary indexed by
    vertex name, rather than just the values (as returned by default).
    """
    if return_dictionary:
        return nx.betweenness_centrality(G)
    else:
        return nx.betweenness_centrality(G).values()
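These two helpers pair naturally with a quick histogram; a minimal sketch, assuming matplotlib and an illustrative graph:

import matplotlib.pyplot as plt
import networkx as nx

G = nx.karate_club_graph()  # illustrative graph
plt.hist(list(betweenness_centrality_distribution(G)), bins=20)
plt.xlabel("Betweenness centrality")
plt.ylabel("#Nodes")
plt.show()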
Example 12: centrality_measures
def centrality_measures(self):
    centrality_measures = []
    txt = ''
    # betweenness
    # unweighted
    self.unweighted_betweenness_distribution = nx.betweenness_centrality(self.G)
    statistics = self.Stats.get_distribution_info(self.unweighted_betweenness_distribution)
    centrality_measures.extend(statistics[:5])
    centrality_measures.extend(statistics[5])
    txt += ',average betweenness centrality (unweighted)' + self.standard_text_distribution
    # weighted
    self.weighted_betweenness_distribution = nx.betweenness_centrality(self.G, weight=self.weight_id)
    # statistics = self.Stats.get_distribution_info(self.weighted_betweenness_distribution)
    # centrality_measures.extend(statistics[:5])
    # centrality_measures.extend(statistics[5])
    # txt += ',average betweenness centrality (weighted)' + self.standard_text_distribution
    # closeness
    # unweighted
    self.unweighted_closeness_distribution = nx.closeness_centrality(self.G)
    statistics = self.Stats.get_distribution_info(self.unweighted_closeness_distribution)
    centrality_measures.extend(statistics[:5])
    centrality_measures.extend(statistics[5])
    txt += ',average closeness centrality (unweighted)' + self.standard_text_distribution
    # eigenvector
    # right
    try:
        self.right_eigenvector_distribution = nx.eigenvector_centrality(self.G)
        statistics = self.Stats.get_distribution_info(self.right_eigenvector_distribution)
        centrality_measures.extend(statistics[:5])
        centrality_measures.extend(statistics[5])
    except Exception:
        centrality_measures.extend([0, 0, 0, 0, 0])
        centrality_measures.extend([0] * len(statistics[5]))
    txt += ',average right eigenvector' + self.standard_text_distribution
    # left
    try:
        G_rev = self.G.reverse()
        self.lef_eigenvector_distribution = nx.eigenvector_centrality(G_rev)
        statistics = self.Stats.get_distribution_info(self.lef_eigenvector_distribution)
        centrality_measures.extend(statistics[:5])
        centrality_measures.extend(statistics[5])
    except Exception:
        centrality_measures.extend([0, 0, 0, 0, 0])
        centrality_measures.extend([0] * len(statistics[5]))
    txt += ',average left eigenvector' + self.standard_text_distribution
    return [centrality_measures, txt]
Example 13: genSeedsMaxDegree
def genSeedsMaxDegree(self, p, bwness):
    """Generate seeds based on maximum degree; also handles betweenness.
    The optional input argument p sets the randomization, 0 < p < 1."""
    numSeeds = self.numSeeds
    if bwness:
        numSeeds = numSeeds * 1.5
        k_val = int(2000 / math.sqrt(len(self.adj)))
        if k_val > len(self.adj):
            bw_node = nx.betweenness_centrality(self.nxgraph)
        else:
            # Approximate betweenness from a sample of k_val pivot nodes
            bw_node = nx.betweenness_centrality(self.nxgraph, k=k_val)
    numMax = int(self.numSeeds / (1.0 * p))
    seeds = [None] * numMax
    deg = [0] * numMax
    for key, value in self.adj.items():
        # fill seeds
        curr_deg = len(value)
        for j in range(numMax):
            if curr_deg > deg[j]:
                deg.insert(j, curr_deg)
                seeds.insert(j, key)
                break
    seeds = seeds[:numMax]
    deg = deg[:numMax]
    if bwness:
        numMax = int(self.numSeeds / (1.0 * p))
        dict_bw = bw_node
        seeds_degree = seeds
        seeds = dict()
        for node in seeds_degree:
            value = dict_bw.get(node)
            key = node
            seeds[key] = value
        seeds_fin = dict(sorted(seeds.items(), key=operator.itemgetter(1), reverse=True)[:numMax])
        seeds = list(seeds_fin.keys())
    # shuffle
    if p != 1:
        random.shuffle(seeds)
    return seeds[:self.numSeeds]
Example 14: node_graph
import networkx as nx
from matplotlib import pyplot

def node_graph(tup):
    h = nx.Graph()
    h.add_edges_from(tup)
    print("edges:", h.edges())
    # %matplotlib inline
    BLUE = "#99CCFF"
    nx.draw(h, node_color=BLUE, with_labels=True)
    print("Degree Distribution:", h.degree())
    print("Degree Centrality:", nx.degree_centrality(h))
    print("Betweenness Centrality:", nx.betweenness_centrality(h))
    print("Betweenness Centrality Non-Normalized:", nx.betweenness_centrality(h, normalized=False))
    print("Closeness Centrality:", nx.closeness_centrality(h))
    pyplot.show()
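A possible invocation, with an illustrative edge list:

node_graph([(1, 2), (2, 3), (3, 4), (2, 4), (4, 5)])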
Example 15: betweenness_apl
import operator
import networkx

def betweenness_apl(g, recalculate=False):
    """
    Performs robustness analysis based on betweenness centrality
    on the network g, using the sequential (recalculate=True) or
    simultaneous (recalculate=False) approach. Returns a list with
    the fraction of nodes removed, a list with the corresponding
    average path lengths, and the overall vulnerability of the network.
    """
    m = networkx.betweenness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    average_path_length = 0.0
    number_of_components = 0
    n = len(g.nodes())
    # connected_component_subgraphs() was removed from NetworkX;
    # build the component subgraphs explicitly instead.
    for c in networkx.connected_components(g):
        sg = g.subgraph(c)
        average_path_length += networkx.average_shortest_path_length(sg)
        number_of_components += 1
    average_path_length = average_path_length / number_of_components
    initial_apl = average_path_length
    x.append(0)
    y.append(average_path_length * 1. / initial_apl)
    r = 0.0
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.betweenness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
        average_path_length = 0.0
        number_of_components = 0
        for c in networkx.connected_components(g):
            sg = g.subgraph(c)
            if len(sg.nodes()) > 1:
                average_path_length += networkx.average_shortest_path_length(sg)
                number_of_components += 1
        average_path_length = average_path_length / number_of_components
        x.append(i * 1. / n)  # fraction of nodes removed
        r += average_path_length
        y.append(average_path_length)
    return x, y, r / initial_apl