This page collects typical usage examples of the Python function networkx.eigenvector_centrality. If you are unsure what eigenvector_centrality does, how to call it, or what idiomatic usage looks like, the curated examples below should help.
The 15 code examples that follow are drawn from open-source projects and are ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
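Before the project-sourced examples, here is a minimal sketch of the function itself; the toy graph (networkx's bundled karate-club graph) and the max_iter value are illustrative choices, not part of any example below.

import networkx as nx

# Minimal sketch: eigenvector centrality on a small built-in graph.
G = nx.karate_club_graph()
centrality = nx.eigenvector_centrality(G, max_iter=1000)

# The five most central nodes, by descending score.
top5 = sorted(centrality.items(), key=lambda kv: kv[1], reverse=True)[:5]
print(top5)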
Example 1: eigenvector
import operator

import networkx


def eigenvector(g, recalculate=False):
    """
    Performs robustness analysis based on eigenvector centrality
    on the network g, removing nodes either sequentially
    (recalculate=True, centrality is recomputed after every removal)
    or simultaneously (recalculate=False, the initial ranking is kept).
    Returns a list with the fraction of nodes removed, a list with the
    corresponding relative sizes of the largest component, and the
    overall vulnerability of the network. Note that g is modified in place.
    """
    m = networkx.eigenvector_centrality(g, max_iter=5000)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    largest_component = max(networkx.connected_components(g), key=len)
    n = len(g.nodes())
    x.append(0)
    y.append(len(largest_component) * 1. / n)
    r = 0.0
    for i in range(1, n - 1):
        # Remove the node with the highest remaining centrality.
        g.remove_node(l.pop(0)[0])
        if recalculate:
            try:
                m = networkx.eigenvector_centrality(g, max_iter=5000)
            except networkx.NetworkXException:
                # Power iteration failed to converge; stop the analysis.
                break
            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        largest_component = max(networkx.connected_components(g), key=len)
        x.append(i * 1. / n)
        r += len(largest_component) * 1. / n
        y.append(len(largest_component) * 1. / n)
    return x, y, r / n
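A hedged usage sketch for the function above: the random test graph is an assumption for illustration. Since eigenvector() removes nodes from g in place, pass a copy when the original graph is still needed.

import networkx

# Assumed demo graph; any undirected graph works.
G = networkx.erdos_renyi_graph(100, 0.05)
fractions, component_sizes, vulnerability = eigenvector(G.copy(), recalculate=True)
print(vulnerability)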
Example 2: plot_co_x
import matplotlib.pyplot as plt
import networkx as nx


def plot_co_x(cox, start, end, size=(20, 20), title='', weighted=False, weight_threshold=10):
    """ Plotting function for keyword graphs
    Parameters
    --------------------
    cox: the coword networkx graph; assumes that nodes have attribute 'topic'
    start: start year
    end: end year
    size: figure size passed to matplotlib
    title: plot title prefix
    weighted: if True, draw heavy and light edges in distinct styles
    weight_threshold: weight above which an edge counts as heavy
    """
    plt.figure(figsize=size)
    plt.title(title + ' %s - %s' % (start, end), fontsize=18)
    if weighted:
        elarge = [(u, v) for (u, v, d) in cox.edges(data=True) if d['weight'] > weight_threshold]
        esmall = [(u, v) for (u, v, d) in cox.edges(data=True) if d['weight'] <= weight_threshold]
        pos = nx.nx_agraph.graphviz_layout(cox)  # positions for all nodes (needs pygraphviz)
        nx.draw_networkx_nodes(cox, pos,
                               node_color=[s * 4500 for s in nx.eigenvector_centrality(cox).values()],
                               node_size=[s * 6 + 20 for s in dict(cox.degree()).values()],
                               alpha=0.7)
        # edges
        nx.draw_networkx_edges(cox, pos, edgelist=elarge,
                               width=1, alpha=0.5, edge_color='black')  # , edge_cmap=plt.cm.Blues
        nx.draw_networkx_edges(cox, pos, edgelist=esmall,
                               width=0.3, alpha=0.5, edge_color='yellow', style='dotted')
        # labels
        nx.draw_networkx_labels(cox, pos, font_size=10, font_family='sans-serif')
        plt.axis('off')
    else:
        # nx.draw_graphviz was removed from networkx; compute the layout explicitly.
        pos = nx.nx_agraph.graphviz_layout(cox)
        nx.draw(cox, pos, with_labels=True,
                alpha=0.8, width=0.1,
                font_size=9,
                node_color=[s * 4 for s in nx.eigenvector_centrality(cox).values()],
                node_size=[s * 6 + 20 for s in dict(cox.degree()).values()])
Example 3: buildGraphFromTwitterFollowing
def buildGraphFromTwitterFollowing(self):
    # Requires module-level: import json, operator, networkx as nx,
    # and matplotlib.pyplot as plt.
    while True:
        twitter_id = self.userq.get()
        twitter_id_dict = json.loads(twitter_id.AsJsonString())
        for f in self.api.GetFollowers(twitter_id):
            try:
                follower_id_dict = json.loads(f.AsJsonString())
                # Edge from the user to each follower; followers are queued
                # so the crawl keeps expanding.
                self.tng.add_edge(twitter_id_dict["name"], follower_id_dict["name"])
                self.userq.put(f)
                self.no_of_vertices += 1
            except Exception:
                pass
        if self.no_of_vertices > 50:
            break
    print("======================================")
    pos = nx.shell_layout(self.tng)
    nx.draw_networkx(self.tng, pos)
    print("===========================================================================================")
    print("Bonacich Power Centrality of the Social Network (Twitter) Crawled - computed using PageRank")
    print("(a degree centrality based on social prestige)")
    print("===========================================================================================")
    print(sorted(nx.pagerank(self.tng).items(), key=operator.itemgetter(1), reverse=True))
    print("===========================================================================================")
    print("Eigenvector Centrality")
    print("===========================================================================================")
    print(nx.eigenvector_centrality(self.tng))
    plt.show()
Example 4: eigenvector_component
import collections

import networkx as nx

import util  # project-local helper for loading graphs


def eigenvector_component(seed_num, graph_json_filename=None, graph_json_str=None):
    if graph_json_filename is None and graph_json_str is None:
        return []
    if graph_json_str is None:
        G = util.load_graph(graph_json_filename=graph_json_filename)
    else:
        G = util.load_graph(graph_json_str=graph_json_str)
    # Keep only components holding more than 10% of the nodes, largest first
    # (networkx no longer guarantees any ordering of connected_components()).
    components = [c for c in nx.connected_components(G) if len(c) > 0.1 * len(G)]
    components.sort(key=len, reverse=True)
    total_size = sum(len(c) for c in components)
    total_nodes = 0
    rtn = []
    # Allocate seeds to the smaller components proportionally to their size.
    for comp in components[1:]:
        num_nodes = int(float(len(comp)) / total_size * seed_num)
        component = G.subgraph(list(comp))
        clse_cent = nx.eigenvector_centrality(component)
        collector = collections.Counter(clse_cent)
        clse_cent = collector.most_common(num_nodes)
        rtn += [node for node, score in clse_cent]
        total_nodes += num_nodes
    # The largest component receives whatever seed budget remains.
    num_nodes = seed_num - total_nodes
    component = G.subgraph(list(components[0]))
    clse_cent = nx.eigenvector_centrality(component)
    collector = collections.Counter(clse_cent)
    clse_cent = collector.most_common(num_nodes)
    rtn += [node for node, score in clse_cent]
    return rtn
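A hedged usage sketch for eigenvector_component(): the filename graph.json is a hypothetical placeholder, and util.load_graph() is assumed to accept it as in the function above.

# Hypothetical call: select 10 seed nodes from a graph stored as JSON.
seeds = eigenvector_component(10, graph_json_filename="graph.json")
print(seeds)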
Example 5: sna_calculations
import csv
import os

import networkx as nx
import numpy


def sna_calculations(g, play_file):
    """
    :param g: a NetworkX graph object (directed, since in-/out-degree centrality are computed)
    :type g: object
    :param play_file: the location of a play in .txt format
    :type play_file: string
    :return: returns a dictionary containing various network related figures
    :rtype: dict
    :note: also writes into results/file_name-snaCalculations.csv and results/allCharacters.csv
    """
    file_name = os.path.splitext(os.path.basename(play_file))[0]
    sna_calculations_list = dict()
    sna_calculations_list['playType'] = file_name[0]
    sna_calculations_list['avDegreeCentrality'] = numpy.mean(
        numpy.fromiter(nx.degree_centrality(g).values(), dtype=float))
    sna_calculations_list['avDegreeCentralityStd'] = numpy.std(
        numpy.fromiter(nx.degree_centrality(g).values(), dtype=float))
    sna_calculations_list['avInDegreeCentrality'] = numpy.mean(
        numpy.fromiter(nx.in_degree_centrality(g).values(), dtype=float))
    sna_calculations_list['avOutDegreeCentrality'] = numpy.mean(
        numpy.fromiter(nx.out_degree_centrality(g).values(), dtype=float))
    try:
        sna_calculations_list['avShortestPathLength'] = nx.average_shortest_path_length(g)
    except nx.NetworkXError:
        sna_calculations_list['avShortestPathLength'] = 'not connected'
    sna_calculations_list['density'] = nx.density(g)
    sna_calculations_list['avEigenvectorCentrality'] = numpy.mean(
        numpy.fromiter(nx.eigenvector_centrality(g).values(), dtype=float))
    sna_calculations_list['avBetweennessCentrality'] = numpy.mean(
        numpy.fromiter(nx.betweenness_centrality(g).values(), dtype=float))
    sna_calculations_list['DegreeCentrality'] = nx.degree_centrality(g)
    sna_calculations_list['EigenvectorCentrality'] = nx.eigenvector_centrality(g)
    sna_calculations_list['BetweennessCentrality'] = nx.betweenness_centrality(g)
    # snaCalculations.csv file (text mode with newline='' for Python 3's csv module)
    sna_calc_file = csv.writer(open('results/' + file_name + '-snaCalculations.csv', 'w', newline=''),
                               quoting=csv.QUOTE_ALL, delimiter=';')
    for key, value in sna_calculations_list.items():
        sna_calc_file.writerow([key, value])
    # allCharacters.csv file; speech_amount() is a project-local helper.
    if not os.path.isfile('results/allCharacters.csv'):
        with open('results/allCharacters.csv', 'w') as f:
            f.write(
                'Name;PlayType;play_file;DegreeCentrality;EigenvectorCentrality;BetweennessCentrality;speech_amount;AverageUtteranceLength\n')
    all_characters = open('results/allCharacters.csv', 'a')
    character_speech_amount = speech_amount(play_file)
    for character in sna_calculations_list['DegreeCentrality']:
        all_characters.write(character + ';' + str(sna_calculations_list['playType']) + ';' + file_name + ';' + str(
            sna_calculations_list['DegreeCentrality'][character]) + ';' + str(
            sna_calculations_list['EigenvectorCentrality'][character]) + ';' + str(
            sna_calculations_list['BetweennessCentrality'][character]) + ';' + str(
            character_speech_amount[0][character]) + ';' + str(character_speech_amount[1][character]) + '\n')
    all_characters.close()
    # Return the dictionary (the original returned the function object by mistake).
    return sna_calculations_list
Example 6: eigenvector_centrality
def eigenvector_centrality(self, iterations, withme=False, node=None, average=False):
    # Requires module-level: import networkx as nx.
    # Use the full network when the ego is included, otherwise the ego-free network.
    net = self.mynet if withme else self.no_ego_net
    my_dict = nx.eigenvector_centrality(net, max_iter=iterations)
    if node is None:
        new = {}
        new2 = {}
        for i in my_dict:
            new[self.id_to_name(i)] = my_dict[i]
            new2[i] = my_dict[i]
        if average:
            print("The average is " + str(round(sum(new.values()) / float(len(new.values())), 4)))
        else:
            for i, j in new.items():
                print(i, round(j, 4))
        return new2
    else:
        try:
            print("The coefficient for node " + str(node) + " is " + str(round(my_dict[node], 4)))
        except KeyError:
            try:
                # Fall back to looking the node up by name.
                print("The coefficient for node " + str(node) + " is " +
                      str(round(my_dict[self.name_to_id(node)], 4)))
            except Exception:
                print("Invalid node name")
Example 7: centrality_measures
def centrality_measures(self):
    # Requires module-level: import networkx as nx.
    centrality_measures = []
    txt = ''
    # betweenness (unweighted)
    self.unweighted_betweenness_distribution = nx.betweenness_centrality(self.G)
    statistics = self.Stats.get_distribution_info(self.unweighted_betweenness_distribution)
    centrality_measures.extend(statistics[:5])
    centrality_measures.extend(statistics[5])
    txt += ',average betweenness centrality (unweighted)' + self.standard_text_distribution
    # betweenness (weighted) - only the distribution is stored for now
    self.weighted_betweenness_distribution = nx.betweenness_centrality(self.G, weight=self.weight_id)
    # statistics = self.Stats.get_distribution_info(self.weighted_betweenness_distribution)
    # centrality_measures.extend(statistics[:5])
    # centrality_measures.extend(statistics[5])
    # txt += ',average betweenness centrality (weighted)' + self.standard_text_distribution
    # closeness (unweighted)
    self.unweighted_closeness_distribution = nx.closeness_centrality(self.G)
    statistics = self.Stats.get_distribution_info(self.unweighted_closeness_distribution)
    centrality_measures.extend(statistics[:5])
    centrality_measures.extend(statistics[5])
    txt += ',average closeness centrality (unweighted)' + self.standard_text_distribution
    # eigenvector (right)
    try:
        self.right_eigenvector_distribution = nx.eigenvector_centrality(self.G)
        statistics = self.Stats.get_distribution_info(self.right_eigenvector_distribution)
        centrality_measures.extend(statistics[:5])
        centrality_measures.extend(statistics[5])
    except Exception:
        # Pad with zeros, reusing the shape of the last successful statistics.
        centrality_measures.extend([0, 0, 0, 0, 0])
        centrality_measures.extend([0] * len(statistics[5]))
    txt += ',average right eigenvector' + self.standard_text_distribution
    # eigenvector (left): eigenvector centrality of the reversed graph
    try:
        G_rev = self.G.reverse()
        self.left_eigenvector_distribution = nx.eigenvector_centrality(G_rev)
        statistics = self.Stats.get_distribution_info(self.left_eigenvector_distribution)
        centrality_measures.extend(statistics[:5])
        centrality_measures.extend(statistics[5])
    except Exception:
        centrality_measures.extend([0, 0, 0, 0, 0])
        centrality_measures.extend([0] * len(statistics[5]))
    txt += ',average left eigenvector' + self.standard_text_distribution
    return [centrality_measures, txt]
Example 8: eigenvector_apl
import operator

import networkx


def eigenvector_apl(g, recalculate=False):
    """
    Performs robustness analysis based on eigenvector centrality
    on the network g, removing nodes either sequentially
    (recalculate=True) or simultaneously (recalculate=False).
    Returns a list with the fraction of nodes removed, a list with the
    corresponding average path lengths relative to the initial value,
    and the overall vulnerability of the network. Note that g is
    modified in place.
    """
    m = networkx.eigenvector_centrality(g, max_iter=5000)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    average_path_length = 0.0
    number_of_components = 0
    n = len(g.nodes())
    # connected_component_subgraphs() was removed from networkx;
    # build component subgraphs explicitly, skipping trivial ones.
    for c in networkx.connected_components(g):
        sg = g.subgraph(c)
        if len(sg) > 1:
            average_path_length += networkx.average_shortest_path_length(sg)
            number_of_components += 1
    average_path_length /= number_of_components
    initial_apl = average_path_length
    r = 0.0
    for i in range(1, n - 1):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            try:
                m = networkx.eigenvector_centrality(g, max_iter=5000)
            except networkx.NetworkXException:
                break
            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        average_path_length = 0.0
        number_of_components = 0
        for c in networkx.connected_components(g):
            sg = g.subgraph(c)
            if len(sg) > 1:
                average_path_length += networkx.average_shortest_path_length(sg)
                number_of_components += 1
        average_path_length = average_path_length / number_of_components
        x.append(i * 1. / n)  # fraction of nodes removed, as the docstring states
        r += average_path_length * 1. / initial_apl
        y.append(average_path_length * 1. / initial_apl)
    return x, y, r / initial_apl
Example 9: eigValue
import decimal
from operator import itemgetter

import networkx as nx


def eigValue(charList, graphFile, bookNetworksPath):
    # Compute eigenvector centrality for all characters in the current chapter graph.
    g = nx.read_gexf(graphFile)
    eigCentrality = nx.eigenvector_centrality(g, max_iter=100, tol=1.0e-6, nstart=None, weight="Weight")
    eigValues = eigCentrality.values()
    # Pre-compute min/max for the (currently disabled) normalisation below.
    d = decimal.Decimal
    maxEig = max(eigValues)
    minEig = min(eigValues)
    maxMinusMin = d(maxEig) - d(minEig)
    if not charList:
        # Get all characters from the overall.gexf graph, ranked by centrality
        # (a top-10 cut was disabled in the source).
        overallGraphFile = bookNetworksPath + "overall.gexf"
        overall_g = nx.read_gexf(overallGraphFile)
        overallEigCent = nx.eigenvector_centrality(overall_g, max_iter=100, tol=1.0e-6, nstart=None, weight="Weight")
        sortedCentrality = sorted(overallEigCent.items(), key=itemgetter(1), reverse=True)
        charList = [seq[0] for seq in sortedCentrality]
        return charList
    else:
        charList = [item for item in charList]
        for index, item in enumerate(charList):
            currentChar = None
            for key, value in eigCentrality.items():
                if key == item:
                    # Unnormalised version; a min-max normalised variant,
                    # (value - minEig) / maxMinusMin, was disabled in the source.
                    charList[index] = (key, str(value))
                    currentChar = key
            # If the character is absent from the current chapter, assign 0 influence.
            if not currentChar:
                charList[index] = (item, 0)
        return charList
Example 10: centralities
def centralities(self):
    '''
    Get info on centralities of data
    Params:
        None
    Returns:
        dictionary of centrality metrics with keys (centralities supported):
            degree - degree centrality
            betweeness - betweenness centrality
            eigenvector - eigenvector centrality
            hub - hub scores - not implemented
            authority - authority scores - not implemented
            katz - Katz centrality
            pagerank - PageRank centrality - not implemented
    '''
    # Requires module-level: import networkx as nx.
    output = {}
    output['degree'] = nx.degree_centrality(self.G)
    output['betweeness'] = nx.betweenness_centrality(self.G)
    try:
        output['eigenvector'] = nx.eigenvector_centrality(self.G)
        output['katz'] = nx.katz_centrality(self.G)
    except Exception:
        # Power iteration may fail to converge on some graphs.
        output['eigenvector'] = 'empty or exception'
        output['katz'] = 'empty or exception'
    # output['hub'] = 'Not implemented'
    # output['authority'] = 'Not implemented'
    # output['pagerank'] = 'Not implemented'
    return output
Example 11: describe
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd


def describe(G, ny_tri, chems):
    '''
    Describe the network: degrees, clustering, and centrality measures
    '''
    global describeNetwork
    # Degree: the number of connections a node has to other nodes.
    degrees = dict(nx.degree(G))
    degrees_df = pd.DataFrame(list(degrees.items()), columns=['Facility', 'Degrees'])
    values = sorted(set(degrees.values()))
    hist = [list(degrees.values()).count(x) for x in values]
    plt.figure()
    plt.plot(values, hist, 'ro-')  # degree distribution
    plt.xlabel('Degree')
    plt.ylabel('Number of nodes')
    plt.title('Degree Distribution')
    plt.savefig('output/degree_distribution.png')
    # Clustering coefficients: a measure of the local density of connections.
    clust_coefficients = nx.clustering(G)
    clust_coefficients_df = pd.DataFrame(list(clust_coefficients.items()),
                                         columns=['Facility', 'Clustering Coefficient'])
    clust_coefficients_df = clust_coefficients_df.sort_values(by='Clustering Coefficient', ascending=False)
    # Node centrality measures, computed on the largest connected component
    # (connected_component_subgraphs() was removed from networkx).
    FCG = G.subgraph(max(nx.connected_components(G), key=len)).copy()
    # Current-flow betweenness centrality uses an electrical current model for
    # information spreading, in contrast to betweenness centrality's shortest paths.
    betweeness = nx.current_flow_betweenness_centrality(FCG)
    betweeness_df = pd.DataFrame(list(betweeness.items()), columns=['Facility', 'Betweeness'])
    betweeness_df = betweeness_df.sort_values(by='Betweeness', ascending=False)
    # Closeness centrality: the distance of a node to all other nodes in the graph,
    # or, if the graph is not connected, to all nodes in its connected component.
    closeness = nx.closeness_centrality(FCG)
    closeness_df = pd.DataFrame(list(closeness.items()), columns=['Facility', 'Closeness'])
    closeness_df = closeness_df.sort_values(by='Closeness', ascending=False)
    # Eigenvector centrality: a node's centrality based on the centrality of its
    # neighbors, i.e. how connected a node is to other highly connected nodes.
    eigenvector = nx.eigenvector_centrality(FCG)
    eigenvector_df = pd.DataFrame(list(eigenvector.items()), columns=['Facility', 'Eigenvector'])
    eigenvector_df = eigenvector_df.sort_values(by='Eigenvector', ascending=False)
    # Create dataframe of facility info
    fac_info = ny_tri[['tri_facility_id', 'facility_name', 'primary_naics', 'parent_company_name']].drop_duplicates()
    fac_info.rename(columns={'facility_name': 'Facility'}, inplace=True)
    # Merge everything
    describeNetwork = degrees_df.merge(
        clust_coefficients_df, on='Facility').merge(
        betweeness_df, on='Facility').merge(
        closeness_df, on='Facility').merge(
        eigenvector_df, on='Facility').merge(
        fac_info, on='Facility', how='left').merge(
        chems, on='Facility', how='left')
    describeNetwork = describeNetwork.sort_values(by='Degrees', ascending=False)
    describeNetwork.to_csv('output/describeNetwork.csv')
Example 12: attack_based_max_eigenvector
import networkx as nx

import ECT  # project-local module providing driver-node computations


def attack_based_max_eigenvector(G):
    """ Recalculated eigenvector centrality attack: repeatedly isolate the
    currently most central node and track the number of driver nodes ND
    after each step.
    """
    n = G.number_of_nodes()
    tot_ND = [0] * (n + 1)
    tot_T = [0] * (n + 1)
    ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
    tot_ND[0] = ND
    tot_T[0] = 0
    for i in range(1, n + 1):
        # Recalculate every node's eigenvector centrality.
        allEigenvectorCentrality = nx.eigenvector_centrality(G, max_iter=1000, weight=None)
        # Get the node with maximal eigenvector centrality.
        node = max(allEigenvectorCentrality, key=allEigenvectorCentrality.get)
        # Remove all edges adjacent to that node; list() avoids mutating
        # the graph while iterating over its edge views.
        if not nx.is_directed(G):  # undirected graph
            for key in list(G[node]):
                G.remove_edge(node, key)
        else:  # directed graph
            for x in [v for u, v in G.out_edges(node)]:
                G.remove_edge(node, x)
            for x in [u for u, v in G.in_edges(node)]:
                G.remove_edge(x, node)
        ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
        tot_ND[i] = ND
        tot_T[i] = i
    return (tot_ND, tot_T)
Example 13: _graph_centrality_measures
def _graph_centrality_measures(self, df_totals):
    '''
    INPUT: DataFrame
    OUTPUT: dict, dict, dict
    For every participant, calculates degree centrality, Eigenvector centrality, and
    weighted Eigenvector centrality (the last being weighted by the df's 'cnt' column).
    '''
    # Requires module-level: import networkx as nx.
    df = df_totals.copy()
    # Keep one row per pair so each edge appears only once.
    df = df[df['participantID'] > df['participantID.B']]
    # from_pandas_dataframe() was removed from networkx; use from_pandas_edgelist().
    G = nx.from_pandas_edgelist(df, 'participantID', 'participantID.B', edge_attr='cnt')
    degree_centrality = nx.degree_centrality(G)
    eigen_centrality = nx.eigenvector_centrality(G)
    eigen_centrality_weighted = nx.eigenvector_centrality(G, weight='cnt')
    return degree_centrality, eigen_centrality, eigen_centrality_weighted
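To see what the weight argument changes, here is a small hedged sketch on an invented toy graph (the node names and 'cnt' values are illustrative only): the unweighted scores treat all ties equally, while the weighted ones favor the heavy a-b edge.

import networkx as nx

# Assumed toy graph; 'cnt' mimics the interaction counts used above.
G = nx.Graph()
G.add_edge("a", "b", cnt=10)
G.add_edge("b", "c", cnt=1)
G.add_edge("a", "c", cnt=1)

print(nx.eigenvector_centrality(G))                # unweighted
print(nx.eigenvector_centrality(G, weight="cnt"))  # weighted by 'cnt'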
Example 14: test_K5
def test_K5(self):
    """Eigenvector centrality: K5"""
    # Requires module-level: import math, networkx as nx, and an
    # assert_almost_equal helper (e.g. from nose.tools).
    G = nx.complete_graph(5)
    b = nx.eigenvector_centrality(G)
    # Every node in K5 is equivalent, so the Euclidean-normalised
    # eigenvector assigns each node 1/sqrt(5).
    v = math.sqrt(1 / 5.0)
    b_answer = dict.fromkeys(G, v)
    for n in sorted(G):
        assert_almost_equal(b[n], b_answer[n])
    nstart = dict([(n, 1) for n in G])
    b = nx.eigenvector_centrality(G, nstart=nstart)
    for n in sorted(G):
        assert_almost_equal(b[n], b_answer[n])
    b = nx.eigenvector_centrality_numpy(G)
    for n in sorted(G):
        assert_almost_equal(b[n], b_answer[n], places=3)
Example 15: node_eigenvector_centrality
import numpy as np
import networkx as nx
from sklearn.preprocessing import StandardScaler

# percentage_removed() is a project-local helper that thresholds the
# adjacency matrix, keeping a given fraction of edges.


def node_eigenvector_centrality(X):
    """
    based on networkx function: eigenvector_centrality
    """
    n = int(np.sqrt(X.shape[1]))  # array dimensions must be integers
    XX = np.zeros((X.shape[0], n))
    for i, value in enumerate(X):
        adj_mat = value.reshape((n, -1))
        # Min-max normalise and invert, so small distances become strong edges.
        adj_mat = (adj_mat - np.min(adj_mat)) / (np.max(adj_mat) - np.min(adj_mat))
        adj_mat = 1 - adj_mat
        # th = np.mean(adj_mat) - 0.2
        # adj_mat = np.where(adj_mat < th, adj_mat, 0.)
        percent, th, adj_mat, triu = percentage_removed(adj_mat, 0.78)
        print("percent = {0}, threshold position = {1}, threshold = {2}\n".format(percent, th, triu[th]))
        g = nx.from_numpy_array(adj_mat)  # from_numpy_matrix() was removed from networkx
        print("Graph Nodes = {0}, Graph Edges = {1}".format(g.number_of_nodes(), g.number_of_edges()))
        print("\nEdge kept ratio, {0}".format(
            float(g.number_of_edges()) / ((g.number_of_nodes() * (g.number_of_nodes() - 1)) / 2)))
        eig_cent = nx.eigenvector_centrality(g, max_iter=10000)
        node_cent = np.zeros(g.number_of_nodes())
        for k in eig_cent:
            node_cent[k] = eig_cent[k]
        XX[i] = node_cent
        print("graph {0} => mean {1}, min {2}, max {3}".format(i, np.mean(XX[i]), np.min(XX[i]), np.max(XX[i])))
    # XX = XX*100
    ss = StandardScaler()
    XX = ss.fit_transform(XX.T).T
    return XX