本文整理汇总了Python中networkx.eccentricity函数的典型用法代码示例。如果您正苦于以下问题:Python eccentricity函数的具体用法?Python eccentricity怎么用?Python eccentricity使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了eccentricity函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_eccentricity
def test_eccentricity(self):
    """Exercise networkx.eccentricity over all of its call signatures.

    Covers: single-node query, full-graph dict, precomputed shortest paths,
    the ``v=`` keyword (scalar and list forms), a one-node graph, and an
    empty graph.  Uses nose-style assert helpers (assert_equal/assert_raises).
    """
    assert_equal(networkx.eccentricity(self.G, 1), 6)
    # With no node argument, a dict keyed by node is returned.
    e = networkx.eccentricity(self.G)
    assert_equal(e[1], 6)
    # Precomputed shortest-path lengths may be supplied via ``sp``.
    sp = dict(networkx.shortest_path_length(self.G))
    e = networkx.eccentricity(self.G, sp=sp)
    assert_equal(e[1], 6)
    # Scalar ``v`` returns a bare number, not a dict.
    e = networkx.eccentricity(self.G, v=1)
    assert_equal(e, 6)
    # This behavior changed in version 1.8 (ticket #739):
    # a list-valued ``v`` now returns a dict even for duplicates.
    e = networkx.eccentricity(self.G, v=[1, 1])
    assert_equal(e[1], 6)
    e = networkx.eccentricity(self.G, v=[1, 2])
    assert_equal(e[1], 6)
    # test against graph with one node
    G = networkx.path_graph(1)
    e = networkx.eccentricity(G)
    assert_equal(e[0], 0)
    e = networkx.eccentricity(G, v=0)
    assert_equal(e, 0)
    # Asking for a node absent from the graph raises NetworkXError.
    assert_raises(networkx.NetworkXError, networkx.eccentricity, G, 1)
    # test against empty graph
    G = networkx.empty_graph()
    e = networkx.eccentricity(G)
    assert_equal(e, {})
示例2: test_eccentricity
def test_eccentricity(self):
    """Node 1 of self.G has eccentricity 6 via every call signature."""
    assert_equal(networkx.eccentricity(self.G, 1), 6)
    ecc_all = networkx.eccentricity(self.G)
    assert_equal(ecc_all[1], 6)
    # Feeding precomputed shortest-path lengths must give the same answer.
    path_lengths = networkx.shortest_path_length(self.G)
    ecc_from_sp = networkx.eccentricity(self.G, sp=path_lengths)
    assert_equal(ecc_from_sp[1], 6)
    # Scalar and list forms of the ``v`` keyword both yield 6 here.
    single = networkx.eccentricity(self.G, v=1)
    assert_equal(single, 6)
    repeated = networkx.eccentricity(self.G, v=[1, 1])
    assert_equal(repeated, 6)
示例3: search20q
def search20q(G,ranking):
    """Interactive 20-questions style search over graph ``G``.

    Starts a BFS from the "Emotion" node, repeatedly pops candidates from a
    deque, asks the user about each (via the external ``ask`` helper), and
    returns the node when a non-tuple guess is confirmed with "yes".

    NOTE(review): ``ranking`` is only used by commented-out sort keys; the
    active sort key calls nx.eccentricity on a module-level graph ``U`` that
    is not defined in this function — confirm it exists at call time.
    """
    seen = []
    seen.append("Emotion")
    Q = deque()
    # Initial BFS successors of the root node.
    s = bfs_successors(G,"Emotion")
    #aggregateRanking[q
    #questions = sorted(s["Emotion"], key=ranking.get)
    #questions = sorted(s["Emotion"], key=G.out_degree)
    questions = sorted(s["Emotion"], key=lambda x: nx.eccentricity(U,x))
    print questions
    Q.extend(questions)
    seen.extend(questions)
    history = []  # list of (question gloss, answer) pairs already asked
    count = 0
    while(Q):
        count = count+1
        print count
        #Q = sorted(Q, key=ranking.get)
        #Q = deque(Q)
        print Q
        t = Q.pop()
        # Tuples are (gloss, answer) question nodes; bare values are guesses.
        if isinstance(t,tuple): qgloss = t[0]
        else: qgloss = t
        print "history"
        print history
        if history:
            # Skip anything whose gloss was already asked.
            tmp1 = zip(*history)
            if qgloss in tmp1[0]: continue
        ans = ask(qgloss)
        history.append((qgloss,ans))
        if not isinstance(t,tuple): # we are dealing w/ guess as opposed to
                                    # question
            if ans == "yes": #found it
                print "awesome!"
                return t
            else:
                pass # no inference/action for wrong guess
        elif isinstance(t,tuple): # we were asking a question, as opp to guess
            try:
                successors = bfs_successors(G,(qgloss,ans))
                # NOTE(review): ``successors`` is computed but the next line
                # sorts ``s[t]`` (the original root's successor map) instead —
                # possibly a bug; verify intended behavior before changing.
                #questions = sorted(s[t], key=ranking.get)
                questions = sorted(s[t], key=lambda x: nx.eccentricity(U,x))
                for q in questions:
                    if not q in seen:
                        seen.append(q)
                        #if(ans == "yes"):
                        Q.append(q)
                        #else:
                        #    Q.appendleft(q)
            except KeyError:
                successors = []
示例4: calculate_max_ecc
def calculate_max_ecc(graph, nodes):
    """Return the largest eccentricity among ``nodes`` of ``graph`` (0 if empty)."""
    largest = 0
    for current in nodes:
        # Keep the running maximum of per-node eccentricities.
        largest = max(largest, nx.eccentricity(graph, current))
    return largest
示例5: nodes_by_eccentricity
def nodes_by_eccentricity(graph):
    """Return the graph's nodes sorted by ascending eccentricity."""
    if len(graph) == 1:
        return graph.nodes()
    # need to crop the global shortest paths otherwise get
    # NetworkXError: Graph not connected: infinite path length
    ecc_map = nx.eccentricity(graph)
    return sorted(ecc_map, key=ecc_map.get)
示例6: whole_graph_metrics
def whole_graph_metrics(graph, weighted=False):
    """Return a dict of whole-graph summary metrics.

    Keys: 'avg_shortest_path', 'avg_eccentricity', 'avg_ccoeff',
    'avg_node_btwn', 'avg_edge_btwn', 'isolates'.

    :param graph: a networkx graph (must be connected for the path metrics)
    :param weighted: passed straight through as the ``weight`` argument of the
        networkx calls (so an edge-attribute name, or False for unweighted)
    """
    graph_metrics = {}
    # Shortest average path length
    graph_metrics['avg_shortest_path'] = \
        nx.average_shortest_path_length(graph, weight=weighted)
    # Average eccentricity.  list(...) around .values() so this also works on
    # Python 3 dict views (np.array of a view yields a 0-d object array).
    ecc_dict = nx.eccentricity(graph)
    graph_metrics['avg_eccentricity'] = np.mean(np.array(list(ecc_dict.values())))
    # Average clustering coefficient
    # NOTE: Option to include or exclude zeros
    graph_metrics['avg_ccoeff'] = \
        nx.average_clustering(graph, weight=weighted, count_zeros=True)
    # Average node betweeness
    avg_node_btwn_dict = nx.betweenness_centrality(graph, normalized=True)
    graph_metrics['avg_node_btwn'] = \
        np.mean(np.array(list(avg_node_btwn_dict.values())))
    # Average edge betweeness
    avg_edge_btwn_dict = nx.edge_betweenness_centrality(graph, normalized=True)
    graph_metrics['avg_edge_btwn'] = \
        np.mean(np.array(list(avg_edge_btwn_dict.values())))
    # Number of isolates.  nx.isolates returns a generator on networkx >= 2.0,
    # so materialize before taking len().
    graph_metrics['isolates'] = len(list(nx.isolates(graph)))
    return graph_metrics
示例7: basic_stats
def basic_stats(self):
    """Return a dict of basic stats for ``self.nx_graph``.

    Keys: 'bbc' (betweenness centrality), 'tn' (triangle counts per node),
    'ec' (eccentricity dict, or an 'NA' string when disconnected),
    'per'/'cnt' (periphery/center node lists), and 'Per'/'Cnt' (the
    corresponding subgraphs).
    """
    #not decided on what level to deal with this yet:
    #either return error un not dealing with unconnected files,
    #or making it deal with unconnected files: the latter.
    #How about with dealing with each independently.
    # if not nx.is_connected(g):
    #    conl= nx.connected_components(g)
    #    for n in conl:
    #       turn n into graph if it isnt
    #       calculate ec, per, cnt
    #       how and when to visualise the subgraphs?
    #       iterate to next n
    # Eccentricity is undefined (infinite) on disconnected graphs, so guard.
    if nx.is_connected(self.nx_graph):
        ec = nx.eccentricity(self.nx_graph)
    else:
        ec = 'NA - graph is not connected'
    per = nx.periphery(self.nx_graph)
    cnt = nx.center(self.nx_graph)
    result = { #"""fast betweenness algorithm"""
        'bbc': nx.brandes_betweenness_centrality(self.nx_graph),
        'tn': nx.triangles(self.nx_graph), # number of triangles
        'ec': ec,
        'per': per,
        'cnt': cnt,
        'Per': self.nx_graph.subgraph(per),
        'Cnt': self.nx_graph.subgraph(cnt)
    }
    return result
示例8: eccentricityAttributes
def eccentricityAttributes(graph):
    """Return eccentricity-derived features of ``graph`` as a list.

    In order: average effective eccentricity, effective diameter, effective
    radius, fraction of nodes that are central (ecc == radius), and the
    fraction of those central nodes tagged 'phobic'.
    """
    return_values = []
    #Average effective eccentricity
    eccVals = []
    e = 0  # NOTE(review): unused — kept as-is
    for n in graph.nodes():
        try:
            eccVals.append(nx.eccentricity(graph, v=n))
        except nx.NetworkXError:
            # Disconnected node: count its eccentricity as 0.
            eccVals.append(0)
    eccSum = 0
    center_nodes = 0
    phobic = 0
    diameter = max(eccVals)
    radius = min(eccVals)
    for i in range(len(eccVals)):
        if eccVals[i] == radius:
            center_nodes += 1
            # NOTE(review): graph.node[i] assumes node labels are the integers
            # 0..n-1 in graph.nodes() order, and a 'hydro' attribute on each —
            # confirm against callers.
            if graph.node[i]['hydro'] == 'phobic':
                phobic += 1
        eccSum += eccVals[i]
    return_values.append(eccSum / float(nx.number_of_nodes(graph)))
    #Effective diameter
    return_values.append(diameter)
    #Effective radius
    return_values.append(radius)
    #Percentage central nodes
    return_values.append(center_nodes / float(nx.number_of_nodes(graph)))
    #Percentage central nodes that are hydrophobic
    return_values.append(phobic / float(center_nodes))
    return return_values
示例9: connected_components
def connected_components(self):
    """
    Returns basic statistics about the connected components of the
    graph. This includes their number, order, size, diameter, radius,
    average clusttering coefficient, transitivity, in addition to basic
    info about the largest and smallest connected components.
    """
    cc_stats = {}
    components = nx.connected_components(self.graph.structure)
    for idx, members in enumerate(components):
        stats = cc_stats[idx] = {}
        stats["order"] = len(members)
        stats["size"] = len(self.graph.structure.edges(members))
        component_graph = self.graph.structure.subgraph(members)
        stats["avg_cluster"] = nx.average_clustering(component_graph)
        stats["transitivity"] = nx.transitivity(component_graph)
        # Diameter/radius are the max/min eccentricity within the component.
        ecc_values = nx.eccentricity(component_graph).values()
        stats["diameter"] = max(ecc_values)
        stats["radius"] = min(ecc_values)
    return cc_stats
示例10: print_graph_info
def print_graph_info(graph):
e = nx.eccentricity(graph)
print 'graph with %u nodes, %u edges' % (len(graph.nodes()), len(graph.edges()))
print 'radius: %s' % nx.radius(graph, e) # min e
print 'diameter: %s' % nx.diameter(graph, e) # max e
print 'len(center): %s' % len(nx.center(graph, e)) # e == radius
print 'len(periphery): %s' % len(nx.periphery(graph, e)) # e == diameter
示例11: get_nations_network_by_year
def get_nations_network_by_year(year):
    """Build the trade network between nations for ``year``.

    Queries the flow_joined table, builds an undirected graph keyed by the
    reporting/partner slugs, and returns a JSON string with per-graph stats
    plus the raw flow rows.
    """
    cursor = get_db().cursor()
    # NOTE(review): ``year`` is interpolated into the SQL via %-formatting —
    # injection risk if it can come from user input; prefer a parameterized
    # query (cursor.execute(sql, (year,))).
    cursor.execute("""SELECT reporting, reporting_slug, partner, partner_slug, Flow, expimp,
    reporting_continent, partner_continent,reporting_type,partner_type
    FROM flow_joined
    WHERE reporting NOT LIKE "Worl%%"
    AND partner NOT LIKE "Worl%%"
    AND Flow != "null"
    AND year = %s
    """%(year)
    )
    table = [list(r) for r in cursor]
    # Re-shape each row into a labelled dict for the JSON payload.
    json_sql_response=[]
    for row in table:
        json_sql_response.append({
            "reporting": row[0],
            "reporting_id": row[1],
            "partner": row[2],
            "partner_id": row[3],
            "flow": row[4],
            "expimp": row[5],
            "reporting_continent": row[6],
            "partner_continent": row[7],
            "reporting_type": row[8],
            "partner_type": row[9]
        })
    # Create a graph instance
    G=nx.Graph()
    nodes = []
    for row in table:
        # row[1]/row[3] are the reporting/partner slugs used as node ids.
        nodes.append(row[1])
        nodes.append(row[3])
        # add edge to the graph
        G.add_edge(row[1], row[3])
    nodes = set(nodes)
    # add nodes to graph
    G.add_nodes_from(nodes)
    # Stats only make sense on a non-empty graph; diameter/eccentricity will
    # raise on a disconnected graph — presumably the data is connected per
    # year; verify.
    if len(G.nodes())>0:
        stats = {
            "average_clustering": nx.average_clustering(G),
            "center": nx.center(G),
            "diameter": nx.diameter(G),
            "eccentricity": nx.eccentricity(G)
        }
    else:
        stats=[]
    json_response = {}
    json_response["stats"] = stats
    json_response["network"] = json_sql_response
    return json.dumps(json_response,encoding="UTF8")
示例12: graph_diameter
def graph_diameter(graph):
    """Return the weight-aware diameter of ``graph`` (0 for an empty result).

    :param graph: a networkx graph whose edges may carry a 'weight' attribute
    :returns: the maximum eccentricity, or 0 when no eccentricities exist
    """
    # dict(...) because networkx >= 2.0 returns an iterator of
    # (source, distance_dict) pairs, while eccentricity(sp=...) needs a dict.
    sp = dict(nx.shortest_path_length(graph, weight='weight'))
    ecc = nx.eccentricity(graph, sp=sp)
    if ecc:
        dia = nx.diameter(graph, e=ecc)
    else:
        dia = 0
    return dia
示例13: graph_radius
def graph_radius(graph):
    """Return the weight-aware radius of ``graph`` (0 for an empty result).

    :param graph: a networkx graph whose edges may carry a 'weight' attribute
    :returns: the minimum eccentricity, or 0 when no eccentricities exist
    """
    # dict(...) because networkx >= 2.0 returns an iterator of
    # (source, distance_dict) pairs, while eccentricity(sp=...) needs a dict.
    sp = dict(nx.shortest_path_length(graph, weight='weight'))
    ecc = nx.eccentricity(graph, sp=sp)
    if ecc:
        rad = nx.radius(graph, e=ecc)
    else:
        rad = 0
    return rad
示例14: OrigEccentricity
def OrigEccentricity(self):
    """Return a 2-D array with the eccentricity of the origin node for all edges.

    Shape is (n_nodes, n_nodes): column j repeats the eccentricity column, so
    entry (i, j) is the eccentricity of node i.
    """
    sp = self.get_shortest_path_dict()
    # list(...) so this also works on Python 3, where .values() is a dict
    # view and np.array(view) would produce a 0-d object array.
    ecc_column = np.array(
        list(nx.eccentricity(self, sp=sp).values()), dtype=float).reshape(-1, 1)
    probas = np.dot(ecc_column, np.ones((1, self.number_of_nodes())))
    return probas
示例15: nodes_by_eccentricity
def nodes_by_eccentricity(graph):
    """Return nodes sorted by eccentricity, with node name as tie-breaker."""
    if len(graph) == 1:
        return graph.nodes()
    # need to crop the global shortest paths otherwise get
    # NetworkXError: Graph not connected: infinite path length
    eccentricities = {}
    try:
        eccentricities = nx.eccentricity(graph)
    except nx.exception.NetworkXError:
        # Not strongly connected: compute eccentricities per component instead.
        if not nx.is_strongly_connected(graph):
            #TODO: provide this function inside ANK, add memoization for intensive operation
            for component_nodes in nx.strongly_connected_components(graph):
                component = graph.subgraph(component_nodes)
                eccentricities.update(nx.eccentricity(component))
    # Pre-sort by name; the stable sort keeps lexical order for equal eccentricities.
    by_name = sorted(graph.nodes(), key=lambda node: node.fqdn)
    return sorted(by_name, key=lambda node: eccentricities[node])