This page collects typical usage examples of the Python function networkx.load_centrality. If you have been wondering what exactly load_centrality does, how to call it, or where to find working samples, the hand-picked code examples below should help.
Fifteen load_centrality code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
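Before the collected examples, a minimal sketch of the basic call may help orient readers (the graph below is illustrative, not taken from any of the examples): load_centrality accepts a graph and returns a dict mapping each node to its load, a betweenness-like measure of the share of shortest-path traffic routed through that node.

import networkx as nx

G = nx.path_graph(4)  # a small path graph: 0 - 1 - 2 - 3

# load centrality of every node, normalized by default
print(nx.load_centrality(G))  # {0: 0.0, 1: 0.666..., 2: 0.666..., 3: 0.0}

# a single node, and the unnormalized count
print(nx.load_centrality(G, v=1))                    # 0.666...
print(nx.load_centrality(G, v=1, normalized=False))  # 2.0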
Example 1: load_component
import collections
import networkx as nx
import util  # project-local helper that loads a graph from JSON

def load_component(seed_num, graph_json_filename=None, graph_json_str=None):
    if graph_json_filename is None and graph_json_str is None:
        return []
    if graph_json_str is None:
        G = util.load_graph(graph_json_filename=graph_json_filename)
    else:
        G = util.load_graph(graph_json_str=graph_json_str)
    # keep only components holding more than 10% of the graph's nodes
    components = [c for c in nx.connected_components(G) if len(c) > 0.1 * len(G)]
    total_size = sum(len(c) for c in components)
    total_nodes = 0
    rtn = []
    # give each component after the first a seed budget proportional to its size
    for comp in components[1:]:
        num_nodes = int(float(len(comp)) / total_size * seed_num)
        component = G.subgraph(list(comp))
        clse_cent = nx.load_centrality(component)
        collector = collections.Counter(clse_cent)
        clse_cent = collector.most_common(num_nodes)
        rtn += [node for node, _ in clse_cent]
        total_nodes += num_nodes
    # the first component receives whatever seed budget remains
    num_nodes = seed_num - total_nodes
    component = G.subgraph(list(components[0]))
    clse_cent = nx.load_centrality(component)
    collector = collections.Counter(clse_cent)
    clse_cent = collector.most_common(num_nodes)
    rtn += [node for node, _ in clse_cent]
    return rtn
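A hypothetical call to the function above (the file name and seed count are illustrative): it returns up to seed_num high-load-centrality nodes, spread across the large connected components roughly in proportion to their size.

seeds = load_component(10, graph_json_filename="graph.json")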
Example 2: test_not_strongly_connected
def test_not_strongly_connected(self):
    b = nx.load_centrality(self.D)
    result = {0: 5. / 12,
              1: 1. / 4,
              2: 1. / 12,
              3: 1. / 4,
              4: 0.000}
    for n in sorted(self.D):
        assert_almost_equal(result[n], b[n], places=3)
        assert_almost_equal(result[n], nx.load_centrality(self.D, n), places=3)
Example 3: test_p3_load
def test_p3_load(self):
    G = self.P3
    c = nx.load_centrality(G)
    d = {0: 0.000,
         1: 1.000,
         2: 0.000}
    for n in sorted(G):
        assert_almost_equal(c[n], d[n], places=3)
    c = nx.load_centrality(G, v=1)
    assert_almost_equal(c, 1.0)
    c = nx.load_centrality(G, v=1, normalized=True)
    assert_almost_equal(c, 1.0)
Example 4: load_centrality_month_airports
import networkx as nx
import pandas as pd

def load_centrality_month_airports(data):
    df = data.copy()
    df['DateOfDeparture'] = pd.to_datetime(df['DateOfDeparture'])
    # despite the column name, this is the ISO week number, so centrality is computed per week
    df['month'] = df['DateOfDeparture'].dt.week.astype(str)
    df['year'] = df['DateOfDeparture'].dt.year.astype(str)
    df['year_month'] = df[['month', 'year']].apply(lambda x: '-'.join(x), axis=1)
    df['year_month_dep'] = df[['Departure', 'month', 'year']].apply(lambda x: '-'.join(x), axis=1)
    df['year_month_arr'] = df[['Arrival', 'month', 'year']].apply(lambda x: '-'.join(x), axis=1)
    year_month = pd.unique(df['year_month'])
    G = nx.Graph()
    load_centrality = {}
    for i, item in enumerate(year_month):
        sub_df = df[df['year_month'] == item][['Departure', 'Arrival']]
        list_dep_arr = zip(sub_df['Departure'], sub_df['Arrival'])
        # edges accumulate across periods: G keeps every route seen so far
        G.add_edges_from(list_dep_arr)
        centrality_month = nx.load_centrality(G)
        centrality_month = pd.DataFrame(list(centrality_month.items()))
        centrality_month['year_month'] = [item] * centrality_month.shape[0]
        centrality_month['airport_year_month'] = centrality_month[centrality_month.columns[[0, 2]]].apply(lambda x: '-'.join(x), axis=1)
        centrality_month = dict(zip(centrality_month['airport_year_month'], centrality_month[1]))
        load_centrality.update(centrality_month)
    df['load_centrality_month_dep'] = df['year_month_dep'].map(load_centrality)
    df['load_centrality_month_arr'] = df['year_month_arr'].map(load_centrality)
    return df
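A minimal, hypothetical DataFrame with the three columns the function above expects; the dates and airport codes are made up:

import pandas as pd

toy = pd.DataFrame({
    'DateOfDeparture': ['2012-01-02', '2012-01-03', '2012-01-05'],
    'Departure': ['ORD', 'LAX', 'ORD'],
    'Arrival': ['LAX', 'JFK', 'JFK'],
})
enriched = load_centrality_month_airports(toy)
# enriched gains load_centrality_month_dep / load_centrality_month_arr columns,
# keyed by airport and the (week, year) period of each flight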
Example 5: node_load_centrality
import numpy as np
import networkx as nx
from sklearn.preprocessing import StandardScaler

def node_load_centrality(X):
    """
    based on networkx function: load_centrality
    """
    n_nodes = int(np.sqrt(X.shape[1]))  # each row of X is a flattened square adjacency matrix
    XX = np.zeros((X.shape[0], n_nodes))
    for i, value in enumerate(X):
        adj_mat = value.reshape((n_nodes, -1))
        # rescale to [0, 1] and invert so small distances become strong edges
        adj_mat = (adj_mat - np.min(adj_mat)) / (np.max(adj_mat) - np.min(adj_mat))
        adj_mat = 1 - adj_mat
        # percentage_removed is a project-local helper that thresholds the matrix
        percent, th, adj_mat, triu = percentage_removed(adj_mat, 0.86)
        print("percent = {0}, threshold position = {1}, threshold = {2}\n".format(percent, th, triu[th]))
        g = nx.from_numpy_matrix(adj_mat)
        print("Graph Nodes = {0}, Graph Edges = {1}".format(g.number_of_nodes(), g.number_of_edges()))
        print("\nEdge kept ratio, {0}".format(float(g.number_of_edges()) / ((g.number_of_nodes() * (g.number_of_nodes() - 1)) / 2)))
        deg_cent = nx.load_centrality(g, weight='weight')
        node_cent = np.zeros(g.number_of_nodes())
        for k in deg_cent:
            node_cent[k] = deg_cent[k]
        XX[i] = node_cent
        print("graph {0} => mean {1}, min {2}, max {3}".format(i, np.mean(XX[i]), np.min(XX[i]), np.max(XX[i])))
    ss = StandardScaler()
    XX = ss.fit_transform(XX.T).T
    return XX
Example 6: most_central
def most_central(self, F=1, cent_type='betweenness'):
    if cent_type == 'betweenness':
        ranking = nx.betweenness_centrality(self.G).items()
    elif cent_type == 'closeness':
        ranking = nx.closeness_centrality(self.G).items()
    elif cent_type == 'eigenvector':
        ranking = nx.eigenvector_centrality(self.G).items()
    elif cent_type == 'harmonic':
        ranking = nx.harmonic_centrality(self.G).items()
    elif cent_type == 'katz':
        ranking = nx.katz_centrality(self.G).items()
    elif cent_type == 'load':
        ranking = nx.load_centrality(self.G).items()
    elif cent_type == 'degree':
        ranking = nx.degree_centrality(self.G).items()
    else:
        raise ValueError("unknown centrality type: %s" % cent_type)
    ranks = [r for n, r in ranking]
    cent_dict = dict([(self.lab[n], r) for n, r in ranking])
    m_centrality = sum(ranks)
    if len(ranks) > 0:
        m_centrality = m_centrality / len(ranks)
    # create a graph with only the nodes above the cutoff centrality,
    # i.e. remove the low-centrality nodes
    thresh = F * m_centrality
    lab = {}
    for k in self.lab:
        lab[k] = self.lab[k]
    g = Graph(self.adj.copy(), self.char_list)  # project-local Graph wrapper
    for n, r in ranking:
        if r < thresh:
            g.G.remove_node(n)
            del g.lab[n]
    return (cent_dict, thresh, g)
Example 7: load_neighbors
import collections
import random
import networkx as nx

def load_neighbors(seed_num, graph=None, graph_json_filename=None, graph_json_str=None):
    if graph_json_filename is None and graph_json_str is None and graph is None:
        return []
    if graph is not None:
        G = graph
    elif graph_json_str is None:
        G = util.load_graph(graph_json_filename=graph_json_filename)
    else:
        G = util.load_graph(graph_json_str=graph_json_str)
    # reuse cached centrality values when the graph already carries them
    clse_cent = nx.get_node_attributes(G, "centrality")
    if len(clse_cent) == 0:
        clse_cent = nx.load_centrality(G)
        nx.set_node_attributes(G, "centrality", clse_cent)  # networkx 1.x argument order
        print("load_neighbors: computed load centrality")
    collector = collections.Counter(clse_cent)
    clse_cent = collector.most_common(SURROUND_TOP)  # SURROUND_TOP is a module-level constant
    nodes = [node for node, _ in clse_cent]
    current_seed = 0
    rtn = []
    # round-robin over the top-central nodes, sampling one fresh neighbor each pass
    while current_seed < seed_num:
        current_node = nodes[current_seed % len(nodes)]
        current_neighbors = G.neighbors(current_node)
        rtn += random.sample(set(current_neighbors) - set(rtn) - set(nodes), 1)
        current_seed += 1
    return rtn
Example 8: augmentNodes
def augmentNodes(g):
    r1 = nx.eigenvector_centrality_numpy(g)
    r2 = nx.degree_centrality(g)
    r3 = nx.betweenness_centrality(g)
    # weighted load centrality; see M. E. J. Newman, "Scientific collaboration
    # networks: II. Shortest paths, weighted networks, and centrality",
    # Phys. Rev. E 64, 016132 (2001).
    r5 = nx.load_centrality(g, weight='weight')
    r6 = nx.pagerank(g, alpha=0.85, personalization=None, max_iter=100, tol=1e-08, nstart=None, weight='weight')
    if nx.is_directed(g):
        r8 = nx.in_degree_centrality(g)
        r9 = nx.out_degree_centrality(g)
        # r10 = nx.hits(g, max_iter=100, tol=1e-08, nstart=None)
    else:
        r4 = nx.communicability_centrality(g)
        r7 = nx.clustering(g, weight='weight')
    for x in g.nodes():
        g.node[x]['eigenvector_centrality_numpy'] = r1[x]
        g.node[x]['degree_centrality'] = r2[x]
        g.node[x]['betweenness_centrality'] = r3[x]
        g.node[x]['load_centrality'] = r5[x]
        g.node[x]['pagerank'] = r6[x]
        if nx.is_directed(g):
            g.node[x]['in_degree_centrality'] = r8[x]
            g.node[x]['out_degree_centrality'] = r9[x]
            # g.node[x]['hits'] = r10[x]
        else:
            g.node[x]['communicability_centrality'] = r4[x]
            g.node[x]['clustering'] = r7[x]
    return g
Example 9: centrality
def centrality(net):
    values = {}
    close = nx.closeness_centrality(net, normalized=True)
    eigen = nx.eigenvector_centrality_numpy(net)
    page = nx.pagerank(net)
    bet = nx.betweenness_centrality(net, normalized=True)
    flow_c = nx.current_flow_closeness_centrality(net, normalized=True)
    flow_b = nx.current_flow_betweenness_centrality(net, normalized=True)
    load = nx.load_centrality(net, normalized=True)
    com_c = nx.communicability_centrality(net)
    com_b = nx.communicability_betweenness_centrality(net, normalized=True)
    degree = net.degree()
    # pairwise correlations between all centrality measures, one row per measure, written to bl.csv
    file3 = open("bl.csv", 'w')
    measures = [bet, load, degree, page, flow_b, com_c, com_b, eigen, close, flow_c]
    for xt in measures:
        for yt in measures:
            corr(xt.values(), yt.values(), file3)  # corr is a project-local helper
        print()
        file3.write("\n")
    file3.close()
    return values
Example 10: analyze_graph
import networkx as nx
from networkx.algorithms import bipartite
import community  # python-louvain package

def analyze_graph(G):
    # centralities and node metrics
    out_degrees = G.out_degree()
    in_degrees = G.in_degree()
    betweenness = nx.betweenness_centrality(G)
    eigenvector = nx.eigenvector_centrality_numpy(G)
    closeness = nx.closeness_centrality(G)
    pagerank = nx.pagerank(G)
    avg_neighbour_degree = nx.average_neighbor_degree(G)
    redundancy = bipartite.node_redundancy(G)
    load = nx.load_centrality(G)
    hubs, authorities = nx.hits(G)  # nx.hits returns a (hubs, authorities) pair
    vitality = nx.closeness_vitality(G)
    for name in G.nodes():
        G.node[name]['out_degree'] = out_degrees[name]
        G.node[name]['in_degree'] = in_degrees[name]
        G.node[name]['betweenness'] = betweenness[name]
        G.node[name]['eigenvector'] = eigenvector[name]
        G.node[name]['closeness'] = closeness[name]
        G.node[name]['pagerank'] = pagerank[name]
        G.node[name]['avg-neigh-degree'] = avg_neighbour_degree[name]
        G.node[name]['redundancy'] = redundancy[name]
        G.node[name]['load'] = load[name]
        G.node[name]['hits'] = hubs[name]  # hub score; authorities[name] is also available
        G.node[name]['vitality'] = vitality[name]
    # communities
    partitions = community.best_partition(G)
    for member, c in partitions.items():
        G.node[member]['community'] = c
    return G
Example 11: test_p2_load
def test_p2_load(self):
    G = nx.path_graph(2)
    c = nx.load_centrality(G)
    d = {0: 0.000,
         1: 0.000}
    for n in sorted(G):
        assert_almost_equal(c[n], d[n], places=3)
Example 12: f36
def f36(self):
    # start/stop are placeholders for the timing code commented out below
    start = 0
    s = nx.load_centrality(self.G).values()
    res = sum(s)
    stop = 0
    # self.feature_time.append(stop - start)
    return res
Example 13: test_unnormalized_p3_load
def test_unnormalized_p3_load(self):
    G = self.P3
    c = nx.load_centrality(G, normalized=False)
    d = {0: 0.000,
         1: 2.000,
         2: 0.000}
    for n in sorted(G):
        assert_almost_equal(c[n], d[n], places=3)
Example 14: test_p3_load
def test_p3_load(self):
    G = self.P3
    c = nx.load_centrality(G)
    d = {0: 0.000,
         1: 1.000,
         2: 0.000}
    for n in sorted(G):
        assert_almost_equal(c[n], d[n], places=3)
Example 15: forUndirected
def forUndirected(G):
    myList = [nx.eigenvector_centrality_numpy(G),
              nx.degree_centrality(G),
              nx.betweenness_centrality(G),
              nx.communicability_centrality(G),
              nx.load_centrality(G),
              nx.pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1e-08, nstart=None, weight='weight'),
              nx.clustering(G, weight='weight')]
    return myList