This article collects typical usage examples of the Python function networkx.from_numpy_matrix: what the function does, how to call it, and how it is used in real code. The 15 code examples below are drawn from open-source projects and are ordered by popularity.
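Before the project examples, a minimal sketch of the basic call (NetworkX 2.x; NetworkX 3.0 removed this function in favor of nx.from_numpy_array): nonzero entries of the matrix become weighted edges.

import numpy as np
import networkx as nx

A = np.matrix([[0, 1, 2],
               [1, 0, 0],
               [2, 0, 0]])
G = nx.from_numpy_matrix(A)
print(G.edges(data=True))  # [(0, 1, {'weight': 1}), (0, 2, {'weight': 2})]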
Example 1: test_from_numpy_matrix_parallel_edges
def test_from_numpy_matrix_parallel_edges(self):
    """Tests that the :func:`networkx.from_numpy_matrix` function
    interprets integer weights as the number of parallel edges when
    creating a multigraph.
    """
    A = np.matrix([[1, 1], [1, 2]])
    # First, with a simple graph, each integer entry in the adjacency
    # matrix is interpreted as the weight of a single edge in the graph.
    expected = nx.DiGraph()
    edges = [(0, 0), (0, 1), (1, 0)]
    expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
    expected.add_edge(1, 1, weight=2)
    actual = nx.from_numpy_matrix(A, parallel_edges=True,
                                  create_using=nx.DiGraph())
    assert_graphs_equal(actual, expected)
    actual = nx.from_numpy_matrix(A, parallel_edges=False,
                                  create_using=nx.DiGraph())
    assert_graphs_equal(actual, expected)
    # Now each integer entry in the adjacency matrix is interpreted as the
    # number of parallel edges in the graph if the appropriate keyword
    # argument is specified.
    edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)]
    expected = nx.MultiDiGraph()
    expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
    actual = nx.from_numpy_matrix(A, parallel_edges=True,
                                  create_using=nx.MultiDiGraph())
    assert_graphs_equal(actual, expected)
    expected = nx.MultiDiGraph()
    expected.add_edges_from(set(edges), weight=1)
    # The sole self-loop (edge 0) on vertex 1 should have weight 2.
    expected[1][1][0]['weight'] = 2
    actual = nx.from_numpy_matrix(A, parallel_edges=False,
                                  create_using=nx.MultiDiGraph())
    assert_graphs_equal(actual, expected)
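The distinction the test exercises is easy to see in isolation; a small sketch (NetworkX 2.x) with a single integer entry:

import numpy as np
import networkx as nx

A = np.matrix([[0, 3],
               [0, 0]])
# With parallel_edges=True, the integer entry 3 becomes three parallel
# edges of weight 1 between nodes 0 and 1.
M = nx.from_numpy_matrix(A, parallel_edges=True, create_using=nx.MultiDiGraph())
print(M.number_of_edges())   # 3
# With parallel_edges=False, it becomes a single edge of weight 3.
M = nx.from_numpy_matrix(A, parallel_edges=False, create_using=nx.MultiDiGraph())
print(M[0][1][0]['weight'])  # 3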
Example 2: plot_reconstruction_result
def plot_reconstruction_result(res):
    """ Plot original and reconstructed graph plus time series
    """
    fig = plt.figure(figsize=(32, 8))
    gs = mpl.gridspec.GridSpec(1, 4)

    # original graph
    orig_ax = plt.subplot(gs[0])
    plot_graph(nx.from_numpy_matrix(res.A.orig), orig_ax)
    orig_ax.set_title('Original graph')

    # time series
    ax = plt.subplot(gs[1:3])
    sns.tsplot(
        time='time', value='theta',
        unit='source', condition='oscillator',
        estimator=np.mean, legend=False,
        data=compute_solutions(res),
        ax=ax)
    ax.set_title(r'$A_{{err}} = {:.2}, B_{{err}} = {:.2}$'.format(*compute_error(res)))

    # reconstructed graph
    rec_ax = plt.subplot(gs[3])
    tmp = res.A.rec
    tmp[abs(tmp) < 1e-1] = 0
    plot_graph(nx.from_numpy_matrix(tmp), rec_ax)
    rec_ax.set_title('Reconstructed graph')

    plt.tight_layout()
    save(fig, 'reconstruction_overview')
Example 3: make_connections
def make_connections(n, density=0.25):
    """
    This function will return a random adjacency matrix of size
    n x n. You read the matrix like this:

    if matrix[2,7] = 1, then cities '2' and '7' are connected.
    if matrix[2,7] = 0, then the cities are _not_ connected.

    :param n: number of cities
    :param density: controls the ratio of 1s to 0s in the matrix
    :returns: an n x n adjacency matrix
    """
    import networkx

    # Generate a random adjacency matrix and use it to build a networkx graph
    a = numpy.int32(numpy.triu((numpy.random.random_sample(size=(n, n)) < density)))
    G = networkx.from_numpy_matrix(a)

    # If the network is 'not connected' (i.e., there are isolated nodes)
    # generate a new one. Keep doing this until we get a connected one.
    # Yes, there are more elegant ways to do this, but I'm demonstrating
    # while loops!
    while not networkx.is_connected(G):
        a = numpy.int32(numpy.triu((numpy.random.random_sample(size=(n, n)) < density)))
        G = networkx.from_numpy_matrix(a)

    # Cities should be connected to themselves.
    numpy.fill_diagonal(a, 1)

    return a + numpy.triu(a, 1).T
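A quick usage sketch (the parameters are illustrative): the returned matrix is symmetric with a unit diagonal, so it round-trips cleanly through from_numpy_matrix.

import numpy
import networkx

m = make_connections(6, density=0.3)
assert (m == m.T).all() and (numpy.diag(m) == 1).all()
print(networkx.from_numpy_matrix(m).edges())  # includes self-loops from the diagonal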
Example 4: calc_covariance
def calc_covariance(self, method="graphlassocv", values="cov"):
    """
    Calculate the covariance matrix used to build the graph.

    Parameters
    ----------
    method: string
        Covariance estimation algorithm; currently only "graphlassocv"
    values: string
        Which fitted matrix to turn into the graph
        cov: covariance_
        pre: precision_
    """
    if method == "graphlassocv":
        self._model = covariance.GraphLassoCV()
    else:
        raise NotImplementedError(method)
    self._model_name = method
    self._model.fit(self._data)
    if values == "cov":
        self._graph = nx.from_numpy_matrix(self._model.covariance_)
    elif values == "pre":
        self._graph = nx.from_numpy_matrix(self._model.precision_)
    else:
        raise NotImplementedError(values)
    self._modeled = True
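Note that scikit-learn has since renamed GraphLassoCV to GraphicalLassoCV. A self-contained sketch of the same idea on synthetic data (the data and variable names here are illustrative, not from the original class):

import numpy as np
import networkx as nx
from sklearn.covariance import GraphicalLassoCV  # GraphLassoCV in older releases

rng = np.random.RandomState(0)
data = rng.randn(100, 5)              # 100 samples, 5 variables
model = GraphicalLassoCV().fit(data)
# Turn the sparse inverse covariance (precision matrix) into a graph.
G = nx.from_numpy_matrix(model.precision_)
print(G.number_of_edges())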
Example 5: _run_interface
def _run_interface(self, runtime):
    if not have_cv:
        raise ImportError("cviewer library is not available")

    THRESH = self.inputs.threshold
    K = self.inputs.number_of_permutations
    TAIL = self.inputs.t_tail
    edge_key = self.inputs.edge_key
    details = edge_key + '-thresh-' + str(THRESH) + '-k-' + str(K) + '-tail-' + TAIL + '.pck'

    # Fill in the data from the networks
    X = ntwks_to_matrices(self.inputs.in_group1, edge_key)
    Y = ntwks_to_matrices(self.inputs.in_group2, edge_key)
    PVAL, ADJ, _ = nbs.compute_nbs(X, Y, THRESH, K, TAIL)

    iflogger.info('p-values:')
    iflogger.info(PVAL)

    pADJ = ADJ.copy()
    for idx, _ in enumerate(PVAL):
        x, y = np.where(ADJ == idx + 1)
        pADJ[x, y] = PVAL[idx]

    # Create networkx graphs from the adjacency matrix
    nbsgraph = nx.from_numpy_matrix(ADJ)
    nbs_pval_graph = nx.from_numpy_matrix(pADJ)

    # Relabel nodes because they should not start at zero for our convention
    nbsgraph = nx.relabel_nodes(nbsgraph, lambda x: x + 1)
    nbs_pval_graph = nx.relabel_nodes(nbs_pval_graph, lambda x: x + 1)

    if isdefined(self.inputs.node_position_network):
        node_ntwk_name = self.inputs.node_position_network
    else:
        node_ntwk_name = self.inputs.in_group1[0]
    node_network = nx.read_gpickle(node_ntwk_name)
    iflogger.info('Populating node dictionaries with attributes from %s',
                  node_ntwk_name)
    for nid, ndata in node_network.nodes(data=True):
        nbsgraph.nodes[nid] = ndata
        nbs_pval_graph.nodes[nid] = ndata

    path = op.abspath('NBS_Result_' + details)
    iflogger.info(path)
    nx.write_gpickle(nbsgraph, path)
    iflogger.info('Saving output NBS edge network as %s', path)

    pval_path = op.abspath('NBS_P_vals_' + details)
    iflogger.info(pval_path)
    nx.write_gpickle(nbs_pval_graph, pval_path)
    iflogger.info('Saving output p-value network as %s', pval_path)
    return runtime
Example 6: get_connection_densities
def get_connection_densities(network, community_affiliation):
    """
    Get the density of within-module and between-module connections.

    inputs:
        network: adjacency matrix (NumPy array)
        community_affiliation: array indicating which community/module a node belongs to
    outputs:
        density of connections within modules
        density of connections between modules
    """
    import networkx as nx
    import numpy as np

    network[network > 0] = 1.  # binarize the network (in place)
    G = nx.from_numpy_matrix(network)  # original network
    for node in G.nodes():
        G.node[node]['community'] = community_affiliation[node]

    within_weights = list()
    between_weights = list()
    for edge in G.edges():
        if G.node[edge[0]]['community'] == G.node[edge[1]]['community']:
            within_weights.append(G.edge[edge[0]][edge[1]]['weight'])
        else:
            between_weights.append(G.edge[edge[0]][edge[1]]['weight'])

    connected_G = nx.from_numpy_matrix(np.ones(shape=network.shape))  # fully-connected network
    full_within_weights = list()
    full_between_weights = list()
    for node in connected_G.nodes():
        connected_G.node[node]['community'] = community_affiliation[node]
    for edge in connected_G.edges():
        if connected_G.node[edge[0]]['community'] == connected_G.node[edge[1]]['community']:
            full_within_weights.append(connected_G.edge[edge[0]][edge[1]]['weight'])
        else:
            full_between_weights.append(connected_G.edge[edge[0]][edge[1]]['weight'])

    within_density = sum(within_weights) / sum(full_within_weights)
    between_density = sum(between_weights) / sum(full_between_weights)
    return (within_density, between_density)
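A toy call (hypothetical data) to show the shape of the inputs. Note two caveats: the G.node / G.edge accessors require NetworkX 1.x, and the fully-connected reference built from np.ones includes self-loops, which dilute the within-module denominator.

import numpy as np

adj = np.array([[0, 1, 0, 0],
                [1, 0, 0, 0],
                [0, 0, 0, 1],
                [0, 0, 1, 0]], dtype=float)
membership = [0, 0, 1, 1]  # two modules of two nodes each
print(get_connection_densities(adj, membership))  # within density > between density here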
Example 7: export_networkx_graph
def export_networkx_graph(adjacency_matrix, weights):
    """Export networkx graph object for an inferred network.

    Export a weighted, directed graph object from the network of inferred
    (multivariate) interactions (e.g., multivariate TE), using the networkx
    class for directed graphs (DiGraph). Multiple options for the weight are
    available (see documentation of method get_adjacency_matrix for details).

    Args:
        adjacency_matrix : 2D numpy array
            adjacency matrix to be exported, returned by the
            get_adjacency_matrix() method of the Results() class
        weights : str
            weights for the adjacency matrix (see documentation of method
            get_adjacency_matrix for details)

    Returns:
        DiGraph instance
            directed graph of networkx package's DiGraph() class
    """
    # Use the 'weights' parameter (string) as the networkx edge property name
    # and the adjacency matrix entries as edge property values.
    custom_type = [(weights, type(adjacency_matrix[0, 0]))]
    custom_npmatrix = np.matrix(adjacency_matrix, dtype=custom_type)
    return nx.from_numpy_matrix(custom_npmatrix, create_using=nx.DiGraph())
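The trick this function relies on is NetworkX's documented handling of structured dtypes: the dtype's field names become edge attribute names instead of the default 'weight'. A minimal sketch, mirroring the compound-dtype example in the from_numpy_matrix docs:

import numpy as np
import networkx as nx

dt = [('weight', float), ('cost', int)]
A = np.matrix([[(1.0, 2)]], dtype=dt)
G = nx.from_numpy_matrix(A)
print(G[0][0]['weight'], G[0][0]['cost'])  # 1.0 2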
Example 8: sort_sentences
def sort_sentences(sentences, words, sim_func=get_similarity, pagerank_config={'alpha': 0.85, }):
    """Sort sentences from most to least important.

    Keyword arguments:
    sentences       -- list of sentences
    words           -- two-dimensional list; each sublist corresponds to a
                       sentence in `sentences` and holds its words
    sim_func        -- function computing the similarity of two word lists
    pagerank_config -- settings passed to pagerank
    """
    sorted_sentences = []
    _source = words
    sentences_num = len(_source)
    graph = np.zeros((sentences_num, sentences_num))

    # Build a symmetric sentence-similarity matrix.
    for x in xrange(sentences_num):
        for y in xrange(x, sentences_num):
            similarity = sim_func(_source[x], _source[y])
            graph[x, y] = similarity
            graph[y, x] = similarity

    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)  # this is a dict
    sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    for index, score in sorted_scores:
        item = AttrDict(index=index, sentence=sentences[index], weight=score)
        sorted_sentences.append(item)
    return sorted_sentences
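Stripped of the project-specific helpers (get_similarity, AttrDict), the core idiom is PageRank over a sentence-similarity matrix. A self-contained sketch with a toy matrix standing in for sim_func:

import numpy as np
import networkx as nx

sim = np.array([[0.0, 0.8, 0.1],
                [0.8, 0.0, 0.5],
                [0.1, 0.5, 0.0]])
scores = nx.pagerank(nx.from_numpy_matrix(sim), alpha=0.85)
# Sentence indices ordered by TextRank weight, highest first.
print(sorted(scores.items(), key=lambda kv: kv[1], reverse=True))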
Example 9: atoms_to_nxgraph
def atoms_to_nxgraph(atoms, cutoff):
    ni, nj = neighbour_list('ij', atoms, cutoff)
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    adjacency_matrix = np.zeros((len(atoms), len(atoms))).astype(int)
    for i, j in zip(ni, nj):
        adjacency_matrix[i, j] = 1
    graph = nx.from_numpy_matrix(adjacency_matrix)
    return graph
Example 10: node_closeness_centrality
def node_closeness_centrality(X):
    """
    based on networkx function: closeness_centrality
    """
    n_nodes = int(np.sqrt(X.shape[1]))  # each row is a flattened square matrix
    XX = np.zeros((X.shape[0], n_nodes))
    for i, value in enumerate(X):
        adj_mat = value.reshape((n_nodes, -1))
        adj_mat = (adj_mat - np.min(adj_mat)) / (np.max(adj_mat) - np.min(adj_mat))
        adj_mat = 1 - adj_mat
        # th = np.mean(adj_mat) - 0.23
        # adj_mat = np.where(adj_mat < th, adj_mat, 0.)
        percent, th, adj_mat, triu = percentage_removed(adj_mat, 0.27)  # in this context the percentage
        print("percent = {0}, threshold position = {1}, threshold = {2}\n".format(percent, th, triu[th]))
        g = nx.from_numpy_matrix(adj_mat)
        print("Graph Nodes = {0}, Graph Edges = {1}".format(g.number_of_nodes(), g.number_of_edges()))
        print("\nEdge kept ratio, {0}".format(float(g.number_of_edges()) / ((g.number_of_nodes() * (g.number_of_nodes() - 1)) / 2)))
        deg_cent = nx.closeness_centrality(g, normalized=True)
        node_cent = np.zeros(g.number_of_nodes())
        for k in deg_cent:
            node_cent[k] = deg_cent[k]
        XX[i] = node_cent
        print("graph {0} => mean {1}, min {2}, max {3}".format(i, np.mean(XX[i]), np.min(XX[i]), np.max(XX[i])))
    return XX
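percentage_removed is a project-specific helper, so here is a compact, self-contained stand-in for the same pipeline used in this example and the next: normalize a connectivity matrix into distances, sparsify with a plain threshold, and read centralities off the resulting graph.

import numpy as np
import networkx as nx

rng = np.random.RandomState(0)
conn = rng.rand(10, 10)
conn = (conn + conn.T) / 2                                   # symmetrize
dist = 1 - (conn - conn.min()) / (conn.max() - conn.min())   # similarity -> distance
np.fill_diagonal(dist, 0)
adj = np.where(dist < 0.4, dist, 0.0)                        # keep only the closest pairs
g = nx.from_numpy_matrix(adj)
print(nx.closeness_centrality(g))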
Example 11: edge_betweeness_centrality
def edge_betweeness_centrality(X):
    """
    based on networkx function: edge_betweenness_centrality
    """
    XX = np.zeros(X.shape)
    for i, value in enumerate(X):
        n_nodes = int(np.sqrt(len(value)))  # each row is a flattened square matrix
        adj_mat = value.reshape((n_nodes, -1))
        adj_mat = (adj_mat - np.min(adj_mat)) / (np.max(adj_mat) - np.min(adj_mat))
        adj_mat = 1 - adj_mat
        # th = np.mean(adj_mat) + 0.1
        # adj_mat = np.where(adj_mat < th, adj_mat, 0.)
        percent, th, adj_mat, triu = percentage_removed(adj_mat, 0.43)  # 43 #63 #73
        print("percent = {0}, threshold position = {1}, threshold = {2}\n".format(percent, th, triu[th]))
        g = nx.from_numpy_matrix(adj_mat)
        print("Graph Nodes = {0}, Graph Edges = {1}".format(g.number_of_nodes(), g.number_of_edges()))
        print("\nEdge kept ratio, {0}".format(float(g.number_of_edges()) / ((g.number_of_nodes() * (g.number_of_nodes() - 1)) / 2)))
        bet_cent = nx.edge_betweenness_centrality(g, weight='weight', normalized=True)
        edge_cent = np.zeros(adj_mat.shape)
        for k in bet_cent:
            edge_cent[k[0], k[1]] = bet_cent[k]
        XX[i] = edge_cent.reshape(-1)
        print("graph {0} => mean {1}, min {2}, max {3}".format(i, np.mean(XX[i]), np.min(XX[i]), np.max(XX[i])))
    return XX
Example 12: draw_network_by_years
def draw_network_by_years(df, start_year, end_year, trim):
    """ Constructs and draws the co-word networks for a span of years

    Parameters
    -----------------------------------------
    df: WoS references
    start_year: first year of the window (exclusive)
    end_year: last year of the window (inclusive)
    trim: degree threshold for including nodes in the graph

    Returns
    ----------------------------------
    coword networkx object
    """
    df_sub = df[(df.PY > start_year) & (df.PY <= end_year)]
    keys = keyword_counts(df_sub)

    print('Calculating co-word matrix')
    coword_df = coword_matrix(df_sub, keys.keys())

    coword_array = coword_df.as_matrix()
    np.fill_diagonal(coword_array, 0)
    coword_net = nx.from_numpy_matrix(coword_array)

    col_names = coword_df.columns.tolist()
    labels = {col_names.index(l): l for l in col_names}
    nx.set_node_attributes(coword_net, 'keyword', labels)
    nx.set_node_attributes(coword_net, 'between_central', nx.betweenness_centrality(coword_net))

    if trim > 0:
        coword_net = trim_nodes(coword_net, trim)
        labels = {n: labels[n] for n in coword_net.nodes()}
    return coword_net
Example 13: r_perturbSa
def r_perturbSa(g, p=None):
    '''Random perturbation with fixed parameters; p is the success
    probability of the Bernoulli trial for existing edges.'''
    A = nx.to_scipy_sparse_matrix(g)
    B = sparse.triu(A).toarray()
    n = len(g)
    e_num = len(g.edges())  # number of edges present in the graph
    # Success probability for non-edges, chosen so that the expected
    # edge count is preserved: e_num * p + (M - e_num) * q = e_num,
    # where M = n * (n - 1) / 2 is the number of possible edges.
    q = e_num * (1 - p) / (n * (n - 1) // 2 - e_num)
    listp = stats.bernoulli.rvs(p, size=e_num).tolist()
    listq = stats.bernoulli.rvs(q, size=n * (n - 1) // 2 - e_num).tolist()
    i = 0
    while i < n:
        j = i + 1  # skip the zeros on the diagonal
        while j < n:
            if B[i, j] == 1:
                B[i, j] = listp.pop()  # keep the edge with probability p
            else:
                B[i, j] = listq.pop()  # add an edge with probability q
            j = j + 1
        i = i + 1
    # Rebuild a Graph-type return object from the perturbed matrix.
    return nx.from_numpy_matrix(B, create_using=nx.Graph())
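A quick sketch of calling it (hypothetical parameters, NetworkX 2.x since the function uses nx.to_scipy_sparse_matrix): the perturbed graph should have roughly the same number of edges in expectation.

import networkx as nx

g = nx.gnp_random_graph(20, 0.2, seed=1)
h = r_perturbSa(g, p=0.9)
print(len(g.edges()), len(h.edges()))  # similar counts, different edge sets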
Example 14: __init__
def __init__(self, graph, communities=None):
    """ initialize partition of graph, with optional communities

    Parameters
    ----------
    graph : networkx graph
    communities : list of sets, optional
        a list of sets with nodes in each set
        if communities is None, will initialize with
        one per node

    Returns
    -------
    part : WeightedPartition object
    """
    # assert graph has edge weights, and no negative weights
    mat = nx.adjacency_matrix(graph).todense()
    if mat.min() < 0:
        raise ValueError("Graph has invalid negative weights")

    self.graph = nx.from_numpy_matrix(mat)
    if communities is None:
        self._communities = self._init_communities_from_nodes()
    else:
        self.set_communities(communities)
    self.total_edge_weight = graph.size(weight="weight")
    self.degrees = graph.degree(weight="weight")
Example 15: sortSentences
def sortSentences(sentences, words, sim_func=getSimilarity, pagerank_config={'alpha': 0.85, }):
    '''
    :param sentences: list of sentences used to compute the weights
    :param words: word lists corresponding to each sentence in `sentences`;
                  this parameter is a two-dimensional list
    :param sim_func: name of the function used to compute sentence similarity
    :param pagerank_config: settings passed to pagerank
    :return: sentences sorted by weight, in descending order
    '''
    sortedSentences = []
    _source = words
    sentencesNum = len(_source)  # size of the graph
    graph = np.zeros((sentencesNum, sentencesNum))
    for x in xrange(sentencesNum):
        for y in xrange(x, sentencesNum):
            similarity = sim_func(_source[x], _source[y])
            graph[x, y] = similarity
            graph[y, x] = similarity
    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)  # this is a dict
    sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    for index, score in sorted_scores:
        item = AttrDict(sentence=sentences[index], weight=score)
        sortedSentences.append(item)
    return sortedSentences