This article collects typical usage examples of the networkx.adjacency_matrix method in Python. If you are wondering what Python's networkx.adjacency_matrix does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the networkx module.
The following shows 15 code examples of networkx.adjacency_matrix, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
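Before the examples, a minimal orientation sketch (the graph and variable names here are illustrative only, not taken from any example below): nx.adjacency_matrix returns a SciPy sparse matrix whose row/column order follows G.nodes() unless an explicit nodelist is given.

import networkx as nx
import numpy as np

G = nx.karate_club_graph()                                       # any NetworkX graph
A = nx.adjacency_matrix(G)                                       # SciPy sparse matrix
A_ordered = nx.adjacency_matrix(G, nodelist=sorted(G.nodes()))   # pin an explicit node order
dense = A.toarray()                                              # dense NumPy array, if needed
print(dense.shape, np.count_nonzero(dense))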
Example 1: _fit
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def _fit(self):
    # Versions using sparse matrices
    # adj = nx.adjacency_matrix(self._G)
    # ident = sparse.identity(len(self._G.nodes)).tocsc()
    # sim = inv(ident - adj.multiply(self.beta).T) - ident
    # adj = nx.adjacency_matrix(self._G)
    # aux = adj.multiply(-self.beta).T
    # aux.setdiag(1 + aux.diagonal(), k=0)
    # sim = inv(aux)
    # sim.setdiag(sim.diagonal() - 1)
    # print(sim.nnz)
    # print(adj.nnz)
    # Version using dense matrices: sim = (I - beta * A^T)^{-1} - I
    adj = nx.adjacency_matrix(self._G)
    aux = adj.T.multiply(-self.beta).todense()
    np.fill_diagonal(aux, 1 + aux.diagonal())
    sim = np.linalg.inv(aux)
    np.fill_diagonal(sim, sim.diagonal() - 1)
    return sim
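For context, Example 1 inverts (I - beta * A^T) and subtracts the identity, which is the Katz-style similarity the commented sparse variants also compute. A standalone dense sketch on a toy graph (the beta value is an arbitrary illustrative choice; it must stay below the reciprocal of the spectral radius of A for the underlying series to converge):

import networkx as nx
import numpy as np

G = nx.karate_club_graph()
beta = 0.05                                                  # illustrative damping factor
A = nx.adjacency_matrix(G).toarray().astype(float)
I = np.eye(A.shape[0])
sim = np.linalg.inv(I - beta * A.T) - I                      # Katz-style similarity matrix
print(sim.shape)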
Example 2: __getitem__
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def __getitem__(self, item):
    """
    Returns an rdkit mol object
    :param item:
    :return:
    """
    smiles = self.df['smiles'][item]
    mol = Chem.MolFromSmiles(smiles)
    return mol

# # TESTS
# path = 'gdb13.rand1M.smi.gz'
# dataset = gdb_dataset(path)
#
# print(len(dataset))
# mol, _ = dataset[0]
# graph = mol_to_nx(mol)
# graph_sub = graph.subgraph([0, 3, 5, 7, 9])
# graph_sub_new = nx.convert_node_labels_to_integers(graph_sub, label_attribute='old')
# graph_sub_node = graph_sub.nodes()
# graph_sub_new_node = graph_sub_new.nodes()
# matrix = nx.adjacency_matrix(graph_sub)
# np_matrix = matrix.toarray()
# print(np_matrix)
# print('end')
Example 3: _update_embedding
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def _update_embedding(self, graph, original_embedding):
    r"""Performs the Network Embedding Update on the original embedding.
    Args:
        original_embedding (Numpy array): An array containing an embedding.
        graph (NetworkX graph): The embedded graph.
    Return types:
        embedding (Numpy array): An array containing the updated embedding.
    """
    embedding = self._normalize_embedding(original_embedding)
    adjacency = nx.adjacency_matrix(graph, nodelist=range(graph.number_of_nodes()))
    normalized_adjacency = normalize(adjacency, norm='l1', axis=1)
    for _ in range(self.iterations):
        embedding = (embedding +
                     self.L1 * (normalized_adjacency @ embedding) +
                     self.L2 * (normalized_adjacency @ (normalized_adjacency @ embedding)))
    return embedding
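The update loop in Example 3 is simple enough to try outside its class. A rough standalone sketch, where the iteration count and the L1/L2 weights are made-up values rather than the library's defaults:

import networkx as nx
import numpy as np
from sklearn.preprocessing import normalize

G = nx.karate_club_graph()
P = normalize(nx.adjacency_matrix(G, nodelist=range(G.number_of_nodes())), norm='l1', axis=1)
emb = np.random.rand(G.number_of_nodes(), 16)                # stand-in for a pre-computed embedding
l1, l2, iterations = 0.5, 0.25, 10                           # hypothetical hyper-parameters
for _ in range(iterations):
    emb = emb + l1 * (P @ emb) + l2 * (P @ (P @ emb))        # one propagation step
print(emb.shape)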
Example 4: fit
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def fit(self, graph):
    """
    Fitting a GraphWave model.
    Arg types:
        * **graph** *(NetworkX graph)* - The graph to be embedded.
    """
    self._set_seed()
    self._check_graph(graph)
    graph.remove_edges_from(nx.selfloop_edges(graph))
    self._create_evaluation_points()
    self._check_size(graph)
    self._G = pygsp.graphs.Graph(nx.adjacency_matrix(graph))
    if self.mechanism == "exact":
        self._exact_structural_wavelet_embedding()
    elif self.mechanism == "approximate":
        self._approximate_structural_wavelet_embedding()
    else:
        raise NameError("Unknown method.")
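The central call in Example 4 hands the NetworkX adjacency matrix to PyGSP. A minimal sketch of just that hand-off, assuming a graph without self-loops (mirroring the removal above):

import networkx as nx
from pygsp import graphs

nx_graph = nx.karate_club_graph()
nx_graph.remove_edges_from(nx.selfloop_edges(nx_graph))      # keep the adjacency clean for PyGSP
pygsp_graph = graphs.Graph(nx.adjacency_matrix(nx_graph))
print(pygsp_graph.N, pygsp_graph.Ne)                         # node and edge counts as seen by PyGSP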
Example 5: fit
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def fit(self, graph):
    """
    Fitting a NodeSketch model.
    Arg types:
        * **graph** *(NetworkX graph)* - The graph to be embedded.
    """
    self._set_seed()
    self._check_graph(graph)
    self._graph = graph
    self._num_nodes = len(graph.nodes)
    self._hash_values = self._generate_hash_values()
    self._sla = nx.adjacency_matrix(self._graph, nodelist=range(self._num_nodes)).tocoo()
    self._sla.data = np.array([1 for _ in range(len(self._sla.data))])
    self._sla_original = self._sla.copy()
    self._do_single_sketch()
    for _ in range(self.iterations - 1):
        self._augment_sla()
        self._do_single_sketch()
Example 6: _create_target_matrix
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def _create_target_matrix(self, graph):
    """
    Creating a normalized sparse adjacency matrix power target.
    Arg types:
        * **graph** *(NetworkX graph)* - The graph to be embedded.
    Return types:
        * **A_tilde** *(SciPy COO matrix)* - The target matrix.
    """
    weighted_graph = nx.Graph()
    for (u, v) in graph.edges():
        weighted_graph.add_edge(u, v, weight=1.0 / graph.degree(u))
        weighted_graph.add_edge(v, u, weight=1.0 / graph.degree(v))
    A_hat = nx.adjacency_matrix(weighted_graph,
                                nodelist=range(graph.number_of_nodes()))
    A_tilde = A_hat.dot(A_hat)
    return coo_matrix(A_tilde)
Example 7: train
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def train(self, G):
    self.num_node = G.number_of_nodes()
    self.matrix0 = sp.csr_matrix(nx.adjacency_matrix(G))
    t_1 = time.time()
    features_matrix = self._pre_factorization(self.matrix0, self.matrix0)
    t_2 = time.time()
    embeddings_matrix = self._chebyshev_gaussian(
        self.matrix0, features_matrix, self.step, self.mu, self.theta
    )
    t_3 = time.time()
    print("sparse NE time", t_2 - t_1)
    print("spectral Pro time", t_3 - t_2)
    self.embeddings = embeddings_matrix
    return self.embeddings
Example 8: calculate_edge_lengths
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def calculate_edge_lengths(G, verbose=True):
    # Calculate the lengths of the edges
    if verbose:
        print('Calculating edge lengths...')
    x = np.matrix(G.nodes.data('x'))[:, 1]
    y = np.matrix(G.nodes.data('y'))[:, 1]
    node_coordinates = np.concatenate([x, y], axis=1)
    node_distances = squareform(pdist(node_coordinates, 'euclidean'))
    adjacency_matrix = np.array(nx.adjacency_matrix(G).todense())
    adjacency_matrix = adjacency_matrix.astype('float')
    adjacency_matrix[adjacency_matrix == 0] = np.nan
    edge_lengths = np.multiply(node_distances, adjacency_matrix)
    edge_attr_dict = {index: v for index, v in np.ndenumerate(edge_lengths) if ~np.isnan(v)}
    nx.set_edge_attributes(G, edge_attr_dict, 'length')
    return G
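A small usage sketch for Example 8, assuming calculate_edge_lengths is in scope and every node carries numeric 'x' and 'y' attributes (the coordinates below are made up):

import networkx as nx

G = nx.path_graph(3)
coords = {0: (0.0, 0.0), 1: (3.0, 4.0), 2: (6.0, 4.0)}       # made-up node positions
nx.set_node_attributes(G, {n: xy[0] for n, xy in coords.items()}, 'x')
nx.set_node_attributes(G, {n: xy[1] for n, xy in coords.items()}, 'y')
G = calculate_edge_lengths(G, verbose=False)
print(nx.get_edge_attributes(G, 'length'))                   # expected: {(0, 1): 5.0, (1, 2): 3.0}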
Example 9: compute_feature_smoothness
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def compute_feature_smoothness(path, times=0):
    G_org = json_graph.node_link_graph(json.load(open(path + '-G.json')))
    # G_org = remove_unlabeled(G_org)
    if nx.is_directed(G_org):
        G_org = G_org.to_undirected()
    edge_num = G_org.number_of_edges()
    G = pygsp.graphs.Graph(nx.adjacency_matrix(G_org))
    feats = np.load(path + '-feats.npy')
    # smooth
    for i in range(times):
        feats = feature_broadcast(feats, G_org)
    np.save(path + '-feats_' + str(times) + '.npy', feats)
    min_max_scaler = preprocessing.MinMaxScaler()
    feats = min_max_scaler.fit_transform(feats)
    smoothness = np.zeros(feats.shape[1])
    for src, dst in G_org.edges():
        smoothness += (feats[src] - feats[dst]) * (feats[src] - feats[dst])
    smoothness = np.linalg.norm(smoothness, ord=1)
    print('The smoothness is: ', 2 * smoothness / edge_num / feats.shape[1])
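The core of the metric in Example 9 is the squared feature difference summed over edges. A self-contained sketch with random features standing in for the '-feats.npy' file (purely illustrative):

import networkx as nx
import numpy as np

G = nx.karate_club_graph()
feats = np.random.rand(G.number_of_nodes(), 8)               # stand-in feature matrix
smoothness = np.zeros(feats.shape[1])
for src, dst in G.edges():
    smoothness += (feats[src] - feats[dst]) ** 2
smoothness = np.linalg.norm(smoothness, ord=1)
print('The smoothness is:', 2 * smoothness / G.number_of_edges() / feats.shape[1])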
Example 10: compute_label_smoothness
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def compute_label_smoothness(path, rate=0.):
    G_org = json_graph.node_link_graph(json.load(open(path + '-G.json')))
    # G_org = remove_unlabeled(G_org)
    if nx.is_directed(G_org):
        G_org = G_org.to_undirected()
    class_map = json.load(open(path + '-class_map.json'))
    for k, v in class_map.items():
        if type(v) != list:
            class_map = convert_list(class_map)
        break
    labels = convert_ndarray(class_map)
    labels = np.squeeze(label_to_vector(labels))
    # smooth
    G_org = label_broadcast(G_org, labels, rate)
    with open(path + '-G_' + str(rate) + '.json', 'w') as f:
        f.write(json.dumps(json_graph.node_link_data(G_org)))
    edge_num = G_org.number_of_edges()
    G = pygsp.graphs.Graph(nx.adjacency_matrix(G_org))
    smoothness = 0
    for src, dst in G_org.edges():
        if labels[src] != labels[dst]:
            smoothness += 1
    print('The smoothness is: ', 2 * smoothness / edge_num)
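Likewise, the label-smoothness core of Example 10 is just the fraction of edges whose endpoints disagree. A self-contained sketch with random labels (illustrative only):

import networkx as nx
import numpy as np

G = nx.karate_club_graph()
labels = np.random.randint(0, 2, size=G.number_of_nodes())   # stand-in node labels
mismatches = sum(1 for src, dst in G.edges() if labels[src] != labels[dst])
print('The smoothness is:', 2 * mismatches / G.number_of_edges())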
Example 11: train
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def train(self, G):
    A = sp.csr_matrix(nx.adjacency_matrix(G))
    if not self.is_large:
        print("Running NetMF for a small window size...")
        deepwalk_matrix = self._compute_deepwalk_matrix(
            A, window=self.window_size, b=self.negative
        )
    else:
        print("Running NetMF for a large window size...")
        vol = float(A.sum())
        evals, D_rt_invU = self._approximate_normalized_laplacian(
            A, rank=self.rank, which="LA"
        )
        deepwalk_matrix = self._approximate_deepwalk_matrix(
            evals, D_rt_invU, window=self.window_size, vol=vol, b=self.negative
        )
    # factorize the DeepWalk matrix with truncated SVD
    u, s, _ = sp.linalg.svds(deepwalk_matrix, self.dimension)
    self.embeddings = sp.diags(np.sqrt(s)).dot(u.T).T
    return self.embeddings
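The closing SVD step of Example 11 can be reproduced in isolation. A sketch that factorizes the raw adjacency matrix as a stand-in for the DeepWalk matrix (the embedding dimension is an arbitrary choice):

import numpy as np
import networkx as nx
from scipy.sparse import csr_matrix, diags
from scipy.sparse.linalg import svds

A = csr_matrix(nx.adjacency_matrix(nx.karate_club_graph()), dtype=float)
dimension = 8                                                # illustrative embedding size
u, s, _ = svds(A, dimension)
embeddings = diags(np.sqrt(s)).dot(u.T).T
print(embeddings.shape)                                      # (number_of_nodes, dimension)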
Example 12: __init__
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def __init__(self, graph, hardConstraintPenalty):
    """
    :param graph: a NetworkX graph to be colored
    :param hardConstraintPenalty: penalty for a hard constraint (coloring violation)
    """
    # initialize instance variables:
    self.graph = graph
    self.hardConstraintPenalty = hardConstraintPenalty
    # a list of the nodes in the graph:
    self.nodeList = list(self.graph.nodes)
    # adjacency matrix of the nodes -
    # matrix[i, j] equals '1' if nodes i and j are connected, or '0' otherwise:
    self.adjMatrix = nx.adjacency_matrix(graph).todense()
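Example 12 only shows the constructor. A hedged sketch of how such a dense adjacency matrix could be used to count coloring violations (the random 3-coloring below is purely illustrative and not the class's actual cost method):

import networkx as nx
import numpy as np

G = nx.petersen_graph()
adj = np.asarray(nx.adjacency_matrix(G).todense())
colors = np.random.randint(0, 3, size=adj.shape[0])          # random tentative 3-coloring
# count each edge once: adjacent nodes sharing a color violate the constraint
violations = sum(int(adj[i, j] and colors[i] == colors[j])
                 for i in range(len(colors)) for j in range(i + 1, len(colors)))
print('coloring violations:', violations)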
Example 13: optimize
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def optimize(self):
    """
    Method to run the optimization and halt it when overfitting starts.
    The output matrices are all saved when optimization has finished.
    """
    self.best_modularity = 0
    self.stop_index = 0
    with tf.Session(graph=self.computation_graph) as session:
        self.init.run()
        self.logs = log_setup(self.args)
        print("Optimization started.\n")
        self.build_graph()
        feed_dict = {self.S_0: overlap_generator(self.G),
                     self.B1: np.array(nx.adjacency_matrix(self.G).todense()),
                     self.B2: modularity_generator(self.G)}
        for i in tqdm(range(self.args.iteration_number)):
            start = time.time()
            H = session.run(self.H, feed_dict=feed_dict)
            current_modularity = self.update_state(H)
            end = time.time()
            log_updater(self.logs, i, end - start, current_modularity)
            if self.stop_index > self.args.early_stopping:
                break
        self.initiate_dump(session, feed_dict)
Example 14: get_subgraph
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def get_subgraph(triples, triple_dict, whole_graph):
    # Only handles 1-hop neighborhoods for now
    # node identifiers are ints
    in_graph = set()
    for triple in triples:
        head = triple[0]
        tail = triple[1]
        in_graph.add(tuple(triple))
        for tri in triple_dict[head.item()]:
            single1 = (head, tri[0], tri[1])
            in_graph.add(single1)
        for tri in triple_dict[tail.item()]:
            single2 = (tail, tri[0], tri[1])
            in_graph.add(single2)
    in_kg = KnowledgeGraph()
    in_kg.load_triple_noweight(in_graph)
    in_kg.triple2graph_noweight()
    included_nodes = list(in_kg.G)
    adj_ingraph = nx.adjacency_matrix(whole_graph.G, nodelist=included_nodes).todense()
    return np.array(included_nodes), adj_ingraph
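The key call in Example 14 is adjacency_matrix with an explicit nodelist restricted to the nodes of interest. A minimal stand-alone illustration with an arbitrary subset of nodes:

import networkx as nx

G = nx.karate_club_graph()
included_nodes = [0, 5, 16, 33]                              # arbitrary subset of node ids
adj_sub = nx.adjacency_matrix(G, nodelist=included_nodes).todense()
print(adj_sub)                                               # 4x4 adjacency restricted to this subset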
Example 15: __graph_transition_matrix
# Required import: import networkx [as alias]
# Or: from networkx import adjacency_matrix [as alias]
def __graph_transition_matrix(G, sparse=True):
    A = nx.adjacency_matrix(G).astype('float')
    # normalize rows to sum to 1
    degs = A.sum(axis=1)
    # take care of zero degrees
    degs[degs == 0] = 1
    N = len(degs)
    if sparse == True:
        rev_degs = 1 / degs
        diag = scipy.sparse.dia_matrix((rev_degs.reshape((1, N)), np.array([0])), shape=(N, N))
        A = diag.dot(A)
    else:
        A = A.todense()
        A = A / degs.reshape((A.shape[0], 1))
    return A
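A quick sanity check for Example 15, assuming the function above is defined at module level in the same file (the leading double underscore only triggers name mangling inside a class body): every non-isolated node's row of the transition matrix should sum to 1.

import networkx as nx
import numpy as np

G = nx.karate_club_graph()
P = __graph_transition_matrix(G, sparse=True)
row_sums = np.asarray(P.sum(axis=1)).ravel()
print(np.allclose(row_sums, 1.0))                            # True: transition-matrix rows sum to 1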