This article collects typical usage examples of the networkx.info method in Python. If you are unsure what networkx.info does or how to call it, the curated code examples below may help. You can also explore further usage examples from the networkx module that this method belongs to.
Below, 15 code examples of the networkx.info method are shown, sorted by popularity by default.
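Before the project-specific examples, here is a minimal sketch of what nx.info itself reports. It assumes an older NetworkX release (2.x), where nx.info and its n= argument are still available (the function was removed in NetworkX 3.0); the graph used is purely illustrative.

import networkx as nx  # assumes networkx < 3.0, where nx.info still exists

G = nx.path_graph(5)
print(nx.info(G))       # multi-line summary: name, type, node/edge counts, average degree
print(nx.info(G, n=1))  # per-node summary: degree and neighbours of node 1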
Example 1: printGraph
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def printGraph(graphity):
    # TODO add more info to print, alias and stuff, sample info
    # print dangling APIs
    # print dangling strings
    for item in graphity.nodes(data=True):
        print(item[0])
        if 'alias' in item[1]:
            print("Node alias: " + item[1]['alias'])
        # mix up API calls and strings and sort by offset
        callStringMerge = item[1]['calls'] + item[1]['strings']
        callStringMerge.sort(key=lambda x: x[0])
        for cx in callStringMerge:
            print(cx)

# Printing all the meta info to cmdline
Example 2: parse_seq_file
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def parse_seq_file(path_to_seq_file):
    seq_file_dict = input_parser(path_to_seq_file)
    A_seq_label_dict = {}
    A_input_path_dict = {}
    ordered_paths_list = []
    anno_path_dict = {}
    for a_seq_file in seq_file_dict:
        logging.info(a_seq_file)
        A_seq_label_dict[a_seq_file['aln_name']] = a_seq_file['seq_name']
        A_input_path_dict[a_seq_file['seq_name']] = a_seq_file['seq_path']
        ordered_paths_list.append(a_seq_file['seq_path'])
        anno_path_dict[a_seq_file['seq_name']] = a_seq_file['annotation_path']
    return A_seq_label_dict, A_input_path_dict, ordered_paths_list, anno_path_dict
Example 3: realign_all_nodes
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def realign_all_nodes(inGraph, input_dict):
    logging.info('Running realign_all_nodes')
    realign_node_list = []
    iso_list = inGraph.graph['isolates'].split(',')
    # Load genomes into memory
    # Only need to realign nodes with more than one isolate in them
    for node, data in inGraph.nodes(data=True):
        # print(data)
        if len(data['ids'].split(',')) > 1:
            realign_node_list.append(node)
    # Realign the nodes. This is where multiprocessing will come in.
    for a_node in realign_node_list:
        inGraph = local_node_realign_new(inGraph, a_node, input_dict[1])
    nx.write_graphml(inGraph, 'intermediate_split_unlinked.xml')
    return inGraph
Example 4: test_info
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def test_info(self):
    G=nx.path_graph(5)
    info=nx.info(G)
    expected_graph_info='\n'.join(['Name: path_graph(5)',
                                   'Type: Graph',
                                   'Number of nodes: 5',
                                   'Number of edges: 4',
                                   'Average degree: 1.6000'])
    assert_equal(info,expected_graph_info)
    info=nx.info(G,n=1)
    expected_node_info='\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 0 2'])
    assert_equal(info,expected_node_info)
Example 5: test_info_digraph
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def test_info_digraph(self):
    G=nx.DiGraph(name='path_graph(5)')
    G.add_path([0,1,2,3,4])
    info=nx.info(G)
    expected_graph_info='\n'.join(['Name: path_graph(5)',
                                   'Type: DiGraph',
                                   'Number of nodes: 5',
                                   'Number of edges: 4',
                                   'Average in degree: 0.8000',
                                   'Average out degree: 0.8000'])
    assert_equal(info,expected_graph_info)
    info=nx.info(G,n=1)
    expected_node_info='\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 2'])
    assert_equal(info,expected_node_info)
    assert_raises(nx.NetworkXError,nx.info,G,n=-1)
Example 6: test_info
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def test_info(self):
    G = nx.path_graph(5)
    G.name = "path_graph(5)"
    info = nx.info(G)
    expected_graph_info = '\n'.join(['Name: path_graph(5)',
                                     'Type: Graph',
                                     'Number of nodes: 5',
                                     'Number of edges: 4',
                                     'Average degree: 1.6000'])
    assert_equal(info, expected_graph_info)
    info = nx.info(G, n=1)
    expected_node_info = '\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 0 2'])
    assert_equal(info, expected_node_info)
Example 7: test_info_digraph
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def test_info_digraph(self):
    G = nx.DiGraph(name='path_graph(5)')
    nx.add_path(G, [0, 1, 2, 3, 4])
    info = nx.info(G)
    expected_graph_info = '\n'.join(['Name: path_graph(5)',
                                     'Type: DiGraph',
                                     'Number of nodes: 5',
                                     'Number of edges: 4',
                                     'Average in degree: 0.8000',
                                     'Average out degree: 0.8000'])
    assert_equal(info, expected_graph_info)
    info = nx.info(G, n=1)
    expected_node_info = '\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 2'])
    assert_equal(info, expected_node_info)
    assert_raises(nx.NetworkXError, nx.info, G, n=-1)
Example 8: computeMAP
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def computeMAP(predicted_edge_list, true_digraph, max_k=-1):
    """Computes the mean average precision (MAP).

    Args:
        predicted_edge_list (list): Predicted (source, target, weight) edges.
        true_digraph (object): True graph containing the original nodes and edges.
        max_k (int): Maximum number of edges to consider when computing precision.

    Returns:
        float: The MAP value.
    """
    true_digraph = true_digraph.to_directed()
    node_num = true_digraph.number_of_nodes()
    node_edges = []
    for i in range(node_num):
        node_edges.append([])
    for (st, ed, w) in predicted_edge_list:
        node_edges[st].append((st, ed, w))
    node_AP = [0.0] * node_num
    count = 0
    # debug
    # change undirected into directed when needed
    print(nx.info(true_digraph))
    for i in range(node_num):
        if true_digraph.out_degree(i) == 0:
            continue
        count += 1
        precision_scores, delta_factors = computePrecisionCurve(node_edges[i], true_digraph, max_k)
        precision_rectified = [p * d for p, d in zip(precision_scores, delta_factors)]
        if sum(delta_factors) == 0:
            node_AP[i] = 0
        else:
            node_AP[i] = float(sum(precision_rectified) / sum(delta_factors))
    try:
        map_val = sum(node_AP) / count
    except ZeroDivisionError:
        map_val = 0
    return map_val
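The docstring above describes MAP as an average, over source nodes, of the precision measured at the ranks where correct edges appear. The following self-contained sketch illustrates that idea on a toy digraph; it is not the project's code (computePrecisionCurve is replaced by an inline precision-at-rank loop) and the graph and scores are made-up values.

import networkx as nx

# Toy ground truth and ranked edge predictions per source node (hypothetical values).
true_digraph = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
predicted = {0: [(0, 1, 0.9), (0, 3, 0.5), (0, 2, 0.4)],
             1: [(1, 2, 0.8)]}

node_AP = []
for node, ranked in predicted.items():
    hits = 0
    precisions = []
    for k, (st, ed, w) in enumerate(ranked, start=1):
        if true_digraph.has_edge(st, ed):
            hits += 1
            precisions.append(hits / k)  # precision at the rank of each correct edge
    # Normalise by the number of correct predictions, mirroring the function above.
    node_AP.append(sum(precisions) / hits if hits else 0.0)

print(sum(node_AP) / len(node_AP))  # mean average precision over source nodes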
Example 9: info
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def info(G):
    """
    Wrapper for nx.info with some other helpers.
    """
    pairwise = len(list(pairwise_comparisons(G)))
    edge_blocked = len(list(edge_blocked_comparisons(G)))
    fuzz_blocked = len(list(fuzzy_blocked_comparisons(G)))
    output = [""]
    output.append("Number of Pairwise Comparisons: {}".format(pairwise))
    output.append("Number of Edge Blocked Comparisons: {}".format(edge_blocked))
    output.append("Number of Fuzzy Blocked Comparisons: {}".format(fuzz_blocked))
    return nx.info(G) + "\n".join(output)
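The wrapper above appends project-specific comparison counts to the nx.info summary. A minimal, self-contained sketch of the same pattern is shown below; the connected-components line is an illustrative placeholder, not part of the original project, and the snippet assumes a NetworkX release that still ships nx.info (removed in 3.0).

import networkx as nx

def graph_summary(G):
    # Append one extra, illustrative statistic to the standard nx.info text.
    extra = ["", "Number of connected components: {}".format(nx.number_connected_components(G))]
    return nx.info(G) + "\n".join(extra)

print(graph_summary(nx.path_graph(5)))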
Example 10: link_all_nodes
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def link_all_nodes(graph_obj):
    print('Linking nodes')
    logging.info('Running link_all_nodes')
    for isolate in graph_obj.graph['isolates'].split(','):
        logging.info(isolate)
        graph_obj = link_nodes(graph_obj, isolate)
    return graph_obj
Example 11: seq_recreate_check
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def seq_recreate_check(graph_obj, input_dict):
    for isolate in input_dict[1].keys():
        extracted_seq = extract_original_seq(graph_obj, isolate)
        original_seq_from_fasta = input_parser(input_dict[1][isolate])
        count = 0
        while count < len(extracted_seq):
            if extracted_seq[count] != original_seq_from_fasta[0]['DNA_seq'][count]:
                logging.warning(count)
                logging.warning(extracted_seq[count])
                logging.warning(original_seq_from_fasta[0]['DNA_seq'][count])
                logging.warning(extracted_seq[count-10:count + 10])
                logging.warning(original_seq_from_fasta[0]['DNA_seq'][count-10:count + 10])
            count += 1
        if extracted_seq.upper() == original_seq_from_fasta[0]['DNA_seq'].upper():
            logging.info('Sequence recreate pass')
            print('Sequence recreate pass')
            recreate_check_result = 'Pass'
        else:
            logging.error('Sequence recreate fail')
            logging.error(len(extracted_seq))
            logging.error(len(original_seq_from_fasta[0]['DNA_seq']))
            logging.error(extracted_seq[-10:])
            logging.error(original_seq_from_fasta[0]['DNA_seq'][-10:])
            recreate_check_result = 'Fail'
Example 12: add_graph_data
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def add_graph_data(graph_obj):
    count_dict = {}
    # Add start nodes
    for node, data in graph_obj.nodes(data=True):
        logging.info(node)
        logging.info(data)
        for an_isolate in data['ids'].split(','):
            if abs(int(data[an_isolate + '_leftend'])) == 1:
                graph_obj.graph[an_isolate + '_startnode'] = node
                if node not in count_dict.keys():
                    count_dict[node] = 1
                else:
                    count_dict[node] = count_dict[node] + 1
    logging.info(count_dict)
    most_start_node = ''
    most_start_node_number = 0
    for a_node in count_dict.keys():
        if count_dict[a_node] > most_start_node_number:
            most_start_node = a_node
            most_start_node_number = count_dict[a_node]
    graph_obj.graph['start_node'] = most_start_node

# ---------------------------------------------------- Alignment functions
Example 13: progressiveMauve_alignment
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def progressiveMauve_alignment(path_to_progressiveMauve, fasta_path_list, out_aln_name):
    """
    A wrapper around progressiveMauve, used by GenGraph to identify co-linear blocks.
    :param path_to_progressiveMauve: Absolute path to the progressiveMauve executable
    :param fasta_path_list: List of paths to fasta files
    :param out_aln_name: Name for the alignment file, appended to the mauve output
    :return:
    """
    # Maybe add --skip-gapped-alignment flag?
    logging.info(path_to_progressiveMauve)
    progressiveMauve_call = [path_to_progressiveMauve, '--output=globalAlignment_' + out_aln_name,
                             '--scratch-path-1=./mauveTemp', '--scratch-path-2=./mauveTemp'] + fasta_path_list
    try:
        result = call(progressiveMauve_call, stdout=open(os.devnull, 'wb'))
        # Check if the backbone file was created successfully
        bbone_file = open('globalAlignment_' + out_aln_name + '.backbone')
        number_of_lines = 3
        for i in range(number_of_lines):
            line = bbone_file.readline()
            print(len(line.split('\t')))
            if len(line.split('\t')) <= 1:
                logging.error('progressiveMauve_call error: output of progressiveMauve empty')
                print('Error: progressiveMauve_call output appears empty.')
                quit()
        return result
    except OSError:
        logging.error('progressiveMauve_call error')
        return 'progressiveMauve_call error'

# ---------------------------------------------------- Utility functions
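A hypothetical invocation of the wrapper above is sketched below; the executable path, FASTA file names, and output label are placeholders rather than values from the original project, and a ./mauveTemp directory is assumed to exist.

# Hypothetical usage of the wrapper above (placeholder paths and names):
progressiveMauve_alignment('/opt/mauve/progressiveMauve',
                           ['isolate_A.fasta', 'isolate_B.fasta'],
                           'demo_run')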
Example 14: extract_gene
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def extract_gene(seq_locus_id, seq_isolate_origin, graph_obj, annotation_path_dict):
    iso_anno_obj = input_parser(annotation_path_dict[3][seq_isolate_origin])
    tar_gene_anno = 'Not found'
    for entry in iso_anno_obj:
        if entry[2] == 'gene':
            if entry[8]['locus_tag'] == seq_locus_id:
                tar_gene_anno = entry
            if 'old_locus_tag' in entry[8].keys():
                if entry[8]['old_locus_tag'] == seq_locus_id:
                    tar_gene_anno = entry
    if tar_gene_anno != 'Not found':
        logging.info(tar_gene_anno[3], tar_gene_anno[4])
        logging.info(int(tar_gene_anno[4]) - int(tar_gene_anno[3]))
        logging.info(tar_gene_anno[6])
        out_seq = extract_original_seq_region(graph_obj, tar_gene_anno[3], tar_gene_anno[4], seq_isolate_origin)
        if tar_gene_anno[6] == '-':
            out_seq = reverse_compliment(out_seq)
        return out_seq
    else:
        return tar_gene_anno

logging.info('in function')

# ---------------------------------------------------- # Testing functions
Example 15: test_non_repeated_cuts
# Required import: import networkx [as alias]
# Or: from networkx import info [as alias]
def test_non_repeated_cuts():
    # The algorithm was repeating the cut {0, 1} for the giant biconnected
    # component of the Karate club graph.
    K = nx.karate_club_graph()
    G = max(list(nx.biconnected_component_subgraphs(K)), key=len)
    solution = [{32, 33}, {2, 33}, {0, 3}, {0, 1}, {29, 33}]
    cuts = list(nx.all_node_cuts(G))
    if len(solution) != len(cuts):
        print(nx.info(G))
        print("Solution: {}".format(solution))
        print("Result: {}".format(cuts))
    assert_true(len(solution) == len(cuts))
    for cut in cuts:
        assert_true(cut in solution)