This article collects typical usage examples of the Python function networkx.degree_assortativity_coefficient. If you have been wondering what exactly degree_assortativity_coefficient does, how to call it, or what real usage looks like, the curated examples here may help.
Below are 15 code examples of the degree_assortativity_coefficient function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
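Before the collected examples, here is a minimal self-contained sketch of the basic call patterns; the toy graphs and weights are illustrative assumptions, not drawn from the examples below.

import networkx as nx

# Small illustrative graph with a 'weight' edge attribute.
G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 0.5), (2, 3, 1.5), (3, 4, 1.0), (2, 4, 2.0)])

# Pearson correlation of the degrees at either end of each edge; result lies in [-1, 1].
print(nx.degree_assortativity_coefficient(G))

# Weighted variant, using the 'weight' edge attribute.
print(nx.degree_assortativity_coefficient(G, weight='weight'))

# For directed graphs, x and y choose which degree ('in' or 'out') is used on each side.
D = nx.DiGraph([(1, 2), (2, 3), (3, 1), (1, 3)])
print(nx.degree_assortativity_coefficient(D, x='out', y='in'))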
Example 1: classify
def classify(request, pk):
#gets object based on id given
graph_file = get_object_or_404(Document, pk=pk)
#reads file into networkx graph based on extension
if graph_file.extension() == ".gml":
G = nx.read_gml(graph_file.uploadfile)
else:
G = nx.read_gexf(graph_file.uploadfile)
#closes file so we can delete it
graph_file.uploadfile.close()
#loads the algorithm and tests the algorithm against the graph
g_json = json_graph.node_link_data(G)
#save graph into json file
with open(os.path.join(settings.MEDIA_ROOT, 'graph.json'), 'w') as graph:
json.dump(g_json, graph)
with open(os.path.join(settings.MEDIA_ROOT, 'rf_classifier.pkl'), 'rb') as malgo:
algo_loaded = pickle.load(malgo, encoding="latin1")
dataset = np.array([G.number_of_nodes(), G.number_of_edges(), nx.density(G), nx.degree_assortativity_coefficient(G), nx.average_clustering(G), nx.graph_clique_number(G)])
print (dataset)
    #creates X to test against; scikit-learn's predict expects a 2D array of shape (n_samples, n_features)
    X = dataset.reshape(1, -1)
    prediction = algo_loaded.predict(X)
graph_type = check_prediction(prediction)
graph = GraphPasser(G.number_of_nodes(), G.number_of_edges(), nx.density(G), nx.degree_assortativity_coefficient(G), nx.average_clustering(G), nx.graph_clique_number(G))
#gives certain variables to the view
return render(
request,
'classification/classify.html',
{'graph': graph, 'prediction': graph_type}
)
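Example 1 feeds six graph-level summary statistics to a pickled scikit-learn model. A standalone sketch of building that feature vector follows; the random graph is an illustrative assumption, and since graph_clique_number was removed in NetworkX 3.x, the sketch computes the clique number via find_cliques instead:

import numpy as np
import networkx as nx

def graph_features(G):
    # the six summary features used in Example 1, as a single-sample 2D array
    return np.array([[G.number_of_nodes(), G.number_of_edges(), nx.density(G),
                      nx.degree_assortativity_coefficient(G),
                      nx.average_clustering(G),
                      max(len(c) for c in nx.find_cliques(G))]])  # clique number without the removed helper

print(graph_features(nx.erdos_renyi_graph(30, 0.2, seed=0)))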
Example 2: knn_pack
def knn_pack(graph, **kwargs):
    # collect caller-supplied metadata, then add assortativity and knn measures
    # (the original signature used *kwargs, which would break the key lookups below)
    t = dict(kwargs)
    t['asr'] = nx.degree_assortativity_coefficient(graph)
    t['weighted_asr'] = nx.degree_assortativity_coefficient(graph, weight='weight')
    if graph.is_directed():
        t['knn'] = nx.average_degree_connectivity(graph, source='out', target='in')
        if len(nx.get_edge_attributes(graph, 'weight')):
            t['weighted_knn'] = nx.average_degree_connectivity(graph, source='out', target='in', weight='weight')
    else:
        t['knn'] = nx.average_degree_connectivity(graph)
        if len(nx.get_edge_attributes(graph, 'weight')):
            t['weighted_knn'] = nx.average_degree_connectivity(graph, weight='weight')
    return t
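A hypothetical call might look like the following; the random graph and the graph_id keyword are illustrative assumptions (any metadata can be passed through **kwargs):

import networkx as nx

G = nx.gnp_random_graph(50, 0.1, seed=42, directed=True)
metrics = knn_pack(G, graph_id='demo')  # 'graph_id' is arbitrary pass-through metadata
print(metrics['asr'])
print(sorted(metrics['knn'].items())[:3])  # average neighbour degree, keyed by node degree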
Example 3: gpn_stats
def gpn_stats(genes, gpn, version):
LOGGER.info("Computing GPN statistics")
nodes = sorted(gpn.nodes_iter())
components = sorted(nx.connected_components(gpn), key=len, reverse=True)
ass = nx.degree_assortativity_coefficient(gpn)
deg = [gpn.degree(node) for node in nodes]
stats = pd.DataFrame(data={
"version": version,
"release": pd.to_datetime(RELEASE[version]),
"num_genes": len(genes),
"num_nodes": len(nodes),
"num_links": gpn.size(),
"density": nx.density(gpn),
"num_components": len(components),
"largest_component": len(components[0]),
"assortativity": ass,
"avg_deg": mean(deg),
"hub_deg": max(deg)
}, index=[1])
stats["release"] = pd.to_datetime(stats["release"])
dists = pd.DataFrame(data={
"version": version,
"release": [pd.to_datetime(RELEASE[version])] * len(nodes),
"node": [node.unique_id for node in nodes],
"degree": deg,
})
return (stats, dists)
Example 4: Attributes_of_Graph
def Attributes_of_Graph(G):
print "*Statistic attributes of graphs:"
print "N", nx.number_of_nodes(G)
print "M", nx.number_of_edges(G)
print "C", nx.average_clustering(G)
#print "<d>", nx.average_shortest_path_length(G)
print "r", nx.degree_assortativity_coefficient(G)
degree_list = list(G.degree_iter())
max_degree = 0
min_degree = 0
avg_degree_1 = 0.0
avg_degree_2 = 0.0
for node in degree_list:
avg_degree_1 = avg_degree_1 + node[1]
avg_degree_2 = avg_degree_2 + node[1]*node[1]
if node[1] > max_degree:
max_degree = node[1]
if node[1] < min_degree:
min_degree = node[1]
#end for
avg_degree = avg_degree_1/len(degree_list)
avg_degree_square = (avg_degree_2/len(degree_list)) / (avg_degree*avg_degree)
print "<k>", avg_degree
print "k_max", max_degree
print "H", avg_degree_square
print "DH", float(max_degree-min_degree)/G.number_of_nodes()
Example 5: draw_graph
def draw_graph(nodes, edges, graphs_dir, default_lang='all'):
lang_graph = nx.MultiDiGraph()
lang_graph.add_nodes_from(nodes)
for edge in edges:
if edges[edge] == 0:
lang_graph.add_edge(edge[0], edge[1])
else:
lang_graph.add_edge(edge[0], edge[1], weight=float(edges[edge]), label=str(edges[edge]))
# print graph info in stdout
# degree centrality
print('-----------------\n\n')
print(default_lang)
print(nx.info(lang_graph))
try:
# When ties are associated to some positive aspects such as friendship or collaboration,
# indegree is often interpreted as a form of popularity, and outdegree as gregariousness.
DC = nx.degree_centrality(lang_graph)
max_dc = max(DC.values())
max_dc_list = [item for item in DC.items() if item[1] == max_dc]
except ZeroDivisionError:
max_dc_list = []
# https://ru.wikipedia.org/wiki/%D0%9A%D0%BE%D0%BC%D0%BF%D0%BB%D0%B5%D0%BA%D1%81%D0%BD%D1%8B%D0%B5_%D1%81%D0%B5%D1%82%D0%B8
print('maxdc', str(max_dc_list), sep=': ')
# assortativity coef
AC = nx.degree_assortativity_coefficient(lang_graph)
print('AC', str(AC), sep=': ')
# connectivity
print("Слабо-связный граф: ", nx.is_weakly_connected(lang_graph))
print("количество слабосвязанных компонент: ", nx.number_weakly_connected_components(lang_graph))
print("Сильно-связный граф: ", nx.is_strongly_connected(lang_graph))
print("количество сильносвязанных компонент: ", nx.number_strongly_connected_components(lang_graph))
print("рекурсивные? компоненты: ", nx.number_attracting_components(lang_graph))
print("число вершинной связности: ", nx.node_connectivity(lang_graph))
print("число рёберной связности: ", nx.edge_connectivity(lang_graph))
# other info
print("average degree connectivity: ", nx.average_degree_connectivity(lang_graph))
print("average neighbor degree: ", sorted(nx.average_neighbor_degree(lang_graph).items(),
key=itemgetter(1), reverse=True))
# best for small graphs, and our graphs are pretty small
print("pagerank: ", sorted(nx.pagerank_numpy(lang_graph).items(), key=itemgetter(1), reverse=True))
plt.figure(figsize=(16.0, 9.0), dpi=80)
plt.axis('off')
pos = graphviz_layout(lang_graph)
nx.draw_networkx_edges(lang_graph, pos, alpha=0.5, arrows=True)
nx.draw_networkx(lang_graph, pos, node_size=1000, font_size=12, with_labels=True, node_color='green')
nx.draw_networkx_edge_labels(lang_graph, pos, edges)
# saving file to draw it with dot-graphviz
# changing overall graph view, default is top-bottom
lang_graph.graph['graph'] = {'rankdir': 'LR'}
# marking with blue nodes with maximum degree centrality
for max_dc_node in max_dc_list:
lang_graph.node[max_dc_node[0]]['fontcolor'] = 'blue'
write_dot(lang_graph, os.path.join(graphs_dir, default_lang + '_links.dot'))
# plt.show()
plt.savefig(os.path.join(graphs_dir, 'python_' + default_lang + '_graph.png'), dpi=100)
plt.close()
Example 6: compute_singlevalued_measures
def compute_singlevalued_measures(ntwk, weighted=True, calculate_cliques=False):
"""
Returns a single value per network
"""
iflogger.info("Computing single valued measures:")
measures = {}
iflogger.info("...Computing degree assortativity (pearson number) ...")
try:
measures["degree_pearsonr"] = nx.degree_pearsonr(ntwk)
except AttributeError: # For NetworkX 1.6
measures["degree_pearsonr"] = nx.degree_pearson_correlation_coefficient(ntwk)
iflogger.info("...Computing degree assortativity...")
try:
measures["degree_assortativity"] = nx.degree_assortativity(ntwk)
except AttributeError:
measures["degree_assortativity"] = nx.degree_assortativity_coefficient(ntwk)
iflogger.info("...Computing transitivity...")
measures["transitivity"] = nx.transitivity(ntwk)
iflogger.info("...Computing number of connected_components...")
measures["number_connected_components"] = nx.number_connected_components(ntwk)
iflogger.info("...Computing average clustering...")
measures["average_clustering"] = nx.average_clustering(ntwk)
if nx.is_connected(ntwk):
iflogger.info("...Calculating average shortest path length...")
measures["average_shortest_path_length"] = nx.average_shortest_path_length(ntwk, weighted)
if calculate_cliques:
iflogger.info("...Computing graph clique number...")
measures["graph_clique_number"] = nx.graph_clique_number(ntwk) # out of memory error
return measures
Example 7: degree_assortativity
def degree_assortativity(G):
    #this wrapper avoids errors caused by the NetworkX interface name change
if hasattr(nx, 'degree_assortativity_coefficient'):
return nx.degree_assortativity_coefficient(G)
elif hasattr(nx, 'degree_assortativity'):
return nx.degree_assortativity(G)
else:
        raise ValueError('Cannot compute degree assortativity: method not available')
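A brief usage sketch (the graph choice is an illustrative assumption); the hasattr checks make the wrapper work whether the installed NetworkX release exposes the old or the new function name:

import networkx as nx

G = nx.karate_club_graph()
print(degree_assortativity(G))  # delegates to whichever API is available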
Example 8: SR_nx_assortativity
def SR_nx_assortativity():
#os.chdir("SR_graphs")
os.chdir(IN_DIR_SR)
SR=nx.read_edgelist(f_in_graph_SR, create_using=nx.Graph()) #, data=(('weight',int),))
print(len(SR.nodes(data=True)))
print "Degree assortativity of UNWEIGHTED is %f " % nx.degree_assortativity_coefficient(SR)
#print "Sentiment (by value) numeric assortativity is %f " % nx.numeric_assortativity_coefficient(MENT, 'sentiment_val')
SR=nx.read_edgelist(f_in_graph_SR, create_using=nx.Graph(), data=(('weight',int),))
print(len(SR.nodes(data=True)))
print "Degree assortativity of WEIGHTED is %f " % nx.degree_assortativity_coefficient(SR, weight='weight')
cnt = 0
d=defaultdict(int)
d_val = defaultdict(int)
d1 = defaultdict(int)
    with open(f_in_user_sentiment) as f:
        for line in f:
            (uid, label, val) = line.split()
            uid = str(uid)  # strings are already unicode in Python 3
            d1[uid] = int(float(val) * 10000)
            if uid in SR:  # membership test on the graph itself is O(1), unlike SR.nodes()
                d[uid] = int(float(val) * 10000)
                d_val[uid] = int(label)
            else:
                cnt += 1
    print("Number of nodes for which we have sentiment but are not in the mention graph is ", cnt)
cnt = 0
    for node in list(SR.nodes()):  # copy the node list, since we remove nodes while iterating
        if node not in d1:
            cnt += 1
            SR.remove_node(node)
    print("Number of nodes that do not have a sentiment value, so we remove them from the mention graph", cnt)
    nx.set_node_attributes(SR, 'sentiment', d)
    nx.set_node_attributes(SR, 'sentiment_val', d_val)
    print("Final number of nodes in the graph ", len(SR.nodes(data=True)))
    print("Sentiment (by label) nominal numeric assortativity is %f " % nx.numeric_assortativity_coefficient(SR, 'sentiment'))
    print("Sentiment (by value) numeric assortativity is %f " % nx.numeric_assortativity_coefficient(SR, 'sentiment_val'))
Example 9: trn_stats
def trn_stats(genes, trn, t_factors, version):
LOGGER.info("Computing TRN statistics")
nodes = sorted(trn.nodes_iter())
node2id = {n: i for (i, n) in enumerate(nodes)}
id2node = {i: n for (i, n) in enumerate(nodes)}
(grn, node2id) = to_simple(trn.to_grn(), return_map=True)
nodes = sorted(grn.nodes_iter())
regulating = {node for (node, deg) in grn.out_degree_iter() if deg > 0}
regulated = set(nodes) - regulating
components = sorted(nx.weakly_connected_components(grn), key=len,
reverse=True)
data = dict()
for (a, b) in itertools.product(("in", "out"), repeat=2):
data["{a}_{b}_ass".format(a=a, b=b)] = nx.degree_assortativity_coefficient(grn, x=a, y=b)
census = triadic_census(grn)
forward = census["030T"]
feedback = census["030C"]
num_cycles = sum(1 for cyc in nx.simple_cycles(grn) if len(cyc) > 2)
in_deg = [grn.in_degree(node) for node in regulated]
out_deg = [grn.out_degree(node) for node in regulating]
data["version"] = version,
data["release"] = pd.to_datetime(RELEASE[version]),
data["num_genes"] = len(genes),
data["num_tf"] = len(t_factors),
data["num_nodes"] = len(nodes),
data["num_regulating"] = len(regulating),
data["num_regulated"] = len(regulated),
data["num_links"] = grn.size(),
data["density"] = nx.density(grn),
data["num_components"] = len(components),
data["largest_component"] = len(components[0]),
data["feed_forward"] = forward,
data["feedback"] = feedback,
data["fis_out"] = trn.out_degree(TranscriptionFactor[FIS_ID, version]),
data["hns_out"] = trn.out_degree(TranscriptionFactor[HNS_ID, version]),
data["cycles"] = num_cycles,
data["regulated_in_deg"] = mean(in_deg),
data["regulating_out_deg"] = mean(out_deg),
data["hub_out_deg"] = max(out_deg)
stats = pd.DataFrame(data, index=[1])
in_deg = [grn.in_degree(node) for node in nodes]
out_deg = [grn.out_degree(node) for node in nodes]
bc = nx.betweenness_centrality(grn)
bc = [bc[node] for node in nodes]
dists = pd.DataFrame({
"version": version,
"release": [pd.to_datetime(RELEASE[version])] * len(nodes),
"node": [id2node[node].unique_id for node in nodes],
"regulated_in_degree": in_deg,
"regulating_out_degree": out_deg,
"betweenness": bc
})
return (stats, dists)
Example 10: main
def main():
domain_name = 'baidu.com'
domain_pkts = get_data(domain_name)
node_cname, node_ip, visit_total, edges, node_main = get_ip_cname(domain_pkts[0]['details'])
for i in domain_pkts[0]['details']:
for v in i['answers']:
edges.append((v['domain_name'],v['dm_data']))
DG = nx.DiGraph()
DG.add_edges_from(edges)
    # find nodes whose domain name resolves directly to an IP
    for node in DG:
        if node in node_main and DG.successors(node) in node_ip:
            print(node)
    # examine how many IPs are associated with each cname
    for node in DG:
        if node in node_cname and DG.successors(node) not in node_cname:  # cnames directly connected to an ip
            print("node", DG.out_degree(node), DG.in_degree(node), DG.degree(node))
    # number of domain names associated with each cname
# for node in DG:
# if node in node_cname and DG.predecessors(node) not in node_cname:
# print len(DG.predecessors(node))
for node in DG:
if node in node_main:
            if len(DG.successors(node)) == 3:
                print(node)
                print(DG.successors(node))
    # print(sorted(nx.degree(DG).values()))
    print(nx.degree_assortativity_coefficient(DG))
    average_degree = sum(nx.degree(DG).values()) / (len(node_cname) + len(node_ip) + len(node_main))
    print(average_degree)
    print(len(node_cname) + len(node_ip) + len(node_main))
    print(len(edges))
    print(nx.degree_histogram(DG))
Example 11: compute_singlevalued_measures
def compute_singlevalued_measures(ntwk, weighted=True,
calculate_cliques=False):
"""
Returns a single value per network
"""
iflogger.info('Computing single valued measures:')
measures = {}
iflogger.info('...Computing degree assortativity (pearson number) ...')
try:
measures['degree_pearsonr'] = nx.degree_pearsonr(ntwk)
except AttributeError: # For NetworkX 1.6
        measures['degree_pearsonr'] = nx.degree_pearson_correlation_coefficient(ntwk)
iflogger.info('...Computing degree assortativity...')
try:
measures['degree_assortativity'] = nx.degree_assortativity(ntwk)
except AttributeError:
        measures['degree_assortativity'] = nx.degree_assortativity_coefficient(ntwk)
iflogger.info('...Computing transitivity...')
measures['transitivity'] = nx.transitivity(ntwk)
iflogger.info('...Computing number of connected_components...')
    measures['number_connected_components'] = nx.number_connected_components(ntwk)
iflogger.info('...Computing graph density...')
measures['graph_density'] = nx.density(ntwk)
iflogger.info('...Recording number of edges...')
measures['number_of_edges'] = nx.number_of_edges(ntwk)
iflogger.info('...Recording number of nodes...')
measures['number_of_nodes'] = nx.number_of_nodes(ntwk)
iflogger.info('...Computing average clustering...')
measures['average_clustering'] = nx.average_clustering(ntwk)
if nx.is_connected(ntwk):
iflogger.info('...Calculating average shortest path length...')
        measures['average_shortest_path_length'] = nx.average_shortest_path_length(ntwk, weighted)
else:
iflogger.info('...Calculating average shortest path length...')
        measures['average_shortest_path_length'] = nx.average_shortest_path_length(nx.connected_component_subgraphs(ntwk)[0], weighted)
if calculate_cliques:
iflogger.info('...Computing graph clique number...')
        measures['graph_clique_number'] = nx.graph_clique_number(ntwk)  # out of memory error
return measures
Example 12: main
def main():
"""
Entry point.
"""
if len(sys.argv) == 1:
sys.exit("Usage: python evolving_network.py <params file>")
# Load the parameters.
params = json.load((open(sys.argv[1], "r")))
seedNetwork = params["seedNetwork"]
# Setup the seed network.
if seedNetwork["name"] == "read_graphml":
G = networkx.convert_node_labels_to_integers(\
networkx.read_graphml(seedNetwork["args"]["path"]))
else:
G = getattr(networkx, seedNetwork["name"])(**seedNetwork["args"])
# Evolve G.
R = robustness(G, params["attackStrategy"], params["sequentialMode"])
countDown = params["stagnantEpochs"]
while countDown > 0:
if params["verbose"]:
v = numpy.var(G.degree().values()) # degree variance
l = networkx.average_shortest_path_length(G) # avg. path length
C = networkx.average_clustering(G) # clustering
r = networkx.degree_assortativity_coefficient(G) # assortativity
print("%.4f %.4f %.4f %.4f %.4f" %(R, v, l, C, r))
mutants = genMutants(G, params)
prevR = R
for mutant in mutants:
mutantR = robustness(mutant, params["attackStrategy"],
params["sequentialMode"])
if params["maximizeRobustness"] and mutantR > R or \
not params["maximizeRobustness"] and mutantR < R:
R = mutantR
G = mutant
if params["maximizeRobustness"] and R > prevR or \
not params["maximizeRobustness"] and R < prevR:
countDown = params["stagnantEpochs"]
else:
countDown -= 1
# Save G.
networkx.write_graphml(G, params["outFile"])
Example 13: analysis
def analysis(self):
self.compute_density()
self.degree_correlation = nx.degree_assortativity_coefficient(self)
self.compute_complexity()
self.compute_paths()
self.compute_overlap()
self.compute_variance()
self.pattern_rank = numpy.linalg.matrix_rank(self.ideal_pattern)
self.binary_rank = numpy.linalg.matrix_rank(self.ideal_pattern > 0)
try:
self.compute_modularity()
except nx.NetworkXError:
pass
self.generate_random_ensemble()
self.compute_zscores()
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
self.compute_essentialities()
Example 14: compute_network_stats
def compute_network_stats(G, inst):
    print('RECIP:%.5f' % reciprocity(G))
    print('MEAN_DEGREE:%.5f' % mean_degree(G))
    print('MEAN_NB_DEGREE:%.5f' % mean_nb_degree(G))
    Gu = G.to_undirected()
    print('AVG_CLUSTER:%.5f' % nx.average_clustering(Gu))
    print('DEGREE_ASSORT:%.5f' % nx.degree_assortativity_coefficient(Gu))
    print('MEAN_GEODESIC:%.5f' % nx.average_shortest_path_length(Gu))
    mg, d = mean_max_geodesic(Gu)
    print('MEAN_GEODESIC:%.5f' % mg)
    print('DIAMETER:%d' % int(d))
keep = []
for n in Gu.nodes_iter():
if n in inst:
Gu.node[n]['region'] = inst[n]['Region']
keep.append(n)
H = Gu.subgraph(keep)
    print('MOD_REGION:%.5f' % nx.attribute_assortativity_coefficient(H, 'region'))
Example 15: getStats
def getStats(graph):
stats = dict()
stats["Nodes"] = nx.number_of_nodes(graph)
stats["Edges"] = nx.number_of_edges(graph)
stats["Neighbors/node"] = 2 * float(stats["Edges"])/ stats["Nodes"]
c = nx.average_clustering(graph)
stats["Clustering coefficient"] = "%3.2f"%c
try:
r = nx.degree_assortativity_coefficient(graph)
stats["Degree assortativity"] = "%3.2f"%r
r = get_assortativity_coeff(graph)
# stats["Degree assortativity - own"] = "%3.2f"%r
    except Exception:  # bare except narrowed; assortativity can fail on degenerate degree sequences
print("Impossible to compute degree assortativity")
if (nx.is_connected(graph)):
stats['Diameter'] = nx.diameter(graph)
p = nx.average_shortest_path_length(graph)
stats["Characteristic path length"] = "%3.2f"%p
stats["Connected components"] = 1
else:
d = 0.0
p = 0.0
i = 0
for g in nx.connected_component_subgraphs(graph):
i += 1
d += nx.diameter(g)
if len(nx.nodes(g)) > 1:
p += nx.average_shortest_path_length(g)
p /= i
stats["Connected components"] = i
stats["Diameter - sum on cc"] = "%3.2f"%d
stats["Characteristic path length - avg on cc"] = "%3.2f"%p
dd = nx.degree_histogram(graph)
stats["Max degree"] = len(dd) - 1
return stats
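A possible invocation of getStats (the graph is an illustrative assumption; get_assortativity_coeff is an external helper of the source project, absorbed by the except branch if it is missing):

import networkx as nx

stats = getStats(nx.karate_club_graph())
for name, value in sorted(stats.items()):
    print("%s: %s" % (name, value))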