本文整理汇总了Python中networkx.read_pajek函数的典型用法代码示例。如果您正苦于以下问题:Python read_pajek函数的具体用法?Python read_pajek怎么用?Python read_pajek使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了read_pajek函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
def main():
    """Load the Zachary karate-club network, randomly delete part of it,
    fit a GMM growth model to the remaining sample, simulate growth back
    up, and save a three-panel comparison figure as zachary_simulation.png."""
    # Wrap in nx.Graph: read_pajek returns a MultiGraph by default and we
    # do not want parallel edges here.
    zachary = nx.Graph(nx.read_pajek("karate.net"))
    zachary.name = "Original Zachary Data"
    print(nx.info(zachary))
    # Randomly remove 15 nodes to create a partial sample of the structure.
    zachary_subset = rand_delete(zachary, 15)
    zachary_subset.name = "Randomly Deleted Zachary Data"
    print(nx.info(zachary_subset))
    # Create the growth model from the sample and simulate.
    zachary_model = gmm.gmm(zachary_subset, R=karate_rule, T=node_ceiling_34)
    # Use tau=4 because data is so small (it's fun!)
    gmm.algorithms.simulate(zachary_model, 4, poisson=False,
                            new_name="Simulation from sample")
    # Report and visualize original, sample, and simulation side by side.
    print(nx.info(zachary_model.get_base()))
    fig = plt.figure(figsize=(30, 10))
    fig.add_subplot(131)
    nx.draw_spring(zachary, with_labels=False, node_size=45, iterations=5000)
    plt.text(0.01, -0.1, "Original Karate Club", color="darkblue", size=20)
    fig.add_subplot(132)
    nx.draw_spring(zachary_subset, with_labels=False, node_size=45, iterations=5000)
    plt.text(0.01, -0.1, "Random sample of Karate Club", color="darkblue", size=20)
    fig.add_subplot(133)
    nx.draw_spring(zachary_model.get_base(), with_labels=False, node_size=45, iterations=5000)
    plt.text(0.01, -0.1, "Simulation from random sample", color="darkblue", size=20)
    plt.savefig("zachary_simulation.png")
示例2: main
def main():
g=net.Graph()
#snowball_sampling(g,'navalny')
#print 'done'
print 'loading'
g=net.read_pajek('lj_trim_graph.net')
print len(g)
#g=trim_degrees(g)
#net.write_pajek(g,'lj_trim_graph.net')
#print len(g)
print 'done'
#find the celebrities
print 'calculating degrees'
degrees=net.degree(g)
s_degrees=sorted_map(degrees)
res_degrees=s_degrees[0:9]
print res_degrees
print 'done'
#find the gossipmongers
print 'calculating closeness'
closeness=net.closeness_centrality(g)
s_closeness=sorted_map(closeness)
res_closeness=s_closeness[0:9]
print res_closeness
print 'done'
#find bottlenecks
print 'calculating betweenness'
betweenness=net.betweenness_centrality(g)
s_betweenness=sorted_map(betweenness)
res_betweenness=s_betweenness[0:9]
print res_betweenness
print 'done'
示例3: correlation_betweenness_degree_on_ErdosNetwork
def correlation_betweenness_degree_on_ErdosNetwork():
G = nx.read_pajek("dataset/Erdos971.net")
isolated_nodes = nx.isolates(G)
G.remove_nodes_from(isolated_nodes)
print nx.info(G)
ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
print "ND = ", ND
print "ND lambda:", ND_lambda
ND, driverNodes = ECT.get_driver_nodes(G)
print "ND =", ND
degrees = []
betweenness = []
tot_degree = nx.degree_centrality(G)
tot_betweenness = nx.betweenness_centrality(G,weight=None)
for node in driverNodes:
degrees.append(tot_degree[node])
betweenness.append(tot_betweenness[node])
with open("results/driver_degree_Erdos.txt", "w") as f:
for x in degrees:
print >> f, x
with open("results/driver_betweenness_Erdos.txt", "w") as f:
for x in betweenness:
print >> f, x
with open("results/tot_degree_Erdos.txt", "w") as f:
for key, value in tot_degree.iteritems():
print >> f, value
with open("results/tot_betweenness_Erdos.txt", "w") as f:
for key, value in tot_betweenness.iteritems():
print >> f, value
示例4: read_pajek
def read_pajek(fname, constructor=networkx.DiGraph):
g = networkx.read_pajek(fname)
print g.__class__
# Test for multi-plicitiy
for node in g.nodes_iter():
neighbors = g.neighbors(node)
assert len(neighbors) == len(set(neighbors)), "Not a simple graph..."
return constructor(g)
示例5: read_graph
def read_graph(G, gfile):
print "Reading in artist graph..."
try:
G = nx.read_pajek(gfile)
print "Read successfully!"
print "The artist graph currently contains " + str(len(G)) + " artists."
print "The artist graph currently contains " + str(nx.number_strongly_connected_components(G)) + " strongly connected components."
except IOError:
print "Could not find artistGraph"
示例6: _read_pajek
def _read_pajek(*args, **kwargs):
    """Read Pajek file and make sure that we get an nx.Graph or nx.DiGraph."""
    G = nx.read_pajek(*args, **kwargs)
    edges = G.edges()
    # read_pajek always yields a multigraph; converting below collapses
    # parallel edges, so warn when any are present.
    if len(set(edges)) < len(edges):  # multiple edges
        log.warning("Network contains multiple edges. These will be ignored.")
    if G.is_directed():
        return nx.DiGraph(G)
    else:
        return nx.Graph(G)
示例7: test_author_interaction
def test_author_interaction():
    """Integration test: author_interaction must produce Pajek output that
    is isomorphic to the stored expected graphs, with and without ignore_lat."""
    clean_data = './test/integration_test/data/clean_data.json'
    graph_nodes = './test/integration_test/data/graph_nodes.csv'
    graph_edges = './test/integration_test/data/graph_edges.csv'
    pajek_file = './.tmp/integration_test/lib/analysis/author/graph/generate/author_graph.net'
    req_output1 = './test/integration_test/data/req_data/test_generate1.net'
    req_output2 = './test/integration_test/data/req_data/test_generate2.net'
    # Case 1: latent authors ignored.
    author_interaction(clean_data, graph_nodes, graph_edges, pajek_file, ignore_lat=True)
    output_graph = nx.read_pajek(pajek_file)
    req_graph = nx.read_pajek(req_output1)
    assert nx.is_isomorphic(output_graph, req_graph)
    # Case 2: latent authors included.
    author_interaction(clean_data, graph_nodes, graph_edges, pajek_file, ignore_lat=False)
    output_graph = nx.read_pajek(pajek_file)
    req_graph = nx.read_pajek(req_output2)
    assert nx.is_isomorphic(output_graph, req_graph)
示例8: get_graph
def get_graph(graph_file, map_file, trim=False):
    """Convert a Pajek file into a relabeled networkx DiGraph.

    graph_file -- the Pajek file to convert to a networkx graph
    map_file   -- file from which the node id -> label mapping is built
    trim       -- either an integer or False; if an integer, the returned
                  graph keeps only nodes with degree greater than it
    """
    the_graph = net.DiGraph(net.read_pajek(graph_file))
    id_map = make_id_map(map_file)
    # Relabel in place to avoid copying the whole graph.
    net.relabel_nodes(the_graph, id_map, copy=False)
    if trim:
        the_graph = trim_degrees(the_graph, degree=trim)
    return the_graph
示例9: main
def main():
    """Read the Pajek network "a.net", display it via sh(), and compute
    clustering coefficients."""
    g = nx.Graph()
    # NOTE(review): read_pajek returns a MultiGraph; some networkx versions
    # reject multigraphs in nx.clustering — wrapping in nx.Graph(...) may
    # be needed. Confirm against the networkx version in use.
    g = nx.read_pajek("a.net")
    sh(g)
    nx.clustering(g)
示例10: read_file_net
def read_file_net(g,path=None):
#read .net file. Ues the function of nx.read_pajek()
g.clear()
try:
mg=nx.read_pajek(path,encoding='ascii')
#g=g.to_undirected()
g=nx.Graph(mg)#Tanslate mg(MultiGraph) to g(Graph)
except:
print "readFileTxt error"
return g
示例11: generate_demo_network_raga_recognition
def generate_demo_network_raga_recognition(network_file, community_file, output_network, colors = cons_net.colors, colorify = True, mydatabase = ''):
"""
This function generates a network used as a demo for demonstrating relations between phrases.
The configuration used to generate this network should ideally be the one that is used for the
raga recognition task reported in the paper.
"""
#loading the network
full_net = nx.read_pajek(network_file)
#loading community data
comm_data = json.load(open(community_file,'r'))
#loading all the phrase data
comm_char.fetch_phrase_attributes(comm_data, database = mydatabase, user= 'sankalp')
#getting all the communities from which we dont want any node in the graph in the demo
#obtaining gamaka communities
gamaka_comms = comm_char.find_gamaka_communities(comm_data)[0]
#obtaining communities with only phrases from one mbid
one_mbid_comms = comm_char.get_comm_1MBID(comm_data)
#collect phrases which should be removed from the graph
phrases = []
for c in gamaka_comms:
for n in comm_data[c]:
phrases.append(int(n['nId']))
for c in one_mbid_comms:
for n in comm_data[c]:
phrases.append(int(n['nId']))
print len(phrases)
#removing the unwanted phrases
full_net = raga_recog.remove_nodes_graph(full_net, phrases)
# colorify the nodes according to raga labels
if colorify:
cmd1 = "select raagaId from file where id = (select file_id from pattern where id =%d)"
con = psy.connect(database='ICASSP2016_10RAGA_2S', user='sankalp')
cur = con.cursor()
for n in full_net.nodes():
cur.execute(cmd1%(int(n)))
ragaId = cur.fetchone()[0]
full_net.node[n]['color'] = ragaId
#saving the network
nx.write_gexf(full_net, output_network)
示例12: exportD3
def exportD3(readName, dumpName):
#G = nx.path_graph(4)
#G = nx.barbell_graph(6,3)
G = nx.read_pajek(readName)
for n in G:
print 'node', n
G.node[n]['name'] = G.node[n]['label']
G.node[n]['size'] = G.node[n]['color']
d = json_graph.node_link_data(G)
json.dump(d, open(dumpName, 'w'))
print('wrote node-link json data to {}'.format(dumpName))
示例13: main
def main():
"""The main function"""
try:
file_name = sys.argv[1]
except IndexError:
print "Input file not given!"
raise
base_name = file_name.split('.')[0]
net = nx.read_pajek(file_name)
nx.draw_graphviz(net, with_labels=True, font_size=4,
node_size=100)
plt.draw()
plt.savefig(base_name + '.eps')
示例14: load_and_process_graph
def load_and_process_graph(filename):
"""Load the graph, normalize edge weights, compute pagerank, and store all
this back in node data."""
# Load the graph
graph = nx.DiGraph(nx.read_pajek(filename))
print "Loaded a graph (%d nodes, %d edges)" % (len(graph),
len(graph.edges()))
# Compute the normalized edge weights
for node in graph:
edges = graph.edges(node, data=True)
total_weight = sum([data['weight'] for (_, _, data) in edges])
for (_, _, data) in edges:
data['weight'] = data['weight'] / total_weight
# Get its PageRank, alpha is 1-tau where [RAB2009 says \tau=0.15]
page_ranks = nx.pagerank(graph, alpha=1-TAU)
for (node, page_rank) in page_ranks.items():
graph.node[node][PAGE_RANK] = page_rank
return graph
示例15: read_graph
def read_graph(self, subgraph_file=None):
    """Read a graph from `subgraph_file`, dispatching on its extension.

    Falls back to self.context_graph_file when no file is given. Returns
    an empty nx.MultiDiGraph for unrecognized extensions.
    """
    if subgraph_file is None:
        # BUG FIX: was assigned to the misspelled local 'subraph_file',
        # leaving subgraph_file as None and crashing on .split() below.
        subgraph_file = self.context_graph_file
    # Fixed log message: this method reads, it does not write.
    logging.info("Reading graph.")
    # Dispatch on the file extension.
    file_format = subgraph_file.split(".")[-1]
    if file_format == "graphml":
        return nx.read_graphml(subgraph_file)
    elif file_format == "gml":
        return nx.read_gml(subgraph_file)
    elif file_format == "gexf":
        return nx.read_gexf(subgraph_file)
    elif file_format == "net":
        return nx.read_pajek(subgraph_file)
    elif file_format == "yaml":
        return nx.read_yaml(subgraph_file)
    elif file_format == "gpickle":
        return nx.read_gpickle(subgraph_file)
    else:
        logging.warning("File format not found, returning empty graph.")
        return nx.MultiDiGraph()