本文整理汇总了Python中igraph.Graph.simplify方法的典型用法代码示例。如果您正苦于以下问题:Python Graph.simplify方法的具体用法?Python Graph.simplify怎么用?Python Graph.simplify使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类igraph.Graph
的用法示例。
在下文中一共展示了Graph.simplify方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Physic
# 需要导入模块: from igraph import Graph [as 别名]
# 或者: from igraph.Graph import simplify [as 别名]
class Physic (BaseInputGraph):
    # Loader for the Arxiv ASTRO-PH collaboration network (Python 2 code).

    def __init__(self):
        '''
        @return: Arxiv ASTRO-PH (Astro Physics) collaboration network as iGraph graph instance
        '''
        # Each non-comment line of the edge file is "src dst"; every edge
        # is given a unit weight.
        edges = []
        weights = []
        # NOTE(review): file handle is never closed -- consider `with open(...)`.
        f = open("./physic/compact-physic.txt", "r")
        for line in f:
            if line and line[0]!='#':
                seg = line.split()
                edges.append( (int(seg[0]), int(seg[1])) )
                weights.append( 1 )
        # The largest vertex id on either endpoint determines the vertex count.
        maxid = max( edges, key=itemgetter(1) )[1]
        maxid = max( maxid, max(edges,key=itemgetter(0))[0] )
        self.g = Graph()
        self.g.add_vertices(maxid + 1)
        self.g.add_edges(edges)
        self.g.to_undirected()
        # Collapse parallel edges and drop self-loops from the raw edge list.
        self.g.simplify()
        # NOTE(review): `weights` is built but never assigned to
        # self.g.es["weight"], yet run() reads e["weight"] below -- confirm
        # the attribute is set elsewhere (e.g. inside BaseInputGraph.run).
        self.g.vs["myID"] = [ str(int(i)) for i in range(maxid+1)]
        print "#nodes=", maxid + 1
        print "#edges=", len(self.g.es)

    def run(self):
        # Seed communities via the unsupervised heuristic, run the base
        # algorithm, then dump the weighted edge list for external tools.
        C = BaseInputGraph.unsupervised_logexpand(self)
        BaseInputGraph.run(self, C, p0=np.array([0.04, 0.04]))
        with open("./physic/Physic_weights.pairs", "w+") as txt:
            for e in self.g.es:
                txt.write("%d %d %f\n" %(e.tuple[0], e.tuple[1], e["weight"]) )
示例2: merge
# 需要导入模块: from igraph import Graph [as 别名]
# 或者: from igraph.Graph import simplify [as 别名]
def merge(g1, g2):
    """Merge directed graphs g1 and g2 (matched by vertex 'name') into a new graph.

    The result carries the union of vertices and edges, an 'inprim' flag
    that is 1 when the vertex was primary in either input, and a 'pnamelst'
    attribute holding the '|'-joined union of both inputs' name lists.
    """
    names1 = g1.vs['name'][:]
    names2 = g2.vs['name'][:]
    merged = Graph(0, directed=True)
    merged.add_vertices(list(set(names1) | set(names2)))
    # Re-express every edge of both inputs as a (source-name, target-name)
    # pair so indices from the two graphs cannot collide.
    name_edges = [(names1[s], names1[t]) for s, t in g1.get_edgelist()]
    name_edges += [(names2[s], names2[t]) for s, t in g2.get_edgelist()]
    merged.add_edges(name_edges)
    # Remove duplicate edges / self-loops produced by overlapping inputs.
    merged.simplify()
    # Propagate attributes onto the merged vertex set.
    prim_names_1 = [vn for i, vn in enumerate(names1) if int(g1.vs['inprim'][i]) == 1]
    prim_names_2 = [vn for i, vn in enumerate(names2) if int(g2.vs['inprim'][i]) == 1]
    merged_names = merged.vs['name'][:]
    prim_flags = [1 if vn in prim_names_1 or vn in prim_names_2 else 0 for vn in merged_names]
    collected = [[] for _ in merged_names]
    for i, vn in enumerate(merged_names):
        for j, other in enumerate(names1):
            if vn == other:
                collected[i].extend(g1.vs['pnamelst'][j].strip().split('|'))
        for j, other in enumerate(names2):
            if vn == other:
                collected[i].extend(g2.vs['pnamelst'][j].strip().split('|'))
    merged.vs['pnamelst'] = ['|'.join(map(str, list(set(acc)))) if acc != [] else '' for acc in collected]
    merged.vs['inprim'] = prim_flags
    return merged
示例3: LFR
# 需要导入模块: from igraph import Graph [as 别名]
# 或者: from igraph.Graph import simplify [as 别名]
class LFR ( BaseInputGraph ):
    # Loader for an LFR synthetic benchmark network (Python 2 code).

    def __init__(self, trialval=1):
        # trialval selects which generated network/community file pair to load.
        ws = []
        edges = []
        self.trial = trialval
        with open("./binary_networks/mu0.5/network%d.dat" % self.trial, "r") as txt:
            for line in txt:
                seg = line.split()
                edges.append((int(seg[0]), int(seg[1])))
                ws.append(1)
        # Largest vertex id on either endpoint determines the vertex count.
        maxid = max( edges, key=itemgetter(1))[1]
        maxid = max( maxid, max(edges,key=itemgetter(0))[0] )
        self.g = Graph()
        print maxid
        self.g.add_vertices(maxid + 1)
        # Ground-truth community labels, one "vertex community" pair per line.
        with open("./binary_networks/mu0.5/community%d.dat" % self.trial, "r") as txt:
            for line in txt:
                seg = line.split()
                #print seg[0]
                self.g.vs[int(seg[0])]["comm"] = seg[1] #note: string is returned
        self.g.add_edges(edges)
        self.g.to_undirected()
        self.g.simplify()
        # LFR vertex ids are 1-based, so vertex 0 is an unused placeholder.
        self.g.delete_vertices(0)
        # NOTE(review): ws was collected per *input* edge; after simplify()
        # the edge count may be smaller, so this assignment could mismatch
        # in length -- confirm the input file has no duplicate edges.
        self.g.es["weight"] = ws
        BaseInputGraph.write_ground_truth(self, "./ground_truth_community%d.groups" % self.trial)
        print "#nodes=", maxid + 1
        print "#edges=", len(self.g.es)

    def run(self):
        #supervised
        # NOTE(review): this supervised seeding of C is dead code -- it is
        # unconditionally overwritten by the unsupervised call below.
        C = []
        for i in range(6):
            commval = str(random.randint(0,100))
            group = [i for i, comm in enumerate(self.g.vs["comm"]) if comm == commval]
            C += BaseInputGraph.get_C(self, group)
        #unsupervised
        C = BaseInputGraph.unsupervised_logexpand(self)
        BaseInputGraph.run(self, C, p0=np.array([1 , 1]))
        BaseInputGraph.results(self, Graph.community_fastgreedy, hasgnc = False,\
            filename="%d" %self.trial)
示例4: graph_from_sparse
# 需要导入模块: from igraph import Graph [as 别名]
# 或者: from igraph.Graph import simplify [as 别名]
def graph_from_sparse(data, directed=None):
    """Build an igraph Graph from a sparse adjacency matrix.

    @param data: a scipy-style sparse matrix; nonzero entries become
        weighted edges (attribute 'weight').
    @param directed: True/False, or None to auto-detect: the graph is
        treated as undirected iff the matrix is symmetric on its
        nonzero entries.
    @return: the resulting Graph; for the undirected case the duplicate
        symmetric edges are collapsed, keeping the first weight.
    """
    from igraph import Graph
    sources, targets = data.nonzero()
    if directed is None:  # fixed: was `directed==None`; identity test is correct for None
        # Aliased import so the builtin all() is not shadowed.
        from numpy import all as np_all
        directed = not np_all(data[sources, targets] == data[targets, sources])
    from numpy import array
    g = Graph(zip(sources, targets), directed=directed,
              edge_attrs={'weight': array(data[sources, targets])[0]})
    if g.is_directed():
        return g
    # nonzero() reported each symmetric entry twice; merge the duplicate
    # edges and keep the first occurrence's weight.
    return g.simplify(combine_edges="first")
示例5: Enron
# 需要导入模块: from igraph import Graph [as 别名]
# 或者: from igraph.Graph import simplify [as 别名]
class Enron (BaseInputGraph):
    # Loader for the Enron email communication network (Python 2 code).

    def __init__(self):
        '''
        @return: Enron email communication network as iGraph graph instance
        '''
        # Each non-comment line of the edge file is "src dst"; every edge
        # is given a unit weight.
        edges = []
        weights = []
        # NOTE(review): file handle is never closed -- consider `with open(...)`.
        f = open("./enron/email-Enron.txt", "r")
        for line in f:
            if line and line[0]!='#':
                seg = line.split()
                edges.append( (int(seg[0]), int(seg[1])) )
                weights.append( 1 )
        # The largest vertex id on either endpoint determines the vertex count.
        maxid = max( edges, key=itemgetter(1) )[1]
        maxid = max( maxid, max(edges,key=itemgetter(0))[0] )
        self.g = Graph()
        self.g.add_vertices(maxid + 1)
        self.g.add_edges(edges)
        self.g.to_undirected()
        # Collapse parallel edges and drop self-loops from the raw edge list.
        self.g.simplify()
        # NOTE(review): `weights` is built but never assigned to
        # self.g.es["weight"], yet run() reads e["weight"] below -- confirm
        # the attribute is set elsewhere (e.g. inside BaseInputGraph.run).
        self.g.vs["myID"] = [ str(int(i)) for i in range(maxid+1)]
        print "#nodes=", maxid + 1
        print "#edges=", len(self.g.es)

    def run(self):
        # Seed communities via the unsupervised heuristic, run the base
        # algorithm, then dump both weighted and unweighted edge lists.
        C = BaseInputGraph.unsupervised_logexpand(self)
        BaseInputGraph.run(self, C, p0=np.array([0.04, 0.04]))
        with open("./enron/email-Enron_weights.pairs", "w+") as txt:
            for e in self.g.es:
                txt.write("%d %d %f\n" %(e.tuple[0], e.tuple[1], e["weight"]) )
        with open("./enron/email-Enron_unweights.pairs", "w+") as txt:
            count = 0
            for e in self.g.es:
                txt.write("%d %d\n" %(e.tuple[0], e.tuple[1]) )
                count += 1
            print count , "edges written."
示例6: get_igraph_graph
# 需要导入模块: from igraph import Graph [as 别名]
# 或者: from igraph.Graph import simplify [as 别名]
def get_igraph_graph(network):
    # Build a directed igraph graph from `network` and return its largest
    # connected component (Python 2 code).
    # `network` maps a node key to a set of neighbor keys (adjacency sets);
    # vertices are named by the dict keys.
    print 'load %s users into igraph' % len(network)
    g = Graph(directed=True)
    keys_set = set(network.keys())
    g.add_vertices(network.keys())
    print 'iterative load into igraph'
    edges = []
    for source in network:
        # Only keep targets that are themselves vertices of the graph.
        for target in network[source].intersection(keys_set):
            edges.append((source, target))
    g.add_edges(edges)
    # Collapse duplicate edges and drop self-loops.
    g = g.simplify()
    print 'make sure graph is connected'
    # Keep only the largest cluster (component) reported by igraph.
    connected_clusters = g.clusters()
    connected_cluster_lengths = [len(x) for x in connected_clusters]
    connected_cluster_max_idx = connected_cluster_lengths.index(max(connected_cluster_lengths))
    g = connected_clusters.subgraph(connected_cluster_max_idx)
    if g.is_connected():
        print 'graph is connected'
    else:
        print 'graph is not connected'
    return g
示例7: reduce_and_save_communities
# 需要导入模块: from igraph import Graph [as 别名]
# 或者: from igraph.Graph import simplify [as 别名]
def reduce_and_save_communities(root_user, distance=10, return_graph_for_inspection=False):
print 'starting reduce_and_save_communities'
print 'root_user: %s, following_in_our_db: %s, distance: %s' % (
root_user.screen_name, len(root_user.following), distance)
network = TwitterUser.get_rooted_network(root_user, postgres_handle, distance=distance)
print 'load %s users into igraph' % len(network)
g = Graph(directed=True)
keys_set = set(network.keys())
g.add_vertices(network.keys())
g.vs["id"] = network.keys() #need this for pajek format
print 'iterative load into igraph'
edges = []
for source in network:
for target in network[source].intersection(keys_set):
edges.append((source, target))
g.add_edges(edges)
g = g.simplify()
print 'make sure graph is connected'
connected_clusters = g.clusters()
connected_cluster_lengths = [len(x) for x in connected_clusters]
connected_cluster_max_idx = connected_cluster_lengths.index(max(connected_cluster_lengths))
g = connected_clusters.subgraph(connected_cluster_max_idx)
if g.is_connected():
print 'graph is connected'
else:
print 'graph is not connected'
if return_graph_for_inspection:
return g
print 'write to pajek format'
root_file_name = root_user.screen_name
f = open('io/%s.net' % root_file_name, 'w')
g.write(f, format='pajek')
print 'run infomap'
#infomap_command = 'infomap_dir/infomap 345234 io/%s.net 10'
#infomap_command = 'conf-infomap_dir/conf-infomap 344 io/%s.net 10 10 0.50'
infomap_command = 'infohiermap_dir/infohiermap 345234 io/%s.net 30'
os.system(infomap_command % root_file_name)
print 'read into memory'
f = open('io/%s.smap' % root_file_name)
section_header = ''
communities = defaultdict(lambda: ([], [], []))
for line in f:
if line.startswith('*Modules'):
section_header = 'Modules'
continue
if line.startswith('*Insignificants'):
section_header = 'Insignificants'
continue
if line.startswith('*Nodes'):
section_header = 'Nodes'
continue
if line.startswith('*Links'):
section_header = 'Links'
continue
if section_header == 'Modules':
#looks like this:
#1 "26000689,..." 0.130147 0.0308866
#The names under *Modules are derived from the node with the highest
#flow volume within the module, and 0.25 0.0395432 represent, respectively,
#the aggregated flow volume of all nodes within the module and the per
#step exit flow from the module.
continue
if section_header == 'Nodes':
#looks like this:
#1:10 "2335431" 0.00365772
#or w/ a semicolon instead, semicolon means not significant
#see http://www.tp.umu.se/~rosvall/code.html
if ';' in line:
continue
community_idx = line.split(':')[0]
node_id = line.split('"')[1]
final_volume = float(line.split(' ')[2])
communities[community_idx][1].append(node_id)
communities[community_idx][2].append(final_volume)
if section_header == 'Links':
#community_edges
#looks like this:
#1 4 0.0395432
community_idx = line.split(' ')[0]
target_community_idx = line.split(' ')[1]
edge_weight = line.split(' ')[2]
communities[community_idx][0].append('%s:%s' % (target_community_idx, edge_weight))