本文整理汇总了Python中networkx.algorithms.bipartite.sets函数的典型用法代码示例。如果您正苦于以下问题:Python sets函数的具体用法?Python sets怎么用?Python sets使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sets函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: clusterConstrained
def clusterConstrained(dupes, threshold=0.6):
    """Cluster scored duplicate pairs via optimal bipartite assignment.

    Builds a weighted bipartite graph from ``dupes`` (an iterable of
    ``((a, b), score)``), then, per connected component of size > 2,
    solves an assignment problem with the Hungarian algorithm on the
    biadjacency matrix; 2-node components become clusters directly.

    Scores below ``threshold`` are zeroed before solving, and similarity
    is inverted (1 - score) because the solver minimizes cost.
    """
    dupe_graph = networkx.Graph()
    dupe_graph.add_weighted_edges_from(
        ((pair[0], pair[1], score) for (pair, score) in dupes), bipartite=1)
    clusters = []
    for component in connected_component_subgraphs(dupe_graph):
        if len(component) <= 2:
            # trivial component: its single edge is the whole cluster
            clusters.append(set(component.edges()[0]))
            continue
        rows, cols = bipartite.sets(component)
        rows, cols = list(rows), list(cols)
        scores = numpy.asarray(biadjacency_matrix(component, rows, cols))
        scores[scores < threshold] = 0
        # invert similarity into cost for the minimizing Hungarian solver
        costs = 1 - scores
        assignment = _Hungarian().compute(costs)
        clusters.extend(set([rows[match[0]], cols[match[1]]])
                        for match in assignment if len(match) > 1)
    return clusters
示例2: create_3comms_bipartite
def create_3comms_bipartite(n,m,p,No_isolates=True):
import community as comm
from networkx.algorithms import bipartite as bip
u=0
while True:
G=nx.bipartite_random_graph(n,m,p)
list_of_isolates=nx.isolates(G)
if No_isolates:
G.remove_nodes_from(nx.isolates(G))
partition=comm.best_partition(G)
sel=max(partition.values())
if sel==2 and nx.is_connected(G):
break
u+=1
print u,sel
ndlss=bip.sets(G)
ndls=[list(i) for i in ndlss]
slayer1=ndls[0]
slayer2=ndls[1]
layer1=[i for i,v in partition.items() if v==0]
layer2=[i for i,v in partition.items() if v==1]
layer3=[i for i,v in partition.items() if v==2]
edgeList=[]
for e in G.edges():
if (e[0] in slayer1 and e[1] in slayer2) or (e[0] in slayer2 and e[1] in slayer1):
edgeList.append(e)
return G,layer1,layer2,layer3,slayer1,slayer2,edgeList,partition
示例3: is_planar
def is_planar(G):
"""
function checks if graph G has K(5) or K(3,3) as minors,
returns True /False on planarity and nodes of "bad_minor"
"""
result = True
bad_minor = []
n = len(G.nodes())
iterazione = 0
if n > 5:
print "N >5"
for subnodes in it.combinations(G.nodes(), 6):
iterazione += 1
print "iterazione %d" % iterazione
subG = G.subgraph(subnodes)
if bipartite.is_bipartite(G): # check if the graph G has a subgraph K(3,3)
X, Y = bipartite.sets(G)
if len(X) == 3:
result = False
bad_minor = subnodes
return result, bad_minor
iterazione = 0
if n > 4 and result:
print "N >4"
for subnodes in it.combinations(G.nodes(), 5):
print "iterazione %d" % iterazione
subG = G.subgraph(subnodes)
if len(subG.edges()) == 10: # check if the graph G has a subgraph K(5)
result = False
bad_minor = subnodes
return result, bad_minor
return result, bad_minor
示例4: get_valid_fragments
def get_valid_fragments(G, stoich_rank):
    """Enumerate and validate reaction-network fragments in parallel.

    Splits the bipartite graph G into complex and reaction node sets,
    forms all (complex combination, reaction multiset) fragments of size
    ``stoich_rank``, validates them across a process pool, and returns
    the unique valid fragments.
    """
    complexes, reactions = bipartite.sets(G)
    complexes = list(complexes)
    reactions = list(reactions)
    # bipartite.sets sometimes returns the two sets swapped; rely on
    # reaction nodes being named 'w1', 'w2', ... to detect and undo it
    if 'w1' not in complexes and 'w1' not in reactions:
        raise Exception('my hack to resolve this unexpected behavior shown by bipartite.sets assumes that reaction nodes are named \'w1\', \'w2\', ...')
    if 'w1' in complexes:
        complexes, reactions = reactions, complexes
    if not ('w1' in reactions and 's1' in complexes):
        raise Exception('Something went wrong generating the lists of complexes of reactions.')
    complex_perms = list(it.combinations(complexes,stoich_rank))
    reaction_perms = list(it.combinations_with_replacement(reactions,stoich_rank))
    fragments = list(it.product(complex_perms, reaction_perms))
    pool = Pool()
    chunksize = 100
    myval = functools.partial(validate_fragments, G, stoich_rank)
    try:
        fragment_list = pool.imap(myval, fragments, chunksize)
        # consume the lazy imap iterator before shutting the pool down
        valid_fragments = [f for f in fragment_list if f is not None]
    finally:
        # BUG FIX: the pool's worker processes were never released
        pool.close()
        pool.join()
    return get_unique_fragments(valid_fragments)
示例5: test_bipartite_density
def test_bipartite_density(self):
    """Bipartite density equals |E| / (|X| * |Y|) on a path graph."""
    G = nx.path_graph(5)
    X, Y = bipartite.sets(G)
    expected = float(len(G.edges())) / (len(X) * len(Y))
    assert_equal(bipartite.density(G, X), expected)
    # the directed version doubles the possible edges, halving density
    D = nx.DiGraph(G.edges())
    assert_equal(bipartite.density(D, X), expected / 2.0)
    # an empty graph has zero density by convention
    assert_equal(bipartite.density(nx.Graph(), {}), 0.0)
示例6: save_graph
def save_graph(c2map, filepath):
    """Draw the bipartite graph held in c2map['graph'] as two columns
    (one per node set) and save the figure to ``filepath``."""
    graph = c2map['graph']
    left, right = bipartite.sets(graph)
    pos = {}
    # column layout: left set at x=1, right set at x=2
    for i, node in enumerate(left):
        pos[node] = (1, i)
    for i, node in enumerate(right):
        pos[node] = (2, i)
    nx.draw(graph, pos=pos, with_labels=True)
    plt.savefig(filepath)
示例7: plot_initial_graph
def plot_initial_graph(G):
    """Show bipartite graph G with one node set in grey, the other gold."""
    plt.figure(num=1, figsize=(16, 12))
    grey_set, gold_set = bipartite.sets(G)
    layout = nx.spring_layout(G)
    nx.draw_networkx_nodes(G, pos=layout, nodelist=list(grey_set),
                           node_color='grey', alpha=0.3)
    nx.draw_networkx_nodes(G, pos=layout, nodelist=list(gold_set),
                           node_color='gold')
    nx.draw_networkx_labels(G, pos=layout)
    nx.draw_networkx_edges(G, pos=layout, alpha=0.2)
    plt.axis("off")
    plt.show()
示例8: _getBipartition
def _getBipartition(G):
    """Return (topIndices, botIndices): positions within G.nodes() that
    belong to each of G's two bipartite node sets."""
    top_set, bottom_set = bipartite.sets(G)
    top_indices, bottom_indices = [], []
    for index, node in enumerate(G.nodes()):
        if node in top_set:
            top_indices.append(index)
        else:
            bottom_indices.append(index)
    #print "Computed bipartition."
    return top_indices, bottom_indices
示例9: main
def main():
G = initialize()
user, business = node_initialize(G)
user = list(set(user) & set(G.nodes()))
business = list(set(business) & set(G.nodes()))
G = make_bipartite(G, user, business)
print nx.is_bipartite(G)
G = list(nx.connected_component_subgraphs(G))[0]
user, business = bipartite.sets(G)
print "nodes separated"
Gu = bipartite.projected_graph(G, user)
print Gu.number_of_nodes()
示例10: plot_graph
def plot_graph(G):
    """Draw bipartite graph G in two columns, labeling nodes by name."""
    left, right = bipartite.sets(G)
    pos = {}
    # place the left set at x=1 and the right set at x=2
    pos.update((node, (1, row)) for row, node in enumerate(left))
    pos.update((node, (2, row)) for row, node in enumerate(right))
    networkx.draw(G, pos=pos, with_labels=False)
    # each node is labeled with itself
    labels = {node: node for node in G.nodes()}
    networkx.draw_networkx_labels(G, pos, labels)
    plt.show()
示例11: splitBipartiteGexf
def splitBipartiteGexf( inputGexf, outputGexfPath ):
    """Split a bipartite GEXF graph into two one-mode projections.

    Reads ``inputGexf``, computes the two bipartite node sets X and Y,
    builds a generic weighted projection onto each set, and writes them
    as <name>.x.gexf and <name>.y.gexf under ``outputGexfPath``.
    Metadata and results are accumulated in ``jr`` (a dict defined
    elsewhere in the file); failures are reported via ``throwError``.
    """
    outputGexfPath = outputGexfPath + os.sep
    jr["input_gexf"] = inputGexf
    jr["outputGexfPath"] = outputGexfPath
    # output files: <input basename>.x.gexf / .y.gexf in the output dir
    xgexf = os.path.join( dirname( outputGexfPath ), basename( splitext( inputGexf )[0] ) )+".x.gexf"
    ygexf = os.path.join( dirname( outputGexfPath ), basename( splitext( inputGexf )[0] ) )+".y.gexf"
    try:
        graph = nx.readwrite.gexf.read_gexf( inputGexf );
    except:
        # NOTE(review): bare except hides the real parse error; consider
        # catching the specific networkx/XML exceptions and logging them.
        throwError( "unable to read gexf file" )
        return
    # bug in networkx, we need to make the directed graph as undirected
    graph=graph.to_undirected()
    jr["numOfNodes"] = len( graph.nodes() )
    jr["numOfEdges"] = len( graph.edges() )
    X,Y=bipartite.sets(graph)
    print "biparte.sets..."
    print X
    print Y
    #xgr=project_bipartite_graph(graph,X,"weight")
    xgr=bipartite.generic_weighted_projected_graph(graph,X)
    print "biparte.xgr..."
    print len(xgr.nodes())
    print len(xgr.edges())
    try:
        nx.readwrite.gexf.write_gexf(xgr, xgexf )
    except:
        throwError( "unable to write file, path:'" + xgexf + "'" )
        return
    #ygr=project_bipartite_graph(graph,Y,"weight")
    ygr=bipartite.generic_weighted_projected_graph(graph,Y)
    print "biparte.ygr..."
    print len(ygr.nodes())
    print len(ygr.edges())
    try:
        nx.readwrite.gexf.write_gexf(ygr, ygexf )
    except:
        # NOTE(review): unlike the X write above, a failure here does NOT
        # return, so jr['output_gexf'] is still recorded below -- confirm
        # this asymmetry is intentional.
        throwError( "unable to write file, path:'" + ygexf + "'" )
    #print sys.exc_info()
    jr['output_gexf'] = [ xgexf, ygexf ]
    print "nodes in X", xgr.nodes()
    print "edges in X", list( xgr.edges() )
    print "nodes in Y", ygr.nodes()
示例12: get_bipartite_sets
def get_bipartite_sets(G):
    """Return (complexes, reactions): the two node sets of bipartite G.

    networkx's bipartite.sets sometimes hands the two sets back in
    swapped order (observed on larger reaction networks), so reaction
    nodes are assumed to be named 'w1', 'w2', ... and used to restore
    the intended order.
    """
    first, second = bipartite.sets(G)
    if 'w1' not in first and 'w1' not in second:
        raise Exception('my hack to resolve this unexpected behavior shown by bipartite.sets assumes that reaction nodes are named \'w1\', \'w2\', ...')
    if 'w1' in first:
        # the sets came back swapped: 'first' actually holds reactions
        return second, first
    return first, second
示例13: draw_graph
def draw_graph(G):
    """Show bipartite graph G with red/white node sets and labels nudged
    slightly below each node."""
    layout = nx.spring_layout(G)  # positions for all nodes
    cnodes, unodes = bipartite.sets(G)
    nx.draw_networkx_nodes(G, layout, nodelist=cnodes, node_color='r')
    nx.draw_networkx_nodes(G, layout, nodelist=unodes, node_color='w')
    # nx.draw_networkx_edges(G,pos)
    # shift a copy of the positions down so labels sit under the nodes
    label_layout = copy.deepcopy(layout)
    for node in layout:
        label_layout[node][1] = layout[node][1] - 0.02
    nx.draw_networkx_labels(G, label_layout)
    plt.axis('off')
    plt.show()
示例14: get_check_nodes
def get_check_nodes(self,encoded_Graph):
    """Return (check_node_list, num_recovered_packets) for the decoding
    graph: check nodes are encoded nodes of degree exactly 1, and the
    recovered count is total source packets minus those still present."""
    src_nodes, encoded_nodes = bipartite.sets(encoded_Graph)
    # every source packet no longer in the graph has been recovered
    num_recovered_packets = self.total_src_packets - len(src_nodes)
    # check nodes: encoded nodes attached to exactly one source node
    check_node_list = [node for node in list(encoded_nodes)
                       if encoded_Graph.degree(node) == 1]
    return check_node_list, num_recovered_packets
示例15: getDivFieldEdgeWeight_list
def getDivFieldEdgeWeight_list():
    """Compute a per-division field-sharing weight ('prodratio').

    Builds a bipartite division/field graph from the module-level
    ``_league_div`` config, sums the inverse field degrees per division
    ('edgesum'), scales each division's share by its team-count ratio
    within its connected division group, and returns a ``List_Indexer``
    namedtuple of the resulting products plus an index-lookup function
    keyed on div_id.
    """
    df_biparG = nx.Graph()
    df_biparG.add_nodes_from([x['div_id'] for x in _league_div], bipartite=0)
    # even though we are using a bipartite graph structure, node names between
    # the column nodes need to be distinct, or else edge (1,2) and (2,1) are not distinguished.
    # instead use edge (1, f2), (2, f1) - use 'f' prefix for field nodes
    df_biparG.add_edges_from([(x['div_id'],'f'+str(y)) for x in _league_div for y in x['divfield_list']])
    div_nodes, field_nodes = bipartite.sets(df_biparG)
    deg_fnodes = {f:df_biparG.degree(f) for f in field_nodes}
    # effective edge sum lists for each division, the sum of the weights of the connected fields;
    # the weights of the associated fields, which are represented as field nodes,
    # are in turn determined by its degree. The inverse of the degree for the connected division is
    # taken, which becomes the weight of the particular field associated with the division. The weights
    # of each field are summed for each division. The weights also represent the 'total fairness share'
    # of fields associated with a division.
    # Bipartite graph representations, with divisions as one set of nodes, and fields as the other set
    # are used. Thus a neighbor of a division is always a field.
    edgesum_list = [{'div_id':d, 'edgesum': sum([1.0/deg_fnodes[f] for f in df_biparG.neighbors(d)])}
        for d in div_nodes]
    sorted_edgesum_list = sorted(edgesum_list, key=itemgetter('div_id'))
    logging.debug("div fields bipartite graph %s %s effective edge sum for each node %s",
        df_biparG.nodes(), df_biparG.edges(), sorted_edgesum_list)
    # depending on the number of teams in each division, the 'fairness share' for each division is adjusted;
    # i.e. a division with more teams is expected to contribute a larger amount to field sharing obligations,
    # such as the number of expected early/late start times for a particular division. (If one div has 20 teams
    # and the other connected div has only 10 teams, the 20-team division should have a larger share of filling
    # early and late start time games.
    div_indexer = dict((p['div_id'],i) for i,p in enumerate(_league_div))
    # ratio is represented as factor that is multiplied against the 'expected' fair share, which is the 1-inverse
    # of the number of divisions in the connected group - (dividing by the 1-inverse is equiv to multiple by the
    # number of teams - len(connected_list) as shown below)
    divratio_list = [{'div_id':x, 'ratio': len(connected_list)*float(_league_div[div_indexer.get(x)]['totalteams'])/
        sum(_league_div[div_indexer.get(y)]['totalteams'] for y in connected_list)}
        for connected_list in getConnectedDivisions() for x in connected_list]
    sorted_divratio_list = sorted(divratio_list, key=itemgetter('div_id'))
    # multiply sorted edgesum list elements w. sorted divratio list elements
    # because of the sort all dictionary elements in the list should be sorted according to div_id and obviating
    # need to create an indexerGet function
    # x['div_id'] could have been y['div_id'] in the list comprehension below
    prod_list = [{'div_id': x['div_id'], 'prodratio': x['edgesum']*y['ratio']}
        for (x,y) in zip(sorted_edgesum_list, sorted_divratio_list)]
    logging.debug("getDivFieldEdgeWeight: sorted_edge=%s, sorted_ratio=%s, prod=%s",
        sorted_edgesum_list, sorted_divratio_list, prod_list)
    # define indexer function object
    prod_indexerGet = lambda x: dict((p['div_id'],i) for i,p in enumerate(prod_list)).get(x)
    List_Indexer = namedtuple('List_Indexer', 'dict_list indexerGet')
    return List_Indexer(prod_list, prod_indexerGet)