This article collects typical usage examples of the Python method priorityQueue.PriorityQueue.add_task. If you have been wondering what PriorityQueue.add_task does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, priorityQueue.PriorityQueue.
The following shows 13 code examples of the PriorityQueue.add_task method, sorted by popularity by default.
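All of the examples below rely on a small priorityQueue module that follows the familiar heapq-based priority-queue recipe from the Python standard-library documentation, exposing add_task, remove_task, pop_item and an entry_finder dict; several examples import the class under the alias PQ. The module itself is not shown on this page, so the following is only a minimal sketch of the assumed interface, not the original source:

import heapq
import itertools

class PriorityQueue(object):
    '''Minimal heapq-based priority queue (sketch of the assumed interface).
    The lowest priority is popped first, which is why the examples push
    negative values to simulate a max-priority queue.
    '''
    REMOVED = '<removed-task>'            # placeholder for a removed task

    def __init__(self):
        self.pq = []                      # heap of [priority, count, task] entries
        self.entry_finder = {}            # task -> [priority, count, task]
        self.counter = itertools.count()  # tie-breaker for equal priorities

    def add_task(self, task, priority=0):
        '''Add a new task or update the priority of an existing task.'''
        if task in self.entry_finder:
            self.remove_task(task)
        count = next(self.counter)
        entry = [priority, count, task]
        self.entry_finder[task] = entry
        heapq.heappush(self.pq, entry)

    def remove_task(self, task):
        '''Mark an existing task as removed; raises KeyError if not present.'''
        entry = self.entry_finder.pop(task)
        entry[-1] = self.REMOVED

    def pop_item(self):
        '''Remove and return (task, priority) for the lowest-priority task.'''
        while self.pq:
            priority, count, task = heapq.heappop(self.pq)
            if task is not self.REMOVED:
                del self.entry_finder[task]
                return task, priority
        raise KeyError('pop from an empty priority queue')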
Example 1: degreeDiscountIC
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def degreeDiscountIC(G, k, p=.01):
''' Finds initial set of nodes to propagate in Independent Cascade model (with priority queue)
Input: G -- networkx graph object
k -- number of nodes needed
p -- propagation probability
Output:
S -- chosen k nodes
'''
S = []
dd = PQ() # degree discount
t = dict() # number of adjacent vertices that are in S
d = dict() # degree of each vertex
# initialize degree discount
for u in G.nodes():
d[u] = sum([G[u][v]['weight'] for v in G[u]]) # each edge adds degree 1
# d[u] = len(G[u]) # each neighbor adds degree 1
dd.add_task(u, -d[u]) # add degree of each node
t[u] = 0
# add vertices to S greedily
for i in range(k):
u, priority = dd.pop_item() # extract node with maximal degree discount
S.append(u)
for v in G[u]:
if v not in S:
t[v] += G[u][v]['weight'] # increase number of selected neighbors
priority = d[v] - 2*t[v] - (d[v] - t[v])*t[v]*p # discount of degree
dd.add_task(v, -priority)
return S
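A hypothetical usage sketch (the graph below is made up for illustration): degreeDiscountIC only needs a weighted networkx graph in which the weight attribute counts parallel edges, plus the priority queue imported under the PQ alias mentioned in the import comment above.

# Hypothetical usage sketch: pick 2 seed nodes from a small weighted graph.
import networkx as nx
from priorityQueue import PriorityQueue as PQ  # assumed alias used by the examples

G = nx.Graph()
G.add_edge(1, 2, weight=1)
G.add_edge(2, 3, weight=2)   # weight counts parallel edges between 2 and 3
G.add_edge(3, 4, weight=1)
G.add_edge(4, 5, weight=1)

S = degreeDiscountIC(G, k=2, p=.01)
print S   # two seed nodes, e.g. [2, 4]; the exact result depends on tie-breaking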
Example 2: generalGreedy
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def generalGreedy(G, k, p=0.01):
""" Finds initial seed set S using general greedy heuristic
Input: G -- networkx Graph object
k -- number of initial nodes needed
p -- propagation probability
Output: S -- initial set of k nodes to propagate
"""
import time
start = time.time()
R = 20 # number of times to run Random Cascade
S = [] # set of selected nodes
# at each step add the node that yields the maximum spread together with the nodes already chosen
for i in range(k):
s = PQ() # priority queue
for v in G.nodes():
if v not in S:
s.add_task(v, 0) # initialize spread value
for j in range(R): # run R times Random Cascade
[priority, count, task] = s.entry_finder[v]
s.add_task(v, priority - float(len(runIC(G, S + [v], p))) / R) # add normalized spread value
task, priority = s.pop_item()
S.append(task)
print i, k, time.time() - start
return S
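generalGreedy relies on a runIC helper that is not shown on this page: it simulates one run of the Independent Cascade process from seed set S and returns the list of activated nodes. A minimal sketch of such a simulation, consistent with the edge-pruning probability 1 - (1-p)**weight used in Example 13 below, is given here as an assumption rather than the original implementation:

from random import random

def runIC(G, S, p=.01):
    ''' Sketch of one Independent Cascade run (assumed helper, not the original code).
    Input: G -- networkx graph object with 'weight' counting parallel edges
    S -- seed set
    p -- propagation probability of a single edge
    Output: T -- list of nodes activated in this run
    '''
    T = list(S)                 # seeds are activated from the start
    i = 0
    while i < len(T):           # process each newly activated node once
        u = T[i]
        for v in G[u]:
            if v not in T:
                w = G[u][v]['weight']
                # w parallel edges activate v with probability 1 - (1-p)**w
                if random() < 1 - (1 - p)**w:
                    T.append(v)
        i += 1
    return T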
Example 3: GDD
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def GDD(G, k, Ep):
''' Finds initial set of nodes to propagate in Independent Cascade model (with priority queue)
Input: G -- networkx graph object
k -- number of nodes needed
Ep -- propagation probabilities
Output:
S -- chosen k nodes
'''
S = []
dd = PQ() # degree discount
active = dict()
inactive = dict()
# initialize degree discount
for u in G:
active[u] = 1
# inactive[u] = sum([Ep[(u,v)]*G[u][v]['weight'] for v in G[u]])
inactive[u] = sum([1 - (1 - Ep[(u,v)])**G[u][v]["weight"] for v in G[u]])
priority = active[u]*(1 + inactive[u])
dd.add_task(u, -priority) # add initial priority (generalized degree) of each node
# add vertices to S greedily
for i in range(k):
u, priority = dd.pop_item() # extract node with maximal degree discount
S.append(u)
for v in G[u]:
if v not in S:
active[v] *= (1-Ep[(u,v)])**G[u][v]['weight']
inactive[v] -= 1 - (1 - Ep[(u,v)])**G[u][v]['weight']
priority = active[v]*(1 + inactive[v])
dd.add_task(v, -priority)
return S
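GDD expects Ep to be a dictionary of propagation probabilities keyed by directed edge tuples (u, v); for an undirected graph both orientations must be present, because the loops read Ep[(u,v)] for every neighbor v of u. A hypothetical helper for building a uniform Ep (uniform_ep and the probability value are illustrative, not part of the original code):

# Hypothetical sketch: uniform propagation probability for every edge,
# stored under both orientations so Ep[(u,v)] and Ep[(v,u)] both resolve.
def uniform_ep(G, p=.01):
    Ep = dict()
    for u, v in G.edges():
        Ep[(u, v)] = p
        Ep[(v, u)] = p
    return Ep

# Ep = uniform_ep(G, p=.01)
# S = GDD(G, k=10, Ep=Ep)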
Example 4: bipartite
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def bipartite(w, discrepancy):
Ebp_edges = []
Q = PriorityQueue()
incident_edges = dict()
for e in w:
Q.add_task(e, -w[e])
incident_edges.setdefault(e[0], []).append((e[1], w[e]))
incident_edges.setdefault(e[1], []).append((e[0], w[e]))
processed_edges = []
while len(processed_edges) < len(w):
(e, weight) = Q.pop_item()
processed_edges.append(e)
try:
incident_edges[e[0]].remove((e[1], -weight))
incident_edges[e[1]].remove((e[0], -weight))
except ValueError:
pass
Ebp_edges.append(e)
# discard all edges in Q incident to b (i.e. e[1])
for (a, weight) in incident_edges[e[1]]:
try:
Q.remove_task((a,e[1]))
processed_edges.append((a,e[1]))
except KeyError:
pass
try:
incident_edges[a].remove((e[1], weight))
incident_edges[e[1]].remove((a, weight))
except ValueError:
pass
discrepancy[e[0]] += 1
discrepancy[e[1]] += 1
if -1 < discrepancy[e[0]] < .5:
for (x, _) in incident_edges[e[0]]:
try:
Q.remove_task((e[0], x))
except KeyError:
pass
new_weight = abs(discrepancy[e[0]]) + 2*abs(discrepancy[x]) - abs(discrepancy[e[0]]) - 1
if new_weight > 0:
Q.add_task((e[0], x), -new_weight)
else:
processed_edges.append((e[0], x))
elif discrepancy[e[0]] > .5:
for (x, _) in incident_edges[e[0]]:
try:
Q.remove_task((e[0], x))
processed_edges.append((e[0], x))
except KeyError:
pass
return Ebp_edges
Example 5: representativeNodes
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def representativeNodes(G, k, metric=1):
''' Finds the most distinguishable (representative) nodes in graph G greedily.
At each step it takes the node furthest from the already chosen nodes.
Input: G -- networkx object graph with weighted edges
k -- number of nodes needed
metric -- parameter for differentiating representative qualities
metric == 1 trying to maximize total distance in the chosen set of k nodes
metric == 2 trying to maximize minimal distance between a pair of k nodes
Output:
S -- chosen k nodes
objv -- objective value according to the chosen metric and set of nodes
'''
S = [] # set of chosen nodes
S_dist = PQ() # distances from each node in G to set S according to metric
# initialize S with furthest vertices
try:
u,v,d = max(G.edges(data=True), key=lambda (u, v, d): d['weight'])
except KeyError:
raise KeyError, 'Most likely you have no weight attribute'
S.extend([u,v])
# compute distances from each node in G to S
for v in G.nodes():
if v not in S: # calculate only for nodes in G
if metric == 1:
S_dist.add_task(v, - _sumDist(G, S, v)) # take minus to pop the maximum value from priority queue
elif metric == 2:
S_dist.add_task(v, - _minDist(G, S, v)) # take minus to pop the maximum value from priority queue
# add new nodes to the set greedily
while len(S) < k:
u, priority = S_dist.pop_item() # find maximum value of distance to set S
S.append(u) # append that node to S
# only increase distance for nodes that are connected to u
for v in G[u].keys():
if v not in S: # add only remained nodes
[priority, count, task] = S_dist.entry_finder[v] # finds distance for the previous step
try:
if metric == 1:
S_dist.add_task(v, priority-G[u][v]['weight']) # adds distance to the new member of S
elif metric == 2:
S_dist.add_task(v, max(priority, -G[u][v]['weight'])) # update min distance to the set S
except:
raise Exception('Vertices %s and %s caused the problem' % (u, v))
# extract objective value of the chosen set
if metric == 1:
objv = 0
for u in S:
objv += _sumDist(G, S, u)
elif metric == 2:
objv = float('Inf')
for u in S:
objv = min(objv, _minDist(G, S, u))
return S, objv
Example 6: stopDegreeDiscount
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def stopDegreeDiscount(G, tsize, ic_step=1, p=.01, iterations=200):
''' Finds initial set of nodes to propagate in Independent Cascade model (with priority queue)
Input: G -- networkx graph object
tsize -- number of nodes necessary to reach
ic_step -- step of change in k between 2 iterations of IC
p -- propagation probability
Output:
S -- seed set
Tspread -- spread values for different sizes of seed set
'''
S = []
dd = PQ() # degree discount
t = dict() # number of adjacent vertices that are in S
d = dict() # degree of each vertex
# initialize degree discount
for u in G.nodes():
d[u] = sum([G[u][v]['weight'] for v in G[u]]) # each edge adds degree 1
# d[u] = len(G[u]) # each neighbor adds degree 1
dd.add_task(u, -d[u]) # add degree of each node
t[u] = 0
# add vertices to S greedily
# until necessary number of nodes can be reached
Tspread = dict() # spread for different k
k = 0
Tspread[k] = 0
stepk = 1
while Tspread[k] < tsize:
u, priority = dd.pop_item() # extract node with maximal degree discount
S.append(u)
for v in G[u]:
if v not in S:
t[v] += G[u][v]['weight'] # increase number of selected neighbors
priority = d[v] - 2*t[v] - (d[v] - t[v])*t[v]*p # discount of degree
dd.add_task(v, -priority)
# calculate IC spread with ic_step
if stepk == ic_step:
k = len(S)
Tspread[k] = avgSize(G, S, p, iterations)
print k, Tspread[k]
stepk = 0
stepk += 1
# search precise boundary
if abs(int(math.ceil(float(ic_step)/2))) == 1:
return S, Tspread
else:
return binarySearchBoundary(G, k, Tspread, tsize, ic_step, p, iterations)
Example 7: degreeHeuristic
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def degreeHeuristic(G, k, p=.01):
''' Finds initial set of nodes to propagate in Independent Cascade model (with priority queue)
Input: G -- networkx graph object
k -- number of nodes needed
p -- propagation probability
Output:
S -- chosen k nodes
'''
S = []
d = PQ()
for u in G:
degree = sum([G[u][v]['weight'] for v in G[u]])
# degree = len(G[u])
d.add_task(u, -degree)
for i in range(k):
u, priority = d.pop_item()
S.append(u)
return S
Example 8: getScores
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def getScores(G, Ep):
'''Finds scores for GDD.
Scores are degree for each node.
'''
scores = PQ() # degree discount
active = dict()
inactive = dict()
# initialize degree discount
for u in G:
active[u] = 1
# inactive[u] = sum([Ep[(u,v)]*G[u][v]['weight'] for v in G[u]])
inactive[u] = sum([1 - (1 - Ep[(u,v)])**G[u][v]["weight"] for v in G[u]])
priority = active[u]*(1 + inactive[u])
scores.add_task(u, -priority) # add initial priority (generalized degree) of each node
return scores, active, inactive
Example 9: FIND_LDAG
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def FIND_LDAG(G, v, t, Ew):
'''
Compute local DAG for vertex v.
Reference: W. Chen "Scalable Influence Maximization in Social Networks under LT model" Algorithm 3
INPUT:
G -- networkx DiGraph object
v -- vertex of G
t -- parameter theta
Ew -- influence weights of G
NOTE: Since graph G can have multiple edges between u and v,
total influence weight between u and v will be
number of edges times influence weight of one edge.
OUTPUT:
D -- networkx DiGraph object that is also LDAG
'''
# initialize influence of nodes
Inf = PQ()
Inf.add_task(v, -1)
x, priority = Inf.pop_item()
M = -priority
X = [x]
D = nx.DiGraph()
while M >= t:
out_edges = G.out_edges([x], data=True)
for (v1,v2,edata) in out_edges:
if v2 in X:
D.add_edge(v1, v2, edata)
in_edges = G.in_edges([x])
for (u,_) in in_edges:
if u not in X:
try:
[pr, _, _] = Inf.entry_finder[u]
except KeyError:
pr = 0
Inf.add_task(u, pr - G[u][x]['weight']*Ew[(u,x)]*M)
try:
x, priority = Inf.pop_item()
except KeyError:
return D
M = -priority
X.append(x)
return D
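A hypothetical usage sketch for FIND_LDAG: Ew holds the influence weight of a single edge, keyed by directed edge tuples. Normalizing by weighted in-degree, as below, is one common choice for the LT model; it is an assumption here, not something this example prescribes.

# Hypothetical usage sketch for FIND_LDAG on a tiny directed graph.
import networkx as nx

G = nx.DiGraph()
G.add_edge('a', 'c', weight=2)   # weight counts parallel edges
G.add_edge('b', 'c', weight=1)
G.add_edge('a', 'b', weight=1)

# assumed convention: weight of a single edge (u, v) is 1 over the weighted
# in-degree of v, so the incoming influence of each node sums to 1
Ew = dict()
for u, v in G.edges():
    Ew[(u, v)] = 1.0/G.in_degree(v, weight='weight')

D = FIND_LDAG(G, 'c', 1.0/320, Ew)   # local DAG rooted at 'c' with theta = 1/320
print D.edges()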
Example 10: degreeDiscountStar
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def degreeDiscountStar(G,k,p=.01):
S = []
scores = PQ()
d = dict()
t = dict()
for u in G:
d[u] = sum([G[u][v]['weight'] for v in G[u]])
t[u] = 0
score = -((1-p)**t[u])*(1+(d[u]-t[u])*p)
scores.add_task(u, score)
for iteration in range(k):
u, priority = scores.pop_item()
print iteration, -priority
S.append(u)
for v in G[u]:
if v not in S:
t[v] += G[u][v]['weight']
score = -((1-p)**t[v])*(1+(d[v]-t[v])*p)
scores.add_task(v, score)
return S
Example 11: singleDiscount
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def singleDiscount(G, k, p=.1):
''' Finds initial set of nodes to propagate in Independent Cascade model (with priority queue)
Input: G -- networkx graph object
k -- number of nodes needed
p -- propagation probability
Output:
S -- chosen k nodes
'''
S = [] # set of activated nodes
d = PQ() # degrees
for u in G:
degree = sum([G[u][v]['weight'] for v in G[u]])
d.add_task(u, -degree)
for i in range(k):
u, priority = d.pop_item()
S.append(u)
for v in G[u]:
if v not in S:
[priority, count, task] = d.entry_finder[v]
d.add_task(v, priority + G[u][v]['weight']) # discount degree by the weight of the edge
return S
Example 12: LDAG_heuristic
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def LDAG_heuristic(G, Ew, k, t):
''' LDAG algorithm for seed selection.
Reference: [1] Algorithm 5
Input:
G -- directed graph (nx.DiGraph)
Ew -- influence weights of edges (e.g. uniform, random) (dict)
k -- size of seed set (int)
t -- parameter theta for finding LDAG (0 <= t <= 1; typical value: 1/320) (float)
Output:
S -- seed set (list)
'''
# define variables
S = []
IncInf = PQ()
for node in G:
IncInf.add_task(node, 0)
# IncInf = dict(zip(G.nodes(), [0]*len(G))) # in case of usage dict instead of PQ
LDAGs = dict()
InfSet = dict()
ap = dict()
A = dict()
print 'Initialization phase'
for v in G:
LDAGs[v] = FIND_LDAG(G, v, t, Ew)
# update influence set for each node in LDAGs[v] with its root
for u in LDAGs[v]:
InfSet.setdefault(u, []).append(v)
alpha = computeAlpha(LDAGs[v], Ew, S, v)
A.update(alpha) # add new linear coefficients to A
# update incremental influence of all nodes in LDAGs[v] with alphas
for u in LDAGs[v]:
ap[(v, u)] = 0 # additionally set initial activation probability (line 7)
priority, _, _ = IncInf.entry_finder[u] # find previous value of IncInf
IncInf.add_task(u, priority - A[(v, u)]) # and add alpha
# IncInf[u] += A[(v, u)] # in case of using dict instead of PQ
print 'Main loop'
for it in range(k):
s, priority = IncInf.pop_item() # choose node with the biggest incremental influence
print it+1, s, -priority
for v in InfSet[s]: # for all nodes that s can influence
if v not in S:
D = LDAGs[v]
# update alpha_v_u for all u that can reach s in D (lines 17-22)
alpha_v_s = A[(v,s)]
dA = computeAlpha(D, Ew, S, s, val=-alpha_v_s)
for (s,u) in dA:
if u not in S + [s]: # don't update IncInf for nodes already in S or for s itself
A[(v,u)] += dA[(s,u)]
priority, _, _ = IncInf.entry_finder[u] # find previous value of incremental influence of u
IncInf.add_task(u, priority - dA[(s,u)]*(1 - ap[(v,u)])) # and update it accordingly
# update ap_v_u for all u reachable from s in D (lines 23-28)
dap = computeActProb(D, Ew, S + [s], s, val=1-ap[(v,s)])
for (s,u) in dap:
if u not in S + [s]:
ap[(v,u)] += dap[(s,u)]
priority, _, _ = IncInf.entry_finder[u] # find previous value of incremental influence of u
IncInf.add_task(u, priority + A[(v,u)]*dap[(s,u)]) # and update it accordingly
S.append(s)
return S
Example 13: spreadNewGreedyIC
# Required import: from priorityQueue import PriorityQueue [as alias]
# Or: from priorityQueue.PriorityQueue import add_task [as alias]
def spreadNewGreedyIC(G, targeted_size, step=1, p=.01, S0=[], iterations = 200):
''' Finds initial set of nodes to propagate in Independent Cascade.
Input: G -- networkx graph object
targeted_size -- desired number of nodes to be reached on average
step -- step for growing the seed set (not used yet, see TODO)
p -- propagation probability
S0 -- initial seed set (list)
iterations -- number of IC runs used to estimate spread
Output: S -- seed set whose estimated spread reaches targeted_size
TODO: add step functionality
'''
import time
start = time.time()
assert type(S0) == list, "S0 must be a list. %s provided instead" % type(S0)
S = S0 # set of selected nodes
tsize = 0
R = iterations
for i in range(R):
T = runIC(G, S, p)
tsize += float(len(T))/R
while tsize <= targeted_size:
s = PQ() # number of additional nodes each remaining node would bring to the set S, averaged over R iterations
Rv = dict() # number of reachable nodes for node v
# initialize values of s
for v in G.nodes():
if v not in S:
s.add_task(v, 0)
# calculate potential additional spread for each vertex not in S
prg_idx = 1
idx = 1
prcnt = .1 # for progress to print
R = iterations # number of iterations to run RanCas
for j in range(R):
# create new pruned graph E
E = deepcopy(G)
edge_rem = [] # edges to remove
for (u,v) in E.edges():
w = G[u][v]['weight']
if random() < 1 - (1 - p)**w:
edge_rem.append((u,v))
E.remove_edges_from(edge_rem)
# find reachable vertices from S
Rs = bfs(E, S)
# find additional nodes each vertex would bring to the set S
for v in G.nodes():
if v not in S + Rs: # if node is neither in S nor reachable by spread from S
[priority, c, task] = s.entry_finder[v]
s.add_task(v, priority - float(len(bfs(E, [v])))/R)
if idx == int(prg_idx*prcnt*R):
print '%s%%...' %(int(prg_idx*prcnt*100))
prg_idx += 1
idx += 1
# add vertex with maximum potential spread
task, priority = s.pop_item()
S.append(task)
print i, len(S), task, -priority, time.time() - start
tsize = 0
for j in range(R):
T = runIC(G, S, p)
tsize += float(len(T))/R
return S