This article collects typical usage examples of the networkx.pagerank function in Python. If you are wondering what pagerank does, how to use it, or where to find working examples, the curated code samples below may help.
The following shows 15 code examples of the pagerank function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
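To set the stage, here is a minimal, self-contained sketch of a typical nx.pagerank call on a small directed graph; the graph, node names, and parameter values are illustrative only and are not taken from the examples below.

import networkx as nx

# A tiny directed graph; an edge u -> v acts as an "endorsement" of v by u.
G = nx.DiGraph()
G.add_weighted_edges_from([('a', 'b', 1.0), ('b', 'c', 2.0),
                           ('c', 'a', 1.0), ('a', 'c', 0.5)])

# alpha is the damping factor; weight names the edge attribute used as the link weight.
scores = nx.pagerank(G, alpha=0.85, weight='weight')
for node, score in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
    print("%s  %.4f" % (node, score))

The return value is a plain dict mapping each node to its PageRank score, which is exactly what most of the examples below post-process: sorting it, subtracting two rankings, or writing the scores back onto the graph as node attributes.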
Example 1: create_schedule_graph
def create_schedule_graph(seasons, teams):
    # build a directed graph of teams that played each other so we can compute a PageRank
    #teams = [t[1] for t in teams[1:]]
    t_lookup = {int(t[0]): t[1] for t in teams[1:]}
    print teams
    teams = [int(t[0]) for t in teams[1:]]
    pr_hist = {}
    pr_w_hist = {}
    years = np.unique(seasons['Season'])  # seasons present in the data
    for year in years:
        G = nx.DiGraph()
        G.add_nodes_from(teams)
        G_w = G.copy()
        games = seasons[np.where((seasons['Season'] == year))]
        for game in games:
            # add a directional endorsement from the losing team to the winning team
            G.add_weighted_edges_from([(game['Lteam'], game['Wteam'], 1)])
            # weight by the score ratio squared
            G_w.add_weighted_edges_from([(game['Lteam'], game['Wteam'], (game['Wscore']/game['Lscore'])**2)])
        pr = nx.pagerank(G, alpha=0.9)
        pr_w = nx.pagerank(G_w, alpha=0.9)
        ranks = []
        ranks_w = []
        for r in pr:
            ranks.append((t_lookup[r], pr[r]))
        for r in pr_w:
            ranks_w.append((t_lookup[r], pr_w[r]))
        pr_hist[year] = pr
        pr_w_hist[year] = pr_w
        sorted_pr = sorted(ranks, key=lambda tup: tup[1], reverse=True)
        sorted_pr_w = sorted(ranks_w, key=lambda tup: tup[1], reverse=True)
    return pr_hist, pr_w_hist
Example 2: calc
def calc():
    filepath = "/Users/windwild/Google Drive/CUHK/sina_data/user_rel.csv"
    G = nx.DiGraph()
    fp = open(filepath, "r")
    fp.readline()
    array_list = {}
    for i in range(0, 10):
        array_list['fui'] = {}
        line = fp.readline()
        line_arr = line.split('"')
        uid = line_arr[0][:-1]
        line = line_arr[1]
        print line
        line = line.replace("u'", "'")
        print line
        items = demjson.decode(line)
        for key in items:
            array_list[key] = items[key]
        #print items['fui']
        print uid, i
        for follow in array_list['fui']:
            G.add_edge(uid, follow)
    fp.close()
    print nx.pagerank(G)
Example 3: process_data
def process_data():
    file = 'data/Homo_Sapiens/EVEX_relations_9606.tab'
    t1 = time.time()
    g = import_graph(file)
    t2 = time.time()
    print 'load relations', t2 - t1
    t = transitiveness_graph(g)
    t3 = time.time()
    print 'find transitive relations', t3 - t2
    #plot(g, 'relations')
    pr = nx.pagerank(t)
    t4 = time.time()
    print 'pagerank', t4 - t3
    for node in pr:
        t.node[node]['confirming_weight'] = pr[node]
    t5 = time.time()
    print 'write pr into graph', t5 - t4
    #plot(t, 'good_confirming_relations', 'confirming_weight')
    t_rev = reverse(t)
    t6 = time.time()
    print 'reverse t', t6 - t5
    pr_rev = nx.pagerank(t_rev)
    t7 = time.time()
    print 'pagerank', t7 - t6
    for node in pr:
        t.node[node]['predicting_weight'] = pr_rev[node]
    t8 = time.time()
    print 'write pr into graph', t8 - t7
    #plot(t, 'good_predicting_relations', 'predicting_weight')
    save_data(g, 'data/g')
    save_data(t, 'data/t')
    save_data(pr, 'data/conf_pr')
    save_data(pr_rev, 'data/pre_pr')
Example 4: compare_pagerank_algorithms
def compare_pagerank_algorithms(graph_file_name):
    algo_name = ["PageRank-DOK", "PageRank-CSR", "PageRank-NetworkX"]
    algo_fns = [construct_sparse_graph_dictionary_of_keys,
                construct_sparse_graph_compressed_sparse_row,
                construct_sparse_graph_networkx]
    for i in range(len(algo_name)):
        print "Testing:", algo_name[i]
        start_time = time.time()
        G = algo_fns[i](graph_file_name)
        end_time = time.time()
        time_for_graph_construction = end_time - start_time
        start_time = time.time()
        if algo_name[i] == "PageRank-NetworkX":
            nx.pagerank(G)
        else:
            compute_PageRank(G)
        end_time = time.time()
        time_for_pagerank_computation = end_time - start_time
        total_time = time_for_graph_construction + time_for_pagerank_computation
        print "Time for graph, page rank and total", time_for_graph_construction, time_for_pagerank_computation, total_time
Example 5: _ppage_rank
def _ppage_rank(self, u, v):
    personal = {nid: 0 for nid in self.IG.node}
    personal[u] = 1.0
    r_uv = nx.pagerank(self.IG, personalization=personal).get(v)
    personal[u] = 0
    personal[v] = 1.0
    r_vu = nx.pagerank(self.IG, personalization=personal).get(u)
    return r_uv + r_vu
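As a standalone illustration of the personalization argument used above, the following sketch (with a made-up graph and node names) restarts the random walk at a single source node, which is essentially what _ppage_rank does once for u and once for v:

import networkx as nx

G = nx.DiGraph([('u', 'x'), ('x', 'v'), ('v', 'u'), ('u', 'v')])

# Restart the random walk only at node 'u'; the scores then measure proximity to 'u'.
personal = {n: 0.0 for n in G}
personal['u'] = 1.0
print(nx.pagerank(G, personalization=personal).get('v'))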
Example 6: stats
def stats(self, g, degree, pagerank, bc):
    """Compute the requested stats and return as a dict."""
    options = self.options
    stats = {}
    if options.partial:
        seen = self.seen
        empty = self.empty
        nonempty_seen = [user for user in seen.keys() if user not in empty]
    # create degree CDF
    if degree:
        if options.partial:
            # The way below for computing degree only considers those for which
            # we have all the data.
            degree = [seen[user] for user in seen.keys()]
        else:
            # The method below considers all nodes, including those for which
            # we may not have all the data. Use w/caution on partial data sets.
            degree = nx.degree(g).values()
        stats["degree"] = {
            "type": "array",
            "values": degree
        }
    # compute PageRank. Note: we have to ignore empties.
    if pagerank:
        start = time.time()
        if options.partial:
            pagerank_dict = nx.pagerank(g)
            nonempty_seen = [user for user in seen.keys() if user not in empty]
            pagerank = ([pagerank_dict[user] for user in nonempty_seen])
        else:
            # Assumption: no isolated nodes
            pagerank = nx.pagerank(g).values()
        duration = time.time() - start
        print "time to gen pagerank: %0.3f sec" % duration
        #print pagerank
        stats["pagerank"] = {
            "type": "array",
            "values": pagerank
        }
    # compute betweenness centrality - should empties get added back to CDF?
    if bc:
        start = time.time()
        bc_dict = nx.betweenness_centrality(g)
        if options.partial:
            bc = ([bc_dict[user] for user in nonempty_seen])
        else:
            bc = bc_dict.values()
        duration = time.time() - start
        print "time to gen betweenness centrality: %0.3f sec" % duration
        stats["bc"] = {
            "type": "array",
            "values": bc
        }
    return stats
Example 7: train_weight
def train_weight(self, doc):
    self.type = 1
    self.key_sentences = []
    self.key_weight = []
    (self.sentences, self.words_all_filters, weight) = self.seg.segment_sentences_weight(text=doc)
    #print doc['title']
    (title) = self.seg.segment_sentence(sentence=doc['title'])
    #print title
    source = self.words_all_filters
    sim_func = self._get_similarity_standard
    sentences_num = len(source)
    self.graph = np.zeros((sentences_num, sentences_num))
    #import pdb
    weights = []
    summary = 0
    #print self.sentences[0]
    #pdb.set_trace()
    for x in xrange(sentences_num):
        lanlan = sim_func(source[x], title[0])
        w = weight[x] * lanlan
        weights.append(x)
        weights.append(w)
        summary += w
        #print w
    if summary != 0:
        dicts = {weights[i]: weights[i+1]/summary for i in range(0, len(weights), 2)}
    #pdb.set_trace()
    for x in xrange(sentences_num):
        for y in xrange(x, sentences_num):
            similarity = sim_func(source[x], source[y])
            self.graph[x, y] = similarity
            self.graph[y, x] = similarity
    #pdb.set_trace()
    # for x in xrange(sentences_num):
    #     row_sum = np.sum(self.graph[x, :])
    #     if row_sum > 0:
    #         self.graph[x, :] = self.graph[x, :] / row_sum
    nx_graph = nx.from_numpy_matrix(self.graph)
    if summary != 0:
        scores = nx.pagerank(G=nx_graph, personalization=dicts)
    else:
        scores = nx.pagerank(G=nx_graph)
    sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    # print sorted_scores
    for index, _ in sorted_scores:
        self.key_sentences.append(self.sentences[index])
        self.key_weight.append(weight[index])
Example 8: test_pagerank
def test_pagerank(self):
    G = self.G
    p = networkx.pagerank(G, alpha=0.9, tol=1.e-08)
    for n in G:
        assert_almost_equal(p[n], G.pagerank[n], places=4)
    nstart = dict((n, random.random()) for n in G)
    p = networkx.pagerank(G, alpha=0.9, tol=1.e-08, nstart=nstart)
    for n in G:
        assert_almost_equal(p[n], G.pagerank[n], places=4)
Example 9: pagerank
def pagerank(self, edge_weights={}, context=None, context_weight=10):
    G = self.graphs.unify(edge_weights)
    if not context:
        return nx.pagerank(G)
    else:
        weights = {}
        for k in G.nodes():
            weights[k] = 1
        weights[context] = context_weight
        return nx.pagerank(G, personalization=weights)
Example 10: run
def run(edges, show=False):
    G = nx.DiGraph()
    # G.add_weighted_edges_from([('A','B',0.5),('A','C',0.5)])
    G.add_edges_from(edges)
    if show:
        nx.draw(G, pos=nx.spring_layout(G))
        plt.show()
    nx.write_dot(G, './graph.dot')
    # dot -n -Tpng graph.dot >graph.png
    # print nx.hits(G, max_iter=10**3)  # tol=1e-4
    print nx.pagerank(G)
Example 11: team_strength
def team_strength(winner_losers):
    games_and_weights = defaultdict(int)
    for winner, loser, weight in winner_losers:
        games_and_weights[winner, loser] += weight
    win_graph = nx.DiGraph()
    loss_graph = nx.DiGraph()
    for (winner, loser), weight in games_and_weights.iteritems():
        win_graph.add_edge(loser, winner, weight=weight)
        loss_graph.add_edge(winner, loser, weight=weight)
    loss_ranks = nx.pagerank(loss_graph)
    return {k: v - loss_ranks[k] for k, v in nx.pagerank(win_graph).iteritems()}
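Assuming team_strength and its module-level imports (networkx as nx, defaultdict from collections) are in scope, a quick way to try it is with a few made-up results; the team names and weights below are purely hypothetical, with the third element standing in for something like a margin of victory:

# Hypothetical (winner, loser, weight) tuples; names and weights are invented.
results = [('A', 'B', 1), ('A', 'C', 2), ('B', 'C', 1), ('C', 'D', 3)]
strengths = team_strength(results)
for team, strength in sorted(strengths.items(), key=lambda kv: kv[1], reverse=True):
    print("%s  %+.4f" % (team, strength))

Teams that beat strong opponents accumulate PageRank in the win graph and lose it in the loss graph, so the returned difference is positive for strong teams and negative for weak ones.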
Example 12: test_pagerank
def test_pagerank(self):
    G = self.G
    p = networkx.pagerank(G, alpha=0.9, tol=1.0e-08)
    for n in G:
        assert_almost_equal(p[n], G.pagerank[n], places=4)
    nstart = dict((n, random.random()) for n in G)
    p = networkx.pagerank(G, alpha=0.9, tol=1.0e-08, nstart=nstart)
    for n in G:
        assert_almost_equal(p[n], G.pagerank[n], places=4)
    assert_raises(networkx.NetworkXError, networkx.pagerank, G, max_iter=0)
Example 13: pagerank
def pagerank(graph, records):
    """Reports on the highest (Page)Ranked individuals in the graph."""
    pr = nx.pagerank(graph)
    nodes = sorted(pr.items(), key=operator.itemgetter(1), reverse=True)[:records]
    print("Page Rank - top {} individuals".format(records))
    for n in nodes:
        print(" {:30}:\t{}".format(n[0], n[1]))
Example 14: return_summary
def return_summary(text):
    sent_list = nltk.tokenize.sent_tokenize(text)
    # drop sentences that consist only of punctuation
    sent_list = [sent for sent in sent_list if checkValidSent(sent)]
    # make a list of paragraphs - used to count the number of paragraphs
    pg = text.splitlines(0)
    pg = [par for par in pg if par != '']
    baseline = len(text)
    # if there are too many sentences, pick 150 at random
    if len(sent_list) > 150:
        sent_list = random.sample(sent_list, 150)
        baseline = sum([len(sent) for sent in sent_list])
    # build the graph used for pagerank
    text_graph = buildGraph(sent_list)
    sent_scores = nx.pagerank(text_graph, weight='weight')
    sent_sorted = sorted(sent_scores, key=sent_scores.get, reverse=True)
    summary = ""
    scount = 0
    # select a number of the most salient sentences
    while sent_sorted:
        sent = sent_sorted.pop(0)
        scount += 1
        if 4 * (len(sent) + len(summary)) >= baseline:
            break
        if scount > len(pg):
            break
        summary += sent + ' '
    return summary
Example 15: build
def build(self, matrix, skim_depth=10):
    """
    Build graph, with PageRanks on nodes.

    :param matrix: A term matrix.
    :param skim_depth: The number of sibling edges.
    """
    # Register nodes and edges.
    for anchor in progress.bar(matrix.terms):
        n1 = matrix.text.unstem(anchor)
        # Heaviest pair scores:
        pairs = matrix.anchored_pairs(anchor).items()
        for term, weight in list(pairs)[:skim_depth]:
            n2 = matrix.text.unstem(term)
            self.graph.add_edge(n1, n2, weight=weight)
    # Compute PageRanks.
    ranks = nx.pagerank(self.graph)
    first = max(ranks.values())
    # Convert to 0->1 ratios.
    ranks = {k: v/first for k, v in ranks.items()}
    # Annotate the nodes.
    nx.set_node_attributes(self.graph, 'pagerank', ranks)
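One caveat worth noting: the set_node_attributes call above uses the pre-2.0 NetworkX argument order (graph, name, values). In NetworkX 2.x and later the values come before the attribute name, so inside the same method the equivalent call would presumably be:

# NetworkX >= 2.0 argument order: (graph, values, name)
nx.set_node_attributes(self.graph, ranks, 'pagerank')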