This article collects typical usage examples of the Python class r2.models.link.Link. If you are wondering what the Link class does, how to use it, or what real code that uses it looks like, the curated examples below may help. Fifteen code examples of the Link class are shown, sorted by popularity by default.
Example 1: add_comments
def add_comments(comments):
    """Add comments to the CommentTree and update scores."""
    from r2.models.builder import write_comment_orders

    link_ids = [comment.link_id for comment in tup(comments)]
    links_by_id = Link._byID(link_ids)

    comments = tup(comments)
    comments_by_link_id = defaultdict(list)
    for comment in comments:
        comments_by_link_id[comment.link_id].append(comment)

    for link_id, link_comments in comments_by_link_id.iteritems():
        link = links_by_id[link_id]

        timer = g.stats.get_timer('comment_tree.add.1')
        timer.start()

        write_comment_scores(link, link_comments)
        timer.intermediate('scores')

        CommentTree.add_comments(link, link_comments)
        timer.intermediate('update')

        write_comment_orders(link)
        timer.intermediate('write_order')

        timer.stop()
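
Because `tup` normalizes its argument, `add_comments` accepts either a single Comment or an iterable of them. A minimal usage sketch, assuming the comments have already been fetched; the queue-message format here is hypothetical, not part of this page's code:

def process_new_comments(msg):
    # hypothetical message body: comma-separated comment fullnames
    fullnames = msg.body.split(',')
    comments = Comment._by_fullname(fullnames, data=True, return_dict=False)
    # add_comments groups by link_id internally, so a mixed batch is fine
    add_comments(comments)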
Example 2: add_comments
def add_comments(comments):
    """Add comments to the CommentTree and update scores."""
    from r2.models.builder import write_comment_orders

    link_ids = [comment.link_id for comment in tup(comments)]
    links = Link._byID(link_ids, data=True)

    comments = tup(comments)
    comments_by_link_id = defaultdict(list)
    for comment in comments:
        comments_by_link_id[comment.link_id].append(comment)

    for link_id, link_comments in comments_by_link_id.iteritems():
        link = links[link_id]

        timer = g.stats.get_timer(
            'comment_tree.add.%s' % link.comment_tree_version)
        timer.start()

        # write scores before CommentTree because the scores must exist for
        # all comments in the tree
        for sort in ("_controversy", "_confidence", "_score"):
            scores_by_comment = {
                comment._id36: getattr(comment, sort)
                for comment in link_comments
            }
            CommentScoresByLink.set_scores(link, sort, scores_by_comment)

        scores_by_comment = _get_qa_comment_scores(link, link_comments)
        CommentScoresByLink.set_scores(link, "_qa", scores_by_comment)
        timer.intermediate('scores')

        with CommentTree.mutation_context(link, timeout=180):
            try:
                timer.intermediate('lock')
                comment_tree = CommentTree.by_link(link, timer)
                timer.intermediate('get')
                comment_tree.add_comments(link_comments)
                timer.intermediate('update')
            except InconsistentCommentTreeError:
                # failed to add a comment to the CommentTree because its
                # parent is missing from the tree. this comment will be lost
                # forever unless a rebuild is performed.
                comment_ids = [comment._id for comment in link_comments]
                g.log.error(
                    "comment_tree_inconsistent: %s %s" % (link, comment_ids))
                g.stats.simple_event('comment_tree_inconsistent')
                return

            # do this under the same lock because we want to ensure we are
            # using the same version of the CommentTree as was just written
            write_comment_orders(link)
            timer.intermediate('write_order')

        timer.stop()
Example 3: update_score
def update_score(obj, up_change, down_change, new_valid_thing, old_valid_thing):
    obj._incr('_ups', up_change)
    obj._incr('_downs', down_change)
    if isinstance(obj, Comment):
        if hasattr(obj, 'parent_id'):
            Comment._byID(obj.parent_id).incr_descendant_karma(
                [], up_change - down_change)
        Link._byID(obj.link_id)._incr(
            '_descendant_karma', up_change - down_change)
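
The `up_change` and `down_change` arguments are deltas rather than totals, so a vote flip is expressed as two offsetting changes. A sketch of the calling convention, assuming vote directions of +1, 0, or -1; the `vote_deltas` helper is illustrative, not part of r2:

def vote_deltas(old_dir, new_dir):
    # each term is 1 if that vote is an upvote (or downvote), else 0
    up_change = (1 if new_dir > 0 else 0) - (1 if old_dir > 0 else 0)
    down_change = (1 if new_dir < 0 else 0) - (1 if old_dir < 0 else 0)
    return up_change, down_change

# flipping an upvote to a downvote: up_change == -1, down_change == +1
up_change, down_change = vote_deltas(1, -1)
update_score(comment, up_change, down_change, True, True)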
Example 4: add_comments
def add_comments(comments):
    links = Link._byID([com.link_id for com in tup(comments)], data=True)

    comments = tup(comments)
    link_map = {}
    for com in comments:
        link_map.setdefault(com.link_id, []).append(com)

    for link_id, coms in link_map.iteritems():
        link = links[link_id]

        add_comments = [comment for comment in coms if not comment._deleted]
        delete_comments = (comment for comment in coms if comment._deleted)

        timer = g.stats.get_timer('comment_tree.add.%s'
                                  % link.comment_tree_version)
        timer.start()

        try:
            with CommentTree.mutation_context(link, timeout=30):
                timer.intermediate('lock')
                cache = get_comment_tree(link, timer=timer)
                timer.intermediate('get')

                if add_comments:
                    cache.add_comments(add_comments)

                for comment in delete_comments:
                    cache.delete_comment(comment, link)

                timer.intermediate('update')
        except InconsistentCommentTreeError:
            comment_ids = [comment._id for comment in coms]
            g.log.exception(
                'add_comments_nolock failed for link %s %s, recomputing',
                link_id, comment_ids)

            rebuild_comment_tree(link, timer=timer)

        timer.stop()
        update_comment_votes(coms)
Example 5: calc_rising
def calc_rising():
    sr_count = count.get_link_counts()
    link_count = dict((k, v[0]) for k, v in sr_count.iteritems())
    link_names = Link._by_fullname(sr_count.keys(), data=True)

    # max is half the average of the top 10 counts
    counts = link_count.values()
    counts.sort(reverse=True)
    maxcount = sum(counts[:10]) / 20

    # prune the list
    rising = [(n, link_names[n].sr_id)
              for n in link_names.keys() if link_count[n] < maxcount]

    cur_time = datetime.now(g.tz)

    def score(pair):
        name = pair[0]
        link = link_names[name]
        hours = (cur_time - link._date).seconds / 3600 + 1
        return float(link._ups) / (max(link_count[name], 1) * hours)

    def r(x):
        return 1 if x > 0 else -1 if x < 0 else 0

    rising.sort(lambda x, y: r(score(y) - score(x)))

    return rising
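
The score is upvotes divided by (link views x age in hours). One caveat with the code as written: `timedelta.seconds` holds only the sub-day remainder, so a 25-hour-old link reports 3600 seconds. A standalone sketch of the intended formula using `total_seconds()` (Python 2.7+), with made-up numbers:

from datetime import timedelta

def rising_score(ups, views, age):
    # age is a timedelta; clamp views so we never divide by zero
    hours = age.total_seconds() / 3600 + 1
    return float(ups) / (max(views, 1) * hours)

# a 2-hour-old link with 30 ups and 100 views: 30 / (100 * 3) = 0.1
print rising_score(30, 100, timedelta(hours=2))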
Example 6: calc_rising
def calc_rising():
    # As far as I can tell this can only ever return a series of 0's, as
    # that is what is hard-coded in. In which case nothing should ever be
    # rising unless I explicitly make it so.
    sr_count = count.get_link_counts()
    link_count = dict((k, v[0]) for k, v in sr_count.iteritems())
    link_names = Link._by_fullname(sr_count.keys(), data=True)

    # max is half the average of the top 10 counts
    counts = link_count.values()
    counts.sort(reverse=True)
    maxcount = sum(counts[:10]) / 2. * min(10, len(counts))

    # prune the list
    print link_count
    print link_names
    print maxcount
    rising = [(n, link_names[n].sr_id)
              for n in link_names.keys() if False or link_count[n] < maxcount]
    print rising

    cur_time = datetime.now(g.tz)

    def score(pair):
        name = pair[0]
        link = link_names[name]
        hours = (cur_time - link._date).seconds / 3600 + 1
        return float(link._ups) / (max(link_count[name], 1) * hours)

    def r(x):
        return 1 if x > 0 else -1 if x < 0 else 0

    rising.sort(lambda x, y: r(score(y) - score(x)))

    return rising
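
One subtlety in the `maxcount` line: `/` and `*` have equal precedence and associate left to right, so `sum(counts[:10]) / 2. * min(10, len(counts))` evaluates as `(sum(...) / 2.0) * min(...)`, not the "half the average of the top 10 counts" the comment describes. Getting the stated intent requires parenthesizing the divisor:

counts = [10, 10, 10, 10]
print sum(counts[:10]) / 2. * min(10, len(counts))    # (40 / 2.0) * 4 = 80.0
print sum(counts[:10]) / (2. * min(10, len(counts)))  # 40 / 8.0 = 5.0, half the average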
Example 7: add_comments
def add_comments(comments):
    links = Link._byID([com.link_id for com in tup(comments)], data=True)

    comments = tup(comments)
    link_map = {}
    for com in comments:
        link_map.setdefault(com.link_id, []).append(com)

    for link_id, coms in link_map.iteritems():
        link = links[link_id]

        add_comments = [comment for comment in coms if not comment._deleted]
        delete_comments = (comment for comment in coms if comment._deleted)

        timer = g.stats.get_timer("comment_tree.add.%s" % link.comment_tree_version)
        timer.start()

        try:
            with CommentTree.mutation_context(link):
                timer.intermediate("lock")
                cache = get_comment_tree(link, timer=timer)
                timer.intermediate("get")

                if add_comments:
                    cache.add_comments(add_comments)

                for comment in delete_comments:
                    cache.delete_comment(comment, link)

                timer.intermediate("update")
        except:
            g.log.exception("add_comments_nolock failed for link %s, recomputing tree", link_id)

            # calculate it from scratch
            get_comment_tree(link, _update=True, timer=timer)

        timer.stop()
        update_comment_votes(coms)
Example 8: update_comment_votes
def update_comment_votes(comments, write_consistency_level=None):
    from r2.models import CommentSortsCache

    comments = tup(comments)

    link_map = {}
    for com in comments:
        link_map.setdefault(com.link_id, []).append(com)

    all_links = Link._byID(link_map.keys(), data=True)

    comment_trees = {}
    for link in all_links.values():
        comment_trees[link._id] = get_comment_tree(link)

    for link_id, coms in link_map.iteritems():
        link = all_links[link_id]
        for sort in ("_controversy", "_hot", "_confidence", "_score", "_date",
                     "_qa"):
            cid_tree = comment_trees[link_id].tree
            sorter = _comment_sorter_from_cids(coms, sort, link, cid_tree,
                                               by_36=True)

            # Cassandra always uses the id36 instead of the integer
            # ID, so we'll map that first before sending it
            c_key = sort_comments_key(link_id, sort)
            CommentSortsCache._set_values(
                c_key, sorter,
                write_consistency_level=write_consistency_level)
Example 9: link_comments
def link_comments(link_id, _update=False):
    key = comments_key(link_id)

    r = g.permacache.get(key)

    if r and not _update:
        return r
    else:
        # This operation can take longer than most (note the inner locks);
        # better to time out the request temporarily than to deal with an
        # inconsistent tree
        with g.make_lock(lock_key(link_id), timeout=180):
            r = _load_link_comments(link_id)
            # rebuild parent dict
            cids, cid_tree, depth, num_children, num_comments = r
            r = r[:-1]  # Remove num_comments from r; we don't need to cache it.
            g.permacache.set(parent_comments_key(link_id),
                             _parent_dict_from_tree(cid_tree))

            g.permacache.set(key, r)

            # update the link's comment count and schedule it for search
            # reindexing
            link = Link._byID(link_id, data=True)
            link.num_comments = num_comments
            link._commit()
            from r2.lib.db.queries import changed
            changed(link)

        return r
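
The shape of this function is read-through caching with a lock around the expensive rebuild: serve the cached tree when possible, otherwise serialize the recompute behind a per-link lock and write back both the tree and the derived parent dict. A stripped-down sketch of the same pattern, where `cache`, `make_lock`, and `compute` are placeholders rather than r2 APIs:

def cached_or_rebuild(cache, make_lock, key, compute, _update=False):
    value = cache.get(key)
    if value and not _update:
        return value
    # the rebuild is slow, so serialize it; blocking a request briefly
    # beats letting two workers write inconsistent values
    with make_lock('lock:' + key, timeout=180):
        value = compute()
        cache.set(key, value)
    return value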
Example 10: add_comments
def add_comments(comments):
    """Add comments to the CommentTree and update scores."""
    from r2.models.builder import write_comment_orders

    link_ids = [comment.link_id for comment in tup(comments)]
    links = Link._byID(link_ids, data=True)

    comments = tup(comments)
    comments_by_link_id = defaultdict(list)
    for comment in comments:
        comments_by_link_id[comment.link_id].append(comment)

    for link_id, link_comments in comments_by_link_id.iteritems():
        link = links[link_id]

        timer = g.stats.get_timer("comment_tree.add.1")
        timer.start()

        # write scores before CommentTree because the scores must exist for
        # all comments in the tree
        for sort in ("_controversy", "_confidence", "_score"):
            scores_by_comment = {comment._id36: getattr(comment, sort)
                                 for comment in link_comments}
            CommentScoresByLink.set_scores(link, sort, scores_by_comment)

        scores_by_comment = _get_qa_comment_scores(link, link_comments)
        CommentScoresByLink.set_scores(link, "_qa", scores_by_comment)
        timer.intermediate("scores")

        CommentTree.add_comments(link, link_comments)
        timer.intermediate("update")

        write_comment_orders(link)
        timer.intermediate("write_order")

        timer.stop()
Example 11: add_comments
def add_comments(comments):
    links = Link._byID([com.link_id for com in tup(comments)], data=True)

    comments = tup(comments)
    link_map = {}
    for com in comments:
        link_map.setdefault(com.link_id, []).append(com)

    for link_id, coms in link_map.iteritems():
        link = links[link_id]

        timer = g.stats.get_timer('comment_tree.add.%s'
                                  % link.comment_tree_version)
        timer.start()

        try:
            with CommentTree.mutation_context(link):
                timer.intermediate('lock')
                cache = get_comment_tree(link, timer=timer)
                timer.intermediate('get')
                cache.add_comments(coms)
                timer.intermediate('update')
        except:
            g.log.exception(
                'add_comments_nolock failed for link %s, recomputing tree',
                link_id)

            # calculate it from scratch
            get_comment_tree(link, _update=True, timer=timer)

        timer.stop()
        update_comment_votes(coms)
Example 12: GET_document
def GET_document(self):
    try:
        # no cookies on errors
        c.cookies.clear()

        code = request.GET.get('code', '')
        srname = request.GET.get('srname', '')
        takedown = request.GET.get('takedown', "")
        if srname:
            c.site = Subreddit._by_name(srname)
        if c.render_style not in self.allowed_render_styles:
            return str(code)
        elif takedown and code == '404':
            link = Link._by_fullname(takedown)
            return pages.TakedownPage(link).render()
        elif code == '403':
            return self.send403()
        elif code == '500':
            return redditbroke % rand_strings.sadmessages
        elif code == '503':
            c.response.status_code = 503
            c.response.headers['Retry-After'] = 1
            c.response.content = toofast
            return c.response
        elif code == '304':
            if request.GET.has_key('x-sup-id'):
                c.response.headers['x-sup-id'] = request.GET.get('x-sup-id')
            return c.response
        elif c.site:
            return self.send404()
        else:
            return "page not found"
    except:
        return handle_awful_failure("something really bad just happened.")
Example 13: GET_document
def GET_document(self):
    try:
        c.errors = c.errors or ErrorSet()
        # clear cookies the old fashioned way
        c.cookies = Cookies()

        code = request.GET.get('code', '')
        try:
            code = int(code)
        except ValueError:
            code = 404
        srname = request.GET.get('srname', '')
        takedown = request.GET.get('takedown', "")

        # StatusBasedRedirect will override this anyway, but we need this
        # here for pagecache to see.
        response.status_int = code

        if srname:
            c.site = Subreddit._by_name(srname)

        if request.GET.has_key('allow_framing'):
            c.allow_framing = bool(request.GET['allow_framing'] == '1')

        if code in (204, 304):
            # NEVER return a content body on 204/304 or downstream
            # caches may become very confused.
            if request.GET.has_key('x-sup-id'):
                x_sup_id = request.GET.get('x-sup-id')
                if '\r\n' not in x_sup_id:
                    response.headers['x-sup-id'] = x_sup_id
            return ""
        elif c.render_style not in self.allowed_render_styles:
            return str(code)
        elif c.render_style in extensions.API_TYPES:
            data = request.environ.get('extra_error_data', {'error': code})
            if request.environ.get("WANT_RAW_JSON"):
                return scriptsafe_dumps(data)
            return websafe_json(json.dumps(data))
        elif takedown and code == 404:
            link = Link._by_fullname(takedown)
            return pages.TakedownPage(link).render()
        elif code == 403:
            return self.send403()
        elif code == 429:
            return self.send429()
        elif code == 500:
            randmin = {'admin': random.choice(self.admins)}
            failien_url = make_failien_url()
            sad_message = safemarkdown(rand_strings.sadmessages % randmin)
            return redditbroke % (failien_url, sad_message)
        elif code == 503:
            return self.send503()
        elif c.site:
            return self.send404()
        else:
            return "page not found"
    except Exception as e:
        return handle_awful_failure("ErrorController.GET_document: %r" % e)
Example 14: GET_document
def GET_document(self):
    try:
        c.errors = c.errors or ErrorSet()
        # clear cookies the old fashioned way
        c.cookies = Cookies()

        code = request.GET.get("code", "")
        try:
            code = int(code)
        except ValueError:
            code = 404
        srname = request.GET.get("srname", "")
        takedown = request.GET.get("takedown", "")

        # StatusBasedRedirect will override this anyway, but we need this
        # here for pagecache to see.
        response.status_int = code

        if srname:
            c.site = Subreddit._by_name(srname)

        if code in (204, 304):
            # NEVER return a content body on 204/304 or downstream
            # caches may become very confused.
            if request.GET.has_key("x-sup-id"):
                x_sup_id = request.GET.get("x-sup-id")
                if "\r\n" not in x_sup_id:
                    response.headers["x-sup-id"] = x_sup_id
            return ""
        elif c.render_style not in self.allowed_render_styles:
            return str(code)
        elif c.render_style in extensions.API_TYPES:
            data = request.environ.get("extra_error_data", {"error": code})
            return websafe_json(json.dumps(data))
        elif takedown and code == 404:
            link = Link._by_fullname(takedown)
            return pages.TakedownPage(link).render()
        elif code == 403:
            return self.send403()
        elif code == 429:
            return self.send429()
        elif code == 500:
            randmin = {"admin": random.choice(self.admins)}
            failien_url = make_failien_url()
            return redditbroke % (failien_url, rand_strings.sadmessages % randmin)
        elif code == 503:
            return self.send503()
        elif c.site:
            return self.send404()
        else:
            return "page not found"
    except:
        return handle_awful_failure("something really bad just happened.")
Example 15: process_link
def process_link(msg):
    fname = msg.body
    link = Link._by_fullname(msg.body, data=True)

    try:
        TimeoutFunction(_set_media, 30)(link)
    except TimeoutFunctionException:
        print "Timed out on %s" % fname
    except KeyboardInterrupt:
        raise
    except:
        print "Error fetching %s" % fname
        print traceback.format_exc()
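
`TimeoutFunction(_set_media, 30)(link)` bounds how long the media fetch may run before `TimeoutFunctionException` is raised. A plausible sketch of such a wrapper using `signal.alarm`; this is an assumption about how it might be implemented, not r2's actual code, and it only works on Unix in the main thread:

import signal

class TimeoutFunctionException(Exception):
    pass

class TimeoutFunction(object):
    # hedged sketch: call self.function, raising if it exceeds the deadline
    def __init__(self, function, timeout):
        self.function = function
        self.timeout = timeout

    def _handle_timeout(self, signum, frame):
        raise TimeoutFunctionException()

    def __call__(self, *args, **kwargs):
        old_handler = signal.signal(signal.SIGALRM, self._handle_timeout)
        signal.alarm(self.timeout)
        try:
            return self.function(*args, **kwargs)
        finally:
            signal.alarm(0)  # cancel the pending alarm
            signal.signal(signal.SIGALRM, old_handler)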