本文整理汇总了Python中r2.lib.utils.tup函数的典型用法代码示例。如果您正苦于以下问题:Python tup函数的具体用法?Python tup怎么用?Python tup使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了tup函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: add_comments
def add_comments(comments):
    """Add comments to the CommentTree and update scores.

    comments may be a single Comment or an iterable of Comments; they may
    span multiple links.
    """
    from r2.models.builder import write_comment_orders

    # Normalize once up front; the original called tup(comments) twice
    # (once for link_ids, once for the grouping loop).
    comments = tup(comments)
    link_ids = [comment.link_id for comment in comments]
    links_by_id = Link._byID(link_ids)

    # Group comments by their parent link so each link's tree is updated
    # in a single pass.
    comments_by_link_id = defaultdict(list)
    for comment in comments:
        comments_by_link_id[comment.link_id].append(comment)

    for link_id, link_comments in comments_by_link_id.iteritems():
        link = links_by_id[link_id]
        timer = g.stats.get_timer('comment_tree.add.1')
        timer.start()

        # scores are written before the tree update so every comment in
        # the tree has a score available
        write_comment_scores(link, link_comments)
        timer.intermediate('scores')

        CommentTree.add_comments(link, link_comments)
        timer.intermediate('update')

        write_comment_orders(link)
        timer.intermediate('write_order')

        timer.stop()
示例2: add_comments
def add_comments(comments):
    """Add comments to the CommentTree and update scores."""
    from r2.models.builder import write_comment_orders

    link_ids = [comment.link_id for comment in tup(comments)]
    links = Link._byID(link_ids, data=True)

    comments = tup(comments)
    by_link = defaultdict(list)
    for comment in comments:
        by_link[comment.link_id].append(comment)

    for link_id, link_comments in by_link.iteritems():
        link = links[link_id]
        timer = g.stats.get_timer(
            'comment_tree.add.%s' % link.comment_tree_version)
        timer.start()

        # Scores are written before the CommentTree update because the
        # tree requires a score to exist for every comment it contains.
        for sort in ("_controversy", "_confidence", "_score"):
            scores = {comment._id36: getattr(comment, sort)
                      for comment in link_comments}
            CommentScoresByLink.set_scores(link, sort, scores)
        qa_scores = _get_qa_comment_scores(link, link_comments)
        CommentScoresByLink.set_scores(link, "_qa", qa_scores)
        timer.intermediate('scores')

        with CommentTree.mutation_context(link, timeout=180):
            try:
                timer.intermediate('lock')
                tree = CommentTree.by_link(link, timer)
                timer.intermediate('get')
                tree.add_comments(link_comments)
                timer.intermediate('update')
            except InconsistentCommentTreeError:
                # failed to add a comment to the CommentTree because its
                # parent is missing from the tree. this comment will be
                # lost forever unless a rebuild is performed.
                comment_ids = [comment._id for comment in link_comments]
                g.log.error(
                    "comment_tree_inconsistent: %s %s" % (link, comment_ids))
                g.stats.simple_event('comment_tree_inconsistent')
                return

            # stay under the same lock so write_comment_orders sees
            # exactly the CommentTree version that was just written
            write_comment_orders(link)
            timer.intermediate('write_order')

        timer.stop()
示例3: add_comments
def add_comments(comments):
    """Add comments to each link's cached comment tree and refresh votes.

    Falls back to recomputing a link's tree from scratch if the in-place
    update fails for any reason.
    """
    links = Link._byID([com.link_id for com in tup(comments)], data=True)

    comments = tup(comments)
    link_map = {}
    for com in comments:
        link_map.setdefault(com.link_id, []).append(com)

    for link_id, coms in link_map.iteritems():
        link = links[link_id]
        timer = g.stats.get_timer('comment_tree.add.%s'
                                  % link.comment_tree_version)
        timer.start()
        try:
            with CommentTree.mutation_context(link):
                timer.intermediate('lock')
                cache = get_comment_tree(link, timer=timer)
                timer.intermediate('get')
                cache.add_comments(coms)
                timer.intermediate('update')
        except Exception:
            # was a bare "except:", which also trapped SystemExit and
            # KeyboardInterrupt; the best-effort fallback is preserved
            g.log.exception(
                'add_comments_nolock failed for link %s, recomputing tree',
                link_id)
            # calculate it from scratch
            get_comment_tree(link, _update=True, timer=timer)
        timer.stop()
        update_comment_votes(coms)
示例4: get_recommendations
def get_recommendations(srs, count=10, source=SRC_MULTIREDDITS, to_omit=None, match_set=True, over18=False):
    """Return subreddits recommended if you like the given subreddits.

    Args:
    - srs is one Subreddit object or a list of Subreddits
    - count is total number of results to return
    - source is a prefix telling which set of recommendations to use
    - to_omit is a single subreddit id36 or a list of id36s that should
      not be included (useful for omitting recs that were already rejected)
    - match_set=True will return recs that are similar to each other, useful
      for matching the "theme" of the original set
    - over18 content is filtered unless over18=True or one of the original
      srs is over18
    """
    srs = tup(srs)
    to_omit = tup(to_omit) if to_omit else []

    # over-fetch because some results may be filtered out below
    rec_id36s = SRRecommendation.for_srs([sr._id36 for sr in srs],
                                         to_omit, count * 2, source,
                                         match_set=match_set)

    # always check for private subreddits at runtime since type might change
    candidates = Subreddit._byID36(rec_id36s, return_dict=False)
    recs = [sr for sr in candidates if is_visible(sr)]

    # adult recs are allowed only when explicitly requested or when one of
    # the originals was itself over_18
    if not over18 and not any(sr.over_18 for sr in srs):
        recs = [sr for sr in recs if not sr.over_18]

    return recs[:count]
示例5: add_comments
def add_comments(comments):
    """Add comments to the CommentTree and update scores."""
    from r2.models.builder import write_comment_orders

    link_ids = [comment.link_id for comment in tup(comments)]
    links = Link._byID(link_ids, data=True)

    comments = tup(comments)
    by_link = defaultdict(list)
    for comment in comments:
        by_link[comment.link_id].append(comment)

    for link_id, link_comments in by_link.iteritems():
        link = links[link_id]
        timer = g.stats.get_timer("comment_tree.add.1")
        timer.start()

        # scores are written before the CommentTree because the scores
        # must exist for all comments in the tree
        for sort in ("_controversy", "_confidence", "_score"):
            scores = {comment._id36: getattr(comment, sort)
                      for comment in link_comments}
            CommentScoresByLink.set_scores(link, sort, scores)
        qa_scores = _get_qa_comment_scores(link, link_comments)
        CommentScoresByLink.set_scores(link, "_qa", qa_scores)
        timer.intermediate("scores")

        CommentTree.add_comments(link, link_comments)
        timer.intermediate("update")

        write_comment_orders(link)
        timer.intermediate("write_order")

        timer.stop()
示例6: _fast_query
def _fast_query(cls, sub, obj, name, data=True, eager_load=True, thing_data=False, timestamp_optimize=False):
    """Query relations named `name` between `sub` and `obj` things.

    sub/obj may each be a single thing or a list; things are bucketed by
    concrete class and each (type, type) pair with a registered rel is
    queried. Returns a merged result dict.
    """
    def type_dict(items):
        # bucket things by their concrete class
        types = {}
        for i in items:
            types.setdefault(i.__class__, []).append(i)
        return types

    sub_dict = type_dict(tup(sub))
    obj_dict = type_dict(tup(obj))

    # for each pair of types, see if we have a query to send
    res = {}
    for types, rel in cls.rels.iteritems():
        t1, t2 = types
        # "in" replaces the deprecated dict.has_key()
        if t1 in sub_dict and t2 in obj_dict:
            res.update(
                rel._fast_query(
                    sub_dict[t1],
                    obj_dict[t2],
                    name,
                    data=data,
                    eager_load=eager_load,
                    thing_data=thing_data,
                    timestamp_optimize=timestamp_optimize,
                )
            )
    return res
示例7: get_actions
def get_actions(cls, srs, mod=None, action=None, after=None, reverse=False, count=1000):
    """
    Get a ColumnQuery that yields ModAction objects according to
    specified criteria.

    Raises NotImplementedError when both mod and action are given.
    """
    # resolve the paging anchor: accept a uuid string, a UUID, or an
    # existing ModAction; anything else means "start from the beginning"
    if after and isinstance(after, basestring):
        after = cls._byID(UUID(after))
    elif after and isinstance(after, UUID):
        after = cls._byID(after)
    if not isinstance(after, cls):
        after = None

    srs = tup(srs)

    if not mod and not action:
        rowkeys = [sr._id36 for sr in srs]
        q = ModActionBySR.query(rowkeys, after=after, reverse=reverse, count=count)
    elif mod and not action:
        mods = tup(mod)
        # use a distinct loop variable so the "mod" parameter is not
        # shadowed inside the comprehensions (it was in the original)
        pairs = itertools.product([sr._id36 for sr in srs],
                                  [m._id36 for m in mods])
        rowkeys = ['%s_%s' % (sr_id36, mod_id36) for sr_id36, mod_id36 in pairs]
        q = ModActionBySRMod.query(rowkeys, after=after, reverse=reverse, count=count)
    elif not mod and action:
        rowkeys = ['%s_%s' % (sr._id36, action) for sr in srs]
        q = ModActionBySRAction.query(rowkeys, after=after, reverse=reverse, count=count)
    else:
        raise NotImplementedError("Can't query by both mod and action")
    return q
示例8: add_comments
def add_comments(comments):
    """Write new and deleted comments into each link's cached comment tree
    and refresh comment vote totals.

    Falls back to recomputing a link's tree from scratch if the in-place
    update fails.
    """
    links = Link._byID([com.link_id for com in tup(comments)], data=True)

    comments = tup(comments)
    link_map = {}
    for com in comments:
        link_map.setdefault(com.link_id, []).append(com)

    for link_id, coms in link_map.iteritems():
        link = links[link_id]
        # renamed from add_comments/delete_comments: the originals
        # shadowed this function's own name inside the loop
        to_add = [comment for comment in coms if not comment._deleted]
        to_delete = (comment for comment in coms if comment._deleted)
        timer = g.stats.get_timer("comment_tree.add.%s" % link.comment_tree_version)
        timer.start()
        try:
            with CommentTree.mutation_context(link):
                timer.intermediate("lock")
                cache = get_comment_tree(link, timer=timer)
                timer.intermediate("get")
                if to_add:
                    cache.add_comments(to_add)
                for comment in to_delete:
                    cache.delete_comment(comment, link)
                timer.intermediate("update")
        except Exception:
            # was a bare "except:", which also trapped SystemExit and
            # KeyboardInterrupt; the best-effort fallback is preserved
            g.log.exception("add_comments_nolock failed for link %s, recomputing tree", link_id)
            # calculate it from scratch
            get_comment_tree(link, _update=True, timer=timer)
        timer.stop()
        update_comment_votes(coms)
示例9: _fast_query
def _fast_query(cls, thing1_ids, thing2_ids, properties=None, **kw):
    """Find all of the relations of this class between all of the
    members of thing1_ids and thing2_ids"""
    thing1_ids, single1 = tup(thing1_ids, True)
    thing2_ids, single2 = tup(thing2_ids, True)

    if not thing1_ids or not thing2_ids:
        # nothing to permute
        return {}

    if properties is not None:
        properties = set(properties)
        # all relations must load these properties, even if unrequested
        properties.update(("thing1_id", "thing2_id"))

    # permute all of the pairs
    rowkeys = set(cls._rowkey(t1, t2)
                  for t1 in thing1_ids
                  for t2 in thing2_ids)
    rels = cls._byID(rowkeys, properties=properties).values()

    # single x single callers get the one rel (or NotFound) directly
    if single1 and single2:
        if not rels:
            raise NotFound("<%s %r>" % (cls.__name__, cls._rowkey(thing1_ids[0], thing2_ids[0])))
        assert len(rels) == 1
        return rels[0]

    return {(rel.thing1_id, rel.thing2_id): rel for rel in rels}
示例10: add_comments
def add_comments(comments):
    """Write new and deleted comments into each link's cached comment
    tree, rebuilding the tree when it turns out to be inconsistent."""
    links = Link._byID([com.link_id for com in tup(comments)], data=True)

    comments = tup(comments)
    link_map = {}
    for com in comments:
        link_map.setdefault(com.link_id, []).append(com)

    for link_id, coms in link_map.iteritems():
        link = links[link_id]
        added = [comment for comment in coms if not comment._deleted]
        deleted = (comment for comment in coms if comment._deleted)
        timer = g.stats.get_timer('comment_tree.add.%s'
                                  % link.comment_tree_version)
        timer.start()
        try:
            with CommentTree.mutation_context(link, timeout=30):
                timer.intermediate('lock')
                cache = get_comment_tree(link, timer=timer)
                timer.intermediate('get')
                if added:
                    cache.add_comments(added)
                for comment in deleted:
                    cache.delete_comment(comment, link)
                timer.intermediate('update')
        except InconsistentCommentTreeError:
            # a comment's parent was missing from the tree; recompute it
            comment_ids = [comment._id for comment in coms]
            g.log.exception(
                'add_comments_nolock failed for link %s %s, recomputing',
                link_id, comment_ids)
            rebuild_comment_tree(link, timer=timer)
        timer.stop()
        update_comment_votes(coms)
示例11: add_queries
def add_queries(queries, insert_items=None, delete_items=None, foreground=False):
    """Adds multiple queries to the query queue. If insert_items or
    delete_items is specified, the query may not need to be
    recomputed against the database."""
    if not g.write_query_queue:
        return

    for q in queries:
        if insert_items and q.can_insert():
            log.debug("Inserting %s into query %s" % (insert_items, q))
            if foreground:
                q.insert(insert_items)
            else:
                worker.do(q.insert, insert_items)
        elif delete_items and q.can_delete():
            log.debug("Deleting %s from query %s" % (delete_items, q))
            if foreground:
                q.delete(delete_items)
            else:
                worker.do(q.delete, delete_items)
        else:
            raise Exception("Cannot update query %r!" % (q,))

    # dual-write any queries that are being migrated to the new query cache
    with CachedQueryMutator() as m:
        migrating = [q.new_query for q in queries if hasattr(q, 'new_query')]
        if insert_items:
            for query in migrating:
                m.insert(query, tup(insert_items))
        if delete_items:
            for query in migrating:
                m.delete(query, tup(delete_items))
示例12: get_actions
def get_actions(cls, srs, mod=None, action=None, after=None, reverse=False, count=1000):
    """
    Get a ColumnQuery that yields ModAction objects according to
    specified criteria.
    """
    # resolve the paging anchor: accept a uuid string, a UUID, or an
    # existing ModAction; anything else means "start from the beginning"
    if after and isinstance(after, basestring):
        after = cls._byID(UUID(after))
    elif after and isinstance(after, UUID):
        after = cls._byID(after)
    if not isinstance(after, cls):
        after = None

    srs = tup(srs)

    if not mod and not action:
        rowkeys = [sr._id36 for sr in srs]
        q = ModActionBySR.query(rowkeys, after=after, reverse=reverse, count=count)
    elif mod:
        mods = tup(mod)
        # row keys are "sr_mod" or "sr_mod_action" depending on whether an
        # action was given
        key = '%s_%s' if not action else '%%s_%%s_%s' % action
        # use a distinct loop variable so the "mod" parameter is not
        # shadowed inside the comprehensions (it was in the original)
        pairs = itertools.product([sr._id36 for sr in srs],
                                  [m._id36 for m in mods])
        rowkeys = [key % (sr_id36, mod_id36) for sr_id36, mod_id36 in pairs]
        view = ModActionBySRActionMod if action else ModActionBySRMod
        q = view.query(rowkeys, after=after, reverse=reverse, count=count)
    else:
        rowkeys = ['%s_%s' % (sr._id36, action) for sr in srs]
        q = ModActionBySRAction.query(rowkeys, after=after, reverse=reverse, count=count)
    return q
示例13: get_recommendations
def get_recommendations(srs, count=10, source=SRC_MULTIREDDITS, to_omit=None):
    """Return subreddits recommended if you like the given subreddits.

    Args:
    - srs is one Subreddit object or a list of Subreddits
    - count is total number of results to return
    - source is a prefix telling which set of recommendations to use
    - to_omit is one Subreddit object or a list of Subreddits that should
      not be included (useful for omitting recs that were already rejected)
    """
    srs = tup(srs)
    to_omit = tup(to_omit) if to_omit else []

    # over-fetch because some results may be filtered out below
    rec_id36s = SRRecommendation.for_srs([sr._id36 for sr in srs],
                                         [o._id36 for o in to_omit],
                                         count * 2, source)

    # always check for private subreddits at runtime since type might change
    candidates = Subreddit._byID36(rec_id36s, return_dict=False)
    recs = [sr for sr in candidates if sr.type != "private"]

    # don't recommend adult srs unless one of the originals was over_18
    if not any(sr.over_18 for sr in srs):
        recs = [sr for sr in recs if not sr.over_18]

    return recs[:count]
示例14: _fast_query
def _fast_query(cls, thing1_ids, thing2_ids, **kw):
    """Find all of the relations of this class between all of the
    members of thing1_ids and thing2_ids"""
    thing1_ids, single1 = tup(thing1_ids, True)
    thing2_ids, single2 = tup(thing2_ids, True)

    # permute all of the pairs
    ids = set('%s_%s' % (t1, t2)
              for t1 in thing1_ids
              for t2 in thing2_ids)
    rels = cls._byID(ids).values()

    # does anybody actually use us this way?
    if single1 and single2:
        if not rels:
            raise NotFound("<%s '%s_%s'>" % (cls.__name__,
                                             thing1_ids[0],
                                             thing2_ids[0]))
        assert len(rels) == 1
        return rels[0]

    return dict(((rel.thing1_id, rel.thing2_id), rel)
                for rel in rels)
示例15: _somethinged
def _somethinged(cls, rel, user, link, name):
    """Look up `name` relations of `rel` between the given user(s) and
    link(s), loading thing data with the timestamp optimization."""
    users = tup(user)
    links = tup(link)
    return rel._fast_query(users, links, name=name,
                           thing_data=True,
                           timestamp_optimize=True)