本文整理汇总了Python中r2.models.Comment._byID方法的典型用法代码示例。如果您正苦于以下问题:Python Comment._byID方法的具体用法?Python Comment._byID怎么用?Python Comment._byID使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类r2.models.Comment的用法示例。
在下文中一共展示了Comment._byID方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: activate_names_requested_in
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def activate_names_requested_in(link):
    """Scan a link's top-level comments for valid server-name requests and
    activate as many names as the link's revenue bucket allows.

    Side effects: calls activate_names() and commits server_names,
    flair_text and flair_css_class onto the link.
    """
    tree = get_comment_tree(link)
    candidates = []
    if tree.tree:
        root_cids = tree.tree[None]
        loaded = chain.from_iterable(
            Comment._byID(batch, return_dict=False, data=True)
            for batch in in_chunks(root_cids))
        # highest-upvoted comments get first claim on a name
        for comment in sorted(loaded, key=lambda c: c._ups, reverse=True):
            if comment._spam or comment._deleted:
                continue
            match = valid_name_re.search(comment.body.strip())
            if match:
                candidates.append((comment, match.group(1)))
    # we activate one name for each 100% of rev goal met
    names = candidates[:link.revenue_bucket]
    activate_names(link, names)
    activated = [name for _comment, name in names]
    link.server_names = activated
    link.flair_text = ", ".join(activated) if names else "/dev/null"
    link.flair_css_class = "goal-bucket-%d" % link.revenue_bucket
    link._commit()
示例2: comment_reply_effect
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def comment_reply_effect(comment):
    """Trigger on_reply for every item effect attached to the thing this
    comment replies to (the parent comment, or the link itself for a
    top-level comment).
    """
    if comment.parent_id is None:
        parent = Link._byID(comment.link_id, data=True)
    else:
        parent = Comment._byID(comment.parent_id, data=True)
    effect_names = effects.get_all_effects([parent._fullname]).get(
        parent._fullname, [])
    for effect_name in effect_names:
        items.get_item(effect_name).on_reply(c.user, parent)
示例3: fix_bare_links
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def fix_bare_links(apply=False):
    """Rewrite bare links in imported HTML comments.

    Scans comments by sequential id starting at 1 and logs before/after
    bodies to fix_bare_links_before.txt / fix_bare_links_after.txt.
    Changes are only persisted when `apply` is True (dry-run by default).

    NOTE(review): Python 2 code (`print >>`, str.decode); the `apply`
    parameter shadows the Python 2 builtin of the same name.
    """
    from r2.models import Comment
    from r2.lib.db.thing import NotFound
    # transcript files for manual review of the rewrites
    fbefore = codecs.open('fix_bare_links_before.txt', 'w', 'utf-8')
    fafter = codecs.open('fix_bare_links_after.txt', 'w', 'utf-8')
    comment_id = 1
    try:
        # The comments are retrieved like this to prevent the API from
        # attempting to load all comments at once and then iterating over them
        while True:
            comment = Comment._byID(comment_id, data=True)
            # only touch comments that were imported and flagged as HTML
            if (hasattr(comment, 'ob_imported') and comment.ob_imported) and (hasattr(comment, 'is_html') and comment.is_html):
                body = comment.body
                # normalize byte strings to unicode before rewriting
                if isinstance(body, str):
                    try:
                        body = body.decode('utf-8')
                    except UnicodeDecodeError:
                        print >>sys.stderr, "UnicodeDecodeError, using 'ignore' error mode, comment: %d" % comment._id
                        body = body.decode('utf-8', errors='ignore')
                new_content = rewrite_bare_links(body)
                if new_content != body:
                    print >>fbefore, body
                    print >>fafter, new_content
                    if apply:
                        comment.body = new_content
                        comment._commit()
                    try:
                        print >>sys.stderr, "Rewrote comment %s" % comment.make_permalink_slow().encode('utf-8')
                    except UnicodeError:
                        # permalink may not be ascii-encodable; fall back to the id
                        print >>sys.stderr, "Rewrote comment with id: %d" % comment._id
            comment_id += 1
    except NotFound:
        # Assumes that comment ids are sequential and never deleted
        # (which I believe to true) -- wjm
        print >>sys.stderr, "Comment %d not found, exiting" % comment_id
        return
示例4: comment_event
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def comment_event(self, new_comment, request=None, context=None):
    """Create a 'comment' event for event-collector.

    new_comment: An r2.models.Comment object
    request, context: Should be pylons.request & pylons.c respectively
    """
    from r2.models import Comment, Link

    event = Event(
        topic="comment_events",
        event_type="ss.comment",
        time=new_comment._date,
        request=request,
        context=context,
        truncatable_field="comment_body",
    )

    # comment fields
    event.add("comment_id", new_comment._id)
    event.add("comment_fullname", new_comment._fullname)
    event.add_text("comment_body", new_comment.body)

    # fields of the post being commented on
    post = Link._byID(new_comment.link_id)
    event.add("post_id", post._id)
    event.add("post_fullname", post._fullname)
    event.add("post_created_ts", to_epoch_milliseconds(post._date))
    if post.promoted:
        event.add("post_is_promoted", bool(post.promoted))

    # If this is a top-level comment, parent is the same as the post
    parent = (Comment._byID(new_comment.parent_id)
              if new_comment.parent_id else post)
    event.add("parent_id", parent._id)
    event.add("parent_fullname", parent._fullname)
    event.add("parent_created_ts", to_epoch_milliseconds(parent._date))

    event.add("user_neutered", new_comment.author_slow._spam)
    event.add_subreddit_fields(new_comment.subreddit_slow)
    self.save_event(event)
示例5: on_use
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def on_use(self, user, target):
    """Apply this item's effect to the target comment plus its children
    and grandchildren, then deal tiered damage to each generation."""
    link = Link._byID(target.link_id)
    tree = get_comment_tree(link)

    child_ids = tree.tree[target._id]
    grandchild_ids = [gid for cid in child_ids for gid in tree.tree[cid]]

    by_id = Comment._byID(child_ids + grandchild_ids, data=True,
                          return_dict=True)
    children = [by_id[cid] for cid in child_ids]
    grandchildren = [by_id[gid] for gid in grandchild_ids]

    for affected in itertools.chain([target], children, grandchildren):
        effects.add_effect(user, affected, self.item_name)

    # damage falls off with distance from the target
    self.apply_damage_and_log(user, [target], self.direct_damage)
    self.apply_damage_and_log(user, children, self.child_damage)
    self.apply_damage_and_log(user, grandchildren, self.grandchild_damage)
示例6: _populate
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def _populate(after_id=None, estimate=54301242):
    """Backfill comment sort data by walking all comments, newest first.

    after_id: optional comment id to resume after (for restarts).
    estimate: rough total used only for progress reporting.
    """
    from r2.models import Comment, CommentSortsCache, desc
    from r2.lib.db import tdb_cassandra
    from r2.lib import utils

    # larger has a chance to decrease the number of Cassandra writes,
    # but the probability is low
    chunk_size = 5000

    query = Comment._query(Comment.c._spam == (True, False),
                           Comment.c._deleted == (True, False),
                           sort=desc("_date"))
    if after_id is not None:
        query._after(Comment._byID(after_id))

    things = utils.fetch_things2(query, chunk_size=chunk_size)
    things = utils.progress(things, verbosity=chunk_size, estimate=estimate)

    for batch in utils.in_chunks(things, chunk_size):
        # comments without a link_id can't have their votes updated; skip them
        batch = [thing for thing in batch if hasattr(thing, "link_id")]
        update_comment_votes(batch, write_consistency_level=tdb_cassandra.CL.ONE)
示例7: get_items
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def get_items(self):
    """Select and load the comments to render for this CommentBuilder.

    Walks the cached comment tree for self.link in sorted order under one
    of three entry modes: specific requested children, a focal comment
    with up to `context` ancestors, or the full tree. The rest of the
    method (wrapping/return) is truncated in this source excerpt.
    """
    timer = g.stats.get_timer("CommentBuilder.get_items")
    timer.start()
    r = link_comments_and_sort(self.link, self.sort.col)
    cids, cid_tree, depth, parents, sorter = r
    timer.intermediate("load_storage")
    # if the focal comment is missing from the cached depth map the tree
    # is inconsistent; fall back to rendering the full tree
    if self.comment and not self.comment._id in depth:
        g.log.error("Hack - self.comment (%d) not in depth. Defocusing..."
                    % self.comment._id)
        self.comment = None
    more_recursions = {}
    dont_collapse = []
    candidates = []  # heap of (sort_val, comment_id), popped below
    offset_depth = 0
    if self.children:
        # requested specific child comments
        children = [child._id for child in self.children
                    if child._id in cids]
        self.update_candidates(candidates, sorter, children)
        dont_collapse.extend(comment for sort_val, comment in candidates)
    elif self.comment:
        # requested the tree from a specific comment
        # construct path back to top level from this comment, a maximum of
        # `context` levels
        comment = self.comment._id
        path = []
        while comment and len(path) <= self.context:
            path.append(comment)
            comment = parents[comment]
        dont_collapse.extend(path)
        # rewrite cid_tree so the parents lead only to the requested comment
        for comment in path:
            parent = parents[comment]
            cid_tree[parent] = [comment]
        # start building comment tree from earliest comment
        self.update_candidates(candidates, sorter, path[-1])
        # set offset_depth because we may not be at the top level and can
        # show deeper levels
        offset_depth = depth.get(path[-1], 0)
    else:
        # full tree requested, start with the top level comments
        top_level_comments = cid_tree.get(None, ())
        self.update_candidates(candidates, sorter, top_level_comments)
    timer.intermediate("pick_candidates")
    if not candidates:
        timer.stop()
        return []
    # choose which comments to show
    items = []
    while (self.num is None or len(items) < self.num) and candidates:
        sort_val, comment_id = heapq.heappop(candidates)
        if comment_id not in cids:
            continue
        comment_depth = depth[comment_id] - offset_depth
        if comment_depth < self.max_depth:
            items.append(comment_id)
            # add children
            if comment_id in cid_tree:
                children = cid_tree[comment_id]
                self.update_candidates(candidates, sorter, children)
        elif (self.continue_this_thread and
              parents.get(comment_id) is not None):
            # the comment is too deep to add, so add a MoreRecursion for
            # its parent
            parent_id = parents[comment_id]
            if parent_id not in more_recursions:
                w = Wrapped(MoreRecursion(self.link, depth=0,
                                          parent_id=parent_id))
            else:
                w = more_recursions[parent_id]
            w.children.append(comment_id)
            more_recursions[parent_id] = w
    timer.intermediate("pick_comments")
    # retrieve num_children for the visible comments
    top_level_candidates = [comment for sort_val, comment in candidates
                            if depth.get(comment, 0) == 0]
    needs_num_children = items + top_level_candidates
    num_children = get_num_children(needs_num_children, cid_tree)
    timer.intermediate("calc_num_children")
    comments = Comment._byID(items, data=True, return_dict=False,
                             stale=self.stale)
    # ... (remainder of the method truncated in this source excerpt) ...
示例8: _get_comments
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def _get_comments(self):
    """Pick the comment ids to display and load them, stashing all
    intermediate tree state on self for later rendering steps.

    Same candidate-selection algorithm as get_items: specific children,
    a focal comment with up to `context` ancestors, or the full tree.
    Sets self.comments, self.top_level_candidates, self.timer,
    self.cid_tree, self.depth, self.more_recursions, self.offset_depth
    and self.dont_collapse.
    """
    timer = g.stats.get_timer("CommentBuilder.get_items")
    timer.start()
    r = link_comments_and_sort(self.link, self.sort.col)
    cids, cid_tree, depth, parents, sorter = r
    timer.intermediate("load_storage")
    # if the focal comment is missing from the cached depth map the tree
    # is inconsistent; fall back to rendering the full tree
    if self.comment and not self.comment._id in depth:
        g.log.error("Hack - self.comment (%d) not in depth. Defocusing..."
                    % self.comment._id)
        self.comment = None
    more_recursions = {}
    dont_collapse = []
    candidates = []  # heap of (sort_val, comment_id), popped below
    offset_depth = 0
    if self.children:
        # requested specific child comments
        children = [cid for cid in self.children if cid in cids]
        self.update_candidates(candidates, sorter, children)
        dont_collapse.extend(comment for sort_val, comment in candidates)
    elif self.comment:
        # requested the tree from a specific comment
        # construct path back to top level from this comment, a maximum of
        # `context` levels
        comment = self.comment._id
        path = []
        while comment and len(path) <= self.context:
            path.append(comment)
            comment = parents[comment]
        dont_collapse.extend(path)
        # rewrite cid_tree so the parents lead only to the requested comment
        for comment in path:
            parent = parents[comment]
            cid_tree[parent] = [comment]
        # start building comment tree from earliest comment
        self.update_candidates(candidates, sorter, path[-1])
        # set offset_depth because we may not be at the top level and can
        # show deeper levels
        offset_depth = depth.get(path[-1], 0)
    else:
        # full tree requested, start with the top level comments
        top_level_comments = cid_tree.get(None, ())
        self.update_candidates(candidates, sorter, top_level_comments)
    timer.intermediate("pick_candidates")
    # choose which comments to show
    items = []
    while (self.num is None or len(items) < self.num) and candidates:
        sort_val, comment_id = heapq.heappop(candidates)
        if comment_id not in cids:
            continue
        comment_depth = depth[comment_id] - offset_depth
        if comment_depth < self.max_depth:
            items.append(comment_id)
            # add children
            if comment_id in cid_tree:
                children = cid_tree[comment_id]
                self.update_candidates(candidates, sorter, children)
        elif (self.continue_this_thread and
              parents.get(comment_id) is not None):
            # the comment is too deep to add, so add a MoreRecursion for
            # its parent
            parent_id = parents[comment_id]
            if parent_id not in more_recursions:
                w = Wrapped(MoreRecursion(self.link, depth=0,
                                          parent_id=parent_id))
            else:
                w = more_recursions[parent_id]
            w.children.append(comment_id)
            more_recursions[parent_id] = w
    timer.intermediate("pick_comments")
    # leftover top-level candidates feed "load more comments" links
    self.top_level_candidates = [comment for sort_val, comment in candidates
                                 if depth.get(comment, 0) == 0]
    self.comments = Comment._byID(
        items, data=True, return_dict=False, stale=self.stale)
    timer.intermediate("lookup_comments")
    self.timer = timer
    self.cid_tree = cid_tree
    self.depth = depth
    self.more_recursions = more_recursions
    self.offset_depth = offset_depth
    self.dont_collapse = dont_collapse
示例9: link_comments_and_sort
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def link_comments_and_sort(link_id, sort):
    """Load the cached comment tree for a link along with parents and
    per-comment sort values, repairing missing/inconsistent cache entries.

    Returns (cids, cid_tree, depth, num_children, parents, sorter).
    """
    from r2.models import Comment, CommentSortsCache

    # This has grown sort of organically over time. Right now the
    # cache of the comments tree consists in three keys:
    # 1. The comments_key: A tuple of
    #      (cids, comment_tree, depth, num_children)
    #    given:
    #      cids =:= [comment_id]
    #      comment_tree =:= dict(comment_id -> [comment_id])
    #      depth =:= dict(comment_id -> int depth)
    #      num_children =:= dict(comment_id -> int num_children)
    # 2. The parent_comments_key =:= dict(comment_id -> parent_id)
    # 3. The comments_sorts keys =:= dict(comment_id36 -> float).
    #    These are represented by a Cassandra model
    #    (CommentSortsCache) rather than a permacache key. One of
    #    these exists for each sort (hot, new, etc)

    # performance hack: preload these into the LocalCache at the same
    # time
    g.permacache.get_multi([comments_key(link_id),
                            parent_comments_key(link_id)])

    cids, cid_tree, depth, num_children = link_comments(link_id)

    # load the sorter
    sorter = _get_comment_sorter(link_id, sort)

    sorter_needed = []
    if cids and not sorter:
        # full sorter cache miss: every comment needs a sort value
        sorter_needed = cids
        g.log.debug("comment_tree.py: sorter (%s) cache miss for Link %s"
                    % (sort, link_id))
        sorter = {}

    # NOTE(review): this recomputes sorter_needed unconditionally, which
    # also catches partial misses and makes the assignment above
    # redundant — presumably intentional; confirm against upstream.
    sorter_needed = [x for x in cids if x not in sorter]
    if cids and sorter_needed:
        g.log.debug(
            "Error in comment_tree: sorter %r inconsistent (missing %d e.g. %r)"
            % (sort_comments_key(link_id, sort), len(sorter_needed), sorter_needed[:10]))
        if not g.disallow_db_writes:
            # refresh the persisted sort values before recomputing
            update_comment_votes(Comment._byID(sorter_needed, data=True, return_dict=False))
        sorter.update(_comment_sorter_from_cids(sorter_needed, sort))

    # load the parents
    key = parent_comments_key(link_id)
    parents = g.permacache.get(key)
    if parents is None:
        g.log.debug("comment_tree.py: parents cache miss for Link %s"
                    % link_id)
        parents = {}
    elif cids and not all(x in parents for x in cids):
        # cached parents dict is stale or incomplete; force a rebuild
        g.log.debug("Error in comment_tree: parents inconsistent for Link %s"
                    % link_id)
        parents = {}

    if not parents:
        with g.make_lock(lock_key(link_id)):
            # reload from the cache so the sorter and parents are
            # maximally consistent
            r = g.permacache.get(comments_key(link_id))
            cids, cid_tree, depth, num_children = r

            key = parent_comments_key(link_id)
            if not parents:
                parents = _parent_dict_from_tree(cid_tree)
                g.permacache.set(key, parents)

    return cids, cid_tree, depth, num_children, parents, sorter
示例10: _comment_sorter_from_cids
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def _comment_sorter_from_cids(cids, sort):
    """Return a {comment_id: sort_value} mapping for the given comment ids
    under the given sort."""
    from r2.models import Comment
    loaded = Comment._byID(cids, data=False, return_dict=False)
    return {comment._id: _get_sort_value(comment, sort)
            for comment in loaded}
示例11: get_items
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def get_items(self):
    """Select and load the comments to render for this CommentBuilder.

    Walks the cached comment tree for self.link in sorted order under one
    of three entry modes: specific requested children, a focal comment
    with up to `context` ancestors, or the full tree. The rest of the
    method (wrapping/return) is truncated in this source excerpt.
    """
    timer = g.stats.get_timer("CommentBuilder.get_items")
    timer.start()
    r = link_comments_and_sort(self.link, self.sort.col)
    cids, cid_tree, depth, parents, sorter = r
    timer.intermediate("load_storage")
    # if the focal comment is missing from the cached depth map the tree
    # is inconsistent; fall back to rendering the full tree
    if self.comment and not self.comment._id in depth:
        g.log.error("Hack - self.comment (%d) not in depth. Defocusing..."
                    % self.comment._id)
        self.comment = None
    more_recursions = {}
    dont_collapse = []
    candidates = []  # heap of (sort_val, comment_id), popped below
    offset_depth = 0
    if self.children:
        # requested specific child comments
        children = [cid for cid in self.children if cid in cids]
        self.update_candidates(candidates, sorter, children)
        dont_collapse.extend(comment for sort_val, comment in candidates)
    elif self.comment:
        # requested the tree from a specific comment
        # construct path back to top level from this comment, a maximum of
        # `context` levels
        comment = self.comment._id
        path = []
        while comment and len(path) <= self.context:
            path.append(comment)
            comment = parents[comment]
        dont_collapse.extend(path)
        # rewrite cid_tree so the parents lead only to the requested comment
        for comment in path:
            parent = parents[comment]
            cid_tree[parent] = [comment]
        # start building comment tree from earliest comment
        self.update_candidates(candidates, sorter, path[-1])
        # set offset_depth because we may not be at the top level and can
        # show deeper levels
        offset_depth = depth.get(path[-1], 0)
    else:
        # full tree requested, start with the top level comments
        top_level_comments = cid_tree.get(None, ())
        self.update_candidates(candidates, sorter, top_level_comments)
    timer.intermediate("pick_candidates")
    if not candidates:
        timer.stop()
        return []
    # choose which comments to show
    items = []
    while (self.num is None or len(items) < self.num) and candidates:
        sort_val, comment_id = heapq.heappop(candidates)
        if comment_id not in cids:
            continue
        comment_depth = depth[comment_id] - offset_depth
        if comment_depth < self.max_depth:
            items.append(comment_id)
            # add children
            if comment_id in cid_tree:
                children = cid_tree[comment_id]
                self.update_candidates(candidates, sorter, children)
        elif (self.continue_this_thread and
              parents.get(comment_id) is not None):
            # the comment is too deep to add, so add a MoreRecursion for
            # its parent
            parent_id = parents[comment_id]
            if parent_id not in more_recursions:
                w = Wrapped(MoreRecursion(self.link, depth=0,
                                          parent_id=parent_id))
            else:
                w = more_recursions[parent_id]
            w.children.append(comment_id)
            more_recursions[parent_id] = w
    timer.intermediate("pick_comments")
    # retrieve num_children for the visible comments
    top_level_candidates = [comment for sort_val, comment in candidates
                            if depth.get(comment, 0) == 0]
    needs_num_children = items + top_level_candidates
    num_children = get_num_children(needs_num_children, cid_tree)
    timer.intermediate("calc_num_children")
    comments = Comment._byID(items, data=True, return_dict=False,
                             stale=self.stale)
    timer.intermediate("lookup_comments")
    # ... (remainder of the method truncated in this source excerpt) ...
示例12: _handle_sort
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def _handle_sort(msgs, chan):
cids = list(set(int(msg.body) for msg in msgs))
comments = Comment._byID(cids, data = True, return_dict = False)
print comments
update_comment_votes(comments)
示例13: message_notification_email
# 需要导入模块: from r2.models import Comment [as 别名]
# 或者: from r2.models.Comment import _byID [as 别名]
def message_notification_email(data):
    """Queues a system email for a new message notification.

    data maps users to pending-notification info; each eligible user gets
    one batched email covering up to MAX_MESSAGES_PER_BATCH unread
    messages, subject to a per-user daily rate limit and a batching
    delay window. The tail of this function is truncated in this excerpt.
    """
    from r2.lib.pages import MessageNotificationEmail

    timer_start = time.time()

    MAX_EMAILS_PER_USER = 30
    MAX_MESSAGES_PER_BATCH = 5
    total_messages_sent = 0
    inbox_item_lookup_count = 0

    unique_user_list = make_message_dict_unique(data)
    g.log.info(
        "there are %s users for this batch of emails" % len(unique_user_list))

    for datum in unique_user_list.itervalues():
        user = Account._byID36(datum['to'], data=True)
        g.log.info('user fullname: %s' % user._fullname)

        # In case a user has enabled the preference while it was enabled for
        # them, but we've since turned it off. We need to explicitly state the
        # user because we're not in the context of an HTTP request from them.
        if not feature.is_enabled('orangereds_as_emails', user=user):
            g.log.info('feature not enabled for user: %s' % user._fullname)
            continue

        # Don't send more than MAX_EMAILS_PER_USER per user per day
        user_notification_ratelimit = SimpleRateLimit(
            name="email_message_notification_%s" % user._id36,
            seconds=int(datetime.timedelta(days=1).total_seconds()),
            limit=MAX_EMAILS_PER_USER,
        )
        if not user_notification_ratelimit.check():
            g.log.info('message blocked at user_notification_ratelimit: %s' %
                       user_notification_ratelimit)
            continue

        # Get all new messages that haven't been emailed
        inbox_items = get_unread_and_unemailed(user)
        inbox_item_lookup_count += 1
        if not inbox_items:
            g.log.info('no inbox items found for %s' % user._fullname)
            continue

        # inbox_items appears ordered oldest-first — TODO confirm
        newest_inbox_rel = inbox_items[-1][0]
        oldest_inbox_rel = inbox_items[0][0]

        now = datetime.datetime.now(g.tz)
        start_date = datetime.datetime.strptime(datum['start_date'],
                                                "%Y-%m-%d %H:%M:%S").replace(tzinfo=g.tz)

        # If messages are still being queued within the cooling period or
        # messages have been queued past the max delay, then keep waiting
        # a little longer to batch all of the messages up
        if (start_date != newest_inbox_rel._date and
                now < newest_inbox_rel._date + NOTIFICATION_EMAIL_COOLING_PERIOD and
                now < oldest_inbox_rel._date + NOTIFICATION_EMAIL_MAX_DELAY):
            g.log.info('messages still being batched for: %s' % user._fullname)
            continue

        messages = []
        message_count = 0
        more_unread_messages = False
        non_preview_usernames = set()

        # Batch messages to email starting with older messages
        for inbox_rel, message in inbox_items:
            # Get sender_name, replacing with display_author if it exists
            g.log.info('user fullname: %s, message fullname: %s' % (
                user._fullname, message._fullname))
            sender_name = get_sender_name(message)

            if message_count >= MAX_MESSAGES_PER_BATCH:
                # prevent duplicate usernames for template display
                non_preview_usernames.add(sender_name)
                more_unread_messages = True
            else:
                link = None
                parent = None
                if isinstance(message, Comment):
                    permalink = message.make_permalink_slow(context=1,
                                                            force_domain=True)
                    if message.parent_id:
                        parent = Comment._byID(message.parent_id, data=True)
                    else:
                        link = Link._byID(message.link_id, data=True)
                else:
                    permalink = message.make_permalink(force_domain=True)

                message_type = get_message_type(message, parent, user, link)

                messages.append({
                    "author_name": sender_name,
                    "message_type": message_type,
                    "body": message.body,
                    "date": long_datetime(message._date),
                    "permalink": permalink,
                    "id": message._id,
                    # ... (remainder of the function truncated in this source excerpt) ...