本文整理汇总了Python中r2.models.Link._query方法的典型用法代码示例。如果您正苦于以下问题:Python Link._query方法的具体用法?Python Link._query怎么用?Python Link._query使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类r2.models.Link
的用法示例。
在下文中一共展示了Link._query方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_links
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def get_links(sr, sort, time, merge_batched=True):
    """Return link listing results for a subreddit.

    Builds the base time-filtered query; when the query cache is enabled
    and the sort/time combination is precomputed in daily batches, a
    separate by-day result set is merged in.
    """
    base_q = Link._query(Link.c.sr_id == sr._id,
                         sort=db_sort(sort),
                         data=True)
    if time != 'all':
        base_q._filter(db_times[time])
    results = make_results(base_q)

    # see the discussion above batched_time_times
    wants_batch_merge = (merge_batched
                         and g.use_query_cache
                         and sort in batched_time_sorts
                         and time in batched_time_times)
    if wants_batch_merge:
        day_q = Link._query(Link.c.sr_id == sr._id,
                            sort=db_sort(sort), data=True)
        day_q._filter(db_times['day'])
        results = merge_results(results, make_results(day_q))
    return results
示例2: run
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def run(verbose=True, sleep_time = 60, num_items = 1):
    """Poll forever for links newer than the stored cursor and enqueue
    them on the 'indextank_changes' queue, advancing the cursor each pass.

    The cursor (a Link _id) must already exist in g.cache under
    "indextank_cursor"; it is persisted back after every batch so a
    restart resumes where the last run stopped.
    """
    key = "indextank_cursor"
    cursor = g.cache.get(key)
    if cursor is None:
        # the cursor has to be seeded out-of-band before this job can run
        raise ValueError("%s is not set!" % key)
    cursor = int(cursor)
    while True:
        if verbose:
            print "Looking for %d items with _id < %d" % (num_items, cursor)
        # walk links by descending _id, resuming just past the cursor link
        q = Link._query(sort = desc('_id'),
                        limit = num_items)
        q._after(Link._byID(cursor))
        last_date = None
        for item in q:
            cursor = item._id
            last_date = item._date
            # transient delivery: it's acceptable to lose a message on
            # broker restart, since the cursor will re-cover the gap
            amqp.add_item('indextank_changes', item._fullname,
                          message_id = item._fullname,
                          delivery_mode = amqp.DELIVERY_TRANSIENT)
        # persist progress even if the batch was empty
        g.cache.set(key, cursor)
        if verbose:
            if last_date:
                last_date = last_date.strftime("%Y-%m-%d")
            print ("Just enqueued %d items. New cursor=%s (%s). Sleeping %d seconds."
                   % (num_items, cursor, last_date, sleep_time))
        sleep(sleep_time)
示例3: get_hot
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def get_hot(sr):
    """Return the 150 hottest links for *sr* via a cached query.

    Per-sr cache keys (expire_key/access_key) decide whether the cached
    query result may be read or must be recomputed; the winning link's
    fullname is stashed under top_key(sr) for later use.
    """
    q = Link._query(Link.c.sr_id == sr._id,
                    sort = desc('_hot'),
                    write_cache = True,
                    limit = 150)
    iden = q._iden()
    read_cache = True
    #if query is in the cache, the expire flag is true, and the access
    #time is old, set read_cache = False
    if cache.get(iden) is not None:
        if cache.get(expire_key(sr)):
            access_time = cache.get(access_key(sr))
            if not access_time or datetime.now() > access_time + expire_delta:
                cache.delete(expire_key(sr))
                read_cache = False
    #if the query isn't in the cache, set read_cache to false so we
    #record the access time
    else:
        read_cache = False
    if not read_cache:
        # recomputing: stamp the access time so the next expiry check
        # has a baseline to compare against
        cache.set(access_key(sr), datetime.now())
    q._read_cache = read_cache
    res = list(q)
    #set the #1 link so we can ignore it later. expire after TOP_CACHE
    #just in case something happens and that sr doesn't update
    if res:
        cache.set(top_key(sr), res[0]._fullname, TOP_CACHE)
    return res
示例4: get_links
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def get_links(self, sort, time):
    """Return a query over all links, sorted and optionally time-filtered."""
    from r2.models import Link
    from r2.lib.db import queries

    link_query = Link._query(sort=queries.db_sort(sort))
    if time != 'all':
        link_query._filter(queries.db_times[time])
    return link_query
示例5: get_spam_filtered_links
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def get_spam_filtered_links(sr_id):
    """NOTE: This query will never run unless someone does an "update" on
    it, but that will probably timeout. Use insert_spam_filtered_links."""
    rules = (Link.c.sr_id == sr_id,
             Link.c._spam == True,
             Link.c.verdict != 'mod-removed')
    return Link._query(*rules, sort=db_sort('new'))
示例6: get_links
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def get_links(self, sort, time):
    """Return links submitted by the logged-in user's important friends.

    With the query cache enabled, merges each friend's precomputed
    'new' listing; otherwise falls back to a direct author_id query.
    Raises UserRequiredException when nobody is logged in.
    """
    from r2.lib.db import queries
    from r2.models import Link
    from r2.controllers.errors import UserRequiredException

    if not c.user_is_loggedin:
        raise UserRequiredException

    friends = self.get_important_friends(c.user._id)
    if not friends:
        return []

    if g.use_query_cache:
        # with the precomputer enabled, this Subreddit only supports
        # being sorted by 'new'. it would be nice to have a
        # cleaner UI than just blatantly ignoring their sort,
        # though
        sort = "new"
        time = "all"
        friend_accounts = Account._byID(friends, return_dict=False)
        cached_results = [queries.get_submitted(friend, sort, time)
                          for friend in friend_accounts]
        return queries.MergedCachedResults(cached_results)

    q = Link._query(Link.c.author_id == friends,
                    sort=queries.db_sort(sort), data=True)
    if time != "all":
        q._filter(queries.db_times[time])
    return q
示例7: import_missing_comments
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def import_missing_comments(filename, apply_changes=False):
"""Imports the comments from the supplied YAML"""
missing_comments = yaml.load(open(filename), Loader=yaml.CLoader)
global dryrun
dryrun = not apply_changes
total_posts = len(missing_comments)
post_count = 0
for post in missing_comments:
if post['author'] != 'Eliezer Yudkowsky':
# print "Skipping non-EY post (%s): %s" % (post['author'], post['permalink'])
continue
ob_permalink = adjust_permalink(post['permalink'])
# Attempt to retrieve the post that was imported into Less Wrong
imported_post = list(Link._query(Link.c.ob_permalink == ob_permalink, data=True))
if len(imported_post) < 1:
print "Unable to retrieve imported post: %s" % ob_permalink
continue
elif len(imported_post) > 1:
print "Got more than one result for: %s" % ob_permalink
raise Exception
else:
imported_post = imported_post[0]
post_count += 1
try:
print "Importing (%d of %d) comments on: %s" % (post_count, total_posts, imported_post.canonical_url)
except UnicodeError:
print "Importing comments on post (%d of %d)"
process_comments_on_post(imported_post, post['comments'])
示例8: test_cassasavehide
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def test_cassasavehide():
    """Exercise a save/unsave round-trip through CassandraSave and the
    SavesByAccount index, using the most recent account and link as
    fixtures against a live datastore."""
    from r2.models import Account, Link, CassandraSave, SavesByAccount
    from r2.lib.db import tdb_cassandra
    # newest account and newest link serve as the test fixtures
    a = list(Account._query(sort=desc('_date'),
                            limit=1))[0]
    l = list(Link._query(sort=desc('_date'),
                         limit=1))[0]
    # start from a clean slate: destroy any pre-existing save record
    try:
        csh = CassandraSave._fast_query(a._id36, l._id36)
        print "Warning! Deleting!", csh
        CassandraSave._fast_query(a._id36, l._id36)._destroy()
    except tdb_cassandra.NotFound:
        pass
    csh = CassandraSave._save(a, l)
    csh._commit()
    assert CassandraSave._fast_query(a._id36, l._id36) == csh
    # check for the SavesByAccount object too
    assert SavesByAccount._byID(a._id36)[csh._id] == csh._id
    csh._destroy()
    try:
        # NOTE(review): the comparison result is discarded -- this line
        # really just calls _fast_query and expects it to raise NotFound
        CassandraSave._fast_query(a._id36, l._id36) == csh
        raise Exception("shouldn't exist after destroying")
    except tdb_cassandra.NotFound:
        pass
    # the index row may be gone entirely (NotFound) or merely emptied
    try:
        assert csh._id not in SavesByAccount._byID(a._id36, properties = csh._id)._values()
    except tdb_cassandra.NotFound:
        pass
示例9: gen_keys
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def gen_keys():
    """Yield every cache key that needs (re)warming: the promoted-links
    memo, the all-comments query, per-link comment keys, per-account
    listing/inbox keys, and per-subreddit listing/moderation keys."""
    yield promoted_memo_key
    # just let this one do its own writing
    load_all_reddits()
    yield queries.get_all_comments().iden
    # every link, including spammed and deleted ones
    l_q = Link._query(Link.c._spam == (True, False),
                      Link.c._deleted == (True, False),
                      sort=desc('_date'),
                      data=True,
                      )
    for link in fetch_things2(l_q, verbosity):
        yield comments_key(link._id)
        yield last_modified_key(link, 'comments')
    # every account, spammed or not
    a_q = Account._query(Account.c._spam == (True, False),
                         sort=desc('_date'),
                         )
    for account in fetch_things2(a_q, verbosity):
        yield messages_key(account._id)
        yield last_modified_key(account, 'overview')
        yield last_modified_key(account, 'commented')
        yield last_modified_key(account, 'submitted')
        yield last_modified_key(account, 'liked')
        yield last_modified_key(account, 'disliked')
        yield queries.get_comments(account, 'new', 'all').iden
        yield queries.get_submitted(account, 'new', 'all').iden
        yield queries.get_liked(account).iden
        yield queries.get_disliked(account).iden
        yield queries.get_hidden(account).iden
        yield queries.get_saved(account).iden
        yield queries.get_inbox_messages(account).iden
        yield queries.get_unread_messages(account).iden
        yield queries.get_inbox_comments(account).iden
        yield queries.get_unread_comments(account).iden
        yield queries.get_inbox_selfreply(account).iden
        yield queries.get_unread_selfreply(account).iden
        yield queries.get_sent(account).iden
    # every subreddit, spammed or not
    sr_q = Subreddit._query(Subreddit.c._spam == (True, False),
                            sort=desc('_date'),
                            )
    for sr in fetch_things2(sr_q, verbosity):
        yield last_modified_key(sr, 'stylesheet_contents')
        yield queries.get_links(sr, 'hot', 'all').iden
        yield queries.get_links(sr, 'new', 'all').iden
        for sort in 'top', 'controversial':
            for time in 'hour', 'day', 'week', 'month', 'year', 'all':
                yield queries.get_links(sr, sort, time,
                                        merge_batched=False).iden
        yield queries.get_spam_links(sr).iden
        yield queries.get_spam_comments(sr).iden
        yield queries.get_reported_links(sr).iden
        yield queries.get_reported_comments(sr).iden
        yield queries.get_subreddit_messages(sr).iden
        yield queries.get_unread_subreddit_messages(sr).iden
示例10: _query_post
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def _query_post(self, *args):
    """Return the first Link matching the given query rules, or None."""
    matches = list(Link._query(*args, data=True))
    return matches[0] if matches else None
示例11: get_domain_links
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def get_domain_links(domain, sort, time):
    """Return listing results for links whose URL is on the given domain."""
    from r2.lib.db import operators

    domain_rule = operators.domain(Link.c.url) == filters._force_utf8(domain)
    domain_q = Link._query(domain_rule, sort=db_sort(sort), data=True)
    if time != "all":
        domain_q._filter(db_times[time])
    return make_results(domain_q)
示例12: get_unmoderated_links
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def get_unmoderated_links(sr_id):
    """Return a query for links in *sr_id* awaiting a moderator verdict."""
    q = Link._query(Link.c.sr_id == sr_id,
                    Link.c._spam == (True, False),
                    sort=db_sort('new'))
    # Doesn't really work because will not return Links with no verdict
    spam_pending = and_(Link.c._spam == True, Link.c.verdict != 'mod-removed')
    ham_pending = and_(Link.c._spam == False, Link.c.verdict != 'mod-approved')
    q._filter(or_(spam_pending, ham_pending))
    return q
示例13: get_links
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def get_links(self, sort, time):
    """Return a read/write-cached query over all links (60s cache TTL)."""
    from r2.lib import promote
    from r2.models import Link
    from r2.lib.db import queries

    cache_opts = dict(read_cache=True, write_cache=True, cache_time=60)
    link_query = Link._query(sort=queries.db_sort(sort), data=True,
                             **cache_opts)
    if time != "all":
        link_query._filter(queries.db_times[time])
    return link_query
示例14: _get_links
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def _get_links(sr_id, sort, time):
    """General link query for a subreddit."""
    link_query = Link._query(Link.c.sr_id == sr_id,
                             sort=db_sort(sort),
                             data=True)
    if time != "all":
        link_query._filter(db_times[time])
    return make_results(link_query)
示例15: get_links
# 需要导入模块: from r2.models import Link [as 别名]
# 或者: from r2.models.Link import _query [as 别名]
def get_links(sr, sort, time):
    """General link query for a subreddit, honoring the 'toplinks' sort."""
    query = Link._query(Link.c.sr_id == sr._id, sort=db_sort(sort))
    extra_rules = []
    if sort == "toplinks":
        extra_rules.append(Link.c.top_link == True)
    if time != "all":
        extra_rules.append(db_times[time])
    for rule in extra_rules:
        query._filter(rule)
    return make_results(query)