本文整理汇总了Python中r2.models.Account._byID方法的典型用法代码示例。如果您正苦于以下问题:Python Account._byID方法的具体用法?Python Account._byID怎么用?Python Account._byID使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类r2.models.Account的用法示例。
在下文中一共展示了Account._byID方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: recompute_unread
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def recompute_unread(min_date = None):
    """Rebuild cached unread listings for every account with inbox items.

    Scans the Inbox relations (Account<->Message and Account<->Comment),
    collects the recipient account ids (optionally only for inbox items
    newer than ``min_date``), and refreshes each account's cached unread
    query results.
    """
    from r2.models import Inbox, Account, Comment, Message
    from r2.lib.db import queries

    def load_accounts(inbox_rel):
        # Walk the relation newest-first and collect recipient
        # (_thing1) account ids.
        accounts = set()
        q = inbox_rel._query(eager_load = False, data = False,
                             sort = desc("_date"))
        if min_date:
            q._filter(inbox_rel.c._date > min_date)
        for i in fetch_things2(q):
            accounts.add(i._thing1_id)
        return accounts

    # Accounts with message inbox entries: refresh all three unread caches.
    accounts_m = load_accounts(Inbox.rel(Account, Message))
    for i, a in enumerate(accounts_m):
        a = Account._byID(a)
        print "%s / %s : %s" % (i, len(accounts_m), a)
        queries.get_unread_messages(a).update()
        queries.get_unread_comments(a).update()
        queries.get_unread_selfreply(a).update()

    # Accounts that only have comment inbox entries (message recipients
    # were already handled above); no message cache refresh needed here.
    accounts = load_accounts(Inbox.rel(Account, Comment)) - accounts_m
    for i, a in enumerate(accounts):
        a = Account._byID(a)
        print "%s / %s : %s" % (i, len(accounts), a)
        queries.get_unread_comments(a).update()
        queries.get_unread_selfreply(a).update()
示例2: store_keys
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def store_keys(key, maxes):
    """Insert precomputed tuples into the cached listing named by ``key``.

    ``key`` encodes the listing type and owner, e.g.
    ``user-submitted-<id>``, ``sr-<sort>-<time>-<id>``,
    ``domain/<sort>/<time>/<domain>`` or ``liked-<id>``;
    ``maxes`` carries the (timestamp..., fullname) tuples to insert.
    """
    # we're building queries using queries.py, but we could make the
    # queries ourselves if we wanted to avoid the individual lookups
    # for accounts and subreddits.
    # Note that we're only generating the 'sr-' type queries here, but
    # we're also able to process the other listings generated by the
    # old migrate.mr_permacache for convenience
    userrel_fns = dict(liked = queries.get_liked,
                       disliked = queries.get_disliked,
                       saved = queries.get_saved,
                       hidden = queries.get_hidden)
    if key.startswith('user-'):
        # user-<keytype>-<account_id>: submitted/commented listings
        acc_str, keytype, account_id = key.split('-')
        account_id = int(account_id)
        fn = queries.get_submitted if keytype == 'submitted' else queries.get_comments
        q = fn(Account._byID(account_id), 'new', 'all')
        q._insert_tuples([(fname, float(timestamp))
                          for (timestamp, fname)
                          in maxes])
    elif key.startswith('sr-'):
        # sr-<sort>-<time>-<sr_id>: subreddit link listings
        sr_str, sort, time, sr_id = key.split('-')
        sr_id = int(sr_id)
        if sort == 'controversy':
            # I screwed this up in the mapper and it's too late to fix
            # it
            sort = 'controversial'
        q = queries.get_links(Subreddit._byID(sr_id), sort, time)
        # move the fullname (last element) first; remaining elements
        # become float sort keys
        q._insert_tuples([tuple([item[-1]] + map(float, item[:-1]))
                          for item in maxes])
    elif key.startswith('domain/'):
        # domain/<sort>/<time>/<domain>: per-domain link listings
        d_str, sort, time, domain = key.split('/')
        q = queries.get_domain_links(domain, sort, time)
        q._insert_tuples([tuple([item[-1]] + map(float, item[:-1]))
                          for item in maxes])
    elif key.split('-')[0] in userrel_fns:
        # <reltype>-<account_id>: liked/disliked/saved/hidden listings
        key_type, account_id = key.split('-')
        account_id = int(account_id)
        fn = userrel_fns[key_type]
        q = fn(Account._byID(account_id))
        q._insert_tuples([tuple([item[-1]] + map(float, item[:-1]))
                          for item in maxes])
示例3: refund_campaign
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def refund_campaign(link, camp, billable_amount, billable_impressions):
    """Refund the unbilled portion of a campaign's payment transaction.

    Computes the refund from the billable amount; does nothing when the
    computed refund is not positive. On gateway failure the attempt is
    logged and the campaign is left unmodified.
    """
    refund_amount = get_refund_amount(camp, billable_amount)
    if refund_amount <= 0:
        return

    owner = Account._byID(camp.owner_id, data=True)
    try:
        success = authorize.refund_transaction(owner, camp.trans_id, camp._id, refund_amount)
    except authorize.AuthorizeNetException as e:
        # Gateway rejected the refund: log and bail without recording
        # a refund on the campaign.
        text = "%s $%s refund failed" % (camp, refund_amount)
        PromotionLog.add(link, text)
        g.log.debug(text + " (response: %s)" % e)
        return
    # NOTE(review): `success` is assigned but never checked — a falsy
    # non-raising failure would still be logged as completed; confirm
    # the gateway only signals failure by raising.
    text = "%s completed with $%s billable (%s impressions @ $%s)." " %s refunded." % (
        camp,
        billable_amount,
        billable_impressions,
        camp.cpm,
        refund_amount,
    )
    PromotionLog.add(link, text)
    camp.refund_amount = refund_amount
    camp._commit()
    unset_underdelivered_campaigns(camp)
    emailer.refunded_promo(link)
示例4: void_campaign
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def void_campaign(link, campaign):
    """Void (cancel) the pending payment transaction for a campaign, if any,
    then notify the 'campaign.void' hook."""
    transactions = get_transactions(link, [campaign])
    bid_record = transactions.get(campaign._id)
    if bid_record:
        a = Account._byID(link.author_id)
        authorize.void_transaction(a, bid_record.transaction, campaign._id)
    # NOTE(review): indentation reconstructed — confirm the hook call is
    # intended at function level (fired even without a transaction) rather
    # than inside the `if bid_record:` branch.
    hooks.get_hook('campaign.void').call(link=link, campaign=campaign)
示例5: edit_campaign
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def edit_campaign(link, campaign, dates, bid, cpm, sr, priority):
    """Apply edits (dates, bid, cpm, target subreddit, priority) to an
    existing promo campaign and reschedule its promotion weights."""
    sr_name = sr.name if sr else '' # empty string means target to all
    # if the bid amount changed, cancel any pending transactions
    if campaign.bid != bid:
        void_campaign(link, campaign)

    # update the schedule
    PromotionWeights.reschedule(link, campaign._id, sr_name,
                                dates[0], dates[1], bid)

    # update values in the db
    campaign.update(dates[0], dates[1], bid, cpm, sr_name,
                    campaign.trans_id, priority, commit=True)

    if campaign.priority.cpm:
        # record the transaction
        text = 'updated campaign %s. (bid: %0.2f)' % (campaign._id, bid)
        PromotionLog.add(link, text)

        # make it a freebie, if applicable
        author = Account._byID(link.author_id, True)
        if getattr(author, "complimentary_promos", False):
            free_campaign(link, campaign, c.user)

    hooks.get_hook('promote.edit_campaign').call(link=link, campaign=campaign)
示例6: __init__
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def __init__(self, link = None, comment = None,
             link_title = '', *a, **kw):
    """Build a link-details page: a one-element listing for ``link`` at the
    top, with the page title derived from the link (and comment author,
    when rendering a comment permalink).

    NOTE(review): method of a page class whose header is outside this
    view (presumably a LinkInfoPage-style subclass of Reddit).
    """
    # TODO: temp hack until we find place for builder_wrapper
    link.render_full = True

    from r2.controllers.listingcontroller import ListingController
    link_builder = IDBuilder(link._fullname,
                             wrap = ListingController.builder_wrapper)

    # link_listing will be the one-element listing at the top
    self.link_listing = LinkListing(link_builder, nextprev=False).listing()

    # link is a wrapped Link object
    self.link = self.link_listing.things[0]

    # the wrapped link's title overrides the passed-in link_title
    link_title = ((self.link.title) if hasattr(self.link, 'title') else '')

    if comment:
        author = Account._byID(comment.author_id, data=True).name
        params = {'author' : author, 'title' : _force_unicode(link_title)}
        title = strings.permalink_title % params
    else:
        params = {'title':_force_unicode(link_title), 'site' : c.site.title}
        title = strings.link_info_title % params

    if not c.default_sr:
        # Not on the main page, so include a pointer to the canonical URL for this link
        self.canonical_link = link.canonical_url

    Reddit.__init__(self, title = title, body_class = 'post', *a, **kw)
示例7: refund_campaign
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def refund_campaign(link, camp, refund_amount, billable_amount,
                    billable_impressions):
    """Issue a refund of ``refund_amount`` on a campaign's transaction.

    Returns True when the gateway accepted the refund (and the campaign
    was updated/committed), False when the refund failed.
    """
    owner = Account._byID(camp.owner_id, data=True)
    success, reason = authorize.refund_transaction(
        owner, camp.trans_id, camp._id, refund_amount)
    if not success:
        # Log the failure and leave the campaign unmodified.
        text = ('%s $%s refund failed' % (camp, refund_amount))
        PromotionLog.add(link, text)
        g.log.debug(text + ' (reason: %s)' % reason)
        return False

    # Two log formats: with impression details when available.
    if billable_impressions:
        text = ('%s completed with $%s billable (%s impressions @ $%s).'
                ' %s refunded.' % (camp, billable_amount,
                                   billable_impressions,
                                   camp.bid_pennies / 100.,
                                   refund_amount))
    else:
        text = ('%s completed with $%s billable. %s refunded' % (camp,
                billable_amount, refund_amount))

    PromotionLog.add(link, text)
    camp.refund_amount = refund_amount
    camp._commit()
    queries.unset_underdelivered_campaigns(camp)
    emailer.refunded_promo(link)
    return True
示例8: new_comment
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def new_comment(comment, inbox_rels):
    """Update cached listings and inboxes after a comment is created
    (or mark it for removal when deleted).

    ``inbox_rels`` are the Inbox relations created for recipients; each
    one is added to the owner's inbox/selfreply listing and marked unread.
    """
    author = Account._byID(comment.author_id)
    job = [get_comments(author, "new", "all")]
    if comment._deleted:
        # deletion: remove the comment from the author's and the global
        # comment listings
        job.append(get_all_comments())
        add_queries(job, delete_items=comment)
    else:
        # if comment._spam:
        #     sr = Subreddit._byID(comment.sr_id)
        #     job.append(get_spam_comments(sr))
        add_queries(job, insert_items=comment)
        amqp.add_item("new_comment", comment._fullname)
        if not g.amqp_host:
            # no queue available: update the comment tree inline
            l = Link._byID(comment.link_id, data=True)
            add_comment_tree(comment, l)

    # note that get_all_comments() is updated by the amqp process
    # r2.lib.db.queries.run_new_comments
    if inbox_rels:
        for inbox_rel in tup(inbox_rels):
            inbox_owner = inbox_rel._thing1
            if inbox_rel._name == "inbox":
                add_queries([get_inbox_comments(inbox_owner)], insert_items=inbox_rel)
            else:
                add_queries([get_inbox_selfreply(inbox_owner)], insert_items=inbox_rel)
            set_unread(comment, inbox_owner, True)
示例9: new
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def new(cls, user, thing):
    """Create (or return the pre-existing) Report of ``thing`` by ``user``.

    Duplicate reports from the same user are ignored; otherwise the
    thing's report counter and the author's 'reported' counter are
    incremented and the reports queue is updated.

    NOTE(review): first parameter is ``cls`` — presumably decorated as a
    @classmethod outside this view.
    """
    from r2.lib.db import queries

    # check if this report exists already!
    rel = cls.rel(user, thing)
    q = rel._fast_query(user, thing, ['-1', '0', '1'])
    q = [ report for (tupl, report) in q.iteritems() if report ]
    if q:
        # stop if we've seen this before, so that we never get the
        # same report from the same user twice
        oldreport = q[0]
        g.log.debug("Ignoring duplicate report %s" % oldreport)
        return oldreport

    r = Report(user, thing, '0')
    if not thing._loaded:
        thing._load()

    # mark item as reported
    thing._incr(cls._field)
    r._commit()

    # credit the report against the thing's author, when it has one
    if hasattr(thing, 'author_id'):
        author = Account._byID(thing.author_id, data=True)
        author._incr('reported')

    # update the reports queue if it exists
    queries.new_report(thing)

    # if the thing is already marked as spam, accept the report
    if thing._spam:
        cls.accept(thing)

    return r
示例10: submit_rss_links
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def submit_rss_links(srname,rss,user,titlefield='title',linkfield='link'):
    """Fetch an RSS feed and submit each entry as a link to subreddit
    ``srname`` on behalf of account id ``user``.

    ``titlefield``/``linkfield`` name the feed-entry fields used for the
    submission title and URL. Silently returns when the feed cannot be
    fetched.
    """
    # Bypass the API and submit directly, the way the app itself would.
    # This avoids screwing around with cookies and so forth.
    feed=fetch_feed(rss)
    if feed is None:
        return
    ac=Account._byID(user)
    sr=Subsciteit._by_name(srname)
    ip='0.0.0.0'

    # arxiv.org titles carry a "(arXiv:...)" suffix; strip it
    niceify=False
    if domain(rss)=="arxiv.org":
        niceify=dict(find="\(arXiv:.*?\)",replace="")

    # Randomize submission order.
    random.shuffle(feed.entries)
    for article in feed.entries:
        # This can take all night if it has to; we don't want to hammer
        # the server into oblivion.
        sleep(1)
        kw = fetch_article(article,titlefield=titlefield,linkfield=linkfield,niceify=niceify)
        if kw is None:
            continue
        l = Link._submit(kw['title'],kw['link'],ac,sr,ip,spam=False)
        l._commit()
        l.set_url_cache()
        # We don't really need auto-submitted links to be voted on,
        # but cast the submitter's upvote anyway.
        queries.queue_vote(ac,l,True,ip,cheater=False)
        queries.new_link(l)
        changed(l)
        print "Submitted %s" % article[titlefield]
        sleep(.1)
    return
示例11: _handle_upsert_campaign
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def _handle_upsert_campaign(payload):
    """Push a promo campaign and its link's creative to the ad server.

    Looks up the link, campaign, and their owning accounts from the
    message ``payload`` (fullnames under "link" and "campaign"), upserts
    the line item and creative, and associates the two. Aborts (with an
    error log) when the line item upsert is rejected.
    """
    promo_link = Link._by_fullname(payload["link"], data=True)
    camp = PromoCampaign._by_fullname(payload["campaign"], data=True)
    camp_owner = Account._byID(camp.owner_id)
    link_author = Account._byID(promo_link.author_id)

    try:
        lineitem = lineitems_service.upsert_lineitem(camp_owner, camp)
    except ValueError as e:
        g.log.error("unable to upsert lineitem: %s" % e)
        return

    creative = creatives_service.upsert_creative(link_author, promo_link)
    lineitems_service.associate_with_creative(
        creative=creative, lineitem=lineitem)
示例12: new_comment
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def new_comment(comment, inbox_rels):
    """Update cached listings and recipient inboxes for a new (or newly
    deleted) comment.

    Spam comments are additionally inserted into the subreddit's spam
    listing; recipients' inbox relations are inserted and marked unread.
    """
    author = Account._byID(comment.author_id)
    job = [get_comments(author, 'new', 'all')]
    if comment._deleted:
        # deletion: remove from the author's and global comment listings
        job.append(get_all_comments())
        add_queries(job, delete_items = comment)
    else:
        if comment._spam:
            sr = Subreddit._byID(comment.sr_id)
            job.append(get_spam_comments(sr))
        add_queries(job, insert_items = comment)
        amqp.add_item('new_comment', comment._fullname)
        if not g.amqp_host:
            # no queue available: update the comment tree inline
            add_comment_tree([comment])

    # note that get_all_comments() is updated by the amqp process
    # r2.lib.db.queries.run_new_comments (to minimise lock contention)
    if inbox_rels:
        for inbox_rel in tup(inbox_rels):
            inbox_owner = inbox_rel._thing1
            if inbox_rel._name == "inbox":
                add_queries([get_inbox_comments(inbox_owner)],
                            insert_items = inbox_rel)
            else:
                add_queries([get_inbox_selfreply(inbox_owner)],
                            insert_items = inbox_rel)
            set_unread(comment, inbox_owner, True)
示例13: set_up_embed
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def set_up_embed(embed_key, sr, thing, showedits):
    """Validate an embed request and set up the logged-out iframe context.

    ``embed_key`` must be the HMAC-SHA1 of the thing's id36 under the
    "comment_embed" secret; requests with a bad key are aborted with 401.
    On success, populates c.embed_config for the embed template and forces
    a logged-out iframe render.
    """
    expected_mac = hmac.new(g.secrets["comment_embed"], thing._id36, hashlib.sha1).hexdigest()
    # constant-time compare to avoid leaking the MAC via timing
    if not constant_time_compare(embed_key or "", expected_mac):
        abort(401)

    try:
        author = Account._byID(thing.author_id) if thing.author_id else None
    except NotFound:
        author = None

    iso_timestamp = request.GET.get("created", "")

    c.embed_config = {
        "eventtracker_url": g.eventtracker_url or "",
        "anon_eventtracker_url": g.anon_eventtracker_url or "",
        "created": iso_timestamp,
        "showedits": showedits,
        "thing": {
            "id": thing._id,
            "sr_id": sr._id,
            "sr_name": sr.name,
            "edited": edited_after(thing, iso_timestamp, showedits),
            # BUGFIX: author may legitimately be None (no author_id, or the
            # account was not found) — guard before reading _deleted to
            # avoid an AttributeError when thing.deleted is falsy.
            "deleted": thing.deleted or (author is not None and author._deleted),
        },
    }

    c.render_style = "iframe"
    c.user = UnloggedUser([c.lang])
    c.user_is_loggedin = False
    c.forced_loggedout = True
示例14: update_num_gildings
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def update_num_gildings(update_trophy=True, user_id=None):
    """Recompute each account's num_gildings from the gold table.

    Counts gift transactions (trans_id starting with 'X') grouped by the
    paying account, stores the count on each Account, and optionally
    queues a "gilding" trophy for accounts with public server-seconds.
    When ``user_id`` is given, only that account is updated.
    """
    query = (select([gold_table.c.paying_id, sa_count(gold_table.c.trans_id)])
             .where(gold_table.c.trans_id.like('X%'))
             .group_by(gold_table.c.paying_id)
             .order_by(sa_count(gold_table.c.trans_id).desc())
             )
    if user_id:
        query = query.where(gold_table.c.paying_id == str(user_id))

    rows = ENGINE.execute(query)
    total_updated = 0
    for paying_id, count in rows:
        # best-effort per row: a bad paying_id or missing account should
        # not abort the whole batch
        try:
            a = Account._byID(int(paying_id), data=True)
            a.num_gildings = count
            a._commit()
            total_updated += 1
            # if 'server seconds paid' for are public, update gilding trophies
            if update_trophy and a.pref_public_server_seconds:
                add_to_trophy_queue(a, "gilding")
        except Exception as e:
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt and hid the real error.
            g.log.debug("update_num_gildings: paying_id %s is invalid (%s)"
                        % (paying_id, e))
    g.log.debug("update_num_gildings: updated %s accounts" % total_updated)
示例15: process_message
# 需要导入模块: from r2.models import Account [as 别名]
# 或者: from r2.models.Account import _byID [as 别名]
def process_message(msgs, chan):
    """Update get_submitted(), the Links by author precomputed query.
    get_submitted() is a CachedResult which is stored in permacache. To
    update these objects we need to do a read-modify-write which requires
    obtaining a lock. Sharding these updates by author allows us to run
    multiple consumers (but ideally just one per shard) to avoid lock
    contention.

    ``msgs`` are queue messages whose bodies are link fullnames;
    ``chan`` is the AMQP channel (unused here).
    """
    from r2.lib.db.queries import add_queries, get_submitted

    link_names = {msg.body for msg in msgs}
    links = Link._by_fullname(link_names, return_dict=False)
    print 'Processing %r' % (links,)

    # group the fetched links by their author
    links_by_author_id = defaultdict(list)
    for link in links:
        links_by_author_id[link.author_id].append(link)

    # one batched account lookup for all authors
    authors_by_id = Account._byID(links_by_author_id.keys())

    for author_id, links in links_by_author_id.iteritems():
        with g.stats.get_timer("link_vote_processor.author_queries"):
            author = authors_by_id[author_id]
            add_queries(
                queries=[
                    get_submitted(author, sort, 'all') for sort in SORTS],
                insert_items=links,
            )