

Python utils.fetch_things2 Function Code Examples

This article collects typical usage examples of the Python function r2.lib.utils.fetch_things2 (from the reddit codebase). If you have been wondering what fetch_things2 does, how to call it, or what real-world uses look like, the curated examples below should help.


The sections below present 15 code examples of the fetch_things2 function, sorted by popularity by default.
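Before diving into the examples, here is a minimal sketch of the pattern they all share: build an unlimited Thing query, then let fetch_things2 page through it in fixed-size chunks instead of materializing every row at once. This is a sketch, not reddit source code; it assumes a configured r2 environment and uses Python 2 to match the codebase. The chunk_size and batch_fn keyword arguments it mentions also appear in the examples below.

from r2.lib.db.operators import asc
from r2.lib.utils import fetch_things2
from r2.models import Account

# An unlimited query over all accounts, oldest first.
q = Account._query(sort=asc('_date'), data=True)

# fetch_things2 yields one Thing at a time, internally fetching
# chunk_size rows per database round trip.
for account in fetch_things2(q, chunk_size=100):
    print account._fullname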

Example 1: upgrade_messages

def upgrade_messages(update_comments=True, update_messages=True, update_trees=True):
    from r2.lib.db import queries
    from r2.lib.db.operators import desc
    from r2.lib import comment_tree
    from r2.models import Account, Comment, Message
    from r2.lib.utils import fetch_things2
    from pylons import app_globals as g

    accounts = set()

    def batch_fn(items):
        g.reset_caches()
        return items

    if update_messages or update_trees:
        q = Message._query(Message.c.new == True, sort=desc("_date"), data=True)
        for m in fetch_things2(q, batch_fn=batch_fn):
            print m, m._date
            if update_messages:
                accounts = accounts | queries.set_unread(m, m.new)
            else:
                accounts.add(m.to_id)
    if update_comments:
        q = Comment._query(Comment.c.new == True, sort=desc("_date"))
        q._filter(Comment.c._id < 26152162676)

        for m in fetch_things2(q, batch_fn=batch_fn):
            print m, m._date
            queries.set_unread(m, True)

    print "Precomputing comment trees for %d accounts" % len(accounts)

    for i, a in enumerate(accounts):
        if not isinstance(a, Account):
            a = Account._byID(a)
        print i, a
        comment_tree.user_messages(a)
Author: Shilohtd, Project: reddit, Lines: 35, Source: migrate.py

Example 2: gen_keys

    def gen_keys():
        yield promoted_memo_key

        # just let this one do its own writing
        load_all_reddits()

        yield queries.get_all_comments().iden

        l_q = Link._query(Link.c._spam == (True, False),
                          Link.c._deleted == (True, False),
                          sort=desc('_date'),
                          data=True,
                          )
        for link in fetch_things2(l_q, verbosity):
            yield comments_key(link._id)
            yield last_modified_key(link, 'comments')

        a_q = Account._query(Account.c._spam == (True, False),
                             sort=desc('_date'),
                             )
        for account in fetch_things2(a_q, verbosity):
            yield messages_key(account._id)
            yield last_modified_key(account, 'overview')
            yield last_modified_key(account, 'commented')
            yield last_modified_key(account, 'submitted')
            yield last_modified_key(account, 'liked')
            yield last_modified_key(account, 'disliked')
            yield queries.get_comments(account, 'new', 'all').iden
            yield queries.get_submitted(account, 'new', 'all').iden
            yield queries.get_liked(account).iden
            yield queries.get_disliked(account).iden
            yield queries.get_hidden(account).iden
            yield queries.get_saved(account).iden
            yield queries.get_inbox_messages(account).iden
            yield queries.get_unread_messages(account).iden
            yield queries.get_inbox_comments(account).iden
            yield queries.get_unread_comments(account).iden
            yield queries.get_inbox_selfreply(account).iden
            yield queries.get_unread_selfreply(account).iden
            yield queries.get_sent(account).iden

        sr_q = Subreddit._query(Subreddit.c._spam == (True, False),
                                sort=desc('_date'),
                                )
        for sr in fetch_things2(sr_q, verbosity):
            yield last_modified_key(sr, 'stylesheet_contents')
            yield queries.get_links(sr, 'hot', 'all').iden
            yield queries.get_links(sr, 'new', 'all').iden

            for sort in 'top', 'controversial':
                for time in 'hour', 'day', 'week', 'month', 'year', 'all':
                    yield queries.get_links(sr, sort, time,
                                            merge_batched=False).iden
            yield queries.get_spam_links(sr).iden
            yield queries.get_spam_comments(sr).iden
            yield queries.get_reported_links(sr).iden
            yield queries.get_reported_comments(sr).iden
            yield queries.get_subreddit_messages(sr).iden
            yield queries.get_unread_subreddit_messages(sr).iden
Author: MatsT, Project: reddit, Lines: 59, Source: migrate.py

Example 3: gen_keys

    def gen_keys():
        yield promoted_memo_key

        # just let this one do its own writing
        load_all_reddits()

        yield queries.get_all_comments().iden

        l_q = Link._query(
            Link.c._spam == (True, False), Link.c._deleted == (True, False), sort=desc("_date"), data=True
        )
        for link in fetch_things2(l_q, verbosity):
            yield comments_key(link._id)
            yield last_modified_key(link, "comments")

        a_q = Account._query(Account.c._spam == (True, False), sort=desc("_date"))
        for account in fetch_things2(a_q, verbosity):
            yield messages_key(account._id)
            yield last_modified_key(account, "overview")
            yield last_modified_key(account, "commented")
            yield last_modified_key(account, "submitted")
            yield last_modified_key(account, "liked")
            yield last_modified_key(account, "disliked")
            yield queries.get_comments(account, "new", "all").iden
            yield queries.get_submitted(account, "new", "all").iden
            yield queries.get_liked(account).iden
            yield queries.get_disliked(account).iden
            yield queries.get_hidden(account).iden
            yield queries.get_saved(account).iden
            yield queries.get_inbox_messages(account).iden
            yield queries.get_unread_messages(account).iden
            yield queries.get_inbox_comments(account).iden
            yield queries.get_unread_comments(account).iden
            yield queries.get_inbox_selfreply(account).iden
            yield queries.get_unread_selfreply(account).iden
            yield queries.get_sent(account).iden

        sr_q = Subreddit._query(Subreddit.c._spam == (True, False), sort=desc("_date"))
        for sr in fetch_things2(sr_q, verbosity):
            yield last_modified_key(sr, "stylesheet_contents")
            yield queries.get_links(sr, "hot", "all").iden
            yield queries.get_links(sr, "new", "all").iden

            for sort in "top", "controversial":
                for time in "hour", "day", "week", "month", "year", "all":
                    yield queries.get_links(sr, sort, time, merge_batched=False).iden
            yield queries.get_spam_links(sr).iden
            yield queries.get_spam_comments(sr).iden
            yield queries.get_reported_links(sr).iden
            yield queries.get_reported_comments(sr).iden
            yield queries.get_subreddit_messages(sr).iden
            yield queries.get_unread_subreddit_messages(sr).iden
Author: Shilohtd, Project: reddit, Lines: 52, Source: migrate.py

Example 4: reset_last_email_sent_at_for_all_accounts

def reset_last_email_sent_at_for_all_accounts():
    start_of_epoch = pytz.utc.localize(datetime.datetime.utcfromtimestamp(0))

    accounts = fetch_things2(Account._query(Account.c.email != None, sort=asc('_date')))
    for account in accounts:
        account.last_email_sent_at = start_of_epoch
        account._commit()
Author: caseypatrickdriscoll, Project: reddit, Lines: 7, Source: summary_email.py

Example 5: convert_old_media_objects

def convert_old_media_objects():
    # Use `!=` rather than `is not`: `is not` cannot be overloaded, so it
    # evaluates to a plain boolean instead of building a query operator.
    q = Link._query(Link.c.media_object != None,
                    Link.c._date > whenever,  # `whenever`: cutoff date defined elsewhere
                    data=True)
    for link in utils.fetch_things2(q):
        if not getattr(link, 'media_object', None):
            continue

        if 'youtube' in link.media_object:
            # we can rewrite this one without scraping
            video_id = YoutubeScraper.video_id_rx.match(link.url)
            link.media_object = dict(type='youtube.com',
                                     video_id = video_id.group(1))
        elif ('video.google.com' in link.media_object
              or 'metacafe' in link.media_object):
            scraper = make_scraper(link.url)
            if not scraper:
                continue
            mo = scraper.media_object()
            if not mo:
                continue

            link.media_object = mo

        else:
            print "skipping %s because it confuses me" % link._fullname
            continue

        link._commit()
Author: kevinrose, Project: diggit, Lines: 29, Source: scraper.py

Example 6: port_cassavotes

def port_cassavotes():
    from r2.models import Vote, Account, Link, Comment
    from r2.models.vote import CassandraVote, CassandraLinkVote, CassandraCommentVote
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.db.operators import desc
    from r2.lib.utils import fetch_things2, to36, progress

    ts = [(Vote.rel(Account, Link), CassandraLinkVote),
          (Vote.rel(Account, Comment), CassandraCommentVote)]

    dataattrs = set(['valid_user', 'valid_thing', 'ip', 'organic'])

    for prel, crel in ts:
        vq = prel._query(sort=desc('_date'),
                         data=True,
                         eager_load=False)
        vq = fetch_things2(vq)
        vq = progress(vq, persec=True)
        for v in vq:
            t1 = to36(v._thing1_id)
            t2 = to36(v._thing2_id)
            cv = crel(thing1_id = t1,
                      thing2_id = t2,
                      date=v._date,
                      name=v._name)
            for dkey, dval in v._t.iteritems():
                if dkey in dataattrs:
                    setattr(cv, dkey, dval)

            cv._commit(write_consistency_level=CL.ONE)
Author: MatsT, Project: reddit, Lines: 29, Source: migrate.py

Example 7: rebuild_link_index

def rebuild_link_index(start_at=None, sleeptime=1, cls=Link,
                       uploader=LinkUploader, doc_api='CLOUDSEARCH_DOC_API',
                       estimate=50000000, chunk_size=1000):
    doc_api = getattr(g, doc_api)
    uploader = uploader(doc_api)

    q = cls._query(cls.c._deleted == (True, False), sort=desc('_date'))

    if start_at:
        after = cls._by_fullname(start_at)
        assert isinstance(after, cls)
        q._after(after)

    q = r2utils.fetch_things2(q, chunk_size=chunk_size)
    q = r2utils.progress(q, verbosity=1000, estimate=estimate, persec=True,
                         key=_progress_key)
    for chunk in r2utils.in_chunks(q, size=chunk_size):
        uploader.things = chunk
        for x in range(5):
            try:
                uploader.inject()
            except httplib.HTTPException as err:
                print "Got %s, sleeping %s secs" % (err, x)
                time.sleep(x)
                continue
            else:
                break
        else:
            raise err
        last_update = chunk[-1]
        print "last updated %s" % last_update._fullname
        time.sleep(sleeptime)
Author: KeyserSosa, Project: reddit, Lines: 32, Source: cloudsearch.py

Example 8: rebuild_index

def rebuild_index(start_at=None, sleeptime=1, cls=Link, estimate=50000000,
                  chunk_size=1000):
    if start_at is _REBUILD_INDEX_CACHE_KEY:
        start_at = g.cache.get(start_at)
        if not start_at:
            raise ValueError("Told me to use '%s' key, but it's not set" %
                             _REBUILD_INDEX_CACHE_KEY)
    
    q = cls._query(cls.c._deleted == (True, False),
                   sort=desc('_date'), data=True)
    if start_at:
        after = cls._by_fullname(start_at)
        assert isinstance(after, cls)
        q._after(after)
    q = r2utils.fetch_things2(q, chunk_size=chunk_size)
    q = r2utils.progress(q, verbosity=1000, estimate=estimate, persec=True,
                         key=_progress_key)
    for chunk in r2utils.in_chunks(q, size=chunk_size):
        for x in range(5):
            try:
                inject(chunk)
            except httplib.HTTPException as err:
                print "Got  %s, sleeping %s secs" % (err, x)
                time.sleep(x)
                continue
            else:
                break
        else:
            raise err
        last_update = chunk[-1]
        g.cache.set(_REBUILD_INDEX_CACHE_KEY, last_update._fullname)
        time.sleep(sleeptime)
Author: ProfNandaa, Project: reddit, Lines: 32, Source: cloudsearch.py

Example 9: add_all_srs

def add_all_srs():
    """Adds every listing query for every subreddit to the queue."""
    q = Subreddit._query(sort = asc('_date'))
    for sr in fetch_things2(q):
        add_queries(all_queries(get_links, sr, ('hot', 'new', 'old'), ['all']))
        add_queries(all_queries(get_links, sr, ('top', 'controversial'), db_times.keys()))
        add_queries([get_links(sr, 'toplinks', 'all')])
Author: Craigus, Project: lesswrong, Lines: 7, Source: queries.py

Example 10: test_send_summary_emails

def test_send_summary_emails():
    accounts = fetch_things2(Account._query(Account.c.email != None, sort=asc('_date')))
    for account in accounts:
        a_day_ago = datetime.datetime.now(pytz.utc) - datetime.timedelta(hours=24)
        account.last_email_sent_at = a_day_ago
        account._commit()
        send_account_summary_email(account._id, verbose=True)
Author: caseypatrickdriscoll, Project: reddit, Lines: 7, Source: summary_email.py

Example 11: backfill

def backfill(after=None):
    q = Subreddit._query(sort=asc('_date'))
    if after:
        sr = Subreddit._by_name(after)
        q = q._after(sr)

    for sr in fetch_things2(q):
        backfill_sr(sr)
Author: 0xcd03, Project: reddit, Lines: 8, Source: modaction_by_srandmod.py

Example 12: get_participated

def get_participated():
    users = {}

    q = Account._query(Account.c.f2p != "", sort=asc("_date"), data=True)
    for user in progress(fetch_things2(q)):
        users[user._fullname] = user.f2p

    return users
Author: 13steinj, Project: reddit-plugin-f2p, Lines: 8, Source: trophies.py

Example 13: by_url_cache

def by_url_cache():
    q = Link._query(Link.c._spam == (True,False),
                    data = True,
                    sort = desc('_date'))
    for i, link in enumerate(fetch_things2(q)):
        if i % 100 == 0:
            print "%s..." % i
        link.set_url_cache()
Author: Acceto, Project: reddit, Lines: 8, Source: populatedb.py

Example 14: send_account_summary_email

def send_account_summary_email(account_thing_id, verbose=False, send_email=send_email):
    account = Account._byID(account_thing_id, data=True)
    if not should_send_activity_summary_email(account):
        return

    # if we've never sent an email, only tell about the last 24 hours
    a_day_ago = datetime.datetime.now(pytz.utc) - datetime.timedelta(hours=24)
    if getattr(account, 'last_email_sent_at', None) is None:
        account.last_email_sent_at = a_day_ago

    c.content_langs = 'en-US'

    # Find all the "active" links for this user.  Frontpage uses the c.user global
    # to find the right subreddits for the current user
    c.user = account
    c.user_is_loggedin = True
    thing_ids = []
    for link in Frontpage.get_links('active', 'all'):
        thing_ids.append(link)
    active_links_hash = Link._by_fullname(thing_ids, data=True)

    active_links = [active_links_hash[t_id] for t_id in thing_ids
                    if active_links_hash[t_id]._active > account.last_email_sent_at]
    for idx, ll in enumerate(active_links, 1):
        ll.num = idx

    # Find all new spaces created since we last sent the user an email
    new_spaces = list(fetch_things2(Subreddit._query(
        Subreddit.c._date > account.last_email_sent_at,
        sort=asc('_date'))))

    # don't bother sending email if there's nothing to report.
    if len(new_spaces) == 0 and len(active_links) == 0:
        return

    # Get the date and time
    now = datetime.datetime.now(pytz.timezone('US/Eastern'))
    date_string = now.strftime("%A %B %d, %Y")
    time_string = now.strftime("%I:%M %p")

    # Render the template
    html_email_template = g.mako_lookup.get_template('summary_email.html')
    html_body = html_email_template.render(
        last_email_sent_at=account.last_email_sent_at,
        new_spaces=new_spaces, 
        active_links=active_links,
        date_string=date_string,
        time_string=time_string)

    # with open('out.html', 'w') as ff:
    #     ff.write(html_body)
    if verbose:
        print "sending email to %s" % (account.email,)
    send_email(account.email, html_body, date_string)

    account.last_email_sent_at = datetime.datetime.now(pytz.utc)
    account._commit()
Author: caseypatrickdriscoll, Project: reddit, Lines: 58, Source: summary_email.py

Example 15: backfill_campaign_targets

def backfill_campaign_targets():
    from r2.lib.db.operators import desc
    from r2.lib.utils import fetch_things2

    q = PromoCampaign._query(sort=desc("_date"), data=True)
    for campaign in fetch_things2(q):
        sr_name = campaign.sr_name or Frontpage.name
        campaign.target = Target(sr_name)
        campaign._commit()
Author: rgandsam, Project: reddit, Lines: 9, Source: promo.py


Note: The r2.lib.utils.fetch_things2 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or using the code. Do not repost without permission.