

Python utils.progress Function Code Examples

This article collects typical code examples of the Python function r2.lib.utils.progress. If you have been wondering how to call progress, what its arguments mean, or what real-world usage looks like, the hand-picked examples below should help.


The following shows 15 code examples of the progress function, sorted by popularity by default.
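Across these examples, progress wraps an iterable and yields its items through unchanged while periodically printing status; the keyword arguments that appear below include verbosity, estimate, persec, and key. As a rough guide to those parameters, here is a minimal sketch of such a wrapper. It is inferred from the call sites only, not the actual r2.lib.utils.progress implementation, and the name progress_sketch is hypothetical.

import time

def progress_sketch(it, verbosity=100, estimate=None, persec=False, key=repr):
    # Yield every item of `it` unchanged, printing a status line every
    # `verbosity` items -- the same shape of wrapper the examples below use.
    # This is an assumption inferred from the call sites, not reddit's code.
    start = time.time()
    for seen, item in enumerate(it, 1):
        if seen % verbosity == 0:
            elapsed = time.time() - start
            line = "%d" % seen
            if estimate:
                # report completion against the caller's estimated total
                line += "/%d (%.1f%%)" % (estimate, 100.0 * seen / estimate)
            if persec and elapsed > 0:
                # items per second, as the persec=True call sites suggest
                line += " at %.1f/s" % (seen / elapsed)
            # `key` renders a progress marker from the most recent item
            line += " last=%s" % key(item)
            print(line)
        yield item

# Usage mirroring the call sites below:
#   for thing in progress_sketch(things, verbosity=1000,
#                                estimate=50000000, persec=True):
#       process(thing)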

Example 1: write_permacache_from_dir

def write_permacache_from_dir(dirname):
    # we want the whole list so that we can display accurate progress
    # information. If we're operating on more than tens of millions of
    # files, we should either bail out or tweak this to not need the
    # whole list at once
    allfiles = []
    for root, dirs, files in os.walk(dirname):
        for f in files:
            allfiles.append(os.path.join(root, f))

    for fname in progress(allfiles, persec=True):
        try:
            write_permacache_from_file(fname)
            os.unlink(fname)
        except:
            mr_tools.status("failed on %r" % fname)
            raise

    mr_tools.status("Removing empty directories")
    for root, dirs, files in os.walk(dirname, topdown=False):
        for d in dirs:
            dname = os.path.join(root, d)
            try:
                os.rmdir(dname)
            except OSError as e:
                if e.errno == errno.ENOTEMPTY:
                    mr_tools.status("%s not empty" % (dname,))
                else:
                    raise
Developer: new-day-international, Project: reddit, Lines of code: 29, Source: mr_permacache.py

Example 2: rebuild_index

def rebuild_index(start_at=None, sleeptime=1, cls=Link, estimate=50000000,
                  chunk_size=1000):
    if start_at is _REBUILD_INDEX_CACHE_KEY:
        start_at = g.cache.get(start_at)
        if not start_at:
            raise ValueError("Told me to use '%s' key, but it's not set" %
                             _REBUILD_INDEX_CACHE_KEY)
    
    q = cls._query(cls.c._deleted == (True, False),
                   sort=desc('_date'), data=True)
    if start_at:
        after = cls._by_fullname(start_at)
        assert isinstance(after, cls)
        q._after(after)
    q = r2utils.fetch_things2(q, chunk_size=chunk_size)
    q = r2utils.progress(q, verbosity=1000, estimate=estimate, persec=True,
                         key=_progress_key)
    for chunk in r2utils.in_chunks(q, size=chunk_size):
        for x in range(5):
            try:
                inject(chunk)
            except httplib.HTTPException as err:
                print "Got  %s, sleeping %s secs" % (err, x)
                time.sleep(x)
                continue
            else:
                break
        else:
            raise err
        last_update = chunk[-1]
        g.cache.set(_REBUILD_INDEX_CACHE_KEY, last_update._fullname)
        time.sleep(sleeptime)
Developer: ProfNandaa, Project: reddit, Lines of code: 32, Source: cloudsearch.py

Example 3: rebuild_link_index

def rebuild_link_index(start_at=None, sleeptime=1, cls=Link,
                       uploader=LinkUploader, doc_api='CLOUDSEARCH_DOC_API',
                       estimate=50000000, chunk_size=1000):
    doc_api = getattr(g, doc_api)
    uploader = uploader(doc_api)

    q = cls._query(cls.c._deleted == (True, False), sort=desc('_date'))

    if start_at:
        after = cls._by_fullname(start_at)
        assert isinstance(after, cls)
        q._after(after)

    q = r2utils.fetch_things2(q, chunk_size=chunk_size)
    q = r2utils.progress(q, verbosity=1000, estimate=estimate, persec=True,
                         key=_progress_key)
    for chunk in r2utils.in_chunks(q, size=chunk_size):
        uploader.things = chunk
        for x in range(5):
            try:
                uploader.inject()
            except httplib.HTTPException as err:
                print "Got %s, sleeping %s secs" % (err, x)
                time.sleep(x)
                continue
            else:
                break
        else:
            raise err
        last_update = chunk[-1]
        print "last updated %s" % last_update._fullname
        time.sleep(sleeptime)
Developer: KeyserSosa, Project: reddit, Lines of code: 32, Source: cloudsearch.py

Example 4: port_cassavotes

def port_cassavotes():
    from r2.models import Vote, Account, Link, Comment
    from r2.models.vote import CassandraVote, CassandraLinkVote, CassandraCommentVote
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.utils import fetch_things2, to36, progress

    ts = [(Vote.rel(Account, Link), CassandraLinkVote),
          (Vote.rel(Account, Comment), CassandraCommentVote)]

    dataattrs = set(['valid_user', 'valid_thing', 'ip', 'organic'])

    for prel, crel in ts:
        vq = prel._query(sort=desc('_date'),
                         data=True,
                         eager_load=False)
        vq = fetch_things2(vq)
        vq = progress(vq, persec=True)
        for v in vq:
            t1 = to36(v._thing1_id)
            t2 = to36(v._thing2_id)
            cv = crel(thing1_id = t1,
                      thing2_id = t2,
                      date=v._date,
                      name=v._name)
            for dkey, dval in v._t.iteritems():
                if dkey in dataattrs:
                    setattr(cv, dkey, dval)

            cv._commit(write_consistency_level=CL.ONE)
Developer: MatsT, Project: reddit, Lines of code: 29, Source: migrate.py

Example 5: get_participated

def get_participated():
    users = {}

    q = Account._query(Account.c.f2p != "", sort=asc("_date"), data=True)
    for user in progress(fetch_things2(q)):
        users[user._fullname] = user.f2p

    return users
Developer: 13steinj, Project: reddit-plugin-f2p, Lines of code: 8, Source: trophies.py

Example 6: write_permacache_from_dir

def write_permacache_from_dir(dirname):
    for fname in progress(os.listdir(dirname), persec=True):
        try:
            fpath = os.path.join(dirname, fname)
            write_permacache_from_file(fpath)
            os.unlink(fpath)
        except:
            mr_tools.status('failed on %r' % fname)
            raise
Developer: JediWatchman, Project: reddit, Lines of code: 9, Source: mr_permacache.py

Example 7: port_cassahides

def port_cassahides():
    from r2.models import SaveHide, CassandraHide
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.db.operators import desc
    from r2.lib.utils import fetch_things2, timeago, progress

    q = SaveHide._query(SaveHide.c._date > timeago("1 week"), SaveHide.c._name == "hide", sort=desc("_date"))
    q = fetch_things2(q)
    q = progress(q, estimate=1953374)

    for sh in q:
        CassandraHide._hide(sh._thing1, sh._thing2, write_consistency_level=CL.ONE)
Developer: ketralnis, Project: reddit, Lines of code: 12, Source: migrate.py

Example 8: rebuild_index

def rebuild_index(after_id = None):
    cls = Link

    # don't pull spam/deleted
    q = cls._query(sort=desc('_date'), data=True)

    if after_id:
        q._after(cls._byID(after_id))

    q = fetch_things2(q)

    q = progress(q, verbosity=1000, estimate=10000000, persec=True)
    for chunk in in_chunks(q):
        inject(chunk)
Developer: codetripping, Project: reddit, Lines of code: 14, Source: indextankupdate.py

Example 9: port_cassasaves

def port_cassasaves(after_id=None, estimate=12489897):
    from r2.models import SaveHide, CassandraSave
    from r2.lib.db.operators import desc
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.utils import fetch_things2, to36, progress

    q = SaveHide._query(SaveHide.c._name == "save", sort=desc("_date"), data=False, eager_load=False)

    if after_id is not None:
        q._after(SaveHide._byID(after_id))

    for sh in progress(fetch_things2(q), estimate=estimate):

        csh = CassandraSave(thing1_id=to36(sh._thing1_id), thing2_id=to36(sh._thing2_id), date=sh._date)
        csh._commit(write_consistency_level=CL.ONE)
Developer: ketralnis, Project: reddit, Lines of code: 15, Source: migrate.py

Example 10: give_trophies

def give_trophies(users):
    for fullnames in in_chunks(progress(users, verbosity=50), size=50):
        users = Account._by_fullname(fullnames, return_dict=False)

        for user in users:
            team = get_user_team(user)

            trophy = Award.give_if_needed(
                codename="f2p_orangered" if team == "red" else "f2p_periwinkle",
                user=user,
            )
            if trophy:
                trophy._commit()

        sleep(.5)
Developer: 13steinj, Project: reddit-plugin-f2p, Lines of code: 15, Source: trophies.py

Example 11: port_deleted_links

def port_deleted_links(after_id=None):
    from r2.models import Link
    from r2.lib.db.operators import desc
    from r2.models.query_cache import CachedQueryMutator
    from r2.lib.db.queries import get_deleted_links
    from r2.lib.utils import fetch_things2, in_chunks, progress

    q = Link._query(Link.c._deleted == True, Link.c._spam == (True, False), sort=desc("_date"), data=True)
    q = fetch_things2(q, chunk_size=500)
    q = progress(q, verbosity=1000)

    for chunk in in_chunks(q):
        with CachedQueryMutator() as m:
            for link in chunk:
                query = get_deleted_links(link.author_id)
                m.insert(query, [link])
Developer: Shilohtd, Project: reddit, Lines of code: 16, Source: migrate.py

Example 12: backfill_deleted_accounts

def backfill_deleted_accounts(resume_id=None):
    del_accts = Account._query(Account.c._deleted == True, sort=desc('_date'))
    if resume_id:
        del_accts._filter(Account.c._id < resume_id)

    for i, account in enumerate(progress(fetch_things2(del_accts))):
        # Don't kill the rabbit! Wait for the relevant queues to calm down.
        if i % 1000 == 0:
            del_len = get_queue_length('del_account_q')
            cs_len = get_queue_length('cloudsearch_changes')
            while (del_len > 1000 or
                    cs_len > 10000):
                sys.stderr.write(("CS: %d, DEL: %d" % (cs_len, del_len)) + "\n")
                sys.stderr.flush()
                time.sleep(1)
                del_len = get_queue_length('del_account_q')
                cs_len = get_queue_length('cloudsearch_changes')
        amqp.add_item('account_deleted', account._fullname)
Developer: pra85, Project: reddit, Lines of code: 18, Source: scrub_deleted_users.py

Example 13: rebuild_index

def rebuild_index(after_id = None, estimate=10000000):
    cls = Link

    # don't pull spam/deleted
    q = cls._query(sort=desc('_date'), data=True)

    if after_id:
        q._after(cls._byID(after_id))

    q = fetch_things2(q)

    def key(link):
        # we're going back in time, so this will give us a good idea
        # of how far we've gone
        return "%s/%s" % (link._id, link._date)

    q = progress(q, verbosity=1000, estimate=estimate, persec=True, key=key)
    for chunk in in_chunks(q):
        inject(chunk)
Developer: JediWatchman, Project: reddit, Lines of code: 19, Source: indextank.py

Example 14: _populate

def _populate(after_id=None, estimate=54301242):
    from r2.models import desc
    from r2.lib.db import tdb_cassandra
    from r2.lib import utils

    # larger has a chance to decrease the number of Cassandra writes,
    # but the probability is low
    chunk_size = 5000

    q = Comment._query(Comment.c._spam == (True, False), Comment.c._deleted == (True, False), sort=desc("_date"))

    if after_id is not None:
        q._after(Comment._byID(after_id))

    q = utils.fetch_things2(q, chunk_size=chunk_size)
    q = utils.progress(q, verbosity=chunk_size, estimate=estimate)

    for chunk in utils.in_chunks(q, chunk_size):
        chunk = filter(lambda x: hasattr(x, "link_id"), chunk)
        update_comment_votes(chunk)
Developer: druska, Project: reddit, Lines of code: 20, Source: comment_tree.py

Example 15: rebuild_link_index

def rebuild_link_index(
    start_at=None,
    sleeptime=1,
    cls=Link,
    uploader=LinkUploader,
    doc_api="CLOUDSEARCH_DOC_API",
    estimate=50000000,
    chunk_size=1000,
):
    cache_key = _REBUILD_INDEX_CACHE_KEY % uploader.__name__.lower()
    doc_api = getattr(g, doc_api)
    uploader = uploader(doc_api)

    if start_at is _REBUILD_INDEX_CACHE_KEY:
        start_at = g.cache.get(cache_key)
        if not start_at:
            raise ValueError("Told me to use '%s' key, but it's not set" % cache_key)

    q = cls._query(cls.c._deleted == (True, False), sort=desc("_date"), data=True)
    if start_at:
        after = cls._by_fullname(start_at)
        assert isinstance(after, cls)
        q._after(after)
    q = r2utils.fetch_things2(q, chunk_size=chunk_size)
    q = r2utils.progress(q, verbosity=1000, estimate=estimate, persec=True, key=_progress_key)
    for chunk in r2utils.in_chunks(q, size=chunk_size):
        uploader.things = chunk
        for x in range(5):
            try:
                uploader.inject()
            except httplib.HTTPException as err:
                print "Got %s, sleeping %s secs" % (err, x)
                time.sleep(x)
                continue
            else:
                break
        else:
            raise err
        last_update = chunk[-1]
        g.cache.set(cache_key, last_update._fullname)
        time.sleep(sleeptime)
Developer: jzplusplus, Project: reddit, Lines of code: 41, Source: cloudsearch.py


Note: The r2.lib.utils.progress examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and distribution and use should follow each project's license. Do not repost without permission.