當前位置: 首頁>>代碼示例>>Python>>正文


Python Task.get_logger方法代碼示例

本文整理匯總了Python中celery.task.Task.get_logger方法的典型用法代碼示例。如果您正苦於以下問題:Python Task.get_logger方法的具體用法?Python Task.get_logger怎麽用?Python Task.get_logger使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在celery.task.Task的用法示例。


在下文中一共展示了Task.get_logger方法的9個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: retrieve_mbs_result

# 需要導入模塊: from celery.task import Task [as 別名]
# 或者: from celery.task.Task import get_logger [as 別名]
def retrieve_mbs_result(target_task_id):
    """Wait for a finished simulation task and record its outcome.

    Polls the celery task identified by *target_task_id* until it is
    ready, then updates the matching SimulationResult row with the
    simulation id, the log collection names, progress and status.
    """
    logger = Task.get_logger()
    async_result = AsyncResult(target_task_id)
    record = SimulationResult.objects.get(task_id__exact=target_task_id)
    logger.info(async_result)

    # Busy-wait until the worker has published its result.
    while not async_result.ready():
        time.sleep(0.1)

    payload = json.loads(async_result.result)

    if payload['exit_code'] == 0:
        ## success
        record.sim_id = payload['sim_id']

        ## rewrite this list if additional log collections are added
        suffixes = ("nwk", "node", "msg", "usr", "map")
        record.collections = json.dumps(
            ["%s_%s" % (record.sim_id, suffix) for suffix in suffixes])
        record.task_progress = 100
        record.task_status = "SUCCESS"
        record.save()
    else:
        record.sim_id = "NO SIM_ID (FAILED)"
        record.task_status = "FAILED"
        record.task_progress = 0
        record.save()
開發者ID:koh1,項目名稱:sim_ds,代碼行數:34,代碼來源:tasks.py

示例2: execute_transform

# 需要導入模塊: from celery.task import Task [as 別名]
# 或者: from celery.task.Task import get_logger [as 別名]
def execute_transform(spill, client_id = "Unknown"):
    """
    MAUS Celery transform task used by sub-processes to execute jobs
    from clients. Proxies of this task are invoked by clients. This
    applies the current transform to the spill and returns the new
    spill.
    @param spill JSON document string holding spill.
    @param client_id ID of client who submitted job.
    @return JSON document string holding new spill.
    @throws WorkerProcessException wrapping any error raised while
    processing, so nothing unpicklable crosses the process boundary.
    """
    logger = Task.get_logger()
    if logger.isEnabledFor(logging.INFO):
        logger.info("Task invoked by %s" % client_id)
    try:
        document = json.loads(spill)
        # Non-spill events (anything whose maus_event_type is present
        # and not "Spill") are passed through untouched.
        if document.get("maus_event_type", "Spill") != "Spill":
            return spill
        return maus_cpp.converter.string_repr(MausTransform.process(spill))
    except Exception as exc: # pylint:disable = W0703
        # Filter exceptions so no unPicklable exception causes
        # problems.
        status = {"error": str(exc.__class__), "message": str(exc)}
        raise WorkerProcessException(MausConfiguration.transform,
            status)
開發者ID:mice-software,項目名稱:maus,代碼行數:31,代碼來源:tasks.py

示例3: crawl

# 需要導入模塊: from celery.task import Task [as 別名]
# 或者: from celery.task.Task import get_logger [as 別名]
def crawl(id=None, screen_name=None, cursor=-1, crawl_mode=False, routing_key=DEFAULT_ROUTING_KEY):
    """Crawl one page of a twitter user's follower list.

    Fetches a page of follower ids for the user identified by *id* or
    *screen_name*, enqueues a sync_user task for every follower, and
    re-queues itself for the next page when the API reports another
    cursor.

    @param id: numeric twitter user id (takes precedence over screen_name).
    @param screen_name: twitter screen name, used when no id is given.
    @param cursor: pagination cursor from a previous page (-1 = first page).
    @param crawl_mode: forwarded to sync_user so followers get crawled too.
    @param routing_key: celery routing key for all spawned tasks.
    """
    logger = Task.get_logger()
    logger.info('[crawl] starting crawl(id=%s, screen_name=%s, cursor=%s)' % (id, screen_name, cursor))
    twitter = get_twitter()
    params = {
        "cursor": cursor
    }
    if id:
        params['user_id'] = id
    elif screen_name:
        params['screen_name'] = screen_name
    result = twitter.followers.ids(**params)

    # block while we grab the current user's info
    del params['cursor']
    source_id = twitter.users.show(**params)['id']
    source = sync_user(source_id)

    # NOTE: loop variable renamed from ``id`` — the original shadowed the
    # ``id`` parameter (and the builtin).
    for follower_id in result['ids']:
        sync_user.apply_async(args=[follower_id,],
                              kwargs={'source': source, 'crawl_mode': crawl_mode},
                              routing_key=routing_key)

    if result['next_cursor']:
        logger.info("[crawl] continuing at next_cursor=%s" % result['next_cursor'])
        # BUG FIX: the original passed the undefined name ``twitter_id``
        # here, raising NameError whenever a next page existed.  Use the
        # resolved numeric id of the user being crawled instead.
        crawl.apply_async(
            args=[source_id,],
            kwargs={
                'cursor': result['next_cursor'],
                'crawl_mode': crawl_mode,
                'routing_key': routing_key,
            },
            routing_key=routing_key
        )
開發者ID:chrisdickinson,項目名稱:d51.django.apps.twitter,代碼行數:34,代碼來源:tasks.py

示例4: sync_user

# 需要導入模塊: from celery.task import Task [as 別名]
# 或者: from celery.task.Task import get_logger [as 別名]
def sync_user(twitter_id, source=None, crawl_mode=False):
    """Create or refresh the local TwitterUser record for *twitter_id*.

    @param twitter_id: numeric twitter id of the user to synchronise.
    @param source: optional related user; when given,
        ``user.follow(source)`` records the relationship (presumably
        "user follows source" — confirm against TwitterUser.follow).
    @param crawl_mode: when True, also enqueue a crawl of this user's
        followers.
    @return: the saved TwitterUser instance.
    """
    logger = Task.get_logger()
    logger.info("[sync_user] starting sync_user(%s)" % twitter_id)
    try:
        user = TwitterUser.objects.get(id__exact=str(twitter_id))
    except TwitterUser.DoesNotExist:
        user = TwitterUser(id=twitter_id)
    user.update_from_twitter()
    user.save()
    if source:
        user.follow(source)

    if crawl_mode:
        if user.screen_name:
            crawl.delay(screen_name=user.screen_name)
        else:
            # BUG FIX: the original logged "empty name" whenever
            # crawl_mode was False, even for users with a valid screen
            # name; only warn when the name really is missing.
            logger.info("[sync_user] got a user with an empty name")
    return user
開發者ID:chrisdickinson,項目名稱:d51.django.apps.twitter,代碼行數:19,代碼來源:tasks.py

示例5: update_flickrify_cache

# 需要導入模塊: from celery.task import Task [as 別名]
# 或者: from celery.task.Task import get_logger [as 別名]
def update_flickrify_cache(uuid):
    """Force a refresh of the cached flickrify result for *uuid*."""
    log = Task.get_logger()
    log.info("[update_flickrify_cache] starting work on %s" % uuid)
    do_flickrify(uuid, force_refresh=True)
開發者ID:tswicegood,項目名稱:activitystream,代碼行數:6,代碼來源:tasks.py

示例6: mbs_exec

# 需要導入模塊: from celery.task import Task [as 別名]
# 或者: from celery.task.Task import get_logger [as 別名]
def mbs_exec(conf):
    # NOTE(review): this task is broken as written — ``cmd`` is never
    # defined in this scope, so calling it raises NameError, and the
    # ``conf`` parameter is unused.  Presumably ``cmd`` should be built
    # from ``conf``; confirm the intended command line against callers
    # before fixing.
    # ``commands.getstatusoutput`` (deprecated Python 2 module) would
    # return an (exit_status, output) tuple.
    logger = Task.get_logger()
    return commands.getstatusoutput(cmd)
開發者ID:koh1,項目名稱:sim_ds,代碼行數:5,代碼來源:tasks.py

示例7: exec_d2xp_mbs

# 需要導入模塊: from celery.task import Task [as 別名]
# 或者: from celery.task.Task import get_logger [as 別名]
def exec_d2xp_mbs(conf, scale, num_area):
    """Run the d2xp message simulator with the given configuration.

    Writes *conf* out as a timestamped YAML config file, launches the
    simulator as a subprocess, extracts the simulation id from its
    stdout, queues a retrieve_mbs_result task for the GUI-side worker,
    and returns the run summary as a JSON string.

    @param conf: configuration mapping dumped to the simulator's YAML file.
    @param scale: simulation scale; selects the ``conf/*_<scale>`` inputs.
    @param num_area: number of areas; selects the ``*_area<num_area>`` inputs.
    @return JSON string holding exit_code, stdout, stderr and sim_id.
    """
    logger = Task.get_logger()

    conf_pst_fix = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
    # FIX: use a context manager so the config file is closed even if
    # yaml.dump or the write raises.
    with open("%s/message_simulator/config_%s.yml" % (os.environ['HOME'], conf_pst_fix,), "w") as fo:
        fo.write(yaml.dump(conf))

    ## routing configuration
    rt_conf_file = "conf/routing_%d_area%d.csv" % (scale, num_area)

    ## node_spec
    nd_spec_file = "conf/node_spec_%d.yml" % scale

    ## network definition
    nw_def_file = "conf/network_%d.yml" % scale

    ## area definition
    area_def_file = "conf/area_info_%d_area%d.csv" % (scale, num_area)

    cdir = "%s/message_simulator" % os.environ['HOME']
    cmd = "python d2xp_sim_system.py config_%s.yml %s %s %s %s" % (conf_pst_fix,
                                                                   rt_conf_file,
                                                                   nd_spec_file,
                                                                   nw_def_file,
                                                                   area_def_file)

    p = subprocess.Popen(cmd, cwd=cdir, shell=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

    # FIX: the original called p.wait() before reading the pipes, which
    # can deadlock once the child fills a pipe buffer.  communicate()
    # drains stdout/stderr while waiting for the process to exit.
    out, err = p.communicate()
    ext_code = p.returncode
    result = {}
    result['exit_code'] = ext_code
    result['stdout'] = out.splitlines(True)
    result['stderr'] = err.splitlines(True)
    logger.info(json.dumps(result, sort_keys=True, indent=2))

    ## very poor implementation because these worker tasks
    ## are seperated from the simulation program "mbs".
    ## Simulation ID will be acquired from the log string.
    sim_id = ""
    if ext_code == 0:
        # mbs completed successfully; scan stdout for a line of the
        # form "Simulation <id> ...".
        for line in result['stdout']:
            items = line.split(' ')
            if items[0] == "Simulation":
                sim_id = items[1]
        if sim_id == "":
            ## simulation was failed
            sim_id = "may_be_failed_%s" % datetime.datetime.today().strftime("%Y%m%d%H%M%S")

    result['sim_id'] = sim_id
    task_id = exec_d2xp_mbs.request.id

    ## create and issue a task for retrieving the simulation result.
    ## this task will be got by main worker on GUI with MySQL Server
    r = retrieve_mbs_result.apply_async(args=[task_id], queue='MAIN')

    ## store the simulation result. the result will be stored in broker (RabbitMQ)
    return json.dumps(result)
開發者ID:koh1,項目名稱:sim_ds,代碼行數:66,代碼來源:tasks.py

示例8: add

# 需要導入模塊: from celery.task import Task [as 別名]
# 或者: from celery.task.Task import get_logger [as 別名]
def add(x, y):
    """Demo task: log the operands and return their sum."""
    logger = Task.get_logger(task_name=u'decorator')
    total = x + y
    logger.info("Adding %s + %s" % (x, y))
    return total
開發者ID:koh1,項目名稱:sim_ds,代碼行數:6,代碼來源:tasks.py

示例9: exec_mbs

# 需要導入模塊: from celery.task import Task [as 別名]
# 或者: from celery.task.Task import get_logger [as 別名]
def exec_mbs():
    """Change into the simulator directory and report the home path.

    @return: the value of the HOME environment variable.
    """
    logger = Task.get_logger()
    # BUG FIX: os.chdir() does not perform shell tilde expansion, so the
    # original raised OSError on the literal path "~/message_simulator".
    # Expand the home directory first.
    os.chdir(os.path.expanduser("~/message_simulator"))
    return os.environ['HOME']
開發者ID:koh1,項目名稱:sim_ds,代碼行數:6,代碼來源:tasks.py


注:本文中的celery.task.Task.get_logger方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。