本文整理汇总了Python中celery.task.Task.get_logger方法的典型用法代码示例。如果您正苦于以下问题:Python Task.get_logger方法的具体用法?Python Task.get_logger怎么用?Python Task.get_logger使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类celery.task.Task
的用法示例。
在下文中一共展示了Task.get_logger方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: retrieve_mbs_result
# 需要导入模块: from celery.task import Task [as 别名]
# 或者: from celery.task.Task import get_logger [as 别名]
def retrieve_mbs_result(target_task_id):
    """Wait for a finished simulation task and persist its outcome.

    Polls the Celery task identified by ``target_task_id`` until it is
    ready, then updates the matching SimulationResult row with either the
    successful simulation id plus its log-collection names, or with
    failure markers.
    """
    log = Task.get_logger()
    async_res = AsyncResult(target_task_id)
    sim_result = SimulationResult.objects.get(task_id__exact=target_task_id)
    log.info(async_res)
    # Busy-wait until the worker has published a result.
    while not async_res.ready():
        time.sleep(0.1)
    payload = json.loads(async_res.result)
    if payload['exit_code'] != 0:
        # The simulation failed -- record placeholder/failure values.
        sim_result.sim_id = "NO SIM_ID (FAILED)"
        sim_result.task_status = "FAILED"
        sim_result.task_progress = 0
    else:
        sim_result.sim_id = payload['sim_id']
        # these are rewrite if you add log collections
        sim_result.collections = json.dumps(
            [fmt % sim_result.sim_id
             for fmt in ("%s_nwk", "%s_node", "%s_msg", "%s_usr", "%s_map")])
        sim_result.task_progress = 100
        sim_result.task_status = "SUCCESS"
    sim_result.save()
示例2: execute_transform
# 需要导入模块: from celery.task import Task [as 别名]
# 或者: from celery.task.Task import get_logger [as 别名]
def execute_transform(spill, client_id="Unknown"):
    """
    MAUS Celery transform task used by sub-processes to execute jobs
    from clients. Proxies of this task are invoked by clients. This
    applies the current transform to the spill and returns the new
    spill.
    @param spill JSON document string holding spill.
    @param client_id ID of client who submitted job.
    @return JSON document string holding new spill.
    @throws WorkerProcessException if there is a problem when process
    is called (the original exception's class and message are captured
    so that unpicklable exceptions do not break result transport).
    """
    logger = Task.get_logger()
    if logger.isEnabledFor(logging.INFO):
        logger.info("Task invoked by %s" % client_id)
    try:
        spill_json = json.loads(spill)
        # Membership test directly on the dict -- calling .keys() only
        # builds a throwaway list.  Non-spill events pass through untouched.
        if "maus_event_type" in spill_json and \
           spill_json["maus_event_type"] != "Spill":
            return spill
        return maus_cpp.converter.string_repr(MausTransform.process(spill))
    except Exception as exc:  # pylint:disable = W0703
        # Filter exceptions so no unPicklable exception causes
        # problems.
        status = {}
        status["error"] = str(exc.__class__)
        status["message"] = str(exc)
        raise WorkerProcessException(MausConfiguration.transform,
                                     status)
示例3: crawl
# 需要导入模块: from celery.task import Task [as 别名]
# 或者: from celery.task.Task import get_logger [as 别名]
def crawl(id=None, screen_name=None, cursor=-1, crawl_mode=False, routing_key=DEFAULT_ROUTING_KEY):
    """Fetch one page of a user's follower ids and fan out sync tasks.

    @param id Twitter numeric user id (preferred lookup key).
    @param screen_name Twitter screen name, used when ``id`` is absent.
    @param cursor pagination cursor from a previous ``followers.ids`` call.
    @param crawl_mode propagated to sync_user so followers are crawled too.
    @param routing_key Celery routing key for the fanned-out tasks.
    """
    logger = Task.get_logger()
    logger.info('[crawl] starting crawl(id=%s, screen_name=%s, cursor=%s)' % (id, screen_name, cursor))
    twitter = get_twitter()
    params = {
        "cursor": cursor
    }
    if id:
        params['user_id'] = id
    elif screen_name:
        params['screen_name'] = screen_name
    result = twitter.followers.ids(**params)
    # block while we grab the current user's info
    del params['cursor']
    source_id = twitter.users.show(**params)['id']
    source = sync_user(source_id)
    # Renamed loop variable: the original reused ``id`` and shadowed the
    # parameter (and the builtin).
    for follower_id in result['ids']:
        sync_user.apply_async(args=[follower_id, ], kwargs={'source': source, 'crawl_mode': crawl_mode}, routing_key=routing_key)
    if result['next_cursor']:
        logger.info("[crawl] continuing at next_cursor=%s" % result['next_cursor'])
        # BUG FIX: the original passed an undefined name ``twitter_id`` here,
        # raising NameError on every continuation page; use the numeric id
        # resolved above so paging keeps targeting the same user.
        crawl.apply_async(
            args=[source_id, ],
            kwargs={
                'cursor': result['next_cursor'],
                'crawl_mode': crawl_mode,
                'routing_key': routing_key,
            },
            routing_key=routing_key
        )
示例4: sync_user
# 需要导入模块: from celery.task import Task [as 别名]
# 或者: from celery.task.Task import get_logger [as 别名]
def sync_user(twitter_id, source=None, crawl_mode=False):
    """Create or refresh the TwitterUser record for ``twitter_id``.

    @param twitter_id numeric Twitter user id.
    @param source optional TwitterUser this user should follow.
    @param crawl_mode when true, also enqueue a crawl of this user's
           followers (requires a non-empty screen name).
    @return the synced TwitterUser instance.
    """
    logger = Task.get_logger()
    logger.info("[sync_user] starting sync_user(%s)" % twitter_id)
    try:
        user = TwitterUser.objects.get(id__exact=str(twitter_id))
    except TwitterUser.DoesNotExist:
        user = TwitterUser(id=twitter_id)
    # NOTE(review): indentation was lost in the source extraction; placing
    # the refresh at function level (run for existing users too) matches the
    # "sync" semantics -- confirm against the original module.
    user.update_from_twitter()
    user.save()
    if source:
        user.follow(source)
    # BUG FIX: the original's else-branch logged "empty name" whenever
    # crawl_mode was False; only warn when a crawl was actually requested
    # but the user has no screen name to crawl by.
    if crawl_mode:
        if user.screen_name:
            crawl.delay(screen_name=user.screen_name)
        else:
            logger.info("[sync_user] got a user with an empty name")
    return user
示例5: update_flickrify_cache
# 需要导入模块: from celery.task import Task [as 别名]
# 或者: from celery.task.Task import get_logger [as 别名]
def update_flickrify_cache(uuid):
    """Force a refresh of the cached flickrify result for ``uuid``."""
    log = Task.get_logger()
    log.info("[update_flickrify_cache] starting work on %s" % uuid)
    do_flickrify(uuid, force_refresh=True)
示例6: mbs_exec
# 需要导入模块: from celery.task import Task [as 别名]
# 或者: from celery.task.Task import get_logger [as 别名]
def mbs_exec(conf):
    # Run a simulator command via the shell and return its
    # (exit_status, output) tuple, as produced by commands.getstatusoutput.
    # NOTE(review): ``cmd`` is not defined anywhere in this function, so
    # calling it raises NameError.  Presumably the command string should be
    # built from ``conf`` -- confirm against the original module before use.
    # NOTE(review): ``logger`` is created but never used here.
    logger = Task.get_logger()
    return commands.getstatusoutput(cmd)
示例7: exec_d2xp_mbs
# 需要导入模块: from celery.task import Task [as 别名]
# 或者: from celery.task.Task import get_logger [as 别名]
def exec_d2xp_mbs(conf, scale, num_area):
    """Run the d2xp message simulator for one configuration.

    Writes ``conf`` to a timestamped YAML file, launches the simulator as a
    subprocess, parses the simulation id out of its stdout, schedules a
    follow-up task that stores the result, and returns the run summary.

    @param conf simulator configuration (dumped to YAML).
    @param scale scale selector used to pick the conf/*.yml and *.csv files.
    @param num_area number of areas, also part of the conf file names.
    @return JSON string with exit_code, stdout/stderr lines and sim_id.
    """
    logger = Task.get_logger()
    conf_pst_fix = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
    # FIX: use a context manager so the file is closed even if yaml.dump
    # or write raises (the original leaked the handle on error).
    with open("%s/message_simulator/config_%s.yml" % (os.environ['HOME'], conf_pst_fix,), "w") as fo:
        fo.write(yaml.dump(conf))
    ## routing configuration
    rt_conf_file = "conf/routing_%d_area%d.csv" % (scale, num_area)
    ## node_spec
    nd_spec_file = "conf/node_spec_%d.yml" % scale
    ## network definition
    nw_def_file = "conf/network_%d.yml" % scale
    ## area definition
    area_def_file = "conf/area_info_%d_area%d.csv" % (scale, num_area)
    cdir = "%s/message_simulator" % os.environ['HOME']
    cmd = "python d2xp_sim_system.py config_%s.yml %s %s %s %s" % (conf_pst_fix,
                                                                   rt_conf_file,
                                                                   nd_spec_file,
                                                                   nw_def_file,
                                                                   area_def_file)
    p = subprocess.Popen(cmd, cwd=cdir, shell=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # FIX: communicate() drains both pipes while waiting.  The original
    # p.wait() followed by readlines() can deadlock when the child fills a
    # pipe buffer before exiting.
    out, err = p.communicate()
    ext_code = p.returncode
    result = {}
    result['exit_code'] = ext_code
    # splitlines(True) keeps the line endings, matching readlines().
    result['stdout'] = out.splitlines(True)
    result['stderr'] = err.splitlines(True)
    logger.info(json.dumps(result, sort_keys=True, indent=2))
    ## very poor implementation because these worker tasks
    ## are seperated from the simulation program "mbs".
    ## Simulation ID will be acquired from the log string.
    sim_id = ""
    if ext_code == 0:
        # mbs is successfully completed; scrape the id from its log output.
        for line in result['stdout']:
            items = line.split(' ')
            if items[0] == "Simulation":
                sim_id = items[1]
    if sim_id == "":
        ## simulation was failed
        sim_id = "may_be_failed_%s" % datetime.datetime.today().strftime("%Y%m%d%H%M%S")
    result['sim_id'] = sim_id
    task_id = exec_d2xp_mbs.request.id
    ## create and issue a task for retrieving the simulation result.
    ## this task will be got by main worker on GUI with MySQL Server
    r = retrieve_mbs_result.apply_async(args=[task_id], queue='MAIN')
    ## store the simulation result. the result will be stored in broker (RabbitMQ)
    return json.dumps(result)
示例8: add
# 需要导入模块: from celery.task import Task [as 别名]
# 或者: from celery.task.Task import get_logger [as 别名]
def add(x, y):
    """Return the sum of ``x`` and ``y``, logging the operation first."""
    log = Task.get_logger(task_name=u'decorator')
    total = x + y
    log.info("Adding %s + %s" % (x, y))
    return total
示例9: exec_mbs
# 需要导入模块: from celery.task import Task [as 别名]
# 或者: from celery.task.Task import get_logger [as 别名]
def exec_mbs():
    """Change into the simulator directory and report the home path.

    @return the value of the HOME environment variable.
    @throws OSError if the simulator directory does not exist.
    """
    logger = Task.get_logger()
    # BUG FIX: os.chdir performs no tilde expansion, so the original
    # os.chdir("~/message_simulator") always raised OSError; expand the
    # user's home directory first.
    os.chdir(os.path.expanduser("~/message_simulator"))
    return os.environ['HOME']