This page collects typical usage examples of the Python class celery.task.Task. If you are wondering what exactly the Task class is for, how to use it, or where to find real examples of it in use, the curated class code examples here may help.
A total of 15 code examples of the Task class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
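Before the examples, here is a minimal sketch of the pattern most of them rely on: a task is defined by subclassing celery.task.Task (or via the @task decorator) and enqueued with delay() or apply_async(). The class name and arguments below are illustrative and not taken from the examples; only the Task API itself (run(), get_logger(), apply_async()) comes from Celery.

from celery.task import Task

class CleanupTask(Task):
    """Hypothetical task used only to illustrate the Task subclass API."""

    def run(self, path):
        logger = self.get_logger()
        logger.info("Removing temporary files under %s" % path)
        # ... the actual task body would go here ...
        return path

# enqueue the task; a configured broker (e.g. RabbitMQ) and a running worker are required
CleanupTask.apply_async(args=("/tmp/scratch",))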
Example 1: retrieve_mbs_result
def retrieve_mbs_result(target_task_id):
    logger = Task.get_logger()
    r = AsyncResult(target_task_id)
    sr = SimulationResult.objects.get(task_id__exact=target_task_id)
    # sr = SimulationResult.objects.get(sim_id__exact=r['sim_id'])
    logger.info(r)
    while not r.ready():
        time.sleep(0.1)
    result = json.loads(r.result)
    if result['exit_code'] == 0:
        ## success
        sr.sim_id = result['sim_id']
        ## these need to be rewritten if you add log collections
        sr.collections = json.dumps([
            "%s_nwk" % sr.sim_id,
            "%s_node" % sr.sim_id,
            "%s_msg" % sr.sim_id,
            "%s_usr" % sr.sim_id,
            "%s_map" % sr.sim_id,
        ])
        sr.task_progress = 100
        sr.task_status = "SUCCESS"
        sr.save()
    else:
        sr.sim_id = "NO SIM_ID (FAILED)"
        sr.task_status = "FAILED"
        sr.task_progress = 0
        sr.save()
Example 2: handle
def handle(self, *args, **options):
    settings.LOG_TO_STREAM = True
    now = datetime.datetime.utcnow()
    feeds = Feed.objects.filter(
        next_scheduled_update__lte=now,
        active=True
    ).exclude(
        active_subscribers=0
    ).order_by('?')
    if options['force']:
        feeds = Feed.objects.all().order_by('pk')
    print " ---> Tasking %s feeds..." % feeds.count()
    publisher = Task.get_publisher()
    feed_queue = []
    size = 12
    for f in feeds:
        f.queued_date = datetime.datetime.utcnow()
        f.set_next_scheduled_update()
    for feed_queue in (feeds[pos:pos + size] for pos in xrange(0, len(feeds), size)):
        print feed_queue
        feed_ids = [feed.pk for feed in feed_queue]
        print feed_ids
        UpdateFeeds.apply_async(args=(feed_ids,), queue='update_feeds', publisher=publisher)
    publisher.connection.close()
Example 3: execute_transform
def execute_transform(spill, client_id="Unknown"):
    """
    MAUS Celery transform task used by sub-processes to execute jobs
    from clients. Proxies of this task are invoked by clients. This
    applies the current transform to the spill and returns the new
    spill.
    @param spill JSON document string holding spill.
    @param client_id ID of client who submitted job.
    @return JSON document string holding new spill.
    @throws Exception if there is a problem when process is called.
    """
    logger = Task.get_logger()
    if logger.isEnabledFor(logging.INFO):
        logger.info("Task invoked by %s" % client_id)
    try:
        spill_json = json.loads(spill)
        if "maus_event_type" in spill_json.keys() and \
                spill_json["maus_event_type"] != "Spill":
            return spill
        else:
            return maus_cpp.converter.string_repr(MausTransform.process(spill))
    except Exception as exc:  # pylint:disable = W0703
        # Filter exceptions so no unpicklable exception causes
        # problems.
        status = {}
        status["error"] = str(exc.__class__)
        status["message"] = str(exc)
        raise WorkerProcessException(MausConfiguration.transform,
                                     status)
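The docstring above notes that clients invoke proxies of this task with a JSON spill string. A hedged sketch of such a client call, assuming execute_transform is registered as a Celery task elsewhere in the MAUS code base (the registration and the payload fields shown are not part of this excerpt):

import json

spill_str = json.dumps({"maus_event_type": "Spill", "spill_number": 1})
async_result = execute_transform.delay(spill_str, "client-42")
new_spill = async_result.get()  # .get() re-raises the worker-side exception on failure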
Example 4: crawl
def crawl(id=None, screen_name=None, cursor=-1, crawl_mode=False, routing_key=DEFAULT_ROUTING_KEY):
    logger = Task.get_logger()
    logger.info('[crawl] starting crawl(id=%s, screen_name=%s, cursor=%s)' % (id, screen_name, cursor))
    twitter = get_twitter()
    params = {
        "cursor": cursor
    }
    if id:
        params['user_id'] = id
    elif screen_name:
        params['screen_name'] = screen_name
    result = twitter.followers.ids(**params)
    # block while we grab the current user's info
    del params['cursor']
    source_id = twitter.users.show(**params)['id']
    source = sync_user(source_id)
    for id in result['ids']:
        sync_user.apply_async(args=[id,], kwargs={'source': source, 'crawl_mode': crawl_mode}, routing_key=routing_key)
    if result['next_cursor']:
        logger.info("[crawl] continuing at next_cursor=%s" % result['next_cursor'])
        crawl.apply_async(
            # the original snippet referenced an undefined 'twitter_id' here;
            # the crawled user's resolved id (source_id) is assumed instead
            args=[source_id,],
            kwargs={
                'cursor': result['next_cursor'],
                'crawl_mode': crawl_mode,
                'routing_key': routing_key,
            },
            routing_key=routing_key
        )
Example 5: queue_new_feeds
def queue_new_feeds(self):
    new_feeds = UserSubscription.objects.filter(user=self.user, feed__fetched_once=False).values("feed_id")
    new_feeds = list(set([f["feed_id"] for f in new_feeds]))
    logging.info(" ---> [%s] Queueing NewFeeds: (%s) %s" % (self.user, len(new_feeds), new_feeds))
    size = 4
    publisher = Task.get_publisher(exchange="new_feeds")
    for t in (new_feeds[pos:pos + size] for pos in xrange(0, len(new_feeds), size)):
        NewFeeds.apply_async(args=(t,), queue="new_feeds", publisher=publisher)
    publisher.connection.close()
Example 6: __call__
def __call__(self, *args, **kwargs):
    # if you don't provide "user" as a kwarg, this just acts like
    # a normal ol' boring task
    if "user" in kwargs:
        user = kwargs.pop("user")
        CeleryTaskTracker.objects.create(taskid=self.request.id,
                                         taskclass=self.__class__.__name__,
                                         owner=user)
        get_pulp_server(user=user)
    self.errors = []
    return Task.__call__(self, *args, **kwargs)
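A hedged sketch of how a Task subclass with this kind of __call__ override might be declared and used; the class names and the print-based bookkeeping are illustrative, and only the "user" kwarg handling mirrors the example above:

from celery.task import Task

class TrackedTask(Task):
    abstract = True  # base class only; concrete tasks subclass it

    def __call__(self, *args, **kwargs):
        # minimal version of the pattern above: strip the bookkeeping kwarg
        # before the real task body runs (the example also records it in the DB)
        user = kwargs.pop("user", None)
        if user is not None:
            print "task %s submitted by %s" % (self.request.id, user)
        return Task.__call__(self, *args, **kwargs)

class ReportTask(TrackedTask):
    def run(self, report_id):
        return "built report %s" % report_id

# the extra "user" kwarg is consumed by __call__ and never reaches run()
ReportTask.apply_async(args=(17,), kwargs={"user": "alice"})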
Example 7: queue_new_feeds
def queue_new_feeds(self, new_feeds=None):
    if not new_feeds:
        new_feeds = UserSubscription.objects.filter(user=self.user,
                                                    feed__fetched_once=False,
                                                    active=True).values('feed_id')
        new_feeds = list(set([f['feed_id'] for f in new_feeds]))
    logging.user(self.user, "~BB~FW~SBQueueing NewFeeds: ~FC(%s) %s" % (len(new_feeds), new_feeds))
    size = 4
    publisher = Task.get_publisher(exchange="new_feeds")
    for t in (new_feeds[pos:pos + size] for pos in xrange(0, len(new_feeds), size)):
        NewFeeds.apply_async(args=(t,), queue="new_feeds", publisher=publisher)
    publisher.connection.close()
Example 8: task_feeds
def task_feeds(cls, feeds, queue_size=12):
    print " ---> Tasking %s feeds..." % feeds.count()
    publisher = Task.get_publisher()
    feed_queue = []
    for f in feeds:
        f.queued_date = datetime.datetime.utcnow()
        f.set_next_scheduled_update()
    for feed_queue in (feeds[pos:pos + queue_size] for pos in xrange(0, len(feeds), queue_size)):
        feed_ids = [feed.pk for feed in feed_queue]
        UpdateFeeds.apply_async(args=(feed_ids,), queue="update_feeds", publisher=publisher)
    publisher.connection.close()
Example 9: sync_user
def sync_user(twitter_id, source=None, crawl_mode=False):
    logger = Task.get_logger()
    logger.info("[sync_user] starting sync_user(%s)" % twitter_id)
    try:
        user = TwitterUser.objects.get(id__exact=str(twitter_id))
    except TwitterUser.DoesNotExist:
        user = TwitterUser(id=twitter_id)
    user.update_from_twitter()
    user.save()
    if source:
        user.follow(source)
    if crawl_mode and user.screen_name:
        crawl.delay(screen_name=user.screen_name)
    else:
        logger.info("[sync_user] got a user with an empty name")
    return user
Example 10: test_annotate
def test_annotate(self):
    with patch("celery.app.task.resolve_all_annotations") as anno:
        anno.return_value = [{"FOO": "BAR"}]
        Task.annotate()
        self.assertEqual(Task.FOO, "BAR")
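Example 10 patches the annotation resolver directly; in a real project the annotations that Task.annotate() applies usually come from configuration. A hedged sketch of such a configuration (the task name and rate limit are illustrative):

# celeryconfig.py
CELERY_ANNOTATIONS = {
    "tasks.add": {"rate_limit": "10/s"},  # annotate a single task by name
    # "*": {"rate_limit": "10/s"},        # or annotate every registered task
}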
Example 11: update_flickrify_cache
def update_flickrify_cache(uuid):
    logger = Task.get_logger()
    logger.info("[update_flickrify_cache] starting work on %s" % uuid)
    do_flickrify(uuid, force_refresh=True)
Example 12: mbs_exec
def mbs_exec(conf):
    logger = Task.get_logger()
    # NOTE: 'cmd' is not defined in this excerpt; it is presumably built from 'conf'
    return commands.getstatusoutput(cmd)
Example 13: exec_d2xp_mbs
def exec_d2xp_mbs(conf, scale, num_area):
    logger = Task.get_logger()
    conf_pst_fix = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
    fo = open("%s/message_simulator/config_%s.yml" % (os.environ['HOME'], conf_pst_fix,), "w")
    fo.write(yaml.dump(conf))
    fo.close()
    ## routing configuration
    rt_conf_file = "conf/routing_%d_area%d.csv" % (scale, num_area)
    ## node_spec
    nd_spec_file = "conf/node_spec_%d.yml" % scale
    ## network definition
    nw_def_file = "conf/network_%d.yml" % scale
    ## area definition
    area_def_file = "conf/area_info_%d_area%d.csv" % (scale, num_area)
    cdir = "%s/message_simulator" % os.environ['HOME']
    cmd = "python d2xp_sim_system.py config_%s.yml %s %s %s %s" % (conf_pst_fix,
                                                                   rt_conf_file,
                                                                   nd_spec_file,
                                                                   nw_def_file,
                                                                   area_def_file)
    p = subprocess.Popen(cmd, cwd=cdir, shell=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    ext_code = p.wait()
    result = {}
    result['exit_code'] = ext_code
    result['stdout'] = p.stdout.readlines()
    # result['stdout'] = r"%s" % p.stdout
    result['stderr'] = p.stderr.readlines()
    # result['stderr'] = r"%s" % p.stderr
    logger.info(json.dumps(result, sort_keys=True, indent=2))
    ## very poor implementation: these worker tasks are separated from the
    ## simulation program "mbs", so the simulation ID has to be scraped
    ## from the log output.
    sim_id = ""
    if ext_code == 0:
        # mbs completed successfully
        for line in result['stdout']:
            items = line.split(' ')
            if items[0] == "Simulation":
                sim_id = items[1]
    if sim_id == "":
        ## the simulation probably failed
        sim_id = "may_be_failed_%s" % datetime.datetime.today().strftime("%Y%m%d%H%M%S")
    result['sim_id'] = sim_id
    task_id = exec_d2xp_mbs.request.id
    ## create and issue a task for retrieving the simulation result;
    ## it will be picked up by the main worker on the GUI host with the MySQL server
    r = retrieve_mbs_result.apply_async(args=[task_id], queue='MAIN')
    ## return the simulation result; it will be stored in the broker (RabbitMQ)
    return json.dumps(result)
Example 14: add
def add(x, y):
    logger = Task.get_logger(task_name=u'decorator')
    logger.info("Adding %s + %s" % (x, y))
    return x + y
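Example 14 only shows the task body; its registration is not part of the excerpt. A hedged sketch of how such a function-style task is typically registered and called with the old celery.task API (the task name is illustrative):

from celery.task import task, Task

@task(name="tasks.add")
def add(x, y):
    logger = Task.get_logger(task_name=u'decorator')
    logger.info("Adding %s + %s" % (x, y))
    return x + y

# enqueue and wait for the result (requires a running broker and worker)
print add.delay(2, 3).get()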
Example 15: exec_mbs
def exec_mbs():
    logger = Task.get_logger()
    # os.chdir does not expand "~" itself, so expanduser is needed here
    os.chdir(os.path.expanduser("~/message_simulator"))
    return os.environ['HOME']