This article collects typical usage examples of the celery.result.ResultSet.completed_count method in Python. If you are wondering what ResultSet.completed_count does, how to call it, or what real-world usage looks like, the curated code examples here may help. You can also look further into usage examples of the containing class, celery.result.ResultSet.
Two code examples of ResultSet.completed_count are shown below, ordered by popularity by default.
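Before the examples, here is a minimal, self-contained sketch of the method (not taken from the examples below); the module name `tasks` and its `add` task are assumptions used only for illustration:

from celery.result import ResultSet

from tasks import add  # hypothetical Celery task, assumed for this sketch

# Dispatch a few tasks and collect their AsyncResult handles in a ResultSet.
rs = ResultSet([add.delay(i, i) for i in range(10)])

# completed_count() reports how many results in the set have completed
# successfully; total is the number of results in the set.
print('%d of %d subtasks completed' % (rs.completed_count(), rs.total))

# ready() is True once every result in the set has finished (success or failure).
if rs.ready():
    print('all done, any failures: %s' % rs.failed())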
Example 1: get_result
# Required import: from celery.result import ResultSet [as alias]
# Or: from celery.result.ResultSet import completed_count [as alias]
from celery.result import AsyncResult, ResultSet


def get_result(request):
    ret = {'status': 'error', 'result': '', 'messages': ['', ], }
    if request.method == 'POST' and request.user:
        try:
            user = MDBUser.objects.get(username=request.user.username)
        except Exception as e:
            ret['messages'][0] = "<strong>FATAL</strong>(get_result.user): %s" % e
        else:
            # Note: this is NOT the status of the tasks; 'success' here means that
            # the get_result() request itself was processed correctly.
            ret['status'] = 'success'
            async_res = AsyncResult(request.POST['task_id'])
            if async_res.ready():
                # Get the ids of all subtasks spawned by the parent task
                subtasks = ust_get_ids(user)
                # Create a list of AsyncResults from the list of task ids
                async_results = [AsyncResult(task_id) for task_id in subtasks]
                # ... and also a ResultSet, for convenience
                async_res_set = ResultSet(async_results)
                ret['messages'][0] = 'parent task %s: %d of %d subtasks completed' % \
                    (request.POST['task_id'][:8],
                     async_res_set.completed_count(),
                     async_res_set.total,
                     )
                # Have all subtasks completed?
                if async_res_set.ready():
                    # All tasks done, forget about those task ids
                    ust_clear_ids(user)
                    # Did any of them fail?
                    if async_res_set.failed():
                        ret['result'] = 'FAILURE'
                        for async_res in async_results:
                            if async_res.state == 'FAILURE':
                                ret['messages'].append(
                                    "<strong>ERROR</strong>(get_result.FAILURE): '%s':'%s'" %
                                    (async_res.task_id[:8], async_res.result, ))
                    else:
                        ret['result'] = 'SUCCESS'
                else:
                    ret['result'] = 'PENDING'
            else:
                ret['result'] = 'PENDING'
                ret['messages'][0] = 'parent task %s: PENDING' % \
                    (request.POST['task_id'], )
Example 2: int
# Required import: from celery.result import ResultSet [as alias]
# Or: from celery.result.ResultSet import completed_count [as alias]
import sys
import time

import pandas as pd
from sklearn.ensemble import RandomForestClassifier
# The progress bar below uses the .numerator / max_width API, which matches the
# etaprogress package; this import is an assumption, as the excerpt omits it.
from etaprogress.progress import ProgressBar

# The excerpt starts inside the loop that dispatches one processFile task per
# input file; `k`, `openfile`, `data`, the initial `bar` and the ResultSet
# `result` come from the part of the script that is not shown here.
#sponsored = train.loc[train['file'] == openfile]
#if not sponsored.empty:
#    result.add(processFile.delay(openfile, data, int(sponsored['sponsored'])))
#testing = sample.loc[sample['file'] == openfile]
#if not testing.empty:
#    result.add(processFile.delay(openfile, data, int(sponsored['sponsored'])))
bar.numerator = k
print("Sending out processes ", bar, end='\r')
sys.stdout.flush()

# Poll the ResultSet until every task has finished, showing progress via
# completed_count().
bar = ProgressBar(len(train) + len(test_files), max_width=40)
while not result.ready():
    time.sleep(5)
    bar.numerator = result.completed_count()
    print("Waiting for return results ", bar, end='\r')
    sys.stdout.flush()
results = result.join()  # wait for the jobs to finish and collect their return values

df_full = pd.DataFrame(list(results))

print('--- Training random forest')
clf = RandomForestClassifier(n_estimators=150, n_jobs=-1, random_state=0)
train_data = df_full[df_full.sponsored.notnull()].fillna(0)
test = df_full[df_full.sponsored.isnull() & df_full.file.isin(test_files)].fillna(0)
clf.fit(train_data.drop(['file', 'sponsored'], axis=1), train_data.sponsored)

print('--- Create predictions and submission')
submission = test[['file']].reset_index(drop=True)
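The excerpt stops right after the submission frame is created; a hypothetical continuation (the 'sponsored' probability column and the output filename are assumptions, not part of the original example) could look like:

# Hypothetical continuation of Example 2: predict the probability of the
# positive class and write the submission file (filename is an assumption).
submission['sponsored'] = clf.predict_proba(test.drop(['file', 'sponsored'], axis=1))[:, 1]
submission.to_csv('submission.csv', index=False)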