本文整理匯總了Python中celery.result方法的典型用法代碼示例。如果您正苦於以下問題:Python celery.result方法的具體用法?Python celery.result怎麽用?Python celery.result使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類celery的用法示例。
在下文中一共展示了celery.result方法的12個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __add_worker_task
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def __add_worker_task(self, task):
    """Register a worker-reported task in self.messages.

    `task` is a dict as reported by Celery's inspect().active(), carrying
    at least 'id' and 'name' and usually a monotonic 'time_start'. Newly
    seen tasks are recorded with PENDING status; a result that is already
    ready is forgotten to free backend resources.
    """
    result = AsyncResult(task['id'])
    if task['id'] not in self.messages:
        try:
            # Convert the worker's monotonic 'time_start' into an absolute
            # wall-clock timestamp.
            # BUG FIX: the original referenced an undefined name 't' here,
            # so this branch always raised and the fallback below was
            # taken unconditionally.
            timeSubmitted = datetime.fromtimestamp(
                time.time() - (kombu.five.monotonic() - task['time_start']))
        except Exception:
            # Best-effort fallback (e.g. 'time_start' missing) so the UI
            # always receives a timestamp.
            timeSubmitted = str(current_time())    #TODO: dirty hack to make failsafe with UI
        self.messages[task['id']] = {
            'type': ('train' if 'train' in task['name'] else 'inference'),  #TODO
            'submitted': timeSubmitted,
            'status': celery.states.PENDING,
            'meta': {'message':'job at worker'}
        }
    #TODO: needed?
    if result.ready():
        result.forget()
示例2: register_job
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def register_job(self, job, taskType, on_complete=None):
    """Track a newly submitted Celery job so it can be polled later.

    Group results are tracked per child task; a plain result is tracked
    under its own id (unless already known). An optional completion
    callback is stored for the job id in either case.
    """
    self.jobs.append(job)

    def _pending_entry():
        # Fresh status record for a task just handed to a worker.
        return {
            'type': taskType,
            'submitted': str(current_time()),
            'status': celery.states.PENDING,
            'meta': {'message':'sending job to worker'}
        }

    children = getattr(job, 'children', None)
    if children is not None:
        # Group result: register every child task individually.
        for child in children:
            self.messages[child.task_id] = _pending_entry()
    elif job.id not in self.messages:
        # No children; add the job itself.
        self.messages[job.id] = _pending_entry()
    self.on_complete[job.id] = on_complete
示例3: pollNow
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def pollNow(self):
    """Ask the Celery workers directly for their active tasks and re-register
    any task that is missing from self.messages (e.g. after a server
    restart), appending an AsyncResult for it to self.jobs.
    """
    i = self.celery_app.control.inspect()
    stats = i.stats()
    # Only proceed if at least one worker responded to the stats ping.
    if stats is not None and len(stats):
        active_tasks = i.active()
        for key in active_tasks.keys():
            # key is a worker hostname; its value is that worker's task list.
            taskList = active_tasks[key]
            for t in taskList:
                taskID = t['id']
                if not taskID in self.messages:
                    # task got lost (e.g. due to server restart); re-add
                    try:
                        # Convert the worker's monotonic 'time_start' into an
                        # absolute wall-clock timestamp.
                        timeSubmitted = datetime.fromtimestamp(time.time() - (kombu.five.monotonic() - t['time_start']))
                    except:
                        timeSubmitted = str(current_time()) #TODO: dirty hack to make failsafe with UI
                    self.messages[taskID] = {
                        'type': ('train' if 'train' in t['name'] else 'inference'), #TODO
                        'submitted': timeSubmitted,
                        'status': celery.states.PENDING,
                        'meta': {'message':'job at worker'}
                    }
                    # NOTE(review): nesting reconstructed — the job is
                    # re-queued only for previously untracked tasks, which
                    # avoids duplicating already-tracked jobs; confirm
                    # against upstream source.
                    job = celery.result.AsyncResult(taskID) #TODO: task.ready()
                    self.jobs.append(job)
示例4: reset_server
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def reset_server(result):
    """Trigger a server reset after a background action finishes.

    `result` is the outcome of the preceding task; if it exposes a falsy
    `.ok`, no reset is performed. Under supervisor, the 'reset' program is
    started via supervisorctl on the originating host; otherwise the WSGI
    file is touched so the app server reloads. Returns either the incoming
    `result` or, when supervisorctl was invoked, its integer return code.
    """
    sys.stderr.write("reset_server in worker: starting\n")
    if hasattr(result, 'ok') and not result.ok:
        sys.stderr.write("reset_server in worker: not resetting because result did not succeed.\n")
        return result
    if USING_SUPERVISOR:
        # Only roles that run the web app or celery need a reset.
        if re.search(r':(web|celery|all):', container_role):
            # Talk to the supervisor instance on the host the task ran on;
            # use localhost when that is this very machine.
            if result.hostname == hostname:
                hostname_to_use = 'localhost'
            else:
                hostname_to_use = result.hostname
            args = [SUPERVISORCTL, '-s', 'http://' + hostname_to_use + ':9001', 'start', 'reset']
            # NOTE: rebinds `result` to the subprocess return code, which
            # becomes this function's return value.
            result = subprocess.run(args).returncode
            sys.stderr.write("reset_server in worker: called " + ' '.join(args) + "\n")
        else:
            sys.stderr.write("reset_server in worker: did not reset due to container role\n")
    else:
        sys.stderr.write("reset_server in worker: supervisor not active, touching WSGI file\n")
        wsgi_file = WEBAPP_PATH
        if os.path.isfile(wsgi_file):
            # Update the file's mtime so the WSGI server reloads the app.
            with open(wsgi_file, 'a'):
                os.utime(wsgi_file, None)
    sys.stderr.write("reset_server in worker: finishing\n")
    return result
示例5: _maybe_transform_result
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def _maybe_transform_result(self, idx, result, **kwargs):
    """Apply the idx-th girder result hook's transform() to `result`.

    Returns the transformed value when a hook exists at `idx` and exposes
    a callable `transform`; otherwise (including when fewer hooks than
    results were registered) returns `result` unchanged.
    """
    try:
        grh = self.request.girder_result_hooks[idx]
        # Builtin callable() replaces the legacy six.callable shim, which
        # only existed for Python 3.0/3.1 where callable() was missing.
        if hasattr(grh, 'transform') and callable(grh.transform):
            return grh.transform(result, **kwargs)
        return result
    except IndexError:
        # No hook registered for this result index: pass through untouched.
        return result
示例6: __poll_tasks
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def __poll_tasks(self):
    """Collect the backend status of every tracked task.

    Returns a tuple (status, task_ongoing): `status` maps task ids to a
    dict with type, submission time, state and meta info; `task_ongoing`
    is True if at least one task has not finished yet.
    """
    status = {}
    task_ongoing = False
    for taskID, entry in self.messages.items():
        msg = self.celery_app.backend.get_task_meta(taskID)
        if not len(msg):
            continue
        # check for worker failures
        if msg['status'] == celery.states.FAILURE:
            # append failure message
            if 'meta' in msg:       #TODO: and isinstance(msg['meta'], BaseException):
                info = { 'message': html.escape(str(msg['meta']))}
            else:
                info = { 'message': 'an unknown error occurred'}
        else:
            info = msg['result']
        status[taskID] = {
            'type': entry['type'],
            'submitted': entry['submitted'],    #TODO: not broadcast across AIController threads...
            'status': msg['status'],
            'meta': info
        }
        # check if ongoing
        if not AsyncResult(taskID).ready():
            task_ongoing = True
    return status, task_ongoing
示例7: run
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def run(self):
    """Blocking worker loop: repeatedly pop registered jobs, wait for each
    to finish, fire its completion callback (if any), and forget the
    result. When the queue is empty, workers are polled for stray tasks
    and the loop sleeps until something appears. Never returns.
    """
    # iterate over all registered tasks and get their result, one by one
    while True:
        if not len(self.jobs):
            # no jobs in current queue; ping workers for other running tasks
            self.pollNow()
        if not len(self.jobs):
            # still no jobs in queue; wait and then try again
            while True:
                # check if anything in list
                if len(self.jobs):
                    break
                else:
                    time.sleep(10)
        else:
            nextJob = self.jobs.pop()
            # Blocks until the job finishes; propagate=True re-raises
            # worker-side exceptions here.
            nextJob.get(propagate=True)
            # job finished; handle success and failure cases
            if nextJob.id in self.on_complete:
                callback = self.on_complete[nextJob.id]
                if callback is not None:
                    callback(nextJob)
            # Drop the result from the backend once handled.
            nextJob.forget()
示例8: start_sequencer
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def start_sequencer(self):
    """Run the harvester sequencer unless another sequencer task is active.

    The 'SEQUENCER_TASK_ID' status record acts as a cross-worker lock:
    a recorded task id whose AsyncResult is ready is treated as stale and
    cleared. If the lock is free it is set to this request's id, the
    sequencer runs, and the lock is released afterwards. Returns the
    sequencer result, or a 'Sequencer already running' notice.
    """
    sequencer_task = Status.get_status(self.session, 'SEQUENCER_TASK_ID')
    if sequencer_task.value:
        # A task id is recorded; clear it if that task already finished.
        task_result = AsyncResult(sequencer_task.value)
        if not task_result or task_result.ready():
            sequencer_task.value = None
            sequencer_task.save(self.session)
    if sequencer_task.value is None:
        # Acquire the lock with our own Celery request id.
        sequencer_task.value = self.request.id
        sequencer_task.save(self.session)
        harvester = PolkascanHarvesterService(
            db_session=self.session,
            type_registry=TYPE_REGISTRY,
            type_registry_file=TYPE_REGISTRY_FILE
        )
        try:
            result = harvester.start_sequencer()
        except BlockIntegrityError as e:
            # Surface integrity problems in the result instead of failing
            # the Celery task.
            result = {'result': str(e)}
        # Release the lock and persist everything.
        sequencer_task.value = None
        sequencer_task.save(self.session)
        self.session.commit()
        # Check if analytics data need to be generated
        #start_generate_analytics.delay()
        return result
    else:
        return {'result': 'Sequencer already running'}
示例9: rebuilding_search_index
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def rebuilding_search_index(self, search_index_id=None, truncate=False):
    """Rebuild the analytics search index.

    :param search_index_id: id of the index type whose existing rows are
        purged when `truncate` is set.
    :param truncate: when True, delete existing rows for that index type
        before rebuilding.
    :return: dict with a 'result' status message.
    """
    if truncate:
        # Clear search index table.
        # SECURITY FIX: the id is interpolated into raw SQL; cast to int
        # so a non-numeric value cannot inject SQL (the original
        # str.format()-ed the raw value directly).
        self.session.execute(
            'delete from analytics_search_index where index_type_id={}'.format(int(search_index_id)))
        self.session.commit()
    harvester = PolkascanHarvesterService(
        db_session=self.session,
        type_registry=TYPE_REGISTRY,
        type_registry_file=TYPE_REGISTRY_FILE
    )
    harvester.rebuild_search_index()
    return {'result': 'index rebuilt'}
示例10: rebuild_search_index
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def rebuild_search_index(self):
    """Re-create the full search index via the Polkascan harvester service."""
    service_kwargs = dict(
        db_session=self.session,
        type_registry=TYPE_REGISTRY,
        type_registry_file=TYPE_REGISTRY_FILE,
    )
    PolkascanHarvesterService(**service_kwargs).rebuild_search_index()
    return {'result': 'search index rebuilt'}
示例11: start_harvester
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def start_harvester(self, check_gaps=False):
    """Kick off block harvesting from the substrate chain.

    :param check_gaps: when True, first schedule tasks that fill gaps
        between already harvested block ranges.
    :return: dict with a status message, the scheduled block sets, and the
        sequencer task id.
    """
    substrate = SubstrateInterface(SUBSTRATE_RPC_URL)
    block_sets = []
    if check_gaps:
        # Check for gaps between already harvested blocks and try to fill them first
        remaining_sets_result = Block.get_missing_block_ids(self.session)
        for block_set in remaining_sets_result:
            # Get start and end block hash
            # NOTE(review): 'block_from' feeds the END hash and 'block_to'
            # the START hash — presumably because blocks are accumulated
            # backwards; confirm against accumulate_block_recursive.
            end_block_hash = substrate.get_block_hash(int(block_set['block_from']))
            start_block_hash = substrate.get_block_hash(int(block_set['block_to']))
            # Start processing task
            accumulate_block_recursive.delay(start_block_hash, end_block_hash)
            block_sets.append({
                'start_block_hash': start_block_hash,
                'end_block_hash': end_block_hash
            })
    # Start sequencer
    sequencer_task = start_sequencer.delay()
    # Continue from current (finalised) head
    if FINALIZATION_ONLY == 1:
        start_block_hash = substrate.get_chain_finalised_head()
    else:
        start_block_hash = substrate.get_chain_head()
    # No end hash: harvest from the head without a fixed lower bound.
    end_block_hash = None
    accumulate_block_recursive.delay(start_block_hash, end_block_hash)
    block_sets.append({
        'start_block_hash': start_block_hash,
        'end_block_hash': end_block_hash
    })
    return {
        'result': 'Harvester job started',
        'block_sets': block_sets,
        'sequencer_task_id': sequencer_task.task_id
    }
示例12: rebuild_account_info_snapshot
# 需要導入模塊: import celery [as 別名]
# 或者: from celery import result [as 別名]
def rebuild_account_info_snapshot(self):
    """Rebuild the account info snapshot table from the search index.

    Truncates the snapshot table, then replays balance snapshots per
    (account, block) pair from SearchIndex: a full snapshot of all
    accounts whenever a BALANCE_FULL_SNAPSHOT_INTERVAL boundary is
    crossed, otherwise an incremental per-account snapshot. Finally each
    Account row is updated from that account's most recent snapshot.
    """
    harvester = PolkascanHarvesterService(
        db_session=self.session,
        type_registry=TYPE_REGISTRY,
        type_registry_file=TYPE_REGISTRY_FILE
    )
    last_full_snapshot_block_nr = 0
    self.session.execute('truncate table {}'.format(AccountInfoSnapshot.__tablename__))
    for account_id, block_id in self.session.query(SearchIndex.account_id, SearchIndex.block_id).filter(
        SearchIndex.block_id >= settings.BALANCE_SYSTEM_ACCOUNT_MIN_BLOCK
    ).order_by('block_id').group_by(SearchIndex.account_id, SearchIndex.block_id).yield_per(1000):
        if block_id > last_full_snapshot_block_nr + settings.BALANCE_FULL_SNAPSHOT_INTERVAL:
            # Crossed an interval boundary: snapshot at the boundary block,
            # rounding block_id down to the interval.
            last_full_snapshot_block_nr = block_id - block_id % settings.BALANCE_FULL_SNAPSHOT_INTERVAL
            harvester.create_full_balance_snaphot(last_full_snapshot_block_nr)
            self.session.commit()
        else:
            harvester.create_balance_snapshot(block_id, account_id)
            self.session.commit()
    # set balances according to most recent snapshot
    account_info = self.session.execute("""
select
a.account_id,
a.balance_total,
a.balance_free,
a.balance_reserved,
a.nonce
from
data_account_info_snapshot as a
inner join (
select
account_id, max(block_id) as max_block_id
from data_account_info_snapshot
group by account_id
) as b
on a.account_id = b.account_id and a.block_id = b.max_block_id
""")
    for account_id, balance_total, balance_free, balance_reserved, nonce in account_info:
        Account.query(self.session).filter_by(id=account_id).update(
            {
                Account.balance_total: balance_total,
                Account.balance_free: balance_free,
                Account.balance_reserved: balance_reserved,
                Account.nonce: nonce,
            }, synchronize_session='fetch'
        )
    self.session.commit()
    return {'result': 'account info snapshots rebuilt'}