This article collects typical usage examples of the Queue method from the Python package google.appengine.api.taskqueue. If you are unsure what taskqueue.Queue does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore the other members of the google.appengine.api.taskqueue module.
The following shows 10 code examples of the taskqueue.Queue method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
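Before the examples, here is a minimal, self-contained sketch of the pattern they all share: construct a Queue by name and add a Task to it. The queue name 'default' and the URL '/work' are placeholders rather than values taken from any example below.

from google.appengine.api import taskqueue

# Enqueue a push task; App Engine will POST its params to the given URL.
queue = taskqueue.Queue('default')
queue.add(taskqueue.Task(url='/work', params={'id': '42'}))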
Example 1: cleanup
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def cleanup(self):
    """Clean up this Pipeline and all Datastore records used for coordination.

    Only works when called on a root pipeline. Child pipelines will ignore
    calls to this method.

    After this method is called, Pipeline.from_id() and related status
    methods will return inconsistent or missing results. This method is
    fire-and-forget and asynchronous.
    """
    if self._root_pipeline_key is None:
        raise UnexpectedPipelineError(
            'Could not cleanup Pipeline with unknown root pipeline ID.')
    if not self.is_root:
        return
    task = taskqueue.Task(
        params=dict(root_pipeline_key=self._root_pipeline_key),
        url=self.base_path + '/cleanup',
        headers={'X-Ae-Pipeline-Key': self._root_pipeline_key})
    taskqueue.Queue(self.queue_name).add(task)
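For context, a caller would typically invoke cleanup() on a root pipeline handle once the pipeline has finished. The sketch below is an assumption about typical usage; MyPipeline, pipeline_id, and the has_finalized check stand in for application code and are not part of the example above.

stage = MyPipeline.from_id(pipeline_id)  # hypothetical pipeline class and id
if stage.has_finalized:
    stage.cleanup()  # fire-and-forget; records are deleted asynchronously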
Example 2: get
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def get(self):
    """Indefinitely fetch tasks and update the datastore."""
    queue = taskqueue.Queue('pullq')
    while True:
        try:
            tasks = queue.lease_tasks_by_tag(3600, 1000, deadline=60)
        except (taskqueue.TransientError,
                apiproxy_errors.DeadlineExceededError) as e:
            logging.exception(e)
            time.sleep(1)
            continue

        if tasks:
            key = tasks[0].tag
            try:
                update_counter(key, tasks)
            except Exception as e:
                logging.exception(e)
                raise
            finally:
                queue.delete_tasks(tasks)
        time.sleep(1)
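If processing a batch can outlast the lease, the lease can be renewed rather than letting the tasks reappear mid-update. A minimal sketch, reusing the queue and tasks from the example above:

for task in tasks:
    queue.modify_task_lease(task, 3600)  # extend this task's lease by an hour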
Example 3: post
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def post(self):
    amount = int(self.request.get('amount'))
    queue = taskqueue.Queue(name='default')
    task = taskqueue.Task(
        url='/update_counter',
        target='worker',
        params={'amount': amount})
    rpc = queue.add_async(task)
    # Wait for the rpc to complete and return the queued task.
    task = rpc.get_result()
    self.response.write(
        'Task {} enqueued, ETA {}.'.format(task.name, task.eta))
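add_async pays off when several tasks are enqueued at once: all RPCs are issued first and only awaited afterwards. A sketch assuming a hypothetical list amounts and the queue from the example above:

rpcs = [queue.add_async(taskqueue.Task(url='/update_counter',
                                       target='worker',
                                       params={'amount': a}))
        for a in amounts]  # `amounts` is an assumption
tasks = [rpc.get_result() for rpc in rpcs]  # block once all adds are in flight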
Example 4: post
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def post(self):
    if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
        self.response.set_status(403)
        return

    context = _PipelineContext.from_environ(self.request.environ)

    # Set of stringified db.Keys of children to run.
    all_pipeline_keys = set()

    # For backwards compatibility with the old style of fan-out requests.
    all_pipeline_keys.update(self.request.get_all('pipeline_key'))

    # Fetch the child pipelines from the parent. This works around the 10KB
    # task payload limit. This get() is consistent-on-read and the fan-out
    # task is enqueued in the transaction that updates the parent, so the
    # fanned_out property is consistent here.
    parent_key = self.request.get('parent_key')
    child_indexes = [int(x) for x in self.request.get_all('child_indexes')]
    if parent_key:
        parent_key = db.Key(parent_key)
        parent = db.get(parent_key)
        for index in child_indexes:
            all_pipeline_keys.add(str(parent.fanned_out[index]))

    all_tasks = []
    for pipeline_key in all_pipeline_keys:
        all_tasks.append(taskqueue.Task(
            url=context.pipeline_handler_path,
            params=dict(pipeline_key=pipeline_key),
            headers={'X-Ae-Pipeline-Key': pipeline_key},
            name='ae-pipeline-fan-out-' + db.Key(pipeline_key).name()))

    batch_size = 100  # Limit of taskqueue API bulk add.
    for i in xrange(0, len(all_tasks), batch_size):
        batch = all_tasks[i:i+batch_size]
        try:
            taskqueue.Queue(context.queue_name).add(batch)
        except (taskqueue.TombstonedTaskError,
                taskqueue.TaskAlreadyExistsError):
            pass
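The except clause above exists because named tasks are deduplicated: re-adding a name that is still in the queue raises TaskAlreadyExistsError, and a recently executed or deleted name raises TombstonedTaskError. A minimal single-task sketch with placeholder name and URL:

try:
    taskqueue.Queue('default').add(
        taskqueue.Task(name='once-only', url='/work'))
except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
    pass  # already enqueued or already ran; safe to treat as done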
Example 5: post
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def post(self):
    key = self.request.get('key')
    if key:
        queue = taskqueue.Queue('pullq')
        queue.add(taskqueue.Task(payload='', method='PULL', tag=key))
    self.redirect('/')
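The consumer side of this pull queue leases the tasks back out, grouped by tag (compare Example 2). A one-line sketch assuming the same 'pullq' queue and a known key:

tasks = taskqueue.Queue('pullq').lease_tasks_by_tag(3600, 100, tag=key)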
Example 6: task_batch_handle_notifications
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def task_batch_handle_notifications():
    """Batches notifications from pull queue, and forwards to push queue."""
    # Number of seconds to lease the tasks. Once it expires, the
    # tasks will be available again for the next worker.
    LEASE_SEC = 60
    # The maximum number of tasks to lease from the pull queue.
    MAX_TASKS = 1000
    queue = taskqueue.Queue('es-notify-tasks-batch')
    tasks = queue.lease_tasks(LEASE_SEC, MAX_TASKS)
    if not tasks:
        return

    requests = {}
    tasks_per_scheduler = collections.defaultdict(list)
    for task in tasks:
        proto = plugin_pb2.NotifyTasksRequest()
        payload = json.loads(task.payload)
        json_format.Parse(payload['request_json'], proto)
        s_tuple = (proto.scheduler_id, payload['es_host'])
        tasks_per_scheduler[s_tuple].append(task)
        if s_tuple not in requests:
            requests[s_tuple] = proto
        else:
            requests[s_tuple].notifications.extend(proto.notifications)

    for s_id, address in requests:
        request_json = json_format.MessageToJson(requests[s_id, address])
        enqueued = utils.enqueue_task(
            '/internal/taskqueue/important/external_scheduler/notify-tasks',
            'es-notify-tasks',
            params={'es_host': address, 'request_json': request_json},
            transactional=ndb.in_transaction())
        if not enqueued:
            logging.warning('Failed to enqueue external scheduler task, skipping')
            continue
        queue.delete_tasks(tasks_per_scheduler[s_id, address])
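Note the design choice above: only the tasks whose forwarding succeeded are deleted, so anything still leased reappears after LEASE_SEC and is retried by a later run. A minimal sketch of the same at-least-once pattern, where the queue name and process() are assumptions:

queue = taskqueue.Queue('my-pull-queue')  # hypothetical pull queue
tasks = queue.lease_tasks(60, 100)
done = [t for t in tasks if process(t)]  # process() returns True on success
queue.delete_tasks(done)  # unprocessed tasks are re-leased after the lease ends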
Example 7: cron_run_import
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def cron_run_import():  # pragma: no cover
    """Schedules a push task for each config set imported from Gitiles."""
    conf = admin.GlobalConfig.fetch()

    # Collect the list of config sets to import.
    config_sets = []
    if (conf and conf.services_config_storage_type == GITILES_STORAGE_TYPE and
            conf.services_config_location):
        loc = _resolved_location(conf.services_config_location)
        config_sets += _service_config_sets(loc)
    config_sets += _project_and_ref_config_sets()

    # For each config set, schedule a push task.
    # This assumes that tasks are processed faster than we add them.
    tasks = [
        taskqueue.Task(url='/internal/task/luci-config/gitiles_import/%s' % cs)
        for cs in config_sets
    ]

    # Task Queues try to preserve FIFO semantics. But if something is partially
    # failing (e.g. LUCI Config hitting gitiles quota midway through update),
    # we'd want to make slow progress across all config sets. Shuffle tasks, so
    # we don't give accidental priority to lexicographically first ones.
    random.shuffle(tasks)

    q = taskqueue.Queue('gitiles-import')
    pending = tasks
    while pending:
        batch = pending[:100]
        pending = pending[len(batch):]
        q.add(batch)
    logging.info('scheduled %d tasks', len(tasks))
Example 8: test_stop_succeeds_with_outdated_tasks
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def test_stop_succeeds_with_outdated_tasks(self):
    pipeline = models.Pipeline.create()
    job1 = models.Job.create(pipeline_id=pipeline.id)
    self.assertTrue(pipeline.get_ready())
    task1 = job1.start()
    self.assertIsNotNone(task1)
    taskqueue.Queue().delete_tasks([taskqueue.Task(name=task1.name)])
    self.assertTrue(job1.stop())
    self.assertEqual(job1.status, models.Job.STATUS.STOPPING)
Example 9: cancel_tasks
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def cancel_tasks(self):
    task_namespace = self._get_task_namespace()
    enqueued_tasks = TaskEnqueued.where(task_namespace=task_namespace)
    if enqueued_tasks:
        tasks = [taskqueue.Task(name=t.task_name) for t in enqueued_tasks]
        taskqueue.Queue().delete_tasks(tasks)
        TaskEnqueued.where(task_namespace=task_namespace).delete()
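Note that a Task constructed with only a name is enough to delete it; the object acts as a handle, so no url or payload is needed. A minimal sketch with placeholder names, against the default queue:

taskqueue.Queue().delete_tasks(
    [taskqueue.Task(name=n) for n in ('task-a', 'task-b')])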
Example 10: QueueSize
# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def QueueSize(queue=constants.TASK_QUEUE.DEFAULT, deadline=10):
    queue = taskqueue.Queue(name=queue)
    queue_stats = queue.fetch_statistics(deadline=deadline)
    return queue_stats.tasks
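A minimal usage sketch; constants.TASK_QUEUE.DEFAULT comes from the surrounding project, so a literal queue name is substituted here:

import logging

size = QueueSize(queue='default', deadline=5)
logging.info('default queue currently holds %d tasks', size)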