

Python taskqueue.Queue Method Code Examples

This article collects typical usage examples of the taskqueue.Queue method from the Python module google.appengine.api.taskqueue. If you are unsure what taskqueue.Queue does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore the other usage examples for the google.appengine.api.taskqueue module.


The following presents 10 code examples of the taskqueue.Queue method, sorted by popularity by default.
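Before the individual examples, here is a minimal sketch (not taken from any of them) of the pattern they all build on: construct a Queue by name, then add one or more Task objects to it. The queue name and handler URL below are placeholders.

from google.appengine.api import taskqueue

# Placeholder names: App Engine always provides a push queue called 'default'.
queue = taskqueue.Queue('default')
task = taskqueue.Task(url='/work', params={'key': 'value'})
queue.add(task)  # enqueue one task; the push queue POSTs it to /work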

Example 1: cleanup

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def cleanup(self):
    """Clean up this Pipeline and all Datastore records used for coordination.

    Only works when called on a root pipeline. Child pipelines will ignore
    calls to this method.

    After this method is called, Pipeline.from_id() and related status
    methods will return inconsistent or missing results. This method is
    fire-and-forget and asynchronous.
    """
    if self._root_pipeline_key is None:
      raise UnexpectedPipelineError(
          'Could not cleanup Pipeline with unknown root pipeline ID.')
    if not self.is_root:
      return
    task = taskqueue.Task(
        params=dict(root_pipeline_key=self._root_pipeline_key),
        url=self.base_path + '/cleanup',
        headers={'X-Ae-Pipeline-Key': self._root_pipeline_key})
    taskqueue.Queue(self.queue_name).add(task) 
Author: elsigh, Project: browserscope, Lines: 22, Source: pipeline.py

Example 2: get

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def get(self):
        """Indefinitely fetch tasks and update the datastore."""
        queue = taskqueue.Queue('pullq')
        while True:
            try:
                tasks = queue.lease_tasks_by_tag(3600, 1000, deadline=60)
            except (taskqueue.TransientError,
                    apiproxy_errors.DeadlineExceededError) as e:
                logging.exception(e)
                time.sleep(1)
                continue

            if tasks:
                key = tasks[0].tag

                try:
                    update_counter(key, tasks)
                except Exception as e:
                    logging.exception(e)
                    raise
                finally:
                    queue.delete_tasks(tasks)

            time.sleep(1) 
Author: GoogleCloudPlatform, Project: python-docs-samples, Lines: 26, Source: main.py
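The handler above assumes an update_counter helper that applies all leased tasks for one tag in a single datastore operation; delete_tasks sits in the finally block so the leased tasks are removed even when the update raises, instead of being re-leased and retried forever. A hedged sketch of what that helper might look like, with an illustrative Counter model that is not part of the snippet:

from google.appengine.ext import ndb

class Counter(ndb.Model):
    # Illustrative model; the real project may store counts differently.
    counter = ndb.IntegerProperty(indexed=False)

@ndb.transactional
def update_counter(key, tasks):
    # All leased tasks share the same tag, so they fold into one counter.
    counter = Counter.get_or_insert(key, counter=0)
    counter.counter += len(tasks)
    counter.put()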

Example 3: post

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def post(self):
        amount = int(self.request.get('amount'))

        queue = taskqueue.Queue(name='default')
        task = taskqueue.Task(
            url='/update_counter',
            target='worker',
            params={'amount': amount})

        rpc = queue.add_async(task)

        # Wait for the rpc to complete and return the queued task.
        task = rpc.get_result()

        self.response.write(
            'Task {} enqueued, ETA {}.'.format(task.name, task.eta)) 
Author: GoogleCloudPlatform, Project: python-docs-samples, Lines: 18, Source: application.py
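Because add_async returns a UserRPC immediately instead of blocking, several enqueues can be issued concurrently and their results collected afterwards. A hedged sketch of that extension (URLs and params are illustrative):

from google.appengine.api import taskqueue

queue = taskqueue.Queue(name='default')
rpcs = [queue.add_async(taskqueue.Task(url='/update_counter',
                                       params={'amount': n}))
        for n in (1, 2, 3)]
# Each get_result() blocks until that task is enqueued, then returns it.
tasks = [rpc.get_result() for rpc in rpcs]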

Example 4: post

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def post(self):
    if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
      self.response.set_status(403)
      return

    context = _PipelineContext.from_environ(self.request.environ)

    # Set of stringified db.Keys of children to run.
    all_pipeline_keys = set()

    # For backwards compatibility with the old style of fan-out requests.
    all_pipeline_keys.update(self.request.get_all('pipeline_key'))

    # Fetch the child pipelines from the parent. This works around the 10KB
    # task payload limit. This get() is consistent-on-read and the fan-out
    # task is enqueued in the transaction that updates the parent, so the
    # fanned_out property is consistent here.
    parent_key = self.request.get('parent_key')
    child_indexes = [int(x) for x in self.request.get_all('child_indexes')]
    if parent_key:
      parent_key = db.Key(parent_key)
      parent = db.get(parent_key)
      for index in child_indexes:
        all_pipeline_keys.add(str(parent.fanned_out[index]))

    all_tasks = []
    for pipeline_key in all_pipeline_keys:
      all_tasks.append(taskqueue.Task(
          url=context.pipeline_handler_path,
          params=dict(pipeline_key=pipeline_key),
          headers={'X-Ae-Pipeline-Key': pipeline_key},
          name='ae-pipeline-fan-out-' + db.Key(pipeline_key).name()))

    batch_size = 100  # Limit of taskqueue API bulk add.
    for i in xrange(0, len(all_tasks), batch_size):
      batch = all_tasks[i:i+batch_size]
      try:
        taskqueue.Queue(context.queue_name).add(batch)
      except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
        pass 
Author: elsigh, Project: browserscope, Lines: 42, Source: pipeline.py
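The loop at the end exists because Queue.add() accepts at most 100 tasks per call. The same idiom, extracted into a hypothetical standalone helper:

def add_in_batches(queue, tasks, batch_size=100):
    # Queue.add() rejects batches larger than 100 tasks,
    # so slice the list and add one chunk at a time.
    for i in xrange(0, len(tasks), batch_size):
        queue.add(tasks[i:i + batch_size])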

Example 5: post

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def post(self):
        key = self.request.get('key')
        if key:
            queue = taskqueue.Queue('pullq')
            queue.add(taskqueue.Task(payload='', method='PULL', tag=key))
        self.redirect('/')
Author: GoogleCloudPlatform, Project: python-docs-samples, Lines: 9, Source: main.py

Example 6: task_batch_handle_notifications

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def task_batch_handle_notifications():
  """Batches notifications from pull queue, and forwards to push queue."""

  # Number of seconds to lease the tasks. Once it expires, the
  # tasks will be available again for the next worker.
  LEASE_SEC = 60
  # The maximum number of tasks to lease from the pull queue.
  MAX_TASKS = 1000
  queue = taskqueue.Queue('es-notify-tasks-batch')
  tasks = queue.lease_tasks(LEASE_SEC, MAX_TASKS)
  if not tasks:
    return
  requests = {}
  tasks_per_scheduler = collections.defaultdict(list)
  for task in tasks:
    proto = plugin_pb2.NotifyTasksRequest()
    payload = json.loads(task.payload)
    json_format.Parse(payload['request_json'], proto)
    s_tuple = (proto.scheduler_id, payload['es_host'])
    tasks_per_scheduler[s_tuple].append(task)
    if s_tuple not in requests:
      requests[s_tuple] = proto
    else:
      requests[s_tuple].notifications.extend(proto.notifications)

  for s_id, address in requests:
    request_json = json_format.MessageToJson(requests[s_id, address])
    enqueued = utils.enqueue_task(
        '/internal/taskqueue/important/external_scheduler/notify-tasks',
        'es-notify-tasks',
        params={'es_host': address, 'request_json': request_json},
        transactional=ndb.in_transaction())
    if not enqueued:
      logging.warning('Failed to enqueue external scheduler task, skipping')
      continue
    queue.delete_tasks(tasks_per_scheduler[s_id, address]) 
Author: luci, Project: luci-py, Lines: 38, Source: external_scheduler.py

Example 7: cron_run_import

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def cron_run_import():  # pragma: no cover
  """Schedules a push task for each config set imported from Gitiles."""
  conf = admin.GlobalConfig.fetch()

  # Collect the list of config sets to import.
  config_sets = []
  if (conf and conf.services_config_storage_type == GITILES_STORAGE_TYPE and
      conf.services_config_location):
    loc = _resolved_location(conf.services_config_location)
    config_sets += _service_config_sets(loc)
  config_sets += _project_and_ref_config_sets()

  # For each config set, schedule a push task.
  # This assumes that tasks are processed faster than we add them.
  tasks = [
    taskqueue.Task(url='/internal/task/luci-config/gitiles_import/%s' % cs)
    for cs in config_sets
  ]

  # Task Queues try to preserve FIFO semantics. But if something is partially
  # failing (e.g. LUCI Config hitting gitiles quota midway through update), we'd
  # want to make a slow progress across all config sets. Shuffle tasks, so we
  # don't give accidental priority to lexicographically first ones.
  random.shuffle(tasks)

  q = taskqueue.Queue('gitiles-import')
  pending = tasks
  while pending:
    batch = pending[:100]
    pending = pending[len(batch):]
    q.add(batch)

  logging.info('scheduled %d tasks', len(tasks)) 
Author: luci, Project: luci-py, Lines: 35, Source: gitiles_import.py

Example 8: test_stop_succeeds_with_outdated_tasks

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def test_stop_succeeds_with_outdated_tasks(self):
    pipeline = models.Pipeline.create()
    job1 = models.Job.create(pipeline_id=pipeline.id)
    self.assertTrue(pipeline.get_ready())
    task1 = job1.start()
    self.assertIsNotNone(task1)
    taskqueue.Queue().delete_tasks([taskqueue.Task(name=task1.name)])
    self.assertTrue(job1.stop())
    self.assertEqual(job1.status, models.Job.STATUS.STOPPING) 
Author: google, Project: crmint, Lines: 11, Source: models_tests.py

Example 9: cancel_tasks

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def cancel_tasks(self):
    task_namespace = self._get_task_namespace()
    enqueued_tasks = TaskEnqueued.where(task_namespace=task_namespace)
    if enqueued_tasks:
      tasks = [taskqueue.Task(name=t.task_name) for t in enqueued_tasks]
      taskqueue.Queue().delete_tasks(tasks)
      TaskEnqueued.where(task_namespace=task_namespace).delete() 
Author: google, Project: crmint, Lines: 9, Source: models.py

Example 10: QueueSize

# Required import: from google.appengine.api import taskqueue [as alias]
# Or: from google.appengine.api.taskqueue import Queue [as alias]
def QueueSize(queue=constants.TASK_QUEUE.DEFAULT, deadline=10):
  queue = taskqueue.Queue(name=queue)
  queue_stats = queue.fetch_statistics(deadline=deadline)
  return queue_stats.tasks 
Author: google, Project: upvote, Lines: 6, Source: utils.py
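A hypothetical call site for this helper, assuming a push queue named 'default' is configured in queue.yaml:

import logging

# fetch_statistics issues an RPC; a short deadline keeps the caller responsive.
pending = QueueSize(queue='default', deadline=5)
logging.info('default queue currently holds %d tasks', pending)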


Note: The google.appengine.api.taskqueue.Queue examples in this article were compiled from open-source projects hosted on GitHub and similar platforms; copyright of each snippet belongs to its original author. Refer to each project's license before redistributing or reusing the code.