

Python states.PENDING attribute code examples

This article collects typical usage examples of the states.PENDING attribute from the Python module celery.states. If you have been wondering what states.PENDING is for, how to use it, or what it looks like in practice, the curated examples below should help. You can also explore other usage examples from the celery.states module.


The following presents 15 code examples of the states.PENDING attribute, sorted by popularity by default.
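
A minimal sketch of the pattern most of these examples follow: a bound Celery task reports custom progress by calling self.update_state(state=states.PENDING, meta={...}), and the caller reads that progress back through the task's AsyncResult. The app name, broker/backend URLs and meta keys below are illustrative assumptions, not taken from any of the projects quoted further down.

from celery import Celery, states

# assumed broker/result backend; any backend supported by Celery behaves the same way here
app = Celery('demo', broker='redis://localhost:6379/0', backend='redis://localhost:6379/0')

@app.task(bind=True)
def long_running(self, n):
    for i in range(n):
        # publish a progress message while leaving the task formally in the PENDING state
        self.update_state(state=states.PENDING, meta={'desc': 'step %d of %d' % (i + 1, n)})
        # ... one unit of real work would go here ...
    return {'done': n}

# caller side: the state stays PENDING until the task finishes, and .info holds the meta dict
result = long_running.delay(3)
print(result.state)   # 'PENDING' while running, 'SUCCESS' afterwards
print(result.info)    # the latest meta dict while running, the return value once finished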

Example 1: reduce_pipeline

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def reduce_pipeline(self, column_ids_to_keep, new_dataset_name_prefix, dataset_id, project_id):
    logger.info("In reduce pipeline with dataset_id %s and project_id %s", dataset_id, project_id)

    # Reduce
    self.update_state(state=states.PENDING, meta={'desc': '(1/3) Reducing dataset'})
    df_reduced, new_dataset_title, new_dataset_name, new_dataset_path = \
        reduce_dataset(project_id, dataset_id, column_ids_to_keep, new_dataset_name_prefix)

    # Save
    self.update_state(state=states.PENDING, meta={'desc': '(2/3) Saving reduced dataset'})
    df_reduced.to_csv(new_dataset_path, sep='\t', index=False)
    dataset_docs = save_dataset(project_id, new_dataset_title, new_dataset_name, 'tsv', new_dataset_path)
    dataset_doc = dataset_docs[0]
    new_dataset_id = dataset_doc['id']

    # Ingest
    self.update_state(state=states.PENDING, meta={'desc': '(3/3) Ingesting reduced dataset'})
    ingestion_result = ingestion_pipeline.apply(args=[ new_dataset_id, project_id ])
    return {
        'result': {
            'id': new_dataset_id
        }
    } 
Developer: MacroConnections, Project: DIVE-backend, Lines of code: 25, Source file: pipelines.py
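
reduce_pipeline takes self and calls self.update_state, so in DIVE-backend it is presumably registered as a bound Celery task (the registration itself is not part of this snippet). Because each stage stores a human-readable 'desc' in the task meta, a caller could watch the progress roughly as in the hedged sketch below; the helper name and polling interval are assumptions, not part of the project.

import time

from celery import states
from celery.result import AsyncResult

def poll_progress(task_id, interval=1.0):
    """Print the '(1/3) ...' progress messages the pipeline writes via update_state."""
    res = AsyncResult(task_id)              # resolved against the current/default Celery app
    while res.state == states.PENDING:      # progress here is reported under the PENDING state
        info = res.info
        if isinstance(info, dict) and 'desc' in info:
            print(info['desc'])
        time.sleep(interval)
    return res.state                        # e.g. 'SUCCESS' or 'FAILURE'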

Example 2: join_pipeline

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def join_pipeline(self, left_dataset_id, right_dataset_id, on, left_on, right_on, how, left_suffix, right_suffix, new_dataset_name_prefix, project_id):
    logger.info("In join pipeline with dataset_ids %s %s and project_id %s", left_dataset_id, right_dataset_id, project_id)

    # Join
    self.update_state(state=states.PENDING, meta={'desc': '(1/3) Joining dataset'})
    df_joined, new_dataset_title, new_dataset_name, new_dataset_path = \
        join_datasets(project_id, left_dataset_id, right_dataset_id, on, left_on, right_on, how, left_suffix, right_suffix, new_dataset_name_prefix)

    # Save
    self.update_state(state=states.PENDING, meta={'desc': '(2/3) Saving joined dataset'})
    df_joined.to_csv(new_dataset_path, sep='\t', index=False)
    dataset_docs = save_dataset(project_id, new_dataset_title, new_dataset_name, 'tsv', new_dataset_path)
    dataset_doc = dataset_docs[0]
    new_dataset_id = dataset_doc['id']

    # Ingest
    self.update_state(state=states.PENDING, meta={'desc': '(3/3) Ingesting joined dataset'})
    ingestion_result = ingestion_pipeline.apply(args=[ new_dataset_id, project_id ])
    return {
        'result': {
            'id': new_dataset_id
        }
    } 
Developer: MacroConnections, Project: DIVE-backend, Lines of code: 25, Source file: pipelines.py

Example 3: unpivot_pipeline

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def unpivot_pipeline(self, pivot_fields, variable_name, value_name, new_dataset_name_prefix, dataset_id, project_id):
    logger.info("In unpivot pipeline with dataset_id %s and project_id %s", dataset_id, project_id)

    # Unpivot
    self.update_state(state=states.PENDING, meta={'desc': '(1/3) Unpivoting dataset'})
    df_unpivoted, new_dataset_title, new_dataset_name, new_dataset_path = \
        unpivot_dataset(project_id, dataset_id, pivot_fields, variable_name, value_name, new_dataset_name_prefix)

    # Save
    self.update_state(state=states.PENDING, meta={'desc': '(2/3) Saving unpivoted dataset'})
    df_unpivoted.to_csv(new_dataset_path, sep='\t', index=False)
    dataset_docs = save_dataset(project_id, new_dataset_title, new_dataset_name, 'tsv', new_dataset_path)
    dataset_doc = dataset_docs[0]
    new_dataset_id = dataset_doc['id']

    # Ingest
    self.update_state(state=states.PENDING, meta={'desc': '(3/3) Ingesting unpivoted dataset'})
    ingestion_result = ingestion_pipeline.apply(args=[ new_dataset_id, project_id ])
    return {
        'result': {
            'id': new_dataset_id
        }
    } 
Developer: MacroConnections, Project: DIVE-backend, Lines of code: 25, Source file: pipelines.py

Example 4: ingestion_pipeline

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def ingestion_pipeline(self, dataset_id, project_id):
    '''
    Compute dataset and field properties in parallel

    TODO Accept multiple datasets?
    '''
    logger.info("In ingestion pipeline with dataset_id %s and project_id %s", dataset_id, project_id)
    self.update_state(state=states.PENDING, meta={'desc': '(1/4) Computing dataset properties'})
    dataset_properties = compute_dataset_properties(dataset_id, project_id)

    self.update_state(state=states.PENDING, meta={'desc': '(2/4) Saving dataset properties'})
    save_dataset_properties(dataset_properties, dataset_id, project_id)

    self.update_state(state=states.PENDING, meta={'desc': '(3/4) Computing dataset field properties'})
    field_properties = compute_all_field_properties(dataset_id, project_id)

    self.update_state(state=states.PENDING, meta={'desc': '(4/4) Saving dataset field properties'})
    result = save_field_properties(field_properties, dataset_id, project_id)
    return result 
Developer: MacroConnections, Project: DIVE-backend, Lines of code: 21, Source file: pipelines.py

Example 5: drop_clusters

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def drop_clusters(self, exclude_task_ids: Set = None, exclude_project_clustering_id: int = None):
        project = self
        # Stop running tasks
        from apps.task.tasks import purge_task
        from apps.project.tasks import ClusterProjectDocuments
        task_qr = project.project_tasks \
            .filter(name=ClusterProjectDocuments.name, status__in=UNREADY_STATES)  # type: QuerySet
        if exclude_task_ids:
            task_qr = task_qr.exclude(pk__in=exclude_task_ids)

        for task in task_qr:
            purge_task(task.pk, wait=True, timeout=1.5)
        # delete DocumentClusters
        for pcl in project.projectclustering_set.all():
            pcl.document_clusters.all().delete()
        # delete ProjectClustering
        project.projectclustering_set.exclude(id=exclude_project_clustering_id).delete()
        # delete ClusterProjectDocuments Tasks
        to_delete_qr = project.project_tasks.filter(name=ClusterProjectDocuments.name,
                                                    status__in=[SUCCESS, PENDING])  # type: QuerySet
        if exclude_task_ids:
            to_delete_qr = to_delete_qr.exclude(pk__in=exclude_task_ids)
        to_delete_qr.delete() 
Developer: LexPredict, Project: lexpredict-contraxsuite, Lines of code: 25, Source file: models.py
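
The UNREADY_STATES used in the first filter above also comes from celery.states: it is the frozenset of non-final states (PENDING, RECEIVED, STARTED, RETRY, and in newer Celery versions REJECTED as well), which is why that query catches clustering tasks that are still queued or running. A quick sanity check, hedged against minor differences between Celery versions:

from celery import states

assert states.PENDING in states.UNREADY_STATES              # PENDING is a non-final state
assert states.SUCCESS in states.READY_STATES                # SUCCESS is a final state
assert not (states.UNREADY_STATES & states.READY_STATES)    # the two sets are disjoint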

Example 6: check_and_set_completed

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def check_and_set_completed(self):
        """
        Check and set "completed"
        """
        if self.completed:
            return True
        if not self.session_tasks.exists():
            # session tasks already deleted but documents exist, i.e. the session ended some time ago
            if self.document_set.exists():
                completed = True
            # no documents uploaded into the session and more than 1 day has passed
            elif (now() - self.created_date).days >= 1:
                completed = True
            else:
                return None
        else:
            completed = not self.session_tasks.filter(status=PENDING).exists()
        if self.completed != completed:
            self.completed = completed
            self.save()
        return completed 
Developer: LexPredict, Project: lexpredict-contraxsuite, Lines of code: 23, Source file: models.py

Example 7: get_non_indexed_doc_ids_not_planned_to_index_by_predicate

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def get_non_indexed_doc_ids_not_planned_to_index_by_predicate(
        doc_type_code: str,
        predicate: str,
        pack_size: int = 100) -> Generator[List[int], None, None]:
    table_name = doc_fields_table_name(doc_type_code)

    with connection.cursor() as cursor:
        # return documents of the specified type which
        # - do not exist in the corresponding fields cache
        # - have no planned but not-started reindex tasks on them
        cursor.execute('select dd.id \n'
                       'from document_document dd \n'
                       f'left outer join "{table_name}" df on dd.id = df.document_id \n'
                       'left outer join lateral (select jsonb_array_elements(args->0) doc_id \n'
                       '                         from task_task \n'
                       f"                         where name = '{_get_reindex_task_name()}' \n"
                       "                         and own_status = 'PENDING'\n"
                       '                         and date_work_start is null) tt on tt.doc_id = to_jsonb(dd.id) \n'
                       f'where {predicate} and df.document_id is null and tt.doc_id is null \n'
                       'and dd.processed is true')

        rows = cursor.fetchmany(pack_size)
        while rows:
            yield [row[0] for row in rows]
            rows = cursor.fetchmany(pack_size) 
Developer: LexPredict, Project: lexpredict-contraxsuite, Lines of code: 27, Source file: tasks.py

Example 8: _get_all_doc_ids_not_planned_to_index

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def _get_all_doc_ids_not_planned_to_index(query_filter: str, params: list, pack_size: int) \
        -> Generator[List[int], None, None]:
    with connection.cursor() as cursor:
        cursor.execute('select d.id from document_document d \n'
                       'left outer join lateral (select jsonb_array_elements(args->0) doc_id \n'
                       '                         from task_task \n'
                       '                         where name = %s \n'
                       '                         and own_status = %s\n'
                       '                         and date_work_start is null) tt on tt.doc_id = to_jsonb(d.id) \n'
                       'where {0} and tt.doc_id is null and d.processed is true'
                       .format(query_filter),
                       [_get_reindex_task_name(), PENDING] + params)

        rows = cursor.fetchmany(pack_size)
        while rows:
            yield [row[0] for row in rows]
            rows = cursor.fetchmany(pack_size) 
Developer: LexPredict, Project: lexpredict-contraxsuite, Lines of code: 19, Source file: tasks.py

Example 9: get_task_detail

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def get_task_detail(cls, task: Task) -> TaskRecord:
        reason = f'is started at {task.date_work_start} and not finished yet' if task.status == PENDING else 'is failed'
        r = TaskRecord(task.pk, task.name, reason, task.status,
                       task.date_work_start or task.date_start,
                       task.date_done or task.own_date_done,
                       task.user_id)
        try:
            for record in task.get_task_log_from_elasticsearch():
                r.kibana_ref = kibana_root_url(record.record_id, record.file_index, add_protocol=False)
                if not hasattr(record, 'stack_trace') or not hasattr(record, 'message') \
                        or record.log_level != 'ERROR':
                    continue
                r.error_message = record.message
                r.stack_trace = record.stack_trace
                return r
        except GeneratorExit:
            return r
        return r 
Developer: LexPredict, Project: lexpredict-contraxsuite, Lines of code: 20, Source file: task_monitor.py

Example 10: _set_task_event

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def _set_task_event(self, event: dict) -> Iterator[Union[Task, str]]:
        task = self.memory.tasks.get(event['uuid'])
        pre_state = task and task.state
        (task, _), _ = self.memory.event(event)

        # fix or insert fields.
        task.result_meta = None
        if task.state == SUCCESS:
            task.result_meta, task.result = self._derive_task_result(task)
        yield task

        # fix shortcomings of `created` field: a task should be displayed as PENDING if not
        # new; if a task is first seen in any other state, it should not be new.
        if not pre_state:
            yield '' if task.state == PENDING else task.state  # empty state will mean new.
            return

        yield from self.gen_task_states.states_through(pre_state, task.state) 
Developer: rsalmei, Project: clearly, Lines of code: 20, Source file: event_listener.py

Example 11: wait_for_command

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def wait_for_command(request, app_name, task_id, after):
    res = AsyncResult(task_id)
    if app_name != '_':
        app = models.App.objects.get(name=app_name)
        task, created = models.TaskLog.objects.get_or_create(task_id=task_id, defaults={'app': app, 'when': datetime.now()})
        description = task.description
    else:
        description = ""
    if res.state == state(SUCCESS):
        return redirect(reverse(after, kwargs={'app_name': app_name, 'task_id': task_id}))
    log = ansi_escape.sub("", get_log(res))
    if res.state == state(FAILURE):
        log += str(res.traceback)
    return render(request, 'command_wait.html', {
        'app': app_name,
        'task_id': task_id,
        'log': log,
        'state': res.state,
        'running': res.state in [state(PENDING), state(STARTED)],
        'description': description
        }) 
Developer: palfrey, Project: wharf, Lines of code: 23, Source file: views.py
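
A side note on the state(...) wrapper used above: celery.states.state subclasses str, so wrapped values still compare equal to plain status strings, and they additionally support ordering by state precedence. A minimal illustration; the precedence comparison reflects the documented behaviour of celery.states, but treat it as an assumption if your Celery version behaves differently:

from celery.states import state, PENDING, SUCCESS

assert state(SUCCESS) == 'SUCCESS'        # state is a str subclass, plain-string equality works
assert state(PENDING) < state(SUCCESS)    # comparisons follow state precedence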

Example 12: xxx_backend

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def xxx_backend(self):
        tid = uuid()

        assert self.b.get_status(tid) == states.PENDING
        assert self.b.get_result(tid) is None

        self.b.mark_as_done(tid, 42)
        assert self.b.get_status(tid) == states.SUCCESS
        assert self.b.get_result(tid) == 42

        tid2 = uuid()
        try:
            raise KeyError('foo')
        except KeyError as exception:
            self.b.mark_as_failure(tid2, exception)

        assert self.b.get_status(tid2) == states.FAILURE
        assert isinstance(self.b.get_result(tid2), KeyError) 
Developer: guohongze, Project: adminset, Lines of code: 20, Source file: test_database.py
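
Worth noting about the first assertion in this test: a result backend reports PENDING for any task id it has never seen, so PENDING alone cannot distinguish "queued but not started" from "unknown or expired id". A small illustration, assuming a configured default app and result backend (the literal id is made up):

from celery.result import AsyncResult

# an id that was never dispatched does not raise; it simply reads back as PENDING
print(AsyncResult('00000000-0000-0000-0000-000000000000').state)   # -> 'PENDING'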

Example 13: __init__

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def __init__(self, request, result, task_id=None, tt=TT_DUMMY, dc_bound=True, **kwargs):
        if 'task_status' in kwargs:
            task_status = kwargs.pop('task_status')
        else:
            task_status = states.PENDING

        if isinstance(result, Exception):
            tr, kwargs['status'] = get_task_exception(result)
        else:
            if 'status' not in kwargs:
                kwargs['status'] = TASK_STATE[task_status]

            if task_id is None:
                task_id = self.gen_task_id(request, tt=tt, dc_bound=dc_bound, **kwargs)

            tr = {
                'task_id': task_id,
                'status': task_status,
                'result': result,
            }

        super(DummyTaskResponse, self).__init__(request, tr, task_id, **kwargs)

    # noinspection PyUnusedLocal 
Developer: erigones, Project: esdc-ce, Lines of code: 26, Source file: response.py

Example 14: format_task_status_class_filter

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def format_task_status_class_filter(task_result: TaskResult=None) -> str:
    if task_result:
        return {
            states.SUCCESS: 'success',
            states.FAILURE: 'danger',
            states.REVOKED: 'danger',
            states.REJECTED: 'danger',
            states.RETRY: 'warning',
            states.PENDING: 'info',
            states.RECEIVED: 'info',
            states.STARTED: 'info',
        }.get(task_result.taskmeta.status, 'warning')

    return 'warning' 
Developer: Salamek, Project: gitlab-tools, Lines of code: 16, Source file: middleware.py

Example 15: _prepare_state_and_info_by_task_dict

# Required imports: from celery import states [as alias]
# Or: from celery.states import PENDING [as alias]
def _prepare_state_and_info_by_task_dict(task_ids,
                                             task_results_by_task_id) -> Mapping[str, EventBufferValueType]:
        state_info: MutableMapping[str, EventBufferValueType] = {}
        for task_id in task_ids:
            task_result = task_results_by_task_id.get(task_id)
            if task_result:
                state = task_result["status"]
                info = None if not hasattr(task_result, "info") else task_result["info"]
            else:
                state = celery_states.PENDING
                info = None
            state_info[task_id] = state, info
        return state_info 
Developer: apache, Project: airflow, Lines of code: 15, Source file: celery_executor.py


Note: The celery.states.PENDING attribute examples in this article were compiled by 純淨天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult each project's license before using or redistributing the code. Do not republish this article without permission.