当前位置: 首页>>代码示例>>Python>>正文


Python states.PENDING属性代码示例

本文整理汇总了Python中celery.states.PENDING属性的典型用法代码示例。如果您正苦于以下问题:Python states.PENDING属性的具体用法?Python states.PENDING怎么用?Python states.PENDING使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在celery.states的用法示例。


在下文中一共展示了states.PENDING属性的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: reduce_pipeline

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def reduce_pipeline(self, column_ids_to_keep, new_dataset_name_prefix, dataset_id, project_id):
    '''
    Reduce a dataset to a subset of its columns, save the result as a new
    TSV dataset, then ingest the new dataset.

    :param column_ids_to_keep: ids of the columns to retain
    :param new_dataset_name_prefix: prefix for the new dataset's name/title
    :return: dict of the form {'result': {'id': <new dataset id>}}
    '''
    logger.info("In reduce pipeline with dataset_id %s and project_id %s", dataset_id, project_id)

    # Reduce (original comment said "Unpivot" — copy-paste residue from unpivot_pipeline)
    self.update_state(state=states.PENDING, meta={'desc': '(1/3) Reducing dataset'})
    df_reduced, new_dataset_title, new_dataset_name, new_dataset_path = \
        reduce_dataset(project_id, dataset_id, column_ids_to_keep, new_dataset_name_prefix)

    # Save reduced frame as TSV and register it as a new dataset document
    self.update_state(state=states.PENDING, meta={'desc': '(2/3) Saving reduced dataset'})
    df_reduced.to_csv(new_dataset_path, sep='\t', index=False)
    dataset_docs = save_dataset(project_id, new_dataset_title, new_dataset_name, 'tsv', new_dataset_path)
    new_dataset_id = dataset_docs[0]['id']

    # Ingest synchronously (.apply runs in-process); the ingestion result itself is unused
    self.update_state(state=states.PENDING, meta={'desc': '(3/3) Ingesting reduced dataset'})
    ingestion_pipeline.apply(args=[new_dataset_id, project_id])
    return {
        'result': {
            'id': new_dataset_id
        }
    }
开发者ID:MacroConnections,项目名称:DIVE-backend,代码行数:25,代码来源:pipelines.py

示例2: join_pipeline

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def join_pipeline(self, left_dataset_id, right_dataset_id, on, left_on, right_on, how, left_suffix, right_suffix, new_dataset_name_prefix, project_id):
    '''
    Join two datasets, save the joined result as a new TSV dataset,
    then ingest the new dataset.

    :param on, left_on, right_on, how, left_suffix, right_suffix: join parameters
        passed through to join_datasets (pandas-merge style)
    :return: dict of the form {'result': {'id': <new dataset id>}}
    '''
    logger.info("In join pipeline with dataset_ids %s %s and project_id %s", left_dataset_id, right_dataset_id, project_id)

    # Join (original comment said "Unpivot" — copy-paste residue from unpivot_pipeline)
    self.update_state(state=states.PENDING, meta={'desc': '(1/3) Joining dataset'})
    df_joined, new_dataset_title, new_dataset_name, new_dataset_path = \
        join_datasets(project_id, left_dataset_id, right_dataset_id, on, left_on, right_on, how, left_suffix, right_suffix, new_dataset_name_prefix)

    # Save joined frame as TSV and register it as a new dataset document
    self.update_state(state=states.PENDING, meta={'desc': '(2/3) Saving joined dataset'})
    df_joined.to_csv(new_dataset_path, sep='\t', index=False)
    dataset_docs = save_dataset(project_id, new_dataset_title, new_dataset_name, 'tsv', new_dataset_path)
    new_dataset_id = dataset_docs[0]['id']

    # Ingest synchronously (.apply runs in-process); the ingestion result itself is unused
    self.update_state(state=states.PENDING, meta={'desc': '(3/3) Ingesting joined dataset'})
    ingestion_pipeline.apply(args=[new_dataset_id, project_id])
    return {
        'result': {
            'id': new_dataset_id
        }
    }
开发者ID:MacroConnections,项目名称:DIVE-backend,代码行数:25,代码来源:pipelines.py

示例3: unpivot_pipeline

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def unpivot_pipeline(self, pivot_fields, variable_name, value_name, new_dataset_name_prefix, dataset_id, project_id):
    '''
    Unpivot (melt) a dataset, save the result as a new TSV dataset,
    then ingest the new dataset.

    :param pivot_fields: fields to unpivot into rows
    :param variable_name, value_name: names for the resulting variable/value columns
    :return: dict of the form {'result': {'id': <new dataset id>}}
    '''
    logger.info("In unpivot pipeline with dataset_id %s and project_id %s", dataset_id, project_id)

    # Unpivot
    self.update_state(state=states.PENDING, meta={'desc': '(1/3) Unpivoting dataset'})
    df_unpivoted, new_dataset_title, new_dataset_name, new_dataset_path = \
        unpivot_dataset(project_id, dataset_id, pivot_fields, variable_name, value_name, new_dataset_name_prefix)

    # Save unpivoted frame as TSV and register it as a new dataset document
    self.update_state(state=states.PENDING, meta={'desc': '(2/3) Saving unpivoted dataset'})
    df_unpivoted.to_csv(new_dataset_path, sep='\t', index=False)
    dataset_docs = save_dataset(project_id, new_dataset_title, new_dataset_name, 'tsv', new_dataset_path)
    new_dataset_id = dataset_docs[0]['id']

    # Ingest synchronously (.apply runs in-process); the ingestion result itself is unused
    self.update_state(state=states.PENDING, meta={'desc': '(3/3) Ingesting unpivoted dataset'})
    ingestion_pipeline.apply(args=[new_dataset_id, project_id])
    return {
        'result': {
            'id': new_dataset_id
        }
    }
开发者ID:MacroConnections,项目名称:DIVE-backend,代码行数:25,代码来源:pipelines.py

示例4: ingestion_pipeline

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def ingestion_pipeline(self, dataset_id, project_id):
    '''
    Compute and persist dataset-level and field-level properties for one dataset.

    TODO Accept multiple datasets?
    '''
    logger.info("In ingestion pipeline with dataset_id %s and project_id %s", dataset_id, project_id)

    # Step 1/4: dataset-level properties
    self.update_state(state=states.PENDING, meta={'desc': '(1/4) Computing dataset properties'})
    ds_props = compute_dataset_properties(dataset_id, project_id)

    # Step 2/4: persist them
    self.update_state(state=states.PENDING, meta={'desc': '(2/4) Saving dataset properties'})
    save_dataset_properties(ds_props, dataset_id, project_id)

    # Step 3/4: per-field properties
    self.update_state(state=states.PENDING, meta={'desc': '(3/4) Computing dataset field properties'})
    fld_props = compute_all_field_properties(dataset_id, project_id)

    # Step 4/4: persist them; the save result is what the task reports back
    self.update_state(state=states.PENDING, meta={'desc': '(4/4) Saving dataset field properties'})
    return save_field_properties(fld_props, dataset_id, project_id)
开发者ID:MacroConnections,项目名称:DIVE-backend,代码行数:21,代码来源:pipelines.py

示例5: drop_clusters

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def drop_clusters(self, exclude_task_ids: Set = None, exclude_project_clustering_id: int = None):
        """
        Tear down document clustering for this project: purge unfinished
        ClusterProjectDocuments tasks, delete their DocumentCluster and
        ProjectClustering records, then delete the finished/pending task rows.

        :param exclude_task_ids: task PKs to leave untouched (neither purged nor deleted)
        :param exclude_project_clustering_id: ProjectClustering PK to preserve
        """
        project = self
        # Stop running tasks (imports kept local — presumably to avoid circular imports)
        from apps.task.tasks import purge_task
        from apps.project.tasks import ClusterProjectDocuments
        task_qr = project.project_tasks \
            .filter(name=ClusterProjectDocuments.name, status__in=UNREADY_STATES)  # type: QuerySet
        if exclude_task_ids:
            task_qr = task_qr.exclude(pk__in=exclude_task_ids)

        # purge each still-unready task, waiting up to 1.5s per task
        for task in task_qr:
            purge_task(task.pk, wait=True, timeout=1.5)
        # delete DocumentClusters
        for pcl in project.projectclustering_set.all():
            pcl.document_clusters.all().delete()
        # delete ProjectClustering (all except the one explicitly preserved)
        project.projectclustering_set.exclude(id=exclude_project_clustering_id).delete()
        # delete ClusterProjectDocuments Tasks
        to_delete_qr = project.project_tasks.filter(name=ClusterProjectDocuments.name,
                                                    status__in=[SUCCESS, PENDING])  # type: QuerySet
        if exclude_task_ids:
            to_delete_qr = to_delete_qr.exclude(pk__in=exclude_task_ids)
        to_delete_qr.delete()
开发者ID:LexPredict,项目名称:lexpredict-contraxsuite,代码行数:25,代码来源:models.py

示例6: check_and_set_completed

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def check_and_set_completed(self):
        """
        Check and set "completed"
        """
        # already marked complete — nothing to recompute
        if self.completed:
            return True

        if self.session_tasks.exists():
            # session still has tasks: it is complete once none are PENDING
            is_done = not self.session_tasks.filter(status=PENDING).exists()
        elif self.document_set.exists():
            # session tasks already deleted but documents exist — session ended some time ago
            is_done = True
        elif (now() - self.created_date).days >= 1:
            # no documents uploaded into session and more than 1 day passed
            is_done = True
        else:
            # too early to decide
            return None

        if is_done != self.completed:
            self.completed = is_done
            self.save()
        return is_done
开发者ID:LexPredict,项目名称:lexpredict-contraxsuite,代码行数:23,代码来源:models.py

示例7: get_non_indexed_doc_ids_not_planned_to_index_by_predicate

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def get_non_indexed_doc_ids_not_planned_to_index_by_predicate(
        doc_type_code: str,
        predicate: str,
        pack_size: int = 100) -> Generator[List[int], None, None]:
    """
    Yield batches (up to pack_size ids each) of processed documents of the given
    type that are missing from the fields cache and have no planned-but-not-started
    reindex task referencing them.

    NOTE(review): table_name, the reindex task name, and predicate are interpolated
    directly into the SQL string. They appear to come from internal sources, but
    this is an SQL-injection hazard if any can carry user input — verify callers.
    """
    table_name = doc_fields_table_name(doc_type_code)

    with connection.cursor() as cursor:
        # return documents of the specified type which
        # - do not exist in the corresponding fields cache
        # - have no planned but not-started reindex tasks on them
        cursor.execute('select dd.id \n'
                       'from document_document dd \n'
                       f'left outer join "{table_name}" df on dd.id = df.document_id \n'
                       'left outer join lateral (select jsonb_array_elements(args->0) doc_id \n'
                       '                         from task_task \n'
                       f"                         where name = '{_get_reindex_task_name()}' \n"
                       "                         and own_status = 'PENDING'\n"
                       '                         and date_work_start is null) tt on tt.doc_id = to_jsonb(dd.id) \n'
                       f'where {predicate} and df.document_id is null and tt.doc_id is null \n'
                       'and dd.processed is true')

        # stream ids out in fixed-size packs instead of fetching everything at once
        rows = cursor.fetchmany(pack_size)
        while rows:
            yield [row[0] for row in rows]
            rows = cursor.fetchmany(pack_size)
开发者ID:LexPredict,项目名称:lexpredict-contraxsuite,代码行数:27,代码来源:tasks.py

示例8: _get_all_doc_ids_not_planned_to_index

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def _get_all_doc_ids_not_planned_to_index(query_filter: str, params: list, pack_size: int) \
        -> Generator[List[int], None, None]:
    """
    Yield batches (up to pack_size ids each) of processed documents matching
    query_filter that have no planned-but-not-started reindex task on them.
    """
    sql = ('select d.id from document_document d \n'
           'left outer join lateral (select jsonb_array_elements(args->0) doc_id \n'
           '                         from task_task \n'
           '                         where name = %s \n'
           '                         and own_status = %s\n'
           '                         and date_work_start is null) tt on tt.doc_id = to_jsonb(d.id) \n'
           'where {0} and tt.doc_id is null and d.processed is true').format(query_filter)

    with connection.cursor() as cursor:
        # parameters bind the reindex task name and the PENDING status
        cursor.execute(sql, [_get_reindex_task_name(), PENDING] + params)

        # stream ids out in fixed-size packs instead of fetching everything at once
        batch = cursor.fetchmany(pack_size)
        while batch:
            yield [record[0] for record in batch]
            batch = cursor.fetchmany(pack_size)
开发者ID:LexPredict,项目名称:lexpredict-contraxsuite,代码行数:19,代码来源:tasks.py

示例9: get_task_detail

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def get_task_detail(cls, task: Task) -> TaskRecord:
        """
        Build a TaskRecord summary for a problematic task, enriched with the
        first ERROR entry (message + stack trace) found in the task's
        Elasticsearch log and a Kibana link to the log record.
        """
        # PENDING means the task started but never finished; any other status
        # reaching this method is reported as failed
        reason = f'is started at {task.date_work_start} and not finished yet' if task.status == PENDING else 'is failed'
        r = TaskRecord(task.pk, task.name, reason, task.status,
                       task.date_work_start or task.date_start,
                       task.date_done or task.own_date_done,
                       task.user_id)
        try:
            for record in task.get_task_log_from_elasticsearch():
                # keep updating the Kibana link so it points at the latest record seen
                r.kibana_ref = kibana_root_url(record.record_id, record.file_index, add_protocol=False)
                # only complete ERROR records carry the message/stack trace we want
                if not hasattr(record, 'stack_trace') or not hasattr(record, 'message') \
                        or record.log_level != 'ERROR':
                    continue
                r.error_message = record.message
                r.stack_trace = record.stack_trace
                return r
        except GeneratorExit:
            # log generator closed from outside — return what was gathered so far
            return r
        return r
开发者ID:LexPredict,项目名称:lexpredict-contraxsuite,代码行数:20,代码来源:task_monitor.py

示例10: _set_task_event

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def _set_task_event(self, event: dict) -> Iterator[Union[Task, str]]:
        """
        Apply a celery task event to the in-memory model and yield the updated
        task followed by the state string(s) the transition implies.

        Yields the Task object first; then, for a task never seen before, a
        single state ('' marks a brand-new PENDING task), otherwise every state
        passed through between the previous and the current state.
        """
        # state before applying the event; None when the task was never seen
        task = self.memory.tasks.get(event['uuid'])
        pre_state = task and task.state
        (task, _), _ = self.memory.event(event)

        # fix or insert fields.
        task.result_meta = None
        if task.state == SUCCESS:
            task.result_meta, task.result = self._derive_task_result(task)
        yield task

        # fix shortcomings of `created` field: a task should be displayed as PENDING if not
        # new; if a task is first seen in any other state, it should not be new.
        if not pre_state:
            yield '' if task.state == PENDING else task.state  # empty state will mean new.
            return

        yield from self.gen_task_states.states_through(pre_state, task.state)
开发者ID:rsalmei,项目名称:clearly,代码行数:20,代码来源:event_listener.py

示例11: wait_for_command

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def wait_for_command(request, app_name, task_id, after):
    """
    Render a wait page for a running celery task, or redirect to the `after`
    view once the task has succeeded.

    app_name == '_' means the task is not tied to an app, so no TaskLog row
    is created or looked up.
    """
    res = AsyncResult(task_id)
    if app_name != '_':
        app = models.App.objects.get(name=app_name)
        # NOTE(review): datetime.now() is naive — confirm the project does not
        # expect timezone-aware datetimes (django.utils.timezone.now) here.
        # (the original bound the unused `created` flag; dropped)
        task, _ = models.TaskLog.objects.get_or_create(task_id=task_id, defaults={'app': app, 'when': datetime.now()})
        description = task.description
    else:
        description = ""
    if res.state == state(SUCCESS):
        return redirect(reverse(after, kwargs={'app_name': app_name, 'task_id': task_id}))
    # strip ANSI escape sequences from the captured task log
    log = ansi_escape.sub("", get_log(res))
    if res.state == state(FAILURE):
        log += str(res.traceback)
    return render(request, 'command_wait.html', {
        'app': app_name,
        'task_id': task_id,
        'log': log,
        'state': res.state,
        'running': res.state in [state(PENDING), state(STARTED)],
        'description': description
        })
开发者ID:palfrey,项目名称:wharf,代码行数:23,代码来源:views.py

示例12: xxx_backend

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def xxx_backend(self):
        """Exercise the result backend: pending -> done, and failure marking."""
        task_id = uuid()

        # a never-seen task id reports PENDING with no result
        assert self.b.get_status(task_id) == states.PENDING
        assert self.b.get_result(task_id) is None

        # marking done stores the result and flips the status
        self.b.mark_as_done(task_id, 42)
        assert self.b.get_status(task_id) == states.SUCCESS
        assert self.b.get_result(task_id) == 42

        # a failure stores the raised exception as the result
        failed_id = uuid()
        try:
            raise KeyError('foo')
        except KeyError as exc:
            self.b.mark_as_failure(failed_id, exc)

        assert self.b.get_status(failed_id) == states.FAILURE
        assert isinstance(self.b.get_result(failed_id), KeyError)
开发者ID:guohongze,项目名称:adminset,代码行数:20,代码来源:test_database.py

示例13: __init__

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def __init__(self, request, result, task_id=None, tt=TT_DUMMY, dc_bound=True, **kwargs):
        """
        Build a task response from an already-known result (or an exception)
        without running a real task.

        :param result: the value to report; if it is an Exception the response
            body and status are derived from the exception instead
        :param task_id: generated on demand when not supplied (non-exception path)
        :param tt: task type, defaults to TT_DUMMY
        :param dc_bound: passed to gen_task_id when generating a task id
        :param kwargs: may carry 'task_status' (popped here) and 'status'
        """
        # an explicit task_status in kwargs wins; otherwise report PENDING
        if 'task_status' in kwargs:
            task_status = kwargs.pop('task_status')
        else:
            task_status = states.PENDING

        if isinstance(result, Exception):
            # exception path: derive response body and status from the exception
            tr, kwargs['status'] = get_task_exception(result)
        else:
            if 'status' not in kwargs:
                # look up the response status matching the task state
                kwargs['status'] = TASK_STATE[task_status]

            if task_id is None:
                task_id = self.gen_task_id(request, tt=tt, dc_bound=dc_bound, **kwargs)

            tr = {
                'task_id': task_id,
                'status': task_status,
                'result': result,
            }

        super(DummyTaskResponse, self).__init__(request, tr, task_id, **kwargs)

    # noinspection PyUnusedLocal 
开发者ID:erigones,项目名称:esdc-ce,代码行数:26,代码来源:response.py

示例14: format_task_status_class_filter

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def format_task_status_class_filter(task_result: TaskResult=None) -> str:
    """Map a task result's celery status to a CSS contextual class name."""
    # no result to inspect — fall back to the neutral 'warning' class
    if not task_result:
        return 'warning'

    status_to_class = {
        states.SUCCESS: 'success',
        states.FAILURE: 'danger',
        states.REVOKED: 'danger',
        states.REJECTED: 'danger',
        states.RETRY: 'warning',
        states.PENDING: 'info',
        states.RECEIVED: 'info',
        states.STARTED: 'info',
    }
    # unknown statuses also fall back to 'warning'
    return status_to_class.get(task_result.taskmeta.status, 'warning')
开发者ID:Salamek,项目名称:gitlab-tools,代码行数:16,代码来源:middleware.py

示例15: _prepare_state_and_info_by_task_dict

# 需要导入模块: from celery import states [as 别名]
# 或者: from celery.states import PENDING [as 别名]
def _prepare_state_and_info_by_task_dict(task_ids,
                                             task_results_by_task_id) -> Mapping[str, EventBufferValueType]:
        """
        Build a mapping of task_id -> (state, info) from fetched task results.

        Task ids without a stored result are reported as PENDING with no info.
        """
        state_info: MutableMapping[str, EventBufferValueType] = {}
        for task_id in task_ids:
            task_result = task_results_by_task_id.get(task_id)
            if task_result:
                state = task_result["status"]
                # BUG FIX: the original used hasattr(task_result, "info"), which is
                # always False for a plain dict (hasattr checks attributes, not keys),
                # so info was unconditionally None; use a key lookup instead.
                info = task_result.get("info")
            else:
                state = celery_states.PENDING
                info = None
            state_info[task_id] = state, info
        return state_info
开发者ID:apache,项目名称:airflow,代码行数:15,代码来源:celery_executor.py


注:本文中的celery.states.PENDING属性示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。