

Python name.Name Class Code Examples

This article collects typical usage examples of the pinball.workflow.name.Name class in Python. If you are wondering what the Name class does, how it is used, or where to find real usage examples, the curated class code examples below should help.


A total of 15 code examples of the Name class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
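
All of the examples follow the same basic pattern: construct a Name from workflow coordinates (workflow, instance, job, signal, job state), render it into a token name or a name prefix for querying the token store, or parse a stored token name back into its components. The sketch below illustrates that round trip; it uses only constructor keywords, methods, and constants that appear in the examples, and the workflow and job names are placeholder values.

from pinball.workflow.name import Name

# Placeholder coordinates, chosen purely for illustration.
WORKFLOW = 'example_workflow'
INSTANCE = '123'

# Construction: a Name renders its coordinates into token names and prefixes.
event_name = Name(workflow=WORKFLOW,
                  instance=INSTANCE,
                  job='some_job',
                  input_name=Name.WORKFLOW_START_INPUT,
                  event='workflow_start_event')
event_token_name = event_name.get_event_token_name()      # as in Example 11

state_name = Name(workflow=WORKFLOW, instance=INSTANCE,
                  job_state=Name.RUNNABLE_STATE)
runnable_prefix = state_name.get_job_state_prefix()        # as in Examples 7 and 10

# Parsing: a token name read back from the store can be decomposed again.
parsed = Name.from_event_token_name(event_token_name)
assert parsed.event and parsed.job == 'some_job'           # as in Example 1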

Example 1: _simulate

 def _simulate(self):
     """Simulate execution of active jobs."""
     tokens = self._store.read_tokens()
     satisfied_deps = set()
     executed_jobs = []
     jobs = {}
     for token in tokens:
         event_name = Name.from_event_token_name(token.name)
         if event_name.event:
             satisfied_deps.add((event_name.input, event_name.job))
         else:
             job_name = Name.from_job_token_name(token.name)
             if job_name.job:
                 job = pickle.loads(token.data)
                 jobs[job.name] = job
     dep_counts = collections.defaultdict(int)
     while satisfied_deps:
         last_satisfied_deps = satisfied_deps
         satisfied_deps = set()
         for (_, job_name) in last_satisfied_deps:
             dep_counts[job_name] += 1
             if dep_counts[job_name] == 2:
                 executed_jobs.append(job_name)
                 job = jobs[job_name]
                 for output in job.outputs:
                     satisfied_deps.add((job_name, output))
     return executed_jobs
Developer: runt18, Project: pinball, Lines of code: 27, Source file: analyzer_test.py
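
The simulation above works in two passes: token names that parse as event tokens mark satisfied dependencies, while token names that parse as job tokens carry pickled job definitions. A job is then treated as executed once two of its dependencies have been satisfied, and its outputs in turn satisfy dependencies of downstream jobs until no new dependencies appear.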

Example 2: set_action

    def set_action(self, action):
        """Send a signal with a specific action to the master.

        Local signal store gets updated with the new action if it is
        successfully submitted to the master.  If the communication with the
        master fails, locally stored signals get refreshed.

        Args:
            action: The action to set.
        """
        attributes = {}
        if action == Signal.ABORT:
            attributes[Signal.TIMESTAMP_ATTR] = time.time()
        elif action == Signal.EXIT:
            attributes[Signal.GENERATION_ATTR] = PinballConfig.GENERATION
        signal = self._signals.get(action)
        if signal and signal.attributes == attributes:
            return
        # A signal with the same action but different data may already exist
        # in the master.
        signal_token = self._get_signal_token(action)
        if not signal_token:
            name = Name(workflow=self._workflow, instance=self._instance,
                        signal=Signal.action_to_string(action))
            signal_token = Token(name=name.get_signal_token_name())
        signal = Signal(action, attributes)
        signal_token.data = pickle.dumps(signal)
        request = ModifyRequest(updates=[signal_token])
        if self._send_request(request):
            self._signals[action] = signal
Developer: Betterment, Project: pinball, Lines of code: 30, Source file: signaller.py

Example 3: _get_instances_using_cache

    def _get_instances_using_cache(self, workflow):
        """Get workflow instances, preferably from the cache.

        As a side effect, archived instances that do not exist in the cache
        will be added to the cache.

        Args:
            workflow: The name of the workflow whose instances we are
                interested in.
        Returns:
            List of instances for the given workflow.
        """
        name = Name(workflow=workflow)
        workflow_prefix = name.get_workflow_prefix()
        workflow_token_names = self._store.read_token_names(
            name_prefix=workflow_prefix)
        instances_prefixes = DataBuilder._get_instance_prefixes(
            workflow_token_names)
        result = []
        for prefix in instances_prefixes:
            name = Name.from_instance_prefix(prefix)
            assert name.workflow and name.instance, (
                'Expected instance prefix, found %s' % prefix)
            result.append(self.get_instance(name.workflow, name.instance))
        return result
Developer: Betterment, Project: pinball, Lines of code: 25, Source file: data_builder.py

Example 4: _get_jobs

    def _get_jobs(self, workflow, job):
        """Get job definitions from the store across all workflow instances.

        Args:
            workflow: The name of the job workflow.
            job: The name of the job.
        Returns:
            List of matching job records, one per workflow instance.
        """
        name = Name(workflow=workflow)
        name_prefix = name.get_workflow_prefix()
        # This is a bit hacky since we bypass the Name module where all the
        # token naming logic is supposed to be located.
        # TODO(pawel): extend the Name module to support abstractions needed
        # here.
        name_infix = '/job/'
        name_suffix = '/%s' % job
        job_tokens = self._store.read_tokens(name_prefix=name_prefix,
                                             name_infix=name_infix,
                                             name_suffix=name_suffix)
        result = []
        for job_token in job_tokens:
            job_record = pickle.loads(job_token.data)
            result.append(job_record)
        return result
Developer: Betterment, Project: pinball, Lines of code: 26, Source file: data_builder.py

Example 5: get_schedule

    def get_schedule(self, workflow):
        """Get workflow schedule data from the store.

        Args:
            workflow: The name of the workflow whose schedule should be
                retrieved.
        Returns:
            The workflow schedule or None if it was not found.
        """
        name = Name(workflow=workflow)
        schedule_token_name = name.get_workflow_schedule_token_name()
        tokens = self._store.read_tokens(name_prefix=schedule_token_name)
        if tokens:
            for token in tokens:
                if token.name == schedule_token_name:
                    schedule = pickle.loads(token.data)
                    overrun_policy_help = OverrunPolicy.get_help(
                        schedule.overrun_policy)
                    return WorkflowScheduleData(
                        next_run_time=schedule.next_run_time,
                        recurrence_seconds=schedule.recurrence_seconds,
                        overrun_policy=schedule.overrun_policy,
                        overrun_policy_help=overrun_policy_help,
                        workflow=schedule.workflow,
                        parser_params=schedule.parser_params,
                        emails=schedule.emails,
                        max_running_instances=schedule.max_running_instances)
        return None
Developer: Betterment, Project: pinball, Lines of code: 28, Source file: data_builder.py

Example 6: _get_instance_using_cache

    def _get_instance_using_cache(self, workflow, instance):
        """Get workflow instance, preferably from the cache.

        As a side effect, if the instance is archived and it does not exist in
        the cache, it will be added to the cache.

        Args:
            workflow: The name of the workflow whose instance we are
                interested in.
            instance: The instance we are interested in.
        Returns:
            The workflow instance or None if it was not found.
        """
        name = Name(workflow=workflow, instance=instance)
        instance_prefix = name.get_instance_prefix()
        data = self._store.get_cached_data(instance_prefix)
        if data:
            instance_data = pickle.loads(data)
        else:
            # Cache only archived instances.
            if self._store.read_archived_token_names(
                    name_prefix=instance_prefix):
                # The ordering of operations is important.  We need to make
                # sure that we add to the cache instance data constructed from
                # the archived tokens.
                instance_data = self._get_instance_no_cache(workflow, instance)
                self._store.set_cached_data(instance_prefix,
                                            pickle.dumps(instance_data))
            else:
                instance_data = self._get_instance_no_cache(workflow, instance)
        return instance_data
Developer: Betterment, Project: pinball, Lines of code: 31, Source file: data_builder.py

Example 7: _query_and_own_runnable_job_token

    def _query_and_own_runnable_job_token(self, workflow, instance):
        """Attempt to own a runnable job token from a given workflow instance.

        Try to own a runnable job token in a given workflow instance.  The
        ownership of the qualifying job token lasts for a limited time so it
        has to be periodically renewed.

        Args:
            workflow: The name of the workflow whose jobs should be considered.
            instance: The workflow instance whose jobs should be considered.
        """
        assert not self._owned_job_token
        name = Name(workflow=workflow,
                    instance=instance,
                    job_state=Name.RUNNABLE_STATE)
        query = Query()
        query.namePrefix = name.get_job_state_prefix()
        query.maxTokens = 1
        request = QueryAndOwnRequest()
        request.query = query
        request.expirationTime = time.time() + Worker._LEASE_TIME_SEC
        request.owner = self._name
        try:
            response = self._client.query_and_own(request)
            if response.tokens:
                assert len(response.tokens) == 1
                self._owned_job_token = response.tokens[0]
        except TokenMasterException:
            LOG.exception('error sending request %s', request)
Developer: Betterment, Project: pinball, Lines of code: 29, Source file: worker.py

Example 8: is_signal_set

    def is_signal_set(self, workflow, instance, action):
        """Check if a signal is set.

        Args:
            workflow: The workflow whose signal should be checked.  If None,
                signals at the global level are checked.
            instance: The workflow instance whose signal should be checked.  If
                not None, a matching workflow name must be provided.
                If None, signals at the workflow and the global level are
                checked.
            action: The signal action to check.
        Returns:
            True iff the signal exists in the specified context.
        """
        for (workflow_name, instance_name) in [(workflow, instance),
                                               (workflow, None),
                                               (None, None)]:
            name = Name(workflow=workflow_name, instance=instance_name,
                        signal=Signal.action_to_string(action))
            token_name = name.get_signal_token_name()
            tokens = self._store.read_tokens(token_name)
            assert len(tokens) <= 1
            if tokens:
                return True
        return False
Developer: Betterment, Project: pinball, Lines of code: 25, Source file: data_builder.py

Example 9: _is_done

    def _is_done(self, workflow, instance):
        """Check if the workflow instance is done.

        A workflow is done if it does not have runnable jobs.

        Returns:
            True if we are certain that the workflow is not running.  Otherwise
            False.  If there were any errors during communication with the
            master, the return value is False.
        """
        # Attempt to make the workflow runnable and verify that no WAITING job
        # tokens were changed in the meantime.
        name = Name(workflow=workflow,
                    instance=instance,
                    job_state=Name.WAITING_STATE)
        query = Query(namePrefix=name.get_job_state_prefix())
        request = QueryRequest(queries=[query])
        try:
            snapshot = Snapshot(self._client, request)
        except:
            LOG.exception('error sending request %s', request)
            return False
        if not self._make_runnable(workflow, instance):
            return False
        if not self._has_no_runnable_jobs(workflow, instance):
            return False
        try:
            return not snapshot.refresh()
        except:
            LOG.exception('error sending request %s', request)
            return False
Developer: Betterment, Project: pinball, Lines of code: 31, Source file: worker.py

Example 10: _make_runnable

    def _make_runnable(self, workflow, instance):
        """Attempt to make jobs in a given workflow instance runnable.

        Go over all waiting jobs in a given workflow instance and try to make
        them runnable.

        Args:
            workflow: The name of the workflow whose jobs should be considered.
            instance: The workflow instance whose jobs should be considered.
        Returns:
            True if there were no errors during communication with the master,
            otherwise False.
        """
        name = Name()
        name.workflow = workflow
        name.instance = instance
        name.job_state = Name.WAITING_STATE
        query = Query(namePrefix=name.get_job_state_prefix())
        # TODO(pawel): to prevent multiple workers from trying to make the
        # same job runnable at the same time, this should be a
        # QueryAndOwnRequest.  Note that the current implementation is correct,
        # just inefficient.
        request = QueryRequest(queries=[query])
        try:
            response = self._client.query(request)
        except TokenMasterException:
            LOG.exception('error sending request %s', request)
            return False
        assert len(response.tokens) == 1
        for token in response.tokens[0]:
            if not self._make_job_runnable(token):
                return False
        return True
Developer: Betterment, Project: pinball, Lines of code: 33, Source file: worker.py

Example 11: get_workflow_tokens

    def get_workflow_tokens(self):
        """Create Pinball tokens representing a workflow instance.

        Convert workflow jobs to tokens and create event tokens in inputs of
        top-level jobs.

        Returns:
            A list of job and event tokens representing a workflow instance.
        """
        all_jobs = self._get_transitive_deps()
        instance = get_unique_workflow_instance()
        result = []
        for job in all_jobs:
            result.append(job.get_job_token(self.name, instance))
        top_level_jobs = self._get_top_level_jobs()
        for job in top_level_jobs:
            event = Event(creator='parser')
            event_name = Name(workflow=self.name,
                              instance=instance,
                              job=job.name,
                              input_name=Name.WORKFLOW_START_INPUT,
                              event='workflow_start_event')
            result.append(Token(name=event_name.get_event_token_name(),
                                data=pickle.dumps(event)))
        return result
Developer: Betterment, Project: pinball, Lines of code: 25, Source file: parser.py

Example 12: _read_tokens_from_store

    def _read_tokens_from_store(self, store):
        """Read archived job tokens from the store.

        Args:
            store: The store to read tokens from.
        """
        name = Name(workflow=self._workflow, instance=self._instance)
        tokens = store.read_archived_tokens(
            name_prefix=name.get_instance_prefix())
        self._filter_job_tokens(tokens)
Developer: cafyne, Project: pinball, Lines of code: 10, Source file: analyzer.py

Example 13: _post_signal_tokens

    def _post_signal_tokens(self):
        """Add some signal tokens to the master."""
        request = ModifyRequest(updates=[])

        signal = Signal(action=Signal.EXIT)
        name = Name(signal='exit')
        signal_token = Token(name=name.get_signal_token_name())
        signal_token.data = pickle.dumps(signal)
        request.updates.append(signal_token)

        signal = Signal(action=Signal.DRAIN)
        name.signal = 'drain'
        name.workflow = 'some_workflow'
        signal_token = Token(name=name.get_signal_token_name())
        signal_token.data = pickle.dumps(signal)
        request.updates.append(signal_token)

        name.instance = '123'
        signal_token = Token(name=name.get_signal_token_name())
        signal_token.data = pickle.dumps(signal)
        request.updates.append(signal_token)

        signal = Signal(action=Signal.ABORT)
        name.signal = 'abort'
        signal_token = Token(name=name.get_signal_token_name())
        signal_token.data = pickle.dumps(signal)
        request.updates.append(signal_token)

        client = self._factory.get_client()
        client.modify(request)
Developer: Betterment, Project: pinball, Lines of code: 30, Source file: signaller_test.py

Example 14: _generate_signal_tokens

def _generate_signal_tokens(workflows):
    result = []
    for w in range(0, workflows, 2):
        workflow = 'workflow_%d' % w
        signal = Signal(Signal.DRAIN)
        name = Name(workflow=workflow,
                    signal=Signal.action_to_string(signal.action))
        result.append(Token(name=name.get_signal_token_name(),
                            version=10000000000 * w,
                            data=pickle.dumps(signal)))
    return result
Developer: Betterment, Project: pinball, Lines of code: 11, Source file: data_generator.py

Example 15: _get_schedule_token

 def _get_schedule_token():
     name = Name(workflow='workflow_0')
     now = int(time.time())
     token = Token(name=name.get_workflow_schedule_token_name(),
                   owner='some_owner',
                   expirationTime=now - 10)
     schedule = WorkflowSchedule(next_run_time=now - 10,
                                 recurrence_seconds=10,
                                 workflow='workflow_0')
     token.data = pickle.dumps(schedule)
     return token
Developer: Betterment, Project: pinball, Lines of code: 11, Source file: scheduler_test.py


Note: The pinball.workflow.name.Name class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please refer to each project's License before redistributing or using the code; do not republish without permission.