

Python BlockingScheduler.start Method Code Examples

This article collects typical usage examples of the Python method apscheduler.schedulers.background.BlockingScheduler.start. If you are wondering what BlockingScheduler.start does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, apscheduler.schedulers.background.BlockingScheduler.


Thirteen code examples of the BlockingScheduler.start method are shown below, sorted by popularity by default.
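
Before diving into the collected examples, a minimal sketch of the pattern they all share may help: create the scheduler, register at least one job, then call start(), which blocks the calling thread until the scheduler is shut down. The job function and interval below are placeholders; the import path used throughout this article works in APScheduler 3.x because apscheduler.schedulers.background also imports BlockingScheduler, whose canonical home is apscheduler.schedulers.blocking.

from datetime import datetime
from apscheduler.schedulers.background import BlockingScheduler


def tick():
    # placeholder job: print the current time
    print('Tick:', datetime.now())


scheduler = BlockingScheduler()
scheduler.add_job(tick, 'interval', seconds=5)  # register jobs before calling start()
try:
    scheduler.start()  # blocks until shutdown() is called or the process is interrupted
except (KeyboardInterrupt, SystemExit):
    pass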

Example 1: ScheduledCheck

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
class ScheduledCheck(Check, metaclass=ABCMeta):
    '''
    An abstract base class for a check that runs based on
    the Scheduler from apscheduler

    Child classes need to implement the check method
    '''
    def __init__(self, queue=None, notify_on_exception=True, name=None, **kwargs):
        '''
        Create a new instance of this Check
        The kwargs are handed over to apscheduler.blocking.BlockingScheduler.add_job
        and decide when the checks are run. For example `trigger='cron', hour=8` will
        run this check every day at 8 o'clock
        '''
        super().__init__(queue=queue, notify_on_exception=notify_on_exception, name=name)

        self.scheduler = BlockingScheduler(
            job_defaults={'misfire_grace_time': 5*60}
        )
        self.scheduler.add_job(self.wrapped_check, **kwargs)

    def run(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
        self.log.info('Check %s stopped', self.__class__.__name__)
Developer: fact-project, Project: pycustos, Lines: 29, Source: __init__.py
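
As the docstring in Example 1 notes, the keyword arguments are passed straight to add_job, so a concrete subclass only has to implement check() and pick a trigger. A hypothetical sketch follows (DiskSpaceCheck and its body are made up for illustration; Check, wrapped_check and the queue machinery come from the pycustos project shown above):

class DiskSpaceCheck(ScheduledCheck):
    def check(self):
        # illustrative check body; a real check would inspect disk usage
        # and notify through the queue provided by the Check base class
        pass


check = DiskSpaceCheck(trigger='cron', hour=8)  # kwargs go to BlockingScheduler.add_job
check.run()  # blocks; wrapped_check() is invoked every day at 08:00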

Example 2: MonkeyHorde

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
class MonkeyHorde(object):

    def __init__(self, config_file):
        self.config_file = config_file
        self.monkey_list = [
            dict(class_name=ChaosMonkey),
            dict(class_name=SecurityMonkey),
        ]
        self.twitter = self.get_twitter_connector()
        self.scheduler = BlockingScheduler()
        for m in self.monkey_list:
            m['class_name'](config_file, self.scheduler, self.twitter)

    def unleash(self):
        if self.twitter:
            try:
                self.twitter.PostUpdate("I unleashed the evil monkey horde!!!")
            except Exception as e:
                log.exception(e)
        self.scheduler.start()

    def get_twitter_connector(self):
        try:
            credentials = self.config_file.items("twitter")
        except ConfigParser.NoSectionError:
            return None
        return twitter.Api(**dict(credentials))
Developer: hacktm, Project: Open-source-ChaosMonkeys, Lines: 29, Source: monkeyhorde.py

Example 3: cli

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
def cli(ctx, helium_key, darksky_key, lat, lon, sensor, every):
    """Monitor weather for a lat/lon locaation.

    This sample service shows how you can use an external weather
    service to emit to a virtual sensor in the Helium platform.

    \b
    he-weather  --every <seconds> <sensor> <lat> <lon>

    The given virtual <sensor> is the id of a created Helium virtual
    sensor.

    The optional <seconds> parameter sets how often weather
    information is fetched and posted to Helium. If the parameter
    is not provided, a default of 60 seconds is used.

    This will run the service based on the given lat/lon.

    """
    client = Client(api_token=helium_key)
    sensor = Sensor.find(client, sensor)

    logging.basicConfig()
    scheduler = BlockingScheduler()
    scheduler.add_job(_process_weather, "interval",
                      seconds=every,
                      next_run_time=datetime.now(),
                      args=[darksky_key, lat, lon, sensor])
    click.echo("Checking every {} seconds".format(every))
    scheduler.start()
Developer: helium, Project: api-examples, Lines: 32, Source: he-weather.py
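
Example 3 relies on a detail of the interval trigger worth spelling out: by default the first run happens only after one full interval has elapsed, and passing next_run_time=datetime.now() to add_job additionally schedules an immediate first run. Here is a stripped-down sketch of just that pattern (fetch_weather stands in for the _process_weather job above):

from datetime import datetime
from apscheduler.schedulers.background import BlockingScheduler


def fetch_weather():
    # placeholder for the real fetch-and-post logic
    print('fetching weather at', datetime.now())


scheduler = BlockingScheduler()
scheduler.add_job(fetch_weather, 'interval',
                  seconds=60,
                  next_run_time=datetime.now())  # run once now, then every 60 seconds
scheduler.start()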

Example 4: cmd_start

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
 def cmd_start(self):
     from apscheduler.schedulers.background import BlockingScheduler
     sched = BlockingScheduler()
     with transaction.manager:
         Scheduler.add_all_to_apscheduler(sched, DbSession, user=SYSTEM_UID,
                                          begin_transaction=True)
     sched.start()
     sched.print_jobs()
Developer: dmdm, Project: Parenchym, Lines: 10, Source: scheduler.py

Example 5: run

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
 def run(self):
     """Run watcher"""
     self.logger.info("Running watcher ...")
     scheduler = BlockingScheduler()
     scheduler.add_job(self.watching, 'interval', seconds=self.config["interval"])
     try:
         scheduler.start()
     except (KeyboardInterrupt, SystemExit):
         pass
Developer: cuongnb14, Project: watcher-elasticsearch, Lines: 11, Source: nbc_watcher.py

Example 6: MonkeyRunner

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
class MonkeyRunner(object):

    def __init__(self, config_file):
        self.config_file = config_file
        self.monkey_list = [
            dict(class_name=ChaosMonkey),
            dict(class_name=JanitorMonkey),
            dict(class_name=SecurityMonkey),
        ]
        self.scheduler = BlockingScheduler()
        for m in self.monkey_list:
            m['class_name'](config_file, self.scheduler)

    def start(self):
        self.scheduler.start()
Developer: ioanaverebi, Project: pySimians, Lines: 17, Source: monkeyrunner.py

Example 7: daily_task

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
    def daily_task(self):
        def func():
            day = datetime.datetime.now().strftime('%Y-%m-%d')
            # demand deposits
            self.da.init_balance(day, 1)
            self.logger.info('%s demand deposit daily balances computed', day)
            # time deposits
            self.da.init_balance(day, 2)
            self.logger.info('%s time deposit daily balances computed', day)
            # wealth management products
            self.da.init_balance(day, 3)
            self.logger.info('%s wealth management daily balances computed', day)

        scheduler = BlockingScheduler()
        scheduler.add_job(func, 'cron', day='*', hour='1')  # run every day at 1 a.m.

        try:
            scheduler.start()
        except Exception as e:
            # TODO: decide how to handle execution errors
            self.logger.error('Daily AUM calculation failed: %s', e)
            scheduler.shutdown()
Developer: summer-apple, Project: spark, Lines: 24, Source: tasks.py
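
The cron trigger used in Example 7 accepts the familiar crontab-style fields as keyword arguments. A minimal sketch of the same "run every day at 01:00" schedule, with the banking calculations replaced by a placeholder (nightly_rollup is a made-up name):

import logging
from apscheduler.schedulers.background import BlockingScheduler

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('daily_task')


def nightly_rollup():
    # placeholder for the per-day balance calculations
    log.info('running nightly balance rollup')


scheduler = BlockingScheduler()
scheduler.add_job(nightly_rollup, 'cron', hour=1)  # every day at 01:00; day='*' is the default
try:
    scheduler.start()
except (KeyboardInterrupt, SystemExit):
    pass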

Example 8: len

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
    if len(my_accounts) == 0:
        brain_key = rpc.suggest_brain_key()
        account_registered, account_registration_response = register_account_faucet(config.account, brain_key['pub_key'])
        if account_registered:
            rpc.import_key(config.account, brain_key['wif_priv_key'])

            print("Account: %s succesfully registered" % config.account)
            print(rpc.list_my_accounts())

            print("Brain key: %s" % brain_key['brain_priv_key'])
            print("Write it down/back it up ^")

            print("Send funds to %s and start the bot again" % config.account)
        else:
            print("Account creation failed")
            print(brain_key)
            print(config.faucet + " response: ", account_registration_response)

    else:
        print(my_accounts)
        print(config.account)
        print(rpc.list_account_balances(config.account))
        print("Bot config: " + str(config.bots["MakerRexp"]))
        
        bot.init(config)
 
        run_bot() # running the bot before the scheduler, otherwise it will run for the first time after config.interval
        scheduler = BlockingScheduler()
        scheduler.add_job(run_bot, 'interval', hours=config.interval)
        scheduler.start()
Developer: sfinder, Project: bitshares2-liquiditybots, Lines: 32, Source: main.py

Example 9: DisseminationPlayer

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
class DisseminationPlayer(object):

    MIDNIGHT = datetime.time(0,0,0)

    def __init__(self, top_data_dir, index_file, dir_files_to_parse, files_to_parse, job_func, destination):
        """
            :return:
        """
        self._parser = eumetsat.dmon.parsers.xferlog_parser.XferlogParser(no_gems_header = True)
        self._dir_files = dir_files_to_parse
        self._files = files_to_parse
        self._job_func = job_func
        self._scheduler = BlockingScheduler()

        res = []
        t = ftimer(Indexer.load_index, [top_data_dir, index_file], {}, res)
        print("Read index in %d seconds." % (t))
        self._index = res[0]

        # can now set the reference time:
        # reference time = now plus the defer time below (in seconds)
        self._defer_time = 5
        self._reference_date = datetime.datetime.now() +  datetime.timedelta(seconds=self._defer_time)

        #destination info (depends on the type of job)
        self._destination = destination


    def add_jobs(self):
        """
          Create the jobs from the reference time
        :return:
        """
        for a_file in self._files:
            f_path = "%s/%s" % (self._dir_files, a_file)
            print("Parsing xferlog file %s" % f_path )
            fd = open(f_path)
            self._parser.set_lines_to_parse(fd)
            for elem in self._parser:
                #print("time = %s, filename = %s\n" % (elem['time'], elem['file']))
                #find file in index
                filepath = self._index.get(elem['file'], None)
                if filepath:
                    #get time difference
                    midnight_date = utc.localize(datetime.datetime.combine(elem['time'].date(), self.MIDNIGHT))
                    #print("midnight date = %s ///// elem[time] = %s" % (midnight_date, elem['time']))
                    time_diff = elem['time'] - midnight_date
                    scheduled_date = self._reference_date + time_diff
                    #create job and schedule it with the time difference added to the starting reference time
                    d_trigger = DateTrigger(scheduled_date)

                    self._scheduler.add_job(self._job_func, d_trigger, args=[filepath, self._destination])
                else:
                    print("Could not find %s\n in Index" % (elem['file']))

        print("Player. %d jobs scheduled.\n" % (len(self._scheduler.get_jobs())))


    def start(self):
        """
        :return:
        """
        self._scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

        print("Start Scheduler. Jobs will start to be played in %d sec." % self._defer_time)
        self._scheduler.start()
Developer: gaubert, Project: rodd, Lines: 68, Source: dissemination_player.py
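
Example 9's add_jobs() boils down to computing an absolute run time for each file and handing add_job a DateTrigger for it, so every job fires exactly once at its replay time. A reduced sketch of that idea (the offsets, file names and replay function are placeholders):

from datetime import datetime, timedelta
from apscheduler.schedulers.background import BlockingScheduler
from apscheduler.triggers.date import DateTrigger


def replay(path):
    # placeholder for the real dissemination job
    print('replaying', path)


scheduler = BlockingScheduler()
reference = datetime.now() + timedelta(seconds=5)  # start playback a few seconds from now
for offset, path in [(0, 'a.dat'), (2, 'b.dat')]:  # hypothetical (seconds-offset, file) pairs
    scheduler.add_job(replay, DateTrigger(reference + timedelta(seconds=offset)), args=[path])
scheduler.start()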

Example 10: St2Timer

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}
        self._trigger_types = TIMER_TRIGGER_TYPES.keys()
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix=self.__class__.__name__,
                                               exclusive=True)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()
        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)
        return time_type

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        utc_now = date_utils.get_datetime_utc_now()
        # debug logging is reasonable for this one. A high resolution timer will end up
        # trashing standard logs.
        LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

#.........part of the code omitted here.........
Developer: hejin, Project: st2, Lines: 103, Source: base.py

Example 11: St2TimerSensor

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
class St2TimerSensor(Sensor):
    '''
    A timer sensor that uses APScheduler 3.0.
    '''
    def __init__(self, sensor_service=None):
        self._timezone = 'America/Los_Angeles'  # Whatever TZ local box runs in.
        self._sensor_service = sensor_service
        self._log = self._sensor_service.get_logger(self.__class__.__name__)
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}

    def setup(self):
        pass

    def run(self):
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        id = trigger['id']

        try:
            job_id = self._jobs[id]
        except KeyError:
            self._log.info('Job not found: %s', id)
            return

        self._scheduler.remove_job(job_id)

    def _get_trigger_type(self, ref):
        pass

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        if hasattr(time_type, 'run_date') and datetime.now(tzutc()) > time_type.run_date:
            self._log.warning('Not scheduling expired timer: %s : %s',
                              trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            self._log.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        self._log.info('Timer fired at: %s. Trigger: %s', str(datetime.utcnow()), trigger)

        payload = {
            'executed_at': str(datetime.utcnow()),
            'schedule': trigger['parameters'].get('time')
        }
        self._sensor_service.dispatch(trigger, payload)
Developer: nagyist, Project: StackStorm-st2, Lines: 99, Source: st2_timer_sensor.py

Example 12: Popen

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
      sock = socket.socket()
      sock.connect((carbonServer, carbonPort))
    except socket.error, err:
      print "Could not connect to %s:%s, error code %s, %s" % ( carbonServer, carbonPort, err[0], err[1] )
      return 127
    binary = "/opt/nagios/bin/nagiostats"
    stat = ','.join(unicode(i) for i in stats)
    command = binary + " --mrtg --data=" + stat
    nagprocess = Popen(command, shell=True, stderr=PIPE, stdout=PIPE, universal_newlines=True)
    stdout, stderr = nagprocess.communicate()
    stdout = stdout.splitlines()
    for stat, metaData in stats.items():
        metricName, descr = metaData
        metricValue = stdout[0]
        del stdout[0]
        string = 'datacenter.stats.nagios.%s.%s %s %i\n' % (hostname, metricName, metricValue, calltime)
        sock.send(string)
        print "%s" % string
    sock.close()


if __name__ == "__main__":

  sched = BlockingScheduler()
  sched.add_job(collectStats, 'interval',  seconds=10)
  ret = collectStats()
  try:
    sched.start()
  except (KeyboardInterrupt, SystemExit):
    pass
Developer: cekstam, Project: nagios-nagiostats-graphite, Lines: 32, Source: nagiosStats.py

Example 13: __init__

# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Method used: BlockingScheduler.start
class XcxScrapy:
    def __init__(self):
        KEY1 = 'UwVrGX4x2r+Pk7bf1aItja=='
        self.token = '4ac1c0259b27f13dfb78c2959da3bf4e'
        self.pc = prpcrypt(b(KEY1))  # initialize the encryption key
        self.info_log = get_logger('logs/info.log')
        self.db = self.connect_db()
        # find how many diseases are still left to crawl
        self.max_len = self.db.disease.count_documents({ 'finished': 0 })
        self.count = 0
        print('Number of remaining diseases: {}'.format(self.max_len))
        self.info_log.warning('Number of remaining diseases: {}'.format(self.max_len))
        if self.max_len > 0:
            print('Task started.')
            print('-' * 50)
            self.info_log.warning('Task started.....')
            # crawl on a timer
            self.scheduler = BlockingScheduler()
            self.scheduler.add_job(self.request_data, 'interval', id='main_schedule', seconds=120, args=[self])
            self.scheduler.start()
        # self.init_database(self)
        # self.request_data(self)

    # initialize the database
    @staticmethod
    def init_database(self):
        print('Initial database started!')
        # 初始化疾病表
        disease_file = open('./disease.txt', 'r', encoding='UTF-8')
        try:
            for line in disease_file:
                tmp_line = line.strip().strip('\n')
                self.db.disease.insert_one({
                    'name': tmp_line,
                    'reply': '',
                    'finished': 0
                })
                print('Initial disease: ', tmp_line)
        finally:
            print('Initial database finished!')
            disease_file.close()

    @staticmethod
    def connect_db():
        instance = pymongo.MongoClient('127.0.0.1', 27017)
        db = instance.hebaochacha
        return db
    
    @staticmethod
    def request_data(self):
        # look up the next disease to crawl
        cur_disease = self.db.disease.find_one({ 'finished': 0 }, skip=self.count)
        question = cur_disease['name']
        print('Start to scrapy: {} ...'.format(question))
        self.info_log.critical('Start to scrapy: {} ...'.format(question))
        res = main(question, self.token)
        print('Response: {}'.format(json.dumps(res, ensure_ascii=False, indent=2)))
        self.info_log.critical('Response: {}'.format(json.dumps(res, ensure_ascii=False, indent=2)))
        if not res: return False
        if res.get('isSuccess'):
            result = res.get('result', {})
            iv = result.get('iv', '')
            content = result.get('content', '')
            if iv and content:
                answer = self.pc.decrypt(b(content), b(iv))
                answer = str(answer, encoding="utf-8")
                if answer:
                    # print(json.dumps(json.loads(str(answer, encoding="utf-8")), ensure_ascii=False, indent=2))
                    answer_re = re.compile('''"content":"(.*?)"''')
                    img_re = re.compile('''"resource_url":"(.*?)"''')
                    answer_list = answer_re.findall(''.join(answer.split()))
                    an = '\n'.join(answer_list)
                    img_list = img_re.findall(''.join(answer.split()))
                    im = '\n'.join(img_list)
                    self.db.disease.update_one({ 'name': question }, { '$set': { 'reply': an, 'images': im, 'finished': 1 } })
                    print('Save data to db: {}'.format({ 'name': question, 'reply': an, 'images': im, 'finished': 1 }))
                    self.info_log.critical('Save data to db: {}'.format({ 'name': question, 'reply': an, 'images': im, 'finished': 1  }))
                    self.count = self.count + 1
                    return True
                else:
                    print('Answer is empty.')
                    self.info_log.warning('Answer is empty.')
                    self.db.disease.update_one({ 'name': question }, { '$set': { 'reply': '', 'images': '', 'finished': 1 } })
                    self.count = self.count + 1
                    return False
            else:
                print('NO iv or content --- {}.'.format(question))
                self.info_log.warning('NO iv or content --- {}.'.format(question))
                self.db.disease.update_one({ 'name': question }, { '$set': { 'reply': '', 'images': '', 'finished': 1 } })
                self.count = self.count + 1
                return False
        else:
            if res.get('errorMsg') == 'token已过期':  # the API reports "token expired"
                print('Token is invalid, please log in again.')
                self.info_log.warning('Token is invalid, please log in again.')
                # terminate the process
                os._exit(0)
            else:
                self.count = self.count + 1
                return False
Developer: Andyliwr, Project: PythonStudyLoad, Lines: 102, Source: xiaochengxu.py


Note: The apscheduler.schedulers.background.BlockingScheduler.start method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open source code and documentation platforms. The code snippets were selected from open source projects contributed by various developers, and copyright remains with the original authors. For redistribution and use, please refer to the corresponding project's license. Do not reproduce without permission.