

Python pool.ThreadPoolExecutor Code Examples

This article collects typical usage examples of apscheduler.executors.pool.ThreadPoolExecutor in Python. If you are wondering what pool.ThreadPoolExecutor does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples from its containing module, apscheduler.executors.pool.


Six code examples of pool.ThreadPoolExecutor are shown below, sorted by popularity by default.
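
Before the excerpts from real projects, here is a minimal, self-contained sketch of the usual pattern: a ThreadPoolExecutor is registered under a name in an executors dict and handed to the scheduler. The job function, pool size, and interval here are illustrative assumptions, not code from the projects below.

from datetime import datetime

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler


def tick():
    # Illustrative job body.
    print('Tick! The time is:', datetime.now())


executors = {
    # Jobs run on a pool of 20 worker threads unless they name another executor.
    'default': ThreadPoolExecutor(max_workers=20),
}
scheduler = BackgroundScheduler(executors=executors)
scheduler.add_job(tick, 'interval', seconds=10)
scheduler.start()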

Example 1: __init__

# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ThreadPoolExecutor [as alias]
def __init__(self, datastore_dir, threat_max, timezone):
        global _scheduler_

        self.timezone = timezone

        lock = threading.Lock()
        with lock:
            if not _scheduler_:
                jobstores = {
                    'default': SQLAlchemyJobStore(url='sqlite:///{}/scheduler.sqlite'.format(datastore_dir)),
                }
                executors = {
                    'default': ThreadPoolExecutor(threat_max),
                }
                job_defaults = {
                    'coalesce': False,
                    'max_instances': 1
                }
                _scheduler_ = BackgroundScheduler(
                    jobstores=jobstores, executors=executors,
                    job_defaults=job_defaults, timezone=timezone
                )
                _scheduler_.start() 
Developer: ibmresilient, Project: resilient-community-apps, Lines: 25, Source: scheduler_helper.py
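
Once the module-level _scheduler_ singleton above has been created, other helpers in the same module can register jobs against it. The function below is a hypothetical sketch to illustrate that usage; it is not part of scheduler_helper.py.

def add_interval_job(func, minutes, job_id):
    # Hypothetical helper: schedules `func` on the shared scheduler created in __init__.
    # replace_existing=True keeps the job id stable across restarts with the SQLAlchemy job store.
    return _scheduler_.add_job(func, 'interval', minutes=minutes,
                               id=job_id, replace_existing=True)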

Example 2: __init__

# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ThreadPoolExecutor [as alias]
def __init__(self):
        self.run_date = datetime.datetime.now() + datetime.timedelta(seconds=3)
        self.run_date = self.run_date.strftime('%Y-%m-%d %H:%M:%S')
        self.tm = time.strftime('%Y%m%d%H%M%S', time.localtime())
        self.scheduler = BackgroundScheduler()
        self.executors = {'default': ThreadPoolExecutor(10), 'processpool': ProcessPoolExecutor(5)}
        self.job_defaults = {'coalesce': False, 'max_instances': 1}
        self.scheduler.configure(timezone=pytz.timezone('Asia/Shanghai'), job_defaults=self.job_defaults, executors=self.executors)
Developer: wylok, Project: sparrow, Lines: 10, Source: produce.py
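
Because Example 2 configures two named executors, an individual job can opt out of the default thread pool and into the process pool via the executor keyword of add_job. The snippet below is illustrative and not part of produce.py; scheduler and run_date stand for the attributes set up in __init__ above.

def cpu_heavy_task():
    # Illustrative CPU-bound job body.
    pass

# Dispatch this job to the 'processpool' executor instead of the default thread pool.
scheduler.add_job(cpu_heavy_task, 'date', run_date=run_date, executor='processpool')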

Example 3: configure_scheduler

# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ThreadPoolExecutor [as alias]
def configure_scheduler():

	dump_objs()
	dump_objs()

	sched = BackgroundScheduler({
			'apscheduler.jobstores.default': {
				'type': 'memory'
			},
			'apscheduler.executors.default': {
				'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
				'max_workers': '10'
			},
			'apscheduler.job_defaults.coalesce': 'true',
			'apscheduler.job_defaults.max_instances': '2',
		})

	startTime = datetime.datetime.now(tz=pytz.utc)+datetime.timedelta(seconds=5)

	sched.add_job(reload_tree,
				trigger            = 'interval',
				seconds            = hours(6),
				next_run_time      = startTime,
				id                 = "tree-reloader",
				replace_existing   = True,
				max_instances      = 1,
				coalesce           = True,
				misfire_grace_time = 2**30)

	sched.add_job(dump_objs,
				trigger            = 'interval',
				seconds            = minutes(30),
				next_run_time      = startTime,
				id                 = "leak-tracker",
				replace_existing   = True,
				max_instances      = 1,
				coalesce           = True,
				misfire_grace_time = 2**30)


	return sched 
Developer: fake-name, Project: IntraArchiveDeduplicator, Lines: 43, Source: server.py
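
The hours() and minutes() helpers used above are not shown in the excerpt. Since their results are passed to the seconds argument of add_job, they presumably just convert to seconds, along these lines:

def hours(n):
    # Convert hours to seconds.
    return n * 60 * 60


def minutes(n):
    # Convert minutes to seconds.
    return n * 60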

Example 4: start_scheduler

# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ThreadPoolExecutor [as alias]
def start_scheduler():
    job_executors = {"default": ThreadPoolExecutor(cfg.job_executor_pool_size)}
    logger = log if cfg.debug else None
    scheduler.configure(executors=job_executors, logger=logger, timezone=cfg.timezone)
    scheduler.add_listener(on_error, events.EVENT_JOB_ERROR)
    scheduler.add_listener(on_executed, events.EVENT_JOB_EXECUTED)
    scheduler.add_listener(on_max_instances, events.EVENT_JOB_MAX_INSTANCES)
    scheduler.add_listener(on_missed, events.EVENT_JOB_MISSED)
    scheduler.start()


#### 
Developer: funkyfuture, Project: deck-chores, Lines: 14, Source: jobs.py
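
The listener callbacks registered in Example 4 (on_error, on_executed, and so on) are defined elsewhere in jobs.py. A minimal sketch of what such callbacks can look like, based on APScheduler's event API rather than the deck-chores source, is:

from apscheduler import events


def on_error(event: events.JobExecutionEvent) -> None:
    # Fired for EVENT_JOB_ERROR; the event carries the job id and the raised exception.
    print('job %s raised %r' % (event.job_id, event.exception))


def on_executed(event: events.JobExecutionEvent) -> None:
    # Fired for EVENT_JOB_EXECUTED; event.retval holds the job's return value.
    print('job %s finished with %r' % (event.job_id, event.retval))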

Example 5: _create_default_executor

# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ThreadPoolExecutor [as alias]
def _create_default_executor(self):
        """Creates a default executor store, specific to the particular scheduler type."""
        return ThreadPoolExecutor() 
Developer: morpheus65535, Project: bazarr, Lines: 5, Source: base.py

Example 6: __init__

# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ThreadPoolExecutor [as alias]
def __init__(self, scheduler_class_path,
                 datastore_class_path,
                 db_config=None,
                 db_tablenames=None,
                 job_coalesce=constants.DEFAULT_JOB_COALESCE,
                 job_misfire_grace_sec=constants.DEFAULT_JOB_MISFIRE_GRACE_SEC,
                 job_max_instances=constants.DEFAULT_JOB_MAX_INSTANCES,
                 thread_pool_size=constants.DEFAULT_THREAD_POOL_SIZE,
                 timezone=constants.DEFAULT_TIMEZONE):
        """
        :param str scheduler_class_path: string path for scheduler, e.g. 'mysched.FancyScheduler'
        :param str datastore_class_path: string path for datastore, e.g. 'datastore.SQLDatastore'
        :param dict db_config: dictionary containing values for db connection
        :param dict db_tablenames: dictionary containing the names for the jobs,
        executions, or audit logs table, e.g. {
            'executions_tablename': 'scheduler_executions',
            'jobs_tablename': 'scheduler_jobs',
            'auditlogs_tablename': 'scheduler_auditlogs'
        }
        If any of these keys is not provided, the default table name is selected from constants.py
        :param bool job_coalesce: True by default
        :param int job_misfire_grace_sec: Integer number of seconds
        :param int job_max_instances: Int number of instances
        :param int thread_pool_size: Int thread pool size
        :param str timezone: str timezone to schedule jobs in, e.g. 'UTC'
        """
        datastore = utils.get_datastore_instance(datastore_class_path, db_config, db_tablenames)
        job_stores = {
            'default': datastore
        }

        job_default = {
            'coalesce': job_coalesce,
            'misfire_grace_time': job_misfire_grace_sec,
            'max_instances': job_max_instances
        }

        executors = {
            'default': pool.ThreadPoolExecutor(thread_pool_size)
        }

        scheduler_class = utils.import_from_path(scheduler_class_path)
        self.sched = scheduler_class(datastore_class_path, jobstores=job_stores,
                                     executors=executors, job_defaults=job_default,
                                     timezone=timezone) 
Developer: Nextdoor, Project: ndscheduler, Lines: 47, Source: scheduler_manager.py
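
Based on the docstring above, constructing the manager only requires the two dotted class paths; everything else has defaults from constants.py. The sketch below is hypothetical: the enclosing class is assumed to be named SchedulerManager, and the dotted paths and db_config keys are placeholders.

# Hypothetical usage of the manager defined above.
manager = SchedulerManager(
    scheduler_class_path='mysched.FancyScheduler',
    datastore_class_path='datastore.SQLDatastore',
    db_config={'user': 'scheduler', 'password': 'secret'},  # placeholder connection values
    thread_pool_size=8,
    timezone='UTC',
)
manager.sched.start()  # self.sched holds the scheduler instance built in __init__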


Note: The apscheduler.executors.pool.ThreadPoolExecutor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their authors, and copyright remains with the original authors; refer to each project's license before distributing or reusing the code, and do not republish without permission.