This article collects typical usage examples of apscheduler.executors.pool.ProcessPoolExecutor in Python. If you have been wondering what pool.ProcessPoolExecutor does, how to use it, or what real-world code that uses it looks like, the curated code examples below may help. You can also explore further usage examples from its containing module, apscheduler.executors.pool.
Five code examples of pool.ProcessPoolExecutor are shown below, sorted by popularity by default.
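As a quick orientation before the examples (this sketch is not one of the five below; the job function and worker count are illustrative), ProcessPoolExecutor is usually registered with a scheduler under a named executor and then selected per job:

from apscheduler.executors.pool import ProcessPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    # Jobs sent to a process pool must be picklable, i.e. defined at module top level.
    print('tick')

scheduler = BackgroundScheduler(
    executors={'processpool': ProcessPoolExecutor(max_workers=4)}
)
# 'executor' selects the named pool the job should run in.
scheduler.add_job(tick, 'interval', seconds=5, executor='processpool')
scheduler.start()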
Example 1: __init__
# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ProcessPoolExecutor [as alias]
def __init__(self):
    """Initialize the SQSScheduler, setting up the process pools, scheduler and connecting to the required
    SQS Queues"""
    super().__init__()
    self.pool = ProcessPoolExecutor(1)
    self.scheduler = APScheduler(
        threadpool=self.pool,
        job_defaults={
            'coalesce': True,
            'misfire_grace_time': 30
        }
    )
    session = get_local_aws_session()
    sqs = session.resource('sqs', self.dbconfig.get('queue_region', self.ns))
    self.job_queue = sqs.Queue(self.dbconfig.get('job_queue_url', self.ns))
    self.status_queue = sqs.Queue(self.dbconfig.get('status_queue_url', self.ns))
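A note on this example: ProcessPoolExecutor(1) caps the pool at a single worker process, so jobs routed to it run strictly one at a time; combined with coalesce=True and a 30-second misfire_grace_time, runs that pile up behind a slow job are merged into one rather than fired in a burst. The APScheduler name here presumably comes from the surrounding project's imports (an alias for a scheduler class), not from the apscheduler package itself.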
Example 2: __init__
# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ProcessPoolExecutor [as alias]
def __init__(self):
    super().__init__()
    self.collectors = {}
    self.auditors = []
    self.region_workers = []
    self.pool = ProcessPoolExecutor(self.dbconfig.get('worker_threads', self.ns, 20))
    self.scheduler = APScheduler(
        threadpool=self.pool,
        job_defaults={
            'coalesce': True,
            'misfire_grace_time': 30
        }
    )
    self.load_plugins()
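Here the pool size is configuration-driven: dbconfig.get('worker_threads', self.ns, 20) reads a per-namespace setting and falls back to 20 worker processes when it is absent, so deployments can tune parallelism without a code change.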
Example 3: runScheduler
# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ProcessPoolExecutor [as alias]
def runScheduler():
    runProxyFetch()
    scheduler_log = LogHandler("scheduler")
    scheduler = BlockingScheduler(logger=scheduler_log)
    scheduler.add_job(runProxyFetch, 'interval', minutes=4, id="proxy_fetch", name="proxy fetch")
    scheduler.add_job(runProxyCheck, 'interval', minutes=2, id="proxy_check", name="proxy check")
    executors = {
        'default': {'type': 'threadpool', 'max_workers': 20},
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 10
    }
    scheduler.configure(executors=executors, job_defaults=job_defaults)
    scheduler.start()
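Both jobs in this example run on the default thread pool even though a 'processpool' executor is defined. To actually route a job to the process pool, name the executor when adding it; a minimal sketch (the job id is hypothetical, and the target function must be a picklable top-level function):

# Hypothetical id; 'executor' routes the job to the process pool defined above.
scheduler.add_job(runProxyCheck, 'interval', minutes=2,
                  id="proxy_check_pp", executor='processpool')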
Example 4: __init__
# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ProcessPoolExecutor [as alias]
def __init__(self):
    self.run_date = datetime.datetime.now() + datetime.timedelta(seconds=3)
    self.run_date = self.run_date.strftime('%Y-%m-%d %H:%M:%S')
    self.tm = time.strftime('%Y%m%d%H%M%S', time.localtime())
    self.scheduler = BackgroundScheduler()
    self.executors = {'default': ThreadPoolExecutor(10), 'processpool': ProcessPoolExecutor(5)}
    self.job_defaults = {'coalesce': False, 'max_instances': 1}
    self.scheduler.configure(timezone=pytz.timezone('Asia/Shanghai'), job_defaults=self.job_defaults, executors=self.executors)
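This __init__ precomputes run_date (three seconds in the future, formatted as a string) but schedules nothing itself; presumably another method consumes it. A date trigger would use it like this (a sketch; some_task stands in for a real, picklable top-level function):

# One-shot job at the precomputed time, run in the process pool.
self.scheduler.add_job(some_task, 'date', run_date=self.run_date,
                       executor='processpool')
self.scheduler.start()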
Example 5: start_scheduler
# Required import: from apscheduler.executors import pool [as alias]
# Or: from apscheduler.executors.pool import ProcessPoolExecutor [as alias]
def start_scheduler(settings):
    assert settings['scheduler.store'] in ('redis', 'sqlalchemy'), \
        'Unknown job store, must be one of redis or sqlalchemy'
    if settings['scheduler.store'] == 'redis':
        jobstores = {
            'default': RedisJobStore(db=settings['scheduler.db'])
        }
    else:
        jobstores = {
            'default': SQLAlchemyJobStore(url=settings['scheduler.url'])
        }
    executors = {
        'default': {
            'type': settings['scheduler.executors.type'],
            'max_workers': settings['scheduler.executors.max_workers']
        },
        'processpool': ProcessPoolExecutor(
            max_workers=settings['scheduler.executors.processpool.max_workers']
        )
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings['scheduler.job_defaults.max_instances']
    }
    scheduler.configure(
        jobstores=jobstores,
        executors=executors,
        job_defaults=job_defaults,
        timezone=timezone('UTC')
    )
    if settings['scheduler.autostart'] == 'true':
        scheduler.start()
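The key names above come straight from the function body; a settings mapping exercising the sqlalchemy branch might look like this (values are illustrative, and the module-level scheduler object plus the jobstore/timezone imports are assumed by the original excerpt):

settings = {
    'scheduler.store': 'sqlalchemy',
    'scheduler.url': 'sqlite:///jobs.sqlite',  # illustrative job-store URL
    'scheduler.executors.type': 'threadpool',
    'scheduler.executors.max_workers': 20,
    'scheduler.executors.processpool.max_workers': 5,
    'scheduler.job_defaults.max_instances': 3,
    'scheduler.autostart': 'true',
}
start_scheduler(settings)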