This article collects typical usage examples of the Python class apscheduler.schedulers.blocking.BlockingScheduler. If you are wondering how to use blocking.BlockingScheduler, or are looking for concrete examples of it in real projects, the curated code examples here may help. You can also explore the containing module, apscheduler.schedulers.blocking, for further usage examples.
Below are 12 code examples of blocking.BlockingScheduler, sorted by popularity by default.
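Before the project-specific excerpts, here is a minimal, self-contained sketch of the basic BlockingScheduler workflow. It uses APScheduler's scheduled_job decorator as an alternative to add_job; the tick job and the 5-second interval are illustrative placeholders rather than part of any example below.

from datetime import datetime

from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()

@scheduler.scheduled_job('interval', seconds=5)
def tick():
    # Illustrative job body: print the current time on every run.
    print('Tick! The time is: %s' % datetime.now())

try:
    # start() blocks the calling thread, so BlockingScheduler suits processes
    # whose only purpose is running scheduled jobs.
    scheduler.start()
except (KeyboardInterrupt, SystemExit):
    pass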
Example 1: __init__
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def __init__(self):
    """Initialize the SQSScheduler, setting up the process pools, scheduler and connecting to the required
    SQS Queues"""
    super().__init__()

    self.pool = ProcessPoolExecutor(1)
    self.scheduler = APScheduler(
        threadpool=self.pool,
        job_defaults={
            'coalesce': True,
            'misfire_grace_time': 30
        }
    )

    session = get_local_aws_session()
    sqs = session.resource('sqs', self.dbconfig.get('queue_region', self.ns))
    self.job_queue = sqs.Queue(self.dbconfig.get('job_queue_url', self.ns))
    self.status_queue = sqs.Queue(self.dbconfig.get('status_queue_url', self.ns))
Example 2: __init__
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def __init__(self):
    super().__init__()

    self.collectors = {}
    self.auditors = []
    self.region_workers = []

    self.pool = ProcessPoolExecutor(self.dbconfig.get('worker_threads', self.ns, 20))
    self.scheduler = APScheduler(
        threadpool=self.pool,
        job_defaults={
            'coalesce': True,
            'misfire_grace_time': 30
        }
    )

    self.load_plugins()
Example 3: scrape_scheduled_method
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def scrape_scheduled_method(self):
    self.job_execution_counter = self.job_execution_counter + 1
    print(
        f"Executing the {self.job_execution_counter} job. {self.queue_size - self.job_execution_counter} to be executed at {str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))}"
    )
    if self.job_queue.empty() is False:
        agencies = self.job_queue.get()
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.scraper_service.scrape_data(agencies))

        scheduler = BlockingScheduler()
        scheduler.add_job(
            self.scrape_scheduled_method,
            next_run_time=datetime.now()
            + timedelta(seconds=self.interval_between_runs_seconds),
        )
        scheduler.start()
    else:
        print(
            f"done with scraping at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
Example 4: __init__
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def __init__(self, broker, interval=10):
    """ Constructor
    :type interval: int
    :param interval: Check interval, in seconds
    """
    self.broker = broker
    self.interval = interval

    setup_logger(logger_name=__name__, log_file=__name__ + '.log')
    self.logger = logging.getLogger(__name__)

    # thread = threading.Thread(target=self.run, args=())
    # thread.daemon = True   # Daemonize thread
    # thread.start()         # Start the execution

    scheduler = BlockingScheduler()
    scheduler.add_job(self.sync_job, 'interval', seconds=10)
    scheduler.start()
Example 5: runScheduler
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def runScheduler():
    runProxyFetch()

    scheduler_log = LogHandler("scheduler")
    scheduler = BlockingScheduler(logger=scheduler_log)

    scheduler.add_job(runProxyFetch, 'interval', minutes=4, id="proxy_fetch", name="proxy fetch")
    scheduler.add_job(runProxyCheck, 'interval', minutes=2, id="proxy_check", name="proxy check")

    executors = {
        'default': {'type': 'threadpool', 'max_workers': 20},
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 10
    }

    scheduler.configure(executors=executors, job_defaults=job_defaults)

    scheduler.start()
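Example 5 calls scheduler.configure() after the jobs are added but before start(); APScheduler also accepts the same options directly in the BlockingScheduler constructor. A minimal sketch of that constructor-based setup follows; the tick job and the 30-second interval are placeholders, not part of the original project.

from datetime import datetime

from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
from apscheduler.schedulers.blocking import BlockingScheduler

def tick():
    # Placeholder job body; substitute the real fetch/check functions.
    print('Tick! The time is: %s' % datetime.now())

executors = {
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5),
}
job_defaults = {
    'coalesce': False,
    'max_instances': 10,
}

# Same executor and job-default options as Example 5, passed at construction time.
scheduler = BlockingScheduler(executors=executors, job_defaults=job_defaults)
scheduler.add_job(tick, 'interval', seconds=30, id='tick')
scheduler.start()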
Example 6: list_current_jobs
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def list_current_jobs(self):
    """Return a list of the currently scheduled jobs in APScheduler

    Returns:
        `dict` of `str`: :obj:`apscheduler/job:Job`
    """
    jobs = {}
    for job in self.scheduler.get_jobs():
        if job.name not in ('schedule_jobs', 'process_status_queue'):
            jobs[job.name] = job

    return jobs
Example 7: main
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def main():
    """Run tick() at the interval of every ten seconds."""
    scheduler = BlockingScheduler(timezone=utc)
    scheduler.add_job(tick, 'interval', seconds=10)

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example 8: blocking_schedule
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def blocking_schedule():
    from apscheduler.schedulers.blocking import BlockingScheduler

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = BlockingScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example 9: __init__
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def __init__(self, is_auto_connect=True, heart_beat_limit_ms=CONNECT_HEART_BEAT_LIMIT_MS, reconnect_after_ms=RECONNECT_AFTER_TIME_MS):
    threading.Thread.__init__(self)
    self.is_auto_connect = is_auto_connect
    self.heart_beat_limit_ms = heart_beat_limit_ms
    self.reconnect_after_ms = reconnect_after_ms if reconnect_after_ms > heart_beat_limit_ms else heart_beat_limit_ms
    self.logger = logging.getLogger("huobi-client")

    self.scheduler = BlockingScheduler()
    self.scheduler.add_job(watch_dog_job, "interval", max_instances=10, seconds=1, args=[self])

    self.start()
Example 10: setUp
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def setUp(self):
    fake_scheduler = BlockingScheduler()
    self.store = DatastoreSqlite.get_instance()
    self.store.start(fake_scheduler, None)
Example 11: runScheduler
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def runScheduler():
    rawProxyScheduler()
    usefulProxyScheduler()

    scheduler_log = LogHandler("scheduler_log")
    scheduler = BlockingScheduler(logger=scheduler_log)

    scheduler.add_job(rawProxyScheduler, 'interval', minutes=5, id="raw_proxy_check", name="raw_proxy periodic fetch")
    scheduler.add_job(usefulProxyScheduler, 'interval', minutes=1, id="useful_proxy_check", name="useful_proxy periodic check")

    scheduler.start()
Example 12: schedule_job
# Required import: from apscheduler.schedulers import blocking [as alias]
# Or: from apscheduler.schedulers.blocking import BlockingScheduler [as alias]
def schedule_job():
    # When testing changes, set the "TEST_SCHEDULE" envvar to run more often
    if get_config().test_schedule:
        schedule_kwargs = {"hour": "*", "minute": "*/10"}
    else:
        schedule_kwargs = {"hour": get_config().schedule_hour}

    sched = BlockingScheduler()
    sched.add_job(destalinate_job, "cron", **schedule_kwargs)
    sched.start()
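As a usage note on Example 12: with a cron trigger, hour="*" combined with minute="*/10" fires every ten minutes, while a bare hour value fires once per day at that hour. A minimal sketch with an explicit CronTrigger follows; report_job stands in for destalinate_job and is only a placeholder.

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger

def report_job():
    # Placeholder for the real job (destalinate_job in Example 12).
    print('running scheduled job')

sched = BlockingScheduler()
# Equivalent to schedule_kwargs = {"hour": "*", "minute": "*/10"}: every 10 minutes.
sched.add_job(report_job, CronTrigger(hour='*', minute='*/10'))
# Equivalent to schedule_kwargs = {"hour": 6}: once per day, at 06:00.
# sched.add_job(report_job, CronTrigger(hour=6))
sched.start()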