本文整理汇总了Python中apscheduler.scheduler.Scheduler.configure方法的典型用法代码示例。如果您正苦于以下问题:Python Scheduler.configure方法的具体用法?Python Scheduler.configure怎么用?Python Scheduler.configure使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类apscheduler.scheduler.Scheduler
的用法示例。
在下文中一共展示了Scheduler.configure方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: handle
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
def handle(self, *args, **options):
    """Start a daemonic scheduler running job_function every minute.

    Blocks in sched.start() until KeyboardInterrupt/SystemExit, then
    prints the remaining job list.  (Python 2 code.)
    """
    # daemonic=True: scheduler thread will not keep the process alive on its own
    sched = Scheduler(daemonic=True)
    # NOTE(review): job_function is defined elsewhere in the original project
    sched.add_cron_job(job_function, minute='*')
    sched.configure()
    try:
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        pass
    print sched.print_jobs()  # Python 2 print statement
示例2: testOne
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
def testOne(self):
    """Register a cron job (Mondays 05:30) and run the scheduler standalone."""
    sched = Scheduler()
    #@sched.interval_schedule(seconds=10)
    @sched.cron_schedule(day_of_week=0, hour=5, minute=30)
    def testSched():
        print "job invoked!"  # Python 2 print statement
    # standalone=True: start() runs the scheduler in the calling thread
    config = {'apscheduler.standalone': True}
    sched.configure(config)
    sched.start()  # blocks here in standalone mode
示例3: TestRunningScheduler
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
class TestRunningScheduler(object):
    """Tests against a scheduler that has already been started.

    Uses nose-style setup/teardown and the @raises decorator
    (imported elsewhere in the original test module).
    """

    def setup(self):
        # fresh, running scheduler for each test
        self.scheduler = Scheduler()
        self.scheduler.start()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    def test_shutdown_timeout(self):
        self.scheduler.shutdown()

    @raises(SchedulerAlreadyRunningError)
    def test_scheduler_double_start(self):
        # starting an already-running scheduler must raise
        self.scheduler.start()

    @raises(SchedulerAlreadyRunningError)
    def test_scheduler_configure_running(self):
        # configure() is forbidden while the scheduler is running
        self.scheduler.configure({})

    def test_scheduler_double_shutdown(self):
        # a second shutdown(False) must be a silent no-op
        self.scheduler.shutdown()
        self.scheduler.shutdown(False)
示例4: process_items_from_day
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
# Seen-links registry shared across calls; de-duplicates items by 'link'
# across successive days' batches.
all_items = {}


def process_items_from_day(items):
    """Process one day's feed items, dropping link-less and duplicate entries.

    Items whose 'link' is None are skipped; each remaining item is run
    through process() (defined elsewhere) and kept only the first time its
    link is seen, using the module-level all_items registry.

    Returns the list of newly-seen processed items.
    """
    data = []
    # Direct iteration replaces the original redundant
    # filter(lambda ..., [item for item in items]) construct, which built a
    # throwaway list copy just to filter it.
    for item in items:
        if item['link'] is None:
            continue
        item = process(item)
        if item['link'] not in all_items:
            all_items[item['link']] = True
            data.append(item)
    return data
# Three SQL WHERE fragments selecting items observed 1, 2 and 3 days ago.
collections = ['AND observed > date("now", "start of day", "-1 day") AND observed < date("now", "start of day")',
               'AND observed > date("now", "start of day", "-2 day") AND observed < date("now", "start of day", "-1 day")',
               'AND observed > date("now", "start of day", "-3 day") AND observed < date("now", "start of day", "-2 day")']
# Fetch and de-duplicate each day's items
# (get_items_from_day is defined elsewhere in the original module).
collections = [process_items_from_day(get_items_from_day(date)) for date in collections]
# Render the newsletter template; edition number = count of previous deploys.
with open('./templates/newsletter.html', 'r') as newspaper:
    template = Template(newspaper.read())
    html = template.render(title="Reddit News Agency", edition=len(os.listdir('./deploy')),
                           collections=collections).encode('utf-8')
# Archive the rendered page under a unix-timestamp filename.
f = open('./deploy/' + str(int(time())) + '.html', 'w')
f.write(html)
# Push the rendered HTML to the hosting app.
requests.post('http://reddit-snews-agency.herokuapp.com/', data=html, headers={
    'Authorization': '9f9fa431c64a86da8324bb370d05377bbf49dbf9'
})
if __name__ == '__main__':
    main()
    # NOTE(review): sched is not defined in this excerpt -- presumably
    # created in main(); verify against the full original module.
    sched.configure(standalone=True)
    sched.start()
示例5: Scheduler
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
# Example 5: stress the scheduler's thread pool with ten staggered
# interval jobs.  (Python 2 code.)
import datetime
import logging
import time
from apscheduler.scheduler import Scheduler

# Global scheduler configuration: thread-pool sizing plus misfire policy
# (1s grace time, coalesce missed runs into one).
gconfig = {'apscheduler.threadpool.core_threads': 2,
           'apscheduler.threadpool.max_threads': 50,
           'apscheduler.threadpool.keepalive': 4,
           'apscheduler.misfire_grace_time': 1,
           'apscheduler.coalesce': True}
logging.basicConfig()
sched = Scheduler()
sched.daemonic = True
sched.configure(gconfig)
sched.start()

def do_job(a):
    # Show which job ran, the internal pool queue depth, and a timestamp.
    # NOTE: pokes scheduler internals (_threadpool) -- debug aid only.
    print "This is " + str(a)
    print repr(sched._threadpool), str(sched._threadpool._queue.qsize())
    print time.strftime('%Y-%m-%d %H:%M:%S')

current_date = datetime.datetime.now()
# Ten jobs, each on a 10s interval, start times staggered 1s apart.
for i in range(0, 10):
    start_date = current_date + datetime.timedelta(seconds=i)
    sched.add_interval_job(do_job, seconds=10, args=('number ' + str(i),), start_date=start_date)

# Keep the main thread alive; jobs run on the scheduler's pool threads.
while(True):
    #print repr(pool)
    time.sleep(1)
示例6: IpServer
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
# Example 6: look up the server's public IP, persist it, and e-mail on
# change.  (Python 2 code; asunto/para/send_mail/IpServer defined elsewhere.)
de_quien = 'Xendra info Cambio de ip'  # sender label ("from" field)
try:
    # curlmyip.com returns the caller's public IP as plain text
    myip = urllib2.urlopen('http://www.curlmyip.com').read()
except urllib2.HTTPError, e:  # Python 2 except syntax
    myip = ''
if myip:
    # keep only the first token of the first line of the response
    myip = myip.split(' ')[0].split('\n')[0]
    try:
        ip_history = IpServer.objects.get(pk=1)
    except ObjectDoesNotExist, e:
        # first run: create and persist the singleton history row
        ip_history = IpServer()
        ip_history.ip = myip
        ip_history.save()
    # NOTE(review): this first send_mail fires on every run, not only on
    # change -- confirm whether that duplicate mail is intentional.
    correo = send_mail(asunto, myip, de_quien, para, fail_silently=True)
    if not myip == ip_history.ip:
        # IP changed: notify again and persist the new address
        correo = send_mail(asunto, myip, de_quien, para, fail_silently=True)
        ip_history.ip = myip
        ip_history.save()

def borrar_antiguos():
    # placeholder ("delete old entries") -- not implemented
    pass

# Run job_function (defined elsewhere) every 5 minutes on a daemonic scheduler.
sched = Scheduler(daemonic=True)
sched.add_cron_job(job_function, minute='*/5')
sched.configure()
try:
    sched.start()
except (KeyboardInterrupt, SystemExit):
    pass
#print sched.print_jobs()
示例7: Scheduler
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
#!/usr/bin/env python
# Example 7: memory-check daemon skeleton with signal-driven shutdown
# (SIGINT) and on-demand info dump (SIGUSR1).
from apscheduler.scheduler import Scheduler
from datetime import datetime
from time import sleep
from memcheck import check, info
import signal

# Start the scheduler
sched = Scheduler()
# String option values: configure() coerces '2' / 'false' to int / bool.
options = {'misfire_grace_time': '2',
           'daemonic': 'false'}
sched.configure(options)
stop = 0  # set to 1 by the SIGINT handler; polled by the main loop elsewhere

# KeyboardInterrupt handler
def shutdown(signl, frme):
    """SIGINT handler: flag the main loop and stop the scheduler.

    shutdown(10) waits up to 10 seconds for running jobs to finish.
    """
    global stop
    global sched
    stop = 1
    sched.shutdown(10)
    #log.info('Catched signal %r. Processing will stop.', signl)
    return 0

def get_info(signl, frme):
    """SIGUSR1 handler: dump memcheck info on demand."""
    info()

signal.signal(signal.SIGINT, shutdown )
signal.signal(signal.SIGUSR1, get_info)
示例8: schedule_job
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
def schedule_job(sched, function, periodicity, start_time, args):
    """Register *function* on *sched* as an interval job.

    The job runs every *periodicity* seconds starting at *start_time*,
    with *args* passed through to each invocation.
    """
    sched.add_interval_job(
        function,
        seconds=periodicity,
        start_date=start_time,
        args=args,
    )
##############################################################
if __name__ == "__main__":
    #########################################################
    # Palinsesto ArOmino (the ArOmino broadcast schedule)
    sched = Scheduler()
    # daemonic False: keep the process alive while jobs are scheduled
    sched.configure({'apscheduler.daemonic': False})
    sched.start()  # start the scheduler
    # UPDATE jobs: refresh each data source at its own cadence (seconds)
    schedule_job(sched, update_frasi, 600, '2014-09-19 22:00:00', args=[''])
    schedule_job(sched, update_sensors, 300, '2014-09-19 22:00:00', args=[''])
    schedule_job(sched, update_weather, 3000, '2014-09-19 22:00:00', args=[''])
    schedule_job(sched, update_forecast, 86400, '2014-09-20 8:00:00', args=[''])
    # PERIODIC jobs: push one message per channel every 30 minutes.
    # Fixed: three start dates used a malformed '8:10::00' (double colon),
    # which the scheduler's date-string parser rejects.
    schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['realtime'])
    schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['history'])
    schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['weather'])
    schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['biometeo'])
示例9: Master
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
class Master(object):
    """Coordinates container placement across hosts and haproxy reloads.

    Relies on module-level collaborators defined elsewhere in the original
    project: balancer, q (a work queue), the @queue decorator, Tasks and
    Worker RPC context managers.  (Python 2 code.)
    """

    def __init__(self):
        # Rebalance every 60 seconds on a daemonic background scheduler.
        self.scheduler = Scheduler()
        self.scheduler.configure({'daemonic': True})
        self.scheduler.add_interval_job(self._balance, seconds=60)
        self.scheduler.start()
        pass

    def _balance(self):
        # Enqueue a rebalance-then-reload task on the shared work queue.
        def wrapper():
            balancer.rebalance()
            self.reload_all()
        q.put((wrapper, [], {}))

    # reconfigure haproxy
    def reload_all(self):
        """Ask every known host to reload its haproxy configuration."""
        from upscale.utils.common import get_hosts
        for host in get_hosts():
            print ("Reloading host {0}.".format(host.private_dns_name))
            with Tasks("tcp://{0}:10000/".format(host.private_dns_name)) as h:
                # should run async and wait for all results to finish
                h.reload()

    # start host
    @queue
    def start(self, namespace, application):
        """Start a container for (namespace, application) on the
        least-loaded host that does not already run that application."""
        from upscale.master.balancer import get_containers
        print namespace, application,  # Python 2 print statement
        (hosts, containers) = get_containers()
        # also weighted hosts, so one in static host, one on spot instance
        min_host = None
        for host in containers:
            if (not min_host or len(containers[host])<len(containers[min_host])):
                # check if it already contains project
                # container names look like 'namespace_application_xxx'
                min_host_applications = set([(b.split('_')[0], b.split('_')[1]) for b in containers[host] if len(b.split('_'))==3])
                if ((namespace, application) in min_host_applications):
                    continue
                min_host=host
        if not min_host:
            raise Exception('No host available')
        print 'Starting on host {0}.'.format(min_host)
        # start container on min host
        # check minhost
        with Worker("tcp://{0}:10000/".format(hosts[min_host])) as h:
            #h.start(namespace, application).get(timeout=5)
            print ('Starting new container')
            h.start(namespace, application)
        self.reload_all()
        # health checks, does namespace, application exist
        #enqueue(wrapper, )
        return (True)

    @queue
    def destroy(self, namespace, website):
        """Destroy all containers of a project (incomplete)."""
        # get all containers for project and destroy them
        # NOTE(review): 'application' is undefined here (the parameter is
        # 'website') and get_containers is not imported in this method --
        # this would raise NameError if executed as-is.
        print namespace, application,
        (hosts, containers) = get_containers()
        for host in containers:
            for container in containers[host]:
                pass

    @queue
    def upgrade(self, namespace, website):
        """Rolling-upgrade placeholder (not implemented)."""
        # rolling upgrade, first start new instances with new version,
        # then shutdown old ones
        # get containers and host of old version
        # start new containers with new version
        # shutdown old versions
        pass
示例10: TestOfflineScheduler
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
class TestOfflineScheduler(object):
    """Tests against a scheduler that has not (yet) been started.

    Uses nose-style helpers (raises, eq_) and apscheduler test imports
    (RAMJobStore, Job, SchedulerEvent, ...) from elsewhere in the module.
    """

    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        # adding a second jobstore under an existing alias must raise
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")

    def test_add_tentative_job(self):
        # a job for a not-yet-added jobstore is held but not listed
        job = self.scheduler.add_date_job(lambda: None, datetime(2200, 7, 24), jobstore="dummy")
        assert isinstance(job, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_add_job_by_reference(self):
        # "module:attribute" textual references resolve to the callable
        job = self.scheduler.add_date_job("copy:copy", datetime(2200, 7, 24))
        eq_(job.func, copy)
        eq_(job.func_ref, "copy:copy")

    def test_configure_jobstore(self):
        # jobstores can be created purely from configuration keys
        conf = {"apscheduler.jobstore.ramstore.class": "apscheduler.jobstores.ram_store:RAMJobStore"}
        self.scheduler.configure(conf)
        self.scheduler.remove_jobstore("ramstore")

    def test_shutdown_offline(self):
        # shutting down a never-started scheduler must not raise
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        # options without the "apscheduler." prefix are ignored, so the
        # defaults (grace time 1, daemonic True) remain in effect
        global_options = {"misfire_grace_time": "2", "daemonic": "false"}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        # prefixed options are applied
        global_options = {"apscheduler.misfire_grace_time": 2, "apscheduler.daemonic": False}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        val = []
        self.scheduler.add_listener(val.append)
        event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 1)
        eq_(val[0], event)
        event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)
        eq_(val[1], event)
        # after removal, further events are no longer delivered
        self.scheduler.remove_listener(val.append)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)

    def test_pending_jobs(self):
        # Tests that pending jobs are properly added to the jobs list when
        # the scheduler is started (and not before!)
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])
        self.scheduler.start()
        jobs = self.scheduler.get_jobs()
        eq_(len(jobs), 1)
示例11: MessageScheduler
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
class MessageScheduler(object):
    """Persistently schedules messages and publishes them over ZeroMQ.

    Database change notifications arrive via schedule(); each becomes a
    one-shot date job that publishes on a PUB socket at the right time.
    (Python 2 code; setup/read_env and zmq/isodate imports live elsewhere.)
    """

    def __init__(self, jobstore, url):
        # jobstore: suffix appended to 'apscheduler.jobstores' -- presumably
        # starts with '.' (e.g. '.sqlalchemy_store:SQLAlchemyJobStore');
        # TODO confirm against callers.  url: jobstore connection URL.
        self.logger = setup(__name__)
        self.logger.debug("Creating MessageScheduler")
        self.logger.debug("id = {}".format(id(self)))
        config = read_env('config.cfg')
        self._scheduler = Scheduler(daemonic=True)
        # persistent jobstore registered under the alias 'file'
        config_scheduler = {'apscheduler.jobstores.file.class': 'apscheduler.jobstores%s' % jobstore,
                            'apscheduler.jobstores.file.url': url}
        self._scheduler.configure(config_scheduler)
        #Open a publishing socket to the forwarder to pass messages out
        self.broadcast_socket = zmq.Context().socket(zmq.PUB)
        self.broadcast_socket.connect(config['ZMQ_FORWARDER_SUCKS_IN'])

    def start_ap_daemon(self):
        """Start the (daemonic) scheduler thread."""
        self.logger.info("scheduler start")
        setup("apscheduler.scheduler")
        self._scheduler.start()

    def shutdown(self):
        """Stop the scheduler."""
        self.logger.info("scheduler shutdown")
        self._scheduler.shutdown()

    def schedule(self, topic, msg):
        """ Takes incoming message, massages it, and dispatches
        to appropriate function.
        """
        self.logger.debug("schedule received {}: {}".format(topic, msg))
        if 'obj_id' in msg:
            obj_id = msg.pop('obj_id')
            if 'start_time' in msg:
                # fire 'window' seconds early when a lead window is given
                if 'window' in msg:
                    msg_time = msg['start_time'] - timedelta(seconds=msg['window'])
                else:
                    msg_time = msg['start_time']
            else:
                offset = timedelta(seconds=10)
                #needs to be a little bit in the future, so scheduler can run it
                msg_time = datetime.now() + offset
            if 'operation' in msg:
                if msg['operation'] == 'insert':
                    self.schedule_message(topic, msg, msg_time, obj_id)
                elif msg['operation'] == 'update':
                    # reschedule_message/cancel_message defined elsewhere
                    self.reschedule_message(obj_id, topic, msg, msg_time)
                elif msg['operation'] == 'delete':
                    self.cancel_message(obj_id)
                else:
                    self.logger.debug("Scheduler has been sent unknown database signal operation.")
        else:
            # NOTE(review): reconstructed branch placement -- this call omits
            # obj_id, which schedule_message requires; confirm original intent.
            self.schedule_message(topic, msg, msg_time)

    def send_to_station(self, topic, msg):
        """ Send a message on to rootio_telephony """
        topic = "station.{}.db".format(msg['station_id'])
        # reserialize any datetime elements for zmq -- unpack again at ts
        for key, value in msg.items():
            if isinstance(value, datetime):
                msg[key] = isodate.datetime_isoformat(value)
        msg = json.dumps(msg)
        self.logger.debug("fwd %s: %s" % (topic, msg))
        self.broadcast_socket.send_multipart((topic, msg))

    def schedule_message(self, topic, message, send_at, obj_id):
        """Add a date job that publishes *message* on *topic* at *send_at*."""
        self.logger.info("schedule message %s:%s at %s" % (topic, message, send_at))
        #create lambda for scheduler to call at execution time
        #and add it
        message['obj_id'] = obj_id
        try:
            job = self._scheduler.add_date_job(self.send_to_station,
                                               send_at,
                                               args=(topic, message),
                                               name=obj_id)
            self.logger.debug("scheduled job: {}".format(job))
            self.logger.debug("scheduled job_name: {}".format(job.name))
        except ValueError, e:  # Python 2 except syntax; e.g. date in the past
            self.logger.error(e)
示例12: HouseControl
# 需要导入模块: from apscheduler.scheduler import Scheduler [as 别名]
# 或者: from apscheduler.scheduler.Scheduler import configure [as 别名]
class HouseControl(object):
    """Top-level controller wiring heating status, scheduler and bus worker.

    Collaborating service modules (HeatingStatusBean, *Service, BusWorker,
    ConfigurationReader) and the INTERVALL_*/SCHEDULE_* constants are
    defined elsewhere in the original project.
    """

    # scheduler instance (name-mangled private attribute)
    __scheduler = None
    # central heating status bean shared with the services
    __heatingStatusBean = None
    # queue + daemon thread that serializes hardware-bus jobs
    busJobsQueue = Queue.Queue()
    busWorkerThread = BusWorker(busJobsQueue)

    def __init__(self):
        self.logger = logging.getLogger(APPLICATION_LOGGER_NAME)
        self.logger.info("HouseControl starting...")
        configurationReader = ConfigurationReader(self.logger, os.getcwd() + FILEPATH_CONFIGURATION)
        #Initialize HeatingStatusBean
        self.__initalizeHeatingStatusBean(configurationReader)
        #Initialize Scheduler
        self.__initializeScheduler(configurationReader)
        #Initialize BusQueueWorker
        self.busWorkerThread.setDaemon(True)
        self.busWorkerThread.start()
        self.logger.info("HouseControl started.")

    def __initalizeHeatingStatusBean(self, configurationReader):
        """Create the heating status bean, apply config, attach listeners."""
        #HeatingStatusBean
        self.__heatingStatusBean = HeatingStatusBean.HeatingStatusBean()
        #Configure Bean
        self.updateHeatingStatusBeanConfiguration(configurationReader)
        #Add ChangeListener
        self.__heatingStatusBean.addChangeListener(HeatingControlService.HeatingControlService(self))
        self.__heatingStatusBean.addChangeListener(HeatingSwitchService.HeatingSwitchService(self))
        ##self.__heatingStatusBean.addChangeListener(HeatingMonitorService.HeatingMonitorService(self))
        self.logger.info("HeatingStatusBean configured.")

    def __initializeScheduler(self, configurationReader):
        """Create and start the scheduler; load base and user jobs."""
        #Scheduler
        self.__scheduler = Scheduler()
        self.__scheduler.configure(standalone=True)
        self.__scheduler.add_listener(schedulerListener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        #SchedulerTasks
        #TemperaturFeedService, TemperatureLogService, MixerControlService
        self.__loadBaseSchedulerTasks()
        self.__scheduler.start()
        # user-defined switching times (original: Benutzerdefinierte Schaltzeiten)
        self.loadUserSchedulerTasks(configurationReader)
        self.logger.info("Scheduler started.")

    def getHeatingStatusBean(self):
        # accessor for the shared heating status bean
        return self.__heatingStatusBean

    def getScheduler(self):
        # accessor for the scheduler instance
        return self.__scheduler

    def __loadBaseSchedulerTasks(self):
        """Register the three recurring base services as named interval jobs."""
        temperatureFeedService = TemperatureFeedService.TemperatureFeedService(self)
        temperatureLogService = TemperatureLogService.TemperatureLogService(self)
        mixerControlService = MixerControlService.MixerControlService(self)
        #TemperaturFeedService
        job = self.__scheduler.add_interval_job(temperatureFeedService.run, seconds=INTERVALL_UPDATE_TEMPERATURE)
        job.name = SCHEDULE_SERVICE_TEMPERATURE_UPDATER
        self.logger.info("Scheduler-Job [" + job.name + "] loaded.")
        #TemperatureLogService
        job = self.__scheduler.add_interval_job(temperatureLogService.run, seconds=INTERVALL_LOG_TEMPERATURE)
        job.name = SCHEDULE_SERVICE_TEMPERATURE_LOGGER
        self.logger.info("Scheduler-Job [" + job.name + "] loaded.")
        #MixerControlService
        job = self.__scheduler.add_interval_job(mixerControlService.run, seconds=INTERVALL_UPDATE_MIXER)
        job.name = SCHEDULE_SERVICE_TEMPERATURE_MIXERCONTROL
        self.logger.info("Scheduler-Job [" + job.name + "] loaded.")

    def updateHeatingStatusBeanConfiguration(self, configurationReader):
        """Push configured target temperatures into the heating status bean."""
        temperatures = configurationReader.temperatures
        self.__heatingStatusBean.setUpperFloorFlowTargetTemperature(float(temperatures.get('ogv')))
        self.__heatingStatusBean.setGroundFloorFlowTargetTemperature(float(temperatures.get('egv')))
        self.__heatingStatusBean.setWaterTargetTemperature(float(temperatures.get('hotwater')))
def reloadUserSchedulerTasks(self):
self.removeUserSchedulerTasks()
configurationReader = ConfigurationReader(self.logger, os.getcwd() + FILEPATH_CONFIGURATION)
self.updateHeatingStatusBeanConfiguration(configurationReader)
#.........这里部分代码省略.........