This article collects typical usage examples of the Python method apscheduler.schedulers.background.BlockingScheduler.add_job. If you are unsure what BlockingScheduler.add_job does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help. You can also read further about the containing class, apscheduler.schedulers.background.BlockingScheduler.
The article presents 15 code examples of BlockingScheduler.add_job, sorted by popularity by default.
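Before the examples, here is a minimal, self-contained sketch of the pattern they all share: create a BlockingScheduler, register a callable with add_job and a trigger, then call start(), which blocks the calling thread. The snippets on this page import BlockingScheduler from apscheduler.schedulers.background; its canonical module in APScheduler 3.x is apscheduler.schedulers.blocking, which is the import used here.
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

def tick():
    # a trivial job, just to show the wiring
    print('Tick! The time is:', datetime.now())

if __name__ == '__main__':
    scheduler = BlockingScheduler()
    scheduler.add_job(tick, 'interval', seconds=10)  # run tick() every 10 seconds
    try:
        scheduler.start()  # blocks until interrupted
    except (KeyboardInterrupt, SystemExit):
        pass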
Example 1: ScheduledCheck
# Required import: from apscheduler.schedulers.background import BlockingScheduler
# Or: from apscheduler.schedulers.background.BlockingScheduler import add_job
class ScheduledCheck(Check, metaclass=ABCMeta):
'''
An abstract base class for a check that runs based on
the Scheduler from apscheduler
Child classes need to implement the check method
'''
def __init__(self, queue=None, notify_on_exception=True, name=None, **kwargs):
'''
Create a new instance of this Check
The kwargs are handed over to apscheduler.blocking.BlockingScheduler.add_job
and decide when the checks are run. For example `trigger='cron', hour=8` will
run this check every day at 8 o'clock
'''
super().__init__(queue=queue, notify_on_exception=notify_on_exception, name=name)
self.scheduler = BlockingScheduler(
job_defaults={'misfire_grace_time': 5*60}
)
self.scheduler.add_job(self.wrapped_check, **kwargs)
def run(self):
self.scheduler.start()
def stop(self):
self.scheduler.shutdown()
self.log.info('Check %s stopped', self.__class__.__name__)
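The Check base class that ScheduledCheck extends is not shown above. As a rough, self-contained sketch of the same pattern (ScheduledTask and HeartbeatTask are illustrative names, not part of the original code), an abstract base can hand its constructor kwargs straight to add_job and leave the actual work to subclasses:
from abc import ABCMeta, abstractmethod
from apscheduler.schedulers.blocking import BlockingScheduler

class ScheduledTask(metaclass=ABCMeta):
    def __init__(self, **kwargs):
        # kwargs such as trigger='cron', hour=8 are passed straight to add_job
        self.scheduler = BlockingScheduler(job_defaults={'misfire_grace_time': 5 * 60})
        self.scheduler.add_job(self.check, **kwargs)

    @abstractmethod
    def check(self):
        """Do the actual work; implemented by subclasses."""

    def run(self):
        self.scheduler.start()  # blocks

class HeartbeatTask(ScheduledTask):
    def check(self):
        print('still alive')

# HeartbeatTask(trigger='interval', minutes=5).run()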
Example 2: cli
def cli(ctx, helium_key, darksky_key, lat, lon, sensor, every):
"""Monitor weather for a lat/lon locaation.
This sample service shows how you can use an external weather
service to emit to a virtual sensor in the Helium platform.
\b
he-weather --every <seconds> <sensor> <lat> <lon>
The given virtual <sensor> is the id of a created Helium virtual
sensor.
The optional <seconds> parameter sets how often weather
information is fetched and posted to Helium. If the
parameter is not provided, a default of 60 seconds is used.
This will run the service based on the given lat/lon.
"""
client = Client(api_token=helium_key)
sensor = Sensor.find(client, sensor)
logging.basicConfig()
scheduler = BlockingScheduler()
scheduler.add_job(_process_weather, "interval",
seconds=every,
next_run_time=datetime.now(),
args=[darksky_key, lat, lon, sensor])
click.echo("Checking every {} seconds".format(every))
scheduler.start()
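One detail worth noting in this example is next_run_time=datetime.now(): it makes the job fire once immediately instead of waiting a full interval before the first run. A stripped-down sketch of just that scheduling call, where fetch_and_post and its arguments are placeholders for the _process_weather call above:
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

def fetch_and_post(api_key, lat, lon):
    print('would fetch weather for', lat, lon)

scheduler = BlockingScheduler()
scheduler.add_job(fetch_and_post, 'interval',
                  seconds=60,
                  next_run_time=datetime.now(),  # first run right away
                  args=['<darksky-key>', 52.0, 4.3])
# scheduler.start()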
Example 3: run
def run(self):
"""Run watcher"""
self.logger.info("Running watcher ...")
scheduler = BlockingScheduler()
scheduler.add_job(self.watching, 'interval', seconds=self.config["interval"])
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass
Example 4: month_task
def month_task(self):
def func():
self.dh.aum_total()
self.dh.debt_total()
scheduler = BlockingScheduler()
scheduler.add_job(func, 'cron', month='*/1', day='1', hour='5')  # runs at 05:00 on the 1st of every month
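Note that month_task only registers the job; the snippet never calls scheduler.start(), so as shown nothing will actually run. A complete, self-contained version of the same monthly schedule might look like this, with run_monthly_totals standing in for the dh.aum_total() and dh.debt_total() calls above:
from apscheduler.schedulers.blocking import BlockingScheduler

def run_monthly_totals():
    print('computing monthly AUM and debt totals')

scheduler = BlockingScheduler()
scheduler.add_job(run_monthly_totals, 'cron', day='1', hour='5')  # 05:00 on the 1st of each month
try:
    scheduler.start()
except (KeyboardInterrupt, SystemExit):
    pass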
Example 5: half_year_task
def half_year_task(self):
def func():
month = datetime.datetime.now().month - 1
year = datetime.datetime.now().year
if month == 0:
month = 12
year = year - 1
half_year = month // 6  # integer division so the half-year index stays a whole number
self.dh.customer_value(year, half_year)
scheduler = BlockingScheduler()
scheduler.add_job(func, 'cron', month='7,12', day='2', hour='5')  # compute customer value at 05:00 on 2 July and 2 December
Example 6: seasonly_task
def seasonly_task(self):
def func():
# each run processes the previous month's data
month = datetime.datetime.now().month - 1
year = datetime.datetime.now().year
if month == 0:
month = 12
year = year-1
season = month // 3  # integer division: the quarter that just ended
# compute the life cycle
self.dh.run_life_cycle(year, season)
scheduler = BlockingScheduler()
scheduler.add_job(func, 'cron', month='1,4,7,10', day='2', hour='2')
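A quick way to sanity-check cron fields like the ones used in examples 4-6 is to build the trigger directly and ask it for its next fire time. A sketch, assuming APScheduler 3.x, where triggers expose get_next_fire_time(previous_fire_time, now):
from datetime import datetime
from tzlocal import get_localzone  # tzlocal is an APScheduler 3.x dependency
from apscheduler.triggers.cron import CronTrigger

trigger = CronTrigger(month='1,4,7,10', day='2', hour='2')
print(trigger.get_next_fire_time(None, datetime.now(get_localzone())))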
Example 7: get_rescheduler
def get_rescheduler():
timer = BlockingScheduler()
time_spec = {
'seconds': cfg.CONF.scheduler.rescheduling_interval,
'timezone': aps_utils.astimezone('UTC')
}
timer.add_job(recover_delayed_executions,
trigger=IntervalTrigger(**time_spec),
max_instances=1,
misfire_grace_time=60,
next_run_time=date_utils.get_datetime_utc_now(),
replace_existing=True)
return timer
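The same pattern can be written without the st2 helpers (cfg, aps_utils, date_utils): an explicit IntervalTrigger, a first run scheduled for right now, and max_instances=1 so passes never overlap. The interval value and the job body below are placeholders:
from datetime import datetime, timezone
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.interval import IntervalTrigger

RESCHEDULING_INTERVAL = 30  # seconds; stands in for cfg.CONF.scheduler.rescheduling_interval

def recover_delayed_executions():
    print('recovering delayed executions')

timer = BlockingScheduler()
timer.add_job(recover_delayed_executions,
              trigger=IntervalTrigger(seconds=RESCHEDULING_INTERVAL, timezone='UTC'),
              max_instances=1,                           # never run two passes at once
              misfire_grace_time=60,                     # still run if up to 60 s late
              next_run_time=datetime.now(timezone.utc),  # first run immediately
              replace_existing=True)
# timer.start()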
Example 8: daily_task
def daily_task(self):
def func():
day = datetime.datetime.now().strftime('%Y-%m-%d')
# demand deposits
self.da.init_balance(day, 1)
self.logger.info('%s: daily demand-deposit balances computed', day)
# time deposits
self.da.init_balance(day, 2)
self.logger.info('%s: daily time-deposit balances computed', day)
# wealth-management products
self.da.init_balance(day, 3)
self.logger.info('%s: daily wealth-management balances computed', day)
scheduler = BlockingScheduler()
scheduler.add_job(func, 'cron', day='*', hour='1')  # runs daily at 01:00
try:
scheduler.start()
except Exception as e:
# TODO: proper handling for failed runs
self.logger.error('Daily AUM calculation failed: %s', e)
scheduler.shutdown()
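A caveat about the try/except around scheduler.start() above: exceptions raised inside func() do not propagate out of start(). APScheduler's executor catches and logs them and the scheduler keeps running, so the except branch only sees failures of the scheduler itself. To react to failed runs programmatically, one option is a job-error listener; a sketch:
from apscheduler.events import EVENT_JOB_ERROR
from apscheduler.schedulers.blocking import BlockingScheduler

def on_job_error(event):
    # event.exception holds the exception raised inside the job
    print('job %s failed: %s' % (event.job_id, event.exception))

scheduler = BlockingScheduler()
scheduler.add_listener(on_job_error, EVENT_JOB_ERROR)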
Example 9: stretch
#!/bin/python
from apscheduler.schedulers.background import BlockingScheduler
import notify2
import time
import subprocess
import logging
def stretch():
notify2.init('Stretch')
n = notify2.Notification('Get Up !', 'Time to stretch a bit ')
n.show()
subprocess.call(['espeak', '-g', '5', 'Get Up. Time to Stretch' ])
time.sleep(600)
n = notify2.Notification('Enough Rest', 'Get back to work ')
n.show()
subprocess.call(['espeak', '-g', '5', 'Get back to work' ])
logging.basicConfig()
scheduler = BlockingScheduler()
scheduler.add_job(stretch, 'interval', hours=1)
scheduler.start()
Example 10: St2Timer
class St2Timer(object):
"""
A timer interface that uses APScheduler 3.0.
"""
def __init__(self, local_timezone=None):
self._timezone = local_timezone
self._scheduler = BlockingScheduler(timezone=self._timezone)
self._jobs = {}
self._trigger_types = TIMER_TRIGGER_TYPES.keys()
self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
update_handler=self._handle_update_trigger,
delete_handler=self._handle_delete_trigger,
trigger_types=self._trigger_types,
queue_suffix=self.__class__.__name__,
exclusive=True)
self._trigger_dispatcher = TriggerDispatcher(LOG)
def start(self):
self._register_timer_trigger_types()
self._trigger_watcher.start()
self._scheduler.start()
def cleanup(self):
self._scheduler.shutdown(wait=True)
def add_trigger(self, trigger):
self._add_job_to_scheduler(trigger)
def update_trigger(self, trigger):
self.remove_trigger(trigger)
self.add_trigger(trigger)
def remove_trigger(self, trigger):
trigger_id = trigger['id']
try:
job_id = self._jobs[trigger_id]
except KeyError:
LOG.info('Job not found: %s', trigger_id)
return
self._scheduler.remove_job(job_id)
del self._jobs[trigger_id]
def _add_job_to_scheduler(self, trigger):
trigger_type_ref = trigger['type']
trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
try:
jsonschema.validate(trigger['parameters'],
trigger_type['parameters_schema'])
except jsonschema.ValidationError as e:
LOG.error('Exception scheduling timer: %s, %s',
trigger['parameters'], e, exc_info=True)
raise # Or should we just return?
time_spec = trigger['parameters']
time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))
time_type = None
if trigger_type['name'] == 'st2.IntervalTimer':
unit = time_spec.get('unit', None)
value = time_spec.get('delta', None)
time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
elif trigger_type['name'] == 'st2.DateTimer':
# Raises an exception if date string isn't a valid one.
dat = date_parser.parse(time_spec.get('date', None))
time_type = DateTrigger(dat, timezone=time_zone)
elif trigger_type['name'] == 'st2.CronTimer':
cron = time_spec.copy()
cron['timezone'] = time_zone
time_type = CronTrigger(**cron)
utc_now = date_utils.get_datetime_utc_now()
if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
LOG.warning('Not scheduling expired timer: %s : %s',
trigger['parameters'], time_type.run_date)
else:
self._add_job(trigger, time_type)
return time_type
def _add_job(self, trigger, time_type, replace=True):
try:
job = self._scheduler.add_job(self._emit_trigger_instance,
trigger=time_type,
args=[trigger],
replace_existing=replace)
LOG.info('Job %s scheduled.', job.id)
self._jobs[trigger['id']] = job.id
except Exception as e:
LOG.error('Exception scheduling timer: %s, %s',
trigger['parameters'], e, exc_info=True)
def _emit_trigger_instance(self, trigger):
utc_now = date_utils.get_datetime_utc_now()
# debug logging is reasonable for this one. A high resolution timer will end up
# trashing standard logs.
LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)
# ......... (the rest of this example is omitted here) .........
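For reference, the three trigger classes that _add_job_to_scheduler maps the st2 timer types onto can also be constructed directly. A short sketch with illustrative values:
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.cron import CronTrigger

interval_trigger = IntervalTrigger(seconds=30, timezone='UTC')             # st2.IntervalTimer style
date_trigger = DateTrigger('2030-01-01 08:00:00', timezone='UTC')          # st2.DateTimer style
cron_trigger = CronTrigger(day_of_week='mon-fri', hour=6, timezone='UTC')  # st2.CronTimer style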
Example 11: DisseminationPlayer
class DisseminationPlayer(object):
MIDNIGHT = datetime.time(0,0,0)
def __init__(self, top_data_dir, index_file, dir_files_to_parse, files_to_parse, job_func, destination):
"""
:return:
"""
self._parser = eumetsat.dmon.parsers.xferlog_parser.XferlogParser(no_gems_header = True)
self._dir_files = dir_files_to_parse
self._files = files_to_parse
self._job_func = job_func
self._scheduler = BlockingScheduler()
res = []
t = ftimer(Indexer.load_index, [top_data_dir, index_file], {}, res)
print("Read index in %d seconds." % (t))
self._index = res[0]
# set the reference time: now plus a short deferral (self._defer_time seconds)
self._defer_time = 5
self._reference_date = datetime.datetime.now() + datetime.timedelta(seconds=self._defer_time)
#destination info (depends on the type of job)
self._destination = destination
def add_jobs(self):
"""
Create the jobs from the reference time
:return:
"""
for a_file in self._files:
f_path = "%s/%s" % (self._dir_files, a_file)
print("Parsing xferlog file %s" % f_path )
fd = open(f_path)
self._parser.set_lines_to_parse(fd)
for elem in self._parser:
#print("time = %s, filename = %s\n" % (elem['time'], elem['file']))
#find file in index
filepath = self._index.get(elem['file'], None)
if filepath:
#get time difference
midnight_date = utc.localize(datetime.datetime.combine(elem['time'].date(), self.MIDNIGHT))
#print("midnight date = %s ///// elem[time] = %s" % (midnight_date, elem['time']))
time_diff = elem['time'] - midnight_date
scheduled_date = self._reference_date + time_diff
#create job and schedule it with the time difference added to the starting reference time
d_trigger = DateTrigger(scheduled_date)
self._scheduler.add_job(self._job_func, d_trigger, args=[filepath, self._destination])
else:
print("Could not find %s\n in Index" % (elem['file']))
print("Player. %d jobs scheduled.\n" % (len(self._scheduler.get_jobs())))
def start(self):
"""
:return:
"""
self._scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)
print("Start Scheduler. Jobs will start to be played in %d sec." % self._defer_time)
self._scheduler.start()
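The core idea of DisseminationPlayer, replaying logged events at their original offsets from a shifted reference time, can be sketched on its own with a couple of made-up file names and offsets:
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.date import DateTrigger

def play(name):
    print('playing', name, 'at', datetime.datetime.now())

reference = datetime.datetime.now() + datetime.timedelta(seconds=5)  # playback starts 5 s from now
offsets = {'file-a': datetime.timedelta(seconds=1),
           'file-b': datetime.timedelta(seconds=3)}

scheduler = BlockingScheduler()
for name, offset in offsets.items():
    scheduler.add_job(play, DateTrigger(reference + offset), args=[name])
print('%d jobs scheduled' % len(scheduler.get_jobs()))
# scheduler.start()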
Example 12: runnable
def runnable(file_path):
"""
:return:
"""
# run a job
print("JOB now starting. FIle path %s" % (file_path))
print("JOB .....")
print("JOB now finished")
scheduler = BlockingScheduler()
# .. do something else here, maybe add jobs etc.
the_date = datetime.datetime.now() + datetime.timedelta(seconds=2)
d_trigger = DateTrigger(the_date)
l = lambda: runnable('/tmtmtmtmtmtmt')
scheduler.add_job(func=runnable, trigger=d_trigger, args=['tick\n'])
the_date = datetime.datetime.now() + datetime.timedelta(seconds=2)
d_trigger = DateTrigger(the_date)
scheduler.add_job(func=runnable, trigger=d_trigger, args=['tick1\n'])
scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)
scheduler.start()
Example 13: str
except Exception, e:
logging.debug(str(e))
print str(e)
logging.basicConfig(filename='/home/pi/totem/iot_mqtt.log', filemode='w', level=logging.DEBUG)
#initialize port
try:
#Define port and slave address(decimal) here
instr = minimalmodbus.Instrument("/dev/ttyAMA0", 1)
logging.info("Port intialized, connected successfully to /dev/ttyAMA0. ")
except Exception, e:
logging.debug(str(e))
#connect to IBM IoTF
try:
options = ibmiotf.device.ParseConfigFile('/home/pi/totem/hg_001.cfg') #define path for cfg file
client = ibmiotf.device.Client(options)
client.connect()
myQosLevel = 1
logging.info("IBM IoTF connected successfully, QoS Level at %i" % myQosLevel)
except Exception, e:
logging.debug(str(e))
print str(e)
# setup scheduler here, run every 5 seconds
# stupid scheduler runs on UTC, so beware of DST
sched = BlockingScheduler()
sched.add_job(readAndPublish, 'cron', day_of_week="0-5", hour="0-3,11-23", second="*/5")
sched.start()
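About the comment above on the scheduler running on UTC: APScheduler defaults to the host's local timezone, so jobs firing on UTC usually means the machine's clock/zone is set to UTC. Pinning the timezone explicitly on the scheduler (or per job) avoids that surprise and sidesteps DST issues; a sketch:
from apscheduler.schedulers.blocking import BlockingScheduler

# pin the scheduler to an explicit timezone instead of the host default
sched = BlockingScheduler(timezone='America/New_York')
# ...or override it for a single job:
# sched.add_job(readAndPublish, 'cron', day_of_week="0-5", hour="0-3,11-23", second="*/5", timezone='America/New_York')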
Example 14: Popen
sock = socket.socket()
sock.connect((carbonServer, carbonPort))
except socket.error, err:
print "Could not connect to %s:%s, error code %s, %s" % ( carbonServer, carbonPort, err[0], err[1] )
return 127
binary = "/opt/nagios/bin/nagiostats"
stat = ','.join(unicode(i) for i in stats)
command = binary + " --mrtg --data=" + stat
nagprocess = Popen(command, shell=True, stderr=PIPE, stdout=PIPE, universal_newlines=True)
stdout, stderr = nagprocess.communicate()
stdout = stdout.splitlines()
for stat, metaData in stats.items():
metricName, descr = metaData
metricValue = stdout[0]
del stdout[0]
string = 'datacenter.stats.nagios.%s.%s %s %i\n' % (hostname, metricName, metricValue, calltime)
sock.send(string)
print "%s" % string
sock.close()
if __name__ == "__main__":
sched = BlockingScheduler()
sched.add_job(collectStats, 'interval', seconds=10)
ret = collectStats()
try:
sched.start()
except (KeyboardInterrupt, SystemExit):
pass
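This script calls collectStats() once by hand before start() so that a first datapoint goes out immediately. The same effect can be had by letting the scheduler do the first run, as in example 2; a sketch, with collect() as a placeholder:
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

def collect():
    print('collecting and sending stats')

sched = BlockingScheduler()
sched.add_job(collect, 'interval', seconds=10, next_run_time=datetime.now())
# sched.start()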
Example 15: Adafruit_DHTMOCK
try:
import Adafruit_DHT
except ImportError:
# fall back to a mock so the script still runs without the sensor library
class Adafruit_DHTMOCK():
DHT22 = None
def read_retry(self, sensor, pin):
return 25, 50
Adafruit_DHT = Adafruit_DHTMOCK()
import requests
import logging
from apscheduler.schedulers.background import BlockingScheduler
THERMOSTAT_URI = 'http://192.168.1.214:5000/api/v1/temperature/'
def main():
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, '17')
if humidity is not None and temperature is not None:
requests.post(THERMOSTAT_URI, data=dict(temperature=temperature, humidity=humidity))
logger.warn('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
else:
logger.error('Failed to get reading. Try again!')
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN, format='%(levelname)s - %(asctime)s %(message)s')
logger = logging.getLogger('main')
scheduler = BlockingScheduler()
scheduler.add_job(main, 'interval', seconds=60)
logger.warn('starting scheduler')
scheduler.start()