This article collects typical usage examples of the Python method apscheduler.scheduler.Scheduler.unschedule_job. If you are wondering what Scheduler.unschedule_job does, how to call it, or what real code that uses it looks like, the curated examples below should help; you can also look further into the containing class, apscheduler.scheduler.Scheduler, for more usage examples.
The sections below show 15 code examples of Scheduler.unschedule_job, sorted by popularity by default.
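All 15 examples use the legacy apscheduler.scheduler.Scheduler API (APScheduler 2.x and earlier), and several of them are Python 2 code. They all rely on the same basic pattern: keep the Job handle returned by add_interval_job or add_cron_job, and pass that handle back to unschedule_job to cancel the job. Here is a minimal sketch of that pattern; the tick function, the 5-second interval, and the sleep are placeholders of mine, not taken from any of the examples below.

# Minimal schedule/unschedule sketch for the legacy APScheduler 2.x API.
from apscheduler.scheduler import Scheduler
import time

def tick():
    print("tick")  # placeholder job body

sched = Scheduler()
sched.start()

# add_interval_job returns a Job handle; keep it so the job can be cancelled later.
job = sched.add_interval_job(tick, seconds=5)

time.sleep(12)             # let the job fire a couple of times
sched.unschedule_job(job)  # remove the job using the stored handle
sched.shutdown()

The examples differ mainly in where they keep that handle: a dictionary keyed by an identifier, an instance attribute, or the scheduler's own job list queried via get_jobs().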
Example 1: Scheduler
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
class Scheduler(Plugin):

    crons = {}
    intervals = {}
    started = False

    def __init__(self):
        addEvent("schedule.cron", self.cron)
        addEvent("schedule.interval", self.interval)
        addEvent("schedule.remove", self.remove)

        self.sched = Sched(misfire_grace_time=60)
        self.sched.start()
        self.started = True

    def remove(self, identifier):
        # Look up the stored Job handle for this identifier and unschedule it.
        for cron_type in ["intervals", "crons"]:
            try:
                self.sched.unschedule_job(getattr(self, cron_type)[identifier]["job"])
                log.debug("%s unscheduled %s", (cron_type.capitalize(), identifier))
            except:
                pass

    def doShutdown(self):
        self.stop()
        return super(Scheduler, self).doShutdown()

    def stop(self):
        if self.started:
            log.debug("Stopping scheduler")
            self.sched.shutdown()
            log.debug("Scheduler stopped")

        self.started = False

    def cron(self, identifier="", handle=None, day="*", hour="*", minute="*"):
        log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, hour, minute))

        self.remove(identifier)
        self.crons[identifier] = {
            "handle": handle,
            "day": day,
            "hour": hour,
            "minute": minute,
            "job": self.sched.add_cron_job(handle, day=day, hour=hour, minute=minute),
        }

    def interval(self, identifier="", handle=None, hours=0, minutes=0, seconds=0):
        log.info(
            "Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s", (identifier, hours, minutes, seconds)
        )

        self.remove(identifier)
        self.intervals[identifier] = {
            "handle": handle,
            "hours": hours,
            "minutes": minutes,
            "seconds": seconds,
            "job": self.sched.add_interval_job(handle, hours=hours, minutes=minutes, seconds=seconds),
        }
Example 2: Scheduler
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
class Scheduler(Plugin):

    crons = {}
    intervals = {}
    started = False

    def __init__(self):
        addEvent('schedule.cron', self.cron)
        addEvent('schedule.interval', self.interval)
        addEvent('schedule.remove', self.remove)

        self.sched = Sched(misfire_grace_time = 60)
        self.sched.start()
        self.started = True

    def remove(self, identifier):
        for cron_type in ['intervals', 'crons']:
            try:
                self.sched.unschedule_job(getattr(self, cron_type)[identifier]['job'])
                log.debug('%s unscheduled %s', (cron_type.capitalize(), identifier))
            except:
                pass

    def doShutdown(self):
        super(Scheduler, self).doShutdown()
        self.stop()

    def stop(self):
        if self.started:
            log.debug('Stopping scheduler')
            self.sched.shutdown()
            log.debug('Scheduler stopped')

        self.started = False

    def cron(self, identifier = '', handle = None, day = '*', hour = '*', minute = '*'):
        log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, hour, minute))

        self.remove(identifier)
        self.crons[identifier] = {
            'handle': handle,
            'day': day,
            'hour': hour,
            'minute': minute,
            'job': self.sched.add_cron_job(handle, day = day, hour = hour, minute = minute)
        }

    def interval(self, identifier = '', handle = None, hours = 0, minutes = 0, seconds = 0):
        log.info('Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s', (identifier, hours, minutes, seconds))

        self.remove(identifier)
        self.intervals[identifier] = {
            'handle': handle,
            'hours': hours,
            'minutes': minutes,
            'seconds': seconds,
            'job': self.sched.add_interval_job(handle, hours = hours, minutes = minutes, seconds = seconds)
        }
Example 3: Scheduler
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
class Scheduler(Plugin):

    crons = {}
    intervals = {}
    started = False

    def __init__(self):
        addEvent("schedule.cron", self.cron)
        addEvent("schedule.interval", self.interval)
        addEvent("schedule.start", self.start)
        addEvent("schedule.restart", self.start)

        addEvent("app.load", self.start)

        self.sched = Sched(misfire_grace_time=60)

    def remove(self, identifier):
        for type in ["interval", "cron"]:
            try:
                self.sched.unschedule_job(getattr(self, type)[identifier]["job"])
                log.debug("%s unscheduled %s", (type.capitalize(), identifier))
            except:
                pass

    def start(self):

        # Stop all running
        self.stop()

        # Crons
        for identifier in self.crons:
            try:
                self.remove(identifier)
                cron = self.crons[identifier]
                job = self.sched.add_cron_job(cron["handle"], day=cron["day"], hour=cron["hour"], minute=cron["minute"])
                cron["job"] = job
            except ValueError, e:
                log.error("Failed adding cronjob: %s", e)

        # Intervals
        for identifier in self.intervals:
            try:
                self.remove(identifier)
                interval = self.intervals[identifier]
                job = self.sched.add_interval_job(
                    interval["handle"],
                    hours=interval["hours"],
                    minutes=interval["minutes"],
                    seconds=interval["seconds"],
                )
                interval["job"] = job
            except ValueError, e:
                log.error("Failed adding interval cronjob: %s", e)
Example 4: Scheduler
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
class Scheduler(Plugin):

    crons = {}
    intervals = {}
    started = False

    def __init__(self):

        # Silence the apscheduler logger
        sl = logging.getLogger('apscheduler.scheduler')
        sl.disabled = True

        addEvent('schedule.cron', self.cron)
        addEvent('schedule.interval', self.interval)
        addEvent('schedule.start', self.start)
        addEvent('schedule.restart', self.start)

        addEvent('app.load', self.start)
        addEvent('app.shutdown', self.stop)

        self.sched = Sched(misfire_grace_time = 60)

    def remove(self, identifier):
        for type in ['interval', 'cron']:
            try:
                self.sched.unschedule_job(getattr(self, type)[identifier]['job'])
                log.debug('%s unscheduled %s' % (type.capitalize(), identifier))
            except:
                pass

    def start(self):

        # Stop all running
        self.stop()

        # Crons
        for identifier in self.crons:
            try:
                self.remove(identifier)
                cron = self.crons[identifier]
                job = self.sched.add_cron_job(cron['handle'], day = cron['day'], hour = cron['hour'], minute = cron['minute'])
                cron['job'] = job
            except ValueError, e:
                log.error("Failed adding cronjob: %s" % e)

        # Intervals
        for identifier in self.intervals:
            try:
                self.remove(identifier)
                interval = self.intervals[identifier]
                job = self.sched.add_interval_job(interval['handle'], hours = interval['hours'], minutes = interval['minutes'], seconds = interval['seconds'], repeat = interval['repeat'])
                interval['job'] = job
            except ValueError, e:
                log.error("Failed adding interval cronjob: %s" % e)
Example 5: recordAndRegulateTemp
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
def recordAndRegulateTemp(number_of_hours, temperature, csvWriter):
    sched = Scheduler()
    sched.start()
    job = sched.add_interval_job(my_job, minutes=5, args=[temperature, csvWriter, temps])

    start_time = time.time()
    while time.time() - start_time < (3600 * int(number_of_hours)):
        text = "time left: " + str(round((3600 * int(number_of_hours)) - (time.time() - start_time), 0)) + " seconds\n"
        sys.stdout.write(text); sys.stdout.flush()
        # print "temp list: " + str(temps)
        time.sleep(60)

    sched.unschedule_job(job)
Example 6: __init__
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
class TimeScheduler:

    instance = None

    def __init__(self):
        '''
        '''

    @staticmethod
    def getInstance():
        if TimeScheduler.instance is None:
            TimeScheduler.instance = TimeScheduler()
        return TimeScheduler.instance

    def init(self, threadpool=None):
        if threadpool is None:
            self.sched = Scheduler({'apscheduler.threadpool.core_threads': 1,
                                    'apscheduler.threadpool.max_threads': 1,
                                    'apscheduler.threadpool.keepalive': 1})
        else:
            self.sched = Scheduler({'apscheduler.threadpool': threadpool})
        self.sched.daemonic = False

    def registerCronExp(self, handler, year=None, month=None, day=None, hour=None, minute=None, second=None,
                        start_date=None):
        return self.sched.add_cron_job(handler.execute, year, month, day, None, None,
                                       hour, minute, second, None)

    def registerCron(self, handler, year=None, month=None, day=None, week=None,
                     day_of_week=None, hour=None, minute=None, second=None,
                     start_date=None):
        return self.sched.add_cron_job(handler.execute, year=None, month=None, day=None, week=None,
                                       day_of_week=None, hour=None, minute=None, second=None,
                                       start_date=None)

    '''
    register interval task
    '''
    def registerInterval(self, handler, weeks=0, days=0, hours=0, minutes=0,
                         seconds=0, start_date=None):
        return self.sched.add_interval_job(handler.execute, weeks, days, hours, minutes,
                                           seconds, start_date)

    def registerDate(self, handler, date):
        return self.sched.add_date_job(handler.execute, date)

    def unregister(self, job):
        self.sched.unschedule_job(job)

    def start(self):
        self.sched.start()
Example 7: Job_Manager
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
class Job_Manager(object):

    def __init__(self, config):
        self.scheduler = Scheduler(config["SCHEDULER"])
        if self.scheduler is not None:
            self.scheduler.start()

    def add_job(self, task, interval, name, *args):
        args = args if args is not None else None
        self.scheduler.add_interval_job(task, seconds=interval, args=args, name=name, max_instances=50)

    def remove_job(self, name):
        # Remove every scheduled job whose name matches.
        matchedJobs = self.__get_jobs(name)
        self.__remove_jobs(matchedJobs)

    def __get_jobs(self, name):
        return [job for job in self.scheduler.get_jobs() if job.name == name]

    def __remove_jobs(self, matchedJobs):
        for job in matchedJobs:
            self.scheduler.unschedule_job(job)
Example 8: __init__
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
#......... part of the code omitted here .........
            1. Set Turn On/Off Days
            2. Set Turn On Time
            3. Set Turn Off Time
            4. Get On-Off Times
            5. Quit/Exit
            """)

        # get the selection
        self.main_selection = input("Please select: ")
        print("\n")

        if self.main_selection == '1':
            print('Current Turn On/Off days:', self.daysLabel)
            print('1. Daily')
            print('2. WeekDays')
            self.newDays = input("Select which days to use: ")
            # validate entry
            if int(self.newDays) == 1 or int(self.newDays) == 2:
                self.daysLabel = dayLabels[int(self.newDays) - 1]
                self.days = dayOptions[self.daysLabel]
                # cancel old jobs and start new ones
                self.schedDisplayOn()
                self.schedDisplayOff()
                print('New Turn On/Off days:', self.daysLabel)
            else:
                print('Invalid entry')
            self.printMenu()

        elif self.main_selection == '2':
            print('Current Turn On time ', str(self.turnOnHour), ':', str(self.turnOnMin).zfill(2), sep='')
            self.newTurnOnHour = input("Enter new turn on hour (in 24 hour clock): ")
            # validate hour entry
            if int(self.newTurnOnHour) < 24 and int(self.newTurnOnHour) >= 0:
                self.newTurnOnMin = input("Enter new turn on minute: ")
                # validate min entry
                if int(self.newTurnOnMin) < 60 and int(self.newTurnOnMin) >= 0:
                    # assign new hour
                    self.turnOnHour = int(self.newTurnOnHour)
                    # assign new minute
                    self.turnOnMin = int(self.newTurnOnMin)
                    # cancel old job and start new one
                    self.schedDisplayOn()
                    # print new turn on time
                    print('New Turn On time ', str(self.turnOnHour), ':', str(self.turnOnMin).zfill(2), sep='')
                else:
                    print('Invalid Turn On Min')
            else:
                print('Invalid Turn On Hour')
            self.printMenu()

        elif self.main_selection == '3':
            print('Current Turn Off time ', str(self.turnOffHour), ':', str(self.turnOffMin).zfill(2), sep='')
            self.newTurnOffHour = input("Enter new turn off hour (in 24 hour clock): ")
            # validate hour entry
            if int(self.newTurnOffHour) < 24 and int(self.newTurnOffHour) >= 0:
                self.newTurnOffMin = input("Enter new turn off minute: ")
                # validate min entry
                if int(self.newTurnOffMin) < 60 and int(self.newTurnOffMin) >= 0:
                    # assign new hour
                    self.turnOffHour = int(self.newTurnOffHour)
                    # assign new minute
                    self.turnOffMin = int(self.newTurnOffMin)
                    # cancel old job and start new one
                    self.schedDisplayOff()
                    # print new turn off time
                    print('New Turn Off time ', str(self.turnOffHour), ':', str(self.turnOffMin).zfill(2), sep='')
                else:
                    print('Invalid Turn Off Min')
            else:
                print('Invalid Turn Off Hour')
            self.printMenu()

        elif self.main_selection == '4':
            print('Turn On ', self.daysLabel, ' at ', str(self.turnOnHour), ':', str(self.turnOnMin).zfill(2), sep='')
            print('Turn Off ', self.daysLabel, ' at ', str(self.turnOffHour), ':', str(self.turnOffMin).zfill(2), sep='')
            self.sched.print_jobs()
            self.printMenu()

        elif self.main_selection == '5':
            sys.exit()

        else:
            print("Invalid selection.\n")
            self.printMenu()

    def displayPowerOn(self):
        print("Display On")

    def displayPowerOff(self):
        print("Display Off")

    def schedDisplayOn(self):
        # cancel the old job
        self.sched.unschedule_job(self.DisplayOnJob)
        # schedule the new job
        self.DisplayOnJob = self.sched.add_cron_job(self.displayPowerOn, day_of_week=self.days,
                                                    hour=self.turnOnHour, minute=self.turnOnMin)

    def schedDisplayOff(self):
        # cancel the old job
        self.sched.unschedule_job(self.DisplayOffJob)
        # schedule the new job
        self.DisplayOffJob = self.sched.add_cron_job(self.displayPowerOff, day_of_week=self.days,
                                                     hour=self.turnOffHour, minute=self.turnOffMin)
Example 9: TNActionScheduler
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
#......... part of the code omitted here .........
        try:
            reply = iq.buildReply("result")
            job = iq.getTag("query").getTag("archipel").getAttr("job")
            entityClass = self.entity.__class__.__name__
            param = None
            if entityClass == "TNArchipelVirtualMachine" and not job in self.supported_actions_for_vm:
                raise Exception("action %s is not valid" % job)
            elif entityClass == "TNArchipelHypervisor" and not job in self.supported_actions_for_hypervisor:
                raise Exception("action %s is not valid" % job)
            year = iq.getTag("query").getTag("archipel").getAttr("year")
            month = iq.getTag("query").getTag("archipel").getAttr("month")
            day = iq.getTag("query").getTag("archipel").getAttr("day")
            hour = iq.getTag("query").getTag("archipel").getAttr("hour")
            minute = iq.getTag("query").getTag("archipel").getAttr("minute")
            second = iq.getTag("query").getTag("archipel").getAttr("second")
            comment = iq.getTag("query").getTag("archipel").getAttr("comment")
            if iq.getTag("query").getTag("archipel").has_attr("param"):
                param = iq.getTag("query").getTag("archipel").getAttr("param")
            uid = str(uuid.uuid1())
            str_date = "%s-%s-%s @ %s : %02d : %02d" % (year, month, day, hour, int(minute), int(second))
            if entityClass == "TNArchipelVirtualMachine":
                func = self.do_job_for_vm
            elif entityClass == "TNArchipelHypervisor":
                func = self.do_job_for_hypervisor
            self.scheduler.add_cron_job(func, year=year, month=month, day=day, hour=hour, minute=minute, second=second, args=[job, uid, str_date, comment, param])
            self.save_jobs(uid, job, year, month, day, hour, minute, second, comment, param)
            self.entity.push_change("scheduler", "scheduled")
        except Exception as ex:
            reply = build_error_iq(self, ex, iq)
        return reply

    def iq_jobs(self, iq):
        """
        Get jobs.
        @type iq: xmpp.Protocol.Iq
        @param iq: the received IQ
        @rtype: xmpp.Protocol.Iq
        @return: a ready to send IQ containing the result of the action
        """
        try:
            reply = iq.buildReply("result")
            nodes = []
            if hasattr(self.scheduler, "get_jobs"):
                jobs = self.scheduler.get_jobs()
            else:
                jobs = self.scheduler.jobs
            for job in jobs:
                job_node = xmpp.Node(tag="job", attrs={"action": str(job.args[0]), "uid": str(job.args[1]), "date": str(job.args[2]), "comment": job.args[3]})
                nodes.append(job_node)
            reply.setQueryPayload(nodes)
        except Exception as ex:
            reply = build_error_iq(self, ex, iq)
        return reply

    def iq_unschedule(self, iq):
        """
        Unschedule a job.
        @type iq: xmpp.Protocol.Iq
        @param iq: the received IQ
        @rtype: xmpp.Protocol.Iq
        @return: a ready to send IQ containing the result of the action
        """
        try:
            reply = iq.buildReply("result")
            uid = iq.getTag("query").getTag("archipel").getAttr("uid")
            the_job = self.get_jod_with_uid(uid)
            if not the_job:
                raise Exception("job with uid %s doesn't exists" % uid)
            self.delete_job(uid)
            self.scheduler.unschedule_job(the_job)
            self.entity.push_change("scheduler", "unscheduled")
        except Exception as ex:
            reply = build_error_iq(self, ex, iq)
        return reply

    def iq_actions(self, iq):
        """
        Get available actions.
        @type iq: xmpp.Protocol.Iq
        @param iq: the received IQ
        @rtype: xmpp.Protocol.Iq
        @return: a ready to send IQ containing the result of the action
        """
        try:
            reply = iq.buildReply("result")
            entityClass = self.entity.__class__.__name__
            if entityClass == "TNArchipelVirtualMachine":
                actions = self.supported_actions_for_vm
            elif entityClass == "TNArchipelHypervisor":
                actions = self.supported_actions_for_hypervisor
            nodes = []
            for action in actions:
                action_node = xmpp.Node(tag="action")
                action_node.setData(action)
                nodes.append(action_node)
            reply.setQueryPayload(nodes)
        except Exception as ex:
            reply = build_error_iq(self, ex, iq)
        return reply
Example 10: _Direct
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
class _Direct(Direct):
    # set initial conditions for the subclass (in addition to the superclass
    # methods defined in Direct) and initialize the scheduler
    def __init__(self, *args, **kwargs):
        Direct.__init__(self, *args, **kwargs)
        self.scheduler = Scheduler()
        self.sampling = False

    def collect_sample(self):
        time_value = gmtime()
        self.send('R0\r')
        print '#\t--- Collecting Sample at %.3f' % mktime(time_value)

    def query_status(self):
        time_value = gmtime()
        self.send('S\r')
        print '#\t--- Query Instrument Status at %.3f' % mktime(time_value)

    def run(self):
        while True:
            # parse the user commands from stdin
            cmd = sys.stdin.readline()
            cmd = cmd.strip()

            # default command set
            if cmd == 'q':
                if self.sampling is True:
                    print '#\t--- stop all scheduled sampling'
                    self.scheduler.unschedule_job(self.sample)
                    self.scheduler.unschedule_job(self.status)
                    self.scheduler.shutdown()
                print '#\t--- turning on 1 Hz status messages'
                self.send('F1\r')
                print '### exiting'
                sleep(1)
                break
            elif cmd == 'init':
                print '### initialize instrument for sampling'
                print '#\t--- turning off 1 Hz status messages'
                self.send('F5A\r')
                sleep(1)
                self.send('F5A\r')
                sleep(1)
                self.send('F5A\r')
                sleep(1)
                print '#\t--- flush internal pump 2 times with reagent'
                self.send('P2\r')
                sleep(2)
                print '#\t\t--- * first cycle complete'
                self.send('P2\r')
                sleep(2)
                print '#\t\t--- * second cycle complete, ready for sampling'
            elif cmd == 'start':
                print '### sampling started, will sample every hour at the top of the hour'
                self.scheduler.start()
                self.sample = self.scheduler.add_cron_job(self.collect_sample, minute=0)
                self.status = self.scheduler.add_cron_job(self.query_status, hour='0,12', minute=15)
                #self.scheduler.print_jobs()
                self.sampling = True
            elif cmd == 'stop':
                print '### sampling stopped'
                self.scheduler.unschedule_job(self.sample)
                self.scheduler.unschedule_job(self.status)
                self.scheduler.shutdown()
                self.sampling = False
            else:
                print '### sending %s' % cmd
                self.send(cmd + '\r')
Example 11: MetaDataGenerationScheduler
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
class MetaDataGenerationScheduler():

    def __init__(self, updateIntervalSeconds=30):
        self.interval = updateIntervalSeconds

        config = {'apscheduler.daemonic': False}
        self.sched = Scheduler(config)

        # initialize these per instance.
        self.repo_timestamps = {}
        self.jobs = {}

    repo_timestamps = {}  # dictionary with jobName (=reponame) : last scheduler modification timestamp (float)
    jobs = {}  # dictionary with jobName (=reponame) : jobHandle

    configService = RepoConfigService()
    static_root_dir = configService.getStaticRepoDir()
    sched = None
    interval = None

    def start(self):
        self.update_program_config()  # read configs, schedule jobs

        # schedule an update as a job
        self.sched.add_interval_job(self.update_program_config, seconds=self.interval)

        # schedule cleanup cache
        self.sched.add_cron_job(self.cleanupCacheDir, hour=23, minute=17, second=20)

        self.sched.start()

    def createrepo_with_optional_cleanup_job(self, *argList):
        monitor = JobMonitorer()
        monitor.job_starts()
        repoDir = argList[0]
        reponame = argList[1]
        rpm_max_keep = argList[2]
        didCleanUp = False
        try:
            if rpm_max_keep != None:
                didCleanUp = True
                self.configService.doCleanup(repoDir, rpm_max_keep)
                logging.info("job RpmCleanup on " + reponame + " took " + str(monitor.get_execution_time_until_now_seconds()) + " seconds")
            self.configService.doCreateRepo(repoDir, reponame)
            monitor.job_finishes()
            logging.info(monitor.get_pretty_job_summary("createrepo on " + reponame + " (cleanup included : " + str(didCleanUp) + ")"))
        except Exception as ex:
            logging.error(traceback.format_exc())

    def update_program_config(self):
        updatedJobs = 0
        addedJobs = 0
        removedJobs = 0

        list_of_static_dirs = os.listdir(self.static_root_dir)
        self.remove_jobs_where_repo_deleted(list_of_static_dirs)

        for static_dir in list_of_static_dirs:
            file_path = self.configService.getMetaDataGenerationFilePathRelativeToRepoDirByRepoName(static_dir)

            if not os.path.exists(file_path):
                if self.repo_timestamps.has_key(static_dir):
                    logging.debug("unschedule because file does not exist")
                    self.unschedule_by_reponame(static_dir)
                    del self.repo_timestamps[static_dir]  # repo is unmanaged now, check back later
                    removedJobs += 1
                continue

            if not static_dir in self.repo_timestamps:
                logging.debug("new repo found..")
                addedJobs += 1
                self.repo_timestamps[static_dir] = self.determine_last_modification_time(
                    file_path)  # make an entry so we know we processed the repo + remember modification timestamp
                self.add_job_for_repo(static_dir)
            else:  # we already processed the repo because its in the dictionary
                logging.debug("check for updates in repo config...")
                if self.is_more_recent_metadata_generation_file_than(static_dir, self.repo_timestamps[static_dir]):
                    logging.debug("update job for repo " + static_dir)
                    updatedJobs += 1
                    self.repo_timestamps[static_dir] = self.determine_last_modification_time(file_path)
                    self.unschedule_by_reponame(static_dir)
                    self.add_job_for_repo(static_dir)

        logging.info("update_program_config finished -- updated %s jobs, added %s jobs, removed %s jobs" % (updatedJobs, addedJobs, removedJobs))

    def remove_jobs_where_repo_deleted(self, list_of_existing_repos):
        removed_repos = set(self.repo_timestamps.keys()) - set(list_of_existing_repos)
        for repo in removed_repos:
            self.unschedule_by_reponame(repo)

    def determine_last_modification_time(self, file_path):
        statbuf = os.stat(file_path)
        return statbuf.st_mtime  # float representing the last modification timestamp

    def unschedule_by_reponame(self, reponame):
        if reponame in self.jobs:
            self.sched.unschedule_job(self.jobs[reponame])
            del self.jobs[reponame]  # remove the job from the job-handle dictionary..

    def is_more_recent_metadata_generation_file_than(self, repodir, past_timestamp):
        file_path = self.configService.getMetaDataGenerationFilePathRelativeToRepoDirByRepoName(repodir)
        actual_timestamp = self.determine_last_modification_time(file_path)
        if actual_timestamp > past_timestamp:
            return True
        else:
#......... part of the code omitted here .........
Example 12: PyFlowScheduler
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
#......... part of the code omitted here .........
        self.shutdown(err_msg)
        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError:
            logger.critical("Could not remove pid_file")

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            self.history.append("Completed on %s" % time.asctime())
            self.history.append("Elapsed time %s" % self.get_delta_etime())

            if self.debug:
                print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())

            retcode = self.send_email(msg)
            if self.debug:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "w") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

            lines = []
            app = lines.append
            app("Submitted on %s" % time.ctime(self.start_time))
            app("Completed on %s" % time.asctime())
            app("Elapsed time %s" % str(self.get_delta_etime()))

            if self.flow.all_ok:
                app("Flow completed successfully")
            else:
                app("Flow didn't complete successfully")
                app("Shutdown message:\n%s" % msg)

            print("\n".join(lines))

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            logger.debug('this should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            #self.sched.print_jobs()
            for job in self.sched.get_jobs():
                self.sched.unschedule_job(job)
            #self.sched.print_jobs()

            self.sched.shutdown()

            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 if success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on %s" % time.ctime(self.start_time))
        app("Completed on %s" % time.asctime())
        app("Elapsed time %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
Example 13: send_email
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
email_sched.start()

text = "it's been twelve hours. here are the latest temperature readings from your new brew"
email_job = sched.add_interval_job(send_email, hours=8, args=[text, password, True])

# this is the heart of the program:

# send email to let me know I'm brewing
send_email("starting brew log.", password, graph=False)

# get list of temps:
list_of_temps = sys.argv[2::2]
# get lengths of time for each of those temperatures
list_of_times = sys.argv[3::2]

# convert to ints
list_of_temps = map(int, list_of_temps)
list_of_times = map(int, list_of_times)

print "list of temps"
print list_of_temps
print "list of times"
print list_of_times

for i in range(0, len(list_of_times)):
    send_email("changing temperature to " + str(list_of_temps[i]) + " for " + str(list_of_times[i]) + " hours.", password, graph=False)
    recordAndRegulateTemp(list_of_times[i], list_of_temps[i], writer)

print "program done. fermenter shutting down."
send_email("ending. fermenter is shutting off", password, graph=True)
email_sched.unschedule_job(send_email)
io.output(power_pin, False)
Example 14: PyFlowScheduler
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
#......... part of the code omitted here .........
            self.history.append("Completed on: %s" % time.asctime())
            self.history.append("Elapsed time: %s" % self.get_delta_etime())

            if self.debug:
                print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())

            retcode = self.send_email(msg)
            if self.debug:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "wt") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

            lines = []
            app = lines.append
            app("Submitted on: %s" % time.ctime(self.start_time))
            app("Completed on: %s" % time.asctime())
            app("Elapsed time: %s" % str(self.get_delta_etime()))

            if self.flow.all_ok:
                app("Flow completed successfully")
            else:
                app("Flow %s didn't complete successfully" % repr(self.flow.workdir))
                app("use `abirun.py FLOWDIR debug` to analyze the problem.")
                app("Shutdown message:\n%s" % msg)

            print("")
            print("\n".join(lines))
            print("")

            self._do_customer_service()

            if self.flow.all_ok:
                print("Calling flow.finalize()...")
                self.flow.finalize()
                #print("finalized:", self.flow.finalized)
                if self.rmflow:
                    app("Flow directory will be removed...")
                    try:
                        self.flow.rmtree()
                    except Exception:
                        logger.warning("Ignoring exception while trying to remove flow dir.")

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            logger.debug('This should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            #self.sched.print_jobs()
            if not has_sched_v3:
                for job in self.sched.get_jobs():
                    self.sched.unschedule_job(job)
            #self.sched.print_jobs()

            self.sched.shutdown()

            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 if success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append
        app("Submitted on: %s" % time.ctime(self.start_time))
        app("Completed on: %s" % time.asctime())
        app("Elapsed time: %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
Example 15: __init__
# Required import: from apscheduler.scheduler import Scheduler [as alias]
# Or: from apscheduler.scheduler.Scheduler import unschedule_job [as alias]
#......... part of the code omitted here .........
            hour = iq.getTag("query").getTag("archipel").getAttr("hour")
            minute = iq.getTag("query").getTag("archipel").getAttr("minute")
            second = iq.getTag("query").getTag("archipel").getAttr("second")
            comment = iq.getTag("query").getTag("archipel").getAttr("comment")
            if iq.getTag("query").getTag("archipel").has_attr("param"):
                param = iq.getTag("query").getTag("archipel").getAttr("param")
            uid = str(uuid.uuid1())
            str_date = "%s-%s-%s @ %s : %s : %s" % (year, month, day, hour, minute, second)
            if entityClass == "TNArchipelVirtualMachine": func = self.do_job_for_vm
            elif entityClass == "TNArchipelHypervisor": func = self.do_job_for_hypervisor
            self.scheduler.add_cron_job(func, year=year, month=month, day=day, hour=hour, minute=minute, second=second, args=[job, uid, str_date, comment, param])
            self.save_jobs(uid, job, year, month, day, hour, minute, second, comment, param)
            self.entity.push_change("scheduler", "scheduled")
        except Exception as ex:
            reply = build_error_iq(self, ex, iq)
        return reply

    def iq_jobs(self, iq):
        """
        gets jobs
        @type iq: xmpp.Protocol.Iq
        @param iq: the received IQ
        @rtype: xmpp.Protocol.Iq
        @return: a ready to send IQ containing the result of the action
        """
        try:
            reply = iq.buildReply("result")
            nodes = []
            for job in self.scheduler.jobs:
                job_node = xmpp.Node(tag="job", attrs={"action": str(job.args[0]), "uid": str(job.args[1]), "date": str(job.args[2]), "comment": job.args[3]})
                nodes.append(job_node)
            reply.setQueryPayload(nodes)
        except Exception as ex:
            reply = build_error_iq(self, ex, iq)
        return reply

    def iq_unschedule(self, iq):
        """
        gets jobs
        @type iq: xmpp.Protocol.Iq
        @param iq: the received IQ
        @rtype: xmpp.Protocol.Iq
        @return: a ready to send IQ containing the result of the action
        """
        try:
            reply = iq.buildReply("result")
            uid = iq.getTag("query").getTag("archipel").getAttr("uid")
            the_job = self.get_jod_with_uid(uid)
            if not the_job:
                raise Exception("job with uid %s doesn't exists" % uid)
            self.delete_job(uid)
            self.scheduler.unschedule_job(the_job)
            self.entity.push_change("scheduler", "unscheduled")
        except Exception as ex:
            reply = build_error_iq(self, ex, iq)
        return reply

    def iq_actions(self, iq):
        """
        get available actions
        @type iq: xmpp.Protocol.Iq
        @param iq: the received IQ
        @rtype: xmpp.Protocol.Iq
        @return: a ready to send IQ containing the result of the action
        """
        try:
            reply = iq.buildReply("result")
            entityClass = self.entity.__class__.__name__
            if entityClass == "TNArchipelVirtualMachine":
                actions = self.supported_actions_for_vm
            elif entityClass == "TNArchipelHypervisor":
                actions = self.supported_actions_for_hypervisor
            nodes = []
            for action in actions:
                action_node = xmpp.Node(tag="action")
                action_node.setData(action)
                nodes.append(action_node)
            reply.setQueryPayload(nodes)
        except Exception as ex:
            reply = build_error_iq(self, ex, iq)
        return reply