This article collects typical usage examples of the schedule.every function in Python. If you are unsure what schedule.every does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following shows 15 code examples of the every function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
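Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: register a job with schedule.every(...), then poll schedule.run_pending() in a loop. The job function and intervals below are illustrative only.

import time
import schedule

def job():
    print("working...")

# Register jobs: a fixed interval and a daily time-of-day trigger.
schedule.every(10).seconds.do(job)
schedule.every().day.at("10:30").do(job)

while True:
    schedule.run_pending()  # run any jobs that are due
    time.sleep(1)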
Example 1: RequestConsumption
def RequestConsumption():
    global frame_id
    global rooms_id
    global frame_sending_attempts
    global room_consumption_frame
    now = datetime.datetime.now()
    time_now = (int(now.strftime("%H")) * 60) + int(now.strftime("%M"))
    day = now.strftime("%a")
    date = now.strftime("%Y-%m-%d")
    sql.SelectColumn("distinct rooms.rooms_address, rooms.rooms_id")
    sql.FromTable("room_schedules")
    sql.JoinTable([["rooms", "rooms.rooms_id=room_schedules.rooms_id"]])
    sql.WhereCondition("(room_schedules.room_schedules_day like '%{}%' or room_schedules.room_schedules_date='{}') and room_schedules.room_schedules_end_time={}".format(day, date, time_now))
    room_schedules_result = sql.FetchAll()
    for room_schedule in room_schedules_result:  # avoid shadowing the schedule module
        rooms_id = room_schedule[1]  # rooms_id is the second selected column
        frame_id = GetFrameID(sql)
        room_addresses_data = GetRoomAddresses(sql, room_schedule[0])
        frame_data = "{} {} 0".format(room_addresses_data, frame_id)
        room_consumption_frame = CreateFrame(frame_data, 4)
        frame_sending_attempts = 1
        schedule.every(1).seconds.do(AcknowledgementTimer)
        SendFrame()
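Example 1 registers a one-second AcknowledgementTimer every time a frame is sent, which implies the job must eventually unschedule itself (for instance, once an acknowledgement arrives or a retry limit is reached). The snippet does not show how AcknowledgementTimer does that, but with the schedule library a job can cancel itself by returning schedule.CancelJob. A minimal sketch with a hypothetical retry limit:

import schedule

frame_sending_attempts = 1
MAX_ATTEMPTS = 3  # hypothetical retry limit

def AcknowledgementTimer():
    global frame_sending_attempts
    if frame_sending_attempts >= MAX_ATTEMPTS:
        # Returning schedule.CancelJob removes this job from the scheduler.
        return schedule.CancelJob
    frame_sending_attempts += 1
    # ... resend the frame here ...

schedule.every(1).seconds.do(AcknowledgementTimer)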
Example 2: run_or_schedule
def run_or_schedule(job, schedule=False, exception_handler=None):
    """Runs a job and optionally schedules it to run later

    Args:
        job (func): The func to run
        schedule (bool): Schedule `func` to run in the future (default: False)
        exception_handler (func): The exception handler to wrap the function in
            (default: None)

    Examples:
        >>> job = partial(pprint, 'hello world')
        >>> run_or_schedule(job)
        u'hello world'
        >>> exception_handler = ExceptionHandler('[email protected]').handler
        >>> run_or_schedule(job, False, exception_handler)
        u'hello world'
    """
    if exception_handler and schedule:
        job = exception_handler(job)

    job()

    if schedule:
        sch.every(1).day.at(SCHEDULE_TIME).do(job)

        while True:
            sch.run_pending()
            time.sleep(1)
Example 3: main
def main(arguments=None):
    '''Runs thumbor server with the specified arguments.'''
    if arguments is None:
        arguments = sys.argv[1:]

    server_parameters = get_server_parameters(arguments)
    config = get_config(server_parameters.config_path)
    configure_log(config, server_parameters.log_level.upper())
    validate_config(config, server_parameters)
    importer = get_importer(config)

    with get_context(server_parameters, config, importer) as context:
        application = get_application(context)
        run_server(application, context)

        if (config.GC_INTERVAL and config.GC_INTERVAL > 0):
            schedule.every(config.GC_INTERVAL).seconds.do(gc_collect)

        try:
            logging.debug('thumbor running at %s:%d' % (context.server.ip, context.server.port))
            tornado.ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            sys.stdout.write('\n')
            sys.stdout.write("-- thumbor closed by user interruption --\n")
Example 4: prepare_jobs
def prepare_jobs(self, jobs):
    suffixed_names = {
        'week': 'weekly',
        'day': 'daily',
        'hour': 'hourly',
        'minute': 'minutes',
        'second': 'seconds',
    }
    for job in jobs:
        if not job.enabled:
            continue

        interval_name = job.time_unit.lower()

        if job.interval > 0:  # There can't be a job less than 0 (0 minutes? 0 seconds?)
            plural_interval_name = interval_name + 's'
            d = getattr(schedule.every(job.interval), plural_interval_name)
            d.do(self.run_job, job)
            Log.info(" Loading %s job: %s.", suffixed_names[interval_name], job.name)
        elif interval_name == 'day':
            schedule.every().day.at(job.at_time).do(self.run_job, job)
            Log.info(" Loading time-based job: " + job.name)
        else:
            d = getattr(schedule.every(), interval_name)
            d.do(self.run_job, job)
            Log.info(" Loading %s job: %s", interval_name, job.name)
Example 5: main
def main():
    global thingiverse
    global twitter

    auto_mode = True

    if DEBUG: print 'welcome'
    thingiverse.DEBUG = False
    thingiverse.txt_url_mode = False
    thingiverse.connect()
    print api.VerifyCredentials().name
    print '\n\nThingisimilar\n'

    schedule.every(2).minutes.do(exploring)

    if auto_mode:
        #main_loop()
        exploring()
        while True:
            schedule.run_pending()
            sleep(1.0)
    else:
        #while True:
        #num1 = raw_input('#1 --> ')
        #num2 = raw_input('#2 -->')
        num1 = test_things[0]
        num2 = test_things[4]
        standard_job(int(num1), int(num2))
Example 6: schedule_raw
def schedule_raw(self, df_rawsources):
    # Iterate through all sources with 'raw' type
    for index, source in df_rawsources.iterrows():
        print "[SCHEDULER] Working with raw source: ", source['name']
        updateFrequency = source['updateFrequency']
        print "[SCHEDULER] Update frequency is <", updateFrequency, ">"
        updates = source['updates']
        if len(updates) > 0:
            # Get the most recent update
            lastUpdate = dp.parse(updates[0]['createdAt'])
            # Get the current time in seconds
            now = int(round(time.time()))
            # If time between now and the last update is greater than the
            # update interval, schedule the event
            if now - int(lastUpdate.strftime('%s')) > updateFrequency:
                source_id = source['_id']
                print "[SCHEDULER] Scheduling source <", source['name'], "> with id <", source_id, ">"
                schedule.every(updateFrequency).seconds.do(self.process_raw, source_id)

    # Process all scheduled items
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 7: rules
def rules(cube, scheduler_type='minutes', scheduler_interval=59,
          dashboard=None):
    if scheduler_type:
        scheduler_type = cube.get('scheduler_type', 'minutes')
    if scheduler_interval:
        scheduler_interval = cube.get('scheduler_interval', 59)

    log_it("START REGISTER", "bin-scheduler")
    log_it("cube: {}".format(cube.get('slug')), "bin-scheduler")
    log_it("type: {}".format(scheduler_type), "bin-scheduler")
    log_it("interval: {}".format(scheduler_interval), "bin-scheduler")
    log_it("END REGISTER", "bin-scheduler")

    t = {}
    if scheduler_type == 'minutes':
        env = schedule.every(int(scheduler_interval))
        t = env.minutes
    elif scheduler_type == 'hour':
        env = schedule.every()
        t = env.hour
    elif scheduler_type == 'day':
        env = schedule.every()
        t = env.day

    try:
        t.do(job, slug=cube.get('slug'))
        jobn = cube.get("slug")
        if dashboard:
            jobn = u"{}-{}".format(cube.get("slug"), dashboard)
        onrun[jobn] = env
        register.append(jobn)
    except Exception, e:
        log_it("ERROR {}: {}".format(cube.get('slug'), e))
Example 8: dynamically_scrape_and_append_sales_data
def dynamically_scrape_and_append_sales_data(filename,
                                             interval,
                                             num_retries=10):
    """
    Dynamically scrapes sales data and appends the data to a file by generating
    a list of links, checking it against an old list and only keeping new links,
    and scraping those links for sales data.
    """
    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_and_append_sales_data_from_featured_links(filename,
                                                         clean_links,
                                                         num_retries)
        old_list = new_list

    job(old_list)
    schedule.every(interval).hours.do(job, old_list)  # pass the argument job expects

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
Example 9: ChangePort
def ChangePort(roomID):
    global frame_id
    global frame_sending_attempts
    global port_frame
    sql.GetWhereQuery("rooms", "rooms_id={}".format(roomID))
    sql.SelectColumn("rooms_port, rooms_address")
    rooms_result = sql.FetchOne()
    sql.GetWhereQuery("room_devices", "rooms_id={}".format(roomID))
    sql.SelectColumn("room_devices_port")
    room_devices_result = sql.FetchAll()
    port_data = "{} {}".format(rooms_result[0], len(room_devices_result))
    for device in room_devices_result:
        port_data = "{} {}".format(port_data, device[0])
    room_addresses_data = GetRoomAddresses(sql, rooms_result[1])
    frame_id = GetFrameID(sql)
    frame_data = "{} {} {}".format(room_addresses_data, frame_id, port_data)
    port_frame = CreateFrame(frame_data, 12)
    frame_sending_attempts = 1
    schedule.every(1).seconds.do(AcknowledgementTimer)
    SendFrame()
Example 10: routine
def routine(self):
    # install schedule
    for entity in self.entities:
        pieces = entity.getschedule().split(" ")
        if re.match(r"^\d*$", pieces[1]):
            every = schedule.every(int(pieces[1]))
            pieces = pieces[2:len(pieces)]
        else:
            every = schedule.every()
            pieces = pieces[1:len(pieces)]
        timedes = getattr(every, pieces[0])
        pieces = pieces[1:len(pieces)]
        if len(pieces) and pieces[0] == "at":
            finish = timedes.at(pieces[1])
        else:
            finish = timedes
        finish.do(self.monitor, entity)

    while True:
        time.sleep(1)
        for entity in self.entities:
            schedule.run_pending()
Example 11: main
def main():
    args = parser.parse_args()

    log = logging.getLogger()
    log.level = logging.INFO
    stream = logging.StreamHandler()
    file_handler = logging.FileHandler(args.logfile)
    log.addHandler(stream)
    log.addHandler(file_handler)

    with open(args.config) as f:
        config = yaml.safe_load(f)

    log.info('Connecting to database')
    database = connect_to_database(**config['mongodb'])
    log.info('Connection established')

    services = [
        service(auxdir=args.auxdir)
        for service in supported_services.values()
    ]

    schedule.every().day.at('15:00').do(
        fill_last_night, services=services, database=database
    )

    log.info('Schedule started')
    try:
        while True:
            schedule.run_pending()
            sleep(60)
    except (KeyboardInterrupt, SystemExit):
        pass
Example 12: dynamically_scrape_combined_data
def dynamically_scrape_combined_data(data_filename,
                                     sales_filename,
                                     interval,
                                     num_retries=10):
    """
    Dynamically scrapes a continuously updated list of unique clean links and
    appends the data to their respective files.
    """
    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_combined_data_from_all_featured_products(data_filename,
                                                        sales_filename,
                                                        clean_links,
                                                        num_retries)
        old_list = new_list

    job(old_list)
    schedule.every(interval).hours.do(job, old_list)  # pass the argument job expects

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
Example 13: scheduler_init
def scheduler_init(parent):
    '''
    Schedule Init

    Start the main loop for the internal scheduler that
    ticks every second.

    --
    @param parent:int The PID of the parent.
    @return void
    '''

    # Define the jobs to run at which intervals
    schedule.every().minute.do(Reminder.run_remind_once)
    schedule.every().minute.do(Reminder.run_remind_recurring)

    # Start the main thread, polling the schedules
    # every second
    while True:

        # Check if the current parent pid matches the original
        # parent that started us. If not, we should end.
        if os.getppid() != parent:
            logger.error(
                'Killing scheduler as it has become detached from parent PID.')
            sys.exit(1)

        # Run the schedule
        schedule.run_pending()
        time.sleep(1)

    return
Example 14: watch
def watch():
    # set up argument parsing
    parser = example.BigFixArgParser()
    parser.add_argument('-a', '--actions', required=False, help='List of actions to watch')
    parser.add_argument('-v', '--verbose', default=False, action="store_true", required=False, help='To see the full list of commands that contain watched actions')
    parser.add_argument('-t', '--time', default=60, required=False, help='To set the waiting period')
    parser.base_usage += """
  -a, --actions [ACTIONS/FILENAME]  Specify a list of actions to watch, separated by commas (,);
                                    if a FILENAME with a .wal extension is detected, the file is read to get the list.
  -v, --verbose                     To see the full list of commands that contain watched actions
  -t, --time [MINUTE]               A number specifying the waiting period between checks"""
    parser.description = 'Used to watch certain actions'
    ext = ".wal"
    args = parser.parse_args()
    args_actions = ""

    if ext in args.actions:
        actions_file = open(args.actions, 'r')
        for line in actions_file:
            args_actions += line
    else:
        args_actions = args.actions

    actions_list = args_actions.split(",")
    watched_actions = gen_regex(actions_list)
    action_record = {}
    for a in actions_list:
        action_record[a] = False

    t = int(args.time)
    gen_summary(action_record, watched_actions, args)
    schedule.every(t).minutes.do(gen_summary, action_record, watched_actions, args)
    while True:
        schedule.run_pending()
Example 15: main
def main():
    port = "5918"
    if len(sys.argv) > 1:
        port = sys.argv[1]
    socket = initiate_zmq(port)
    logging.basicConfig(filename='./log/ingest_lottery.log', level=logging.INFO)
    tz = pytz.timezone(pytz.country_timezones('cn')[0])
    schedule.every(30).seconds.do(run, socket, tz)
    while True:
        try:
            schedule.run_pending()
            time.sleep(1)
        except KeyboardInterrupt:
            now = datetime.now(tz)
            message = "CTRL-C to quit the program at [%s]" % now.isoformat()
            logging.info(message)
            break
        except Exception as e:
            now = datetime.now(tz)
            message = "Error at time [%s]" % now.isoformat()
            logging.info(message)
            logging.info(e)
            # reschedule the job
            schedule.clear()
            socket = initiate_zmq(port)
            schedule.every(30).seconds.do(run, socket, tz)