本文整理汇总了Python中schedule.run_all函数的典型用法代码示例。如果您正苦于以下问题:Python run_all函数的具体用法?Python run_all怎么用?Python run_all使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了run_all函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: add_group
def add_group(group_id):
    """Register a GroupMe group (or member DM) for periodic history fetching.

    Looks the target up via groupy, schedules a recurring async update job,
    triggers an immediate first run, and renders a status page.

    :param group_id: GroupMe group id, or user id when ``type=member``.
    :returns: rendered ``layout.html`` (with ``404`` status when not found).
    """
    print(group_id)
    # The ?type= query parameter selects group vs. individual member.
    kind = request.args.get('type')  # renamed local: don't shadow builtin `type`
    # Bug fix: initialise `group` so an unrecognised/missing `type` falls
    # through to the 404 branch instead of raising NameError.
    group = None
    if kind == "group":
        group = groupy.Group.list().filter(id=group_id).first
    elif kind == "member":
        group = groupy.Member.list().filter(user_id=group_id).first
    if not group:
        return render_template(
            "layout.html", message="Error! Group ID not found."), 404
    if group_id in group_jobs:
        return render_template(
            "layout.html",
            message="Error! Group already added.")
    # Refresh every minute; each job carries its own lock so concurrent
    # updates of the same group don't overlap.
    schedule.every(1).minutes.do(
        handle_update_group_async,
        group_id=group_id,
        type=kind,
        lock=threading.Lock())
    group_jobs.append(group_id)
    # Kick off the first fetch immediately instead of waiting a minute.
    schedule.run_all()
    if kind == "group":
        # Rough ETA: ~100 messages/s with a 10% safety margin.
        return render_template(
            "layout.html",
            message="Fetching group history, please wait. <br> Number of messages: {0}. <br> Estimated time for processing: {1}.".format(
                group.message_count,
                verbose_timedelta(
                    timedelta(
                        seconds=group.message_count /
                        100 *
                        1.1))))
    elif kind == "member":
        return render_template(
            "layout.html",
            message="Fetching message history, please wait.")
示例2: test_run_all
def test_run_all(self):
    """run_all() executes every scheduled job at once, ignoring timing."""
    job_spy = make_mock_job()
    for builder in (every().minute, every().hour, every().day.at('11:00')):
        builder.do(job_spy)
    schedule.run_all()
    assert job_spy.call_count == 3
示例3: loop
def loop(self):
    """Run the scheduler: one immediate pass in test mode, forever otherwise."""
    if self.test_mode:
        # Test mode: fire every job once and return instead of blocking.
        schedule.run_all()
        return
    self.log_time_of_next_run()
    while True:
        schedule.run_pending()
        time.sleep(60)
示例4: test_cancel_jobs
def test_cancel_jobs(self):
    """A job returning CancelJob is unscheduled after it runs."""
    def self_cancelling():
        return schedule.CancelJob

    for _ in range(3):
        every().second.do(self_cancelling)
    assert len(schedule.jobs) == 3
    schedule.run_all()
    # All three jobs cancelled themselves on their first run.
    assert len(schedule.jobs) == 0
示例5: handle
def handle(self, flush, *args, **kwargs):
    """Management-command entry point for the weekly sheet job.

    :param flush: when truthy, run all scheduled jobs once and return;
        otherwise block forever polling the scheduler every minute.
    """
    schedule.every().monday.at('00:30').do(self.new_sheet_job)
    if flush:
        # Fix: parenthesised print is valid on Python 3 and behaves
        # identically (single argument) on Python 2.
        print("Flushing all scheduled jobs...")
        schedule.run_all()
        return
    print("Running schedule...")
    while True:
        schedule.run_pending()
        time.sleep(60)
示例6: main
def main():
    """Start the garduino watcher and drive all periodic update jobs."""
    # initialize garduino watcher
    arduino.run()
    # schedule waits a full interval before the first run, so we register
    # the periodic jobs, then run them all once immediately.
    periodic_jobs = (
        (schedule.every(15).minutes, fifteenminute_updates),
        (schedule.every(12).hours, halfday_updates),
    )
    for builder, task in periodic_jobs:
        builder.do(run_threaded, task)
    schedule.run_all()
    # Registered after run_all() so the water-level check is NOT fired at
    # start-up, only at its 06:05 daily slot.
    schedule.every().day.at('6:05').do(run_threaded, waterlevel_update)
    while True:
        schedule.run_pending()
        time.sleep(5)
示例7: test_run_all_with_decorator
def test_run_all_with_decorator(self):
    """Jobs registered via @repeat are all fired by run_all()."""
    job_spy = make_mock_job()

    @repeat(every().minute)
    def _job1():
        job_spy()

    @repeat(every().hour)
    def _job2():
        job_spy()

    @repeat(every().day.at('11:00'))
    def _job3():
        job_spy()

    schedule.run_all()
    # One call per decorated job, regardless of each job's schedule.
    assert job_spy.call_count == 3
示例8: test_clear_by_tag
def test_clear_by_tag(self):
    """clear(tag) removes only jobs carrying that tag; clear() removes all."""
    every().second.do(make_mock_job(name='job1')).tag('tag1')
    every().second.do(make_mock_job(name='job2')).tag('tag1', 'tag2')
    # Duplicate tags are tolerated (they collapse on the job).
    every().second.do(make_mock_job(name='job3')).tag('tag3', 'tag3',
                                                      'tag3', 'tag2')
    assert len(schedule.jobs) == 3
    schedule.run_all()
    # Running the jobs does not unschedule them.
    assert len(schedule.jobs) == 3
    schedule.clear('tag3')
    assert len(schedule.jobs) == 2
    schedule.clear('tag1')
    assert len(schedule.jobs) == 0
    for job_name in ('job1', 'job2', 'job3'):
        every().second.do(make_mock_job(name=job_name))
    # A bare clear() drops everything, tagged or not.
    schedule.clear()
    assert len(schedule.jobs) == 0
示例9: schedule_updates
def schedule_updates():
    """Register the daily crawler/alert jobs, run them once, then poll forever.

    NOTE(review): `_crawler(name)` is invoked at registration time, so it is
    presumably a factory returning the actual job callable — confirm.
    """
    # EDGAR
    schedule.every(1).days.at("04:30").do(_crawler('sec-edgar'))
    schedule.every(1).days.at("01:00").do(_crawler('openoil-internal-documents'))
    # SEDAR
    # Sedar website stops updating at 11pm ET, i.e. 0500 CET
    # We start our scrape just after, at 0511 CET, and allow 3 hours for it to
    # upload
    schedule.every(1).days.at("08:00").do(_crawler('sedar-partial-content'))
    schedule.every(1).days.at("16:00").do(check_alerts)
    # Fire every job once at start-up rather than waiting a full day.
    schedule.run_all()
    while True:
        schedule.run_pending()
        time.sleep(1)
示例10: test_cancel_job
def test_cancel_job(self):
    """cancel_job removes a job; cancelling a non-job is a silent no-op."""
    def self_cancelling():
        return schedule.CancelJob

    job_spy = make_mock_job()
    every().second.do(self_cancelling)
    surviving = every().second.do(job_spy)
    assert len(schedule.jobs) == 2
    # The CancelJob-returning job unschedules itself on its first run.
    schedule.run_all()
    assert len(schedule.jobs) == 1
    assert schedule.jobs[0] == surviving
    # Passing something that is not a scheduled job must not raise.
    schedule.cancel_job('Not a job')
    assert len(schedule.jobs) == 1
    schedule.default_scheduler.cancel_job('Not a job')
    assert len(schedule.jobs) == 1
    schedule.cancel_job(surviving)
    assert len(schedule.jobs) == 0
示例11: test_daily_job
def test_daily_job(self):
    """A <schedule:job unit="day" at="3:00"> ZCML directive registers one
    daily job at 03:00 that runs the named view."""
    zcml.load_string(self.zcml_template % '''
        <schedule:job
            view="dummy-view"
            unit="day"
            at="3:00"
            />
        ''')
    jobs = schedule.jobs
    # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is the supported spelling with identical semantics.
    self.assertEqual(len(jobs), 1)
    job = jobs[0]
    self.assertEqual(job.interval, 1)
    self.assertEqual(job.unit, 'days')
    self.assertEqual(job.at_time, datetime.time(3, 0))
    # The view has not run yet, so the request is unmarked.
    self.assertFalse(self.request.get(VIEW_MARKER))
    schedule.run_all()
    # Running the job executed the view, which marks the request.
    self.assertTrue(self.request.get(VIEW_MARKER))
示例12: main
def main():
    """Connect to the Tor control port and periodically report rendezvous
    circuit status until interrupted."""
    args = parse_cmd_args()
    logger.setLevel(logging.__dict__[args.verbosity.upper()])
    if args.log_file:
        # Rotate the log file daily.
        handler = logging.handlers.TimedRotatingFileHandler(
            args.log_file, when='D')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.info("Beginning rendezvous circuit monitoring."
                "Status output every %d seconds", args.tick)
    with Controller.from_port(port=args.port) as controller:
        # Create a connection to the Tor control port
        controller.authenticate()
        # Subscribe to both major and minor circuit events.
        for event_type in (stem.control.EventType.CIRC,
                           stem.control.EventType.CIRC_MINOR):
            controller.add_event_listener(circ_event_handler, event_type)
        # Schedule rendezvous status output, and emit one report right away.
        schedule.every(args.tick).seconds.do(output_status, controller)
        schedule.run_all()
        try:
            while True:
                schedule.run_pending()
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("Stopping rendezvous circuit monitoring.")
            sys.exit(0)
示例13: test_job_func_args_are_passed_on
def test_job_func_args_are_passed_on(self):
    """Positional and keyword args given to do() reach the job callable."""
    job_spy = make_mock_job()
    every().second.do(job_spy, 1, 2, 'three', foo=23, bar={})
    schedule.run_all()
    job_spy.assert_called_once_with(1, 2, 'three', foo=23, bar={})
示例14: len
parsed_items_count = len(obj_ids)
if parsed_items_count < max_item:
itera = range(0, parsed_items_count)
else:
itera = range(parsed_items_count - max_item, parsed_items_count)
for i in itera:
# [6:] remove "entry-" from id value
obj_id = obj_ids[i].get('id')[6:]
# cursor.execute("INSERT INTO RSS (fileid, filename, filedate, source) SELECT (%s, %s, %s, %s) WHERE NOT EXISTS (SELECT * FROM RSS WHERE fileid=%s);", (obj_ids[i].get('id')[6:], obj_names[i], str(datetime.now()), rss,))
cursor.execute("SELECT id FROM RSS WHERE fileid = %s;", (obj_id,))
if not cursor.fetchone():
cursor.execute("SELECT count(*) FROM RSS WHERE source = %s;", (rss,))
if int(get_config((rss))['max_item']) == int(cursor.fetchone()[0]):
print("Limit reached, deleting oldest item from " + rss)
cursor.execute("DELETE FROM rss WHERE ctid in (SELECT ctid FROM rss WHERE source = %s ORDER BY filedate LIMIT 1);", (rss,))
#db.commit()
print(obj_id + " - " + obj_names[i] + " - " + str(datetime.now()) + " - " + rss)
cursor.execute("INSERT INTO RSS (fileid, filename, filedate, source) VALUES (%s, %s, %s, %s);", (obj_id, obj_names[i], str(datetime.now()), rss,))
db.commit()
# Register one `update` job per feed at its configured interval.
# NOTE(review): get_config(rss)['check'] is presumably minutes — the value
# feeds schedule.every(...).minutes; confirm against the config format.
for rss in rss_list:
    schedule.every(float(get_config((rss))['check'])).minutes.do(update, rss)
# Run every job once immediately, pausing 10 seconds between jobs
# (run_all's delay_seconds argument), then poll once per second forever.
schedule.run_all(10)
while 1:
    schedule.run_pending()
    time.sleep(1)
示例15: main
def main():
"""
Entry point when invoked over the command line.
"""
args = parse_cmd_args().parse_args()
config_file_options = settings.parse_config_file(args.config)
# Update global configuration with options specified in the config file
for setting in dir(config):
if setting.isupper() and config_file_options.get(setting):
setattr(config, setting, config_file_options.get(setting))
# Override the log level if specified on the command line.
if args.verbosity:
config.LOG_LEVEL = args.verbosity.upper()
# Write log file if configured in environment variable or config file
if config.LOG_LOCATION:
log.setup_file_logger(config.LOG_LOCATION)
logger.setLevel(logging.__dict__[config.LOG_LEVEL.upper()])
# Create a connection to the Tor control port
try:
controller = Controller.from_port(address=args.ip, port=args.port)
except stem.SocketError as exc:
logger.error("Unable to connect to Tor control port: %s", exc)
sys.exit(1)
else:
logger.debug("Successfully connected to the Tor control port.")
try:
controller.authenticate()
except stem.connection.AuthenticationFailure as exc:
logger.error("Unable to authenticate to Tor control port: %s", exc)
sys.exit(1)
else:
logger.debug("Successfully authenticated to the Tor control port.")
# Disable no-member due to bug with "Instance of 'Enum' has no * member"
# pylint: disable=no-member
# Check that the Tor client supports the HSPOST control port command
if not controller.get_version() >= stem.version.Requirement.HSPOST:
logger.error("A Tor version >= %s is required. You may need to "
"compile Tor from source or install a package from "
"the experimental Tor repository.",
stem.version.Requirement.HSPOST)
sys.exit(1)
# Load the keys and config for each onion service
settings.initialize_services(controller,
config_file_options.get('services'))
# Finished parsing all the config file.
handler = eventhandler.EventHandler()
controller.add_event_listener(handler.new_desc,
EventType.HS_DESC)
controller.add_event_listener(handler.new_desc_content,
EventType.HS_DESC_CONTENT)
# Schedule descriptor fetch and upload events
schedule.every(config.REFRESH_INTERVAL).seconds.do(
onionbalance.instance.fetch_instance_descriptors, controller)
schedule.every(config.PUBLISH_CHECK_INTERVAL).seconds.do(
onionbalance.service.publish_all_descriptors)
try:
# Run initial fetch of HS instance descriptors
schedule.run_all(delay_seconds=30)
# Begin main loop to poll for HS descriptors
while True:
schedule.run_pending()
time.sleep(1)
except KeyboardInterrupt:
logger.info("Keyboard interrupt received. Stopping the "
"management server.")
return 0