This page collects typical usage examples of the Python cli.api_wrapper.APIWrapper class. If you are wondering what the APIWrapper class does, how to use it, or are simply looking for concrete examples, the selection below should help.
Fifteen code examples of the APIWrapper class are shown below, sorted by popularity by default.
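Before the examples, here is a minimal sketch of the two construction patterns that recur below: a bare APIWrapper() for calls against the local appliance, and a credentialed wrapper for a remote appliance (see Example 8). The credential values and the host address are placeholders, and the assumption that api_call without calltype performs a GET is inferred from Example 8, which reads pool data that way.

from cli.api_wrapper import APIWrapper

# Local appliance: the no-argument form used by the scheduled tasks below.
aw = APIWrapper()
aw.api_call('network')                              # no calltype: presumably a GET
aw.api_call('commands/bootstrap', calltype='post')  # explicit POST, as in Examples 5 and 9

# Remote appliance (the pattern from Example 8): OAuth client credentials
# plus a base URL. These values are illustrative placeholders.
remote = APIWrapper(client_id='<client-id>',
                    client_secret='<client-secret>',
                    url='https://192.168.1.100:443')
response = remote.api_call('pools')
print([p['name'] for p in response['results']])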
Example 1: main
def main():
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    stype = 'task_scheduler'
    aw = APIWrapper()
    if (tdo.task_type != 'snapshot'):
        logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
        return
    meta = json.loads(tdo.json_meta)
    validate_snap_meta(meta)
    max_count = int(float(meta['max_count']))
    share = Share.objects.get(name=meta['share'])
    prefix = ('%s_' % meta['prefix'])
    snapshots = Snapshot.objects.filter(share=share, snap_type=stype,
                                        name__startswith=prefix).order_by('-id')
    if (len(snapshots) > max_count):
        for snap in snapshots[max_count:]:
            url = ('shares/%s/snapshots/%s' % (meta['share'], snap.name))
            try:
                aw.api_call(url, data=None, calltype='delete', save_error=False)
                logger.debug('deleted old snapshot at %s' % url)
            except Exception as e:
                logger.error('Failed to delete old snapshot at %s' % url)
                logger.exception(e)
                return
Example 2: main
def main():
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    stype = 'task_scheduler'
    aw = APIWrapper()
    if (tdo.task_type != 'snapshot'):
        logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
        return
    meta = json.loads(tdo.json_meta)
    validate_snap_meta(meta)
    share = Share.objects.get(name=meta['share'])
    max_count = int(float(meta['max_count']))
    prefix = ('%s_' % meta['prefix'])
    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)
    snap_created = False
    t.state = 'error'
    try:
        name = ('%s_%s' % (meta['prefix'],
                           datetime.now().strftime(settings.SNAP_TS_FORMAT)))
        url = ('shares/%s/snapshots/%s' % (share.name, name))
        # only create a new snap if there's no overflow situation. This prevents
        # runaway snapshot creation beyond max_count+1.
        if (delete(aw, share, stype, prefix, max_count)):
            data = {'snap_type': stype,
                    'uvisible': meta['visible'], }
            headers = {'content-type': 'application/json'}
            aw.api_call(url, data=data, calltype='post', headers=headers,
                        save_error=False)
            logger.debug('created snapshot at %s' % url)
            t.state = 'finished'
            snap_created = True
    except Exception as e:
        logger.error('Failed to create snapshot at %s' % url)
        logger.exception(e)
Example 3: main
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    # Performance note: immediately check the task execution time/day window
    # range to avoid other calls.
    if crontabwindow.crontab_range(cwindow):
        tdo = TaskDefinition.objects.get(id=tid)
        if tdo.task_type != "scrub":
            return logger.error("task_type(%s) is not scrub." % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()
        if Task.objects.filter(task_def=tdo).exists():
            ll = Task.objects.filter(task_def=tdo).order_by("-id")[0]
            if ll.state != "error" and ll.state != "finished":
                logger.debug("Non terminal state(%s) for task(%d). Checking "
                             "again." % (ll.state, tid))
                cur_state = update_state(ll, meta["pool"], aw)
                if cur_state != "error" and cur_state != "finished":
                    return logger.debug(
                        "Non terminal state(%s) for task(%d). "
                        "A new task will not be run." % (cur_state, tid)
                    )
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)
        url = "pools/%s/scrub" % meta["pool"]
        try:
            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("Started scrub at %s" % url)
            t.state = "running"
        except Exception as e:
            logger.error("Failed to start scrub at %s" % url)
            t.state = "error"
            logger.exception(e)
        finally:
            # persist the task state; the fuller variant of this function in
            # Example 6 continues from here with a polling loop.
            t.save()
Example 4: SysinfoNamespace
class SysinfoNamespace(BaseNamespace, BroadcastMixin):

    start = False
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # Called before the connection is established
    def initialize(self):
        self.aw = APIWrapper()

    # This function is run once on every connection
    def recv_connect(self):
        self.emit("sysinfo:sysinfo", {
            "key": "sysinfo:connected", "data": "connected"
        })
        self.start = True
        gevent.spawn(self.update_storage_state)
        gevent.spawn(self.update_check)
        gevent.spawn(self.update_rockons)
        gevent.spawn(self.send_uptime)
        gevent.spawn(self.send_kernel_info)
        gevent.spawn(self.prune_logs)

    # Run on every disconnect
    def recv_disconnect(self):
        self.start = False
        self.disconnect()

    def send_uptime(self):
        # Seems redundant
        while self.start:
            self.emit('sysinfo:uptime', {
                'data': uptime(), 'key': 'sysinfo:uptime'
            })
            gevent.sleep(60)

    def send_kernel_info(self):
        try:
            self.emit('sysinfo:kernel_info', {
                'data': kernel_info(self.supported_kernel),
                'key': 'sysinfo:kernel_info'
            })
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' %
                         e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('sysinfo:kernel_error', {
                'data': str(e),
                'key': 'sysinfo:kernel_error'
            })
            self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        try:
            self.aw.api_call('rockons/update', data=None, calltype='post',
                             save_error=False)
        except Exception as e:
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())
Example 5: main
def main():
    try:
        device_scan()
    except Exception as e:
        print('BTRFS device scan failed due to an exception. This indicates '
              'a serious problem. Aborting. Exception: %s' % e.__str__())
        sys.exit(1)
    print('BTRFS device scan complete')
    # if the appliance is not setup, there's nothing more to do beyond
    # device scan
    setup = Setup.objects.first()
    if (setup is None or setup.setup_user is False):
        print('Appliance is not yet setup.')
        return
    num_attempts = 0
    while True:
        try:
            aw = APIWrapper()
            time.sleep(2)
            aw.api_call('network')
            aw.api_call('commands/bootstrap', calltype='post')
            break
        except Exception as e:
            # Retry on every exception, primarily because of django-oauth
            # related code behaving unpredictably while setting
            # tokens. Retrying is a decent workaround for now(11302015).
            if (num_attempts > 15):
                print('Max attempts(15) reached. Connection errors persist. '
                      'Failed to bootstrap. Error: %s' % e.__str__())
                sys.exit(1)
            print('Exception occurred while bootstrapping. This could be '
                  'because rockstor.service is still starting up. Will '
                  'wait 2 seconds and try again. Exception: %s' % e.__str__())
            time.sleep(2)
            num_attempts += 1
    print('Bootstrapping complete')
    try:
        print('Running qgroup cleanup. %s' % QGROUP_CLEAN)
        run_command([QGROUP_CLEAN])
    except Exception as e:
        print('Exception while running %s: %s' % (QGROUP_CLEAN, e.__str__()))
    try:
        print('Running qgroup limit maxout. %s' % QGROUP_MAXOUT_LIMIT)
        run_command([QGROUP_MAXOUT_LIMIT])
    except Exception as e:
        print('Exception while running %s: %s' %
              (QGROUP_MAXOUT_LIMIT, e.__str__()))
Example 6: main
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()
        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (cur_state, tid))
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            t.save()
        while True:
            cur_state = update_state(t, meta['pool'], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug('task(%d) finished with state(%s).' %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Example 7: on_connect
def on_connect(self, sid, environ):
    self.aw = APIWrapper()
    self.emit('pincardwelcome',
              {
                  'key': 'pincardManager:pincardwelcome',
                  'data': 'Welcome to Rockstor PincardManager'
              })
Example 8: get
def get(self, *args, **kwargs):
    try:
        auuid = self.kwargs.get('auuid', None)
        ao = Appliance.objects.get(uuid=auuid)
        url = ('https://%s:%s' % (ao.ip, ao.mgmt_port))
        aw = APIWrapper(client_id=ao.client_id, client_secret=ao.client_secret,
                        url=url)
        response = aw.api_call('pools')
        res = [p['name'] for p in response['results']]
        return Response(res)
    except Appliance.DoesNotExist:
        msg = ('Remote appliance with the given uuid(%s) does not exist.' %
               auuid)
        handle_exception(Exception(msg), self.request)
    except Exception as e:
        msg = ('Failed to retrieve list of Pools on the remote '
               'appliance(%s). Make sure it is running and try again. '
               'Here is the exact error: %s' % (ao.ip, e.__str__()))
        handle_exception(Exception(msg), self.request)
Example 9: main
def main():
    aw = APIWrapper()
    device_scan()
    print('BTRFS device scan complete')
    num_attempts = 0
    while True:
        try:
            aw.api_call('network')
            aw.api_call('commands/bootstrap', calltype='post')
            break
        except requests.exceptions.ConnectionError as e:
            if (num_attempts > 15):
                print('Max attempts(15) reached. Connection errors persist. '
                      'Failed to bootstrap. Error: %s' % e.__str__())
                sys.exit(1)
            print('Connection error while bootstrapping. This could be because '
                  'rockstor.service is still starting up. Will wait 2 seconds '
                  'and try again.')
            time.sleep(2)
            num_attempts += 1
Example 10: on_connect
def on_connect(self, sid, environ):
    self.aw = APIWrapper()
    self.emit('connected',
              {
                  'key': 'sysinfo:connected',
                  'data': 'connected'
              })
    self.start = True
    self.spawn(self.update_storage_state, sid)
    self.spawn(self.update_check, sid)
    self.spawn(self.yum_updates, sid)
    self.spawn(self.update_rockons, sid)
    self.spawn(self.send_kernel_info, sid)
    self.spawn(self.prune_logs, sid)
    self.spawn(self.send_localtime, sid)
    self.spawn(self.send_uptime, sid)
    self.spawn(self.send_distroinfo, sid)
    self.spawn(self.shutdown_status, sid)
    self.spawn(self.pool_degraded_status, sid)
    self.spawn(self.pool_dev_stats, sid)
Example 11: main
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if (tdo.task_type not in ['reboot', 'shutdown', 'suspend']):
            logger.error('task_type(%s) is not a system reboot, '
                         'shutdown or suspend.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state='scheduled', start=now, end=schedule)
        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = ('commands/%s' % tdo.task_type)
            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if (tdo.task_type in ['shutdown', 'suspend'] and meta['wakeup']):
                crontab_fields = tdo.crontab.split()
                crontab_time = (int(crontab_fields[1]) * 60 +
                                int(crontab_fields[0]))
                wakeup_time = meta['rtc_hour'] * 60 + meta['rtc_minute']
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(hour=int(meta['rtc_hour']),
                                               minute=int(meta['rtc_minute']),
                                               second=0, microsecond=0)
                # if wake up < crontab time wake up will run next day
                if (crontab_time > wakeup_time):
                    epoch += timedelta(days=1)
                epoch = epoch.strftime('%s')
                url = ('%s/%s' % (url, epoch))
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('System %s scheduled' % tdo.task_type)
            t.state = 'finished'
        except Exception as e:
            t.state = 'failed'
            logger.error('Failed to schedule system %s' % tdo.task_type)
            logger.exception(e)
        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Example 12: SysinfoNamespace
class SysinfoNamespace(RockstorIO):

    start = False
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # This function is run once on every connection
    def on_connect(self, sid, environ):
        self.aw = APIWrapper()
        self.emit('connected',
                  {
                      'key': 'sysinfo:connected',
                      'data': 'connected'
                  })
        self.start = True
        self.spawn(self.update_storage_state, sid)
        self.spawn(self.update_check, sid)
        self.spawn(self.yum_updates, sid)
        self.spawn(self.update_rockons, sid)
        self.spawn(self.send_kernel_info, sid)
        self.spawn(self.prune_logs, sid)
        self.spawn(self.send_localtime, sid)
        self.spawn(self.send_uptime, sid)
        self.spawn(self.shutdown_status, sid)

    # Run on every disconnect
    def on_disconnect(self, sid):
        self.cleanup(sid)
        self.start = False

    def send_uptime(self):
        # Seems redundant
        while self.start:
            self.emit('uptime', {'key': 'sysinfo:uptime', 'data': uptime()})
            gevent.sleep(60)

    def send_localtime(self):
        while self.start:
            self.emit('localtime',
                      {
                          'key': 'sysinfo:localtime',
                          'data': time.strftime('%H:%M (%z %Z)')
                      })
            gevent.sleep(40)

    def send_kernel_info(self):
        try:
            self.emit('kernel_info',
                      {
                          'key': 'sysinfo:kernel_info',
                          'data': kernel_info(self.supported_kernel)
                      })
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' %
                         e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('kernel_error', {
                'key': 'sysinfo:kernel_error', 'data': str(e)})
            self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        try:
            self.aw.api_call('rockons/update', data=None, calltype='post',
                             save_error=False)
        except Exception as e:
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())

    def update_storage_state(self):
        # update storage state once a minute as long as
        # there is a client connected.
        while self.start:
            resources = [{'url': 'disks/scan',
                          'success': 'Disk state updated successfully',
                          'error': 'Failed to update disk state.'},
                         {'url': 'commands/refresh-pool-state',
                          'success': 'Pool state updated successfully',
                          'error': 'Failed to update pool state.'},
                         {'url': 'commands/refresh-share-state',
                          'success': 'Share state updated successfully',
                          'error': 'Failed to update share state.'},
                         {'url': 'commands/refresh-snapshot-state',
                          'success': 'Snapshot state updated successfully',
                          'error': 'Failed to update snapshot state.'}, ]
            for r in resources:
                try:
                    self.aw.api_call(r['url'], data=None, calltype='post',
                                     save_error=False)
                except Exception as e:
                    logger.error('%s. exception: %s'
                                 % (r['error'], e.__str__()))
            gevent.sleep(60)

    def update_check(self):
        # ......... rest of the code omitted here .........
Example 13: PincardManagerNamespace
class PincardManagerNamespace(RockstorIO):

    def on_connect(self, sid, environ):
        self.aw = APIWrapper()
        self.emit('pincardwelcome',
                  {
                      'key': 'pincardManager:pincardwelcome',
                      'data': 'Welcome to Rockstor PincardManager'
                  })

    def on_disconnect(self, sid):
        self.pins_user_uname = None
        self.pins_user_uid = None
        self.pins_check = None
        self.pass_reset_time = None
        self.otp = 'none'
        self.cleanup(sid)

    def on_generatepincard(self, sid, uid):

        def create_pincard(uid):
            try:
                url = 'pincardmanager/create/%s' % uid
                new_pincard = self.aw.api_call(url, data=None, calltype='post',
                                               save_error=False)
                self.emit('newpincard',
                          {'key': 'pincardManager:newpincard',
                           'data': new_pincard})
            except Exception as e:
                logger.error('Failed to create Pincard with '
                             'exception: %s' % e.__str__())

        self.spawn(create_pincard, sid, uid)

    def on_haspincard(self, sid, user):

        def check_has_pincard(user):
            pins = []
            otp = False
            self.pins_check = []
            self.otp = 'none'
            # Convert from username to uid and, if the user exists, check for
            # a pincard. We don't tell the frontend whether a user exists,
            # to avoid exposure to security flaws/brute forcing etc.
            uid = username_to_uid(user)
            user_exist = True if uid is not None else False
            user_has_pincard = False
            # If user exists we check if has a pincard
            if user_exist:
                user_has_pincard = has_pincard(uid)
            # If user is root / uid 0 we check also if email notifications are
            # enabled. If not, the user won't be able to reset the password
            # with a pincard.
            if uid == 0:
                user_has_pincard = user_has_pincard and email_notification_enabled()  # noqa E501
            if user_has_pincard:
                self.pins_user_uname = user
                self.pins_user_uid = uid
                pins = reset_random_pins(uid)
                for pin in pins:
                    self.pins_check.append(pin['pin_number'])
                # Set current time, user will have max 3 min to reset password
                self.pass_reset_time = datetime.now()
                if uid == 0:
                    self.otp = generate_otp(user)
                    otp = True
            self.emit('haspincard',
                      {
                          'key': 'pincardManager:haspincard',
                          'has_pincard': user_has_pincard,
                          'pins_check': pins,
                          'otp': otp
                      })

        self.spawn(check_has_pincard, sid, user)

    def on_passreset(self, sid, pinlist, otp='none'):

        def password_reset(pinlist, otp):
            reset_status = False
            reset_response = None
            # On password reset, first check the OTP. If one is not required,
            # 'none' == 'none'; otherwise the value sent has to match the
            # stored one.
            if otp == self.otp:
                # If otp is ok we check for elapsed time to be < 3 mins
                elapsed_time = (datetime.now() - self.pass_reset_time).total_seconds()  # noqa E501
                if elapsed_time < 180:
                    # If received pins equal expected pins, check for values
                    # via reset_password func
                    # ......... rest of the code omitted here .........
Example 14: SysinfoNamespace
class SysinfoNamespace(RockstorIO):

    start = False
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION
    os_distro_name = settings.OS_DISTRO_NAME

    # This function is run once on every connection
    def on_connect(self, sid, environ):
        self.aw = APIWrapper()
        self.emit('connected',
                  {
                      'key': 'sysinfo:connected',
                      'data': 'connected'
                  })
        self.start = True
        self.spawn(self.update_storage_state, sid)
        self.spawn(self.update_check, sid)
        self.spawn(self.yum_updates, sid)
        self.spawn(self.update_rockons, sid)
        self.spawn(self.send_kernel_info, sid)
        self.spawn(self.prune_logs, sid)
        self.spawn(self.send_localtime, sid)
        self.spawn(self.send_uptime, sid)
        self.spawn(self.send_distroinfo, sid)
        self.spawn(self.shutdown_status, sid)
        self.spawn(self.pool_degraded_status, sid)
        self.spawn(self.pool_dev_stats, sid)

    # Run on every disconnect
    def on_disconnect(self, sid):
        self.cleanup(sid)
        self.start = False

    def send_uptime(self):
        while self.start:
            self.emit('uptime', {'key': 'sysinfo:uptime', 'data': uptime()})
            gevent.sleep(60)

    def send_distroinfo(self):
        while self.start:
            data = {'distro': self.os_distro_name, 'version': distro.version()}
            self.emit('distro_info',
                      {'key': 'sysinfo:distro_info', 'data': data})
            gevent.sleep(600)

    def send_localtime(self):
        while self.start:
            self.emit('localtime',
                      {
                          'key': 'sysinfo:localtime',
                          'data': time.strftime('%H:%M (%z %Z)')
                      })
            gevent.sleep(40)

    def send_kernel_info(self):
        try:
            self.emit('kernel_info',
                      {
                          'key': 'sysinfo:kernel_info',
                          'data': kernel_info(self.supported_kernel)
                      })
        # kernel_info() in above raises an Exception if the running
        # kernel != supported kernel and so:
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' %
                         e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('kernel_error', {
                'key': 'sysinfo:kernel_error', 'data': str(e)})
            self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        try:
            self.aw.api_call('rockons/update', data=None, calltype='post',
                             save_error=False)
        except Exception as e:
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())

    def update_storage_state(self):
        # update storage state once a minute as long as
        # there is a client connected.
        while self.start:
            resources = [{'url': 'disks/scan',
                          'success': 'Disk state updated successfully',
                          'error': 'Failed to update disk state.'},
                         {'url': 'commands/refresh-pool-state',
                          'success': 'Pool state updated successfully',
                          'error': 'Failed to update pool state.'},
                         {'url': 'commands/refresh-share-state',
                          'success': 'Share state updated successfully',
                          'error': 'Failed to update share state.'},
                         {'url': 'commands/refresh-snapshot-state',
                          # ......... rest of the code omitted here .........
Example 15: main
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        # to keep backwards compatibility, allow for share to be either
        # name or id and migrate the metadata. To be removed in #1854
        try:
            share = Share.objects.get(id=meta['share'])
        except ValueError:
            share = Share.objects.get(name=meta['share'])
            meta['share'] = share.id
            tdo.json_meta = json.dumps(meta)
            tdo.save()
        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        snap_created = False
        t.state = 'error'
        try:
            name = ('%s_%s'
                    % (meta['prefix'],
                       datetime.now().strftime(settings.SNAP_TS_FORMAT)))
            url = ('shares/{}/snapshots/{}'.format(share.id, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if (delete(aw, share, stype, prefix, max_count)):
                data = {'snap_type': stype,
                        'uvisible': meta['visible'],
                        'writable': meta['writable'], }
                headers = {'content-type': 'application/json'}
                aw.api_call(url, data=data, calltype='post', headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')