本文整理汇总了Python中sabnzbd.load_admin函数的典型用法代码示例。如果您正苦于以下问题:Python load_admin函数的具体用法?Python load_admin怎么用?Python load_admin使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了load_admin函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: read
def read(self):
    """ Read admin from disk, return True when pause is needed """
    res = False
    quota = self.left = cfg.quota_size.get_float()  # Quota for this period
    self.have_quota = bool(cfg.quota_size())
    data = sabnzbd.load_admin(BYTES_FILE_NAME)
    if not data:
        # Fall back to the statistics file under its previous name
        # and translate its keys to the current layout
        data = sabnzbd.load_admin(BYTES_FILE_NAME_OLD)
        data = fix_keys(data)
    try:
        # First 8 fields: timestamp plus per-server byte counters
        # and the period boundaries
        self.last_update, self.grand_total, \
            self.day_total, self.week_total, self.month_total, \
            self.end_of_day, self.end_of_week, self.end_of_month = data[:8]
        if len(data) >= 11:
            # Quota administration was saved as well
            self.quota, self.left, self.q_time = data[8:11]
            logging.debug('Read quota q=%s l=%s reset=%s',
                          self.quota, self.left, self.q_time)
            if abs(quota - self.quota) > 0.5:
                # Configured quota differs from the stored one
                self.change_quota()
            # Get timeline stats
            if len(data) == 12:
                self.timeline_total = data[11]
        else:
            # No quota data stored: start a fresh quota period
            self.quota = self.left = cfg.quota_size.get_float()
            res = self.reset_quota()
    except:
        # Unreadable or incompatible data: reinitialize all counters
        self.defaults()
    # Force update of counters and validate data
    try:
        for server in self.grand_total.keys():
            self.update(server)
    except TypeError:
        # Stored totals had an unexpected type: reinitialize
        self.defaults()
    self.update()
    return res
示例2: read
def read(self):
    """ Read admin from disk.

        Returns True when the quota was reset (pause may be needed).
    """
    # Fix: initialize res; previously it was left undefined when the
    # stored data already contained quota fields (len(data) == 11),
    # making the final `return res` raise NameError.
    res = False
    quota = self.left = cfg.quota_size.get_float()  # Quota for this period
    self.have_quota = bool(cfg.quota_size())
    data = sabnzbd.load_admin(BYTES_FILE_NAME)
    try:
        # First 8 fields: timestamp plus per-server byte counters
        # and the period boundaries
        self.last_update, self.grand_total, \
            self.day_total, self.week_total, self.month_total, \
            self.end_of_day, self.end_of_week, self.end_of_month = data[:8]
        if len(data) == 11:
            # Quota administration was saved as well
            self.quota, self.left, self.q_time = data[8:]
            logging.debug('Read quota q=%s l=%s reset=%s',
                          self.quota, self.left, self.q_time)
            if abs(quota - self.quota) > 0.5:
                # Configured quota differs from the stored one
                self.change_quota()
        else:
            # No quota data stored: start a fresh quota period
            self.quota = self.left = cfg.quota_size.get_float()
            res = self.reset_quota()
    except:
        # Get the latest data from the database and assign to a fake server
        logging.debug('Setting default BPS meter values')
        grand, month, week = sabnzbd.proxy_get_history_size()
        if grand:
            self.grand_total['x'] = grand
        if month:
            self.month_total['x'] = month
        if week:
            self.week_total['x'] = week
        self.quota = self.left = cfg.quota_size.get_float()
    # Force update of counters
    self.update()
    return res
示例3: read_queue
def read_queue(self, repair):
    """ Read queue from disk, supporting repair modes
        0 = no repairs
        1 = use existing queue, add missing "incomplete" folders
        2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Read the queue from the saved files
        data = sabnzbd.load_admin(QUEUE_FILE_NAME)
        if data:
            try:
                queue_vers, nzo_ids, dummy = data
                if not queue_vers == QUEUE_VERSION:
                    # Version mismatch: forget the stored job list
                    nzo_ids = []
                    logging.error(Ta("Incompatible queuefile found, cannot proceed"))
                    if not repair:
                        # Not repairing: alert the user and abort the program
                        panic_queue(os.path.join(cfg.cache_dir.get_path(), QUEUE_FILE_NAME))
                        exit_sab(2)
            except ValueError:
                # Stored tuple has the wrong shape: corrupt file
                nzo_ids = []
                logging.error(
                    Ta("Error loading %s, corrupt file detected"),
                    os.path.join(cfg.cache_dir.get_path(), QUEUE_FILE_NAME),
                )
                if not repair:
                    return

    # First handle jobs in the queue file
    folders = []
    for nzo_id in nzo_ids:
        # nzo_id may carry an alternative admin folder as path prefix
        folder, _id = os.path.split(nzo_id)
        # Try as normal job
        path = get_admin_path(bool(folder), folder, False)
        nzo = sabnzbd.load_data(_id, path, remove=False)
        if not nzo:
            # Try as future job
            path = get_admin_path(bool(folder), folder, True)
            nzo = sabnzbd.load_data(_id, path)
        if nzo:
            self.add(nzo, save=False, quiet=True)
            folders.append(folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        self.scan_jobs(not folders)
        # Handle any lost future jobs
        for path in globber(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            path, nzo_id = os.path.split(path)
            if nzo_id not in self.__nzo_table:
                nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                if nzo:
                    self.add(nzo, save=True)
示例4: read
def read(self):
    """ Restore the byte counters that were saved to disk """
    data = sabnzbd.load_admin(BYTES_FILE_NAME)
    try:
        # Saved data is one 8-tuple of counters and period boundaries
        (self.last_update, self.grand_total,
         self.day_total, self.week_total, self.month_total,
         self.end_of_day, self.end_of_week, self.end_of_month) = data
    except:
        # Get the latest data from the database and assign to a fake server
        grand, month, week = sabnzbd.proxy_get_history_size()
        if grand:
            self.grand_total['x'] = grand
        if month:
            self.month_total['x'] = month
        if week:
            self.week_total['x'] = week
    # Force update of counters
    self.update()
示例5: load
def load(self):
    """ Load the saved postprocessing queue from disk """
    # NOTE(review): original docstring said "Save", but this method reads.
    self.history_queue = []
    logging.info("Loading postproc queue")
    data = sabnzbd.load_admin(POSTPROC_QUEUE_FILE_NAME)
    if data is None:
        # Nothing saved yet
        return
    try:
        version, history_queue = data
        if POSTPROC_QUEUE_VERSION != version:
            logging.warning(Ta('Failed to load postprocessing queue: Wrong version (need:%s, found:%s)'), POSTPROC_QUEUE_VERSION, version)
        # NOTE(review): the queue is still loaded even when the version
        # check above failed (no elif) -- confirm this is intended
        if isinstance(history_queue, list):
            # Keep only jobs whose download folder still exists on disk
            self.history_queue = [nzo for nzo in history_queue if os.path.exists(nzo.downpath)]
    except:
        logging.info('Corrupt %s file, discarding', POSTPROC_QUEUE_FILE_NAME)
        logging.info("Traceback: ", exc_info = True)
示例6: load
def load(self):
    """ Load the saved postprocessing queue from disk """
    # NOTE(review): original docstring said "Save", but this method reads.
    self.history_queue = []
    logging.info("Loading postproc queue")
    data = sabnzbd.load_admin(POSTPROC_QUEUE_FILE_NAME)
    if data is None:
        # Nothing saved yet
        return
    try:
        version, history_queue = data
        if POSTPROC_QUEUE_VERSION != version:
            # Incompatible version: leave the queue empty, user must repair
            logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
        elif isinstance(history_queue, list):
            # Keep only jobs whose download folder still exists on disk
            self.history_queue = [nzo for nzo in history_queue if os.path.exists(nzo.downpath)]
    except:
        logging.info('Corrupt %s file, discarding', POSTPROC_QUEUE_FILE_NAME)
        logging.info("Traceback: ", exc_info=True)
示例7: read_queue
def read_queue(self, repair):
    """ Read queue from disk, supporting repair modes
        0 = no repairs
        1 = use existing queue, add missing "incomplete" folders
        2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Read the queue from the saved files
        data = sabnzbd.load_admin(QUEUE_FILE_NAME)
        # Process the data and check compatibility
        nzo_ids = self.check_compatibility(repair, data)

    # First handle jobs in the queue file
    folders = []
    for nzo_id in nzo_ids:
        # nzo_id may carry an alternative admin folder as path prefix
        folder, _id = os.path.split(nzo_id)
        path = get_admin_path(folder, future=False)
        # Try as normal job
        nzo = sabnzbd.load_data(_id, path, remove=False)
        if not nzo:
            # Try as future job
            path = get_admin_path(folder, future=True)
            nzo = sabnzbd.load_data(_id, path)
        if nzo:
            self.add(nzo, save=False, quiet=True)
            folders.append(folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        self.scan_jobs(not folders)
        # Handle any lost future jobs
        for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            path, nzo_id = os.path.split(item)
            if nzo_id not in self.__nzo_table:
                if nzo_id.startswith('SABnzbd_nzo'):
                    nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                    if nzo:
                        self.add(nzo, save=True)
                else:
                    # Not a job file: clean up the stray entry (best effort)
                    try:
                        remove_file(item)
                    except:
                        pass
示例8: __init__
def __init__(self):
    """ Restore the RSS job administration from disk """
    self.jobs = {}
    self.next_run = time.time()
    self.shutdown = False
    try:
        self.jobs = sabnzbd.load_admin(RSS_FILE_NAME)
        if self.jobs:
            # Drop entries for links no longer present in each feed
            for feed_name in self.jobs:
                feed_jobs = self.jobs[feed_name]
                remove_obsolete(feed_jobs, feed_jobs.keys())
    except:
        logging.warning(T('Cannot read %s'), RSS_FILE_NAME)
        logging.info("Traceback: ", exc_info=True)
    # Storage needs to be dict
    if not self.jobs:
        self.jobs = {}
示例9: __init__
def __init__(self):
    """ Restore the ratings administration; start clean on any problem """
    Rating.do = self
    self.shutdown = False
    self.queue = OrderedSetQueue()
    try:
        stored = sabnzbd.load_admin("Rating.sab",
                                    silent=not cfg.rating_enable())
        self.version, self.ratings, self.nzo_indexer_map = stored
        if self.version == 1:
            # Upgrade stored V1 ratings to the V2 record format
            upgraded = dict((key, NzbRatingV2().to_v2(val))
                            for key, val in self.ratings.iteritems())
            self.ratings = upgraded
            self.version = 2
        if self.version != Rating.VERSION:
            # Still not the supported version: treat as unusable
            raise Exception()
    except:
        # Missing/corrupt file or unsupported version: reset everything
        self.version = Rating.VERSION
        self.ratings = {}
        self.nzo_indexer_map = {}
    Thread.__init__(self)
示例10: __init__
def __init__(self):
    # Singleton reference to this instance
    Rating.do = self
    self.shutdown = False
    self.queue = OrderedSetQueue()
    try:
        (self.version, self.ratings, self.nzo_indexer_map) = sabnzbd.load_admin("Rating.sab")
        if self.version == 1:
            # Upgrade stored V1 ratings to the V2 record format
            ratings = {}
            for k, v in self.ratings.iteritems():
                ratings[k] = NzbRatingV2().to_v2(v)
            self.ratings = ratings
            self.version = 2
        if (self.version != Rating.VERSION):
            # Still not the supported version: treat as unusable
            raise Exception()
    except:
        # Missing/corrupt file or unsupported version: reset everything
        self.version = Rating.VERSION
        self.ratings = {}
        self.nzo_indexer_map = {}
    Thread.__init__(self)
    if not _HAVE_SSL:
        # Without SSL support the ratings server cannot be used
        logging.warning('Ratings server requires secure connection')
        self.stop()
示例11: __init__
def __init__(self):
    threading.Thread.__init__(self)
    # presumably sets self.dirscan_dir, which is compared below -- confirm
    self.newdir()
    try:
        # Restore ignored/suspected file administration for the watched folder
        dir, self.ignored, self.suspected = sabnzbd.load_admin(SCAN_FILE_NAME)
        if dir != self.dirscan_dir:
            # Watched folder changed since last run: stored state is stale
            self.ignored = {}
            self.suspected = {}
    except:
        self.ignored = {}    # Will hold all unusable files and the
                             # successfully processed ones that cannot be deleted
        self.suspected = {}  # Will hold name/attributes of suspected candidates
    self.shutdown = False
    self.error_reported = False  # Prevents multiple reporting of missing watched folder
    self.dirscan_dir = cfg.dirscan_dir.get_path()
    self.dirscan_speed = cfg.dirscan_speed()
    self.busy = False
    self.trigger = False
    # Re-read settings whenever the configuration changes
    cfg.dirscan_dir.callback(self.newdir)
    cfg.dirscan_speed.callback(self.newspeed)
    # Singleton reference to this instance
    DirScanner.do = self
示例12: __init__
def __init__(self):
    # NOTE(review): these two helpers are not referenced in this
    # version of the method -- possibly leftovers from an older variant
    def check_str(p):
        # Accept None, empty string or any string type
        return p is None or p == '' or isinstance(p, basestring)

    def check_int(p):
        # True when p can be converted to an integer
        try:
            int(p)
            return True
        except:
            return False

    self.jobs = {}
    self.next_run = time.time()
    self.shutdown = False
    try:
        # Only keep data for feeds that are still configured
        defined = config.get_rss().keys()
        feeds = sabnzbd.load_admin(RSS_FILE_NAME)
        if type(feeds) == type({}):
            for feed in feeds:
                if feed not in defined:
                    logging.debug('Dropping obsolete data for feed "%s"', feed)
                    continue
                self.jobs[feed] = {}
                for link in feeds[feed]:
                    data = feeds[feed][link]
                    # Consistency check on data
                    try:
                        item = feeds[feed][link]
                        if not isinstance(item, dict) or not isinstance(item.get('title'), unicode):
                            raise IndexError
                        self.jobs[feed][link] = item
                    except (KeyError, IndexError):
                        logging.info('Incorrect entry in %s detected, discarding %s', RSS_FILE_NAME, item)
                # Drop entries for links no longer present in the feed
                remove_obsolete(self.jobs[feed], self.jobs[feed].keys())
    except IOError:
        logging.debug('Cannot read file %s', RSS_FILE_NAME)
示例13: __init__
def __init__(self):
def check_str(p):
return p is None or p == '' or isinstance(p, basestring)
def check_int(p):
try:
int(p)
return True
except:
return False
self.jobs = {}
self.next_run = time.time()
try:
defined = config.get_rss().keys()
feeds = sabnzbd.load_admin(RSS_FILE_NAME)
if type(feeds) == type({}):
for feed in feeds:
if feed not in defined:
logging.debug('Dropping obsolete data for feed "%s"', feed)
continue
self.jobs[feed] = {}
for link in feeds[feed]:
data = feeds[feed][link]
if type(data) == type([]):
# Convert previous list-based store to dictionary
new = {}
try:
new['status'] = data[0]
new['title'] = data[1]
new['url'] = data[2]
new['cat'] = data[3]
new['pp'] = data[4]
new['script'] = data[5]
new['time'] = data[6]
new['prio'] = str(NORMAL_PRIORITY)
new['rule'] = 0
self.jobs[feed][link] = new
except IndexError:
del new
else:
# Consistency check on data
try:
item = feeds[feed][link]
if not isinstance(item, dict) or not isinstance(item.get('title'), unicode):
raise IndexError
if item.get('status', ' ')[0] not in ('D', 'G', 'B', 'X'):
item['status'] = 'X'
if not isinstance(item.get('url'), unicode):
item['url'] = ''
if not check_str(item.get('cat')):
item['cat'] = ''
if not check_str(item.get('orgcat')):
item['orgcat'] = ''
if not check_str(item.get('pp')):
item['pp'] = '3'
if not check_str(item.get('script')):
item['script'] = 'None'
if not check_str(item.get('prio')):
item['prio'] = '-100'
if not check_int(item.get('rule', 0)):
item['rule'] = 0
if not check_int(item.get('size', 0L)):
item['size'] = 0L
if not isinstance(item.get('time'), float):
item['time'] = time.time()
if not check_int(item.get('order', 0)):
item.get['order'] = 0
self.jobs[feed][link] = item
except (KeyError, IndexError):
logging.info('Incorrect entry in %s detected, discarding %s', RSS_FILE_NAME, item)
示例14: __init__
def __init__(self):
    """ Restore the bookmark list from disk, defaulting to an empty list """
    stored = sabnzbd.load_admin(BOOKMARK_FILE_NAME)
    self.bookmarks = stored if stored else []
    self.__busy = False
    # Singleton reference to this instance
    Bookmarks.do = self
示例15: read_queue
def read_queue(self, repair):
    """ Read queue from disk, supporting repair modes
        0 = no repairs
        1 = use existing queue, add missing "incomplete" folders
        2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Read the queue from the saved files
        data = sabnzbd.load_admin(QUEUE_FILE_NAME)
        if not data:
            try:
                # Try previous queue file
                queue_vers, nzo_ids, dummy = sabnzbd.load_admin(QUEUE_FILE_TMPL % '9')
            except:
                nzo_ids = []
            if nzo_ids:
                # An old-style queue exists but cannot be used directly
                logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
                nzo_ids = []
        else:
            try:
                queue_vers, nzo_ids, dummy = data
                if not queue_vers == QUEUE_VERSION:
                    # Version mismatch: forget the stored job list
                    nzo_ids = []
                    logging.error(T('Incompatible queuefile found, cannot proceed'))
                    if not repair:
                        # Not repairing: alert the user and abort the program
                        panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                        exit_sab(2)
            except ValueError:
                # Stored tuple has the wrong shape: corrupt file
                nzo_ids = []
                logging.error(T('Error loading %s, corrupt file detected'),
                              os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                if not repair:
                    return

    # First handle jobs in the queue file
    folders = []
    for nzo_id in nzo_ids:
        # nzo_id may carry an alternative admin folder as path prefix
        folder, _id = os.path.split(nzo_id)
        # Try as normal job
        path = get_admin_path(folder, False)
        nzo = sabnzbd.load_data(_id, path, remove=False)
        if not nzo:
            # Try as future job
            path = get_admin_path(folder, True)
            nzo = sabnzbd.load_data(_id, path)
        if nzo:
            self.add(nzo, save=False, quiet=True)
            folders.append(folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        self.scan_jobs(not folders)
        # Handle any lost future jobs
        for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            path, nzo_id = os.path.split(item)
            if nzo_id not in self.__nzo_table:
                if nzo_id.startswith('SABnzbd_nzo'):
                    nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                    if nzo:
                        self.add(nzo, save=True)
                else:
                    # Not a job file: remove the stray entry (best effort)
                    try:
                        os.remove(item)
                    except:
                        pass