本文整理汇总了Python中calibre.utils.ipc.server.Server.split方法的典型用法代码示例。如果您正苦于以下问题:Python Server.split方法的具体用法?Python Server.split怎么用?Python Server.split使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类calibre.utils.ipc.server.Server的用法示例。
在下文中一共展示了Server.split方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process_pages
# 需要导入模块: from calibre.utils.ipc.server import Server [as 别名]
# 或者: from calibre.utils.ipc.server.Server import split [as 别名]
def process_pages(pages, opts, update, tdir):
    '''
    Render all identified comic pages in parallel worker processes.

    :param pages: list of paths to the source comic page images
    :param opts: rendering options forwarded unchanged to each worker
    :param update: callback used by Progress to report per-page completion
    :param tdir: temporary directory the workers render pages into
    :return: tuple ``(rendered_pages, failures)`` aggregated from all jobs
    :raises Exception: if any worker job failed or produced no result
    '''
    progress = Progress(len(pages), update)
    server = Server()
    jobs = []
    # Partition the pages into one task list per worker process.
    # (The previous hand-built (page, dest) task list was dead code:
    # it was immediately overwritten by server.split().)
    tasks = server.split(pages)
    for task in tasks:
        jobs.append(ParallelJob('render_pages', '', progress,
            args=[task, tdir, opts]))
        server.add_job(jobs[-1])
    # Poll until every job reports finished, forwarding progress
    # notifications as they arrive.
    while True:
        time.sleep(1)
        running = False
        for job in jobs:
            # Drain all pending progress notifications for this job.
            while True:
                try:
                    x = job.notifications.get_nowait()
                    progress(*x)
                except Empty:
                    break
            job.update()
            if not job.is_finished:
                running = True
        if not running:
            break
    server.close()
    ans, failures = [], []
    for job in jobs:
        if job.failed or job.result is None:
            raise Exception(_('Failed to process comic: \n\n%s')%
                    job.log_file.read())
        pages, failures_ = job.result
        ans += pages
        failures += failures_
    return ans, failures
示例2: _run
# 需要导入模块: from calibre.utils.ipc.server import Server [as 别名]
# 或者: from calibre.utils.ipc.server.Server import split [as 别名]
def _run(self, tdir):
    '''
    Split the book ids across worker processes and save each batch to disk.

    Serializes the metadata for each batch to a JSON file in *tdir*,
    queues one ParallelJob per batch, polls until all jobs finish (or
    ``self.canceled`` is set), then pushes ``(id, title, ok, traceback)``
    result tuples onto ``self.result_queue``.
    '''
    from calibre.library.save_to_disk import config
    # Reuse a pre-warmed server if one was provided; spawning is expensive.
    server = Server() if self.spare_server is None else self.spare_server
    ids = set(self.ids)
    # Partition the ids into one task list per worker process.
    tasks = server.split(list(ids))
    jobs = set()
    c = config()
    # Snapshot the save-to-disk preferences from self.opts so the
    # workers see a plain dict rather than a live config object.
    recs = {}
    for pref in c.preferences:
        recs[pref.name] = getattr(self.opts, pref.name)
    plugboards = self.db.prefs.get('plugboards', {})
    template_functions = self.db.prefs.get('user_template_functions', [])
    for i, task in enumerate(tasks):
        # Each task item is a tuple whose last element is the book id.
        tids = [x[-1] for x in task]
        data = self.collect_data(tids, tdir)
        dpath = os.path.join(tdir, '%d.json'%i)
        with open(dpath, 'wb') as f:
            f.write(json.dumps(data, ensure_ascii=False).encode('utf-8'))
        job = ParallelJob('save_book',
                'Save books (%d of %d)'%(i, len(tasks)),
                lambda x,y:x,
                args=[tids, dpath, plugboards, template_functions, self.path, recs])
        jobs.add(job)
        server.add_job(job)
    # Poll until every job is finished or the user cancels.
    while not self.canceled:
        time.sleep(0.2)
        running = False
        for job in jobs:
            self.get_notifications(job, ids)
            if not job.is_finished:
                running = True
        if not running:
            break
    for job in jobs:
        if not job.result:
            continue
        for id_, title, ok, tb in job.result:
            if id_ in ids:
                self.result_queue.put((id_, title, ok, tb))
                ids.remove(id_)
    server.close()
    time.sleep(1)
    if self.canceled:
        return
    for job in jobs:
        if job.failed:
            prints(job.details)
            self.error = job.details
        if os.path.exists(job.log_path):
            # Best-effort cleanup of the worker log file; only
            # os.remove can fail here, so catch OSError rather
            # than using a bug-hiding bare except.
            try:
                os.remove(job.log_path)
            except OSError:
                pass