This article collects typical usage examples of the Server class from the Python module calibre.utils.ipc.server. If you have been wondering what the Server class does and how to use it, the curated examples here may help.
Shown below are 11 code examples of the Server class, ordered by popularity by default.
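Before the examples, here is a minimal sketch of the pattern nearly all of them share: create a Server, queue a ParallelJob, poll it with job.update() until it finishes, then close the server. It is distilled from the examples below and uses only calls that appear in them; the module path 'my_plugin.jobs' and function name 'do_work' are hypothetical placeholders, and ParallelJob is assumed to be importable from calibre.utils.ipc.job as in calibre's source.

import time
from calibre.utils.ipc.server import Server
from calibre.utils.ipc.job import ParallelJob

server = Server(pool_size=1)
# 'arbitrary' jobs take a (module, function, args) triple, as in
# Examples 4, 8 and 11 below; the worker process imports the module
# and calls the named function with those arguments.
job = ParallelJob('arbitrary', 'A human-readable description', done=None,
                  args=['my_plugin.jobs', 'do_work', ('some', 'args')])
server.add_job(job)
try:
    while not job.is_finished:
        time.sleep(0.1)
        job.update()  # pumps the job's status and notification queues
finally:
    server.close()
if job.failed:
    print(job.details)  # worker traceback / log output
else:
    print(job.result)   # whatever do_work() returned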
Example 1: run
def run(self):
job = ParallelJob('move_library',
'Move library from %s to %s'%(self.from_, self.to),
lambda x,y:x,
args=[self.from_, self.to])
server = Server(pool_size=1)
server.add_job(job)
while not job.is_finished:
time.sleep(0.2)
job.update(consume_notifications=False)
while True:
try:
title = job.notifications.get_nowait()[0]
self.count += 1
self.result_queue.put((float(self.count)/self.total, title))
except Empty:
break
job.update()
server.close()
if not job.result:
self.failed = True
self.details = job.details
if os.path.exists(job.log_path):
os.remove(job.log_path)
Example 2: get_metadata
def get_metadata(stream, cpath=None):
if not podofo:
raise Unavailable(podofo_err)
pt = PersistentTemporaryFile('_podofo.pdf')
pt.write(stream.read())
pt.close()
server = Server(pool_size=1)
job = ParallelJob('read_pdf_metadata', 'Read pdf metadata',
lambda x,y:x, args=[pt.name, cpath])
server.add_job(job)
while not job.is_finished:
time.sleep(0.1)
job.update()
job.update()
server.close()
if job.result is None:
raise ValueError('Failed to read metadata: ' + job.details)
title, authors, creator, tags, ok = job.result
if not ok:
        print('Failed to extract cover:')
        print(job.details)
if title == '_':
title = getattr(stream, 'name', _('Unknown'))
title = os.path.splitext(title)[0]
mi = MetaInformation(title, authors)
if creator:
mi.book_producer = creator
if tags:
mi.tags = tags
if os.path.exists(pt.name): os.remove(pt.name)
if ok:
mi.cover = cpath
return mi
Example 3: set_metadata
def set_metadata(stream, mi):
if not podofo:
raise Unavailable(podofo_err)
with TemporaryFile('_podofo_read.pdf') as inputf, \
TemporaryFile('_podofo_write.pdf') as outputf:
server = Server(pool_size=1)
with open(inputf, 'wb') as f:
shutil.copyfileobj(stream, f)
job = ParallelJob('write_pdf_metadata', 'Write pdf metadata',
lambda x,y:x, args=[inputf, outputf, mi.title, mi.authors,
mi.book_producer, mi.tags])
server.add_job(job)
while not job.is_finished:
time.sleep(0.1)
job.update()
job.update()
server.close()
if job.failed:
prints(job.details)
elif job.result:
with open(outputf, 'rb') as f:
f.seek(0, 2)
if f.tell() > 100:
f.seek(0)
stream.seek(0)
stream.truncate()
shutil.copyfileobj(f, stream)
stream.flush()
stream.seek(0)
Example 4: do_store_locations
def do_store_locations(books_to_scan, options, notification=lambda x,y:x):
'''
Master job, to launch child jobs to modify each ePub
'''
debug_print("start")
server = Server()
debug_print("options=%s" % (options))
# Queue all the jobs
# args = ['calibre_plugins.sonyutilities.jobs', 'do_sonyutilities_all',
args = ['calibre_plugins.sonyutilities.jobs', 'do_store_bookmarks',
(books_to_scan, options)]
# debug_print("args=%s" % (args))
debug_print("len(books_to_scan)=%d" % (len(books_to_scan)))
job = ParallelJob('arbitrary', "Store locations", done=None, args=args)
server.add_job(job)
    # This master job is itself run as an 'arbitrary_n' job, so a notification
    # callback is available. Set the % complete to a small number to avoid the
    # 'unavailable' indicator.
notification(0.01, 'Reading device database')
# dequeue the job results as they arrive, saving the results
total = 1
count = 0
stored_locations = dict()
while True:
job = server.changed_jobs_queue.get()
# A job can 'change' when it is not finished, for example if it
# produces a notification. Ignore these.
job.update()
if not job.is_finished:
debug_print("Job not finished")
continue
# debug_print("Job finished")
# A job really finished. Get the information.
stored_locations = job.result
        # import pydevd; pydevd.settrace()  # leftover debugger hook; keep disabled
# book_id = job._book_id
# stored_locations[book_id] = stored_location
count += 1
notification(float(count)/total, 'Storing locations')
# Add this job's output to the current log
#debug_print("Stored_location=", stored_locations)
number_bookmarks = len(stored_locations) if stored_locations else 0
debug_print("Stored_location count=%d" % number_bookmarks)
debug_print(job.details)
if count >= total:
# All done!
break
server.close()
debug_print("finished")
# return the map as the job result
return stored_locations, options
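Examples 4, 8 and 11 share an event-driven variant of the wait loop: rather than sleeping and polling each job, the master blocks on server.changed_jobs_queue and skips wake-ups for jobs that have merely produced a notification. A condensed sketch of just that skeleton, assuming `total` jobs were already queued with server.add_job():

count = 0
while True:
    job = server.changed_jobs_queue.get()  # blocks until some job changes state
    job.update()  # refresh is_finished / result / details
    if not job.is_finished:
        continue  # the job only produced a notification; keep waiting
    count += 1
    # ... consume job.result here ...
    if count >= total:
        break
server.close()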
Example 5: run
def run(self):
jobs, ids = set([]), set([])
for t in self.tasks:
for b in t:
ids.add(b[0])
progress = Progress(self.result_queue, self.tdir)
server = Server() if self.spare_server is None else self.spare_server
try:
for i, task in enumerate(self.tasks):
job = ParallelJob('read_metadata',
'Read metadata (%d of %d)'%(i, len(self.tasks)),
lambda x,y:x, args=[task, self.tdir])
jobs.add(job)
server.add_job(job)
while not self.canceled:
time.sleep(0.2)
running = False
for job in jobs:
while True:
try:
id = job.notifications.get_nowait()[-1]
if id in ids:
progress(id)
ids.remove(id)
except Empty:
break
job.update(consume_notifications=False)
if not job.is_finished:
running = True
if not running:
break
finally:
server.close()
time.sleep(1)
if self.canceled:
return
for id in ids:
progress(id)
for job in jobs:
if job.failed:
prints(job.details)
if os.path.exists(job.log_path):
try:
os.remove(job.log_path)
except:
pass
Example 6: __init__
def __init__(self):
QAbstractTableModel.__init__(self)
SearchQueryParser.__init__(self, ["all"])
self.wait_icon = QVariant(QIcon(I("jobs.png")))
self.running_icon = QVariant(QIcon(I("exec.png")))
self.error_icon = QVariant(QIcon(I("dialog_error.png")))
self.done_icon = QVariant(QIcon(I("ok.png")))
self.jobs = []
self.add_job = Dispatcher(self._add_job)
self.server = Server(limit=int(config["worker_limit"] / 2.0), enforce_cpu_limit=config["enforce_cpu_limit"])
self.threaded_server = ThreadedJobServer()
self.changed_queue = Queue()
self.timer = QTimer(self)
self.timer.timeout.connect(self.update, type=Qt.QueuedConnection)
self.timer.start(1000)
Example 7: process_pages
def process_pages(pages, opts, update, tdir):
'''
Render all identified comic pages.
'''
progress = Progress(len(pages), update)
server = Server()
jobs = []
    # shard the pages into at most one task per worker process
    tasks = server.split(pages)
for task in tasks:
jobs.append(ParallelJob('render_pages', '', progress,
args=[task, tdir, opts]))
server.add_job(jobs[-1])
while True:
time.sleep(1)
running = False
for job in jobs:
while True:
try:
x = job.notifications.get_nowait()
progress(*x)
except Empty:
break
job.update()
if not job.is_finished:
running = True
if not running:
break
server.close()
ans, failures = [], []
for job in jobs:
if job.failed or job.result is None:
raise Exception(_('Failed to process comic: \n\n%s')%
job.log_file.read())
pages, failures_ = job.result
ans += pages
failures += failures_
return ans, failures
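A note on server.split(), used above and again in Example 10: judging from those call sites, it shards a list into at most one sublist per worker, and each element becomes an (index, item) 2-tuple, which is why Example 10 recovers the original items with x[-1]. A hypothetical illustration:

tasks = server.split(['a.png', 'b.png', 'c.png'])
# e.g. with two workers, something like:
#   [[(0, 'a.png'), (2, 'c.png')], [(1, 'b.png')]]
# (the exact grouping depends on the number of available workers)
for task in tasks:
    items = [x[-1] for x in task]  # recover the original list elements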
Example 8: do_download_worker
def do_download_worker(book_list,
options,
cpus,
merge=False,
notification=lambda x,y:x):
'''
Master job, to launch child jobs to extract ISBN for a set of books
This is run as a worker job in the background to keep the UI more
responsive and get around the memory leak issues as it will launch
a child job for each book as a worker process
'''
server = Server(pool_size=cpus)
logger.info(options['version'])
total = 0
alreadybad = []
# Queue all the jobs
logger.info("Adding jobs for URLs:")
for book in book_list:
logger.info("%s"%book['url'])
if book['good']:
total += 1
args = ['calibre_plugins.fanficfare_plugin.jobs',
'do_download_for_worker',
(book,options,merge)]
job = ParallelJob('arbitrary_n',
"url:(%s) id:(%s)"%(book['url'],book['calibre_id']),
done=None,
args=args)
job._book = book
server.add_job(job)
else:
# was already bad before the subprocess ever started.
alreadybad.append(book)
    # This master job is itself run as an 'arbitrary_n' job, so a notification
    # callback is available. Set the % complete to a small number to avoid the
    # 'unavailable' indicator.
notification(0.01, _('Downloading FanFiction Stories'))
# dequeue the job results as they arrive, saving the results
count = 0
while True:
job = server.changed_jobs_queue.get()
# A job can 'change' when it is not finished, for example if it
# produces a notification. Ignore these.
job.update()
if not job.is_finished:
continue
# A job really finished. Get the information.
book_list.remove(job._book)
book_list.append(job.result)
book_id = job._book['calibre_id']
count = count + 1
notification(float(count)/total, '%d of %d stories finished downloading'%(count,total))
# Add this job's output to the current log
logger.info('Logfile for book ID %s (%s)'%(book_id, job._book['title']))
logger.info(job.details)
if count >= total:
## ordering first by good vs bad, then by listorder.
            good_list = [x for x in book_list if x['good']]
            bad_list = [x for x in book_list if not x['good']]
            good_list = sorted(good_list, key=lambda x: x['listorder'])
            bad_list = sorted(bad_list, key=lambda x: x['listorder'])
logger.info("\n"+_("Download Results:")+"\n%s\n"%("\n".join([ "%(url)s %(comment)s" % book for book in good_list+bad_list])))
logger.info("\n"+_("Successful:")+"\n%s\n"%("\n".join([book['url'] for book in good_list])))
logger.info("\n"+_("Unsuccessful:")+"\n%s\n"%("\n".join([book['url'] for book in bad_list])))
break
server.close()
# return the book list as the job result
return book_list
Example 9: JobManager
class JobManager(QAbstractTableModel, SearchQueryParser): # {{{
job_added = pyqtSignal(int)
job_done = pyqtSignal(int)
def __init__(self):
QAbstractTableModel.__init__(self)
SearchQueryParser.__init__(self, ['all'])
self.wait_icon = QVariant(QIcon(I('jobs.png')))
self.running_icon = QVariant(QIcon(I('exec.png')))
self.error_icon = QVariant(QIcon(I('dialog_error.png')))
self.done_icon = QVariant(QIcon(I('ok.png')))
self.jobs = []
self.add_job = Dispatcher(self._add_job)
self.server = Server(limit=int(config['worker_limit']/2.0),
enforce_cpu_limit=config['enforce_cpu_limit'])
self.threaded_server = ThreadedJobServer()
self.changed_queue = Queue()
self.timer = QTimer(self)
self.timer.timeout.connect(self.update, type=Qt.QueuedConnection)
self.timer.start(1000)
def columnCount(self, parent=QModelIndex()):
return 5
def rowCount(self, parent=QModelIndex()):
return len(self.jobs)
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return NONE
if orientation == Qt.Horizontal:
return QVariant({
0: _('Job'),
1: _('Status'),
2: _('Progress'),
3: _('Running time'),
4: _('Start time'),
}.get(section, ''))
else:
return QVariant(section+1)
def show_tooltip(self, arg):
widget, pos = arg
QToolTip.showText(pos, self.get_tooltip())
def get_tooltip(self):
running_jobs = [j for j in self.jobs if j.run_state == j.RUNNING]
waiting_jobs = [j for j in self.jobs if j.run_state == j.WAITING]
lines = [_('There are %d running jobs:')%len(running_jobs)]
for job in running_jobs:
desc = job.description
if not desc:
desc = _('Unknown job')
p = 100. if job.is_finished else job.percent
lines.append('%s: %.0f%% done'%(desc, p))
lines.extend(['', _('There are %d waiting jobs:')%len(waiting_jobs)])
for job in waiting_jobs:
desc = job.description
if not desc:
desc = _('Unknown job')
lines.append(desc)
return '\n'.join(['calibre', '']+ lines)
def data(self, index, role):
try:
if role not in (Qt.DisplayRole, Qt.DecorationRole):
return NONE
row, col = index.row(), index.column()
job = self.jobs[row]
if role == Qt.DisplayRole:
if col == 0:
desc = job.description
if not desc:
desc = _('Unknown job')
return QVariant(desc)
if col == 1:
return QVariant(job.status_text)
if col == 2:
p = 100. if job.is_finished else job.percent
return QVariant(p)
if col == 3:
rtime = job.running_time
if rtime is None:
return NONE
return QVariant('%dm %ds'%(int(rtime)//60, int(rtime)%60))
if col == 4 and job.start_time is not None:
return QVariant(time.strftime('%H:%M -- %d %b', time.localtime(job.start_time)))
if role == Qt.DecorationRole and col == 0:
state = job.run_state
if state == job.WAITING:
return self.wait_icon
if state == job.RUNNING:
return self.running_icon
if job.killed or job.failed:
return self.error_icon
#......... (the rest of this class is omitted here) .........
Example 10: _run
def _run(self, tdir):
from calibre.library.save_to_disk import config
server = Server() if self.spare_server is None else self.spare_server
ids = set(self.ids)
tasks = server.split(list(ids))
jobs = set([])
c = config()
recs = {}
for pref in c.preferences:
recs[pref.name] = getattr(self.opts, pref.name)
plugboards = self.db.prefs.get('plugboards', {})
template_functions = self.db.prefs.get('user_template_functions', [])
for i, task in enumerate(tasks):
tids = [x[-1] for x in task]
data = self.collect_data(tids, tdir)
dpath = os.path.join(tdir, '%d.json'%i)
with open(dpath, 'wb') as f:
f.write(json.dumps(data, ensure_ascii=False).encode('utf-8'))
job = ParallelJob('save_book',
'Save books (%d of %d)'%(i, len(tasks)),
lambda x,y:x,
args=[tids, dpath, plugboards, template_functions, self.path, recs])
jobs.add(job)
server.add_job(job)
while not self.canceled:
time.sleep(0.2)
running = False
for job in jobs:
self.get_notifications(job, ids)
if not job.is_finished:
running = True
if not running:
break
for job in jobs:
if not job.result:
continue
for id_, title, ok, tb in job.result:
if id_ in ids:
self.result_queue.put((id_, title, ok, tb))
ids.remove(id_)
server.close()
time.sleep(1)
if self.canceled:
return
for job in jobs:
if job.failed:
prints(job.details)
self.error = job.details
if os.path.exists(job.log_path):
try:
os.remove(job.log_path)
except:
pass
Example 11: do_count_statistics
def do_count_statistics(books_to_scan, pages_algorithm, use_goodreads, nltk_pickle, cpus, notification=lambda x, y: x):
"""
Master job, to launch child jobs to count pages in this list of books
"""
server = Server(pool_size=cpus)
# Queue all the jobs
for book_id, title, book_path, goodreads_id, statistics_to_run in books_to_scan:
args = [
"calibre_plugins.count_pages.jobs",
"do_statistics_for_book",
(book_path, pages_algorithm, goodreads_id, use_goodreads, statistics_to_run, nltk_pickle),
]
job = ParallelJob("arbitrary", str(book_id), done=None, args=args)
job._book_id = book_id
job._title = title
job._pages_algorithm = pages_algorithm
job._goodreads_id = goodreads_id
job._use_goodreads = use_goodreads
job._statistics_to_run = statistics_to_run
server.add_job(job)
    # This master job is itself run as an 'arbitrary_n' job, so a notification
    # callback is available. Set the % complete to a small number to avoid the
    # 'unavailable' indicator.
notification(0.01, "Counting Statistics")
# dequeue the job results as they arrive, saving the results
total = len(books_to_scan)
count = 0
book_stats_map = dict()
while True:
job = server.changed_jobs_queue.get()
# A job can 'change' when it is not finished, for example if it
# produces a notification. Ignore these.
job.update()
if not job.is_finished:
continue
# A job really finished. Get the information.
results = job.result
book_id = job._book_id
book_stats_map[book_id] = results
count = count + 1
notification(float(count) / total, "Counting Statistics")
# Add this job's output to the current log
print("-------------------------------")
print("Logfile for book ID %d (%s)" % (book_id, job._title))
for stat in job._statistics_to_run:
if stat == cfg.STATISTIC_PAGE_COUNT:
if job._use_goodreads:
if job._goodreads_id is not None:
if stat in results and results[stat]:
print("\tGoodreads edition has %d pages" % results[stat])
else:
print("\tFAILED TO GET PAGE COUNT FROM GOODREADS")
else:
if stat in results and results[stat]:
print("\tFound %d pages" % results[stat])
elif stat == cfg.STATISTIC_WORD_COUNT:
if stat in results and results[stat]:
print("\tFound %d words" % results[stat])
elif stat == cfg.STATISTIC_FLESCH_READING:
if stat in results and results[stat]:
print("\tComputed %.1f Flesch Reading" % results[stat])
elif stat == cfg.STATISTIC_FLESCH_GRADE:
if stat in results and results[stat]:
print("\tComputed %.1f Flesch-Kincaid Grade" % results[stat])
elif stat == cfg.STATISTIC_GUNNING_FOG:
if stat in results and results[stat]:
print("\tComputed %.1f Gunning Fog Index" % results[stat])
print(job.details)
if count >= total:
# All done!
break
server.close()
# return the map as the job result
return book_stats_map