本文整理汇总了Python中polyglot.queue.Queue.put方法的典型用法代码示例。如果您正苦于以下问题:Python Queue.put方法的具体用法?Python Queue.put怎么用?Python Queue.put使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类polyglot.queue.Queue
的用法示例。
在下文中一共展示了Queue.put方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Watcher
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class Watcher(WatcherBase):
    """Watch a set of directory trees for file changes via OS X FSEvents.

    Other threads request a forced restart by calling wakeup(); loop()
    blocks on an internal queue with a timeout so that it can both react
    to those requests and be interrupted by Ctrl-C.
    """

    def __init__(self, root_dirs, worker, log):
        WatcherBase.__init__(self, worker, log)
        # FSEvents expects bytes paths; file_events=True reports individual
        # file changes rather than only directory-level changes
        self.stream = Stream(self.notify, *(x.encode('utf-8') for x in root_dirs), file_events=True)
        self.wait_queue = Queue()

    def wakeup(self):
        # Called from other threads to request a forced restart
        self.wait_queue.put(True)

    def loop(self):
        observer = Observer()
        observer.schedule(self.stream)
        observer.daemon = True
        observer.start()
        try:
            while True:
                try:
                    # Cannot use an indefinitely blocking get() as it is not
                    # interrupted by Ctrl-C. BUG FIX: the original code called
                    # get(10000), which passes 10000 as the *block* argument of
                    # Queue.get(block, timeout) and therefore blocked forever
                    # (Empty could never be raised). Pass it as the timeout.
                    if self.wait_queue.get(True, 10000) is True:
                        self.force_restart()
                except Empty:
                    pass
        finally:
            observer.unschedule(self.stream)
            observer.stop()

    def notify(self, ev):
        # FSEvents may deliver bytes names; normalize to text before lookup
        name = ev.name
        if isinstance(name, bytes):
            name = name.decode('utf-8')
        if self.file_is_watched(name):
            self.handle_modified({name})
示例2: DBThread
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class DBThread(Thread):
    """Serializes all access to an SQLite connection onto a single thread.

    Requests are (func, args, kwargs) tuples placed on self.requests; the
    matching (ok, result) pair is placed on self.results. Both queues have
    maxsize 1 so callers proceed in lock-step with this thread. A fatal
    error in the loop is recorded in self.unhandled_error.
    """

    # Sentinel request value that closes the connection and stops the thread
    CLOSE = '-------close---------'

    def __init__(self, path, row_factory):
        Thread.__init__(self)
        # Thread.setDaemon() is deprecated (since Python 3.10); assign the
        # daemon attribute directly instead
        self.daemon = True
        self.path = path
        self.unhandled_error = (None, '')
        self.row_factory = row_factory
        self.requests = Queue(1)
        self.results = Queue(1)
        self.conn = None

    def connect(self):
        # Deferred to run() so that the sqlite connection is created on the
        # same thread that uses it
        self.conn = do_connect(self.path, self.row_factory)

    def run(self):
        try:
            self.connect()
            while True:
                func, args, kwargs = self.requests.get()
                if func == self.CLOSE:
                    self.conn.close()
                    break
                if func == 'dump':
                    try:
                        ok, res = True, tuple(self.conn.iterdump())
                    except Exception as err:
                        ok, res = False, (err, traceback.format_exc())
                elif func == 'create_dynamic_filter':
                    try:
                        f = DynamicFilter(args[0])
                        self.conn.create_function(args[0], 1, f)
                        ok, res = True, f
                    except Exception as err:
                        ok, res = False, (err, traceback.format_exc())
                else:
                    # Generic request: call the named method on the connection
                    bfunc = getattr(self.conn, func)
                    try:
                        for i in range(3):
                            try:
                                ok, res = True, bfunc(*args, **kwargs)
                                break
                            except OperationalError as err:
                                # Retry if unable to open db file
                                e = str(err)
                                if 'unable to open' not in e or i == 2:
                                    if 'unable to open' in e:
                                        prints('Unable to open database for func',
                                            func, reprlib.repr(args),
                                            reprlib.repr(kwargs))
                                    raise
                                time.sleep(0.5)
                    except Exception as err:
                        ok, res = False, (err, traceback.format_exc())
                self.results.put((ok, res))
        except Exception as err:
            self.unhandled_error = (err, traceback.format_exc())
示例3: ConnectedWorker
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class ConnectedWorker(Thread):
    """Monitors a running worker process over an IPC connection, relaying
    any progress notifications it sends into self.notifications."""

    def __init__(self, worker, conn, rfile):
        Thread.__init__(self)
        self.daemon = True
        self.conn = conn
        self.worker = worker
        self.notifications = Queue()
        self._returncode = 'dummy'  # sentinel: real exit code not yet known
        self.killed = False
        self.log_path = worker.log_path
        self.rfile = rfile
        self.close_log_file = getattr(worker, 'close_log_file', None)

    def start_job(self, job):
        wants_notifications = PARALLEL_FUNCS[job.name][-1] is not None
        payload = (job.name, job.args, job.kwargs, job.description)
        eintr_retry_call(self.conn.send, payload)
        if wants_notifications:
            # run() will relay notifications from the worker
            self.start()
        else:
            # No notifications expected, the connection is not needed
            self.conn.close()
        self.job = job

    def run(self):
        # Relay messages from the worker until the connection dies
        while True:
            try:
                self.notifications.put(eintr_retry_call(self.conn.recv))
            except BaseException:
                break
        try:
            self.conn.close()
        except BaseException:
            pass

    def kill(self):
        self.killed = True
        try:
            self.worker.kill()
        except BaseException:
            pass

    @property
    def is_alive(self):
        return not self.killed and self.worker.is_alive

    @property
    def returncode(self):
        if self._returncode != 'dummy':
            return self._returncode  # cached value
        code = self.worker.returncode
        if self.killed and code is None:
            # Killed before the process reported an exit code
            self._returncode = 1
            return 1
        if code is not None:
            self._returncode = code
        return code
示例4: compress_images
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
def compress_images(container, report=None, names=None, jpeg_quality=None, progress_callback=lambda n, t, name:True):
    # Compress all compressible images in the container, optionally limited
    # to ``names``, fanning the work out over one Worker thread per CPU
    # (capped at the number of images). Returns (changed, results) where
    # results maps image name -> (ok, payload).
    images = get_compressible_images(container)
    if names is not None:
        images &= set(names)
    results = {}
    queue = Queue()
    abort = Event()
    for name in images:
        queue.put(name)

    def pc(name):
        # Called by workers after each image; a falsy return from the
        # progress callback aborts the remaining work
        keep_going = progress_callback(len(results), len(images), name)
        if not keep_going:
            abort.set()
    progress_callback(0, len(images), '')
    # Worker threads start themselves; the list itself is intentionally unused
    [Worker(abort, 'CompressImage%d' % i, queue, results, container, jpeg_quality, pc) for i in range(min(detect_ncpus(), len(images)))]
    queue.join()  # wait until every queued image has been processed
    before_total = after_total = 0
    changed = False
    for name, (ok, res) in iteritems(results):
        name = force_unicode(name, filesystem_encoding)
        if ok:
            before, after = res
            if before != after:
                changed = True
            before_total += before
            after_total += after
            if report:
                if before != after:
                    report(_('{0} compressed from {1} to {2} bytes [{3:.1%} reduction]').format(
                        name, human_readable(before), human_readable(after), (before - after)/before))
                else:
                    report(_('{0} could not be further compressed').format(name))
        else:
            # NOTE(review): unlike the success branch, this calls report
            # without checking it is not None — verify callers always pass
            # a report function when failures are possible
            report(_('Failed to process {0} with error:').format(name))
            report(res)
    if report:
        if changed:
            report('')
            report(_('Total image filesize reduced from {0} to {1} [{2:.1%} reduction]').format(
                human_readable(before_total), human_readable(after_total), (before_total - after_total)/before_total))
        else:
            report(_('Images are already fully optimized'))
    return changed, results
示例5: Progress
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class Progress(Thread):
    """Forwards (percent, msg) progress reports to a worker connection.

    __call__ may be invoked from any thread; the actual conn.send happens
    on this daemon thread. Put None on self.queue to stop the thread.
    """

    def __init__(self, conn):
        Thread.__init__(self)
        self.daemon = True
        self.conn = conn
        self.queue = Queue()

    def __call__(self, percent, msg=''):
        # Queue a progress report for asynchronous delivery
        self.queue.put((percent, msg))

    def run(self):
        while True:
            x = self.queue.get()
            if x is None:  # sentinel: shut down
                break
            try:
                eintr_retry_call(self.conn.send, x)
            except Exception:
                # Was a bare except:, which would also have swallowed
                # KeyboardInterrupt/SystemExit. Any send failure still ends
                # the loop, as the connection is assumed to be dead.
                break
示例6: CoverWorker
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class CoverWorker(Thread):  # {{{
    """Downloads covers for a single book in a forked worker process.

    Results are streamed back through the filesystem: the forked worker
    writes ``*.cover`` files into a temporary directory and this thread
    polls that directory, pushing (plugin_name, width, height, fmt, data)
    tuples onto self.rq as they become ready.
    """

    def __init__(self, log, abort, title, authors, identifiers, caches):
        Thread.__init__(self)
        self.daemon = True
        self.log, self.abort = log, abort
        self.title, self.authors, self.identifiers = (title, authors,
                identifiers)
        self.caches = caches
        self.rq = Queue()  # receives one tuple per downloaded cover
        self.error = None  # set to a traceback string on failure

    def fake_run(self):
        # Debug mode: pretend each cover plugin returned a builtin image
        images = ['donate.png', 'config.png', 'column.png', 'eject.png', ]
        time.sleep(2)
        for pl, im in zip(metadata_plugins(['cover']), images):
            self.rq.put((pl.name, 1, 1, 'png', I(im, data=True)))

    def run(self):
        try:
            if DEBUG_DIALOG:
                self.fake_run()
            else:
                self.run_fork()
        except WorkerError as e:
            self.error = force_unicode(e.orig_tb)
        except:
            import traceback
            self.error = force_unicode(traceback.format_exc())

    def run_fork(self):
        # Launch the download in a separate process, watching the temp dir
        # for results while it runs
        with TemporaryDirectory('_single_metadata_download') as tdir:
            self.keep_going = True
            t = Thread(target=self.monitor_tdir, args=(tdir,))
            t.daemon = True
            t.start()
            try:
                res = fork_job('calibre.ebooks.metadata.sources.worker',
                        'single_covers',
                        (self.title, self.authors, self.identifiers, self.caches,
                            tdir),
                        no_output=True, abort=self.abort)
                self.log.append_dump(res['result'])
            finally:
                self.keep_going = False
                t.join()

    def scan_once(self, tdir, seen):
        # A cover is ready once both name.cover and name.cover.done exist;
        # the filename encodes plugin name, dimensions and image format
        for x in list(os.listdir(tdir)):
            if x in seen:
                continue
            if x.endswith('.cover') and os.path.exists(os.path.join(tdir,
                    x+'.done')):
                name = x.rpartition('.')[0]
                try:
                    plugin_name, width, height, fmt = name.split(',,')
                    width, height = int(width), int(height)
                    with open(os.path.join(tdir, x), 'rb') as f:
                        data = f.read()
                except:
                    import traceback
                    traceback.print_exc()
                else:
                    seen.add(x)
                    self.rq.put((plugin_name, width, height, fmt, data))

    def monitor_tdir(self, tdir):
        seen = set()
        while self.keep_going:
            time.sleep(1)
            self.scan_once(tdir, seen)
        # One last scan after the download process has ended
        self.scan_once(tdir, seen)
示例7: Pool
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class Pool(Thread):
    """A pool of worker processes that runs jobs submitted via __call__.

    NOTE: this listing is truncated; the body of run() continues beyond
    what is shown here.
    """

    daemon = True

    def __init__(self, max_workers=None, name=None):
        Thread.__init__(self, name=name)
        self.max_workers = max_workers or detect_ncpus()
        self.available_workers = []
        self.busy_workers = {}
        self.pending_jobs = []
        self.events = Queue()   # jobs/control messages for the event loop
        self.results = Queue()  # completed job results
        self.tracker = Queue()  # one entry per queued job, used by wait_for_tasks()
        self.terminal_failure = None
        self.common_data = pickle_dumps(None)
        self.worker_data = None
        self.shutting_down = False
        self.start()

    def set_common_data(self, data=None):
        ''' Set some data that will be passed to all subsequent jobs without
        needing to be transmitted every time. You must call this method before
        queueing any jobs, otherwise the behavior is undefined. You can call it
        after all jobs are done, then it will be used for the new round of
        jobs. Can raise the :class:`Failure` exception if data could not be
        sent to workers.'''
        if self.failed:
            raise Failure(self.terminal_failure)
        self.events.put(data)

    def __call__(self, job_id, module, func, *args, **kwargs):
        '''
        Schedule a job. The job will be run in a worker process, with the
        result placed in self.results. If a terminal failure has occurred
        previously, this method will raise the :class:`Failure` exception.

        :param job_id: A unique id for the job. The result will have this id.
        :param module: Either a fully qualified python module name or python
            source code which will be executed as a module.
            Source code is detected by the presence of newlines in module.
        :param func: Name of the function from ``module`` that will be
            executed. ``args`` and ``kwargs`` will be passed to the function.
        '''
        if self.failed:
            raise Failure(self.terminal_failure)
        job = Job(job_id, module, func, args, kwargs)
        self.tracker.put(None)
        self.events.put(job)

    def wait_for_tasks(self, timeout=None):
        ''' Wait for all queued jobs to be completed, if timeout is not None,
        will raise a RuntimeError if jobs are not completed in the specified
        time. Will raise a :class:`Failure` exception if a terminal failure has
        occurred previously. '''
        if self.failed:
            raise Failure(self.terminal_failure)
        if timeout is None:
            self.tracker.join()
        else:
            join_with_timeout(self.tracker, timeout)

    def shutdown(self, wait_time=0.1):
        ''' Shutdown this pool, terminating all worker process. The pool cannot
        be used after a shutdown. '''
        self.shutting_down = True
        self.events.put(None)  # sentinel wakes the event loop
        self.shutdown_workers(wait_time=wait_time)

    def create_worker(self):
        # Spawn a worker process, hand it the connection/auth data on stdin
        # and wait for it to connect back over the listener
        p = start_worker('from {0} import run_main, {1}; run_main({1})'.format(self.__class__.__module__, 'worker_main'))
        sys.stdout.flush()
        eintr_retry_call(p.stdin.write, self.worker_data)
        p.stdin.flush(), p.stdin.close()
        conn = eintr_retry_call(self.listener.accept)
        w = Worker(p, conn, self.events, self.name)
        if self.common_data != pickle_dumps(None):
            w.set_common_data(self.common_data)
        return w

    def start_worker(self):
        # Returns False on failure (and records a terminal failure)
        try:
            w = self.create_worker()
            if not self.shutting_down:
                self.available_workers.append(w)
        except Exception:
            import traceback
            self.terminal_failure = TerminalFailure('Failed to start worker process', traceback.format_exc(), None)
            self.terminal_error()
            return False

    def run(self):
        from calibre.utils.ipc.server import create_listener
        self.auth_key = os.urandom(32)
        self.address, self.listener = create_listener(self.auth_key)
        self.worker_data = msgpack_dumps((self.address, self.auth_key))
        if self.start_worker() is False:
            return
        while True:
            # ......... remainder of this method omitted in this listing .........
示例8: Repl
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class Repl(Thread):
    """A RapydScript read-eval-print loop.

    User input events arrive as (event, line) tuples on self.to_repl; the
    JS REPL implementation runs in an embedded duktape context and sends
    prompts/results back through self.from_repl.

    NOTE: this listing is truncated; more of the class follows beyond
    what is shown here.
    """

    LINE_CONTINUATION_CHARS = r'\:'
    daemon = True

    def __init__(self, ps1='>>> ', ps2='... ', show_js=False, libdir=None):
        Thread.__init__(self, name='RapydScriptREPL')
        self.to_python = to_python
        self.JSError = JSError
        self.enc = getattr(sys.stdin, 'encoding', None) or 'utf-8'
        try:
            # readline provides line editing/history when available
            import readline
            self.readline = readline
        except ImportError:
            pass
        self.output = ANSIStream(sys.stdout)
        self.to_repl = Queue()    # (event, line) tuples from the UI thread
        self.from_repl = Queue()  # prompts/completions back to the caller
        self.ps1, self.ps2 = ps1, ps2
        self.show_js, self.libdir = show_js, libdir
        self.prompt = ''
        self.completions = None
        self.start()

    def init_ctx(self):
        # Create the duktape compiler context and route console output to us
        self.prompt = self.ps1
        self.ctx = compiler()
        self.ctx.g.Duktape.write = self.output.write
        self.ctx.eval(r'''console = { log: function() { Duktape.write(Array.prototype.slice.call(arguments).join(' ') + '\n');}};
console['error'] = console['log'];''')
        self.ctx.g.repl_options = {
            'show_js': self.show_js,
            'histfile':False,
            'input':True, 'output':True, 'ps1':self.ps1, 'ps2':self.ps2,
            'terminal':self.output.isatty,
            'enum_global': 'Object.keys(this)',
            'lib_path': self.libdir or os.path.dirname(P(COMPILER_PATH))  # TODO: Change this to load pyj files from the src code
        }

    def get_from_repl(self):
        # Poll with a timeout so we notice if the REPL thread has died
        while True:
            try:
                return self.from_repl.get(True, 1)
            except Empty:
                if not self.is_alive():
                    raise SystemExit(1)

    def run(self):
        self.init_ctx()
        rl = None

        def set_prompt(p):
            self.prompt = p

        def prompt(lw):
            self.from_repl.put(to_python(lw))

        self.ctx.g.set_prompt = set_prompt
        self.ctx.g.prompt = prompt
        # Build a fake node-style readline interface inside the JS context
        self.ctx.eval('''
listeners = {};
rl = {
setPrompt:set_prompt,
write:Duktape.write,
clearLine: function() {},
on: function(ev, cb) { listeners[ev] = cb; return rl; },
prompt: prompt,
sync_prompt: true,
send_line: function(line) { listeners['line'](line); },
send_interrupt: function() { listeners['SIGINT'](); },
close: function() {listeners['close'](); },
};
repl_options.readline = { createInterface: function(options) { rl.completer = options.completer; return rl; }};
exports.init_repl(repl_options)
''', fname='<init repl>')
        rl = self.ctx.g.rl
        completer = to_python(rl.completer)
        send_interrupt = to_python(rl.send_interrupt)
        send_line = to_python(rl.send_line)
        while True:
            ev, line = self.to_repl.get()
            try:
                if ev == 'SIGINT':
                    self.output.write('\n')
                    send_interrupt()
                elif ev == 'line':
                    send_line(line)
                else:
                    # Anything else is a completion request
                    val = completer(line)
                    val = to_python(val)
                    self.from_repl.put(val[0])
            except Exception as e:
                if isinstance(e, JSError):
                    print(e.stack or error_message(e), file=sys.stderr)
                else:
                    import traceback
                    traceback.print_exc()
    # ......... remainder of this listing omitted .........
示例9: ParseWorker
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class ParseWorker(Thread):
    """Parses HTML for the preview panel in an offloaded worker process.

    Requests are coalesced: after sleeping SLEEP_TIME the queue is drained
    and only the newest pending request is parsed, so rapid edits do not
    queue up redundant parses.
    """

    daemon = True
    SLEEP_TIME = 1  # seconds to batch up requests before parsing

    def __init__(self):
        Thread.__init__(self)
        self.requests = Queue()
        self.request_count = 0  # monotonically increasing; newest wins
        self.parse_items = {}   # name -> ParseItem cache
        self.launch_error = None

    def run(self):
        mod, func = 'calibre.gui2.tweak_book.preview', 'parse_html'
        try:
            # Connect to the worker and send a dummy job to initialize it
            self.worker = offload_worker(priority='low')
            self.worker(mod, func, '<p></p>')
        except:
            import traceback
            traceback.print_exc()
            self.launch_error = traceback.format_exc()
            return
        while True:
            time.sleep(self.SLEEP_TIME)
            x = self.requests.get()
            # Drain everything queued so only the most recent is parsed
            requests = [x]
            while True:
                try:
                    requests.append(self.requests.get_nowait())
                except Empty:
                    break
            if shutdown in requests:
                self.worker.shutdown()
                break
            # Highest request_count == most recent request
            request = sorted(requests, reverse=True)[0]
            del requests
            pi, data = request[1:]
            try:
                res = self.worker(mod, func, data)
            except:
                import traceback
                traceback.print_exc()
            else:
                pi.parsing_done = True
                parsed_data = res['result']
                if res['tb']:
                    prints("Parser error:")
                    prints(res['tb'])
                else:
                    pi.parsed_data = parsed_data

    def add_request(self, name):
        # Skip the parse if content (length + hash) is unchanged
        data = get_data(name)
        ldata, hdata = len(data), hash(data)
        pi = self.parse_items.get(name, None)
        if pi is None:
            self.parse_items[name] = pi = ParseItem(name)
        else:
            if pi.parsing_done and pi.length == ldata and pi.fingerprint == hdata:
                return
            pi.parsed_data = None
            pi.parsing_done = False
        pi.length, pi.fingerprint = ldata, hdata
        self.requests.put((self.request_count, pi, data))
        self.request_count += 1

    def shutdown(self):
        # `shutdown` here is a module-level sentinel object, not this method
        self.requests.put(shutdown)

    def get_data(self, name):
        return getattr(self.parse_items.get(name, None), 'parsed_data', None)

    def clear(self):
        self.parse_items.clear()

    def is_alive(self):
        # Alive if either this thread or the offloaded worker still runs
        return Thread.is_alive(self) or (hasattr(self, 'worker') and self.worker.is_alive())
示例10: WebSocketConnection
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
#.........这里部分代码省略.........
f = ReadOnlyFileBuffer(create_frame(1, rcode, data))
f.is_close_frame = opcode == CLOSE
with self.cf_lock:
self.control_frames.append(f)
elif opcode == PONG:
try:
self.websocket_handler.handle_websocket_pong(self.websocket_connection_id, data)
except Exception:
self.log.exception('Error in PONG handler:')
self.set_ws_state()
def websocket_close(self, code=NORMAL_CLOSE, reason=b''):
    # Queue a CLOSE control frame and stop reading further frames.
    if isinstance(reason, type('')):
        reason = reason.encode('utf-8')
    self.stop_reading = True
    # Control frame payloads are limited to 125 bytes; 2 are taken by the
    # status code, leaving at most 123 for the reason (RFC 6455)
    reason = reason[:123]
    if code is None and not reason:
        f = ReadOnlyFileBuffer(create_frame(1, CLOSE, b''))
    else:
        f = ReadOnlyFileBuffer(create_frame(1, CLOSE, pack(b'!H', code) + reason))
    f.is_close_frame = True  # ws_write uses this to mark the close as sent
    with self.cf_lock:
        self.control_frames.append(f)
    self.set_ws_state()
def ws_write(self):
    # Called when the socket is writable. Sends at most one buffer per
    # call: first any in-progress buffer, then queued control frames,
    # then frames of the current outgoing message.
    if self.ws_close_sent:
        return  # nothing more may be sent after a CLOSE frame
    if self.send_buf is not None:
        if self.write(self.send_buf):
            # Buffer fully written
            self.end_send_optimization()
            if getattr(self.send_buf, 'is_close_frame', False):
                self.ws_close_sent = True
            self.send_buf = None
    else:
        with self.cf_lock:
            try:
                # Control frames take priority over message frames
                self.send_buf = self.control_frames.popleft()
            except IndexError:
                if self.sending is not None:
                    self.send_buf = self.sending.create_frame()
                    if self.send_buf is None:
                        self.sending = None  # current message fully framed
        if self.send_buf is not None:
            self.optimize_for_sending_packet()
def close(self):
    # Shut the connection down; in websocket mode notify the handler and
    # make one best-effort attempt to send a proper CLOSE frame first.
    if self.in_websocket_mode:
        try:
            self.websocket_handler.handle_websocket_close(self.websocket_connection_id)
        except Exception:
            self.log.exception('Error in WebSocket close handler')
        # Try to write a close frame, just once
        try:
            if self.send_buf is None and not self.ws_close_sent:
                self.websocket_close(SHUTTING_DOWN, 'Shutting down')
                with self.cf_lock:
                    self.write(self.control_frames.pop())
        except Exception:
            pass
        Connection.close(self)
    else:
        HTTPConnection.close(self)
# }}}
def send_websocket_message(self, buf, wakeup=True):
    ''' Queue a complete message for sending; this class splits it into
    appropriate frames automatically. `buf` must be a file like object. '''
    writer = MessageWriter(buf)
    self.sendq.put(writer)
    self.wait_for = RDWR
    if wakeup:
        # Wake the event loop so the message starts going out immediately
        self.wakeup()
def send_websocket_frame(self, data, is_first=True, is_last=True):
    ''' Useful for streaming handlers that want to break up messages into
    frames themselves. Note that these frames will be interleaved with
    control frames, so they should not be too large. '''
    if is_first:
        opcode = TEXT if isinstance(data, type('')) else BINARY
    else:
        opcode = CONTINUATION
    fin = 1 if is_last else 0
    buf = ReadOnlyFileBuffer(create_frame(fin, opcode, data))
    with self.cf_lock:
        self.control_frames.append(buf)
def send_websocket_ping(self, data=b''):
    ''' Send a PING to the remote client; it should reply with a PONG which
    will be delivered to the handle_websocket_pong callback in your handler. '''
    payload = data.encode('utf-8') if isinstance(data, type('')) else data
    ping = ReadOnlyFileBuffer(create_frame(True, PING, payload))
    with self.cf_lock:
        self.control_frames.append(ping)
def handle_websocket_data(self, data, message_starting, message_finished):
    ''' Called when some data is received from the remote client. In
    general the data may not constitute a complete "message", use the
    message_starting and message_finished flags to re-assemble it into a
    complete message in the handler. Note that for binary data, data is a
    mutable object. If you intend to keep it around after this method
    returns, create a bytestring from it, using tobytes(). '''
    # Pure delegation: forward to the registered handler, tagged with this
    # connection's id
    self.websocket_handler.handle_websocket_data(self.websocket_connection_id, data, message_starting, message_finished)
示例11: Server
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class Server(Thread):
    """Manages a pool of worker processes that run jobs over IPC.

    NOTE: this listing is truncated; the body of run() continues beyond
    what is shown here.
    """

    def __init__(self, notify_on_job_done=lambda x: x, pool_size=None,
            limit=sys.maxsize, enforce_cpu_limit=True):
        Thread.__init__(self)
        self.daemon = True
        global _counter
        self.id = _counter+1
        _counter += 1
        if enforce_cpu_limit:
            limit = min(limit, cpu_count())
        self.pool_size = limit if pool_size is None else pool_size
        self.notify_on_job_done = notify_on_job_done
        self.auth_key = os.urandom(32)
        self.address, self.listener = create_listener(self.auth_key, backlog=4)
        self.add_jobs_queue, self.changed_jobs_queue = Queue(), Queue()
        self.kill_queue = Queue()
        self.waiting_jobs = []
        self.workers = deque()
        self.launched_worker_count = 0
        self._worker_launch_lock = RLock()
        self.start()

    def launch_worker(self, gui=False, redirect_output=None, job_name=None):
        # Launch a new worker process; its environment tells it how to
        # connect back and where to write its result pickle.
        start = time.time()
        with self._worker_launch_lock:
            self.launched_worker_count += 1
            id = self.launched_worker_count
        fd, rfile = tempfile.mkstemp(prefix=u'ipc_result_%d_%d_'%(self.id, id),
                dir=base_dir(), suffix=u'.pickle')
        os.close(fd)
        if redirect_output is None:
            redirect_output = not gui  # GUI workers keep their own output
        env = {
            'CALIBRE_WORKER_ADDRESS' : environ_item(as_hex_unicode(msgpack_dumps(self.address))),
            'CALIBRE_WORKER_KEY' : environ_item(as_hex_unicode(self.auth_key)),
            'CALIBRE_WORKER_RESULT' : environ_item(as_hex_unicode(rfile)),
        }
        cw = self.do_launch(env, gui, redirect_output, rfile, job_name=job_name)
        if isinstance(cw, string_or_bytes):
            # do_launch returns a traceback string on failure
            raise CriticalError('Failed to launch worker process:\n'+cw)
        if DEBUG:
            print('Worker Launch took:', time.time() - start)
        return cw

    def do_launch(self, env, gui, redirect_output, rfile, job_name=None):
        # Returns a ConnectedWorker on success, a traceback string on failure
        w = Worker(env, gui=gui, job_name=job_name)
        try:
            w(redirect_output=redirect_output)
            conn = eintr_retry_call(self.listener.accept)
            if conn is None:
                raise Exception('Failed to launch worker process')
        except BaseException:
            try:
                w.kill()
            except:
                pass
            import traceback
            return traceback.format_exc()
        return ConnectedWorker(w, conn, rfile)

    def add_job(self, job):
        job.done2 = self.notify_on_job_done
        self.add_jobs_queue.put(job)

    def run_job(self, job, gui=True, redirect_output=False):
        w = self.launch_worker(gui=gui, redirect_output=redirect_output, job_name=getattr(job, 'name', None))
        w.start_job(job)

    def run(self):
        while True:
            try:
                job = self.add_jobs_queue.get(True, 0.2)
                if job is None:
                    break  # sentinel: shut down the server
                self.waiting_jobs.insert(0, job)
            except Empty:
                pass
            # Get notifications from worker process
            for worker in self.workers:
                while True:
                    try:
                        n = worker.notifications.get_nowait()
                        worker.job.notifications.put(n)
                        self.changed_jobs_queue.put(worker.job)
                    except Empty:
                        break
            # Remove finished jobs
            for worker in [w for w in self.workers if not w.is_alive]:
                try:
                    worker.close_log_file()
                except:
                    pass
                self.workers.remove(worker)
            # ......... remainder of this method omitted in this listing .........
示例12: Main
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
#.........这里部分代码省略.........
if message:
if not self.device_manager.is_running('Wireless Devices'):
error_dialog(self, _('Problem starting the wireless device'),
_('The wireless device driver had problems starting. '
'It said "%s"')%message, show=True)
self.iactions['Connect Share'].set_smartdevice_action_state()
def start_content_server(self, check_started=True):
    # Start the embedded web content server. Shows a one-time warning
    # about the calibre 3 server rewrite if an old server.py customization
    # exists, removing the obsolete file.
    from calibre.srv.embedded import Server
    if not gprefs.get('server3_warning_done', False):
        gprefs.set('server3_warning_done', True)
        if os.path.exists(os.path.join(config_dir, 'server.py')):
            try:
                os.remove(os.path.join(config_dir, 'server.py'))
            except EnvironmentError:
                pass
            warning_dialog(self, _('Content server changed!'), _(
                'calibre 3 comes with a completely re-written content server.'
                ' As such any custom configuration you have for the content'
                ' server no longer applies. You should check and refresh your'
                ' settings in Preferences->Sharing->Sharing over the net'), show=True)
    self.content_server = Server(self.library_broker, Dispatcher(self.handle_changes_from_server))
    self.content_server.state_callback = Dispatcher(
        self.iactions['Connect Share'].content_server_state_changed)
    if check_started:
        # Report startup failures back to the GUI thread
        self.content_server.start_failure_callback = \
            Dispatcher(self.content_server_start_failed)
    self.content_server.start()
def handle_changes_from_server(self, library_path, change_event):
    # Queue a server-side change event for the currently open GUI library
    # and (re)start the debounce timer that applies them in batches.
    if DEBUG:
        msg = 'Received server change event: {} for {}'.format(change_event, library_path)
        prints(msg)
    if not self.library_broker.is_gui_library(library_path):
        return
    self.server_changes.put((library_path, change_event))
    self.server_change_notification_timer.start()
def handle_changes_from_server_debounced(self):
    # Drain all queued server change events and apply them in one batch.
    if self.shutting_down:
        return
    pending = []
    while True:
        try:
            library_path, change_event = self.server_changes.get_nowait()
        except Empty:
            break
        # Re-check, as the GUI library may have switched since queueing
        if self.library_broker.is_gui_library(library_path):
            pending.append(change_event)
    if pending:
        handle_changes(pending, self)
def content_server_start_failed(self, msg):
    # Drop the broken server instance, then report the error to the user
    self.content_server = None
    title = _('Failed to start Content server')
    body = _('Could not start the Content server. Error:\n\n%s') % msg
    error_dialog(self, title, body, show=True)
def resizeEvent(self, ev):
    # Keep the search box from spanning the entire window width
    MainWindow.resizeEvent(self, ev)
    self.search.setMaximumWidth(self.width()-150)
def create_spare_pool(self, *args):
    # Lazily create a shared worker pool, sized to half the configured
    # worker limit but never more than the CPU count
    if self._spare_pool is None:
        num = min(detect_ncpus(), int(config['worker_limit']/2.0))
        self._spare_pool = Pool(max_workers=num, name='GUIPool')
def spare_pool(self):
示例13: JobsManager
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class JobsManager(object):
def __init__(self, opts, log):
mj = opts.max_jobs
if mj < 1:
mj = detect_ncpus()
self.log = log
self.max_jobs = max(1, mj)
self.max_job_time = max(0, opts.max_job_time * 60)
self.lock = RLock()
self.jobs = {}
self.finished_jobs = {}
self.events = Queue()
self.job_id = count()
self.waiting_job_ids = set()
self.waiting_jobs = deque()
self.max_block = None
self.shutting_down = False
self.event_loop = None
def start_job(self, name, module, func, args=(), kwargs=None, job_done_callback=None, job_data=None):
with self.lock:
if self.shutting_down:
return None
if self.event_loop is None:
self.event_loop = t = Thread(name='JobsEventLoop', target=self.run)
t.daemon = True
t.start()
job_id = next(self.job_id)
self.events.put(StartEvent(job_id, name, module, func, args, kwargs or {}, job_done_callback, job_data))
self.waiting_job_ids.add(job_id)
return job_id
def job_status(self, job_id):
with self.lock:
if not self.shutting_down:
if job_id in self.finished_jobs:
job = self.finished_jobs[job_id]
return 'finished', job.result, job.traceback, job.was_aborted
if job_id in self.jobs:
return 'running', None, None, None
if job_id in self.waiting_job_ids:
return 'waiting', None, None, None
return None, None, None, None
def abort_job(self, job_id):
job = self.jobs.get(job_id)
if job is not None:
job.abort_event.set()
def wait_for_running_job(self, job_id, timeout=None):
job = self.jobs.get(job_id)
if job is not None:
job.wait_for_end.wait(timeout)
if not job.done:
return False
while job_id not in self.finished_jobs:
time.sleep(0.001)
return True
def shutdown(self, timeout=5.0):
with self.lock:
self.shutting_down = True
for job in itervalues(self.jobs):
job.abort_event.set()
self.events.put(False)
def wait_for_shutdown(self, wait_till):
for job in itervalues(self.jobs):
delta = wait_till - monotonic()
if delta > 0:
job.join(delta)
if self.event_loop is not None:
delta = wait_till - monotonic()
if delta > 0:
self.event_loop.join(delta)
# Internal API {{{
def run(self):
while not self.shutting_down:
if self.max_block is None:
ev = self.events.get()
else:
try:
ev = self.events.get(block=True, timeout=self.max_block)
except Empty:
ev = None
if self.shutting_down:
break
if ev is None:
self.abort_hanging_jobs()
elif isinstance(ev, StartEvent):
self.waiting_jobs.append(ev)
self.start_waiting_jobs()
elif isinstance(ev, DoneEvent):
self.job_finished(ev.job_id)
elif ev is False:
break
#.........这里部分代码省略.........
示例14: CompletionWorker
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
#.........这里部分代码省略.........
eintr_retry_call(conn.send, data)
except:
if not self.shutting_down:
raise
def recv(self, conn=None):
    # Receive from the given connection (control connection by default).
    # During shutdown the pipe may already be closed, so errors are
    # deliberately swallowed then; otherwise they propagate.
    target = conn or self.control_conn
    try:
        return eintr_retry_call(target.recv)
    except:
        if not self.shutting_down:
            raise
def wait_for_connection(self, timeout=None):
    # Block until the worker process has connected back (or timeout expires)
    self.connected.wait(timeout)
def handle_data_requests(self):
    # Service data requests from the completion worker process until the
    # connection closes, an error occurs, or shutdown begins.
    from calibre.gui2.tweak_book.completion.basic import handle_data_request
    while True:
        try:
            req = self.recv(self.data_conn)
        except EOFError:
            break
        except Exception:
            import traceback
            traceback.print_exc()
            break
        if req is None or self.shutting_down:
            break
        result, tb = handle_data_request(req)
        try:
            self.send((result, tb), self.data_conn)
        except EOFError:
            break
        except Exception:
            import traceback
            traceback.print_exc()
            break
def run(self):
    # Main loop: forward queued completion/clear requests to the worker.
    self.launch_worker_process()
    while True:
        obj = self.main_queue.get()
        if obj is None:
            break  # sentinel queued by shutdown()
        req_type, req_data = obj
        try:
            if req_type is COMPLETION_REQUEST:
                with self.lock:
                    if self.current_completion_request is not None:
                        # Only the most recent completion request is sent;
                        # taking it clears the slot for newer requests
                        ccr, self.current_completion_request = self.current_completion_request, None
                        self.send_completion_request(ccr)
            elif req_type is CLEAR_REQUEST:
                self.send(req_data)
        except EOFError:
            break
        except Exception:
            import traceback
            traceback.print_exc()
def send_completion_request(self, request):
    # Send a completion request and deliver the reply via the callback,
    # unless a newer request has superseded it in the meantime
    self.send(request)
    result = self.recv()
    if result.request_id == self.latest_completion_request_id:
        try:
            self.result_callback(result)
        except Exception:
            import traceback
            traceback.print_exc()
def clear_caches(self, cache_type=None):
    # Ask the worker to drop its caches (all of them when cache_type is None)
    self.main_queue.put((CLEAR_REQUEST, Request(None, 'clear_caches', cache_type, None)))
def queue_completion(self, request_id, completion_type, completion_data, query=None):
    # Record the newest completion request; run() only ever sends the
    # latest one, so older pending requests are implicitly dropped
    with self.lock:
        self.current_completion_request = Request(request_id, completion_type, completion_data, query)
        self.latest_completion_request_id = self.current_completion_request.id
    self.main_queue.put((COMPLETION_REQUEST, None))
def shutdown(self):
    # Stop the main loop, close the IPC connections and terminate the
    # worker process; reaping happens on a background thread so this
    # method does not block.
    self.shutting_down = True
    self.main_queue.put(None)  # sentinel stops run()
    for conn in (getattr(self, 'control_conn', None), getattr(self, 'data_conn', None)):
        try:
            conn.close()
        except Exception:
            pass
    p = self.worker_process
    if p.poll() is None:
        self.worker_process.terminate()
    t = self.reap_thread = Thread(target=p.wait)
    t.daemon = True
    t.start()
def join(self, timeout=0.2):
    # Wait briefly for the reaper thread, then hard-kill the worker if it
    # still has not exited (SIGKILL is unavailable on Windows here)
    if self.reap_thread is not None:
        self.reap_thread.join(timeout)
    if not iswindows and self.worker_process.returncode is None:
        self.worker_process.kill()
    return self.worker_process.returncode
示例15: DeleteService
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import put [as 别名]
class DeleteService(Thread):

    ''' Provide a blocking file delete implementation with support for the
    recycle bin. On windows, deleting files to the recycle bin spins the event
    loop, which can cause locking errors in the main thread. We get around this
    by only moving the files/folders to be deleted out of the library in the
    main thread, they are deleted to recycle bin in a separate worker thread.

    This has the added advantage that doing a restore from the recycle bin wont
    cause metadata.db and the file system to get out of sync. Also, deleting
    becomes much faster, since in the common case, the move is done by a simple
    os.rename(). The downside is that if the user quits calibre while a long
    move to recycle bin is happening, the files may not all be deleted.

    NOTE: this listing is truncated; the body of run() continues beyond
    what is shown here.'''

    daemon = True

    def __init__(self):
        Thread.__init__(self)
        self.requests = Queue()  # staging directories awaiting deletion

    def shutdown(self, timeout=20):
        self.requests.put(None)  # sentinel stops the worker loop
        self.join(timeout)

    def create_staging(self, library_path):
        # Make a temp dir next to the library (same filesystem, so moves
        # are cheap renames); fall back to the system temp dir
        base_path = os.path.dirname(library_path)
        base = os.path.basename(library_path)
        try:
            ans = tempfile.mkdtemp(prefix=base+' deleted ', dir=base_path)
        except OSError:
            ans = tempfile.mkdtemp(prefix=base+' deleted ')
        atexit.register(remove_dir, ans)
        return ans

    def remove_dir_if_empty(self, path):
        try:
            os.rmdir(path)
        except OSError as e:
            if e.errno == errno.ENOTEMPTY or len(os.listdir(path)) > 0:
                # Some linux systems appear to raise an EPERM instead of an
                # ENOTEMPTY, see https://bugs.launchpad.net/bugs/1240797
                return
            raise

    def delete_books(self, paths, library_path):
        tdir = self.create_staging(library_path)
        self.queue_paths(tdir, paths, delete_empty_parent=True)

    def queue_paths(self, tdir, paths, delete_empty_parent=True):
        # Move paths into the staging dir; on failure remove the staging dir
        try:
            self._queue_paths(tdir, paths, delete_empty_parent=delete_empty_parent)
        except:
            if os.path.exists(tdir):
                shutil.rmtree(tdir, ignore_errors=True)
            raise

    def _queue_paths(self, tdir, paths, delete_empty_parent=True):
        requests = []
        for path in paths:
            if os.path.exists(path):
                basename = os.path.basename(path)
                c = 0
                # Find a destination name inside tdir that does not collide
                while True:
                    dest = os.path.join(tdir, basename)
                    if not os.path.exists(dest):
                        break
                    c += 1
                    basename = '%d - %s' % (c, os.path.basename(path))
                try:
                    shutil.move(path, dest)
                except EnvironmentError:
                    if os.path.isdir(path):
                        # shutil.move may have partially copied the directory,
                        # so the subsequent call to move() will fail as the
                        # destination directory already exists
                        raise
                    # Wait a little in case something has locked a file
                    time.sleep(1)
                    shutil.move(path, dest)
                if delete_empty_parent:
                    # Module-level helper, distinct from the method above
                    remove_dir_if_empty(os.path.dirname(path), ignore_metadata_caches=True)
                requests.append(dest)
        if not requests:
            remove_dir_if_empty(tdir)
        else:
            self.requests.put(tdir)

    def delete_files(self, paths, library_path):
        tdir = self.create_staging(library_path)
        self.queue_paths(tdir, paths, delete_empty_parent=False)

    def run(self):
        while True:
            x = self.requests.get()
            try:
                if x is None:
                    break
                try:
                    self.do_delete(x)
                except:
                    # ......... remainder of this method omitted in this listing .........