本文整理汇总了Python中polyglot.queue.Queue.task_done方法的典型用法代码示例。如果您正苦于以下问题:Python Queue.task_done方法的具体用法?Python Queue.task_done怎么用?Python Queue.task_done使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类polyglot.queue.Queue
的用法示例。
在下文中一共展示了Queue.task_done方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: DeleteService
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import task_done [as 别名]
#.........这里部分代码省略.........
def shutdown(self, timeout=20):
    """Ask the delete worker thread to stop and wait for it to exit.

    A ``None`` sentinel on the request queue tells the run() loop to
    break out; we then join the thread for up to *timeout* seconds.
    """
    # None is the agreed-upon shutdown sentinel for the run() loop.
    self.requests.put(None)
    self.join(timeout)
def create_staging(self, library_path):
    """Create a temporary staging directory for files awaiting deletion.

    The directory is preferably created next to *library_path* (so moves
    into it stay on the same filesystem); if that location is not
    writable we fall back to the system temp directory.  The directory
    is registered for removal at interpreter exit.
    """
    parent = os.path.dirname(library_path)
    prefix = os.path.basename(library_path) + ' deleted '
    try:
        staging = tempfile.mkdtemp(prefix=prefix, dir=parent)
    except OSError:
        # Parent directory not writable (or missing): use the default
        # temp location instead.
        staging = tempfile.mkdtemp(prefix=prefix)
    atexit.register(remove_dir, staging)
    return staging
def remove_dir_if_empty(self, path):
    """Remove the directory *path* if it is empty; do nothing otherwise."""
    try:
        os.rmdir(path)
    except OSError as err:
        # Some linux systems appear to raise an EPERM instead of an
        # ENOTEMPTY, see https://bugs.launchpad.net/bugs/1240797
        is_not_empty = err.errno == errno.ENOTEMPTY or len(os.listdir(path)) > 0
        if is_not_empty:
            return
        raise
def delete_books(self, paths, library_path):
    """Queue the book directories in *paths* for background deletion.

    Parent directories that are left empty by the move are removed too.
    """
    staging = self.create_staging(library_path)
    self.queue_paths(staging, paths, delete_empty_parent=True)
def queue_paths(self, tdir, paths, delete_empty_parent=True):
    """Move *paths* into the staging directory *tdir* and queue it for deletion.

    If staging fails for any reason, the partially populated staging
    directory is removed before the error is propagated, so no
    half-staged directory is left behind.

    :param tdir: staging directory created by create_staging()
    :param paths: filesystem paths to move into staging
    :param delete_empty_parent: also remove parent dirs left empty
    """
    try:
        self._queue_paths(tdir, paths, delete_empty_parent=delete_empty_parent)
    except BaseException:
        # Was a bare ``except:``.  BaseException keeps the identical
        # catch-everything semantics (including KeyboardInterrupt) but is
        # explicit; safe because we only clean up and re-raise.
        if os.path.exists(tdir):
            shutil.rmtree(tdir, ignore_errors=True)
        raise
def _queue_paths(self, tdir, paths, delete_empty_parent=True):
requests = []
for path in paths:
if os.path.exists(path):
basename = os.path.basename(path)
c = 0
while True:
dest = os.path.join(tdir, basename)
if not os.path.exists(dest):
break
c += 1
basename = '%d - %s' % (c, os.path.basename(path))
try:
shutil.move(path, dest)
except EnvironmentError:
if os.path.isdir(path):
# shutil.move may have partially copied the directory,
# so the subsequent call to move() will fail as the
# destination directory already exists
raise
# Wait a little in case something has locked a file
time.sleep(1)
shutil.move(path, dest)
if delete_empty_parent:
remove_dir_if_empty(os.path.dirname(path), ignore_metadata_caches=True)
requests.append(dest)
if not requests:
remove_dir_if_empty(tdir)
else:
self.requests.put(tdir)
def delete_files(self, paths, library_path):
    """Queue the files in *paths* for background deletion.

    Unlike delete_books(), parent directories are left untouched.
    """
    staging = self.create_staging(library_path)
    self.queue_paths(staging, paths, delete_empty_parent=False)
def run(self):
    """Worker loop: process queued staging directories until told to stop.

    A ``None`` request is the shutdown sentinel (see shutdown()).  An
    error while deleting one staging directory is printed and swallowed
    so the loop keeps servicing later requests.  Every request —
    including the sentinel — is acknowledged with task_done() so that
    Queue.join() (used by wait()) does not block forever.
    """
    while True:
        request = self.requests.get()
        try:
            if request is None:
                break  # shutdown sentinel
            try:
                self.do_delete(request)
            except Exception:
                # Was a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt; only runtime errors from
                # the delete should be survived.
                import traceback
                traceback.print_exc()
        finally:
            self.requests.task_done()
def wait(self):
    """Block until every pending delete request has been processed."""
    # Queue.join() returns once task_done() has been called for each
    # item that was ever put on the queue.
    self.requests.join()
def do_delete(self, tdir):
    """Delete every entry staged inside *tdir*, then *tdir* itself.

    The staging directory is removed even when deleting one of its
    entries fails part-way through.  A missing *tdir* is a no-op.
    """
    if not os.path.exists(tdir):
        return
    try:
        for name in os.listdir(tdir):
            entry = os.path.join(tdir, name)
            # Dispatch to the module-level helpers for dirs vs. files.
            if os.path.isdir(entry):
                delete_tree(entry)
            else:
                delete_file(entry)
    finally:
        shutil.rmtree(tdir)
示例2: Pool
# 需要导入模块: from polyglot.queue import Queue [as 别名]
# 或者: from polyglot.queue.Queue import task_done [as 别名]
#.........这里部分代码省略.........
self.terminal_failure = TerminalFailure('Failed to start worker process', traceback.format_exc(), None)
self.terminal_error()
return False
def run(self):
    # Main loop of the pool's manager thread: set up the IPC listener
    # that worker processes connect to, start the first worker, then
    # dispatch events until shut down or a terminal error occurs.
    from calibre.utils.ipc.server import create_listener
    self.auth_key = os.urandom(32)
    self.address, self.listener = create_listener(self.auth_key)
    # Workers receive the listener address and auth key as a single
    # serialized blob.
    self.worker_data = msgpack_dumps((self.address, self.auth_key))
    if self.start_worker() is False:
        # start_worker() has already recorded the terminal failure.
        return
    while True:
        event = self.events.get()
        # None is the shutdown sentinel.
        if event is None or self.shutting_down:
            break
        if self.handle_event(event) is False:
            break
def handle_event(self, event):
    # Process one item from the events queue.  Three event shapes are
    # handled: a Job to dispatch, a WorkerResult coming back from a
    # worker, and anything else, which is treated as new "common data"
    # to broadcast to all idle workers.  Returns False to stop the
    # pool's run() loop; any other return keeps it going.
    if isinstance(event, Job):
        job = event
        if not self.available_workers:
            if len(self.busy_workers) >= self.max_workers:
                # Pool is saturated: park the job; it is drained by the
                # while loop at the bottom once a worker frees up.
                self.pending_jobs.append(job)
                return
            # Below capacity: spin up another worker for this job.
            if self.start_worker() is False:
                return False
        return self.run_job(job)
    elif isinstance(event, WorkerResult):
        worker_result = event
        # The worker that produced this result is idle again.
        self.busy_workers.pop(worker_result.worker, None)
        self.available_workers.append(worker_result.worker)
        self.tracker.task_done()
        if worker_result.is_terminal_failure:
            # The worker process crashed; shut the whole pool down.
            self.terminal_failure = TerminalFailure('Worker process crashed while executing job', worker_result.result.traceback, worker_result.id)
            self.terminal_error()
            return False
        self.results.put(worker_result)
    else:
        # Anything else is common data to be shared with every worker.
        self.common_data = pickle_dumps(event)
        if len(self.common_data) > MAX_SIZE:
            # Too large to push over the pipe directly: spool it to a
            # temp file and send a File reference instead.
            self.cd_file = PersistentTemporaryFile('pool_common_data')
            with self.cd_file as f:
                f.write(self.common_data)
            self.common_data = pickle_dumps(File(f.name))
        for worker in self.available_workers:
            try:
                worker.set_common_data(self.common_data)
            except Exception:
                import traceback
                self.terminal_failure = TerminalFailure('Worker process crashed while sending common data', traceback.format_exc(), None)
                self.terminal_error()
                return False
    # Dispatch any parked jobs that now have a free worker.
    while self.pending_jobs and self.available_workers:
        if self.run_job(self.pending_jobs.pop()) is False:
            return False
def run_job(self, job):
worker = self.available_workers.pop()
try:
worker(job)
except Exception:
import traceback
self.terminal_failure = TerminalFailure('Worker process crashed while sending job', traceback.format_exc(), job.id)