本文整理汇总了Python中multiprocessing.Manager.acquire方法的典型用法代码示例。如果您正苦于以下问题:Python Manager.acquire方法的具体用法?Python Manager.acquire怎么用?Python Manager.acquire使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类multiprocessing.Manager
的用法示例。
在下文中一共展示了Manager.acquire方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: LockingSession
# Required import: from multiprocessing import Manager
# Alternative: from multiprocessing.Manager import acquire
class LockingSession(object):
    """Serialize cross-process access to a DataManager shelf file.

    Each item read/write opens the shelf under a shared lock and closes
    it again, so two processes never hold the shelf open at once.
    """

    def __init__(self, dataman, session_filename):
        self.dataman = dataman
        self.session_filename = session_filename
        # A Manager lock can be shared with child processes,
        # unlike a plain threading.Lock.
        self.lock = Manager().Lock()

    def acquire(self):
        """Take the lock and open the shelf for exclusive use."""
        self.lock.acquire()
        # NOTE(review): DataManager is defined elsewhere in the project;
        # presumably .shelf() opens a shelve-like store — confirm.
        self.session = DataManager.shelf(self.session_filename)

    def release(self):
        """Close the shelf and drop the lock.

        The lock is released even if close() raises, so a failed close
        cannot deadlock every other process waiting on this session.
        """
        try:
            self.session.close()
            self.session = None
        finally:
            self.lock.release()

    def __getitem__(self, item):
        self.acquire()
        try:
            # Release even on KeyError; the original leaked the lock here.
            return self.session[item]
        finally:
            self.release()

    def __setitem__(self, item, value):
        self.acquire()
        try:
            self.session[item] = value
        finally:
            self.release()
示例2: repository
# Required import: from multiprocessing import Manager
# Alternative: from multiprocessing.Manager import acquire
class Repository:
    """Synchronize all accesses to our task repository (from TaskInfo
    and TaskFetch and all the pulls necessary).

    Use one Repository object for all of these — the lock only guards
    accesses that go through the same instance.
    """

    def __init__(self, path, auto_sync=False):
        # A Manager lock can be shared with child processes.
        self.lock = Manager().Lock()
        self.path = path
        self.auto_sync = auto_sync

    def __enter__(self):
        self.lock.acquire()
        try:
            self._sync()
        except Exception:
            # Never leave the lock held if syncing blows up before
            # __exit__ gets a chance to run.
            self.lock.release()
            raise
        # Return self so `with repo as r:` works (original returned None).
        return self

    def __exit__(self, type, value, traceback):
        self.lock.release()

    def _sync(self):
        """Best-effort `git pull` in the repository when auto_sync is set."""
        if self.auto_sync:
            logger.info("Synchronizing {}".format(self.path))
            with chdir(self.path):
                try:
                    gitout = check_output(["git", "pull"])
                except Exception as e:
                    # Narrowed from a bare `except:`. Stay best-effort,
                    # but log the actual failure instead of the
                    # always-empty `gitout` the original logged.
                    logger.error("Couldn't sync with repository: " +
                                 "{}".format(e))
                else:
                    logger.info("Finished synchronization: " +
                                "{}".format(gitout))
示例3: __init__
# Required import: from multiprocessing import Manager
# Alternative: from multiprocessing.Manager import acquire
class Broker:
    """Fan work out to a bounded pool of worker subprocesses."""

    def __init__(self, max_needed_threads, greed=1, debugmode=False):
        """
        :param max_needed_threads: exactly what it says, to avoid wasting resources
        :param greed: multiplied with cpu_count(), gives the number of spawned subprocesses
        :param debugmode: if True, print a line per spawned/collected task
        """
        self.debugmode = debugmode
        self.maxthreads = min(math.ceil(getMaxThreads() * greed), max_needed_threads)
        self.threadcontrol = Queue()  # FIFO of (task id, AsyncResult)
        self.pool = Pool(processes=self.maxthreads)
        self.unid = 0  # monotonically increasing task id
        # Counts free pool slots; `worker` is expected to release it when done.
        self.freespots = Manager().Semaphore(self.maxthreads)

    def appendNfire(self, func, args):
        """Launch (in a subprocess) func with arguments from the tuple args,
        blocking until a pool slot is free.

        :returns: True upon success, False if the pool refused the task.
        """
        # Validate BEFORE taking a slot: the original acquired the
        # semaphore first, and an AssertionError (not caught below)
        # permanently leaked one permit, shrinking the pool.
        assert isinstance(args, tuple)
        self.freespots.acquire()
        try:
            if self.debugmode:
                print("Spawning thread #%d" % self.unid)
            r = self.pool.apply_async(worker, [func, args, self.freespots])
            self.threadcontrol.put((self.unid, r))
            self.unid += 1
            return True
        except ValueError:
            # Pool already closed/terminated: give the slot back.
            self.freespots.release()
            return False

    def collect(self):
        """Generator of the launched functions' results, yielded in the
        same order the functions were launched.

        NOTE(review): relies on Queue.empty(), which is only dependable
        because results are enqueued and drained by the same process.
        """
        while not self.threadcontrol.empty():
            unid, async_result = self.threadcontrol.get()
            res = async_result.get()  # blocks until that task finishes
            if self.debugmode:
                print("Collecting thread #%d" % unid)
            yield res

    def stop(self):
        """Wait for all running tasks, then shut the pool down cleanly."""
        self.pool.close()
        self.pool.join()

    def abort(self):
        """Kill the worker subprocesses with fire."""
        self.freespots.release()  # this will release a stuck appendNfire()
        self.pool.terminate()
        self.pool.join()