This article collects typical usage examples of the Python class pathos.abstract_launcher.AbstractWorkerPool. If you are unsure what AbstractWorkerPool does, how to use it, or want to see it in context, the hand-picked examples below may help.
Fourteen code examples of the AbstractWorkerPool class are shown, ordered by popularity by default.
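All of the examples share one pattern: a concrete pool class subclasses AbstractWorkerPool and first calls the base class's name-mangled input check (_AbstractWorkerPool__map or _AbstractWorkerPool__imap) before dispatching work to its backend. A minimal sketch of that pattern, with a purely illustrative EchoPool (not part of pathos) that runs jobs serially:

from pathos.abstract_launcher import AbstractWorkerPool

class EchoPool(AbstractWorkerPool):
    """illustrative subclass; not part of pathos"""
    def map(self, f, *args, **kwds):
        # reuse the base class's input validation, as the examples below do
        AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
        # stand-in for real worker dispatch: run each task serially
        return [f(*task) for task in zip(*args)]

print(EchoPool().map(pow, [1, 2, 3], [2, 2, 2]))  # [1, 4, 9]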
Example 1: amap
def amap(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        _pool = self._serve()
        #print("using %s local workers" % _pool.get_ncpus())
        try:
            return _pool.submit(f, argz, globals=globals())
        except pp.DestroyedServerError:
            self._is_alive(None)
    override = True if 'size' in kwds else False
    elem_size = kwds.pop('size', 2)
    length = min(len(task) for task in args)
    args = zip(*args) #XXX: zip iterator ok? or should be list?
    # submit all jobs, to be collected later with 'get()'
    tasks = [submit(*task) for task in args]
    tasks = [ApplyResult(task) for task in tasks]
    # build a correctly sized results object
    nodes = self.nodes
    if self.nodes in ['*','autodetect',None]:
        _pool = self._serve()
        nodes = _pool.get_ncpus() #XXX: local workers only?
    # try to quickly find a small chunksize that gives good results
    maxsize = 2**62 #XXX: HOPEFULLY, this will never be reached...
    chunksize = 1
    while chunksize < maxsize:
        chunksize, extra = divmod(length, nodes * elem_size)
        if override: break # the user *wants* to override this loop
        if extra >= length: break # we found something that 'works'
        elem_size = elem_size * 2
    if extra: chunksize += 1
    m = MapResult((chunksize,length))
    # queue the tasks
    m.queue(*tasks)
    return m
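In practice this amap belongs to a pp-backed pool; a short usage sketch, assuming pathos.pools.ParallelPool as the concrete class:

from pathos.pools import ParallelPool

pool = ParallelPool(nodes=2)
result = pool.amap(pow, [1, 2, 3, 4], [2, 2, 2, 2])  # returns immediately
# ... do other work while the jobs run ...
print(result.get())  # [1, 4, 9, 16]
pool.close(); pool.join()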
Example 2: amap
def amap(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        #print("using %s local workers" % __STATE['server'].get_ncpus())
        return __STATE['server'].submit(f, argz, globals=globals())
    override = True if 'size' in kwds else False
    elem_size = kwds.pop('size', 2)
    args = list(zip(*args))  # materialize so len(args) works below
    # submit all jobs, to be collected later with 'get()'
    tasks = [submit(*task) for task in args]
    tasks = [ApplyResult(task) for task in tasks]
    # build a correctly sized results object
    length = len(args)
    nodes = self.nodes
    if self.nodes in ['*','autodetect',None]:
        nodes = __STATE['server'].get_ncpus() #XXX: local workers only?
    # try to quickly find a small chunksize that gives good results
    maxsize = 2**62 #XXX: HOPEFULLY, this will never be reached...
    chunksize = 1
    while chunksize < maxsize:
        chunksize, extra = divmod(length, nodes * elem_size)
        if override: break # the user *wants* to override this loop
        if extra >= length: break # we found something that 'works'
        elem_size = elem_size * 2
    if extra: chunksize += 1
    m = MapResult((chunksize,length))
    # queue the tasks
    m.queue(*tasks)
    return m
Example 3: __init__
def __init__(self, *args, **kwds):
    """\nNOTE: if number of nodes is not given, will default to 1.
    If source is not given, will attempt to minimally use TemporaryFiles.
    If workdir is not given, will default to scheduler's workdir or $WORKDIR.
    If scheduler is not given, will default to only run on the current node.
    If timeout is not given, will default to scheduler's timelimit or INF.
    For more details, see the docstrings for the "map" method, or the man page
    for the associated launcher (e.g. mpirun, mpiexec).
    """
    AbstractWorkerPool.__init__(self, *args, **kwds)
    self.scheduler = kwds.get('scheduler', None)
    self.scatter = True #bool(kwds.get('scatter', True))
    self.source = bool(kwds.get('source', False))
    self.workdir = kwds.get('workdir', None)
    self.timeout = kwds.get('timeout', None)
    if self.timeout is None:
        if self.scheduler:
            from pyina.tools import isoseconds
            self.timeout = isoseconds(self.scheduler.timelimit)
        else:
            from numpy import inf
            self.timeout = inf #XXX: better than defaults.timelimit ?
    elif isinstance(self.timeout, str):
        from pyina.tools import isoseconds
        self.timeout = isoseconds(self.timeout)
    if self.workdir is None:
        if self.scheduler:
            self.workdir = self.scheduler.workdir
        else:
            self.workdir = os.environ.get('WORKDIR', os.path.curdir)
    self.workdir = os.path.abspath(self.workdir)
    return
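This constructor comes from pyina's launcher-backed pools. A hedged construction sketch, assuming pyina.launchers.Mpi as the concrete class (keyword names follow the docstring above, and the printed value assumes isoseconds parses 'HH:MM:SS' strings to seconds, as the code above does):

from pyina.launchers import Mpi

# all keywords are optional and fall back to the defaults described above
pool = Mpi(nodes=4, timeout='00:30:00', workdir='.')
print(pool.timeout)  # 1800: the 'HH:MM:SS' string parsed by isoseconds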
Example 4: imap
def imap(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        #print("using %s local workers" % __STATE['server'].get_ncpus())
        return __STATE['server'].submit(f, argz, globals=globals())
    # submit all jobs, then collect results as they become available
    return (subproc() for subproc in builtins.map(submit, *args))
Example 5: imap
def imap(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        _pool = self._serve()
        #print("using %s local workers" % _pool.get_ncpus())
        try:
            return _pool.submit(f, argz, globals=globals())
        except pp.DestroyedServerError:
            self._is_alive(None)
    # submit all jobs, then collect results as they become available
    return (subproc() for subproc in builtins.map(submit, *args))
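The generator returned above evaluates each job only when the iterator is advanced, preserving input order. A usage sketch with a pp-backed pool, again assuming pathos.pools.ParallelPool:

from pathos.pools import ParallelPool

pool = ParallelPool(nodes=2)
for value in pool.imap(abs, [-1, -2, -3]):
    print(value)  # 1, 2, 3 -- one result per iteration, in input order
pool.close(); pool.join()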
Example 6: uimap
def uimap(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        _pool = self._serve()
        #print("using %s local workers" % _pool.get_ncpus())
        try:
            return _pool.submit(f, argz, globals=globals())
        except pp.DestroyedServerError:
            self._is_alive(None)
    def imap_unordered(it):
        """build an unordered map iterator"""
        while len(it):
            for i,job in enumerate(it):
                if job.finished:
                    yield it.pop(i)()
                    break
            # yield it.pop(0).get() # wait for the first element?
            # *subprocess* # alternately, loop in a subprocess
        return  # exhausted; a bare 'raise StopIteration' errors under PEP 479
    # submit all jobs, then collect results as they become available
    return imap_unordered(list(builtins.map(submit, *args)))  # list: len()/pop() needed
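Because imap_unordered polls job.finished and yields whichever job completes first, output order follows completion rather than submission. A sketch with sleep times chosen so later inputs finish first (the import sits inside the function so remote workers can resolve it):

from pathos.pools import ParallelPool

def slow(x):
    import time       # imported inside so the worker can resolve it
    time.sleep(x)
    return x

pool = ParallelPool(nodes=3)
print(list(pool.uimap(slow, [0.3, 0.2, 0.1])))  # likely [0.1, 0.2, 0.3]
pool.close(); pool.join()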
Example 7: amap
def amap(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        #print("using %s local workers" % __STATE['server'].get_ncpus())
        return __STATE['server'].submit(f, argz, globals=globals())
    elem_size = kwds.pop('size', 8) #FIXME: should be size of output type
    args = list(zip(*args))  # materialize so len(args) works below
    # submit all jobs, to be collected later with 'get()'
    tasks = [submit(*task) for task in args]
    tasks = [ApplyResult(task) for task in tasks]
    # build a correctly sized results object
    length = len(args)
    nodes = self.nodes
    if self.nodes in ['*','autodetect',None]:
        nodes = __STATE['server'].get_ncpus() #XXX: local workers only?
    chunksize, extra = divmod(length, nodes * elem_size)
    if extra: chunksize += 1
    m = MapResult((chunksize,length))
    # queue the tasks
    m.queue(*tasks)
    return m
Example 8: map
def map(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    return list(self.imap(f, *args))
Example 9: uimap
def uimap(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    _pool = self._serve()
    return _pool.imap_unordered(star(f), zip(*args)) # chunksize
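The star used here and in the remaining examples adapts an N-argument function to the single-tuple calling convention of multiprocessing-style pools, so the tuples produced by zip(*args) can be fed straight to map and friends. A rough stand-in (the real helper lives in pathos; this version is only illustrative):

def star(f):
    """adapt f(a, b, ...) to accept one packed argument tuple"""
    def starred(args):
        return f(*args)
    return starred

# e.g. star(pow)((2, 3)) == pow(2, 3) == 8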
Example 10: map
def map(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    _pool = self._serve()
    return _pool.map(star(f), zip(*args)) # chunksize
Example 11: amap
def amap(self, f, *args, **kwds): # register a callback ?
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    return __STATE['threads'].map_async(star(f), zip(*args)) # chunksize
Example 12: uimap
def uimap(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    return __STATE['threads'].imap_unordered(star(f), zip(*args)) # chunksize
Example 13: map
def map(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    return __STATE['threads'].map(star(f), zip(*args)) # chunksize
Example 14: imap
def imap(self, f, *args, **kwds):
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    return __STATE['pool'].imap(star(f), zip(*args)) # chunksize
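The __STATE['threads'] and __STATE['pool'] globals in Examples 11-14 hold module-level multiprocessing-style pool instances. The same four calls are exposed directly on current pathos pools; a combined usage sketch on a thread pool:

from pathos.pools import ThreadPool

pool = ThreadPool(nodes=4)
print(pool.map(pow, [1, 2, 3], [2, 2, 2]))         # blocking: [1, 4, 9]
print(list(pool.imap(pow, [1, 2, 3], [2, 2, 2])))  # lazy, input order
print(list(pool.uimap(pow, [1, 2, 3], [2, 2, 2]))) # lazy, completion order
print(pool.amap(pow, [1, 2, 3], [2, 2, 2]).get())  # async, then get()
pool.close(); pool.join()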