本文整理汇总了Python中multiprocessing.managers.SyncManager.get_queue方法的典型用法代码示例。如果您正苦于以下问题:Python SyncManager.get_queue方法的具体用法?Python SyncManager.get_queue怎么用?Python SyncManager.get_queue使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类multiprocessing.managers.SyncManager
的用法示例。
在下文中一共展示了SyncManager.get_queue方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Node
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import get_queue [as 别名]
class Node(Daemon):
"""
Node is started up on the remote instance via the bootstrapping process for that instance.
The node is responsible for tracking active streams and managing the workers that process
the jobs from those streams. If a stream goes idle (ie, there are no more jobs in the stream's
queue and all workers have died) then Node will stop tracking the stream. If jobs re-appear
on the stream Node will spawn new workers to process those jobs. If a new stream appears
Node will spawn new workers to process the jobs on that stream. Each worker is an independent
concurrent process that inherits the stream to process from the Node.
"""
def __init__(self, queue, qauthkey, mpps=5, dfs=None, dauthkey=None, logdir=curdir, piddir=curdir, **properties):
    """
    Set up the Node's state, connect to the Queue (and DFS when configured),
    then initialize the underlying Daemon with its pid-file and I/O streams.

    queue      -- (host, port) of the Queue to connect to.
    qauthkey   -- authkey for the Queue connection.
    mpps       -- max processes per stream (worker cap).
    dfs        -- optional (host, port) of the DFS; None disables it.
    dauthkey   -- authkey for the DFS connection.
    logdir     -- directory receiving the daemon's stdout/stderr/stdin files.
    piddir     -- directory receiving the daemon's pid file.
    properties -- arbitrary extra node properties.
    """
    # identity and connection endpoints
    self.id = getipaddress()
    self.queue = queue
    self.qauthkey = qauthkey
    self.dfs = dfs
    self.dauthkey = dauthkey
    # worker bookkeeping and lifecycle flags
    self.mpps = mpps
    self.properties = properties
    self.workers = {}
    self.shutdown = Value('i', 0)
    self.alive = True
    self.start_time = datetime.utcnow()
    self.connect()
    # daemonize: pid file lives in piddir, all standard streams in logdir
    name = self.__class__.__name__
    super(Node, self).__init__(
        pidfile=path.join(piddir, name + ".pid"),
        stdout=path.join(logdir, name + ".out"),
        stderr=path.join(logdir, name + ".err"),
        stdin=path.join(logdir, name + ".in")
    )
def connect(self):
    """
    Connect to the Queue (always) and to the DFS (only when one was
    configured) on the host/port pairs the Node was initialized with.
    """
    self.qconnect()
    # self.dfs defaults to None; the original membership test
    # `None not in self.dfs` raises TypeError when dfs is None,
    # so guard against the unconfigured case before inspecting the pair.
    if self.dfs is not None and None not in self.dfs:
        self.dconnect()
def qconnect(self):
    """
    Attempt to connect to the Queue on the host/port for which the Node
    was initialized.  If no connection can be made, keep retrying (one
    attempt per second) for as long as the Node is alive.  Once a
    connection is established, the remote methods registered below are
    available through self.impq.
    """
    # remove connection from cache:
    # BaseProxy has thread-local storage which caches the connection;
    # the cached connection would be reused for future managers, causing
    # "broken pipe" errors when creating a new manager for the same address.
    if self.queue in BaseProxy._address_to_local:
        if hasattr(BaseProxy._address_to_local[self.queue][0], 'connection'):
            del BaseProxy._address_to_local[self.queue][0].connection
    # register the remote methods exposed by the Queue's SyncManager
    SyncManager.register("get_streams")
    SyncManager.register("get_queue")
    SyncManager.register("get_store")
    SyncManager.register("get_properties")
    print "connecting to queue", self.queue
    # retry loop: transient network failures are expected during bootstrap
    while self.alive:
        try:
            self.impq= SyncManager(address= self.queue, authkey= self.qauthkey)
            self.impq.connect()
            print "connected to queue", self.queue
            break
        except (EOFError, IOError, SocketError) as e:
            print "could not connect ...trying again", str(e)
            sleep(1)
def dconnect(self):
"""
Attempts to connect to the DFS on the host/port for which the Node was initialized for.
If no connection can be made, Node will keep attempting to connect until a connection
can be established. Once a connection can be established the remove methods requested
will be registered.
"""
# remove connection from cache:
# BaseProxy class has thread local storage which caches the connection
# which is reused for future connections causing "borken pipe" errors on
# creating new manager.
if self.dfs in BaseProxy._address_to_local:
if hasattr(BaseProxy._address_to_local[self.dfs][0], 'connection'):
del BaseProxy._address_to_local[self.dfs][0].connection
# register handlers
SyncManager.register("get_nodes")
print "connecting to dfs", self.dfs
while self.alive:
try:
#.........这里部分代码省略.........
示例2: Impetus
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import get_queue [as 别名]
class Impetus(object):
"""
Multi-threaded library for interfacing with the Impetus system.
Hides threading considerations from the client. Determines callback
methods through introspection if callbacks are not explicitly stated.
Decorators are provided for the client to indicate methods which run on
the remote nodes and local process methods which consume the results.
Creates a single stream per instance. The client can create additional
streams through the Queue's remote methods via the "impq" handler.
"""
statuses= ("forked", "processed")
def __init__(self, address, authkey, taskdir= "tasks", id= None, **properties):
    """
    Create a stream and retrieve the stream's priority queue and data-store.

    address    -- (host, port) of the Queue's SyncManager.
    authkey    -- authentication key for the manager connection.
    taskdir    -- base directory under which per-stream task results are kept.
    id         -- explicit stream id; a uuid1 string is generated when None.
    properties -- arbitrary stream metadata forwarded to create_stream.
    """
    self.id= id if id else str(uuid1())
    self.ipaddress= getipaddress()
    self.address= address
    self.taskdir= path.join(taskdir, self.id)
    self.properties= properties
    # connect to the Queue manager and register the remote methods used here
    self.impq= SyncManager(address= self.address, authkey= authkey)
    self.impq.register("get_streams")
    self.impq.register("create_stream")
    self.impq.register("delete_stream")
    self.impq.register("get_store")
    self.impq.register("get_queue")
    self.impq.connect()
    self.jobs= []
    self.impq.create_stream(id= self.id, ipaddress= self.ipaddress, **properties)
    self.store= self.impq.get_store(id= self.id)
    self.queue= self.impq.get_queue(id= self.id)
    self.alive= True
    self._current_thread= None
    self._lock= Lock()
    self.threads= []
    self.errors= {}
    self.ready= {}
    self._progress= {}
    # create the per-stream task directory: a pre-existing directory is fine,
    # but unlike the original bare `except: pass`, do not silently swallow
    # real failures such as permission errors.
    try:
        makedirs(self.taskdir)
    except OSError:
        if not path.isdir(self.taskdir):
            raise
def __del__(self):
    """Best effort: delete the stream that was created during initialization."""
    # __init__ can fail before self.impq is assigned (e.g. the manager
    # connection raised); an unguarded access here would surface as a noisy
    # "Exception ignored in __del__" AttributeError at garbage collection.
    impq = getattr(self, "impq", None)
    if impq is not None:
        try:
            impq.delete_stream(self.id)
        except Exception:
            # exceptions in __del__ are unraisable anyway; the manager may
            # already be gone at interpreter shutdown.
            pass
@staticmethod
def node(target):
    """
    Decorator marking *target* as a method that runs on remote nodes.
    Such methods must be staticmethods, since the context in which they
    were defined cannot be serialized; the target is returned unchanged.
    """
    return target
@staticmethod
def startup(target):
    """
    Decorator that wraps *target* so it runs as the object's startup thread.
    """
    global _declaration_order
    def _process(self):
        target(self)
    # record where this method appeared in the class body so threads can be
    # started in declaration order, then advance the shared counter
    _process.order = _declaration_order
    _declaration_order += 1
    return _process
@staticmethod
def shutdown(target):
    """
    Decorator that arranges for *target* to be executed after all threads
    have terminated.  The wrapped method receives the ready and errors
    dicts, which hold file-handles pointing to the result files
    (ie, ../tasks/<task_id>/<method>.ok, .err) for each @process method,
    plus the progress map.
    """
    def _shutdown(self):
        target(self, self.ready, self.errors, self._progress)
    global _declaration_order
    # shutdown shares the declaration counter but deliberately does not
    # advance it: it always runs last
    _shutdown.order = _declaration_order
    return _shutdown
@staticmethod
def process(target):
"""
#.........这里部分代码省略.........