

Python SyncManager.connect Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.managers.SyncManager.connect. If you have been wondering what SyncManager.connect does, how to call it, or how it is used in practice, the hand-picked examples below should help. You can also explore further usage examples for the class the method belongs to, multiprocessing.managers.SyncManager.


Listed below are 10 code examples of SyncManager.connect, sorted by popularity by default. Upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
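
Before diving in, here is the pattern all ten examples share: a server process registers callables on a manager and serves them, while a client process registers the same names and calls connect() instead of start(). A minimal, self-contained Python 3 sketch of that split (the address, authkey, and get_data name here are illustrative, not taken from any project below):

from multiprocessing.managers import SyncManager

ADDRESS = ('127.0.0.1', 50000)   # illustrative address
AUTHKEY = b'secret'              # Python 3 requires a bytes authkey

def run_server():
    """Run in one process: owns the data and serves it."""
    data = {'answer': 42}
    SyncManager.register('get_data', callable=lambda: data)
    manager = SyncManager(address=ADDRESS, authkey=AUTHKEY)
    server = manager.get_server()
    server.serve_forever()

def run_client():
    """Run in another process: attaches to the already-running server."""
    SyncManager.register('get_data')        # client registers the name only
    manager = SyncManager(address=ADDRESS, authkey=AUTHKEY)
    manager.connect()                        # connect() instead of start()
    print(manager.get_data())                # proxy to the server-side dict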

Example 1: get_binary_matrix_from_service

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
def get_binary_matrix_from_service(q):
    global matrix_service
    if matrix_service is None:
        matrices = dict()
        matrix_service = SyncManager(address=("localhost", 50000), authkey="")
        SyncManager.register("get_matrix", lambda q: get_matrix(q, matrices))
        Process(target=lambda: matrix_service.get_server().serve_forever()).start()
    SyncManager.register("get_matrix")
    matrix_service.connect()
    return matrix_service.get_matrix(q)
Developer: kurtisz; Project: ConicBlockingSets; Lines: 12; Source: util.py
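
A note on Example 1: the server is started lazily inside a separate Process on first call, and the caller connects immediately afterwards, so there is a small race between the server binding its socket and the connect() call; a hardened version would retry the connection, as Examples 8 and 9 below do.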

Example 2: connect

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
   def connect(self):

      # register with Queue
      SyncManager.register('getPipeline')
      SyncManager.register('getStore')
      self.qInstance= self.opts.qInstance
      (self.qHost, self.qPort, self.qKey)= self.opts.queue.split(':')
      queue= SyncManager(address= (self.qHost, int(self.qPort)), authkey= self.qKey)
      queue.connect()
      self.pipeline= queue.getPipeline()
      self.store= queue.getStore()
Developer: richardjmarini; Project: Impetus-old; Lines: 13; Source: dfs.py

Example 3: Queue

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
# note: this wrapper shadows multiprocessing.Queue, so the real queue class
# is imported under an alias (import added here for clarity)
from multiprocessing import Queue as MPQueue

class Queue():
    """Class Queue
    """

    __type = None
    __manager = None
    __address = None
    __authkey = None

    def __init__(self, qtype, address, authkey=''):
        """Class constructor

        Called when object is initialized

        Args:
           qtype (int): queue type, server|client
           address (str): queue address
           authkey (str): authentication key

        Raises:
           error: ValueError

        """

        if qtype in (queue.QUEUE_TYPE_SERVER, queue.QUEUE_TYPE_CLIENT):
            self.__type = qtype
        else:
            raise ValueError('Invalid Queue type')

        ''' Checking for address format AF_INET '''
        if address.find(':') > 0:
            host, port = address.split(':')
            address = (host, int(port))

        self.__address = address
        self.__authkey = authkey

    def create(self):
        """Methods creates queue server

        Args:
           none

        Returns:
           void

        Raises:
           error: ValueError

        """

        if self.__type != queue.QUEUE_TYPE_SERVER:
            raise ValueError(
                'This operation cannot be done on this queue type')

        q = MPQueue()  # a real multiprocessing queue, not this wrapper class
        SyncManager.register('get_queue', callable=lambda: q)
        self.__manager = SyncManager(self.__address, self.__authkey)
        self.__manager.start()

    def destroy(self):
        """Methods destroys queue

        Args:
           none

        Returns:
           void

        """

        self.__manager.shutdown()

    def connect(self):
        """Methods connects to queue

        Args:
           none

        Returns:
           void

        Raises:
           error: ValueError

        """

        if self.__type != queue.QUEUE_TYPE_CLIENT:
            raise ValueError(
                'This operation cannot be done on this queue type')

        SyncManager.register('get_queue')  # client side: register the name only
        self.__manager = SyncManager(self.__address, self.__authkey)
        self.__manager.connect()
Developer: hydratk; Project: hydratk; Lines: 96; Source: queue.py
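
Stripped of the hydratk plumbing, the wrapper reduces to the standard SyncManager split: start() on the server side (create), connect() on the client side (connect), with both sides registering the same 'get_queue' typeid. A self-contained Python 3 sketch of that split (addresses, authkey, and function names are illustrative, not hydratk's API):

from multiprocessing import Queue
from multiprocessing.managers import SyncManager

def serve(address=('127.0.0.1', 50001), authkey=b'key'):
    """Like create(): start a manager server process owning a shared queue."""
    shared_q = Queue()
    # note: a lambda in register() needs a fork-based start method;
    # on spawn platforms use a module-level function instead
    SyncManager.register('get_queue', callable=lambda: shared_q)
    manager = SyncManager(address=address, authkey=authkey)
    manager.start()
    return manager

def attach(address=('127.0.0.1', 50001), authkey=b'key'):
    """Like connect(): join the running server and fetch the queue proxy."""
    SyncManager.register('get_queue')   # client registers the name only
    manager = SyncManager(address=address, authkey=authkey)
    manager.connect()
    return manager.get_queue()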

Example 4: Worker

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
class Worker(Process):
   
   def __init__(self, opts, id, availability):

      super(Worker, self).__init__()
      self.opts= opts
      self.id= id
      self.availability= availability

      self.connect()

      self.alive= True
      self.sleep= self.opts.sleep

   def connect(self):
      (qHost, qPort, qKey)= self.opts.queue.split(':')
      self.queue= SyncManager(address= (qHost, int(qPort)), authkey= qKey)
      self.queue.connect()
      self.pipeline= self.queue.getPipeline()
      self.store= self.queue.getStore()

      # register with DFS
      self.dfs = None
      self.instances= dict()
      if self.opts.dfs is not None:
         SyncManager.register('getInstances')
         (dHost, dPort, dKey)= self.opts.dfs.split(':')
         self.dfs= SyncManager(address= (dHost, int(dPort)), authkey= dKey)
         self.dfs.connect()
         self.instances= self.dfs.getInstances()


   def handleTransport(self, processId, transport, results):
      '''
      Handles requested transport types
      '''

      if transport == 's3' and self.opts.s3 != None:

         try:
            (accessKey, secretKey, bucket)= self.opts.s3.split(':')

            s3file= S3File(
               accessKey= accessKey, 
               secretKey= secretKey,
               bucket= bucket,
               processId= processId,
               mode= 'w'
            )

            s3file.write(results)
            results= s3file.getName()
            s3file.close()
            transport= 's3'

         except Exception, e:
            print >> stderr, "s3 transport failure using data store instead: %s" % (str(e))

      elif transport == 'file' and self.opts.taskDir != None:

         try:
            fileStore= FileStore(proxy= self.queue, processId= processId, mode= 'w')
            fileStore.write(results)
            results= fileStore.getName()
            fileStore.close()
            transport= 'file'

         except Exception, e:
            print >> stderr, "fileStore transport failure using data store instead: %s" % (str(e))
Developer: richardjmarini; Project: Impetus-old; Lines: 71; Source: node.py

Example 5: Manager

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
class Manager(object):

   def __init__(self, opts):

      self.opts= opts
      super(Manager, self).__init__()

      self.sleep= self.opts.sleep
      self.alive= True
      self.workers= dict()

      self.connect()

      '''
      The fully qualified domain name for the aws ec2 instance
      should match the instance's private_dns_name
      '''
      self.id= getfqdn()

   def connect(self):

      # register with queue
      SyncManager.register('getPipeline')
      SyncManager.register('getStore')
      SyncManager.register('setFileContents')
      SyncManager.register('getFileContents')
      SyncManager.register('deleteFile')

      (qHost, qPort, qKey)= self.opts.queue.split(':')
      self.queue= SyncManager(address= (qHost, int(qPort)), authkey= qKey)
      self.queue.connect()
      self.pipeline= self.queue.getPipeline()
      self.store= self.queue.getStore()

      # register with dfs
      self.dfs = None
      self.instances= dict()
      if self.opts.dfs != None:
         SyncManager.register('getInstances')
         (dHost, dPort, dKey)= self.opts.dfs.split(':')
         self.dfs= SyncManager(address= (dHost, int(dPort)), authkey= dKey)
         self.dfs.connect()
         self.instances= self.dfs.getInstances()

   def run(self):

      while self.alive:
     
         try:  
            # stop tracking dead workers
            [self.workers.pop(pid) for (pid, worker) in self.workers.items() if not worker.is_alive()]

            instanceStore= self.instances.get(self.id, dict())

            # update dfs worker availability
            availability= self.opts.maxProcesses - len(self.workers) 
            self.instances.update([(self.id, dict(
               id= self.id,
               status= 'running',
               capacity= self.opts.maxProcesses,
               availability= availability,
               lastTask= instanceStore.get('lastTask', datetime.strftime(datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.000Z'))
            ))])

            print "========================================================"  
            print "Queue:", self.pipeline.qsize()
            print "Store:", len(self.store)
            print "Capacity:", self.opts.maxProcesses
            print 'Workers:', len(self.workers)
            print "Availability:", self.opts.maxProcesses - len(self.workers)
            print "--------------------------------------------------------"  
          
            # create workers
            for i in range(min(self.pipeline.qsize(), self.opts.maxProcesses - len(self.workers))):
               worker= Worker(self.opts, self.id, availability)
               worker.start()
               self.workers[worker.pid]= worker
         except EOFError:
            self.connect()
         except IOError:
            self.connect()
 
         sleep(self.sleep)

      # if manager is shutting down -- then wait for workers to finish
      print "manager shutting down"
      map(lambda (pid, worker): worker.join(), self.workers.items())

   def stop(self):

      print "de-registering with dfs -- all workers down"

      ''' 
      tell dfs we are shutting down and have no capacity/availability;
      if dfs doesn't know who we are then create a default stub
      '''
      self.instances.update([(self.id, dict(
         id= self.id,
         status= 'running',
#......... rest of the code omitted .........
Developer: richardjmarini; Project: Impetus-old; Lines: 103; Source: node.py
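
The heart of Manager.run() is a reap-and-refill loop: drop workers that have exited, then spawn new ones up to capacity, bounded by the number of queued jobs. Distilled into a standalone Python 3 helper (illustrative; the parameter names are mine, not Impetus's API):

import time

def supervise(spawn_worker, backlog, max_procs, workers, period=1.0):
    """Reap finished workers, then refill up to capacity.

    spawn_worker: zero-arg callable returning an unstarted Process
    backlog: zero-arg callable returning the number of queued jobs
    workers: dict mapping pid -> Process, mutated in place
    """
    while True:
        # stop tracking workers that have exited
        for pid in [p for p, w in list(workers.items()) if not w.is_alive()]:
            workers.pop(pid)
        # refill up to capacity, bounded by pending work
        for _ in range(min(backlog(), max_procs - len(workers))):
            w = spawn_worker()
            w.start()
            workers[w.pid] = w
        time.sleep(period)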

Example 6: f

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
#!/usr/bin/env python

from multiprocessing.managers import SyncManager
import sys

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = SyncManager(address=('127.0.0.1', int(sys.argv[1])), authkey='abc')

    manager.connect()

    print manager.list()

    print 'Done'


Developer: benthomasson; Project: mp; Lines: 21; Source: local_manager.py
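
The snippet above is client-only; for it to print anything, a manager server must already be listening on the given port. A companion server might look like the following (assumed for illustration, it is not part of the original project; note that Python 3 requires a bytes authkey):

#!/usr/bin/env python
from multiprocessing.managers import SyncManager
import sys

if __name__ == '__main__':
    # serve the built-in SyncManager types (list, dict, Queue, ...) on the given port
    manager = SyncManager(address=('127.0.0.1', int(sys.argv[1])), authkey=b'abc')
    server = manager.get_server()
    print('serving on %s:%s' % server.address)
    server.serve_forever()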

Example 7: Task

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
class Task(object):

    statuses = ["waiting", "running", "ready", "error"]
    # statuses= dict([(value, id) for (id, value) in enumerate(['waiting', 'running', 'ready', 'error'], start= 1)])

    def __init__(self, queue, taskId=None, s3=None, taskDir=None):
        """creates an instance of Task
      :param queue: <host>:<port>:<security key> of queue instance
      :param taskId: optional, auto-generated guid representing this
      instance of Task. all jobs forked with the current instance will
      assume this taskId. optionally, the developer may pass in a taskId that is
      meaningful to them.
      :param s3: <access key>:<secret key>:<bucket> of s3 resource to use
      when s3 transport is specified during .forkTask()
      :param taskDir: output directory to write task results to
      :returns: instance of Task
      """

        self.taskId = str(uuid1()) if taskId is None else taskId
        self.s3 = s3
        self.subTaskId = 0
        self.subTasks = dict()
        self.sleep = 0.1
        self.taskDir = taskDir
        self.lock = Lock()

        (qHost, qPort, qKey) = queue.split(":")
        SyncManager.register("getPipeline")
        SyncManager.register("getStore")
        SyncManager.register("getFileContents")
        SyncManager.register("setFileContents")
        SyncManager.register("deleteFile")

        self.queue = SyncManager(address=(qHost, int(qPort)), authkey=qKey)
        self.queue.connect()

        self.pipeline = self.queue.getPipeline()
        self.store = self.queue.getStore()

        super(Task, self).__init__()

    def __genProcessId__(self, subTaskId):
        """used internally by Task to create a processId for the 
      users task.
      :param subTaskId: the subTaskId of the task to be associated with this
      processId.
      :returns: a valid processId representing the process for the task.
      """

        return "%s.%s" % (self.getTaskId(), subTaskId)

    def getTaskId(self):
        """gets the current taskId for the instance of Task
      :returns: taskId for the instance of Task
      """

        return self.taskId

    def getSubTaskId(self):
        """gets the last known subTaskId
      :returns: the last forked subTaskId
      """

        return self.subTaskId

    def getSubTaskIds(self):
        """gets a list of subTaskIds associated with the current instance of 
      Task.
      :returns: list of subTaskIds forked by the current instance of Task
      """
        with self.lock:
            subTasks = deepcopy(self.subTasks)
        return subTasks

    def handleTransport(self, transport, results, delete=False):
        """makes the results data from the optional transports accessible
      through the results interface of the data store.  typically this method
      is used internally by Task but is exposed to the developer as
      there may be situations where the developer may want to resolve
      the transport themselves.
      :param transport: transport type (eg, 's3', 'file')
      :param results: the results store from the data store
      :param delete: True|False delete the originating resource (s3/file) after
      resolving it.
      :returns: returns results store with the transport resolved
      """

        # if transport is s3 then load the results file
        if transport == "s3":

            if self.s3 != None:

                try:
                    (accessKey, secretKey, bucket) = self.s3.split(":")
                except Exception, e:
                    raise ("invalid s3 transport credentials: %s" % (str(e)))

                try:
                    s3file = S3File(accessKey, secretKey, bucket, processId=path.basename(results), mode="r")
                    results = s3file.read()
#......... rest of the code omitted .........
Developer: richardjmarini; Project: Impetus-old; Lines: 103; Source: task.py

Example 8: DFS

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
class DFS(Daemon):
   """
   The Dynamic Frequency Scaler is responsible for 
   starting up instances and bootstrapping them with Node 
   to start processing jobs.  DFS will gather update statistics 
   from all the Nodes that are registered with it and spin up/down
   instances as needed. A configurable "billing period" can be set
   so DFS will make the most effective use of the instance for the 
   duration of the billing cycle.  Idle Nodes will be terminated
   when the end of the billing cycle is reached.  
   """

   billing_period= 3600
   shutdown_period= billing_period - 600
   idle_time= 300
   seconds_per_day= 86400

   def __init__(self, address, authkey, queue, qauthkey, mnon, mpps, ec2= None, bootstrap= None, deploykey= None, logdir= curdir, piddir= curdir):
      """
      Initializes the available remote methods 
      and the I/O streams used by the daemon.
      Establishes a connection to the Queue.
      """
      super(DFS, self).__init__(
         pidfile= path.join(piddir, self.__class__.__name__ + ".pid"),
         stdout= path.join(logdir, self.__class__.__name__ + ".out"),
         stderr= path.join(logdir, self.__class__.__name__ + ".err"),
         stdin= path.join(logdir, self.__class__.__name__ + ".in")
      )

      self.id= getipaddress()

      self.address= address
      self.authkey= authkey

      self.queue= queue
      self.qauthkey= qauthkey

      self.mnon= mnon
      self.mpps= mpps
      self.bootstrap= bootstrap
      self.deploykey= deploykey

      self.nodes= {}

      self.ec2= ec2
      if self.ec2 != None:
         (self.access_key, self.security_key, self.ami_id, self.security_group, self.key_name, self.instance_type)= self.ec2.split(',')
         self.ec2= EC2Connection(self.access_key, self.security_key)
         print "Connected to EC2", self.ec2

      self.alive= True

      self.manager= SyncManager(address= self.address, authkey= self.authkey)
      self.manager.register("get_nodes", callable= lambda: self.nodes, proxytype= DictProxy)

      self.connect()
      

   def connect(self):
      """
      Attempts to connect to the Queue on the host/port for which the DFS was initialized.
      If no connection can be made, DFS will keep attempting to connect until a connection
      can be established.  Once a connection is established the remote methods requested will be
      registered.
      """

      # remove connection from cache:
      # BaseProxy class has thread local storage which caches the connection
      # which is reused for future connections, causing "broken pipe" errors on
      # creating a new manager.
      if self.queue in BaseProxy._address_to_local:
         del BaseProxy._address_to_local[self.queue][0].connection

      # register handlers
      SyncManager.register("get_streams")
      SyncManager.register("get_queue")
      SyncManager.register("get_store")
      SyncManager.register("get_properties")

      print "connecting to queue", self.queue
      while self.alive:

         try:
            self.impq= SyncManager(address= self.queue, authkey= self.qauthkey)
            self.impq.connect()
            break
         except (EOFError, IOError, SocketError) as e:
            print "could not connect ...trying again", str(e)
            sleep(1)

   def get_bootstrap(self):
      
      fh= open(self.bootstrap, "r")
      template= Template(fh.read())
      fh.close()

      fh= open(self.deploykey, "r")
      deploykey= fh.read()
      fh.close()
#......... rest of the code omitted .........
Developer: richardjmarini; Project: Impetus; Lines: 103; Source: impetus.py
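
DFS.connect() combines two recurring moves: evict the stale thread-local connection that BaseProxy caches for the address, then retry connect() until the queue answers. The same pattern reappears in Example 9. Distilled into a reusable Python 3 helper (a sketch under the same assumptions; it relies on the private BaseProxy._address_to_local cache exactly as the original does):

from multiprocessing.managers import SyncManager, BaseProxy
from socket import error as SocketError
from time import sleep

def connect_with_retry(address, authkey, names=('get_queue', 'get_store')):
    # evict any cached connection for this address: BaseProxy keeps
    # thread-local connections that go stale and cause "broken pipe" errors
    if address in BaseProxy._address_to_local:
        local = BaseProxy._address_to_local[address][0]
        if hasattr(local, 'connection'):
            del local.connection
    for name in names:                  # register the remote methods by name
        SyncManager.register(name)
    while True:
        try:
            manager = SyncManager(address=address, authkey=authkey)
            manager.connect()
            return manager
        except (EOFError, IOError, SocketError) as e:
            print('could not connect, retrying: %s' % e)
            sleep(1)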

Example 9: Node

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
class Node(Daemon):
   """
   Node is started up on the remote instance via the bootstrapping process for that instance.
   The node is responsible for tracking active streams and managing the workers that process
   the jobs from those streams.  If a stream goes idle (ie, there are no more jobs in the stream's
   queue and all workers have died) then node will stop tracking the stream.  If jobs re-appear
   on the stream Node will spawn new workers to process those jobs.  If a new stream appears
   Node will spawn new workers to process the jobs on that stream.  Each worker is an independent
   concurrent process that inherits the stream to process from the Node.
   """

   def __init__(self, queue, qauthkey, mpps= 5, dfs= None, dauthkey= None, logdir= curdir, piddir= curdir, **properties):
      """Initialize the Node's I/O stream and connect to the Queue and/or DFS."""     

      self.id= getipaddress()
      self.queue= queue
      self.qauthkey= qauthkey
      self.mpps= mpps
      self.dfs= dfs
      self.dauthkey= dauthkey
      self.properties= properties
      self.shutdown= Value('i', 0)

      self.workers= {}
      self.alive= True
      self.start_time= datetime.utcnow()

      self.connect()
  
      super(Node, self).__init__(
         pidfile= path.join(piddir, self.__class__.__name__ + ".pid"),
         stdout= path.join(logdir, self.__class__.__name__ + ".out"),
         stderr= path.join(logdir, self.__class__.__name__ + ".err"),
         stdin= path.join(logdir, self.__class__.__name__ + ".in")
      )

   def connect(self):
      """Connects to the Queue and/or DFS on the host/port for whic hthe Node was intialized for."""

      self.qconnect()
      if None not in self.dfs:
         self.dconnect()

   def qconnect(self):
      """
      Attempts to connect to the Queue on the host/port for which the Node was initialized.
      If no connection can be made, Node will keep attempting to connect until a connection
      can be established.  Once a connection is established the remote methods requested will be
      registered.
      """

      # remove connection from cache:
      # BaseProxy class has thread local storage which caches the connection
      # which is reused for future connections, causing "broken pipe" errors on
      # creating a new manager.
      if self.queue in BaseProxy._address_to_local:
         if hasattr(BaseProxy._address_to_local[self.queue][0], 'connection'):
            del BaseProxy._address_to_local[self.queue][0].connection

      # register handlers
      SyncManager.register("get_streams")
      SyncManager.register("get_queue")
      SyncManager.register("get_store")
      SyncManager.register("get_properties")

      print "connecting to queue", self.queue
      while self.alive:

         try:
            self.impq= SyncManager(address= self.queue, authkey= self.qauthkey)
            self.impq.connect() 
            print "connected to queue", self.queue
            break
         except (EOFError, IOError, SocketError) as e:
            print "could not connect ...trying again", str(e)
            sleep(1)

   def dconnect(self):
      """
      Attempts to connect to the DFS on the host/port for which the Node was initialized.
      If no connection can be made, Node will keep attempting to connect until a connection
      can be established. Once a connection is established the remote methods requested
      will be registered.
      """

      # remove connection from cache:
      # BaseProxy class has thread local storage which caches the connection
      # which is reused for future connections, causing "broken pipe" errors on
      # creating a new manager.
      if self.dfs in BaseProxy._address_to_local:
         if hasattr(BaseProxy._address_to_local[self.dfs][0], 'connection'):
            del BaseProxy._address_to_local[self.dfs][0].connection

      # register handlers
      SyncManager.register("get_nodes")

      print "connecting to dfs", self.dfs
      while self.alive:

         try:
#......... rest of the code omitted .........
Developer: richardjmarini; Project: Impetus; Lines: 103; Source: impetus.py
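
A small but telling difference from Example 8: here the cache eviction first checks hasattr(..., 'connection') before deleting, so a reconnect attempted before any proxy call has been made does not raise AttributeError, whereas DFS.connect() deletes the attribute unconditionally.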

Example 10: Impetus

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import connect [as alias]
class Impetus(object):
   """
   Multi-threaded library for interfacing with the Impetus system. 
   Hides threading considerations from the client.  Determines callback
   methods through introspection if callbacks are not explicitly stated. 
   Decorators are provided for the client to indicate methods which run on 
   the remote nodes and local process methods which consume the results. 
   Creates a single stream per instance.  The client can create additional
   streams through the Queue's remote methods via the "impq" handler. 
   """

   statuses= ("forked", "processed")

   def __init__(self, address, authkey, taskdir= "tasks", id= None, **properties):
      """Creates a stream and retrieves the streams priority queue and data-store."""

      self.id= id if id else str(uuid1())
      self.ipaddress= getipaddress()

      self.address= address
      self.taskdir= path.join(taskdir, self.id)
      self.properties= properties

      self.impq= SyncManager(address= self.address, authkey= authkey)
      self.impq.register("get_streams")
      self.impq.register("create_stream")
      self.impq.register("delete_stream")
      self.impq.register("get_store")
      self.impq.register("get_queue")
      self.impq.connect()

      self.jobs= []
      self.impq.create_stream(id= self.id, ipaddress= self.ipaddress, **properties)
      self.store= self.impq.get_store(id= self.id)
      self.queue= self.impq.get_queue(id= self.id)
      self.alive= True
      self._current_thread= None
      self._lock= Lock()
      self.threads= []
      self.errors= {}
      self.ready= {}
      self._progress= {}


      try:
         makedirs(self.taskdir)
      except:
         pass

   def __del__(self):
      """Deletes the stream that was created during initialization."""

      self.impq.delete_stream(self.id)

   @staticmethod
   def node(target):
      """
      All methods that are to run on remote nodes must be staticmethods,
      as the context in which the method was defined cannot be serialized.
      """

      return target

   @staticmethod
   def startup(target):
      """
      Sets up the startup method for the object to run as a thread.
      """

      def _process(self):

         target(self)

      global _declaration_order
      _process.order= _declaration_order
      _declaration_order+= 1
      return _process

   @staticmethod
   def shutdown(target):
      """
      Sets up the shutdown method to be executed
      after all threads have been terminated.  The 
      ready and errors parameters will contain a dict 
      of file-handles pointing to the results files
      (ie, ../tasks/<task_id>/<method>.ok|.err)
      for each @process method.
      """
   
      def _shutdown(self):

         target(self, self.ready, self.errors, self._progress)

      global _declaration_order
      _shutdown.order= _declaration_order
      return _shutdown

   @staticmethod
   def process(target):
      """
#......... rest of the code omitted .........
Developer: richardjmarini; Project: Impetus; Lines: 103; Source: impetus.py
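
The @startup, @process, and @shutdown decorators above tag each wrapped method with a module-level _declaration_order counter, which is what lets the library discover the client's methods by introspection and run them in the order they were declared. A minimal sketch of that discovery step (illustrative, not Impetus's exact runner):

import inspect

def run_in_declared_order(obj):
    """Call every decorated method of obj, sorted by its recorded .order tag."""
    tagged = [m for _, m in inspect.getmembers(obj, predicate=inspect.ismethod)
              if hasattr(m, 'order')]
    for method in sorted(tagged, key=lambda m: m.order):
        method()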


Note: the multiprocessing.managers.SyncManager.connect examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.