

Python SyncManager.getPipeline Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.managers.SyncManager.getPipeline. If you have been wondering what exactly SyncManager.getPipeline does, how to use it, or want to see it used in real code, the hand-picked examples below should help. You can also explore further usage examples of the class this method belongs to, multiprocessing.managers.SyncManager.


The sections below show 4 code examples of the SyncManager.getPipeline method, sorted by popularity by default.
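All of the examples below call getPipeline() and getStore() on a connected SyncManager, which only works because a queue server process registered callables under those names beforehand. That server side is not shown on this page; the following is a minimal Python 3 sketch of what it could look like, assuming the pipeline is a Queue and the store is a plain dict (the class name, address and authkey are placeholders, not the real Impetus implementation):

from queue import Queue
from multiprocessing.managers import SyncManager, DictProxy

# Hypothetical minimal queue server for the client snippets on this page.
pipeline = Queue()   # shared task pipeline
store = dict()       # shared result/data store

class QueueManager(SyncManager):
    pass

# Register callables under the names the clients expect; DictProxy exposes
# the usual dict methods (len, getitem, update, ...) on the store proxy.
QueueManager.register('getPipeline', callable=lambda: pipeline)
QueueManager.register('getStore', callable=lambda: store, proxytype=DictProxy)

if __name__ == '__main__':
    # address/authkey mirror the '<host>:<port>:<authkey>' strings used by the clients
    manager = QueueManager(address=('0.0.0.0', 50000), authkey=b'impetus')
    server = manager.get_server()
    server.serve_forever()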

Example 1: connect

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or alternatively: from multiprocessing.managers.SyncManager import getPipeline [as alias]
   def connect(self):

      # register with Queue
      SyncManager.register('getPipeline')
      SyncManager.register('getStore')
      self.qInstance= self.opts.qInstance
      (self.qHost, self.qPort, self.qKey)= self.opts.queue.split(':')
      queue= SyncManager(address= (self.qHost, int(self.qPort)), authkey= self.qKey)
      queue.connect()
      self.pipeline= queue.getPipeline()
      self.store= queue.getStore()
Developer ID: richardjmarini, Project: Impetus-old, Lines of code: 13, Source file: dfs.py
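Note that SyncManager.register('getPipeline') on the client side takes no callable: registering the bare typeid is enough for the connected manager to expose a getPipeline() proxy whose calls run in the server process. A minimal standalone usage sketch, assuming a server like the one sketched above (address and authkey are placeholders; in dfs.py they come from opts.queue formatted as '<host>:<port>:<authkey>', and in Python 3 the authkey must be bytes):

from multiprocessing.managers import SyncManager

# Client-side registration only needs the typeid, not a callable.
SyncManager.register('getPipeline')
SyncManager.register('getStore')

if __name__ == '__main__':
    queue = SyncManager(address=('localhost', 50000), authkey=b'impetus')
    queue.connect()
    pipeline = queue.getPipeline()
    store = queue.getStore()
    pipeline.put({'job': 'example'})   # the pipeline proxy behaves like a Queue
    print(len(store))                  # the store proxy behaves like a dict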

Example 2: Manager

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or alternatively: from multiprocessing.managers.SyncManager import getPipeline [as alias]
class Manager(object):

   def __init__(self, opts):

      self.opts= opts
      super(Manager, self).__init__()

      self.sleep= self.opts.sleep
      self.alive= True
      self.workers= dict()

      self.connect()

      '''
      The fully qualified domain name of the AWS EC2 instance
      should match the instance's private_dns_name
      '''
      self.id= getfqdn()

   def connect(self):

      # register with queue
      SyncManager.register('getPipeline')
      SyncManager.register('getStore')
      SyncManager.register('setFileContents')
      SyncManager.register('getFileContents')
      SyncManager.register('deleteFile')

      (qHost, qPort, qKey)= self.opts.queue.split(':')
      self.queue= SyncManager(address= (qHost, int(qPort)), authkey= qKey)
      self.queue.connect()
      self.pipeline= self.queue.getPipeline()
      self.store= self.queue.getStore()

      # register with dfs
      self.dfs = None
      self.instances= dict()
      if self.opts.dfs != None:
         SyncManager.register('getInstances')
         (dHost, dPort, dKey)= self.opts.dfs.split(':')
         self.dfs= SyncManager(address= (dHost, int(dPort)), authkey= dKey)
         self.dfs.connect()
         self.instances= self.dfs.getInstances()

   def run(self):

      while self.alive:
     
         try:  
            # stop tracking dead workers
            [self.workers.pop(pid) for (pid, worker) in self.workers.items() if not worker.is_alive()]

            instanceStore= self.instances.get(self.id, dict())

            # update dfs worker availability
            availability= self.opts.maxProcesses - len(self.workers) 
            self.instances.update([(self.id, dict(
               id= self.id,
               status= 'running',
               capacity= self.opts.maxProcesses,
               availability= availability,
               lastTask= instanceStore.get('lastTask', datetime.strftime(datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.000Z'))
            ))])

            print "========================================================"  
            print "Queue:", self.pipeline.qsize()
            print "Store:", len(self.store)
            print "Capacity:", self.opts.maxProcesses
            print 'Workers:', len(self.workers)
            print "Availability:", self.opts.maxProcesses - len(self.workers)
            print "--------------------------------------------------------"  
          
            # create workers
            for i in range(min(self.pipeline.qsize(), self.opts.maxProcesses - len(self.workers))):
               worker= Worker(self.opts, self.id, availability)
               worker.start()
               self.workers[worker.pid]= worker
         except EOFError:
            self.connect()
         except IOError:
            self.connect()
 
         sleep(self.sleep)

      # if manager is shutting down -- then wait for workers to finish
      print "manager shutting down"
      map(lambda (pid, worker): worker.join(), self.workers.items())

   def stop(self):

      print "de-registering with dfs -- all workers down"

      ''' 
      tell dfs we are shutting down and have no capacity/availability
      if dfs doesn't know who we are then create a default stub
      '''
      self.instances.update([(self.id, dict(
         id= self.id,
         status= 'running',
#......... the rest of the code is omitted here .........
Developer ID: richardjmarini, Project: Impetus-old, Lines of code: 103, Source file: node.py
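A detail worth noting in this example: run() rebuilds the whole per-instance record and writes it back with instances.update([(self.id, dict(...))]) instead of mutating self.instances[self.id] in place. With a SyncManager dict proxy, changes made to a nested value only affect a local copy and never reach the server. A small sketch of that behaviour, using a plain Manager().dict() for illustration (names are illustrative):

from multiprocessing import Manager

if __name__ == '__main__':
    manager = Manager()
    instances = manager.dict()

    # Mutating a nested value changes only a local copy ...
    instances['node-1'] = dict(availability=4)
    record = instances['node-1']
    record['availability'] = 3
    print(instances['node-1']['availability'])   # still 4

    # ... so the whole record has to be rebuilt and written back,
    # which is exactly what the update([...]) calls above do.
    instances.update([('node-1', record)])
    print(instances['node-1']['availability'])   # now 3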

Example 3: Worker

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or alternatively: from multiprocessing.managers.SyncManager import getPipeline [as alias]
class Worker(Process):
   
   def __init__(self, opts, id, availability):

      super(Worker, self).__init__()
      self.opts= opts
      self.id= id
      self.availability= availability

      self.connect()

      self.alive= True
      self.sleep= self.opts.sleep

   def connect(self):
      (qHost, qPort, qKey)= self.opts.queue.split(':')
      self.queue= SyncManager(address= (qHost, int(qPort)), authkey= qKey)
      self.queue.connect()
      self.pipeline= self.queue.getPipeline()
      self.store= self.queue.getStore()

      # register with DFS
      self.dfs = None
      self.instances= dict()
      if self.opts.dfs != None:
         SyncManager.register('getInstances')
         (dHost, dPort, dKey)= self.opts.dfs.split(':')
         self.dfs= SyncManager(address= (dHost, int(dPort)), authkey= dKey)
         self.dfs.connect()
         self.instances= self.dfs.getInstances()


   def handleTransport(self, processId, transport, results):
      '''
      Handles requested transport types
      '''

      if transport == 's3' and self.opts.s3 != None:

         try:
            (accessKey, secretKey, bucket)= self.opts.s3.split(':')

            s3file= S3File(
               accessKey= accessKey, 
               secretKey= secretKey,
               bucket= bucket,
               processId= processId,
               mode= 'w'
            )

            s3file.write(results)
            results= s3file.getName()
            s3file.close()
            transport= 's3'

         except Exception, e:
            print >> stderr, "s3 transport failure using data store instead: %s" % (str(e))

      elif transport == 'file' and self.opts.taskDir != None:

         try:
            fileStore= FileStore(proxy= self.queue, processId= processId, mode= 'w')
            fileStore.write(results)
            results= fileStore.getName()
            fileStore.close()
            transport= 'file'

         except Exception, e:
            print >> stderr, "fileStore transport failure using data store instead: %s" % (str(e))
Developer ID: richardjmarini, Project: Impetus-old, Lines of code: 71, Source file: node.py
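handleTransport() follows a simple fallback pattern: it tries to persist the results through the requested transport (S3File or FileStore, Impetus classes not shown on this page), replaces the inline results with the returned resource name, and on any failure keeps the results inline so they fall back to the shared data store. A generic Python 3 sketch of that pattern, with a hypothetical writer callable standing in for those classes:

from sys import stderr

def handle_transport(transport, process_id, results, writers):
    """Illustrative fallback pattern only. `writers` maps a transport name to
    a callable (stand-in for S3File/FileStore) that persists the results and
    returns a resource name; on failure the results stay inline ('store')."""
    writer = writers.get(transport)
    if writer is None:
        return 'store', results
    try:
        return transport, writer(process_id, results)
    except Exception as e:
        print("%s transport failure, using data store instead: %s" % (transport, e), file=stderr)
        return 'store', results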

Example 4: Task

# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or alternatively: from multiprocessing.managers.SyncManager import getPipeline [as alias]
class Task(object):

    statuses = ["waiting", "running", "ready", "error"]
    # statuses= dict([(value, id) for (id, value) in enumerate(['waiting', 'running', 'ready', 'error'], start= 1)])

    def __init__(self, queue, taskId=str(uuid1()), s3=None, taskDir=None):
        """creates an instance of Task
      :param queue: <host>:<port>:<security key> of queue instance
      :param taskId: optional, auto generated guid representing this 
      instance of Task. all jobs forked with the current instance will
      assume this taskId. optionally, the developer may pass in a taskId that is
      meaningful to them.
      :param s3: <access key>:<secret key>:<bucket> of s3 resource to use
      when s3 transport is specified during .forkTask()
      :param taskDir: output directory to write task results to
      :returns: instance of Task
      """

        self.taskId = str(uuid1()) if taskId == None else taskId
        self.s3 = s3
        self.subTaskId = 0
        self.subTasks = dict()
        self.sleep = 0.1
        self.taskDir = taskDir
        self.lock = Lock()

        (qHost, qPort, qKey) = queue.split(":")
        SyncManager.register("getPipeline")
        SyncManager.register("getStore")
        SyncManager.register("getFileContents")
        SyncManager.register("setFileContents")
        SyncManager.register("deleteFile")

        self.queue = SyncManager(address=(qHost, int(qPort)), authkey=qKey)
        self.queue.connect()

        self.pipeline = self.queue.getPipeline()
        self.store = self.queue.getStore()

        super(Task, self).__init__()

    def __genProcessId__(self, subTaskId):
        """used internally by Task to create a processId for the 
      users task.
      :param subTaskId: the subTaskId of the task to be associated with this
      processId.
      :returns: a valid processId representing the process for the task.
      """

        return "%s.%s" % (self.getTaskId(), subTaskId)

    def getTaskId(self):
        """gets the current taskID for the instance of NoddleTask
      :returns: taskId of for the instance of Task
      """

        return self.taskId

    def getSubTaskId(self):
        """gets the last known subTaskId
      :returns: the last forked subTaskId
      """

        return self.subTaskId

    def getSubTaskIds(self):
        """gets a list of subTaskIds associated with the current instance of 
      Task.
      :returns: list of subTaskIds forked by the current instance of Task
      """
        with self.lock:
            subTasks = deepcopy(self.subTasks)
        return subTasks

    def handleTransport(self, transport, results, delete=False):
        """makes the results data from the optional transports accesable
      through the results interface of the data store.  typically this method
      is used internally by Task but is exposed to the developer as 
      there may be situations where the developer may want to resolve
      the transport themselves. 
      :param transport: transport type (eg, 's3', 'file')
      :param results: the results store from the data store
      :param delete: True|False delete the originating resource (s3/file) after
      resolving it.
      :returns: returns results store with the transport resolved
      """

        # if transport is s3 then load the results file
        if transport == "s3":

            if self.s3 != None:

                try:
                    (accessKey, secretKey, bucket) = self.s3.split(":")
                except Exception, e:
                    raise ("invalid s3 transport credentials: %s" % (str(e)))

                try:
                    s3file = S3File(accessKey, secretKey, bucket, processId=path.basename(results), mode="r")
                    results = s3file.read()
#......... the rest of the code is omitted here .........
Developer ID: richardjmarini, Project: Impetus-old, Lines of code: 103, Source file: task.py
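One caveat with this example: the default taskId=str(uuid1()) is evaluated once, when __init__ is defined, so every Task created without an explicit taskId receives the same id, and the taskId == None check inside __init__ never fires. The usual idiom is a None default, as in this illustrative sketch (not the real Task class):

from uuid import uuid1

class TaskSketch(object):
    """Illustrative only: uses a None default so a fresh uuid is generated
    per instance instead of once at function-definition time."""

    def __init__(self, queue, taskId=None):
        self.taskId = str(uuid1()) if taskId is None else taskId
        self.queue = queue

print(TaskSketch('host:5000:key').taskId == TaskSketch('host:5000:key').taskId)   # False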


Note: The multiprocessing.managers.SyncManager.getPipeline method examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please refer to each project's License for terms of distribution and use, and do not reproduce without permission.