当前位置: 首页>>代码示例>>Python>>正文


Python base.WorkerPool类代码示例

本文整理汇总了Python中gppylib.commands.base.WorkerPool的典型用法代码示例。如果您正苦于以下问题:Python WorkerPool类的具体用法?Python WorkerPool怎么用?Python WorkerPool使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了WorkerPool类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: execute

    def execute(self):
        """Copy dump files for self.restore_timestamp from a remote host to
        the master and every primary segment, in parallel.

        Reads self.host/self.path (source location), self.master_port,
        self.restore_timestamp, self.compress and self.batch_default.
        Raises ExceptionNoStackTraceNeeded if a target directory cannot be
        created on a segment host.
        """
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port), utility=True)
        from_host, from_path = self.host, self.path
        logger.info("Commencing remote database dump file recovery process, please wait...")
        # Dump files exist only for the master and the primary segments.
        segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True) or seg.isSegmentMaster()]
        pool = WorkerPool(numWorkers = min(len(segs), self.batch_default))
        for seg in segs:
            # Master and segment dumps use different filename prefixes;
            # segment filenames also embed the dbid.
            # (renamed from `file`, which shadowed the builtin)
            if seg.isSegmentMaster():
                dump_file = '%s%s' % (MASTER_DBDUMP_PREFIX, self.restore_timestamp)
            else:
                dump_file = '%s0_%d_%s' % (DBDUMP_PREFIX, seg.getSegmentDbId(), self.restore_timestamp)
            if self.compress:
                dump_file += '.gz'

            to_host = seg.getSegmentHostName()
            # Target directory is partitioned by date: the YYYYMMDD prefix
            # of the restore timestamp.
            to_path = os.path.join(seg.getSegmentDataDirectory(), DUMP_DIR, self.restore_timestamp[0:8])
            if not CheckRemoteDir(to_path, to_host).run():
                logger.info('Creating directory %s on %s' % (to_path, to_host))
                try:
                    MakeRemoteDir(to_path, to_host).run()
                except OSError:
                    # Fixed: `except OSError, e:` is legacy Python 2 syntax
                    # and the bound exception was never used.
                    raise ExceptionNoStackTraceNeeded("Failed to create directory %s on %s" % (to_path, to_host))

            logger.info("Commencing remote copy from %s to %s:%s" % (from_host, to_host, to_path))
            pool.addCommand(Scp('Copying dump for seg %d' % seg.getSegmentDbId(),
                            srcFile=os.path.join(from_path, dump_file),
                            dstFile=os.path.join(to_path, dump_file),
                            srcHost=from_host,
                            dstHost=to_host))
            # NOTE(review): the pool is never joined/halted in this snippet —
            # confirm the remainder of the method (not shown) waits on it.
开发者ID:BALDELab,项目名称:incubator-hawq,代码行数:29,代码来源:restore.py

示例2: execute

    def execute(self):
        """Free leftover SysV shared memory segments for self.segments.

        For each segment, the last whitespace-separated token on the last
        line of postmaster.pid is the shared memory id, which is removed
        with `ipcrm -m`. Raises Exception on any ipcrm failure other than
        the benign "invalid id" race (memory already freed).
        """
        pool = WorkerPool()
        try:
            for seg in self.segments:
                datadir = seg.getSegmentDataDirectory()
                postmaster_pid_file = '%s/postmaster.pid' % datadir
                shared_mem = None
                if os.path.isfile(postmaster_pid_file):
                    with open(postmaster_pid_file) as fp:
                        # Last token of the last line holds the shm id.
                        shared_mem = fp.readlines()[-1].split()[-1].strip()
                if shared_mem:
                    cmd = Command('clean up shared memory', cmdStr="ipcrm -m %s" % shared_mem)
                    pool.addCommand(cmd)
            # Fixed: join() was inside the loop above, which blocked after
            # every single command and serialized the cleanup; wait once
            # after all commands have been dispatched.
            pool.join()

            for item in pool.getCompletedItems():
                result = item.get_results()

                # This code is usually called after a GPDB segment has
                # been terminated.  In that case, it is possible that
                # the shared memory has already been freed by the
                # time we are called to clean up.  Due to this race
                # condition, it is possible to get an `ipcrm: invalid
                # id1` error from ipcrm.  We, therefore, ignore it.
                if result.rc != 0 and not result.stderr.startswith("ipcrm: invalid id"):
                    raise Exception('Unable to clean up shared memory for segment: (%s)' % (result.stderr))
        finally:
            pool.haltWork()
            pool.joinWorkers()
            pool = None
开发者ID:adam8157,项目名称:gpdb,代码行数:30,代码来源:unix.py

示例3: impl

def impl(context, tablename, dbname, poolname):
    """Drop *tablename* in *dbname* asynchronously via a one-worker pool,
    stashing the pool on the behave context under *poolname*."""
    worker_pool = WorkerPool(numWorkers=1)
    drop_cmd = Command(name='drop a table in a worker pool',
                       cmdStr="""psql -c "DROP TABLE %s" -d %s""" % (tablename, dbname))
    worker_pool.addCommand(drop_cmd)
    # Lazily create the context's pool registry on first use.
    if not hasattr(context, 'pool'):
        context.pool = {}
    context.pool[poolname] = worker_pool
开发者ID:xuewindy,项目名称:gpdb,代码行数:7,代码来源:backup_mgmt_utils.py

示例4: restore_pg_hba_on_segment

def restore_pg_hba_on_segment(gparr):
    """
    Restore the pg_hba.conf on all of the segments
    present in the array
    """
    logger.debug('Restoring pg_hba.conf file on segments...')

    # Group segment data directories by host so that a single remote
    # command per host restores every segment living on it.
    host_to_seg_map = defaultdict(list)
    for seg in gparr.getDbList():
        if seg.isSegmentMaster() or seg.isSegmentStandby():
            continue
        host_to_seg_map[seg.getSegmentHostName()].append(seg.getSegmentDataDirectory())

    pool = WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
    try:
        for host, data_dirs_list in host_to_seg_map.items():
            # Pickle + urlsafe b64 so the directory list survives the trip
            # through the remote shell command line.
            pickled_data_dirs_list = base64.urlsafe_b64encode(pickle.dumps(data_dirs_list))
            cmdStr = "$GPHOME/lib/python/gppylib/operations/initstandby.py -d %s -r" % pickled_data_dirs_list
            pool.addCommand(Command('Restore the pg_hba.conf on remote hosts',
                                    cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host))

        pool.join()

        # Best-effort: failures are logged rather than raised.
        for item in pool.getCompletedItems():
            result = item.get_results()
            if result.rc != 0:
                logger.error('Unable to restore pg_hba.conf %s' % str(result.stderr))
                logger.error('Please check the segment for more details')
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
开发者ID:adam8157,项目名称:gpdb,代码行数:33,代码来源:initstandby.py

示例5: CleanVerification

class CleanVerification(Operation):
    """Remove the on-disk artifacts of a finished verification task and
    drop its bookkeeping entry."""

    def __init__(self, token, batch_default):
        self.token = token
        self.batch_default = batch_default
        self.pool = None

    def execute(self):
        entry = ValidateVerificationEntry(token = self.token).run()
        if not entry['verdone']:
            # Refuse to clean up a task that is still in progress.
            raise WrongStateError("Only finished verification tasks may be cleaned up.")

        # Master-side artifacts live under <masterdatadir>/pg_verify/<token>.
        path = os.path.join(get_masterdatadir(), 'pg_verify', self.token)
        Command('cleanup', 'rm -rf %s' % path).run(validateAfter=True)
        #RemoveTree(path).run()

        to_clean = ValidateVerification(content = entry['vercontent'],
                                        primaries_only = False).run()
        self.pool = WorkerPool(min(len(to_clean), self.batch_default))
        # Fan out one remote rm (by token glob) per segment data directory.
        for seg in to_clean:
            seg_host = seg.getSegmentHostName()
            token_glob = os.path.join(seg.getSegmentDataDirectory(), 'pg_verify', "*%s*" % self.token)
            self.pool.addCommand(Command('cleanup', 'rm -f %s' % token_glob, remoteHost=seg_host))

        logger.info('Waiting for clean commands to complete...')
        self.pool.wait_and_printdots(len(to_clean))

        for cmd in self.pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error('Failed to send cleanup on %s' % cmd.host)
                logger.error('Error: %s' % res.stderr)
                raise CleanVerificationError()
        RemoveVerificationEntry(token = self.token).run()
        logger.info('Verification %s has been cleaned.' % self.token)
开发者ID:ginobiliwang,项目名称:gpdb,代码行数:34,代码来源:verify.py

示例6: run_using_workerpool

    def run_using_workerpool(self, option=''):
        """Launch `gprecoverseg -a <option>` asynchronously via a single
        daemonized worker; only -F, -r and --persistent-check are accepted."""
        allowed = set(['-F', '-r', '--persistent-check', ' '])
        if not set(option.split()).issubset(allowed):
            raise GpRecoversegException('Not a valid option with gprecoverseg')

        rcvr_cmd = 'gprecoverseg -a  %s' % option
        cmd = Command(name='Run gprecoverseg',
                      cmdStr='source %s/greenplum_path.sh;%s' % (self.gphome, rcvr_cmd))
        tinctest.logger.info("Running gprecoverseg : %s" % cmd)

        # Daemonized single worker: fire and forget, no join.
        pool = WorkerPool(numWorkers=1, daemonize=True)
        pool.addCommand(cmd)
开发者ID:PengJi,项目名称:gpdb-comments,代码行数:10,代码来源:gprecoverseg.py

示例7: TriggerFilerepVerifyMessages

class TriggerFilerepVerifyMessages(Operation):
    """Fan gp_primarymirror verification requests out to a set of primary
    segments, at most `batch_default` at a time.

    All constructor arguments after `content`, `token` and `batch_default`
    are optional flags forwarded verbatim to SendFilerepVerifyMessage.
    """
    def __init__(self, content, token, batch_default, full=None, verify_file=None, verify_dir=None,
                       abort=None, suspend=None, resume=None, ignore_dir=None, ignore_file=None,
                       results=None, results_level=None):
        # Segment selector and verification token.
        self.content = content
        self.token = token
        # Optional request flags, passed through unchanged.
        self.full = full
        self.verify_file = verify_file
        self.verify_dir = verify_dir
        self.abort = abort
        self.suspend = suspend
        self.resume = resume
        self.ignore_dir = ignore_dir
        self.ignore_file = ignore_file
        self.results = results
        self.results_level = results_level
        self.batch_default = batch_default
        self.pool = None
    def execute(self):
        """
        Sends arbitrary gp_primarymirror requests to the backend processes defined.

        Raises TriggerGpPrimaryMirrorFailure if any request fails.
        """
        to_trigger = ValidateVerification(content = self.content).run()

        logger.info('Sending gp_primarymirror requests...')
        self.pool = WorkerPool(min(len(to_trigger), self.batch_default))

        for pseg in to_trigger:
            host, port = pseg.getSegmentHostName(), pseg.getSegmentPort()
            cmd = SendFilerepVerifyMessage(name = 'verify %s' % host, host = host, port = port,
                                           token = self.token,
                                           full = self.full,
                                           verify_file = self.verify_file,
                                           verify_dir = self.verify_dir,
                                           abort = self.abort,
                                           suspend = self.suspend,
                                           resume = self.resume,
                                           ignore_dir = self.ignore_dir,
                                           ignore_file = self.ignore_file,
                                           results = self.results,
                                           results_level = self.results_level)
            logger.debug("Sending request to %s:%d" % (host, port))
            self.pool.addCommand(cmd)

        logger.info('Waiting for gp_primarymirror commands to complete...')
        self.pool.wait_and_printdots(len(to_trigger))

        for cmd in self.pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error('Failed to send gp_primarymirror message to %s:%s' % (cmd.host, cmd.port))
                logger.error('Error: %s' % res.stderr)
                raise TriggerGpPrimaryMirrorFailure()
        # Typo fix: log message previously read "succesfully".
        logger.info('gp_primarymirror messages have been triggered successfully.')
开发者ID:ginobiliwang,项目名称:gpdb,代码行数:54,代码来源:verify.py

示例8: test_print_progress

 def test_print_progress(self, mock1):
     """print_progress should report 100% once all queued commands finish."""
     pool = WorkerPool(numWorkers=32)
     for label in ('dummy command1', 'dummy command2'):
         pool.addCommand(Command(label, ''))
     pool.join()
     pool.print_progress(2)
     self.assertTrue(mock1.called_with('100.00% of jobs completed'))
     pool.haltWork()
开发者ID:phan-pivotal,项目名称:gpdb,代码行数:10,代码来源:test_unit_base.py

示例9: scp_file_to_hosts

def scp_file_to_hosts(host_list, filename, batch_default):
    """Copy *filename* to the same path on every host in *host_list*,
    running at most *batch_default* transfers concurrently; raises via
    check_results() if any copy fails."""
    workers = min(len(host_list), batch_default)
    pool = WorkerPool(numWorkers=workers)

    for target in host_list:
        scp_cmd = Scp("Copying table_filter_file to %s" % target,
                      srcFile=filename, dstFile=filename, dstHost=target)
        pool.addCommand(scp_cmd)

    pool.join()
    pool.haltWork()
    pool.check_results()
开发者ID:andreasscherbaum,项目名称:gpdb,代码行数:10,代码来源:backup_utils.py

示例10: get_host_status

def get_host_status(hostlist):
    """
    Test if SSH command works on a host and return a dictionary
    Return Ex: {host1: True, host2: False}
    where True represents SSH command success and False represents failure
    """
    if not isinstance(hostlist, list):
        raise Exception("Input parameter should be of type list")

    # Probe each host with a trivial remote echo, up to 16 in parallel.
    pool = WorkerPool(min(len(hostlist), 16))
    for target in hostlist:
        pool.addCommand(Echo('ssh test', '', ctxt=REMOTE, remoteHost=target))

    pool.join()
    pool.haltWork()

    # Map each probed host to whether its command succeeded.
    host_status_dict = {}
    for cmd in pool.getCompletedItems():
        host_status_dict[cmd.remoteHost] = bool(cmd.get_results().wasSuccessful())

    return host_status_dict
开发者ID:kdunn926,项目名称:incubator-hawq,代码行数:26,代码来源:hawqlib.py

示例11: _get_pgcontrol_data_from_segments

 def _get_pgcontrol_data_from_segments(self, gpdb_list):
     """Run pg_controldata on every segment in *gpdb_list* in parallel
     (self.workers at a time) and return the completed command objects.

     Each returned command carries its originating segment as
     `cmd.gparray_gpdb`.
     """
     pool = WorkerPool(numWorkers=self.workers)
     try:
         for gpdb in gpdb_list:
             cmd = PgControlData(name='run pg_controldata',
                                 datadir=gpdb.getSegmentDataDirectory(),
                                 ctxt=REMOTE,
                                 remoteHost=gpdb.getSegmentHostName())
             # Remember which segment this command belongs to.
             cmd.gparray_gpdb = gpdb
             pool.addCommand(cmd)
         pool.join()
     finally:
         # Make sure that we halt the workers or else we'll hang
         pool.haltWork()
         pool.joinWorkers()
     return pool.getCompletedItems()
开发者ID:adam8157,项目名称:gpdb,代码行数:14,代码来源:heapchecksum.py

示例12: run_pool_command

def run_pool_command(host_list, cmd_str, batch_default, check_results=True):
    """Run *cmd_str* on every host in *host_list* over SSH, limiting
    concurrency to *batch_default*; optionally raise on any failure via
    check_results()."""
    workers = min(len(host_list), batch_default)
    pool = WorkerPool(numWorkers=workers)

    for target in host_list:
        pool.addCommand(Command(target, cmd_str, ctxt=REMOTE, remoteHost=target))

    pool.join()
    pool.haltWork()
    if check_results:
        pool.check_results()
开发者ID:asimrp,项目名称:gpdb,代码行数:11,代码来源:backup_utils.py

示例13: execute

 def execute(self):
     """Pull coverage output directories from every host in self.trail,
     skipping any directory already present locally under self.pid_dir.

     Fixed: the list-comprehension/loop variable `dir` shadowed the
     builtin of the same name.
     """
     pool = WorkerPool()
     # Directories we already have locally; anything in this set is skipped.
     given = set(ListFiles(self.pid_dir).run())
     try:
         for host in self.trail:
             available = ListRemoteFiles(self.pid_dir, host).run()
             to_copy = [d for d in available if d not in given]
             for subdir in to_copy:
                 comp_dir = os.path.join(self.pid_dir, subdir)
                 # Recursive scp: the remote directory lands at the same
                 # local path.
                 pool.addCommand(Scp('collect coverage',
                                     srcFile = comp_dir,
                                     srcHost = host,
                                     dstFile = comp_dir,
                                     recursive = True))
     finally:
         pool.join()
         # NOTE(review): workers are never halted here (no haltWork/
         # joinWorkers) — confirm the caller tolerates the leaked threads.
开发者ID:PivotalBigData,项目名称:incubator-hawq,代码行数:16,代码来源:gpcoverage.py

示例14: checkSSH

    def checkSSH(self):
        '''Check that ssh to hostlist is okay.'''

        pool = WorkerPool()
        for hostname in self.list:
            pool.addCommand(Echo('ssh test', '', ctxt=REMOTE, remoteHost=hostname))

        pool.join()
        pool.haltWork()

        # Raise for the first host whose probe failed, if any.
        failed = [cmd.remoteHost for cmd in pool.getCompletedItems()
                  if not cmd.get_results().wasSuccessful()]
        if failed:
            raise SSHError("Unable to ssh to '%s'" % failed[0])

        return True
开发者ID:PengJi,项目名称:gpdb-comments,代码行数:17,代码来源:ssh_utils.py

示例15: removeBadHosts

    def removeBadHosts(self):
        ''' Update list of host to include only the host on which SSH was successful'''

        pool = WorkerPool()
        for hostname in self.list:
            pool.addCommand(Echo('ssh test', '', ctxt=REMOTE, remoteHost=hostname))

        pool.join()
        pool.haltWork()

        # Partition probed hosts into reachable and unreachable.
        bad_hosts = []
        working_hosts = []
        for cmd in pool.getCompletedItems():
            bucket = working_hosts if cmd.get_results().wasSuccessful() else bad_hosts
            bucket.append(cmd.remoteHost)

        self.list = working_hosts[:]
        return bad_hosts
开发者ID:andyli029,项目名称:incubator-hawq,代码行数:22,代码来源:ssh_utils.py


注:本文中的gppylib.commands.base.WorkerPool类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。