

Python PSQL.run_sql_command Method Code Examples

This article collects typical usage examples of the Python method mpp.lib.PSQL.PSQL.run_sql_command. If you are wondering how PSQL.run_sql_command is used in practice, the curated examples below should help. You can also explore further usage examples of the containing class, mpp.lib.PSQL.PSQL.


The following presents 15 code examples of the PSQL.run_sql_command method, sorted by popularity by default.
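
Before the individual examples, here is a minimal sketch of the call patterns they all rely on. It is assembled from the usages shown on this page rather than from the PSQL class's official documentation, so treat the keyword arguments (sql_cmd, flags, dbname, username, results) as illustrative; the exact signature in your gpdb/TINC checkout may differ, and the file names used below are placeholders.

from mpp.lib.PSQL import PSQL

# Run an ad-hoc statement; '-q -t' requests quiet, tuples-only output,
# so the returned stdout string can simply be strip()'ed and compared.
row_count = PSQL.run_sql_command('SELECT count(*) FROM pg_stat_activity;',
                                 flags='-q -t', dbname='postgres')
print(row_count.strip())

# Capture return code and stderr by passing a results dict (see Example 7).
results = {'rc': 0, 'stdout': '', 'stderr': ''}
PSQL.run_sql_command('checkpoint', results=results)
assert results['rc'] == 0, results['stderr']

# Run a whole SQL file and write its output to a file (see Example 1);
# the file names here are hypothetical placeholders.
PSQL.run_sql_file(sql_file='setup_tables.sql', out_file='setup_tables.out')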

Example 1: test_11_tupcount_in_pg_aoseg_uaotable_upd

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
    def test_11_tupcount_in_pg_aoseg_uaotable_upd(self):
        tablename = "uao_table_test11"
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info("test_11 Verify the tupcount update to pg_aoseg wile updating uao table ")
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath, "create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.out")
        sql_file = os.path.join(self.sqlpath, "create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.sql")
        ans_file = os.path.join(self.anspath, "create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.ans")

        # create uao table and insert 10 rows
        sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        # get relid for newly created table
        relid = self.get_relid(file_name=tablename)

        # get the count from pg_aoseg before the update is performed
        pgaoseg_tupcount = self.get_tupcount_pgaoseg(relid=relid)
        tab_rowcount_bfrupd = self.get_rowcnt_table(tablename=tablename)

        sql_cmd3 = "update %s set j=j||'test11' where i = 1;" % tablename
        PSQL.run_sql_command(sql_cmd=sql_cmd3, flags="-q -t")

        # get the count from pg_aoseg after the update is performed
        pgaoseg_tupcount_aftupd = self.get_tupcount_pgaoseg(relid=relid)
        tab_rowcount_aftupd = self.get_rowcnt_table(tablename=tablename)

        assert int(pgaoseg_tupcount_aftupd) == (int(tab_rowcount_aftupd)) + 1
        assert int(tab_rowcount_bfrupd) == (int(tab_rowcount_aftupd))
        assert int(pgaoseg_tupcount_aftupd) == (int(pgaoseg_tupcount)) + 1
        # vacuum
        self.vacuum_full(tablename=tablename)

        pgaoseg_tupcount_aftvacuum = self.get_tupcount_pgaoseg(relid=relid)
        tab_rowcount_aftvacuum = self.get_rowcnt_table(tablename=tablename)
        assert int(pgaoseg_tupcount_aftvacuum) == (int(tab_rowcount_aftvacuum))
Contributor: andreasscherbaum, Project: gpdb, Lines: 37, Source: test_persistant_table.py

Example 2: test_ddboost_31_restore_without_compression

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
    def test_ddboost_31_restore_without_compression(self):
        """
        MPP-25620: gpdbrestore is failing for ddboost if compression program is
        not specified.
        """

        PSQL.run_sql_command('''DROP TABLE  IF EXISTS t1 CASCADE;
                                CREATE TABLE t1 (id int);
                                INSERT INTO t1 select generate_series(1, 10);
                                CREATE UNIQUE INDEX t1_idx on t1(id);
                                ALTER TABLE t1 ADD CONSTRAINT verify CHECK (id > 0);
                                ''', dbname = 'gptest')

        self.run_gpcrondump(dbname='gptest', option = '-x gptest -z --ddboost')

        cmd = 'gpdbrestore -t '+ self.full_backup_timestamp + ' -T public.t1 --ddboost -a'
        rc, stdout = self.run_command(cmd)
        self.assertEquals(0, rc)
        self.assertNotIn("Error executing query", stdout)
        self.assertNotIn("Type 2 could not be be found", stdout)

        status_file = self.get_restore_status_file_on_master()
        with open(status_file, 'r') as fd:
            lines = fd.readlines()
            self.assertNotIn('psql finished abnormally with return code 1', lines)
Contributor: 50wu, Project: gpdb, Lines: 27, Source: test_backup_restore.py

Example 3: backend_terminate

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
 def backend_terminate(self):
     """
     Get the backend process pid by issuing a query to table pg_stat_activity, then
     execute pg_terminate_backend(pid)
     """
     MAX_TRY = 5
     counter = 0
     get_backend_pid = 'SELECT procpid FROM pg_stat_activity WHERE current_query like \'create temp table t as select%\';'
     result = PSQL.run_sql_command(get_backend_pid, flags = '-q -t', dbname = self.db_name)
     tinctest.logger.info('result from getting backend procpid is %s'%result)
     procpid = result.strip()
     while not procpid and counter < MAX_TRY:
         result = PSQL.run_sql_command(get_backend_pid, flags = '-q -t', dbname = self.db_name)
         procpid = result.strip()
         counter += 1
     if counter == MAX_TRY:
         raise Exception('unable to select out the backend process pid')
     kill_backend = 'SELECT pg_terminate_backend(%s);'%procpid
     result = PSQL.run_sql_command(kill_backend, flags = '-q -t', dbname = self.db_name)
     tinctest.logger.info('result from pg_terminate_backend is %s'%result)
     # check if the process was terminated already
     result = PSQL.run_sql_command(get_backend_pid, flags = '-q -t', dbname = self.db_name)
     procpid_after_terminate = result.strip()
     counter = 0
     while procpid_after_terminate == procpid and counter < MAX_TRY:
         result = PSQL.run_sql_command(get_backend_pid, flags = '-q -t', dbname = self.db_name)
         procpid_after_terminate = result.strip()  
         counter += 1
         time.sleep(1)
     if counter == MAX_TRY:
         raise Exception('Running pg_terminate_backend failed!')
Contributor: 50wu, Project: gpdb, Lines: 33, Source: __init__.py

Example 4: test_11_incremental_after_vacuum

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
 def test_11_incremental_after_vacuum(self):
     tinctest.logger.info("Test 38: Negative: Full backup, Vacumm, incremental backup -should be an empty dirty_list")
     self.cleanup_backup_files()
     self.run_workload("backup_dir", 'bkdb38')
     self.run_full_backup(dbname = 'bkdb38', option = '--ddboost', location=self.BACKUPDIR)
     PSQL.run_sql_command('Vacuum;',dbname='bkdb38')
     self.run_incr_backup("dirty_dir_empty", dbname = 'bkdb38', option = '--ddboost', location=self.BACKUPDIR)
Contributor: 50wu, Project: gpdb, Lines: 9, Source: test_incr_ddboost.py

Example 5: test_run_sql_command_wth_username

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
    def test_run_sql_command_wth_username(self):
        sql_cmd = 'SELECT 1'
        username = getpass.getuser()
        self.assertTrue(PSQL.run_sql_command(sql_cmd = sql_cmd, username = username))

        #Invalid username
        self.assertFalse(PSQL.run_sql_command(sql_cmd = sql_cmd, username = 'invalidusername'))
Contributor: PengJi, Project: gpdb-comments, Lines: 9, Source: regress_psql.py

Example 6: resume_faults

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
    def resume_faults(self, fault_type, cluster_state='sync'):
        '''
        @param fault_type : commit/abort/end_prepare_two_phase_sleep/dtm_broadcast_prepare/dtm_broadcast_commit_prepared/dtm_xlog_distributed_commit
        @description : Resume the suspended faults 
        '''
        tinctest.logger.info('coming to resume faults with xact %s' % fault_type) 
        if fault_type == 'abort':
            self.filereputil.inject_fault(f='twophase_transaction_abort_prepared', y='resume', r='primary', p=self.port , o='0')
            if cluster_state !='resync':
                self.filereputil.inject_fault(f='transaction_abort_after_distributed_prepared', y='reset', p=self.port , o='0', seg_id='1')
        elif fault_type == 'commit':
            self.filereputil.inject_fault(f='twophase_transaction_commit_prepared', y='resume', r='primary', p=self.port , o='0')

        elif fault_type == 'dtm_broadcast_prepare':
            self.filereputil.inject_fault(f='dtm_broadcast_prepare', y='resume', seg_id = '1', p=self.port, o='0')

        elif fault_type == 'dtm_broadcast_commit_prepared':
            tinctest.logger.info('coming to if dtm_broadcast_commit_prepared')
            self.filereputil.inject_fault(f='dtm_broadcast_commit_prepared', y='resume', seg_id = '1', p=self.port, o='0')

        elif fault_type == 'dtm_xlog_distributed_commit':
            self.filereputil.inject_fault(f='dtm_xlog_distributed_commit', y='resume', seg_id = '1', p=self.port, o='0')

        else:
            tinctest.logger.info('No faults to resume')
        tinctest.logger.info('Resumed the suspended transaction fault')
        
        #Wait till all the trigger_sqls are complete before returning
        sql_count = PSQL.run_sql_command('select count(*) from pg_stat_activity;', flags ='-q -t', dbname='postgres')
        while(sql_count.strip() != '1'):
            sleep(5)
            sql_count = PSQL.run_sql_command('select count(*) from pg_stat_activity;', flags ='-q -t', dbname='postgres')
            tinctest.logger.info('stat_activity count %s ' % sql_count)
        return
Contributor: shwu, Project: gpdb, Lines: 36, Source: __init__.py

Example 7: test_reindex_pg_class

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
    def test_reindex_pg_class(self):
        tinctest.logger.info("create checkpoint")
        results = {'rc':0, 'stdout':'', 'stderr':''}
        PSQL.run_sql_command("checkpoint", results=results)
        assert results['rc'] == 0, results['stderr']

        tinctest.logger.info("inject fault to skip checkpoints")
        cmd = Command("skip checkpoint on primaries",
                      "gpfaultinjector -f checkpoint -m async -y skip -o 0"
                      " -H ALL -r primary")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        cmd = Command("skip checkpoint on master",
                      "gpfaultinjector -f checkpoint -m async -y skip -o 0 -s 1")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        tinctest.logger.info("reindex pg_class indexes")
        assert PSQL.run_sql_file(local_path('reindex_pg_class.sql'))

        tinctest.logger.info("shutdown immediate")
        cmd = Command("shutdown immediate", "gpstop -ai")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        tinctest.logger.info("trigger recovery")
        cmd = Command("restart the cluster", "gpstart -a")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        tinctest.logger.info("validate recovery succeeded")
        results = {'rc':0, 'stdout':'', 'stderr':''}
        PSQL.run_sql_command("DROP TABLE reindex_pg_class_test", results=results)
        assert results['rc'] == 0, results['stderr']
Contributor: kaknikhil, Project: gpdb, Lines: 37, Source: test_reindex_pg_class.py

Example 8: test_restart_on_failure

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
 def test_restart_on_failure(self):
     """
     @gather_logs_on_failure True
     @restart_on_failure True
     """
     PSQL.run_sql_command("select * from some_table_that_does_not_exist_to_generate_log_errors")
     self.assertTrue(False)
Contributor: 50wu, Project: gpdb, Lines: 9, Source: regress_mpp_test_case.py

Example 9: verify_config_file_with_gp_config

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
 def verify_config_file_with_gp_config(self):
     """
     Compare gp_segment_configuration and pg_filespace_entry against the input file mirror_data_dir,
     to double-check that the cluster is configured as intended.
     """
     with open(self.mirror_config_file, 'r') as f:
         next(f)            
         for line in f:
             line = line.strip()
             mirror_seg_info = line.split('=')[1]
             cols = mirror_seg_info.split(':')
             content_id = cols[0]
             address = cols[1]
             port = cols[2]
             mir_replication_port = cols[3]
             query_on_configuration = '''select * from gp_segment_configuration where content=\'%s\' and address=\'%s\' and port=\'%s\'
                                         and replication_port=\'%s\'''' % (content_id, address, port, mir_replication_port)
             config_info = PSQL.run_sql_command(query_on_configuration, flags='-q -t', dbname='template1')
             config_info = config_info.strip()
             # as intended, the entry should be existing in the cluster
             self.assertNotEqual(0, len(config_info))
             query_on_fselocation = ''' select fselocation from gp_segment_configuration, pg_filespace_entry where dbid=fsedbid 
                                        and preferred_role=\'m\' and content=\'%s\''''%content_id
             fs_locations = PSQL.run_sql_command(query_on_fselocation, flags='-q -t', dbname='template1')
             size = len(cols)
             for fs_index in range(5, size):
                 fs_location = cols[fs_index]
                 self.assertIn(os.path.dirname(fs_location), fs_locations)
Contributor: PengJi, Project: gpdb-comments, Lines: 30, Source: test_gpaddmirrors.py

Example 10: test_xlogcleanup

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
    def test_xlogcleanup(self):
        """
        Verify that an xlog segment created while basebackup is
        dumping out data does not get cleaned up.
        """

        shutil.rmtree('base', True)
        PSQL.run_sql_command('DROP table if exists foo')

        # Inject fault at post checkpoint create (basebackup)
        logger.info ('Injecting base_backup_post_create_checkpoint fault ...')
        result = self.suspend_at(
                    'base_backup_post_create_checkpoint')
        logger.info(result.stdout)
        self.assertEqual(result.rc, 0, result.stdout)

        # Now execute basebackup. It will be blocked due to the
        # injected fault.
        logger.info ('Perform basebackup with xlog & recovery.conf...')
        pg_basebackup = subprocess.Popen(['pg_basebackup', '-x', '-R', '-D', 'base']
                                          , stdout = subprocess.PIPE
                                          , stderr = subprocess.PIPE)

        # Give basebackup a moment to reach the fault & 
        # trigger it
        logger.info('Check if suspend fault is hit ...')
        triggered = self.wait_triggered(
                    'base_backup_post_create_checkpoint')
        self.assertTrue(triggered, 'Fault was not triggered')

        # Perform operations that causes xlog seg generation
        logger.info ('Performing xlog seg generation ...')
        count = 0
        while (count < 10):
            PSQL.run_sql_command('select pg_switch_xlog(); select pg_switch_xlog(); checkpoint;')
            count = count + 1

        # Resume basebackup
        result = self.resume('base_backup_post_create_checkpoint')
        logger.info(result.stdout)
        self.assertEqual(result.rc, 0, result.stdout)

        # Wait until basebackup end
        logger.info('Waiting for basebackup to end ...')

        sql = "SELECT count(*) FROM pg_stat_replication"
        with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
            while (True):
                curs = dbconn.execSQL(conn, sql)
                results = curs.fetchall()

                if (int(results[0][0]) == 0):
                    break

        # Verify if basebackup completed successfully
        # See if recovery.conf exists (Yes - Pass)
        self.assertTrue(os.path.exists(os.path.join('base','recovery.conf')))

        logger.info ('Found recovery.conf in the backup directory.')
        logger.info ('Pass')
Contributor: PengJi, Project: gpdb-comments, Lines: 62, Source: test_xlog_cleanup.py

Example 11: test_gather_logs

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
 def test_gather_logs(self):
     """
     @gather_logs_on_failure True
     @restart_on_failure True
     """
     PSQL.run_sql_command("select pg_sleep(5)")
     self.assertTrue(False)
Contributor: 50wu, Project: gpdb, Lines: 9, Source: regress_mpp_test_case.py

Example 12: create_filespace

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
    def create_filespace(self, filespace):
        '''
        @param filespace: Filespace Name
        '''
        if self.exists(filespace) is True:
            tinctest.logger.info('Filespace %s exists' % filespace)
            return

        file1 = local_path(filespace)
        f1 = open(file1+".fs.config","w")
        f1.write('filespace:%s\n' % filespace)
        f1.write('fsysname:hdfs\n')
        fsrep = PSQL.run_sql_command("select fsrep from pg_filespace where fsname='dfs_system';", flags = '-t -q', dbname='postgres')
        f1.write('fsreplica:%s\n' % fsrep.strip())

        dfs_loc_cmd = "SELECT substring(fselocation from length('hdfs:// ') for (position('/' in substring(fselocation from length('hdfs:// ')))-1)::int) FROM pg_filespace pgfs, pg_filespace_entry pgfse  WHERE pgfs.fsname = 'dfs_system' AND fsedbid = 2 AND pgfse.fsefsoid=pgfs.oid ;"
        dfs_loc = PSQL.run_sql_command(dfs_loc_cmd,flags = '-t -q', dbname='postgres')
        for record in self.config.record:
            if record.content == -1:
                fileloc = '%s/hdfs_%s' % (os.path.split(record.datadir)[0], filespace)
                f1.write("%s:%s:%s/%s\n" % (record.hostname, record.dbid, fileloc, os.path.split(record.datadir)[1]))
                cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'"  % (record.hostname, fileloc, fileloc)
                run_shell_command(cmd)
            else:
                f1.write("%s:%s:[%s/%s/%s]\n" % (record.hostname, record.dbid, dfs_loc.strip(), filespace, os.path.split(record.datadir)[1]))
        f1.close()
        filespace_cmd = '%s/bin/gpfilespace -c %s' % (self.gphome, f1.name)
        cmd = Command(name='Gpfilespace command', cmdStr="%s" % (filespace_cmd))
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0:
            raise GPfilespaceException('"gpfilespace creation filespace FAILED".  Output = %s ' % result.stdout)
Contributor: 50wu, Project: gpdb, Lines: 35, Source: gpfilespace.py

Example 13: test_no_corruption

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
    def test_no_corruption(self):
        """
        Test that gpcheckcat does not report any errors and it does
        not generate the verify file if the gpcheckcat test succeeds.
        We choose missing_extraneous test for this purpose.

        """
        dbname = 'test_no_corruption'
        PSQL.run_sql_command('DROP DATABASE IF EXISTS %s' % dbname)
        stdout = PSQL.run_sql_command('CREATE DATABASE %s' % dbname)
        if not stdout.endswith('CREATE DATABASE\n'):
            self.fail('failed to create database: %s' % stdout)

        sql_file = local_path('sql/create_tables.sql')
        if not PSQL.run_sql_file(sql_file, dbname=dbname,
                                 output_to_file=False):
            self.fail('failed to create tables')

        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command(
            "cd %s && $GPHOME/bin/lib/gpcheckcat -p %s -R missing_extraneous %s" %
            (self.gpcheckcat_test_dir, self.master_port, dbname),
            results=res)
        self.assertEqual(0, res['rc'])
        for f in os.listdir(self.gpcheckcat_test_dir):
            if fnmatch.fnmatch(f, 'gpcheckcat.verify.%s.*' % dbname):
                self.fail('found verify file when not expecting it')
Contributor: LJoNe, Project: gpdb, Lines: 29, Source: test_gpcheckcat.py

Example 14: test_MPP24237

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
    def test_MPP24237(self):

        cmd_cleanup = "psql -Atc \"select datname from pg_database where datname != \'template0\'\" | while read a; do echo \"check for ${a}\";psql -Atc \"select \'drop schema if exists \' || nspname || \' cascade;\' from (select nspname from pg_namespace where nspname like \'pg_temp%\' union select nspname from gp_dist_random(\'pg_namespace\') where nspname like \'pg_temp%\' except select \'pg_temp_\' || sess_id::varchar from pg_stat_activity) as foo\" ${a}; done"

        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command(cmd_cleanup, 'do_clean', res)

        if res['rc'] > 0:
            raise Exception("Failed to do cleanup %s" %res[stderr])

        PSQL.run_sql_file(local_path('pre_script.sql'), out_file=local_path('pre_script.out'))
        self.assertTrue(Gpdiff.are_files_equal(local_path('pre_script.out'), local_path('pre_script.ans')))

        cmd = "select count(*) from pg_tables where schemaname like 'pg_temp%';"
        out = PSQL.run_sql_command(cmd, flags ='-q -t')

        if int(out) != 0:
            tinctest.logger.info("temp tables found")
            tinctest.logger.info(PSQL.run_sql_command("select * from pg_tables where schemaname like 'pg_temp%';"))
            self.fail("temp tables were found")
            PSQL.run_sql_file(local_path('clean_script.sql'))

        PSQL.run_sql_file(local_path('clean_script.sql'))

        run_shell_command(cmd_cleanup, 'do_clean', res)
        if res['rc'] > 0:
            raise Exception("Failed to do cleanup %s" %res[stderr])
Contributor: LJoNe, Project: gpdb, Lines: 29, Source: test_func_mpp24237.py

Example 15: test_with_concurrent_workload

# Required import: from mpp.lib.PSQL import PSQL [as alias]
# Or: from mpp.lib.PSQL.PSQL import run_sql_command [as alias]
 def test_with_concurrent_workload(self):
     """
     Add new mirrors while a concurrent workload is in progress, check that the mirrors are added
     and the current workload is not affected, and finally run checkmirrorseg.
     Note: adding mirrors while a workload is running has a known checkmirrorseg issue (MPP-24311).
     """
     gprecover = GpRecover()
     self._setup_gpaddmirrors()
     self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
     sql_setup_file = local_path('sql/ao_heap_table_setup.sql') 
     sql_file = local_path('sql/ao_heap_table.sql')
     pg_stat_activity = 'SELECT * FROM pg_stat_activity;'
     PSQL.run_sql_file(sql_setup_file)
     subprocess.Popen(["psql", "-f", sql_file])
     time.sleep(15)
     subprocess.Popen(["gpaddmirrors", "-ai", self.mirror_config_file, "-d", self.mdd])
     time.sleep(15)
     result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
     result = result.strip()
     rows = result.split('\n')
     self.assertTrue(len(rows) > 1)
     while len(rows) > 1:
         result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
         result = result.strip()
         rows = result.split('\n')
         time.sleep(3)
     gprecover.wait_till_insync_transition()
     self.verify_config_file_with_gp_config()
Contributor: PengJi, Project: gpdb-comments, Lines: 30, Source: test_gpaddmirrors.py


Note: The mpp.lib.PSQL.PSQL.run_sql_command examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets come from open-source projects contributed by their original authors, and copyright remains with those authors; consult each project's License before distributing or reusing the code, and do not reproduce this article without permission.