This article collects typical usage examples of the Python method mpp.lib.config.GPDBConfig.has_master_mirror. If you are wondering what GPDBConfig.has_master_mirror does and how it is used in practice, the selected code examples below should help. You can also explore further usage examples of its containing class, mpp.lib.config.GPDBConfig.
Three code examples of the GPDBConfig.has_master_mirror method are shown below, sorted by popularity by default.
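
All three examples follow the same pattern: build a GPDBConfig object for the running cluster and call has_master_mirror() to decide whether master/standby-specific checks should run at all. Here is a minimal, illustrative sketch of that pattern (the print calls are placeholders, not part of the original tests):

    from mpp.lib.config import GPDBConfig

    config = GPDBConfig()
    # has_master_mirror() reports whether a standby master (master mirror) is configured,
    # so standby-specific validation can be skipped on clusters without one.
    if config.has_master_mirror():
        print('standby master configured: master mirror checks apply')
    else:
        print('no standby master: skip master mirror checks')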
Example 1: DbStateClass
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import has_master_mirror [as alias]
# (The example also assumes the TINC framework names tinctest, MPPTestCase,
#  GpdbVerify and PSQL, imported elsewhere in the original module.)
class DbStateClass(MPPTestCase):

    def __init__(self, methodName, config=None):
        if config is not None:
            self.config = config
        else:
            self.config = GPDBConfig()
        self.gpverify = GpdbVerify(config=self.config)
        super(DbStateClass, self).__init__(methodName)

    def check_system(self):
        '''
        @summary: Check whether the system is up and in sync. Raise an exception if not.
        '''
        cmd = "select count(*) from gp_segment_configuration where content <> -1;"
        count_all = PSQL.run_sql_command(cmd, flags='-q -t', dbname='postgres')
        cmd = "select count(*) from gp_segment_configuration where content <> -1 and mode = 's' and status = 'u';"
        count_up_and_sync = PSQL.run_sql_command(cmd, flags='-q -t', dbname='postgres')
        if count_all.strip() != count_up_and_sync.strip():
            raise Exception('The cluster is not in up/sync ............')
        else:
            tinctest.logger.info("\n Starting New Test: System is up and in sync .........")

    def check_catalog(self, dbname=None, alldb=True, online=False, testname=None, outputFile=None, host=None, port=None):
        '''1. Run gpcheckcat'''
        (errorCode, hasError, gpcheckcat_output, repairScriptDir) = self.gpverify.gpcheckcat(
            dbname=dbname, alldb=alldb, online=online, testname=testname,
            outputFile=outputFile, host=host, port=port)
        if errorCode != 0:
            raise Exception('GpCheckcat failed with errcode %s ' % (errorCode))

    def check_mirrorintegrity(self, master=False):
        '''Runs checkmirrorintegrity (default) or check_mastermirrorintegrity (when master=True)'''
        (checkmirror, fix_outfile) = self.gpverify.gpcheckmirrorseg(master=master)
        if not checkmirror:
            self.fail('Checkmirrorseg failed. Fix file location : %s' % fix_outfile)
        tinctest.logger.info('Successfully completed integrity check')

    def run_validation(self):
        '''
        1. gpcheckcat
        2. checkmirrorintegrity
        3. check_mastermirrorintegrity
        '''
        self.check_catalog()
        self.check_mirrorintegrity()
        # The master mirror check only applies when a standby master is configured.
        if self.config.has_master_mirror():
            self.check_mirrorintegrity(master=True)
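
A typical way to drive the class above is sketched here; the test class SampleScenarioTest and its method are hypothetical, while DbStateClass, check_system and run_validation come from Example 1:

    class SampleScenarioTest(MPPTestCase):

        def test_cluster_state_after_scenario(self):
            # Hypothetical scenario: fault-injection or recovery steps ran before this point.
            dbstate = DbStateClass('run_validation')
            dbstate.check_system()      # raises if the cluster is not up and in sync
            dbstate.run_validation()    # gpcheckcat + mirror integrity (+ master mirror when configured)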
Example 2: test_with_standby_and_filespace
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import has_master_mirror [as alias]
def test_with_standby_and_filespace(self):
    """
    Check that the cluster's mirror host addresses are the same with and without a standby master.
    """
    if not self.config.is_multinode():
        self.skipTest('skipping test since the cluster is not multinode')
    gprecover = GpRecover()
    self._setup_gpaddmirrors()
    # adding mirrors first
    self._setup_gpaddmirrors()
    self._generate_gpinit_config_files()
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)

    res = {'rc': 0, 'stdout': '', 'stderr': ''}
    run_shell_command("gpaddmirrors -a -i %s -s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrors with mirror spreading', res)
    self.assertEqual(0, res['rc'])
    gprecover.wait_till_insync_transition()

    get_mirror_address = 'SELECT content, address FROM gp_segment_configuration WHERE preferred_role = \'m\';'
    rows = self.format_sql_result(get_mirror_address)
    # create a dictionary mapping each mirror's content id to its host address
    mirror_hosts_wo_stdby = {}
    for row in rows:
        content = row[0]
        address = row[1]
        mirror_hosts_wo_stdby[content] = address

    # delete and reinitialize the cluster again
    self._do_gpdeletesystem()
    self._do_gpinitsystem()
    gprecover.wait_till_insync_transition()

    res = {'rc': 0, 'stdout': '', 'stderr': ''}
    # create filespace and standby; a new config_info instance is needed for the new cluster
    config_info = GPDBConfig()
    if not config_info.has_master_mirror():
        self._do_gpinitstandby()
    self._create_filespace('user_filespace')
    self._setup_gpaddmirrors()
    self._generate_gpinit_config_files()
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
    for fs_location in self.fs_location:
        self._cleanup_segment_data_dir(self.host_file, fs_location)

    # add mirrors for the new cluster, which has a standby and a user filespace configured
    res = {'rc': 0, 'stdout': '', 'stderr': ''}
    run_shell_command("gpaddmirrors -a -i %s -s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrors with mirror spreading', res)
    self.assertEqual(0, res['rc'])
    gprecover.wait_till_insync_transition()

    # verify that with a filespace configured, the configuration matches the specified mirror_config_file
    self.verify_config_file_with_gp_config()
    self.check_mirror_seg()

    rows = self.format_sql_result(get_mirror_address)
    mirror_hosts_with_stdby = {}
    for row in rows:
        content = row[0]
        address = row[1]
        mirror_hosts_with_stdby[content] = address
    for key in mirror_hosts_wo_stdby:
        self.assertEqual(mirror_hosts_wo_stdby[key], mirror_hosts_with_stdby[key])

    res = {'rc': 0, 'stdout': '', 'stderr': ''}
    run_shell_command("gpinitstandby -ar", 'remove standby', res)
    if res['rc'] > 0:
        raise GPAddmirrorsTestCaseException("Failed to remove the standby")
    self._drop_filespace()
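
The test relies on self.format_sql_result, which is defined elsewhere in the test class and not shown in this excerpt. As a rough sketch of what such a helper could look like, assuming PSQL.run_sql_command returns psql's '-q -t' text output with '|' separated columns (an assumption, not the original implementation):

    def format_sql_result(self, sql):
        '''Run the query and return the rows as a list of lists of column values.'''
        out = PSQL.run_sql_command(sql, flags='-q -t', dbname='postgres')
        rows = []
        for line in out.splitlines():
            line = line.strip()
            if not line:
                continue
            rows.append([col.strip() for col in line.split('|')])
        return rows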
Example 3: SuspendCheckpointCrashRecovery
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import has_master_mirror [as alias]
# ......... part of the class is omitted here .........
# (the lines below continue a method whose beginning is not shown)
        ''' Not all SQLs hit the fault for this case, hence wait for them to complete and then for the others to hit the fault '''
        self.wait_till_all_sqls_done(6 + 1)
        trigger_count = 6
        fault_type = self.get_faults_before_executing_trigger_sqls(pass_num, cluster_state, test_type, ddl_type, aborting_create_needed=False)
        fault_hit = self.fileutil.check_fault_status(fault_name=fault_type, status="triggered", num_times_hit=trigger_count)
        if not fault_hit:
            raise Exception('Fault not hit expected number of times')
        self.stop_start_validate(cluster_state)

    def wait_till_all_sqls_done(self, count=1):
        ''' 500 here is just an arbitrarily long "if-we-exceed-this-then-something-is-wrong" timeout, in seconds '''
        for i in range(1, 500):
            psql_count = PSQL.run_sql_command("select count(*) from pg_stat_activity where current_query <> '<IDLE>'", flags='-q -t', dbname='postgres')
            if int(psql_count.strip()) <= count:
                return
            sleep(1)
        raise Exception('SQLs expected to complete but are still running')

    def stop_start_validate(self, cluster_state):
        ''' Do gpstop immediate, gpstart, and check that all segments come back up fine '''
        if cluster_state == 'sync':
            self.stop_db()
            self.switch_primary_mirror_role_in_utility_mode()
            tinctest.logger.info('Successfully switched roles of primaries and mirrors in gp_segment_configuration')
            self.start_db(down_segments=True)
            rc = self.gprecover.incremental()
            if not rc:
                raise Exception('Gprecoverseg failed')
            if not self.gprecover.wait_till_insync_transition():
                raise Exception('Segments not in sync')
        if cluster_state == 'change_tracking':
            self.stop_db()
            self.start_db(down_segments=True)
        if cluster_state == 'resync':
            # Resume the filerep_transition_to_sync_begin fault before stop-start
            self.fileutil.inject_fault(f='filerep_transition_to_sync_begin', y='resume', r='primary')
            self.stop_db()
            self.start_db()
            if not self.gprecover.wait_till_insync_transition():
                raise Exception('Segments not in sync')
        self.dbstate.check_catalog(alldb=False)

    def cluster_in_change_tracking(self):
        '''
        Put the cluster into change_tracking mode
        '''
        self.base.invoke_fault('filerep_consumer', 'fault', role='primary')
        self.fileutil.wait_till_change_tracking_transition()
        tinctest.logger.info('Change_tracking transition complete')

    def validate_system(self, cluster_state):
        # Validate the system's integrity
        if cluster_state == 'change_tracking':
            if not self.gprecover.incremental():
                raise Exception('Gprecoverseg failed')
            if not self.gprecover.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments recovered and back in sync')
        self.dbstate.check_mirrorintegrity()
        if self.config.has_master_mirror():
            self.dbstate.check_mirrorintegrity(master=True)

    def run_fault_injector_to_skip_checkpoint(self):
        tinctest.logger.info('Skip checkpointing using the fault injector.')
        self.fileutil.inject_fault(y='reset', f='checkpoint', r='primary', H='ALL', m='async', o='0', p=self.port)
        (ok, out) = self.fileutil.inject_fault(y='skip', f='checkpoint', r='primary', H='ALL', m='async', o='0', p=self.port)
        if not ok:
            raise Exception('Problem with injecting fault.')

    def backup_output_dir(self, test_dir, test_id):
        indir = local_path(test_dir)
        outdir = indir + '_' + test_id
        cmdstr = "cp -r " + indir + " " + outdir
        cmd = Command(name='run cp -r ', cmdStr=cmdstr)
        tinctest.logger.info("Taking a backup of SQL directory: %s" % cmd)
        try:
            cmd.run()
        except:
            self.fail("cp -r failed.")
        tinctest.logger.info("Test SQL directory backup done!")

    def do_post_run_checks(self):
        self.stop_start_validate('sync')
        rc = self.gprecover.incremental()
        if not rc:
            raise Exception('Gprecoverseg failed')
        self.gprecover.wait_till_insync_transition()
        tinctest.logger.info("Done going from resync to insync")
        self.dbstate.check_catalog(alldb=False)
        self.dbstate.check_mirrorintegrity()
        if self.config.has_master_mirror():
            self.dbstate.check_mirrorintegrity(master=True)
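
Both validate_system and do_post_run_checks end with the same guard: segment mirror integrity is always verified, and the master-mirror variant runs only when GPDBConfig.has_master_mirror() reports a standby master. A small helper, not part of the original tests, could factor that pattern out:

    def check_all_mirror_integrity(dbstate, config):
        '''Hypothetical helper: run the segment mirror integrity check, then the
        master-mirror check only when a standby master is configured.'''
        dbstate.check_mirrorintegrity()
        if config.has_master_mirror():
            dbstate.check_mirrorintegrity(master=True)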