This page collects typical usage examples of the Python method mpp.lib.config.GPDBConfig.is_not_insync_segments. If you are wondering what GPDBConfig.is_not_insync_segments does, how to call it, or what real code that uses it looks like, the examples below may help. You can also read further about its containing class, mpp.lib.config.GPDBConfig.
Below are 14 code examples of the GPDBConfig.is_not_insync_segments method, sorted by popularity by default.
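Before the examples, here is a minimal sketch of the polling pattern most of them share, assuming a tinctest-style test environment where GPDBConfig and tinctest.logger are available. Note that, despite its name, is_not_insync_segments() appears to return True when no segments are out of sync (compare Example 3 and Example 11 below). The helper name poll_until_insync and its max_tries/delay_secs parameters are hypothetical and introduced only for illustration.

import time

import tinctest
from mpp.lib.config import GPDBConfig


def poll_until_insync(max_tries=20, delay_secs=10):
    """Hypothetical helper: poll until every segment reports in-sync.

    Despite the method name, the examples below treat a True return from
    is_not_insync_segments() as "all segments are in sync".
    """
    config = GPDBConfig()
    for attempt in range(max_tries):
        if config.is_not_insync_segments():
            tinctest.logger.info('All segments are in sync')
            return True
        tinctest.logger.info("Waiting [%s] for DB to recover" % attempt)
        time.sleep(delay_secs)
    return False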
Example 1: GpRecover
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
class GpRecover(GpRecoverseg):
    '''Class for gprecoverseg utility methods '''

    MAX_COUNTER = 400

    def __init__(self, config=None):
        if config is not None:
            self.config = config
        else:
            self.config = GPDBConfig()
        self.gphome = os.environ.get('GPHOME')

    def incremental(self, workerPool=False):
        '''Incremental Recoverseg '''
        tinctest.logger.info('Running Incremental gprecoverseg...')
        if workerPool:
            return self.run_using_workerpool()
        else:
            return self.run()

    def full(self):
        '''Full Recoverseg '''
        tinctest.logger.info('Running Full gprecoverseg...')
        return self.run(option='-F')

    def rebalance(self):
        '''Run gprecoverseg to rebalance the cluster '''
        tinctest.logger.info('Running gprecoverseg rebalance...')
        return self.run(option='-r')

    def wait_till_insync_transition(self):
        '''
        Poll till all the segments transition to insync state.
        Number of trials set to MAX_COUNTER
        '''
        counter = 1
        while not self.config.is_not_insync_segments():
            if counter > self.MAX_COUNTER:
                raise Exception('Segments did not come insync after 20 minutes')
            else:
                counter = counter + 1
                time.sleep(3)  # Wait 3 secs before polling again
        tinctest.logger.info('Segments are synchronized ...')
        return True

    def recover_rebalance_segs(self):
        if not self.config.is_balanced_segments():
            # recover
            if not self.incremental():
                raise Exception('Gprecoverseg failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments recovered and back in sync')

            # rebalance
            if not self.rebalance():
                raise Exception('Gprecoverseg -r failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments rebalanced and back in sync')
Example 2: setUpFilespaceForCTAS
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def setUpFilespaceForCTAS(isForHawq):
    config = GPDBConfig()
    if isForHawq:
        filespace = HAWQGpfilespace()
    else:
        filespace = Gpfilespace()
    if config.is_not_insync_segments():
        filespace.create_filespace('tincrepo_qp_ddl_ctas')
Example 3: is_changetracking
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def is_changetracking(self):
    """
    @summary: return true if system is in change tracking mode
    @return: Boolean value representing whether the cluster is in sync or not
    """
    config = GPDBConfig()
    return not config.is_not_insync_segments()
Example 4: test_do_full_recovery
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def test_do_full_recovery(self):
    """
    [feature]: Performs Full Recovery
    """
    config = GPDBConfig()
    recoverseg = GpRecoverseg()
    tinctest.logger.info('Running Full gprecoverseg...')
    recoverseg.run(option='-F')
    rtrycnt = 0
    while not config.is_not_insync_segments():
        tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
        rtrycnt = rtrycnt + 1
Example 5: rebalance_cluster
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def rebalance_cluster(self):
    config = GPDBConfig()
    self.run_recovery('r')
    rtrycnt = 0
    while config.is_not_insync_segments() == False and rtrycnt <= 5:
        tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
        sleep(10)
        rtrycnt = rtrycnt + 1
    # Many times it has been observed that gprecoverseg -ar marks segments down
    if config.is_not_insync_segments():
        return True
    else:
        self.run_recovery()
        rtrycnt = 0
        max_rtrycnt = 10
        while config.is_not_insync_segments() == False and rtrycnt < max_rtrycnt:
            tinctest.logger.info("waiting [%s] for DB to recover" % rtrycnt)
            sleep(10)
            rtrycnt = rtrycnt + 1
        if rtrycnt < max_rtrycnt:
            return True
        else:
            tinctest.logger.error("Segments not up after incremental recovery!!")
            return False
Example 6: test_full_recovery_skip_persistent_tables_check
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def test_full_recovery_skip_persistent_tables_check(self):
    """
    [feature]: Run recoverseg with persistent tables check option
    """
    config = GPDBConfig()
    recoverseg = GpRecoverseg()
    tinctest.logger.info('Running gprecoverseg...')
    recoverseg.run(option='-F')
    self.assertNotIn('Performing persistent table check', recoverseg.stdout)
    rtrycnt = 0
    while not config.is_not_insync_segments():
        tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
        rtrycnt = rtrycnt + 1
Example 7: setUpClass
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def setUpClass(cls):
    super(GPFilespaceTablespaceTest, cls).setUpClass()
    tinctest.logger.info("*** Running the pre-requisite sql files drop.sql and setup.sql")
    PSQL.run_sql_file(local_path('sqls/setup/drop.sql'))
    # separating dropping of filespaces
    PSQL.run_sql_file(local_path('sqls/setup/drop_filespaces.sql'))
    PSQL.run_sql_file(local_path('sqls/setup/create.sql'))
    tinctest.logger.info("Starting the Filespace Tablespace test.. ")
    config = GPDBConfig()
    filespace = Gpfilespace()
    filespace_name = 'cdbfast_fs_'
    if config.is_not_insync_segments():
        tinctest.logger.info("***** Creating filespaces...")
        filespace.create_filespace(filespace_name + 'sch1')
        filespace.create_filespace(filespace_name + 'sch2')
        filespace.create_filespace(filespace_name + 'sch3')
Example 8: test_invalid_state_recoverseg
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def test_invalid_state_recoverseg(self):
    """
    [feature]: Sets the ENV_VAR and runs the incremental recoverseg
    """
    # setting the ENV_VAR
    os.environ[ENV_VAR] = '1'
    recoverseg = GpRecoverseg()
    config = GPDBConfig()
    tinctest.logger.info('Running Incremental gprecoverseg...')
    recoverseg.run()
    rtrycnt = 0
    while not config.is_not_insync_segments():
        tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
        rtrycnt = rtrycnt + 1
Example 9: test_recovery_with_new_loc
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def test_recovery_with_new_loc(self):
    """
    [feature]: Performs recovery by creating a configuration file with new segment locations
    """
    newfault = Fault()
    config = GPDBConfig()
    hosts = newfault.get_segment_host()
    newfault.create_new_loc_config(hosts, orig_filename='recovery.conf', new_filename='recovery_new.conf')
    if not newfault.run_recovery_with_config(filename='recovery_new.conf'):
        self.fail("*** Incremental recovery with config file recovery_new.conf failed")
    rtrycnt = 0
    while not config.is_not_insync_segments():
        tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
        rtrycnt = rtrycnt + 1
Example 10: recover_segments
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def recover_segments(self, option, max_rtrycnt):
    """
    @summary: Recovers the segments and returns the status of recovery process.
    @param option: represents different gprecoverseg command options
    @param max_rtrycnt: the max no. of times state of cluster should be checked
    @return: Boolean value representing the status of recovery process
    """
    config = GPDBConfig()
    recoverseg = GpRecoverseg()
    tinctest.logger.info("Running gprecoverseg with '%s' option..." % option)
    recoverseg.run(option)
    rtrycnt = 0
    while config.is_not_insync_segments() == False and rtrycnt <= max_rtrycnt:
        tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
        sleep(10)
        rtrycnt = rtrycnt + 1
    if rtrycnt > max_rtrycnt:
        return False
    else:
        return True
Example 11: check_cluster_health
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def check_cluster_health(self, doFullRecovery=False):
    """
    @summary: Checks for the cluster health, tries to recover and rebalance the cluster,
              fails the test if not able to do so
    @param doFullRecovery: Boolean value which decides whether to go for full
                           recovery or not
    @return: None
    """
    tinctest.logger.info("***** Checking the cluster health before starting tests")
    config = GPDBConfig()
    # If the segments are not up, go for recovery
    if not config.is_not_insync_segments():
        tinctest.logger.info("***** Starting the recovery process")
        # if incremental didn't work, go for full recovery
        if not self.recover_segments(' ', 10):
            tinctest.logger.warn("***** Segments not recovered after incremental recovery")
            if doFullRecovery:
                # if full also fails, the tests cannot proceed, so fail it
                if not self.recover_segments('-F', 20):
                    tinctest.logger.error("***** Segments not recovered even after full recovery - Tests cannot proceed further!!")
                    self.fail("Segments are down - Tests cannot proceed further!!")
                # if full recovery passes, check for rebalancing the cluster
                else:
                    tinctest.logger.info("***** Segments up after full recovery : validating their roles...")
                    self.check_segment_roles()
            else:
                self.fail("Segments are down - Tests cannot proceed!!")
        # if incremental recovery passes, check for rebalancing the cluster
        else:
            tinctest.logger.info("***** Segments up after incremental recovery : validating their roles...")
            self.check_segment_roles()
    # If the segments are up, check for rebalancing the cluster
    else:
        tinctest.logger.info("***** Segments are up : validating their roles...")
        self.check_segment_roles()
Example 12: OODClass
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
class OODClass(MPPTestCase):

    def __init__(self, methodName):
        self.gp = GpactivateStandby()
        self.verify = StandbyVerify()
        self.config = GPDBConfig()
        self.disk = Disk()
        self.sdby_mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.pgport = os.environ.get('PGPORT')
        super(OODClass, self).__init__(methodName)

    def initiate_standby(self):
        self.gp.create_standby(local='no')

    def check_standby(self):
        self.assertFalse(self.verify.check_standby_processes())

    def get_standby_dbid(self):
        std_sql = "select dbid from gp_segment_configuration where content='-1' and role='m';"
        standby_dbid = PSQL.run_sql_command(std_sql, flags='-q -t', dbname='template1')
        return standby_dbid.strip()

    def restart_standby(self):
        sdby_host = self.config.get_master_standbyhost()
        stdby_dbid = self.get_standby_dbid()
        cmd = "pg_ctl -D %s -o '-p %s --gp_dbid=%s --gp_num_contents_in_cluster=2 --silent-mode=true -i -M master --gp_contentid=-1 -x 0 -E' start &" % (self.sdby_mdd, self.pgport, stdby_dbid)
        self.assertTrue(self.gp.run_remote(sdby_host, cmd, self.pgport, self.sdby_mdd))
        self.assertTrue(self.verify.check_standby_processes())

    def check_diskusage(self, host):  # This now checks for only /data
        (rc, result) = self.disk.get_disk_usage(host, '/data')
        if rc != 0:
            raise Exception("The specified mount /data is not present for the device")
        else:
            available_usage = result
            return available_usage

    def _fill(self, filename, host):
        cmd_prefix = "ssh " + host + " \""
        cmd_postfix = "\""
        location = '/data'
        if not os.path.isdir('%s/diskfill/' % location):
            os.makedirs('%s/diskfill/' % location)
        cmd_str = cmd_prefix + "dd if=/dev/zero bs=16384K count=2000 of=" + location + "/diskfill/" + filename + cmd_postfix
        cmd = Command(name='Fill Disk', cmdStr=cmd_str)
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            tinctest.logger.error("disk fill not working. It's already full")

    def filldisk(self):
        host = self.config.get_master_standbyhost()
        disk_usage = self.check_diskusage(host)
        i = 0
        while int(disk_usage.strip()) > 1000000:
            filename = 'new_space_%s' % i
            self._fill(filename, host)
            i += 1
            disk_usage = self.check_diskusage(host)

    def remove_fillfiles(self, filename, host):
        location = '/data'
        cmd_str = "ssh %s rm %s/diskfill/%s*" % (host, location, filename)
        cmd = Command(name='Remove fill files', cmdStr=cmd_str)
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            raise Exception('Unable to delete the fill files')
        return

    def cleanup(self):
        host = self.config.get_master_standbyhost()
        self.remove_fillfiles('new_space', host)
        # Recover segments in case segments and standby were on the same host
        cmd = Command(name='gprecoverseg', cmdStr='gprecoverseg -a')
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            raise Exception('gprecoverseg failed')
        while self.config.is_not_insync_segments() == False:
            tinctest.logger.info('Waiting for DB to be in sync')
Example 13: test_resync_ct_blocks_per_query
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
def test_resync_ct_blocks_per_query(self):
    '''Catch a bug in resync that manifests only after rebalance.

    The logic used by a resync worker to obtain changed blocks
    from the CT log had a bug. The SQL query used to obtain a batch
    of changed blocks from the CT log was incorrectly using LSN to
    filter out changed blocks. All of the following must be true
    for the bug to occur:

     * More than gp_filerep_ct_batch_size blocks of a relation
       are changed on a segment in changetracking.
     * A block with a higher number is changed earlier (lower
       LSN) than lower numbered blocks.
     * The first batch of changed blocks obtained by the resync worker
       from the CT log for this relation contains only the lower
       (according to block number) blocks. The higher block with
       lower LSN is not included in this batch. Another query
       must be run against the CT log to obtain this block.
     * The SQL query used to obtain the next batch of changed blocks
       for this relation contains an incorrect WHERE clause involving
       a filter based on the LSN of previously obtained blocks. The
       higher numbered block is missed out - not returned by the
       query as a changed block for the relation. The block is
       never shipped from primary to mirror, resulting in data
       loss. The test aims to verify that this doesn't happen as
       the bug is now fixed.
    '''
    config = GPDBConfig()
    assert (config.is_not_insync_segments() &
            config.is_balanced_segments()), 'cluster not in-sync and balanced'

    # Create table and insert data so that an adequate number of
    # blocks is occupied.
    self.run_sql('resync_bug_setup')
    # Bring down primaries and transition mirrors to
    # changetracking.
    filerep = Filerepe2e_Util()
    filerep.inject_fault(y='fault', f='segment_probe_response',
                         r='primary')
    # Trigger the fault by running a sql file.
    PSQL.run_sql_file(local_path('test_ddl.sql'))
    filerep.wait_till_change_tracking_transition()

    # Set gp_filerep_ct_batch_size = 3.
    cmd = Command('reduce resync batch size',
                  'gpconfig -c gp_filerep_ct_batch_size -v 3')
    cmd.run()
    assert cmd.get_results().rc == 0, 'gpconfig failed'
    cmd = Command('load updated config', 'gpstop -au')
    cmd.run()
    assert cmd.get_results().rc == 0, '"gpstop -au" failed'

    self.run_sql('change_blocks_in_ct')

    # Capture change tracking log contents from the segment of
    # interest for debugging, in case the test fails.
    (host, port) = GPDBConfig().get_hostandport_of_segment(0, 'p')
    assert PSQL.run_sql_file_utility_mode(
        sql_file=local_path('sql/ct_log_contents.sql'),
        out_file=local_path('output/ct_log_contents.out'),
        host=host, port=port), 'ct_log_contents.sql failed'

    gprecover = GpRecover(GPDBConfig())
    gprecover.incremental(False)
    gprecover.wait_till_insync_transition()

    # Rebalance, so that the original primary is back in its role.
    gprecover = GpRecover(GPDBConfig())
    gprecover.rebalance()
    gprecover.wait_till_insync_transition()

    # Reset gp_filerep_ct_batch_size.
    cmd = Command('reset resync batch size',
                  'gpconfig -r gp_filerep_ct_batch_size')
    cmd.run()
    assert cmd.get_results().rc == 0, 'gpconfig failed'
    cmd = Command('load updated config', 'gpstop -au')
    cmd.run()
    assert cmd.get_results().rc == 0, '"gpstop -au" failed'

    self.run_sql('select_after_rebalance')
Example 14: FtsTransitions
# Required import: from mpp.lib.config import GPDBConfig [as alias]
# Or: from mpp.lib.config.GPDBConfig import is_not_insync_segments [as alias]
# ......... part of the code is omitted here .........
def set_faults(self, fault_name, type, role='mirror', port=None, occurence=None, sleeptime=None, seg_id=None):
    ''' Reset the fault and then issue the fault with the given type'''
    self.fileutil.inject_fault(f=fault_name, y=type, r=role, p=port, o=occurence, sleeptime=sleeptime, seg_id=seg_id)

def resume_faults(self, fault_name, role='mirror'):
    ''' Resume the fault issues '''
    self.fileutil.inject_fault(f=fault_name, y='resume', r=role)

def run_validation(self):
    tinctest.logger.info('Verify the integrity between primary and mirror ...')
    self.dbstate = DbStateClass('run_validation')
    self.dbstate.check_mirrorintegrity()

def incremental_recoverseg(self, workerPool=False):
    self.gprecover.incremental(workerPool)

def run_recoverseg_if_ct(self):
    num_down = self.gpconfig.count_of_nodes_in_mode('c')
    if int(num_down) > 0:
        self.incremental_recoverseg()

def wait_till_change_tracking(self):
    self.fileutil.wait_till_change_tracking_transition()

def wait_till_insync(self):
    self.gprecover.wait_till_insync_transition()

def run_gpstate(self, type, phase):
    self.gpstate.run_gpstate(type, phase)

def run_gpprimarymirror(self):
    self.gpprimarymirror.run_gpprimarymirror()

def verify_gpprimarymirror_output(self, total_resync=0, cur_resync=0):
    status = self.gpprimarymirror.verify_gpprimarymirror_output(total_resync, cur_resync)
    self.assertTrue(status, 'Total and Cur resync object count mismatch')

def run_gpstate_shell_cmd(self, options):
    self.gpstate.run_gpstate_shell_cmd(options)

def verify_gpstate_output(self):
    status = self.gpstate.verify_gpstate_output()
    self.assertTrue(status, 'Total and Cur resync object count mismatch')

def run_trigger_sql(self):
    ''' Run a sql statement to trigger postmaster reset '''
    PSQL.run_sql_file(local_path('test_ddl.sql'))

def run_fts_test_ddl_dml(self):
    PSQL.run_sql_file(local_path('fts_test_ddl_dml.sql'))

def run_fts_test_ddl_dml_before_ct(self):
    PSQL.run_sql_file(local_path('fts_test_ddl_dml_before_ct.sql'))

def run_fts_test_ddl_dml_ct(self):
    PSQL.run_sql_file(local_path('fts_test_ddl_dml_ct.sql'))

def run_sql_in_background(self):
    PSQL.run_sql_command('drop table if exists bar; create table bar(i int);', background=True)

def sleep_for_transition(self):
    # gp_segment_connect_timeout is set to 10s; still need a little more time than that
    # to complete the transition to changetracking
    sleep(100)

def restart_db(self):
    self.base.stop_db()
    self.base.start_db()

def stop_db_with_no_rc_check(self):
    ''' Gpstop and don't check for rc '''
    cmd = Command('Gpstop_a', 'gpstop -a')
    tinctest.logger.info('Executing command: gpstop -a')
    cmd.run()

def start_db_with_no_rc_check(self):
    ''' Gpstart and don't check for rc '''
    cmd = Command('Gpstart_a', 'gpstart -a')
    tinctest.logger.info('Executing command: gpstart -a')
    cmd.run()

def restart_db_with_no_rc_check(self):
    self.stop_db_with_no_rc_check()
    self.start_db_with_no_rc_check()

def set_gpconfig(self, param, value):
    ''' Set the configuration parameter using gpconfig '''
    command = "gpconfig -c %s -v %s --skipvalidation " % (param, value)
    run_shell_command(command)
    self.restart_db()

def check_db(self):
    checkDBUp()

def check_fault_status(self, fault_name, seg_id=None, role=None):
    status = self.fileutil.check_fault_status(fault_name=fault_name, status='triggered', max_cycle=20, role=role, seg_id=seg_id)
    self.assertTrue(status, 'The fault is not triggered in the time expected')

def cluster_state(self):
    state = self.gpconfig.is_not_insync_segments()
    self.assertTrue(state, 'The cluster is not up and in sync')