本文整理汇总了Python中mpp.lib.config.GPDBConfig.is_balanced_segments方法的典型用法代码示例。如果您正苦于以下问题:Python GPDBConfig.is_balanced_segments方法的具体用法?Python GPDBConfig.is_balanced_segments怎么用?Python GPDBConfig.is_balanced_segments使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mpp.lib.config.GPDBConfig的用法示例。
在下文中一共展示了GPDBConfig.is_balanced_segments方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: GpRecover
# 需要导入模块: from mpp.lib.config import GPDBConfig [as 别名]
# 或者: from mpp.lib.config.GPDBConfig import is_balanced_segments [as 别名]
class GpRecover(GpRecoverseg):
    '''Class for gprecoverseg utility methods.'''

    # Maximum number of polls in wait_till_insync_transition.
    # 400 polls * 3 seconds each = 20 minutes.
    MAX_COUNTER = 400

    def __init__(self, config=None):
        '''
        @param config: optional GPDBConfig instance; a new one is
                       created when not supplied.
        '''
        if config is not None:
            self.config = config
        else:
            self.config = GPDBConfig()
        self.gphome = os.environ.get('GPHOME')

    def incremental(self, workerPool=False):
        '''Run an incremental gprecoverseg.

        @param workerPool: when True, run via the worker pool instead
                           of directly.
        @return: result of the underlying gprecoverseg run.
        '''
        tinctest.logger.info('Running Incremental gprecoverseg...')
        if workerPool:
            return self.run_using_workerpool()
        else:
            return self.run()

    def full(self):
        '''Run a full recovery (gprecoverseg -F).'''
        tinctest.logger.info('Running Full gprecoverseg...')
        return self.run(option = '-F')

    def rebalance(self):
        '''Run gprecoverseg -r to rebalance the cluster.'''
        tinctest.logger.info('Running gprecoverseg rebalance...')
        return self.run(option = '-r')

    def wait_till_insync_transition(self):
        '''
        Poll till all the segments transition to insync state.
        Number of trials set to MAX_COUNTER.

        @raise Exception: if segments are still not in sync after
                          MAX_COUNTER polls (~20 minutes).
        @return: True once all segments are synchronized.
        '''
        counter = 1
        while not self.config.is_not_insync_segments():
            if counter > self.MAX_COUNTER:
                raise Exception('Segments did not come insync after 20 minutes')
            else:
                counter = counter + 1
                time.sleep(3)  # Wait 3 secs before polling again
        tinctest.logger.info('Segments are synchronized ...')
        return True

    def recover_rebalance_segs(self):
        '''Recover down segments and rebalance the cluster back to its
        preferred roles, waiting for in-sync state after each step.

        @raise Exception: if recovery, rebalance, or either sync wait fails.
        '''
        if not self.config.is_balanced_segments():
            # recover
            # NOTE: fixed misspelled error messages below
            # ('Gprecvoerseg' -> 'gprecoverseg').
            if not self.incremental():
                raise Exception('gprecoverseg failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments recovered and back in sync')
            # rebalance
            if not self.rebalance():
                raise Exception('gprecoverseg -r failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments rebalanced and back in sync')
示例2: test_resync_ct_blocks_per_query
# 需要导入模块: from mpp.lib.config import GPDBConfig [as 别名]
# 或者: from mpp.lib.config.GPDBConfig import is_balanced_segments [as 别名]
def test_resync_ct_blocks_per_query(self):
    '''Catch a bug in resync that manifests only after rebalance.
    The logic used by a resync worker to obtain changed blocks
    from CT log had a bug. The SQL query used to obtain a batch
    of changed blocks from CT log was incorrectly using LSN to
    filter out changed blocks. All of the following must be true
    for the bug to occur:

    * More than gp_filerep_ct_batch_size blocks of a relation
      are changed on a segment in changetracking.

    * A block with a higher number is changed earlier (lower
      LSN) than lower numbered blocks.

    * The first batch of changed blocks obtained by resync worker
      from CT log for this relation contains only lower
      (according to block number) blocks.  The higher block with
      lower LSN is not included in this batch.  Another query
      must be run against CT log to obtain this block.

    * The SQL query used to obtain next batch of changed blocks
      for this relation contains incorrect WHERE clause involving
      a filter based on LSN of previously obtained blocks.  The
      higher numbered block is missed out - not returned by the
      query as changed block for the relation.  The block is
      never shipped from primary to mirror, resulting in data
      loss.  The test aims to verify that this doesn't happen as
      the bug is now fixed.
    '''
    config = GPDBConfig()
    # Precondition: the cluster must start in-sync and balanced.
    assert (config.is_not_insync_segments() and
            config.is_balanced_segments()), 'cluster not in-sync and balanced'
    # Create table and insert data so that adequate number of
    # blocks are occupied.
    self.run_sql('resync_bug_setup')
    # Bring down primaries and transition mirrors to
    # changetracking.
    filerep = Filerepe2e_Util()
    filerep.inject_fault(y='fault', f='segment_probe_response',
                         r='primary')
    # Trigger the fault by running a sql file.
    PSQL.run_sql_file(local_path('test_ddl.sql'))
    filerep.wait_till_change_tracking_transition()
    # Set gp_filerep_ct_batch_size = 3 so that a small number of
    # changed blocks is enough to require multiple CT-log batches.
    cmd = Command('reduce resync batch size',
                  'gpconfig -c gp_filerep_ct_batch_size -v 3')
    cmd.run()
    assert cmd.get_results().rc == 0, 'gpconfig failed'
    cmd = Command('load updated config', 'gpstop -au')
    cmd.run()
    assert cmd.get_results().rc == 0, '"gpstop -au" failed'
    self.run_sql('change_blocks_in_ct')
    # Capture change tracking log contents from the segment of
    # interest for debugging, in case the test fails.
    (host, port) = config.get_hostandport_of_segment(0, 'p')
    # Bind sql_file before the assert: the original referenced an
    # undefined name in the assert message, which would raise
    # NameError instead of reporting the real failure.
    sql_file = local_path('sql/ct_log_contents.sql')
    assert PSQL.run_sql_file_utility_mode(
        sql_file=sql_file,
        out_file=local_path('output/ct_log_contents.out'),
        host=host, port=port), sql_file
    gprecover = GpRecover(config)
    gprecover.incremental(False)
    gprecover.wait_till_insync_transition()
    # Rebalance, so that original primary is back in the role.
    gprecover = GpRecover(config)
    gprecover.rebalance()
    gprecover.wait_till_insync_transition()
    # Reset gp_filerep_ct_batch_size back to its default.
    cmd = Command('reset resync batch size',
                  'gpconfig -r gp_filerep_ct_batch_size')
    cmd.run()
    assert cmd.get_results().rc == 0, 'gpconfig failed'
    cmd = Command('load updated config', 'gpstop -au')
    cmd.run()
    assert cmd.get_results().rc == 0, '"gpstop -au" failed'
    # Verify no data loss on the rebalanced cluster.
    self.run_sql('select_after_rebalance')